seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31920261178 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 4 14:59:16 2020
@author: Eric Chen, Graduate Center, City University of New York
@contact: eric.clyang521@gmail.com
"""
import os
os.chdir("/Users/eric/Desktop/paper_figure/solvation_free_energy/Tip3p_bulk")
import numpy as np
from gridData import Grid
TSsix=0.125*Grid("gist-dTSsix-dens.dx").grid
TSsix.shape
# Parse the GIST output table (two header rows skipped); column 4 holds the
# per-voxel water population, normalised here by the 50000 simulation frames.
file = open("gist-output.dat").readlines()[2:]
pop_grid = np.zeros((56, 56, 55))
# Voxel coordinates enumerated in the same row-major (x, y, z) order as the
# rows of the GIST output file.
position = [(x, y, z) for x in range(56) for y in range(56) for z in range(55)]
for voxel, line in enumerate(file):
    columns = line.split()
    pop_grid[position[voxel]] = float(columns[4]) / 50000
np.sum(pop_grid) # water number of GIST grid
# Bare expressions below are REPL-style inspection leftovers: when run as a
# script their values are computed and discarded.
# Interior voxels only (one-voxel boundary layer excluded).
np.sum(pop_grid[1:55,1:55,1:54])
np.sum(TSsix)
import matplotlib.pyplot as plt
# Histogram the non-zero TSsix entropy voxels and save the figure.
non_zero=[i for i in list(TSsix.ravel()) if i != 0]
plt.hist(non_zero,bins=100)
plt.ylabel("Frequency")
plt.xlabel("TSsw of the voxel in Kcal/mol")
plt.savefig("TSsix_bulk.png", dpi=400)
plt.show()
| EricChen521/small_mol_project_script | bulk_TSsix.py | bulk_TSsix.py | py | 1,109 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.chdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gridData.Grid",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 47,... |
73944536672 | import neurals.data_generation_config as dgc
import neurals.distancefield_utils as df
import model.param as model_param
import copy
import numpy as np
import open3d as o3d
import os
from scipy.spatial.transform import Rotation as scipy_rot
import torch.utils.data
# Each file correspond to a pointcloud
# Each file correspond to a pointcloud
class SmallDataset(torch.utils.data.dataset.Dataset):
    """Grasp dataset pairing 1024-point object point clouds with fingertip grasps.

    Loads point clouds (and optionally per-pose scales) from ``data/<seed_folder>/``
    plus positive grasp arrays and, if requested, negative ("bad") grasp arrays.
    Each dataset item is one grasp, mapped back to its source point cloud.
    """

    def __init__(self, seed_folder="seeds",
                 point_clouds=["pose_0_pcd", "pose_1_pcd", "pose_2_pcd", "pose_3_pcd", "pose_4_pcd"],
                 has_negative_grasp=False,
                 use_scale = False):
        # NOTE(review): mutable default argument (list); safe only because it is never mutated.
        self.point_clouds = []
        self.point_normals = []
        pose_index = []
        for point_cloud in point_clouds:
            # Pose id parsed from characters 5:7 of the file stem.
            # NOTE(review): for the default names, "pose_0_pcd"[5:7] == "0_" and
            # int("0_") raises ValueError -- confirm the real naming scheme
            # (two-digit ids such as "pose_00_pcd" would parse fine).
            pose_index.append(int(point_cloud[5:7]))
            pcd = o3d.io.read_point_cloud(f"data/{seed_folder}/pointclouds/{point_cloud}.ply")
            pcd_np = np.asarray(pcd.points)
            normal_np = np.asarray(pcd.normals)
            if pcd_np.shape[0] < 1024:
                # Every cloud must supply at least 1024 points for subsampling.
                print(pcd_np.shape[0],point_cloud)
                assert(False)
            # Random (unseeded) subsample of exactly 1024 points per cloud.
            idx = np.random.choice(pcd_np.shape[0], 1024, replace=False)
            self.point_clouds.append(pcd_np[idx])
            self.point_normals.append(normal_np[idx])
        self.scales = []
        if os.path.isdir(f"data/{seed_folder}/scales") and use_scale:
            for idx in pose_index:
                scale = np.load(f"data/{seed_folder}/scales/pose_{idx:02}.npy")
                self.scales.append(scale)
        self.positive_grasps = []
        self.positive_pcd_mapping = []
        positive_grasp_files = os.listdir(f"data/{seed_folder}/grasps/")
        positive_grasp_files.sort(key=lambda x: int(x[5:7]))
        positive_files = []
        for positive_grasp_file in positive_grasp_files:
            # NOTE(review): membership reads one digit (x[5]) while the sort key
            # above reads two (x[5:7]) -- inconsistent for pose ids >= 10.
            if int(positive_grasp_file[5]) in pose_index:
                positive_files.append(positive_grasp_file)
        for i,positive_grasp_file in enumerate(positive_files):
            # Keep only the xyz fingertip positions (drop extra channels).
            self.positive_grasps.append(np.load(f"data/{seed_folder}/grasps/{positive_grasp_file}")[:,:,:3])
            # One mapping entry per grasp, pointing back to the i-th cloud.
            self.positive_pcd_mapping += [i] * len(self.positive_grasps[-1])
        if has_negative_grasp:
            self.negative_grasps = []
            self.negative_pcd_mapping = []
            negative_grasp_files = os.listdir(f"data/{seed_folder}/bad_grasps/")
            negative_grasp_files.sort(key=lambda x: int(x[5:7]))
            negative_files = []
            for negative_grasp_file in negative_grasp_files:
                if int(negative_grasp_file[5]) in pose_index:
                    negative_files.append(negative_grasp_file)
            for i, negative_grasp_file in enumerate(negative_files):
                self.negative_grasps.append(np.load(f"data/{seed_folder}/bad_grasps/{negative_grasp_file}")[:,:,:3])
                self.negative_pcd_mapping += [i] * len(self.negative_grasps[-1])
        if has_negative_grasp:
            # Positives first, then negatives; labels mark good (1) / bad (0).
            self.grasps = np.concatenate(self.positive_grasps+self.negative_grasps, axis=0)
            self.pcd_mapping = np.array(self.positive_pcd_mapping+self.negative_pcd_mapping)
            self.labels = np.array([1]*len(self.positive_pcd_mapping)+[0]*len(self.negative_pcd_mapping))
        else:
            self.grasps = np.concatenate(self.positive_grasps, axis=0)
            self.pcd_mapping = np.array(self.positive_pcd_mapping)
            self.labels = np.array([1]*len(self.grasps))
        # Flatten each grasp into a single feature vector (n_fingers * 3,).
        self.grasps = self.grasps.reshape(len(self.grasps),-1)
        print(self.grasps.shape)
        self.point_clouds = np.asarray(self.point_clouds)

    def __len__(self):
        # One sample per grasp (not per point cloud).
        return len(self.pcd_mapping)

    def __getitem__(self, idx):
        ans = {}
        ans["point_cloud"] = self.point_clouds[self.pcd_mapping[idx]]
        ans["point_normals"] = self.point_normals[self.pcd_mapping[idx]]
        ans["fingertip_pos"] = self.grasps[idx]
        ans["intrinsic_score"] = self.labels[idx] # Good or bad pointclouds
        ans["label"] = self.pcd_mapping[idx]
        if self.scales != []:
            ans["scale"] = self.scales[self.pcd_mapping[idx]]
        return ans
# Dataset for score function, add noise tensor in order to prevent overfitting
class ScoreDataset(torch.utils.data.dataset.Dataset):
    """
    Remark: To avoid overfitting between each round, we need to recreate dataset after each
    optimization iteration.
    """
    def __init__(self, score_file="score_data", noise_scale = 0.0, has_distance_field=False):
        """Load scores, conditions and (noise-jittered) point clouds from an .npz archive.

        :param score_file: stem of the archive under data/score_function_data/
        :param noise_scale: std-dev of Gaussian jitter added per point (0.0 = no noise)
        :param has_distance_field: if True, precompute a distance field per cloud
        """
        self.noise_scale = noise_scale
        self.has_distance_field = has_distance_field
        data = np.load(f"data/score_function_data/{score_file}.npz")
        self.scores = data["scores"]
        # Jitter the clouds once at load time to regularise training.
        # Bug fix: a stray second assignment previously overwrote this noisy
        # copy with the clean clouds, silently disabling noise_scale.
        self.point_clouds = data["point_clouds"] + np.random.normal(size=data["point_clouds"].shape,
                                                                    scale=self.noise_scale)
        self.conditions = data["conditions"]
        if has_distance_field:
            # Need to create different environments for computing distance field
            self.point_cloud_labels = data["point_cloud_labels"]
            self.envs = [env() for env in df.env_lists]
            self.load_env_configs()
            self.point_cloud_dfs = []
            for i, pcd in enumerate(self.point_clouds):
                self.point_cloud_dfs.append(self.compute_distance_field(pcd, int(self.point_cloud_labels[i])))
            self.point_cloud_dfs = np.asarray(self.point_cloud_dfs)

    def load_env_configs(self):
        """Read per-pointcloud environment id, translation and rotation from disk."""
        self.pcd_to_env = []
        self.pcd_pos = []
        self.pcd_rotation = []
        filelist = os.listdir("data/seeds_scale/obj_pose")
        filelist.sort(key = lambda x: int(x[5]))
        for file in filelist:
            z = np.load(f"data/seeds_scale/obj_pose/{file}")
            self.pcd_to_env.append(int(z['env_id']))
            self.pcd_pos.append(z['trans'])
            self.pcd_rotation.append(z['rot'])

    def __len__(self):
        return len(self.scores)

    def compute_distance_field(self, pcd, pcd_id):
        """
        Assume pcd is a numpy array. Transforms the cloud into its environment
        frame and returns the per-point distances as a numpy array.
        """
        pcd_o3d = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(pcd))
        env_id = self.pcd_to_env[pcd_id]
        dist_env = self.envs[env_id]
        pcd_o3d.rotate(self.pcd_rotation[pcd_id])
        pcd_o3d.translate(self.pcd_pos[pcd_id])
        points = np.asarray(pcd_o3d.points)
        dist_field = dist_env.get_points_distance(points)
        return dist_field.numpy()

    def __getitem__(self, idx):
        ans = {}
        ans["point_cloud"] = self.point_clouds[idx]
        ans["condition"] = self.conditions[idx]
        ans["score"] = self.scores[idx]
        if self.has_distance_field:
            ans["point_cloud_df"] = self.point_cloud_dfs[idx]
        return ans
| Ericcsr/synthesize_pregrasp | neurals/dataset.py | dataset.py | py | 6,968 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "torch.utils.data.utils",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "open3d.io.read_point_cloud",
"line_number": 23,
"usage_type": "call"
},
{
"api_name"... |
36694454341 | from time import time
import pennylane as qml
from scipy.optimize import minimize
import networkx as nx
from .unitaries import (
hypercube_mixer,
phase_shift,
circulant_mixer,
diagonal_pauli_decompose,
complete_eigenvalues,
)
def gen_graph(qubits, seed, prob = 0.25):
    """Return a random G(n, p) graph on `qubits` vertices with at least one edge.

    When `seed` is an int the generation is reproducible: the seed is bumped
    by one on every retry until a non-empty graph appears.
    """
    # for smaller graphs, increase the probability of edge generation
    #prob = prob if prob * (qubits**2) > 1 else 2.0/qubits**2
    current_seed = seed
    while True:
        if type(current_seed) is int:
            G = nx.fast_gnp_random_graph(qubits, p=prob, seed=current_seed, directed=False)
            current_seed += 1
        else:
            G = nx.fast_gnp_random_graph(qubits, p=prob, directed=False)
        # ensure that there is at least one edge
        if G.number_of_edges() > 0:
            return G
def maxcut_qualities(G, wires):
    """Build the maxcut cost function as a Pauli string.

    Parameters
    ----------
    G : networkx graph
        graph for which the maxcut problem is to be solved
    wires : list or range
        number of wires (qubits), must cover the number of graph vertices

    Returns
    -------
    Hamiltonian
        Pauli-string representation of the maxcut cost-function.
    """
    wire_list = list(wires)
    cost = 0
    # Each edge (i, j) contributes (I - Z_i Z_j); the -0.5 prefactor turns the
    # accumulated edge terms into the usual maxcut objective.
    for i, j in G.edges:
        cost = cost + (qml.Identity(wire_list) - qml.PauliZ(wires=i) @ qml.PauliZ(wires=j))
    return -0.5 * cost
def qaoa_hypercube_maxcut_evolution(device, depth, n_expvals, seed):
    """State-evolution benchmark function for the QAOA with a hypercube (transverse-field) mixing operator.

    State evolution based on Hamiltonian simulation.

    Parameters
    ----------
    device :
        an initialised Pennylane device
    depth : int
        ansatz circuit depth
    n_expvals : int
        number of times to compute the state evolution and return the
        expectation value with random variational parameters
    seed : int or None
        seed for graph generation and the variational parameters

    Returns
    -------
    (float, float, int, int, int)
        last expectation value, wall time for the n_expvals evaluations,
        circuit depth, and the 1- and 2-qubit gate counts
    """
    qubits = len(device.wires)
    G = gen_graph(qubits, seed)
    wires = range(qubits)
    qualities_H = maxcut_qualities(G, wires)

    @qml.qnode(device)
    def circuit(gammas_ts):
        # Uniform superposition, then `depth` alternating phase/mixer layers.
        for wire in wires:
            qml.Hadamard(wires=wire)
        for gamma, t in zip(*qml.numpy.split(gammas_ts, 2)):
            phase_shift(gamma, wires, qualities_H)
            hypercube_mixer(t, wires)
        return qml.expval(qualities_H)

    rng = qml.numpy.random.default_rng(seed)
    gammas_ts = rng.uniform(size=2 * depth, low=0, high=2 * qml.numpy.pi)
    # Bug fix: qml.specs used to be evaluated inside the timed loop (and its
    # result discarded), inflating the reported wall time. Time only the
    # circuit evaluations and collect the resource counts once afterwards.
    start = time()
    for _ in range(n_expvals):
        expval = circuit(gammas_ts)
    end = time()
    resources = qml.specs(circuit)(gammas_ts)['resources']
    gate_sizes = resources.gate_sizes
    circuit_depth = resources.depth
    return float(expval), end - start, circuit_depth, gate_sizes[1], gate_sizes[2]
def qaoa_complete_maxcut_evolution(device, depth, n_expvals, seed):
    """State-evolution benchmark function for the QAOA with a complete-graph mixing operator.

    State evolution based on the QFT.

    Parameters
    ----------
    device :
        an initialised Pennylane device
    depth : int
        ansatz circuit depth
    n_expvals : int
        number of times to compute the state evolution and return the
        expectation value with random variational parameters
    seed : int or None
        seed for graph generation and the variational parameters

    Returns
    -------
    (float, float, int, int, int)
        last expectation value, wall time for the n_expvals evaluations,
        circuit depth, and the 1- and 2-qubit gate counts
    """
    qubits = len(device.wires)
    G = gen_graph(qubits, seed)
    wires = range(qubits)
    qualities_H = maxcut_qualities(G, wires)
    # Diagonal decomposition of the complete-graph mixer eigenvalues.
    eigen_decomp = diagonal_pauli_decompose(complete_eigenvalues(2**qubits))

    @qml.qnode(device)
    def circuit(gammas_ts):
        for wire in wires:
            qml.Hadamard(wires=wire)
        for gamma, t in zip(*qml.numpy.split(gammas_ts, 2)):
            phase_shift(gamma, wires, qualities_H)
            circulant_mixer(t, wires, eigen_decomp)
        return qml.expval(qualities_H)

    rng = qml.numpy.random.default_rng(seed)
    gammas_ts = rng.uniform(size=2 * depth, low=0, high=2 * qml.numpy.pi)
    # Bug fix: qml.specs used to be evaluated inside the timed loop (result
    # discarded), inflating the reported wall time. Time only the circuit
    # evaluations and collect the resource counts once afterwards.
    start = time()
    for _ in range(n_expvals):
        expval = circuit(gammas_ts)
    end = time()
    resources = qml.specs(circuit)(gammas_ts)['resources']
    gate_sizes = resources.gate_sizes
    circuit_depth = resources.depth
    return float(expval), end - start, circuit_depth, gate_sizes[1], gate_sizes[2]
def main_args():
import argparse
parser = argparse.ArgumentParser(description = "Run a test QAOA circuit with random variational parameters.", epilog = "Simulation output: last expectation value, qubits, depth, repeats, backend, graph sparsity, simulation time")
parser.add_argument("-q", "--qubits", default=4, type=int, dest='qubits', nargs='?', help = 'number of qubits, default = 4')
parser.add_argument("-d", "--depth", default=2, type=int, dest='depth', nargs='?', help = 'number of ansatz iterations, default = 2')
parser.add_argument("-r", "--repeats", default=1, type=int, dest='repeats', nargs='?', help = 'number of expectation value evaluations, default = 1')
parser.add_argument("-b", "--backend", default="lightning.qubit", type=str, dest='backend', nargs='?', help = 'simulation backend, default = lightning.qubit')
parser.add_argument("-o", "--options", default=[], dest='options', nargs = '*', help = 'backend-specific keyword options for device creation')
parser.add_argument("-s", "--seed", default=None, dest='seed', type = int, nargs = '?', help = 'seed random number generation')
parser.add_argument("-p", "--plot", default=False, dest='plot', action = 'store_true', help = 'plot the cicuit, do not simulate')
parser.add_argument("-g", "--graph", default=0.1, dest='graph', type=float, help = 'graph sparsity (0,1], default = 0.1')
parser.add_argument("-c", "--circuit", default=None, dest='circuit', type=str, help = 'if present, save the circuit to this path as a QASM file')
return parser.parse_args()
if __name__ == "__main__":
    import sys
    args = main_args()
    if len(args.options) > 0:
        # Extra backend options are spliced into a source string and eval()'d.
        # SECURITY: acceptable only for trusted local command lines -- never
        # expose this path to untrusted input.
        device_code = f"qml.device('{args.backend}', wires = {args.qubits}, {' ,'.join(args.options)})"
        device = eval(device_code)
    else:
        device = qml.device(args.backend, wires = args.qubits)
    G = gen_graph(args.qubits, args.seed, args.graph)
    wires = range(args.qubits)
    qualities_H = maxcut_qualities(G, wires)
    if not args.seed is None:
        rng = qml.numpy.random.default_rng(args.seed)
    else:
        rng = qml.numpy.random.default_rng()
    # NOTE(review): unlike the benchmark functions above, these angles are
    # drawn from [0, 1) rather than [0, 2*pi) -- confirm this is intended.
    gammas_ts = rng.uniform(size = 2*args.depth)
    @qml.qnode(device)
    def circuit(gammas_ts):
        # Uniform superposition, then `depth` alternating phase/mixer layers.
        for wire in wires:
            qml.Hadamard(wires=wire)
        for gamma, t in zip(*qml.numpy.split(gammas_ts, 2)):
            phase_shift(gamma, wires, qualities_H)
            hypercube_mixer(t, wires)
        return qml.expval(qualities_H)
    if args.plot:
        # Draw the circuit and exit without simulating.
        drawer = qml.draw(circuit)
        print(drawer(gammas_ts))
        sys.exit(0)
    start = time()
    for _ in range(args.repeats):
        expval = circuit(gammas_ts)
    end = time()
    if not args.circuit is None:
        # Export the executed tape as OpenQASM if requested.
        with open(args.circuit, 'w') as f:
            f.write(circuit.qtape.to_openqasm())
    print(f"{expval},{args.qubits},{args.depth},{args.repeats},{args.backend},{args.graph},{end - start}")
| John-J-Tanner/Quantum_Benchmarking | pennylane/benchmark/qaoa_maxcut.py | qaoa_maxcut.py | py | 7,745 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "networkx.fast_gnp_random_graph",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "networkx.fast_gnp_random_graph",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pennylane.Identity",
"line_number": 49,
"usage_type": "call"
},
{
"... |
18379379442 | from concurrent.futures import ThreadPoolExecutor
from os.path import splitext
from PIL import Image, ImageFile
from tkinter.filedialog import *
import os.path
ImageFile.LOAD_TRUNCATED_IMAGES = True
files = askopenfilenames()
def convert_image(file):
    """Convert one image to a maximum-quality JPEG next to the original.

    An existing .jpg output is left untouched (a message is printed instead).
    """
    with Image.open(file) as im:
        target = splitext(file)[0] + '.jpg'
        if os.path.exists(target):
            print(f'File {target} already exists.')
        else:
            # JPEG has no alpha channel, so force RGB before saving.
            im.convert('RGB').save(target, "JPEG", quality=100)
# Fan the conversions out over a thread pool; __exit__ blocks until every
# submitted task finishes (map schedules eagerly).
# NOTE(review): map()'s result iterator is never consumed, so exceptions
# raised inside convert_image are silently discarded -- confirm intended.
with ThreadPoolExecutor() as executor:
    executor.map(convert_image, files)
{
"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PIL.ImageFile",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "P... |
6493966912 | from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.int_model_operations import IntModelOperations
from . import models
class AutoRestIntegerTestServiceConfiguration(Configuration):
    """Configuration for AutoRestIntegerTestService

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param str base_url: Service URL
    """

    def __init__(
            self, base_url=None):
        # Fall back to the local test server when no URL is supplied.
        super(AutoRestIntegerTestServiceConfiguration, self).__init__(base_url or 'http://localhost')
        self.add_user_agent('autorestintegertestservice/{}'.format(VERSION))
class AutoRestIntegerTestService(object):
    """Test Infrastructure for AutoRest

    :ivar config: Configuration for client.
    :vartype config: AutoRestIntegerTestServiceConfiguration
    :ivar int_model: IntModel operations
    :vartype int_model: fixtures.acceptancetestsbodyinteger.operations.IntModelOperations
    :param str base_url: Service URL
    """

    def __init__(
            self, base_url=None):
        self.config = AutoRestIntegerTestServiceConfiguration(base_url)
        self._client = ServiceClient(None, self.config)
        # Collect every model class by name for (de)serialisation.
        client_models = {
            name: cls for name, cls in models.__dict__.items() if isinstance(cls, type)
        }
        self.api_version = '1.0.0'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self.int_model = IntModelOperations(
            self._client, self.config, self._serialize, self._deserialize)
| testormoo/autorest.ansible | test/vanilla/Expected/AcceptanceTests/BodyInteger/fixtures/acceptancetestsbodyinteger/auto_rest_integer_test_service.py | auto_rest_integer_test_service.py | py | 1,664 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "msrest.Configuration",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "version.VERSION",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "msrest.service_client.ServiceClient",
"line_number": 43,
"usage_type": "call"
},
{
"api_n... |
16166437404 | import mongoengine
ROOT_URLCONF = 'parkkeeper.urls'
# In-memory SQLite keeps the relational side of the test run self-contained.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = [
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.request',
]
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'parkkeeper',
]
# NOTE(review): dummy secret -- acceptable only because these are test
# settings; never reuse in production.
SECRET_KEY = "123"
TIME_ZONE = 'UTC'
USE_TZ = True
# Custom runner that also manages the Mongo test database lifecycle.
TEST_RUNNER = 'djutils.testrunner.TestRunnerWithMongo'
MONGODB = {
    'NAME': 'parkkeeper',
    'HOST': 'localhost',
}
# Connect eagerly at settings-import time; tz_aware matches USE_TZ above.
mongoengine.connect(MONGODB['NAME'], tz_aware=True, host='mongodb://%s:27017/%s' % (MONGODB['HOST'], MONGODB['NAME']))
ZMQ_SERVER_ADDRESS = 'localhost'
ZMQ_WORKER_REGISTRATOR_PORT = 5548
ZMQ_EVENT_RECEIVER_PORT = 5549
ZMQ_EVENT_PUBLISHER_PORT = 5550
| telminov/django-park-keeper | test_settings.py | test_settings.py | py | 1,121 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "mongoengine.connect",
"line_number": 42,
"usage_type": "call"
}
] |
28410222567 | from typing import Optional
import unittest
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """Maximum path sum in a binary tree (LeetCode 124 style)."""

    def __init__(self):
        # Best path sum seen so far across the whole tree.
        self.global_max = float("-inf")

    def postOrderTraversal(self, root: Optional[TreeNode]):
        """Return the best downward path starting at root; update global_max."""
        if not root:
            return 0
        # Negative subtree contributions are dropped (clamped to 0).
        best_left = max(self.postOrderTraversal(root.left), 0)
        best_right = max(self.postOrderTraversal(root.right), 0)
        # Path that "bends" at this node: left arm + node + right arm.
        self.global_max = max(self.global_max, best_left + best_right + root.val)
        return root.val + max(best_left, best_right)

    def maxPathSum(self, root: TreeNode) -> int:
        self.postOrderTraversal(root)
        answer = int(self.global_max)
        self.global_max = float("-inf")  # reset so the instance is reusable
        return answer
class Test(unittest.TestCase):
    def test(self):
        # INITIAL TREE
        # -10
        # / \
        # 9 20
        # / \
        # 7 15
        tree = TreeNode(-10)
        tree.left = TreeNode(9)
        tree.right = TreeNode(20)
        tree.right.right = TreeNode(7)
        tree.right.left = TreeNode(15)
        s = Solution()
        # Best path is 15 -> 20 -> 7 = 42 (does not pass through the root).
        self.assertEqual(s.maxPathSum(tree), 42)
if __name__ == "__main__":
unittest.main()
| teimurjan/leetcode | max-path-sum.py | max-path-sum.py | py | 1,353 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 56,
"usage_type": "call"
}
] |
6668321784 | import math
from argparse import ArgumentParser
from datetime import timedelta as delta
import numpy as np
import pytest
from parcels import (
AdvectionEE,
AdvectionRK4,
AdvectionRK45,
FieldSet,
JITParticle,
ParticleSet,
ScipyParticle,
Variable,
timer,
)
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
method = {'RK4': AdvectionRK4, 'EE': AdvectionEE, 'RK45': AdvectionRK45}
def stommel_fieldset(xdim=200, ydim=200, grid_type='A'):
    """Simulate a periodic current along a western boundary, with significantly
    larger velocities along the western edge than the rest of the region

    The original test description can be found in: N. Fabbroni, 2009,
    Numerical Simulation of Passive tracers dispersion in the sea,
    Ph.D. dissertation, University of Bologna
    http://amsdottorato.unibo.it/1733/1/Fabbroni_Nicoletta_Tesi.pdf

    :param xdim: number of zonal grid points
    :param ydim: number of meridional grid points
    :param grid_type: 'A' (analytic, co-located velocities) or 'C'
        (velocities derived from P by finite differences on a staggered grid)
    """
    a = b = 10000 * 1e3
    scalefac = 0.05  # to scale for physically meaningful velocities
    dx, dy = a / xdim, b / ydim
    # Coordinates of the test fieldset (on A-grid in deg)
    lon = np.linspace(0, a, xdim, dtype=np.float32)
    lat = np.linspace(0, b, ydim, dtype=np.float32)
    # Define arrays U (zonal), V (meridional) and P (sea surface height)
    U = np.zeros((lat.size, lon.size), dtype=np.float32)
    V = np.zeros((lat.size, lon.size), dtype=np.float32)
    P = np.zeros((lat.size, lon.size), dtype=np.float32)
    # Model constants of the Stommel solution (presumably beta-plane and
    # friction parameters -- see the Fabbroni reference above).
    beta = 2e-11
    r = 1/(11.6*86400)
    es = r/(beta*a)
    for j in range(lat.size):
        for i in range(lon.size):
            # Non-dimensional coordinates in [0, 1].
            xi = lon[i] / a
            yi = lat[j] / b
            P[j, i] = (1 - math.exp(-xi / es) - xi) * math.pi * np.sin(math.pi * yi) * scalefac
            if grid_type == 'A':
                U[j, i] = -(1 - math.exp(-xi / es) - xi) * math.pi ** 2 * np.cos(math.pi * yi) * scalefac
                V[j, i] = (math.exp(-xi / es) / es - 1) * math.pi * np.sin(math.pi * yi) * scalefac
    if grid_type == 'C':
        # Velocities as finite differences of P on the staggered C-grid.
        V[:, 1:] = (P[:, 1:] - P[:, 0:-1]) / dx * a
        U[1:, :] = -(P[1:, :] - P[0:-1, :]) / dy * b
    data = {'U': U, 'V': V, 'P': P}
    dimensions = {'lon': lon, 'lat': lat}
    fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
    if grid_type == 'C':
        fieldset.U.interp_method = 'cgrid_velocity'
        fieldset.V.interp_method = 'cgrid_velocity'
    return fieldset
def UpdateP(particle, fieldset, time):
    # Kernel: sample P at the particle position; at t == 0 also record the
    # starting value. (Comments only -- kernels are AST-translated by parcels.)
    if time == 0:
        particle.p_start = fieldset.P[time, particle.depth, particle.lat, particle.lon]
    particle.p = fieldset.P[time, particle.depth, particle.lat, particle.lon]
def AgeP(particle, fieldset, time):
    # Kernel: accumulate particle age; delete particles past fieldset.maxage.
    particle.age += particle.dt
    if particle.age > fieldset.maxage:
        particle.delete()
def simple_partition_function(coords, mpi_size=1):
    """A very simple partition function that assigns particles to processors (for MPI testing purposes)."""
    n_particles = coords.shape[0]
    # Spread processor ids 0..mpi_size-1 evenly over the particles, in order.
    return np.linspace(0, mpi_size, n_particles, endpoint=False, dtype=np.int32)
def stommel_example(npart=1, mode='jit', verbose=False, method=AdvectionRK4, grid_type='A',
                    outfile="StommelParticle.zarr", repeatdt=None, maxage=None, write_fields=True,
                    custom_partition_function=False):
    # Advect `npart` particles for 600 days through the Stommel gyre, sampling
    # P and tracking age along the way; timers instrument each stage.
    timer.fieldset = timer.Timer('FieldSet', parent=timer.stommel)
    fieldset = stommel_fieldset(grid_type=grid_type)
    if write_fields:
        filename = 'stommel'
        fieldset.write(filename)
    timer.fieldset.stop()
    # Determine particle class according to mode
    timer.pset = timer.Timer('Pset', parent=timer.stommel)
    timer.psetinit = timer.Timer('Pset_init', parent=timer.pset)
    ParticleClass = JITParticle if mode == 'jit' else ScipyParticle
    # Execute for 600 days, with 1-hour timesteps and 5-day output
    runtime = delta(days=600)
    dt = delta(hours=1)
    outputdt = delta(days=5)

    class MyParticle(ParticleClass):
        # Sampled pressure, its initial value, RK45 step bookkeeping, and age.
        p = Variable('p', dtype=np.float32, initial=0.)
        p_start = Variable('p_start', dtype=np.float32, initial=0.)
        next_dt = Variable('next_dt', dtype=np.float64, initial=dt.total_seconds())
        age = Variable('age', dtype=np.float32, initial=0.)

    if custom_partition_function:
        pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle, repeatdt=repeatdt,
                                     start=(10e3, 5000e3), finish=(100e3, 5000e3), time=0,
                                     partition_function=simple_partition_function)
    else:
        pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle, repeatdt=repeatdt,
                                     start=(10e3, 5000e3), finish=(100e3, 5000e3), time=0)
    if verbose:
        print(f"Initial particle positions:\n{pset}")
    # Default maxage: the entire runtime, i.e. particles are never aged out.
    maxage = runtime.total_seconds() if maxage is None else maxage
    fieldset.add_constant('maxage', maxage)
    print("Stommel: Advecting %d particles for %s" % (npart, runtime))
    timer.psetinit.stop()
    timer.psetrun = timer.Timer('Pset_run', parent=timer.pset)
    pset.execute(method + pset.Kernel(UpdateP) + pset.Kernel(AgeP), runtime=runtime, dt=dt,
                 output_file=pset.ParticleFile(name=outfile, outputdt=outputdt))
    if verbose:
        print(f"Final particle positions:\n{pset}")
    timer.psetrun.stop()
    timer.pset.stop()
    return pset
@pytest.mark.parametrize('grid_type', ['A', 'C'])
@pytest.mark.parametrize('mode', ['jit', 'scipy'])
def test_stommel_fieldset(mode, grid_type, tmpdir):
    # RK4 and adaptive RK45 should agree closely on the same gyre, and the
    # sampled P should stay near its starting value along each trajectory.
    timer.root = timer.Timer('Main')
    timer.stommel = timer.Timer('Stommel', parent=timer.root)
    outfile = tmpdir.join("StommelParticle")
    psetRK4 = stommel_example(1, mode=mode, method=method['RK4'], grid_type=grid_type, outfile=outfile, write_fields=False)
    psetRK45 = stommel_example(1, mode=mode, method=method['RK45'], grid_type=grid_type, outfile=outfile, write_fields=False)
    assert np.allclose(psetRK4.lon, psetRK45.lon, rtol=1e-3)
    assert np.allclose(psetRK4.lat, psetRK45.lat, rtol=1.1e-3)
    err_adv = np.abs(psetRK4.p_start - psetRK4.p)
    assert (err_adv <= 1.e-1).all()
    # Compare the kernel-sampled p against a fresh field interpolation.
    err_smpl = np.array([abs(psetRK4.p[i] - psetRK4.fieldset.P[0., psetRK4.lon[i], psetRK4.lat[i], psetRK4.depth[i]]) for i in range(psetRK4.size)])
    assert (err_smpl <= 1.e-1).all()
    timer.stommel.stop()
    timer.root.stop()
    timer.root.print_tree()
def main(args=None):
    # CLI entry point: parse arguments, run one Stommel advection, print timers.
    timer.root = timer.Timer('Main')
    timer.args = timer.Timer('Args', parent=timer.root)
    p = ArgumentParser(description="""
Example of particle advection in the steady-state solution of the Stommel equation""")
    p.add_argument('mode', choices=('scipy', 'jit'), nargs='?', default='jit',
                   help='Execution mode for performing computation')
    p.add_argument('-p', '--particles', type=int, default=1,
                   help='Number of particles to advect')
    p.add_argument('-v', '--verbose', action='store_true', default=False,
                   help='Print particle information before and after execution')
    p.add_argument('-m', '--method', choices=('RK4', 'EE', 'RK45'), default='RK4',
                   help='Numerical method used for advection')
    p.add_argument('-o', '--outfile', default='StommelParticle.zarr',
                   help='Name of output file')
    p.add_argument('-r', '--repeatdt', default=None, type=int,
                   help='repeatdt of the ParticleSet')
    p.add_argument('-a', '--maxage', default=None, type=int,
                   help='max age of the particles (after which particles are deleted)')
    p.add_argument('-wf', '--write_fields', default=True,
                   help='Write the hydrodynamic fields to NetCDF')
    p.add_argument('-cpf', '--custom_partition_function', default=False,
                   help='Use a custom partition_function (for MPI testing purposes)')
    args = p.parse_args(args)
    timer.args.stop()
    timer.stommel = timer.Timer('Stommel', parent=timer.root)
    stommel_example(args.particles, mode=args.mode, verbose=args.verbose, method=method[args.method],
                    outfile=args.outfile, repeatdt=args.repeatdt, maxage=args.maxage, write_fields=args.write_fields,
                    custom_partition_function=args.custom_partition_function)
    timer.stommel.stop()
    timer.root.stop()
    timer.root.print_tree()
if __name__ == "__main__":
main()
| OceanParcels/parcels | docs/examples/example_stommel.py | example_stommel.py | py | 8,324 | python | en | code | 250 | github-code | 1 | [
{
"api_name": "parcels.ScipyParticle",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "parcels.JITParticle",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "parcels.AdvectionRK4",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "parcel... |
74579624992 | """empty message
Revision ID: 88f77742bc2d
Revises:
Create Date: 2017-07-03 13:15:12.714598
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '88f77742bc2d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the `redemption` table (id PK, FK to coupon.id, redemption timestamp)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('redemption',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('coupon_id', sa.Integer(), nullable=True),
    sa.Column('redemption_date', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['coupon_id'], ['coupon.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the `redemption` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('redemption')
    # ### end Alembic commands ###
| cokifigue/validateMe | migrations/versions/88f77742bc2d_.py | 88f77742bc2d_.py | py | 852 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
40270074978 | #!/usr/bin/python3
import os
import argparse
import time
import urllib.request
import email.mime.text
import socket
import getpass
import subprocess
def lookup_mac_vendor(mac_address):
    """Look up the hardware vendor of a MAC address via api.macvendors.com.

    Only the first three octets (the OUI) are sent, for privacy. Retries up
    to three times with a one-second pause between failed attempts.
    Returns "" for an empty input, on persistent failure, or if the API
    returns an empty body.
    """
    if not mac_address:
        return ""
    for _ in range(3):
        try:
            # Only first 3 octets are needed for manufacturer identification
            # The rest is ommited for privacy reasons
            with urllib.request.urlopen("https://api.macvendors.com/" + mac_address[:8]) as response:
                charset = response.info().get_param('charset') or 'utf-8'
                vendor = response.read().decode(charset)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed SystemExit/
            # KeyboardInterrupt. Best-effort lookup: any network/decode error
            # just triggers a retry after a short pause.
            time.sleep(1)
            continue
        if vendor:
            return vendor
    return ""
def send_mail(vendor):
    # Compose a plain-text report of the new lease and hand it to the local
    # sendmail binary. Reads the module-level `options` namespace populated
    # by argparse at the bottom of this script.
    msg = email.mime.text.MIMEText(
        "IP: {ip}\n"
        "Static: {is_static}\n"
        "MAC: {mac}\n"
        "Vendor: {vendor}\n"
        "Host: {host}\n"
        "Static host name: {static}".format(
            ip=options.ip,
            is_static="yes" if options.is_static == "1" else "no",
            mac=options.mac,
            vendor=vendor,
            host=options.host_name,
            static=options.static_host_name
        )
    )
    msg['From'] = getpass.getuser() + "@" + socket.gethostname()
    msg['To'] = options.mail_to
    msg['Subject'] = "New DHCP lease for {} on {}".format(options.host_name, socket.gethostname())
    # -t: take recipients from headers; -oi: do not treat "." as end of input.
    proc = subprocess.Popen(["/usr/sbin/sendmail", "-t", "-oi"], stdin=subprocess.PIPE)
    proc.communicate(bytes(msg.as_string(), "UTF-8"))
def forked_main():
    # Mail a notification if this MAC has not been seen before, then record it.
    vendor = lookup_mac_vendor(options.mac)
    mac_is_known = False
    if options.mac:
        if os.path.isfile(options.known_macs_file):
            with open(options.known_macs_file) as f:
                for line in f:
                    # Case-insensitive comparison against each stored MAC.
                    if line.rstrip().upper() == options.mac.upper():
                        mac_is_known = True
    if not mac_is_known:
        send_mail(vendor)
    # NOTE(review): the MAC is appended even when already known, so the file
    # accumulates duplicates over time -- confirm whether this append belongs
    # inside `if not mac_is_known`.
    if options.mac:
        with open(options.known_macs_file, "a") as f:
            f.write(options.mac + "\n")
# Positional arguments mirror the values dnsmasq-style hooks pass on a lease.
parser = argparse.ArgumentParser(description="New DHCP lease event handler")
parser.add_argument("known_macs_file", help="Known MAC-address list file")
parser.add_argument("ip", nargs="?", default="", help="IP address")
parser.add_argument("mac", nargs="?", default="", help="MAC address")
parser.add_argument("host_name", nargs="?", default="", help="Name of the client")
parser.add_argument("static_host_name", nargs="?", default="", help="Matched static host name")
parser.add_argument("is_static", nargs="?", default="", help="IP is static (0 or 1)")
parser.add_argument('-n', '--no-fork', dest='no_fork', action='store_true',
                    default=False, help='Do not fork a child process')
parser.add_argument('-m', '--mail-to', dest='mail_to', default="root",
                    help='Mail message recipient (default: root)', metavar="mail")
options = parser.parse_args()
if options.no_fork:
    print("--no-fork option is specified. Running in foreground")
    print("IP: {}".format(options.ip))
    print("MAC: {}".format(options.mac))
    print("Mail to: {}".format(options.mail_to))
    forked_main()
elif os.fork() == 0:
    # Child process performs the (potentially slow) vendor lookup and mail
    # delivery so the calling DHCP server is not blocked; parent exits at once.
    forked_main()
    os._exit(0)
| cheretbe/notes | files/dhcp/on_dhcp_lease.py | on_dhcp_lease.py | py | 3,273 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_nam... |
11737051293 | from flask import jsonify, render_template
from app.device import bp, device
from app.models import DeviceModel, Project
@bp.route('/devices', methods=['GET', 'POST'])
def devices():
connected = device.get_devices()
registered = DeviceModel.query.all()
return render_template('device/devices.html', title='All devices',
connected=connected, registered=registered)
# @bp.route('/test_devices', methods=['GET', 'POST'])
# def test_devices():
# registered = Device.query.all()
# return jsonify(Device.serialize_list(registered))
@bp.route('/devices/check', methods=['GET', 'POST'])
def check():
return {}
| johndoe-dev/Ecodroid | app/device/routes.py | routes.py | py | 660 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.device.device.get_devices",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "app.device.device",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.models.DeviceModel.query.all",
"line_number": 9,
"usage_type": "call"
},
{
"api... |
552195300 | import pandas as pd
import tqdm
import numpy as np
import codecs
import glob
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, f1_score
from sklearn.linear_model import LogisticRegression
import random
import matplotlib.pyplot as plt
import sys
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
random.seed(16)
np.random.seed(16)
print("Without l1!")
liar_train = codecs.open("data/liar_xtrain.txt", 'r', 'utf-8').read().split('\n')
liar_train = [s.lower() for s in liar_train if len(s) > 1]
liar_train_labels = codecs.open('data/liar_ytrain.txt', 'r', 'utf-8').read().split('\n')
liar_train_lab = [s for s in liar_train_labels if len(s) > 1]
liar_dev = codecs.open("data/liar_xval.txt", 'r', 'utf-8').read().split('\n')
liar_dev = [s.lower() for s in liar_dev if len(s) > 1]
liar_dev_labels = codecs.open("data/liar_yval.txt", 'r', 'utf-8').read().split('\n')
liar_dev_lab = [s for s in liar_dev_labels if len(s) > 1]
liar_test = codecs.open("data/liar_xtest.txt", 'r', 'utf-8').read().split('\n')
liar_test = [s.lower() for s in liar_test if len(s) > 1]
liar_test_labels = codecs.open("data/liar_ytest.txt", 'r', 'utf-8').read().split('\n')
liar_test_lab = [s for s in liar_test_labels if len(s) > 1]
assert len(liar_train) == len(liar_train_lab)
assert len(liar_dev) == len(liar_dev_lab)
assert len(liar_test) == len(liar_test_lab)
le = preprocessing.LabelEncoder()
#classes = ['pants-fire','false','barely-true','half-true','mostly-true','true']
#le.fit_transform(classes)
liar_train_lab = le.fit_transform(liar_train_lab)
liar_dev_lab = le.transform(liar_dev_lab)
liar_test_lab = le.transform(liar_test_lab)
print(le.classes_) #['barely-true' 'false' 'half-true' 'mostly-true' 'pants-fire' 'true']
print(le.transform(le.classes_)) # [0 1 2 3 4 5]
# untrue classes (to be encoded as 0): 4, 1, 0
# true classes (to be encoded as 1): 2, 3, 5
def binarize_label(labels):
labels_transformed = [1 if i in [2,3,5] else 0 for i in labels]
return labels_transformed
def label_switch(labels):
labels_transformed = [1 if i==0 else 0 for i in labels]
return labels_transformed
liar_train_lab = binarize_label(liar_train_lab)
liar_dev_lab = binarize_label(liar_dev_lab)
liar_test_lab = binarize_label(liar_test_lab)
# Kaggle data
data = codecs.open("data/kaggle_trainset.txt", 'r', 'utf-8').read().split('\n')
data = data[:20800]
data = [s.lower() for s in data]
labels = codecs.open("data/kaggle_train_labels.txt", 'r', 'utf-8').read().split('\n')
labels = labels[:20800]
labels = [int(i) for i in labels]
tr, te, trlab, telab = train_test_split(data, labels, test_size=0.33, random_state=42)
kaggle_train = tr
kaggle_train_lab = label_switch(trlab)
kaggle_test = te
kaggle_test_lab = label_switch(telab)
# FakeNewsCorpus data (part of it, 25000 samples in each class)
FNC_fake = codecs.open("data/fake_part1.txt", 'r', 'utf-8').read().split('\n')
FNC_fake = FNC_fake[:25000]
FNC_true = codecs.open("data/true_part1.txt", 'r', 'utf-8').read().split('\n')
FNC_true = FNC_fake[:25000]
FNC_fake_labels = np.zeros(len(FNC_fake))
FNC_true_labels = np.ones(len(FNC_true))
FNC_samples = np.concatenate((FNC_fake, FNC_true))
FNC_labels = np.concatenate((FNC_fake_labels, FNC_true_labels))
assert len(FNC_samples) == len(FNC_labels)
FNC_samples, FNC_labels = shuffle(FNC_samples, FNC_labels, random_state=42)
FNC_Xtrain, FNC_Xtest, FNC_ytrain, FNC_ytest = train_test_split(FNC_samples, FNC_labels, test_size=0.33, random_state=42)
# FakeNewsNet Politifact data
# real = codecs.open("data/RealNewsContent.txt", 'r', 'utf-8').read().split('\n')
# real = [s.lower() for s in real if len(s) > 1]
# fake = codecs.open("data/FakeNewsContent.txt", 'r', 'utf-8').read().split('\n')
# fake = [s.lower() for s in fake if len(s) > 1]
# real_labels = np.ones(len(real))
# fake_labels = np.zeros(len(fake))
# FNN_labels = np.concatenate((real_labels, fake_labels))
# FNN_texts = np.concatenate((real, fake))
# assert len(FNN_labels) == len(FNN_texts)
# FNN_X, FNN_y = shuffle(FNN_texts,FNN_labels, random_state=42)
m= 5000 #number of feats 5000 or 10000
k=5#max ngram
v=1 #min mgram
# Vectorizing
print("Vectorizing...")
liar_vectorizer = TfidfVectorizer(ngram_range=(v,k), max_features=m)
X_train_liar = liar_vectorizer.fit_transform(liar_train)
X_test_liar = liar_vectorizer.transform(liar_test)
X_dev_liar = liar_vectorizer.transform(liar_dev)
liar_feats = ['_'.join(s.split()) for s in liar_vectorizer.get_feature_names()]
kaggle_vectorizer = TfidfVectorizer(ngram_range=(v,k), max_features=m)
X_train_kaggle = kaggle_vectorizer.fit_transform(kaggle_train)
X_test_kaggle = kaggle_vectorizer.transform(kaggle_test)
kaggle_feats = ['_'.join(s.split()) for s in kaggle_vectorizer.get_feature_names()]
FNC_vectorizer = TfidfVectorizer(ngram_range=(v,k), max_features=m)
FNC_Xtrain_vect = FNC_vectorizer.fit_transform(FNC_Xtrain)
FNC_Xtest_vect = FNC_vectorizer.transform(FNC_Xtest)
FNC_feats = ['_'.join(s.split()) for s in FNC_vectorizer.get_feature_names()]
print("Vectorizing done")
print("Fitting classifiers...")
# Classifiers
clf_liar=None
clf_liar = LogisticRegression(random_state=16, solver='saga',C=np.inf, max_iter=10000).fit(X_train_liar,liar_train_lab)
liar_coefs = clf_liar.coef_
allcoefs_liar = pd.DataFrame.from_records(liar_coefs, columns=liar_feats) #add ngrams as colnames
allcoefs_liar.to_csv('NEW_liar_coefs_final.csv', sep='\t', index=False)
clf_kaggle=None
clf_kaggle = LogisticRegression(random_state=16, solver='saga',C=np.inf, max_iter=10000).fit(X_train_kaggle,kaggle_train_lab)
kaggle_coefs = clf_kaggle.coef_
allcoefs_kaggle = pd.DataFrame.from_records(kaggle_coefs, columns=kaggle_feats) #add ngrams as colnames
allcoefs_kaggle.to_csv('NEW_kaggle_coefs_final.csv', sep='\t', index=False)
clf_FNC=None
clf_FNC = LogisticRegression(random_state=16, solver='saga',C=np.inf, max_iter=10000).fit(FNC_Xtrain_vect, FNC_ytrain)
FNC_coefs = clf_FNC.coef_
allcoefs_FNC = pd.DataFrame.from_records(FNC_coefs, columns=FNC_feats)
allcoefs_FNC.to_csv("NEW_FakeNewsCorpus_coefs.csv", sep="\t", index=False)
# get coefs from FakeNewsNet while your at it
# FNN_vectorizer = TfidfVectorizer(ngram_range=(v,k), max_features=m)
# FNN_vect = FNN_vectorizer.fit_transform(FNN_X)
# FNN_feats = ['_'.join(s.split()) for s in FNN_vectorizer.get_feature_names()]
# clf_FNN=None
# clf_FNN = LogisticRegression(random_state=16, solver='saga', penalty='l1', max_iter=10000).fit(FNN_vect,FNN_y)
# FNN_coefs = clf_FNN.coef_
# all_coefs_FNN = pd.DataFrame.from_records(FNN_coefs, columns=FNN_feats)
# all_coefs_FNN.to_csv('FakeNewsNet_coefs_final.csv', sep='\t', index=False)
print("Predicting...")
# Predicting
preds_liar_test = clf_liar.predict(X_test_liar)
preds_liar_valid = clf_liar.predict(X_dev_liar)
preds_liar_train = clf_liar.predict(X_train_liar)
#
preds_kaggle_test = clf_kaggle.predict(X_test_kaggle)
preds_kaggle_tain = clf_kaggle.predict(X_train_kaggle)
preds_FNC_test = clf_FNC.predict(FNC_Xtest_vect)
preds_FNC_train = clf_FNC.predict(FNC_Xtrain_vect)
# cross-dataset predictions
kaggle_test_vectorized_by_liar = liar_vectorizer.transform(kaggle_test)
kaggle_test_predicted_by_liar_classifier = clf_liar.predict(kaggle_test_vectorized_by_liar)
#
kaggle_train_vectorized_by_liar = liar_vectorizer.transform(kaggle_train)
kaggle_train_predicted_by_liar = clf_liar.predict(kaggle_train_vectorized_by_liar)
FNC_test_vectorized_by_liar = liar_vectorizer.transform(FNC_Xtest)
FNC_test_predicted_by_liar = clf_liar.predict(FNC_test_vectorized_by_liar)
FNC_test_vectorized_by_kaggle = kaggle_vectorizer.transform(FNC_Xtest)
FNC_test_predicted_by_kaggle = clf_kaggle.predict(FNC_test_vectorized_by_kaggle)
kaggle_test_vectorized_by_FNC = FNC_vectorizer.transform(kaggle_test)
kaggle_test_predicted_by_FNC = clf_FNC.predict(kaggle_test_vectorized_by_FNC)
liar_test_vectorized_by_FNC = FNC_vectorizer.transform(liar_test)
liar_test_predicted_by_FNC = clf_FNC.predict(liar_test_vectorized_by_FNC)
# using other models to predict on FakeNewsNet
# FNN_vectorized_by_liar = liar_vectorizer.transform(FNN_X)
# FNN_predicted_by_liar = clf_liar.predict(FNN_vectorized_by_liar)
#
# FNN_vectorized_by_kaggle = kaggle_vectorizer.transform(FNN_X)
# FNN_predicted_by_kaggle = clf_kaggle.predict(FNN_vectorized_by_kaggle)
def print_scores(y, y_hat, string):
print(string)
print("binary F1", f1_score(y, y_hat))
print("micro f1:",f1_score(y, y_hat, average='micro'))
print("macro f1:", f1_score(y, y_hat, average='macro'))
print("weighted F1", f1_score(y, y_hat, average='weighted'))
print("accuracy", accuracy_score(y, y_hat))
print()
print_scores(liar_test_lab, preds_liar_test, "Liar Test Scores")
print_scores(liar_dev_lab, preds_liar_valid, "Liar Valid. Scores")
print_scores(liar_train_lab, preds_liar_train, "Liar Train Scores")
#
print_scores(kaggle_test_lab, preds_kaggle_test, "Kaggle Test Scores")
print_scores(kaggle_train_lab, preds_kaggle_tain, "Kaggle Train Scores")
#
print_scores(kaggle_test_lab, kaggle_test_predicted_by_liar_classifier, "Kaggle Test Set Predicted by Classifier Trained on Liar")
print_scores(kaggle_train_lab, kaggle_train_predicted_by_liar, "Kaggle Train Set Predicted by Classifier Trained on Liar")
print_scores(FNC_ytest, preds_FNC_test, "FakeNewsCorpus test prediction scores")
print_scores(FNC_ytrain, preds_FNC_train, "FakeNewsCorpus train prediction scores")
print_scores(FNC_ytest, FNC_test_predicted_by_liar, "FakeNewsCorpus test set predicted by Liar classifier")
print_scores(FNC_ytest, FNC_test_predicted_by_kaggle, "FakeNewsCorpus test set predicted by kaggle classifier")
print_scores(kaggle_test_lab, kaggle_test_predicted_by_FNC, "Kaggle Test Set Predicted by Classifier Trained on FakeNewsCorpus")
print_scores(liar_test_lab, liar_test_predicted_by_FNC, "Liar Test Set Predicted by Classifier Trained on FakeNewsCorpus")
# print_scores(FNN_y, FNN_predicted_by_liar, "FakeNewsNet Predicted by Classifier Trained on Liar")
# print_scores(FNN_y, FNN_predicted_by_kaggle, "FakeNewsNet Predicted by Classifier Trained on Kaggle")
# random liar test set
# random_liar_labels = liar_test_lab.copy()
# np.random.shuffle(random_liar_labels)
# print_scores(random_liar_labels,preds_liar_test, "Scores between predictions on Liar test set and randomized 'true' test labels. Classifier trained on Liar trains set")
print("Done")
| terne/thesis | logreg_experiments/old/all_data_logistic_regression.py | all_data_logistic_regression.py | py | 10,456 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"li... |
44976337574 | # -*- codeing = utf-8 -*-
# @Time : 2021/5/9 21:41
# @File :校内赛识别图形最终版本.py
# @Software : PyCharm
import cv2 as cv
def detectShape(img):
# 查找轮廓,cv2.RETR_ExTERNAL=获取外部轮廓点, CHAIN_APPROX_NONE = 得到所有的像素点
contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
# 循环轮廓,判断每一个形状
global count
global objectType
count = 0
objectType = None
for cnt in contours:
# 获取轮廓面积
area = cv.contourArea(cnt)
# 当面积大于1500,代表有形状存在
if area > 1500:
# 绘制所有的轮廓并显示出来
cv.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
# 计算所有轮廓的周长,便于做多边形拟合
peri = cv.arcLength(cnt, True)
# 多边形拟合,获取每个形状的边
approx = cv.approxPolyDP(cnt, 0.02 * peri, True)
objCor = len(approx)
# 获取每个形状的x,y,w,h
x, y, w, h = cv.boundingRect(approx)
# 计算出边界后,即边数代表形状,如三角形边数=3
if objCor == 3:
objectType = "Triangle"
print("形状是%s,面积是%d" % (objectType, area))
count += 1
elif objCor == 4:
objectType = "Rectangle"
print("形状是%s,面积是%d" % (objectType, area))
count += 1
# 大于4个边的就是五边形
elif 5 <= objCor < 7:
objectType = "Pentagon"
print("形状是%s,面积是%d" % (objectType, area))
count += 1
elif objCor == 8:
objectType = "Circle"
print("形状是%s,面积是%d" % (objectType, area))
count += 1
elif objCor > 8:
objectType = "Star"
print("形状是%s,面积是%d" % (objectType, area))
count += 1
# 绘制文本时需要绘制在图形附件
cv.rectangle(imgContour, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv.putText(imgContour, objectType,
(x + (w // 2) - 10, y + (h // 2) - 10), cv.FONT_HERSHEY_COMPLEX, 0.7,
(0, 0, 0), 2)
# 调用笔记本内置摄像头,所以参数为0,官方摄像头为1
cap = cv.VideoCapture(0, cv.CAP_DSHOW)
while True:
# 从摄像头读取图片
success, img = cap.read()
# 转换大小
img = cv.resize(img, (1000, 700))
# 灰度化
imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# 高斯平滑
imgBlur = cv.GaussianBlur(imgGray, (7, 7), 1)
# 边缘检测
imgCanny = cv.Canny(imgBlur, 200, 200)
# 膨胀
kernel = cv.getStructuringElement(cv.MORPH_RECT, (2, 2))
swell = cv.dilate(imgCanny, kernel=kernel)
# 调用检测函数
imgContour = img.copy()
detectShape(swell)
# 绘制文本
cv.putText(imgContour, "%s,%d" % (objectType, count), (10, 50), cv.FONT_HERSHEY_PLAIN, 2.0,
(0, 0, 0), 2)
# 若参数delay≤0:表示一直等待按键;
# 若delay取正整数:表示等待按键的时间,比如cv2.waitKey(100),就是等待100毫秒
k = cv.waitKey(100)
# 保持画面的持续。
cv.imshow("img", imgContour)
if k == 27:
# 通过esc键退出摄像
cv.destroyAllWindows()
break
# 关闭摄像头
cap.release()
cv.destroyAllWindows()
| ChangYu-beginner/The-visual-program | 校内赛识别图形最终版本.py | 校内赛识别图形最终版本.py | py | 3,663 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.findContours",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_NONE",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv... |
27031089250 | from win10toast import ToastNotifier
import feedparser
import time
toaster = ToastNotifier()
def getNews():
url = "https://www.youm7.com/rss/SectionRss?SectionID=203" #url rss
feed = feedparser.parse("https://www.youm7.com/rss/SectionRss?SectionID=203") #Get all feed from rss
for item in feed["entries"]:
toaster.show_toast("اليوم السابع",item["title"],duration=100)
while toaster.notification_active():
time.sleep(1000)
if __name__ == '__main__':
getNews()
| peterramsis/RssNotify | main.py | main.py | py | 514 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "win10toast.ToastNotifier",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "feedparser.parse",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
}
] |
38033746083 | import numpy as np
import pandas as pd
import pickle
from flask import Flask ,app ,request ,jsonify ,render_template ,url_for
app=Flask(__name__)
reg_model=pickle.load(open('regm.pkl','rb')) #load regression model
scaler=pickle.load(open('scale.pkl','rb')) #load scaling model
@app.route('/')
def home():
return render_template('home.html') #redirect to home.html
@app.route('/predict_api',methods=['POST']) #api with post request becuase we take input then generate output using it
def predict1():
data=request.json['data']
print(data) #data is dictionory with elements in key value pair
print(np.array(list(data.values())).reshape(1,-1)) #convert all inputs in a single file of data to add in prediction model
transformed_data=scaler.transform(np.array(list(data.values())).reshape(1,-1))
output=reg_model.predict(transformed_data)
print(output[0]) #output recived is an 2d array
return jsonify(output[0]) #return jsonifoed output
@app.route('/predict',methods=['POST'])
def predict():
input=[float(x) for x in request.form.values()] #Get input from html form
transformed_input=scaler.transform(np.array(input).reshape(1,-1))
output=reg_model.predict(transformed_input)[0]
return render_template("home.html",prediction_text="Predicted House Price is {}".format(output))
if __name__=="__main__":
app.run(debug=True)
| sanchayvashist/boston_house | app.py | app.py | py | 1,532 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.app",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 7,
... |
6712255038 | import curses
import math
import sys
import logging as log
from pprint import pformat
class Menu:
DIRECTION_UP = 'UP'
DIRECTION_DOWN = 'DOWN'
def __init__(self, layout, border=True):
self.win = curses.newwin(layout[0],
layout[1],
layout[2],
layout[3])
self.update()
self.storage = []
self.storage_event = []
self.border = border
# NOTE: Default setting
self.selectable = False
self.page_number = 1
self.page_index = 0
self.selected_index = 0
self.draw_border()
self.user_list = None
self.buffered_line_count = 0
# self.win.bkgd(' ', curses.color_pair(3))
def set_user_list(self, user_list):
self.user_list = user_list
def update(self):
self.win.refresh()
def set_storage(self,data, index=None):
if index:
self.storage[index] = data
else:
self.storage = data
def append_storage(self, data):
self.storage.extend(data)
def append_storage_event(self, events):
self.storage_event.extend(events)
def prepend_storage_event(self, events):
self.storage_event = events + self.storage_event
def prepend_storage(self, data):
self.storage = data + self.storage
def number_of_drawable_rows(self):
return self.win.getmaxyx()[0]
def clear(self):
self.storage = []
self.storage_event = []
self.page_number = 1
self.page_index = 0
self.selected_index = 0
self.buffered_line_count = 0
self.win.clear()
def draw_border(self):
if self.border:
self.win.border(0)
def draw(self):
self.win.clear()
self.draw_border()
self.write_storage()
self.update()
# TODO: Remove this at some point
def write_clear(self):
loc = self.draw_location()
lines = self.page_length()
blank_line = ' ' * self.draw_bounds()[1]
for i in range(0, lines):
draw_y = loc[0] + i
draw_x = loc[1]
if self.draw_inbounds(draw_y, draw_x):
# TODO: Fix with try cache, because addstr \n places cursor, bottom right corner
self.win.addstr(draw_y, draw_x, blank_line)
def write_storage(self):
loc = self.draw_location()
lines = self.read_storage()
local_selected = self.local_selected_index(self.selected_index);
for i in range(0, len(lines)):
draw_y = loc[0] + i
draw_x = loc[1]
line_to_draw = lines[i]
line_width = self.draw_line_width()
if len(line_to_draw) > line_width:
line_to_draw = line_to_draw[0:line_width]
if self.draw_inbounds(draw_y, draw_x):
# TODO: Fix with try cache, because addstr \n places cursor, bottom right corner
if i == local_selected and self.selectable:
self.win.addstr(draw_y, draw_x, line_to_draw, curses.color_pair(1))
else:
self.win.addstr(draw_y, draw_x, line_to_draw)
def local_selected_index(self, index):
return abs(self.page_length() - ((self.page_length() * self.page_number)-self.selected_index))
def read_storage(self):
if self.page_number <= self.page_count():
read = self.page_length() * self.page_number
output = self.storage[self.page_index:read]
return output
return []
def draw_location(self):
if self.border:
return (1,1)
return (0,0)
def draw_bounds(self):
if self.border:
y,x = self.win.getmaxyx()
return (y - 2, x - 2)
return self.win.getmaxyx()
def draw_inbounds(self, y, x):
b_y, b_x = self.draw_bounds()
if y > b_y:
return False
if x > b_x:
return False
if y < 0:
return False
if x < 0:
return False
return True
def draw_line_width(self):
return self.draw_bounds()[1]
def page_count(self):
return math.ceil(len(self.storage)/self.draw_bounds()[0])
def compute_page_count(self, lines):
return math.ceil(len(lines)/self.draw_bounds()[0])
def page_length(self):
return self.draw_bounds()[0]
def select_event(self):
try :
event = self.storage_event[self.selected_index]
return event[0](event[1])
except Exception as e:
log.error(e)
log.error('No Event Action off Select(enter)')
def select_down(self):
if self.selected_index < len(self.storage) - 1:
self.selected_index = self.selected_index + 1
if self.selected_index >= self.page_length() * self.page_number:
self.scroll_down()
def select_up(self):
if self.selected_index > 0:
self.selected_index = self.selected_index - 1
if self.selected_index < self.page_length() * (self.page_number - 1):
self.scroll_up()
def select(self, direction):
if direction == self.DIRECTION_UP:
self.select_up()
elif direction == self.DIRECTION_DOWN:
self.select_down()
self.write_clear()
self.write_storage()
self.update()
def scroll_down(self):
if self.page_number < self.page_count():
self.page_index = self.page_index + self.page_length()
self.selected_index = self.page_index
self.page_number = self.page_number + 1
def scroll_up(self):
if self.page_number > 1:
self.page_index = self.page_index - self.page_length()
self.selected_index = self.page_index + self.page_length() - 1
self.page_number = self.page_number - 1
def scroll(self, direction):
if direction == self.DIRECTION_UP:
self.scroll_up()
elif direction == self.DIRECTION_DOWN:
self.scroll_down()
self.draw()
def scroll_to_bottom(self):
self.page_index = self.page_length()
self.selected_index = self.page_index
self.page_number = self.page_count()
self.draw()
def focus(self):
self.selectable = True
self.draw()
def defocus(self):
self.selectable = False
self.draw()
def eat_key(self, c):
if c == ord('j'):
self.select('DOWN')
pass
elif c == ord('k'):
self.select('UP')
pass
elif c == 10:
self.select_event()
pass
def buffer_lines(self, lines):
number_of_pages = self.compute_page_count(lines)
total_lines = number_of_pages * self.page_length()
total_buffer = total_lines - len(lines)
buffer = [' '] * total_buffer
lines = buffer + lines
return lines
def scroll_to_last_page(self):
self.page_index = self.page_length() * (self.page_count() - 1)
self.selected_index = self.page_index
self.page_number = self.page_count()
def fake_window_scroll(self):
self.clear_previous_scroll()
page_length = self.page_length()
line_count_in_window = len(self.read_storage())
[self.prepend_storage([' ' * self.draw_line_width()]) for _ in range(page_length - line_count_in_window)]
self.buffered_line_count = page_length - line_count_in_window
def clear_previous_scroll(self):
self.storage = self.storage[self.buffered_line_count:]
self.buffered_line_count = 0
| nadr0/branch-delete | menu.py | menu.py | py | 7,999 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "curses.newwin",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "curses.color_pair",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_num... |
10645242083 | import tushare as ts
import numpy as np
import matplotlib.pyplot as plt
import random
import math
import datetime
import tushare as ts
import pymysql
import stock_one
# p = stock_one.oneday('sh.600123', '20150102', '20210218')
# print(p)
# exit()
starttime = '20170102'
endtime = '20210218'
all = []
a = ['SH.600206', datetime.date(2017, 1, 3), 10.0739, 496, datetime.date(2021, 2, 10), 11.33, 12.460511999999987, 236.9613760000003]
b = ['SH.600206', datetime.date(2017, 1, 3), 10.0739, 496, datetime.date(2021, 2, 10), 11.33, 12.460511999999987, 236.9613760000003]
all.append(a)
all.append(b)
x = []
y = []
db = pymysql.connect(host='127.0.0.1', user='root', passwd='', db='tushare', charset='utf8')
cursor = db.cursor()
try:
sql = "select * from stock_basic order by ts_code;"
cursor.execute(sql)
stockbasic = np.array(cursor.fetchall())
for i in range(stockbasic.shape[0]):
isgo = 0
#print(stockbasic[i,:][0], stockbasic[i,:][1])
if int(stockbasic[i,:][1]) >= 600000 and int(stockbasic[i,:][1]) < 600589: #600589:
isgo = 1
if int(stockbasic[i,:][1]) >= 1 and int(stockbasic[i,:][1]) < 898:
isgo = 1
# if int(stockbasic[i,:][1]) == 600160:
# isgo = 1
if isgo == 0:
continue
realcode = stockbasic[i,:][0][7]+stockbasic[i,:][0][8]+'.'+stockbasic[i,:][1]
print(i, realcode)
p = stock_one.oneday(realcode, starttime, endtime)
print(p)
all.append(p)
except Exception as err:
print(err)
cursor.close()
db.close()
all = np.array(all)
v1 = np.sort(all[:,6])
v2 = np.sort(all[:,7])
x = np.arange(all.shape[0])
# out_list = np.array([list(item) for item in zip(x,y)])
# print(out_list)
# out_list = out_list[np.lexsort(out_list.T)]
# print(out_list)
np.savez('all_data', all)
plt.title("ALL")
plt.xlabel("x")
plt.ylabel("price")
plt.ylim([-100,300])
plt.plot(x,v1,linestyle='--',color='red')
plt.plot(x,v2,linestyle='--',color='green')
plt.show()
| moon142857/TradeDoor | stock_all.py | stock_all.py | py | 2,046 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_n... |
3890081887 | """
Created on Fri Nov 8 23:23:22 2019
@author: Pratiksha
"""
# app.py for Scara Web App
from flask import Flask, render_template, request
from data import Articles
import cv2
from pyzbar import pyzbar
app = Flask(__name__)
Articles = Articles()
S = []
@app.route('/')
def index():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/articles')
def articles():
return render_template('articles.html', articles = Articles)
@app.route("/submit", methods=['GET', 'POST'])
def submit():
if request.method == 'POST':
f = request.files['file']
f.save(f.filename)
image = cv2.imread(f.filename)
barcodes = pyzbar.decode(image)
data = ""
for barcode in barcodes:
# extract the bounding box location of the barcode and draw the
# bounding box surrounding the barcode on the image
(x, y, w, h) = barcode.rect
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
# the barcode data is a bytes object so if we want to draw it on
# our output image we need to convert it to a string first
data = barcode.data.decode("utf-8")
for i in range(len(Articles)):
if data==Articles[i]['id']:
S.append([Articles[i]['title'],Articles[i]['mrp']])
return render_template('bill.html', data=S)
if __name__ == '__main__':
app.run(debug = True)
| PratikshaJain37/Scara-Hack36 | app.py | app.py | py | 1,492 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "data.Articles",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.render_template... |
35131163454 | '''
The following codes are from https://github.com/d-li14/mobilenetv2.pytorch
Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import errno
import os
import torch
from torch import nn
from torch.nn import init
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter']
def get_mean_and_std(dataset):
'''
Compute the mean and std value of dataset.
:param dataset: input data
:return: (mean, std): mean value of dataset, and standard deviation value of dataset
'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, _ in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''
Init layer parameters.
:param net: input network to be initialized
'''
for module in net.modules():
if isinstance(module, nn.Conv2d):
init.kaiming_normal(module.weight, mode='fan_out')
if module.bias:
init.constant(module.bias, 0)
elif isinstance(module, nn.BatchNorm2d):
init.constant(module.weight, 1)
init.constant(module.bias, 0)
elif isinstance(module, nn.Linear):
init.normal(module.weight, std=1e-3)
if module.bias:
init.constant(module.bias, 0)
def mkdir_p(path):
'''
make dir if not exist
:param path: directory path we make
'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AverageMeter:
"""
Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
""" reset function """
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, num=1):
"""
update function
:param val: input current average value
:param n: number of items for val
"""
self.val = val
self.sum += val * num
self.count += num
self.avg = self.sum / self.count
| snudm-starlab/FALCON2 | src/imagenetutils/misc.py | misc.py | py | 2,761 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.zeros"... |
10155952607 | import time
import argparse
import json
import tqdm
import torch
import numpy as np
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, f1_score
from model.model import Model
from model.data import get_data_frames, create_graph, create_hetero_graph, create_data_loaders
def train(model, optimizer, criterion, data_loader, device, run, epochs, eval_steps):
train_loss, valid_scores, test_scores = [], [], []
start_time = time.time()
for epoch in range(1, epochs + 1):
model.train()
total_loss = total_examples = 0
for batch_data in tqdm.tqdm(data_loader['train'], disable=True):
optimizer.zero_grad()
batch_data = batch_data.to(device)
pred = model(batch_data)
ground_truth = batch_data['user', 'item'].edge_label
loss = criterion(pred, ground_truth)
loss.backward()
optimizer.step()
total_loss += float(loss) * pred.numel()
total_examples += pred.numel()
epoch_loss = total_loss / total_examples
train_loss.append(epoch_loss)
if epoch % eval_steps == 0:
print(f"\nEpoch: {epoch:03d}, Train Loss: {epoch_loss:.4f}")
val_auc_score, val_f1_score = test(model, data_loader['val'], device)
test_auc_score, test_f1_score = test(model, data_loader['test'], device)
valid_scores.append((val_auc_score, val_f1_score))
test_scores.append((test_auc_score, test_f1_score))
spent_time = time.time() - start_time
eval_results = (f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Train Loss: {epoch_loss:.4f}, '
f'Valid AUC: {100 * val_auc_score:.2f}%, '
f'Valid F1: {100 * val_f1_score:.2f}%, '
f'Test AUC: {100 * test_auc_score:.2f}%, '
f'Test F1: {100 * test_f1_score:.2f}%')
print(eval_results)
print(f'---\nTraining Time Per Epoch: {spent_time / eval_steps: .4f} s\n---')
start_time = time.time()
return train_loss, valid_scores, test_scores
def test(model, data_loader, device):
model.eval()
predictions, ground_truths = [], []
for batch_data in tqdm.tqdm(data_loader, disable=True):
with torch.no_grad():
batch_data = batch_data.to(device)
predictions.append(model(batch_data))
ground_truths.append(batch_data['user', 'item'].edge_label)
predictions = torch.cat(predictions, dim=0).cpu().numpy()
ground_truths = torch.cat(ground_truths, dim=0).cpu().numpy()
out_sigmoid = torch.Tensor(predictions).sigmoid().cpu().numpy()
out_probabilities = np.rint(out_sigmoid)
auc_sc = roc_auc_score(ground_truths, out_sigmoid)
f1_sc = f1_score(ground_truths, out_probabilities)
return auc_sc, f1_sc
def main():
    """CLI entry point for the news recommender GNN.

    Parses command-line arguments, loads the dataset described in
    datasets/info.json, builds the heterogeneous user-item graph and its
    train/val/test loaders, constructs the Model, then trains and
    evaluates it for the requested number of runs.
    """
    parser = argparse.ArgumentParser(description='News Recommender GNN Model')
    parser.add_argument("--data", default="cit_pt",
                        choices=["adressa_no","adressa_tr",
                                 "cit_pt","cit_tr","cit_en",
                                 "cit_pten","cit_tren",
                                 "mind"],
                        help="default: %(default)s")
    parser.add_argument('--plm', default="ptbert",
                        choices=["mbert","enbert","nbbert",
                                 "ptbert","xlm","gpt2"],
                        help="plm model for item nodes, default: %(default)s")
    # NOTE: flag name keeps the historical spelling for CLI compatibility.
    parser.add_argument('--use_seperate_test_data', action='store_true')
    parser.add_argument('--num_user_features', type=int, default=32,
                        help="-1 for don't use user_features, default: %(default)i")
    parser.add_argument('--num_item_features', type=int, default=768,
                        help="-1 for don't use item_features, default: %(default)i")
    parser.add_argument('--user_input_layer', default="lin", choices=["emb","lin"],
                        help="input layer for user nodes, default: %(default)s")
    parser.add_argument('--item_input_layer', default="lin", choices=["emb","lin","emb+lin"],
                        help="input layer for item nodes, default: %(default)s")
    parser.add_argument('--input_hidden_channels', type=int, default=64,
                        help="default: %(default)i")
    parser.add_argument('--encoder', default="SAGE", choices=["SAGE","GAT","GCN"],
                        help="default: %(default)s")
    parser.add_argument('--predictor', default="DOT", choices=["MLP","MLPDOT","DOT"],
                        help="default: %(default)s")
    parser.add_argument('--encoder_layers', type=int, default=2,
                        help="default: %(default)i")
    parser.add_argument('--predictor_layers', type=int, default=0,
                        help="0 for DOT predictor, default: %(default)i")
    parser.add_argument('--encoder_hidden_channels', type=int, default=64,
                        help="default: %(default)i")
    parser.add_argument('--predictor_hidden_channels', type=int, default=32,
                        help="default: %(default)i")
    parser.add_argument('--dropout', type=float, default=0.0,
                        help="default: %(default)f")
    parser.add_argument('--batch_size', type=int, default=1024,
                        help="default: %(default)i")
    parser.add_argument('--lr', type=float, default=0.001,
                        help="default: %(default)f")
    parser.add_argument('--epochs', type=int, default=-1,
                        help="-1 to use model default value, default: %(default)i")
    parser.add_argument('--eval_steps', type=int, default=1,
                        help="number of epochs at which logs printed, default: %(default)i")
    parser.add_argument('--device', type=int, default=0,
                        help="default: %(default)i")
    parser.add_argument('--runs', type=int, default=1,
                        help="default: %(default)i")
    args = parser.parse_args()
    print(str(args))
    # device setup
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)
    print(f"Device: {device}")
    # get data: dataset locations and per-dataset defaults live in info.json
    with open('datasets/info.json', 'r') as f:
        datasets_info = json.load(f)
    users_csv = datasets_info[args.data]['users_csv']
    items_csv = datasets_info[args.data]['items_csv']
    user_naming = datasets_info[args.data]['user_naming']
    item_naming = datasets_info[args.data]['item_naming']
    feats_file = datasets_info[args.data]['plm'][args.plm]
    epochs = datasets_info[args.data]['epochs'] if args.epochs == -1 else args.epochs
    print(f'Loading user and item data frames for: {args.data}')
    df_users, df_items = get_data_frames(users_csv, items_csv, user_naming, item_naming)
    item_feature_tensor = torch.load(feats_file, map_location=device)
    user_features_init = '' if args.num_user_features == -1 else 'zero'
    # Sampling hyper-parameters shared by every loader below.
    disjoint_train_ratio = 0.3
    neg_sampling_ratio = 1.0
    num_neighbors = [10, 5]
    if args.use_seperate_test_data:
        # Separate train/test user populations: build two graphs and take
        # train/val loaders from one, the test loader from the other.
        print(f'Doing seperate user train and test data for {args.data}')
        users_train_csv = datasets_info[args.data]['users_train_csv']
        users_test_csv = datasets_info[args.data]['users_test_csv']
        print(f'Train:')
        df_users_train, _ = get_data_frames(users_train_csv, items_csv, user_naming, item_naming)
        train_edges_coo = create_graph(df_users_train, df_items, user_naming, item_naming)
        train_data = create_hetero_graph(edges_coo=train_edges_coo,
                                         user_features_init=user_features_init,
                                         user_feature_size=args.num_user_features,
                                         item_feature_tensor=item_feature_tensor)
        print(f'Test:')
        df_users_test, _ = get_data_frames(users_test_csv, items_csv, user_naming, item_naming)
        test_edges_coo = create_graph(df_users_test, df_items, user_naming, item_naming)
        test_data = create_hetero_graph(edges_coo=test_edges_coo,
                                        user_features_init=user_features_init,
                                        user_feature_size=args.num_user_features,
                                        item_feature_tensor=item_feature_tensor)
        val_ratio, test_ratio = 0.1, 0.1
        train_loader, val_loader, _ = create_data_loaders(data=train_data, val_ratio=val_ratio, test_ratio=test_ratio,
                                                          disjoint_train_ratio=disjoint_train_ratio,
                                                          num_neighbors=num_neighbors,
                                                          neg_sampling_ratio=neg_sampling_ratio,
                                                          batch_size=args.batch_size, shuffle=True)
        # The held-out graph contributes only its test split (90%).
        val_ratio, test_ratio = 0.0, 0.9
        _, _, test_loader = create_data_loaders(data=test_data, val_ratio=val_ratio, test_ratio=test_ratio,
                                                disjoint_train_ratio=disjoint_train_ratio,
                                                num_neighbors=num_neighbors,
                                                neg_sampling_ratio=neg_sampling_ratio,
                                                batch_size=args.batch_size, shuffle=True)
        data = train_data
    else:
        # Single graph with an 80/10/10 edge split.
        val_ratio, test_ratio = 0.1, 0.1
        edges_coo = create_graph(df_users, df_items, user_naming, item_naming)
        data = create_hetero_graph(edges_coo=edges_coo,
                                   user_features_init=user_features_init,
                                   user_feature_size=args.num_user_features,
                                   item_feature_tensor=item_feature_tensor)
        train_loader, val_loader, test_loader = create_data_loaders(data=data, val_ratio=val_ratio, test_ratio=test_ratio,
                                                                    disjoint_train_ratio=disjoint_train_ratio,
                                                                    num_neighbors=num_neighbors,
                                                                    neg_sampling_ratio=neg_sampling_ratio,
                                                                    batch_size=args.batch_size, shuffle=True)
    data_loader = {
        'train': train_loader,
        'val': val_loader,
        'test': test_loader
    }
    # TODO Pre-compute GCN normalization
    # if args.gnn_model == 'GCN': data.adj_t = gcn_normalization(data.adj_t)
    # data contains the whole graph
    num_user_nodes = data["user"].num_nodes if 'emb' in args.user_input_layer else -1
    num_item_nodes = data["item"].num_nodes if 'emb' in args.item_input_layer else -1
    graph_metadata = data.metadata()
    model = Model(
        graph_metadata=graph_metadata,
        num_user_feats=args.num_user_features,
        num_item_feats=args.num_item_features,
        num_user_nodes=num_user_nodes,
        num_item_nodes=num_item_nodes,
        user_input_layer=args.user_input_layer,
        item_input_layer=args.item_input_layer,
        input_hidden_channels=args.input_hidden_channels,
        encoder=args.encoder,
        predictor=args.predictor,
        num_encoder_layers=args.encoder_layers,
        num_predictor_layers=args.predictor_layers,
        encoder_hidden_channels=args.encoder_hidden_channels,
        predictor_hidden_channels=args.predictor_hidden_channels,
        dropout=args.dropout
    )
    model = model.to(device)
    print(model)
    for run in range(args.runs):
        # model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        criterion = F.binary_cross_entropy_with_logits
        train_loss, valid_scores, test_scores = train(model, optimizer, criterion,
                                                      data_loader, device, run,
                                                      epochs, args.eval_steps)
    # p.numel() already counts every element of each parameter tensor, so a
    # single generator suffices. (The original nested
    # `for param in model.parameters() for p in param` did redundant work
    # and raised TypeError on 0-dim parameters, which are not iterable.)
    total_params = sum(p.numel() for p in model.parameters())
    print(f'Total number of model parameters is {total_params}')
if __name__ == "__main__":
    main()
| mbekmyrz/newsrec | main.py | main.py | py | 12,722 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "model.model.train",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "model.model",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number"... |
36170840461 | import logging
from typing import List
from volatility3.framework import renderers, exceptions, interfaces
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import plugins
from volatility3.framework.layers import intel
vollog = logging.getLogger(__name__)
class Statistics(plugins.PluginInterface):
    """Lists statistics about the memory space."""

    # Minimum volatility3 framework version this plugin requires.
    _required_framework_version = (2, 0, 0)
    @classmethod
    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
        """Declare the plugin's configuration requirements (the kernel layer)."""
        return [
            requirements.TranslationLayerRequirement(
                name="primary",
                description="Memory layer for the kernel",
                architectures=["Intel32", "Intel64"],
            )
        ]
    def _generator(self):
        """Walk the layer's address space page by page and tally page categories.

        Yields one TreeGrid row of counters: valid, swapped and invalid pages,
        each split into "all" and "large" (page size differing from the
        layer's native page size), plus invalid addresses unrelated to paging.
        """
        # Do mass mapping and determine the number of different layers and how many pages go to each one
        layer = self.context.layers[self.config["primary"]]
        # Initialise all seven counters to zero in one chained assignment.
        page_count = (
            swap_count
        ) = (
            invalid_page_count
        ) = (
            large_page_count
        ) = large_swap_count = large_invalid_count = other_invalid = 0
        # Page statistics are only meaningful for Intel translation layers.
        if isinstance(layer, intel.Intel):
            page_addr = 0
            expected_page_size = 1 << layer.bits_per_register
            while page_addr < layer.maximum_address:
                try:
                    # Take the first mapping chunk starting at page_addr;
                    # its size tells us how far to advance.
                    _, _, _, page_size, layer_name = list(
                        layer.mapping(page_addr, 2 * expected_page_size)
                    )[0]
                    if layer_name != layer.config["memory_layer"]:
                        # Mapped, but backed by a layer other than physical
                        # memory (e.g. a swap layer).
                        swap_count += 1
                    else:
                        page_count += 1
                        if page_size > expected_page_size:
                            large_page_count += 1
                except exceptions.SwappedInvalidAddressException as excp:
                    # Page is swapped out; invalid_bits encodes its size.
                    swap_count += 1
                    page_size = 1 << excp.invalid_bits
                    if page_size != expected_page_size:
                        large_swap_count += 1
                except exceptions.PagedInvalidAddressException as excp:
                    # Page table entry marks the page invalid.
                    invalid_page_count += 1
                    page_size = 1 << excp.invalid_bits
                    if page_size != expected_page_size:
                        large_invalid_count += 1
                except exceptions.InvalidAddressException as excp:
                    # Invalid for a reason unrelated to page lookup; advance
                    # by one native page and record it separately.
                    other_invalid += 1
                    page_size = expected_page_size
                    vollog.debug(
                        "A non-page lookup invalid address exception occurred at: {} in layer {}".format(
                            hex(excp.invalid_address), excp.layer_name
                        )
                    )
                page_addr += page_size
                self._progress_callback(
                    (page_addr * 100) / layer.maximum_address, "Reading memory"
                )
        yield (
            0,
            (
                page_count,
                large_page_count,
                swap_count,
                large_swap_count,
                invalid_page_count,
                large_invalid_count,
                other_invalid,
            ),
        )
    def run(self):
        """Return the TreeGrid holding the page-statistics counters."""
        return renderers.TreeGrid(
            [
                ("Valid pages (all)", int),
                ("Valid pages (large)", int),
                ("Swapped Pages (all)", int),
                ("Swapped Pages (large)", int),
                ("Invalid Pages (all)", int),
                ("Invalid Pages (large)", int),
                ("Other Invalid Pages (all)", int),
            ],
            self._generator(),
        )
| volatilityfoundation/volatility3 | volatility3/plugins/windows/statistics.py | statistics.py | py | 3,720 | python | en | code | 1,879 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "volatility3.framework.interfaces.plugins.PluginInterface",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "volatility3.framework.interfaces.plugins",
"line_number": 12,
... |
33049652214 | from django.shortcuts import render
from trino.dbapi import connect
from trino.auth import BasicAuthentication
import pandas as pd
from core.models import Item
import numpy as np
# Create your views here.
# Module-level Trino connection shared by all views in this module.
# NOTE(review): username and API token are hard-coded in source — move them
# to environment variables or a secrets manager before deploying/sharing.
conn = connect(host="tcp.cheerful-maggot.dataos.app",
               port="7432",
               auth=BasicAuthentication(
        'balaji', "dG9rZW5fc2NhcmNlbHlfc2VyaW91c2x5X2ZyZXNoX2tpZC43YmJmMDIwZS0xMmJhLTRkNjEtYmFmZS0zNGQzNTcxZDZkOGQ="),
               http_scheme="https",
               http_headers={"cluster-name": "minervac"}  # eg:minervaa
               )
def create_prods(request):
    """Django view: load SKU/product rows from Trino and bulk-create Items.

    Reads sku_ids from a local CSV, pulls the matching product and category
    rows over the module-level Trino connection, reshapes the columns to the
    Item model's fields, creates one Item per row, then renders db_check.html.
    """
    skus_df = pd.read_csv('./dbmigrate/skus/skus.csv')
    # NOTE(review): query built by string formatting; sku_ids come from a
    # local CSV so risk is limited, but parameterized queries would be safer.
    qr = "SELECT * FROM redshift.retail_accelerator.product WHERE sku_id IN ({0})".format(','.join(["'"+s+"'" for s in skus_df['sku_id']]))
    cat_qr = 'SELECT * FROM redshift.retail_accelerator.product_category'
    cat_df = pd.read_sql(cat_qr, conn)
    df = pd.read_sql(qr, conn)
    # Map warehouse column names to Item model field names.
    rename_dict = {'sku_id': 'id',
                   'product_name': 'title',
                   'list_price': 'price',
                   'sale_price': 'discount_price',
                   'product_category_id': 'category',
                   'product_subcategory_id': 'subcat_id',
                   'product_subcategory': 'subcat',
                   'product_description': 'description'
                   }
    df_out = df.rename(columns=rename_dict)
    # Slug mirrors the SKU id.
    df_out['slug'] = df_out['id'].copy()
    #df_out['discount_price'] = df_out['price'].apply(lambda x: float("{:.2f}".format(x*np.random.uniform(low = 0.6, high = 0.95))))
    df_out = df_out[list(rename_dict.values())+['slug']]
    # Join in the category name, which doubles as the image file name below.
    df_out = df_out.merge(cat_df[['product_category', 'product_category_id']], left_on='category',
                          right_on='product_category_id', how='left')
    df_out.rename(columns={'product_category': 'image'}, inplace=True)
    df_out.drop(['product_category_id'], axis=1, inplace=True)
    # Normalise category spellings so they match the image asset names.
    df_out['image'] = df_out['image'].str.replace('mens', 'men')
    df_out['image'] = df_out['image'].str.replace('Mens', 'Men')
    df_out['image'] = df_out['image'].apply(lambda x: x+'.jpeg')
    df_out['label'] = ['P']*len(df_out)
    out_records = df_out.to_dict(orient='records')
    # One INSERT per record; a bulk_create would be faster for large sets.
    for r in out_records:
        Item.objects.create(id=r['id'], title=r['title'], price=r['price'],
                            discount_price=r['discount_price'], category=r['category'], slug=r['slug'], subcat_id = r['subcat_id'], subcat = r['subcat'], description=r['description'], image=r['image'], label=r['label'])
    return render(request, 'db_check.html')
| ramesh-tmdc/sample | views.py | views.py | py | 2,609 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "trino.dbapi.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "trino.auth.BasicAuthentication",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pan... |
42169327955 | #! /usr/bin/env python3
import os
import requests
#List all the files in /supplier-data/descriptions
for root, dirs, files in os.walk("./supplier-data/descriptions"):
#Loop through the list of description files of fruits
for file_name in files:
with open(os.path.join(root,file_name)) as file:
#Read through lines of the description file
reader = file.readlines()
#Store name, weight, description and image name in a dictionary
description = {"name": reader[0],
"weight": int(reader[1].split(" ")[0]),
"description": reader[2],
"image_name": file_name.split(".")[0] + ".jpeg"
}
#POST the dictionary to the webserver in JSON format
response = requests.post("http://localhost/fruits/", json=description)
response.raise_for_status()
| peterncs/fruit | run.py | run.py | py | 927 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 19,... |
15578279834 | # ================================================================================
# This script instantiates the llm object with the relevant parameters
# ================================================================================
from langchain.llms import CTransformers
# Local CTransformers wrapper for Llama-2-7B-Chat
# Local CTransformers wrapper for Llama-2-7B-Chat
# NOTE(review): absolute, machine-specific model path — consider reading it
# from configuration or an environment variable.
llm = CTransformers(
    model="/Users/deveshparagiri/Downloads/models/sage-v2-q8_0.bin",
    model_type="llama",  # Model type Llama
    # max_new_tokens caps generation length; temperature=0.5 keeps output
    # moderately deterministic.
    config={"max_new_tokens": 256, "temperature": 0.5},
)
| DeveshParagiri/sage | llm.py | llm.py | py | 521 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "langchain.llms.CTransformers",
"line_number": 7,
"usage_type": "call"
}
] |
18323604988 | import aiohttp
import urllib.parse as urlparse
from typing import Optional
from middleware.config import Settings
from middleware.exception import MiddlewareException
from middleware.external.base.application import Application as BaseApplication
class Application(BaseApplication):
    """Schema-service client: resolves the newest internal schema for an entity type."""
    @staticmethod
    async def _get_internal_schema_info_response_callback(res: aiohttp.ClientResponse):
        """Parse the schema-info response; raise MiddlewareException on non-200.

        The message text (Chinese: "failed to fetch internal schema") carries
        the url, status and body for debugging; on success returns the
        response's "link" field.
        """
        if res.status != 200:
            response = await res.text()
            raise MiddlewareException("获取internal schema失败, url: {}, status: {}, response: {}".format(
                res.url.human_repr(), res.status, response
            ))
        result = await res.json()
        return result["link"]
    async def _get_internal_schema_info(self, tenant: str, name: str):
        """Fetch the newest schema link for (tenant, entity) and return its URL path."""
        result = await self.settings.session.get(
            self.settings.newest_schema_url.format(tenant=tenant, entityType=name),
            func=self._get_internal_schema_info_response_callback,
            ssl=False, timeout=120
        )
        # Keep only the path component of the returned link.
        result = urlparse.urlparse(result).path
        return result
    async def get_newest_schema_by_unique_name(self, tenant: str, name: str) -> Optional[str]:
        """Return a JSON-pointer-style reference into the newest schema's data properties."""
        result = await self._get_internal_schema_info(tenant, name)
        return "{}#/properties/data".format(result)
def get_app():
    """Factory: build an Application wired to the module-level Settings."""
    return Application(Settings)
| chyaoyuan/data-sync-cdc | middleware/external/schema/application.py | application.py | py | 1,365 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "middleware.external.base.application.Application",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "aiohttp.ClientResponse",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "middleware.exception.MiddlewareException",
"line_number": 15,
... |
74692201954 | #!/usr/bin/env python3
#
# CLIMPORN
#
# Prepare 2D maps (monthly) that will later become a movie!
# NEMO output and observations needed
#
# L. Brodeau, January 2019
#
import sys
import glob
#import argparse as ap
import numpy as nmp
from PIL import Image
#
# ClimPorn:
from climporn.utils import chck4f
import climporn.nemo_hboxes as nhb
#nghost = 3
# About files to use:
fpref_p = './figs/CURLOF/CURLOF_TROPICO05_NST-_ALL_'
fsuff_p = '_on2.png'
fpref_c = './figs/CURLOF/CURLOF_CALEDO10-_ALL_'
fsuff_c = '_on2.png'
fpref_o = './figs/montage_CURLOF_TROPICO05-CALEDO10_'
nratio = 5
ncrop_chld = 3
#ncrop_chld = 1
#ncrop_chld = 3
#ncrop_chld = 10
ipc = 35 ; # location (Fortran indexing) of bottom left corner AGRIF_FixedGrids.in)
jpc = 43 ; # of the child box into parent box in Agrif ! (as in
cdate = '2016-12-22_00'
# Final box to keep (mind that for images it's flipped upside down!):
j1b = 336 ; i1b = 0
j2b = 680 ; i2b = int(16./9.*float(j2b-j1b))
# COLOR FOR THE NESTING BOX LINE:
# (0-255) R G B transparency
#rgb_f = [ 0 , 0 , 0 , 255 ] ; # solid black
rgb_f = [ 255, 237, 0 , 255 ] ; # yellow ON (255,237,0)
npf = 2 ; # number of poins for frame...
# Collect the parent/child frame files; they must pair up one-to-one by date.
list_im_p = glob.glob(fpref_p+"*"+fsuff_p)
list_im_c = glob.glob(fpref_c+"*"+fsuff_c)
nbf = len(list_im_c)
if len(list_im_p) != nbf:
    print(' Problem different number of images between parent and child!', len(list_im_p), nbf) ; sys.exit(0)
for j in range(nbf):
    cf_prnt = list_im_p[j]
    chck4f(cf_prnt)
    # Recover the date stamp by stripping the parent prefix and suffix.
    cdate = str.replace( cf_prnt, fpref_p, '')
    cdate = str.replace( cdate, fsuff_p, '')
    cf_chld = fpref_c+cdate+fsuff_c ; # that's what we expect
    chck4f(cf_chld)
    cf_out = str.replace( cf_prnt, fpref_p, fpref_o)
    cf_out = str.replace( cf_out, fsuff_p, '.png')
    #print(cf_prnt,' ',cf_chld,' ', cdate, )
    #print(cf_out)
    #sys.exit(0)
    chld = Image.open(cf_chld)
    prnt = Image.open(cf_prnt)
    (nyc,nxc,nrgb_c) = nmp.shape(chld)
    (nyp,nxp,nrgb_p) = nmp.shape(prnt)
    #print(' *** Image array shape for child = ', nyc,nxc,nrgb_c)
    #print(' *** Image array shape for parent = ', nyp,nxp,nrgb_p)
    # Both images must be RGBA and the parent divisible by the zoom ratio.
    if nrgb_c != 4: print(' Problem #1 with your child image, not what we expected!') ; sys.exit(0)
    if nrgb_p != 4: print(' Problem #1 with your parent image, not what we expected!') ; sys.exit(0)
    if nxp%nratio!=0 or nyp%nratio!=0: print(' Problem #1 with your parent image, it shoud be a multiple of '+str(nratio)+'!') ; sys.exit(0)
    xchld = nmp.array(chld)
    xprnt = nmp.array(prnt)
    chld.close()
    prnt.close()
    print("xchld", nmp.shape(xchld))
    print("xprnt", nmp.shape(xprnt))
    #print(xprnt[0,0,:])
    #print(xprnt[100,100,:])
    #ip = (ipc - 1) * nratio + nghost
    #jp = (jpc - 1) * nratio + nghost
    # Pixel offset of the child box inside the parent image; the "+2" is an
    # empirical fudge — TODO confirm against AGRIF corner-index conventions.
    ip = (ipc + 1) * nratio + 2
    jp = (jpc + 1) * nratio + 2
    #ip = ipc * nratio
    #jp = jpc * nratio
    # Child array we keep after croping:
    if ncrop_chld >= 1:
        XC = xchld[ncrop_chld:-ncrop_chld,ncrop_chld:-ncrop_chld]
    else:
        XC = xchld[:,:]
    (Ny,Nx,Nr) = nmp.shape(XC)
    #print(" shape XC =", Ny,Nx,Nr)
    # Drawing frame: paint an npf-pixel border in rgb_f on all four edges.
    for i in range(4):
        XC[ 0:npf,:,i] = rgb_f[i]
        XC[Ny-npf:Ny ,:,i] = rgb_f[i]
        XC[:, 0:npf,i] = rgb_f[i]
        XC[:,Nx-npf:Nx ,i] = rgb_f[i]
    # Paste position in the parent; image rows run top-down, hence nyp-jp-Ny.
    j1 = nyp-jp-Ny - ncrop_chld
    i1 = ip + ncrop_chld
    xprnt[j1:j1+Ny,i1:i1+Nx,:] = XC[:,:,:]
    # Crop the montage to the final 16:9 box before saving.
    image_out = Image.fromarray(xprnt[j1b:j2b,i1b:i2b,:])
    #cf_chld = './child.png'
    ## Then save it:
    #image_chld.save(cf_chld)
    #print(' *** Image '+cf_chld+' saved!\n')
    # Then save it:
    image_out.save(cf_out)
    print(' *** Image '+cf_out+' saved!\n')
    del XC, xprnt, xchld
| brodeau/climporn | python/scripts/mk_image_montage_agrif.py | mk_image_montage_agrif.py | py | 3,867 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "climporn.utils.chck4f",
"line_number... |
32394088452 | import csv
from face import Face
faces_train = []
faces_val = []
faces_test = []

# Stream rows from the FER2013 CSV, routing each face into the split named
# by its "Usage" column. The 'with' block guarantees the file handle is
# closed (the original passed a bare open() to DictReader and never closed it).
with open("fer2013/fer2013.csv") as csv_file:
    for row in csv.DictReader(csv_file):
        usage = row.get("Usage")
        if usage == "Training":
            faces_train.append(Face(row))
        elif usage == "PublicTest":
            faces_val.append(Face(row))
        elif usage == "PrivateTest":
            faces_test.append(Face(row))

# Sanity check: print the split sizes.
print((len(faces_train), len(faces_val), len(faces_test)))
| eshawang/Facial-Expression-Analysis | format_data.py | format_data.py | py | 439 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "csv.DictReader",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "face.Face",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "face.Face",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "face.Face",
"line_number": 15,
... |
72263561954 | """create auth tables
Revision ID: 6e0daa4be1f8
Revises: cb42b1c187d9
Create Date: 2019-09-26 09:17:12.642904
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6e0daa4be1f8'
down_revision = 'cb42b1c187d9'
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'users' table and the dependent 'permissions' table."""
    op.create_table(
        'users',
        sa.Column('id', sa.Integer, nullable=False),
        sa.Column('login', sa.String(256), nullable=False),
        sa.Column('passwd', sa.String(256), nullable=False),
        sa.Column('is_superuser', sa.Boolean, nullable=False,
                  server_default='FALSE'),
        sa.Column('disabled', sa.Boolean, nullable=False,
                  server_default='FALSE'),
        # indices
        sa.PrimaryKeyConstraint('id', name='user_pkey'),
        sa.UniqueConstraint('login', name='user_login_key'),
    )
    op.create_table(
        'permissions',
        sa.Column('id', sa.Integer, nullable=False),
        sa.Column('user_id', sa.Integer, nullable=False),
        sa.Column('perm_name', sa.String(64), nullable=False),
        # indices
        sa.PrimaryKeyConstraint('id', name='permission_pkey'),
        # Permissions are removed automatically when their user is deleted.
        sa.ForeignKeyConstraint(['user_id'], ['users.id'],
                                name='user_permission_fkey',
                                ondelete='CASCADE'),
    )
def downgrade():
    """Drop both tables created by upgrade().

    'permissions' must be dropped first: it holds a foreign key to
    'users.id', so dropping 'users' first fails on the dependent
    constraint (the original dropped the tables in the wrong order).
    """
    op.drop_table('permissions')
    op.drop_table('users')
| best-doctor/its_on | db/migrations/versions/6e0daa4be1f8_create_auth_tables.py | 6e0daa4be1f8_create_auth_tables.py | py | 1,439 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
37897987245 | from flask import Flask
from flask import request
import urllib.request
import re
from requests_html import HTMLSession
app = Flask(__name__)
#Makes all responses plaintext
@app.after_request
def treat_as_plain_text(response):
    """Force every response's content type to text/plain."""
    response.headers["content-type"] = "text/plain"
    return response
@app.route('/')
def getList():
    """Scrape the Letterboxd list given by the ?listurl= query parameter."""
    # NOTE(review): url is None when the parameter is missing; getPage then
    # fails and reports an error rather than returning a list.
    url = request.args.get("listurl")
    return getPage(url)
@app.route('/sample')
def getSampleList():
    """Scrape a fixed sample Letterboxd list (demo endpoint)."""
    return getPage("https://letterboxd.com/tommypedersen/list/tommys-movie-collection/page/3/")
#url = "https://letterboxd.com/tommypedersen/list/2016/"
url = "https://letterboxd.com/tommypedersen/list/tommys-movie-collection/page/3/"
# NOTE(review): this fires a network request at import time and `resp` is
# never used afterwards — consider removing it or moving it into a function.
session = HTMLSession()
resp = session.get(url)
def getPage(url):
    """Fetch a Letterboxd list page and return its film titles, one per line.

    Titles are scraped from the alt="..." attributes of the page's images;
    the first alt attribute is not a film and is discarded.

    Returns an empty string on failure. (The original returned set() here,
    which is inconsistent with the str success path and would crash the
    Flask routes that return this value directly.)
    """
    try:
        response = urllib.request.urlopen(url)
        print("Established connection")
        try:
            data = response.read()
            page = data.decode("utf8")
        finally:
            # Close even if read/decode raises (the original leaked the
            # connection on any exception).
            response.close()
        #Pure html solution.
        films = list(re.findall("alt=\"(.*?)\"", page))
        films.pop(0)
        listStr = ""
        for i, movie in enumerate(films, start=1):
            print("[{}] {}".format(i, movie))
            listStr += movie + "\n"
        return listStr
    except Exception as e:
        print(e)
        print("Failed to retrieve info from webpage")
        return ""
myStr = getPage(url)
| ThatOneCamel/boxdScrape | _oldVersion/app.py | app.py | py | 1,380 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
72307539555 | from sklearn.preprocessing import normalize
from sklearn.neighbors import NearestNeighbors
from tqdm import tqdm
import os
import pickle
from datetime import datetime
# The --process step must have produced the pickled crude embeddings first;
# fail fast with instructions if it has not.
if not os.path.exists("saves/crude_embeddings.pkl"):
    raise ValueError("Please run --process first to generate embeddings")
else:
    with open("saves/crude_embeddings.pkl", "rb") as f:
        # Maps (ticker, date) -> embedding vector — TODO confirm with producer.
        crude_embeddings = pickle.load(f)
def normalize_embeddings():
    """Group, sort and L2-normalize the crude embeddings.

    Re-keys the module-level crude_embeddings as {ticker: {date: vec}} with
    dates in ascending order, drops each ticker's last 30 dates (presumably
    a held-out window — TODO confirm with caller), then returns:
      - normalized_embeddings: rows L2-normalized via sklearn normalize()
      - embeddings_keys: {tuple(normalized row): (ticker, date)} in the same
        order the rows were stacked
    """
    middle_data = {}
    # Group embeddings by ticker: {ticker: {date: embedding}}.
    for ticker, date in crude_embeddings.keys():
        if ticker not in middle_data:
            middle_data[ticker] = {}
        middle_data[ticker][date] = crude_embeddings[(ticker, date)]
    for ticker in middle_data:
        # sort the dates in ascending order
        sorted_dates = sorted(
            middle_data[ticker], key=lambda x: datetime.strptime(x, "%Y-%m-%d")
        )
        # create a new dictionary with sorted dates
        sorted_data = {date: middle_data[ticker][date] for date in sorted_dates}
        # replace the original dictionary with the sorted one
        middle_data[ticker] = sorted_data
    normalized_embeddings = []
    # Stack all embeddings, skipping each ticker's 30 most recent dates.
    for ticker in middle_data.keys():
        for date in list(middle_data[ticker].keys())[:-30]:
            normalized_embeddings.append(middle_data[ticker][date])
    normalized_embeddings = normalize(normalized_embeddings)
    embeddings_keys = {}
    i = 0
    # Second pass in the same order, so row i lines up with its (ticker, date).
    for ticker in tqdm(middle_data.keys(), desc="Creating embeddings keys"):
        for date in list(middle_data[ticker].keys())[:-30]:
            embeddings_keys[tuple(normalized_embeddings[i])] = (ticker, date)
            i += 1
    return normalized_embeddings, embeddings_keys
def knn(normalized_embeddings, embeddings_keys):
    """Pickle a NearestNeighbors estimator together with its search data.

    NOTE(review): the estimator is saved *unfitted* — fit() is never called
    here, so whatever loads saves/knn.pkl must call
    fit(normalized_embeddings) before querying. Verify the consumer does so.
    """
    nbrs = NearestNeighbors(n_neighbors=1, algorithm="ball_tree")
    save = [nbrs, normalized_embeddings, embeddings_keys]
    # Save the model to disk
    with open("saves/knn.pkl", "wb") as f:
        pickle.dump(save, f)
| finned-tech/MEIP | model/train.py | train.py | py | 1,936 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
... |
37798492039 | import pathlib
import numpy as np
import pytest
import meshio
from . import helpers
@pytest.mark.parametrize(
    "mesh",
    [
        helpers.empty_mesh,
        helpers.tet_mesh,
        helpers.hex_mesh,
        helpers.tet_mesh,
        helpers.add_cell_sets(helpers.tet_mesh),
    ],
)
@pytest.mark.parametrize("binary", [False, True])
def test(mesh, binary, tmp_path):
    """Round-trip each mesh through the FLAC3D writer/reader (ASCII and binary)."""
    # mesh.write("out.f3grid")
    # write_read asserts the re-read mesh matches the input within 1e-15.
    helpers.write_read(
        tmp_path,
        lambda f, m: meshio.flac3d.write(f, m, binary=binary),
        meshio.flac3d.read,
        mesh,
        1.0e-15,
    )
@pytest.mark.parametrize(
    "filename",
    ["flac3d_mesh_ex.f3grid", "flac3d_mesh_ex_bin.f3grid"],
)
def test_reference_file(filename):
    """Read a bundled FLAC3D reference file (ASCII/binary) and verify its contents."""
    this_dir = pathlib.Path(__file__).resolve().parent
    filename = this_dir / "meshes" / "flac3d" / filename
    mesh = meshio.read(filename)
    # points
    assert np.isclose(mesh.points.sum(), 307.0)
    # cells: expected (cell type, count) per block, in file order
    ref_num_cells = [
        ("quad", 15),
        ("triangle", 3),
        ("hexahedron", 45),
        ("pyramid", 9),
        ("hexahedron", 18),
        ("wedge", 9),
        ("hexahedron", 6),
        ("wedge", 3),
        ("hexahedron", 6),
        ("wedge", 3),
        ("pyramid", 6),
        ("tetra", 3),
    ]
    assert [
        (cell_block.type, len(cell_block)) for cell_block in mesh.cells
    ] == ref_num_cells
    # Cell sets: five sets of 12 cells each
    for arr in mesh.cell_sets.values():
        assert len(arr) == 12
    assert [len(arr) for arr in mesh.cell_sets.values()] == [12, 12, 12, 12, 12]
| nschloe/meshio | tests/test_flac3d.py | test_flac3d.py | py | 1,540 | python | en | code | 1,691 | github-code | 1 | [
{
"api_name": "meshio.flac3d.write",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "meshio.flac3d",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "meshio.flac3d",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark... |
29001110749 | import pygame
class playerProjectiles(pygame.sprite.Sprite):
    """A single upward-travelling player bullet sprite (plays a shot sound on spawn)."""
    def __init__(self, x):
        # x: the player sprite; the bullet spawns just above its rect.
        super().__init__()
        self.entity = pygame.image.load("../assets/playerbullet.png").convert_alpha()
        self.rect = self.entity.get_rect()
        # Offset so the bullet appears centred above the player.
        self.rect = self.rect.move(x.rect.left+29, x.rect.top-50)
        self.entity = pygame.transform.scale(self.entity, (45,19))
        self.entity = pygame.transform.rotate(self.entity, 90)
        # Mask for pixel-perfect collision detection.
        self.mask = pygame.mask.from_surface(self.entity)
        # NOTE(review): mixer.music.load/play both return None, so
        # self.playershot ends up None; short effects would normally use
        # pygame.mixer.Sound instead of the music channel.
        self.playershot = pygame.mixer.music.load('../assets/playershot.mp3')
        self.playershot = pygame.mixer.music.play(0)
    def checkBounds(self):
        # Returns 1 once the bullet has left the top of the screen
        # (implicitly None otherwise).
        if self.rect.top <= 0:
            return 1
class playerProjecticleList:
    """Tracks the player's live bullets and moves/prunes them each frame."""
    def __init__(self, player):
        self.playerProjectileArray = []
        self.player = player
    def createBullet(self):
        """Spawn a new bullet at the player's current position."""
        self.playerProjectileArray.append(playerProjectiles(self.player))
    def moveAll(self):
        """Advance every bullet 15px upward; drop those past the top edge.

        Iterates over a snapshot of the list: the original removed elements
        from the list *while iterating it*, which skips the bullet that
        follows each removed one (it was never moved that frame).
        """
        for bullet in list(self.playerProjectileArray):
            bullet.rect = bullet.rect.move(0, -15)
            if bullet.checkBounds() == 1:
                self.playerProjectileArray.remove(bullet)
{
"api_name": "pygame.sprite",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.... |
35813740191 | """
Distributor, Layout, Transform, and Transpose class definitions.
"""
import logging
from mpi4py import MPI
import numpy as np
import itertools
from collections import OrderedDict
from ..tools.cache import CachedMethod, CachedAttribute
from ..tools.config import config
from ..tools.array import prod
from ..tools.general import OrderedSet
logger = logging.getLogger(__name__.split('.')[-1])
GROUP_TRANSFORMS = config['transforms'].getboolean('GROUP_TRANSFORMS')
TRANSPOSE_LIBRARY = config['parallelism'].get('TRANSPOSE_LIBRARY')
GROUP_TRANSPOSES = config['parallelism'].getboolean('GROUP_TRANSPOSES')
SYNC_TRANSPOSES = config['parallelism'].getboolean('SYNC_TRANSPOSES')
if TRANSPOSE_LIBRARY.upper() == 'FFTW':
from .transposes import FFTWTranspose as TransposePlanner
elif TRANSPOSE_LIBRARY.upper() == 'MPI':
from .transposes import AlltoallvTranspose as TransposePlanner
from .transposes import RowDistributor, ColDistributor
# Public interface
__all__ = ['Distributor']
class Distributor:
    """
    Directs parallelized distribution and transformation of fields.

    Parameters
    ----------
    dim : int
        Dimension
    comm : MPI communicator, optional
        MPI communicator (default: comm world)
    mesh : tuple of ints, optional
        Process mesh for parallelization (default: 1-D mesh of available processes)

    Attributes
    ----------
    comm_cart : MPI communicator
        Cartesian MPI communicator over mesh
    coords : array of ints
        Coordinates in cartesian communicator
    layouts : list of layout objects
        Available layouts

    Notes
    -----
    Computations are parallelized by splitting D-dimensional data fields over
    an R-dimensional mesh of MPI processes, where R < D. In coefficient space,
    we take the first R dimensions of the data to be distributed over the mesh,
    leaving the last (D-R) dimensions local. To transform such a data cube to
    grid space, we loop backwards over the D dimensions, performing each
    transform if the corresponding dimension is local, and performing an MPI
    transpose with the next dimension otherwise. This effectively bubbles the
    first local dimension up from the (D-R)-th to the first dimension,
    transforming to grid space along the way. In grid space, then, the first
    dimensional is local, followed by R dimensions distributed over the mesh,
    and the last (D-R-1) dimensions local.

    The distributor object for a given dimension constructs layout objects
    describing each of the (D+R+1) layouts (sets of transform/distribution
    states) and the paths between them (D transforms and R transposes).
    """

    def __init__(self, coordsystems, comm=None, mesh=None, dtype=None):
        # Accept single coordsystem in place of tuple/list
        if not isinstance(coordsystems, (tuple, list)):
            coordsystems = (coordsystems,)
        # Note if only a single coordsystem for simplicity
        if len(coordsystems) == 1:
            self.single_coordsys = coordsystems[0]
        else:
            self.single_coordsys = False
        # Get coords
        self.coords = tuple([coord for coordsystem in coordsystems for coord in coordsystem.coords])
        for coordsystem in coordsystems:
            coordsystem.set_distributor(self)
        self.coordsystems = coordsystems
        # Defaults
        if comm is None:
            comm = MPI.COMM_WORLD
        if mesh is None:
            mesh = np.array([comm.size], dtype=int)
        else:
            if isinstance(mesh, list) or isinstance(mesh, tuple):
                mesh = np.array(mesh, dtype=int)
        # Trim trailing ones
        mesh = 1 + np.trim_zeros(mesh - 1, trim='b')
        self.dim = dim = len(self.coords)
        # self.dim = dim = sum(coordsystem.dim for coordsystem in coordsystems)
        self.comm = comm
        self.mesh = mesh = np.array(mesh)
        # Check mesh compatibility
        logger.debug('Mesh: %s' %str(mesh))
        if mesh.size >= dim:
            raise ValueError("Mesh (%s) must have lower dimension than distributor (%i)" %(mesh, dim))
        if np.prod(mesh) != comm.size:
            raise ValueError("Wrong number of processes (%i) for specified mesh (%s)" %(comm.size, mesh))
        self.dtype = dtype
        # Create cartesian communicator, ignoring axes with m=1
        reduced_mesh = [m for m in mesh if m > 1]
        self.comm_cart = comm.Create_cart(reduced_mesh)
        self.comm_coords = np.array(self.comm_cart.coords, dtype=int)
        # Build layout objects
        self._build_layouts()

    @CachedAttribute
    def cs_by_axis(self):
        """Map each global axis index to its owning coordinate system."""
        cs_dict = {}
        for cs in self.coordsystems:
            for subaxis in range(cs.dim):
                cs_dict[cs.axis+subaxis] = cs
        return cs_dict

    def get_coordsystem(self, axis):
        """Return the coordinate system owning the given axis."""
        return self.cs_by_axis[axis]

    def _build_layouts(self, dry_run=False):
        """Construct layout objects."""
        D = self.dim
        R = np.sum(self.mesh > 1)
        # First layout: full coefficient space
        local = np.array([True] * D)
        local[:self.mesh.size][self.mesh > 1] = False
        grid_space = [False] * D
        layout_0 = Layout(self, local, grid_space)
        layout_0.index = 0
        # Layout and path lists
        self.layouts = [layout_0]
        self.paths = []
        self.transforms = []
        # Subsequent layouts
        for i in range(1, R+D+1):
            # Iterate backwards over bases to last coefficient space basis
            for d in reversed(range(D)):
                if not grid_space[d]:
                    # Transform if local
                    if local[d]:
                        grid_space[d] = True
                        layout_i = Layout(self, local, grid_space)
                        if not dry_run:
                            path_i = Transform(self.layouts[-1], layout_i, d)
                            self.transforms.insert(0, path_i)
                        break
                    # Otherwise transpose
                    else:
                        local[d] = True
                        local[d+1] = False
                        layout_i = Layout(self, local, grid_space)
                        if not dry_run:
                            path_i = Transpose(self.layouts[-1], layout_i, d, self.comm_cart)
                        break
            layout_i.index = i
            self.layouts.append(layout_i)
            if not dry_run:
                self.paths.append(path_i)
        # Directly reference coefficient and grid space layouts
        self.coeff_layout = self.layouts[0]
        self.grid_layout = self.layouts[-1]
        # Allow string references to coefficient and grid space layouts
        self.layout_references = {'c': self.coeff_layout,
                                  'g': self.grid_layout}

    def get_layout_object(self, input):
        """Dereference layout identifiers."""
        if isinstance(input, Layout):
            return input
        else:
            return self.layout_references[input]

    def buffer_size(self, domain, scales, dtype):
        """Compute necessary buffer size (bytes) for all layouts."""
        return max(layout.buffer_size(domain, scales, dtype) for layout in self.layouts)

    def remedy_scales(self, scales):
        """Remedy different scale inputs."""
        if scales is None:
            scales = 1
        if not hasattr(scales, "__len__"):
            scales = [scales] * self.dim
        if 0 in scales:
            raise ValueError("Scales must be nonzero.")
        return tuple(scales)

    def get_transform_object(self, axis):
        """Return the transform path acting along the given axis."""
        return self.transforms[axis]

    def get_axis(self, coord):
        """Return the global axis index of the given coordinate."""
        return self.coords.index(coord)

    def Field(self, *args, **kw):
        """Alternate constructor for fields."""
        from .field import Field
        return Field(self, *args, **kw)

    def ScalarField(self, *args, **kw):
        """Alternate constructor for scalar fields."""
        from .field import ScalarField
        return ScalarField(self, *args, **kw)

    def VectorField(self, *args, **kw):
        """Alternate constructor for vector fields."""
        from .field import VectorField
        return VectorField(self, *args, **kw)

    def TensorField(self, *args, **kw):
        """Alternate constructor for tensor fields."""
        from .field import TensorField
        return TensorField(self, *args, **kw)

    def IdentityTensor(self, coordsys):
        """Identity tensor field."""
        from .field import TensorField
        I = TensorField(self, (coordsys, coordsys))
        for i in range(coordsys.dim):
            I['g'][i, i] = 1
        return I

    def local_grid(self, basis, scale=None):
        """Return the local grid of a one-dimensional basis."""
        # TODO: remove from bases and do it all here?
        if basis.dim == 1:
            return basis.local_grid(scale=scale)
        else:
            raise ValueError("Use `local_grids` for multidimensional bases.")

    def local_grids(self, *bases, scales=None):
        """Return the concatenated local grids of the given bases."""
        # TODO: remove from bases and do it all here?
        return sum((basis.local_grids(scales=scales) for basis in bases), ())

    def local_modes(self, basis):
        """Return the local modes of the given basis."""
        # TODO: remove from bases and do it all here?
        return basis.local_modes()

    @CachedAttribute
    def default_nonconst_groups(self):
        """Concatenated default non-constant groups over all coordinate systems."""
        return sum((cs.default_nonconst_groups for cs in self.coordsystems), ())
class Layout:
    """
    Object describing the data distribution for a given transform and
    distribution state.

    Attributes
    ----------
    local : array of bools
        Axis locality flags (True/False for local/distributed)
    grid_space : array of bools
        Axis grid-space flags (True/False for grid/coeff space)
    """

    def __init__(self, dist, local, grid_space):
        self.dist = dist
        # Freeze local and grid_space lists into boolean arrays
        self.local = np.array(local)
        self.grid_space = np.array(grid_space)
        # Extend mesh and coordinates to distributor dimension
        self.ext_mesh = np.ones(dist.dim, dtype=int)
        reduced_mesh = [m for m in dist.mesh if m > 1]
        self.ext_mesh[~self.local] = reduced_mesh
        self.ext_coords = np.zeros(dist.dim, dtype=int)
        self.ext_coords[~self.local] = dist.comm_coords

    def global_shape(self, domain, scales):
        """Global data shape."""
        scales = self.dist.remedy_scales(scales)
        #global_shape = np.array(domain.coeff_shape).copy()
        #global_shape[self.grid_space] = np.array(domain.grid_shape(scales))[self.grid_space]
        global_shape = domain.global_shape(self, scales)
        return tuple(global_shape)

    def chunk_shape(self, domain):
        """Chunk shape."""
        #scales = self.dist.remedy_scales(scales)
        #chunk_shape = np.array(domain.coeff_group_shape).copy()
        #chunk_shape[self.grid_space] = 1
        chunk_shape = domain.chunk_shape(self)
        return tuple(chunk_shape)

    def group_shape(self, domain):
        """Group shape."""
        return tuple(domain.group_shape(self))

    def local_chunks(self, domain, scales, rank=None, broadcast=False):
        """Local chunk indices by axis."""
        global_shape = self.global_shape(domain, scales)
        chunk_shape = self.chunk_shape(domain)
        chunk_nums = -(-np.array(global_shape) // np.array(chunk_shape)) # ceil
        local_chunks = []
        # Get coordinates
        if rank is None:
            ext_coords = self.ext_coords
        else:
            ext_coords = np.zeros(self.dist.dim, dtype=int)
            ext_coords[~self.local] = self.dist.comm_cart.Get_coords(rank)
        # Get chunks axis by axis
        for axis, basis in enumerate(domain.full_bases):
            if self.local[axis]:
                # All chunks for local dimensions
                local_chunks.append(np.arange(chunk_nums[axis]))
            else:
                # Block distribution otherwise
                mesh = self.ext_mesh[axis]
                if broadcast and (basis is None):
                    coord = 0
                else:
                    coord = ext_coords[axis]
                block = -(-chunk_nums[axis] // mesh)
                start = min(chunk_nums[axis], block*coord)
                end = min(chunk_nums[axis], block*(coord+1))
                local_chunks.append(np.arange(start, end))
        return tuple(local_chunks)

    def global_elements(self, domain, scales):
        """Global element indices by axis."""
        global_shape = self.global_shape(domain, scales)
        indices = [np.arange(n) for n in global_shape]
        return tuple(indices)

    def local_elements(self, domain, scales, rank=None, broadcast=False):
        """Local element indices by axis."""
        chunk_shape = self.chunk_shape(domain)
        local_chunks = self.local_chunks(domain, scales, rank=rank, broadcast=broadcast)
        indices = []
        for chunk_size, chunks in zip(chunk_shape, local_chunks):
            # Expand each chunk index into its constituent element indices.
            ax_indices = chunk_size*np.repeat(chunks, chunk_size) + np.tile(np.arange(chunk_size), len(chunks))
            indices.append(ax_indices)
        return tuple(indices)

    @CachedMethod
    def valid_elements(self, tensorsig, domain, scales, rank=None, broadcast=False):
        """Make dense array of mode inclusion."""
        # Make dense array of local elements
        elements = self.local_elements(domain, scales, rank=rank, broadcast=broadcast)
        elements = np.array(np.meshgrid(*elements, indexing='ij'))
        # Check validity basis-by-basis
        grid_space = self.grid_space
        vshape = tuple(cs.dim for cs in tensorsig) + elements[0].shape
        valid = np.ones(shape=vshape, dtype=bool)
        for basis in domain.bases:
            basis_axes = slice(basis.first_axis, basis.last_axis+1)
            valid &= basis.valid_elements(tensorsig, grid_space[basis_axes], elements[basis_axes])
        return valid

    def _group_arrays(self, elements, domain):
        """Convert dense element arrays to (masked) dense group arrays."""
        # Convert to groups basis-by-basis
        grid_space = self.grid_space
        groups = np.zeros_like(elements)
        groups = np.ma.masked_array(groups)
        for basis in domain.bases:
            basis_axes = slice(basis.first_axis, basis.last_axis+1)
            groups[basis_axes] = basis.elements_to_groups(grid_space[basis_axes], elements[basis_axes])
        return groups

    @CachedMethod
    def local_group_arrays(self, domain, scales, rank=None, broadcast=False):
        """Dense array of local groups (first axis)."""
        # Make dense array of local elements
        elements = self.local_elements(domain, scales, rank=rank, broadcast=broadcast)
        elements = np.array(np.meshgrid(*elements, indexing='ij'))
        return self._group_arrays(elements, domain)

    @CachedMethod
    def global_group_arrays(self, domain, scales):
        """Dense array of global groups (first axis)."""
        # Make dense array of global elements
        elements = self.global_elements(domain, scales)
        elements = np.array(np.meshgrid(*elements, indexing='ij'))
        return self._group_arrays(elements, domain)

    @CachedMethod
    def local_groupsets(self, group_coupling, domain, scales, rank=None, broadcast=False):
        """Ordered set of unique local groupsets, with coupled axes set to None."""
        local_groupsets = self.local_group_arrays(domain, scales, rank=rank, broadcast=broadcast).astype(object)
        # Replace non-enumerated axes with None
        for axis in range(local_groupsets.shape[0]):
            if group_coupling[axis]:
                local_groupsets[axis] = None
        # Flatten local groupsets
        local_groupsets = local_groupsets.reshape((local_groupsets.shape[0], -1))
        # Drop masked groups
        local_groupsets = np.ma.compress_cols(local_groupsets)
        # Return unique groupsets
        local_groupsets = tuple(map(tuple, local_groupsets.T))
        return OrderedSet(local_groupsets)

    @CachedMethod
    def local_groupset_slices(self, groupset, domain, scales, rank=None, broadcast=False):
        """Build slices selecting the local data matching the given groupset."""
        groups = self.local_group_arrays(domain, scales, rank=rank, broadcast=broadcast)
        dim = groups.shape[0]
        group_shape = self.group_shape(domain)
        # find all elements which match group
        selections = np.ones(groups[0].shape, dtype=int)
        for i, subgroup in enumerate(groupset):
            if subgroup is not None:
                selections *= (subgroup == groups[i])
                # Note: seems to exclude masked elements for ==, unlike other comparisons
        # determine which axes to loop over, which to find bounds for
        slices = []
        for i, subgroup in enumerate(groupset):
            if subgroup is None:
                subslices = [slice(None)]
            else:
                # loop over axis i but taking into account group_shape
                subslices = [slice(j, j+group_shape[i]) for j in range(0, groups.shape[i+1], group_shape[i])]
            slices.append(subslices)
        group_slices = []
        for s in itertools.product(*slices):
            sliced_selections = selections[tuple(s)]
            if np.any(sliced_selections): # some elements match group
                # assume selected groups are cartesian product, find left and right bounds
                lefts = list(map(np.min, np.where(sliced_selections)))
                rights = list(map(np.max, np.where(sliced_selections)))
                # build multidimensional group slice
                group_slice = []
                for i in range(dim):
                    if s[i] != slice(None):
                        group_slice.append(s[i])
                    else:
                        group_slice.append(slice(lefts[i], rights[i]+1))
                group_slices.append(tuple(group_slice))
        return group_slices

    def slices(self, domain, scales):
        """Local element slices by axis."""
        local_elements = self.local_elements(domain, scales)
        slices = []
        for LE in local_elements:
            if LE.size:
                slices.append(slice(LE.min(), LE.max()+1))
            else:
                # Empty local data along this axis
                slices.append(slice(0, 0))
        return tuple(slices)

    @CachedMethod
    def local_shape(self, domain, scales, rank = None):
        """Local data shape."""
        local_elements = self.local_elements(domain, scales, rank = rank)
        shape = tuple(LE.size for LE in local_elements)
        return shape

    def buffer_size(self, bases, scales, dtype):
        """Local buffer size (bytes)."""
        local_shape = self.local_shape(bases, scales)
        return prod(local_shape) * np.dtype(dtype).itemsize

    # (Legacy commented-out group/block distribution helpers removed for clarity.)
class Transform:
    """
    Directs spectral transforms between two layouts along a single axis.

    `increment` performs backward (coeff -> grid) transforms; `decrement`
    performs forward (grid -> coeff) transforms. Grouped variants currently
    fall back to per-field transforms.

    TODO:
        - Implement grouped transforms
    """

    def __init__(self, layout0, layout1, axis):
        self.layout0 = layout0
        self.layout1 = layout1
        self.axis = axis

    def increment(self, fields):
        """Backward transform a list of fields."""
        if len(fields) == 1:
            self.increment_single(fields[0])
        elif GROUP_TRANSFORMS:
            self.increment_group(fields)
        else:
            for single_field in fields:
                self.increment_single(single_field)

    def decrement(self, fields):
        """Forward transform a list of fields."""
        if len(fields) == 1:
            self.decrement_single(fields[0])
        elif GROUP_TRANSFORMS:
            self.decrement_group(fields)
        else:
            for single_field in fields:
                self.decrement_single(single_field)

    def increment_single(self, field):
        """Backward transform a single field from layout0 to layout1."""
        axis = self.axis
        basis = field.domain.full_bases[axis]
        # View the data before and after the layout change
        coeff_view = field.data
        field.preset_layout(self.layout1)
        grid_view = field.data
        # Transform non-constant bases with nonempty local data
        if basis is not None and prod(coeff_view.shape):
            basis.backward_transform(field, axis, coeff_view, grid_view)

    def decrement_single(self, field):
        """Forward transform a single field from layout1 to layout0."""
        axis = self.axis
        basis = field.domain.full_bases[axis]
        # View the data before and after the layout change
        grid_view = field.data
        field.preset_layout(self.layout0)
        coeff_view = field.data
        # Transform non-constant bases with nonempty local data
        if basis is not None and prod(grid_view.shape):
            basis.forward_transform(field, axis, grid_view, coeff_view)

    def increment_group(self, fields):
        """Backward transform multiple fields (currently per-field fallback)."""
        for single_field in fields:
            self.increment_single(single_field)

    def decrement_group(self, fields):
        """Forward transform multiple fields (currently per-field fallback)."""
        for single_field in fields:
            self.decrement_single(single_field)
class Transpose:
    """
    Directs distributed transposes between two layouts.

    The transpose exchanges locality between data axes `axis` and `axis+1`:
    `increment` (backward, toward grid space) localizes columns and moves
    fields into `layout1`; `decrement` (forward, toward coefficient space)
    localizes rows and moves fields into `layout0`.

    TODO:
        - Implement grouped transposes
        - Transpose all components simultaneously
    """

    def __init__(self, layout0, layout1, axis, comm_cart):
        self.layout0 = layout0
        self.layout1 = layout1
        self.axis = axis
        self.comm_cart = comm_cart
        # Create subgrid communicator along the moving mesh axis.
        remain_dims = [0] * comm_cart.dim
        # Mesh axes with m=1 are excluded from comm_cart, so shift the axis index.
        mesh = layout0.dist.mesh
        comm_cart_axis = axis - np.sum(mesh[:axis]==1)
        remain_dims[comm_cart_axis] = 1
        self.comm_sub = comm_cart.Sub(remain_dims)

    @CachedMethod
    def _sub_shape(self, domain, scales):
        """Build global shape of data assigned to sub-communicator."""
        local_shape = self.layout0.local_shape(domain, scales)
        global_shape = self.layout0.global_shape(domain, scales)
        # Global shape along transposing axes, local shape along others
        sub_shape = np.array(local_shape)
        sub_shape[self.axis] = global_shape[self.axis]
        sub_shape[self.axis+1] = global_shape[self.axis+1]
        return tuple(sub_shape)

    @CachedMethod
    def _plan(self, ncomp, sub_shape, chunk_shape, dtype):
        """Build a transpose plan, or return None when no communication is needed."""
        axis = self.axis
        if prod(sub_shape) == 0:
            return None  # no data
        elif (sub_shape[axis] == chunk_shape[axis]) and (sub_shape[axis+1] == chunk_shape[axis+1]):
            return None  # no change
        else:
            # Add leading axis for tensor components
            full_sub_shape = (ncomp,) + sub_shape
            full_chunk_shape = (ncomp,) + chunk_shape
            return TransposePlanner(full_sub_shape, full_chunk_shape, dtype, axis+1, self.comm_sub)

    def _single_plan(self, field):
        """Build a transpose plan for a single field."""
        ncomp = int(prod([cs.dim for cs in field.tensorsig]))
        sub_shape = self._sub_shape(field.domain, field.scales)
        chunk_shape = field.domain.chunk_shape(self.layout0)
        return self._plan(ncomp, sub_shape, chunk_shape, field.dtype)

    def _group_plans(self, fields):
        """Build transpose plans for fields grouped by (sub_shape, chunk_shape)."""
        # Segment fields by sub_shapes and chunk_shapes
        field_groups = OrderedDict()
        for field in fields:
            sub_shape = self._sub_shape(field.domain, field.scales)
            chunk_shape = field.domain.chunk_shape(self.layout0)
            if (sub_shape, chunk_shape) in field_groups:
                field_groups[(sub_shape, chunk_shape)].append(field)
            else:
                field_groups[(sub_shape, chunk_shape)] = [field]
        # Plan for each field group
        plans = []
        for (sub_shape, chunk_shape), fields in field_groups.items():
            ncomp = 0
            for field in fields:
                ncomp += int(prod([cs.dim for cs in field.tensorsig]))
            plan = self._plan(ncomp, sub_shape, chunk_shape, field.dtype)  # Assumes last field's dtype is good for everybody
            plans.append((fields, plan))
        return plans

    def increment(self, fields):
        """Backward transpose a list of fields."""
        if SYNC_TRANSPOSES:
            self.comm_sub.Barrier()
        if len(fields) == 1:
            self.increment_single(*fields)
        elif GROUP_TRANSPOSES:
            self.increment_group(fields)
        else:
            for field in fields:
                self.increment_single(field)

    def decrement(self, fields):
        """Forward transpose a list of fields."""
        if SYNC_TRANSPOSES:
            self.comm_sub.Barrier()
        if len(fields) == 1:
            self.decrement_single(*fields)
        elif GROUP_TRANSPOSES:
            self.decrement_group(fields)
        else:
            for field in fields:
                self.decrement_single(field)

    def increment_single(self, field):
        """Backward transpose a field."""
        plan = self._single_plan(field)
        if plan:
            # Reference views from both layouts
            data0 = field.data
            field.preset_layout(self.layout1)
            data1 = field.data
            # Transpose between data views
            plan.localize_columns(data0, data1)
        else:
            # No communication: just update field layout
            field.preset_layout(self.layout1)

    def decrement_single(self, field):
        """Forward transpose a field."""
        plan = self._single_plan(field)
        if plan:
            # Reference views from both layouts
            data1 = field.data
            field.preset_layout(self.layout0)
            data0 = field.data
            # Transpose between data views
            plan.localize_rows(data1, data0)
        else:
            # No communication: just update field layout
            field.preset_layout(self.layout0)

    def increment_group(self, fields):
        """Backward transpose multiple fields simultaneously."""
        plans = self._group_plans(fields)
        for fields, plan in plans:
            if plan:
                if len(fields) == 1:
                    field = fields[0]
                    # Reference views from both layouts
                    data0 = field.data
                    field.preset_layout(self.layout1)
                    data1 = field.data
                    # Transpose between data views
                    plan.localize_columns(data0, data1)
                else:
                    # Gather data across fields, flattening tensor components
                    data0 = []
                    data1 = []
                    for field in fields:
                        rank = len(field.tensorsig)
                        # Reference views from both layouts
                        flat_comp_shape = (-1,) + field.data.shape[rank:]
                        if field.data.size:
                            data0.append(field.data.reshape(flat_comp_shape))
                        field.preset_layout(self.layout1)
                        flat_comp_shape = (-1,) + field.data.shape[rank:]
                        if field.data.size:
                            data1.append(field.data.reshape(flat_comp_shape))
                    if data0:
                        data0 = np.concatenate(data0)
                    else:
                        data0 = np.zeros(0, dtype=fields[0].dtype)  # Assumes same dtypes
                    if data1:
                        data1 = np.concatenate(data1)
                    else:
                        data1 = np.zeros(0, dtype=fields[0].dtype)  # Assumes same dtypes
                    # Transpose between data views
                    plan.localize_columns(data0, data1)
                    # Split transposed data back out by component count
                    i = 0
                    for field in fields:
                        ncomp = int(prod([cs.dim for cs in field.tensorsig]))
                        data = data1[i:i+ncomp]
                        field.data[:] = data.reshape(field.data.shape)
                        i += ncomp
            else:
                # No communication: just update field layouts
                for field in fields:
                    field.preset_layout(self.layout1)

    def decrement_group(self, fields):
        """Forward transpose multiple fields simultaneously."""
        plans = self._group_plans(fields)
        for fields, plan in plans:
            if plan:
                if len(fields) == 1:
                    field = fields[0]
                    # Reference views from both layouts
                    data1 = field.data
                    field.preset_layout(self.layout0)
                    data0 = field.data
                    # Transpose between data views
                    plan.localize_rows(data1, data0)
                else:
                    # Gather data across fields, flattening tensor components
                    data0 = []
                    data1 = []
                    for field in fields:
                        rank = len(field.tensorsig)
                        # Reference views from both layouts
                        flat_comp_shape = (-1,) + field.data.shape[rank:]
                        if field.data.size:
                            data1.append(field.data.reshape(flat_comp_shape))
                        field.preset_layout(self.layout0)
                        flat_comp_shape = (-1,) + field.data.shape[rank:]
                        if field.data.size:
                            data0.append(field.data.reshape(flat_comp_shape))
                    if data0:
                        data0 = np.concatenate(data0)
                    else:
                        data0 = np.zeros(0, dtype=fields[0].dtype)  # Assumes same dtypes
                    if data1:
                        data1 = np.concatenate(data1)
                    else:
                        data1 = np.zeros(0, dtype=fields[0].dtype)  # Assumes same dtypes
                    # Transpose between data views
                    plan.localize_rows(data1, data0)
                    # Split transposed data back out by component count
                    i = 0
                    for field in fields:
                        ncomp = int(prod([cs.dim for cs in field.tensorsig]))
                        data = data0[i:i+ncomp]
                        field.data[:] = data.reshape(field.data.shape)
                        i += ncomp
            else:
                # No communication: just update field layouts.
                # BUGFIX: previously set layout1 here, leaving forward-transposed
                # fields tagged with the wrong layout (cf. decrement_single).
                for field in fields:
                    field.preset_layout(self.layout0)
| DedalusProject/dedalus | dedalus/core/distributor.py | distributor.py | py | 36,686 | python | en | code | 376 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tools.config.config",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tools.config.config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "tools.confi... |
30767921487 | # -*- coding: utf-8 -*-
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib.auth import logout
from django.http import HttpResponse, Http404
#from django.template import Context
#from django.template.loader import get_template
from django.contrib.auth.models import User
from django.shortcuts import render_to_response, get_object_or_404
from bookmarks.forms import *
from bookmarks.models import *
from django.contrib.auth.decorators import login_required
def main_page(request):
    """Render the front page with the ten most recently shared bookmarks."""
    recent_shared = SharedBookmark.objects.order_by('-date')[:10]
    context = RequestContext(request, {
        'user': request.user,
        'head_title': '장고 북마크',
        'page_title': '장고 북마크에 오신 것을 환영합니다.',
        'page_body': '북마크를 저장하고 공유하세요!',
        'shared_bookmarks': recent_shared,
    })
    return render_to_response('main_page.html', context)
def user_page(request, username):
    """Show all bookmarks belonging to *username*, newest first (404 if unknown)."""
    user = get_object_or_404(User, username=username)
    bookmarks = user.bookmark_set.order_by('-id')
    context = RequestContext(request, {
        'username': username,
        'bookmarks': bookmarks,
        'show_tags': True,
        # Only the profile owner may edit their own bookmarks.
        'show_edit': username == request.user.username,
    })
    return render_to_response('user_page.html', context)
def logout_page(request):
    """Log the user out and redirect back to the referring page.

    Falls back to the site root when no Referer header is present
    (previously this passed None to HttpResponseRedirect).
    """
    logout(request)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER') or '/')
def register_page(request):
    """Display and process the user registration form.

    On a valid POST, create the account and redirect to the success page;
    otherwise re-render the form (bound with errors on an invalid POST).
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            User.objects.create_user(
                username=form.cleaned_data['username'],
                password=form.cleaned_data['password1'],
                email=form.cleaned_data['email'],
            )
            return HttpResponseRedirect('/register/success')
    else:
        form = RegistrationForm()
    context = RequestContext(request, {'form': form})
    return render_to_response('registration/register.html', context)
@login_required  # decorator; see "Easy and Fast Web Development" sec. 5.2.1
def user_modification_page(request):
    """Let the authenticated user change their password.

    Any valid POST redirects to /logout/ so the user must re-authenticate;
    the password is only actually changed when newPassword2 is non-empty.
    """
    if request.method == 'POST':
        form = PasswordModificationForm(request.user, request.POST)
        if form.is_valid():
            new_password = form.cleaned_data['newPassword2']
            if new_password:
                request.user.set_password(new_password)
                request.user.save()
            return HttpResponseRedirect('/logout/')
    elif request.method == 'GET':
        # NOTE(review): instantiated without the user argument used in the POST
        # branch — confirm PasswordModificationForm supports no-arg construction.
        form = PasswordModificationForm()
    else:
        raise Http404('views.py.user_modification_page method error else statement')
    context = RequestContext(request, {'form': form})
    return render_to_response('user_modification.html', context)
@login_required # decorator; see "Easy and Fast Web Development" sec. 5.2.1
def bookmark_save_page(request):
    """Display and process the bookmark save form.

    Handles three entry points:
      - POST: save via _bookmark_save; return the rendered bookmark list for
        ajax requests, otherwise redirect to the user's page. On an invalid
        ajax POST, respond with the literal string 'failure'.
      - GET with ?url=...: pre-fill the form from an existing bookmark for the
        bookmarklet, marking 'share' according to whether it is already shared.
      - Plain GET: show an empty form.
    Ajax requests render only the form fragment, not the full page.
    """
    ajax = request.GET.has_key('ajax')
    if request.method == 'POST':
        form = BookmarkSaveForm(request.POST)
        if form.is_valid():
            bookmark = _bookmark_save(request, form)
            if ajax:
                variables = RequestContext(request, {
                    'bookmarks':[bookmark],
                    'show_edit':True,
                    'show_tags':True,
                })
                return render_to_response('bookmark_list.html',
                                          variables)
            else:
                return HttpResponseRedirect(
                    '/user/%s/'%request.user
                )
        else:
            if ajax:
                return HttpResponse('failure')
            # Invalid non-ajax POST falls through and re-renders the bound form.
    elif request.GET.has_key('url'):
        url = request.GET['url']
        title=''
        tags=''
        share=True
        try:
            link = Link.objects.get(url=url)
            bookmark = Bookmark.objects.get(
                link=link,
                user=request.user,
            )
            title = bookmark.title
            tags = ' '.join(
                tag.name for tag in bookmark.tag_set.all()
            )
            # Probe for an existing share; absence raises ObjectDoesNotExist.
            SharedBookmark.objects.get(
                bookmark=bookmark
            )
        except ObjectDoesNotExist:
            share=False
        form = BookmarkSaveForm({
            'url':url,
            'title':title,
            'tags':tags,
            'share':share,
        })
    else:
        form = BookmarkSaveForm()
    variables = RequestContext(request, {
        'form':form
    })
    if ajax:
        return render_to_response(
            'bookmark_save_form.html',
            variables,
        )
    else:
        return render_to_response('bookmark_save.html', variables)
def tag_page(request, tag_name):
    """List all bookmarks carrying *tag_name*, newest first (404 if unknown tag)."""
    tag = get_object_or_404(Tag, name=tag_name)
    tagged_bookmarks = tag.bookmarks.order_by('-id')
    context = RequestContext(request, {
        'bookmarks': tagged_bookmarks,
        'tag_name': tag_name,
        'show_tags': True,
        'show_user': True,
    })
    return render_to_response('tag_page.html', context)
def tag_cloud_page(request):
    """Render a tag cloud, weighting each tag 0..MAX_WEIGHT by bookmark count.

    Previously this crashed with IndexError on `tags[0]` when no tags exist;
    an empty tag set now renders an empty cloud.
    """
    MAX_WEIGHT = 5
    tags = Tag.objects.order_by('name')
    # Calculate tag, min and max counts (guard against an empty tag table).
    min_count = max_count = tags[0].bookmarks.count() if tags else 0
    for tag in tags:
        tag.count = tag.bookmarks.count()
        if tag.count < min_count:
            min_count = tag.count
        if tag.count > max_count:
            max_count = tag.count
    # Calculate count range. Avoid dividing by zero.
    _range = float(max_count - min_count)
    if _range == 0.0:
        _range = 1.0
    # Calculate tag weights.
    for tag in tags:
        tag.weight = int(
            MAX_WEIGHT * (tag.count - min_count) / _range
        )
    variables = RequestContext(request, {
        'tags': tags
    })
    return render_to_response('tag_cloud_page.html',
                              variables,)
def search_page(request):
    """Search bookmarks by title substring; AJAX calls get only the list fragment.

    Fix: ``QueryDict.has_key()`` was removed (Python 3 / modern Django);
    membership is now tested with the ``in`` operator.
    """
    form = SearchForm()
    bookmarks = []
    show_results = False
    if 'query' in request.GET:
        show_results = True
        query = request.GET['query'].strip()
        if query:
            form = SearchForm({'query': query})
            # Cap at ten case-insensitive title matches.
            bookmarks = \
                Bookmark.objects.filter(title__icontains=query)[:10]
    variables = RequestContext(request, {
        'form': form,
        'bookmarks': bookmarks,
        'show_results': show_results,
        'show_tags': True,
        'show_user': True,
    })
    if request.is_ajax():
        return render_to_response('bookmark_list.html', variables)
    else:
        return render_to_response('search.html', variables)
def _bookmark_save(request, form):
    """Persist a validated BookmarkSaveForm for request.user; return the Bookmark.

    Side effects: creates/updates Link, Bookmark, Tag and SharedBookmark rows.
    Statement order matters (tags are cleared before re-adding; the share row
    is created or deleted before the final save).
    """
    link, dummy = Link.objects.get_or_create(
        url=form.cleaned_data['url'],
    )
    # Fetch the bookmark if it exists, otherwise create a new one.
    bookmark, created = Bookmark.objects.get_or_create(
        user=request.user,
        link=link,
    )
    # Update the bookmark title.
    bookmark.title = form.cleaned_data['title']
    # When editing an existing bookmark, drop all previously attached tags.
    if not created:
        bookmark.tag_set.clear()
    # Rebuild the tag list from the whitespace-separated input.
    tag_names = form.cleaned_data['tags'].split()
    for tag_name in tag_names:
        tag, dummy = Tag.objects.get_or_create(name=tag_name)
        bookmark.tag_set.add(tag)
    if form.cleaned_data['share']:
        # Sharing requested: ensure a SharedBookmark exists; on first share
        # the owner automatically counts as a voter.
        shared_bookmark, created = SharedBookmark.objects.get_or_create(
            bookmark=bookmark
        )
        if created:
            shared_bookmark.users_voted.add(request.user)
            shared_bookmark.save()
    else:
        # Sharing turned off: best-effort removal of any existing share row.
        try:
            SharedBookmark.objects.get(bookmark=bookmark).delete()
        except ObjectDoesNotExist:
            pass
    # Save the bookmark itself.
    bookmark.save()
    return bookmark
def ajax_tag_autocomplete(request):
    """Return newline-separated tag names starting with the 'q' GET parameter.

    Fix: replaced the removed ``QueryDict.has_key()`` with the ``in`` operator.
    """
    if 'q' in request.GET:
        tags = Tag.objects.filter(name__istartswith=request.GET['q'])
        return HttpResponse('\n'.join(tag.name for tag in tags))
    return HttpResponse()
@login_required
def bookmark_vote_page(request):
    """Register at most one vote per user on a shared bookmark, then redirect.

    Fix: replaced the removed ``has_key()`` calls with the ``in`` operator
    (both on request.GET and request.META).
    """
    if 'id' in request.GET:
        try:
            id = request.GET['id']
            shared_bookmark = SharedBookmark.objects.get(id=id)
            user_voted = shared_bookmark.users_voted.filter(
                username=request.user.username
            )
            # Only count the vote if this user has not voted before.
            if not user_voted:
                shared_bookmark.votes += 1
                shared_bookmark.users_voted.add(request.user)
                shared_bookmark.save()
        except ObjectDoesNotExist:
            raise Http404('북마크를 찾을 수 없습니다.')
    # Return to the referring page when known, otherwise to the front page.
    if 'HTTP_REFERER' in request.META:
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    return HttpResponseRedirect('/')
from datetime import datetime, timedelta
def popular_page(request):
    """Show the ten most-voted bookmarks shared within the last 24 hours."""
    cutoff = datetime.today() - timedelta(1)
    recent = SharedBookmark.objects.filter(date__gt=cutoff)
    top_ten = recent.order_by('-votes')[:10]
    context = RequestContext(request, {'shared_bookmarks': top_ten})
    return render_to_response('popular_page.html', context)
def bookmark_page(request, bookmark_id):
    """Render the detail page for one shared bookmark (404 when absent)."""
    shared = get_object_or_404(SharedBookmark, id=bookmark_id)
    context = RequestContext(request, {'shared_bookmark': shared})
    return render_to_response('bookmark_page.html', context)
| meoooh/Bookmarks | bookmarks/views.py | views.py | py | 9,930 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.template.RequestContext",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 54,
"usage_type": ... |
19267862140 | #!/usr/bin/python3
import argparse
import sys
def parse_file(f):
    """Read a grammar description from file object *f*.

    The first three lines define 'N', 'Sigma' and 'S' as "KEY = sym sym ...";
    every remaining line is a production "LHS -> RHS". Returns a dict with
    keys 'N', 'Sigma' (symbol lists), 'S' (start symbol) and 'P'
    (list of (lhs, rhs) tuples).
    """
    grammar = {'N': [], 'Sigma': [], 'S': 'S', 'P': []}
    for _ in range(3):
        header = f.readline().rstrip()
        sep = header.find(' = ')
        key = header[:sep]
        symbols = header[sep + 3:].split(' ')
        # The start symbol is a single name, not a list.
        grammar[key] = symbols[0] if key == 'S' else symbols
    for raw in f:
        rule = raw.rstrip()
        sep = rule.find(' -> ')
        grammar['P'].append((rule[:sep], rule[sep + 4:]))
    return grammar
# based on https://medium.com/100-days-of-algorithms/day-93-first-follow-cfe283998e3e
def compute_first_follow(grammar):
    """Compute FIRST/FOLLOW sets and the nullable nonterminals of *grammar*.

    Returns (first, follow, epsilon): first/follow map each symbol to a set
    of terminals; epsilon is the set of nullable left-hand sides. Iterates
    to a fixed point, so statement order inside the loop is significant.
    """
    first = {}
    follow = {}
    # Base case: FIRST(terminal) = {terminal}.
    for terminal in grammar['Sigma']:
        first[terminal] = {terminal}
        follow[terminal] = set()
    for nonterminal in grammar['N']:
        first[nonterminal] = set()
        follow[nonterminal] = set()
    # '$' marks end-of-input and always follows the start symbol.
    follow[grammar['S']].add('$')
    epsilon = set()
    for left, right in grammar['P']:
        # A right-hand side of '$' encodes an epsilon production here.
        if right == '$':
            epsilon.add(left)
    ok = True
    while ok == True:
        ok = False
        for left, right in grammar['P']:
            for symbol in right:
                ok |= union(first[left], first[symbol])
                if symbol not in epsilon:
                    break
            else:
                # Every symbol on the right was nullable, so *left* is too.
                ok |= union(epsilon, {left})
            new = follow[left]
            for symbol in reversed(right):
                ok |= union(follow[symbol], new)
                if symbol in epsilon:
                    new = new.union(first[symbol])
                else:
                    new = first[symbol]
    return first, follow, epsilon
def union(first, begins):
    """Merge *begins* into *first* in place; return True iff *first* grew."""
    before = len(first)
    first |= begins
    return len(first) > before
def make_csting(l):
    """Render the symbols in *l*, concatenated, as a C++ std::string literal."""
    return 'std::string("' + ''.join(l) + '")'
def main():
    """Generate a C++ recursive-descent parser from a grammar file.

    Reads the grammar named on the CLI, computes FIRST/FOLLOW sets, and
    emits a self-contained C++ source file.

    Fixes: the output file handle was never closed (now closed at the end),
    and the generated "EOF expectd" diagnostic is spelled correctly.
    """
    parser = argparse.ArgumentParser(description='Tool for generating a recursive descent parser.')
    parser.add_argument('input', type=str, help='Path to input file (where the grammar in described)')
    parser.add_argument('output', type=str, help='Output file name.')
    args = parser.parse_args()
    inputFile = args.input
    outputFile = args.output
    f = open(inputFile, 'r')
    grammar = parse_file(f)
    f.close()
    first, follow, epsilon = compute_first_follow(grammar)
    print(first)
    print(follow)
    print(epsilon)
    o = open(outputFile, 'w')
    # include libraries
    o.write('#include <iostream>\n')
    o.write('#include <string>\n')
    o.write('\n')
    # global variables
    o.write('std::string s;\n')
    o.write('int i = -1;\n')
    o.write('char token;\n')
    o.write('\n')
    # forward declarations for every nonterminal plus the helpers
    for nonterminal in grammar['N']:
        o.write('void ' + nonterminal + '();\n')
    o.write('void check(std::string alpha);\n')
    o.write('void parse_nonterminal(std::string alpha);\n')
    o.write('\n')
    # first functions
    o.write('std::string first(char c) {\n')
    o.write('    switch(c) {\n')
    for terminal in grammar['Sigma']:
        o.write('        case \'' + terminal + '\': return ' + make_csting(first[terminal]) + '; break;\n')
    for nonterminal in grammar['N']:
        o.write('        case \'' + nonterminal + '\': return ' + make_csting(first[nonterminal]) + '; break;\n')
    o.write('    }\n')
    o.write('}\n')
    o.write('\n')
    # follow functions
    o.write('std::string follow(char c) {\n')
    o.write('    switch(c) {\n')
    for terminal in grammar['Sigma']:
        o.write('        case \'' + terminal + '\': return ' + make_csting(follow[terminal]) + '; break;\n')
    for nonterminal in grammar['N']:
        o.write('        case \'' + nonterminal + '\': return ' + make_csting(follow[nonterminal]) + '; break;\n')
    o.write('    }\n')
    o.write('}\n')
    o.write('\n')
    # scan function which returns next token
    o.write('char scan() {\n')
    o.write('    ++i;\n')
    o.write('    if (i < s.size()) {\n')
    o.write('        return s[i];\n')
    o.write('    }\n')
    o.write('    return EOF;\n')
    o.write('}\n')
    o.write('\n')
    # check function
    o.write('void check_terminal(std::string alpha) {\n')
    o.write('    if (alpha[0] == token) {\n')
    o.write('        token = scan();\n')
    o.write('    }\n')
    o.write('    else {\n')
    o.write('        std::cout << alpha[0] + \" expected\\n\";\n')
    o.write('    }\n')
    o.write('    if (alpha.size() >= 2) {\n')
    o.write('        check(alpha.substr(1));\n')
    o.write('    }\n')
    o.write('}\n')
    o.write('\n')
    o.write('void check_nonterminal(std::string alpha) {\n')
    o.write('    parse_nonterminal(alpha);\n')
    o.write('}\n')
    o.write('\n')
    o.write('void check(std::string alpha) {\n')
    o.write('    switch(alpha[0]) {\n')
    for terminal in grammar['Sigma']:
        o.write('        case \'' + terminal + '\': check_terminal(alpha); break;\n')
    for nonterminal in grammar['N']:
        o.write('        case \'' + nonterminal + '\': check_nonterminal(alpha); break;\n')
    o.write('    }\n')
    o.write('}\n')
    o.write('\n')
    # parse function for terminals and nonterminals
    o.write('void parse_terminal(std::string alpha) {\n')
    o.write('    if (alpha[0] != \'l\') {\n')
    o.write('        token = scan();\n')
    o.write('    }\n')
    o.write('    if (alpha.size() >= 2) {\n')
    o.write('        check(alpha.substr(1));\n')
    o.write('    }\n')
    o.write('}\n')
    o.write('\n')
    o.write('void parse_nonterminal(std::string alpha) {\n')
    o.write('    switch(alpha[0]) {\n')
    for nonterminal in grammar['N']:
        o.write('        case \'' + nonterminal + '\':' + nonterminal + '(); break;\n')
    o.write('    }\n')
    o.write('    if (alpha.size() >= 2) {\n')
    o.write('        check(alpha.substr(1));\n')
    o.write('    }\n')
    o.write('}\n')
    o.write('\n')
    # function for every nonterminal A from N
    for nonterminal in grammar['N']:
        o.write('void ' + nonterminal + '() {\n')
        for left, right in grammar['P']:
            if left == nonterminal:
                if right[0] != 'l':
                    o.write('    if (first(\'' + right[0] + '\').find(token) != std::string::npos) {\n')
                    o.write('        std::cout << \"' + nonterminal + ' -> ' + right + '\\n\";\n')
                    if right[0] in grammar['N']:
                        o.write('        parse_nonterminal(\"' + right + '\");\n')
                    else:
                        o.write('        parse_terminal(\"' + right + '\");\n')
                    o.write('        return;\n')
                    o.write('    }\n')
                else:
                    o.write('    if (follow(\'' + left + '\').find(token) != std::string::npos) {\n')
                    o.write('        std::cout << \"' + nonterminal + ' -> ' + right + '\\n\";\n')
                    if right[0] in grammar['N']:
                        o.write('        parse_nonterminal(\"' + right + '\");\n')
                    else:
                        o.write('        parse_terminal(\"' + right + '\");\n')
                    o.write('        return;\n')
                    o.write('    }\n')
        o.write('    std::cout << \"Se asteapta un token diferit\\n\";\n')
        o.write('}\n')
        o.write('\n')
    o.write('int main() {\n')
    o.write('    std::cin >> s;\n')
    o.write('    token = scan();\n')
    o.write('    ' + grammar['S'] + '();\n')
    o.write('    if (token != EOF) {\n')
    o.write("        std::cout << \"ERROR: EOF expected\\n\";\n")
    o.write('    }\n')
    o.write('    return 0;\n')
    o.write('}\n')
    # Fix: close the generated file so buffered output is flushed to disk.
    o.close()
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main());
| mapaaa/rdp-generator | main.py | main.py | py | 7,755 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 243,
"usage_type": "call"
}
] |
74429573154 | #!/usr/bin/env python
import os, argparse, time
import utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam
OUTPUT_SIZE = 129 # 0-127 notes + 1 for rests
def parse_args():
    """Build and parse the training CLI; returns an argparse Namespace.

    All hyperparameters (network size, optimizer, batching, data paths)
    are configured here; defaults are shown in --help via
    ArgumentDefaultsHelpFormatter.
    """
    parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_dir', type=str, default='data/midi',
                        help='data directory containing .mid files to use for' \
                             'training')
    parser.add_argument('--experiment_dir', type=str,
                        default='experiments/default',
                        help='directory to store checkpointed models and tensorboard logs.' \
                             'if omitted, will create a new numbered folder in experiments/.')
    parser.add_argument('--rnn_size', type=int, default=64,
                        help='size of RNN hidden state')
    parser.add_argument('--num_layers', type=int, default=1,
                        help='number of layers in the RNN')
    parser.add_argument('--learning_rate', type=float, default=None,
                        help='learning rate. If not specified, the recommended learning '\
                             'rate for the chosen optimizer is used.')
    parser.add_argument('--window_size', type=int, default=20,
                        help='Window size for RNN input per step.')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='minibatch size')
    parser.add_argument('--num_epochs', type=int, default=10,
                        help='number of epochs before stopping training.')
    parser.add_argument('--dropout', type=float, default=0.2,
                        help='percentage of weights that are turned off every training '\
                             'set step. This is a popular regularization that can help with '\
                             'overfitting. Recommended values are 0.2-0.5')
    parser.add_argument('--optimizer',
                        choices=['sgd', 'rmsprop', 'adagrad', 'adadelta',
                                 'adam', 'adamax', 'nadam'], default='adam',
                        help='The optimization algorithm to use. '\
                             'See https://keras.io/optimizers for a full list of optimizers.')
    parser.add_argument('--grad_clip', type=float, default=5.0,
                        help='clip gradients at this value.')
    parser.add_argument('--message', '-m', type=str,
                        help='a note to self about the experiment saved to message.txt '\
                             'in --experiment_dir.')
    parser.add_argument('--n_jobs', '-j', type=int, default=1,
                        help='Number of CPUs to use when loading and parsing midi files.')
    parser.add_argument('--max_files_in_ram', default=25, type=int,
                        help='The maximum number of midi files to load into RAM at once.'\
                             ' A higher value trains faster but uses more RAM. A lower value '\
                             'uses less RAM but takes significantly longer to train.')
    return parser.parse_args()
# create or load a saved model
# returns the model and the epoch number (>1 if loaded from checkpoint)
def get_model(args, experiment_dir=None):
    """Create a fresh stacked-LSTM model, or resume one from *experiment_dir*.

    Returns (model, epoch): epoch is 0 for a new model, otherwise the
    checkpointed epoch to resume from. The model is (re)compiled either way.
    """
    epoch = 0
    if not experiment_dir:
        model = Sequential()
        for layer_index in range(args.num_layers):
            kwargs = dict()
            kwargs['units'] = args.rnn_size
            # if this is the first layer
            if layer_index == 0:
                kwargs['input_shape'] = (args.window_size, OUTPUT_SIZE)
                if args.num_layers == 1:
                    kwargs['return_sequences'] = False
                else:
                    kwargs['return_sequences'] = True
                model.add(LSTM(**kwargs))
            else:
                # if this is a middle layer
                if not layer_index == args.num_layers - 1:
                    kwargs['return_sequences'] = True
                    model.add(LSTM(**kwargs))
                else: # this is the last layer
                    kwargs['return_sequences'] = False
                    model.add(LSTM(**kwargs))
        model.add(Dropout(args.dropout))
        model.add(Dense(OUTPUT_SIZE))
        model.add(Activation('softmax'))
    else:
        model, epoch = utils.load_model_from_checkpoint(experiment_dir)
    # these cli args aren't specified if get_model() is being
    # being called from sample.py
    if 'grad_clip' in args and 'optimizer' in args:
        kwargs = { 'clipvalue': args.grad_clip }
        if args.learning_rate:
            kwargs['lr'] = args.learning_rate
        # select the optimizers
        if args.optimizer == 'sgd':
            optimizer = SGD(**kwargs)
        elif args.optimizer == 'rmsprop':
            optimizer = RMSprop(**kwargs)
        elif args.optimizer == 'adagrad':
            optimizer = Adagrad(**kwargs)
        elif args.optimizer == 'adadelta':
            optimizer = Adadelta(**kwargs)
        elif args.optimizer == 'adam':
            optimizer = Adam(**kwargs)
        elif args.optimizer == 'adamax':
            optimizer = Adamax(**kwargs)
        elif args.optimizer == 'nadam':
            optimizer = Nadam(**kwargs)
        else:
            utils.log(
                'Error: {} is not a supported optimizer. Exiting.'.format(args.optimizer),
                True)
            exit(1)
    else: # so instead lets use a default (no training occurs anyway)
        optimizer = Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model, epoch
def get_callbacks(experiment_dir, checkpoint_monitor='val_acc'):
    """Assemble the checkpoint, LR-reduction and TensorBoard callbacks."""
    callbacks = []
    # save model checkpoints
    filepath = os.path.join(experiment_dir,
                            'checkpoints',
                            'checkpoint-epoch_{epoch:03d}-val_acc_{val_acc:.3f}.hdf5')
    callbacks.append(ModelCheckpoint(filepath,
                                     monitor=checkpoint_monitor,
                                     verbose=1,
                                     save_best_only=False,
                                     mode='max'))
    # Halve the learning rate after 3 stagnant epochs.
    # NOTE(review): 'epsilon' is the pre-Keras-2.1 spelling of 'min_delta'.
    callbacks.append(ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.5,
                                       patience=3,
                                       verbose=1,
                                       mode='auto',
                                       epsilon=0.0001,
                                       cooldown=0,
                                       min_lr=0))
    callbacks.append(TensorBoard(log_dir=os.path.join(experiment_dir, 'tensorboard-logs'),
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=False))
    return callbacks
def main():
    """Train the MIDI LSTM: load data, build/resume the model, fit, and log.

    Fixes: (1) the OSError branch called a bare ``log()`` (NameError) and
    never filled its ``{}`` placeholder — now uses ``utils.log`` with the
    directory name; (2) the train/validation slices were inverted — training
    used the 20% slice and validated on 80%, contradicting the 20%
    ``val_split`` and the ``validation_steps`` math below.
    """
    args = parse_args()
    args.verbose = True
    try:
        # get paths to midi files in --data_dir
        midi_files = [os.path.join(args.data_dir, path) \
                      for path in os.listdir(args.data_dir) \
                      if '.mid' in path or '.midi' in path]
    except OSError as e:
        utils.log(
            'Error: Invalid --data_dir, {} directory does not exist. Exiting.'.format(args.data_dir),
            args.verbose)
        exit(1)
    utils.log(
        'Found {} midi files in {}'.format(len(midi_files), args.data_dir),
        args.verbose
    )
    if len(midi_files) < 1:
        utils.log(
            'Error: no midi files found in {}. Exiting.'.format(args.data_dir),
            args.verbose
        )
        exit(1)
    # create the experiment directory and return its name
    experiment_dir = utils.create_experiment_dir(args.experiment_dir, args.verbose)
    # write --message to experiment_dir
    if args.message:
        with open(os.path.join(experiment_dir, 'message.txt'), 'w') as f:
            f.write(args.message)
            utils.log('Wrote {} bytes to {}'.format(len(args.message),
                os.path.join(experiment_dir, 'message.txt')), args.verbose)
    val_split = 0.2 # use 20 percent for validation
    val_split_index = int(float(len(midi_files)) * val_split)
    # use generators to lazy load train/validation data, ensuring that the
    # user doesn't have to load all midi files into RAM at once.
    # Fix: train on the 80% tail, validate on the 20% head (was swapped).
    train_generator = utils.get_data_generator(midi_files[val_split_index:],
                                               window_size=args.window_size,
                                               batch_size=args.batch_size,
                                               num_threads=args.n_jobs,
                                               max_files_in_ram=args.max_files_in_ram)
    val_generator = utils.get_data_generator(midi_files[0:val_split_index],
                                             window_size=args.window_size,
                                             batch_size=args.batch_size,
                                             num_threads=args.n_jobs,
                                             max_files_in_ram=args.max_files_in_ram)
    model, epoch = get_model(args)
    if args.verbose:
        print(model.summary())
    utils.save_model(model, experiment_dir)
    utils.log('Saved model to {}'.format(os.path.join(experiment_dir, 'model.json')),
              args.verbose)
    callbacks = get_callbacks(experiment_dir)
    print('fitting model...')
    # this is a somewhat magic number which is the average number of length-20 windows
    # calculated from ~5K MIDI files from the Lakh MIDI Dataset.
    magic_number = 827
    start_time = time.time()
    model.fit_generator(train_generator,
                        steps_per_epoch=len(midi_files) * magic_number / args.batch_size,
                        epochs=args.num_epochs,
                        validation_data=val_generator,
                        validation_steps=len(midi_files) * 0.2 * magic_number / args.batch_size,
                        verbose=1,
                        callbacks=callbacks,
                        initial_epoch=epoch)
    utils.log('Finished in {:.2f} seconds'.format(time.time() - start_time), args.verbose)
if __name__ == '__main__':
main() | brannondorsey/midi-rnn | train.py | train.py | py | 10,546 | python | en | code | 154 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Sequential",
"line_number": 65,
"usage_type": "call"
}... |
10565279224 | import pandas as pd
import numpy as np
import tensorflow as tf
from datetime import datetime
import statistics
# connect to db
import pymysql
import sqlalchemy
from sqlalchemy import create_engine
# misc
import random as rn
from sklearn.model_selection import train_test_split
#pipeline
from sklearn.preprocessing import Normalizer, MinMaxScaler
from sklearn.pipeline import Pipeline
# manual parameters
RANDOM_SEED = 42
TRAINING_SAMPLE = 200000
VALIDATE_SIZE = 0.2
import mysql_auth
login = mysql_auth.info
import time
# db 테이블 df로 가져오기
def get_data():
    """Load the sensor table from MySQL; return (features, FILE_DT, conn).

    Features are the numeric, NaN-filled columns remaining after dropping
    identifier/timestamp columns. The pymysql connection is returned open.
    """
    conn = pymysql.connect(
        host=login['host'], user=login['user'],
        password=login['passwd'], db=login['db'],
        charset=login['charset']
    )
    sql = 'SELECT * FROM {};'.format(login['column'])
    df = pd.read_sql(sql,conn)
    date=df['FILE_DT']
    # Keep everything except metadata columns.
    data = df[df.columns.difference(['EQUIPMENT_NAME', 'DATA_NO', 'FILE_NAME', 'FILE_DT', 'FILE_TIME', 'FIRST_REGIST_DATE', 'NOISE' , 'Unnamed: 0'])]
    data[:20]  # NOTE(review): no-op expression — leftover debugging?
    data = data.apply(pd.to_numeric)
    if(data.isnull().values.any()):
        data = data.fillna(0)
    # data.set_index('EQUIPMENT_NAME',inplace=True)
    # data = data.astype('float')  # astype has no inplace argument
    # data.reset_index(inplace=True)
    return data, date, conn
def pipeline(X_train, X_validate):
    """Fit Normalizer+MinMaxScaler on X_train; transform both splits.

    Returns (X_train_transformed, X_validate_transformed) as arrays.
    Fix: dropped the stray debugging ``print(X_train)`` that spammed stdout
    on every call.
    """
    transform = Pipeline([('normalizer', Normalizer()),
                          ('scaler', MinMaxScaler())])
    transform.fit(X_train)
    X_train_transformed = transform.transform(X_train)
    X_validate_transformed = transform.transform(X_validate)
    return X_train_transformed, X_validate_transformed
# 데이터 처리
def data_prop(data):
    """Split *data* into train/validate/test with fixed seeds.

    Returns (X_train, X_validate) as DataFrames and X_test as a numpy array.
    The first split is unshuffled (chronological hold-out); the second uses
    VALIDATE_SIZE of the training portion.
    """
    # Pin all RNGs for reproducibility.
    np.random.seed(RANDOM_SEED)
    rn.seed(RANDOM_SEED)
    tf.random.set_seed(RANDOM_SEED)
    X_train, X_test = train_test_split(data,test_size=0.2,shuffle=False,random_state=1004)
    #X_train.drop('EQUIPMENT_NAME', axis=1, inplace=True)
    print(X_train[:2])
    #X_test.drop('EQUIPMENT_NAME', axis=1, inplace=True)
    # X_train.astype('float')
    # X_test.astype('float')
    X_train, X_validate = train_test_split(X_train,
                                           test_size=VALIDATE_SIZE,
                                           random_state=RANDOM_SEED)
    X_test = X_test.values
    return X_train, X_validate, X_test
# 오토인코더
def model(X_train_transformed, X_validate_transformed, data_out, X_train):
    """Build/train the dense autoencoder and reconstruct *data_out*.

    Trains on the transformed training split (input == target) with early
    stopping and checkpointing, then scales *data_out* and returns
    (X_test_transformed, reconstructions).
    """
    input_dim = X_train_transformed.shape[1]
    BATCH_SIZE = 256
    EPOCHS = 6
    # (Loading a previously saved model was sketched here but never
    # implemented.)
    # https://keras.io/layers/core/
    # Symmetric encoder (input->2) / decoder (2->input) stack.
    autoencoder = tf.keras.models.Sequential([
        # deconstruct / encode
        tf.keras.layers.Dense(input_dim, activation='elu', input_shape=(input_dim, )),
        tf.keras.layers.Dense(16, activation='elu'),
        tf.keras.layers.Dense(8, activation='elu'),
        tf.keras.layers.Dense(4, activation='elu'),
        tf.keras.layers.Dense(2, activation='elu'),
        # reconstruction / decode
        tf.keras.layers.Dense(4, activation='elu'),
        tf.keras.layers.Dense(8, activation='elu'),
        tf.keras.layers.Dense(16, activation='elu'),
        tf.keras.layers.Dense(input_dim, activation='elu')
    ])
    autoencoder.compile(optimizer="adam",
                        loss="mse",
                        metrics=["acc"])
    #####
    # current date and time
    yyyymmddHHMM = datetime.now().strftime('%Y%m%d%H%M')
    # new folder for a new run
    log_subdir = f'{yyyymmddHHMM}_batch{BATCH_SIZE}_layers{len(autoencoder.layers)}'
    # define our early stopping
    early_stop = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        min_delta=0.0001,
        patience=10,
        verbose=1,
        mode='min',
        restore_best_weights=True
    )
    save_model = tf.keras.callbacks.ModelCheckpoint(
        filepath='autoencoder_best_weights.hdf5',
        save_best_only=True,
        monitor='val_loss',
        verbose=0,
        mode='min'
    )
    tensorboard = tf.keras.callbacks.TensorBoard(
        f'logs/{log_subdir}',
        batch_size=BATCH_SIZE,
        update_freq='batch'
    )
    # callbacks argument only takes a list
    cb = [early_stop, save_model, tensorboard]
    history = autoencoder.fit(
        X_train_transformed, X_train_transformed,
        shuffle=True,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        callbacks=cb,
        validation_data=(X_validate_transformed, X_validate_transformed)
    );
    # autoencoder.save("model.h5")
    pipeline = Pipeline([('normalizer', Normalizer()),
                         ('scaler', MinMaxScaler())])
    pipeline.fit(X_train)
    # NOTE(review): fit_transform below re-fits the pipeline on data_out,
    # discarding the pipeline.fit(X_train) above — possibly unintended.
    X_test_transformed = pipeline.fit_transform(data_out)
    reconstructions = autoencoder.predict(X_test_transformed)
    return X_test_transformed, reconstructions
# 이상치 탐지 및 json으로 보낼 데이터 생성
def ab_detec(X_test_transformed, reconstructions, data, date):
    """Label anomalies from reconstruction error; build the result frame.

    Returns a DataFrame with FILE_DT, the robust z-score of each sample's
    reconstruction MSE, and a 0/1 anomaly label (1 = anomalous).
    """
    # Per-sample mean squared reconstruction error.
    mse = np.mean(np.power(X_test_transformed - reconstructions, 2), axis=1)
    print(mse, reconstructions, X_test_transformed)
    z_score = mad_score(mse)
    THRESHOLD = threshold(z_score)
    #THRESHOLD = 45
    data["label"] = 0
    # Flag samples whose z-score exceeds the computed threshold.
    for i in range(len(z_score)):
        if(THRESHOLD<z_score[i]):
            data.loc[[i],["label"]] = 1 # anomalous
    z_score = pd.DataFrame(z_score)
    z_score.columns=['z_score']
    label = pd.DataFrame(data['label'])
    label.columns=['label']
    # date = pd.DataFrame(data['FILE_DT'])
    # date.columns=['FILE_DT']
    result = pd.concat([date,z_score,label],axis=1)
    return result
# z_score 계산
def mad_score(points):
    """Robust z-scores via the median absolute deviation.

    The 0.6745 factor (~Phi^-1(0.75)) makes the MAD consistent with the
    standard deviation under normality.
    """
    center = np.median(points)
    deviations = np.abs(points - center)
    return 0.6745 * deviations / np.median(deviations)
# threshold 계산
def threshold(z_scores):
    """Pick an anomaly threshold by scanning integer cutoffs over *z_scores*.

    For each integer cutoff it records how much the outlier fraction drops
    versus the previous cutoff (deduplicated), then returns the cutoff whose
    drop equals the median of the distinct drops. NOTE(review): heuristic and
    order-sensitive; left as-is and only documented.
    """
    out=[]
    THRESHOLD = 0
    #z_scores = mad_score(mse)
    outliers = z_scores > THRESHOLD
    answer = []
    print(len(z_scores))
    # Raise the cutoff until no point exceeds it.
    while(np.sum(outliers)/np.size(z_scores)>0):
        if(THRESHOLD==0):
            outliers = z_scores > THRESHOLD
            compare = np.sum(outliers)/np.size(z_scores)
            # out.append(compare)
        else:
            outliers = z_scores > THRESHOLD
            me = np.sum(outliers)/np.size(z_scores)
            # Record only previously unseen drops, with their cutoff.
            if compare - me not in out:
                out.append(compare - me)
                answer.append(THRESHOLD)
            compare = me
        THRESHOLD+=1
    #print(len(out))
    # Pad to an odd count so the median below is an actual element of `out`.
    if(len(out)%2==0):
        out.append(max(out)+1)
    idx = []
    #print(len(out))
    m=statistics.median(list(set(out)))
    #print(out)
    idx=out.index(m)
    return answer[idx]
# main 함수
# def silhang():
def main():
    """Poll loop: retrain the autoencoder and write scores to MySQL forever.

    Every 10 minutes: fetch data, split, scale, train, score, and replace
    the 'ae' table. NOTE(review): DB credentials are hard-coded below —
    move them into mysql_auth/config; there is no exit condition.
    """
    while True:
        data,date,conn = get_data()
        X_train, X_validate, X_test = data_prop(data)
        X_train_transformed, X_validate_transformed = pipeline(X_train, X_validate)
        print(X_train.columns)
        #data_out = data.drop(['EQUIPMENT_NAME'], axis=1, inplace=False)
        X_test_transformed, reconstructions = model(X_train_transformed, X_validate_transformed, data, X_train)
        result = ab_detec(X_test_transformed, reconstructions, data, date)
        pymysql.install_as_MySQLdb()
        engine = create_engine("mysql+mysqldb://" + "root" + ":" + "1234" + "@"+ "127.0.0.1" + ":" + "3306" + "/" + "kepri_data", encoding='utf-8')
        conn = engine.connect()
        dtype={'FILE_DT':sqlalchemy.types.INT(), 'z_score':sqlalchemy.types.FLOAT(), 'label':sqlalchemy.types.INT()}
        try:
            result.to_sql(name='ae', con=engine, if_exists='replace', index=False, dtype=dtype)
        except:
            conn.rollback()
            print("It is Failed")
        conn.close()
        time.sleep(600)
    #return result
main() | HBNU-SWUNIV/COME-CAPSTONE22-dashboard | 003 Code/autoEncoder/AE.py | AE.py | py | 7,821 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mysql_auth.info",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pymysql.connect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeri... |
74227210594 | import uos as os
import lcd160cr
import lcd160cr_test
import pyb
import utime as time
import widgets
from colors import *
from utils import restore_framebuffer
# Initialise the LCD160CR skin; orientation depends on which side it is
# plugged into, so fall back to the rotated variant on OSError.
try:
    lcd = lcd160cr.LCD160CR('XY')
except OSError:
    # I might have plugged in into the other side
    lcd = lcd160cr.LCD160CR('YX')
# White-on-black pen, then clear the screen.
lcd.set_pen(WHITE, BLACK)
lcd.erase()
# ------ Create Button instances -------
# Button-grid layout: PAD px margin inside each cell of a COLS x ROWS grid.
PAD = 4
COLS = 2
ROWS = 4
def create_button(i, j, label=""):
    """Build a Button occupying grid cell (column *i*, row *j*), inset by PAD px."""
    cell_w = lcd.w / COLS
    cell_h = lcd.h / ROWS
    return widgets.Button(
        lcd,
        x1=int(i * cell_w + PAD),
        y1=int(j * cell_h + PAD),
        x2=int((i + 1) * cell_w - PAD),
        y2=int((j + 1) * cell_h - PAD),
        label=label,
    )
# Left column: buttons that toggle the four pyboard LEDs.
btn_blue = create_button(0, 0, "Blue")
btn_yellow = create_button(0, 1, "Yellow")
btn_green = create_button(0, 2, "Green")
btn_red = create_button(0, 3, "Red")
# Right column: demo launchers.
btn_jpg = create_button(1, 0, "JPG")
#btn_accel = create_button(1, 1, "Accel")
btn_features = create_button(1, 2, "Features")
btn_mandel = create_button(1, 3, "Mandel")
# Holding "Blue" while booting captures the framebuffer to the SD card.
if btn_blue.is_pressed():
    # Take screenshot
    # press "Blue" button while booting to take screenshot
    from utils import screenshot
    screenshot(lcd, '/sd/screenshots/lcd-demo.raw')
# ------ LED utilities -------
# Named handles for pyboard LEDs 1-4 (numbering per pyb.LED).
led_red = pyb.LED(1)
led_green = pyb.LED(2)
led_yellow = pyb.LED(3)
led_blue = pyb.LED(4)
def set_led(led, value):
    """Drive *led*: truthy *value* switches it on, falsy switches it off."""
    (led.on if value else led.off)()
# ------ DEMO: Displaying a JPEG -------
# Rotating index into the JPEGs found on the SD card.
jpg_idx = 0
jpg_folder = '/sd/images'
jpg_list = ["%s/%s" % (jpg_folder, f) for f in os.listdir(jpg_folder) if f.endswith('.jpg')]
def demo_show_jpg():
    """Draw the next JPEG from jpg_list at the top-left, cycling the index."""
    # Get name from list, and shift index
    global jpg_idx, jpg_list
    filename = jpg_list[jpg_idx]
    jpg_idx = (jpg_idx + 1) % len(jpg_list)
    with open(filename, 'rb') as f:
        buf = bytearray(f.read())
    lcd.set_pos(0, 0)
    lcd.jpeg(buf)
# ------ DEMO: Graph Accelerometer Values -------
def demo_graph_accel(duration=None):
    """Placeholder for the accelerometer-graphing demo.

    TODO: actually plot accelerometer values. For now it just blocks for
    *duration* seconds. Fix: the original passed the default ``None``
    straight to ``time.sleep()``, which raises TypeError; a no-arg call is
    now a no-op.
    """
    if duration is not None:
        time.sleep(duration)
# Main event loop: poll the touch panel, mirror the colour buttons onto the
# LEDs, and launch demos. Ctrl+C from a REPL exits via KeyboardInterrupt.
try:
    # ------ Loop forever -------
    # checking for button presses
    while True:
        touch_info = lcd.get_touch()
        set_led(led_blue, btn_blue.is_pressed(*touch_info))
        set_led(led_yellow, btn_yellow.is_pressed(*touch_info))
        set_led(led_green, btn_green.is_pressed(*touch_info))
        set_led(led_red, btn_red.is_pressed(*touch_info))
        if btn_jpg.is_pressed(*touch_info):
            # Display a JPG, restoring the UI framebuffer afterwards.
            with restore_framebuffer(lcd):
                demo_show_jpg()
                time.sleep(5)
        #elif btn_accel.is_pressed(*touch_info):
        #    with restore_framebuffer(lcd):
        #        demo_graph_accel(10)
        elif btn_features.is_pressed(*touch_info):
            # Built-in Test - Features
            with restore_framebuffer(lcd):
                lcd160cr_test.test_features(lcd)
        elif btn_mandel.is_pressed(*touch_info):
            # Built-in Test - Mandel
            with restore_framebuffer(lcd):
                lcd160cr_test.test_mandel(lcd)
                time.sleep(5)
        # Small poll interval to avoid busy-waiting the touch controller.
        time.sleep(0.05)
except KeyboardInterrupt:
    # User has connected a REPL and pressed Ctrl+C
    # Clear screen
    lcd.set_pen(RED, BLACK)
    lcd.erase()
    # Draw an X (clear indication that we should be back into the REPL)
    lcd.line(0, 0, lcd.w-1, lcd.h-1)
    lcd.line(0, lcd.h-1, lcd.w-1, 0)
| fragmuffin/howto-micropython | examples/lcd-demo/main.py | main.py | py | 3,425 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "lcd160cr.LCD160CR",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lcd160cr.LCD160CR",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "widgets.Button",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.screenshot",... |
22448311010 | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
from apps import app_individual, app_aggregate
# Top-level layout: a URL component plus a container the router fills in.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content')
])
# Landing page offering links to the two analysis sub-apps.
index_page = html.Div([
    dcc.Link('Individual Analysis', href='/individual'),
    html.Br(),
    dcc.Link('Agregrate Analysis', href='/agregrate'),
])
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Route the browser URL path to the matching sub-app layout."""
    routes = {
        '/individual': app_individual.layout,
        '/agregrate': app_aggregate.layout,
    }
    return routes.get(pathname, index_page)
if __name__=="__main__":
# app.run_server(debug=True, port=5001)
app.run_server(
host='0.0.0.0',
port=8050,
debug=True
) | saeed349/Microservices-Based-Algorithmic-Trading-System-V-2.0 | Storage/dash/index.py | index.py | py | 901 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "app.app.layout",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "app.app",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dash_html_components.Div",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dash_core_component... |
30826854011 | from http import HTTPStatus
from typing import Dict
from aiohttp import web
from aiohttp.web import Request, Response, json_response
from botbuilder.core import (
BotFrameworkAdapterSettings,
ConversationState,
MemoryStorage,
UserState,
)
from botbuilder.core.integration import aiohttp_error_middleware
from botbuilder.schema import Activity
from botbuilder.applicationinsights import ApplicationInsightsTelemetryClient
from botbuilder.integration.applicationinsights.aiohttp import (
AiohttpTelemetryProcessor,
bot_telemetry_middleware,
)
from botbuilder.core.telemetry_logger_middleware import TelemetryLoggerMiddleware
from config import DefaultConfig
from dialogs import MainDialog, BookingDialog
from bots import DialogAndWelcomeBot
from adapter_with_error_handler import AdapterWithErrorHandler
from flight_booking_recognizer import FlightBookingRecognizer
CONFIG = DefaultConfig()
# Create adapter.
# See https://aka.ms/about-bot-adapter to learn more about how bots work.
SETTINGS = BotFrameworkAdapterSettings(CONFIG.CHATBOT_BOT_ID, CONFIG.CHATBOT_BOT_PASSWORD)
# Create MemoryStorage, UserState and ConversationState
MEMORY = MemoryStorage()
USER_STATE = UserState(MEMORY)
CONVERSATION_STATE = ConversationState(MEMORY)
# In-memory state only: user/conversation state is lost on restart.
ADAPTER = AdapterWithErrorHandler(SETTINGS, CONVERSATION_STATE)
class CustomApplicationInsightsTelemetryClient(ApplicationInsightsTelemetryClient):
    """Telemetry client that tags every tracked event with the main dialog's uuid."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Set by the composition root once MainDialog has been created.
        self.main_dialog = None

    def track_event(
        self,
        name: str,
        properties: Dict[str, object] = None,
        measurements: Dict[str, object] = None,
    ) -> None:
        """Forward to the base client, injecting ``mainDialogUuid``.

        Fix: the original dereferenced *properties* even when callers passed
        the default ``None``, raising TypeError; we now copy into a fresh
        dict (also avoiding mutation of the caller's dict).
        """
        # Add the uuid of the main dialog
        if self.main_dialog:
            properties = dict(properties or {})
            properties["mainDialogUuid"] = self.main_dialog.uuid
        super().track_event(name, properties=properties, measurements=measurements)
# Create telemetry client.
# Note the small 'client_queue_size'. This is for demonstration purposes. Larger queue sizes
# result in fewer calls to ApplicationInsights, improving bot performance at the expense of
# less frequent updates.
INSTRUMENTATION_KEY = CONFIG.APPINSIGHTS_INSTRUMENTATIONKEY
#TELEMETRY_CLIENT = ApplicationInsightsTelemetryClient(
TELEMETRY_CLIENT = CustomApplicationInsightsTelemetryClient(
    INSTRUMENTATION_KEY, telemetry_processor=AiohttpTelemetryProcessor(), client_queue_size=10
)
# Log every activity through the telemetry client (PII logging enabled).
TELEMETRY_MIDDLEWARE = TelemetryLoggerMiddleware(
    telemetry_client=TELEMETRY_CLIENT,
    log_personal_information=True
)
ADAPTER.use(TELEMETRY_MIDDLEWARE)
# Create dialogs and Bot
RECOGNIZER = FlightBookingRecognizer(CONFIG, telemetry_client=TELEMETRY_CLIENT)
BOOKING_DIALOG = BookingDialog()
DIALOG = MainDialog(RECOGNIZER, BOOKING_DIALOG, telemetry_client=TELEMETRY_CLIENT)
BOT = DialogAndWelcomeBot(CONVERSATION_STATE, USER_STATE, DIALOG, TELEMETRY_CLIENT)
# Wire the dialog back into the custom client so events carry its uuid.
TELEMETRY_CLIENT.main_dialog = DIALOG
# # Listen for incoming requests on /api/messages.
# async def index(req: Request) -> Response:
# name = req.match_info.get('name', "Anonymous")
# text = "Hello, " + name
# return web.Response(text=text)
# Listen for incoming requests on /api/messages.
async def messages(req: Request) -> Response:
    """Main bot message handler: deserialize the incoming activity and hand
    it to the adapter, returning the adapter's response if it produced one."""
    if "application/json" not in req.headers["Content-Type"]:
        return Response(status=HTTPStatus.UNSUPPORTED_MEDIA_TYPE)
    body = await req.json()

    activity = Activity().deserialize(body)
    auth_header = req.headers.get("Authorization", "")

    invoke_response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)
    if invoke_response:
        return json_response(data=invoke_response.body, status=invoke_response.status)
    return Response(status=HTTPStatus.OK)
def get_app(argv):
    """Build the aiohttp application and register the bot endpoint.

    ``argv`` is accepted for compatibility with aiohttp's app-factory
    convention but is not used.
    """
    application = web.Application(
        middlewares=[bot_telemetry_middleware, aiohttp_error_middleware]
    )
    application.router.add_post("/api/messages", messages)
    return application
if __name__ == "__main__":
    APP = get_app(None)
    # The original wrapped run_app in `try: ... except Exception as error:
    # raise error`, which only re-raised the caught exception (a no-op that
    # also resets part of the traceback context); removed so failures
    # propagate naturally.
    web.run_app(APP, host="localhost", port=CONFIG.PORT)
| Sako74/p10 | P10_03_chatbot/webapp/app.py | app.py | py | 4,282 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.DefaultConfig",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "botbuilder.core.BotFrameworkAdapterSettings",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "botbuilder.core.MemoryStorage",
"line_number": 36,
"usage_type": "call"
... |
1538771716 | import logging
import numpy as np
from src.MiniProjects.Maze.Maze import Maze
__author__ = 'frank.ma'
logger = logging.getLogger(__name__)
class MazeSolver(object):
    """Base class for maze solvers.

    Holds the maze, a boolean grid marking the solution path, and a
    parent-index grid (each cell initially its own parent) that concrete
    solvers can use to reconstruct paths. `solve` is a stub for subclasses.
    """

    # Cardinal moves as (row, col) offsets on the maze grid.
    MOVES = dict(N=(-1, 0), W=(0, 1), S=(1, 0), E=(0, -1))
    # Flipped to True by a subclass once `solve` finds a path.
    found_solution = False

    def __init__(self,
                 maze: Maze):
        self.maze = maze
        # Boolean mask of the maze shape; True marks cells on the found path.
        self.maze_path = np.zeros(self.maze.maze.shape, dtype=bool)
        # (x, y) parent index per cell, used for path back-tracking.
        self.maze_parent_index = self.__init_parent_index(maze.maze)

    def solve(self):
        # To be implemented by concrete solver subclasses.
        pass

    def print_solution(self,
                       output: str):
        """Write an ASCII rendering of the maze and its solution to `output`.

        Legend: '#' wall, 'X' path, 'S' start, 'E' end, ' ' open cell.
        """
        if not self.found_solution:
            logger.info('No solution is found for the given maze.')
        else:
            # NOTE(review): np.chararray is a legacy type; kept as-is because
            # savetxt relies on its decode() behavior below.
            maze_display = np.chararray(self.maze.maze.shape)
            maze_display.fill(str(' '))
            maze_display[self.maze.maze == 1] = '#'
            maze_display[self.maze_path] = 'X'
            maze_display[self.maze.start] = 'S'
            maze_display[self.maze.end] = 'E'
            np.savetxt(output, maze_display.decode(), fmt='%s', delimiter='')
            logger.info('Maze solution is printed in file %s.' % output)

    @staticmethod
    def __init_parent_index(maze: np.array):
        # Grid of (x, y) pairs: every cell starts as its own parent.
        return np.array([[(x, y) for y in range(maze.shape[1])] for x in range(maze.shape[0])])
| frankma/Finance | src/MiniProjects/Maze/MazeSolver.py | MazeSolver.py | py | 1,299 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "src.MiniProjects.Maze.Maze.Maze",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.ch... |
892004178 | import sqlite3
import csv
get_sql = """
DELETE
FROM 'table_fees'
WHERE truck_number = ? AND timestamp = ?
"""


def delete_wrong_fees(cursor: sqlite3.Cursor, wrong_fees_file: str) -> None:
    """Delete the fee rows listed in ``wrong_fees_file`` from ``table_fees``.

    :param cursor: open cursor on the database containing ``table_fees``
    :param wrong_fees_file: CSV file name, resolved relative to the parent
        of the current working directory; each row is
        ``truck_number,timestamp``
    """
    # newline='' is the documented way to open files for the csv module.
    with open(f'../{wrong_fees_file}', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        # executemany runs one prepared statement over all rows; blank lines
        # in the CSV are skipped so they cannot raise IndexError.
        cursor.executemany(get_sql, ((row[0], row[1]) for row in reader if row))
if __name__ == '__main__':
    # The sqlite3 connection context manager commits on success and rolls
    # back on an unhandled exception (it does not close the connection).
    with sqlite3.connect('../hw.db') as conn:
        cursor = conn.cursor()
        delete_wrong_fees(cursor, 'wrong_fees.csv')
delete_wrong_fees(cursor, 'wrong_fees.csv') | BogdanNos/PythonNosinovskiy | mod13/task2/task2.py | task2.py | py | 553 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.Cursor",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 18,
"usage_type": "call"
}
] |
30407259914 | from fastapi import FastAPI
from controllers.routes import router
from services.services import download_and_load_data, download_and_load, clear_db_data
from apscheduler.schedulers.background import BackgroundScheduler
def poll_and_load_datasets_again():
    """Refresh the database: wipe the existing rows, then re-download and
    load the datasets (same sequence the startup hook performs)."""
    for refresh_step in (clear_db_data, download_and_load_data):
        refresh_step()
app = FastAPI()
app.include_router(router)
@app.on_event("startup")
async def startup_event():
    """On application boot: schedule an hourly dataset refresh in the
    background, then perform an initial wipe-and-load synchronously."""
    refresh_scheduler = BackgroundScheduler()
    refresh_scheduler.add_job(poll_and_load_datasets_again, 'interval', hours=1)
    refresh_scheduler.start()

    clear_db_data()
    download_and_load_data()
if __name__ == "__main__":
    # Run the API directly with uvicorn; the scheduler is started by the
    # startup event handler above.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8080)
| A-G-U-P-T-A/LoopApi | main.py | main.py | py | 684 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "services.services.clear_db_data",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "services.services.download_and_load_data",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 12,
"usage_type": "call"
},
{... |
32858455165 | import mysql.connector
import random
import tabulate
class Db_connection:
    """Wraps the single MySQL connection to the `inventory` database."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded in source; they should be
        # moved to environment variables or a config file.
        self.connection=mysql.connector.connect(host='localhost',
                                                user='root',
                                                password='123134',
                                                database='inventory')


# Module-level singleton shared by the Cart and Inventory classes below.
db_connect=Db_connection()
class Cart:
    """Interactive customer-side shopping cart backed by the inventory DB.

    All user-supplied values are now bound as query parameters (`%s`
    placeholders, the mysql.connector paramstyle) instead of being
    interpolated into SQL strings — the original was open to SQL injection.
    """

    def __init__(self):
        self.cust_name = input('Enter your name: ')
        print('Welcome to the Clothing Store Inventory Management System!')

    @staticmethod
    def _display_rows(rows):
        # Shared pretty-printer for clothing_inventory rows:
        # (items_id, categories, brand, size, price).
        for row in rows:
            print(f'Category: {row[1]}')
            print(f'Brand: {row[2]}')
            print(f'Size: {row[3]}')
            print(f'Price: {row[4]}')
            print()

    def available_items(self):
        """Print the whole inventory as a table."""
        cursor = db_connect.connection.cursor()
        cursor.execute('select * from clothing_inventory')
        in_list = [[i[0], i[1], i[2], i[3], i[4]] for i in cursor.fetchall()]
        print(tabulate.tabulate(in_list, ['ITEM ID', 'CATEGORY', 'BRAND', 'SIZE', 'PRICE']))

    def table_creation(self):
        """Create the customer_cart table if it does not already exist."""
        cursor = db_connect.connection.cursor()
        cursor.execute('''create table if not exists customer_cart(cust_id int not null,items_id int not null,
categories varchar(255) not null,brand varchar(255) not null,size varchar(5) not null,
no_of_items decimal(10,2) not null,price decimal(10,2),
foreign key(items_id) references clothing_inventory(items_id))''')

    def browse_clothing_items(self):
        """Filter and print inventory by category, brand, price range or size."""
        cursor = db_connect.connection.cursor()
        ch = int(input('''Display available clothing items by
category Enter-1,
Brand Enter-2,
Price range Enter-3,
Size Enter-4
Enter: '''))
        if ch == 1:
            category_name = input('Enter the category: ')
            cursor.execute('select * from clothing_inventory where categories=%s',
                           (category_name.title(),))
            self._display_rows(cursor.fetchall())
        elif ch == 2:
            brand = input('Enter the Brand: ')
            cursor.execute('select * from clothing_inventory where brand=%s',
                           (brand.title(),))
            self._display_rows(cursor.fetchall())
        elif ch == 3:
            starting_price = float(input('Enter the price range: '))
            ending_price = float(input('Enter the Ending price: '))
            cursor.execute('select * from clothing_inventory where price between %s and %s',
                           (starting_price, ending_price))
            self._display_rows(cursor.fetchall())
        elif ch == 4:
            size = input('Enter the Size: ')
            # BUG FIX: the original filtered on the `categories` column here,
            # so searching by size never matched anything.
            cursor.execute('select * from clothing_inventory where size=%s',
                           (size.upper(),))
            self._display_rows(cursor.fetchall())

    def add_item_to_cart(self):
        """Repeatedly prompt for item ids/quantities and store them in the cart."""
        cust_id = self.cust_name[:2].upper() + str(random.randrange(10, 21))
        while True:
            cursor = db_connect.connection.cursor()
            items_id = int(input("Enter item's id to add item to cart: "))
            no_of_items = int(input("Enter Number of items add to cart: "))
            cursor.execute('select * from clothing_inventory where items_id=%s',
                           (items_id,))
            item = cursor.fetchone()
            if item is None:
                # Guard against unknown ids instead of crashing on None.
                print('No item with that id was found.')
            else:
                cursor.execute(
                    'insert into customer_cart values (%s,%s,%s,%s,%s,%s,%s)',
                    (cust_id, item[0], item[1], item[2], item[3],
                     no_of_items, no_of_items * item[4]))
                db_connect.connection.commit()
            ch = input("To add another product to cart Enter yes: ")
            if ch != 'yes':
                break

    def view_cart(self):
        """Print every row currently in customer_cart."""
        cursor = db_connect.connection.cursor()
        cursor.execute('select * from customer_cart')
        # customer_cart columns: cust_id, items_id, categories, brand, size,
        # no_of_items, price. BUG FIX: the original printed them shifted by
        # one (the customer id was labelled "Item's id", and so on).
        for i in cursor.fetchall():
            print(f"Item's id: {i[1]}")
            print(f'Category: {i[2]}')
            print(f'Brand: {i[3]}')
            print(f'Size: {i[4]}')
            print(f'Price: {i[6]}')
            print()

    def checkout(self):
        """Show the cart contents together with the total amount due."""
        cursor = db_connect.connection.cursor()
        cursor.execute('select * from customer_cart')
        # BUG FIX: the original duplicated the size column (i[4] twice) and
        # dropped the price, then wrapped the row list in an extra list so
        # tabulate rendered a single malformed row.
        in_list = [[i[0], i[1], i[2], i[3], i[4], i[5], i[6]]
                   for i in cursor.fetchall()]
        cursor.execute('select sum(price) from customer_cart')
        total_price = cursor.fetchone()
        print(tabulate.tabulate(
            in_list,
            ['CUSTOMER ID', 'ITEM ID', 'CATEGORY', 'BRAND', 'SIZE', 'NO.OF ITEMS', 'PRICE']))
        print(f'Amount to pay: {total_price[0]}')
class Inventory:
    """Shopkeeper-side maintenance of the clothing_inventory table.

    User input is bound via `%s` query parameters (mysql.connector
    paramstyle); the original interpolated raw input into SQL strings.
    """

    @staticmethod
    def _display_rows(rows):
        # Pretty-printer for clothing_inventory rows:
        # (items_id, categories, brand, size, price).
        for row in rows:
            print(f'Category: {row[1]}')
            print(f'Brand: {row[2]}')
            print(f'Size: {row[3]}')
            print(f'Price: {row[4]}')
            print()

    def add_clothing_item(self):
        """Prompt for item details and insert them until the user stops."""
        cursor = db_connect.connection.cursor()
        while True:
            category = input('Enter Category name: ')
            brand = input('Enter Brand name: ')
            size = input('Enter Size: ')
            price = int(input('Enter the item price: '))
            cursor.execute(
                'insert into clothing_inventory (categories,brand,size,price) '
                'values (%s,%s,%s,%s)',
                (category.title(), brand.title(), size.upper(), price))
            db_connect.connection.commit()
            ch = input("To add another product to inventory Enter yes: ")
            if ch != 'yes':
                break

    def retrieve_clothing_item(self):
        """Look up and print a single inventory item by id."""
        cursor = db_connect.connection.cursor()
        item_id = int(input('Enter Items id: '))
        cursor.execute('select * from clothing_inventory where items_id=%s',
                       (item_id,))
        self._display_rows(cursor.fetchall())

    def update_clothing_item(self):
        """Interactively change one column of an inventory item."""
        cursor = db_connect.connection.cursor()
        while True:
            item_id = int(input('Enter Items id: '))
            cursor.execute('select * from clothing_inventory where items_id=%s',
                           (item_id,))
            self._display_rows(cursor.fetchall())
            column_name = input('Enter column name to change: ')
            # Identifiers cannot be bound as parameters, so the column name
            # is validated against a whitelist before building the statement.
            if column_name not in ('categories', 'brand', 'size', 'price'):
                print('Unknown column name.')
            else:
                if column_name != 'price':
                    changed_value = input('Enter the new value: ').title()
                else:
                    changed_value = int(input('Enter the new value: '))
                cursor.execute(
                    f'update clothing_inventory set {column_name}=%s where items_id=%s',
                    (changed_value, item_id))
                db_connect.connection.commit()
            ch = input("To add another product to inventory Enter yes: ")
            if ch != 'yes':
                break

    def remove_clothing_item(self):
        """Delete inventory items by id until the user stops."""
        cursor = db_connect.connection.cursor()
        while True:
            item_id = int(input('Enter the item ID: '))
            # BUG FIX: the original statement was
            # `delete from clothing_inventory where {item_id}` — the WHERE
            # clause was just the bare id value, not a column comparison.
            cursor.execute('delete from clothing_inventory where items_id=%s',
                           (item_id,))
            db_connect.connection.commit()
            ch = input("To delete another item from inventory Enter yes: ")
            if ch != 'yes':
                break

    def view_orders(self):
        """Print every cart row joined with its inventory item."""
        cursor = db_connect.connection.cursor()
        cursor.execute('select * from clothing_inventory inner join customer_cart '
                       'on customer_cart.items_id=clothing_inventory.items_id')
        # Joined row layout: inventory columns i[0..4], then customer_cart
        # columns i[5..11] (i[6] repeats the item id and is skipped).
        for i in cursor.fetchall():
            print(f'Item Id: {i[0]}')
            print(f'Category: {i[1]}')
            print(f'Brand: {i[2]}')
            print(f'Size: {i[3]}')
            print(f'Price: {i[4]}')
            print(f'Customer id: {i[5]}')
            print(f"Selected item's category: {i[7]}")
            print(f"Selected item's brand: {i[8]}")
            print(f"Selected item's size: {i[9]}")
            print(f"Selected item's No.of items: {i[10]}")
            print(f"Selected item's price: {i[11]}")
            print()
# Top-level interactive entry point: route the user to either the customer
# (Cart) or shopkeeper (Inventory) menu loop.
# NOTE(review): the customer flow never calls Cart.table_creation(), so
# add_item_to_cart assumes customer_cart already exists — confirm.
user=int(input('''
Customer Enter-0
Shopkeeper Enter-1
Enter: '''))
if user==0:
    print('''
Enter-1 for Browse clothing items
Enter-2 for Add item to cart
Enter-3 for View cart
Enter-4 for Checkout
Enter-5 for Exit
''')
    customer=Cart()
    # Menu loop: re-print the full inventory before every prompt.
    while True:
        customer.available_items()
        user_ch=int(input('Enter your choice: '))
        if user_ch==1:
            customer.browse_clothing_items()
        elif user_ch==2:
            customer.add_item_to_cart()
        elif user_ch==3:
            customer.view_cart()
        elif user_ch==4:
            customer.checkout()
        elif user_ch==5:
            break
elif user==1:
    print('''
Enter-1 for Add clothing item
Enter-2 for Retrieve clothing item
Enter-3 for Update clothing item
Enter-4 for Remove clothing item
Enter-5 for View orders
Enter-6 for Exit''')
    seller=Inventory()
    while True:
        shopkeeper_ch=int(input('Enter your choice: '))
        if shopkeeper_ch==1:
            seller.add_clothing_item()
        elif shopkeeper_ch==2:
            seller.retrieve_clothing_item()
        elif shopkeeper_ch==3:
            seller.update_clothing_item()
        elif shopkeeper_ch==4:
            seller.remove_clothing_item()
        elif shopkeeper_ch==5:
            seller.view_orders()
        elif shopkeeper_ch==6:
            break
| vjkmr0898/programs | programs/clothing.py | clothing.py | py | 9,958 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 6,
"usage_type": "name"
},
{
"api... |
30951274678 | """Main module."""
import os
from datetime import datetime
import numpy as np
from scipy.optimize import least_squares
import click
import matplotlib.pyplot as plt
from geometric_calibration.reader import (
read_img_label_file,
read_projection_hnc,
read_projection_raw,
)
from geometric_calibration.utils import (
deg2rad,
angle2rotm,
get_grayscale_range,
create_camera_matrix,
project_camera_matrix,
drag_and_drop_bbs,
search_bbs_centroids,
)
def calibrate_cbct(projection_dir, bbs_3d, sad, sid):
    """Main CBCT Calibration routines.

    :param projection_dir: path to directory containing .raw files
    :type projection_dir: str
    :param bbs_3d: array containing 3D coordinates of BBs
    :type bbs_3d: numpy.array
    :param sad: nominal source to isocenter (A) distance
    :type sad: float
    :param sid: nominal source to image distance
    :type sid: float
    :return: dictionary with calibration results
    :rtype: dict
    """
    # RCS: room coordinate system
    # A: isocenter
    # Read image labels (file name + gantry angle for every projection)
    labels_file_path = os.path.join(projection_dir, "imgLabels.txt")
    proj_file, angles = read_img_label_file(labels_file_path)
    # Initialize output dictionary: one list entry per calibrated view
    results = {
        "proj_angles": [],
        "panel_orientation": [],
        "sid": [],
        "sad": [],
        "isocenter": [],
        "source": [],
        "panel": [],
        "img_center": [],
        "err_init": [],
        "err_final": [],
    }
    # Calibrate views
    with click.progressbar(
        iterable=range(len(angles)), fill_char="=", empty_char=" "
    ) as prog_bar:
        for k in prog_bar:
            proj_path = os.path.join(
                projection_dir, proj_file[k]
            )  # path of the current image
            if k == 0:  # no indications other than nominal values
                # Calibrate first view with drag and drop procedure
                view_results = calibrate_projection(
                    proj_path,
                    bbs_3d,
                    sad=sad,
                    sid=sid,
                    angle=angles[k],
                    angle_offset=0,
                    img_dim=[1024, 768],
                    pixel_size=[0.388, 0.388],
                    search_area=7,
                    drag_and_drop=True,
                )
            else:  # if not first iteration
                # initialize geometry (based on previous optimization):
                # seed this view with the image center found for the
                # previous one and express its angle as an offset.
                angle_offset = angles[k] - angles[k - 1]
                image_center = view_results["img_center"]
                # Calibrate other views without drag and drop procedure
                view_results = calibrate_projection(
                    proj_path,
                    bbs_3d,
                    sad=sad,
                    sid=sid,
                    angle=angles[k - 1],
                    angle_offset=angle_offset,
                    img_dim=[1024, 768],
                    pixel_size=[0.388, 0.388],
                    search_area=7,
                    image_center=image_center,
                    drag_and_drop=False,
                )
            # Update output dictionary with this view's solution
            results["proj_angles"].append(view_results["proj_angle"])
            results["panel_orientation"].append(
                view_results["panel_orientation"]
            )
            results["sid"].append(view_results["sid"])
            results["sad"].append(view_results["sad"])
            results["isocenter"].append(view_results["isocenter"])
            results["source"].append(view_results["source"])
            results["panel"].append(view_results["panel"])
            results["img_center"].append(view_results["img_center"])
            results["err_init"].append(view_results["err_init"])
            results["err_final"].append(view_results["err_final"])
    return results
def calibrate_2d(projection_dir, bbs_3d, sad, sid):
    """Main 2D Calibration routines.

    :param projection_dir: path to directory containing .raw files
    :type projection_dir: str
    :param bbs_3d: array containing 3D coordinates of BBs
    :type bbs_3d: numpy.array
    :param sad: nominal source to isocenter (A) distance
    :type sad: float
    :param sid: nominal source to image distance
    :type sid: float
    :return: dictionary with calibration results
    :rtype: dict
    """
    # RCS: room coordinate system
    # A: isocenter
    # Find projection files in the current folder.
    # BUG FIX: the original test `("AP" or "RL") and (".raw" or ".hnc") in f`
    # collapsed to `".raw" in f` (non-empty string literals are truthy and
    # `or` short-circuits), so .hnc files were never picked up and .raw files
    # without AP/RL in the name were appended with no matching angle entry.
    proj_file = []
    angles = []
    for f in os.listdir(projection_dir):
        if any(view in f for view in ("AP", "RL")) and any(
            ext in f for ext in (".raw", ".hnc")
        ):
            proj_file.append(f)
            if "AP" in f:
                angles.append(0)
            elif "RL" in f:
                angles.append(90)
    # Initialize output dictionary
    results = {
        "proj_angles": [],
        "panel_orientation": [],
        "sid": [],
        "sad": [],
        "isocenter": [],
        "source": [],
        "panel": [],
        "img_center": [],
        "err_init": [],
        "err_final": [],
    }
    # Calibrate views
    with click.progressbar(
        iterable=range(len(angles)), fill_char="=", empty_char=" ",
    ) as prog_bar:
        for k in prog_bar:
            proj_path = os.path.join(
                projection_dir, proj_file[k]
            )  # path of the current image
            # Calibrate views with drag and drop procedure
            view_results = calibrate_projection(
                proj_path,
                bbs_3d,
                sad=sad,
                sid=sid,
                angle=angles[k],
                angle_offset=0,
                img_dim=[2048, 1536],
                pixel_size=[0.388, 0.388],
                search_area=14,
                resolution_factor=2,
                drag_and_drop=True,
            )
            # Update output dictionary
            results["proj_angles"].append(view_results["proj_angle"])
            results["panel_orientation"].append(
                view_results["panel_orientation"]
            )
            results["sid"].append(view_results["sid"])
            results["sad"].append(view_results["sad"])
            results["isocenter"].append(view_results["isocenter"])
            results["source"].append(view_results["source"])
            results["panel"].append(view_results["panel"])
            results["img_center"].append(view_results["img_center"])
            results["err_init"].append(view_results["err_init"])
            results["err_final"].append(view_results["err_final"])
    return results
def calibrate_projection(
    projection_file,
    bbs_3d,
    sad,
    sid,
    angle,
    angle_offset=0,
    img_dim=[1024, 768],
    pixel_size=[0.388, 0.388],
    search_area=7,
    resolution_factor=1,
    image_center=None,
    drag_and_drop=True,
):
    """Calibration of a single projection.

    :param projection_file: path to file
    :type projection_file: str
    :param bbs_3d: 3D coordinates of phantom's reference points
    :type bbs_3d: numpy.array
    :param sad: nominal source to isocenter (A) distance
    :type sad: float
    :param sid: nominal source to image distance
    :type sid: float
    :param angle: gantry angle for current projection
    :type angle: float
    :param angle_offset: angle offset for panel, defaults to 0
    :type angle_offset: int, optional
    :param img_dim: image dimensions in pixels, defaults to [1024, 768]
    :type img_dim: list, optional
    :param pixel_size: pixel dimensions in mm, defaults to [0.388, 0.388]
    :type pixel_size: list, optional
    :param search_area: dimension of reference point's searching area, defaults
     to 7
    :type search_area: int, optional
    :param resolution_factor: resolution factor, when mode is "cbct" this
     parameter equals to 1, in 2D mode is 2 (because resolution is doubled),
     defaults to 1
    :type resolution_factor: int, optional
    :param image_center: center of image, defaults to None
    :type image_center: list, optional
    :param drag_and_drop: whether or not perform Drag&Drop correction routines,
     typically set to True for first projection. Defaults to True
    :type drag_and_drop: bool, optional
    :raises Exception: if less than 5 BBs centroids are recognized, optimizer
     automatically fails since calibration can't be consider reliable
    :return: dictionary with calibration results for current projection
    :rtype: dict
    """
    results = {}

    if image_center is None:  # in case image_center is not declared
        image_center = [img_dim[1] / 2, img_dim[0] / 2]
    isocenter = [0, 0, 0]

    # panel orientation (from panel to brandis reference - rotation along y)
    panel_orientation = np.array([0, deg2rad(angle), 0]) + np.array(
        [0, deg2rad(angle_offset), 0]
    )

    # Load projection
    # NOTE(review): if the path contains neither ".raw" nor ".hnc", `img` is
    # never bound and the get_grayscale_range call below raises
    # UnboundLocalError — confirm inputs are always one of the two formats.
    if ".raw" in projection_file:
        img = read_projection_raw(projection_file, img_dim)
    elif ".hnc" in projection_file:
        img = read_projection_hnc(projection_file, img_dim)

    # Project points starting from extrinsic and intrinsic parameters
    # generate proj_matrix (extrinsic and intrinsic parameters)
    T = create_camera_matrix(panel_orientation, sid, sad, pixel_size, isocenter)
    # projected coordinates of brandis on panel plane
    r2d = project_camera_matrix(
        bbs_3d, image_center, T, resolution_factor
    )  # 2d coordinates of reference points

    grayscale_range = get_grayscale_range(img)

    if drag_and_drop is True:
        # Overlay reference bbs with projection: the user manually aligns the
        # projected grid before the automatic centroid search.
        r2d_corrected = drag_and_drop_bbs(
            projection_path=img,
            bbs_projected=r2d,
            grayscale_range=grayscale_range,
        )

    # Starting from the updated coordinates, define a search area around them
    # and identify the bbs as black pixels inside these areas (brandis are used
    # as probes)
    if drag_and_drop is True:
        bbs_centroid = search_bbs_centroids(
            img=img,
            ref_2d=r2d_corrected,
            search_area=search_area,
            dim_img=img_dim,
            grayscale_range=grayscale_range,
        )
    else:
        bbs_centroid = search_bbs_centroids(
            img=img,
            ref_2d=r2d,
            search_area=search_area,
            dim_img=img_dim,
            grayscale_range=grayscale_range,
        )

    # Calibration - non linear data fitting optimization problem
    # Indices of BBs whose centroid was actually found (row is not NaN)
    index = np.where(~np.isnan(bbs_centroid[:, 0]))[0]

    # Estimated BBs
    bbs_estim_init = bbs_centroid[
        ~np.isnan(bbs_centroid).any(axis=1)
    ]  # not consider if out of searching area

    # Real Brandis BBs
    bbs_real_init = bbs_3d[index, :]

    # x0: [panel orientation (3), image center (2), sid, sad]
    parameters = np.append(panel_orientation, image_center).tolist()
    parameters.append(sid)
    parameters.append(sad)

    # Boundaries: orientation constrained tightly except the gantry axis,
    # sid/sad allowed to move by at most 1 mm from nominal
    angle_limit = 0.05
    sid_sad_limit = 1
    low_bound = [
        -angle_limit,
        -np.pi,
        -angle_limit,
        0,
        0,
        sid - sid_sad_limit,
        sad - sid_sad_limit,
    ]
    up_bound = [
        angle_limit,
        np.pi,
        angle_limit,
        img_dim[1],
        img_dim[0],
        sid + sid_sad_limit,
        sad + sid_sad_limit,
    ]

    # NOTE(review): the comment/docstring say "at least 5 BBs" but `> 5`
    # actually requires at least 6 detected centroids — confirm intent.
    if index.shape[0] > 5:  # at least 5 BBs
        sol = least_squares(
            fun=calibration_cost_function,
            x0=parameters,
            args=(bbs_real_init, pixel_size, bbs_estim_init, isocenter,),
            method="trf",
            bounds=(low_bound, up_bound)
            # verbose=2,
        )

        sol = sol.x  # Solution found

        panel_orientation_new = np.array(sol[:3])  # New panel orientation
        image_center_new = np.array(sol[3:5])  # New center of image
        sid_new = sol[5]
        sad_new = sol[6]
        isocenter_new = isocenter
    else:
        raise Exception("Cannot properly process last projection. Please Retry")

    # project based on calibration - use new panel orientation,
    # tube and panel position
    T = create_camera_matrix(
        panel_orientation_new, sid_new, sad_new, pixel_size, isocenter_new
    )  # projected coordinates of brandis on panel plane
    bbs_estim_final = project_camera_matrix(
        bbs_3d, image_center_new, T
    )  # projected BBs (considering unknown)

    # calculate improvement: mean absolute residual before vs after fitting
    err_init = bbs_estim_init - r2d[index, :]  # estimated - projected
    err_final = bbs_estim_init - bbs_estim_final[index, :]
    err_init = np.mean(abs(err_init))
    err_final = np.mean(abs(err_final))

    # calculate new source/panel position
    T_new = angle2rotm(
        panel_orientation_new[0],
        panel_orientation_new[1],
        panel_orientation_new[2],
    )
    R_new = T_new[:3, :3]

    source_new = (isocenter_new + (R_new * np.array([0, 0, sad_new])))[:, 2]
    panel_new = (isocenter_new + (R_new * np.array([0, 0, sad_new - sid_new])))[
        :, 2
    ]

    # update with new value
    results["proj_angle"] = angle
    results["panel_orientation"] = panel_orientation_new
    results["sid"] = sid_new
    results["sad"] = sad_new
    results["isocenter"] = isocenter_new
    results["source"] = source_new
    results["panel"] = panel_new
    results["img_center"] = image_center_new
    results["err_init"] = err_init
    results["err_final"] = err_final

    return results
def calibration_cost_function(param, bbs_3d, pixel_size, bbs_2d, isocenter):
    """Residual function minimised by the calibration optimizer.

    :param param: parameters being optimized, laid out as
        [panel orientation (3), image center (2), sid, sad]
    :type param: list
    :param bbs_3d: 3D coordinates of reference BBs
    :type bbs_3d: numpy.array
    :param pixel_size: pixel dimensions in mm
    :type pixel_size: list
    :param bbs_2d: 2D coordinates of BBs detected on the current image
    :type bbs_2d: numpy.array
    :param isocenter: coordinates of isocenter
    :type isocenter: numpy.array
    :return: per-BB squared distance between projected and detected points
    :rtype: numpy.array
    """
    # Unpack the unknowns from the flat parameter vector
    panel_rot = np.array(param[:3])
    centre = np.array(param[3:5])
    sid = np.array(param[5])
    sad = np.array(param[6])

    # Project the reference BBs with the candidate geometry
    proj_matrix = create_camera_matrix(
        panel_rot, sid, sad, pixel_size, isocenter
    )
    projected = project_camera_matrix(bbs_3d, centre, proj_matrix)

    residual = projected - bbs_2d
    # Squared distance, accounting for both panel directions
    return np.square(residual[:, 0]) + np.square(residual[:, 1])
def plot_calibration_results(calib_results):
    """Plot source/panel position after calibration.

    :param calib_results: dictionary containing results of a calibration
    :type calib_results: dict
    """
    source_pos = np.array(calib_results["source"])
    panel_pos = np.array(calib_results["panel"])
    isocenter = np.array(calib_results["isocenter"])

    def on_key_pressed(event):
        # Let the user dismiss the figure with the Enter key.
        if event.key == "enter":
            plt.close()

    # Plot panel and source positions (trajectory)
    fig = plt.figure(num="Source/Panel Position")
    fig.canvas.mpl_connect("key_press_event", on_key_pressed)
    ax = fig.add_subplot(111, projection="3d")

    ax.scatter(
        source_pos[:, 0],
        source_pos[:, 1],
        source_pos[:, 2],
        marker=".",
        c="g",
        label="Source Position",
    )
    ax.scatter(
        panel_pos[:, 0],
        panel_pos[:, 1],
        panel_pos[:, 2],
        marker=".",
        c="r",
        label="Panel Position",
    )
    # Only the first isocenter entry is drawn (one per view is stored).
    ax.scatter(
        isocenter[0, 0],
        isocenter[0, 1],
        isocenter[0, 2],
        marker=".",
        c="b",
        label="Isocenter Position",
    )

    plt.title("Panel/Source position after calibration\nPress Enter to close")
    ax.set_xlabel("X Label [mm]")
    ax.set_ylabel("Y Label [mm]")
    ax.set_zlabel("Z Label [mm]")
    fig.legend(loc="lower right")
    plt.show()
def save_lut(path, calib_results, mode):
    """Save LUT file for a calibration.

    Writes a fixed 5-line header followed by one line per projection with
    angle, panel orientation, image center, SID and SAD. The file name is
    timestamped and prefixed according to ``mode``.

    :param path: path to .raw file directory, where LUT will be saved
    :type path: str
    :param calib_results: dictionary containing results for a calibration
    :type calib_results: dict
    :param mode: acquisition modality for calibration ("cbct" or "2d")
    :type mode: str
    """
    angles = calib_results["proj_angles"]
    panel_orientation = calib_results["panel_orientation"]
    image_center = calib_results["img_center"]
    sid = calib_results["sid"]
    sad = calib_results["sad"]

    clock = datetime.now()

    if mode == "cbct":
        filename = "CBCT_LUT_{}_{:02}_{:02}-{:02}_{:02}.txt".format(
            clock.year, clock.month, clock.day, clock.hour, clock.minute,
        )
    elif mode == "2d":
        filename = "2D_LUT_{}_{:02}_{:02}-{:02}_{:02}.txt".format(
            clock.year, clock.month, clock.day, clock.hour, clock.minute,
        )

    output_file = os.path.join(path, filename)

    with open(output_file, "w") as res_file:
        # Header is exactly 5 lines; downstream readers rely on this size.
        res_file.write(f"#Look Up Table for {mode.upper()} reconstruction\n")
        res_file.write(
            "#Angle (deg) | Panel Orientation(rad) [X Y Z] | Image_center(pixel) X Y | SID(mm) | SAD(mm)\n"
        )
        res_file.write(
            "#Date:{}_{}_{}_Time:{}_{}_{}.{}\n".format(
                clock.year,
                clock.month,
                clock.day,
                clock.hour,
                clock.minute,
                clock.second,
                clock.microsecond,
            )
        )
        res_file.write("#\n")
        res_file.write("# --> END OF HEADER. FIXED SIZE: 5 lines. \n")

        # One fixed-width line per calibrated projection.
        for k in range(len(angles)):
            res_file.write(
                "{:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f} {:6.12f}\n".format(
                    angles[k],
                    panel_orientation[k][0],
                    panel_orientation[k][1],
                    panel_orientation[k][2],
                    image_center[k][0],
                    image_center[k][1],
                    sid[k],
                    sad[k],
                )
            )
        res_file.write(
            r"# END OF FILE. REQUIRED TO ENSURE '\n' at the end of last calibration line. NO MORE LINES AFTER THIS!!!"
        )
| mrossi93/geometric_calibration | geometric_calibration/geometric_calibration.py | geometric_calibration.py | py | 18,359 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "geometric_calibration.reader.read_img_label_file",
"line_number": 46,
"usage_type": "call"
},
{
"api_na... |
40091316327 | import torchvision
from torchvision import transforms
from torch.utils import data
def load_data_fashion_mnist(batch_size, resize=None, workers=4):
    """Download Fashion-MNIST (if not cached under ../data) and return a
    pair of DataLoaders (train, test).

    :param batch_size: mini-batch size for both loaders
    :param resize: optional target size applied before ToTensor
    :param workers: number of DataLoader worker processes
    """
    steps = []
    if resize:
        steps.append(transforms.Resize(resize))
    steps.append(transforms.ToTensor())
    trans = transforms.Compose(steps)

    train_set = torchvision.datasets.FashionMNIST(
        root="../data", train=True, transform=trans, download=True)
    test_set = torchvision.datasets.FashionMNIST(
        root="../data", train=False, transform=trans, download=True)

    train_loader = data.DataLoader(train_set, batch_size, shuffle=True,
                                   num_workers=workers)
    test_loader = data.DataLoader(test_set, batch_size, shuffle=False,
                                  num_workers=workers)
    return (train_loader, test_loader)
def get_fashion_mnist_labels(labels):
    """Map Fashion-MNIST numeric class indices onto their text labels.

    Defined in :numref:`sec_fashion_mnist`

    :param labels: iterable of class indices (anything ``int()`` accepts)
    :return: list of label strings, in input order
    """
    names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
             'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [names[int(idx)] for idx in labels]
| huangluyao/simple_network | simple_network/dataset/load_data.py | load_data.py | py | 1,177 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 11,
"usage_type": "call"
},
{
... |
15043765898 | from typing import List, Tuple
import pathlib
import copy
import numpy as np
import networkx as nx
from tqdm.auto import tqdm
from defdap import ebsd
from defdap.quat import Quat
from beta_reconstruction.crystal_relations import (
unq_hex_syms, hex_syms, unq_cub_syms, burg_trans
)
def calc_beta_oris(alpha_ori: Quat) -> List[Quat]:
    """Calculate the possible beta orientations for a given alpha orientation.

    Applies the Burgers relation after each unique hexagonal symmetry of the
    alpha orientation.

    Parameters
    ----------
    alpha_ori
        Orientation of an alpha grain

    Returns
    -------
    list of Quat
        List of possible beta orientations
    """
    return [burg_trans * sym.conjugate * alpha_ori for sym in unq_hex_syms]
def beta_oris_from_cub_sym(
    alpha_ori: Quat,
    unq_cub_sym_idx: int,
    hex_sym_idx: int
) -> List[Quat]:
    """Compute candidate beta orientations for one (cubic, hexagonal)
    symmetry combination.

    Parameters
    ----------
    alpha_ori
        The orientation of the grain in the alpha phase.
    unq_cub_sym_idx
        Index into the unique cubic symmetries (0-11).
    hex_sym_idx
        Index into the hexagonal symmetries (0-11).

    Returns
    -------
    list of Quat
        Possible beta orientations from given symmetries
    """
    if not (0 <= unq_cub_sym_idx <= 11):
        raise ValueError("unq_cub_sym_idx must be between 0 and 11 inclusive")
    if not (0 <= hex_sym_idx <= 11):
        raise ValueError("hex_sym_idx must be between 0 and 11 inclusive")

    base_ori = hex_syms[hex_sym_idx].conjugate * alpha_ori

    # Every case yields at least one beta orientation.
    variants = [burg_trans * base_ori]
    if unq_cub_sym_idx == 9:
        # Two extra variants:
        # B - unq_hex_syms[1] is C^+_3z, C - unq_hex_syms[2] is C^+_6z
        variants.append(burg_trans * unq_hex_syms[1].conjugate * base_ori)
        variants.append(burg_trans * unq_hex_syms[2].conjugate * base_ori)
    if unq_cub_sym_idx > 9:
        # One extra variant: D - unq_hex_syms[4] is C'_22
        variants.append(burg_trans * unq_hex_syms[4].conjugate * base_ori)
    return variants
def calc_misori_of_variants(
alpha_ori_inv: Quat,
neighbour_ori: Quat,
unq_cub_sym_comps: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate possible symmetry variants between two orientations.
Calculate all possible sym variants for misorientation between two
orientations undergoing a Burgers type transformation. Then calculate
the misorientation to the nearest cubic symmetry, this is the deviation
to a perfect Burgers transformation.
Parameters
----------
alpha_ori_inv
Inverse of first orientation
neighbour_ori
Second orientation
unq_cub_sym_comps
Components of the unique cubic symmetries
Returns
-------
min_misoris : np.ndarray
The minimum misorientation for each of the possible beta variants - shape (12, 12)
min_cub_sym_idx : np.ndarray
The minimum cubic symmetry index for each of the possible variants - shape (12, 12)
"""
# calculate all possible S^B_m (eqn 11. from [1]) from the
# measured misorientation from 2 neighbour alpha grains
# for each S^B_m calculate the 'closest' cubic symmetry
# (from reduced subset) and the deviation from this symmetry
# Vectorised calculation of:
# hex_sym[j].inv * ((neighbour_ori * alpha_ori_inv) * hex_sym[i])
# labelled: d = h2.inv * (c * h1)
hex_sym_comps = Quat.extract_quat_comps(hex_syms)
c = (neighbour_ori * alpha_ori_inv).quatCoef
h1 = np.repeat(hex_sym_comps, 12, axis=1) # outer loop
h2 = np.tile(hex_sym_comps, (1, 12)) # inner loop
d = np.zeros_like(h1)
c_dot_h1 = c[1]*h1[1] + c[2]*h1[2] + c[3]*h1[3]
c_dot_h2 = c[1]*h2[1] + c[2]*h2[2] + c[3]*h2[3]
h1_dot_h2 = h1[1]*h2[1] + h1[2]*h2[2] + h1[3]*h2[3]
d[0] = (c[0]*h1[0]*h2[0] - h2[0]*c_dot_h1 +
c[0]*h1_dot_h2 + h1[0]*c_dot_h2 +
h2[1] * (c[2]*h1[3] - c[3]*h1[2]) +
h2[2] * (c[3]*h1[1] - c[1]*h1[3]) +
h2[3] * (c[1]*h1[2] - c[2]*h1[1]))
d[1] = (c[0]*h2[0]*h1[1] + h1[0]*h2[0]*c[1] - c[0]*h1[0]*h2[1] +
c_dot_h1*h2[1] + c_dot_h2*h1[1] - h1_dot_h2*c[1] +
h2[0] * (c[2]*h1[3] - c[3]*h1[2]) +
c[0] * (h1[2]*h2[3] - h1[3]*h2[2]) +
h1[0] * (c[2]*h2[3] - c[3]*h2[2]))
d[2] = (c[0]*h2[0]*h1[2] + h1[0]*h2[0]*c[2] - c[0]*h1[0]*h2[2] +
c_dot_h1*h2[2] + c_dot_h2*h1[2] - h1_dot_h2*c[2] +
h2[0] * (c[3]*h1[1] - c[1]*h1[3]) +
c[0] * (h1[3]*h2[1] - h1[1]*h2[3]) +
h1[0] * (c[3]*h2[1] - c[1]*h2[3]))
d[3] = (c[0]*h2[0]*h1[3] + h1[0]*h2[0]*c[3] - c[0]*h1[0]*h2[3] +
c_dot_h1*h2[3] + c_dot_h2*h1[3] - h1_dot_h2*c[3] +
h2[0] * (c[1]*h1[2] - c[2] * h1[1]) +
c[0] * (h1[1]*h2[2] - h1[2] * h2[1]) +
h1[0] * (c[1]*h2[2] - c[2] * h2[1]))
# Vectorised calculation of:
# burg_trans * (d * burg_trans.inv)
# labelled: beta_vars = b * (c * b.inv)
b = burg_trans.quatCoef
beta_vars = np.zeros_like(h1)
b_dot_b = b[1]*b[1] + b[2]*b[2] + b[3]*b[3]
b_dot_d = b[1]*d[1] + b[2]*d[2] + b[3]*d[3]
beta_vars[0] = d[0] * (b[0]*b[0] + b_dot_b)
beta_vars[1] = (d[1] * (b[0]*b[0] - b_dot_b) + 2*b_dot_d*b[1] +
2*b[0] * (b[2]*d[3] - b[3]*d[2]))
beta_vars[2] = (d[2] * (b[0]*b[0] - b_dot_b) + 2*b_dot_d*b[2] +
2*b[0] * (b[3]*d[1] - b[1]*d[3]))
beta_vars[3] = (d[3] * (b[0]*b[0] - b_dot_b) + 2*b_dot_d*b[3] +
2*b[0] * (b[1]*d[2] - b[2]*d[1]))
# calculate misorientation to each of the cubic symmetries
misoris = np.einsum("ij,ik->jk", beta_vars, unq_cub_sym_comps)
misoris = np.abs(misoris)
misoris[misoris > 1] = 1.
misoris = 2 * np.arccos(misoris)
# find the cubic symmetry with minimum misorientation for each of
# the beta misorientation variants
min_cub_sym_idx = np.argmin(misoris, axis=1)
min_misoris = misoris[np.arange(144), min_cub_sym_idx]
# reshape to 12 x 12 for each of the hex sym multiplications
min_cub_sym_idx = min_cub_sym_idx.reshape((12, 12))
min_misoris = min_misoris.reshape((12, 12))
return min_misoris, min_cub_sym_idx
def calc_beta_oris_from_misori(
alpha_ori: Quat,
neighbour_oris: List[Quat],
burg_tol: float = 5
) -> Tuple[List[List[Quat]], List[float]]:
"""Calculate the possible beta orientations for a given alpha
orientation using the misorientation relation to neighbour orientations.
Parameters
----------
alpha_ori
A quaternion representing the alpha orientation
neighbour_oris
Quaternions representing neighbour grain orientations
burg_tol
The threshold misorientation angle to determine neighbour relations
Returns
-------
list of lists of defdap.Quat.quat
Possible beta orientations, grouped by each neighbour. Any
neighbour with deviation greater than the tolerance is excluded.
list of float
Deviations from perfect Burgers transformation
"""
burg_tol *= np.pi / 180.
# This needed to move further up calculation process
unq_cub_sym_comps = Quat.extract_quat_comps(unq_cub_syms)
alpha_ori_inv = alpha_ori.conjugate
beta_oris = []
beta_devs = []
for neighbour_ori in neighbour_oris:
min_misoris, min_cub_sym_idxs = calc_misori_of_variants(
alpha_ori_inv, neighbour_ori, unq_cub_sym_comps
)
# find the hex symmetries (i, j) from give the minimum
# deviation from the burgers relation for the minimum store:
# the deviation, the hex symmetries (i, j) and the cubic
# symmetry if the deviation is over a threshold then set
# cubic symmetry to -1
min_misori_idx = np.unravel_index(np.argmin(min_misoris),
min_misoris.shape)
burg_dev = min_misoris[min_misori_idx]
if burg_dev < burg_tol:
beta_oris.append(beta_oris_from_cub_sym(
alpha_ori, min_cub_sym_idxs[min_misori_idx], int(min_misori_idx[0])
))
beta_devs.append(burg_dev)
return beta_oris, beta_devs
def calc_beta_oris_from_boundary_misori(
grain: ebsd.Grain,
neighbour_network: nx.Graph,
quat_array: np.ndarray,
alpha_phase_id : int,
burg_tol: float = 5
) -> Tuple[List[List[Quat]], List[float], List[Quat]]:
"""Calculate the possible beta orientations for pairs of alpha and
neighbour orientations using the misorientation relation to neighbour
orientations.
Parameters
----------
grain
The grain currently being reconstructed
neighbour_network
A neighbour network mapping grain boundary connectivity
quat_array
Array of quaternions, representing the orientations of the pixels of the EBSD map
burg_tol :
The threshold misorientation angle to determine neighbour relations
Returns
-------
list of lists of defdap.Quat.quat
Possible beta orientations, grouped by each neighbour. Any
neighbour with deviation greater than the tolerance is excluded.
list of float
Deviations from perfect Burgers transformation
list of Quat
Alpha orientations
"""
# This needed to move further up calculation process
unq_cub_sym_comps = Quat.extract_quat_comps(unq_cub_syms)
beta_oris = []
beta_devs = []
alpha_oris = []
neighbour_grains = neighbour_network.neighbors(grain)
neighbour_grains = [grain for grain in neighbour_grains
if grain.phaseID == alpha_phase_id]
for neighbour_grain in neighbour_grains:
bseg = neighbour_network[grain][neighbour_grain]['boundary']
# check sense of bseg
if grain is bseg.grain1:
ipoint = 0
else:
ipoint = 1
for boundary_point_pair in bseg.boundaryPointPairsX:
point = boundary_point_pair[ipoint]
alpha_ori = quat_array[point[1], point[0]]
point = boundary_point_pair[ipoint - 1]
neighbour_ori = quat_array[point[1], point[0]]
min_misoris, min_cub_sym_idxs = calc_misori_of_variants(
alpha_ori.conjugate, neighbour_ori, unq_cub_sym_comps
)
# find the hex symmetries (i, j) from give the minimum
# deviation from the burgers relation for the minimum store:
# the deviation, the hex symmetries (i, j) and the cubic
# symmetry if the deviation is over a threshold then set
# cubic symmetry to -1
min_misori_idx = np.unravel_index(np.argmin(min_misoris),
min_misoris.shape)
burg_dev = min_misoris[min_misori_idx]
if burg_dev < burg_tol / 180 * np.pi:
beta_oris.append(beta_oris_from_cub_sym(
alpha_ori, min_cub_sym_idxs[min_misori_idx], int(min_misori_idx[0])
))
beta_devs.append(burg_dev)
alpha_oris.append(alpha_ori)
return beta_oris, beta_devs, alpha_oris
def count_beta_variants(
beta_oris: List[Quat],
possible_beta_oris: List[List[Quat]],
ori_tol: float
) -> np.ndarray:
"""
Parameters
----------
beta_oris
Possible beta orientations from burgers relation, there are always 6
possible_beta_oris
Possible beta orientations from misorientations between neighbouring grains
ori_tol
Tolerance for binning of the orientations into the possible 6
Returns
-------
list of int:
The variant count for the grain
"""
if not possible_beta_oris:
return np.zeros(6, dtype=int)
# divide 2 because of 2* in misorientation definition
ori_tol = np.cos(ori_tol / 2 * np.pi / 180.)
# flatten list of lists
possible_beta_oris = [item for sublist in possible_beta_oris for item in sublist]
misoris = np.empty((len(possible_beta_oris), 6))
for ori_index, ori in enumerate(possible_beta_oris):
for other_ori_index, other_ori in enumerate(beta_oris):
misoris[ori_index, other_ori_index] = ori.misOri(other_ori, "cubic")
# max is actually min because actual misorientation is arccos of this
max_misoris_idx = np.nanargmax(misoris, axis=1)
max_misoris = misoris[np.arange(len(possible_beta_oris)), max_misoris_idx]
variant_count, _ = np.histogram(max_misoris_idx[max_misoris > ori_tol],
range(0, 7))
return variant_count
def load_map(
ebsd_path: str,
min_grain_size: int = 3,
boundary_tolerance: int = 3,
use_kuwahara: bool = False,
kuwahara_tolerance: int = 5
) -> ebsd.Map:
"""Load in EBSD data and do the required prerequisite computations."""
ebsd_path = pathlib.Path(ebsd_path)
if ebsd_path.suffix == ".ctf":
map_type = "OxfordText"
elif ebsd_path.suffix == ".crc":
map_type = "OxfordBinary"
else:
raise TypeError("Unknown EBSD map type. Can only read .ctf and .crc files.")
ebsd_map = ebsd.Map(ebsd_path.with_suffix(""), dataType=map_type)
ebsd_map.buildQuatArray()
if use_kuwahara:
ebsd_map.filterData(misOriTol=kuwahara_tolerance)
ebsd_map.findBoundaries(boundDef=boundary_tolerance)
ebsd_map.findGrains(minGrainSize=min_grain_size)
ebsd_map.calcGrainAvOris()
ebsd_map.buildNeighbourNetwork()
return ebsd_map
def modal_variant(alpha_grains: List[ebsd.Grain]) -> np.ndarray:
"""Given a map of grains with variant counts, assign the prior beta
orientation of the grains to the variant with the highest count.
Parameters
----------
alpha_grains
Grains to calculate modal variant
Returns
-------
Array of the modal variants
"""
modal_variants = np.empty(len(alpha_grains), dtype=np.int8)
for i, grain in enumerate(alpha_grains):
variant_count = grain.variant_count
mode_variant = np.where(variant_count == np.max(variant_count))[0]
if len(mode_variant) == 1:
mode_variant = mode_variant[0]
else:
# multiple variants with same max
mode_variant = -1
modal_variants[i] = mode_variant
return modal_variants
def assign_beta_variants(
ebsd_map: ebsd.Map,
mode: str = "modal",
alpha_phase_id: int = 0
):
"""Given a map of grains with variant counts, determine the prior
beta orientation of the grains.
Parameters
----------
ebsd_map:
EBSD map to assign the beta variants for.
mode
How to perform beta orientation assignment
'modal': The beta orientation is assigned to the variant
with the highest count.
alpha_phase_id
Index of the alpha phase in the EBSD map.
"""
alpha_grains = [grain for grain in ebsd_map
if grain.phaseID == alpha_phase_id]
if mode == "modal":
assigned_variants = modal_variant(alpha_grains)
else:
raise NotImplementedError(f"Mode '{mode}' is not a recognised "
f"way to assign variants.")
for grain, assigned_variant in zip(alpha_grains, assigned_variants):
if assigned_variant >= 0:
parent_beta_ori = grain.beta_oris[assigned_variant]
else:
parent_beta_ori = None
grain.assigned_variant = assigned_variant
grain.parent_beta_ori = parent_beta_ori
print("Assignment of beta variants complete.")
def construct_variant_map(
ebsd_map: ebsd.Map,
alpha_phase_id: int = 0
) -> np.ndarray:
alpha_grains = (grain for grain in ebsd_map
if grain.phaseID == alpha_phase_id)
all_lists = ((grain.grainID, grain.assigned_variant) for grain in alpha_grains)
grain_ids, assigned_variants = zip(*all_lists)
# points not part of a grain or other phases (-2) and
# those that were not reconstructed (-1)
return ebsd_map.grainDataToMapData(
assigned_variants, grainIds=grain_ids, bg=-2
)
def construct_beta_quat_array(
ebsd_map: ebsd.Map,
alpha_phase_id: int = 0,
variant_map: np.ndarray = None,
) -> np.ndarray:
"""Construct
Parameters
----------
ebsd_map:
EBSD map to assign the beta variants for.
alpha_phase_id
Index of the alpha phase in the EBSD map.
"""
if variant_map is None:
variant_map = construct_variant_map(ebsd_map, alpha_phase_id)
transformations = []
for sym in unq_hex_syms:
transformations.append(burg_trans * sym.conjugate)
trans_comps = Quat.extract_quat_comps(transformations)
trans_comps = trans_comps[:, variant_map[variant_map >= 0]]
quat_comps = Quat.extract_quat_comps(ebsd_map.quatArray[variant_map >= 0])
quat_comps_beta = np.empty_like(quat_comps)
# transformations[variant] * quat
quat_comps_beta[0, :] = (trans_comps[0, :] * quat_comps[0, :]
- trans_comps[1, :] * quat_comps[1, :]
- trans_comps[2, :] * quat_comps[2, :]
- trans_comps[3, :] * quat_comps[3, :])
quat_comps_beta[1, :] = (trans_comps[1, :] * quat_comps[0, :]
+ trans_comps[0, :] * quat_comps[1, :]
- trans_comps[3, :] * quat_comps[2, :]
+ trans_comps[2, :] * quat_comps[3, :])
quat_comps_beta[2, :] = (trans_comps[2, :] * quat_comps[0, :]
+ trans_comps[0, :] * quat_comps[2, :]
- trans_comps[1, :] * quat_comps[3, :]
+ trans_comps[3, :] * quat_comps[1, :])
quat_comps_beta[3, :] = (trans_comps[3, :] * quat_comps[0, :]
+ trans_comps[0, :] * quat_comps[3, :]
- trans_comps[2, :] * quat_comps[1, :]
+ trans_comps[1, :] * quat_comps[2, :])
# swap into positive hemisphere if required
quat_comps_beta[:, quat_comps_beta[0, :] < 0] *= -1
beta_quat_array = np.empty_like(ebsd_map.quatArray)
beta_quat_array[variant_map < 0] = Quat(1, 0, 0, 0)
for i, idx in enumerate(zip(*np.where(variant_map >= 0))):
beta_quat_array[idx] = Quat(quat_comps_beta[:, i])
return beta_quat_array
def create_beta_ebsd_map(
ebsd_map: ebsd.Map,
mode: str = 'only_beta',
beta_quat_array: np.ndarray = None,
variant_map: np.array = None,
alpha_phase_id: int = 0,
beta_phase_id: int = 1,
) -> ebsd.Map:
"""
Parameters
----------
ebsd_map
mode
How to copy data from the input map
'alone': Only include the reconstructed beta
'append': Append reconstructed beta to present beta phase
'add': Create a new phase for reconstructed beta
beta_quat_array
variant_map
alpha_phase_id
beta_phase_id
"""
if variant_map is None:
variant_map = construct_variant_map(
ebsd_map, alpha_phase_id=alpha_phase_id
)
if beta_quat_array is None:
beta_quat_array = construct_beta_quat_array(
ebsd_map, variant_map=variant_map
)
if mode == 'alone':
# Create map with only the reconstructed beta
new_phase = copy.copy(ebsd_map.phases[beta_phase_id])
new_phase.name += " (recon)"
phases = [new_phase]
out_phase_array = np.zeros_like(ebsd_map.phaseArray)
out_phase_array[variant_map >= 0] = 1
out_quat_array = beta_quat_array
elif mode == 'append':
# Append reconstructed beta to original beta phase
phases = copy.copy(ebsd_map.phases)
out_phase_array = np.copy(ebsd_map.phaseArray)
out_phase_array[variant_map >= 0] = beta_phase_id + 1
out_quat_array = np.where(variant_map >= 0, beta_quat_array,
ebsd_map.quatArray)
elif mode == 'add':
# Create addition phase for the reconstructed beta
phases = copy.copy(ebsd_map.phases)
new_phase = copy.copy(ebsd_map.phases[beta_phase_id])
new_phase.name += " (recon)"
phases.append(new_phase)
out_phase_array = np.copy(ebsd_map.phaseArray)
out_phase_array[variant_map >= 0] = ebsd_map.numPhases + 1
out_quat_array = np.where(variant_map >= 0, beta_quat_array,
ebsd_map.quatArray)
else:
raise ValueError(f"Unknown beta map construction mode '{mode}'")
out_euler_array = np.zeros((3,) + ebsd_map.shape)
for i in range(ebsd_map.yDim):
for j in range(ebsd_map.xDim):
out_euler_array[:, i, j] = out_quat_array[i, j].eulerAngles()
beta_ebsd_data = {
'stepSize': ebsd_map.stepSize,
'phases': phases,
'phase': out_phase_array,
'eulerAngle': out_euler_array,
'bandContrast': ebsd_map.bandContrastArray
}
# TODO: Change so quats can be loaded instead of going via Euler angles
beta_map = ebsd.Map(beta_ebsd_data, dataType="PythonDict")
beta_map.quatArray = out_quat_array
return beta_map
def do_reconstruction(
ebsd_map: ebsd.Map,
mode: str = 'average',
burg_tol: float = 5,
ori_tol: float = 3,
alpha_phase_id: int = 0,
beta_phase_id: int = 1
):
"""Apply beta reconstruction to a ebsd map object.
The reconstructed beta map is stored directly in the ebsd map (this
should probably change)
Parameters
----------
ebsd_map:
EBSD map to apply reconstruction to
mode
How to perform reconstruction
'average': grain average orientations
'boundary': grain boundary orientations
'beta': retained beta
burg_tol
Maximum deviation from the Burgers relation to allow (degrees)
ori_tol: float
Maximum deviation from a beta orientation (degrees)
alpha_phase_id: int
Index of the alpha phase in the EBSD map.
beta_phase_id: int
Index of the beta phase in the EBSD map.
"""
# this is the only function that interacts with the ebsd map/grain objects
alpha_grains = [grain for grain in ebsd_map
if grain.phaseID == alpha_phase_id]
first = True
for grain in tqdm(alpha_grains):
beta_oris = calc_beta_oris(grain.refOri)
variant_count = np.zeros(6, dtype=int)
if mode == 'boundary':
if first:
print("Using boundary mode.")
first = False
possible_beta_oris, beta_deviations, alpha_oris = \
calc_beta_oris_from_boundary_misori(
grain, ebsd_map.neighbourNetwork, ebsd_map.quatArray,
alpha_phase_id, burg_tol=burg_tol
)
for possible_beta_ori, beta_deviation, alpha_ori in zip(
possible_beta_oris, beta_deviations, alpha_oris):
beta_oris_l = calc_beta_oris(alpha_ori)
variant_count += count_beta_variants(
beta_oris_l, [possible_beta_ori], ori_tol
)
elif mode == 'beta':
if first:
print("Using beta mode.")
first = False
neighbour_grains = ebsd_map.neighbourNetwork.neighbors(grain)
neighbour_oris = [[grain.refOri] for grain in neighbour_grains
if grain.phaseID == beta_phase_id]
possible_beta_oris = neighbour_oris
beta_deviations = [0.] * len(neighbour_oris)
variant_count += count_beta_variants(
beta_oris, possible_beta_oris, ori_tol
)
elif mode == 'average':
if first:
print("Using average mode.")
first = False
neighbour_grains = ebsd_map.neighbourNetwork.neighbors(grain)
neighbour_oris = [grain.refOri for grain in neighbour_grains
if grain.phaseID == alpha_phase_id]
# determine the possible beta orientations based on misorientation
# between neighbouring alpha grains
possible_beta_oris, beta_deviations = calc_beta_oris_from_misori(
grain.refOri, neighbour_oris, burg_tol=burg_tol
)
variant_count += count_beta_variants(
beta_oris, possible_beta_oris, ori_tol
)
else:
raise ValueError(f"Unknown reconstruction mode '{mode}'")
# save results in the grain objects
grain.beta_oris = beta_oris
grain.possible_beta_oris = possible_beta_oris
grain.beta_deviations = beta_deviations
grain.variant_count = variant_count
| LightForm-group/beta-reconstruction | beta_reconstruction/reconstruction.py | reconstruction.py | py | 25,051 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "defdap.quat.Quat",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "beta_reconstruction.crystal_relations.unq_hex_syms",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "beta_reconstruction.crystal_relations.burg_trans",
"line_number": 34,
... |
26777282377 | import math
import torch
import numpy as np
from torch import autograd, optim
from torch.distributions.multivariate_normal import MultivariateNormal
import scipy.optimize
from tqdm import tqdm
def get_schedule(num, rad=4):
if num == 1:
return np.array([0.0, 1.0])
t = np.linspace(-rad, rad, num)
s = 1.0 / (1.0 + np.exp(-t))
return (s - np.min(s)) / (np.max(s) - np.min(s))
def differentiable_annealed_importance_sampling(s:torch.Tensor, log_likelihood, log_q, n_steps,
step_size, partial=False, gamma=0.9, mass_matrix=None, clip_grad=None,
lrates=None, betas=None, block_grad=False, is_train=True, rng:np.random.RandomState=None):
"""
s: partcle state: n_particles x d
"""
n_particles = s.shape[:-1]
dim = s.shape[-1]
if n_steps == 0:
return - log_q(s) + log_likelihood(s), s
# if lrates is None:
# lrates = step_size * torch.ones(n_steps+1, device=s.device)
if (type(step_size) is torch.Tensor or type(step_size) is np.ndarray) and len(step_size.shape) > 0:
assert step_size.shape == s.shape[:-1]
step_size = step_size[..., None]
if betas is None:
# betas = torch.linspace(1.0/n_steps, 1.0, n_steps, device=s.device)
betas = get_schedule(n_steps+1)
if mass_matrix is None:
mass_matrix = torch.eye(dim, device=s.device)
pi_mean = torch.zeros(dim, device=s.device)
pi = MultivariateNormal(pi_mean, mass_matrix)
inverse_mass_matrix = torch.inverse(mass_matrix)
# s.requires_grad = True
def log_annealed_prob(beta, s: torch.Tensor):
return (1 - beta) * log_q(s) + beta * log_likelihood(s)
def grad_log_annealed_prob(beta, s):
''' it's important to set create_graph=True '''
with torch.enable_grad():
s.requires_grad_()
grad = autograd.grad(log_annealed_prob(beta, s).sum(), s, create_graph=is_train)[0]
if clip_grad is not None:
max_ = torch.prod(torch.tensor(s.shape)) * clip_grad # last dimension of mu_z
grad = torch.clamp(grad, -max_, max_)
return grad
# sample initial momentum
def pi_sample(n_particles):
if rng is None:
eps = pi.sample(n_particles)
else:
eps = torch.tensor(rng.multivariate_normal(pi_mean, mass_matrix, n_particles),
dtype=s.dtype, device=s.device)
return eps
v = pi_sample(n_particles)
with torch.set_grad_enabled(is_train):
elbo = - log_q(s)
for k in range(1, n_steps+1):
assert not torch.any(s.isnan()), "Current state has nan values"
elbo = elbo - pi.log_prob(v)
# leapfrog
s = s + step_size / 2 * v @ inverse_mass_matrix
v = v + step_size * grad_log_annealed_prob(betas[k], s)
s = s + step_size / 2 * v @ inverse_mass_matrix
elbo = elbo + pi.log_prob(v)
if partial:
# partial_refreshment
v = gamma * v + math.sqrt(1 - math.pow(gamma, 2)) * pi_sample(n_particles)
else:
v = pi_sample(n_particles)
elbo = elbo + log_likelihood(s)
return elbo, s
def leapfrogs_and_bounds_optlr(s, log_likelihood, log_q, n_steps,
step_size, partial=False, gamma=0.9, mass_matrix=None):
init_log_lrates = math.log(step_size) * np.ones(n_steps)
bounds = scipy.optimize.Bounds(-np.infty, -1.0)
# log_lrates = torch.tensor(llrates_np, dtype=s.dtype, device=s.device, requires_grad=True)
def func_fn(log_lrates):
log_lrates = torch.tensor(log_lrates, dtype=s.dtype, device=s.device, requires_grad=True)
elbo, _ = differentiable_annealed_importance_sampling(s, log_likelihood, log_q, n_steps,
step_size, partial, gamma, mass_matrix, lrates=torch.exp(log_lrates))
return -elbo.sum().data.cpu().numpy()
def grad_fn(log_lrates):
log_lrates = torch.tensor(log_lrates, dtype=s.dtype, device=s.device, requires_grad=True)
elbo, _ = differentiable_annealed_importance_sampling(s, log_likelihood, log_q, n_steps,
step_size, partial, gamma, mass_matrix, lrates=torch.exp(log_lrates))
loss = -elbo.sum()
return autograd.grad(loss, log_lrates)[0].data.cpu().numpy().astype(np.float64)
res = scipy.optimize.minimize(func_fn, init_log_lrates, jac=grad_fn, bounds=bounds)
log_lrates = torch.tensor(res.x, dtype=s.dtype, device=s.device)
return differentiable_annealed_importance_sampling(s, log_likelihood, log_q, n_steps,
step_size, partial, gamma, mass_matrix, lrates=torch.exp(log_lrates))
| Lucas-Florin/dais_np | src/neural_process/dais.py | dais.py | py | 4,698 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 1... |
4247894925 | from datetime import timedelta
from airflow import DAG
from airflow.models import Param
from airflow.operators.dummy_operator import DummyOperator
from common.operators.gce import (
StartGCEOperator,
StopGCEOperator,
CloneRepositoryGCEOperator,
SSHGCEOperator,
)
from common.utils import get_airflow_schedule
from airflow.providers.google.cloud.transfers.bigquery_to_gcs import (
BigQueryToGCSOperator,
)
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryExecuteQueryOperator,
)
from airflow.providers.slack.operators.slack_webhook import SlackWebhookOperator
from common import macros
from common.alerts import task_fail_slack_alert
from common.config import (
DAG_FOLDER,
ENV_SHORT_NAME,
MLFLOW_BUCKET_NAME,
SLACK_CONN_ID,
SLACK_CONN_PASSWORD,
MLFLOW_URL,
BIGQUERY_RAW_DATASET,
BIGQUERY_TMP_DATASET,
)
from dependencies.ml.utils import create_algo_training_slack_block
from datetime import datetime
from jobs.ml.constants import IMPORT_TRAINING_SQL_PATH
DATE = "{{ ts_nodash }}"
# Environment variables to export before running commands
dag_config = {
"STORAGE_PATH": f"gs://{MLFLOW_BUCKET_NAME}/algo_training_{ENV_SHORT_NAME}/algo_training_two_towers_{DATE}",
"BASE_DIR": "data-gcp/jobs/ml_jobs/algo_training",
"MODEL_DIR": "two_towers_model",
"TRAIN_DIR": "/home/airflow/train",
"EXPERIMENT_NAME": f"algo_training_qpi_v1.1_{ENV_SHORT_NAME}",
}
# Params
train_params = {
"config_file_name": "user-qpi-features",
"batch_size": 8192,
"validation_steps_ratio": 0.1 if ENV_SHORT_NAME == "prod" else 0.4,
"embedding_size": 64,
"train_set_size": 0.95 if ENV_SHORT_NAME == "prod" else 0.8,
"event_day_number": {"prod": 14, "dev": 365, "stg": 14}[ENV_SHORT_NAME],
}
gce_params = {
"instance_name": f"algo-training-qpi-{ENV_SHORT_NAME}",
"instance_type": {
"dev": "n1-standard-2",
"stg": "n1-standard-8",
"prod": "n1-standard-32",
},
}
schedule_dict = {"prod": "0 12 * * 3", "dev": None, "stg": "0 12 * * 2"}
default_args = {
"start_date": datetime(2023, 3, 17),
"on_failure_callback": task_fail_slack_alert,
"retries": 0,
"retry_delay": timedelta(minutes=2),
}
with DAG(
"algo_training_two_towers_qpi",
default_args=default_args,
description="Custom training job",
schedule_interval=get_airflow_schedule(schedule_dict[ENV_SHORT_NAME]),
catchup=False,
dagrun_timeout=timedelta(minutes=1440),
user_defined_macros=macros.default,
template_searchpath=DAG_FOLDER,
params={
"branch": Param(
default="production" if ENV_SHORT_NAME == "prod" else "master",
type="string",
),
"config_file_name": Param(
default=train_params["config_file_name"],
type="string",
),
"batch_size": Param(
default=str(train_params["batch_size"]),
type="string",
),
"validation_steps_ratio": Param(
default=str(train_params["validation_steps_ratio"]),
type="string",
),
"embedding_size": Param(
default=str(train_params["embedding_size"]),
type="string",
),
"train_set_size": Param(
default=str(train_params["train_set_size"]),
type="string",
),
"event_day_number": Param(
default=str(train_params["event_day_number"]),
type="string",
),
"input_type": Param(
default="enriched_clicks",
type="string",
),
"instance_type": Param(
default=gce_params["instance_type"][ENV_SHORT_NAME],
type="string",
),
"instance_name": Param(
default=gce_params["instance_name"]
+ "-"
+ train_params["config_file_name"],
type="string",
),
"run_name": Param(
default=train_params["config_file_name"], type=["string", "null"]
),
},
) as dag:
start = DummyOperator(task_id="start", dag=dag)
import_tables = {}
for table in [
"recommendation_user_features",
"recommendation_item_features",
"training_data_enriched_clicks",
]:
import_tables[table] = BigQueryExecuteQueryOperator(
task_id=f"import_{table}",
sql=(IMPORT_TRAINING_SQL_PATH / f"{table}.sql").as_posix(),
write_disposition="WRITE_TRUNCATE",
use_legacy_sql=False,
destination_dataset_table=f"{BIGQUERY_RAW_DATASET}.{table}",
dag=dag,
)
# The params.input_type tells the .sql files which table to take as input
for dataset in ["training", "validation", "test"]:
task = BigQueryExecuteQueryOperator(
task_id=f"import_tmp_{dataset}_table",
sql=(
IMPORT_TRAINING_SQL_PATH / f"recommendation_{dataset}_data.sql"
).as_posix(),
write_disposition="WRITE_TRUNCATE",
use_legacy_sql=False,
destination_dataset_table=f"{BIGQUERY_TMP_DATASET}.{DATE}_recommendation_{dataset}_data",
dag=dag,
)
import_tables[dataset] = task
gce_instance_start = StartGCEOperator(
task_id="gce_start_task",
preemptible=False,
instance_name="{{ params.instance_name }}",
instance_type="{{ params.instance_type }}",
accelerator_types=[{"name": "nvidia-tesla-t4", "count": 1}],
retries=2,
labels={"job_type": "ml"},
)
fetch_code = CloneRepositoryGCEOperator(
task_id="fetch_code",
instance_name="{{ params.instance_name }}",
python_version="3.10",
command="{{ params.branch }}",
retries=2,
)
install_dependencies = SSHGCEOperator(
task_id="install_dependencies",
instance_name="{{ params.instance_name }}",
base_dir=dag_config["BASE_DIR"],
command="pip install -r requirements.txt --user",
dag=dag,
)
store_data = {}
for split in ["training", "validation", "test"]:
store_data[split] = BigQueryToGCSOperator(
task_id=f"store_{split}_data",
source_project_dataset_table=f"{BIGQUERY_TMP_DATASET}.{DATE}_recommendation_{split}_data",
destination_cloud_storage_uris=f"{dag_config['STORAGE_PATH']}/raw_recommendation_{split}_data/data-*.parquet",
export_format="PARQUET",
dag=dag,
)
store_data["bookings"] = BigQueryToGCSOperator(
task_id=f"store_bookings_data",
source_project_dataset_table=f"{BIGQUERY_RAW_DATASET}.training_data_bookings",
destination_cloud_storage_uris=f"{dag_config['STORAGE_PATH']}/bookings/data-*.parquet",
export_format="PARQUET",
dag=dag,
)
preprocess_data = {}
for split in ["training", "validation", "test"]:
preprocess_data[split] = SSHGCEOperator(
task_id=f"preprocess_{split}",
instance_name="{{ params.instance_name }}",
base_dir=dag_config["BASE_DIR"],
environment=dag_config,
command=f"PYTHONPATH=. python {dag_config['MODEL_DIR']}/preprocess.py "
"--config-file-name {{ params.config_file_name }} "
f"--input-dataframe-file-name raw_recommendation_{split}_data "
f"--output-dataframe-file-name recommendation_{split}_data",
dag=dag,
)
train = SSHGCEOperator(
task_id="train",
instance_name="{{ params.instance_name }}",
base_dir=dag_config["BASE_DIR"],
environment=dag_config,
command=f"PYTHONPATH=. python {dag_config['MODEL_DIR']}/train.py "
"--config-file-name {{ params.config_file_name }} "
f"--experiment-name {dag_config['EXPERIMENT_NAME']} "
"--batch-size {{ params.batch_size }} "
"--validation-steps-ratio {{ params.validation_steps_ratio }} "
"--embedding-size {{ params.embedding_size }} "
"--seed {{ ds_nodash }} "
"--run-name {{ params.run_name }}",
dag=dag,
)
evaluate = SSHGCEOperator(
task_id="evaluate",
instance_name="{{ params.instance_name }}",
base_dir=dag_config["BASE_DIR"],
environment=dag_config,
command=f"PYTHONPATH=. python evaluate.py "
f"--experiment-name {dag_config['EXPERIMENT_NAME']} "
"--config-file-name {{ params.config_file_name }}",
dag=dag,
)
gce_instance_stop = StopGCEOperator(
task_id="gce_stop_task", instance_name="{{ params.instance_name }}"
)
send_slack_notif_success = SlackWebhookOperator(
task_id="send_slack_notif_success",
http_conn_id=SLACK_CONN_ID,
webhook_token=SLACK_CONN_PASSWORD,
blocks=create_algo_training_slack_block(
dag_config["EXPERIMENT_NAME"], MLFLOW_URL, ENV_SHORT_NAME
),
username=f"Algo trainer robot - {ENV_SHORT_NAME}",
icon_emoji=":robot_face:",
)
(
start
>> [
import_tables["recommendation_user_features"],
import_tables["recommendation_item_features"],
]
>> import_tables["training_data_enriched_clicks"]
>> import_tables["training"]
>> [import_tables["validation"], import_tables["test"]]
>> gce_instance_start
>> fetch_code
>> install_dependencies
>> [
store_data["training"],
store_data["validation"],
store_data["test"],
store_data["bookings"],
]
>> preprocess_data["training"]
>> preprocess_data["validation"]
>> preprocess_data["test"]
>> train
>> evaluate
>> gce_instance_stop
>> send_slack_notif_success
)
| pass-culture/data-gcp | orchestration/dags/jobs/ml/algo_training_qpi.py | algo_training_qpi.py | py | 9,855 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "common.config.MLFLOW_BUCKET_NAME",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "common.config.ENV_SHORT_NAME",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "common.config.ENV_SHORT_NAME",
"line_number": 47,
"usage_type": "name"
},... |
6993276560 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
T = 200
h = 1e-2
t = np.arange(start=0, stop=T + h, step=h)
bet, gam = 0.15, 1 / 50
# todo: zmienic poziej na randoma
# S_pocz = np.random.uniform(0.7, 1)
S_start = 0.8
I_start = 1 - S_start
R_start = 0
N = S_start + I_start + R_start # is const
# using odeint
# ---------------------------------------------------------------------------------------------------------------------#
def two_diff_ode_equation(state, t, bet, gam):
    """Right-hand side of the reduced SIR system (S and I only) for odeint.

    state -- [S, I], current susceptible and infected fractions
    t     -- time (unused, but required by the odeint callback signature)
    bet   -- infection rate beta
    gam   -- recovery rate gamma
    Returns [dS/dt, dI/dt]; R follows from conservation (module-level N = S + I + R).
    """
    susceptible, infected = state
    infection_flux = bet * infected * susceptible / N
    return [-infection_flux, infection_flux - gam * infected]
def one_diff_equation_ode(state, t, bet, gam):
    """Single-ODE form of the SIR model for odeint.

    Uses the first integral I = gamma/beta * ln(S) - S + C (with C fixed by
    the module-level initial condition) to eliminate I, leaving one equation
    in S alone.
    """
    S = state[0]
    # Integration constant fixed by the initial state (module-level globals).
    const_c = I_start - gam / bet * np.log(S_start) + S_start
    implied_infected = gam / bet * np.log(S) - S + const_c
    return [-bet / N * S * implied_infected]
def calc_R(S_arr, I_arr):
    """Recover R(t) from conservation: R = N - S - I at every time step.

    S_arr, I_arr -- sequences aligned with the module-level time grid `t`
                    (callers pass odeint columns of length len(t)).
    Returns an ndarray of length len(t).
    """
    # Vectorized replacement of the original per-element Python loop;
    # slice to len(t) so the result length matches the original
    # `for i in range(len(t))` fill.
    S = np.asarray(S_arr)[:len(t)]
    I = np.asarray(I_arr)[:len(t)]
    return N - S - I
def calc_I(S_arr):
    """Recover I(t) from the SIR first integral I = gamma/beta*ln(S) - S + C.

    C is the integration constant fixed by the module-level initial
    condition (S_start, I_start). Returns an ndarray of length len(t).
    """
    C = I_start - gam / bet * np.log(S_start) + S_start  # integration constant
    # Vectorized replacement of the original per-element Python loop;
    # slice to len(t) to match the original result length.
    S = np.asarray(S_arr)[:len(t)]
    return gam / bet * np.log(S) - S + C
def two_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot S and I from the two-equation odeint solution; derive R.

    sym -- odeint output with columns [S, I]; R is recovered via calc_R.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # S and I come straight from the solver columns.
    for idx, curve_label in enumerate(labels[:-1]):
        ax.plot(t, sym[:, idx], label=curve_label)
    # R is not integrated; recover it from conservation.
    recovered = calc_R(sym[:, 0], sym[:, 1])
    ax.plot(t, recovered, label=labels[2])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def one_equation_ode_plot(t, sym, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot the single-ODE odeint solution: S from the solver, I and R derived.

    Bug fix: the I curve was labeled labels[2] ('R'), so the legend showed
    'R' twice and never showed 'I'; it now uses labels[1].
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # S is the only integrated variable.
    ax.plot(t, sym[:, 0], label=labels[0])
    # I follows from the SIR first integral.
    I_arr = calc_I(sym[:, 0])
    ax.plot(t, I_arr, label=labels[1])  # was labels[2] -> duplicate 'R' in legend
    # R follows from conservation N = S + I + R.
    ax.plot(t, calc_R(sym[:, 0], I_arr), label=labels[2])
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def two_equation_ode_main():
    """Integrate the two-equation SIR system with odeint and plot the result."""
    initial = (S_start, I_start)
    trajectory = odeint(two_diff_ode_equation, initial, t, args=(bet, gam))
    two_equation_ode_plot(t, trajectory, labels=['S', 'I', 'R'])
def one_equation_ode_main():
    """Integrate the single-ODE SIR formulation with odeint and plot it."""
    trajectory = odeint(one_diff_equation_ode, S_start, t, args=(bet, gam))
    one_equation_ode_plot(t, trajectory, labels=['S', 'I', 'R'])
# using manual
# ---------------------------------------------------------------------------------------------------------------------#
# Preallocated state arrays for the manual (Euler) integrators below;
# index i corresponds to time t[i], with t[0] holding the initial condition.
S = np.zeros(len(t))
S[0] = S_start
I = np.zeros(len(t))
I[0] = I_start
R = np.zeros(len(t))
R[0] = R_start
def two_diff_equation_manual():
    """Fill the module-level S, I, R arrays by explicit Euler stepping.

    NOTE(review): the I update uses the freshly computed S[i + 1] (a
    semi-implicit / Gauss-Seidel style step) rather than S[i] — presumably
    intentional; confirm against the intended integration scheme.
    R is recovered each step from conservation (N = S + I + R).
    """
    for i in range(t.size - 1):
        S[i + 1] = S[i] + h * (- bet * I[i] * S[i] / N)
        I[i + 1] = I[i] + h * (bet * I[i] * S[i + 1] / N - gam * I[i])
        R[i + 1] = N - S[i + 1] - I[i + 1]
def one_diff_equation_manual():
    """Euler-integrate the single-ODE SIR form, recovering I and R per step.

    C is the integration constant of the first integral
    I = gamma/beta * ln(S) - S + C, fixed by the initial condition.
    """
    C = I_start - gam / bet * np.log(S_start) + S_start  # C - const
    for i in range(t.size - 1):
        # Euler step on S only; I then follows from the first integral,
        # and R from conservation (N = S + I + R).
        S[i + 1] = S[i] + h * (-bet / N * S[i] * (gam / bet * np.log(S[i]) - S[i] + C))
        I[i + 1] = gam / bet * np.log(S[i + 1]) - S[i + 1] + C
        R[i + 1] = N - S[i + 1] - I[i + 1]
def equation_man_plot(t, sirList, labelt='$t$', labels=['S', 'I', 'R']):
    """Plot the manually integrated S, I, R trajectories on one axis."""
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    # One curve per state array, paired with its legend label.
    for series, curve_label in zip(sirList, labels):
        ax.plot(t, series, label=curve_label)
    ax.set_xlabel(labelt, fontsize=14)
    ax.set_ylabel('stan', fontsize=14)
    ax.set_ylim([0, 1])
    ax.legend()
    plt.show()
def two_equation_man_main():
    """Run the manual two-equation Euler integrator, then plot S, I, R."""
    two_diff_equation_manual()
    equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
def one_equation_man_main():
    """Run the manual single-ODE Euler integrator, then plot S, I, R."""
    one_diff_equation_manual()
    equation_man_plot(t, [S, I, R], labels=['S', 'I', 'R'])
if __name__ == "__main__":
    # Uncomment the variant to run; only the manual two-equation scheme
    # is active by default.
    # one_equation_ode_main()
    # one_equation_man_main()
    # two_equation_ode_main()
    two_equation_man_main()
    exit(0)
| Ukasz09/Machine-learning | SIR_Model_Spread_of_Disease/SIR.py | SIR.py | py | 4,130 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.arange",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
... |
9191488071 | import numpy as np
import os
import argparse
#import torch
#import torch.nn as nn
from sklearn.linear_model import LinearRegression
import math
import random
from utils import *
'''
def quantize(data,pred,error_bound):
radius=32768
diff = data - pred
quant_index = (int) (abs(diff)/ error_bound) + 1
#print(quant_index)
if (quant_index < radius * 2) :
quant_index =quant_index>> 1
half_index = quant_index
quant_index =quant_index<< 1
#print(quant_index)
quant_index_shifted=0
if (diff < 0) :
quant_index = -quant_index
quant_index_shifted = radius - half_index
else :
quant_index_shifted = radius + half_index
decompressed_data = pred + quant_index * error_bound
#print(decompressed_data)
if abs(decompressed_data - data) > error_bound :
#print("b")
return 0,data
else:
#print("c")
data = decompressed_data
return quant_index_shifted,data
else:
#print("a")
return 0,data
'''
# Command-line options for the multilevel selective 2-D compressor.
parser = argparse.ArgumentParser()
# -e: relative error bound; scaled by the data value range after loading.
parser.add_argument('--error','-e',type=float,default=1e-3)
parser.add_argument('--input','-i',type=str)
parser.add_argument('--output','-o',type=str)
parser.add_argument('--quant','-q',type=str,default="ml2_q.dat")
parser.add_argument('--unpred','-u',type=str,default="ml2_u.dat")
# -s: anchor grid stride; values <= 0 disable the multilevel interpolation path.
parser.add_argument('--max_step','-s',type=int,default=-1)
parser.add_argument('--min_coeff_level','-cl',type=int,default=99)
# Per-level error-bound tightening: either a single geometric rate (-r) or
# an explicit per-level list (--rlist), capped by --maximum_rate.
parser.add_argument('--rate','-r',type=float,default=1.0)
parser.add_argument('--rlist',type=float,default=0.0,nargs="+")
parser.add_argument('--maximum_rate','-m',type=float,default=10.0)
# Feature toggles for the candidate predictors tried at each level.
parser.add_argument('--cubic','-c',type=int,default=1)
parser.add_argument('--multidim','-d',type=int,default=1)
parser.add_argument('--lorenzo_fallback_check','-l',type=int,default=-1)
parser.add_argument('--fallback_sample_ratio','-f',type=float,default=0.01)
#parser.add_argument('--level_rate','-lr',type=float,default=1.0)
parser.add_argument('--anchor_rate','-a',type=float,default=0.0)
# Input grid dimensions (row-major 2-D float32 file).
parser.add_argument('--size_x','-x',type=int,default=1800)
parser.add_argument('--size_y','-y',type=int,default=3600)
#parser.add_argument('--level','-l',type=int,default=2)
#parser.add_argument('--noise','-n',type=bool,default=False)
#parser.add_argument('--intercept','-t',type=bool,default=False)
args = parser.parse_args()
size_x=args.size_x
size_y=args.size_y
# Load the raw float32 grid to compress.
array=np.fromfile(args.input,dtype=np.float32).reshape((size_x,size_y))
# Keep an untouched copy only when the Lorenzo-fallback check needs it.
if args.lorenzo_fallback_check>=0:
    orig_array=np.copy(array)
# Absolute error bound = relative bound x value range of the data.
rng=(np.max(array)-np.min(array))
error_bound=args.error*rng
max_step=args.max_step
rate=args.rate
# Build the per-level error-tightening list, left-padding with the first
# entry until one rate exists per interpolation level.
if args.rlist!=0:
    max_level=int(math.log(max_step,2))
    rate_list=args.rlist
    # NOTE(review): --rlist is parsed with type=float, so elements should
    # never be int; this isinstance branch looks unreachable — confirm.
    if isinstance(rate_list,int):
        rate_list=[rate_list]
    while len(rate_list)<max_level:
        rate_list.insert(0,rate_list[0])
else:
    rate_list=None
# Output accumulators: quant codes, unpredictable raw values, and the
# quant codes produced by the Lorenzo border sweep at the end.
qs=[]
us=[]
lorenzo_qs=[]
min_coeff_level=args.min_coeff_level
#anchor=args.anchor
if max_step>0:
max_level=int(math.log(max_step,2))
anchor_rate=args.anchor_rate
if anchor_rate>0:
anchor_eb=error_bound/anchor_rate
print("Anchor eb:%f" % anchor_eb)
if max_level>=min_coeff_level:
reg_xs=[]
reg_ys=[]
for x in range(max_step,size_x,max_step):
for y in range(max_step,size_y,max_step):
reg_xs.append(np.array([array[x-max_step][y-max_step],array[x-max_step][y],array[x][y-max_step]],dtype=np.float64))
reg_ys.append(array[x][y])
res=LinearRegression(fit_intercept=True).fit(reg_xs, reg_ys)
coef=res.coef_
ince=res.intercept_
for x in range(0,size_x,max_step):
for y in range(0,size_y,max_step):
orig=array[x][y]
if x and y and max_level>=min_coeff_level:
reg_block=np.array([array[x-max_step][y-max_step],array[x-max_step][y],array[x][y-max_step]],dtype=np.float64)
pred=np.dot(reg_block,coef)+ince
else:
f_01=array[x-max_step][y] if x else 0
f_10=array[x][y-max_step] if y else 0
f_00=array[x-max_step][y-max_step] if x and y else 0
pred=f_01+f_10-f_00
q,decomp=quantize(orig,pred,anchor_eb)
qs.append(q)
if q==0:
us.append(decomp)
array[x][y]=decomp
else:
anchor_eb=0
else:
pass#todo,some preparations before level start
#print(len(qs))
last_x=((size_x-1)//max_step)*max_step
last_y=((size_y-1)//max_step)*max_step
step=max_step//2
level=max_level-1
q_start=len(qs)
u_start=len(us)
cumulated_loss=0.0
while step>0:
cur_qs=[]
cur_us=[]
if rate_list!=None:
cur_eb=error_bound/rate_list[level]
else:
cur_eb=error_bound/min(maximum_rate,(rate**level))
#cur_eb=error_bound/min(args.maximum_rate,(rate**level))
cur_array=np.copy(array[0:last_x+1:step,0:last_y+1:step])
cur_size_x,cur_size_y=cur_array.shape
#print(cur_size_x,cur_size_y)
print("Level %d started. Current step: %d. Current error_bound: %s." % (level,step,cur_eb))
best_preds=None#need to copy
best_absloss=None
best_qs=[]#need to copy
best_us=[]#need to copy
#linear interp
absloss=0
selected_algo="none"
if level>=min_coeff_level:
reg_xs=[]
reg_ys=[]
for x in range(0,cur_size_x,2):
for y in range(1,cur_size_y,2):
reg_xs.append(np.array([cur_array[x][y-1],cur_array[x][y+1]],dtype=np.float64))
reg_ys.append(cur_array[x][y])
res=LinearRegression(fit_intercept=True).fit(reg_xs, reg_ys)
coef=res.coef_
ince=res.intercept_
for x in range(0,cur_size_x,2):
for y in range(1,cur_size_y,2):
if y==cur_size_y-1:
continue
orig=cur_array[x][y]
if level>=min_coeff_level:
pred= np.dot( np.array([cur_array[x][y-1],cur_array[x][y+1]]),coef )+ince
else:
pred=(cur_array[x][y-1]+cur_array[x][y+1])/2
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
if level>=min_coeff_level:
reg_xs=[]
reg_ys=[]
for x in range(1,cur_size_x,2):
for y in range(0,cur_size_y,2):
reg_xs.append(np.array([cur_array[x-1][y],cur_array[x+1][y]],dtype=np.float64))
reg_ys.append(cur_array[x][y])
res=LinearRegression(fit_intercept=True).fit(reg_xs, reg_ys)
coef=res.coef_
ince=res.intercept_
for x in range(1,cur_size_x,2):
for y in range(0,cur_size_y,2):
if x==cur_size_x-1:
continue
orig=cur_array[x][y]
if level>=min_coeff_level:
pred= np.dot( np.array([cur_array[x-1][y],cur_array[x+1][y]]),coef )+ince
else:
pred=(cur_array[x-1][y]+cur_array[x+1][y])/2
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
if level>=min_coeff_level:
md_reg_xs=[]
md_reg_ys=[]
for x in range(1,cur_size_x,2):
for y in range(1,cur_size_y,2):
md_reg_xs.append(np.array([cur_array[x-1][y],cur_array[x+1][y],cur_array[x][y-1],cur_array[x][y+1]],dtype=np.float64))
md_reg_ys.append(cur_array[x][y])
md_res=LinearRegression(fit_intercept=True).fit(md_reg_xs, md_reg_ys)
md_coef=md_res.coef_
md_ince=md_res.intercept_
for x in range(1,cur_size_x,2):
for y in range(1,cur_size_y,2):
if x==cur_size_x-1 or y==cur_size_y-1:
continue
orig=cur_array[x][y]
if level>=min_coeff_level:
pred=np.dot(np.array([cur_array[x-1][y],cur_array[x+1][y],cur_array[x][y-1],cur_array[x][y+1]]),md_coef)+md_ince
else:
pred=(cur_array[x-1][y]+cur_array[x+1][y]+cur_array[x][y-1]+cur_array[x][y+1])/4
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
best_preds=np.copy(cur_array)
best_absloss=absloss
best_qs=cur_qs.copy()
best_us=cur_us.copy()
selected_algo="interp_linear"
#print(len(cur_qs))
#cubic interp
if args.cubic:
#print("cubic")
absloss=0
cur_qs=[]
cur_us=[]
cur_array=np.copy(array[0:last_x+1:step,0:last_y+1:step])#reset cur_array
if level>=min_coeff_level:
reg_xs=[]
reg_ys=[]
for x in range(0,cur_size_x,2):
for y in range(3,cur_size_y,2):
if y+3>=cur_size_y:
continue
reg_xs.append(np.array([cur_array[x][y-3],cur_array[x][y-1],cur_array[x][y+1],cur_array[x][y+3]],dtype=np.float64))
reg_ys.append(cur_array[x][y])
res=LinearRegression(fit_intercept=True).fit(reg_xs, reg_ys)
coef=res.coef_
ince=res.intercept_
for x in range(0,cur_size_x,2):
for y in range(1,cur_size_y,2):
if y==cur_size_y-1:
continue
orig=cur_array[x][y]
if y>=3 and y+3<cur_size_y:
if level>=min_coeff_level:
pred=np.dot(coef,np.array([cur_array[x][y-3],cur_array[x][y-1],cur_array[x][y+1],cur_array[x][y+3]]) )+ince
else:
pred=(-cur_array[x][y-3]+9*cur_array[x][y-1]+9*cur_array[x][y+1]-cur_array[x][y+3])/16
else:
pred=(cur_array[x][y-1]+cur_array[x][y+1])/2
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
if level>=min_coeff_level:
reg_xs=[]
reg_ys=[]
for x in range(3,cur_size_x,2):
for y in range(0,cur_size_y,2):
if x+3>=cur_size_x:
continue
reg_xs.append(np.array([cur_array[x-3][y],cur_array[x-1][y],cur_array[x+1][y],cur_array[x+3][y]],dtype=np.float64))
reg_ys.append(cur_array[x][y])
res=LinearRegression(fit_intercept=True).fit(reg_xs, reg_ys)
coef=res.coef_
ince=res.intercept_
for x in range(1,cur_size_x,2):
for y in range(0,cur_size_y,2):
if x==cur_size_x-1:
continue
orig=cur_array[x][y]
if x>=3 and x+3<cur_size_x:
if level>=min_coeff_level:
pred=np.dot(coef,np.array([cur_array[x-3][y],cur_array[x-1][y],cur_array[x+1][y],cur_array[x+3][y]]) )+ince
else:
pred=(-cur_array[x-3][y]+9*cur_array[x-1][y]+9*cur_array[x+1][y]-cur_array[x+3][y])/16
else:
pred=(cur_array[x-1][y]+cur_array[x+1][y])/2
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
if level>=min_coeff_level:
md_reg_xs=[]
md_reg_ys=[]
for x in range(1,cur_size_x,2):
for y in range(1,cur_size_y,2):
md_reg_xs.append(np.array([cur_array[x-1][y],cur_array[x+1][y],cur_array[x][y-1],cur_array[x][y+1]],dtype=np.float64))
md_reg_ys.append(cur_array[x][y])
md_res=LinearRegression(fit_intercept=True).fit(md_reg_xs, md_reg_ys)
md_coef=md_res.coef_
md_ince=md_res.intercept_
for x in range(1,cur_size_x,2):
for y in range(1,cur_size_y,2):
if x==cur_size_x-1 or y==cur_size_y-1:
continue
orig=cur_array[x][y]
if level>=min_coeff_level:
pred=np.dot(np.array([cur_array[x-1][y],cur_array[x+1][y],cur_array[x][y-1],cur_array[x][y+1]]),md_coef)+md_ince
else:
pred=(cur_array[x-1][y]+cur_array[x+1][y]+cur_array[x][y-1]+cur_array[x][y+1])/4
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
if absloss<best_absloss:
selected_algo="interp_cubic"
best_preds=np.copy(cur_array)
best_absloss=absloss
best_qs=cur_qs.copy()
best_us=cur_us.copy()
if args.multidim:
absloss=0
cur_qs=[]
cur_us=[]
cur_array=np.copy(array[0:last_x+1:step,0:last_y+1:step])#reset cur_array
if level>=min_coeff_level:
md_reg_xs=[]
md_reg_ys=[]
for x in range(1,cur_size_x,2):
for y in range(1,cur_size_y,2):
md_reg_xs.append(np.array([cur_array[x-1][y-1],cur_array[x-1][y+1],cur_array[x+1][y-1],cur_array[x+1][y+1]],dtype=np.float64))
md_reg_ys.append(cur_array[x][y])
md_res=LinearRegression(fit_intercept=True).fit(md_reg_xs, md_reg_ys)
md_coef=md_res.coef_
md_ince=md_res.intercept_
for x in range(1,cur_size_x,2):
for y in range(1,cur_size_y,2):
if x==cur_size_x-1 or y==cur_size_y-1:
continue
orig=cur_array[x][y]
if level>=min_coeff_level:
pred=np.dot(np.array([cur_array[x-1][y-1],cur_array[x-1][y+1],cur_array[x+1][y-1],cur_array[x+1][y+1]]),md_coef)+md_ince
else:
pred=(cur_array[x-1][y-1]+cur_array[x-1][y+1]+cur_array[x+1][y-1]+cur_array[x+1][y+1])/4
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
if level>=min_coeff_level:
md_reg_xs=[]
md_reg_ys=[]
for x in range(0,cur_size_x):
for y in range(1-(x%2),cur_size_y,2):
if x==cur_size_x-1 or y==cur_size_y-1:
continue
md_reg_xs.append(np.array([cur_array[x][y-1],cur_array[x][y+1],cur_array[x-1][y],cur_array[x+1][y]],dtype=np.float64))
md_reg_ys.append(cur_array[x][y])
md_res=LinearRegression(fit_intercept=True).fit(md_reg_xs, md_reg_ys)
md_coef=md_res.coef_
md_ince=md_res.intercept_
for x in range(0,cur_size_x):
for y in range(1-(x%2),cur_size_y,2):
orig=cur_array[x][y]
if x and y and x!=cur_size_x-1 and y!=cur_size_y-1:
if level>=min_coeff_level:
pred=np.dot(md_coef,np.array([cur_array[x][y-1],cur_array[x][y+1],cur_array[x-1][y],cur_array[x+1][y]]))+md_ince
else:
pred=(cur_array[x][y-1]+cur_array[x][y+1]+cur_array[x-1][y]+cur_array[x+1][y])/4
elif x==0 or x==cur_size_x-1:
pred=(cur_array[x][y-1]+cur_array[x][y+1])/2
else:
pred=(cur_array[x-1][y]+cur_array[x+1][y])/2
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
if absloss<best_absloss:
selected_algo="interp_multidim"
best_preds=np.copy(cur_array)
best_absloss=absloss
best_qs=cur_qs.copy()
best_us=cur_us.copy()
#lorenzo
'''
cur_array=np.copy(array[0:last_x+1:step,0:last_y+1:step])#reset cur_array
absloss=0
cur_qs=[]
cur_us=[]
if max_level>=min_coeff_level:
reg_xs=[]
reg_ys=[]
for x in range(cur_size_x):
for y in range(1-(x%2),cur_size_y,2-(x%2)):
if not (x and y):
continue
reg_xs.append(np.array([array[x-1][y-1],array[x-1][y],array[x][y-1]],dtype=np.float64))
reg_ys.append(array[x][y])
res=LinearRegression(fit_intercept=True).fit(reg_xs, reg_ys)
coef=res.coef_
ince=res.intercept_
for x in range(cur_size_x):
for y in range(1-(x%2),cur_size_y,2-(x%2)):
orig=cur_array[x][y]
if 0:#if x and y and max_level>=min_coeff_level:
pred=np.dot(coef,np.array([array[x-1][y-1],array[x-1][y],array[x][y-1]]))+ince
else:
f_01=cur_array[x-1][y] if x else 0
f_10=cur_array[x][y-1] if y else 0
f_00=cur_array[x-1][y-1] if x and y else 0
pred=f_01+f_10-f_00
absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
cur_qs.append(q)
if q==0:
cur_us.append(decomp)
#absloss+=abs(decomp)
cur_array[x][y]=decomp
#print(np.max(np.abs(array[0:last_x+1:step,0:last_y+1:step]-cur_array)))
if absloss<best_absloss:
best_preds=np.copy(cur_array)
best_absloss=absloss
best_qs=cur_qs.copy()
best_us=cur_us.copy()
selected_algo="lorenzo"
'''
#Lorenzo fallback
if level<=args.lorenzo_fallback_check:
absloss=0
#cur_qs=[]
#cur_us=[]
#cur_array=np.copy(array[0:last_x+1:step,0:last_y+1:step])#reset cur_array
cur_orig_array=orig_array[0:last_x+1:step,0:last_y+1:step]
total_points=[(x,y) for x in range(cur_orig_array.shape[0]) for y in range(cur_orig_array.shape[1]) if (max_step<=0 or ((x*step)%max_step!=0 and (y*step)%max_step!=0))]
min_sampled_points=100
if len(total_points)<min_sampled_points:
num_sumples=len(total_points)
sampled_points=total_points
else:
num_sumples=max(min_sampled_points,int(len(total_points)*sample_rate) )
sampled_points=random.sample(total_points,num_sumples)
for x,y in sampled_points:
orig=cur_orig_array[x][y]
f_01=cur_orig_array[x-1][y] if x else 0
if x and args.max_step>0 and ((x-1)*step)%max_step==0 and (y*step)%max_step==0:
f_01+=anchor_eb*(2*np.random.rand()-1)
elif x:
f_01+=cur_eb*(2*np.random.rand()-1)
f_10=cur_orig_array[x][y-1] if y else 0
if y and args.max_step>0 and (x*step)%max_step==0 and ((y-1)*step)%max_step==0:
f_10+=anchor_eb*(2*np.random.rand()-1)
elif y:
f_10+=cur_eb*(2*np.random.rand()-1)
f_00=cur_orig_array[x-1][y-1] if x and y else 0
if x and y and args.max_step>0 and ((x-1)*step)%max_step==0 and ((y-1)*step)%max_step==0:
f_00+=anchor_eb*(2*np.random.rand()-1)
elif x and y:
f_00+=cur_eb*(2*np.random.rand()-1)
pred=f_01+f_10-f_00
absloss+=abs(orig-pred)
#print(absloss*len(total_points)/len(sampled_points))
#print(best_absloss)
#print(cumulated_loss)
if absloss*len(total_points)/len(sampled_points)<best_absloss+cumulated_loss:
selected_algo="lorenzo_fallback"
best_absloss=0
best_preds=array[0:last_x+1:step,0:last_y+1:step]
best_qs=[]
best_us=[]
qs=qs[:q_start]
us=us[:u_start]
for x in range(cur_size_x):
for y in range(cur_size_y):
if max_step>0 and (x*step)%max_step==0 and (y*step)%max_step==0:
#print(x,y)
continue
orig=best_preds[x][y]
f_01=best_preds[x-1][y] if x else 0
f_10=best_preds[x][y-1] if y else 0
f_00=best_preds[x-1][y-1] if x and y else 0
pred=f_01+f_10-f_00
best_absloss+=abs(orig-pred)
q,decomp=quantize(orig,pred,cur_eb)
best_qs.append(q)
if q==0:
best_us.append(decomp)
#absloss+=abs(decomp)
best_preds[x][y]=decomp
#print(len(best_qs))
mean_l1_loss=best_absloss/len(best_qs)
if selected_algo!="lorenzo_fallback":
cumulated_loss+=best_absloss
else:
cumulated_loss=best_absloss
#print(np.max(np.abs(array[0:last_x+1:step,0:last_y+1:step]-best_preds)))
array[0:last_x+1:step,0:last_y+1:step]=best_preds
#if args.lorenzo_fallback_check:
# print(np.max(np.abs(orig_array-array))/rng)
qs+=best_qs
us+=best_us
#print(len(qs))
print ("Level %d finished. Selected algorithm: %s. Mean prediction abs loss: %f." % (level,selected_algo,mean_l1_loss))
step=step//2
level-=1
#print(len(qs))
#print(best_absloss)
#print(cumulated_loss)
def lorenzo_2d(array,x_start,x_end,y_start,y_end):
    """Quantize the sub-rectangle [x_start,x_end) x [y_start,y_end) with a
    2-D Lorenzo predictor, writing decompressed values back into `array`.

    Side effects: appends quant codes to the module-level `lorenzo_qs` and
    unpredictable raw values to `us`; relies on the global `error_bound`
    and on `quantize` (presumably provided by `from utils import *` — the
    local copy above is commented out).
    """
    for x in range(x_start,x_end):
        for y in range(y_start,y_end):
            orig=array[x][y]
            # Lorenzo prediction f(x,y) ~ f(x-1,y) + f(x,y-1) - f(x-1,y-1),
            # with out-of-range neighbors treated as 0.
            f_01=array[x-1][y] if x else 0
            f_10=array[x][y-1] if y else 0
            f_00=array[x-1][y-1] if x and y else 0
            pred=f_01+f_10-f_00
            q,decomp=quantize(orig,pred,error_bound)
            lorenzo_qs.append(q)
            if q==0:
                # Unpredictable value: stored verbatim for the decompressor.
                us.append(decomp)
            array[x][y]=decomp
# Mop up the border strips the anchor-grid interpolation never covered:
# the right margin of the top band, then the entire bottom band.
# NOTE(review): last_x/last_y are only assigned inside the `if max_step>0:`
# branch above — this tail would raise NameError for max_step <= 0; confirm
# the script is only meant to run with a positive --max_step.
lorenzo_2d(array,0,last_x+1,last_y+1,size_y)
lorenzo_2d(array,last_x+1,size_x,0,size_y)
# Lorenzo border codes come first, then the interpolation codes.
quants=np.concatenate( (np.array(lorenzo_qs,dtype=np.int32),np.array(qs,dtype=np.int32) ) )
unpreds=np.array(us,dtype=np.float32)
# Persist the decompressed field, quant codes, and unpredictable values.
array.tofile(args.output)
quants.tofile(args.quant)
unpreds.tofile(args.unpred)
'''
for x in range(size_x):
    for y in range(size_y):
        if array[x][y]==orig_array[x][y] and x%max_step!=0 and y%max_step!=0:
            print(x,y)
'''
{
"api_name": "argparse.ArgumentParser",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "numpy.copy"... |
29920536522 | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QStackedWidget, QToolButton, QComboBox, \
QTableWidget, QTableWidgetItem, QLabel
from PyQt5.uic import loadUi
from fileDirectory import FileDirectory
class Ui(QMainWindow):
    """Main PICK window.

    Loads the Qt Designer layout, wires the overview navigation buttons to
    pages of the stacked view, and seeds every configuration table with
    checkbox and combo-box cells.

    Refactor notes: the original __init__ repeated near-identical
    find/connect and table-population loops; they are factored into private
    helpers. Bug fix: the original reused a single QTableWidgetItem for two
    cells of the graphical node-config table (see _populate_tables).
    """

    # Overview buttons, in stacked-view page order (index = page number).
    _PAGE_BUTTONS = [
        'OV_TeamConfigButton', 'OV_EventConfigButton', 'OV_DirectoryConfigButton',
        'OV_VectorConfigButton', 'OV_LogFileConfigButton', 'OV_FilterConfigButton',
        'OV_LogEntryConfigButton', 'OV_ExportConfigButton', 'OV_ChangeConfigButton',
        'OV_VectorDBConfigButton', 'OV_IconConfigButton', 'OV_GraphBuilderConfigButton',
        'OV_NodesConfigInTableButton', 'OV_NodesConfigInGraphButton',
        'OV_RelationshipConfigButton',
    ]
    # Tool buttons that all open the file-directory window (btn index 15).
    _TEAM_TOOL_BUTTONS = [
        'BlueTeamToolButton', 'RootDirectoryToolButton',
        'RedTeamToolButton', 'WhiteTeamToolButton',
    ]

    def __init__(self):
        super(Ui, self).__init__()
        loadUi('../ui/PICK.ui', self)
        self.mainStackedView = self.findChild(QStackedWidget, 'StackView')
        self._wire_navigation()
        self._populate_tables()
        # Team-configuration connect/disconnect toggle.
        self.connectButton = self.findChild(QPushButton, 'TeamConfigConnectpushButton')
        self.connectStatus = self.findChild(QLabel, 'ConnectionStatus')
        self.connectButton.clicked.connect(self.connectButtonClicked)
        self.show()

    def _wire_navigation(self):
        """Find each navigation widget and route its click through btn()."""
        for index, name in enumerate(self._PAGE_BUTTONS):
            button = self.findChild(QPushButton, name)
            setattr(self, name, button)
            # Bind `index` as a default argument so each closure keeps its
            # own page number (late-binding closure pitfall otherwise).
            button.clicked.connect(lambda checked=False, index=index: self.btn(index))
        for name in self._TEAM_TOOL_BUTTONS:
            tool = self.findChild(QToolButton, name)
            setattr(self, name, tool)
            tool.clicked.connect(lambda checked=False: self.btn(15))

    @staticmethod
    def _add_checkboxes(table, rows, column):
        """Put a fresh unchecked checkbox item in `column` for each row index."""
        for row in rows:
            item = QTableWidgetItem()
            item.setCheckState(Qt.Unchecked)
            table.setItem(row, column, item)

    @staticmethod
    def _add_checkboxes_at(table, cells):
        """Put a fresh unchecked checkbox item at each explicit (row, column)."""
        for row, column in cells:
            item = QTableWidgetItem()
            item.setCheckState(Qt.Unchecked)
            table.setItem(row, column, item)

    @staticmethod
    def _add_combos(table, cells):
        """Install a ' '/'1'/'2'/'3' drop-down at each (row, column) cell."""
        for row, column in cells:
            combo = QComboBox()
            combo.addItems([' ', '1', '2', '3'])
            table.setCellWidget(row, column, combo)

    def _populate_tables(self):
        """Locate every configuration table and seed its checkbox/combo cells."""
        all_rows = range(50)

        # Log entry configuration: checkbox column plus a drop-down column.
        self.LECtable = self.findChild(QTableWidget, 'LEC_LET_TtableWidget')
        self._add_checkboxes(self.LECtable, all_rows, 0)
        self._add_combos(self.LECtable, [(row, 4) for row in all_rows])

        # Log file configuration.
        self.LFGtable = self.findChild(QTableWidget, 'LFT_tableWidget')
        self._add_checkboxes(self.LFGtable, all_rows, 5)

        # Vector configuration.
        self.VCtable = self.findChild(QTableWidget, 'VC_TableView')
        self._add_checkboxes(self.VCtable, all_rows, 0)

        # Relationship configuration.
        self.RCtable = self.findChild(QTableWidget, 'RelationshipConfigTableWidget')
        self._add_checkboxes(self.RCtable, all_rows, 0)

        # Pulled / pushed vector database tables.
        self.PullVDBtable = self.findChild(QTableWidget, 'VDBC_PulledTableWidget')
        self._add_checkboxes(self.PullVDBtable, all_rows, 0)
        self.PushVDBtable = self.findChild(QTableWidget, 'VDBC_PushedTableWidget')
        self._add_checkboxes(self.PushVDBtable, all_rows, 0)

        # Approval database: checkbox column plus a drop-down column.
        self.ADBtable = self.findChild(QTableWidget, 'VDBC_AS_TableWidget')
        self._add_checkboxes(self.ADBtable, all_rows, 0)
        self._add_combos(self.ADBtable, [(row, 7) for row in all_rows])

        # Node configuration, table view: checkboxes down column 0
        # (rows 1-49), across rows 0 and 10 (columns 1-9), and combos at
        # rows 6-8 of columns 1-9.
        self.NCtable = self.findChild(QTableWidget, 'NCITF_NT_tableWidget')
        self._add_checkboxes(self.NCtable, range(1, 50), 0)
        for column in range(1, 10):
            self._add_checkboxes_at(self.NCtable, [(0, column), (10, column)])
        self._add_combos(self.NCtable, [(row, column)
                                        for column in range(1, 10)
                                        for row in range(6, 9)])

        # Node configuration, graphical view.
        self.NCVtable = self.findChild(QTableWidget, 'NCIGF_TV_tableWidget')
        self._add_checkboxes(self.NCVtable, range(1, 10), 0)
        self._add_combos(self.NCVtable, [(row, 1) for row in range(6, 9)])
        # Bug fix: the original inserted one shared QTableWidgetItem at both
        # (0, 1) and (10, 1); a QTableWidgetItem may only occupy a single
        # cell, so each cell now receives its own fresh checkbox.
        self._add_checkboxes_at(self.NCVtable, [(0, 1), (10, 1)])

        # Icon configuration.
        self.ICtable = self.findChild(QTableWidget, 'IC_IT_TableWidget')
        self._add_checkboxes(self.ICtable, all_rows, 0)

    def connectButtonClicked(self):
        """Toggle the connect button caption and the connection status label."""
        if self.connectButton.text() == 'Disconnect':
            self.connectButton.setText('Connect')
            self.connectStatus.setText('Not Connected')
        else:
            self.connectStatus.setText('Connected')
            self.connectButton.setText('Disconnect')

    def btn(self, index):
        """Show stacked-view page `index`; index 15 opens the file directory
        window instead."""
        if index < 15:
            self.StackView.setCurrentIndex(index)
        elif index == 15:
            # Keep a reference so the new window is not garbage-collected.
            self.window = FileDirectory()
            self.window.show()
if __name__ == "__main__":
    import sys
    # Launch the Qt event loop and show the main PICK window.
    application = QApplication(sys.argv)
    window = Ui()
    application.exec_()
| lalocho/software2 | src/PICK.py | PICK.py | py | 9,213 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QStackedWidget",
"line_number": 13,
"usage_type": "argument"
},
{
"ap... |
11646312135 | from gtts import gTTS
import wikipedia as wiki
import sys
bad_sections = ['See also', 'References', 'External links', 'Gallery']
# removes the sections which wouldn't be useful in these formats
def remove_extra(page):
    """Return the page's section titles with the non-content sections
    (module-level `bad_sections`) filtered out.

    Builds a new list instead of mutating the list returned by
    `page.sections` (the original called .remove() on it, which could alias
    state inside the wikipedia page object and only dropped the first
    occurrence of each title).
    """
    return [section for section in page.sections if section not in bad_sections]
def to_console(page):
    """Print each kept section title followed by its body text."""
    for section in remove_extra(page):
        print(section)
        print(page.section(section))
currently has issues encoding, certain characters cause issues.
"""
def to_file(page, user_input):
    """Write the kept sections (title, then body) to <user_input>_wikipedia.txt.

    Fixes vs the original: the file is opened with UTF-8 encoding (the
    original hit platform-default codec errors on certain characters) and
    closed via a context manager; the error message now prints only on an
    actual failure (it previously printed unconditionally because the
    surrounding try/except had been commented out).
    """
    file_name = user_input + "_wikipedia.txt"
    try:
        with open(file_name, "w", encoding="utf-8") as out:
            for section in remove_extra(page):
                out.write(section)
                out.write('\n')
                out.write(page.section(section))
                out.write('\n')
    except OSError:
        print("Error! something went wrong when writing to file!")
def menu():
    """Show the output-format menu and return the user's raw choice."""
    prompt_lines = (
        "For the given summary would you like to:",
        "1. Copy to a text file (currently broken)",
        "2. Print to the console",
        "3. Create an mp3 file",
    )
    for line in prompt_lines:
        print(line)
    return input("Please select the number: ")
def to_mp3(page, user_input):
    """Synthesize the kept sections to <user_input>.mp3 via Google TTS."""
    # Build "title\nbody" per section, with no extra separator between
    # consecutive sections (matching the original concatenation).
    parts = []
    for section in remove_extra(page):
        parts.append(section)
        parts.append('\n')
        parts.append(page.section(section))
    tts = gTTS(text=''.join(parts), lang='en')
    tts.save(user_input + ".mp3")
    print("file has been turned into an mp3")
# main
# main: prompt for a page, show its summary, then dispatch on the menu choice.
user_input = input("Enter the Wikipedia page you want to search!")
page = wiki.page(user_input)
print(wiki.summary(user_input))
menu_selection = menu()
# Bug fix: was `menu_selection is "1"` — identity comparison against a
# string literal relies on CPython interning and is a SyntaxWarning in
# modern Python; use equality instead.
if menu_selection == "1":
    to_file(page, user_input)
    print("File successfully saved to: " + user_input + "_wikipedia.txt")
elif menu_selection == "2":
    to_console(page)
elif menu_selection == "3":
    to_mp3(page, user_input)
else:
    print("Invalid input!")
| ajsebastian/audioWiki | main.py | main.py | py | 2,238 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gtts.gTTS",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "wikipedia.page",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "wikipedia.summary",
"line_number": 63,
"usage_type": "call"
}
] |
23048697587 | import torch
from torchvision import transforms as T
from torch.optim.lr_scheduler import ExponentialLR
from PIL import Image
import os
from tqdm import tqdm
from dalle2_pytorch import DALLE2, DiffusionPriorNetwork, DiffusionPrior, Unet, Decoder, OpenAIClipAdapter, DiffusionPriorTrainer, DecoderTrainer
from dalle2_pytorch.tokenizer import SimpleTokenizer
from dalle2_pytorch.optimizer import get_optimizer
import pandas as pd
# Change your input size here
input_image_size = 256
# Change your batch size here
batch_size = 1
# Change your epoch here
epoch = 5
# Change your train image root path here
train_img_path = "./Flower_Dataset_Combine/ImagesCombine/"
# Change your train annot csv path here
train_annot_path = "./Flower_Dataset_Combine/New_captions.csv"
# Change your device ("cpu" or "cuda")
device = "cuda"
# Change your diffusion prior model save path here (end with ".pth")
diff_save_path = "./diff_prior.pth"
# Change your diffusion prior model save path here (end with ".pth")
decoder_save_path = "./decoder.pth"
# Change the model weight save path here (end with ".pth")
dalle2_save_path = "./dalle2.pth"
transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize(input_image_size),
T.CenterCrop(input_image_size),
T.ToTensor()
])
train_csv= pd.read_csv(train_annot_path)
train_csv = train_csv.drop_duplicates()
train_csv = train_csv.dropna()
# openai pretrained clip - defaults to ViT/B-32
OpenAIClip = OpenAIClipAdapter()
prior_network = DiffusionPriorNetwork(
dim = 512,
depth = 6,
dim_head = 64,
heads = 8
).to(device)
diffusion_prior = DiffusionPrior(
net = prior_network,
clip = OpenAIClip,
timesteps = 100,
cond_drop_prob = 0.2
).to(device)
diff_trainer = DiffusionPriorTrainer(
diffusion_prior,
lr = 3e-4,
wd = 1e-2,
ema_beta = 0.99,
ema_update_after_step = 1000,
ema_update_every = 10,
)
unet1 = Unet(
dim = 128,
image_embed_dim = 512,
cond_dim = 128,
channels = 3,
dim_mults=(1, 2, 4, 8)
).to(device)
unet2 = Unet(
dim = 16,
image_embed_dim = 512,
cond_dim = 128,
channels = 3,
dim_mults = (1, 2, 4, 8, 16)
).to(device)
# decoder, which contains the unet and clip
decoder = Decoder(
unet = (unet1, unet2),
image_sizes = (128, 256),
clip = OpenAIClip,
timesteps = 100,
image_cond_drop_prob = 0.1,
text_cond_drop_prob = 0.5,
condition_on_text_encodings=False
).to(device)
decoder_trainer = DecoderTrainer(
decoder,
lr = 3e-4,
wd = 1e-2,
ema_beta = 0.99,
ema_update_after_step = 1000,
ema_update_every = 10,
)
if os.path.exists(diff_save_path):
diffusion_prior.load_state_dict(torch.load(diff_save_path))
if os.path.exists(decoder_save_path):
decoder.load_state_dict(torch.load(decoder_save_path))
train_size = len(train_csv)
idx_list = range(0, train_size, batch_size)
tokenizer = SimpleTokenizer()
# opt = get_optimizer(diffusion_prior.parameters())
# sched = ExponentialLR(opt, gamma=0.01)
for curr_epoch in range(epoch):
print("Run training diffusion prior ...")
print(f"Epoch {curr_epoch+1} / {epoch}")
for batch_idx in tqdm(idx_list):
if (batch_idx + batch_size) > train_size - 1:
iter_idx = range(batch_idx, train_size, 1)
else:
iter_idx = range(batch_idx, batch_idx+batch_size, 1)
batch_len = 0
total_loss = torch.tensor(0., device=device)
for curr_idx in iter_idx:
image_name = train_csv.loc[curr_idx]['file_name']
image_path = os.path.join(train_img_path, image_name)
image = Image.open(image_path)
image = transform(image)
image = image.unsqueeze(0).to(device)
target = [train_csv.loc[curr_idx]['caption']]
texts = tokenizer.tokenize(target).to(device)
for text in texts:
if total_loss == torch.tensor(0., device=device):
total_loss = diff_trainer(text.unsqueeze(0), image)
# total_loss = diffusion_prior(text.unsqueeze(0), image)
else:
total_loss += diff_trainer(text.unsqueeze(0), image)
# total_loss += diffusion_prior(text.unsqueeze(0), image)
batch_len += 1
avg_loss = total_loss / batch_len
# opt.zero_grad()
avg_loss.backward()
diff_trainer.update()
# opt.step()
if batch_idx % 100 == 0:
torch.save(diffusion_prior.state_dict(), diff_save_path)
print(f"average loss: {avg_loss.data}")
# sched.step()
torch.save(diffusion_prior.state_dict(), diff_save_path)
train_size = len(train_csv)
idx_list = range(0, train_size, batch_size)
tokenizer = SimpleTokenizer()
# opt = get_optimizer(decoder.parameters())
# sched = ExponentialLR(opt, gamma=0.01)
for curr_epoch in range(epoch):
print("Run training decoder ...")
print(f"Epoch {curr_epoch+1} / {epoch}")
for batch_idx in tqdm(idx_list):
if (batch_idx + batch_size) > train_size - 1:
iter_idx = range(batch_idx, train_size, 1)
else:
iter_idx = range(batch_idx, batch_idx+batch_size, 1)
for unet_number in (1,2):
batch_len = 0
total_loss = torch.tensor(0., device=device)
for curr_idx in iter_idx:
image_name = train_csv.loc[curr_idx]['file_name']
image_path = os.path.join(train_img_path, image_name)
image = Image.open(image_path)
image = transform(image)
image = image.unsqueeze(0).to(device)
# target = [train_csv.loc[curr_idx]['caption']]
# texts = tokenizer.tokenize(target).to(device)
# for text in texts:
if total_loss == torch.tensor(0., device=device):
total_loss = decoder_trainer(image, unet_number=unet_number)
# total_loss = decoder(image, text.unsqueeze(0))
else:
total_loss += decoder_trainer(image, unet_number=unet_number)
# total_loss += decoder(image, text.unsqueeze(0))
batch_len += 1
avg_loss = total_loss / batch_len
# opt.zero_grad()
avg_loss.backward()
decoder_trainer.update(unet_number=unet_number)
# opt.step()
if batch_idx % 100 == 0:
torch.save(decoder.state_dict(), decoder_save_path)
print(f"average loss: {avg_loss.data}")
# sched.step()
torch.save(decoder.state_dict(), decoder_save_path)
dalle2 = DALLE2(
prior = diffusion_prior,
decoder = decoder
).to(device)
torch.save(dalle2.state_dict(), dalle2_save_path) | goldiusleonard/Dalle2_pytorch_project | train_dalle2_from_csv.py | train_dalle2_from_csv.py | py | 7,138 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Lambda",
"line_number": 40,
"usage_type": "call"
},
{
... |
26275998182 | import json
from http.server import HTTPServer, BaseHTTPRequestHandler
from multiprocessing import active_children, Process
'''
大致原理,程序启动后,会在本地开启一个http服务。Tkinter布局助手上,点击预览后,将拖拽界面的布局转为python代码,
通过网络请求,发送到本服务,服务端接收到代码,使用exec函数执行代码。
因为exec函数,有一定危险性,如果你使用的是fork的项目,请自行检查代码后再执行。官方下载地址如下。
官方地址:https://www.pytk.net/tkinter-helper/preview.py
'''
# 版本号
version = "1.0.0"
# 预览服务默认地址
host = ("127.0.0.1", 12300)
class Handler(BaseHTTPRequestHandler):
def echo(self, data):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header('Access-Control-Allow-Methods', '*')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(json.dumps(data).encode())
def do_GET(self):
msg = {'version': version, 'msg': "窗口预览服务已启动"}
self.echo(msg)
def do_POST(self):
code = self.rfile.read(int(self.headers['content-length'])).decode("utf8")
for p in active_children():
if p.name == "preview":
p.kill()
print(code)
Process(target=preview, args=(code,), name="preview").start()
msg = {'msg': "发送成功"}
self.echo(msg)
def start_server():
server = HTTPServer(host, Handler)
print("预览服务已启动, 监听地址: %s:%s" % host)
server.serve_forever()
def preview(code):
try:
dic = dict()
exec(code, dic)
except Exception as e:
print("执行异常", e)
if __name__ == "__main__":
start_server()
| iamxcd/tkinter-helper | preview/preview-1.0.0.py | preview-1.0.0.py | py | 1,855 | python | en | code | 408 | github-code | 1 | [
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "multiprocessing.active_children",
"line_number": 36,
"usage_type": "call"
},
{
"api... |
28921295761 |
import pandas as pd
import random as rd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.cm as cm
import pickle
import random
import math
class neuron:
def __init__(self,weights):
self.weights = weights
self.outputneuron=None
self.s=None
self.errorneuron=None
self.deltaweights=[random.random()/2 for i in range(len(self.weights))].copy()
self.deltaoldweights=[0 for i in range(len(self.weights))].copy()
def S(self,inputs):
return self.weights[0]+np.dot(self.weights[1:],inputs)
def activation_function(self,inputs):
outputneuron=self.f(self.S(inputs))
return outputneuron
#bacKpropagation
def f(self,x):
return 1/(1+math.exp(-x))
def derivation_function(self,s):
return self.f(s)*(1-self.f(s))
def update_weights_ofsingle_neuronj(self):
for k in range(len(self.deltaweights)):
wjk=self.weights[k]
self.weights[k]=wjk+self.deltaweights[k]
class layer:
def __init__(self,nbrof_neurons,nbrof_inputs):
self.numberof_neurons = nbrof_neurons
self.numerof_inputs= nbrof_inputs
self.my_neurons=layer.generate_my_neurons(nbrof_neurons,nbrof_inputs)
self.inputstolayer=None
self.outputsoflayer=None
def generate_my_neurons(nbrof_neurons,nbrof_inputs):
list_of_neurons=[]
#a=math.sqrt(6)/math.sqrt(nbrof_inputs+nbrof_neurons)
a=math.sqrt(3)/math.sqrt(nbrof_inputs)
#a=1
#a=math.sqrt(6)/math.sqrt(nbrof_inputs+nbrof_neurons)
for i in range(nbrof_neurons):
weights_of_neuron=[np.random.uniform(-a,a) for i in range(nbrof_inputs+1)].copy()
#weights_of_neuron=[1 for i in range(nbrof_inputs+1)].copy()
new_neuron=neuron(weights_of_neuron)
list_of_neurons.append(new_neuron)
return list_of_neurons
def generate_my_outputs(self,nbrof_neurons):
outputslayer=[]
for i in range(nbrof_neurons):
output=self.my_neurons[i].activation_function(self.inputstolayer)
outputslayer.append(output)
self.my_neurons[i].outputneuron=output
self.my_neurons[i].s=self.my_neurons[i].S(self.inputstolayer)
return outputslayer
class network:
def __init__(self,nbrof_layers,nbrof_neuronsinlayer,Inputs,number_of_classes):
self.number_of_layers = nbrof_layers
self.number_of_classes=number_of_classes
self.nbrof_neuronsinlayer=nbrof_neuronsinlayer
self.input_layer=Inputs.copy()
self.myhidden_layers=self.generatemylayers(nbrof_layers,nbrof_neuronsinlayer)
self.output_layer=self.generete_output_layer(number_of_classes)
self.desiredoutput=None
#self.Activate_network(nbrof_neuronsinlayer)
def generatemylayers(self,nbrof_layers,nbrof_neuronsinlayer):
list_of_layers=[]
for i in range (nbrof_layers):
if len(list_of_layers)==0:
nbrof_inputs=len(self.input_layer)
else:
nbrof_inputs=list_of_layers[i-1].numberof_neurons
new_layer=layer(nbrof_neuronsinlayer[i],nbrof_inputs)
list_of_layers.append(new_layer)
return list_of_layers
def generete_output_layer(self,number_of_classes):
if len(self.myhidden_layers)==0:
print("no hiddenlayer")
nbrof_inputstooutlayer=len(self.input_layer)
else:
before_output_layer=self.myhidden_layers[len(self.myhidden_layers)-1]
nbrof_inputstooutlayer=before_output_layer.numberof_neurons
output_layer=layer(number_of_classes,nbrof_inputstooutlayer)
return output_layer
def Activate_network(self,nbrof_neuronsinlayer):
if self.number_of_layers==0:
print("no hidden layers")
self.output_layer.inputstolayer=self.input_layer.copy()
self.output_layer.outputsoflayer=self.output_layer.generate_my_outputs(self.number_of_classes).copy()
else:
self.myhidden_layers[0].inputstolayer=self.input_layer.copy()
self.myhidden_layers[0].outputsoflayer=self.myhidden_layers[0].generate_my_outputs(nbrof_neuronsinlayer[0]).copy()
for i in range(1,self.number_of_layers):
self.myhidden_layers[i].inputstolayer= self.myhidden_layers[i-1].outputsoflayer.copy()
self.myhidden_layers[i].outputsoflayer=self.myhidden_layers[i].generate_my_outputs(nbrof_neuronsinlayer[i]).copy()
self.output_layer.inputstolayer=self.myhidden_layers[self.number_of_layers-1].outputsoflayer.copy()
self.output_layer.outputsoflayer=self.output_layer.generate_my_outputs(self.number_of_classes).copy()
#Backpropagation:
def update_error_output_layer(self,desired_outputs):
H=0.9
L=0.1
n=self.number_of_layers
learning_rate=0.0008
skiped=[]
for i in range(self.number_of_classes):
if desired_outputs[i]==1 and self.output_layer.my_neurons[i].outputneuron>H:
skiped.append(i)
self.output_layer.my_neurons[i].errorneuron=0
continue
elif desired_outputs[i]==0 and self.output_layer.my_neurons[i].outputneuron<L:
skiped.append(i)
self.output_layer.my_neurons[i].errorneuron=0
continue
else:
ei=desired_outputs[i]-self.output_layer.my_neurons[i].outputneuron
si=self.output_layer.my_neurons[i].s
self.output_layer.my_neurons[i].errorneuron=self.output_layer.my_neurons[i].derivation_function(si)*ei
self.calculatedeltaj(self.output_layer.my_neurons[i],self.myhidden_layers[n-1],learning_rate)
return skiped
def update_error_hidden_layers(self):
if n==0:
print("no hidden layers")
return
self.update_error_singlehidden_layer(self.myhidden_layers[n-1],self.output_layer,self.nbrof_neuronsinlayer[n-1],n-1)
for l in range(n-2,-1,-1):
self.update_error_singlehidden_layer(self.myhidden_layers[l],self.myhidden_layers[l+1],self.nbrof_neuronsinlayer[l],l)
return
def update_error_singlehidden_layer(self,current_layer,following_layer,nbrofneuronsinlayer,l):
learning_rate=0.0008
for j in range(nbrofneuronsinlayer):
dotprod=self.dot_weights_errors(j,following_layer)#decalge du bias
sj=current_layer.my_neurons[j].s
current_layer.my_neurons[j].errorneuron=current_layer.my_neurons[j].derivation_function(sj)*dotprod
if l==0:
self.calculatedeltaj_first_layer(current_layer.my_neurons[j],learning_rate)
else:
self.calculatedeltaj(current_layer.my_neurons[j],self.myhidden_layers[j-1],learning_rate)
return
def dot_weights_errors(self,j,following_layer):
j=j+1#decalge du bias
dotproduct=0
for i in range(following_layer.numberof_neurons):
wijerrori=following_layer.my_neurons[i].errorneuron*following_layer.my_neurons[i].weights[j]
dotproduct=dotproduct+wijerrori
return dotproduct
#uppdate weights of neuron j:
def calculatedeltaj(self,neuronj,previous_layer,learning_rate):
alpha=0.8
ej=neuronj.errorneuron#add TO SAVE OLD WEIGHTS
neuronj.deltaweights[0]=learning_rate*ej+alpha*neuronj.deltaoldweights[0]
neuronj.deltaoldweights[0]=neuronj.deltaweights[0]
for k in range(0,previous_layer.numberof_neurons):
hk=previous_layer.outputsoflayer[k]
neuronj.deltaweights[k+1]=learning_rate*ej*hk+alpha*neuronj.deltaoldweights[k+1]
neuronj.deltaoldweights[k+1]=neuronj.deltaweights[k+1]
return
def calculatedeltaj_first_layer(self,neuronj,learning_rate):
alpha=0.8
ej=neuronj.errorneuron#add TO SAVE OLD WEIGHTS
neuronj.deltaweights[0]=learning_rate*ej+alpha*neuronj.deltaoldweights[0]
neuronj.deltaoldweights[0]=neuronj.deltaweights[0]
for k in range(0,len(self.input_layer)):
hk=self.input_layer[k]
neuronj.deltaweights[k+1]=learning_rate*ej*hk+alpha*neuronj.deltaoldweights[k+1]
neuronj.deltaoldweights[k+1]=neuronj.deltaweights[k+1]
return
def backpropagate(self):
skiped=self.update_error_output_layer(self.desiredoutput)
self.update_error_hidden_layers()
for i in range(self.number_of_classes):
if (i in skiped):
continue
else:
self.output_layer.my_neurons[i].update_weights_ofsingle_neuronj()
for i in range(self.number_of_classes):
self.output_layer.my_neurons[i].update_weights_ofsingle_neuronj()
for layer in self.myhidden_layers:
for neuron in layer.my_neurons:
neuron.update_weights_ofsingle_neuronj()
#print("finish backpropagation")
return
def classify(self):
index=np.argmax(self.output_layer.outputsoflayer)
return index
# In[2]:
##
#Import shuffled images
Inputsst=pd.read_csv("Imagestrainshuffled.txt",sep=' ',header=None)
teaching_input=pd.read_csv("Labelstrainshuffled.txt",sep=' ',header=None)
Inputsst=Inputsst.values
teaching_input=teaching_input.values.tolist()
lg_data=len(teaching_input)
##Import testing images
Inputstestu=pd.read_csv("Imagestestshuffled.txt",sep=' ',header=None)
testing_input=pd.read_csv("Labelstestshuffled.txt",sep=' ',header=None)
Inputstestu=Inputstestu.values.tolist()
testing_input=testing_input.values.tolist()
lg_test=len(testing_input)
#Imagestrainshuffled
#Labelstrainshuffled
####Testing the performance and loading the model
import pandas as pd
import random as rd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.cm as cm
import pickle
import random
import math
with open('network2.pkl', 'rb') as inp:
nettrained = pickle.load(inp)
with open('errors3.pkl', 'rb') as inp:
errors = pickle.load(inp)
with open('errorstest3.pkl', 'rb') as inp:
errorstest = pickle.load(inp)
with open('epochs3.pkl', 'rb') as inp:
epochs = pickle.load(inp)
#
from sklearn import metrics
cm=metrics.confusion_matrix(teaching_input,Ypred)
print(cm)
# In[26]:
import seaborn as sns
df_cm = pd.DataFrame(cm, index = [i for i in "0123456789"],
columns = [i for i in "0123456789"])
plt.figure(figsize = (10,6))
heatmap=sns.heatmap(df_cm, annot=True,cmap='BrBG', fmt='g')
bottom, top = heatmap.get_ylim()
heatmap.set_ylim(bottom + 0.5, top - 0.5)
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.title("The confusion matrix of the training data")
plt.show()
# In[22]:
from sklearn import metrics
cmtest=metrics.confusion_matrix(testing_input,Ypredtest)
print(cmtest)
# In[25]:
import seaborn as sns
df_cmtest= pd.DataFrame(cmtest, index = [i for i in "0123456789"],
columns = [i for i in "0123456789"])
plt.figure(figsize = (10,7))
heatmap=sns.heatmap(df_cmtest, annot=True,cmap='BrBG', fmt='g')
bottom, top = heatmap.get_ylim()
heatmap.set_ylim(bottom + 0.5, top - 0.5)
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.title("The confusion matrix of the testing data")
plt.show()
| MeryemEssalmi/Adaptive-Neural-Network-from-scrach | Neural Network code.py | Neural Network code.py | py | 11,523 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.random",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 49,
... |
25191930876 | from pytablewriter import MarkdownTableWriter
def readCustomVCF(fname, keep_cols):
data = []
with open(fname, 'r') as vcf:
for line in vcf:
# skip the header specification part
if line.startswith('##'):
continue
# get the header line but only keep the ones for the md table
elif line.startswith('#C'):
header_line = line.rstrip('\n').split('\t')
header = [header_line[i] for i in keep_cols]
header += ["EXC", "GENE", "AA_POS", "AA_REF", "AA_ALT"]
# all other lines that contain data
elif len(line) != 0:
data_line = line.rstrip('\n').split(sep='\t')
new_data_line = [data_line[i] for i in keep_cols]
# but skip the ones of the sequence ends
if data_line[-1].split(";")[1].split("=")[1] != "seq_end":
new_data_line.append("<br>".join(data_line[-1].split(";")[1].split("=")[1].split(",")))
new_data_line.append(data_line[-1].split(";")[-4].split("=")[1])
new_data_line.append(data_line[-1].split(";")[-3].split("=")[1])
new_data_line.append(data_line[-1].split(";")[-2].split("=")[1])
new_data_line.append(data_line[-1].split(";")[-1].split("=")[1])
data.append(new_data_line)
#data.append([data_line[i] for i in range(len(header)-1)])
else:
break
return header, data
def main():
writer = MarkdownTableWriter()
# name of the vcf file to parse and which columns to select from the file
filename = "problematic_sites_sarsCov2.vcf"
keep = [1, 3, 4, 6]
# title of the table - printed as markdown header before the table
writer.table_name = "Human-friendly version of the vcf file\n"
header, data = readCustomVCF(fname=filename, keep_cols=keep)
# the markdown table does not need a single line for every site at the ends of the sequence
# so insert the beginning and append the ends
data.insert(0, ["1-55", ".", ".", "mask","seq_end",".", ".",".","."])
data.append(["29804-29903", ".", ".", "mask","seq_end",".",".",".","."])
writer.headers = header
writer.value_matrix = data
writer.write_table()
if __name__ == "__main__":
main()
| W-L/ProblematicSites_SARS-CoV2 | src/vcf2markdown.py | vcf2markdown.py | py | 2,381 | python | en | code | 45 | github-code | 1 | [
{
"api_name": "pytablewriter.MarkdownTableWriter",
"line_number": 34,
"usage_type": "call"
}
] |
38918987427 | from django.shortcuts import render , redirect
from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
def register(request):
#регаем нового юзверя
if request.method != 'POST':
#вывод пустой формы реги
form=UserCreationForm()
else:
#Обработка заполненной формы
form=UserCreationForm(data=request.POST)
if form.is_valid():
new_user=form.save() # идет сохранение данных юзера в БД
#Выполнение входа и перенаправка на домашнюю страницу
login (request, new_user)
return redirect ('learning_logs:index')
# вывод пустой или недействительной формы
context={'form': form}
return render (request, 'registration/register.html', context) | Master-sniffer/Learning-PYTHON- | Book_1/Django/Scripts/users/views.py | views.py | py | 973 | python | ru | code | 1 | github-code | 1 | [
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 17,
"usag... |
40531959843 | """序列化"""
import json
"""JSON
如果我们要在不同的编程语言之间传递对象,就必须把对象序列化为标准格式,比如XML,
但更好的方法是序列化为JSON,因为JSON表示出来就是一个字符串,可以被所有语言读取,
也可以方便地存储到磁盘或者通过网络传输。
JSON不仅是标准格式,并且比XML更快,而且可以直接在Web页面中读取,非常方便。
JSON表示的对象就是标准的JavaScript语言的对象,JSON和Python内置的数据类型对应如下:"""
"""JSON类型 Python类型
{} dict
[] list
"string" str
1234.56 int或float
true/false True/False
null None
"""
# Python内置的json模块提供了非常完善的Python对象到JSON格式的转换。我们先看看如何把Python对象变成一个JSON:
d = dict(name="Bob", age="22", score="77")
print(json.dumps(d))
# dumps()方法返回一个str,内容就是标准的JSON。类似的,dump()方法可以直接把JSON写入一个file-like Object。
# 要把JSON反序列化为Python对象,用loads()或者对应的load()方法,前者把JSON的字符串反序列化,
# 后者从file-like Object中读取字符串并反序列化:
json_str = '{"name": "Bob", "age": "22", "score": "77"}'
print(json.loads(json_str))
class Student(object):
def __init__(self, name, age, score):
self.name = name
self.age = age
self.score = score
s = Student("Bob", 20, 77)
# print("Student--Json:", json.dumps(s))#会报错 TypeError
def student2dict(std):
return {
'name': std.name,
'age': std.age,
'score': std.score
}
print(json.dumps(s, default=student2dict))
# 因为通常class的实例都有一个__dict__属性,它就是一个dict,用来存储实例变量。也有少数例外,比如定义了__slots__的class。
# 同样的道理,如果我们要把JSON反序列化为一个Student对象实例,loads()方法首先转换出一个dict对象,然后,我们传入的object_hook函数负责把dict转换为Student实例:
def dict2student(d):
return Student(d['name'], d['age'], d['score'])
print(json.dumps(s, default=lambda obj: obj.__dict__))
| zongrh/untitled_python3 | python_io/python_serialize.py | python_serialize.py | py | 2,229 | python | zh | code | 1 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 52,
... |
27516911233 | # -*- coding:UTF-8 -*-
def get_auth_url():
weibo_auth_url = 'https://api.weibo.com/oauth2/authorize'
redirect_url = 'http://127.0.0.1:8000/complete/weibo/'
auth_url = weibo_auth_url+'?client_id={client_id}&redirect_uri={re_url}'.format(client_id=2113796513,re_url=redirect_url)
print(auth_url)
def get_access_token(code='3ad8eb8a3595bf17f95eba158ae61b1f'):
access_token_url = 'https://api.weibo.com/oauth2/access_token'
import requests
re_dict = requests.post(access_token_url,data={
'client_id':2113796513,
'client_secret':'9292dde7cc66450d7bc1a933fc800e88',
'grant_type':'authorization_code',
'code':code,
'redirect_uri':'http://127.0.0.1:8000/complete/weibo/',
})
# '{"access_token":"2.00hgflGGJCRD_C4cefc7b386kFrOTC","remind_in":"157679998","expires_in":157679998,"uid":"5596816675","isRealName":"true"}'
pass
def get_user_info(access_token='',uid=''):
user_url = 'https://api.weibo.com/2/users/show/json?access_token={token}&uid={uid}'.format(token=access_token,uid=uid)
print(user_url)
if __name__ == '__main__':
# get_auth_url()
# get_access_token(code='3ad8eb8a3595bf17f95eba158ae61b1f')
get_user_info(access_token="2.00hgflGGJCRD_C4cefc7b386kFrOTC",uid="5596816675")
| yuansuixin/Django | apps/utils/weibo_login.py | weibo_login.py | py | 1,282 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
}
] |
1706775488 | from tkinter import *
from enum import Enum
## WFrame 창의 처음 위치를 나타내는 열거형
class StartPosition(Enum):
default = 0
centerScreen = 1
centerParent = 2
manual = 3
## Windows Forms를 토대로 작성한 Frame
#
# http://effbot.org/tkinterbook/
class WFrame(Frame):
## 제목 표시줄에 표시할 제목
# @var string
text = 'WFrame'
## WFrame 너비
# @var int
width = 300
## WFrame 높이
# @var int
height = 300
## 창 왼쪽 위치
# @var int
x = 0
## 창 위쪽 위치
# @var int
y = 0
## 창의 처음 위치
# @var StartPosition
startPosition = StartPosition.default
## 부모 WFrame
# @see open()
# @var WFrame
parent = None
## 생성자
def __init__(self, master=None, parent=None, **kw):
super().__init__(master, kw)
self.master.protocol('WM_DELETE_WINDOW', self._onClosing)
self.parent = parent if parent != None else self
self._initializeWidget()
self.onLoad()
## 원시 Widget을 초기화한다.
def _initializeWidget(self):
self.initializeWidget()
self.master.title(self.text)
self.master.geometry('%dx%d' % (self.width, self.height))
self.master.geometry('+%d+%d' % self._getPosition())
self.pack(fill=BOTH, expand=TRUE)
self.focus_set()
# onClosing Handler
def _onClosing(self):
self.onClosing()
self.master.destroy()
## startPosition에 근거한 위치를 계산한다.
# @return tuple (x, y)
def _getPosition(self):
self.master.update_idletasks()
(width, height) = self.getSize()
(parentWidth, parentHeight) = self.parent.getSize()
if self.startPosition == StartPosition.default:
self.x = self.master.winfo_x()
self.y = self.master.winfo_y()
elif self.startPosition == StartPosition.centerScreen:
self.x = self.master.winfo_screenwidth() / 2 - width / 2
self.y = self.master.winfo_screenheight() / 2 - height / 2
elif self.startPosition == StartPosition.centerParent:
self.x = self.parent.master.winfo_x() + parentWidth / 2 - width / 2
self.y = self.parent.master.winfo_y() + parentHeight / 2 - height / 2
elif self.startPosition != StartPosition.manual:
raise ValueError('%s is not a valid StartPosition' % str(self.startPosition))
return (self.x, self.y)
## 창의 크기를 잰다.
# @return tuple (width, height)
def getSize(self):
width = self.master.winfo_width() + (self.master.winfo_rootx() - self.master.winfo_x()) * 2
height = self.master.winfo_height() + (self.master.winfo_rooty() - self.master.winfo_y()) * 2
return (width, height)
## Widget을 추가한다.
#
# Windows Forms의 InitializeComponent처럼 작성한다.
def initializeWidget(self):
pass
## load 완료 후 작업
def onLoad(self):
pass
## 창을 닫는다.
def close(self):
self._onClosing()
## 창 끌 때 할 작업
def onClosing(self):
pass
## wFrame을 자식 창에 띄우고 wFrame을 반환한다.
# @param wFrame 자식 창에 띄울 WFrame
# @return WFrame
def open(self, wFrame, **kw):
return wFrame(Toplevel(self), parent=self, **kw)
## 자식 창을 닫을 때까지 부모 창을 숨긴다.
# @see open()
def openDialog(self, wFrame, **kw):
self.master.withdraw()
wframe = self.open(wFrame, **kw)
wframe.wait_window()
self.master.deiconify()
return wframe
| sunghwan2789/EnglishTypingPractice | wframe.py | wframe.py | py | 3,692 | python | ko | code | 1 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 5,
"usage_type": "name"
}
] |
20782775994 | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import download_file_from_gdrive
# mapping network
#################
def pixel_norm(x, epsilon=1e-8):
return x * torch.rsqrt(torch.mean(torch.pow(x, 2), dim=1, keepdim=True) + epsilon)
class MappingNetwork(nn.Module):
def __init__(self,
num_layers=8,
latent_size=512, # input size
mapping_size=512, # hidden layer size
dlatent_size=512, # disentangled latent size (output)
dlatent_broadcast=18, # how many dlatent copies in output
normalize_latents=True,
):
super(MappingNetwork, self).__init__()
self.dlatent_broadcast = dlatent_broadcast
self.normalize_latents = normalize_latents
self.net = self._make_layers(num_layers, latent_size, mapping_size, dlatent_size)
def _make_layers(self, num_layers, latent_size, mapping_size, dlatent_size):
layers = []
for layer_idx in range(num_layers):
in_size = latent_size if layer_idx == 0 else mapping_size
out_size = dlatent_size if layer_idx == num_layers - 1 else mapping_size
layers += [nn.Linear(in_size, out_size), nn.LeakyReLU(negative_slope=0.2)]
return nn.Sequential(*layers)
def forward(self, x):
if self.normalize_latents:
x = pixel_norm(x)
x = self.net(x)
if self.dlatent_broadcast is not None:
x = x.unsqueeze(1) # add a dimension
x = x.repeat(1, self.dlatent_broadcast, 1)
return x
# synth network
###############
def instance_norm(x, epsilon=1e-8):
assert len(x.shape) == 4 # NCHW
x -= torch.mean(x, dim=[2, 3], keepdim=True)
x = x * torch.rsqrt(torch.mean(torch.pow(x, 2), dim=[2, 3], keepdim=True) + epsilon)
return x
def upscale2d(x, factor=2):
assert len(x.shape) == 4
assert isinstance(factor, int) and factor >= 1
# early exit
if factor==1:
return x
s = x.shape
x = x.view(-1, s[1], s[2], 1, s[3], 1)
x = x.repeat(1, 1, 1, factor, 1, factor)
x = x.view(-1, s[1], s[2] * factor, s[3] * factor)
return x
def blur2d(x, f=[1, 2, 1], normalize=True, flip=False, stride=1):
assert len(x.shape) == 4
# modify kernel
f = np.array(f, dtype=np.float32)
if len(f.shape) == 1:
f = f[:, np.newaxis] * f[np.newaxis, :]
assert len(f.shape) == 2
if normalize:
f /= np.sum(f)
if flip:
f = f[::-1, ::-1]
f = f[np.newaxis, np.newaxis, :, :]
num_channels = x.shape[1]
f = torch.from_numpy(f)
f = f.repeat(num_channels, 1, 1, 1)
# convolve via depthwise_conv
x = F.conv2d(x, f, groups=num_channels, padding=1)
return x
class Blur2D(nn.Module):
def __init__(self, num_channels, f=[1, 2, 1], normalize=True, flip=False, stride=1):
super(Blur2D, self).__init__()
self.num_channels = num_channels
f = np.array(f, dtype=np.float32)
if len(f.shape) == 1:
f = f[:, np.newaxis] * f[np.newaxis, :]
assert len(f.shape) == 2
if normalize:
f /= np.sum(f)
if flip:
f = f[::-1, ::-1]
f = f[np.newaxis, np.newaxis, :, :]
f = torch.from_numpy(f)
f = f.repeat(num_channels, 1, 1, 1)
#self.f = f
self.register_buffer("f", f)
def forward(self, x):
x = F.conv2d(x, self.f, groups=self.num_channels, padding=1)
return x
class StyleMod(nn.Module):
def __init__(self, channels_in, channels_out):
super(StyleMod, self).__init__()
self.dense = nn.Linear(channels_in, channels_out)
def forward(self, x, dlatent):
style = self.dense(dlatent)
shape = [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2)
style = style.view(*shape)
return x * (style[:, 0] + 1) + style[:, 1]
class LearnableBias(nn.Module):
def __init__(self, num_channels):
super(LearnableBias, self).__init__()
self.bias = nn.Parameter(torch.zeros(num_channels))
def forward(self, x):
if len(x.shape) == 2:
return x + self.bias
return x + self.bias.view(1, -1, 1, 1)
class NoiseBlock(nn.Module):
def __init__(self, num_channels, height, width):
# num_channels - input tensor's channels
# height - noise tensor's height
# width - noise tensor's width
super(NoiseBlock, self).__init__()
self.num_channels = num_channels
self.noise = nn.Parameter(torch.randn(1, 1, height, width))
self.weight = nn.Parameter(torch.zeros(num_channels))
def forward(self, x, randomize_noise):
assert len(x.shape) == 4 # NCHW
assert x.shape[1] == self.num_channels
if randomize_noise:
noise = torch.randn((x.shape[0], 1, x.shape[2], x.shape[3]))
else:
noise = self.noise
return x + noise * self.weight.view(1, -1, 1, 1) # nchw + (n/1)1hw * 1c11 = nchw + (n1/1)chw
class LayerEpilogue(nn.Module):
def __init__(self, dlatent_channels, input_channels,
noise_height, noise_width,
use_noise=True, use_pixel_norm=False,
use_instance_norm=True, use_style=True):
super(LayerEpilogue, self).__init__()
self.use_noise = use_noise
self.use_pixel_norm = use_pixel_norm
self.use_instance_norm = use_instance_norm
self.use_style = use_style
self.noise = NoiseBlock(input_channels, noise_height, noise_width)
self.bias = LearnableBias(input_channels)
self.style = StyleMod(dlatent_channels, 2*input_channels)
def forward(self, x, dlatent, randomize_noise=True):
if self.use_noise:
x = self.noise(x, randomize_noise)
x = self.bias(x)
x = nn.LeakyReLU(negative_slope=0.2)(x)
if self.use_pixel_norm:
x = pixel_norm(x)
if self.use_instance_norm:
x = instance_norm(x)
if self.use_style:
x = self.style(x, dlatent)
return x
class ConstSynthBlock(nn.Module):
def __init__(self, dlatent_channels, input_channels, height=4, width=4):
super(ConstSynthBlock, self).__init__()
self.const = nn.Parameter(torch.ones(1, input_channels, height, width))
self.const_epilogue = LayerEpilogue(dlatent_channels, input_channels, height, width)
self.conv = nn.Conv2d(input_channels, input_channels, kernel_size=3, padding=1, bias=False)
self.conv_epilogue = LayerEpilogue(dlatent_channels, input_channels, height, width)
def forward(self, dlatent, randomize_noise):
assert dlatent.shape[1] == 2
x = self.const
x = self.const_epilogue(x, dlatent[:, 0], randomize_noise)
s0 = x.shape
x = self.conv(x)
s1 = x.shape
assert s0 == s1
x = self.conv_epilogue(x, dlatent[:, 1], randomize_noise)
return x
class UpscaleConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, fused_scale="auto"):
super(UpscaleConv2d, self).__init__()
assert kernel_size >= 1 and kernel_size % 2 == 1
assert fused_scale in [True, False, "auto"]
self.fused_scale = fused_scale
self.weight = nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size))
def forward(self, x):
fused_scale = self.fused_scale
if fused_scale=="auto":
fused_scale = min(x.shape[2:]) * 2 >= 128
if not fused_scale:
x = upscale2d(x)
x = F.conv2d(x, self.weight, padding=1)
else:
w = self.weight.permute(1, 0, 2, 3)
w = F.pad(w, (1,1,1,1))
w = w[:, :, 1:, 1:] + w[:, :, :-1, 1:] + w[:, :, 1:, :-1] + w[:, :, :-1, :-1]
x = F.conv_transpose2d(x, w, stride=2, padding=(w.size(-1) - 1) // 2)
return x
class SynthBlock(nn.Module):
def __init__(self, dlatent_channels, input_channels, output_channels, height, width):
super(SynthBlock, self).__init__()
self.conv0 = UpscaleConv2d(input_channels, output_channels, kernel_size=3)
self.blur2d = Blur2D(output_channels)
self.conv0_epilogue = LayerEpilogue(dlatent_channels, output_channels, height, width)
self.conv1 = nn.Conv2d(output_channels, output_channels, kernel_size=3, padding=1, bias=False)
self.conv1_epilogue = LayerEpilogue(dlatent_channels, output_channels, height, width)
def forward(self, x, dlatent, randomize_noise):
assert dlatent.shape[1] == 2
x = self.conv0(x)
x = self.blur2d(x)
x = self.conv0_epilogue(x, dlatent[:, 0], randomize_noise)
x = self.conv1(x)
x = self.conv1_epilogue(x, dlatent[:, 1], randomize_noise)
return x
class ToRGBBlock(nn.Module):
def __init__(self, input_channels):
super(ToRGBBlock, self).__init__()
self.conv = nn.Conv2d(input_channels, 3, kernel_size=1, bias=False)
self.bias = LearnableBias(3)
def forward(self, x):
x = self.conv(x)
x = self.bias(x)
return x
class SynthNetwork(nn.Module):
def __init__(self, dlatent_channels=512, resolution=1024):
super(SynthNetwork, self).__init__()
def nf(stage, fmap_base = 8192, fmap_decay = 1.0, fmap_max = 512):
return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
self.resolution = resolution
# early layer
self.block_4x4 = ConstSynthBlock(dlatent_channels, nf(1), 4, 4)
# middle layers
res_log2 = int(np.log2(resolution))
for res in range(3, res_log2 + 1):
hw = 2**res
setattr(self, "block_{0}x{0}".format(hw), SynthBlock(dlatent_channels, nf(res - 2), nf(res - 1), hw, hw))
# rgb image branch
#self.torgb = ToRGBBlock(nf(res - 1))
self.torgb = nn.Conv2d(nf(res - 1), 3, kernel_size=1)
def forward(self, dlatents, use_noise=False):
x = self.block_4x4(dlatents[:, :2], use_noise)
if torch.isnan(x).any():
print("Nan detected!")
import pdb; pdb.set_trace()
res_log2 = int(np.log2(self.resolution))
for res in range(3, res_log2 + 1):
hw = 2**res
x = getattr(self, "block_{0}x{0}".format(hw))(x, dlatents[:, (res * 2 - 4): (res * 2 - 2)], use_noise)
if torch.isnan(x).any():
print("Nan detected!")
import pdb; pdb.set_trace()
x = self.torgb(x)
return x
# stylegan
##########
class StyleGAN(nn.Module):
WEIGHTS_MAP = {
"ffhq": {
"fname":"stylegan_ffhq.pt",
"url": "https://drive.google.com/uc?id=1qnG4jFWnXh3WYqBG4fLw7hHgYdpzng51",
}
}
def __init__(self, weights=None):
super(StyleGAN, self).__init__()
self.mapping = MappingNetwork()
self.synthesis = SynthNetwork()
if weights is not None:
self._load_weights(weights)
def _load_weights(self, weights):
weights_data = self.WEIGHTS_MAP.get(weights)
fname = weights_data.get("fname")
if fname is None:
raise RuntimeError("No such weights: {}".format(fname))
path = os.path.expanduser(os.path.join("~", "neural_obfuscator", fname))
if not os.path.exists(path):
download_file_from_gdrive(weights_data["url"], path)
self.load_state_dict(torch.load(path), strict=False)
def forward(self, latents, use_noise=False, postprocess=True):
if isinstance(latents, np.ndarray):
latents = torch.from_numpy(latents)
x = self.mapping(latents)
x = self.synthesis(x, use_noise=use_noise)
if postprocess:
x = self.postprocess(x)
return x
@staticmethod
def postprocess(images, drange=[-1, 1]):
scale = 255. / (drange[1] - drange[0])
images = images * scale + (0.5 - drange[0] * scale)
images = images.clamp(0, 255)
images = images.data.numpy().astype("uint8")
images = images.transpose(0, 2, 3, 1) # NHWC
images = images[:, :, :, ::-1] # bgr
return images
| tanelp/neural-obfuscator | neural_obfuscator/stylegan.py | stylegan.py | py | 12,295 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torch.rsqrt",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number":... |
17847187153 | # -*- test-case-name: xquotient.test.historic.test_composer5to6 -*-
"""
Create stub database for upgrade of L{xquotient.compose.Composer} from version 5
to version 6.
"""
from axiom.test.historic.stubloader import saveStub
from axiom.dependency import installOn
from axiom.userbase import LoginMethod
from xquotient.compose import Composer
LOCAL = u'foo'
DOMAIN = u'bar'
VERIFIED = True
PROTOCOL = u'*'
INTERNAL = False
def createDatabase(store):
LoginMethod(
store=store, localpart=LOCAL, domain=DOMAIN, verified=VERIFIED,
protocol=PROTOCOL, account=store, internal=INTERNAL)
installOn(Composer(store=store), store)
if __name__ == '__main__':
saveStub(createDatabase, 17729)
| rcarmo/divmod.org | Quotient/xquotient/test/historic/stub_composer5to6.py | stub_composer5to6.py | py | 711 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "axiom.userbase.LoginMethod",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "axiom.dependency.installOn",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "xquotient.compose.Composer",
"line_number": 24,
"usage_type": "call"
},
{
"... |
38914409041 | # python dictonary file
from difflib import get_close_matches
from json import load
#import tkinter as tk
from tkinter import *
# loading dictionary to memory
file = open('data.json', 'r')
data = load(file)
file.close()
# fuction declaration
def translate():
output.delete(0, END)
w = entry_key.get()
w = w.lower() # converting to lowercase
try:
write(data[w])
except:
# list of possible matches max lenghth 3
match = get_close_matches(w, data.keys())
if len(match) > 0:
yes_btn['state'] = NORMAL
no_btn['state'] = NORMAL
# looping for each item in list
for item in match:
write(["Did you mean %s indtead ?" % item])
else:
return ["try othe word"]
else:
return ["word don't exist"]
def yes():
print('yes')
def no():
print('no')
def write(s):
for y in s:
output.insert(END, y)
# output / GUI
app = Tk()
# search space
search_key = StringVar()
entry_key = Entry(app, textvariable=search_key)
entry_key.grid(row=0, column=0, padx=20, pady=20)
# output
output = Listbox(app, height=8, width=50)
output.grid(row=1, column=0, columnspan=3, rowspan=10, pady=20, padx=20)
scrollbar = Scrollbar(app)
scrollbar.grid(row=1, column=3)
output.configure(yscrollcommand=scrollbar.set)
scrollbar.configure(command=output.yview)
# buttons
search_btn = Button(app, text="Search", command=translate)
search_btn.grid(row=0, column=1)
yes_btn = Button(app, text="Yes", command=yes)
yes_btn.grid(row=11, column=0)
yes_btn['state'] = DISABLED
no_btn = Button(app, text="No", command=no)
no_btn.grid(row=11, column=1)
no_btn['state'] = DISABLED
app.title('Dictionary')
app.geometry('400x300')
app.mainloop()
| rinkeshsante/ExperimentsBackup | Python Project Dictionary/app_GUI.py | app_GUI.py | py | 1,797 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "difflib.get_close_matches",
"line_number": 25,
"usage_type": "call"
}
] |
20192619756 | import pprint
from urllib import request
import requests
import pandas as pd
from _kluce import *
movie_id = 551
api_version = 3
api_base_url = f"https://api.themoviedb.org/{api_version}"
endpoint_path = f"/search/movie"
search_query = "Matrix"
endpoint = f"{api_base_url}{endpoint_path}?api_key={api_key}&query={search_query}"
#print (endpoint)
r = requests.get(endpoint)
#pprint.pprint(r.json())
if r.status_code in range(200,299):
data = r.json()
results = data['results']
if len(results)>0:
#print(results[0].keys())
movie_ids = set()
for result in results:
_id = result['id']
#print(result['original_title'], _id)
movie_ids.add(_id)
#print(list(movie_ids))
output = 'movies.csv'
movie_data = []
# Tu vytahujem info ku vsetkým najdeným filmom
for movie_id in movie_ids:
api_version = 3
api_base_url = f"https://api.themoviedb.org/{api_version}"
endpoint_path = f"/movie/{movie_id}"
endpoint = f"{api_base_url}{endpoint_path}?api_key={api_key}"
r = requests.get(endpoint)
if r.status_code in range(200,299):
data = r.json()
movie_data.append(data)
df = pd.DataFrame(movie_data)
print(df.head(6))
df.to_csv(output, index=False)
# what is http method that we need | eavf/30-days | Day132/connect.py | connect.py | py | 1,293 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 44,
"usage_type": "call"
}
] |
71975828513 | #!/usr/bin/env python3
import time
from pyftdi.spi import SpiController
from pycrc.algorithms import Crc
# ---------------------------------------------------------------------------
# DSI utilities
# ---------------------------------------------------------------------------
EOTP = [ 0x08, 0x0f, 0x0f, 0x01 ]
DSI_CRC = Crc(width=16, poly=0x1021, xor_in=0xffff, xor_out=0x0000, reflect_in=True, reflect_out=True)
def parity(x):
p = 0
while x:
p ^= x & 1
x >>= 1
return p
def dsi_header(data):
cmd = (data[2] << 16) | (data[1] << 8) | data[0]
ecc = 0
if parity(cmd & 0b111100010010110010110111): ecc |= 0x01;
if parity(cmd & 0b111100100101010101011011): ecc |= 0x02;
if parity(cmd & 0b011101001001101001101101): ecc |= 0x04;
if parity(cmd & 0b101110001110001110001110): ecc |= 0x08;
if parity(cmd & 0b110111110000001111110000): ecc |= 0x10;
if parity(cmd & 0b111011111111110000000000): ecc |= 0x20;
return data + [ecc]
def dsi_crc(payload):
crc = DSI_CRC.bit_by_bit(bytes(payload))
return [ crc & 0xff, (crc >> 8) & 0xff ]
def dcs_short_write(cmd, val=None):
if val is None:
return dsi_header([0x05, cmd, 0x00])
else:
return dsi_header([0x15, cmd, val])
def dcs_long_write(cmd, data):
pl = [ cmd ] + data
l = len(pl)
return dsi_header([0x39, l & 0xff, l >> 8]) + pl + dsi_crc(pl)
def generic_short_write(cmd, val=None):
if val is None:
return dsi_header([0x13, cmd, 0x00])
else:
return dsi_header([0x23, cmd, val])
def generic_long_write(cmd, data):
pl = [ cmd ] + data
l = len(pl)
return dsi_header([0x29, l & 0xff, l >> 8]) + pl + dsi_crc(pl)
class DSIControl(object):
REG_LCD_CTRL = 0x00
REG_DSI_HS_PREP = 0x10
REG_DSI_HS_ZERO = 0x11
REG_DSI_HS_TRAIL = 0x12
REG_PKT_WR_DATA_RAW = 0x20
REG_PKT_WR_DATA_U8 = 0x21
def __init__(self, spi_frequency=15e6, dsi_frequency=84e6):
# Params
self.spi_frequency = spi_frequency
self.dsi_frequency = dsi_frequency
# SPI link
self.spi = SpiController(cs_count=3)
self.spi.configure('ftdi://ftdi:2232h/1')
self.slave = self.spi.get_port(cs=2, freq=self.spi_frequency, mode=0)
# Init LCD
self.init()
def reg_w16(self, reg, v):
self.slave.exchange([reg, v >> 8, v & 0xff])
def reg_w8(self, reg, v):
self.slave.exchange([reg, v])
def init(self):
# Default values
self.backlight = 0x100
# Turn off Back Light / HS clock and assert reset
self.reg_w16(self.REG_LCD_CTRL, 0x8000)
# Wait a bit
time.sleep(0.1)
# Configure backlight and release reset
self.reg_w16(self.REG_LCD_CTRL, self.backlight)
# Configure DSI timings
self.reg_w8(self.REG_DSI_HS_PREP, 0x10)
self.reg_w8(self.REG_DSI_HS_ZERO, 0x18)
self.reg_w8(self.REG_DSI_HS_TRAIL, 0x18)
# Enable HS clock
self.reg_w16(self.REG_LCD_CTRL, 0x4000 | self.backlight)
# Wait a bit
time.sleep(0.1)
# Send DSI packets
self.send_dsi_pkt(
dcs_short_write(0x11) + # Exist sleep
dcs_short_write(0x29) + # Display on
dcs_short_write(0x36, 0x00) + # Set address mode
dcs_short_write(0x3a, 0x55) + # Set pixel format
EOTP # EoTp
)
def send_dsi_pkt(self, data):
# Write data
self.slave.exchange([self.REG_PKT_WR_DATA_RAW] + data)
def set_column_address(self, sc, ec):
self.send_dsi_pkt(dcs_long_write(0x2a, [
sc >> 8,
sc & 0xff,
ec >> 8,
ec & 0xff,
]))
def set_page_address(self, sp, ep):
self.send_dsi_pkt(dcs_long_write(0x2b, [
sp >> 8,
sp & 0xff,
ep >> 8,
ep & 0xff,
]))
def send_frame(self, frame, width=240, height=240):
# Max packet size
mtu = 1024 - 4 - 1 - 2
psz = (mtu // (2 * width)) * (2 * width)
pcnt = (width * height * 2 + psz - 1) // psz
for i in range(pcnt):
self.send_dsi_pkt(
dsi_header([0x39, (psz + 1) & 0xff, (psz + 1) >> 8]) +
[ 0x2C if i == 0 else 0x3C ] +
frame[i*psz:(i+1)*psz] +
[0x00, 0x00]
)
def send_frame8(self, frame, width=240, height=240):
# Max packet size
mtu = 1024 - 4 - 1 - 2
psz = (mtu // (2 * width)) * (2 * width)
pcnt = (width * height * 2 + psz - 1) // psz
for i in range(pcnt):
self.slave.exchange([self.REG_PKT_WR_DATA_U8] +
dsi_header([0x39, (psz + 1) & 0xff, (psz + 1) >> 8]) +
[ 0x2C if i == 0 else 0x3C ] +
frame[i*(psz//2):(i+1)*(psz//2)] +
[ 0x00 ]
)
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def nyan_load(filename='nyan-square.data'):
img = open(filename,'rb').read()
dat = []
for i in range(len(img) // 4):
b = img[4*i + 0]
g = img[4*i + 1]
r = img[4*i + 2]
c = ((r >> 3) & 0x1f) << 11;
c |= ((g >> 2) & 0x3f) << 5;
c |= ((b >> 3) & 0x1f) << 0;
dat.append( ((c >> 0) & 0xff) )
dat.append( ((c >> 8) & 0xff) )
return dat
if __name__ == '__main__':
ctrl = DSIControl(spi_frequency=10e6)
data = nyan_load()
ctrl.send_frame(data)
| esden/icebreaker-temp | nano-pmod-up5k/control.py | control.py | py | 4,883 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pycrc.algorithms.Crc",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyftdi.spi.SpiController",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "time.sleep"... |
73206785314 | """Support for HomeSeer light-type devices."""
import asyncio
import logging
from typing import Any
from homeassistant.components.light import (
ToggleEntity, LightEntity
)
from homeassistant.const import (
CONF_NAME, STATE_ON, STATE_OFF
)
from .command import (turn_off, turn_light_up)
_LOGGER = logging.getLogger(__name__)
LIGHT_PLATFORMS = ["light"]
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Perform the setup for Beverly water purifier."""
light_entities = []
light = MicoeLight(hass, config.get(CONF_NAME))
light_entities.append(light)
_LOGGER.warn('async_setup_platform')
async_add_devices(light_entities)
class MicoeLight(LightEntity):
"""Representation of a HomeSeer light-type device."""
def __init__(self, hass, name):
self._hass = hass
self._name = name
self._attr_is_on = False
@property
def icon(self):
return 'hass:lightbulb'
@property
def name(self):
return self._name
def turn_on(self, **kwargs: Any) -> None:
turn_light_up('Light')
self._attr_is_on = True
def turn_off(self, **kwargs: Any) -> None:
turn_off()
self._attr_is_on = False
| ettingshausen/hass-micoe-bath-heater | custom_components/micoe_bath_light/light.py | light.py | py | 1,252 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_NAME",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "homeassistant.components.light.LightEntity",
"line_number": 32,
"usage_type": "name"
... |
35253854263 | import pymongo
import yfinance as yf
from pymongo import collection
from .stock_price import get_stock_price
import os
CLIENT = pymongo.MongoClient("mongodb+srv://App:ZU5u0b56vYc7xY15@stockopositions.r5bip.mongodb.net/StockoPositions?retryWrites=true&w=majority")
DB = CLIENT.test
USER_COLLECTION = DB.get_collection('dev-users')
PRICE_COLLECTION = DB.get_collection('dev-prices')
# db.create_collection('users')
def add_user(username):
if not USER_COLLECTION.find_one({'username': username}):
added_document = USER_COLLECTION.insert_one({'username': username, 'positions': []})
return 1
else:
return 0
# print(USER_COLLECTION.find_one({'username': username}))
def add_positions(username, positions): # positions format [{'ticker': 'ticker', 'avg_price': 250, 'qty': 10}, ...]
# user_positions = USER_COLLECTION.find({'username': username}, {'positions': 1})[0]['positions']
for pos in positions:
add_position(username, pos['ticker'], pos['avg_price'], pos['qty'])
def add_position(username, ticker, avg_price, qty):
get_price(ticker)
add_user(username)
positions = USER_COLLECTION.find({'username': username}, {'positions': 1})[0]['positions']
print(positions)
exists = False
for my_dict in positions:
if my_dict['ticker'] == ticker:
exists = True
break
if not exists:
positions.append({'ticker': ticker, 'avg_price': avg_price, 'qty': qty})
USER_COLLECTION.find_one_and_update({'username': username}, {'$set' : {'positions': positions}})
return 1
else:
print('Ticker already in portfolio')
return 0
def get_positions(username):
add_user(username)
positions = USER_COLLECTION.find({'username': username}, {'positions': 1})[0]['positions']
print(positions)
portfolio_cost = 0
portfolio_value = 0
for position_index in range(len(positions)):
qty = positions[position_index]['qty']
avg_price = positions[position_index]['avg_price']
price = float(get_price(positions[position_index]['ticker']))
positions[position_index]['price'] = price
positions[position_index]['profit'] = round((float(qty)*price)-float(avg_price)*float(qty), 2)
positions[position_index]['pct_change'] = round((price-float(avg_price))/float(avg_price), 2)
portfolio_cost += float(avg_price)*float(qty)
portfolio_value += price*float(qty)
portfolio_pct_change = 0
portfolio_profit = 0
if portfolio_cost != 0:
portfolio_pct_change = round((portfolio_value-portfolio_cost)/portfolio_cost, 2)
portfolio_profit = round(portfolio_value-portfolio_cost, 2)
#positions['portfolio_stats'] = {'pct_change': portfolio_pct_change, 'profit': portfolio_profit}
return (positions, {'portfolio_pct_chage': portfolio_pct_change, 'portfolio_profit': portfolio_profit})
#return USER_COLLECTION.find({'username': username}, {'positions': 1})[0]['positions']
def edit_positions(username, ticker, avg_price, qty):
add_user(username)
positions = USER_COLLECTION.find({'username': username}, {'positions': 1})[0]['positions']
for pos in range(0, len(positions)):
if positions[pos]['ticker'] == ticker:
if qty == 0:
del positions[pos]
else:
if avg_price:
positions[pos]['avg_price'] = avg_price
if qty:
positions[pos]['qty'] = qty
USER_COLLECTION.find_one_and_update({'username': username}, {'$set' : {'positions': positions}})
return 1
return 0
def add_ticker(ticker):
PRICE_COLLECTION.insert_one({'ticker': ticker, 'price': 0})
def change_price(ticker, price):
PRICE_COLLECTION.find_one_and_update({'ticker': ticker}, {'ticker': ticker, 'price': price})
def gather_active_tickers():
all_active_tickers = []
all_users = USER_COLLECTION.find({},{'positions': 1})
for user in all_users:
for pos in user['positions']:
if not pos['ticker'] in all_active_tickers:
all_active_tickers.append(pos['ticker'])
print(all_active_tickers)
return all_active_tickers
def refine_prices_db(active_tickers):
all_prices = PRICE_COLLECTION.find({},{'positions': 1})
for price in all_prices:
if not price['ticker'] in active_tickers:
PRICE_COLLECTION.delete_one({'id': price['id']})
def update_prices(active_tickers):
for ticker in active_tickers:
price = yf.Ticker(ticker).info['regularMarketPrice']
if not PRICE_COLLECTION.find_one_and_update({'ticker': ticker}, {'$set': {'price': price}}):
PRICE_COLLECTION.insert_one({'ticker': ticker, 'price': price})
#yf.Ticker(ticker).info['regularMarketPrice']
def get_price(ticker):
key = open(os.getcwd() + '/backend_api/backend_processing/key.txt').read()
api_urls = {}
api_urls["intraday_url"]= 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=' + ticker + '&interval=1min&apikey=' + key
stock_price = get_stock_price(ticker, api_urls)
# print("Stock Price: " + stock_price, end = '\n\n')
return stock_price
def main():
print(get_positions('obradymack'))
#dd_position('obradymack', 'CRM', '100', '5')
# add_user('cadavis21')
# add_user('brendanlucich')
# add_positions('brendanlucich', [{'ticker': 'TEAM', 'qty': 10, 'avg_price': 256}, {'ticker': 'AAPL', 'qty': 20, 'avg_price': 125}])
# update_prices(gather_active_tickers())
# get_price('TEAM')
# update_prices(gather_active_tickers())
# add_user('Brady')
# add_positions('Brady', [{'ticker': 'TEAM', 'qty': 10, 'avg_price': 256}, {'ticker': 'AAPL', 'qty': 20, 'avg_price': 125}])
# add_user('Brendan')
# add_positions('Brendan', [{'ticker': 'F', 'qty': 10, 'avg_price': 256}, {'ticker': 'CRM', 'qty': 20, 'avg_price': 125}])
if __name__ == '__main__':
main()
| Asetka/Stocko | backend_api/backend_processing/db_wrapper.py | db_wrapper.py | py | 6,000 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "yfinance.Ticker",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "stock_price.get_stock_... |
5579649281 | import keyboard
from time import time
from communication.bt_server import BT_Server
from communication.KEYBOARD_CONFIG import KEYBOARD_CONFIG_DICT, DEBOUNCE_INTERVAL
from communication.BT_CONFIG import BT_CONTROLLER_DICT
def graceful_exit(func):
def wrapper(*args, **kw_args):
try:
return func(*args, **kw_args)
except KeyboardInterrupt:
self = args[0]
self.server.clean_up()
print("goodbye.")
return
return wrapper
class Controller:
def __init__(self, debug=False):
# how often should commands be sent
# NOTE: these values are in seconds, the debounce_interval should be
# roughly equal to RUN_TIME defined in the MotorDriver class
self.debounce_interval = DEBOUNCE_INTERVAL
self.last_press = 0.0
self.debug = debug
# keyboard_config_dict should be a dictionary with the following keys
# this decides which keys are listened for and what messages they send
self.right = KEYBOARD_CONFIG_DICT['RIGHT']
self.left = KEYBOARD_CONFIG_DICT['LEFT']
self.forward = KEYBOARD_CONFIG_DICT['FORWARD']
self.backward = KEYBOARD_CONFIG_DICT['BACKWARD']
self.exit = KEYBOARD_CONFIG_DICT['EXIT']
# Run the server. It will be connected to by the leader's client
controller_addr = BT_CONTROLLER_DICT["CONTROLLER"]["ADDR"]
controller_port = BT_CONTROLLER_DICT["CONTROLLER"]["PORT"]
self.server = BT_Server(controller_addr, controller_port, "CONTROLLER", debug=debug)
@graceful_exit
def start(self):
while True:
if keyboard.is_pressed(self.forward) and not self.debounce():
if self.debug:
print("\nForward press detected")
self.last_press = time()
self.server.send('forward')
if keyboard.is_pressed(self.right) and not self.debounce():
if self.debug:
print("\nRight press detected")
self.last_press = time()
self.server.send('right')
if keyboard.is_pressed(self.left) and not self.debounce():
if self.debug:
print("\nLeft press detected")
self.last_press = time()
self.server.send('left')
if keyboard.is_pressed(self.backward) and not self.debounce():
if self.debug:
print("\nBackward press detected")
self.last_press = time()
self.server.send('backward')
if keyboard.is_pressed(self.exit) and not self.debounce():
if self.debug:
print("\nExit press detected")
raise KeyboardInterrupt
@graceful_exit
def debounce(self):
elapsed_time = time() - self.last_press
if elapsed_time > self.debounce_interval:
return False
else:
return True
if __name__ == '__main__':
control = Controller(True)
control.start()
| ankitasharma1/swarmbots | raft/controller.py | controller.py | py | 3,093 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "communication.KEYBOARD_CONFIG.DEBOUNCE_INTERVAL",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "communication.KEYBOARD_CONFIG.KEYBOARD_CONFIG_DICT",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "communication.KEYBOARD_CONFIG.KEYBOARD_CONFIG_... |
10821283659 | import argparse
import torch
import torch.nn as nn
from torch.utils import data
import numpy as np
from torch.autograd import Variable
import torch.optim as optim
import torch.backends.cudnn as cudnn
import os
import os.path as osp
from multiframe3_CoAttentionSTN import FtoFAttentionModel
import scipy.io as sio
from Porcine_Dataloader import PorcineDataset
import torch.nn.functional as F
import visdom
import math
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def CrossEntropyLoss(pred, label):
#label = Variable(label.long()).cuda()
criterion = torch.nn.BCELoss().cuda()
return criterion(pred, label)
def generalized_diceLoss(pred, label):
smooth = 1
inputs = pred.view(-1)
targets = label.view(-1)
intersection = (inputs * targets).sum()
dice = (2. * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
return 1 - dice
def RegistrationLoss(img_fixed, img_moving, dx, dy, dz):
#num_bats = img_moving.shape[0] #number of samples in a batch
num_rows = img_moving.shape[2]
num_cols = img_moving.shape[3]
num_depth = img_moving.shape[4]
zv, yv, xv = torch.meshgrid([torch.arange(0, num_depth),
torch.arange(0, num_rows),
torch.arange(0, num_cols)])
xv = xv.float().to(device)
yv = yv.float().to(device)
zv = zv.float().to(device)
xv_moved = xv + dx[0, :, :, :]
yv_moved = yv + dy[0, :, :, :]
zv_moved = zv + dz[0, :, :, :]
xv_moved_normalized = 2 * xv_moved / (num_cols - 1) - 1
yv_moved_normalized = 2 * yv_moved / (num_rows - 1) - 1
zv_moved_normalized = 2 * zv_moved / (num_depth - 1) - 1
grid = torch.stack((xv_moved_normalized, yv_moved_normalized, zv_moved_normalized), 3)
grid = grid.unsqueeze(0).float().to(device)
out = F.grid_sample(img_moving.float(), grid, align_corners=True)
return ((img_fixed - out)**2).mean()
class SpatialTransformer(nn.Module):
def __init__(self, mode='bilinear'):
super(SpatialTransformer, self).__init__()
# Create the grid
size = [64, 64, 64]
self.mode = mode
vectors = [torch.arange(0, s) for s in size]
mesh_1, mesh_2, mesh_3 = torch.meshgrid(vectors)
grid = torch.stack((mesh_1, mesh_2, mesh_3), 3)
grid = grid.unsqueeze(0).float().to('cuda')
self.register_buffer('grid', grid)
self.sigmoid = nn.Sigmoid()
def forward(self, src, flow):
shape = flow.shape[2:]
flow = flow.permute(0, 2, 3, 4, 1)
new_loc = self.grid + flow
# Need to normalize grid values to [-1, 1] for resampler
new_loc[:, :, :, :, 0] = 2 * (new_loc[:, :, :, :, 0] / (shape[0] - 1) - 0.5)
new_loc[:, :, :, :, 1] = 2 * (new_loc[:, :, :, :, 1] / (shape[1] - 1) - 0.5)
new_loc[:, :, :, :, 2] = 2 * (new_loc[:, :, :, :, 2] / (shape[2] - 1) - 0.5)
# Need to flip the channels
new_loc = new_loc[..., [2, 1, 0]]
return F.grid_sample(src, new_loc, align_corners=True, mode=self.mode)
class NCC:
"""
Local (over window) normalized cross correlation loss.
"""
def __init__(self, win=None):
self.win = win
def loss(self, y_true, y_pred):
Ii = y_true
Ji = y_pred
# get dimension of volume
# assumes Ii, Ji are sized [batch_size, *vol_shape, nb_feats]
ndims = len(list(Ii.size())) - 2
assert ndims in [1, 2, 3], "volumes should be 1 to 3 dimensions. found: %d" % ndims
# set window size
win = [9] * ndims if self.win is None else self.win
# compute filters
sum_filt = torch.ones([1, 1, *win]).to("cuda")
pad_no = math.floor(win[0] / 2)
if ndims == 1:
stride = (1)
padding = (pad_no)
elif ndims == 2:
stride = (1, 1)
padding = (pad_no, pad_no)
else:
stride = (1, 1, 1)
padding = (pad_no, pad_no, pad_no)
# get convolution function
conv_fn = getattr(F, 'conv%dd' % ndims)
# compute CC squares
I2 = Ii * Ii
J2 = Ji * Ji
IJ = Ii * Ji
I_sum = conv_fn(Ii, sum_filt, stride=stride, padding=padding)
J_sum = conv_fn(Ji, sum_filt, stride=stride, padding=padding)
I2_sum = conv_fn(I2, sum_filt, stride=stride, padding=padding)
J2_sum = conv_fn(J2, sum_filt, stride=stride, padding=padding)
IJ_sum = conv_fn(IJ, sum_filt, stride=stride, padding=padding)
win_size = np.prod(win)
u_I = I_sum / win_size
u_J = J_sum / win_size
cross = IJ_sum - u_J * I_sum - u_I * J_sum + u_I * u_J * win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I * u_I * win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J * u_J * win_size
cc = cross * cross / (I_var * J_var + 1e-5)
return 1 - torch.mean(cc)
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Attention Network")
# optimitization configuration
parser.add_argument("--is-training", action="store_true",
help="Whether to update the running means and variances during the training.")
parser.add_argument("--learning-rate", type=float, default=0.00001,
help="Base learning rate for training with polynomial decay.") # 0.001
parser.add_argument("--weight-decay", type=float, default=0.0005,
help="Regularization parameter for L2-loss.") # 0.0005
parser.add_argument("--momentum", type=float, default=0.9,
help="Momentum component of the optimiser.")
parser.add_argument("--power", type=float, default=0.9,
help="Decay parameter to compute the learning rate.")
parser.add_argument("--random-seed", type=int, default=1234,
help="Random seed to have reproducible results.")
return parser.parse_args()
args = get_arguments()
def configure_dataset_init_model(args):
args.batch_size = 1 # 1 card: 5, 2 cards: 10 Number of images sent to the network in one step, 16 on paper
args.maxEpoches = 100 # 1 card: 15, 2 cards: 15 epoches, equal to 30k iterations, max iterations= maxEpoches*len(train_aug)/batch_size_per_gpu'),
def netParams(model):
'''
Computing total network parameters
Args:
model: model
return: total network parameters
'''
total_parameters = 0
for parameter in model.parameters():
i = len(parameter.size())
# print(parameter.size())
p = 1
for j in range(i):
p *= parameter.size(j)
total_parameters += p
return total_parameters
def main():
    """Train the frame-to-frame co-attention motion-tracking model (porcine).

    Runs a train/validation loop for up to ``maxEpoch`` epochs, plots both
    losses to a visdom window, and snapshots the model plus a .mat dump of
    sample outputs whenever the validation loss improves.

    BUG FIX vs. original: the training phase previously iterated over
    ``validation_generator``, so the optimiser only ever saw the validation
    set and the training set was used for nothing but ``len()``. The train
    phase now iterates ``training_generator``.
    """
    print("=====> Integrated Motion Tracking (Image Only) - Porcine")
    print("=====> Set GPU for training")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cudnn.enabled = True
    print("=====> Building network")
    model = FtoFAttentionModel()
    model.to(device)
    spatial_transform = SpatialTransformer()
    mseloss = nn.MSELoss()
    cudnn.benchmark = True
    print('=====> Computing network parameters')
    total_parameters = netParams(model)
    print('Total network parameters: ' + str(total_parameters))
    print('Learning Rate: ' + str(args.learning_rate))
    maxEpoch = 100
    print("=====> Preparing training data")
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
    optimizer.zero_grad()
    vis = visdom.Visdom()
    Y1 = torch.zeros((1)).cpu()
    X1 = torch.zeros((1)).cpu()
    loss_window = vis.line(
        Y=np.column_stack((Y1, Y1)),
        X=np.column_stack((X1, X1)),
        opts=dict(xlabel='epoch', ylabel='Loss', title='CoAttentionSTN+Temporal (3frame)',
                  legend=['training', 'validation']))
    print("=====> Preparing Dataloader")
    snapshot_dir = '/data/CoAttentionSTN_MedIA22/full_cycle_results_only_pstrain_model/new_trained_model_results'
    train_dataset = PorcineDataset('/data/Training_Data/MedIA2022_CoAttentionSTN/Porcine/full_cycle/train')
    val_dataset = PorcineDataset('/data/Training_Data/MedIA2022_CoAttentionSTN/Porcine/full_cycle/validation')
    params = {'batch_size': 1,
              'shuffle': True}
    training_generator = torch.utils.data.DataLoader(train_dataset, **params)
    validation_generator = torch.utils.data.DataLoader(val_dataset, **params)
    print('Total number of training images: ' + str(len(training_generator)))
    print('Total number of validation images: ' + str(len(validation_generator)))
    lowest_loss = 1e6
    print("=====> Begin to train")
    for epoch in range(0, maxEpoch):
        train_loss = 0.0
        val_loss = 0.0
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
                np.random.seed(args.random_seed + epoch)
                # Fixed: iterate the TRAINING set here (was validation_generator).
                for img, filename in training_generator:
                    # Transfer to GPU and rescale 8-bit intensities to [0, 1].
                    local_batch = img.to(device)
                    x_batch = local_batch.float()
                    x_batch /= 255
                    x_batch.requires_grad_()
                    image = Variable(x_batch)
                    optimizer.zero_grad()
                    pred_disp_es_ed, frame1, frame2, frame_mid, exemplar_mask, query_mask, interm_mask, pred_disp12, pred_disp23 = model(image)
                    # Warp the raw frames and the attention-masked frames with
                    # the predicted displacement fields.
                    deformed_img1 = spatial_transform(image[:, 0, :, :, :].unsqueeze(1), pred_disp_es_ed)
                    deformed_img2 = spatial_transform(frame1, pred_disp_es_ed)
                    deformed_img3 = spatial_transform(frame1, pred_disp12)
                    deformed_img4 = spatial_transform(frame_mid, pred_disp23)
                    deformed_int1 = spatial_transform(image[:, 0, :, :, :].unsqueeze(1), pred_disp12)
                    deformed_int2 = spatial_transform(image[:, 1, :, :, :].unsqueeze(1), pred_disp23)
                    deformed_attention_map1 = spatial_transform(exemplar_mask, pred_disp_es_ed)
                    deformed_attention_map2 = spatial_transform(exemplar_mask, pred_disp12)
                    deformed_attention_map3 = spatial_transform(interm_mask, pred_disp23)
                    # Image similarity losses on raw and attention-masked frames.
                    loss_raw_img1 = mseloss(deformed_img1, image[:, 2, :, :, :].unsqueeze(1))
                    loss_raw_img2 = mseloss(deformed_int1, image[:, 1, :, :, :].unsqueeze(1))
                    loss_raw_img3 = mseloss(deformed_int2, image[:, 2, :, :, :].unsqueeze(1))
                    loss_att_img1 = mseloss(deformed_img2, frame2)
                    loss_att_img2 = mseloss(deformed_img3, frame_mid)
                    loss_att_img3 = mseloss(deformed_img4, frame2)
                    # NCC losses align the warped attention maps with the targets.
                    loss_attention1 = NCC().loss(deformed_attention_map1, query_mask)
                    loss_attention2 = NCC().loss(deformed_attention_map2, interm_mask)
                    loss_attention3 = NCC().loss(deformed_attention_map3, query_mask)
                    raw_img_loss = loss_raw_img1 + loss_raw_img2 + loss_raw_img3
                    att_img_loss = loss_att_img1 + loss_att_img2 + loss_att_img3
                    att_loss = loss_attention1 + loss_attention2 + loss_attention3
                    # Temporal consistency: composing the two half-cycle fields
                    # should reproduce the full ES->ED displacement.
                    temp_cons = pred_disp12 + pred_disp23
                    temp_loss = mseloss(temp_cons, pred_disp_es_ed)
                    total_loss = raw_img_loss + att_img_loss + 0.02 * temp_loss + 0.05 * att_loss
                    total_loss.backward()
                    optimizer.step()
                    train_loss += total_loss.data
            else:
                model.eval()
                for img, filename in validation_generator:
                    # Transfer to GPU
                    local_batch = img.to(device)
                    x_batch = local_batch.float()
                    x_batch /= 255
                    x_batch.requires_grad_()
                    image = Variable(x_batch)
                    with torch.no_grad():
                        pred_disp_es_ed, frame1, frame2, frame_mid, exemplar_mask, query_mask, interm_mask, pred_disp12, pred_disp23 = model(image)
                        deformed_img1 = spatial_transform(image[:, 0, :, :, :].unsqueeze(1), pred_disp_es_ed)
                        deformed_img2 = spatial_transform(frame1, pred_disp_es_ed)
                        deformed_img3 = spatial_transform(frame1, pred_disp12)
                        deformed_img4 = spatial_transform(frame_mid, pred_disp23)
                        deformed_int1 = spatial_transform(image[:, 0, :, :, :].unsqueeze(1), pred_disp12)
                        deformed_int2 = spatial_transform(image[:, 1, :, :, :].unsqueeze(1), pred_disp23)
                        deformed_attention_map1 = spatial_transform(exemplar_mask, pred_disp_es_ed)
                        deformed_attention_map2 = spatial_transform(exemplar_mask, pred_disp12)
                        deformed_attention_map3 = spatial_transform(interm_mask, pred_disp23)
                        loss_raw_img1 = mseloss(deformed_img1, image[:, 2, :, :, :].unsqueeze(1))
                        loss_raw_img2 = mseloss(deformed_int1, image[:, 1, :, :, :].unsqueeze(1))
                        loss_raw_img3 = mseloss(deformed_int2, image[:, 2, :, :, :].unsqueeze(1))
                        loss_att_img1 = mseloss(deformed_img2, frame2)
                        loss_att_img2 = mseloss(deformed_img3, frame_mid)
                        loss_att_img3 = mseloss(deformed_img4, frame2)
                        loss_attention1 = NCC().loss(deformed_attention_map1, query_mask)
                        loss_attention2 = NCC().loss(deformed_attention_map2, interm_mask)
                        loss_attention3 = NCC().loss(deformed_attention_map3, query_mask)
                        raw_img_loss = loss_raw_img1 + loss_raw_img2 + loss_raw_img3
                        att_img_loss = loss_att_img1 + loss_att_img2 + loss_att_img3
                        att_loss = loss_attention1 + loss_attention2 + loss_attention3
                        temp_cons = pred_disp12 + pred_disp23
                        temp_loss = mseloss(temp_cons, pred_disp_es_ed)
                        total_loss = raw_img_loss + att_img_loss + 0.02 * temp_loss + 0.05 * att_loss
                        val_loss += total_loss.data
        scheduler.step()
        train_loss /= len(training_generator)
        val_loss /= len(validation_generator)
        vis.line(X=torch.ones((1, 1)).cpu() * epoch, Y=torch.Tensor([train_loss]).unsqueeze(0).cpu(), win=loss_window,
                 name='training',
                 update='append')
        vis.line(X=torch.ones((1, 1)).cpu() * epoch, Y=torch.Tensor([val_loss]).unsqueeze(0).cpu(), win=loss_window,
                 name='validation',
                 update='append')
        os.chdir(snapshot_dir)
        temp_epoch = epoch
        # Snapshot the model and sample outputs from the last validation batch
        # whenever validation loss improves.
        if phase == 'val' and val_loss < lowest_loss:
            lowest_loss = val_loss
            torch.save(model.state_dict(), osp.join(snapshot_dir, 'CoAttentionSTN_3frame_w_att_loss_full_cycle.pth'))
            file_name = 'CoAttentionSTN_3frame_w_att_loss_full_cycle.mat'
            image = image.cpu().detach().numpy()
            exemplar_mask = exemplar_mask.cpu().detach().numpy()
            query_mask = query_mask.cpu().detach().numpy()
            pred_disp_es_ed = pred_disp_es_ed.cpu().detach().numpy()
            deformed_img1 = deformed_img1.cpu().detach().numpy()
            deformed_img2 = deformed_img2.cpu().detach().numpy()
            frame1 = frame1.cpu().detach().numpy()
            frame2 = frame2.cpu().detach().numpy()
            sio.savemat(file_name, {'image': image, 'pred_disp_es_ed': pred_disp_es_ed, 'deformed1': deformed_img1,
                                    'deformed2': deformed_img2, 'frame1': frame1, 'frame2': frame2, 'epoch': temp_epoch,
                                    'exemplar_mask': exemplar_mask, 'query_mask': query_mask})
            print('Epoch: {0:3d} | train_loss: {1:2f} | val_loss: {2:2f} | Model Saved'.format(epoch, train_loss, val_loss))
        else:
            print('Epoch: {0:3d} | train_loss: {1:2f} | val_loss: {2:2f} |'.format(epoch, train_loss, val_loss))
if __name__ == '__main__':
    main()  # script entry point; args is already parsed at import time
{
"api_name": "torch.device",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.BCELoss... |
32174735176 | import os
import io
import base64
import os.path
from zipfile import ZipFile
from odoo import api, fields, models
from odoo.tools.safe_eval import safe_eval
class ExportNfe(models.TransientModel):
    """Wizard that bundles NF-e / NFC-e / NFS-e XMLs and PDFs into a ZIP."""
    _name = 'wizard.export.nfe'
    _description = "Exporta NF-e"

    start_date = fields.Date(string="Data Inicial", required=True)
    end_date = fields.Date(string="Data Final", required=True)
    zip_file = fields.Binary('Arquivo', readonly=True)
    zip_file_name = fields.Char('Nome', size=255)
    emitter = fields.Selection([('propria', 'Própria'), ('terceiros', 'Terceiros')], string="Emissão")
    edoc_type = fields.Selection([('entrada', 'Entrada'), ('saida', 'Saída')], string="Tipo de Operação")
    model = fields.Selection(
        [('nfe', '55 - NFe'),
         ('nfce', '65 - NFCe'),
         ('nfse', 'NFS-e - Nota Fiscal de Servico')],
        string='Modelo a exportar')
    state = fields.Selection(
        [('init', 'init'), ('done', 'done')],
        'state', readonly=True, default='init')

    def _save_zip(self, xmls, pdfs):
        """Write the given payloads to a ZIP archive and return it base64-encoded.

        Args:
            xmls: list of dicts with 'name' (str) and 'content' (str).
            pdfs: list of dicts with 'name' (str) and 'content' (bytes).

        Returns:
            bytes: base64-encoded ZIP archive suitable for a Binary field.
        """
        tmp = '/tmp/odoo/nfse-export/'
        # exist_ok replaces the original bare try/except around makedirs,
        # which also silently hid real failures such as permission errors.
        os.makedirs(tmp, exist_ok=True)
        zip_base64 = io.BytesIO()
        zip_file = ZipFile(zip_base64, 'w')
        for xml in xmls:
            filename = os.path.join(tmp, xml['name'])
            with open(filename, 'w') as xml_file:
                xml_file.write(xml['content'])
            zip_file.write(filename, xml['name'])
        for pdf in pdfs:
            filename = os.path.join(tmp, pdf['name'])
            with open(filename, 'wb') as pdf_file:
                pdf_file.write(pdf['content'])
            zip_file.write(filename, pdf['name'])
        zip_file.close()
        zip_base64.seek(0)
        return base64.b64encode(zip_base64.getvalue())

    def nfse_export(self):
        """Collect matching electronic documents, build the ZIP, and reopen
        the wizard form with the download ready."""
        search_vals = []
        search_vals.append(('data_emissao', '>=', self.start_date))
        search_vals.append(('data_emissao', '<=', self.end_date))
        # Own emissions keep processed/cancelled/denied docs; third-party
        # docs are the ones imported from outside.
        if self.emitter == 'propria':
            search_vals.append(('state', 'in', ['cancel', 'done', 'denied']))
        elif self.emitter == 'terceiros':
            search_vals.append(('state', '=', 'imported'))
        else:
            search_vals.append(('state', 'in', ['cancel', 'done', 'denied', 'imported']))
        if self.edoc_type:
            search_vals.append(('tipo_operacao', '=', self.edoc_type))
        if self.model:
            search_vals.append(('model', '=', self.model))
        invoice_ids = self.env['eletronic.document'].search(search_vals)
        xmls = []
        pdfs = []
        for invoice in invoice_ids:
            if invoice.nfe_processada:
                # base64.decodestring was removed in Python 3.9; decodebytes
                # is the drop-in replacement (available since 3.1).
                xmls.append({
                    'content': base64.decodebytes(invoice.nfe_processada).decode(),
                    'name': invoice.nfe_processada_name
                })
            if invoice.nfse_pdf:
                pdfs.append({
                    'content': base64.decodebytes(invoice.nfse_pdf),
                    'name': invoice.nfse_pdf_name
                })
            if invoice.model == 'nfe':
                # Render the DANFE report to PDF for NF-e documents.
                danfe_report = self.env['ir.actions.report'].search(
                    [('report_name', '=', 'l10n_br_eletronic_document.main_template_br_nfe_danfe')])
                report_service = danfe_report.xml_id
                danfe, dummy = self.env.ref(report_service)._render_qweb_pdf([invoice.id])
                report_name = safe_eval(danfe_report.print_report_name, {'object': invoice})
                filename = "%s.%s" % (report_name, "pdf")
                pdfs.append({
                    'content': danfe,
                    'name': filename
                })
        self.zip_file = self._save_zip(xmls, pdfs)
        self.zip_file_name = 'xml_nfe_exportacao.zip'
        self.state = 'done'
        mod_obj = self.env['ir.model.data'].search(
            [('model', '=', 'ir.ui.view'),
             ('name', '=', 'view_wizard_export_nfe')])
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': self.id,
            'views': [(mod_obj.res_id, 'form')],
            'target': 'new',
        }
| Trust-Code/odoo-brasil | l10n_br_eletronic_document/wizard/export_nfe.py | export_nfe.py | py | 4,275 | python | en | code | 178 | github-code | 1 | [
{
"api_name": "odoo.models.TransientModel",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "odoo.fie... |
6421783280 |
import datetime
from flask import jsonify, request, Blueprint
from flask_jwt import jwt_required, current_identity
from flasgger.utils import swag_from
from app.utils.utils import serialize, get_flag_by_id
from app.utils.validate_redflag import Validate_redflag
from app.models.redflag import Redflag
from database.incidents_db import IncidentsDB
from database.media_db import MediaDB
from database.users_db import UsersDB
# Blueprint grouping all incident (red-flag / intervention) endpoints.
redflags_view = Blueprint('redflags_view', __name__)
# Shared data-access singletons used by every view below.
incidents_db = IncidentsDB()
mediaDB = MediaDB()
@redflags_view.route('/ireporter/api/v2/<type>', methods=["GET"])
@swag_from('../docs/redflags/getflags.yml')
def getredflags(type):
    """Return every incident of the given type ('red-flags' or 'interventions').

    Public endpoint (no JWT). Each record is augmented with the URLs of its
    attached videos and images before being returned.
    """
    if not (type in ["red-flags","interventions"]):
        # NOTE(review): status is a string "404" here but an int elsewhere in
        # this module — presumably unintentional; confirm before normalising.
        return jsonify({"status":"404", "error":"Invalid URL"}), 404
    # The DB layer stores the singular form ('red-flag' / 'intervention').
    regflags = incidents_db.regflags(type.rstrip('s'))
    # NOTE(review): the DB helper appears to signal "no rows" with the literal
    # string 'False' — verify against IncidentsDB before simplifying.
    if not (regflags and regflags != 'False'):
        return jsonify({"status":"404", "error":f"No {type} found"}), 404
    for flag in regflags:
        # flag_media returns {'data': [...]} rows; item[0] unwraps each row
        # to the media reference itself.
        flag['Video'] = [item[0] for item in mediaDB.flag_media(**{'type':'video','redflag':flag['flag_id']}).get('data',[])]
        flag['Image'] = [item[0] for item in mediaDB.flag_media(**{'type':'image','redflag':flag['flag_id']}).get('data',[])]
    return jsonify({"status":200,
                    "data":regflags
                    }), 200
@redflags_view.route('/ireporter/api/v2/<type>', methods=["POST"])
@jwt_required()
@swag_from('../docs/redflags/postflag.yml')
def postredflag(type):
    """Create a new incident of the given type for the authenticated user.

    The request body is passed straight to the Redflag constructor; server-side
    fields (creation date, creator, status, type, username) are then overwritten
    before validation and persistence.
    """
    if not (type in ["red-flags","interventions"]):
        return jsonify({"status":"404", "error":"Invalid URL"}), 404
    try:
        data = request.get_json()
    except:
        return jsonify({"status":400, "error":"No data was posted"}), 400
    # NOTE(review): unexpected keys in the payload would raise TypeError here
    # (uncaught) — confirm Redflag tolerates arbitrary kwargs.
    new_red_flag = Redflag(**data)
    # Reject duplicate titles; the DB helper again uses the string 'False'
    # as its "not found" sentinel.
    title = incidents_db.check_title(new_red_flag.title)
    if title and title != 'False':
        return jsonify({"status":400, "error":"Incident already exists"}), 400
    userdb = UsersDB()
    # Server-controlled fields override anything the client sent.
    new_red_flag.createdon = datetime.datetime.now().strftime("%Y/%m/%d")
    new_red_flag.createdby = current_identity['userid']
    new_red_flag.status = 'pending'
    new_red_flag.type = type.rstrip('s')
    new_red_flag.username = userdb.check_id(current_identity['userid'])["username"]
    validate_redflag = Validate_redflag()
    thisredflag = validate_redflag.validate(**serialize(new_red_flag))
    if thisredflag['message'] != 'successfully validated':
        return jsonify({"status":400, "error":thisredflag['message']}), 400
    result = incidents_db.register_flag(**serialize(new_red_flag))
    if not result['status']:
        return jsonify({"status":400, "error":result['error']}), 400
    return jsonify({"status":201,
                    "data":[{
                        "id":result['data']['flag_id'],
                        "message":f"Created {type.rstrip('s')} Record"
                    }]
                    }), 201
@redflags_view.route('/ireporter/api/v2/<type>/<int:id>', methods=["GET"])
@swag_from('../docs/redflags/getaflag.yml')
def get(type, id):
    """Return a single incident by id, with its attached media URLs.

    Public endpoint (no JWT).
    """
    if not (type in ["red-flags","interventions"]):
        return jsonify({"status":"404", "error":"Invalid URL"}), 404
    regflag = get_flag_by_id(id)
    # 'False' is the helper's string sentinel for a missing record.
    if regflag and regflag != 'False':
        regflag['Video'] = [item[0] for item in mediaDB.flag_media(**{'type':'video','redflag':regflag['flag_id']}).get('data',[])]
        regflag['Image'] = [item[0] for item in mediaDB.flag_media(**{'type':'image','redflag':regflag['flag_id']}).get('data',[])]
        return jsonify({"status":200,
                        "data":regflag
                        }), 200
    return jsonify({"status":404, "error":f"{type.rstrip('s')} not found"}), 404
@redflags_view.route('/ireporter/api/v2/<type>/<int:id>', methods=["DELETE"])
@jwt_required()
@swag_from('../docs/redflags/deleteaflag.yml')
def delete(type, id):
    """Delete an incident; allowed for admins and the record's creator.

    BUG FIX vs. original: the authorisation check dereferenced
    ``regflag['createdby']`` BEFORE verifying the record exists, so a DELETE
    on an unknown id crashed with a TypeError instead of returning 404. The
    existence check now runs first.
    """
    regflag = get_flag_by_id(id)
    if not regflag:
        return jsonify({"status":404, "error":f"{type.rstrip('s')} not found"}), 404
    if not (current_identity['is_admin'] or (current_identity['userid'] == regflag['createdby']) ):
        return jsonify({"status":401,
                        "error":"Sorry! you are not authorised to perform this action.",
                        }), 401
    incidents_db.delete(id)
    return jsonify({"status":200,
                    "data":[{
                        "message":f"{type.rstrip('s')} record has been deleted",
                        "id": id
                    }]
                    }), 200
@redflags_view.route('/ireporter/api/v2/<type>/<int:id>/<attribute>', methods=["PATCH"])
@jwt_required()
@swag_from('../docs/redflags/patchaflag.yml')
def patch(type, id, attribute):
    """Update one attribute (comment, location, or status) of an incident.

    Status changes are admin-only; comment/location changes are allowed for
    admins and the record's creator.
    """
    if not (type in ["red-flags","interventions"]):
        return jsonify({"status":"404", "error":"Invalid URL"}), 404
    if not (attribute in ["comment", "location", "status"]):
        return jsonify({"status":"404", "error":"Invalid URL"}), 404
    try: data = request.get_json()
    except: return jsonify({"status":400, "error":"No data was posted"}), 400
    if attribute == "status" and not (data.get('status') in ['under investigation', 'rejected', 'resolved']):
        return jsonify({"status":400, "error":"Invalid status"}), 400
    regflag = get_flag_by_id(id)
    if not regflag:
        return jsonify({"status":404, "error":f"{type.rstrip('s')} not found"}), 404
    if attribute == "status" and not current_identity['is_admin']:
        return jsonify({"status":401,
                        "error":"Sorry! only administrators allowed.",
                        }), 401
    if not (current_identity['is_admin'] or (current_identity['userid'] == regflag['createdby']) ):
        return jsonify({"status":401,
                        "error":"Sorry! you are not authorised to perform this action.",
                        }), 401
    # NOTE(review): raises KeyError (uncaught, -> 500) if the body lacks the
    # attribute key; consider data.get(attribute) with a 400 response.
    regflag[attribute] = data[attribute]
    # The validator expects createdon as a string and an 'id' key.
    regflag['createdon'] = regflag['createdon'].strftime("%Y/%m/%d")
    regflag['id'] = regflag['flag_id']
    validate_redflag = Validate_redflag()
    validited = validate_redflag.validate(**regflag)
    if validited['message'] != 'successfully validated':
        return jsonify({"status":400, "error":validited['message']}), 400
    # NOTE(review): if the DB update does not return the string 'True' this
    # view implicitly returns None (-> 500); confirm IncidentsDB.update's
    # contract.
    if incidents_db.update(**regflag) == 'True':
        return jsonify({"status":200,
                        "data":[{
                            "message":f"Updated {type.rstrip('s')} record's {attribute}",
                            "id": id
                        }]
                        }), 200
@redflags_view.route('/ireporter/api/v2/incidents/<int:id>', methods=["PUT"])
@jwt_required()
def putincident(id):
    """Merge the posted fields into an existing incident record and save it.

    Allowed for admins and the record's creator. Unlike PATCH, this accepts
    arbitrary keys and overlays them onto the stored record before validation.
    """
    try: data = request.get_json()
    except: return jsonify({"status":400, "error":"No data was posted"}), 400
    regflag = get_flag_by_id(id)
    if not regflag:
        return jsonify({"status":404, "error":"Incident not found"}), 404
    if not (current_identity['is_admin'] or (current_identity['userid'] == regflag['createdby']) ):
        return jsonify({"status":401,
                        "error":"Sorry! you are not authorised to perform this action.",
                        }), 401
    # Overlay every posted key onto the stored record (no key whitelist).
    for key in data:
        regflag[key] = data[key]
    # The validator expects createdon as a string and an 'id' key.
    regflag['createdon'] = regflag['createdon'].strftime("%Y/%m/%d")
    regflag['id'] = regflag['flag_id']
    validate_redflag = Validate_redflag()
    validited = validate_redflag.validate(**regflag)
    if validited['message'] != 'successfully validated':
        return jsonify({"status":400, "error":validited['message']}), 400
    # NOTE(review): a DB update result other than the string 'True' makes this
    # view implicitly return None (-> 500); confirm IncidentsDB.update.
    if incidents_db.update(**regflag) == 'True':
        return jsonify({"status":200,
                        "data":[{
                            "message":f"Successfully Updated Incident record",
                            "id": id
                        }]
                        }), 200
| PatrickMugayaJoel/IReporter-Api | app/views/redflags.py | redflags.py | py | 8,161 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "database.incidents_db.IncidentsDB",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "database.media_db.MediaDB",
"line_number": 16,
"usage_type": "call"
},
{
"api_n... |
29674277875 | import json
from astar.astar import Astar
from roblib.map import Map
import numpy as np
from roblib.datastructures import MoveCommand
from roblib.datastructures import Coordinate
from filetransfer import Filetransfer
# Path JSON
_PATH_LOCAL = "./path.json"
_PATH_REMOTE = "/home/nao/.local/share/PackageManager/apps/FloorGuide_Map/html/json/path.json"
class Planner():
    """Plans routes on the floor map and converts them into Pepper move commands.

    Loads the JSON map, builds an occupancy matrix (0 = passable, 1 = blocked),
    and uses A* to find paths between coordinates.
    """

    def __init__(self):
        self.map = Map()
        self.map.load_json()
        # Occupancy grid for A*: start fully blocked, then open passable nodes.
        self.pepper_matrix = np.ones((self.map.map_size_x, self.map.map_size_y), dtype=int)
        # .values() replaces the Python-2-only .itervalues(), keeping the code
        # runnable on both Python 2 and 3.
        for n in self.map.nodes.values():
            if (n.is_passable()):
                self.pepper_matrix[n.x][n.y] = 0

    def get_coord_list(self, current_pos, destination_pos):
        """Return a simplified list of Coordinate waypoints between two positions.

        Also writes the raw A* path to the path JSON and transfers it to Pepper.
        The final waypoint carries the destination's heading (degrees).
        """
        a = Astar()
        start = (current_pos.getX(), current_pos.getY())
        end = (destination_pos.getX(), destination_pos.getY())
        path = a.astar(self.pepper_matrix, start, end)
        self._write_json_path(path)
        simplified_path = self._simplifyPath(path)
        coord_list = []
        for p in simplified_path:
            coord_list.append(Coordinate(p[0], p[1]))
        coord_list[len(coord_list) - 1].degrees = destination_pos.degrees
        return coord_list

    def _simplifyPath(self, path):
        """Collapse a cell-by-cell path to the corner points where it turns."""
        path_comp = []
        start_node = path[0]
        prev_node = path[0]
        for node in path:
            # A change in BOTH coordinates relative to the segment start means
            # the previous node was a corner.
            if (start_node[0] != node[0] and start_node[1] != node[1]):
                path_comp.append(prev_node)
                start_node = prev_node
            prev_node = node
        path_comp.append(prev_node)
        return path_comp

    def get_move_cmd_from_coord(self, currentpos, targetpos):
        """Translate one waypoint hop into a list of MoveCommand objects.

        Produces an optional turn, a straight move, optional NAOmark alignment
        turns, and a final heading turn. Mutates ``currentpos`` to the target.
        """
        move_list = []
        current_direction = currentpos.getDegrees()
        direction = (targetpos.x - currentpos.x, targetpos.y - currentpos.y)
        new_direction = current_direction
        # Map the axis-aligned displacement to an absolute heading.
        if direction[0] > 0:
            # go right
            new_direction = 90
        if direction[0] < 0:
            # go left
            new_direction = 270
        if direction[1] < 0:
            # go down
            new_direction = 0
        if direction[1] > 0:
            # go up
            new_direction = 180
        # Movement is axis-aligned, so one component is always zero.
        distance = abs(direction[0] + direction[1])
        turn = self._getTurnDegrees(current_direction, new_direction)
        if turn != 0:
            move_list.append(MoveCommand(0, 0, turn))
        if distance != 0:
            move_list.append(MoveCommand(distance, 0, 0))
        node = self.map.nodes['%d:%d' % (targetpos.x, targetpos.y)]
        if node.get_naomark() != None:
            # Face the NAOmark for localisation, then turn back.
            naoMarkDegree = node.get_naomark().get_degree()
            degrees = self._getTurnDegrees(new_direction, naoMarkDegree)
            move_list.append(MoveCommand(0, 0, degrees, True, node.get_naomark().get_id()))
            if degrees != 0:
                move_list.append(MoveCommand(0, 0, -degrees))
        if targetpos.getDegrees() != None:
            move_list.append(MoveCommand(0, 0, self._getTurnDegrees(new_direction, targetpos.getDegrees())))
        currentpos.x = targetpos.x
        currentpos.y = targetpos.y
        currentpos.degrees = new_direction
        return move_list

    def _getTurnDegrees(self, currentDirection, newDirection):
        """Return the signed shortest rotation (-180, 180] between headings."""
        turn = newDirection - currentDirection
        if turn > 180:
            turn -= 360
        if turn < -180:
            turn += 360
        return turn

    def _write_json_path(self, path):
        """Serialise the raw A* path to JSON and push it to the robot."""
        path_obj = {}
        array = []
        for p in path:
            obj = {}
            obj['x'] = p[0]
            obj['y'] = p[1]
            array.append(obj)
        path_obj['path'] = array
        with open(_PATH_LOCAL, 'w') as pathfile:
            json.dump(path_obj, pathfile)
        Filetransfer.transfer_file_from_local_to_pepper(_PATH_LOCAL, _PATH_REMOTE)

    def get_coor_by_room_name(self, room_name):
        """Return the coordinate of the first node whose room name contains
        ``room_name``, or None if no such room exists."""
        for n in self.map.nodes.values():
            if n.get_room() != None:
                if room_name in n.get_room().get_name():
                    return n.get_coordinate()
        return None
| tschibu/hslu-roblab-floorguide | planner.py | planner.py | py | 4,098 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "roblib.map.Map",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "astar.astar.Astar",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "roblib.datastructures.Coo... |
31929396338 | # Standalone 1.5D distributed SpMM implementation
# Largely borrowed from CAGNET
import argparse
import math
import os
import time
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from sparse_coo_tensor_cpp import spmm_gpu
# Per-rank wall-clock accounting, keyed by process rank.
comp_time = dict()          # time spent in local SpMM compute
comm_time = dict()          # total communication time (bcast + all-reduce)
bcast_comm_time = dict()    # broadcast portion of comm_time
reduce_comm_time = dict()   # all-reduce portion of comm_time
# Gate for start_time/stop_time; rebound from --timing in the __main__ block.
timing = False
def start_time(group, rank):
    """Begin a timing measurement.

    Returns 0.0 when timing is disabled or on non-zero ranks; otherwise the
    current wall-clock time after synchronizing outstanding CUDA work.
    """
    if not timing:
        return 0.0
    if group is not None:
        # Synchronize the current CUDA device (selected via
        # torch.cuda.set_device in main). The original passed a module-level
        # `device` variable that is never defined at module scope, raising
        # NameError whenever timing was enabled.
        torch.cuda.synchronize()
    tstart = 0.0
    if rank == 0:
        tstart = time.time()
    return tstart
def stop_time(group, rank, tstart):
    """End a timing measurement started by start_time.

    Returns the elapsed seconds on rank 0 (0.0 elsewhere or when timing is
    disabled), synchronizing CUDA work first so kernels are accounted for.
    """
    if not timing:
        return 0.0
    if group is not None:
        # See start_time: the original referenced an undefined module-level
        # `device`; synchronizing the current device is equivalent here.
        torch.cuda.synchronize()
    tstop = 0.0
    if rank == 0:
        tstop = time.time()
    return tstop - tstart
def dspmm(node_count, am_partitions, inputs, rank, size, replication, row_groups, col_groups, group, device):
    """1.5D distributed SpMM: compute this rank's partition of A @ B.

    Each column group broadcasts its dense input partitions stage by stage;
    every rank multiplies its local sparse blocks against the received dense
    block, then the partial results are summed with an all-reduce across the
    row (replication) group.

    Args:
        node_count: total number of rows/columns of the square sparse matrix.
        am_partitions: list of this rank's local sparse blocks (row-by-row split).
        inputs: this rank's local dense matrix partition.
        rank, size: this process's rank and the world size.
        replication: replication factor c of the 1.5D algorithm.
        row_groups, col_groups: process groups from get_proc_groups.
        group: world group (unused here; kept for interface symmetry).
        device: CUDA device for the output buffers.

    Returns:
        The local (rows x features) dense result block.
    """
    global comm_time
    global comp_time
    global bcast_comm_time
    global reduce_comm_time
    n_per_proc = math.ceil(float(node_count) / (size / replication))
    # Local accumulator and reusable receive buffer for broadcast stages.
    z_loc = torch.cuda.FloatTensor(am_partitions[0].size(0), inputs.size(1), device=device).fill_(0)
    inputs_recv = torch.cuda.FloatTensor(n_per_proc, inputs.size(1), device=device).fill_(0)
    rank_c = rank // replication # effectively row-rank
    rank_col = rank % replication
    # Each column group handles size/(c^2) stages; the last one also takes
    # the remainder.
    stages = size // (replication ** 2)
    if rank_col == replication - 1:
        stages = (size // replication) - (replication - 1) * stages
    for i in range(stages):
        # Compute src rank in bcast
        q = (rank_col * (size // (replication ** 2)) + i) * replication + rank_col
        q_c = q // replication
        am_partid = rank_col * (size // replication ** 2) + i
        # If this rank is the src rank for bcast, set inputs_recv to the local matrix
        # Else, instantiate a new empty matrix
        if q == rank:
            inputs_recv = inputs.clone()
        elif q_c == size // replication - 1:
            # The last partition can be smaller than n_per_proc; size the
            # buffer from the sparse block's column count instead.
            inputs_recv = torch.cuda.FloatTensor(am_partitions[am_partid].size(1), inputs.size(1), device=device).fill_(0)
        inputs_recv = inputs_recv.contiguous()
        tstart_comm = start_time(col_groups[rank_col], rank)
        dist.broadcast(inputs_recv, src=q, group=col_groups[rank_col])
        dur = stop_time(col_groups[rank_col], rank, tstart_comm)
        comm_time[rank] += dur
        bcast_comm_time[rank] += dur
        tstart_comp = start_time(col_groups[rank_col], rank)
        # Local sparse @ dense accumulation into z_loc (custom CUDA kernel).
        spmm_gpu(am_partitions[am_partid].indices()[0].int(), am_partitions[am_partid].indices()[1].int(),
                 am_partitions[am_partid].values(), am_partitions[am_partid].size(0),
                 am_partitions[am_partid].size(1), inputs_recv, z_loc)
        dur = stop_time(col_groups[rank_col], rank, tstart_comp)
        comp_time[rank] += dur
    z_loc = z_loc.contiguous()
    # Sum partial results across the replication (row) group.
    tstart_comm = start_time(row_groups[rank_c], rank)
    dist.all_reduce(z_loc, op=dist.reduce_op.SUM, group=row_groups[rank_c])
    dur = stop_time(row_groups[rank_c], rank, tstart_comm)
    comm_time[rank] += dur
    reduce_comm_time[rank] += dur
    return z_loc
def rank_to_devid(rank, acc_per_rank):
    """Map a global process rank to a local accelerator (GPU) id."""
    devid = rank - (rank // acc_per_rank) * acc_per_rank
    return devid
def get_proc_groups(rank, size, replication):
    """Create the row and column process groups for the 1.5D process grid.

    Row groups contain ``replication`` consecutive ranks; column groups take
    every ``replication``-th rank starting from each offset. Every rank must
    call this collectively (dist.new_group is collective).
    """
    row_groups = [
        dist.new_group(list(range(start, start + replication)))
        for start in range(0, size, replication)
    ]
    col_groups = [
        dist.new_group(list(range(offset, size, replication)))
        for offset in range(replication)
    ]
    return row_groups, col_groups
def oned_partition(rank, size, inputs, adj_matrix, replication, device):
    """Partition the sparse matrix and dense inputs for the 1.5D algorithm.

    Splits the COO adjacency matrix into size/replication column partitions,
    then splits this rank's column partition again row-by-row, materialising
    each piece as a torch sparse COO tensor with unit values.

    Returns:
        (inputs_loc, adj_matrix_loc, am_pbyp): this rank's dense partition,
        its full column partition, and the list of its row-wise sub-blocks.
    """
    node_count = inputs.size(0)
    # n_per_proc = math.ceil(float(node_count) / size)
    n_per_proc = math.ceil(float(node_count) / (size / replication))
    am_partitions = None
    am_pbyp = None
    rank_c = rank // replication
    # Compute the adj_matrix and inputs partitions for this process
    # TODO: Maybe I do want grad here. Unsure.
    with torch.no_grad():
        # Column partitions
        am_partitions, vtx_indices = split_coo(adj_matrix, node_count, n_per_proc, 1)
        print(f"rank: {rank} replication: {replication} rank_c: {rank_c} len_vtxind: {len(vtx_indices)}")
        proc_node_count = vtx_indices[rank_c + 1] - vtx_indices[rank_c]
        # Row-wise split of this rank's column partition.
        am_pbyp, _ = split_coo(am_partitions[rank_c], node_count, n_per_proc, 0)
        for i in range(len(am_pbyp)):
            if i == size // replication - 1:
                # The last row block may be smaller than n_per_proc.
                last_node_count = vtx_indices[i + 1] - vtx_indices[i]
                am_pbyp[i] = torch.sparse_coo_tensor(am_pbyp[i], torch.ones(am_pbyp[i].size(1)),
                                                     size=(last_node_count, proc_node_count),
                                                     requires_grad=False)
            else:
                am_pbyp[i] = torch.sparse_coo_tensor(am_pbyp[i], torch.ones(am_pbyp[i].size(1)),
                                                     size=(n_per_proc, proc_node_count),
                                                     requires_grad=False)
        for i in range(len(am_partitions)):
            proc_node_count = vtx_indices[i + 1] - vtx_indices[i]
            am_partitions[i] = torch.sparse_coo_tensor(am_partitions[i],
                                                       torch.ones(am_partitions[i].size(1)),
                                                       size=(node_count, proc_node_count),
                                                       requires_grad=False)
        input_partitions = torch.split(inputs, math.ceil(float(inputs.size(0)) / (size / replication)), dim=0)
        adj_matrix_loc = am_partitions[rank_c]
        inputs_loc = input_partitions[rank_c]
    print(f"rank: {rank} adj_matrix_loc.size: {adj_matrix_loc.size()}", flush=True)
    print(f"rank: {rank} inputs_loc.size: {inputs_loc.size()}", flush=True)
    return inputs_loc, adj_matrix_loc, am_pbyp
def split_coo(adj_matrix, node_count, n_per_proc, dim):
    """Split a COO edge-index tensor into chunks of n_per_proc along ``dim``.

    Basically torch.split for sparse COO indices, which pytorch does not
    support natively. Indices inside each chunk are shifted to be local to
    that chunk.

    Returns:
        (partitions, boundaries): the list of 2xN index chunks and the list
        of chunk boundary offsets (ending with node_count).
    """
    boundaries = list(range(0, node_count, n_per_proc))
    boundaries.append(node_count)
    partitions = []
    for lo, hi in zip(boundaries[:-1], boundaries[1:]):
        # Keep edges whose coordinate along `dim` falls in [lo, hi).
        keep = (adj_matrix[dim, :] >= lo) & (adj_matrix[dim, :] < hi)
        part = adj_matrix[:, keep.nonzero().squeeze(1)]
        part[dim] -= lo  # make indices local to this partition
        partitions.append(part)
    return partitions, boundaries
def main(mata_indices_path, k, acc_per_rank, replication):
    """Run the 1.5D distributed SpMM benchmark end to end.

    Args:
        mata_indices_path: path to a torch-saved COO edge-index tensor (or a
            Reddit/Cora-style dataset whose first element exposes .edge_index).
        k: number of dense columns in the random right-hand-side matrix.
        acc_per_rank: accelerators per node, used to map rank -> GPU id.
        replication: replication factor c of the 1.5D algorithm.

    BUG FIX vs. original: the final diagnostics printed an undefined name
    ``outputs``, raising NameError on every rank; the result tensor ``z`` is
    printed instead.
    """
    # Load matrices as pytorch tensors
    mata_indices = torch.load(mata_indices_path)
    if not isinstance(mata_indices, torch.Tensor): # if Reddit/Cora
        mata_indices = mata_indices[0].edge_index
    print(mata_indices)
    node_count = torch.max(mata_indices[0]) + 1
    matb = torch.rand(node_count, k)
    # Initialize process groups
    mp.set_start_method('spawn', force=True)
    if "OMPI_COMM_WORLD_RANK" in os.environ.keys():
        os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
    dist.init_process_group(backend='nccl')
    rank = dist.get_rank()
    size = dist.get_world_size()
    print("Processes: " + str(size))
    devid = rank_to_devid(rank, acc_per_rank)
    device = torch.device('cuda:{}'.format(devid))
    torch.cuda.set_device(device)
    curr_devid = torch.cuda.current_device()
    print(f"curr_devid: {curr_devid}", flush=True)
    devcount = torch.cuda.device_count()
    group = dist.new_group(list(range(size)))
    row_groups, col_groups = get_proc_groups(rank, size, replication)
    rank_c = rank // replication
    # Ranks beyond the active grid sit out.
    if rank_c >= (size // replication):
        return
    # Partition both input matrices across process grid and get local mata and matb copies
    matb_loc, mata_loc, mata_pbyp = oned_partition(rank, size, matb, mata_indices, replication, device)
    mata_loc = mata_loc.to(device)
    matb_loc = matb_loc.to(device)
    # Transpose and coalesce each local sparse block for the SpMM kernel.
    for i in range(len(mata_pbyp)):
        mata_pbyp[i] = mata_pbyp[i].t().coalesce().to(device)
    mata_loc = mata_loc.coalesce()
    comm_time[rank] = 0.0
    comp_time[rank] = 0.0
    bcast_comm_time[rank] = 0.0
    reduce_comm_time[rank] = 0.0
    dist.barrier(group)
    if rank == 0:
        summa_start_time = time.time()
    # Call 1.5D distributed SpMM algorithm
    z = dspmm(mata_loc.size(0), mata_pbyp, matb_loc, rank, size, replication,
              row_groups, col_groups, group, device)
    dist.barrier(group)
    if rank == 0:
        print(f"summa_time: {time.time() - summa_start_time}")
    print(f"rank: {rank} comm_time: {comm_time[rank]}")
    print(f"rank: {rank} comp_time: {comp_time[rank]}")
    print(f"rank: {rank} bcast_comm_time: {bcast_comm_time[rank]}")
    print(f"rank: {rank} reduce_comm_time: {reduce_comm_time[rank]}")
    print(z)
    print(torch.sum(z))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--mata-indices", type=str)  # path to torch-saved edge-index tensor
    parser.add_argument("--k", type=int)             # dense feature-column count
    parser.add_argument("--accperrank", type=int)    # accelerators available per node
    parser.add_argument("--replication", type=int)   # 1.5D replication factor c
    parser.add_argument("--timing", type=str)        # "True" enables timers
    args = parser.parse_args()
    print(args)
    mata_indices_path = args.mata_indices
    acc_per_rank = args.accperrank
    replication = args.replication
    # Rebinds the module-level `timing` flag read by start_time/stop_time.
    timing = args.timing == "True"
    main(mata_indices_path, args.k, acc_per_rank, replication)
| alokpathy/spmm | nccl_ex.py | nccl_ex.py | py | 9,877 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.cuda.synchronize",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.cuda.synchroni... |
19444944835 | """The core module of this code base.
Includes the agent, environment, and experiment APIs.
"""
import logging
import sys
import time
from abc import ABCMeta, abstractmethod
import numpy as np
LOGGER = logging.getLogger('experiment')
LOGGER.setLevel(logging.DEBUG)
class Agent(metaclass=ABCMeta):
"""An abstract base class for agents.
Attributes:
eval_mode: (bool) Whether the agent is currently in evaluation mode as
opposed to training mode.
"""
def __init__(self):
self.eval_mode = False
@abstractmethod
def seed(self, seed):
"""Seed the random number generators of the agent.
Args:
seed: (int) The seed to use.
"""
def begin_episode(self, observation):
"""Run a procedure at the beginning of an episode.
Args:
observation: (Observation) The initial observation of the episode.
"""
@abstractmethod
def act(self):
"""Choose an action.
Returns:
(int) The chosen action.
"""
def learn(self, reward, observation):
"""Learn from the most recent observation and reward.
This method is called immediately after the environment advances by one
step and the resulting reward and observation are recorded.
Args:
reward: (float) The reward for the previous action.
observation: (Observation) A new observation of the environment.
"""
def end_episode(self):
"""Run a procedure at the end of an episode."""
class Environment(metaclass=ABCMeta):
    """Abstract base class for environments an agent interacts with."""

    @abstractmethod
    def seed(self, seed):
        """Seed the environment's random number generators.

        Args:
            seed: (int) The seed to use.
        """

    @abstractmethod
    def step(self, action):
        """Apply *action* and advance the simulation by one step.

        Args:
            action: (int) The action chosen by the agent.

        Returns:
            (ndarray) The next observation for the agent.
            (float) The reward for the agents action.
            (bool) An indicator for whether the episode has ended.
            (dict) Diagnostic information for debugging.
        """

    @abstractmethod
    def reset(self):
        """Start a fresh episode.

        Returns:
            (ndarray) The initial observation of a new episode.
        """
class Experiment:
    """An object that handles running experiments.

    An experiment controls interactions between an agent and an environment and
    keeps relevant statistics and logs.
    """

    def __init__(
        self,
        agent,
        environment,
        num_iterations,
        train_steps,
        eval_steps,
        max_steps_per_episode,
        seed=None,
        iteration_callback=None,
        episode_callback=None
    ):
        """Initialize an experiment.

        Args:
            environment: (Environment) The environment to test the agent in.
            agent: (Agent) The agent to act in the experiment.
            num_iterations: (int) The number of iterations to run.
            train_steps: (int) The number of training steps per iteration.
            eval_steps: (int) The number of evaluation steps per iteration.
            max_steps_per_episode: (int) The maximum number of steps after which
                an episode terminates.
            seed: (int) Optional. A seed for the experiment. If possible, this
                fixes all randomness related to the experiment.
            iteration_callback: (func) Optional. A function to be run after
                every iteration.
            episode_callback: (func) Optional. A function to be run after every
                episode.
        """
        self._environment = environment
        self._agent = agent
        self._num_iterations = num_iterations
        self._train_steps = train_steps
        self._eval_steps = eval_steps
        self._max_steps_per_episode = max_steps_per_episode
        self._seed = seed
        if seed is not None:
            # Seed both parties up front so the run is reproducible.
            self._agent.seed(seed)
            self._environment.seed(seed)
        # Callbacks default to a no-op so call sites need no None checks.
        def do_nothing(*args, **kwargs): pass
        self._iteration_callback = iteration_callback or do_nothing
        self._episode_callback = episode_callback or do_nothing
        self._stats = {}

    def run(self):
        """Run the experiment from start to finish.

        An experiment consists of repeated iterations, each of which has a
        training phase followed by an evaluation phase. During a phase, episodes
        of agent/environment interactions are simulated until a given number of
        steps is reached.

        Returns:
            (dict) Statistics about the experiment.
        """
        self._reset()
        LOGGER.info('Beginning the experiment...')
        for iteration in range(self._num_iterations):
            LOGGER.info('Starting iteration %d', iteration)
            self._run_train_phase()
            self._run_eval_phase()
            self._iteration_callback(self, self._stats)
        return self._stats

    def _reset(self):
        """Reset the experiment.

        Note: This does not reset any of the random number generators related to
        the agent or environment, so that running the experiment multiple times
        in a row does not generate identical outcomes.
        """
        self._stats = {
            'train_average_returns': [],
            'train_episode_counts': [],
            'eval_average_returns': [],
            'eval_episode_counts': []
        }

    def _run_train_phase(self):
        """Run one training phase.

        Records the phase's average return and episode count in ``self._stats``
        and logs throughput; returns nothing.
        """
        # Prepare phase
        self._agent.eval_mode = False
        # Run phase
        start_time = time.time()
        num_steps, total_return, num_episodes = self._run_one_phase(
            min_steps=self._train_steps,
            run_mode='train'
        )
        time_delta = time.time() - start_time
        # Statistics (guard against division by zero when no episode finished)
        average_return = total_return / num_episodes if num_episodes > 0 else 0.
        self._stats['train_average_returns'].append(average_return)
        self._stats['train_episode_counts'].append(num_episodes)
        # Logging
        LOGGER.info(
            'Average undiscounted return per training episode: %.2f',
            average_return
        )
        LOGGER.info(
            'Average training steps per second: %.2f',
            num_steps / time_delta
        )

    def _run_eval_phase(self):
        """Run one evaluation phase.

        Records the phase's average return and episode count in ``self._stats``;
        returns nothing.
        """
        # Prepare phase: flip the agent into evaluation mode.
        self._agent.eval_mode = True
        # Run phase
        _, total_return, num_episodes = self._run_one_phase(
            min_steps=self._eval_steps,
            run_mode='eval'
        )
        # Statistics
        average_return = total_return / num_episodes if num_episodes > 0 else 0.
        self._stats['eval_average_returns'].append(average_return)
        self._stats['eval_episode_counts'].append(num_episodes)
        # Logging
        LOGGER.info(
            'Average undiscounted return per evaluation episode: %.2f',
            average_return
        )

    def _run_one_phase(self, min_steps, run_mode):
        """Runs the agent/environment loop for a desired number of steps.

        When the desired number of steps is reached, the running episode is
        finished before stopping.

        Args:
            min_steps: (int) The minimum number of steps to generate.
            run_mode: (str) The run mode. Either 'train' or 'eval'.

        Returns:
            (int) The number of steps taken.
            (float) The total return accumulated.
            (int) The number of episodes performed.
        """
        step_count = 0
        num_episodes = 0
        total_return = 0.
        while step_count < min_steps:
            episode_length, episode_return = self._run_one_episode()
            self._episode_callback(self, self._stats)
            # TODO: Record episode length and return as statistics
            step_count += episode_length
            total_return += episode_return
            num_episodes += 1
            # We use sys.stdout.write instead of logger so as to flush
            # frequently without generating a line break.
            # sys.stdout.write(
            #     'Steps executed: {} '.format(step_count) +
            #     'Episode length: {} '.format(episode_length) +
            #     'Return: {}\r'.format(episode_return)
            # )
            # sys.stdout.flush()
        return step_count, total_return, num_episodes

    def _run_one_episode(self):
        """Run a single episode of agent/environment interactions.

        An episode ends when either the environment reaches a terminal state or
        a specified maximum number of steps is reached.

        Returns:
            (int) The number of steps taken.
            (float) The total reward.
        """
        initial_observation = self._environment.reset()
        self._agent.begin_episode(initial_observation)
        is_terminal = False
        step_count = 0
        total_reward = 0.
        while True:
            action = self._agent.act()
            observation, reward, is_terminal, _ = self._environment.step(action)
            self._agent.learn(reward, observation)
            total_reward += reward
            step_count += 1
            # Stop on terminal state or on hitting the episode step cap.
            if is_terminal or step_count == self._max_steps_per_episode:
                break
        self._agent.end_episode()
        return step_count, total_reward
| christopher-wolff-zz/lab-old | lab/core.py | core.py | py | 9,902 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "abc.ABCMeta",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",... |
19078811720 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.db.models import Q
from django.urls import reverse_lazy
from django.views.generic import ListView
from applications.Producto.models import ProductoServicio
from applications.PaginaVenta.models import Suscripciones
from applications.Compra.models import Compra,Detalle
from . import functions
class CarritoListView(ListView):
    """Display the items of the purchase (cart) stored in the session."""

    model = Detalle
    template_name = "Compra/carrito.html"
    context_object_name = 'carrito'

    def get_queryset(self):
        base_qs = super().get_queryset()
        session = self.request.session
        if 'compra' not in session:
            # No purchase in progress: fall back to the default queryset.
            return base_qs
        return Detalle.objects.get_carrito(session['compra'])

    def get_context_data(self, **args):
        context = super().get_context_data(**args)
        # Expose the current user's subscriptions to the template.
        context["suscripciones"] = Suscripciones.objects.get_suscripciones(
            self.request.user.id
        )
        return context
def validar_compra(compra, producto):
    """Try to add *producto* to *compra* and redirect with a status message.

    Args:
        compra: Compra instance the product should be attached to.
        producto: ProductoServicio being added (its ``precio`` is charged).

    Returns:
        HttpResponseRedirect to the main page with a success or error message.
    """
    created, total = functions.agregar_producto_a_compra(
        compra=compra,
        producto=producto,
        precio=producto.precio,
    )
    if created:
        # Persist the recomputed purchase total before redirecting.
        compra.total = total
        compra.save()
        # Fixed typo in the user-facing message: "añadirdo" -> "añadido".
        return HttpResponseRedirect(
            "http://localhost:8000/main/?mensaje=producto añadido correctamente")
    return HttpResponseRedirect(
        "http://localhost:8000/main/?mensaje=no se pudo agregar el producto, intente de nuevo")
def agregar_a_carrito(request, id1, id2):
    """Add product *id1* from sales page *id2* to the session's purchase."""
    producto = ProductoServicio.objects.filter(
        id=id1,
        id_pagina_ventas=id2
    ).first()
    if not producto:
        # Unknown product/page combination.
        return HttpResponseRedirect(
            "http://localhost:8000/main/?mensaje=error al añadir producto")
    if 'compra' in request.session:
        # Reuse the purchase already tied to this session.
        compra = Compra.objects.filter(id=request.session['compra']).first()
    else:
        # First item: open a new purchase and remember it in the session.
        compra = Compra.objects.create(
            usuario=request.user,
            total=0,
            nombre_cliente=request.user.nombre
        )
        request.session['compra'] = compra.id
    return validar_compra(compra, producto)
def eliminar_de_carrito(request, id, idproducto):
    """Remove detail *id* / product *idproducto* from the cart and redirect.

    Any failure in the helper is reported back to the user via the redirect
    message instead of propagating.
    """
    try:
        functions.eliminar_detalle(id, idproducto)
        return HttpResponseRedirect(
            "http://localhost:8000/main/?mensaje=producto eliminado")
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; only genuine runtime errors are converted to a message.
    except Exception:
        return HttpResponseRedirect(
            "http://localhost:8000/main/?mensaje=error al eliminar producto, intente de nuevo")
def cancelar_compra(request):
    """Cancel the session's current purchase, releasing all its items.

    Marks the purchase with estado '2' (cancelled), removes every detail
    (restoring stock via eliminar_detalle), clears the session key, and
    redirects with an appropriate message.
    """
    compra = 'compra' in request.session
    if compra:
        # Rebind `compra` from the membership flag to the actual record.
        compra = Compra.objects.filter(id=request.session['compra']).first()
        compra.estado = '2'
        details = Detalle.objects.filter(id_compra=compra.id)
        if details:
            for d in details:
                # False: presumably "do not delete the detail row here",
                # since details.delete() runs afterwards — confirm.
                functions.eliminar_detalle(d.id, d.id_producto.id,False)
            details.delete()
            request.session['compra']=None
            compra.save()
            return HttpResponseRedirect(
                "http://localhost:8000/main/?mensaje=compra cancelada")
        # Purchase exists but has no items: still cancel and detach it.
        request.session['compra']=None
        compra.save()
        return HttpResponseRedirect(
            "http://localhost:8000/main/?mensaje=no hay productos en el carrito")
    return HttpResponseRedirect(
        "http://localhost:8000/main/?mensaje=no hay un carrito de compra")
{
"api_name": "django.views.generic.ListView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "applications.Compra.models.Detalle",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "applications.Compra.models.Detalle.objects.get_carrito",
"line_number": 21,
... |
26514033503 | from multiprocessing import Process, Value, RLock
import time
start = time.time()
numa = Value('i', 0)
numb = Value('i', 0)
rlock = RLock()
def do_sth():
    """Worker entry point: under the shared lock, bump ``numa`` a million
    times, then run both incrementer helpers.

    ``rlock`` is an RLock, so the nested acquisitions inside ``adda`` and
    ``addb`` are safe (a plain Lock would deadlock here).
    """
    # Removed the dead string-literal "commented-out" acquire/release block;
    # the `with` statement below is the equivalent, exception-safe form.
    with rlock:
        for i in range(1000000):
            numa.value += 1
        adda()
        addb()
def adda():
    """Increment the shared counter ``numa`` one million times under the lock."""
    # Removed the unnecessary `global numa` (the Value is mutated in place,
    # never rebound) and the dead string-literal acquire/release block.
    with rlock:
        for i in range(1000000):
            numa.value += 1
def addb():
    """Increment the shared counter ``numb`` one million times under the lock."""
    # Removed the unnecessary `global numb` (the Value is mutated in place,
    # never rebound) and the dead string-literal acquire/release block.
    with rlock:
        for i in range(1000000):
            numb.value += 1
# Spawn ten workers, wait for all of them, then report the totals and the
# wall-clock time for the whole run.
# NOTE(review): this module-level code has no `if __name__ == '__main__':`
# guard — required for multiprocessing on spawn-start platforms
# (Windows/macOS); confirm this script only targets fork-based Linux.
plist = []
for i in range(10):
    p = Process(target=do_sth)
    plist.append(p)
    p.start()
for item in plist:
    item.join()
print(numa.value)
print(numb.value)
stop = time.time()
print(stop - start)
| hemuke/python | 17_process_thread/31_1_multiprocess_rlock.py | 31_1_multiprocess_rlock.py | py | 1,027 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Value",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Value",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "multiprocessing.RL... |
27168642483 | # Databricks notebook source
# MAGIC %pip install git+https://github.com/rafa-arana/fire.git
# COMMAND ----------
# MAGIC %md
# MAGIC ---
# MAGIC + <a href="$./00.DLT-SAN-Setup">STAGE 0</a>: Setup
# MAGIC + <a href="$./00.DLT-SAN-File Ingestion with Autoloader">STAGE 0 bis</a>: File Ingestion with Autoloader
# MAGIC + <a href="$./01.DLT-SAN-Autoloader-template">STAGE 1</a>: Data Reliable Pipelines with Live Tables and Autoloader
# MAGIC + <a href="$./03.Querying the Delta Live Tables event log">STAGE 3</a>: Querying the Delta Live Tables event log
# MAGIC ---
# COMMAND ----------
# MAGIC %md
# MAGIC # Load CSV files from Cloud in Streaming Mode
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Diving into the code👇
# COMMAND ----------
# MAGIC %md
# MAGIC We retrieve the name of the entity to get the FIRE data model for as well as the directory (distributed file storage) where we expect new raw files to land. These parameters are passed to the delta live table notebook via job configuration as per the screenshot above.
# COMMAND ----------
# Read the pipeline parameters supplied as Delta Live Tables job
# configuration; fail fast with an explicit message when one is missing.
try:
    # the name of the fire entity we want to process
    fire_entity = spark.conf.get("fire_entity")
except:
    raise Exception("Please provide [fire_entity] as job configuration")
try:
    # where new data file will be received
    landing_zone = spark.conf.get("landing_zone")
except:
    raise Exception("Please provide [landing_zone] as job configuration")
try:
    # where corrupted data file will be stored
    invalid_format_path = spark.conf.get("invalid_format_path")
except:
    raise Exception("Please provide [invalid_format_path] as job configuration")
try:
    # format of the raw files we ingest (defaults to csv)
    file_format = spark.conf.get("file_format", "csv")
except:
    raise Exception("Please provide [file_format] as job configuration")
try:
    # field delimiter used in the raw files (defaults to "|")
    delimiter = spark.conf.get("delimiter", "|")
except:
    raise Exception("Please provide [delimiter] as job configuration")
try:
    # number of new file to read at each iteration
    max_files = int(spark.conf.get("max_files", "1"))
except:
    raise Exception("Please provide [max_files] as job configuration")
# COMMAND ----------
# DLT import
import dlt
from pyspark.sql.functions import *
# Pyspark functions
from pyspark.sql.types import StringType, StructField, StructType, IntegerType, DateType, TimestampType, DecimalType, LongType
from fire.spark import FireModel
# COMMAND ----------
# Ingestion audit columns stamped onto every bronze record.
ingestionDate = current_date()
ingestionTime = current_timestamp()
# COMMAND ----------
# Load the FIRE data model for the requested entity: its Spark schema and
# the list of SQL constraint expressions used as expectations downstream.
schemas_dir = "/dbfs/Users/rafael.arana@databricks.com/DLT/dlt_san/entities/"
fire_model = FireModel(schemas_dir).load(fire_entity)
fire_schema = fire_model.schema
fire_constraints = fire_model.constraints
# COMMAND ----------
# DBTITLE 1,Ingest files with Autoloader into Bronze layer
# Bronze layer: incrementally ingest raw files with Auto Loader, keeping
# malformed records aside and stamping audit columns.
@dlt.table(
  name=fire_entity+"_bronze",
  comment="This is an incremental streaming source from autoloader csv files on ADLS",
  table_properties={
    "delta.autoOptimize.optimizeWrite" : "true",
    "quality" : "bronze"
  })
def get_cloudfiles():
  return (
    spark.readStream
    .format("cloudFiles")
    .option("cloudFiles.format",file_format)
    # Files that cannot be parsed at all are written here.
    .option("badRecordsPath", invalid_format_path)
    # Columns that do not match the schema are captured in _rescue.
    .option("rescuedDataColumn", "_rescue")
    .option("cloudFiles.maxFilesPerTrigger", max_files)
    .option("header", "true")
    .option("delimiter", delimiter)
    .schema(fire_schema)
    .load(landing_zone)
    # Audit columns: source file plus ingestion date/time.
    .withColumn("InputFileName",input_file_name())
    .withColumn("IngestionDate",ingestionDate)
    .withColumn("IngestionTime",ingestionTime)
  )
# COMMAND ----------
# DBTITLE 1,Silver table with clean and validated data
# Silver layer: only records passing every FIRE constraint survive
# (expect_all_or_drop silently drops the failures).
@dlt.table(
  name=fire_entity+"_silver",
  comment="This is an incremental streaming source from Auto loader csv files on ADLS",
  table_properties={
    "delta.autoOptimize.optimizeWrite" : "true",
    "quality" : "silver"
  })
@dlt.expect_all_or_drop(dict(zip(fire_constraints, fire_constraints)))
def silver():
  return dlt.read_stream(fire_entity+"_bronze")
# COMMAND ----------
@udf("array<string>")
def failed_expectations(expectations):
    """Return the names of the FIRE constraints whose check came back False."""
    failed = []
    for constraint_name, passed in zip(fire_constraints, expectations):
        if not passed:
            failed.append(constraint_name)
    return failed
# COMMAND ----------
# DBTITLE 1,Quarantine table with invalid data
# Quarantine layer: keep the bronze records that failed at least one FIRE
# constraint, annotated with the list of failed constraint names in _fire.
@dlt.table(
  name=fire_entity+"_quarantine",
  comment="This is an incremental streaming source from Auto loader csv files on ADLS"
)
def quarantine():
  return (
      dlt
        .read_stream(fire_entity+"_bronze")
        # Evaluate every constraint expression into a boolean array ...
        .withColumn("_fire", array([expr(value) for value in fire_constraints]))
        # ... then map it to the names of the failed constraints.
        .withColumn("_fire", failed_expectations("_fire"))
        .filter(size("_fire") > 0)
  )
# COMMAND ----------
# MAGIC %md
# MAGIC ---
# MAGIC + <a href="$./00.DLT-SAN-Setup">STAGE 0</a>: Setup
# MAGIC + <a href="$./00.DLT-SAN-File Ingestion with Autoloader">STAGE 0 bis</a>: File Ingestion with Autoloader
# MAGIC + <a href="$./01.DLT-SAN-Autoloader-template">STAGE 1</a>: Data Reliable Pipelines with Live Tables and Autoloader
# MAGIC + <a href="$./03.Querying the Delta Live Tables event log">STAGE 3</a>: Querying the Delta Live Tables event log
# MAGIC ---
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC © 2021 Databricks, Inc. All rights reserved. The source in this notebook is provided subject to the Databricks License [https://databricks.com/db-license-source]. All included or referenced third party libraries are subject to the licenses set forth below. | rafa-arana/dlt-regulatory-reporting | 01.DLT-SAN-Autoloader-template.py | 01.DLT-SAN-Autoloader-template.py | py | 5,618 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fire.spark.FireModel",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "dlt.table",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "dlt.read_stream",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "dlt.table",
"line... |
74229119712 | from typing import List
class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
intervals.sort()
res = 0
prevEnd = intervals[0][1]
for start, end in intervals[1:]:
if start >= prevEnd:
prevEnd = end
else:
res += 1
prevEnd = min(end, prevEnd) # delete the interval that has the bigger end value
return res | TimHung000/leetcode | 0435_nonOverlappingIntervals/main.py | main.py | py | 450 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
6377740088 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***************************************************************************
# Copyright (c) 2018 西安交通大学
# All rights reserved
#
# 文件名称:Mayavi_test.py
#
# 摘 要:数据可视化mayavi库测试文件
#
# 创 建 者:上官栋栋
#
# 创建日期:2018年11月9日
#
# 修改记录
# 日期 修改者 版本 修改内容
# ------------- ------- ------------------------
# ***************************************************************************
from tvtk.tools import tvtk_doc,ivtk
from tvtk.api import tvtk
import numpy as np
from mayavi import mlab
from traits.api import HasTraits ,Delegate,Instance,Int,Str
class DataVisual(object):
    """Collection of small, independent TVTK/Mayavi demos; each ``testN``
    method showcases one visualisation feature and opens a GUI window."""

    def test1(self):
        '''Open the TVTK documentation browser.'''
        tvtk_doc.main()

    def test2(self):
        '''Typical workflow for building and showing an interactive
        render window: source -> mapper -> actor -> renderer -> window.'''
        s=tvtk.CubeSource(x_length=1.0,y_length=2.0,z_length=3.0)
        m=tvtk.PolyDataMapper(input_connection=s.output_port)
        a=tvtk.Actor(mapper=m)
        r=tvtk.Renderer(background=(0.5,0.5,0.5))
        r.add_actor(a)
        w=tvtk.RenderWindow(size=(500,500))
        w.add_renderer(r)
        i=tvtk.RenderWindowInteractor(render_window=w)
        i.initialize()
        i.start()

    def test3(self):
        # Render 10 iso-contours of a PLOT3D dataset.
        # NOTE(review): "combq,bin" looks like a typo for "combq.bin"
        # (compare test8) — confirm before relying on this demo.
        plot3d=tvtk.MultiBlockPLOT3DReader(xyz_file_name="combxyz.bin",
                            q_file_name="combq,bin",
                            scalar_function_number=100,
                            vector_function_number=200)
        plot3d.update()
        grid=plot3d.output.get_block(0)
        con=tvtk.ContourFilter()
        con.set_input_data(grid)
        con.generate_values(10,grid.point_data.scalars.range)
        m=tvtk.PolyDataMapper(scalar_range=grid.point_data.scalars.range,
                            input_connection=con.output_port)
        a=tvtk.Actor(mapper=m)
        a.property.opacity=0.5
        win=self.ivtk_scene(a)
        win.scene.isometric_view()
        self.event_loop()

    # Originally intended to live in its own module (tvtkfunc.py).
    def ivtk_scene(self,actors):
        """Open an IVTK window (with Python shell and pipeline browser)
        showing *actors*, and return the window object."""
        from tvtk.tools import ivtk
        # Create a window with a Crust (Python shell) attached.
        win = ivtk.IVTKWithCrustAndBrowser()
        win.open()
        win.scene.add_actor(actors)
        # Work around a window-flag glitch in the embedded dialog.
        dialog = win.control.centralWidget().widget(0).widget(0)
        from pyface.qt import QtCore
        dialog.setWindowFlags(QtCore.Qt.WindowFlags(0x00000000))
        dialog.show()
        return win

    def event_loop(self):
        """Start the pyface GUI event loop (blocks until windows close)."""
        from pyface.api import GUI
        gui = GUI()
        gui.start_event_loop()

    def test4(self):
        # Same cube demo as test2, but shown inside an IVTK window.
        # NOTE(review): `GUI` and `ivtk` are used here but only imported
        # locally in other methods — confirm they resolve at module scope.
        s=tvtk.CubeSource(x_length=1.0,y_length=2.0,z_length=3.0)
        m=tvtk.PolyDataMapper(input_connection=s.output_port)
        a=tvtk.Actor(mapper=m)
        gui=GUI()
        # win=ivtk.IVTKWithCrust()
        # win=ivtk.IVTK()
        win=ivtk.IVTKWithCrustAndBrowser()
        win.open()
        win.scene.add_actor(a)
        dialog=win.control.centralWidget().widget(0).widget(0)
        from pyface.qt import QtCore
        dialog.setWindowFlags(QtCore.Qt.WindowFlags(0x00000000))
        dialog.show()
        gui.start_event_loop()

    # ImageData and RectilinearGrid dataset types
    def test5(self):
        # ImageData: regular grid defined by spacing/origin/dimensions.
        img=tvtk.ImageData(spacing=(1,1,1),origin=(0,0,0),dimensions=(3,4,5))
        # RectilinearGrid: axis-aligned grid with explicit per-axis coordinates.
        x=np.array([0,3,5,6])
        y=np.array([3,5,9,12])
        z=np.array([4,5,7,9])
        r=tvtk.RectilinearGrid()
        r.x_coordinates=x
        r.y_coordinates=y
        r.z_coordinates=z
        r.dimensions=len(x),len(y),len(z)
        for i in range(64):
            print(r.get_point(i))

    # StructuredGrid dataset type
    def test6(self):
        def generate_annulus(r, theta, z):
            """ Generate points for structured grid for a cylindrical annular
                volume. This method is useful for generating a unstructured
                cylindrical mesh for VTK.
            """
            # Find the x values and y values for each plane.
            x_plane = (np.cos(theta) * r[:, None]).ravel()
            y_plane = (np.sin(theta) * r[:, None]).ravel()
            # Allocate an array for all the points. We'll have len(x_plane)
            # points on each plane, and we have a plane for each z value, so
            # we need len(x_plane)*len(z) points.
            points = np.empty([len(x_plane) * len(z), 3])
            # Loop through the points for each plane and fill them with the
            # correct x,y,z values.
            start = 0
            for z_plane in z:
                end = start + len(x_plane)
                # slice out a plane of the output points and fill it
                # with the x,y, and z values for this plane. The x,y
                # values are the same for every plane. The z value
                # is set to the current z
                plane_points = points[start:end]
                plane_points[:, 0] = x_plane
                plane_points[:, 1] = y_plane
                plane_points[:, 2] = z_plane
                start = end
            return points
        dims = (3, 4, 3)
        r = np.linspace(5, 15, dims[0])
        theta = np.linspace(0, 0.5 * np.pi, dims[1])
        z = np.linspace(0, 10, dims[2])
        pts = generate_annulus(r, theta, z)
        sgrid = tvtk.StructuredGrid(dimensions=(dims[1], dims[0], dims[2]))
        sgrid.points = pts
        s = np.random.random((dims[0] * dims[1] * dims[2]))
        sgrid.point_data.scalars = np.ravel(s.copy())
        sgrid.point_data.scalars.name = 'scalars'

    # Read and display an STL mesh file.
    def test7(self):
        s = tvtk.STLReader(file_name="E:/mesh.stl")  # load the STL file
        m = tvtk.PolyDataMapper(input_connection=s.output_port)
        a = tvtk.Actor(mapper=m)
        # win=ivtk.IVTKWithCrust()
        # win=ivtk.IVTK()
        win = ivtk.IVTKWithCrustAndBrowser()
        win.open()
        win.scene.add_actor(a)
        win.scene.isometric_view()
        dialog = win.control.centralWidget().widget(0).widget(0)
        from pyface.qt import QtCore
        dialog.setWindowFlags(QtCore.Qt.WindowFlags(0x00000000))
        dialog.show()
        gui = GUI()
        gui.start_event_loop()

    def test8(self):
        # Same PLOT3D contour demo as test3 (with the correct q-file name).
        plot3d = tvtk.MultiBlockPLOT3DReader(
            xyz_file_name="combxyz.bin",  # grid file
            q_file_name="combq.bin",  # aerodynamics results file
            scalar_function_number=100,  # number of scalar fields
            vector_function_number=200  # number of vector fields
        )
        plot3d.update()
        grid = plot3d.output.get_block(0)
        con=tvtk.ContourFilter()
        con.set_input_data(grid)
        con.generate_values(10,grid.point_data.scalars.range)
        m=tvtk.PolyDataMapper(scalar_range=grid.point_data.scalars.range,
                            input_connection=con.output_port)
        a=tvtk.Actor(mapper=m)
        a.property.opacity=0.5
        win=self.ivtk_scene(a)
        win.scene.isometric_view()
        self.event_loop()

    # Basic mayavi mlab example: a tiny quad mesh.
    def test9(self):
        # x=[[-1,1,1,-1,-1],[-1,1,1,-1,-1]]
        # y=[[-1,-1,-1,-1,-1],[2,2,2,2,2]]
        # z=[[1,1,-1,-1,1],[1,1,-1,-1,1]]
        x=[[0,0],[0,1]]
        y=[[0,0],[1,1]]
        z=[[0,1],[0,0]]
        mlab.mesh(x,y,z)
        mlab.show()

    def test10(self):
        # Wireframe sphere of radius 7 from spherical coordinates.
        from numpy import pi,sin,cos
        dphi,dtheta=pi/30,pi/30
        [phi,theta]=np.mgrid[0:pi+dphi*1.5:dphi,0:2*pi+dtheta*1.5:dtheta]
        r=7
        x=r*sin(phi)*cos(theta)
        y=r*cos(phi)
        z=r*sin(phi)*sin(theta)
        mlab.mesh(x,y,z,representation='wireframe')
        mlab.show()

    def test11(self):
        # 3-D parametric curve rendered both as points and as a tube.
        t=np.linspace(0,4*np.pi,20)
        x=np.sin(2*t)
        y=np.cos(t)
        z=np.cos(2*t)
        s=np.zeros(20)+1
        mlab.points3d(x, y, z, s, colormap='Spectral', scale_factor=0.025)
        mlab.plot3d(x,y,z,s,colormap='Spectral',tube_radius=0.025)
        mlab.show()

    # mlab.imshow demo
    def test12(self):
        data = np.loadtxt('C:\\Users\\Administrator\\Documents\\GitHub\\DifferentialEvolution\\map.csv',
                          delimiter=',')
        mlab.imshow(data,colormap='gist_earth',interpolate=False)
        mlab.show()

    # mlab.surf demo (with a custom, partially-transparent colormap LUT)
    def test13(self):
        data=np.loadtxt('./Data/map.csv', delimiter=',')
        row=data.shape[0]
        column=data.shape[1]
        x,y=np.mgrid[0:row:1,0:column:1]
        surf=mlab.surf(x,y,data)
        # Ramp the alpha channel of the lookup table for a fade effect.
        lut=surf.module_manager.scalar_lut_manager.lut.table.to_array()
        lut[:,-1]=np.linspace(0,125,256)
        surf.module_manager.scalar_lut_manager.lut.table=lut
        mlab.show()

    # mlab.contour_surf demo
    def test14(self):
        data=np.loadtxt('./Data/map.csv', delimiter=',')
        row=data.shape[0]
        column=data.shape[1]
        x,y=np.mgrid[0:row:1,0:column:1]
        # NOTE(review): duplicated line below has no effect.
        x,y=np.mgrid[0:row:1,0:column:1]
        mlab.contour_surf(x,y,data,contours=100)
        mlab.show()

    # Vector-field rendering with quiver3d.
    def test15(self):
        from numpy import mgrid,sqrt,sin,zeros_like
        x, y, z = mgrid[-0:3:0.6, -0:3:0.6, 0:3:0.3]
        r = sqrt(x ** 2 + y ** 2 + z ** 4)
        u = y * sin(r) / (r + 0.001)
        v = -x * sin(r) / (r + 0.001)
        w = zeros_like(r)
        mlab.quiver3d(x, y, z, u, v, w)
        mlab.colorbar()
        mlab.show()

    # Picking objects with the mouse: clicking a red ball moves the outline.
    def test16(self):
        figure=mlab.gcf()
        figure.scene.disable_render=True
        x1,y1,z1=np.random.random((3,10))
        red_glyphs=mlab.points3d(x1,y1,z1,color=(1,0,0),resolution=10)
        x2,y2,z2=np.random.random((3,10))
        white_glyphs=mlab.points3d(x2,y2,z2,color=(0.9,0.9,0.9),resolution=10)
        outline=mlab.outline(line_width=3)
        outline.outline_mode='cornered'
        outline.bounds=(x1[0]-0.1,x1[0]+0.1,
                        y1[0] - 0.1, y1[0] + 0.1,
                        z1[0] - 0.1, z1[0] + 0.1)
        figure.scene.disable_render=False
        glyph_points=red_glyphs.glyph.glyph_source.glyph_source.output.points.to_array()
        def picker_callback(picker):
            # Map the picked sub-glyph index back to the ball index.
            if picker.actor in red_glyphs.actor.actors:
                point_id =int(picker.point_id/glyph_points.shape[0])
                if point_id!=-1:
                    x,y,z=x1[point_id],y1[point_id],z1[point_id]
                    outline.bounds=(x-0.1,x+0.1,
                                    y - 0.1, y + 0.1,
                                    z - 0.1, z + 0.1)
        picker=figure.on_mouse_pick(picker_callback)
        picker.tolerance=0.01
        mlab.title('Click on red balls')
        mlab.show()

    # Scalar-field visualisation (several alternatives left commented out).
    def test17(self):
        x,y,z=np.ogrid[-10:10:20j,-10:10:20j,-10:10:20j]
        s=np.sin(x*y*z)/(x*y*z)
        src=mlab.pipeline.scalar_field(s)
        # mlab.pipeline.image_plane_widget(src,plane_orientation='x_axes',slice_index=10)
        # mlab.pipeline.image_plane_widget(src, plane_orientation='y_axes', slice_index=10)
        # mlab.pipeline.iso_surface(src,contours=[s.min()+0.1*s.ptp()],opacity=0.1)
        # mlab.pipeline.iso_surface(src,contours=[s.max()-0.1*s.ptp()])
        # mlab.contour3d(s,contours=5,transparent=True)
        mlab.pipeline.volume(src)
        mlab.outline()
        mlab.show()

    # Vector-field visualisation.
    def test18(self):
        x, y, z = np.mgrid[0:1:20j, 0:1:20j, 0:1:20j]
        u = np.sin(np.pi * x) * np.cos(np.pi * z)
        v = -2 * np.sin(np.pi * y) * np.cos(2 * np.pi * z)
        w = np.cos(np.pi * x) * np.sin(np.pi * z) + np.cos(np.pi * y) * np.sin(2 * np.pi * z)
        # Four alternative pipeline renderings below; mlab.pipeline.vector_field
        # down-samples the original field.
        # #1. iso-surfaces of equal vector norm
        # src=mlab.pipeline.vector_field(u,v,w)
        # magnitude=mlab.pipeline.extract_vector_norm(src)
        # mlab.pipeline.iso_surface(magnitude,contours=[1.9,1.8])
        # #2. render the whole vector field
        # src=mlab.pipeline.vector_field(u,v,w)
        # mlab.pipeline.vectors(src,mask_points=10,scale_factor=1)
        # #3. vectors on a single movable cut plane
        # src=mlab.pipeline.vector_field(u,v,w)
        # mlab.pipeline.vector_cut_plane(src,mask_points=10,scale_factor=1)
        # #4. streamlines
        # src=mlab.pipeline.vector_field(u,v,w)
        # magnitude=mlab.pipeline.extract_vector_norm(src)
        # flow = mlab.pipeline.streamline(magnitude, seedtype='point',
        #                                 seed_visible=False,
        #                                 seed_scale=0.5,
        #                                 seed_resolution=5 )
        # The calls below operate on the full (non-down-sampled) data:
        # show every vector
        # mlab.quiver3d(u,v,w)
        # streamline plot
        mlab.flow(u,v,w,seed_scale=1,seed_resolution=3,integration_direction='both',seedtype='sphere')
        mlab.show()
def showKrigingData(path):
    """Parse a Kriging model dump at *path*, render it with mlab, and save
    a PNG next to the input file.

    Expected file layout (as consumed here):
      - an integer count N, then N//3 groups of (x, y, value) matrices,
        each matrix preceded by its row and column counts;
      - an integer count M, then M//2 pairs of (points matrix, values vector).
    Only the first (x, y, value) group and the first point set are rendered.
    """
    graphData = []
    pointData = []
    with open(path,'r') as file:
        texts = file.readlines()
    pos = 0
    import re
    reg_int = re.compile(r'-?\d+')
    # NOTE(review): the class [\+,-] also matches ',' — probably unintended
    # but kept for compatibility with existing dumps.
    reg_float = re.compile(r'-?\d\.\d+e[\+,-]\d+')
    num = int(reg_int.search(texts[pos]).group(0))
    pos+=1
    for k in range(num//3):
        row = int(reg_int.search(texts[pos]).group(0))
        col = int(reg_int.search(texts[pos+1]).group(0))
        pos+=3
        x = np.zeros((row,col))
        for i in range(row):
            text_list = reg_float.findall(texts[pos+i])
            for j in range(col):
                x[i,j] = float(text_list[j])
        graphData.append(x)
        pos+=row+1
        y = np.zeros((row,col))
        for i in range(row):
            text_list = reg_float.findall(texts[pos+i])
            for j in range(col):
                y[i,j] = float(text_list[j])
        graphData.append(y)
        pos+=row+1
        v = np.zeros((row,col))
        for i in range(row):
            text_list = reg_float.findall(texts[pos+i])
            for j in range(col):
                v[i,j] = float(text_list[j])
        graphData.append(v)
        pos+=row
    num = int(reg_int.search(texts[pos]).group(0))
    pos+=1
    for k in range(num//2):
        row = int(reg_int.search(texts[pos]).group(0))
        col = int(reg_int.search(texts[pos+1]).group(0))
        pos+=3
        x = np.zeros((row,col))
        for i in range(row):
            text_list = reg_float.findall(texts[pos+i])
            for j in range(col):
                x[i,j] = float(text_list[j])
        pointData.append(x)
        pos+=row+1
        y = np.zeros(row)
        # BUG FIX: the original read `text_list[j]` — a stale buffer (and a
        # stale index) left over from the matrix above — so every value came
        # from the wrong line. Re-parse each value line instead.
        for i in range(row):
            value_tokens = reg_float.findall(texts[pos+i])
            y[i] = float(value_tokens[0])
        pointData.append(y)
        pos+=row+1
    # Only the len(graphData)==3 / len(pointData)==2 case is rendered;
    # any extra blocks are parsed but not shown.
    mlab.figure(size=[1024,800])
    graph = mlab.imshow(graphData[0],graphData[1],graphData[2])
    # mlab.surf(x,y,s)
    if len(pointData)==2:
        x = pointData[0][:,0]
        y = pointData[0][:,1]
        z = np.zeros_like(x)
        mlab.points3d(x,y,z,scale_factor=0.03)
    # Alternative: size/colour the points by insertion order.
    # x = pointData[0][:,0]
    # y = pointData[0][:,1]
    # newPointNum = x.shape[0]
    # z = np.zeros(newPointNum)
    # c = np.linspace(1,2,newPointNum)
    # mlab.points3d(x,y,z,c,colormap="copper", scale_factor=.25)
    # mlab.outline()
    # mlab.axes(xlabel='x', ylabel='y', zlabel='z')
    mlab.colorbar(graph,'value',orientation='vertical',label_fmt='%.2f')
    mlab.view(0,0)
    photoPath = re.sub(r'.txt','.png',path)
    mlab.savefig(photoPath)
    mlab.close()
    # mlab.show()
def test1():
    '''Display (the log of) the G8 benchmark function over a small window.'''
    # f = lambda x,y:-(np.sin(2*np.pi*x)**3*np.sin(2*np.pi*y))/(x**3*(x+y))
    f = lambda x:-(np.sin(2*np.pi*x[0])**3*np.sin(2*np.pi*x[1]))/(x[0]**3*(x[0]+x[1]))
    # G8 inequality constraints (currently unused — see commented loop body).
    g1 = lambda x:x[0]**2-x[1]+1
    g2 = lambda x:1-x[0]+(x[1]-4)**2
    # min = [0.001,0.001]
    # max = [10,10]
    # NOTE(review): `min`/`max` shadow the builtins within this function.
    min = [1,3]
    max = [2,5]
    mesh_x ,mesh_y = np.mgrid[min[0]:max[0]:500j,min[1]:max[1]:500j]
    v = np.zeros_like(mesh_x)
    for i in range(mesh_x.shape[0]):
        for j in range(mesh_x.shape[1]):
            p = np.array([mesh_x[i,j],mesh_y[i,j]])
            # Log-compress (with an offset) to flatten the value range.
            v[i,j] = np.log(f(p)+10000)
            # if g1(p)<0 and g2(p)<0:
            #     v[i,j] = f(p)
            # else:
            #     v[i,j] = 0
    mlab.figure(size=[1024,800])
    mlab.imshow(mesh_x,mesh_y,v)
    mlab.show()
def test2():
    '''Batch-render every Kriging dump (predicted / variance / EI) for
    eleven optimization steps under a fixed results directory.'''
    root_path = '/home/sgdd/Optimization-under-Constraint/Data/约束优化算法测试1/stepC_5'
    for k in range(11):
        path = root_path+'/Kriging_Predicte_Model_%d.txt'%k
        showKrigingData(path)
        path = root_path+'/Kriging_Varience_Model_%d.txt'%k
        showKrigingData(path)
        path = root_path+'/Kriging_EI_Model_%d.txt'%k
        showKrigingData(path)
def test3():
    '''Render one specific dump file (alternatives left commented out).'''
    showKrigingData('./Data/约束优化算法测试1/G8_Function.txt')
    # showKrigingData('./Data/Kriging_EI加点模型测试1/A_Kriging_Predicte_Model.txt')
    # showKrigingData('./Data/Kriging_EI加点模型测试1/A_Kriging_Varience_Model.txt')
    # showKrigingData('./Data/Kriging_EI加点模型测试1/A_Kriging_EI_Model.txt')
if __name__=='__main__':
    # Pick one demo to run; the class-based demos are left commented out.
    # test=DataVisual()
    # test.test13()
    test3()
| sgdd66/Optimization-under-Constraint | Mayavi_test.py | Mayavi_test.py | py | 18,371 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tvtk.tools.tvtk_doc.main",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tvtk.tools.tvtk_doc",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "tvtk.api.tvtk.CubeSource",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": ... |
22452873346 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import tree, ensemble
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import KFold
from sklearn.tree import plot_tree
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# Assignment Constants
RS = 10 #Random_State shared by all estimators/splitters below
FIGSIZE = (5,3)  # common matplotlib figure size
# Load the sklearn breast-cancer dataset into DataFrames.
# NOTE(review): assumes sklearn's convention target 0 = malignant,
# 1 = benign -- confirm; `ratio` is then the fraction of malignant rows.
cancer = load_breast_cancer()
df = pd.DataFrame(cancer.data, columns = cancer.feature_names)
target = pd.DataFrame(cancer.target, columns=['class'])
ratio = 1 - sum(target["class"]) / len(target)
print("shape = " ,df.shape)
print( "the base rate of malignant cancer occurrence :" , "{:.3f}".format(ratio) )
##### Decision Tree
# Fit a depth-2 tree on the full dataset and draw it.
clf = tree.DecisionTreeClassifier(max_depth=2,random_state=RS)
clf = clf.fit(cancer.data, cancer.target)
clf.score(cancer.data, cancer.target)  # NOTE(review): return value discarded (notebook-style cell)
plt.figure(dpi=150)
plot_tree(clf, feature_names=df.columns, class_names=True)
print("Example: Decision Tree, max_depth=2")
plt.show()
##### Cross_Validation
tmp = 10
tmp1 = "Max depth"
R = pd.DataFrame(np.zeros([1,tmp]),index=[tmp1]) #Result
for i in range(0,tmp):
clf = tree.DecisionTreeClassifier(max_depth=i+1,random_state=RS)
clf.fit(cancer.data, cancer.target) #<- for just Score1
R.loc[tmp1,i] = i+1
R.loc["Score1",i] = clf.score(cancer.data, cancer.target) #Full Set
R.loc["Score2",i] = np.mean(cross_val_score(clf, cancer.data, cancer.target, cv=KFold(n_splits=10,random_state=RS,shuffle=True))) #Cross-Validated
R.iloc[0,:] = R.iloc[0,:].apply("{:.0f}".format)
x = R.loc[tmp1 ,:]
plt.figure(figsize=FIGSIZE)
plt.xlabel(tmp1)
plt.ylabel("Accuracy")
plt.plot(x, R.loc["Score1",:], label="All_Data")
plt.plot(x, R.loc["Score2",:], label="Cross_Validation")
plt.legend(fontsize="small")
for i in [1,2]:
R.iloc[i,:] = R.iloc[i,:].apply("{:.3f}".format)
print("The accuracy")
R
##### Importance factor
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, stratify=cancer.target, random_state=10)
clf = DecisionTreeClassifier(max_depth=2, random_state=RS)
clf.fit(X_train, y_train)
# print(clf.feature_importances_)
df2 = pd.DataFrame(
{'feature':cancer.feature_names, 'importance':clf.feature_importances_})
fig, ax = plt.subplots()
fig.subplots_adjust(left=0.3)
ax.barh(df2.feature, df2.importance)
print("Example: Importance factor, max_depth=2")
plt.show()
##### Hold Out
tmp = 10
tmp1 = "Max depth"
R = pd.DataFrame(np.zeros([1,tmp]),index=[tmp1])
for i in range(0,tmp):
clf = tree.DecisionTreeClassifier(max_depth=i+1,random_state=RS)
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.25, stratify=cancer.target, random_state=RS)
clf.fit(X_train, y_train)
R.loc[tmp1,i] = i+1
R.loc["Training score",i] = clf.score(X_train, y_train)
R.loc["Test score",i] = clf.score(X_test, y_test)
clf = clf.fit(df, target)
predicted = clf.predict(df)
R.loc["Match",i] = sum(predicted == target["class"]) / len(target)
R.iloc[0,:] = R.iloc[0,:].apply("{:.0f}".format)
x = R.loc[tmp1,:]
plt.figure(figsize=FIGSIZE)
plt.xlabel(tmp1)
plt.ylabel("Test score")
plt.plot(x, R.loc["Training score",:],label="Training")
plt.plot(x, R.loc["Test score",:],label="Test")
plt.legend(fontsize="large")
tmp = R.loc["Max depth",R.loc["Test score",:] == max(R.loc["Test score",:])]
for i in range(1,len(R)):
R.iloc[i,:] = R.iloc[i,:].apply("{:.3f}".format)
print("for your reference: Hold out test")
print("Best Max depth = " ,int(tmp), "(Based on Test score)")
R
##### Random-forest hyper-parameter sweeps
# The n_estimators and Max_depth sections below previously duplicated ~30
# lines each; they now share one helper that differs only in which
# hyper-parameter is swept. (Print spacing is unified across the two runs.)
def _forest_cv_sweep(sweep_name, n_values, fixed_name, fixed_value, xtick_stop, xtick_step):
    """10-fold cross-validated accuracy of a RandomForestClassifier while
    sweeping one hyper-parameter over 1..n_values and holding the other fixed.

    sweep_name / fixed_name -- row labels, "Max_depth" or "n_estimators"
    xtick_stop / xtick_step -- x-axis tick range for the summary plot

    Plots mean accuracy vs the swept value, prints the best swept value and
    returns the (string-formatted) result table.
    """
    # map table row labels onto RandomForestClassifier keyword arguments
    arg_of = {"Max_depth": "max_depth", "n_estimators": "n_estimators"}
    rows = ["Max_depth", "n_estimators", "Average score"] + ["Test" + str(i) for i in range(1, 11)]
    R = pd.DataFrame(np.zeros([len(rows), n_values]), index=rows)
    R.loc[fixed_name, :] = fixed_value
    for i in range(n_values):
        kf = KFold(n_splits=10, shuffle=True, random_state=RS)
        params = {arg_of[fixed_name]: fixed_value, arg_of[sweep_name]: i + 1}
        forest = RandomForestClassifier(random_state=RS, **params)
        scores = cross_val_score(forest, cancer.data, cancer.target, cv=kf)
        R.loc[sweep_name, i] = i + 1
        R.loc[rows[3:], i] = scores  # the ten per-fold scores
        R.loc["Average score", i] = np.mean(scores)
    # summary plot: mean CV accuracy as a function of the swept value
    plt.figure(figsize=FIGSIZE)
    plt.xlabel(sweep_name)
    plt.ylabel("Average score")
    plt.xticks(np.arange(0, xtick_stop, xtick_step))
    plt.yticks(np.arange(0.92, 0.971, 0.01))
    plt.plot(R.loc[sweep_name, :], R.loc["Average score", :])
    plt.show()
    # best swept value (first one, if the maximum is tied)
    best = list(R.loc[sweep_name, R.loc["Average score", :] == max(R.loc["Average score", :])])[0]
    # pretty-print: integers for the two parameter rows, 3 decimals elsewhere
    for i in range(0, 2):
        R.iloc[i, :] = R.iloc[i, :].apply("{:.0f}".format)
    for i in range(2, len(R)):
        R.iloc[i, :] = R.iloc[i, :].apply("{:.3f}".format)
    print("10-fold cross-validated accuracy.")
    print(fixed_name, "=", fixed_value, "(fixed)")
    print("Best ", sweep_name, "=", int(best))
    return R
##### Consider n_estimators (max_depth fixed at 5)
R = _forest_cv_sweep("n_estimators", 20, "Max_depth", 5, 20, 4)
R
##### Consider Max_depth (n_estimators fixed at 16)
R = _forest_cv_sweep("Max_depth", 10, "n_estimators", 16, 10, 2)
R
| ken-100/edX | MachineLearning/HW01_DecisionTree_Cross-Validation.py | HW01_DecisionTree_Cross-Validation.py | py | 6,116 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.datasets.load_breast_cancer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": ... |
39674393133 | import logging
import csv
import re
from . import Indicator
from ..interfaces import CountryPolyInterface
class WebsiteIndicator(Indicator):
    """
    Indicator which detects the TLD of the website in the users profile and maps it to an area.
    Note: domains such as .com, .net and .org are ignored, and only country domains are detected.
    """
    # Captures an optional 2-3 letter TLD that ends a whitespace- or
    # slash-delimited token (or the string). Tokens without a TLD yield an
    # empty capture group.
    tld_regex = r"[^\s\/](?:\.([a-z]{2,3}))?(?:\s|\/|$)"

    def __init__(self, config):
        """Load the TLD->country CSV and pre-fetch country polygons.

        config -- parsed configuration; reads [multiindicator] tld_csv and
        gadm_polydb_path, and the WS_1 weight from [mi_weights].
        """
        super().__init__()
        csv_loc = config.get("multiindicator", "tld_csv")
        polydb_url = config.get("multiindicator", "gadm_polydb_path")
        self.weight = config.getfloat("mi_weights", "WS_1")
        self.cpi = CountryPolyInterface(polydb_url)
        # load the tlds from the csv, and get and store the polygons
        # NOTE(review): assumes each CSV row is (tld, country_name) -- confirm
        # against the tld_csv file format.
        self.codes = {}
        with open(csv_loc, newline='') as csvfile:
            tldreader = csv.reader(csvfile, delimiter=',', quotechar='|')
            for row in tldreader:
                countries = self.cpi.get_polys(row[1], self.get_weight(1))
                self.codes[row[0]] = (row[1], countries)
        self.tldre = re.compile(self.tld_regex)

    def get_loc(self, website):
        """Return the country polygons matching the TLDs found in the
        website field, or [] if the field is empty/unrecognized."""
        if website is None:
            return []
        # search the field for all websites
        tlds = self.tldre.findall(website)
        polys = []
        for t in tlds:
            # find the polys from the prebuilt dictionary
            td = t.strip()
            logging.debug("TLD found: %s exists = %s" % (td, (td in self.codes)))
            if td in self.codes:
                polys += self.codes[td][1]
        return polys
| Humpheh/twied | src/twied/multiind/indicators/websiteindicator.py | websiteindicator.py | py | 1,626 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "interfaces.CountryPolyInterface",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.debug",... |
26055383293 | import json
import torch
import yaml
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from tqdm import tqdm
from src.data.tooth_dataset import ToothDataset
from src.model.vgg.vgg import Vgg
from src.utils.transforms import SquarePad
if __name__ == "__main__":
with open("./src/model/unet/scripts/config.yml", "r") as f:
config = yaml.safe_load(f)
# Find out whether gpu is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load model
model = Vgg.load_from_checkpoint(
f"checkpoints/vgg/version_29/checkpoints/epoch=epoch=14-val_loss=val_f1=0.72.ckpt",
map_location=device
)
model.eval()
# Load data
with open("data/final/train_quadrant_enumeration_disease_healthy_unpacked_test.json") as f:
X = json.load(f)
if model.hparams.config["n_classes"] > 1:
# Remove healthy samples
X = list(filter(lambda x: int(x["annotation"]["category_id_3"]) != 4, X))
else:
X = list(filter(lambda x: "score" not in x["annotation"] or x["annotation"]["score"] >= .9, X))
for x in X:
if x["annotation"]["category_id_3"] == 4:
x["annotation"]["category_id_3"] = 0
else:
x["annotation"]["category_id_3"] = 1
transform = transforms.Compose([
SquarePad(),
transforms.Resize(224, interpolation=transforms.InterpolationMode.BILINEAR)
])
to_pil = transforms.ToPILImage()
# Define dataset
dataset_args = dict(image_dir=f"data/raw/training_data/quadrant_enumeration_disease/xrays",
n_classes=model.hparams.config["n_classes"])
dataset = ToothDataset(X, transform=transform, **dataset_args)
# Define loaders
loader_args = dict(batch_size=config["batch_size"], num_workers=0, pin_memory=True)
loader_train = DataLoader(dataset, **loader_args)
# Predict for each sample
prediction_list = []
for sample in tqdm(dataset, total=len(dataset)):
input = sample["image"]
file_name = sample["file_name"]
label = sample["label"]
with torch.no_grad():
prediction = model(input.unsqueeze(0).to(device))[0]
prediction_list.append(dict(
file_name=file_name,
label=label.cpu().tolist(),
prediction=prediction.detach().cpu().tolist()))
with open(f"output/vgg_binary_predict.json", "w") as f:
json.dump(prediction_list, f)
| tudordascalu/2d-teeth-detection-challenge | src/model/vgg/scripts/predict.py | predict.py | py | 2,508 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "yaml.safe_load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
42595708920 | from typing import Any, Dict
from django.contrib.auth.mixins import PermissionRequiredMixin
from .exceptions import UnfoldException
class UnfoldModelAdminViewMixin(PermissionRequiredMixin):
    """
    Prepares views to be displayed in admin
    """

    def get_context_data(self, **kwargs) -> Dict[str, Any]:
        # The view must be registered with its ModelAdmin instance so that
        # the admin site context (navigation, titles, ...) can be merged in.
        if "model_admin" not in self.kwargs:
            raise UnfoldException(
                "UnfoldModelAdminViewMixin was not provided with 'model_admin' argument"
            )
        model_admin = self.kwargs["model_admin"]
        site_context = model_admin.admin_site.each_context(self.request)
        return super().get_context_data(**kwargs, **site_context)
| unfoldadmin/django-unfold | src/unfold/views.py | views.py | py | 708 | python | en | code | 506 | github-code | 1 | [
{
"api_name": "django.contrib.auth.mixins.PermissionRequiredMixin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "exceptions.UnfoldException",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
... |
21372179198 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import time
import os
import shutil
# Setup of chrome preferences (download directory) -------
# NOTE(review): this download directory ("C:\journals\BJUI\Landing") does not
# match the paths used later in _infoGrabber ("c:\BJUI\landing") -- confirm
# which location is correct. The backslashes survive only because \j, \B, \L
# are not escape sequences; a raw string would be safer.
chromeOptions = webdriver.ChromeOptions()
prefs = {"download.default_directory" : "C:\journals\BJUI\Landing"}
chromeOptions.add_experimental_option("prefs",prefs)
driver = webdriver.Chrome(ChromeDriverManager().install(), options=chromeOptions)
volume_issue = []  # ["V<volume>", "I<issue>"] of the issue currently scraped
def years(start_year,end_year):
    """Scrape every BJUI issue of each year in [start_year, end_year]."""
    while start_year <= end_year:
        driver.get("https://bjui-journals.onlinelibrary.wiley.com/loi/1464410x/year/"+str(start_year))
        driver.implicitly_wait(40)
        _issues(start_year)
        start_year += 1
def _issues(start_year):
    """Visit every issue of the currently loaded volume-year page and scrape
    each of its articles via _infoGrabber().

    Bug fix: both index loops compared with `<=` against len(), which always
    raised IndexError on the final iteration; they now use `<`.
    """
    global volume_issue
    WebDriverWait(driver, 10).until(EC.presence_of_element_located
        ((By.CLASS_NAME, "visitable")))
    issue_list = driver.find_elements_by_class_name('visitable')
    issue_link = driver.current_url
    issue_num = 0
    while issue_num < len(issue_list):
        # the DOM is re-rendered after each navigation, so re-query the links
        issue_list = driver.find_elements_by_class_name('visitable')
        issue_list[issue_num].click()
        time.sleep(3)
        # e.g. "Volume 83, Issue 1" -> ["V83", "I1"]
        volume_issue = driver.find_element_by_class_name('cover-image__parent-item').text
        volume_issue = volume_issue.replace('Volume ','V')
        volume_issue = volume_issue.replace('Issue ','I')
        volume_issue = volume_issue.split(", ")
        WebDriverWait(driver, 10).until(EC.presence_of_element_located
            ((By.CLASS_NAME, "issue-item__title")))
        article_list = driver.find_elements_by_class_name('issue-item__title')
        article_link = driver.current_url
        issue_num += 1
        article_num = 0
        while article_num < len(article_list):
            article_list = driver.find_elements_by_class_name('issue-item__title')
            article_list[article_num].click()
            time.sleep(3)
            _infoGrabber(start_year)
            driver.get(article_link)
            WebDriverWait(driver, 10).until(EC.presence_of_element_located
                ((By.CLASS_NAME, "issue-item__title")))
            article_num += 1
        driver.get(issue_link)
def _infoGrabber(start_year):
    """Download the PDF of the currently open article page and move it into
    c:\\BJUI\\<start_year>\\ under a "V<vol>_I<iss>_<doi>_<title>.pdf" name."""
    title = driver.find_element_by_class_name('citation__title').text
    page = driver.find_element_by_class_name('page-range').text
    doi = driver.find_element_by_class_name('epub-doi').text
    # make the DOI filesystem-safe
    doi = doi.replace('https://doi.org/','')
    doi = doi.replace('.','_')
    doi = doi.replace('/','_')
    pdflink = driver.find_element_by_class_name('coolBar__ctrl.pdf-download').get_attribute('href')
    driver.get(pdflink)
    time.sleep(3)
    rename = f"{volume_issue[0]}"f"_{volume_issue[1]}"f"_{doi}" f"_{title}.pdf"
    if len(rename) >= 255:
        # truncate to the common filesystem filename limit
        rename = rename[0:255]
    if not driver.find_elements_by_id('app-navbar'):
        print('Article not available for download: 'f"{rename}")
        return
    driver.find_element_by_tag_name('body').send_keys('g')
    time.sleep(2)
    # shortcut for opening a download popup in the BJUI journal viewer
    filename = _fileName(10)  # NOTE(review): waits for the download; result unused
    # NOTE(review): these paths differ from the configured download directory
    # "C:\journals\BJUI\Landing" at the top of the file -- confirm.
    source = "c:\\BJUI\\landing" f"\\(unknown)"
    destination = "c:\\BJUI" f"\\{start_year}"
    path = os.path.join(destination)
    if not os.path.exists(path):
        os.makedirs(path)
    # looks for existing file path, creates if absent
    shutil.move(source,destination)
    try:
        os.rename(f"{destination}"f"\\(unknown)",f"{destination}"f"\\{rename}")
    except FileExistsError:
        # duplicate article: discard the freshly moved copy
        os.remove(f"{destination}"f"\\(unknown)")
def _fileName(waitTime):
    """Open chrome://downloads and poll (up to waitTime seconds, in coarse
    steps) for the name of the most recent download; returns None on timeout."""
    # driver.execute_script("window.open()")
    # WebDriverWait(driver,10).until(EC.new_window_is_opened)
    # driver.switch_to.window(driver.window_handles[-1])
    driver.get("chrome://downloads/")
    endTime = time.time()+waitTime
    while True:
        try:
            # https://stackoverflow.com/questions/34548041/selenium-give-file-name-when-downloading
            # downloadPercentage = driver.execute_script(
            #     "return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('#progress').value")
            # check if downloadPercentage is 100 (otherwise the script will keep waiting)
            time.sleep(10)
            # if downloadPercentage == 100:
            # return the file name once the download is completed
            return driver.execute_script("return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('div#content #file-link').text")
        except:
            # download entry not present yet; retry until the deadline
            pass
        time.sleep(1)
        if time.time() > endTime:
            break
# Entry point: scrape the 1996-1997 volumes.
years(1996, 1997)
| pdatlab/urology_journal_retriever | JournalGrabber.py | JournalGrabber.py | py | 5,308 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 20,
"usage_type": "call"
},
{
"api... |
42500434502 | from django.urls import path
from . import views
app_name = 'polls'  # URL namespace for {% url 'polls:...' %} reversing
urlpatterns = [
    # polls index page, e.g. /polls/
    path('', views.IndexView.as_view(), name='index'),
    # detail page of one question, e.g. /polls/2/
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
    # results page of one question, e.g. /polls/2/results/
    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
    # vote submission endpoint of one question, e.g. /polls/2/vote/
    path('<int:question_id>/vote/', views.vote, name='vote'),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
31851582700 | import pprint
import re # noqa: F401
import six
from asposeslidescloud.models.math_element import MathElement
def _flag_property(name, doc):
    """Build a plain get/set property backed by the private '_<name>' slot.

    Collapses the eight identical boolean-flag property pairs that were
    previously copy-pasted ~25 lines each.
    """
    attr = '_' + name

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        setattr(self, attr, value)

    return property(_get, _set, doc=doc)


class BorderBoxElement(MathElement):
    """BorderBox math element: a base element decorated with optional box
    edges and strikethrough lines.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'type': 'str',
        'base': 'MathElement',
        'hide_top': 'bool',
        'hide_bottom': 'bool',
        'hide_left': 'bool',
        'hide_right': 'bool',
        'strikethrough_horizontal': 'bool',
        'strikethrough_vertical': 'bool',
        'strikethrough_bottom_left_to_top_right': 'bool',
        'strikethrough_top_left_to_bottom_right': 'bool'
    }

    attribute_map = {
        'type': 'type',
        'base': 'base',
        'hide_top': 'hideTop',
        'hide_bottom': 'hideBottom',
        'hide_left': 'hideLeft',
        'hide_right': 'hideRight',
        'strikethrough_horizontal': 'strikethroughHorizontal',
        'strikethrough_vertical': 'strikethroughVertical',
        'strikethrough_bottom_left_to_top_right': 'strikethroughBottomLeftToTopRight',
        'strikethrough_top_left_to_bottom_right': 'strikethroughTopLeftToBottomRight'
    }

    type_determiners = {
        'type': 'BorderBox',
    }

    def __init__(self, type='BorderBox', base=None, hide_top=None, hide_bottom=None, hide_left=None, hide_right=None, strikethrough_horizontal=None, strikethrough_vertical=None, strikethrough_bottom_left_to_top_right=None, strikethrough_top_left_to_bottom_right=None):  # noqa: E501
        """BorderBoxElement - a model defined in Swagger"""  # noqa: E501
        super(BorderBoxElement, self).__init__(type)
        self._base = None
        self.type = 'BorderBox'
        if base is not None:
            self.base = base
        # Initialize every flag slot to None, then apply explicit arguments
        # through the property setters (same observable behavior as the
        # previous one-assignment-per-flag code).
        flag_values = {
            'hide_top': hide_top,
            'hide_bottom': hide_bottom,
            'hide_left': hide_left,
            'hide_right': hide_right,
            'strikethrough_horizontal': strikethrough_horizontal,
            'strikethrough_vertical': strikethrough_vertical,
            'strikethrough_bottom_left_to_top_right': strikethrough_bottom_left_to_top_right,
            'strikethrough_top_left_to_bottom_right': strikethrough_top_left_to_bottom_right,
        }
        for name, value in flag_values.items():
            setattr(self, '_' + name, None)
            if value is not None:
                setattr(self, name, value)

    @property
    def base(self):
        """Base: the element the border box is drawn around."""
        return self._base

    @base.setter
    def base(self, base):
        self._base = base

    # Simple boolean flags; docstrings match the swagger definitions.
    hide_top = _flag_property('hide_top', 'Hide Top Edge')
    hide_bottom = _flag_property('hide_bottom', 'Hide Bottom Edge')
    hide_left = _flag_property('hide_left', 'Hide Left Edge')
    hide_right = _flag_property('hide_right', 'Hide Right Edge')
    strikethrough_horizontal = _flag_property('strikethrough_horizontal', 'Strikethrough Horizontal')
    strikethrough_vertical = _flag_property('strikethrough_vertical', 'Strikethrough Vertical')
    strikethrough_bottom_left_to_top_right = _flag_property('strikethrough_bottom_left_to_top_right', 'Strikethrough Bottom-Left to Top-Right')
    strikethrough_top_left_to_bottom_right = _flag_property('strikethrough_top_left_to_bottom_right', 'Strikethrough Top-Left to Bottom-Right.')

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BorderBoxElement):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
{
"api_name": "asposeslidescloud.models.math_element.MathElement",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "six.iteritems",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 306,
"usage_type": "call"
}
] |
183727253 | import cv2
import numpy as np
import os
import svgwrite
# Trace the INIAD logo bitmap into an SVG with two stacked polygons: the
# whole-logo outline and the hue-thresholded (blue) region.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))


def _approx_outline(bgr_image, scale=0.01, size=(10000, 10000)):
    """Return the approximated contour vertices of a BGR image.

    The image is upscaled to `size`, converted to gray, binarized with an
    inverted Otsu threshold, and every detected contour is simplified with
    approxPolyDP. Returns a flat list of [x, y] pairs scaled by `scale`.

    (Replaces two copy-pasted pipelines; also calls approxPolyDP once per
    contour instead of once per vertex, removing an accidental O(n^2).)
    """
    resized = cv2.resize(bgr_image, size)
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    _, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    points = []
    for cnt in contours:
        epsilon = 0.0001 * cv2.arcLength(cnt, True)
        for vertex in cv2.approxPolyDP(cnt, epsilon, True):
            points.append([vertex[0][0] * scale, vertex[0][1] * scale])
    return points


# Outline of the full logo.
img = cv2.imread(BASE_DIR + "/INIAD_logo.png")
approx = _approx_outline(img)

# Hue-threshold (after smoothing) to isolate the colored region, write it out,
# and trace the inverted mask the same way.
img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_HSV = cv2.GaussianBlur(img_HSV, (9, 9), 3)
img_H, img_S, img_V = cv2.split(img_HSV)
ret, img_1 = cv2.threshold(img_H, 50, 255, cv2.THRESH_BINARY)
cv2.imwrite(BASE_DIR + "/iniad1.bmp", img_1)
img1 = cv2.imread(BASE_DIR + "/iniad1.bmp")
approx1 = _approx_outline(cv2.bitwise_not(img1))

# Compose the SVG: full outline first, colored region on top.
color = "blue"
dwg = svgwrite.Drawing(BASE_DIR + "/INIAD_logo.svg")
dwg.add(dwg.polygon(points=approx))
dwg.add(dwg.polygon(points=approx1, fill=color))
dwg.save()
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_numb... |
30332025823 | from pathlib import Path
import shutil
CURRENT = Path(__file__).resolve().parent
ConfPath = CURRENT / ".vscode"
if ConfPath.exists():
shutil.rmtree(ConfPath)
ConfPath.mkdir()
tasks_conf = ConfPath / "tasks.json"
if False:
tasks_conf.write_text(
"""
{
"version": "2.0.0",
"tasks": [
{
"label": "build",
"type": "shell",
"command": ["pwsh", "build.ps1"]
}
]
}"""
)
build_dir = list(CURRENT.glob("build-qt_stub*"))
if len(build_dir) != 1:
print("error: mutiple build path")
exit(-1)
build_dir = build_dir[0]
import glob
built_exes = list(glob.glob("**/*.exe", root_dir=build_dir, recursive=True))
conf_single = """
{
"name": "{progname}",
"type": "cppdbg",
"request": "launch",
"program": "${workspaceFolder}/{dirname}/{program}",
"args": [],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"miDebuggerPath": "gdb.exe",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Set Disassembly Flavor to Intel",
"text": "-gdb-set disassembly-flavor intel",
"ignoreFailures": true
}
]
}"""
dirname = build_dir.name
conf_single = conf_single.replace("{dirname}", dirname)
confs = []
for built_exe in built_exes:
built_exe = built_exe.replace("\\", "/")
prog_name = Path(built_exe).resolve().name
conf = conf_single.replace("{program}", built_exe).replace("{progname}", prog_name)
confs.append(conf)
confs_str = ",".join(confs)
launch_conf = ConfPath / "launch.json"
launch_conf.write_text(
"""{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [{confs_str}
]
}""".replace(
"{confs_str}", confs_str
)
)
| soda92/external_sorting | generate_vscode_files.py | generate_vscode_files.py | py | 2,338 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": ... |
38940929836 | import functools
import io
import json
import logging
import mimetypes
import os.path
import re
import shutil
import urllib.parse
from typing import Dict, Type, TextIO
import yaml
from swagger_ui_bundle import swagger_ui_3_path # type: ignore[import]
from werkzeug import Response
from werkzeug.exceptions import HTTPException
from werkzeug.routing import Map, Submount, Rule
from cmk.gui import config
from cmk.gui.exceptions import MKUserError, MKAuthException
from cmk.gui.openapi import ENDPOINT_REGISTRY
from cmk.gui.plugins.openapi.utils import problem, ProblemException
from cmk.gui.wsgi.auth import verify_user, bearer_auth
from cmk.gui.wsgi.middleware import with_context_middleware, OverrideRequestMethod
from cmk.gui.wsgi.wrappers import ParameterDict
from cmk.utils import paths, crash_reporting
from cmk.utils.exceptions import MKException
ARGS_KEY = 'CHECK_MK_REST_API_ARGS'
logger = logging.getLogger('cmk.gui.wsgi.rest_api')
EXCEPTION_STATUS: Dict[Type[Exception], int] = {
MKUserError: 400,
MKAuthException: 401,
}
def spec_file() -> TextIO:
    """Load the pre-generated OpenAPI YAML spec into an in-memory text buffer.

    Returns a StringIO positioned at the start, so callers can read it
    immediately.
    """
    with open(openapi_spec_dir() + "/checkmk.yaml", "r") as yaml_file:
        buffer = io.StringIO(yaml_file.read())
    return buffer
def openapi_spec_dir():
    """Return the directory holding the pre-generated OpenAPI spec files."""
    return paths.web_dir + "/htdocs/openapi"
def serve_content(file_handle: TextIO, content_type):
    """Build a frozen 200 Response whose body is the full content of *file_handle*.

    The handle is rewound first, so the same buffer can be served repeatedly.
    """
    file_handle.seek(0)
    response = Response()
    response.status_code = 200
    response.content_type = content_type
    response.data = file_handle.read()
    response.freeze()
    return response
def json_file(file_handle: TextIO) -> TextIO:
    """Convert a YAML document held in *file_handle* into a JSON text buffer.

    The input handle is rewound before parsing; the returned StringIO is
    positioned at the start and contains the compact JSON rendering of the
    parsed YAML data.
    """
    file_handle.seek(0)
    parsed = yaml.safe_load(file_handle)
    return io.StringIO(json.dumps(parsed))
class Authenticate:
    """Wrap an Endpoint so it will be authenticated

    This is not very memory efficient as it wraps every individual endpoint in its own
    authenticator, even though this does not need to be. This has to be done this way right now,
    because we have multiple endpoints without authentication in this app. A refactoring to lower
    the memory foot-print of this is feasible and should be done if a good way has been found.
    """
    def __init__(self, func):
        # func: endpoint callable that, given the path parameters, returns a WSGI app.
        self.func = func
    def __call__(self, environ, start_response):
        # Path arguments were stashed into the environ by CheckmkRESTAPI._wsgi_app.
        path_args = environ[ARGS_KEY]
        auth_header = environ.get('HTTP_AUTHORIZATION', '')
        try:
            rfc7662 = bearer_auth(auth_header)
        except MKException as exc:
            # Authentication failed: answer with a 401 problem document that
            # echoes the offending header.
            return problem(
                status=401,
                title=str(exc),
                ext={'auth_header': auth_header},
            )(environ, start_response)
        # Run the wrapped endpoint in the authenticated user's context.
        with verify_user(rfc7662['sub'], rfc7662):
            wsgi_app = self.func(ParameterDict(path_args))
            return wsgi_app(environ, start_response)
@functools.lru_cache
def serve_file(file_path):
    """Serve a static file from disk as a cached, frozen Response.

    Opened in binary mode: the swagger-ui bundle contains non-text assets
    (png images, fonts) which would raise UnicodeDecodeError if decoded as
    text, as the previous `"r"` mode did.
    """
    with open(file_path, "rb") as fh:
        file_size = os.path.getsize(file_path)
        return serve_file_handle(fh, file_path, file_size)
def serve_file_handle(fh, file_path, file_size=None):
    """Build a frozen passthrough Response streaming *fh*.

    Content-Type is guessed from *file_path*'s extension; Content-Length is
    set when *file_size* is given.
    """
    resp = Response(fh)
    resp.direct_passthrough = True
    if file_size is not None:
        resp.headers['Content-Length'] = file_size
    content_type, _ = mimetypes.guess_type(file_path)
    if content_type is not None:
        resp.headers['Content-Type'] = content_type
    # NOTE(review): the caller (serve_file) closes fh when its `with` block
    # exits — presumably freeze() buffers the body before that happens;
    # confirm against the werkzeug version in use.
    resp.freeze()
    return resp
class ServeSwaggerUI:
    """Serve the bundled swagger-ui assets.

    index.html is rewritten on the fly so the UI loads this site's OpenAPI
    spec instead of the petstore demo URL baked into the bundle.
    """
    def __init__(self, prefix=''):
        # prefix: regex for the part of PATH_INFO to strip off (the site /
        # API mount point).
        self.prefix = prefix
    def __call__(self, environ, start_response):
        path_info = environ['PATH_INFO']
        # Whatever the prefix regex matched is the mount prefix; the rest
        # selects a file inside the swagger-ui bundle.
        path = re.sub(self.prefix, '', path_info)
        prefix = path_info[:-len(path)]
        if prefix.endswith("/ui"):
            prefix = prefix[:-3]
        if path == "/":
            path = "/index.html"
        if path == "/index.html":
            with open(swagger_ui_3_path + path) as fh:
                content = fh.read()
                content = content.replace("https://petstore.swagger.io/v2/swagger.json",
                                          prefix + "/openapi.yaml")
            resp = Response()
            resp.content_type = 'text/html'
            resp.status_code = 200
            resp.data = content
            resp.freeze()
            return resp(environ, start_response)
        # All other assets are served (and lru-cached) straight from disk.
        return serve_file(swagger_ui_3_path + path)(environ, start_response)
class CheckmkRESTAPI:
    """Top-level WSGI application of the Checkmk REST API.

    Builds a werkzeug URL map over all registered endpoints (each wrapped in
    an Authenticate guard) plus the swagger-ui and spec-download routes, and
    translates all failures into RFC 7807 style problem responses.
    """
    def __init__(self, debug: bool = False):
        # debug: when True, eagerly render every endpoint's OpenAPI operation
        # dict (catches spec errors at startup) and re-raise unhandled
        # exceptions instead of converting them to problem documents.
        self.debug = debug
        # TODO: Add resources for swagger-ui and json/yaml endpoints.
        # TODO: Add redoc.js endpoint.
        rules = []
        for endpoint in ENDPOINT_REGISTRY:
            if self.debug:
                # This helps us to make sure we can always generate a valid OpenAPI yaml file.
                _ = endpoint.to_operation_dict()
            rules.append(
                Rule(endpoint.default_path,
                     methods=[endpoint.method],
                     endpoint=Authenticate(endpoint.wrapped)))
        spec_file_buffer = spec_file()
        swagger_ui = ServeSwaggerUI(prefix="^/[^/]+/check_mk/api/[^/]+/ui")
        self.url_map = Map([
            Submount(
                "/<path:_path>",
                [
                    Rule("/ui/", endpoint=swagger_ui),
                    Rule("/ui/<path:path>", endpoint=swagger_ui),
                    # Rule("/doc/<path:file>", endpoint=serve_content()),
                    Rule(
                        "/openapi.yaml",
                        endpoint=serve_content(
                            file_handle=spec_file_buffer,
                            content_type='application/x-yaml; charset=utf-8',
                        ),
                    ),
                    Rule(
                        "/openapi.json",
                        endpoint=serve_content(
                            file_handle=json_file(spec_file_buffer),
                            content_type='application/json',
                        ),
                    ),
                    *rules,
                ],
            ),
        ])
        self.wsgi_app = with_context_middleware(OverrideRequestMethod(self._wsgi_app))
    def __call__(self, environ, start_response):
        # WSGI entry point; delegates to the wrapped middleware stack.
        return self.wsgi_app(environ, start_response)
    def _wsgi_app(self, environ, start_response):
        # Route the request, run the matched endpoint, and map exceptions to
        # problem responses (most specific exception type first).
        urls = self.url_map.bind_to_environ(environ)
        try:
            wsgi_app, path_args = urls.match()
            # Remove this again (see Submount above), so the validators don't go crazy.
            del path_args['_path']
            # This is an implicit dependency, as we only know the args at runtime, but the
            # function at setup-time.
            environ[ARGS_KEY] = path_args
            return wsgi_app(environ, start_response)
        except ProblemException as exc:
            # ProblemException is derived from HTTPException, so we have to catch it first.
            return exc.to_problem()(environ, start_response)
        except HTTPException as e:
            # We don't want to log explicit HTTPExceptions as these are intentional.
            # HTTPExceptions are WSGI apps
            return e(environ, start_response)
        except MKException as exc:
            if self.debug:
                raise
            return problem(
                status=EXCEPTION_STATUS.get(type(exc), 500),
                title=str(exc),
                detail="An exception occurred.",
            )(environ, start_response)
        except Exception as exc:
            # Unknown failure: persist a crash report and answer with a
            # problem document that links to it.
            crash = APICrashReport.from_exception()
            crash_reporting.CrashReportStore().save(crash)
            logger.exception("Unhandled exception (Crash-ID: %s)", crash.ident_to_text())
            if self.debug:
                raise
            crash_url = f"/{config.omd_site()}/check_mk/crash.py?" + urllib.parse.urlencode([
                ("crash_id", crash.ident_to_text()),
                ("site", config.omd_site()),
            ],)
            return problem(status=EXCEPTION_STATUS.get(type(exc), 500),
                           title=str(exc),
                           detail="An internal error occured while processing your request.",
                           ext={
                               'crash_report': {
                                   'href': crash_url,
                                   'method': 'get',
                                   'rel': 'cmk/crash-report',
                                   'type': 'text/html',
                               },
                               'crash_id': crash.ident_to_text(),
                           })(environ, start_response)
class APICrashReport(crash_reporting.ABCCrashReport):
    """API specific crash reporting class.
    """
    @classmethod
    def type(cls):
        # Identifier under which these crashes are grouped in the crash store.
        return "rest_api"
| superbjorn09/checkmk | cmk/gui/wsgi/applications/rest_api.py | rest_api.py | py | 9,052 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "cmk.gui.exceptions.MKUserEr... |
57138372 | """
please place a file gmail_settings.py with two varialbes:
GMAIL_ACCOUNT = ""
GMAIL_PASSWORD = ""
or set the environment variables
GMAIL_ACCOUNT = ""
GMAIL_PASSWORD = ""
"""
from settings import GMAIL_ACCOUNT, GMAIL_PASSWORD
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
import os
class Gmail(object):
    """Thin wrapper around smtplib for sending mail through a Gmail account.

    Connects and authenticates immediately on construction (STARTTLS on
    smtp.gmail.com:587). Build a message with create_new_message() plus the
    add_*/attach_* helpers, then deliver it with send_message().
    """
    def __init__(self,gmail_id = GMAIL_ACCOUNT, gmail_pwd = GMAIL_PASSWORD):
        self.gmail_id = gmail_id
        self.gmail_pwd = gmail_pwd
        self.gmail_server = smtplib.SMTP("smtp.gmail.com", 587)
        self.gmail_server.ehlo()
        self.gmail_server.starttls()
        self.gmail_server.ehlo()  # second EHLO is required after STARTTLS
        self.gmail_server.login(self.gmail_id, self.gmail_pwd)
    def disconnected(self):
        """Close the SMTP connection."""
        self.gmail_server.close()
    def create_new_message(self, subject = "[Gmail Notifier TEST] Sending mail TESTING", type = 'alternative'):
        """Start a fresh MIME multipart message with the given subject."""
        if type == None:
            self.msg = MIMEMultipart()
        else:
            self.msg = MIMEMultipart(type)
        self.msg["From"] = self.gmail_id
        self.msg["Subject"] = subject
    def add_text_into_message(self, text = "Sending mail TESTING"):
        """Attach a plain-text part to the current message."""
        self.msg.attach(MIMEText(text,'plain'))
    def add_html_into_message(self, html = "<h1>Sending mail TESTING</h1>"):
        """Attach an HTML part to the current message."""
        self.msg.attach(MIMEText(html,'html'))
    def attach_files_into_message(self, files_path_list = []):
        """Attach each listed file as a base64-encoded octet-stream part."""
        for attach in files_path_list:
            part = MIMEBase('application', 'octet-stream')
            # Close the file handle after reading (it was previously leaked).
            with open(attach, 'rb') as attachment:
                part.set_payload(attachment.read())
            Encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attach))
            self.msg.attach(part)
    def send_message(self, mail_to_list = ["c3h3.tw@gmail.com"], send_at_once=False):
        """Send the current message to every address in mail_to_list.

        With send_at_once=True a single mail addressed to the whole list is
        sent; otherwise one separate mail per recipient.
        """
        if send_at_once:
            self.gmail_server.sendmail(self.gmail_id, mail_to_list, self.msg.as_string())
        else:
            for to in mail_to_list:
                # Replace the To header instead of assigning again: Message
                # header assignment *appends*, so the old loop leaked every
                # previous recipient into later mails.
                del self.msg["To"]
                self.msg["To"] = to
                self.gmail_server.sendmail(self.gmail_id, to, self.msg.as_string())
| c3h3/gmaildotpy | gmail/gmail.py | gmail.py | py | 2,382 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "settings.GMAIL_ACCOUNT",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "settings.GMAIL_PASSWORD",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "smtplib.SMTP",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "email.MIM... |
71571421794 | from typing import Iterable, Optional
def main(data: Iterable[str]):
    """Find three entries of *data* summing to 2020 and print their product.

    data: iterable of numeric strings, one entry per line.
    Prints "None found?" when no triple sums to 2020.
    """
    nums = [int(line) for line in data]
    num_set = set(nums)

    def search(num: int) -> Optional[int]:
        # Return the complement that would bring *num* up to 2020, if present.
        target = 2020 - num
        if target in num_set:
            return target
        return None

    while nums:
        num1 = nums.pop()
        for num2 in nums:
            num3 = search(num1 + num2)
            # Compare against None explicitly: a valid complement of 0 is
            # falsy and would have been skipped by a bare truthiness check.
            if num3 is not None:
                print(num1 * num2 * num3)
                return
    print("None found?")
| Lexicality/advent-of-code | src/aoc/y2020/day1.py | day1.py | py | 620 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Iterable",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 8,
"usage_type": "name"
}
] |
15685220785 | from aiogram.dispatcher import Dispatcher, FSMContext
from aiogram.types import CallbackQuery, Message
from tgbot.misc import AddChannel
from tgbot.models import Channels
async def start_add_chanel(callback: CallbackQuery) -> None:
    """Ask the admin (in Russian) for a channel URL and enter the FSM state
    that waits for it."""
    await callback.message.edit_text('Отправте нам сылку на канал')
    await AddChannel.get_channel_url.set()
async def get_chanel_url(message: Message, state: FSMContext) -> None:
    """Persist the channel URL the admin sent and reply with the result.

    add_channel() returning -1 signals the channel already exists (see the
    duplicate-reply branch); any other value is treated as the new id.
    """
    channel = await Channels().add_channel(message.text)
    if channel != -1:
        await message.reply(f'Успешно добавлено его id {channel}')
        await state.finish()
        return None
    # Duplicate: keep the FSM state open so the admin can send another URL.
    await message.reply('Этот канал уже существует')
def register_add_chanel_handlers(dp: Dispatcher) -> None:
    """Register the 'add_chanel' button callback and the URL-awaiting handler."""
    dp.register_callback_query_handler(start_add_chanel,
                                       lambda callback: callback.data == 'add_chanel')
    dp.register_message_handler(get_chanel_url,
                                state=AddChannel.get_channel_url)
| sardor86/something_telegram_bot | tgbot/handlers/admin/add_chanel.py | add_chanel.py | py | 1,050 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aiogram.types.CallbackQuery",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tgbot.misc.AddChannel.get_channel_url.set",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tgbot.misc.AddChannel.get_channel_url",
"line_number": 11,
"usage_ty... |
3898644156 | import discord
import random
import os
import csv
import paralleldots
from datetime import datetime
import asyncio
#please make you have installed the packages or just run on replit
from discord_components import *
from discord.ext.commands import bot
from discord.utils import get
from discord.ext import commands, tasks
import json
import requests
intents = discord.Intents.default() #please make sure that you have enabled the Intents in discord dev portal
intents.members = True
intents = intents.all()
#client = discord.Client()
client=commands.Bot(command_prefix="!", intents=intents)
client.remove_command('help') #to remove the default help command
sentiment_API = os.environ['SENTIMENT_API']
paralleldots.set_api_key(sentiment_API)
# Submission timeout (hours and minutes need to update later accordingly)
PYCON_DATE = datetime(year=2021, month=11, day=21, hour=5, minute=30)
countdown = PYCON_DATE - datetime.now().replace(microsecond=0)
# Bot will only work on these channels
Bot_Channels=[
881582264419500032, # Write your gaming channel id here
857549642934124545,
857569486303264768,
857551907714760704,
870716941663350825,
857549120943292436,
857549492324139059,
863495216467804220,
874356441069269032,
874630819388473344,
870532018054783027
]
# "$" special character is to replace with user name
words_response = [
"Hi! $", "How are things with you $",
"It’s good to see you $", "Howdy! $",
"Hi, $. What’s new?",
"Good to see you $",
"Look who it is! $",
"Oh, it’s you $! Hi there!",
"Hi $, it’s me again!",
"Hang in there $ ,i am busy!",
"Yes Honey $"
]
# Dont mess with this names
Sentiments = [
"Happy",
"Sad",
"Excited",
"Angry",
"Bored",
"Fear"
]
sentiment_emojis = {}
# If you want bot to react with specific emoji add your user_name here
user_names = [
"rehh",
"EFFLUX",
"moon57",
"Here4Quantum",
"Ryah",
"Zoheb"
]
# You must have a emoji specific to your user_name in our server , if you dont have submit a emoji in "emoji-and-stickers submission" & ask Admin to add it with your user name
Custom_emojis = {}
#=============== EMOTIONS CHECK ========================
def check_sentiment(message):
    """Return the dominant tracked emotion of *message*, or 0 when the
    paralleldots API's top emotion is not one we handle."""
    scores = paralleldots.emotion(message).get('emotion')
    dominant = max(scores, key=scores.get)
    print(dominant)
    return dominant if dominant in Sentiments else 0
# Turn this to true if you dont want sentiment analysis
disable_sentiment_analysis = True;
@client.event
async def on_ready():
    # One-time startup hook: wire up discord_components and resolve the
    # custom / sentiment emoji objects by name from the guilds the bot sees.
    DiscordComponents(client) #for discord buttons
    for name in user_names:
        Custom_emojis[name] = discord.utils.get(client.emojis, name=name)
    for sentiment in Sentiments:
        sentiment_emojis[sentiment] = discord.utils.get(client.emojis, name=sentiment)
    print("Updated sentiment emojis")
    print("Bot is ready {0.user}".format(client))
@client.event
async def on_message(message):
    """Main message hook: greetings, per-user reactions, '!' shortcuts and
    sentiment-based emoji reactions."""
    if message.channel.id not in Bot_Channels:
        return
    text = message.content.lower().strip()
    if message.author == client.user:
        return
    if "limbo" in text:
        # Greet anyone who mentions "limbo", substituting their name for "$".
        response_message = random.choice(words_response)
        user_name = message.author.name
        response_message = response_message.replace("$", user_name)
        await auto_response(True,message,response_message)
    if message.author.name in user_names:
        await message.add_reaction(Custom_emojis.get(message.author.name))
    # Direct links of limbohacks for easy access with '!' prefix
    await auto_response(text.startswith('!website'),message,"https://limbohacks.tech/")
    await auto_response(text.startswith('!devpost'),message,"https://limbo-hacks-12968.devpost.com/")
    await auto_response(text.startswith('!discord'),message,"https://discord.com/invite/8XJSzmtWPp")
    # Recompute the countdown on every request: the module-level `countdown`
    # was frozen at startup, so '!time' always showed a stale value.
    remaining = PYCON_DATE - datetime.now().replace(microsecond=0)
    await auto_response(text.startswith('!time'), message, f"⏳ {remaining} Time left for the submission ⏳")
    await client.process_commands(message) # This line makes your other commands work.
    # Sentiment reaction, skipped in the gaming channel and honouring the
    # disable_sentiment_analysis kill switch (previously declared but ignored).
    if message.channel.id!=881582264419500032 and not disable_sentiment_analysis:
        result = check_sentiment(text)
        await auto_react(result ,message,sentiment_emojis.get(result))
async def auto_response(condition, message, content):
    """Send *content* to the message's channel when *condition* is truthy."""
    if not condition:
        return
    await message.channel.send(content)
async def auto_react(condition, message, content):
    """Add *content* as a reaction to *message* when *condition* is truthy."""
    if not condition:
        return
    await message.add_reaction(content)
#===============LEVEL================
@client.command()
async def levels(ctx):
    """Show the top-5 points leaderboard and the caller's own points.

    The previous implementation grouped scores in insertion order and never
    sorted, so the "top 5" were effectively arbitrary players.
    """
    with open('level.json') as f:
        users=json.load(f)
    # Sort players by points, highest first, and keep the top five.
    top = sorted(users.items(), key=lambda kv: kv[1], reverse=True)[:5]
    top_users=['```POINTS PLAYERS```']
    for user_id, points in top:
        top_users.append(f"```{points} {client.get_user(int(user_id))}```")
    e1 = discord.Embed(title=" Leaderboard ", description='\n'.join(top_users),color=0x00FF00)
    await ctx.send(embed=e1)
    await ctx.reply(f"```Your points : {users[str(ctx.author.id)]}```")
#===============HELP==================
@client.command()
async def help(ctx):
    """Send an embed listing the bot's informational commands."""
    entries = [
        '**!website\n**:> Get official website\n',
        '**!devpost\n**:> Get Devpost Link\n',
        '**!discord\n**:> Discord server link\n',
        '**!time\n**:> Time left for submission\n',
        '**!games\n**:> Try amazing games',
    ]
    help_embed = discord.Embed(title="Limbo Hacks", description=''.join(entries), color=0x3498db)
    await ctx.send(embed=help_embed)
#================QUIZ==================
@client.command()
async def quiz(ctx):
    """Fetch one easy computer-science trivia question from opentdb and
    award 3 points for a correct answer within 20 seconds."""
    import html  # local import: decode HTML entities (&quot; etc.) in API text
    e1 = discord.Embed(title=f"{ctx.author.name} , You Guessed It Right!", description="> You have scored! <",color=0x00FF00)
    e2 = discord.Embed(title=f"{ctx.author.name} , You Lost!", description=f"> Try again <",color=discord.Color.red())
    e3 = discord.Embed(title=f"{ctx.author.name}, You didn't Click on Time", description="> Timed Out! <",color=discord.Color.red())
    url='https://opentdb.com/api.php?amount=1&category=18&difficulty=easy&type=multiple'
    response=requests.get(url)
    json_data=json.loads(response.text)
    # Address the payload by its documented "results" key instead of relying
    # on positional dict ordering, and unescape the HTML entities opentdb
    # embeds in its strings (previously shown raw, e.g. "&quot;").
    result = json_data["results"][0]
    question = html.unescape(result["question"])
    p = html.unescape(result["correct_answer"])
    t = [html.unescape(answer) for answer in result["incorrect_answers"]]
    t.append(p)
    random.shuffle(t)
    def check(res):
        return ctx.author == res.user and res.channel == ctx.channel
    e = discord.Embed(title=f"{ctx.author.name}'s QUIZ Game!", description=f"**Q) {question}**",color=0x3498db)
    m = await ctx.reply(embed=e,components=[[Button(style=1, label=f"{t[0]}"),Button(style=3, label=f"{t[1]}"),Button(style=ButtonStyle.red,label=f"{t[2]}"),Button(style=ButtonStyle.grey,label=f"{t[3]}")]],)
    try:
        res = await client.wait_for("button_click", check=check, timeout=20)
    except asyncio.TimeoutError:
        await m.edit(embed=e3,components=[],)
        return
    if res.component.label==p:
        with open('level.json') as f:
            users=json.load(f)
        # NOTE: a first-time player ends up with 1 + 3 points because both
        # branches run (kept for compatibility with the other games).
        if str(ctx.author.id) not in users:
            users[str(ctx.author.id)]=1
        if str(ctx.author.id) in users:
            users[str(ctx.author.id)]+=3
        e1.set_footer(text="Your gained 3 points", icon_url=ctx.author.avatar_url)
        await m.edit(embed=e1,components=[],)
        see(users)
    else:
        await m.edit(embed=e2,components=[],)
#===========NEW USER FOR GAME==========
def see(users):
    """Persist the per-user points mapping to level.json in the working dir."""
    with open('level.json', 'w') as out_file:
        json.dump(users, out_file)
#================GAMES=================
@client.command()
async def games(ctx):
    # List the available mini-games; only usable in the gaming channel.
    if ctx.message.channel.id != 881582264419500032: #Please change this id to your game channel id
        await ctx.reply("Please go to gaming channel, I am waiting there...")
        return
    Game=['**!bonk\n**:> play Whac-A-Mole\n !bonk @member\n !bonk @member @member #for 3 players\n','**!rps\n**:> play Rock Paper Scissors (2 points)\n','**!guess\n**:> Can you guess which colour is it ?(1 point)\n','**!amongus\n**:> shhhhhhhhh!(1 point)\n','**!football\n**:> Wanna goal ?(2 points)\n','**!quiz\n**:> Answer Answer Answer whooo...(3 points)\n']
    game=discord.Embed(title='Games', description =''.join(Game),color=0x3498db)
    await ctx.send(embed=game)
    # Seed a level.json entry for first-time players so the games can
    # increment their points later.
    with open('level.json') as f:
        users=json.load(f)
    if str(ctx.author.id) not in users:
        users[str(ctx.author.id)]=1
        see(users)
#=================AMONG US==============
@client.command()
async def amongus(ctx):
    # Guess which crewmate colour the bot picked: 1 point for a correct
    # click within 5 seconds. Gaming channel only.
    if ctx.message.channel.id != 881582264419500032: #Please change this id to your game channel id
        await ctx.reply("Please go to gaming channel, I am waiting there...")
        return
    ch=['Blue ඞ','Green ඞ','Red ඞ','grey ඞ']
    comp=random.choice(ch)
    e = discord.Embed(title=f"{ctx.author.name}'s' amongus Game!", description="> Kill the imposter fast! <",color=0x3498db)
    e1 = discord.Embed(title=f"{ctx.author.name}, You Guessed It Right!", description="> You have won! <",color=0x00FF00)
    e3 = discord.Embed(title=f"{ctx.author.name}, You didn't Click on Time", description="> Timed Out! <",color=discord.Color.red())
    e2 = discord.Embed(title=f"{ctx.author.name}, You Lost!", description=f"> You have lost! < It was {comp}",color=discord.Color.red())
    m = await ctx.reply(
        embed=e,
        components=[[Button(style=1, label="Blue ඞ"),Button(style=3, label="Green ඞ"),Button(style=ButtonStyle.red,label="Red ඞ"),Button(style=ButtonStyle.grey,label="grey ඞ")]
        ],
    )
    def check(res):
        # Only the command author, in the same channel, may answer.
        return ctx.author == res.user and res.channel == ctx.channel
    try:
        res = await client.wait_for("button_click", check=check, timeout=5)
        if res.component.label==comp:
            with open('level.json') as f:
                users=json.load(f)
            # NOTE: a first-time player gets 1 + 1 points here because both
            # branches run.
            if str(ctx.author.id) not in users:
                users[str(ctx.author.id)]=1
            if str(ctx.author.id) in users:
                users[str(ctx.author.id)]+=1
            e1.set_footer(text="Your gained 1 point", icon_url=ctx.author.avatar_url)
            await m.edit(embed=e1,components=[],)
            see(users)
        else:
            await m.edit(embed=e2, components=[],)
    except asyncio.TimeoutError:
        await m.edit(
            embed=e3,
            components=[],
        )
#=============Rock Paper Scissors========
@client.command()
async def rps(ctx):
    """Rock-Paper-Scissors against the bot; a win is worth 2 points.

    Replaces the original nine-branch if-chain (with three duplicated
    point-award blocks) by a beats-map and a single award path; the visible
    behaviour — embeds, timings and points — is unchanged.
    """
    if ctx.message.channel.id != 881582264419500032: #Please change this id to your game channel id
        await ctx.reply("Please go to gaming channel, I am waiting there...")
        return
    ch1 = ["Rock","Scissors","Paper"]
    comp = random.choice(ch1)
    yet = discord.Embed(title=f"{ctx.author.display_name}'s ROCK PAPER SCISSORS Game",description=">status: Waiting for a click , 5 sec left" )
    win = discord.Embed(title=f"{ctx.author.display_name}, You won!",description=f">status: You Won -- Bot had chosen {comp}")
    out = discord.Embed(title=f"{ctx.author.display_name}' You didn't click on time",description=">status: Time Out!!")
    lost = discord.Embed(title=f"{ctx.author.display_name}You lost the Game",description=f">status: bot had chosen {comp}")
    tie = discord.Embed(title=f"{ctx.author.display_name} Game Tie>",description=">status: It was tie")
    m = await ctx.reply(
        embed=yet,
        components=[[Button(style=1, label="Rock",emoji="💎"),Button(style=3, label="Paper",emoji="📝"),Button(style=ButtonStyle.red, label="Scissors",emoji="✂️")]
        ],
    )
    def check(res):
        return ctx.author == res.user and res.channel == ctx.channel
    # Each key beats its value.
    beats = {"Rock": "Scissors", "Paper": "Rock", "Scissors": "Paper"}
    try:
        res = await client.wait_for("button_click", check=check, timeout=7)
        player = res.component.label
        if player == comp:
            await m.edit(embed=tie,components=[])
        elif beats[player] == comp:
            # Player wins: award 2 points and persist. (A first-time player
            # gets 1 + 2 because both branches run — kept as-is.)
            with open('level.json') as f:
                users=json.load(f)
            if str(ctx.author.id) not in users:
                users[str(ctx.author.id)]=1
            if str(ctx.author.id) in users:
                users[str(ctx.author.id)]+=2
            win.set_footer(text="Your gained 2 points", icon_url=ctx.author.avatar_url)
            await m.edit(embed=win,components=[])
            see(users)
        else:
            await m.edit(embed=lost,components=[])
    except asyncio.TimeoutError:
        await m.edit(
            embed=out,
            components=[],
        )
#=========Whac-A-Mole===========
@client.command(aliases=["wam", "whac"])
async def bonk(ctx, member : discord.Member=None, member1 : discord.Member=None):
    # Whac-A-Mole for two or three players: first to type "bonk" after the
    # alert scores; the round ends when nobody answers within 15 seconds.
    if ctx.message.channel.id != 881582264419500032: #Please change this id to your game channel id
        await ctx.reply("Please go to gaming channel, I am waiting there...")
        return
    await ctx.reply('```By default quit time is 10 sec of inactivity```')
    points = {ctx.author: 0, member: 0,member1: 0}
    random_time = random.randrange(5,25)
    if member == None:
        await ctx.send(f"{ctx.author.mention}, You need to mention a member to play with.")
    if member == client.user:
        await ctx.send(f"{ctx.author.mention}, Hey! Are you trying to catch me??! Mention someone else.")
    if member.bot == True:
        await ctx.send(f"{ctx.author.mention}, You can't play with a bot.")
    else:
        game = True
        # NOTE(review): with two players (member1 is None) the f-strings and
        # the check() predicate raise AttributeError on member1; the bare
        # except clauses below swallow that and fall back to the two-player
        # message — confirm this "exceptions as control flow" is intended.
        try:
            await ctx.send(f"{ctx.author.mention} and {member.mention} and {member1.mention}, I will alert you when a Mole will jump so you can bonk it 🔨")
        except:
            await ctx.send(f"{ctx.author.mention} and {member.mention}, I will alert you when a Mole will jump so you can bonk it 🔨")
        def check(m):
            return m.author.id == member.id or m.author.id == ctx.author.id or m.author.id == member1.id
        while game:
            try:
                await asyncio.sleep(random_time)
                try:
                    await ctx.send(f"{ctx.author.mention}, {member.mention}and {member1.mention}, A Mole has jumped! Type `bonk` to bonk it!")
                except:
                    await ctx.send(f"{ctx.author.mention} and {member.mention}, A Mole has jumped! Type `bonk` to bonk it!")
                message = await client.wait_for("message", check=check, timeout=15)
                if message.author.id == member.id and message.content.lower() == "bonk":
                    points[member] += 1
                    await ctx.send(f"{member.name} has bonk the mole! They have **{points[member]}** point(s)!")
                elif message.author.id == ctx.author.id and message.content.lower() == "bonk":
                    points[ctx.author] += 1
                    await ctx.send(f"{ctx.author.name} has bonk the mole! They have **{points[ctx.author]}** point(s)!")
                elif message.author.id == member1.id and message.content.lower() == "bonk":
                    points[member1] += 1
                    await ctx.send(f"{member1.name} has bonk the mole! They have **{points[member1]}** point(s)!")
            except:
                # Timeout (or member1 being None) ends the game and prints
                # the final scores.
                game = False
                embed = discord.Embed(
                title = "Game Over",
                description = "No one bonk 🔨 the mole in time so the game is over. Final Scores Below.")
                try:
                    embed.add_field(name = f"{member.name}'s score", value = f"{points[member]}")
                    embed.add_field(name = f"{member1.name}'s score", value = f"{points[member1]}")
                    embed.add_field(name = f"{ctx.author.name}'s score", value = f"{points[ctx.author]}")
                except:
                    embed.add_field(name = f"{member.name}'s score", value = f"{points[member]}")
                    embed.add_field(name = f"{ctx.author.name}'s score", value = f"{points[ctx.author]}")
                await ctx.send(embed=embed)
#=============Football========
@client.command()
async def football(ctx):
    # Penalty shootout: pick a corner; you score unless the keeper picked
    # the same corner or the 50% missChance triggers. A goal is 2 points.
    if ctx.message.channel.id != 881582264419500032: #Please change this id to your game channel id
        await ctx.reply("Please go to gaming channel, I am waiting there...")
        return
    options=["LEFT",'MIDDLE','RIGHT']
    computerOption = random.choice(options)
    def goal():
        # ASCII-art keeper position matching the bot's chosen corner.
        if computerOption=='LEFT':
            return('.🧍♂️')
        if computerOption=='MIDDLE':
            return ('⁃⁃⁃⁃⁃⁃⁃🧍♂️⁃⁃⁃⁃⁃')
        if computerOption=='RIGHT':
            return ('⁃⁃⁃⁃⁃⁃⁃⁃⁃⁃⁃⁃⁃🧍♂️')
    yet = discord.Embed(title=f"{ctx.author.display_name}'s PENALTY SHOOTOUT GAME",description=">status: Waiting for a click , 5 sec left" )
    yet.add_field(name=".🥅 🥅 🥅", value=goal() , inline=False)
    out = discord.Embed(title=f"{ctx.author.display_name}' You didn't click on time",description=">status: Time Out!!")
    win = discord.Embed(title=f"{ctx.author.display_name}, congratulations!",description="GOOOOOAL !!!!")
    miss = discord.Embed(title="MISSED !!",description="Keeper dived")
    save = discord.Embed(title="SAVED !!",description="Keeper saved")
    def check(msg):
        return msg.author == ctx.author and msg.channel == ctx.channel
    m = await ctx.reply(
        embed=yet,
        components=[[Button(style=1, label="LEFT",emoji="⚽"),Button(style=3, label="MIDDLE",emoji="⚽"),Button(style=ButtonStyle.red, label="RIGHT",emoji="⚽")]
        ],
    )
    missChance=random.randint(1,2)
    try:
        res = await client.wait_for("button_click", check=check, timeout=7)
        shoot = res.component.label
        if shoot == computerOption :
            await m.edit(embed=save,components=[])
        elif missChance == 1:
            await m.edit(embed=miss,components=[])
        else :
            # Goal: award 2 points (first-time players get 1 + 2 since both
            # branches run) and persist.
            with open('level.json') as f:
                users=json.load(f)
            if str(ctx.author.id) not in users:
                users[str(ctx.author.id)]=1
            if str(ctx.author.id) in users:
                users[str(ctx.author.id)]+=2
            win.set_footer(text="Your gained 2 points", icon_url=ctx.author.avatar_url)
            await m.edit(embed=win,components=[])
            see(users)
    except asyncio.TimeoutError:
        await m.edit(
            embed=out,
            components=[],
        )
#=======GUESS===========
@client.command()
async def guess(ctx):
    # Four-colour guessing game: 1 point for picking the bot's colour
    # within 5 seconds. Gaming channel only.
    if ctx.message.channel.id != 881582264419500032: #Please change this id to your game channel id
        await ctx.reply("Please go to gaming channel, I am waiting there...")
        return
    ch=['Blue','Green','Red','Grey']
    comp=random.choice(ch)
    e = discord.Embed(title=f"{ctx.author.name}'s' Guessing Game!", description="> Click a button to choose! <",color=0x3498db)
    e1 = discord.Embed(title=f"{ctx.author.name}, You Guessed It Right!", description="> You have won! <",color=0x00FF00)
    e3 = discord.Embed(title=f"{ctx.author.name}, You didn't Click on Time", description="> Timed Out! <",color=discord.Color.red())
    e2 = discord.Embed(title=f"{ctx.author.name}, You Lost!", description=f"> You have lost! < It was {comp}",color=discord.Color.red())
    m = await ctx.reply(
        embed=e,
        components=[[Button(style=1, label="Blue"),Button(style=3, label="Green"),Button(style=ButtonStyle.red,label="Red"),Button(style=ButtonStyle.grey,label="Grey")]
        ],
    )
    def check(res):
        return ctx.author == res.user and res.channel == ctx.channel
    try:
        res = await client.wait_for("button_click", check=check, timeout=5)
        if res.component.label==comp:
            with open('level.json') as f:
                users=json.load(f)
            # NOTE: a first-time player gets 1 + 1 points because both
            # branches run.
            if str(ctx.author.id) not in users:
                users[str(ctx.author.id)]=1
            if str(ctx.author.id) in users:
                users[str(ctx.author.id)]+=1
            e1.set_footer(text="Your gained 1 point", icon_url=ctx.author.avatar_url)
            await m.edit(embed=e1,components=[],)
            see(users)
        else:
            await m.edit(embed=e2, components=[],)
    except asyncio.TimeoutError:
        await m.edit(
            embed=e3,
            components=[],
        )
client.run(os.environ['TOKEN'])
| Limbo-Hacks/Mr-Limbo-bot | main.py | main.py | py | 21,202 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "discord.Intents.default",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": ... |
27400999130 | from __future__ import annotations
import itertools
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Iterable, Iterator
from dataclasses import dataclass, field
from typing import cast
from cognite.client.data_classes import data_modeling as dm
from cognite.client.data_classes.data_modeling.data_types import ListablePropertyType
from typing_extensions import Self
from cognite.pygen import config as pygen_config
from cognite.pygen.utils.text import create_name
_PRIMITIVE_TYPES = (dm.Text, dm.Boolean, dm.Float32, dm.Float64, dm.Int32, dm.Int64, dm.Timestamp, dm.Date, dm.Json)
_EXTERNAL_TYPES = (dm.TimeSeriesReference, dm.FileReference, dm.SequenceReference)
@dataclass(frozen=True)
class ViewSpaceExternalId:
    """
    This represents a view id.
    The motivation for this class is that API class and data classes are independent of the view version (given
    that all properties are equal). This enables reuse of data classes and APIs across view versions.
    Note this could be considered an edge case not worth supporting. However, when you publish data models
    from a .graphql format, the autogenerated view versions for identical types will be different when those types
    are part of two different data models.
    """
    space: str
    external_id: str
    @classmethod
    def from_(cls, view_id: dm.ViewId | dm.View) -> Self:
        # Both ViewId and View expose .space/.external_id; the view version
        # is deliberately dropped (see class docstring).
        return cls(space=view_id.space, external_id=view_id.external_id)
@dataclass(frozen=True)
class Field(ABC):
    """
    A field represents a pydantic field in the generated pydantic class.
    Args:
        name: The name of the field. This is used in the generated Python code.
        prop_name: The name of the property in the data model. This is used when reading and writing to CDF.
        pydantic_field: The name to use for the import 'from pydantic import Field'. This is used in the edge case
            when the name 'Field' name clashes with the data model class name.
    """
    name: str
    prop_name: str
    pydantic_field: str
    @property
    def need_alias(self) -> bool:
        # An alias is only needed when the Python attribute name differs from
        # the CDF property name.
        return self.name != self.prop_name
    @classmethod
    def from_property(
        cls,
        prop_name: str,
        prop: dm.MappedProperty | dm.ConnectionDefinition,
        data_class_by_view_id: dict[ViewSpaceExternalId, DataClass],
        field_naming: pygen_config.FieldNaming,
        view_name: str,
        pydantic_field: str = "Field",
    ) -> Field:
        """Create the concrete Field subclass matching a view property.

        Branch order is significant: the listable check must run before the
        plain CDF external-id-reference check, because a *listable* external
        reference is handled as a PrimitiveListField (see the note below).

        Raises:
            NotImplementedError: For property types not handled by any branch.
        """
        name = create_name(prop_name, field_naming.name)
        if isinstance(prop, dm.SingleHopConnectionDefinition):
            # Outwards edge with multiple targets -> one-to-many edge field
            # with its own generated edge API class.
            variable = create_name(prop_name, field_naming.variable)
            edge_api_class_input = f"{view_name}_{prop_name}"
            edge_api_class = f"{create_name(edge_api_class_input, field_naming.edge_api_class)}API"
            edge_api_attribute = create_name(prop_name, field_naming.api_class_attribute)
            return EdgeOneToMany(
                name=name,
                prop_name=prop_name,
                prop=prop,
                data_class=data_class_by_view_id[ViewSpaceExternalId(prop.source.space, prop.source.external_id)],
                variable=variable,
                pydantic_field=pydantic_field,
                edge_api_class=edge_api_class,
                edge_api_attribute=edge_api_attribute,
            )
        elif isinstance(prop, dm.MappedProperty) and (
            isinstance(prop.type, _PRIMITIVE_TYPES) or isinstance(prop.type, _EXTERNAL_TYPES)
        ):
            type_ = _to_python_type(prop.type)
            if isinstance(prop.type, ListablePropertyType) and prop.type.is_list:
                return PrimitiveListField(
                    name=name,
                    prop_name=prop_name,
                    type_=type_,
                    is_nullable=prop.nullable,
                    prop=prop,
                    pydantic_field=pydantic_field,
                )
            elif isinstance(prop.type, dm.CDFExternalIdReference):
                # Note: these are only CDF External Fields that are not listable. Listable CDF External Fields
                # are handled above.
                edge_api_class_input = f"{view_name}_{prop_name}"
                edge_api_class = f"{create_name(edge_api_class_input, field_naming.edge_api_class)}"
                edge_api_attribute = create_name(prop_name, field_naming.api_class_attribute)
                return CDFExternalField(
                    name=name,
                    prop_name=prop_name,
                    type_=type_,
                    is_nullable=prop.nullable,
                    prop=prop,
                    pydantic_field=pydantic_field,
                    edge_api_class=edge_api_class,
                    edge_api_attribute=edge_api_attribute,
                )
            else:
                # Plain scalar property (str/int/float/bool/date/datetime/dict).
                return PrimitiveField(
                    name=name,
                    prop_name=prop_name,
                    type_=type_,
                    is_nullable=prop.nullable,
                    default=prop.default_value,
                    prop=prop,
                    pydantic_field=pydantic_field,
                )
        elif isinstance(prop, dm.MappedProperty) and isinstance(prop.type, dm.DirectRelation):
            # For direct relation the source is required.
            view_id = cast(dm.ViewId, prop.source)
            target_data_class = data_class_by_view_id[ViewSpaceExternalId(view_id.space, view_id.external_id)]
            return EdgeOneToOne(
                name=name, prop_name=prop_name, prop=prop, data_class=target_data_class, pydantic_field=pydantic_field
            )
        else:
            raise NotImplementedError(f"Property type={type(prop)!r} is not supported")
    @abstractmethod
    def as_read_type_hint(self) -> str:
        """Type hint for this field in the read (retrieved) data class."""
        raise NotImplementedError()
    @abstractmethod
    def as_write_type_hint(self) -> str:
        """Type hint for this field in the write (apply) data class."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_edge(self) -> bool:
        """Whether this field links to another data class."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_time_field(self) -> bool:
        """Whether this field is date/datetime valued."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_text_field(self) -> bool:
        """Whether this field is string valued."""
        raise NotImplementedError()
@dataclass(frozen=True)
class PrimitiveFieldCore(Field, ABC):
    """Shared base for fields holding plain Python values (never edges).

    Attributes beyond ``Field``:
        type_: Python type name used in generated hints (e.g. ``"str"``).
        is_nullable: Whether the underlying CDF property may be null.
        prop: The mapped property definition this field was created from.
    """

    type_: str
    is_nullable: bool
    prop: dm.MappedProperty

    @property
    def is_edge(self) -> bool:
        """Primitive fields never point at another data class."""
        return False

    @property
    def is_time_field(self) -> bool:
        """True for date/datetime valued fields."""
        return self.type_ in {"datetime.datetime", "datetime.date"}

    @property
    def is_text_field(self) -> bool:
        """True for string valued fields."""
        return self.type_ == "str"
@dataclass(frozen=True)
class PrimitiveField(PrimitiveFieldCore):
    """
    A scalar field backed by a basic type such as str, int, float, bool,
    datetime.datetime or datetime.date.
    """

    default: str | int | dict | None = None

    def as_read_type_hint(self) -> str:
        """Hint for the read data class; aliased when names differ."""
        if not self.need_alias:
            return f"Optional[{self.type_}] = {self.default}"
        # The alias maps the Python attribute back to the CDF property name.
        return f'Optional[{self.type_}] = {self.pydantic_field}({self.default}, alias="{self.prop_name}")'

    def as_write_type_hint(self) -> str:
        """Hint for the write data class; optional only when nullable."""
        if self.is_nullable:
            return f"Optional[{self.type_}] = {self.default}"
        return self.type_
@dataclass(frozen=True)
class PrimitiveListField(PrimitiveFieldCore):
    """
    A list-valued field of a basic type, e.g. list[str], list[int],
    list[float], list[bool], list[datetime.datetime] or list[datetime.date].
    """

    def as_read_type_hint(self) -> str:
        """Read side: always optional; aliased when names differ."""
        hint = f"Optional[list[{self.type_}]] = None"
        if self.need_alias:
            hint = f'Optional[list[{self.type_}]] = {self.pydantic_field}(None, alias="{self.prop_name}")'
        return hint

    def as_write_type_hint(self) -> str:
        """Write side: optional only when the property is nullable."""
        base = f"list[{self.type_}]"
        return f"Optional[{base}] = None" if self.is_nullable else base
@dataclass(frozen=True)
class CDFExternalField(PrimitiveFieldCore):
    """
    A reference to an external CDF resource (time series, file or sequence).

    Attributes:
        edge_api_class: Name of the generated API class for this field.
        edge_api_attribute: Attribute the API class is exposed under.
    """

    edge_api_class: str
    edge_api_attribute: str

    def as_read_type_hint(self) -> str:
        """Read side: always optional; aliased when names differ."""
        if not self.need_alias:
            return f"Optional[{self.type_}] = None"
        return f'Optional[{self.type_}] = {self.pydantic_field}(None, alias="{self.prop_name}")'

    def as_write_type_hint(self) -> str:
        """Write side: optional only when the property is nullable."""
        return f"Optional[{self.type_}] = None" if self.is_nullable else self.type_
@dataclass(frozen=True)
class EdgeField(Field, ABC):
    """Base for fields that link this data class to another data class."""

    data_class: DataClass

    @property
    def is_edge(self) -> bool:
        """Edge fields always point at another data class."""
        return True

    @property
    def is_time_field(self) -> bool:
        """Edges are never date/datetime valued."""
        return False

    @property
    def is_text_field(self) -> bool:
        """Edges are never string valued."""
        return False
@dataclass(frozen=True)
class EdgeOneToOne(EdgeField):
    """A direct relation: this data class points at exactly one other node."""

    prop: dm.MappedProperty

    def as_read_type_hint(self) -> str:
        """Read side stores only the external id of the target node."""
        if not self.need_alias:
            return "Optional[str] = None"
        return f'Optional[str] = {self.pydantic_field}(None, alias="{self.prop_name}")'

    def as_write_type_hint(self) -> str:
        """Write side accepts the target apply-class or an external id."""
        target = self.data_class.write_name
        return f"Union[{target}, str, None] = {self.pydantic_field}(None, repr=False)"
@dataclass(frozen=True)
class EdgeOneToMany(EdgeField):
    """An edge with many targets, materialised as a list of linked nodes.

    Attributes:
        variable: Variable name used for a single item in generated loops.
        edge_api_class: Name of the generated edge API class.
        edge_api_attribute: Attribute the edge API class is exposed under.
        prop: The connection definition this field was created from.
    """

    variable: str
    edge_api_class: str
    edge_api_attribute: str
    prop: dm.SingleHopConnectionDefinition

    def as_read_type_hint(self) -> str:
        """Read side stores the external ids of the target nodes."""
        if not self.need_alias:
            return "Optional[list[str]] = None"
        return f"Optional[list[str]] = {self.pydantic_field}(None, alias='{self.prop_name}')"

    def as_write_type_hint(self) -> str:
        """Write side accepts target apply-classes or external ids."""
        union = f"Union[list[{self.data_class.write_name}], list[str], None]"
        return f"{union} = {self.pydantic_field}(default=None, repr=False)"
@dataclass(frozen=True)
class DataClass:
    """
    Naming and field information for one generated data class, built from a view.
    """

    view_name: str
    read_name: str
    write_name: str
    read_list_name: str
    write_list_name: str
    variable: str
    variable_list: str
    file_name: str
    view_id: ViewSpaceExternalId
    fields: list[Field] = field(default_factory=list)

    @classmethod
    def from_view(cls, view: dm.View, data_class: pygen_config.DataClassNaming) -> Self:
        """Derive class, file and variable names from the view's display name."""
        base = (view.name or view.external_id).replace(" ", "_")
        class_name = create_name(base, data_class.name)
        singular = create_name(base, data_class.variable)
        plural = create_name(base, data_class.variable_list)
        if singular == plural:
            # Avoid a clash when the two naming conventions produce one name.
            plural = f"{plural}_list"
        return cls(
            view_name=base,
            read_name=class_name,
            write_name=f"{class_name}Apply",
            read_list_name=f"{class_name}List",
            write_list_name=f"{class_name}ApplyList",
            variable=singular,
            variable_list=plural,
            file_name=f"_{create_name(base, data_class.file)}",
            view_id=ViewSpaceExternalId.from_(view),
        )

    def update_fields(
        self,
        properties: dict[str, dm.MappedProperty | dm.ConnectionDefinition],
        data_class_by_view_id: dict[ViewSpaceExternalId, DataClass],
        field_naming: pygen_config.FieldNaming,
    ) -> None:
        """Create a Field for every view property and append it to ``fields``."""
        pydantic_field = self.pydantic_field
        for prop_name, prop in properties.items():
            self.fields.append(
                Field.from_property(
                    prop_name, prop, data_class_by_view_id, field_naming, self.view_name, pydantic_field=pydantic_field
                )
            )

    @property
    def text_field_names(self) -> str:
        """Name of the generated literal listing only the text fields."""
        return f"{self.read_name}TextFields"

    @property
    def field_names(self) -> str:
        """Name of the generated literal listing all fields."""
        return f"{self.read_name}Fields"

    @property
    def properties_dict_name(self) -> str:
        """Name of the generated field-name -> property-name mapping constant."""
        return f"_{self.read_name.upper()}_PROPERTIES_BY_FIELD"

    @property
    def pydantic_field(self) -> str:
        """Use ``pydantic.Field`` when a generated class name shadows ``Field``."""
        names = (self.read_name, self.write_name, self.read_list_name, self.write_list_name)
        return "pydantic.Field" if "Field" in names else "Field"

    @property
    def init_import(self) -> str:
        """The import line exposing this data class from the package __init__."""
        names = [self.read_name, self.write_name, self.read_list_name, self.write_list_name]
        if not self.has_only_one_to_many_edges:
            names.append(self.field_names)
        if self.has_text_field:
            names.append(self.text_field_names)
        return f"from .{self.file_name} import {', '.join(sorted(names))}"

    def __iter__(self) -> Iterator[Field]:
        yield from self.fields

    @property
    def one_to_one_edges(self) -> Iterable[EdgeOneToOne]:
        """Direct-relation fields."""
        return (fld for fld in self.fields if isinstance(fld, EdgeOneToOne))

    @property
    def one_to_many_edges(self) -> Iterable[EdgeOneToMany]:
        """Multi-target edge fields."""
        return (fld for fld in self.fields if isinstance(fld, EdgeOneToMany))

    @property
    def primitive_fields(self) -> Iterable[PrimitiveField]:
        """Scalar (non-list, non-edge) fields."""
        return (fld for fld in self.fields if isinstance(fld, PrimitiveField))

    @property
    def primitive_core_fields(self) -> Iterable[PrimitiveFieldCore]:
        """All primitive fields, including lists and CDF external references."""
        return (fld for fld in self.fields if isinstance(fld, PrimitiveFieldCore))

    @property
    def text_fields(self) -> Iterable[PrimitiveFieldCore]:
        """Primitive fields whose value type is str."""
        return (fld for fld in self.primitive_core_fields if fld.is_text_field)

    @property
    def cdf_external_fields(self) -> Iterable[CDFExternalField]:
        """Fields referencing external CDF resources."""
        return (fld for fld in self.fields if isinstance(fld, CDFExternalField))

    @property
    def single_timeseries_fields(self) -> Iterable[CDFExternalField]:
        """CDF external fields that reference a time series."""
        return (fld for fld in self.cdf_external_fields if isinstance(fld.prop.type, dm.TimeSeriesReference))

    @property
    def has_one_to_many_edges(self) -> bool:
        return any(isinstance(fld, EdgeOneToMany) for fld in self.fields)

    @property
    def has_edges(self) -> bool:
        return any(isinstance(fld, EdgeField) for fld in self.fields)

    @property
    def has_primitive_fields(self) -> bool:
        return any(isinstance(fld, PrimitiveFieldCore) for fld in self.fields)

    @property
    def has_only_one_to_many_edges(self) -> bool:
        # Vacuously True for an empty field list, matching all() semantics.
        return all(isinstance(fld, EdgeOneToMany) for fld in self.fields)

    @property
    def fields_by_container(self) -> dict[dm.ContainerId, list[PrimitiveFieldCore | EdgeOneToOne]]:
        """Group the container-backed fields by their container id."""
        grouped: dict[dm.ContainerId, list[PrimitiveFieldCore | EdgeOneToOne]] = {}
        for fld in self:
            if isinstance(fld, (PrimitiveFieldCore, EdgeOneToOne)):
                grouped.setdefault(fld.prop.container, []).append(fld)
        return grouped

    @property
    def has_time_field(self) -> bool:
        return any(fld.is_time_field for fld in self.fields)

    @property
    def has_text_field(self) -> bool:
        return any(fld.is_text_field for fld in self.fields)

    @property
    def _field_type_hints(self) -> Iterable[str]:
        """All read and write type hints, read hint first for each field."""
        for fld in self.fields:
            yield fld.as_read_type_hint()
            yield fld.as_write_type_hint()

    @property
    def use_optional_type(self) -> bool:
        return any("Optional" in hint for hint in self._field_type_hints)

    @property
    def use_pydantic_field(self) -> bool:
        marker = self.pydantic_field
        return any(marker in hint for hint in self._field_type_hints)

    @property
    def import_pydantic_field(self) -> str:
        if self.pydantic_field == "Field":
            return "from pydantic import Field"
        return "import pydantic"

    @property
    def dependencies(self) -> list[DataClass]:
        """Data classes this one links to, deduplicated by view id."""
        # Later edges overwrite earlier ones; data classes are uniquely
        # identified by their view id, so the overwrite is harmless.
        unique = {fld.data_class.view_id: fld.data_class for fld in self.fields if isinstance(fld, EdgeField)}
        return sorted(unique.values(), key=lambda dc: dc.write_name)

    @property
    def has_single_timeseries_fields(self) -> bool:
        return any(
            isinstance(fld.prop.type, dm.TimeSeriesReference) and not isinstance(fld, PrimitiveListField)
            for fld in self.single_timeseries_fields
        )

    @property
    def primitive_fields_literal(self) -> str:
        quoted = (f'"{fld.prop_name}"' for fld in self if isinstance(fld, (PrimitiveField, CDFExternalField)))
        return ", ".join(quoted)

    @property
    def text_fields_literals(self) -> str:
        return ", ".join(f'"{fld.name}"' for fld in self.text_fields)

    @property
    def fields_literals(self) -> str:
        return ", ".join(f'"{fld.name}"' for fld in self if isinstance(fld, PrimitiveFieldCore))
@dataclass(frozen=True)
class APIClass:
    """Naming information for the generated API class of a single view."""

    client_attribute: str
    name: str
    file_name: str
    view_id: ViewSpaceExternalId

    @classmethod
    def from_view(cls, view: dm.View, api_class: pygen_config.APIClassNaming) -> APIClass:
        """Derive API class naming from a view, falling back to its external id."""
        base = (view.name or view.external_id).replace(" ", "_")
        return cls(
            client_attribute=create_name(base, api_class.client_attribute),
            name=f"{create_name(base, api_class.name)}API",
            file_name=create_name(base, api_class.file_name),
            view_id=ViewSpaceExternalId.from_(view),
        )
@dataclass(frozen=True)
class MultiAPIClass:
    """
    The set of API classes generated from one data model.

    Needed when a single SDK is generated for several data models at once.
    """

    sub_apis: list[APIClass]
    client_attribute: str
    name: str
    model: dm.DataModel[dm.View]

    @property
    def model_id(self) -> dm.DataModelId:
        """Identifier of the underlying data model."""
        return self.model.as_id()

    @classmethod
    def from_data_model(
        cls,
        data_model: dm.DataModel[dm.View],
        api_class_by_view_id: dict[ViewSpaceExternalId, APIClass],
        multi_api_class: pygen_config.MultiAPIClassNaming,
    ) -> MultiAPIClass:
        """Look up the sub-API for every view and name the aggregate class."""
        apis = [api_class_by_view_id[ViewSpaceExternalId.from_(view)] for view in data_model.views]
        apis.sort(key=lambda api: api.name)
        display_name = data_model.name or data_model.external_id
        return cls(
            sub_apis=apis,
            client_attribute=create_name(display_name, multi_api_class.client_attribute),
            name=f"{create_name(display_name, multi_api_class.name)}APIs",
            model=data_model,
        )
@dataclass
class FilterParameter:
    """A single keyword parameter on a generated list/filter method."""

    name: str
    type_: str
    default: None = None
    space: str | None = None

    @property
    def annotation(self) -> str:
        """Type annotation rendered into the generated signature."""
        return f"{self.type_} | None"

    @property
    def is_time(self) -> bool:
        """True when the parameter is date/datetime valued."""
        return self.type_ in {"datetime.datetime", "datetime.date"}
@dataclass
class FilterCondition:
    """One dm.filters.* condition emitted into a generated list method."""

    filter: type[dm.Filter]
    prop_name: str
    keyword_arguments: dict[str, FilterParameter]

    @property
    def condition(self) -> str:
        """Python guard expression deciding whether to apply this filter."""
        params = self.keyword_arguments.values()
        if self.filter is dm.filters.In:
            name = next(iter(params)).name
            return f"{name} and isinstance({name}, list)"
        if self.filter is dm.filters.Equals:
            name = next(iter(params)).name
            return f"{name} and isinstance({name}, str)"
        return " or ".join(param.name for param in params)

    @property
    def arguments(self) -> str:
        """Argument list passed to the dm.filters constructor."""
        if self.prop_name == "externalId":
            # externalId lives on the node itself, not on the view.
            prop_ref = '["node", "externalId"], '
        else:
            prop_ref = f'view_id.as_property_ref("{self.prop_name}"), '
        return prop_ref + ", ".join(self._create_filter_args())

    def _create_filter_args(self) -> list[str]:
        """Render keyword=value pairs, ISO-formatting time parameters."""
        rendered: list[str] = []
        for keyword, param in self.keyword_arguments.items():
            if param.is_time:
                rendered.append(f"{keyword}={param.name}.isoformat() if {param.name} else None")
            else:
                rendered.append(f"{keyword}={param.name}")
        return rendered

    @property
    def filter_call(self) -> str:
        """Fully qualified constructor name, e.g. ``dm.filters.Equals``."""
        return f"dm.filters.{self.filter.__name__}"
@dataclass
class FilterConditionOnetoOneEdge(FilterCondition):
    """Filter condition for a direct relation.

    ``instance_type`` is either ``str`` (a plain external id, combined with
    the target view's space) or ``tuple`` (an explicit
    ``(space, external_id)`` pair).
    """

    instance_type: type

    @property
    def condition(self) -> str:
        """Guard expression, narrowed to the expected instance type."""
        type_name = self.instance_type.__name__
        if self.filter is dm.filters.In:
            name = next(iter(self.keyword_arguments.values())).name
            return (
                f"{name} and isinstance({name}, list) and "
                f"isinstance({name}[0], {type_name})"
            )
        if self.filter is dm.filters.Equals:
            name = next(iter(self.keyword_arguments.values())).name
            return f"{name} and isinstance({name}, {type_name})"
        raise NotImplementedError(f"Unsupported filter {self.filter} for Direct Relation")

    def _create_filter_args(self) -> list[str]:
        """Render each keyword as a {"space": ..., "externalId": ...} literal."""
        rendered: list[str] = []
        for keyword, param in self.keyword_arguments.items():
            if self.instance_type is str and self.filter is dm.filters.Equals:
                rendered.append(f'{keyword}={{"space": "{param.space}", "externalId": {param.name}}}')
            elif self.instance_type is tuple and self.filter is dm.filters.Equals:
                rendered.append(f'{keyword}={{"space": {param.name}[0], "externalId": {param.name}[1]}}')
            elif self.instance_type is str and self.filter is dm.filters.In:
                rendered.append(f'{keyword}=[{{"space": "{param.space}", "externalId": item}} for item in {param.name}]')
            elif self.instance_type is tuple and self.filter is dm.filters.In:
                rendered.append(f'{keyword}=[{{"space": item[0], "externalId": item[1]}} for item in {param.name}]')
            else:
                raise NotImplementedError(f"Unsupported filter {self.filter} for Direct Relation")
        return rendered
# This field is used when creating the list method.
# Synthetic field for the node's external id, which exists for every instance
# even though no view property declares it; it is appended to the real fields
# in ListMethod.from_fields so every list method can filter on external id.
# The container id below is a dummy placeholder.
_EXTERNAL_ID_FIELD = PrimitiveField(
    name="external_id",
    prop_name="externalId",
    type_="str",
    is_nullable=False,
    default=None,
    prop=dm.MappedProperty(
        container_property_identifier="externalId",
        type=dm.Text(),
        nullable=False,
        auto_increment=False,
        container=dm.ContainerId("dummy", "dummy"),
    ),
    pydantic_field="Field",
)
@dataclass
class ListMethod:
    """Signature parameters and filter conditions of a generated list method."""
    parameters: list[FilterParameter]
    filters: list[FilterCondition]
    @classmethod
    def from_fields(cls, fields: Iterable[Field], config: pygen_config.Filtering) -> Self:
        """Derive the list-method parameters and dm filters from the fields.

        A synthetic external-id field is always appended so the method can
        filter on external id.  Equals and In filters on the same field share
        one parameter: whichever is processed second widens the shared
        parameter's type hint *in place*, so processing order matters.
        """
        parameters_by_name: dict[str, FilterParameter] = {}
        list_filters: list[FilterCondition] = []
        for field_ in itertools.chain(fields, (_EXTERNAL_ID_FIELD,)):
            # Only primitive and edge one-to-one fields supported for now
            if isinstance(field_, PrimitiveField):
                # config.get selects which dm filters to generate for this
                # property type / name combination.
                for selected_filter in config.get(field_.prop.type, field_.prop_name):
                    if selected_filter is dm.filters.Equals:
                        if field_.name not in parameters_by_name:
                            parameter = FilterParameter(name=field_.name, type_=field_.type_)
                            parameters_by_name[parameter.name] = parameter
                        else:
                            # Equals and In filter share parameter, you have to extend the type hint.
                            parameter = parameters_by_name[field_.name]
                            parameter.type_ = f"{field_.type_} | {parameter.type_}"
                        list_filters.append(
                            FilterCondition(
                                filter=selected_filter,
                                prop_name=field_.prop_name,
                                keyword_arguments=dict(value=parameter),
                            )
                        )
                    elif selected_filter is dm.filters.In:
                        if field_.name not in parameters_by_name:
                            parameter = FilterParameter(field_.name, type_=f"list[{field_.type_}]")
                            parameters_by_name[parameter.name] = parameter
                        else:
                            # Equals and In filter share parameter, you have to extend the type hint.
                            parameter = parameters_by_name[field_.name]
                            parameter.type_ = f"{parameter.type_} | list[{field_.type_}]"
                        list_filters.append(
                            FilterCondition(
                                filter=selected_filter,
                                prop_name=field_.prop_name,
                                keyword_arguments=dict(values=parameter),
                            )
                        )
                    elif selected_filter is dm.filters.Prefix:
                        # Prefix gets its own dedicated "<name>_prefix" parameter.
                        parameter = FilterParameter(name=f"{field_.name}_prefix", type_=field_.type_)
                        parameters_by_name[parameter.name] = parameter
                        list_filters.append(
                            FilterCondition(
                                filter=selected_filter,
                                prop_name=field_.prop_name,
                                keyword_arguments=dict(value=parameter),
                            )
                        )
                    elif selected_filter is dm.filters.Range:
                        # Range is exposed as a min_/max_ parameter pair.
                        min_parameter = FilterParameter(name=f"min_{field_.name}", type_=field_.type_)
                        max_parameter = FilterParameter(name=f"max_{field_.name}", type_=field_.type_)
                        parameters_by_name[min_parameter.name] = min_parameter
                        parameters_by_name[max_parameter.name] = max_parameter
                        list_filters.append(
                            FilterCondition(
                                filter=selected_filter,
                                prop_name=field_.prop_name,
                                keyword_arguments=dict(gte=min_parameter, lte=max_parameter),
                            )
                        )
                    else:
                        # This is a filter not supported by the list method.
                        continue
            elif isinstance(field_, EdgeOneToOne):
                # Direct relations accept either an external id (str) or a
                # (space, external_id) tuple; one condition is emitted per form.
                for selected_filter in config.get(field_.prop.type, field_.prop_name):
                    if selected_filter is dm.filters.Equals:
                        if field_.name not in parameters_by_name:
                            parameter = FilterParameter(
                                name=field_.name, type_="str | tuple[str, str]", space=field_.data_class.view_id.space
                            )
                            parameters_by_name[parameter.name] = parameter
                        else:
                            # Equals and In filter share parameter, you have to extend the type hint.
                            parameter = parameters_by_name[field_.name]
                            parameter.type_ = f"str | tuple[str, str] | {parameter.type_}"
                        list_filters.extend(
                            [
                                FilterConditionOnetoOneEdge(
                                    filter=selected_filter,
                                    prop_name=field_.prop_name,
                                    keyword_arguments=dict(value=parameter),
                                    instance_type=condition_type,
                                )
                                for condition_type in (str, tuple)
                            ]
                        )
                    elif selected_filter is dm.filters.In:
                        if field_.name not in parameters_by_name:
                            parameter = FilterParameter(
                                name=field_.name,
                                type_="list[str] | list[tuple[str, str]]",
                                space=field_.data_class.view_id.space,
                            )
                            parameters_by_name[parameter.name] = parameter
                        else:
                            # Equals and In filter share parameter, you have to extend the type hint.
                            parameter = parameters_by_name[field_.name]
                            parameter.type_ = f"{parameter.type_} | list[str] | list[tuple[str, str]]"
                        list_filters.extend(
                            [
                                FilterConditionOnetoOneEdge(
                                    filter=selected_filter,
                                    prop_name=field_.prop_name,
                                    keyword_arguments=dict(values=parameter),
                                    instance_type=condition_type,
                                )
                                for condition_type in (str, tuple)
                            ]
                        )
                    else:
                        # This is a filter not supported.
                        continue
        return cls(parameters=list(parameters_by_name.values()), filters=list_filters)
def _to_python_type(type_: dm.DirectRelationReference | dm.PropertyType) -> str:
    """Map a data-modeling property type to the Python type name used in hints.

    Raises:
        ValueError: If the property type is not recognised.
    """
    # Checked in order; each entry maps a group of dm types to one Python name.
    mapping: list[tuple[tuple[type, ...], str]] = [
        ((dm.Int32, dm.Int64), "int"),
        ((dm.Boolean,), "bool"),
        ((dm.Float32, dm.Float64), "float"),
        ((dm.Date,), "datetime.date"),
        ((dm.Timestamp,), "datetime.datetime"),
        ((dm.Json,), "dict"),
        ((dm.Text, dm.DirectRelation, dm.CDFExternalIdReference, dm.DirectRelationReference), "str"),
    ]
    for dm_types, python_name in mapping:
        if isinstance(type_, dm_types):
            return python_name
    raise ValueError(f"Unknown type {type_}")
| cognitedata/pygen | cognite/pygen/_core/data_classes.py | data_classes.py | py | 31,015 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "cognite.client.data_classes.data_modeling.Text",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cognite.client.data_classes.data_modeling",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "cognite.client.data_classes.data_modeling.Boolean",... |
12752730558 | """
Convert the CSV file with the information of transfers into a directed graph that can be used for debugging.
"""
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
# Read the CSV file with one row per model transfer.
csv_file = "../data/n_100_cifar10/transfers.csv"
df = pd.read_csv(csv_file)

# Create a directed graph.
G = nx.DiGraph()

# Replace raw node IDs with a compact index.
# Fixed: iterating a plain set gave a nondeterministic index assignment
# (set order varies with hash randomization), and list.index() made the
# remapping O(n^2).  sorted() makes the mapping stable across runs and the
# dict gives O(1) lookups.  Assumes the IDs in the CSV are one comparable
# type (they come from two columns of the same file).
node_ids = sorted(set(df['from']) | set(df['to']))
index_of = {node_id: idx for idx, node_id in enumerate(node_ids)}
df['from'] = df['from'].map(index_of)
df['to'] = df['to'].map(index_of)

# Make node IDs unique per round by suffixing the round number.  An
# "aggregated" transfer produces the model used in the NEXT round, so its
# target is tagged with round + 1.
df['from'] = df.apply(lambda row: "%s_%d" % (row['from'], row['round']), axis=1)
df['to'] = df.apply(
    lambda row: "%s_%d" % (row['to'], (row['round'] + 1) if row['type'] == "aggregated" else row['round']),
    axis=1,
)

# Add edges (nodes are created implicitly) and label each node with its
# index, i.e. the part before the round suffix.
labels = {}
for _, row in df.iterrows():
    from_node = row['from']
    to_node = row['to']
    labels[from_node] = from_node.split("_")[0]
    labels[to_node] = to_node.split("_")[0]
    # Dashed edges mark "trained" transfers; everything else is solid.
    G.add_edge(
        from_node,
        to_node,
        round=row['round'],
        style='dashed' if row['type'] == 'trained' else 'solid',
    )

# Spring layout (a single iteration keeps large graphs fast).
pos = nx.spring_layout(G, iterations=1)

# Draw nodes
nx.draw_networkx_nodes(G, pos)

# Draw edges, one call per edge so each keeps its own line style.
for u, v, data in G.edges(data=True):
    nx.draw_networkx_edges(G, pos, edgelist=[(u, v)], edge_color='black', style=data['style'])

# Draw node labels
nx.draw_networkx_labels(G, pos, labels=labels)

# Save graph to PDF
plt.axis('off')
plt.savefig('graph_output.pdf', format='pdf')
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "networkx.draw... |
33427742555 | from setuptools import setup, find_packages
########################################################################################################################
# The long description for PyPI comes straight from the reStructuredText README.
with open("README.rst", "r") as handler:
    LONG_DESC = handler.read()

setup(
    name="khiva",
    version="0.5.0",
    description="Python bindings for khiva",
    long_description=LONG_DESC,
    license="MPL 2.0",
    url="http://shapelets.io",
    author="Shapelets.io",
    author_email="dev@shapelets.io",
    packages=find_packages(),
    zip_safe=True,
)
| shapelets/khiva-python | setup.py | setup.py | py | 533 | python | de | code | 46 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 17,
"usage_type": "call"
}
] |
32996852891 | from setuptools import find_packages, setup
# Runtime dependencies of the backend.
third_party_dependencies = (
    "Flask",
    "flask-GraphQL",
    "graphene_sqlalchemy",
    "psycopg2",
    "SQLAlchemy",
    "requests",
)

# Dependencies needed only to run the test suite.
tests_require = (
    "nose",
)

setup(
    name="slack-server-python",
    version="0.1.0",
    author="Richard Shen",
    author_email="rich.shen@nyu.edu",
    description="Slack clone backend",
    long_description=open("README.md").read(),
    packages=find_packages(exclude=["ez_setup"]),
    include_package_data=True,
    zip_safe=False,
    test_suite="nose.collector",
    install_requires=third_party_dependencies,
    # Fixed: the setuptools keyword is ``tests_require``; the previous
    # ``test_require`` was not a recognized keyword, so the test
    # dependencies were never installed for ``setup.py test``.
    tests_require=tests_require,
    classifiers=[
        "Framework :: Flask",
        "Programming Language :: Python",
    ],
)
| ariia-git/slack-server-python | setup.py | setup.py | py | 744 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 23,
"usage_type": "call"
}
] |
26390497940 | from django.db import models
from django.utils.translation import gettext_lazy as _
class QuestionType(models.TextChoices):
    """Supported input/answer widget types for a survey question."""
    NUMERIC = 'NUMERIC', _('Numeric')
    TEXT = 'TEXT', _('Text')
    DROPDOWN = 'DROPDOWN', _('Dropdown')
    CHECKBOX = 'CHECKBOX', _('Checkbox')
    RADIO_BUTTON = 'RADIO_BUTTON', _('RadioButton')
class SubmissionStatus(models.TextChoices):
    """Lifecycle state of a survey submission."""
    STARTED = 'STARTED', _('Started')
    FINISHED = 'FINISHED', _('Finished')
    ABANDONED = 'ABANDONED', _('Abandoned')
| mehedi-shafi/simple-survey | api/survey/enums.py | enums.py | py | 495 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.TextChoices",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 5,
"usage_type": "call"
},
{
... |
31317534219 | from typing import List
from scripts.faceswaplab_ui.faceswaplab_inpainting_ui import face_inpainting_ui
from scripts.faceswaplab_swapping.face_checkpoints import get_face_checkpoints
import gradio as gr
from modules import shared
from scripts.faceswaplab_utils.sd_utils import get_sd_option
def faceswap_unit_advanced_options(
    is_img2img: bool, unit_num: int = 1, id_prefix: str = "faceswaplab_"
) -> List[gr.components.Component]:
    """Build the post-processing / advanced-mask accordion for one face unit.

    Args:
        is_img2img: Whether the UI is built for the img2img tab (not used in
            this builder; kept for signature parity with the unit UI builder).
        unit_num: 1-based index of the face unit; embedded in every elem_id.
        id_prefix: Prefix for all gradio element ids of this unit.

    Returns:
        The created gradio components.  The order of the returned list is
        fixed and presumably matched by the consumer's unpacking — verify
        before reordering.
    """
    with gr.Accordion(f"Post-Processing & Advanced Mask Options", open=False):
        gr.Markdown(
            """Post-processing and mask settings for unit faces. Best result : checks all, use LDSR, use Codeformer"""
        )
        # Face restoration model plus its blending/weight sliders; defaults
        # come from the saved SD options via get_sd_option.
        with gr.Row():
            face_restorer_name = gr.Radio(
                label="Restore Face",
                choices=["None"] + [x.name() for x in shared.face_restorers],
                value=get_sd_option(
                    "faceswaplab_default_upscaled_swapper_face_restorer",
                    "None",
                ),
                type="value",
                elem_id=f"{id_prefix}_face{unit_num}_face_restorer",
            )
            with gr.Column():
                face_restorer_visibility = gr.Slider(
                    0,
                    1,
                    value=get_sd_option(
                        "faceswaplab_default_upscaled_swapper_face_restorer_visibility",
                        1.0,
                    ),
                    step=0.001,
                    label="Restore visibility",
                    elem_id=f"{id_prefix}_face{unit_num}_face_restorer_visibility",
                )
                # Only meaningful when the Codeformer restorer is selected.
                codeformer_weight = gr.Slider(
                    0,
                    1,
                    value=get_sd_option(
                        "faceswaplab_default_upscaled_swapper_face_restorer_weight", 1.0
                    ),
                    step=0.001,
                    label="codeformer weight",
                    elem_id=f"{id_prefix}_face{unit_num}_face_restorer_weight",
                )
        # Upscaler choices come from the upscalers registered in the WebUI.
        upscaler_name = gr.Dropdown(
            choices=[upscaler.name for upscaler in shared.sd_upscalers],
            value=get_sd_option("faceswaplab_default_upscaled_swapper_upscaler", ""),
            label="Upscaler",
            elem_id=f"{id_prefix}_face{unit_num}_upscaler",
        )
        # Mask / color post-processing toggles.
        improved_mask = gr.Checkbox(
            get_sd_option("faceswaplab_default_upscaled_swapper_improved_mask", False),
            interactive=True,
            label="Use improved segmented mask (use pastenet to mask only the face)",
            elem_id=f"{id_prefix}_face{unit_num}_improved_mask",
        )
        color_corrections = gr.Checkbox(
            get_sd_option("faceswaplab_default_upscaled_swapper_fixcolor", False),
            interactive=True,
            label="Use color corrections",
            elem_id=f"{id_prefix}_face{unit_num}_color_corrections",
        )
        sharpen_face = gr.Checkbox(
            get_sd_option("faceswaplab_default_upscaled_swapper_sharpen", False),
            interactive=True,
            label="sharpen face",
            elem_id=f"{id_prefix}_face{unit_num}_sharpen_face",
        )
        erosion_factor = gr.Slider(
            0.0,
            10.0,
            get_sd_option("faceswaplab_default_upscaled_swapper_erosion", 1.0),
            step=0.01,
            label="Upscaled swapper mask erosion factor, 1 = default behaviour.",
            elem_id=f"{id_prefix}_face{unit_num}_erosion_factor",
        )
    components = [
        face_restorer_name,
        face_restorer_visibility,
        codeformer_weight,
        upscaler_name,
        improved_mask,
        color_corrections,
        sharpen_face,
        erosion_factor,
    ]
    for component in components:
        # NOTE(review): flag appears intended to keep these values out of the
        # WebUI's persisted ui-config — confirm against the host application.
        setattr(component, "do_not_save_to_config", True)
    return components
def faceswap_unit_ui(
    is_img2img: bool, unit_num: int = 1, id_prefix: str = "faceswaplab"
) -> List[gr.components.Component]:
    """Build the Gradio UI for one face-swap unit tab and return its components.

    Args:
        is_img2img: True when hosted in the img2img tab. Only controls the
            visibility of the "swap in source / swap in generated" options,
            which are meaningless in txt2img.
        unit_num: 1-based index of this unit; used for the tab label, the
            default target face index (``unit_num - 1``) and element ids.
        id_prefix: Prefix for every gradio ``elem_id`` so ids stay unique
            when several units (or several extensions) are present.

    Returns:
        The ordered list of gradio components for this unit. The ORDER is
        significant: it must match the field order of FaceSwapUnitSettings
        (see the note before the return statement).
    """
    with gr.Tab(f"Face {unit_num}"):
        with gr.Column():
            gr.Markdown(
                """Reference is an image. First face will be extracted.
            First face of batches sources will be extracted and used as input (or blended if blend is activated)."""
            )
            with gr.Row():
                img = gr.components.Image(
                    type="pil",
                    label="Reference",
                    elem_id=f"{id_prefix}_face{unit_num}_reference_image",
                )
                batch_files = gr.components.File(
                    type="file",
                    file_count="multiple",
                    label="Batch Sources Images",
                    optional=True,
                    elem_id=f"{id_prefix}_face{unit_num}_batch_source_face_files",
                )
            gr.Markdown(
                """Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image."""
            )
            with gr.Row():
                # A saved face checkpoint takes precedence over the reference image.
                face = gr.Dropdown(
                    choices=get_face_checkpoints(),
                    label="Face Checkpoint (precedence over reference face)",
                    elem_id=f"{id_prefix}_face{unit_num}_face_checkpoint",
                )
                refresh = gr.Button(
                    value="↻",
                    variant="tool",
                    elem_id=f"{id_prefix}_face{unit_num}_refresh_checkpoints",
                )

                def refresh_fn(selected: str):
                    # Re-scan checkpoints on disk while keeping the current selection.
                    return gr.Dropdown.update(
                        value=selected, choices=get_face_checkpoints()
                    )

                refresh.click(fn=refresh_fn, inputs=face, outputs=face)
            with gr.Row():
                enable = gr.Checkbox(
                    False,
                    placeholder="enable",
                    label="Enable",
                    elem_id=f"{id_prefix}_face{unit_num}_enable",
                )
                blend_faces = gr.Checkbox(
                    True,
                    placeholder="Blend Faces",
                    label="Blend Faces ((Source|Checkpoint)+References = 1)",
                    elem_id=f"{id_prefix}_face{unit_num}_blend_faces",
                    interactive=True,
                )
            gr.Markdown(
                """Select the face to be swapped, you can sort by size or use the same gender as the desired face:"""
            )
            with gr.Row():
                same_gender = gr.Checkbox(
                    False,
                    placeholder="Same Gender",
                    label="Same Gender",
                    elem_id=f"{id_prefix}_face{unit_num}_same_gender",
                )
                sort_by_size = gr.Checkbox(
                    False,
                    placeholder="Sort by size",
                    label="Sort by size (larger>smaller)",
                    elem_id=f"{id_prefix}_face{unit_num}_sort_by_size",
                )
            # Default target is face number unit_num-1 so each unit targets a
            # different face out of the box.
            target_faces_index = gr.Textbox(
                value=f"{unit_num-1}",
                placeholder="Which face to swap (comma separated), start from 0 (by gender if same_gender is enabled)",
                label="Target face : Comma separated face number(s)",
                elem_id=f"{id_prefix}_face{unit_num}_target_faces_index",
            )
            gr.Markdown(
                """The following will only affect reference face image (and is not affected by sort by size) :"""
            )
            reference_faces_index = gr.Number(
                value=0,
                precision=0,
                minimum=0,
                placeholder="Which face to get from reference image start from 0",
                label="Reference source face : start from 0",
                elem_id=f"{id_prefix}_face{unit_num}_reference_face_index",
            )
            gr.Markdown(
                """Configure swapping. Swapping can occur before img2img, after or both :""",
                visible=is_img2img,
            )
            swap_in_source = gr.Checkbox(
                False,
                placeholder="Swap face in source image",
                label="Swap in source image (blended face)",
                visible=is_img2img,
                elem_id=f"{id_prefix}_face{unit_num}_swap_in_source",
            )
            swap_in_generated = gr.Checkbox(
                True,
                placeholder="Swap face in generated image",
                label="Swap in generated image",
                visible=is_img2img,
                elem_id=f"{id_prefix}_face{unit_num}_swap_in_generated",
            )

            gr.Markdown(
                """
## Advanced Options

**Simple :** If you have bad results and don't want to fine-tune here, just enable Codeformer in "Global Post-Processing".
Otherwise, read the [doc](https://glucauze.github.io/sd-webui-faceswaplab/doc/) to understand following options.

"""
            )

            with gr.Accordion("Similarity", open=False):
                gr.Markdown("""Discard images with low similarity or no faces :""")
                with gr.Row():
                    check_similarity = gr.Checkbox(
                        False,
                        placeholder="discard",
                        label="Check similarity",
                        elem_id=f"{id_prefix}_face{unit_num}_check_similarity",
                    )
                    compute_similarity = gr.Checkbox(
                        False,
                        label="Compute similarity",
                        elem_id=f"{id_prefix}_face{unit_num}_compute_similarity",
                    )
                min_sim = gr.Slider(
                    0,
                    1,
                    0,
                    step=0.01,
                    label="Min similarity",
                    elem_id=f"{id_prefix}_face{unit_num}_min_similarity",
                )
                min_ref_sim = gr.Slider(
                    0,
                    1,
                    0,
                    step=0.01,
                    label="Min reference similarity",
                    elem_id=f"{id_prefix}_face{unit_num}_min_ref_similarity",
                )

            with gr.Accordion(label="Pre-Inpainting (before swapping)", open=False):
                gr.Markdown("Pre-inpainting sends face to inpainting before swapping")
                pre_inpainting = face_inpainting_ui(
                    id_prefix=f"{id_prefix}_face{unit_num}_preinpainting",
                )

            options = faceswap_unit_advanced_options(is_img2img, unit_num, id_prefix)

            with gr.Accordion(label="Post-Inpainting (After swapping)", open=False):
                # FIX: description previously said "Pre-inpainting ... before
                # swapping" (copy-paste error from the accordion above).
                gr.Markdown("Post-inpainting sends face to inpainting after swapping")
                post_inpainting = face_inpainting_ui(
                    id_prefix=f"{id_prefix}_face{unit_num}_postinpainting",
                )

    gradio_components: List[gr.components.Component] = (
        [
            img,
            face,
            batch_files,
            blend_faces,
            enable,
            same_gender,
            sort_by_size,
            check_similarity,
            compute_similarity,
            min_sim,
            min_ref_sim,
            target_faces_index,
            reference_faces_index,
            swap_in_source,
            swap_in_generated,
        ]
        + pre_inpainting
        + options
        + post_inpainting
    )

    # If changed, you need to change FaceSwapUnitSettings accordingly
    # ORDER of parameters is IMPORTANT. It should match the result of FaceSwapUnitSettings
    return gradio_components
| Navezjt/sd-webui-faceswaplab | scripts/faceswaplab_ui/faceswaplab_unit_ui.py | faceswaplab_unit_ui.py | py | 11,583 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gradio.Accordion",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "gradio.Markdown",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "gradio.Row",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "gradio.Radio",
"line_... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.