seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
# BOJ 1303 "War": read board size and the M x N grid of 'W'/'B' soldiers.
import sys

read = sys.stdin.readline
# N = board width (columns), M = board height (rows) per the problem input.
N, M = map(int, read().rstrip().split())
field = []  # field[row][col] holds 'W' or 'B'
for _ in range(M):
    field.append(list(read().rstrip()))
def dfs(x, y, flag, color, depth):
    """Count the cells of the connected same-color group containing (x, y).

    ``depth`` is the number of cells already counted (the caller passes 1
    for the start cell); the total group size is returned. Visited cells
    are marked ``False`` in ``flag``.
    """
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        for nx, ny in ((cx - 1, cy), (cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
            if not (0 <= nx < M and 0 <= ny < N):
                continue
            if flag[nx][ny] and field[nx][ny] == color:
                flag[nx][ny] = False
                depth += 1
                stack.append((nx, ny))
    return depth
flag = [[True] * N for _ in range(M)]  # True = cell not yet visited
# color_dict= {"W":0,"B":0}
ans = [0, 0]  # ans[0]: White power sum, ans[1]: Blue power sum
for i in range(M):
    for j in range(N):
        if flag[i][j]:
            color = field[i][j]
            # color_dict[color] = 1
            flag[i][j] = False
            # print("FINISH DFS",dfs(i,j,flag,color,1))
            # A connected group of k soldiers contributes k**2 to its side.
            depth = dfs(i, j, flag, color, 1)
            if color == "W":
                ans[0] += depth**2
            else:
                ans[1] += depth**2
print(*ans)
| w00sung/Algorithm | BOJ/1303_war.py | 1303_war.py | py | 1,202 | python | en | code | 0 | github-code | 13 |
70220884178 | import torch.nn as nn
class Autoencoder(nn.Module):
    """Convolutional autoencoder for 3x32x32 images with a 50-d latent code.

    ``forward`` returns ``(latent, reconstruction)``: the conv encoder plus
    ``fc1`` produce the latent; ``fc2`` plus the transposed-conv decoder
    produce the reconstruction.
    """

    def __init__(self):
        super(Autoencoder, self).__init__()
        # define: encoder — each Conv/LeakyReLU/BatchNorm/MaxPool stage
        # halves the spatial size (comments show C, H, W after the stage).
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),   # 32, 32, 32
            nn.LeakyReLU(),
            nn.BatchNorm2d(32),
            nn.MaxPool2d(2),                              # 32, 16, 16
            nn.Conv2d(32, 64, kernel_size=3, padding=1),  # 64, 16, 16
            nn.LeakyReLU(),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(2),                              # 64, 8, 8
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.LeakyReLU(),
            nn.BatchNorm2d(128),
            nn.MaxPool2d(2),                              # 128, 4, 4
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.LeakyReLU(),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(2)                               # 256, 2, 2
        )
        # define: decoder — transposed convs double the spatial size back
        # to 3x32x32; Tanh bounds the output to [-1, 1].
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 2, 2),  # 128, 4, 4
            nn.LeakyReLU(),
            nn.BatchNorm2d(128),
            nn.ConvTranspose2d(128, 64, 2, 2),   # 64, 8, 8
            nn.LeakyReLU(),
            nn.BatchNorm2d(64),
            nn.ConvTranspose2d(64, 32, 2, 2),    # 32, 16, 16
            nn.LeakyReLU(),
            nn.BatchNorm2d(32),
            nn.ConvTranspose2d(32, 3, 2, 2),     # 3, 32, 32
            nn.BatchNorm2d(3),
            nn.Tanh(),
        )
        # fc1 maps the flattened 2x2x256 feature map down to the 50-d latent.
        self.fc1 = nn.Sequential(
            nn.Linear(2*2*256, 256),
            nn.ReLU(),
            nn.BatchNorm1d(256),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.BatchNorm1d(128),
            nn.Linear(128, 50),
        )
        # fc2 maps the latent back up to a flattened 2x2x256 feature map.
        self.fc2 = nn.Sequential(
            nn.Linear(50, 128),
            nn.ReLU(),
            nn.BatchNorm1d(128),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.BatchNorm1d(256),
            nn.Linear(256, 2*2*256),
        )

    def forward(self, x):
        encoded = self.encoder(x)
        encoded = encoded.view(encoded.size(0), 2*2*256)
        encoded = self.fc1(encoded)
        decoded = self.fc2(encoded)
        # Bug fix: the batch size was hard-coded as view(60, 256, 2, 2),
        # which crashed for any other batch size (e.g. the last, smaller
        # batch of an epoch, or single-image inference).
        decoded = decoded.view(-1, 256, 2, 2)
        decoded = self.decoder(decoded)
        # Total AE: return latent & reconstruction
        return encoded, decoded
| timlee0119/NTU-Machine-Learning-2019 | image_clustering/hw4_common.py | hw4_common.py | py | 2,537 | python | en | code | 0 | github-code | 13 |
"""
@author: Ida Bagus Dwi Satria Kusuma - @dskusuma

Convert an image to YCbCr (stored in channel order Y, Cr, Cb) one pixel at
a time using the ITU-R BT.601 studio-range conversion, then save it.
"""
import cv2
import numpy as np

# Read the image (flag 1 = load as a 3-channel color image, BGR order)
img = cv2.imread('gambar1.jpg',1)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)  # reorder so item(i,j,0) is red
# Take the image height and width
height,width,depth = img.shape
# Allocate the empty output image
img_ycrcb = np.zeros((height,width,3))
# Per-pixel conversion.
# NOTE(review): the double Python loop is slow; the same result could be
# computed vectorised with NumPy — confirm before changing behavior.
for i in np.arange(height):
    for j in np.arange(width):
        r = img.item(i,j,0)
        g = img.item(i,j,1)
        b = img.item(i,j,2)
        # Default RGB to YCbCr equation
        # Y = (0.299*r) + (0.587*g) + (0.114*b)
        # U = b - Y
        # V = r - Y
        # Cb = U / (1.772+0.5)
        # Cr = V / (1.402+0.5)
        # RGB to YCbCr and representing it in 255
        Y = 16 + ((65.481*r)/256. + (128.553*g)/256. + (24.966*b)/256.)
        Cb = 128 + ((-37.797*r)/256. - (74.203*g)/256. + (112.0*b)/256.)
        Cr = 128 + ((112.0*r)/256. - (93.786*g)/256. - (18.214*b)/256.)
        # Store in (Y, Cr, Cb) channel order
        img_ycrcb.itemset((i,j,0),Y)
        img_ycrcb.itemset((i,j,1),Cr)
        img_ycrcb.itemset((i,j,2),Cb)
cv2.imwrite('image_ycrcb.jpg',img_ycrcb)
| MultimediaLaboratory-TelkomUniversity/fg-image-processing | Python/Conversion/RGBtoYCrCb.py | RGBtoYCrCb.py | py | 1,073 | python | en | code | 0 | github-code | 13 |
42591671284 | """ Missing data filling functionality
Transformations for handling missing values, such as simple replacements, and
more advanced extrapolations.
"""
from typing import Any, List, Optional, Literal
import multiprocessing as mp
import numpy as np # type: ignore
import pandas as pd # type: ignore
from sklearn.experimental import enable_iterative_imputer # type: ignore # noqa: F401, E501 # pylint: disable=unused-import
from sklearn.impute import IterativeImputer # type: ignore
from sklearn.linear_model import BayesianRidge # type: ignore
def replace_na(df: pd.DataFrame, replacement=0) -> pd.DataFrame:
    """Return *df* with every NaN substituted by *replacement* (default 0)."""
    return df.replace(np.nan, replacement)
def list_totally_missing(df: pd.DataFrame) -> List[str]:
    """ Get a list of columns for which all values are missing """
    # A column counts as totally missing when its null fraction is exactly 1.0.
    return [col for col in df if df[col].isnull().mean() == 1.0]
def fill_groups_with_time_means(df: pd.DataFrame) -> pd.DataFrame:
    """ Fill completely missing groups with time means

    Expects a two-level index where level 0 is time and level 1 is the
    group. A group whose rows are entirely NaN is filled, per time step,
    with the mean over all groups at that time. Mutates and returns *df*.
    """
    # Only fill numeric cols
    cols = list(df.select_dtypes(include=[np.number]).columns.values)
    for _, g_df in df.groupby(level=1):
        # If missing everything from a group
        if g_df.isnull().all().all():
            # Get the times for this group
            times_group = g_df.index.get_level_values(0)
            # Fill all columns with the time mean
            df.loc[g_df.index, cols] = (
                df.loc[times_group, cols].groupby(level=0).mean().values
            )
    return df
def fill_with_group_and_global_means(df: pd.DataFrame) -> pd.DataFrame:
    """ Impute missing values to group-level or global means.

    Expects a two-level index whose level 1 identifies the group. Each
    column's NaNs are first replaced by the group's column mean; values
    still missing afterwards (all-NaN groups) fall back to the column's
    global mean. Returns the filled frame.
    """
    for col in df.columns:
        # Impute with the group-level mean. Plain assignment instead of
        # ``fillna(..., inplace=True)`` on the selected column avoids the
        # pandas chained-assignment pitfall (a FutureWarning in pandas 2.x
        # and broken under copy-on-write).
        df[col] = df[col].fillna(df.groupby(level=1)[col].transform("mean"))
        # Fill remaining NaN with the column's global mean.
        df[col] = df[col].fillna(df[col].mean())
    return df
def extrapolate(
    df: pd.DataFrame,
    limit_direction: str = "both",
    limit_area: Optional[str] = None,
) -> pd.DataFrame:
    """ Interpolate and extrapolate

    Sorts by index and interpolates each level-1 group independently,
    forwarding *limit_direction*/*limit_area* to
    :meth:`pandas.DataFrame.interpolate`.
    """
    return (
        df.sort_index()
        .groupby(level=1)
        .apply(
            lambda group: group.interpolate(
                limit_direction=limit_direction, limit_area=limit_area
            )
        )
    )
def _fill_by_group(
    group: Any,
    limit_direction: Literal["forward", "backward", "both"],
    limit_area: Optional[Literal["inside", "outside"]],
) -> Any:
    """Forward/backward-fill a single group, honouring direction/area limits."""
    # Get the outer boundaries of the group data (first/last non-NaN labels).
    first_id = group.first_valid_index()
    last_id = group.last_valid_index()
    # Fill group according to set params.
    if limit_area is not None:
        # Assume forward if default "both" is passed with area "inside".
        if limit_area == "inside" and limit_direction != "backward":
            group[first_id:last_id] = group[first_id:last_id].ffill()
        if limit_area == "inside" and limit_direction == "backward":
            group[first_id:last_id] = group[first_id:last_id].bfill()
        if limit_area == "outside":
            # Fill only the leading/trailing gaps outside the valid span.
            id_min, id_max = group.index.min(), group.index.max()
            group[id_min:first_id] = group[id_min:first_id].bfill()
            group[last_id:id_max] = group[last_id:id_max].ffill()
    elif limit_direction == "forward":
        group = group.ffill()
    elif limit_direction == "backward":
        group = group.bfill()
    else:
        # Direction "both": forward-fill, then backward-fill what remains.
        group = group.ffill().bfill()
    return group
def fill(
    s: pd.Series,
    limit_direction: Literal["forward", "backward", "both"] = "both",
    limit_area: Optional[Literal["inside", "outside"]] = None,
) -> pd.Series:
    """ Fill column in dataframe with optional direction and area.

    Args:
        s: Pandas series to apply filling to.
        limit_direction: Direction in which to fill.
        limit_area: Area to fill. Default None refers to the entire series.

    Returns:
        The index-sorted series with each level-1 group filled by
        :func:`_fill_by_group`.
    """
    return (
        s.sort_index()
        .groupby(level=1)
        .apply(
            lambda group: _fill_by_group(
                group=group,
                limit_direction=limit_direction,
                limit_area=limit_area,
            ),
        )
    )
def _fill_iterative(
    df: pd.DataFrame,
    seed: int = 1,
    max_iter: int = 10,
    estimator: Any = BayesianRidge(),
):
    """ Gets a single imputation using IterativeImputer from sklearn.

    Uses BayesianRidge() from sklearn.

    Clips imputed values to min-max of observed values to avoid
    brokenly large values. When imputation model doesn't converge
    nicely we otherwise end up with extreme values that are out of
    range of the float32 type used by model training, causing crashes.
    Consider this clipping a workaround until a more robust imputation
    strategy is in place.

    NOTE(review): the ``estimator=BayesianRidge()`` default is evaluated
    once at import time and shared across all calls — confirm this is
    safe. The original docstring also claimed ``sample_posterior`` was
    changed to True, but the call below does not set it — verify intent.
    """
    # Only impute numberic cols
    cols_numeric = list(df.select_dtypes(include=[np.number]).columns.values)
    cols_not_numeric = [col for col in df.columns if col not in cols_numeric]
    # Get bounds so we can clip imputed values to not be outside
    # observed values
    observed_min = df[cols_numeric].min()
    observed_max = df[cols_numeric].max()
    # Start from an empty frame that keeps only the original index.
    df_imputed = df.loc[:, []].copy()
    for col in df:
        df_imputed[col] = np.nan
    df_imputed[cols_numeric] = IterativeImputer(
        random_state=seed, max_iter=max_iter, estimator=estimator
    ).fit_transform(df[cols_numeric])
    df_imputed[cols_not_numeric] = df[cols_not_numeric]
    # Clip imputed values to observed min-max range
    df_imputed[cols_numeric] = df_imputed[cols_numeric].clip(
        observed_min, observed_max, axis=1
    )
    return df_imputed
def impute_mice_generator(
    df, n_imp, estimator=None, parallel=False, n_jobs=mp.cpu_count()
):
    """ Impute df with MICE

    Yields ``n_imp`` independently seeded imputations of *df* (seed is the
    imputation index), optionally computed in a multiprocessing pool.
    """
    if parallel:
        with mp.Pool(processes=n_jobs, maxtasksperchild=1) as pool:
            results = [
                pool.apply_async(_fill_iterative, (df, imp, 10, estimator,))
                for imp in range(n_imp)
            ]
            for result in results:
                yield result.get()
    else:
        for imp in range(n_imp):
            yield _fill_iterative(df, seed=imp, estimator=estimator)
| prio-data/views_transformation_library | views_transformation_library/missing.py | missing.py | py | 6,351 | python | en | code | 0 | github-code | 13 |
28075799137 | # System imports
import sys
# Simulation imports
import Const
import NVTree
import ExtCP
import InvIdx
import ProgressBar
import IO
class Simulation:
    """Drives an index-insertion experiment.

    Builds the configured index type, inserts ``Total_Inserts``
    descriptors one by one, and periodically prints IO/index statistics.
    """

    def __init__(self, setup):
        # Get the settings from the setup dictionary
        ## GENERIC SETTINGS
        self.Experiment = setup['Experiment']
        self.Setup = setup['Setup']
        self.IO_Costs = setup['IO_Costs']
        self.Output_Freq = setup['Output_Freq']
        self.Total_Inserts = setup['Total_Inserts']
        self.Write_IOtrace = setup['Write_Iotrace']
        self.Log_Name = setup['Log_Name']
        # Create and initialize the IO cost center for the simulation
        self.IO = IO.IO(self.Experiment,
                        setup
                        )
        self.IO.set_io_cost_param_array(self.IO_Costs)
        # Create the tree instance matching the configured experiment type
        if self.Experiment == Const.NVTREE:
            self.Index = NVTree.NVTree(self.IO, setup)
        elif self.Experiment == Const.EXT_CP:
            self.Index = ExtCP.ExtCP(self.IO, setup)
        elif self.Experiment == Const.INV_IDX:
            self.Index = InvIdx.InvIdx(self.IO, setup)
        else:
            raise NotImplementedError("Add new simulation for undefined index type")

    # Start the simulation ... it creates the tree and starts inserting
    def simulate(self):
        # Initialize the progressbar for console
        p = ProgressBar.ProgressBar(0, self.Total_Inserts, 77)
        # Loop through all insertions
        inserted_descriptors = 0
        while inserted_descriptors < self.Total_Inserts:
            # Write IO stats regularly as dictated by parameters
            if inserted_descriptors % self.Output_Freq == 0:
                # Update the progress bar
                p.update_progress(inserted_descriptors)
                # Print IO stats and tree stats
                out_stats = self.IO.get_io_stats()
                self.print_stats(inserted_descriptors, out_stats)
                self.print_stats(inserted_descriptors,
                                 self.Index.get_index_stats())
                # Make sure the stuff is printed to disk
                sys.stdout.flush()
            # Insert the descriptors, one at a time
            self.Index.insert()
            inserted_descriptors += 1
        # Get the final IO stats and print them to console
        p.update_progress(inserted_descriptors)
        # flush OS buffer (if any)
        self.Index.clear_osbuf()
        if self.Write_IOtrace == True:
            self.print_ioqueue(self.IO.IODepthDict)
        out_stats = self.IO.get_io_stats()
        self.print_stats(inserted_descriptors, out_stats)
        self.print_stats(inserted_descriptors,
                         self.Index.get_index_stats())

    def print_ioqueue(self, queue_dict):
        # Dump the IO queue-depth histogram as a tab-separated trace file.
        with open(self.Log_Name, 'w') as f:
            f.write("IO_type\t")
            f.write("\t".join(Const.IO_DEPTHS))
            f.write("\n")
            for idx, x in enumerate(queue_dict.values()):
                f.write("%s\t" % (Const.L_IO_TYPE_NAMES[idx]))
                f.write("\t".join(str(item) for item in x))
                f.write("\n")

    # Helper function for formatting the IO statistics
    def print_stats(self, inserted, stat_list):
        # One tab-separated row per stat; insert count is reported in millions.
        stat_string = "%s\t%s\t%f\t%s\n"
        for l in stat_list:
            sys.stdout.write(stat_string % (self.Experiment,
                                            self.Setup,
                                            inserted / 1000000.0,
                                            l))
| Jubas/index-cost-sim | Simulation.py | Simulation.py | py | 3,683 | python | en | code | 1 | github-code | 13 |
# Lab 20, credit card validation
# user_input = input('Enter your credit card number with a space between each number: ')
user_input = '4 5 5 6 7 3 7 5 8 6 8 9 9 8 5 5'
# Convert input string to list of integers
cc = user_input.split(' ')
for i in range(len(cc)):
    cc[i] = int(cc[i])
# cc = [int(i) for i in user_input.split(' ')]
# Slice off the last digit. That is the check digit.
check_digit = cc.pop(-1)
print(check_digit)
# Reverse the digits
cc = cc[::-1]
print(cc)
total = 0
# Luhn-style sum: double every other digit of the reversed list.
# NOTE(review): standard Luhn subtracts nine only when the doubled value
# EXCEEDS nine; ">= 9" behaves identically here because a doubled digit
# is always even, so 9 itself can never occur.
for i in range(len(cc)):
    if i % 2 == 0:  # Double every other element in the reversed list
        cc_doubled = cc[i]*2
        print(cc_doubled)
        if cc_doubled >= 9:
            cc_doubled -= 9  # Subtract nine from numbers over nine
        total += cc_doubled  # Sum all even values
    else:
        total += cc[i]  # Sum all odd values
print(f'Total = {total}')
# Take the second digit of the total
def second_digit(num):
    """Return the ones digit of *num*, or None when num has a single digit."""
    return num % 10 if num >= 10 else None
# If the second_digit matches the check digit, the whole card number is valid
if second_digit(total) == check_digit:
    print('Valid')
else:
    print('Invalid')
| PdxCodeGuild/class_sheep | Code/Lane/python/lab20-credit_card_validation.py | lab20-credit_card_validation.py | py | 1,146 | python | en | code | 1 | github-code | 13 |
10639108847 | from distutils.version import LooseVersion
import os
import sys
from setuptools import __version__ as setuptools_version
from setuptools import find_packages
from setuptools import setup
from setuptools.command.test import test as TestCommand
version = '1.7.0.dev0'

# Remember to update local-oldest-requirements.txt when changing the minimum
# acme/certbot version.
install_requires = [
    'setuptools',
    'zope.interface',
]

# Snap builds bundle certbot itself, so the acme/certbot pins are only
# added for regular builds; building a wheel under SNAP_BUILD would ship
# a package missing those dependencies, hence the hard error.
if not os.environ.get('SNAP_BUILD'):
    install_requires.extend([
        'acme>=0.31.0',
        'certbot>=1.1.0',
    ])
elif 'bdist_wheel' in sys.argv[1:]:
    raise RuntimeError('Unset SNAP_BUILD when building wheels '
                       'to include certbot dependencies.')
if os.environ.get('SNAP_BUILD'):
    install_requires.append('packaging')

# PEP 508 environment markers ("; python_version < ...") require
# setuptools >= 36.2; otherwise fall back to a plain conditional append.
setuptools_known_environment_markers = (LooseVersion(setuptools_version) >= LooseVersion('36.2'))
if setuptools_known_environment_markers:
    install_requires.append('mock ; python_version < "3.3"')
elif 'bdist_wheel' in sys.argv[1:]:
    raise RuntimeError('Error, you are trying to build certbot wheels using an old version '
                       'of setuptools. Version 36.2+ of setuptools is required.')
elif sys.version_info < (3,3):
    install_requires.append('mock')

# This package normally depends on dns-lexicon>=3.2.1 to address the
# problem described in https://github.com/AnalogJ/lexicon/issues/387,
# however, the fix there has been backported to older versions of
# lexicon found in various Linux distros. This conditional helps us test
# that we've maintained compatibility with these versions of lexicon
# which allows us to potentially upgrade our packages in these distros
# as necessary.
if os.environ.get('CERTBOT_OLDEST') == '1':
    install_requires.append('dns-lexicon>=2.2.1')
else:
    install_requires.append('dns-lexicon>=3.2.1')

docs_extras = [
    'Sphinx>=1.0',  # autodoc_member_order = 'bysource', autodoc_default_flags
    'sphinx_rtd_theme',
]
class PyTest(TestCommand):
    """``setup.py test`` command that delegates to pytest."""

    user_options = []

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Extra arguments forwarded verbatim to pytest.main().
        self.pytest_args = ''

    def run_tests(self):
        import shlex
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(shlex.split(self.pytest_args))
        sys.exit(errno)
# Package metadata and entry-point registration for the certbot plugin.
setup(
    name='certbot-dns-desec',
    version=version,
    description="deSEC DNS Authenticator plugin for Certbot",
    url='https://github.com/akararsse/certbot-dns-desec',
    author="Certbot Project",
    author_email='akash.karar@securesystems.de',
    license='Apache License 2.0',
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Plugins',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    extras_require={
        'docs': docs_extras,
    },
    # Register the authenticator with certbot's plugin discovery.
    entry_points={
        'certbot.plugins': [
            'dns-desec = certbot_dns_desec.dns_desec:Authenticator',
        ],
    },
    tests_require=["pytest"],
    test_suite='certbot_dns_desec',
    cmdclass={"test": PyTest},
)
| akararsse/certbot-dns-desec | setup.py | setup.py | py | 4,007 | python | en | code | 1 | github-code | 13 |
18243034156 |
from numpy.linalg import norm
from re import X
import threading
import cv2
import os
from facenet_pytorch import InceptionResnetV1
# import tensorflow as tf
import time
import torch
import cv2
import numpy as np
import cv2
from align_faces import warp_and_crop_face, get_reference_facial_points
from mtcnn.detector import MtcnnDetector
detector = MtcnnDetector()
def mask_detect(image):
    """Run MTCNN face detection on *image* and return a copy with a
    rectangle drawn around each detected face, clamped to the frame."""
    img = image.copy()
    (h,w) = image.shape[:2]
    boxes, facial5points = detector.detect_faces(image)
    if(len(boxes)!=0):
        for box in boxes:
            (startX,startY,endX,endY)=box[:4].astype('int')
            # ensure the bounding boxes fall within the dimensions of the frame
            (startX,startY)=(max(0,startX),max(0,startY))
            (endX,endY)=(min(w-1,endX), min(h-1,endY))
            # extract the face ROI, convert it from BGR to RGB channel, resize it to 224,224 and preprocess it
            # NOTE(review): `face` is extracted but never used below — confirm.
            face=img[startY:endY, startX:endX]
            color = (255,0,0)
            cv2.rectangle(img,(startX,startY),(endX,endY),color,2)
    return img
def PlayCamera(id):
    """Open video source *id*, downscale frames so neither side exceeds
    1000 px, run face detection on each frame and display until 'q'."""
    video_capture = cv2.VideoCapture(id)
    face = None
    width = int(video_capture.get(3))   # CAP_PROP_FRAME_WIDTH
    height = int(video_capture.get(4))  # CAP_PROP_FRAME_HEIGHT
    # Use the aspect ratio as the repeated downscale factor.
    if height > width:
        scale = height / width
    else:
        scale = width / height
    if width == height:
        scale = 2
    while height>1000 or width>1000:
        height = int(height/scale)
        width = int(width/scale)
    while True:
        x = time.time()
        ret, frame = video_capture.read()
        # img = frame[0:128,0:128]
        # print(model.predict(np.array([img])))
        # img = mask_detect(frame)
        if ret ==True:
            frame = cv2.resize(frame,(width,height))
            img = mask_detect(frame)
            print(time.time() - x)  # per-frame processing latency
            cv2.imshow('{}'.format(id), img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
def run():
    """Spawn one playback/detection thread per camera source and wait for all."""
    source = "../../public/videos/1305.mp4"
    workers = [threading.Thread(target=PlayCamera, args=(cam,))
               for cam in [source]]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
# Script entry point.
if __name__ == '__main__':
    run()
| Truyen724/Zalo_liveness_Detection | face_detect_main/detect.py | detect.py | py | 2,294 | python | en | code | 1 | github-code | 13 |
import AthenaPython.PyAthena as PyAthena

# Transcode the input AOD POOL file to a temporary file with resized
# baskets, then order the temporary file by event into the final output.
trans = PyAthena.cobs('/data/ilija/AOD.067184.big.pool.root','/data/ilija/tmp.pool.root')
# trans.mkProject()
# resizing BS and adding CL=7
trans.setTreeToSkip('##Links')
trans.setTreeToSkip('##Shapes')
trans.setTreeToSkip('##Params')
trans.setTreeMemory(10*1024,'POOLContainer_DataHeader')
trans.resizeBaskets()

trans1 = PyAthena.cobs('/data/ilija/tmp.pool.root','/data/ilija/oBS_orderedByEvent.pool.root')
# NOTE(review): the meaning of ordering mode 2 is not documented here —
# confirm against the cobs API before changing.
trans1.order(2)
| rushioda/PIXELVALID_athena | athena/Database/AthenaPOOL/RootFileTools/python/full.py | full.py | py | 469 | python | en | code | 1 | github-code | 13 |
# import the packages
import cv2
import matplotlib.pyplot as plt

# Load the image using cv2
img = cv2.imread("crazy_full_class.jpg")
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)  # BGR -> RGB for matplotlib display
# Convert to grayscale and apply median blur to reduce image noise
grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grayimg = cv2.medianBlur(grayimg, 5)
# Get the edges
edges = cv2.adaptiveThreshold(grayimg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 5)
# Convert to a cartoon version: flatten colors, then keep only edge pixels
color = cv2.bilateralFilter(img, 9, 250, 250)
cartoon = cv2.bitwise_and(color, color, mask=edges)
# Display original image
plt.figure(figsize=(2,2))
plt.imshow(img)
plt.axis("off")
plt.title("Original Image")
plt.show()
# Display cartoon image
plt.figure(figsize=(10, 10))
plt.imshow(cartoon)
plt.axis("off")
plt.title("Cartoon Image")
plt.show()
| pauldubois98/RefresherMaths2023 | Pictures/cartoonify_v0.py | cartoonify_v0.py | py | 819 | python | en | code | 1 | github-code | 13 |
74564481938 | """
_CountFinishedSubscriptionsByTask_
MySQL implementation of Subscription.CountFinishedSubscriptionsByTask
"""
from WMCore.Database.DBFormatter import DBFormatter
class CountFinishedSubscriptionsByTask(DBFormatter):
    """
    Counts finished/open/total subscriptions, grouped by workflow, task
    and subscription (job) type.
    """
    sql = """SELECT ww.name as workflow, ww.task as task, wst.name as jobtype,
                    COUNT(case when ws.finished = 1 then 1 else null end) as finished,
                    COUNT(case when ws.finished = 0 then 1 else null end) as open,
                    COUNT(ws.id) as total,
                    MAX(ws.last_update) as updated
             FROM wmbs_subscription ws
             INNER JOIN wmbs_sub_types wst ON wst.id = ws.subtype
             INNER JOIN wmbs_workflow ww ON ww.id = ws.workflow
             GROUP BY ww.name, ww.task, wst.name
             """

    def execute(self, conn = None, transaction = False):
        """
        _execute_

        Returns a list of dictionaries, one per (workflow, task, jobtype)
        group, with keys: workflow, task, jobtype, finished, open, total
        and updated (matching the SELECT above).
        """
        result = self.dbi.processData(self.sql,
                                      conn = conn, transaction = transaction)
        return self.formatDict(result)
| dmwm/WMCore | src/python/WMCore/WMBS/MySQL/Subscriptions/CountFinishedSubscriptionsByTask.py | CountFinishedSubscriptionsByTask.py | py | 1,214 | python | en | code | 44 | github-code | 13 |
import cx_Freeze
import sys
import os

# On Windows, use the "Win32GUI" base so the frozen app runs without a
# console window, and point cx_Freeze at the bundled Tcl/Tk runtime.
base = None
if sys.platform == 'win32':
    # Bug fix: this line used '==' (a no-op comparison), leaving base as
    # None so the executable was always built as a console application.
    base = "Win32GUI"
    os.environ['TCL_LIBRARY'] = r"C:\Users\Admin\AppData\Local\Programs\Python\Python38\tcl\tcl8.6"
    os.environ['TK_LIBRARY'] = r"C:\Users\Admin\AppData\Local\Programs\Python\Python38\tcl\tk8.6"

executables = [cx_Freeze.Executable("Face_Recognition_Software.py", base=base, icon="face.ico")]

cx_Freeze.setup(
    name = "Facial Recognition Software",
    # NOTE(review): 'tk8t.dll' looks like a typo for 'tk86t.dll' (the pair
    # of tcl86t.dll) — confirm the actual DLL name before shipping.
    options = {"build_exe": {"packages":["tkinter", "os"], "include_files": ["face.ico", 'tcl86t.dll', 'tk8t.dll', 'college_images', 'data', 'database', 'attendance_report']}},
    version = "1.0",
    description = "Face Recognition Automatic Attendace System | Developed By NguyenAnhQuan",
    executables = executables
)
32670324599 | from read_xml import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from sklearn.metrics import confusion_matrix
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Load images/labels from the XML annotations.
# NOTE(review): collect_data() is called twice and the first result pair
# is discarded — confirm whether the double load is intentional.
images,df = collect_data()
images,target = collect_data()
images = np.array(images)
confs = []   # per-fold confusion matrices
scores = []  # per-fold accuracy scores
INPUT_SHAPE = (224, 224, 3)
# On-the-fly augmentation for the training folds.
train_aug = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
# ImageNet-pretrained ResNet50 backbone without its classification head.
base_model = ResNet50(weights = 'imagenet', include_top = False, input_shape = INPUT_SHAPE)
# NOTE(review): this freezes only the LAST 26 layers and leaves the rest
# trainable — fine-tuning usually freezes the EARLY layers; confirm intent.
for layer in base_model.layers[-26:]:
    layer.trainable = False
# Binary classification head on top of the flattened backbone features.
x = Flatten()(base_model.output)
x = Dense(1000)(x)
x = Activation("relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation = 'sigmoid')(x)
model = Model(inputs = base_model.input, outputs = predictions)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
from sklearn.model_selection import KFold
kfold = KFold(n_splits=10, shuffle=True, random_state=42)
from sklearn import metrics
print(model.summary())
from tensorflow.keras.callbacks import EarlyStopping
earlystopping = EarlyStopping(monitor ="val_loss",
                              mode ="min", patience = 5,
                              restore_best_weights = True)
# 10-fold cross-validation.
# NOTE(review): the SAME model object is reused (not re-initialised) across
# folds, so later folds start from weights already trained on their own
# test data — confirm whether per-fold re-creation is wanted. Also note
# epochs=1 makes the patience-5 early stopping a no-op.
for train, test in kfold.split(images, target):
    x_train = images[train]
    y_train = target[train]
    x_test = images[test]
    y_test = target[test]
    model.fit(train_aug.flow(x_train,y_train),
              validation_data=(x_test,y_test),verbose=1,
              epochs=1,callbacks =[earlystopping])
    pred = model.predict(x_test)
    prediction=np.round(pred).reshape(y_test.shape)
    score = metrics.accuracy_score(y_test, prediction)
    scores.append(score)
    confs.append(confusion_matrix(y_test,prediction))
    print(confusion_matrix(y_test,prediction))
"""model.save("resnet_aug_1000_kfold_model.h5")
np.save("confusions.npy",np.array(confs))
np.save("scores.npy",np.array(scores))"""
| ahmetsalavran/SurgeryData | resnet_last.py | resnet_last.py | py | 2,277 | python | en | code | 0 | github-code | 13 |
class Node:
    """Binary search tree node holding a value and left/right children."""

    def __init__(self, value):
        self.value = value
        self.right = None
        self.left = None


class BinarySerchTree:
    """Binary search tree without duplicate values."""

    def __init__(self):
        self.root = None

    def add_val_tree(self, value):
        """Insert *value*; return True on success, False if already present."""
        new_node = Node(value)
        if self.root is None:
            self.root = new_node
            return True
        temp = self.root
        while True:
            if new_node.value == temp.value:
                return False  # duplicates are rejected
            if new_node.value < temp.value:
                if temp.left is None:
                    temp.left = new_node
                    return True
                temp = temp.left
            else:
                if temp.right is None:
                    temp.right = new_node
                    return True
                temp = temp.right

    def contains_value(self, value):
        """Return True if *value* is stored in the tree, else False.

        Bug fix: the original used two independent ``if`` statements, so
        after stepping left it re-compared against the moved node — this
        crashed with AttributeError when the left child was None and
        wrongly returned True for absent values (e.g. searching 0 in a
        tree containing {1, 2, 3}). The ``elif`` chain walks correctly.
        """
        temp = self.root
        while temp is not None:
            if value < temp.value:
                temp = temp.left
            elif value > temp.value:
                temp = temp.right
            else:
                return True
        return False
# Quick smoke test: after inserting 2, 3, 1 the right child of the root is 3.
bst = BinarySerchTree()
bst.add_val_tree(2)
bst.add_val_tree(3)
bst.add_val_tree(1)
print(bst.root.right.value)
9481130996 | from .base import stressModelBase, np
from ..base import ts_float
class stressModelBase_f(stressModelBase):

    """
    A stress-model base-class that supports setting the stress by
    controlling the frequency-dependent coherence between velocity
    components.
    """

    def __new__(cls, turbModel, *args, **kwargs):
        self = super(stressModelBase_f, cls).__new__(
            cls, turbModel, *args, **kwargs)
        # Per-component, per-grid-point, per-frequency cross-coherence array.
        self._rstrCoh = np.zeros([self.n_comp] +
                                 list(self.grid.shape) +
                                 [self.n_f], dtype=ts_float)
        # In the future I need to overwrite the _setPhases routine to
        # allow for more control of the _rstrCoh array. For example:
        # It may be worthwhile to base the cross-coherence
        # function _rstrCoh, on observed cross-component
        # coherences (and phases). Perhaps this is a gaussion
        # distribution (with some width) of phase shifts
        # vs. frequency. For now we simply set a fraction of the
        # phase shifts to be the same between the components to
        # control the Reynold's stress.
        # Bug fix: __new__ must return the created instance; without this
        # line, instantiating the class yields None.
        return self

    # For now, I have simply copied the code that was here before I simplified
    # the stressModelBase class.
    def _setPhases(self,):
        """
        Here we control the Reynold's stress by setting the 'random'
        phases between components to be the same for a fraction of the
        frequencies.
        """
        # fudge_factor=0.93 #!!!FIXTHIS: The 0.93 is a fudge factor to account
        # for ... ???
        fudge_factor = 1
        self._rstrCoh = self.stress[..., None] / self.stress_max[..., None]
        rstrmat = self.grid.flatten(
            self._rstrCoh)  # This doesn't currently work
        srt = np.sort(np.abs(rstrmat), axis=0)
        #rem=1+srt[0]-srt[1]-srt[2]
        if ((1 + srt[0] - srt[1] - srt[2] < 0).any()
            or (((rstrmat < 0).sum(0) == 1)
                & (srt.sum(0) > 1)).any()):
            # We can't have rem<0, or only one negative correlation if the
            # total correlation is greater than 1.
            raise Exception('The input reynolds stresses are inconsistent.')
        ovr = np.minimum((srt[0] * srt[1] + srt[0] * srt[2]) / 2, srt[0])
        # average the product of the smallest value with the
        # two larger ones. Then take the minimum value of that
        # with the smallest value. This is the 'overlap', i.e.
        # the fraction of points that will have the same phase
        # for all three components.
        ovr[(rstrmat < 0).sum(0)
            == 1] = 0  # If there is only 1 negative stress than the overlap must be zero.
        rgen = self.grid.randgen.rand
        shp = (self.grid.n_p, self.grid.n_f)
        ####
        # First we set the 'overlap' stress. i.e. the phases that are the same
        # (or opposite) for all three components.
        inds_used = (rgen(*shp) * fudge_factor) < ovr
        self.rand[2][inds_used] = np.sign(
            rstrmat[1][inds_used]) * self.rand[0][inds_used]
        self.rand[1][inds_used] = np.sign(
            rstrmat[0][inds_used]) * self.rand[0][inds_used]
        ####
        # Now set the u'v' non-overlap piece.
        inds = ((rgen(*shp) * fudge_factor) <
                np.abs(rstrmat[0]) - ovr) & (~inds_used)
        self.rand[1][inds] = np.sign(rstrmat[0][inds]) * self.rand[0][inds]
        inds_used |= inds
        ####
        # Now set the u'w' non-overlap piece.
        inds = ((rgen(*shp) * fudge_factor) <
                np.abs(rstrmat[1]) - ovr) & (~inds_used)
        self.rand[2][inds] = np.sign(rstrmat[1][inds]) * self.rand[0][inds]
        inds_used |= inds
        ####
        # Now set the v'w' non-overlap piece.
        inds = ((rgen(*shp) * fudge_factor) <
                np.abs(rstrmat[2]) - ovr) & (~inds_used)
        self.rand[2][inds] = np.sign(rstrmat[2][inds]) * self.rand[1][inds]
        inds_used |= inds
| lkilcher/pyTurbSim | pyts/stressModels/stress_freq.py | stress_freq.py | py | 4,073 | python | en | code | 12 | github-code | 13 |
class ListNode:
    """Singly linked list node."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def detectCycle(self, head: ListNode) -> ListNode:
        """Return the node where the cycle begins, or None when acyclic.

        Walks the list recording visited nodes; the first node seen twice
        is the cycle entry. O(n) time, O(n) extra space. (The original
        kept a node -> index dict whose indices were never used; a set of
        visited nodes is the same behavior without the dead bookkeeping.)
        """
        seen = set()
        while head:
            if head in seen:
                return head
            seen.add(head)
            head = head.next
        return None
# Quick manual check: a two-node list without a cycle yields None.
head = ListNode(1)
head.next = ListNode(2)
l = Solution().detectCycle(head)
print(l)
| hwngenius/leetcode | learning/fast&slow_poionters/142.py | 142.py | py | 499 | python | en | code | 1 | github-code | 13 |
class Human:
    """Toy example of class-level attributes and class-level helpers."""

    email = 'jayabhaskarreddy98@.com'
    address = '560043'

    @staticmethod
    def verify():
        """Print whether the stored address matches the expected pincode."""
        # @staticmethod keeps the existing Human.verify() call style working
        # and additionally makes instance calls (Human().verify()) safe; the
        # original bare zero-argument methods raised TypeError on instances.
        if Human.address == '560043':
            print('Correct')
        else:
            print('Wrong')

    @staticmethod
    def sent_email():
        """Pretend to send an email."""
        print('email sent')
# Exercise the class-level attributes and helpers.
print(Human.email)
print(Human.address)
Human.sent_email()
Human.verify()
| Jayabhaskarreddy98/python_practice | oops/sample.py | sample.py | py | 364 | python | en | code | 1 | github-code | 13 |
18200513131 | #!/usr/bin/env python
# coding:utf-8
"""
@author: nivic ybyang7
@license: Apache Licence
@file: server
@time: 2022/10/28
@contact: ybyang7@iflytek.com
@site:
@software: PyCharm
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import datetime
import importlib
import logging
import sys
import time
# Copyright (c) 2022. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
# Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
# Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
# Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
# Vestibulum commodo. Ut rhoncus gravida arcu.
from concurrent import futures
from io import StringIO
from logging.handlers import QueueHandler, QueueListener
# from queue import Queue
from multiprocessing import Queue
import grpc
from aiges.aiges_inner import aiges_inner_pb2
from aiges.aiges_inner import aiges_inner_pb2_grpc
from aiges.aiges_inner import grpc_stdio_pb2
from aiges.aiges_inner import grpc_stdio_pb2_grpc
from aiges.dto import DataListCls
from aiges.errors import *
from aiges.utils.log import getLogger
from grpc_health.v1 import health_pb2, health_pb2_grpc
from grpc_health.v1.health import HealthServicer
# Module-level logger, plus the module/class names that wrapperInit()
# imports dynamically from the user-supplied wrapper.py.
log = getLogger(fmt=" %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s", name="wrapper")
wrapper_module = "wrapper"
wrapper_class = "Wrapper"
class StdioService(grpc_stdio_pb2_grpc.GRPCStdioServicer):
    """Streams captured log output to the plugin host over the
    go-plugin stdio side channel."""

    def __init__(self, log):
        self.log = log

    def StreamStdio(self, request, context):
        # Endless stream: block on the log buffer and forward each chunk
        # as a stdout (channel=1) StdioData message.
        while True:
            yield grpc_stdio_pb2.StdioData(channel=1, data=self.log.read())
class Logger:
    """In-memory log capture.

    Log records flow through a multiprocessing Queue into a
    StreamHandler; read() pops and formats one record at a time so the
    output can be streamed to the plugin host (see StdioService).
    """
    def __init__(self):
        self.stream = StringIO()  # scratch buffer; not wired to any handler
        que = Queue(-1)  # no limit on size
        self.queue_handler = QueueHandler(que)
        self.handler = logging.StreamHandler()
        self.listener = QueueListener(que, self.handler)
        self.log = logging.getLogger('python-plugin')
        self.log.setLevel(logging.INFO)
        self.logFormatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(pathname)s:%(lineno)d - %('
                                              'message)s')
        # self.handler.setFormatter(self.logFormatter)
        # Drop any pre-existing handlers so records flow only through ours.
        for handler in self.log.handlers:
            self.log.removeHandler(handler)
        self.log.addHandler(self.handler)
        self.listener.start()
    def __del__(self):
        pass
        # self.listener.stop()
    def read(self):
        # Blocks until a record is available on the listener queue, then
        # returns it formatted and newline-terminated as UTF-8 bytes.
        self.handler.flush()
        ret = self.logFormatter.format(self.listener.queue.get()) + "\n"
        return ret.encode("utf-8")
class WrapperServiceServicer(aiges_inner_pb2_grpc.WrapperServiceServicer):
    """Implements the WrapperService RPCs: dynamically loads the user's
    wrapper.py, delegates init/exec calls to it, and streams finished
    responses back to the host through communicate()."""
    def __init__(self, q):
        # q: queue shared with communicate(); wrapperOnceExec pushes
        # finished responses onto it.
        self.response_queue = q
        self.count = 0
        self.userWrapperObject = None
        pass
    def wrapperInit(self, request, context):
        """Import wrapper.py, instantiate its Wrapper class and run the
        user's wrapperInit() with the supplied config; returns a Ret code."""
        log.info("Importing module from wrapper.py: %s", wrapper_module)
        try:
            interface_file = importlib.import_module(wrapper_module)
            user_wrapper_cls = getattr(interface_file, wrapper_class)
            self.userWrapperObject = user_wrapper_cls()
            log.info("User Wrapper newed Success.. starting call user init functions...")
            ret = self.userWrapperObject.wrapperInit(request.config)
            if ret != 0:
                log.error("User wrapperInit function failed.. ret: %s" % str(ret))
                return aiges_inner_pb2.Ret(ret=USER_INIT_ERROR)
        except Exception as e:
            # Import/instantiation/init failure: report a generic init error.
            log.error(e)
            ret = INIT_ERROR
            return aiges_inner_pb2.Ret(ret=ret)
        return aiges_inner_pb2.Ret(ret=OK)
    def wrapperOnceExec(self, request, context):
        """Run one inference through the user wrapper.

        The real payload is delivered asynchronously through the response
        queue (tagged with request.tag); the direct RPC return value is an
        empty placeholder response.
        """
        if not self.userWrapperObject:
            # wrapperInit has not run (or failed): nothing to execute with.
            return aiges_inner_pb2.Response(ret=USER_EXEC_ERROR)
        self.count += 1
        user_resp = self.userWrapperObject.wrapperOnceExec(request.params, self.convertPbReq2Req(request))
        if not user_resp or not user_resp.list:
            return aiges_inner_pb2.Response(ret=USER_EXEC_ERROR)
        d_list = []
        for ur in user_resp.list:
            d = aiges_inner_pb2.ResponseData(key=ur.key, data=ur.data, len=ur.len, status=ur.status)
            d_list.append(d)
        r = aiges_inner_pb2.Response(list=d_list, tag=request.tag)
        call_back(self.response_queue, r)
        return aiges_inner_pb2.Response(list=[])
    def convertPbReq2Req(self, req):
        # Wrap the protobuf request's data list in the SDK's DataListCls.
        r = DataListCls()
        r.list = req.list
        return r
    def testStream(self, request_iterator, context):
        # Debug endpoint: print each streamed request and echo an empty
        # response for it.
        prev_notes = []
        for new_note in request_iterator:
            print(new_note.data)
            yield aiges_inner_pb2.Response(list=[])
            prev_notes.append(new_note)
    def communicate(self, request_iterator, context):
        # One-way server stream: block on the shared queue and push each
        # queued response to the client.  (Original note, translated:
        # bidirectional traffic seems unnecessary here; if it becomes
        # necessary, the loader must pass in the relevant information.)
        while True:
            data = self.response_queue.get()
            yield data
def call_back(response_queue, r):
    """Push response *r* onto *response_queue* for the communicate()
    stream to deliver to the host."""
    response_queue.put(r)
def send_to_queue(q):
    """Debug producer: once per second, build a dummy Response message.

    NOTE(review): the final ``q.put(r)`` is commented out, so as written
    this loops forever constructing and discarding messages; it is only
    referenced from commented-out code in serve().
    """
    x = 0
    while True:
        x += 1
        time.sleep(1)
        # print("sending... {}".format(x))
        msg = "count: {} . now : {}".format(x, datetime.datetime.now())
        d = aiges_inner_pb2.ResponseData(key=str(x), data=msg.encode("utf-8"), len=x, status=3)
        r = aiges_inner_pb2.Response(list=[d])
        # q.put(r)
def serve():
    """Build and run the plugin gRPC server, then block forever.

    Prints the go-plugin handshake line ("1|1|tcp|...|grpc") on stdout so
    the host process can discover the endpoint.
    """
    work_q = Queue()
    # w = threading.Thread(target=send_to_queue, args=(work_q,))
    # w.start()
    # We need to build a health service to work with go-plugin
    health = HealthServicer()
    health.set("plugin", health_pb2.HealthCheckResponse.ServingStatus.Value('SERVING'))
    # Start the server.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    aiges_inner_pb2_grpc.add_WrapperServiceServicer_to_server(
        WrapperServiceServicer(work_q), server)
    # add stdio service
    # (Translated) Not needed here: go-plugin appears to capture stdout already.
    # grpc_stdio_pb2_grpc.add_GRPCStdioServicer_to_server(StdioService(logger), server)
    health_pb2_grpc.add_HealthServicer_to_server(health, server)
    server.add_insecure_port('[::]:50055')
    server.start()
    # Output information
    print("1|1|tcp|127.0.0.1:50055|grpc")
    sys.stdout.flush()
    server.wait_for_termination()
def run():
    """Entry point: set up basic logging, then serve until terminated."""
    logging.basicConfig()
    serve()
if __name__ == '__main__':
    run()
| iflytek/aiges | grpc/examples/wrapper-python/plugin.py | plugin.py | py | 7,114 | python | en | code | 271 | github-code | 13 |
31046745326 | import argparse
from random import randint
class Operation():
    """Enumeration of the four binary arithmetic operations.

    ``PLUS`` .. ``DIV`` are integer codes; ``_f`` maps a code to its
    callable and ``_s`` to its display symbol (the lists are
    index-aligned with the codes).
    """
    PLUS, MINUS, MULT, DIV = range(4)
    _f = [
        lambda x, y: x + y,
        lambda x, y: x - y,
        lambda x, y: x * y,
        lambda x, y: x / y,  # true division: may yield floats
    ]
    _s = ['+', '-', '*', '/']

    # @staticmethod added: the originals were plain functions in the class
    # body, which would break if ever invoked through an instance.
    @staticmethod
    def evaluate(op, o1, o2):
        """Apply operation code *op* to operands *o1* and *o2*."""
        return Operation._f[op](o1, o2)

    @staticmethod
    def to_str(op):
        """Return the display symbol for operation code *op*."""
        return Operation._s[op]
class PostfixTree:
    """Binary expression tree.

    A node is either a leaf (``leaf`` holds an integer operand) or an
    internal node with an ``Operation`` code in ``op`` and two subtrees
    in ``left``/``right``.
    """
    def __init__(self, op=None, left=None, right=None, leaf=None):
        self.leaf, self.left, self.right, self.op = leaf, left, right, op

    def _is_leaf(self):
        # PEP 8 (E711): identity test against None instead of `!= None`.
        return self.leaf is not None

    def evaluate(self):
        """Recursively evaluate the expression; DIV may return a float."""
        if self._is_leaf():
            return self.leaf
        return Operation.evaluate(self.op, self.left.evaluate(), self.right.evaluate())

    def depth(self):
        """Height of the tree; a lone leaf has depth 0."""
        if self._is_leaf():
            return 0
        return max(self.left.depth() + 1, self.right.depth() + 1)

    def __str__(self):
        """Bracketed postfix rendering, e.g. ``[[1][2]+]``."""
        if self._is_leaf():
            return '[' + str(self.leaf) + ']'
        return '[' + str(self.left) + str(self.right) + Operation.to_str(self.op) + ']'

    def to_infix_str(self):
        """Bracketed infix rendering, e.g. ``[[1]+[2]]``."""
        if self._is_leaf():
            return '[' + str(self.leaf) + ']'
        return '[' + self.left.to_infix_str() + Operation.to_str(self.op) + self.right.to_infix_str() + ']'
import functools
@functools.lru_cache(maxsize=None)
def generateFullTrees(lb, ub, ops, depth):
    """All *full* (perfect) expression trees of exactly *depth* levels.

    Leaves range over lb..ub; *ops* is a hashable sequence of digit
    characters naming Operation codes (hashable because of lru_cache).

    NOTE(review): results are memoised, so callers share both the
    returned list and the subtrees inside it — do not mutate either.
    """
    trees = []
    if depth <= 0:
        for i in range(lb, ub+1):
            trees.append(PostfixTree(leaf=i))
    else:
        # Every (op, left, right) combination over the depth-1 trees.
        subtrees = generateFullTrees(lb, ub, ops, depth-1)
        for op in ops:
            for x in subtrees:
                for y in subtrees:
                    trees.append(PostfixTree(left=x, right=y, op=int(op)))
    return trees
#this function generates all trees that are of n depth
@functools.lru_cache(maxsize=None)
def _generateAllTrees(lb, ub, ops, depth):
    """Return (trees of exactly *depth*, trees strictly shallower).

    Helper for generateAllTrees; memoised, so callers must not mutate
    the returned lists or the shared subtrees.
    """
    trees_lt_max_depth = []
    trees_of_max_depth = []
    if depth <= 0:
        for i in range(lb, ub+1):
            t = PostfixTree(leaf=i)
            trees_of_max_depth.append(t)
    else:
        eq_depth, lt_depth = _generateAllTrees(lb, ub, ops, depth-1)
        for op in ops:
            # At least one child (x) has depth-1, so the result has exactly
            # *depth* levels; both left/right orders are emitted.
            for x in eq_depth:
                for y in eq_depth + lt_depth:
                    trees_of_max_depth.append(PostfixTree(left=x, right=y, op=int(op)))
                    # NOTE(review): `x != y` is an identity test (PostfixTree
                    # defines no __eq__) — confirm that is the intent.
                    if x != y:
                        trees_of_max_depth.append(PostfixTree(left=y, right=x, op=int(op)))
        trees_lt_max_depth = eq_depth + lt_depth
    return (trees_of_max_depth, trees_lt_max_depth)
def generateAllTrees(lb, ub, ops, depth):
    """All trees of depth at most *depth* over leaves lb..ub and the
    given operation codes."""
    exact, shallower = _generateAllTrees(lb, ub, ops, depth)
    return exact + shallower
_get_op = lambda x: int(x[randint(0,len(x)-1)])
def _generateRandomTree(lb, ub, ops, num_nodes):
    # Recursively build one random tree.  NOTE(review): *num_nodes* appears
    # to count leaves — each split conserves left + (num_nodes - left);
    # confirm against the callers' documentation ("nodes").
    if num_nodes == 1:
        return PostfixTree(leaf=randint(lb, ub))
    else:
        # Random split point, then recurse on both sides with a random op.
        left = randint(1, num_nodes-1)
        return PostfixTree(
            op = _get_op(ops),
            left = _generateRandomTree(lb, ub, ops, left),
            right = _generateRandomTree(lb, ub, ops, num_nodes-left))
# Randomly sample trees whose size is drawn uniformly from 1..num_nodes.
def generateRandomTrees(lb, ub, ops, num_nodes, num_samples):
    """Yield *num_samples* random trees of size at most *num_nodes*."""
    op_codes = list(ops)
    for _ in range(num_samples):
        yield _generateRandomTree(lb, ub, op_codes, randint(1, num_nodes))
def generateRandomTreesFixedNodes(lb, ub, ops, num_nodes, num_samples):
    """Yield *num_samples* random trees, each of size exactly *num_nodes*."""
    op_codes = list(ops)
    for _ in range(num_samples):
        yield _generateRandomTree(lb, ub, op_codes, num_nodes)
#special trees
@functools.lru_cache(maxsize=None)
def _generateRightLeaningTrees(lb, ub, ops, depth):
    """Trees of exactly *depth* levels whose left child is always a leaf.

    Only the right spine grows; the left operand of each internal node
    is a freshly drawn random leaf.

    NOTE(review): random leaves are drawn once and then memoised by
    lru_cache — repeated calls reuse the same "random" trees; confirm
    that is intended.
    """
    trees = []
    if depth <= 0:
        for i in range(lb, ub+1):
            t = PostfixTree(leaf=i)
            trees.append(t)
    else:
        subtrees = _generateRightLeaningTrees(lb, ub, ops, depth-1)
        for op in ops:
            for x in subtrees:
                left = PostfixTree(leaf=randint(lb, ub))
                t = PostfixTree(
                    left=left,
                    right=x,
                    op=int(op))
                trees.append(t)
    return trees
def generateRightLeaningTrees(lb, ub, ops, depth):
    """All right-leaning trees with between 1 and depth+1 levels."""
    trees = []
    for d in range(1, depth + 2):
        trees.extend(_generateRightLeaningTrees(lb, ub, ops, d))
    return trees
if __name__ == '__main__':
    # CLI: pick a generator by name, pass its positional params via
    # --p1..--p5, and print one tab-separated line per generated tree.
    parser = argparse.ArgumentParser()
    parser.add_argument('tree_type', help='the name of the class which generates the tree', type=str)
    parser.add_argument('--p1', help='the first parameter for the tree generator')
    parser.add_argument('--p2', help='the second parameter for the tree generator')
    parser.add_argument('--p3', help='the third parameter for the tree generator')
    parser.add_argument('--p4', help='the fourth parameter for the tree generator')
    parser.add_argument('--p5', help='the fifth parameter for the tree generator')
    parser.add_argument('--postfix', help='don\'t output postfix notation', action='store_const', const=False, default=True)
    parser.add_argument('--infix', help='output infix notation', action='store_const', const=True, default=False)
    parser.add_argument('--depth', help='output the depth of the expression', action='store_const', const=True, default=False)
    args = parser.parse_args()
    #tree types
    if args.tree_type == 'generateFullTrees':
        generator = lambda : generateFullTrees(int(args.p1), int(args.p2), str(args.p3), int(args.p4))
    elif args.tree_type == 'generateAllTrees':
        generator = lambda : generateAllTrees(int(args.p1), int(args.p2), str(args.p3), int(args.p4))
    elif args.tree_type == 'generateRandomTrees':
        generator = lambda : generateRandomTrees(int(args.p1), int(args.p2), str(args.p3), int(args.p4), int(args.p5))
    elif args.tree_type == 'generateRandomTreesFixedNodes':
        generator = lambda : generateRandomTreesFixedNodes(int(args.p1), int(args.p2), str(args.p3), int(args.p4), int(args.p5))
    elif args.tree_type == 'generateRightLeaningTrees':
        generator = lambda : generateRightLeaningTrees(int(args.p1), int(args.p2), str(args.p3), int(args.p4))
    else:
        print('error')
        exit()
    # Output columns (tab-separated): optional postfix, optional infix,
    # optional depth, and always the evaluated value.
    for tree in generator():
        line = []
        if args.postfix:
            line.append(str(tree))
        if args.infix:
            line.append(tree.to_infix_str())
        if args.depth:
            line.append(str(tree.depth()))
        line.append(str(tree.evaluate()))
        print('\t'.join(line))
| johntzwei/neural-postfix-calculator | trees.py | trees.py | py | 6,576 | python | en | code | 0 | github-code | 13 |
28758316589 | import pytest
from yahtzee_api.player import Player
class TestPlayer:
    """Class containing all unit tests for the Player class."""
    def test_roll_rolls_left(self):
        """Tests ValueError when roll(to_roll) method is called
        without any rolls left in the turn.
        """
        p = Player("Tom")
        p.rolls_left = 0
        with pytest.raises(ValueError):
            p.roll([0, 0, 0, 0, 0])
        # Negative counts must be rejected the same way as zero.
        p.rolls_left = -1
        with pytest.raises(ValueError):
            p.roll([0, 0, 0, 0, 0])
    def test_roll_to_roll_length(self):
        """Tests ValueError when roll(to_roll) method is called with a to_roll
        list not of length 5.
        """
        p = Player("Tom")
        with pytest.raises(ValueError):
            p.roll([0, 0, 0, 0])
        with pytest.raises(ValueError):
            p.roll([0, 0, 0, 0, 0, 0])
    def test_roll_to_roll_value_type(self):
        """Tests TypeError when roll(to_roll) method is called with a to_roll
        list that contains non-binary values.
        """
        p = Player("Tom")
        with pytest.raises(TypeError):
            p.roll([3, 0, 0, 1, 0])
    def test_roll_first_roll_has_five(self):
        """Tests VaueError when roll(to_roll) method tries to roll fewer than
        5 dice on the first roll of the turn.
        """
        p = Player("Tom")
        with pytest.raises(ValueError):
            p.roll([1, 0, 0, 0, 0])
    def test_roll_to_roll_type(self):
        """Tests TypeError when roll(to_roll) method is called with a to_roll
        not of type list.
        """
        p = Player("Tom")
        to_roll = True
        with pytest.raises(TypeError):
            p.roll(to_roll)
    def test_roll_roll_proper_dice(self):
        """Tests that roll(to_roll) method rolls proper dice according to user
        input list of binary values.
        """
        p = Player("Tom")
        p.roll([0, 0, 0, 0, 0])
        # Check that dice are all rolled from the initial configuration.
        for i in range(5):
            assert p.dice[i] >= 1 and p.dice[i] <= 6
        # Check that the die in a position marked "False" is not rolled.
        # NOTE(review): 1 means "hold" here; relies on the held die keeping
        # its previous value across the re-roll.
        temp = p.dice[0]
        p.roll([1, 0, 0, 0, 0])
        assert temp == p.dice[0]
    def test_end_turn_score_type_value(self):
        """Tests ValueError when end_turn(score_type) method is called with
        score_type not between 0 and 12, inclusive.
        """
        p = Player("Tom")
        with pytest.raises(ValueError):
            p.end_turn(-1)
        with pytest.raises(ValueError):
            p.end_turn(13)
| TheophileDiot/yahtzee-api | tests/test_player.py | test_player.py | py | 2,595 | python | en | code | 0 | github-code | 13 |
33627744023 | from __future__ import annotations
from pycaputo.grid import Points
from pycaputo.logging import get_logger
from pycaputo.utils import Array
logger = get_logger(__name__)
# {{{ Lagrange Riemann-Liouville integral
def lagrange_riemann_liouville_integral(
    p: Points,
    alpha: float,
    n: int,
    *,
    q: int = 0,
) -> Array:
    r"""Compute the Riemann-Liouville integral of Lagrange polynomials.

    .. math::

        I^{q, \alpha}_{nk} \triangleq
            \frac{1}{\Gamma(\alpha)}
            \int_{x_k}^{x_{k + 1}} (x_n - s)^{\alpha - 1}
            \prod_{i = 0}^p \frac{x - x_i}{x_j - x_i}
            \,\mathrm{d} s

    for :math:`k \in \{0, \dots, n\}`.

    :arg n: target point for which to compute the integrals.
    :arg q: order of the Lagrange polynomials to compute the integral of.
    :returns: the integrals :math:`I^{q, \alpha}_{nk}` for every subinterval
        :math:`[x_k, x_{k + 1}]`
    """
    # Placeholder: the quadrature construction is not implemented yet.
    raise NotImplementedError
# }}}
# {{{ Lagrange Caputo derivative
def lagrange_caputo_derivative(
    p: Points,
    alpha: float,
    n: int,
    *,
    q: int = 0,
) -> Array:
    r"""Compute the Caputo derivative of Lagrange polynomials.

    .. math::

        D^{q, \alpha}_{nk} \triangleq
            \frac{1}{\Gamma(m - \alpha)}
            \int_{x_k}^{x_{k + 1}} (x_n - s)^{m - \alpha - 1}
            \frac{\mathrm{d}^m}{\mathrm{d} s^m}
            \left(\prod_{i = 0}^p \frac{x - x_i}{x_j - x_i}\right)
            \,\mathrm{d} s

    for :math:`k \in \{0, \dots, n\}` and :math:`m - 1 < \alpha \le m`.

    :arg n: target point for which to compute the derivatives.
    :arg q: order of the Lagrange polynomials to compute the derivative of.
    :returns: the derivatives :math:`D^{q, \alpha}_{nk}` for every subinterval
        :math:`[x_k, x_{k + 1}]`
    """
    # Placeholder: the quadrature construction is not implemented yet.
    raise NotImplementedError
# }}}
| alexfikl/pycaputo | pycaputo/lagrange.py | lagrange.py | py | 1,854 | python | en | code | 1 | github-code | 13 |
8055931336 | '''Continuized CCG with generalized application, lifting and lowering.
'''
from collections import defaultdict
from lambekseq.lbnoprod import usecache
from lambekseq.lib.cterm import towerSplit, catIden
from lambekseq.lib.cterm import unslash, addHypo
from lambekseq.lib.tobussccg import toBussCcg
# Connective symbols recognised in category strings.
Conns = {'/', '\\', '^', '!'}
# Memoised wrapper around the category-tower splitter.
towerSplit = usecache(towerSplit)
class Result:
    """A category string plus the frozenset of index links justifying it.

    Chart entries for CKY parsing: equality/hashing are over
    (cat, links), and ``x + y`` delegates to the module-level reduce()
    to combine two adjacent results.
    """
    def __init__(self, cat:str, links=frozenset()):
        self.cat = cat
        self.links = links
    def __iter__(self):
        return iter(self.links)
    def __eq__(self, other):
        return (self.cat == other.cat
            and self.links == other.links)
    def __hash__(self):
        return hash((self.cat, self.links))
    def __repr__(self):
        return self.cat
    def __add__(self, others):
        # Combine with the result on the right; see module-level reduce().
        return reduce(self, others)
    @staticmethod
    def _lowering(s:str):
        # One lowering step.  towerSplit(s) appears to yield the tower
        # body `a` plus components `d`/`e`; when the recursively lowered
        # body matches `e` the tower collapses to `d`, otherwise the tower
        # is rebuilt around the lowered body.  Returns (category, links).
        a, d, e = towerSplit(s)
        if not d:
            return a, frozenset()
        else:
            c, pairs = Result._lowering(a)
            iden, more = catIden(c, e)
            if iden:
                return d, pairs | more
            else:
                c = addHypo(e, '^', c)
                c = addHypo(d, '!', c)
                return c, pairs
    def collapse(self):
        '''Recursive lowering: rewrite self.cat in place and absorb the
        links discovered while lowering.  Returns self for chaining.'''
        cat, pairs = self._lowering(self.cat)
        self.cat = cat
        self.links |= pairs
        return self
def propogate(xlist, ylist, i, j, cat):
    '''Propogate a reduction at cell (i, j) back to (0, 0).

    Re-wraps *cat* with the (slash, argument) hypotheses stripped off by
    unslash(), innermost first.  (Name keeps the original spelling of
    "propagate" for API compatibility.)
    '''
    for k in range(j, -1, -1):
        cat = addHypo(cat, *ylist[k][1:])
    for k in range(i, -1, -1):
        cat = addHypo(cat, *xlist[k][1:])
    return cat
def cellAppl(xlist, ylist, i, j, slash):
    # Attempt a generalized application between level i of xlist and
    # level j of ylist; *slash* selects the direction ('/' or '\\').
    # Returns a set of Results (empty when nothing applies).
    if i < len(xlist) - 1:
        # Ordinary application: the next level of x seeks an argument in
        # the given direction and that argument matches level j of y.
        if xlist[i + 1][1] == slash:
            iden, pairs = catIden(xlist[i + 1][2], ylist[j][0])
            if iden:
                cat = propogate(xlist, ylist, i, j, xlist[i + 1][0])
                return {Result(cat, pairs)}
    if j == len(ylist) - 1:
        # y is exhausted: if its innermost category is a tower, reduce x
        # against the tower body, then rebuild the tower around each result.
        c, a, b = towerSplit(ylist[j][0])
        if a:
            if slash == '/':
                res = reduce(Result(xlist[i][0]), Result(c))
            elif slash == '\\':
                res = reduce(Result(c), Result(xlist[i][0]))
            for r in res:
                r.cat = addHypo(b, '^', r.cat)
                r.cat = addHypo(a, '!', r.cat)
                # Optionally lower right away (see Result._earlyCollapse).
                if r._earlyCollapse: r.collapse()
                r.cat = propogate(xlist, ylist, i, j, r.cat)
            return {r for r in res}
    return set()
def reduce(x:Result, y:Result) -> set:
    '''Use only the 0-th row and 0-th column of the reduction table'''
    # NOTE(review): intentionally(?) shadows builtins.reduce at module
    # scope; Result.__add__ depends on this module-level name.
    xlist, ylist = unslash(x.cat), unslash(y.cat)
    res = set()
    # Scan anti-diagonals (i + j == s) of the table, keeping only cells
    # in row 0 or column 0; stop at the first diagonal yielding results.
    for s in range(len(xlist) + len(ylist) - 1):
        for i in range(s, -1, -1):
            j = s - i
            if (i and j or i >= len(xlist)
                or j >= len(ylist)): continue
            res.update(cellAppl(xlist, ylist, i, j, '/'))
            res.update(cellAppl(ylist, xlist, j, i, '\\'))
        if res: break
    # Every derived result inherits the links of both inputs.
    xyLinks = x.links | y.links
    for r in res: r.links |= xyLinks
    return {r for r in res}
class Cntccg:
    """Continuized-CCG parser: CKY over the premises *pres* toward the
    conclusion category *con*.

    NOTE(review): the constructor mutates class-level state
    (Cntccg._matchCon, Result._earlyCollapse) and clears towerSplit's
    cache — concurrent instances would interfere with each other.
    """
    def __init__(self, con:str, pres:list, *,
                 matchCon=True, earlyCollapse=True, **kwargs):
        self.con = con
        self.pres = list(pres)
        Cntccg._matchCon = matchCon
        Result._earlyCollapse = earlyCollapse
        towerSplit.cache.clear()
    def __len__(self):
        # Number of premises (chart width).
        return len(self.pres)
    @property
    def allProofs(self):
        # Every Result in the top chart cell (requires parse() first).
        return self._proofSpan[0, len(self) - 1]
    @property
    def proofs(self):
        # Only the results whose category matches the conclusion.
        return list(filter(lambda r: catIden(r.cat, self.con)[0],
                           self.allProofs))
    @property
    def proofCount(self):
        return len(self.proofs if self._matchCon else self.allProofs)
    def printProofs(self):
        # One line of sorted "(i, j)" link pairs per proof.
        pool = self.proofs if self._matchCon else self.allProofs
        for r in pool:
            s = sorted('(%s, %s)' % (i, j) for i, j in r.links)
            print(', '.join(s))
        if pool: print()
    def buildTree(self):
        # Freeze the back-pointer table accumulated during parse().
        self._tree = {k: v for k, v in self._tree.items()}
    def printTree(self, space='.' * 4):
        # Depth-first dump of the derivation: children indented by *space*.
        def onCall(proofs, indent=''):
            for r in proofs:
                if not indent:
                    s = sorted('(%s, %s)' % (i, j) for i, j in r.links)
                    print(', '.join(s) + '\n' + '-' * 10 + '\n')
                if r in self._tree:
                    for sub in self._tree[r]:
                        onCall([sub], indent + space)
                print(indent, r.cat)
        onCall(self.proofs if self._matchCon else self.allProofs)
    @property
    def bussproof(self):
        # LaTeX bussproofs rendering of the derivation tree.
        return toBussCcg(self._tree,
                         self.proofs if self._matchCon else self.allProofs)
    def parse(self):
        '''CKY parsing.'''
        span = defaultdict(set)
        tree = {}
        # Diagonal: one Result per premise.
        for i in range(len(self)):
            span[i, i] = {Result(self.pres[i])}
        # Standard CKY: combine every split (i, j-1) + (j, k) via reduce().
        for step in range(1, len(self)):
            for i in range(len(self) - step):
                k = i + step
                for j in range(i + 1, k + 1):
                    for x in span[i, j - 1]:
                        for y in span[j, k]:
                            res = x + y
                            for r in res:
                                if r not in tree: tree[r] = (x, y)
                            span[i, k].update(res)
        # Lowering deferred to the end when earlyCollapse is off.
        if not Result._earlyCollapse:
            span[0, len(self) - 1] = {r.collapse()
                for r in span[0, len(self) - 1]}
        if self._matchCon:
            for r in span[0, len(self) - 1]:
                r.links |= catIden(r.cat, self.con)[1]
        self._proofSpan = span
        self._tree = tree
def selfTest():
    """Smoke test: parse a sample sequent and print its proof tree."""
    from lambekseq.lib.cindex import indexSeq
    con, *pres = 's', '(s^np)!s', '(np\\s)/np', '(s^np)!s', '(s\\s)/np', '(s^np)!s'
    (con, *pres), _ = indexSeq(con, pres)
    cntccg = Cntccg(con, pres, earlyCollapse=False)
    cntccg.parse()
    cntccg.buildTree()
    cntccg.printTree()
    print('Total:', cntccg.proofCount)
if __name__ == '__main__':
    selfTest()
| PterosDiacos/lambekseq | cntccg.py | cntccg.py | py | 6,257 | python | en | code | 15 | github-code | 13 |
15734778623 | from utils.embeddings import get_embeddings
from Implementation.code_vulnerability_detection.dataloader import VulnerabilityDataloader
from Models.transformer import TransormerClassifierModel
# Load data loader (same CSV reused for train and test here — presumably a
# sample run; verify before real training).
dataloader = VulnerabilityDataloader('data/sample_index.csv','data/sample_index.csv',
                                    batch_size=100, eval_portion=0.2, max_length=207)
# get word2vec embedding list
embedding, vector_length = get_embeddings('data/word2vec.model', padding=True)
save_name = 'vul'
# Build the transformer classifier (model name "TransormerClassifierModel"
# keeps the library's original spelling).
model = TransormerClassifierModel('models/'+save_name, 'logs/'+save_name, embedding=embedding,
                                  d_features=100,d_meta=None, max_length=207, d_classifier=256, n_classes=2,
                                  n_layers=6, n_head=8, dropout=0.1, use_bottleneck=True, d_bottleneck=128)
# Training (400 epochs, keep only the best checkpoint; requires CUDA)
model.train(400, dataloader.train_dataloader(), dataloader.val_dataloader(),
            device='cuda', save_mode='best', smoothing=False, earlystop=False)
# Evaluation: positive-class probabilities vs integer ground truth
pred, real = model.get_predictions(dataloader.test_dataloader(), 'cuda')
import numpy as np
pred_ = np.array(pred)[:, 1]
real = np.array(real).astype(int)
# Precision-recall curve
from utils.plot_curves import precision_recall, plot_pr_curve
area, precisions, recalls, thresholds = precision_recall(pred_, real)
plot_pr_curve(recalls, precisions, auc=area)
# ROC curve
from utils.plot_curves import auc_roc, plot_roc_curve
auc, fprs, tprs, thresholds = auc_roc(pred_, real)
plot_roc_curve(fprs, tprs, auc)
from Implementation.ciena.metrics import results
df = results(real, np.array(pred).argmax(axis=-1), 0.5) | Jincheng-Sun/Kylearn-pytorch | Implementation/code_vulnerability_detection/training.py | training.py | py | 1,605 | python | en | code | 0 | github-code | 13 |
41115654831 | from mogul.locale import localize
# Translation helper for the human-readable picture-type names below.
_ = localize.get_translator('mogul.media')
# Public API of this module; Attachment is an internal base class.
__all__ = ['Image']
# ID3v2 APIC picture-type codes (0x00-0x14) mapped to translatable,
# human-readable descriptions.
ID3_IMAGE_TYPE = {
    0x00: _('Other'),
    0x01: _('32x32 pixels \'file icon\' (PNG only)'),
    0x02: _('Other file icon'),
    0x03: _('Cover (front)'),
    0x04: _('Cover (back)'),
    0x05: _('Leaflet page'),
    0x06: _('Media (e.g. label side of CD)'),
    0x07: _('Lead artist/lead performer/soloist'),
    0x08: _('Artist/performer'),
    0x09: _('Conductor'),
    0x0A: _('Band/Orchestra'),
    0x0B: _('Composer'),
    0x0C: _('Lyricist/text writer'),
    0x0D: _('Recording Location'),
    0x0E: _('During recording'),
    0x0F: _('During performance'),
    0x10: _('Movie/video screen capture'),
    0x11: _('A bright coloured fish'),
    0x12: _('Illustration'),
    0x13: _('Band/artist logotype'),
    0x14: _('Publisher/Studio logotype'),
}
class Attachment(object):
    """Base class for a media attachment: raw bytes plus a MIME type and
    bookkeeping fields (item/library ids, free-text description)."""

    def __init__(self, mime_type, data):
        self.mime_type = mime_type
        self.data = data
        # Bookkeeping fields start out blank.
        self.item_id = self.library_id = self.description = ''
class Image(Attachment):
    """Image attachment (e.g. embedded cover art).

    Container formats store slightly different image metadata:
    mkv - file name, mime type, description, data, uid
    mp4 - data type, element name, locale (language, country)
    id3v2 - mime type, picture type (byte), description

    This class keeps the common subset: image type, mime type, data,
    description, uid.
    """

    # MIME type -> file extension used by write(); unknown types get none.
    _EXTENSIONS = {
        'image/png': '.png',
        'image/jpeg': '.jpg',
        'image/bmp': '.bmp',
    }

    def __init__(self, image_type=0x03, mime_type=None, data=None):
        """*image_type* is an ID3v2 APIC code (0x03 = front cover)."""
        self.image_type = image_type
        self.width = 0
        self.height = 0
        # Normalise the MIME type: bare subtypes get an 'image/' prefix
        # and the 'jpg' spelling is canonicalised to 'jpeg'.
        if mime_type is not None:
            try:
                main, sub = mime_type.split('/')
            except ValueError:
                # No '/' separator: treat the value as a bare subtype.
                main = 'image'
                sub = mime_type
            if sub == 'jpg':
                sub = 'jpeg'
            mime_type = '%s/%s' % (main, sub)
        Attachment.__init__(self, mime_type, data)

    def write(self, filename='', with_extension=True):
        """Write the raw image bytes to *filename*.

        A front cover (image_type 0x03) with no explicit name defaults
        to 'cover'.  When *with_extension* is true, an extension matching
        the MIME type is appended.
        """
        if filename == '' and self.image_type == 0x03:
            filename = 'cover'
        if with_extension:
            filename += self._EXTENSIONS.get(self.mime_type, '')
        # Context manager guarantees the handle is closed even if the
        # write raises (the original open/close pair leaked it on error).
        with open(filename, 'wb') as fp:
            fp.write(self.data)
| sffjunkie/media | src/media/attachment.py | attachment.py | py | 2,475 | python | en | code | 0 | github-code | 13 |
12935752926 | # coding=UTF-8
from natcap.invest.ui import model, inputs
from natcap.invest.wind_energy import wind_energy
class WindEnergy(model.InVESTModel):
def __init__(self):
model.InVESTModel.__init__(
self,
label='Wind Energy',
target=wind_energy.execute,
validator=wind_energy.validate,
localdoc='../documentation/wind_energy.html',
suffix_args_key='suffix'
)
self.wind_data = inputs.File(
args_key='wind_data_uri',
helptext=(
"A CSV file that represents the wind input data "
"(Weibull parameters). Please see the User's Guide for "
"a more detailed description of the parameters."),
label='Wind Data Points (CSV)',
validator=self.validator)
self.add_input(self.wind_data)
self.aoi = inputs.File(
args_key='aoi_uri',
helptext=(
"Optional. An OGR-supported vector file containing a "
"single polygon defining the area of interest. The "
"AOI must be projected with linear units equal to "
"meters. If the AOI is provided it will clip and "
"project the outputs to that of the AOI. The Distance "
"inputs are dependent on the AOI and will only be "
"accessible if the AOI is selected. If the AOI is "
"selected and the Distance parameters are selected, "
"then the AOI should also cover a portion of the land "
"polygon to calculate distances correctly. An AOI is "
"required for valuation."),
label='Area Of Interest (Vector) (Optional)',
validator=self.validator)
self.add_input(self.aoi)
self.bathymetry = inputs.File(
args_key='bathymetry_uri',
helptext=(
"A GDAL-supported raster file containing elevation "
"values represented in meters for the area of "
"interest. The DEM should cover at least the entire "
"span of the area of interest and if no AOI is "
"provided then the default global DEM should be used."),
label='Bathymetric Digital Elevation Model (Raster)',
validator=self.validator)
self.add_input(self.bathymetry)
self.land_polygon = inputs.File(
args_key='land_polygon_uri',
helptext=(
"An OGR-supported polygon vector that represents the "
"land and coastline that is of interest. For this "
"input to be selectable the AOI must be selected. The "
"AOI should also cover a portion of this land polygon "
"to properly calculate distances. This coastal "
"polygon, and the area covered by the AOI, form the "
"basis for distance calculations for wind farm "
"electrical transmission. This input is required for "
"masking by distance values and for valuation."),
interactive=False,
label='Land Polygon for Distance Calculation (Vector)',
validator=self.validator)
self.add_input(self.land_polygon)
self.global_wind_parameters = inputs.File(
args_key='global_wind_parameters_uri',
helptext=(
"A CSV file that holds wind energy model parameters "
"for both the biophysical and valuation modules. "
"These parameters are defaulted to values that are "
"supported and reviewed in the User's Guide. It is "
"recommended that careful consideration be taken "
"before changing these values and to make a new CSV "
"file so that the default one always remains."),
label='Global Wind Energy Parameters (CSV)',
validator=self.validator)
self.add_input(self.global_wind_parameters)
self.turbine_group = inputs.Container(
label='Turbine Properties')
self.add_input(self.turbine_group)
self.turbine_parameters = inputs.File(
args_key='turbine_parameters_uri',
helptext=(
"A CSV file that contains parameters corresponding to "
"a specific turbine type. The InVEST package comes "
"with two turbine model options, 3.6 MW and 5.0 MW. A "
"new turbine class may be created by using the "
"existing file format conventions and filling in new "
"parameters. Likewise an existing class may be "
"modified according to the user's needs. It is "
"recommended that the existing default CSV files are "
"not overwritten."),
label='Turbine Type Parameters File (CSV)',
validator=self.validator)
self.turbine_group.add_input(self.turbine_parameters)
self.number_of_machines = inputs.Text(
args_key='number_of_turbines',
helptext=(
"An integer value indicating the number of wind "
"turbines per wind farm."),
label='Number Of Turbines',
validator=self.validator)
self.turbine_group.add_input(self.number_of_machines)
self.min_depth = inputs.Text(
args_key='min_depth',
helptext=(
"A floating point value in meters for the minimum "
"depth of the offshore wind farm installation."),
label='Minimum Depth for Offshore Wind Farm Installation (meters)',
validator=self.validator)
self.turbine_group.add_input(self.min_depth)
self.max_depth = inputs.Text(
args_key='max_depth',
helptext=(
"A floating point value in meters for the maximum "
"depth of the offshore wind farm installation."),
label='Maximum Depth for Offshore Wind Farm Installation (meters)',
validator=self.validator)
self.turbine_group.add_input(self.max_depth)
self.min_distance = inputs.Text(
args_key='min_distance',
helptext=(
"A floating point value in meters that represents the "
"minimum distance from shore for offshore wind farm "
"installation. Required for valuation."),
interactive=False,
label=(
'Minimum Distance for Offshore Wind Farm Installation '
'(meters)'),
validator=self.validator)
self.turbine_group.add_input(self.min_distance)
self.max_distance = inputs.Text(
args_key='max_distance',
helptext=(
"A floating point value in meters that represents the "
"maximum distance from shore for offshore wind farm "
"installation. Required for valuation."),
interactive=False,
label=(
'Maximum Distance for Offshore Wind Farm Installation '
'(meters)'),
validator=self.validator)
self.turbine_group.add_input(self.max_distance)
self.valuation_container = inputs.Container(
args_key='valuation_container',
expandable=True,
expanded=False,
label='Valuation')
self.add_input(self.valuation_container)
self.foundation_cost = inputs.Text(
args_key='foundation_cost',
helptext=(
"A floating point number for the unit cost of the "
"foundation type (in millions of dollars). The cost of "
"a foundation will depend on the type selected, which "
"itself depends on a variety of factors including "
"depth and turbine choice. Please see the User's "
"Guide for guidance on properly selecting this value."),
label='Cost of the Foundation Type (USD, in Millions)',
validator=self.validator)
self.valuation_container.add_input(self.foundation_cost)
self.discount_rate = inputs.Text(
args_key='discount_rate',
helptext=(
"The discount rate reflects preferences for immediate "
"benefits over future benefits (e.g., would an "
"individual rather receive $10 today or $10 five years "
"from now?). See the User's Guide for guidance on "
"selecting this value."),
label='Discount Rate',
validator=self.validator)
self.valuation_container.add_input(self.discount_rate)
self.grid_points = inputs.File(
args_key='grid_points_uri',
helptext=(
"An optional CSV file with grid and land points to "
"determine cable distances from. An example:<br/> "
"<table border='1'> <tr> <th>ID</th> <th>TYPE</th> "
"<th>LATI</th> <th>LONG</th> </tr> <tr> <td>1</td> "
"<td>GRID</td> <td>42.957</td> <td>-70.786</td> </tr> "
"<tr> <td>2</td> <td>LAND</td> <td>42.632</td> "
"<td>-71.143</td> </tr> <tr> <td>3</td> <td>LAND</td> "
"<td>41.839</td> <td>-70.394</td> </tr> </table> "
"<br/><br/>Each point location is represented as a "
"single row with columns being <b>ID</b>, <b>TYPE</b>, "
"<b>LATI</b>, and <b>LONG</b>. The <b>LATI</b> and "
"<b>LONG</b> columns indicate the coordinates for the "
"point. The <b>TYPE</b> column relates to whether it "
"is a land or grid point. The <b>ID</b> column is a "
"simple unique integer. The shortest distance between "
"respective points is used for calculations. See the "
"User's Guide for more information."),
label='Grid Connection Points (Optional)',
validator=self.validator)
self.valuation_container.add_input(self.grid_points)
self.avg_grid_dist = inputs.Text(
args_key='avg_grid_distance',
helptext=(
"<b>Always required, but NOT used in the model if "
"Grid Points provided</b><br/><br/>A number in "
"kilometres that is only used if grid points are NOT "
"used in valuation. When running valuation using the "
"land polygon to compute distances, the model uses an "
"average distance to the onshore grid from coastal "
"cable landing points instead of specific grid "
"connection points. See the User's Guide for a "
"description of the approach and the method used to "
"calculate the default value."),
label='Average Shore to Grid Distance (Kilometers)',
validator=self.validator)
self.valuation_container.add_input(self.avg_grid_dist)
self.price_table = inputs.Checkbox(
args_key='price_table',
helptext=(
"When checked the model will use the social cost of "
"wind energy table provided in the input below. If "
"not checked the price per year will be determined "
"using the price of energy input and the annual rate "
"of change."),
label='Use Price Table')
self.valuation_container.add_input(self.price_table)
self.wind_schedule = inputs.File(
args_key='wind_schedule',
helptext=(
"A CSV file that has the price of wind energy per "
"kilowatt hour for each year of the wind farms life. "
"The CSV file should have the following two "
"columns:<br/><br/><b>Year:</b> a set of integers "
"indicating each year for the lifespan of the wind "
"farm. They can be in date form such as : 2010, 2011, "
"2012... OR simple time step integers such as : 0, 1, "
"2... <br/><br/><b>Price:</b> a set of floats "
"indicating the price of wind energy per kilowatt hour "
"for a particular year or time step in the wind farms "
"life.<br/><br/>An example:<br/> <table border='1'> "
"<tr><th>Year</th> <th>Price</th></tr><tr><td>0</td><t "
"d>.244</td></tr><tr><td>1</td><td>.255</td></tr><tr>< "
"td>2</td><td>.270</td></tr><tr><td>3</td><td>.275</td "
"></tr><tr><td>4</td><td>.283</td></tr><tr><td>5</td>< "
"td>.290</td></tr></table><br/><br/><b>NOTE:</b> The "
"number of years or time steps listed must match the "
"<b>time</b> parameter in the <b>Global Wind Energy "
"Parameters</b> input file above. In the above "
"example we have 6 years for the lifetime of the farm, "
"year 0 being a construction year and year 5 being the "
"last year."),
interactive=False,
label='Wind Energy Price Table (CSV)',
validator=self.validator)
self.valuation_container.add_input(self.wind_schedule)
self.wind_price = inputs.Text(
args_key='wind_price',
helptext=(
"The price of energy per kilowatt hour. This is the "
"price that will be used for year or time step 0 and "
"will then be adjusted based on the rate of change "
"percentage from the input below. See the User's "
"Guide for guidance about determining this value."),
label='Price of Energy per Kilowatt Hour ($/kWh)',
validator=self.validator)
self.valuation_container.add_input(self.wind_price)
self.rate_change = inputs.Text(
args_key='rate_change',
helptext=(
"The annual rate of change in the price of wind "
"energy. This should be expressed as a decimal "
"percentage. For example, 0.1 for a 10% annual price "
"change."),
label='Annual Rate of Change in Price of Wind Energy',
validator=self.validator)
self.valuation_container.add_input(self.rate_change)
# Set interactivity, requirement as input sufficiency changes
self.aoi.sufficiency_changed.connect(
self.land_polygon.set_interactive)
self.land_polygon.sufficiency_changed.connect(
self.min_distance.set_interactive)
self.land_polygon.sufficiency_changed.connect(
self.max_distance.set_interactive)
self.price_table.sufficiency_changed.connect(
self._toggle_price_options)
def _toggle_price_options(self, use_price_table):
self.wind_schedule.set_interactive(use_price_table)
self.wind_price.set_interactive(not use_price_table)
self.rate_change.set_interactive(not use_price_table)
def assemble_args(self):
args = {
self.workspace.args_key: self.workspace.value(),
self.suffix.args_key: self.suffix.value(),
self.wind_data.args_key: self.wind_data.value(),
self.bathymetry.args_key: self.bathymetry.value(),
self.global_wind_parameters.args_key:
self.global_wind_parameters.value(),
self.turbine_parameters.args_key: self.turbine_parameters.value(),
self.number_of_machines.args_key: self.number_of_machines.value(),
self.min_depth.args_key: self.min_depth.value(),
self.max_depth.args_key: self.max_depth.value(),
self.valuation_container.args_key: self.valuation_container.value(),
self.avg_grid_dist.args_key: self.avg_grid_dist.value(),
}
if self.aoi.value():
args[self.aoi.args_key] = self.aoi.value()
if self.land_polygon.value():
args[self.land_polygon.args_key] = self.land_polygon.value()
if self.min_distance.value():
args[self.min_distance.args_key] = self.min_distance.value()
if self.max_distance.value():
args[self.max_distance.args_key] = self.max_distance.value()
if self.grid_points.value():
args[self.grid_points.args_key] = self.grid_points.value()
# Include these args if valuation is checked.
if args[self.valuation_container.args_key]:
args[self.foundation_cost.args_key] = self.foundation_cost.value()
args[self.discount_rate.args_key] = self.discount_rate.value()
args[self.price_table.args_key] = self.price_table.value()
args[self.wind_schedule.args_key] = self.wind_schedule.value()
args[self.wind_price.args_key] = self.wind_price.value()
args[self.rate_change.args_key] = self.rate_change.value()
return args
| jandrewjohnson/hazelbean | hazelbean/ui/examples/wind_energy.py | wind_energy.py | py | 17,234 | python | en | code | 1 | github-code | 13 |
32826183115 | # Find out how many cakes Pete could bake considering his recipes.
def cakes(recipe, available):
    """Return how many whole cakes can be baked from the pantry.

    For every recipe ingredient, the number of cakes it allows is
    available // required (0 when the ingredient is missing entirely);
    the answer is the minimum over all ingredients.
    """
    per_ingredient = [available.get(ingredient, 0) // amount
                      for ingredient, amount in recipe.items()]
    return min(per_ingredient)


print(cakes({"flour": 500, "sugar": 200, "eggs": 1}, {"flour": 1200, "sugar": 1200, "eggs": 5, "milk": 200}))
28955385995 | import six
from .test_base import TestBase
import b2.utils
class TestChooseParts(TestBase):
    """Checks for b2.utils.choose_part_ranges on small and huge files."""

    def test_it(self):
        small_cases = (
            ([(0, 100), (100, 100)], 200, 100),
            ([(0, 149), (149, 150)], 299, 100),
            ([(0, 100), (100, 100), (200, 100)], 300, 100),
        )
        for expected, content_length, min_part_size in small_cases:
            self._check_one(expected, content_length, min_part_size)
        # A 10 TB file with a 100 MB minimum should yield 10000 parts of 1 GB.
        one_gb = 1000 ** 3
        ten_tb = 10 * (1000 ** 4)
        expected = [(i * one_gb, one_gb) for i in six.moves.range(10000)]
        actual = b2.utils.choose_part_ranges(ten_tb, 100 * 1000 * 1000)
        self.assertEqual(expected, actual)

    def _check_one(self, expected, content_length, min_part_size):
        actual = b2.utils.choose_part_ranges(content_length, min_part_size)
        self.assertEqual(expected, actual)
class TestFormatAndScaleNumber(TestBase):
    """Spot checks for b2.utils.format_and_scale_number with a byte unit."""

    def test_it(self):
        for value, expected in ((1, '1 B'), (999, '999 B'),
                                (1000, '1.00 kB'), (999000, '999 kB')):
            self._check_one(expected, value)

    def _check_one(self, expected, x):
        self.assertEqual(expected, b2.utils.format_and_scale_number(x, 'B'))
class TestFormatAndScaleFraction(TestBase):
    """Spot checks for b2.utils.format_and_scale_fraction with a byte unit."""

    def test_it(self):
        for expected, numerator, denominator in (
                ('0 / 100 B', 0, 100),
                ('0.0 / 10.0 kB', 0, 10000),
                ('9.4 / 10.0 kB', 9400, 10000),
        ):
            self._check_one(expected, numerator, denominator)

    def _check_one(self, expected, numerator, denominator):
        self.assertEqual(expected, b2.utils.format_and_scale_fraction(numerator, denominator, 'B'))
| jhill69/Hello-World | test/test_utils.py | test_utils.py | py | 1,461 | python | en | code | 0 | github-code | 13 |
29485243556 | import sys
import numpy as np
from keras.models import Sequential
from data_helper import load_data, split_data_targets
from models import NeuralNetwork, LSTMNetwork, CNNNetwork, CNNNetwork2
# function that uses k-fold cross validation to evaluate a model
def cv_k_fold(model_info, k=10, verbose=0):
    """Evaluate `model_info` with k-fold cross-validation.

    Loads the data and the k splits, trains one model per fold, prints the
    per-fold accuracy, then prints the mean accuracy +/- one std deviation.
    """
    data, targets, splits = load_data(k)
    scores = []
    for train_index, test_index in splits:
        # Materialise the train/test partitions for this fold, shaped for
        # the model's expected input.
        split = split_data_targets(data,
                                   targets,
                                   train_index,
                                   test_index,
                                   model_info.input_shape)
        (train_data, train_targets), (test_data, test_targets) = split
        fold_score = train(model_info, train_data, train_targets,
                           test_data, test_targets, verbose)
        scores.append(fold_score)
        print('Fold: ' + str(len(scores)) + ', Accuracy: ' + str(round(fold_score * 100, 3)) + '%')
    # Aggregate the fold accuracies into a single CV estimate.
    cv_score = float(np.mean(scores) * 100)
    cv_std = float(np.std(scores) * 100)
    print('Final Accuracy:', str(round(cv_score, 3)) + '%', '(+/-', str(round(cv_std, 3)) + '%)')
# function that trains a NN model with given configuration
def train(model_info, train_data, train_targets, test_data, test_targets, verbose):
    """Build, fit and evaluate one model; return its accuracy on the test part."""
    network = Sequential(model_info.get_structure())
    network.compile(optimizer=model_info.optimizer,
                    loss=model_info.loss,
                    metrics=['accuracy'])
    network.fit(train_data, train_targets,
                epochs=model_info.epochs,
                verbose=verbose,
                batch_size=model_info.batch_size)
    # evaluate() returns [loss, accuracy]; only the accuracy is needed.
    loss_and_metrics = network.evaluate(test_data, test_targets, verbose=verbose)
    return loss_and_metrics[1]
# main function to run when script is called
def main():
    """Parse CLI arguments and run k-fold cross-validation for one model type.

    Usage: <model_type> [<n_splits> <verbose>]
      model_type: one of 'nn', 'lstm', 'cnn', 'cnn2'
      n_splits:   integer > 1 (number of CV folds)
      verbose:    0 (off) or 1 (on)
    Prints an error message and returns on any invalid argument.
    """
    # remove first system argument (the script name)
    args = sys.argv[1:]
    num_args = len(args)
    # either just a model type, or model type plus both optional settings
    if num_args != 1 and num_args != 3:
        print('Error, was expecting 1 argument or 3 arguments: <model_type> [<n_splits> <verbose>]')
        print('Found:', num_args)
        return
    if num_args == 3:
        # <n_splits> must be an integer greater than 1 (isdigit rejects
        # signs, so negatives are already excluded)
        if not args[1].isdigit() or int(args[1]) <= 1:
            print('Error, <n_splits> was expecting: k > 1')
            print('Found:', args[1])
            return
        # <verbose> must be exactly 0 or 1
        if not args[2].isdigit() or int(args[2]) not in (0, 1):
            print('Error, <verbose> was expecting: 0 (off) or 1 (on)')
            print('Found:', args[2])
            return
    # Dispatch table replaces the duplicated if/elif chain.
    model_classes = {
        'nn': NeuralNetwork,
        'lstm': LSTMNetwork,
        'cnn': CNNNetwork,
        'cnn2': CNNNetwork2,
    }
    model_class = model_classes.get(args[0])
    if model_class is None:
        # BUG FIX: the old message omitted the supported 'cnn2' type
        print('Error, <model_type> was expecting: \'nn\', \'lstm\', \'cnn\', \'cnn2\'')
        print('Found: \'' + args[0] + '\'')
        return
    if num_args == 3:
        cv_k_fold(model_class(), int(args[1]), int(args[2]))
    else:
        cv_k_fold(model_class())
# Entry point: cross-validate the neural network model selected on the CLI.
if __name__ == "__main__":
    main()
| Flyer4109/mnist-digit-recogniser | cv_model.py | cv_model.py | py | 5,300 | python | en | code | 0 | github-code | 13 |
73900101456 | import scrapy
class QuotesSpider(scrapy.Spider):
    """Spider that fetches a single Amazon product page and yields its
    title and price.

    NOTE(review): the class is named QuotesSpider (likely left over from
    the Scrapy tutorial) but is registered under the spider name 'amzn'.
    """
    name = "amzn"
    start_urls = [
        'https://www.amazon.com/dp/B07FK8SQDQ/ref=twister_B00WS2T4ZA?_encoding=UTF8&th=1',
    ]

    def parse(self, response):
        # NOTE(review): these XPaths are relative ("div[...]"), so they only
        # match direct children of the document root node -- they almost
        # certainly need a "//" prefix (e.g. "//span[@id='productTitle']").
        # Verify against the live page markup before relying on the output.
        yield {
            'title': response.xpath("div[@id='title_feature_div']/div[@id='titleSection']/h1[@id='title']/span[@id='productTitle']/text()").get(),
            'price': response.xpath("td[@class='a-span12']/span[@id='priceblock_ourprice']/text()").get(),
        }
6644937564 | """Test for OT3StateManager object."""
import asyncio
from typing import AsyncGenerator, Generator
from unittest.mock import Mock, patch
import pytest
from opentrons.hardware_control.types import Axis
from state_manager.messages import MoveMessage
from state_manager.ot3_state import OT3State
from state_manager.pipette_model import PipetteModel
from state_manager.state_manager import OT3StateManager
from state_manager.util import Direction
from tests.udp_client import EchoClientProtocol
# Loopback address and port the test server/client pair binds to.
HOST = "localhost"
PORT = 8088
@pytest.fixture(scope="function")
def ot3_state() -> OT3State:
    """Create a fresh OT3State per test, built from two SINGLE_20 pipette models."""
    # NOTE(review): the third positional argument True is unnamed here --
    # confirm its meaning against OT3State.build's signature.
    return OT3State.build(
        PipetteModel.SINGLE_20.value, PipetteModel.SINGLE_20.value, True
    )
@pytest.fixture
def loop(
    event_loop: asyncio.AbstractEventLoop,
) -> Generator[asyncio.AbstractEventLoop, None, None]:
    """Re-expose the injected event_loop fixture under the name `loop`."""
    yield event_loop
@pytest.fixture
async def server(
    loop: asyncio.AbstractEventLoop, ot3_state: OT3State
) -> AsyncGenerator[OT3StateManager, None]:
    """Start an OT3StateManager server, yield it once reachable, close on teardown."""
    server = await OT3StateManager(ot3_state).start_server(HOST, PORT)
    # Block until the server reports it is ready to accept traffic.
    await server.is_connected()
    yield server
    await server.close()
@pytest.fixture
async def client(
    loop: asyncio.AbstractEventLoop,
) -> AsyncGenerator[EchoClientProtocol, None]:
    """Create a UDP client pointed at the test server; close it on teardown."""
    client = await EchoClientProtocol.build(HOST, PORT, loop)
    await client.is_connected()
    yield client
    await client.close()
@patch.object(OT3StateManager, "datagram_received")
async def test_message_received(
    patched_object: Mock,
    server: OT3StateManager,
    client: EchoClientProtocol,
    loop: asyncio.AbstractEventLoop,
    ot3_state: OT3State,
) -> None:
    """Confirm the server's datagram_received handler gets the raw message bytes."""
    move_message_bytes = MoveMessage(Axis.X, Direction.POSITIVE).to_bytes()
    client.send_message(move_message_bytes)
    # Give the datagram up to 15 s to arrive before inspecting the mock.
    await asyncio.wait_for(server.is_connected(), 15.0)
    # First positional argument of the patched handler is the received payload.
    datagram_received_data_arg_content = patched_object.call_args.args[0]
    assert datagram_received_data_arg_content == move_message_bytes
| Opentrons/ot3-firmware | state_manager/tests/test_ot3_state_manager.py | test_ot3_state_manager.py | py | 2,143 | python | en | code | 15 | github-code | 13 |
32873008032 | from pyspark.sql import SparkSession
from pyspark.sql.functions import from_json, col, udf
from pyspark.sql.types import StringType, StructType, IntegerType, StructField, DateType, LongType, FloatType
from constants.constants import KAFKA_URI, TOPIC_JOB, TOPIC_USER, CHECKPOINT_PATH
def main():
    """Stream job records from Kafka, filter by salary, publish keys back.

    Consumes JSON job rows from TOPIC_JOB, keeps rows with salary > 1.0,
    rewrites each record's value to a derived key string, and writes the
    result to TOPIC_USER on a 5-minute trigger.
    """
    # Allow up to this many Spark streaming jobs to run concurrently.
    concurrent_job = 3
    spark = SparkSession \
        .builder \
        .appName("dbstreaming") \
        .getOrCreate()
    spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
    spark.conf.set("spark.sql.legacy.timeParserPolicy", "LEGACY")
    spark.sparkContext.setLogLevel("ERROR")
    spark.conf.set("spark.streaming.concurrentJobs", str(concurrent_job))
    # Source stream: raw Kafka records from the job topic.
    df = spark \
        .readStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", KAFKA_URI) \
        .option("subscribe", TOPIC_JOB) \
        .load()
    # Schema of the JSON payload carried in each Kafka record's value field.
    schema_job = StructType([
        StructField("id", LongType()),
        StructField("application_deadline", DateType()),
        StructField("company_address", IntegerType()),
        StructField("salary", FloatType()),
        StructField("ages", StringType()),
        StructField("education_level", StringType()),
        StructField("position", StringType()),
        StructField("job_attribute", StringType()),
        StructField("year_experiences", FloatType()),
    ])
    # Parse the JSON value and flatten its fields next to the Kafka metadata.
    data_job = df.withColumn(
        "data", from_json(col("value").astype(StringType()), schema_job)
    ).select("key", "offset", "partition", "timestamp", "timestampType", "topic", "data.*")
    data_job.createOrReplaceTempView("userId")
    data = spark.sql("select * from userId where salary > 1.0")
    # NOTE(review): outgoing value is "userId--5--<key>"; the meaning of the
    # literal "5" is not evident here -- confirm with the TOPIC_USER consumer.
    check_matching = udf(
        lambda x: "userId--5--" + str(x), StringType()
    )
    data = data.withColumn("value", check_matching(col("key")))
    # Sink: publish matches back to Kafka, checkpointing for fault recovery.
    data.writeStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", KAFKA_URI) \
        .option("checkpointLocation", CHECKPOINT_PATH + '/user1') \
        .trigger(processingTime='5 minutes') \
        .option("topic", TOPIC_USER).start()
    spark.streams.awaitAnyTermination()
# Script entry point: start the streaming pipeline and block until it ends.
if __name__ == '__main__':
    main()
| vuminhhieucareer172/SparkPushNotification | streaming/job_streaming_example.py | job_streaming_example.py | py | 2,151 | python | en | code | 0 | github-code | 13 |
72033327378 | from __future__ import annotations
from typing import TYPE_CHECKING, List
from ..l0.Activity import Activity
from ..ns import *
if TYPE_CHECKING:
from rdflib import Graph, Literal
from ..l0.Agent import Agent
from .TransparencyActivityTypology import TransparencyActivityTypology
from .TransparencyObligation import TransparencyObligation
from .TransparencyResource import TransparencyResource
class TransparencyActivity(Activity):
    """An activity performed to fulfil a transparency obligation.

    Serialized to RDF via _addProperties using the TRANSP vocabulary.
    """

    __type__ = TRANSP["TransparencyActivity"]

    generatesResource: List[TransparencyResource] = None
    hasTransparencyActivityTypology: List[TransparencyActivityTypology] = None
    triggeredBy: List[TransparencyObligation] = None
    isPerformedByAgent: Agent = None
    date: Literal = None

    def _addProperties(self, g: Graph):
        """Emit this activity's properties as triples into graph g."""
        super()._addProperties(g)
        # List-valued properties: emit one triple per element (None and
        # empty lists contribute nothing, matching a truthiness guard).
        for resource in self.generatesResource or []:
            g.add((self.uriRef, TRANSP["generatesResource"], resource.uriRef))
        for typology in self.hasTransparencyActivityTypology or []:
            g.add((self.uriRef, TRANSP["hasTransparencyActivityTypology"],
                   typology.uriRef))
        for obligation in self.triggeredBy or []:
            g.add((self.uriRef, TRANSP["triggeredBy"], obligation.uriRef))
        # Scalar properties: emit only when set.
        if self.isPerformedByAgent:
            g.add((self.uriRef, TRANSP["isPerformedByAgent"],
                   self.isPerformedByAgent.uriRef))
        if self.date:
            g.add((self.uriRef, TI["date"], self.date))
| luca-martinelli-09/ontopia-py | ontopia_py/transparency/TransparencyActivity.py | TransparencyActivity.py | py | 1,709 | python | en | code | 0 | github-code | 13 |
38005689238 | import ROOT
def rebin2(h, name, gx=1, gy=1):
    """Rebin the 2D histogram `h` into a new TH2F called `name`.

    Groups gx bins along x and gy bins along y; each new bin holds the sum
    of the grouped original bins.  Partial groups at the upper edges are
    dropped, while the new axes keep the original min/max limits.
    """
    new_nx = h.GetNbinsX() // gx
    new_ny = h.GetNbinsY() // gy
    hnew = ROOT.TH2F(name,
                     h.GetTitle(),
                     new_nx,
                     h.GetXaxis().GetXmin(),
                     h.GetXaxis().GetXmax(),
                     new_ny,
                     h.GetYaxis().GetXmin(),
                     h.GetYaxis().GetXmax())
    for ix in range(new_nx):
        for iy in range(new_ny):
            # ROOT bin numbering is 1-based; bin 0 is the underflow bin.
            content = sum(h.GetBinContent(ix * gx + iix + 1, iy * gy + iiy + 1)
                          for iix in range(gx)
                          for iiy in range(gy))
            hnew.SetBinContent(ix + 1, iy + 1, content)
    return hnew
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/PyAnalysis/PyAnalysisUtils/python/rebin2.py | rebin2.py | py | 873 | python | en | code | 1 | github-code | 13 |
28375139539 | #!/usr/bin/python3.6
#-*- coding: utf-8 -*-
def zadanie1(s):
    """Convert a Polish number word (1-19) or tens phrase (20-99) to an int.

    Examples: "jeden" -> 1, "trzydzieści trzy" -> 33.  The value is printed
    and returned.  An unknown word after a valid tens word is ignored (the
    tens value alone is returned), matching the old behaviour; an unknown
    leading word raises KeyError.
    """
    Liczby = {
        "jeden": 1,
        "dwa": 2,
        "trzy": 3,
        "cztery": 4,
        "pięć": 5,
        "sześć": 6,
        "siedem": 7,
        "osiem": 8,
        "dziewięć": 9,
        "dziesięć": 10,
        "jedenaście": 11,
        "dwanaście": 12,
        "trzynaście": 13,
        "czternaście": 14,
        "pietnaście": 15,   # original misspelling kept for compatibility
        "piętnaście": 15,   # BUG FIX: correct spelling added
        "szesnaście": 16,
        "siedemnaście": 17,
        "osiemnaście": 18,
        "dziewiętnaście": 19,
    }
    Dziesiatki = {
        "dwadzieścia": 20,
        "trzydzieści": 30,
        "czterdzieści": 40,
        "pięćdziesiąt": 50,
        "sześćdziesiąt": 60,
        "siedemdziesiąt": 70,
        "osiemdziesiąt": 80,
        "dziewięćdziesiąt": 90,
    }
    # BUG FIX: explicit dict lookups replace bare try/except control flow,
    # which silently swallowed every exception.  split() (no argument) also
    # tolerates repeated/leading whitespace, unlike split(" ").
    words = s.lower().split()
    tens = Dziesiatki.get(words[0])
    if tens is None:
        result = Liczby[words[0]]
    else:
        ones = Liczby.get(words[1]) if len(words) > 1 else None
        result = tens + ones if ones is not None else tens
    print(result)
    return result


assert (zadanie1("jeden"))==1
assert (zadanie1("trzydzieści trzy"))==33
assert (zadanie1("trzynaście"))==13
42482656431 | # -*- coding: utf-8 -*-
import json
from collections import Counter, defaultdict
from typing import Dict, List, Optional, Tuple, Union

from tqdm import tqdm

from models.utils import is_chinese_token, get_token_pinyin
def get_frequency(
    file_path: str,
    need_pinyin_freq: bool = False
) -> Union[Dict[str, list],
           Tuple[Dict[str, list], Dict[str, Tuple[List[str], List[float]]]]]:
    """Compute relative character frequencies over a correction corpus.

    Each line of ``file_path`` is tab separated:
    ``<id>\\t<num_targets>\\t<source>\\t<target>...``.  When a line has no
    target sentences (num_targets == 0) the source sentence is counted
    instead.  Only Chinese tokens (per ``is_chinese_token``) are counted.

    Returns ``{"tokens": [...], "tokens_freq": [...]}``, and additionally a
    ``{pinyin: (tokens, freqs)}`` dict when ``need_pinyin_freq`` is True.
    """
    token_counter = Counter()
    pinyin_token_dict = defaultdict(Counter)
    # BUG FIX: the old code leaked the file handle (open() without close).
    with open(file_path, 'r') as fh:
        lines = fh.readlines()
    for line in tqdm(lines):
        # rstrip('\n') keeps the trailing newline out of the last target.
        _, num_targets, source, *targets = line.rstrip('\n').split('\t')
        # BUG FIX: split() yields *strings*, so the old comparison
        # `num_targets == 0` was always False and sources were never used.
        if int(num_targets) == 0:
            targets = [source]
        for target in targets:
            chinese = [token for token in target if is_chinese_token(token)]
            token_counter.update(chinese)
            if need_pinyin_freq:
                for token in chinese:
                    pinyin_token_dict[get_token_pinyin(token)].update([token])
    token_total = sum(token_counter.values())
    tokens = [token for token in token_counter.keys()]
    tokens_freq = [value / token_total for value in token_counter.values()]
    result = {"tokens": tokens, "tokens_freq": tokens_freq}
    if not need_pinyin_freq:
        return result
    pinyin_tokens_freq = dict()
    for pinyin, pinyin_counter in pinyin_token_dict.items():
        pinyin_total = sum(pinyin_counter.values())
        pinyin_tokens_freq[pinyin] = (
            [token for token in pinyin_counter.keys()],
            [value / pinyin_total for value in pinyin_counter.values()],
        )
    return result, pinyin_tokens_freq
def main():
    """Build token and pinyin frequency tables and save them as JSON."""
    tokens_freq_dict, pinyin_freq_dict = get_frequency(
        "./data/train.txt",
        need_pinyin_freq=True,
    )
    # BUG FIX: the old code passed bare open() handles straight to json.dump
    # and never closed them; context managers guarantee flush and close.
    with open("./resources/tokens_freq.json", "w") as fout:
        json.dump(tokens_freq_dict, fout, ensure_ascii=False, indent=4)
    with open("./resources/pinyin_freq.json", "w") as fout:
        json.dump(pinyin_freq_dict, fout, ensure_ascii=False, indent=4)
# Script entry point: regenerate the frequency resources from the corpus.
if __name__ == '__main__':
    main()
| Peter-Chou/cgec-initialized-with-plm | get_tokens_pinyin_frequency.py | get_tokens_pinyin_frequency.py | py | 2,179 | python | en | code | 3 | github-code | 13 |
3455079071 | from rdflib import Variable
from thesis.graph import PREFIX_REL, InternalNS, internal, RelationNS, BGP
from thesis.rewriting import QueryRewriter
def singleton_predicate(pred, rid):
    """Return the internal IRI '<local-name>-<rid>' for a relation predicate."""
    prefix_length = len(str(RelationNS))
    local_name = str(pred)[prefix_length:]
    return internal("{}-{}".format(local_name, rid))
class SingletonQueryRewriter(QueryRewriter):
    """Query rewriter for the singleton-property reification scheme.

    Each predicate of a BGP is replaced by a fresh property variable linked
    to the original predicate via thi:singletonPropertyOf, so statement-level
    metadata can be attached to every triple.
    """

    # Class-wide counter used to make generated variable names unique.
    LAST_ID = 0

    @staticmethod
    def _get_next_id():
        # Monotonically increasing id shared across all rewriter uses.
        SingletonQueryRewriter.LAST_ID = SingletonQueryRewriter.LAST_ID + 1
        return SingletonQueryRewriter.LAST_ID

    @staticmethod
    def get_id_node():
        # Fresh SPARQL variable used to join the singleton properties of one BGP.
        return Variable(f"id_{SingletonQueryRewriter._get_next_id()}")  # BNode()

    def get_subject_predicate_pairs(self):
        """Query the store for (subject, original predicate) pairs of all
        reified statements."""
        query_reification = f"""
        PREFIX thi: <http://thesis.de/internal/>
        SELECT ?subject ?predicate
        {self.get_from_clause()}
        WHERE {{
            ?subject ?singletonProperty ?o .
            ?singletonProperty thi:singletonPropertyOf ?predicate .
        }}"""
        return self.db.query(query_reification.strip())

    @staticmethod
    def reify_triples(triples):
        """Rewrite a BGP so every triple uses a singleton property variable.

        All triples in one call share a single id suffix; when the BGP has
        more than one triple, a common ?id variable ties the singleton
        properties together.
        """
        def get_pred_var(predicate, pid):
            # Variable name: the variable's own name, or the IRI's local part
            # (PREFIX_REL stripped), suffixed with the shared per-call id.
            pred_name = str(predicate) if isinstance(predicate, Variable) else str(predicate)[len(PREFIX_REL):]
            return Variable(pred_name + "_" + str(pid))
        new_triples = BGP()
        predicates = set(p for _, p, _ in triples)
        predicate_id = SingletonQueryRewriter._get_next_id()
        # Replace predicate with singleton property variable
        for s, p, o in triples:
            new_triples.add((s, get_pred_var(p, predicate_id), o))
        # Add singletonPropertyOf edge to original predicate
        for pred in predicates:
            new_triples.add((
                get_pred_var(pred, predicate_id),
                InternalNS.singletonPropertyOf,
                pred
            ))
        # Add a common Variable ?id for the join
        if len(triples) > 1:
            id_node = SingletonQueryRewriter.get_id_node()
            for pred in predicates:
                new_triples.add((
                    get_pred_var(pred, predicate_id),
                    InternalNS.id,
                    id_node
                ))
            new_triples.add_filter(f'isLiteral(?{id_node})')
        return new_triples
| johnruth96/semantics-refinement | src/thesis/models/singleton.py | singleton.py | py | 2,266 | python | en | code | 0 | github-code | 13 |
13103245684 | from sys import stdin
def isMixed(notes):
    """Return True unless every adjacent pair of notes differs by exactly 1."""
    return any(abs(curr - prev) != 1
               for prev, curr in zip(notes, notes[1:]))
# Read one line of space-separated note numbers and classify the scale.
notes = [int(i) for i in stdin.readline().split()]
if isMixed(notes):
    print("mixed")
else:
    # Strictly stepwise run: starting at 1 means ascending, else descending.
    if notes[0] == 1:
        print("ascending")
    else:
        print("descending")
| olwooz/algorithm-practice | practice/2022_01/220117_Baekjoon_2920_Notes_Python/220117_Baekjoon_2920_Notes.py | 220117_Baekjoon_2920_Notes.py | py | 349 | python | en | code | 0 | github-code | 13 |
37909796118 | #
# set inFileName and outFileName to convert TProfile2D weights to pool
#
import AthenaCommon.Constants as Lvl
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon.AppMgr import theApp
import IOVDbSvc.IOVDb
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Reader algorithm: parses the TProfile2D weights from `inFileName`
# (the caller must define inFileName/outFileName before including this).
from CaloLocalHadCalib.CaloLocalHadCalibConf import CaloReadLCWeightsFile
ReadLCWeights = CaloReadLCWeightsFile("ReadLCWeights")
ReadLCWeights.LCWeightFileName = inFileName
ReadLCWeights.CorrectionKey="H1ClusterCellWeights"
topSequence += ReadLCWeights
# Number of events to be processed (default is 10)
theApp.EvtMax = 1
AthenaPoolCnvSvc = Service( "AthenaPoolCnvSvc" )
AthenaSealSvc = Service( "AthenaSealSvc" )
AthenaSealSvc.CheckDictionary = True
# Conditions writer: registers the calibration object into the POOL file
# `outFileName` under the folder and IOV tag configured below.
from RegistrationServices.OutputConditionsAlg import OutputConditionsAlg
myOCA=OutputConditionsAlg("myOCA",outFileName)
# List of objects container type#key#foldername
myOCA.ObjectList = ["CaloLocalHadCoeff#H1ClusterCellWeights#/CALO/HadCalibration2/H1ClusterCellWeights"]
myOCA.WriteIOV = True
myOCA.IOVTagList = ["CaloH1CellWeights2-GEO08-QGSP-BERT-Log" ]
myOCA.OutputLevel= Lvl.DEBUG
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = Lvl.INFO
DetectorStore = svcMgr.DetectorStore
DetectorStore.Dump = TRUE
DetectorStore.OutputLevel = Lvl.INFO
iovdbsvc = Service( "IOVDbSvc" )
IOVDbSvc.OutputLevel = Lvl.INFO
# Output conditions database: local sqlite file with the OFLP200 schema.
svcMgr.IOVDbSvc.dbConnection = "sqlite://X;schema=myWeightGeo08BERTLog200.db;dbname=OFLP200"
| rushioda/PIXELVALID_athena | athena/Calorimeter/CaloLocalHadCalib/share/CaloReadLCWeightsFile.py | CaloReadLCWeightsFile.py | py | 1,560 | python | en | code | 1 | github-code | 13 |
def main():
    """Two-pointer sweep over two run-length-encoded rows (ABC 294 E).

    VL holds (value, run_length) pairs for the first row; UL is an
    *iterator* of pairs for the second row, deliberately consumed
    incrementally across iterations of the outer loop.  `ans` accumulates
    the number of aligned positions where both rows hold the same value.
    """
    v, u = -1, -1
    v_cnt = u_cnt = 0  # total positions consumed so far from each row
    ans = 0
    for v, lv in VL:
        if v == u:
            # The current U-run overlaps the start of this V-run.
            ans += min(lv, u_cnt - v_cnt)
        v_cnt += lv
        if u_cnt >= v_cnt:
            continue
        # Advance the U iterator until it catches up with v_cnt; the inner
        # for resumes from where the previous break left off.
        for u, lu in UL:
            if u == v:
                ans += min(lu, v_cnt - u_cnt)
            u_cnt += lu
            if u_cnt >= v_cnt:
                break
    return print(ans)
if __name__ == '__main__':
    # L: total row length (not used below), N/M: number of runs per row.
    L, N, M = map(int, input().split())
    VL = [list(map(int, input().split())) for _ in range(N)]
    # UL is wrapped in iter() on purpose: main() resumes consuming it across
    # outer-loop iterations instead of restarting from the beginning.
    UL = iter([list(map(int, input().split())) for _ in range(M)])
    main()
| Shirohi-git/AtCoder | abc291-/abc294_e.py | abc294_e.py | py | 617 | python | en | code | 2 | github-code | 13 |
23606068239 | #@ type: compute
#@ parents:
#@ - func1
#@ - func2
#@ - func3
#@ - func4
#@ corunning:
#@ mem1:
#@ trans: mem1
#@ type: rdma
import struct
import threading
import time
import pickle
import sys
import copy
import codecs
import copyreg
import collections
from base64 import b64encode
from collections import deque, Counter
from typing import List
import cv2
import numpy as np
import pickle
import disaggrt.buffer_pool_lib as buffer_pool_lib
from disaggrt.rdma_array import remote_array
class Box:
    """Axis-aligned bounding box plus the flattened crop it encloses."""

    def __init__(self, wmin: int, hmin: int, wmax: int, hmax: int, compressed_img: List[int]):
        self.wmin = wmin
        self.hmin = hmin
        self.wmax = wmax
        self.hmax = hmax
        self.compressed_img = compressed_img

    def serialize(self):
        """Flatten to [wmin, hmin, wmax, hmax, *compressed_img]."""
        coordinates = [self.wmin, self.hmin, self.wmax, self.hmax]
        return coordinates + self.compressed_img

    @staticmethod
    def deserialize(obj):
        """Inverse of serialize(): first four entries are the box corners,
        the remainder is the compressed image data."""
        wmin, hmin, wmax, hmax = obj[0], obj[1], obj[2], obj[3]
        return Box(wmin, hmin, wmax, hmax, obj[4:])
def main(params, action):
    """Read boxes/predictions from the remote buffer and render the OCR image.

    Remote buffer layout (as read here):
    [img_len][image][label_len][labels][boxes_len][boxes][pred_len][preds].
    Assumes trans.read(nbytes, remote_offset, local_offset) copies remote
    bytes into trans.buf at local_offset -- TODO confirm against the RDMA
    transport API.
    """
    trans = action.get_transport('mem1', 'rdma')
    trans.reg(buffer_pool_lib.buffer_size)
    # Get image length
    trans.read(4, 0, 0)
    print("Get image length")
    imglength = struct.unpack_from('@I', trans.buf[0:4])[0]
    print(imglength)
    remoteIndex = 4
    # Skip over the image payload itself; only its length is needed here.
    remoteIndex += imglength
    # Get label length
    trans.read(4, remoteIndex, 0)
    print("Get label length")
    seclength = struct.unpack_from('@I', trans.buf[0:4])[0]
    remoteIndex += 4
    print(seclength)
    remoteIndex += seclength
    # Fetch boxes data from remote memory server
    trans.read(4, remoteIndex, 0)
    print("Get boxes data length")
    thirdlength = struct.unpack_from('@I', trans.buf[0:4])[0]
    remoteIndex += 4
    print(thirdlength)
    # Read the boxes payload in 1 MB chunks, landing them contiguously at
    # local offsets 0..thirdlength in trans.buf.
    begin = 0
    blockSize = 1000000
    while begin + blockSize < thirdlength:
        trans.read(blockSize, remoteIndex, begin)
        begin += blockSize
        remoteIndex += blockSize
    trans.read(thirdlength - begin, remoteIndex, begin)
    remoteIndex += thirdlength - begin
    print("Finish read boxes data")
    boxesBytes = trans.buf[0:thirdlength]
    serializedBoxes = pickle.loads(boxesBytes)
    boxes = list()
    for box in serializedBoxes:
        boxes.append(Box.deserialize(box))
    # Fetch predictions data from remote memory server (same chunked scheme).
    trans.read(4, remoteIndex, 0)
    print("Get predictions data length")
    predictlength = struct.unpack_from('@I', trans.buf[0:4])[0]
    remoteIndex += 4
    print(predictlength)
    begin = 0
    while begin + blockSize < predictlength:
        trans.read(blockSize, remoteIndex, begin)
        begin += blockSize
        remoteIndex += blockSize
    trans.read(predictlength - begin, remoteIndex, begin)
    remoteIndex += predictlength - begin
    # NOTE(review): this message says "boxes" but reports the predictions
    # read (copy-paste leftover).
    print("Finish read boxes data")
    predictBytes = trans.buf[0:predictlength]
    predictions = pickle.loads(predictBytes)
    # serialize predictions data
    predictBytes = pickle.dumps(predictions)
    length = len(predictBytes)
    struct.pack_into('@I', trans.buf, 0, length)
    # NOTE(review): only the 4-byte length is written back here -- the
    # serialized predictions payload itself is never written to the remote
    # buffer.  Verify this is intentional.
    trans.write(4, remoteIndex, 0)
    remoteIndex += 4
    # Annotate the local image with each box and its predicted label.
    filename = "data/1234567890.png"
    output_name = "data/1234567890_ocr.png"
    img = cv2.imread(filename)
    h, w, _ = img.shape
    for box, prediction in zip(boxes, predictions):
        img = cv2.rectangle(img, (box.wmin, box.hmin), (box.wmax, box.hmax), (87, 201, 0), 2)
        img = cv2.putText(img, str(prediction), ((box.wmin + box.wmax) // 2, box.hmin), cv2.FONT_HERSHEY_COMPLEX, min(h, w) / 500, (255, 144, 30), 2)
    cv2.imwrite(output_name, img)
    print("Finish output image")
    return {}
| zerotrac/CSE291_mnist | Mnist/func5.o.py | func5.o.py | py | 3,651 | python | en | code | 2 | github-code | 13 |
17531066389 | import sys
import logging
import warnings
import re
import time
import uuid
from weakref import ref
from weakref import WeakSet
from .. import _p4p
from .._p4p import (Server as _Server,
StaticProvider as _StaticProvider,
DynamicProvider as _DynamicProvider,
ServerOperation,
)
if sys.version_info >= (3, 0):
unicode = str
_log = logging.getLogger(__name__)
__all__ = (
'Server',
'installProvider',
'removeProvider',
'StaticProvider',
'DynamicProvider',
'ServerOperation',
)
def installProvider(name, provider):
_p4p._providers[name] = ref(provider)
def removeProvider(name):
_p4p._providers.pop(name, None)
def clearProviders():
_p4p._providers.clear()
class Server(object):
"""Server(conf=None, useenv=True, providers=[""])
:param providers: A list of provider names or instances. See below.
:param dict conf: Configuration keys for the server. Uses same names as environment variables (aka. EPICS_PVAS_*)
:param bool useenv: Whether to use process environment in addition to provided config.
:param bool isolate: If True, override conf= and useenv= to select a configuration suitable for isolated testing.
eg. listening only on localhost with a randomly chosen port number. Use `conf()` to determine
which port is being used.
Run a PVAccess server serving Channels from the listed providers.
The server is running after construction, until stop(). ::
S = Server(providers=["example"])
# do something else
S.stop()
As a convenience, a Server may be used as a context manager to automatically `stop()`. ::
with Server(providers=["example"]) as S:
# do something else
When configuring a Server, conf keys provided to the constructor have the same name as the environment variables.
If both are given, then the provided conf dict is used.
Call Server.conf() to see a list of valid server (EPICS_PVAS_*) key names and the actual values.
The providers list may contain: name strings (cf. installProvider()),
`StaticProvider` or `DynamicProvider` instances, or a dict "{'pv:name':`SharedPV`}" to implicitly creat a `StaticProvider`.
Each entry may also be a tuple "(provider, order)" where "provider" is any of the allowed types,
and "order" is an integer used to resolve ambiguity if more than one provider may claim a PV name.
(lower numbers are queried first, the default order is 0)
"""
def __init__(self, providers, isolate=False, **kws):
self.__keep_alive = [] # ick...
if isinstance(providers, (bytes, unicode)):
providers = providers.split() # split on space
warnings.warn("Server providers list should be a list", DeprecationWarning)
Ps = []
for provider in providers:
if isinstance(provider, tuple):
provider, order = provider
elif hasattr(provider, 'order'):
order = provider.order
else:
order = 0
if isinstance(provider, (bytes, unicode)):
if not re.match(r'^[^ \t\n\r]+$', provider):
raise ValueError("Invalid provider name: '%s'"%provider)
Ps.append((provider, order))
elif isinstance(provider, (_StaticProvider, _DynamicProvider, _p4p.Source)):
Ps.append((provider, order))
elif hasattr(provider, 'items'):
P = StaticProvider()
for name, pv in provider.items():
P.add(name, pv)
Ps.append((P, order))
# Normally user code is responsible for keeping the StaticProvider alive.
# Not possible in this case though.
self.__keep_alive.append(P)
else:
raise ValueError("providers=[] must be a list of string, SharedPV, or dict. Not %s"%provider)
if isolate:
assert 'useenv' not in kws and 'conf' not in kws, kws
kws['useenv'] = False
kws['conf'] = {
'EPICS_PVAS_INTF_ADDR_LIST': '127.0.0.1',
'EPICS_PVA_ADDR_LIST': '127.0.0.1',
'EPICS_PVA_AUTO_ADDR_LIST': '0',
'EPICS_PVA_SERVER_PORT': '0',
'EPICS_PVA_BROADCAST_PORT': '0',
}
_log.debug("Starting Server isolated=%s, %s", isolate, kws)
self._S = _Server(providers=Ps, **kws)
self.tostr = self._S.tostr
self._S.start()
try:
if _log.isEnabledFor(logging.DEBUG):
_log.debug("New Server: %s", self.tostr(5))
except:
self._S.stop()
raise
def __enter__(self):
return self
def __exit__(self, A, B, C):
self.stop()
def conf(self):
"""Return a dict() with the effective configuration this server is using.
Suitable to pass to another Server to duplicate this configuration,
or to a client Context to allow it to connect to this server. ::
with Server(providers=["..."], isolate=True) as S:
with p4p.client.thread.Context('pva', conf=S.conf(), useenv=False) as C:
print(C.get("pv:name"))
"""
return self._S.conf()
def stop(self):
"""Force server to stop serving, and close connections to existing clients.
"""
_log.debug("Stopping Server")
self._S.stop()
self.__keep_alive = []
@classmethod
def forever(klass, *args, **kws):
"""Create a server and block the calling thread until KeyboardInterrupt.
Shorthand for: ::
with Server(*args, **kws):
try;
time.sleep(99999999)
except KeyboardInterrupt:
pass
"""
with klass(*args, **kws):
_log.info("Running server")
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
pass
finally:
_log.info("Stopping server")
class StaticProvider(_StaticProvider):
"""A channel provider which servers from a clearly defined list of names.
This list may change at any time.
:param str name: Provider name. Must be unique within the local context in which it is used.
None, the default, will choose an appropriate value.
"""
def __init__(self, name=None):
if name is None:
# Caller doesn't care. Pick something unique w/o spaces
name = str(uuid.uuid4())
super(StaticProvider, self).__init__(name)
class DynamicProvider(_DynamicProvider):
    """A channel provider which does not maintain a list of provided channel names.

    The following example shows a simple case, in fact so simple that StaticProvider
    is a better fit. ::

        class DynHandler(object):
            def __init__(self):
                self.pv = SharedPV()
            def testChannel(self, name): # return True, False, or DynamicProvider.NotYet
                return name=="blah"
            def makeChannel(self, name, peer):
                assert name=="blah"
                return self.pv

        provider = DynamicProvider("arbitrary", DynHandler())
        server = Server(providers=[provider])
    """
    # Return from Handler.testChannel() to prevent caching of negative result.
    # Use when testChannel('name') might shortly return True
    NotYet = b'nocache'
    def __init__(self, name, handler):
        # The user handler is wrapped so that its exceptions are logged
        # instead of propagating into the C extension machinery.
        _DynamicProvider.__init__(self, name, self._WrapHandler(handler))
    class _WrapHandler(object):
        "Wrapper around user Handler which logs exception"
        def __init__(self, real):
            self._real = real
        def testChannel(self, name):
            # On any handler error, logs it and implicitly returns None
            # (the channel is then treated as not provided).
            try:
                return self._real.testChannel(name)
            except:
                _log.exception("Unexpected")
        def makeChannel(self, name, peer):
            # Same contract as testChannel: errors are logged, None returned.
            try:
                return self._real.makeChannel(name, peer)
            except:
                _log.exception("Unexpected")
def _cleanup_servers():
    """Stop every Server instance still running (interpreter cleanup hook)."""
    _log.debug("Stopping all Server instances")
    # Snapshot the collection first: stopping a server may mutate it.
    for srv in list(_p4p.all_servers):
        srv.stop()
| mdavidsaver/p4p | src/p4p/server/__init__.py | __init__.py | py | 8,516 | python | en | code | 20 | github-code | 13 |
#Name: Shezan Alam
#Email: shezan.alam48@myhunter.cuny.edu
#Date: October 4th, 2019
#Imports the turtle commands
import turtle
#Creates a turtle named: taylor
taylor = turtle.Turtle()
# Draw a spiral: constant 25-unit steps while the left-turn angle shrinks
# from 90 down to 2 degrees, so the path gradually straightens out.
for i in range(90,0,-2):
    taylor.forward(25)
    taylor.left(i)
| shezalam29/simple-python-projects | SpiralSA.py | SpiralSA.py | py | 262 | python | en | code | 0 | github-code | 13 |
69797748817 | from faker import Faker
import json
def write_json(path="data.json", count=1000):
    """Generate ``count`` fake person records and write them to ``path`` as JSON.

    Each record holds a fake name, an age drawn from 20-65 inclusive, and a
    fake city.  The defaults reproduce the original behaviour (1000 records
    written to data.json in the working directory).
    """
    fake = Faker()
    fakedata = [
        {
            "name": fake.name(),
            "age": fake.random_int(min=20, max=65, step=1),
            "city": fake.city(),
        }
        for _ in range(count)
    ]
    # json.dump emits no newlines here, so the previous newline='' was a no-op.
    with open(path, "w") as f:
        json.dump(fakedata, f)


write_json()
### ML/AI/Geodata utils
# from pyrsgis import raster
# import torch
import numpy as np
import matplotlib.pyplot as plt
# from torch.utils.data import Dataset, DataLoader
# from torchvision import transforms, utils
import fiona
import rioxarray
from rioxarray import merge
import rasterio
import rasterstats
from rasterio import Affine # or from affine import Affine
from rasterio.mask import mask
from rasterio.plot import show, reshape_as_image
from rasterio.features import rasterize
from shapely.geometry import mapping, Point, Polygon
from shapely.ops import cascaded_union
import cv2
# import shapefile # pyshp
# for evaluating the model
# from torch.autograd import Variable
# from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
# from torch.optim import Adam, SGD
### Path utils
from pathlib import Path
import os
import glob
### Data utils
import pandas as pd
import geopandas as gpd
##########################################################################################
import sys, math
def nztm2latlong(X, Y):
    """Convert NZTM2000 easting/northing (X=easting, Y=northing, metres)
    to a (latitude, longitude) tuple in decimal degrees.

    The constants match the published NZTM2000 definition (GRS80 ellipsoid,
    central meridian 173E, false easting 1 600 000 m, false northing
    10 000 000 m, central-meridian scale factor 0.9996); the body is a
    series-expansion inverse Transverse Mercator projection.
    """
    # GRS80 ellipsoid: semi-major axis (m) and flattening.
    a = 6378137
    f = 1 / 298.257222101
    # NZTM2000 projection parameters: origin latitude/longitude (deg),
    # false northing/easting (m), scale factor on the central meridian.
    phizero = 0
    lambdazero = 173
    Nzero = 10000000
    Ezero = 1600000
    kzero = 0.9996
    # Note the argument order: X is the easting (E), Y the northing (N).
    N, E = Y, X
    b = a * (1 - f)
    esq = 2 * f - pow(f, 2)  # first eccentricity squared
    # Z0/A2/A4/A6 are meridian-arc coefficients; they are computed but never
    # used below (the arc length is inverted via the smn series instead).
    Z0 = 1 - esq / 4 - 3 * pow(esq, 2) / 64 - 5 * pow(esq, 3) / 256
    A2 = 0.375 * (esq + pow(esq, 2) / 4 + 15 * pow(esq, 3) / 128)
    A4 = 15 * (pow(esq, 2) + 3 * pow(esq, 2) / 4) / 256
    A6 = 35 * pow(esq, 3) / 3072
    # Un-scaled meridian distance north of the projection origin.
    Nprime = N - Nzero
    mprime = Nprime / kzero
    smn = (a - b) / (a + b)  # third flattening, usually written n
    # Mean length of one degree of meridian arc.
    G = a * (1 - smn) * (1 - pow(smn, 2)) * (1 + 9 * pow(smn, 2) / 4 + 225 * pow(smn, 4) / 64) * math.pi / 180.0
    sigma = mprime * math.pi / (180 * G)
    # Footpoint latitude (radians) from the inverse meridian-arc series.
    phiprime = sigma + (3 * smn / 2 - 27 * pow(smn, 3) / 32) * math.sin(2 * sigma) + (21 * pow(smn, 2) / 16 - 55 * pow(smn, 4) / 32) * math.sin(4 * sigma) + (151 * pow(smn, 3) / 96) * math.sin(6 * sigma) + (1097 * pow(smn, 4) / 512) * math.sin(8 * sigma)
    # Radii of curvature at the footpoint latitude: meridian (rhoprime) and
    # prime vertical (upsilonprime).
    # NOTE(review): the standard meridian-radius formula uses sin^2(phi')
    # inside the parenthesis (compare upsilonprime below); this line squares
    # (1 - esq*sin(phi')) instead -- verify against the NZTM2000 spec.
    rhoprime = a * (1 - esq) / pow(pow((1 - esq * math.sin(phiprime)), 2), 1.5)
    upsilonprime = a / math.sqrt(1 - esq * pow(math.sin(phiprime), 2))
    psiprime = upsilonprime / rhoprime
    tprime = math.tan(phiprime)
    Eprime = E - Ezero
    chi = Eprime / (kzero * upsilonprime)
    # Latitude correction series (term_1..term_4).
    term_1 = tprime * Eprime * chi / (kzero * rhoprime * 2)
    term_2 = term_1 * pow(chi, 2) / 12 * (-4 * pow(psiprime, 2) + 9 * psiprime * (1 - pow(tprime, 2)) + 12 * pow(tprime, 2))
    term_3 = tprime * Eprime * pow(chi, 5) / (kzero * rhoprime * 720) * (8 * pow(psiprime, 4) * (11 - 24 * pow(tprime, 2)) - 12 * pow(psiprime, 3) * (21 - 71 * pow(tprime, 2)) + 15 * pow(psiprime, 2) * (15 - 98 * pow(tprime, 2) + 15 * pow(tprime, 4)) + 180 * psiprime * (5 * pow(tprime, 2) - 3 * pow(tprime, 4)) + 360 * pow(tprime, 4))
    term_4 = tprime * Eprime * pow(chi, 7) / (kzero * rhoprime * 40320) * (1385 + 3633 * pow(tprime, 2) + 4095 * pow(tprime, 4) + 1575 * pow(tprime, 6))
    # Longitude series (term1..term4); 1/cos(phi') is the secant of the
    # footpoint latitude.
    term1 = chi * (1 / math.cos(phiprime))
    term2 = pow(chi, 3) * (1 / math.cos(phiprime)) / 6 * (psiprime + 2 * pow(tprime, 2))
    term3 = pow(chi, 5) * (1 / math.cos(phiprime)) / 120 * (-4 * pow(psiprime, 3) * (1 - 6 * pow(tprime, 2)) + pow(psiprime, 2) * (9 - 68 * pow(tprime, 2)) + 72 * psiprime * pow(tprime, 2) + 24 * pow(tprime, 4))
    term4 = pow(chi, 7) * (1 / math.cos(phiprime)) / 5040 * (61 + 662 * pow(tprime, 2) + 1320 * pow(tprime, 4) + 720 * pow(tprime, 6))
    # Convert back to degrees; longitude is offset from the central meridian.
    latitude = (phiprime - term_1 + term_2 - term_3 + term_4) * 180 / math.pi
    longitude = lambdazero + 180 / math.pi * (term1 - term2 + term3 - term4)
    return (latitude, longitude)
## Path definitions
# Anchor all data paths on the directory containing this script.
file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
parent_dir = file_dir.parent.absolute()
data_dir = Path(os.path.join(parent_dir, "data"))
# Water-quality observations (CSV) and one pansharpened 8-band GeoTIFF tile.
df_water_quality_file = os.path.join(data_dir, "NZRiverMaps_data", "NZRiverMaps_data_water_quality_2021-04-17.csv")
test_tiff = os.path.join(data_dir, "03_Healthy_local_waterways", "3A_Pansharpen_8band", "013930604250_01", "013930604250_01_P001_PSH", "20FEB24223135-S3DS_R2C1-013930604250_01_P001.TIF")
# LINZ Topo50 river vector layers (polygons and centrelines).
rivers_poly_filename = os.path.join(data_dir, "lds-nz-river-polygons-topo-150k-SHP", "nz-river-polygons-topo-150k.shp")
rivers_line_filename = os.path.join(data_dir, "lds-nz-river-centrelines-topo-150k-SHP", "nz-river-centrelines-topo-150k.shp")
# Two imagery directories (P001/P002 scenes); all TIF tiles from both are merged.
test_dir = os.path.join(data_dir, "03_Healthy_local_waterways", "3A_Pansharpen_8band", "013930604250_01", "013930604250_01_P001_PSH")
train_dir = os.path.join(data_dir, "03_Healthy_local_waterways", "3A_Pansharpen_8band", "013930604250_01", "013930604250_01_P002_PSH")
all_images = glob.glob(os.path.join(test_dir, "*.TIF")) + glob.glob(os.path.join(train_dir, "*.TIF"))
## Merge data together
def merge_tiffs(all_image_files):
    """Open each GeoTIFF in ``all_image_files`` and merge them into a single
    raster array, filling gaps with 0.0."""
    rasters = [rioxarray.open_rasterio(path) for path in all_image_files]
    return merge.merge_arrays(rasters, nodata=0.0)
# Merge every tile into one mosaic and write it out.  The variable was
# previously named `all`, which shadowed the builtin of the same name.
merged = merge_tiffs(all_images)
merged.rio.to_raster("out.TIF")
## Load dataframe
df_water_quality = pd.read_csv(df_water_quality_file)
# Project each sampling site's NZTM easting/northing to lat/long columns.
column_of_latlong_tuples = [nztm2latlong(a, b) for (a, b) in zip(df_water_quality["NZTM_Easting"], df_water_quality["NZTM_Northing"])]
df_water_quality['lat'] = [t[0] for t in column_of_latlong_tuples]
df_water_quality['long'] = [t[1] for t in column_of_latlong_tuples]
print(df_water_quality.columns)
## load tiff as raster array
tiff_arr = rioxarray.open_rasterio(test_tiff)
# load datafile as shapefile
# Build point geometries in the raster's CRS so points and imagery align.
points_shape = gpd.GeoDataFrame(df_water_quality, geometry = gpd.points_from_xy(df_water_quality.NZTM_Easting, df_water_quality.NZTM_Northing), crs = tiff_arr.rio.crs)
# tiff_arr = None
# load river shapefiles
# rivers_poly = shapefile.Reader(rivers_poly_filename)
rivers_poly = gpd.read_file(rivers_poly_filename)
# rivers_line = shapefile.Reader(rivers_line_filename)
rivers_line = gpd.read_file(rivers_line_filename)
print(points_shape)
print(rivers_poly)
print(rivers_line)
# def extract_shp_from_tiff(vector, raster_file):
# # extract the geometry in GeoJSON format
# geoms = vector.geometry.values # list of shapely geometries
# geometry = geoms[0] # shapely geometry
# # transform to GeJSON format
# geoms = [mapping(geoms[0])]
# # extract the raster values values within the polygon
# with rasterio.open(raster_file) as src:
# out_image, out_transform = mask(src, geoms, crop=True)
# return out_image
# # no data values of the original raster
# # no_data=src.nodata
# # extract the values of the masked array
# # data = out_image.data[0]
# # extract the row, columns of the valid values
# # row, col = np.where(data != no_data)
# # elev = np.extract(data != no_data, data)
# # T1 = out_transform * Affine.translation(0.5, 0.5) # reference the pixel centre
# rc2xy = lambda r, c: (c, r) * T1
# Normalised difference water index
# NDWI = (XGreen - XNIR) / (XGreen + XNIR)
# NDVI = (NIR - Red) / (NIR + RED)
# def NDWI(raster_file):
# with rasterio.open as src:
# with rasterio.open(url+redband) as src:
# profile = src.profile
# oviews = src.overviews(1) # list of overviews from biggest to smallest
# oview = oviews[1] # Use second-highest resolution overview
# print('Decimation factor= {}'.format(oview))
# red = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))
# for _, geom in rivers_poly.geometry.apply(mapping).iteritems():
# clipped = tiff_arr.rio.clip([geom], rivers_poly.crs)
# cropped.rio.to_raster("out.TIF")
# with Raster(raster, affine, nodata, band_num) as rast:
# features_iter = read_features(vectors, layer)
# for i, feat in enumerate(features_iter):
# geom = shape(feat['geometry'])
# if 'Point' in geom.type:
# geom = boxify_points(geom, rast)
# geom_bounds = tuple(geom.bounds)
# fsrc = rast.read(bounds=geom_bounds)
# create ndarray of rasterized geometry
# rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)
# with fiona.open(rivers_poly_filename) as fds:
# for row in fds:
# clipped = tiff_arr.rio.clip([row["geometry"]], fds.crs_wkt)
# clipped.rio.to_raster("out.TIF")
# with rasterio.open(test_tiff, "r") as src:
# raster_img = src.read()
# raster_meta = src.meta
#
# train_df = gpd.read_file(rivers_poly_filename)
#
# print("CRS Raster: {}, CRS Vector {}".format(train_df.crs, src.crs))
#
# def poly_from_utm(polygon, transform):
# poly_pts = []
#
# poly = cascaded_union(polygon)
# for i in np.array(poly.exterior.coords):
#
# # Convert polygons to the image CRS
# poly_pts.append(~transform * tuple(i))
#
# # Generate a polygon object
# new_poly = Polygon(poly_pts)
# return new_poly
#
#
# # Generate Binary maks
#
# poly_shp = []
# im_size = (src.meta['height'], src.meta['width'])
# for num, row in train_df.iterrows():
# if row['geometry'].geom_type == 'Polygon':
# poly = poly_from_utm(row['geometry'], src.meta['transform'])
# poly_shp.append(poly)
# else:
# for p in row['geometry']:
# poly = poly_from_utm(p, src.meta['transform'])
# poly_shp.append(poly)
#
# mask = rasterize(shapes=poly_shp,
# out_shape=im_size)
#
# # Plot the mask
#
# plt.figure(figsize=(15,15))
# plt.imshow(mask)
#
# mask = mask.astype("uint8")
# save_path = "train.tif"
# bin_mask_meta = src.meta.copy()
# bin_mask_meta.update({'count': 1})
# with rasterio.open(save_path, 'w', **bin_mask_meta) as dst:
# dst.write(mask * 255, 1)
#
#
#
# def generate_mask(raster_path, shape_path, output_path, file_name):
#
# """Function that generates a binary mask from a vector file (shp or geojson)
#
# raster_path = path to the .tif;
#
# shape_path = path to the shapefile or GeoJson.
#
# output_path = Path to save the binary mask.
#
# file_name = Name of the file.
#
# """
#
# #load raster
#
# with rasterio.open(raster_path, "r") as src:
# raster_img = src.read()
# raster_meta = src.meta
#
# #load o shapefile ou GeoJson
# train_df = gpd.read_file(shape_path)
#
# #Verify crs
# if train_df.crs != src.crs:
# print(" Raster crs : {}, Vector crs : {}.\n Convert vector and raster to the same CRS.".format(src.crs,train_df.crs))
#
#
# #Function that generates the mask
# def poly_from_utm(polygon, transform):
# poly_pts = []
#
# poly = cascaded_union(polygon)
# for i in np.array(poly.exterior.coords):
#
# poly_pts.append(~transform * tuple(i))
#
# new_poly = Polygon(poly_pts)
# return new_poly
#
#
# poly_shp = []
# im_size = (src.meta['height'], src.meta['width'])
# for num, row in train_df.iterrows():
# if row['geometry'].geom_type == 'Polygon':
# poly = poly_from_utm(row['geometry'], src.meta['transform'])
# poly_shp.append(poly)
# else:
# for p in row['geometry']:
# poly = poly_from_utm(p, src.meta['transform'])
# poly_shp.append(poly)
#
# mask = rasterize(shapes=poly_shp,
# out_shape=im_size)
#
# #Salve
# mask = mask.astype("uint8")
#
# bin_mask_meta = src.meta.copy()
# bin_mask_meta.update({'count': 1})
# os.chdir(output_path)
# with rasterio.open(file_name, 'w', **bin_mask_meta) as dst:
# dst.write(mask * 255, 1)
#
#
# generate_mask(test_tiff, rivers_poly_filename, "./", "out.tif")
# with fiona.open(rivers_poly_filename, "r") as shapefile:
# shapes = [feature["geometry"] for feature in shapefile]
#
# with rasterio.open(test_tiff) as src:
# out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
# out_meta = src.meta
#
#
# out_meta.update({"driver": "GTiff",
# "height": out_image.shape[1],
# "width": out_image.shape[2],
# "transform": out_transform})
#
# with rasterio.open("RGB.byte.masked.tif", "w", **out_meta) as dest:
# dest.write(out_image)
# print(rasterstats.zonal_stats(rivers_poly_filename, test_tiff, stats="count", raster_out=True)[0].keys())
# cropped.rio.to_raster("out.TIF")
# out_image = extract_shp_from_tiff(rivers_poly, test_tiff)
# print(out_image)
# write shape to file
# points_shape.to_file(driver = 'ESRI Shapefile', filename = os.path.join("datapoints", "data.shp"))
| jakewilliami/scripts | python/geospatial/train.py | train.py | py | 12,595 | python | en | code | 3 | github-code | 13 |
70556135058 | import os
import re
from statistics import mean
import numpy as np
import scipy
from conch import analyze_segments
from conch.analysis.praat import PraatAnalysisFunction
from conch.analysis.segments import SegmentMapping
from conch.analysis.formants import PraatSegmentFormantTrackFunction, FormantTrackFunction, \
PraatSegmentFormantPointFunction
from pyraat.parse_outputs import parse_point_script_output
from ...exceptions import AcousticError
from ..io import point_measures_from_csv, point_measures_to_csv
from ..classes import Track, TimePoint
def sanitize_bandwidths(value):
"""Cleans bandwidth data from dictionary form.
Parameters
----------
value : dict
Observation values produced by reading out from Praat.
Returns
-------
float
The first bandwidth.
float
The second bandwidth.
float
The third bandwidth.
"""
try:
b1 = value['B1'][0]
except TypeError:
b1 = value['B1']
if b1 is None:
b1 = 0
try:
b2 = value['B2'][0]
except TypeError:
b2 = value['B2']
if b2 is None:
b2 = 0
try:
b3 = value['B3'][0]
except TypeError:
b3 = value['B3']
if b3 is None:
b3 = 0
return b1, b2, b3
def track_nformants(track):
"""Gets the number of formants used to arrive at a given track.
Parameters
----------
track : dict
The measured track.
Returns
-------
int
The number of formants used to measure that track
"""
numbers = set(int(x[1]) for x in track.keys() if x.startswith('F'))
return max(numbers)
def parse_multiple_formant_output(output):
output = output.replace(r'\r\n', r'\n')
listing_list = re.split(r'\r?\n\r?\n', output)
to_return = {}
for item in listing_list:
output = parse_point_script_output(item)
reported_nformants = output.pop('num_formants')
to_return[reported_nformants] = output
return to_return
def generate_variable_formants_point_function(corpus_context, min_formants, max_formants):
"""Generates a function used to call Praat to measure formants and bandwidths with variable num_formants.
This specific function returns a single point per formant at a third of the way through the segment
Parameters
----------
corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
The CorpusContext object of the corpus.
min_formants : int
The minimum number of formants to measure with on subsequent passes (default is 4).
max_formants : int
The maximum number of formants to measure with on subsequent passes (default is 7).
Returns
-------
formant_function : Partial function object
The function used to call Praat.
"""
max_freq = 5500
script_dir = os.path.dirname(os.path.abspath(__file__))
script = os.path.join(script_dir, 'multiple_num_formants.praat')
formant_function = PraatAnalysisFunction(script, praat_path=corpus_context.config.praat_path,
arguments=[0.01, 0.025, min_formants, max_formants, max_freq])
formant_function._function._output_parse_function = parse_multiple_formant_output
return formant_function
def generate_formants_point_function(corpus_context, gender=None):
"""Generates a function used to call Praat to measure formants and bandwidths with variable num_formants.
Parameters
----------
corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
The CorpusContext object of the corpus.
min_formants : int
The minimum number of formants to measure with on subsequent passes (default is 4).
max_formants : int
The maximum number of formants to measure with on subsequent passes (default is 7).
Returns
-------
formant_function : Partial function object
The function used to call Praat.
"""
max_freq = 5500
formant_function = PraatSegmentFormantPointFunction(praat_path=corpus_context.config.praat_path,
max_frequency=max_freq, num_formants=5, window_length=0.025,
time_step=0.01)
return formant_function
def get_mean_SD(data, prototype_parameters=None):
"""Generates per-vowel-class means and covariance matrices for an arbitrary set of parameters (such as F1, F2, F3, B1, B2, B3) .
Parameters
----------
corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
The CorpusContext object of the corpus.
data : dict
Track data from which means and covariance matrices will be generated.
Returns
-------
metadata : dict
Means and covariance matrices per vowel class.
"""
if prototype_parameters is None:
prototype_parameters = ['F1', 'F2', 'F3', 'B1', 'B2', 'B3']
metadata = {}
phones = set()
for seg, value in data.items():
phones.add(seg['label'])
for phone in phones:
observation_list = []
for seg, value in data.items():
if seg['label'] == phone:
observation = [value[pp] for pp in prototype_parameters]
# observation = [
# value['F1'],
# value['F2'],
# value['F3'],
# value['B1'],
# value['B2'],
# value['B3']
# ]
observation_list.append([x if x else 0 for x in observation])
# f1_mean, f2_mean, f3_mean = mean(x[0] for x in observation_list), mean(x[1] for x in observation_list), mean(
# x[2] for x in observation_list)
# b1_mean, b2_mean, b3_mean = mean(x[3] for x in observation_list), mean(x[4] for x in observation_list), mean(
# x[5] for x in observation_list)
# all_means = [f1_mean, f2_mean, f3_mean, b1_mean, b2_mean, b3_mean]
all_means = [mean(x[i] for x in observation_list) for i, pp in enumerate(prototype_parameters)]
observation_list = np.array(observation_list)
cov = np.cov(observation_list.T)
measurements = [all_means, cov.tolist()]
metadata[phone] = measurements
return metadata
def get_mahalanobis(prototype, observation, inverse_covariance):
"""Gets the Mahalanobis distance between an observation and the prototype.
Parameters
----------
prototype : list
Prototype data.
observation : list
Given observation of a vowel instance.
inverse_covariance : list
The inverse of the covariance matrix for the vowel class.
Returns
-------
distance : float
The Mahalanobis distance for the observation.
"""
prototype = np.array(prototype)
observation = np.array(observation)
inverse_covariance = np.array(inverse_covariance)
distance = scipy.spatial.distance.mahalanobis(prototype, observation, inverse_covariance)
return distance
def save_formant_point_data(corpus_context, data, num_formants=False):
header = ['id', 'F1', 'F2', 'F3', 'B1', 'B2', 'B3', 'A1', 'A2', 'A3', 'Ax', 'drop_formant']
if num_formants:
header += ['num_formants']
point_measures_to_csv(corpus_context, data, header)
header_info = {}
for h in header:
if h == 'id':
continue
if h != 'num_formants' or h != 'drop_formant':
header_info[h] = float
# elif h != 'Fx':
# header_info[h] = str
else:
header_info[h] = int
point_measures_from_csv(corpus_context, header_info)
def extract_and_save_formant_tracks(corpus_context, data, num_formants=False, stop_check=None, multiprocessing=True):
'''This function takes a dictionary with the best parameters for each vowels, then recalculates the formants
as tracks rather than as points'''
#Dictionary of segment mapping objects where each n_formants has its own segment mapping object
segment_mappings = {}
save_padding = 0.02
for k, v in data.items():
k.begin -= save_padding
k.end += save_padding
if "num_formants" in v:
n_formants = v["num_formants"]
else:
#There was not enough samples, so we use the default n
n_formants = 5
if not n_formants in segment_mappings:
segment_mappings[n_formants] = SegmentMapping()
segment_mappings[n_formants].segments.append(k)
outputs = {}
for n_formants in segment_mappings:
func = PraatSegmentFormantTrackFunction(praat_path=corpus_context.config.praat_path,
max_frequency=5500, num_formants=n_formants,
window_length=0.025,
time_step=0.01)
output = analyze_segments(segment_mappings[n_formants], func,
stop_check=stop_check,
multiprocessing=multiprocessing) # Analyze the phone
outputs.update(output)
formant_tracks = ['F1', 'F2', 'F3', 'B1', 'B2', 'B3']
tracks = {}
for k, v in outputs.items():
vowel_id = k.properties["id"]
track = Track()
for time, formants in v.items():
tp = TimePoint(time)
for f in formant_tracks:
tp.add_value(f, formants[f])
track.add(tp)
if not k["speaker"] in tracks:
tracks[k["speaker"]] = {}
tracks[k["speaker"]][k] = track
if 'formants' not in corpus_context.hierarchy.acoustics:
corpus_context.hierarchy.add_acoustic_properties(corpus_context, 'formants', [(x, float) for x in formant_tracks])
for speaker, track_dict in tracks.items():
corpus_context.save_acoustic_tracks('formants', track_dict, speaker)
def generate_base_formants_function(corpus_context, gender=None, source='praat'):
"""
Parameters
----------
corpus_context : :class:`polyglot.corpus.context.CorpusContext`
The CorpusContext object of the corpus.
gender : str
The gender to use for the function, if "M"(male) then
the max frequency is 5000 Hz, otherwise 5500
source : str
The source of the function, if it is "praat" then the formants
will be calculated with Praat over each segment otherwise
it will simply be tracks
Returns
-------
formant_function : Partial function object
The function used to call Praat.
"""
max_freq = 5500
if gender == 'M':
max_freq = 5000
if source == 'praat':
if getattr(corpus_context.config, 'praat_path', None) is None:
raise (AcousticError('Could not find the Praat executable'))
formant_function = PraatSegmentFormantTrackFunction(praat_path=corpus_context.config.praat_path,
max_frequency=max_freq, num_formants=5, window_length=0.025,
time_step=0.01)
else:
formant_function = FormantTrackFunction(max_frequency=max_freq,
time_step=0.01, num_formants=5,
window_length=0.025)
return formant_function
| MontrealCorpusTools/PolyglotDB | polyglotdb/acoustics/formants/helper.py | helper.py | py | 11,446 | python | en | code | 31 | github-code | 13 |
13952556588 | import os
# Walk the tree under the current directory and report every directory
# whose name starts with "result".
for root, dirnames, _files in os.walk("./"):
    for dirname in dirnames:
        if dirname.startswith("result"):
            print(root + os.sep + dirname)
            benchmark = root[1:]   # strip the leading "." of "./..."
            # assumes a fixed result-directory name layout where characters
            # 25-30 encode the frequency -- TODO confirm
            freq = dirname[25:31]
            filepath = "../powertraces" + benchmark + "/" + freq
# for file in files:
# #print os.path.join(subdir, file)
# filepath = subdir + os.sep + file
## if filepath.endswith(".asm"):
# print (filepath)
| sudam41/SLICER | benchmarks/HotSniper_Output/test.py | test.py | py | 486 | python | en | code | 0 | github-code | 13 |
def power(x, n=2):
    """Return x multiplied by itself n times (n defaults to 2, i.e. square)."""
    result = 1
    remaining = n
    while remaining > 0:
        remaining = remaining - 1
        result = result * x
    return result


print(power(5, 3))
print(power(5))
def add_end_e(L=[]):
    # NOTE: deliberate demonstration of the mutable-default-argument pitfall.
    # The default list is created once at definition time and shared across
    # calls, and the `is None` guard never fires for it, so 'END' accumulates.
    if L is None:
        L = []
    L.append('END')
    return L
print(add_end_e())  # ['END']
print(add_end_e())  # ['END', 'END'] -- the shared default list grew
def add_end(L=None):
    """Append 'END' to L and return it; a fresh list is used when L is omitted.

    Using None (instead of a mutable []) as the default gives every call its
    own new list, avoiding the shared-default pitfall shown above.
    """
    target = [] if L is None else L
    target.append('END')
    return target


print(add_end())
print(add_end())
def calc1(numbers):
    """Return the sum of squares of the values in the iterable ``numbers``."""
    return sum(n * n for n in numbers)


print(calc1([1, 2, 3]))
def calc2(*numbers):
    """Sum of squares of the positional arguments (variadic form of calc1)."""
    return sum(n * n for n in numbers)


print(calc2(1, 2, 3))
nums = (1, 2, 3)
print(calc2(*nums))
# **kw gathers any extra keyword arguments into a dict.
def person(name, age, **kw):
    print('name:', name, 'age:', age, 'other:', kw)
person('Michael', 30)
person('Bob', 35, city='Beijing')
person('Adam', 45, gender='M', job='Engineer')
# Redefinition: the bare * makes city and job keyword-only arguments.
def person(name, age, *, city, job):
    print(name, age, city, job)
person('Jack', 24, city='Beijing', job='Engineer')
# Redefinition: keyword-only arguments may also follow a *args parameter.
def person(name, age, *args, city, job):
    print(name, age, args, city, job)
person('Jack', 24, 1, 2, city='Beijing', job='Engineer')
73832684818 | import time
import pandas as pd
import numpy as np
from textdistance import levenshtein
from lib.config import *
# Per-player match history; keep only rows with xG data, before the
# 2023-24 season cutoff.
df = pd.read_csv("/Users/calvinwalker/Documents/Projects/FPL/data/master_players.csv")
df = df[df['avg_xGBuildup'].notna()]
df = df[df['kickoff_time'] < '2023-08-10']
# FiveThirtyEight SPI match ratings, restricted to the six leagues covered.
spi = pd.read_csv('/Users/calvinwalker/Documents/Projects/FPL/data/soccer-spi/spi_matches.csv')
spi = spi[(spi['league'] == 'Barclays Premier League') | (spi['league'] == 'French Ligue 1') | (spi['league'] == 'English League Championship') |
          (spi['league'] == 'German Bundesliga') | (spi['league'] == 'Italy Serie A') | (spi['league'] == 'Spanish Primera Division')]
def _prefix_match(a, b):
    """True when the common-length prefixes of two team names are within
    edit distance 1 -- catches truncations and one-character spelling
    differences between the two data sources."""
    k = min(len(a), len(b))
    return levenshtein(a[:k], b[:k]) <= 1


# Fuzzy-match SPI team names to the player-data names (excluding pairs known
# to produce false positives), then append pairings the fuzzy match misses.
# The prefix length was previously computed twice per pair inline; it is now
# computed once in _prefix_match.
matches = [(name1, name2) for name1 in np.unique(spi['team1']) for name2 in np.unique(df['h_team'])
           if _prefix_match(name1, name2)
           and name1 != name2 and name1 != 'Brest' and name1 != 'Lecce' and name2 != 'Brest']
matches.append(("AFC Bournemouth", "Bournemouth"))
matches.append(("RB Leipzig", "RasenBallsport Leipzig"))
matches.append(("VfL Wolfsburg", "Wolfsburg"))
matches.append(("Borussia Monchengladbach", "Borussia M.Gladbach"))
matches.append(("SC Freiburg", "Freiburg"))
matches.append(("FC Augsburg", "Augsburg"))
matches.append(("Sevilla FC", "Sevilla"))
matches.append(("AS Monaco", "Monaco"))
matches.append(("St Etienne", "Saint-Etienne"))
matches.append(("1. FC Union Berlin", "Union Berlin"))
matches.append(("Spal", "SPAL 2013"))
matches.append(("AS Roma", "Roma"))
matches.append(("TSG Hoffenheim", "Hoffenheim"))
matches.append(("Athletic Bilbao", "Athletic Club"))
# Rewrite SPI team names (both home and away columns) to the canonical
# player-data spelling.
for name1, name2 in matches:
    spi.loc[spi['team1'] == name1, 'team1'] = name2
    spi.loc[spi['team2'] == name1, 'team2'] = name2
# Only the SPI ratings are needed; drop scores/projections/importance.
spi.drop(columns=['prob1','prob2','probtie', 'proj_score1', 'proj_score2', 'importance1', 'importance2', 'score1','score2', 'xg1', 'xg2', 'nsxg1', 'nsxg2', 'adj_score1', 'adj_score2'], inplace=True)
# Attach pre-match SPI ratings for the home (spi1) and away (spi2) team to
# every fixture row.
for i, row in df.iterrows():
    print(i)
    date, t1, t2 = row['kickoff_time'], row['h_team'], row['a_team']
    # Known bad fixture in the source data; skip it.
    if date == "2023-04-30" and t1 == "Strasbourg" and t2 == "Lyon":
        continue
    if date > '2023-08-10':
        continue
    # Locate the fixture once, instead of re-scanning spi with the same
    # boolean mask for each column.
    fixture = spi[(spi['date'] == date) & (spi['team1'] == t1) & (spi['team2'] == t2)].iloc[0]
    df.loc[i, 'spi1'] = fixture['spi1']
    df.loc[i, 'spi2'] = fixture['spi2']
# df = df.merge(spi, left_on=['kickoff_time', 'h_team', 'a_team'], right_on=['date', 'team1', 'team2'])
print(df.loc[:, ['was_home', 'spi1', 'spi2']])
# for col in df.columns:
#     print(col)
# spi1/spi2 are home/away ratings; re-map them to the player's own team
# vs. the opponent depending on whether the player's team was at home.
df.loc[df['was_home'] == True, 'team_spi'] = df.loc[df['was_home'] == True, 'spi1']
df.loc[df['was_home'] == True, 'opponent_spi'] = df.loc[df['was_home'] == True, 'spi2']
df.loc[df['was_home'] == False, 'team_spi'] = df.loc[df['was_home'] == False, 'spi2']
df.loc[df['was_home'] == False, 'opponent_spi'] = df.loc[df['was_home'] == False, 'spi1']
print(df.loc[:, ['team_spi', 'was_home', 'opponent_spi', 'spi1', 'spi2']])
# Persist the enriched dataset for model training.
df.to_csv('/Users/calvinwalker/Documents/Projects/FPL/data/training_data.csv')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechOceanbaseTestplatformTaskSyncModel(object):
    """Data model for syncing an OceanBase test-platform task's status/result.

    Serialization follows the generated Alipay-SDK convention:
    ``to_alipay_dict`` emits only truthy fields, recursively converting any
    value that itself exposes ``to_alipay_dict``; ``from_alipay_dict`` is
    the inverse and returns None for a falsy input.
    """

    # Serializable fields in wire order.  Single source of truth used by
    # to_alipay_dict()/from_alipay_dict() instead of eight copies of the
    # same per-attribute boilerplate.
    _FIELDS = (
        'branch',
        'commit_id',
        'fail_msg',
        'result_type',
        'task_id',
        'task_status',
        'test_case_result',
        'test_info',
    )

    def __init__(self):
        self._branch = None
        self._commit_id = None
        self._fail_msg = None
        self._result_type = None
        self._task_id = None
        self._task_status = None
        self._test_case_result = None
        self._test_info = None

    @property
    def branch(self):
        return self._branch

    @branch.setter
    def branch(self, value):
        self._branch = value

    @property
    def commit_id(self):
        return self._commit_id

    @commit_id.setter
    def commit_id(self, value):
        self._commit_id = value

    @property
    def fail_msg(self):
        return self._fail_msg

    @fail_msg.setter
    def fail_msg(self, value):
        self._fail_msg = value

    @property
    def result_type(self):
        return self._result_type

    @result_type.setter
    def result_type(self, value):
        self._result_type = value

    @property
    def task_id(self):
        return self._task_id

    @task_id.setter
    def task_id(self, value):
        self._task_id = value

    @property
    def task_status(self):
        return self._task_status

    @task_status.setter
    def task_status(self, value):
        self._task_status = value

    @property
    def test_case_result(self):
        return self._test_case_result

    @test_case_result.setter
    def test_case_result(self, value):
        self._test_case_result = value

    @property
    def test_info(self):
        return self._test_info

    @test_info.setter
    def test_info(self, value):
        self._test_info = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, omitting falsy fields."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Preserve generated-SDK semantics: falsy values
                # (None, 0, '', ...) are skipped entirely.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Rebuild a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AnttechOceanbaseTestplatformTaskSyncModel()
        for name in AnttechOceanbaseTestplatformTaskSyncModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AnttechOceanbaseTestplatformTaskSyncModel.py | AnttechOceanbaseTestplatformTaskSyncModel.py | py | 4,387 | python | en | code | 241 | github-code | 13 |
23407075490 | import unittest
import requests
from app.malware_api import app
class test_malware_api(unittest.TestCase):
    """Integration tests for the malware URL lookup Flask endpoints."""

    def test_url_get(self):
        """A known malware URL is reported as malicious by the GET lookup."""
        with app.test_client() as testClient:
            url = "http://127.0.0.1:5000/v1/urlinfo/malware_url?url=http://222.138.204.18:39382/Mozi.m"
            resp = testClient.get(url)
            assert resp.json[0]['data'] == True

    def test_url_post(self):
        """Posting a malware URL to the add endpoint succeeds."""
        with app.test_client() as testClient:
            url = 'http://127.0.0.1:5000/v1/urlinfo/add_url?url=http://222.138.204.18:39382/Mozi.m'
            resp = testClient.post(url)
            assert resp.json[0]['data'] == True
| enakshi194/url_lookup_service | unit_tests/test_malware_api.py | test_malware_api.py | py | 643 | python | en | code | 0 | github-code | 13 |
8630442891 | # -*- coding: utf-8 -*-
from copy import deepcopy
from Products.MeetingCommunes.profiles.testing import import_data as mc_import_data
from Products.MeetingLalouviere.config import LLO_ITEM_COLLEGE_WF_VALIDATION_LEVELS, DG_GROUP_ID
from Products.MeetingLalouviere.config import LLO_ITEM_COUNCIL_WF_VALIDATION_LEVELS
from Products.PloneMeeting.profiles import UserDescriptor, OrgDescriptor
from Products.PloneMeeting.profiles.testing import import_data as pm_import_data
# Build the MeetingLalouviere testing profile by customising the stock
# MeetingCommunes testing profile.
data = deepcopy(mc_import_data.data)
# USERS
# One test user per extra La Louviere validation level, for both test groups.
pmServiceHead1 = UserDescriptor("pmServiceHead1", [])
pmServiceHead2 = UserDescriptor("pmServiceHead2", [])
pmOfficeManager1 = UserDescriptor("pmOfficeManager1", [])
pmOfficeManager2 = UserDescriptor("pmOfficeManager2", [])
pmDivisionHead1 = UserDescriptor("pmDivisionHead1", [])
pmDivisionHead2 = UserDescriptor("pmDivisionHead2", [])
pmDirector1 = UserDescriptor("pmDirector1", [])
pmDirector2 = UserDescriptor("pmDirector2", [])
pmDg = UserDescriptor("pmDg", [])
pmCreator2 = UserDescriptor("pmCreator2", [])
pmAdviser1 = UserDescriptor("pmAdviser1", [])
pmAdviser2 = UserDescriptor("pmAdviser2", [])
voter1 = UserDescriptor("voter1", [], fullname="M. Voter One")
voter2 = UserDescriptor("voter2", [], fullname="M. Voter Two")
pmAlderman1 = UserDescriptor(
    "pmAlderman1", [], email="pmalderman1@plonemeeting.org", fullname="M. PMAlderman One"
)
# NOTE(review): pmAlderman2 reuses fullname "M. PMAlderman One" -- confirm
# whether "Two" was intended.
pmAlderman2 = UserDescriptor(
    "pmAlderman2", [], email="pmalderman2@plonemeeting.org", fullname="M. PMAlderman One"
)
pmFollowup1 = UserDescriptor("pmFollowup1", [])
pmFollowup2 = UserDescriptor("pmFollowup2", [])
pmBudgetReviewer1 = UserDescriptor("pmBudgetReviewer1", [])
pmBudgetReviewer2 = UserDescriptor("pmBudgetReviewer2", [])
# Inherited users (shared with the base PloneMeeting testing profile).
pmReviewer1 = pm_import_data.pmReviewer1
pmReviewer2 = pm_import_data.pmReviewer2
pmReviewerLevel1 = pm_import_data.pmReviewerLevel1
pmReviewerLevel2 = pm_import_data.pmReviewerLevel2
pmManager = pm_import_data.pmManager
# GROUPS
# 'developers' and 'vendors' come from the base profile's orgs; assign the
# users created above to the custom La Louviere validation groups.
developers = data.orgs[0]
# custom groups
developers.serviceheads.append(pmServiceHead1)
developers.serviceheads.append(pmReviewerLevel1)
developers.serviceheads.append(pmManager)
developers.officemanagers.append(pmOfficeManager1)
developers.officemanagers.append(pmManager)
developers.divisionheads.append(pmDivisionHead1)
developers.divisionheads.append(pmManager)
developers.directors.append(pmDirector1)
developers.directors.append(pmReviewerLevel2)
developers.directors.append(pmReviewer1)
developers.directors.append(pmManager)
developers.budgetimpactreviewers.append(pmManager)
developers.budgetimpactreviewers.append(pmBudgetReviewer1)
developers.alderman.append(pmReviewerLevel2)
developers.alderman.append(pmReviewer1)
developers.alderman.append(pmManager)
developers.alderman.append(pmAlderman1)
developers.followupwriters.append(pmFollowup1)
developers.observers.append(pmFollowup1)
vendors = data.orgs[1]
vendors.serviceheads.append(pmServiceHead2)
vendors.officemanagers.append(pmOfficeManager2)
vendors.divisionheads.append(pmDivisionHead2)
vendors.directors.append(pmDirector2)
vendors.directors.append(pmReviewer2)
vendors.directors.append(pmReviewerLevel2)
vendors.budgetimpactreviewers.append(pmBudgetReviewer2)
vendors.alderman.append(pmReviewer2)
vendors.alderman.append(pmAlderman2)
vendors.alderman.append(pmReviewerLevel2)
vendors.followupwriters.append(pmFollowup2)
vendors.observers.append(pmFollowup2)
# The 'Dg' (general direction) organisation is specific to this profile.
dg = OrgDescriptor(DG_GROUP_ID, 'Dg', u'Dg')
data.orgs += (dg,)
dg.creators.append(pmDg)
dg.directors.append(pmDg)
dg.directors.append(pmManager)
dg.budgetimpactreviewers.append(pmDg)
# COLLEGE
# College meeting config: custom item validation workflow and advice states.
collegeMeeting = deepcopy(mc_import_data.collegeMeeting)
collegeMeeting.itemWFValidationLevels = deepcopy(LLO_ITEM_COLLEGE_WF_VALIDATION_LEVELS)
collegeMeeting.itemAdviceStates = [
    "proposed_to_alderman",
]
collegeMeeting.itemAdviceEditStates = [
    "proposed_to_alderman",
    "validated"
]
usedItemAttributes = list(collegeMeeting.usedItemAttributes) + [u"providedFollowUp",]
collegeMeeting.usedItemAttributes = tuple(usedItemAttributes)
# COUNCIL
# Council meeting config: same customisation pattern as the college config.
councilMeeting = deepcopy(mc_import_data.councilMeeting)
councilMeeting.itemWFValidationLevels = deepcopy(LLO_ITEM_COUNCIL_WF_VALIDATION_LEVELS)
councilMeeting.itemAdviceStates = [
    "proposed_to_director",
]
councilMeeting.itemAdviceEditStates = [
    "proposed_to_director",
    "validated"
]
# councilMeeting.workflowAdaptations = deepcopy(LLO_APPLYED_COUNCIL_WFA)
usedItemAttributes = list(councilMeeting.usedItemAttributes) + [u"committeeTranscript",]
councilMeeting.usedItemAttributes = tuple(usedItemAttributes)
councilMeeting.itemPreferredMeetingStates += ('decided',)
# Register both configs on the profile.
data.meetingConfigs = (collegeMeeting, councilMeeting)
| IMIO/Products.MeetingLalouviere | src/Products/MeetingLalouviere/profiles/testing/import_data.py | import_data.py | py | 4,680 | python | en | code | 0 | github-code | 13 |
34885434504 | import math
# Color configuration based on state.
COLORS = {
    "STANDARD": (255, 255, 255),
    "BARRIER": (0, 0, 0),
    "START": (255, 102, 102),
    "GOAL": (102, 255, 153),
    "OPEN": (255, 153, 0),
    "CLOSED": (0, 153, 204),
    "PATH": (255, 204, 102)
}

# Darkens shade of cells with higher weight. Best results in range 0.2-0.5.
DARKEN_FACTOR = 0.2


def set_color(state, weight):
    """Return the RGB colour for *state*.

    STANDARD cells with weight > 1 are darkened exponentially with the
    weight, so costlier cells render as deeper shades; every other state
    maps straight to its configured colour.

    Args:
        state(str): one of the COLORS keys.
        weight(int): traversal cost of the cell.
    Return:
        color: an (R, G, B) tuple; floats when darkened, ints otherwise.
    """
    base = COLORS[state]
    if state == "STANDARD" and weight > 1:
        shade = (1 - DARKEN_FACTOR) ** weight
        return tuple(channel * shade for channel in base)
    return base
class Cell(object):
    """A single cell of the grid map.

    Holds the display state/colour plus the per-cell bookkeeping used by
    the A* search (scores, parent pointer and neighbour list).
    """

    def __init__(self, row, col, state, weight):
        """Create a cell at grid position (row, col).

        Args:
            row(int): row position of the cell.
            col(int): column position of the cell.
            state(str): one of the COLORS states.
            weight(int): traversal cost of the cell.

        The A*-specific attributes are initialised here too; if more
        algorithms were added, keeping them on the cell would be worth
        revisiting, but for a single algorithm it is tolerable.
        """
        self.row = row
        self.col = col
        self.state = state
        self.weight = weight
        self.color = set_color(self.state, self.weight)
        # A* bookkeeping.
        self.neighbours = []
        self.parent = None
        self.reset_scores()

    def set_state(self, state, weight=None):
        """Change the cell state (optionally its weight) and refresh the colour.

        Passing weight=None keeps the current weight, so callers can change
        state alone without restating it.
        """
        if weight is not None:
            self.weight = weight
        self.state = state
        self.color = set_color(self.state, self.weight)

    def reset_scores(self):
        """Reset the A* g/h/f scores to 'unvisited' (infinity)."""
        self.g = math.inf
        self.h = math.inf
        self.f = self.g + self.h

    def __eq__(self, other):
        """Cells compare equal iff they occupy the same grid position."""
        return (self.row, self.col) == (other.row, other.col)
| fredrvaa/A-star-Visualizer | cell.py | cell.py | py | 2,871 | python | en | code | 0 | github-code | 13 |
74460915538 | import sys
sys.path.insert(0, "..")
import logging
import time
import os
import json
from asyncua.sync import Client, ua
from softioc import softioc, builder
import cothread
from functools import partial
from dbtoolspy import load_template_file, load_database_file
import datetime
# Set by SubHandler on an OPC UA status change; polled by the main loop,
# which exits the process when it becomes True.
forceExit = False
class SubHandler(object):
    """
    Subscription Handler.

    Receives OPC UA subscription callbacks and mirrors value changes into
    the EPICS PVs created in the main block below.  The set* methods must
    be called before the subscription is created.
    """
    def status_change_notification(self, status):
        # Any subscription status change (e.g. lost connection) aborts the
        # whole bridge via the forceExit flag.
        global forceExit
        print("status_change_notification")
        print("status: ", hex(status.Status))
        forceExit = True
    def datachange_notification(self, node, val, data):
        # Map the OPC UA node id back to the EPICS PV it is bridged to.
        epicsName = self.clients[self.clientName]["opcuaToEpicsNames"][str(node)]
        if self.debug:
            print("client Name:", self.clientName)
            print("client Url:", self.clientUrl)
            print("client Node:", node)
            print("client val:", val)
            print("epics name", epicsName)
            print(str(self.epicsPvs[epicsName]["epicsType"]))
        if self.epicsPvs[epicsName]["initialized"]:
            if self.epicsPvs[epicsName]["epicsType"] in ["AI", "BI"]:
                if self.debug:
                    print("val:", val)
                    print("val type:", type(val))
                    print("val name:", epicsName)
                # Datetimes are written to EPICS as POSIX timestamps.
                if isinstance(val, datetime.datetime):
                    self.epicsPvs[epicsName]["pv"].set(datetime.datetime.timestamp(val))
                else:
                    self.epicsPvs[epicsName]["pv"].set(val)
            else:
                if self.epicsPvs[epicsName]["epicsType"] in ["STRINGIN"]:
                    self.epicsPvs[epicsName]["pv"].set(str(val))
                else:
                    if isinstance(val, datetime.datetime):
                        if self.epicsPvs[epicsName]["epicsType"] in ["AI"]:
                            self.epicsPvs[epicsName]["pv"].set(datetime.datetime.timestamp(val))
                        else:
                            self.epicsPvs[epicsName]["pv"].set(str(val))
                    # NOTE(review): this branch re-marks the PV initialized and,
                    # for non-datetime values, writes nothing -- confirm intended.
                    self.epicsPvs[epicsName]["initialized"] = True
        else:
            # First update for this PV: accept any non-None value as-is.
            if val is not None:
                self.epicsPvs[epicsName]["pv"].set(val)
                self.epicsPvs[epicsName]["initialized"] = True
    def event_notification(self, event):
        # NOTE(review): with debug enabled the event is printed twice (the
        # second print is unconditional) -- confirm whether that is intended.
        if self.debug:
            print("Python: New event", event)
        print("Python: New event", event)
    def setClientNameAndUrl(self, name, url):
        # Identity of the OPC UA client this handler serves.
        self.clientName = name
        self.clientUrl = url
    def setClients(self, clients):
        # Shared clients registry (name -> client/sub/name-map dict).
        self.clients = clients
    def setEpicsPvs(self, epicsPvs):
        # Shared EPICS PV registry (PV name -> pv/epicsType/initialized).
        self.epicsPvs = epicsPvs
    def setDebug(self, debug):
        # Enable verbose callback tracing.
        self.debug = debug
if __name__ == "__main__":
def on_epics_pv_update(
val,
opcuaClientName,
epicsPvName,
epicsType,
opcuaType,
opcuaName,
opcuaClients,
ZNAM=None,
ONAM=None,
debug=None,
**kwargs,
):
if debug:
print("on_epics_pv_update val:", val)
print("on_epics_pv_update clientName:", opcuaClientName)
print("on_epics_pv_update epicsPvName:", epicsPvName)
print("on_epics_pv_update epicsType:", epicsType)
client = opcuaClients[opcuaClientName]["client"]
var = client.get_node(opcuaName)
currentValue = var.get_value()
if "BO" in epicsType:
x = int(val) == 1
dv = ua.DataValue(ua.Variant(int(val) == 1, ua.VariantType.Boolean))
var.set_value(dv)
else:
newValue = str(val)
if newValue != str(currentValue):
if opcuaType == "Float":
dv = ua.DataValue(ua.Variant(float(newValue), ua.VariantType.Float))
var.set_value(dv)
elif opcuaType == "Double":
dv = ua.DataValue(
ua.Variant(float(newValue), ua.VariantType.Double)
)
var.set_value(dv)
elif opcuaType == "Uint16":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.UInt16)
)
var.set_value(dv)
elif opcuaType == "Int16":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.Int16)
)
var.set_value(dv)
elif opcuaType == "Int32":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.Int32)
)
var.set_value(dv)
elif opcuaType == "Uint32":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.UInt32)
)
var.set_value(dv)
elif opcuaType == "Int64":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.Int64)
)
var.set_value(dv)
elif opcuaType == "Uint64":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.UInt64)
)
var.set_value(dv)
elif opcuaType == "Byte":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.Byte)
)
var.set_value(dv)
elif opcuaType == "Sbyte":
dv = ua.DataValue(
ua.Variant(int(float(newValue)), ua.VariantType.SByte)
)
var.set_value(dv)
elif opcuaType == "String":
dv = ua.DataValue(ua.Variant(str(newValue), ua.VariantType.String))
var.set_value(dv)
else:
print(f"incorrect opcua type: {opcuaType}")
if debug:
print(
f"on_epics_pv_update pv: {epicsPvName} opcua currentValue: {currentValue}"
)
    # Registry of OPC UA clients (keyed by name) and of created EPICS PVs.
    clients = {}
    epicsPvs = {}
    logging.basicConfig(level=logging.WARNING)
    try:
        # Bridge configuration comes from the environment.
        debug = os.getenv("debug", False) == "True"
        name = os.getenv("name", None)
        url = os.getenv("url", None)
        secure = os.getenv("secure", False) == "True"
        try:
            subscriptionRate = int(os.environ["subscriptionRate"])
        except:
            # NOTE(review): bare except also swallows non-integer values;
            # defaults to a 1000 ms subscription rate.
            subscriptionRate = 1000
        # EPICS record definitions describing which PVs to bridge.
        db = load_database_file("bridge.db")
        clients[name] = {}
        opcuaClient = clients[name]
        opcuaClient["client"] = Client(url)
        if secure:
            opcuaClient["client"].set_security_string(
                "Basic256Sha256,SignAndEncrypt,../certificates/client.der,../certificates/client_private_key.pem"
            )
        opcuaClient["client"].connect()
        # Wire the subscription handler to the shared registries.
        opcuaClient["handler"] = SubHandler()
        opcuaClient["handler"].setClientNameAndUrl(name, url)
        opcuaClient["handler"].setClients(clients)
        opcuaClient["handler"].setEpicsPvs(epicsPvs)
        opcuaClient["handler"].setDebug(debug)
        opcuaClient["sub"] = opcuaClient["client"].create_subscription(
            subscriptionRate, opcuaClient["handler"]
        )
        # Bidirectional OPC UA <-> EPICS name maps.
        opcuaClient["opcuaToEpicsNames"] = {}
        opcuaClient["epicsToOpcuaNames"] = {}
        # Create one EPICS PV per OPCUA_* record in the database.
        for record in db.values():
            DTYP = str(record.fields["DTYP"]).upper()
            if "OPCUA" in DTYP:
                epicsPvName = str(record.name)
                epicsType = str(record.rtyp).upper()
                # DTYP is e.g. "OPCUA_FLOAT" -> opcuaType "Float".
                opcuaType = (DTYP.split("_"))[1].capitalize()
                epicsPvs[epicsPvName] = {}
                epicsPvs[epicsPvName]["initialized"] = False
                # Analog input: forward selected display fields to softioc.
                if "AI" in epicsType:
                    opcuaName = str(record.fields["INP"])
                    fields = {}
                    for field in record.fields:
                        upper = str(field).upper()
                        if upper in ["DESC", "EGU", "HOPR", "LOPR", "PREC"]:
                            fields[upper] = record.fields[field]
                    epicsPvs[epicsPvName]["pv"] = builder.aIn(epicsPvName, **fields)
                    epicsPvs[epicsPvName]["epicsType"] = "AI"
                if "STRINGIN" in epicsType:
                    opcuaName = str(record.fields["INP"])
                    fields = {}
                    for field in record.fields:
                        upper = str(field).upper()
                        if upper in ["DESC"]:
                            fields[upper] = record.fields[field]
                    epicsPvs[epicsPvName]["pv"] = builder.stringIn(
                        epicsPvName, **fields
                    )
                    epicsPvs[epicsPvName]["epicsType"] = "STRINGIN"
                # Output records get an on_update callback that writes the
                # value back to the OPC UA server.
                if "STRINGOUT" in epicsType:
                    opcuaName = str(record.fields["INP"])
                    fields = {}
                    for field in record.fields:
                        upper = str(field).upper()
                        if upper in ["DESC"]:
                            fields[upper] = record.fields[field]
                    epicsPvs[epicsPvName]["pv"] = builder.stringOut(
                        epicsPvName,
                        on_update=partial(
                            on_epics_pv_update,
                            opcuaClientName=name,
                            epicsPvName=epicsPvName,
                            epicsType=epicsType,
                            opcuaName=opcuaName,
                            opcuaType=opcuaType,
                            opcuaClients=clients,
                            debug=debug,
                            **fields,
                        ),
                        **fields,
                    )
                    epicsPvs[epicsPvName]["epicsType"] = "STRINGOUT"
                if "AO" in epicsType:
                    opcuaName = str(record.fields["OUT"])
                    fields = {}
                    for field in record.fields:
                        upper = str(field).upper()
                        if upper in ["DESC", "EGU", "HOPR", "LOPR", "PREC"]:
                            fields[upper] = record.fields[field]
                    epicsPvs[epicsPvName]["pv"] = builder.aOut(
                        epicsPvName,
                        on_update=partial(
                            on_epics_pv_update,
                            opcuaClientName=name,
                            epicsPvName=epicsPvName,
                            epicsType=epicsType,
                            opcuaName=opcuaName,
                            opcuaType=opcuaType,
                            opcuaClients=clients,
                            debug=debug,
                            **fields,
                        ),
                        **fields,
                    )
                    epicsPvs[epicsPvName]["epicsType"] = "AO"
                elif "BO" in epicsType:
                    opcuaName = str(record.fields["OUT"])
                    fields = {}
                    for field in record.fields:
                        upper = str(field).upper()
                        if upper in [
                            "ZNAM",
                            "ONAM",
                            "DESC",
                            "EGU",
                            "HOPR",
                            "LOPR",
                            "PREC",
                        ]:
                            fields[upper] = record.fields[field]
                    epicsPvs[epicsPvName]["pv"] = builder.boolOut(
                        epicsPvName,
                        on_update=partial(
                            on_epics_pv_update,
                            opcuaClientName=name,
                            epicsPvName=epicsPvName,
                            epicsType=epicsType,
                            opcuaName=opcuaName,
                            opcuaType=opcuaType,
                            opcuaClients=clients,
                            **fields,
                        ),
                        **fields,
                    )
                    epicsPvs[epicsPvName]["epicsType"] = "BO"
                elif "BI" in epicsType:
                    opcuaName = str(record.fields["INP"])
                    fields = {}
                    for field in record.fields:
                        upper = str(field).upper()
                        if upper in [
                            "ZNAM",
                            "ONAM",
                            "DESC",
                            "EGU",
                            "HOPR",
                            "LOPR",
                            "PREC",
                        ]:
                            fields[upper] = record.fields[field]
                    epicsPvs[epicsPvName]["pv"] = builder.boolIn(epicsPvName, **fields)
                    epicsPvs[epicsPvName]["epicsType"] = "BI"
                # Register the name mapping and subscribe to node changes.
                opcuaClient["opcuaToEpicsNames"][str(opcuaName)] = str(epicsPvName)
                opcuaClient["epicsToOpcuaNames"][str(epicsPvName)] = str(opcuaName)
                opcuaClient["sub"].subscribe_data_change(
                    opcuaClient["client"].get_node(opcuaName)
                )
    except Exception as e:
        # Any setup failure is reported and aborts the bridge.
        print("exception", e)
        forceExit = True
    # Start the EPICS IOC and run until an OPC UA status change requests exit.
    if not forceExit:
        builder.LoadDatabase()
        softioc.iocInit()
        print("EPICS OPCUA bridge loaded")
        print(f"OPCUA HOST URL: {url}")
        print("\nThe following bridge PVs are loaded:\n")
        softioc.dbgrep("*")
        print("\n")
        try:
            while True:
                cothread.Sleep(0.1)
                if forceExit:
                    exit(1)
        finally:
            # Always drop the OPC UA connections on the way out.
            for clientName in clients:
                client = clients[clientName]["client"]
                client.disconnect()
| React-Automation-Studio/OPCUA-EPICS-BRIDGE | opcuaEpicsBridge/bridge.py | bridge.py | py | 14,055 | python | en | code | 0 | github-code | 13 |
20319084690 | from django.urls import path, include
from . import views
from django.views.generic.base import TemplateView
from rest_framework import routers
from django.conf.urls import url
router = routers.DefaultRouter()

# NOTE(review): `router` is instantiated but never registered with a viewset
# or included in urlpatterns -- confirm whether it is still needed.
#
# The url() entries below were converted to path(): url() compiles an
# UNANCHORED regex, so e.g. url('profile/', ...) matched any URL merely
# containing 'profile/'; path() matches the route exactly and is the
# non-deprecated form already used elsewhere in this file.
urlpatterns = [
    # User profile.
    path('profile/', views.ProfileView.as_view()),
    # All school data.
    path('index/', views.index.as_view(), name='index'),
    # Register a user.
    path('register/', views.RegisterView.as_view()),
    # Home page.
    path('home/', views.HomePageView.as_view()),
    # Upload view page.
    path('uploadView/', views.UploadView.as_view()),
    # Charts.
    path('charts/', views.ChartsView.as_view()),
    # Login.
    path('login/', views.LoginView.as_view()),
    # Data for a single department of a school.
    path('single/<str:schoolName>/<str:departmentName>/', views.singleData.as_view(), name='singleData'),
    # Data for all departments of a single school.
    path('multiple/<str:schoolName>/', views.multipleData.as_view(), name='multipleData'),
    # Upload a data file.
    path('upload/', views.uploadFile, name='uploadFile'),
    # Train the model.
    path('train/', views.trainModel, name='trainModel'),
    # Send data to the oracle.
    path('markov/<str:incomingStudents>/', views.testData.as_view(), name='testData'),
    # Create a user.
    path('createUser', views.createUser, name='createUser'),
    # Send emails.
    path('email', views.sendEmail, name='sendEmail'),
    # Grant a permission.
    path('permission/', views.givePerm, name='givePerm'),
    # List all permissions.
    path('getpermission/', views.getPerm, name='getPerm'),
]
| djbursch/csuSeer-server | server/insert2DB/urls.py | urls.py | py | 1,765 | python | en | code | 0 | github-code | 13 |
42417230059 | # -*- coding: utf-8 -*-
''' This module solves a sudoku, This is actually written by Peter Norvig
Code and Explanation can be found here : norvig.com/sudoku.html'''
def cross(A, B):
    """Cross product: concatenate each element of A with each element of B."""
    return [a + b for a in A for b in B]


digits = '123456789'
rows = 'ABCDEFGHI'
cols = digits
# The 81 square names, 'A1'..'I9'.
squares = cross(rows, cols)
# The 27 units: 9 columns, 9 rows and 9 boxes.
unitlist = ([cross(rows, c) for c in cols] +
            [cross(r, cols) for r in rows] +
            [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')])
# units[s] -> the three units that contain square s.
units = dict((s, [u for u in unitlist if s in u]) for s in squares)
# peers[s] -> the 20 other squares sharing at least one unit with s.
peers = dict((s, set(sum(units[s], [])) - set([s])) for s in squares)
print(peers)
| shreyanshu/sudoku_project | s.py | s.py | py | 722 | python | en | code | 0 | github-code | 13 |
16083305696 | import sys
all_nodes = []


class Node(object):
    """One five-letter word in the word-ladder graph."""

    # Class-level defaults (shadowed by the instance attributes in __init__).
    word = ''
    path_to = []
    letters = [0] * 26
    index = -1

    def __init__(self, word, path_to, letters, index):
        """Store the word, its outgoing edges, letter counts and list position."""
        self.word = word
        self.path_to = path_to
        self.letters = letters
        self.index = index


def make_node(word, path_to, letters, index):
    """Factory wrapper around the Node constructor."""
    return Node(word, path_to, letters, index)
def read_words(wordfile):
    """Load the word list and build the ladder graph.

    Each line's first five characters form one word.  For every word a Node
    is created, wired to all previously loaded nodes via make_pointers(),
    and appended to the module-level all_nodes list (side effect).
    """
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(wordfile, 'r') as f:
        lines = f.readlines()
    for counter, line in enumerate(lines):
        word = line[:5]
        # Count each letter's occurrences (words are assumed lowercase a-z).
        this_letters = [0] * 26
        for i in range(5):
            this_letters[ord(word[i]) - 97] += 1
        new_node = make_node(word, [], this_letters, counter)
        make_pointers(new_node, all_nodes)
        all_nodes.append(new_node)
def make_pointers(n, nodelist): # should be O(n) since it is called in a loop
    """Add word-ladder edges between n and every compatible node in nodelist.

    An edge u -> v exists when the last four letters of u.word (u.word[1:])
    form a sub-multiset of v's letter counts, i.e. v can follow u in the
    ladder.  Both directions are tested in a single pass.
    """
    for temp_node in nodelist:
        nm_path = True # does the edge n --> temp_node ("m") exist?
        l_m = [0]*26 # manual copy of temp_node.letters (instead of .copy(), O(n))
        for i in range(26):
            l_m[i] = temp_node.letters[i]
        mn_path = True # does the edge temp_node ("m") --> n exist?
        l_n = [0]*26 # manual copy of n.letters (instead of .copy(), O(n))
        for i in range(26):
            l_n[i] = n.letters[i]
        # Consume letters 1..4 of each word from the other word's counts;
        # running out of a letter kills that direction.
        for j in range(4):
            if nm_path: # n --> m
                place1 = ord(n.word[j + 1]) - 97
                if l_m[place1] > 0:
                    l_m[place1] -= 1
                else:
                    nm_path = False
            if mn_path: # m --> n
                place2 = ord(temp_node.word[j + 1]) - 97
                if l_n[place2] > 0:
                    l_n[place2] -= 1
                else:
                    mn_path = False
            if nm_path is False and mn_path is False:
                break
        if nm_path:
            n.path_to.append(temp_node)
        if mn_path:
            temp_node.path_to.append(n)
def find_paths(in_file):
    """Compute ladder distances for each 'word1 word2' line of in_file.

    Returns a list holding, per input line, 0 when the two words are equal,
    the BFS ladder distance between them, or -1 when no ladder exists.
    """
    # BUG FIX: the input file handle was never closed.
    with open(in_file, 'r') as infile:
        lines = infile.readlines()
    is_paths = [0] * len(lines)
    for counter, line in enumerate(lines):
        words = line.split()
        word1 = words[0]
        word2 = words[1]
        if word1 == word2:
            is_paths[counter] = 0
            continue
        # Letter-product bucket key for word1.
        # BUG FIX: this previously iterated the leaked GLOBAL `n` (the last
        # node created by the build loop below) instead of word1, so the
        # lookup key belonged to the wrong word.
        new_ind = 1
        for nw in word1:
            new_ind *= (ord(nw) - 96)
        root = None
        bucket = sorted_nodes[new_ind]
        if bucket != 0:
            for c in bucket:
                if c.word == word1:
                    root = c
                    break
        # Unknown start word: no ladder (previously crashed in path_exists).
        is_paths[counter] = -1 if root is None else path_exists(root, word2)
    return is_paths


def path_exists(root, word):
    """Breadth-first search from root; return the ladder length to word, or -1."""
    seen = [False] * len(all_nodes)
    frontier = [root]
    distance = 1
    while frontier:
        next_frontier = []
        for node in frontier:
            for neighbour in node.path_to:
                if neighbour.word == word:
                    return distance
                if not seen[neighbour.index]:
                    seen[neighbour.index] = True
                    next_frontier.append(neighbour)
        frontier = next_frontier
        distance += 1
    return -1


flm = sys.argv[1]
read_words(flm)
# BUG FIX: `sorted_nodes = [ln] * 11881376` filled every slot with references
# to the SAME shared list, so the `== 0` empty-bucket check below never fired
# and every bucket aliased one list holding all nodes.  Seed with the integer
# 0 as the empty-bucket sentinel instead.  The size is 26**5 + 1 because the
# letter-product key of 'zzzzz' is exactly 26**5 (off-by-one in the original).
sorted_nodes = [0] * (11881376 + 1)
# Bucket every node by its letter-product key.
for n in all_nodes:
    ind = 1
    for w in n.word:
        ind *= (ord(w) - 96)
    if sorted_nodes[ind] == 0:
        sorted_nodes[ind] = [n]
    else:
        sorted_nodes[ind].append(n)
fpt = sys.argv[2]
res = find_paths(fpt)
for place in res:
print(place) | marygee/EDAF05 | Labb2/wordladders.py | wordladders.py | py | 3,911 | python | en | code | 0 | github-code | 13 |
15127618722 | import math as m
import os
import pyspark
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
from pyspark.sql.types import *
def quiet_logs(sc):
    """Raise the Spark JVM log4j thresholds to ERROR to silence startup noise.

    Args:
        sc: an object exposing the py4j ``_jvm`` gateway (SparkSession here).
    """
    log4j = sc._jvm.org.apache.log4j
    for logger_name in ("org", "akka"):
        log4j.LogManager.getLogger(logger_name).setLevel(log4j.Level.ERROR)
@f.udf
def get_crime_rate(count, population):
    """Spark UDF: crimes per 1,000 residents for a community area."""
    per_capita = float(count) / float(population)
    return per_capita * 1000
# Spark session writing its result to MongoDB.
# NOTE(review): "comuntiy" in the collection name is part of the live URI --
# renaming it would change the output collection, so it is left as-is.
spark = SparkSession \
    .builder \
    .appName("Count crimes by closest police station distance") \
    .config("spark.mongodb.output.uri", "mongodb://asvsp:asvsp@mongo:27017/asvsp.crime_rate_by_comuntiy_area_avg?authSource=admin") \
    .getOrCreate()
quiet_logs(spark)
# Input locations come from the environment (HDFS namenode + crime CSV path).
HDFS_NAMENODE = os.environ["namenode"]
IN_PATH = os.environ["in_path"]
df = spark.read.format("csv").option('header', 'true').load(HDFS_NAMENODE + IN_PATH)
# Space-delimited census CSV; malformed rows are dropped.
census = spark.read.format("csv").option('header', 'true').option("mode", "DROPMALFORMED").option("delimiter", " ").load(HDFS_NAMENODE + '/data/Census_Data_By_Community_Area.csv')
# Crimes per (community area, year), then averaged down to one value per area.
grouping_cols = ["Community Area", "Year"]
by_comunity_area = df.groupBy(grouping_cols).agg(f.count(f.col("*")).alias("crimes_count"))
by_comunity_area.show()
by_comunity_area = by_comunity_area.groupBy(grouping_cols).agg(f.mean(f.col("crimes_count")).alias("crimes_avg_year"))
by_comunity_area = by_comunity_area.groupBy(["Community Area"]).agg(f.mean(f.col("crimes_avg_year")).alias("crimes_avg"))
by_comunity_area.show()
joined = df.join(by_comunity_area, ["Community Area"]).dropDuplicates()
# Join with census population and compute the per-1000 crime rate via the UDF.
result = joined.join(census, joined["Community Area"] == census["Comunity_number"]) \
    .withColumn('Crime Rate', f.lit(get_crime_rate(f.col("crimes_avg"), f.col("Total population")))) \
    .select("Community Area", "Crime Rate", "Total population", "crimes_avg", "Hispanic", "Non-Hispanic Black", "Non-Hispanic White", "Non-Hispanic Asian").distinct()
result.show()
result.write.format("mongo").mode("overwrite").save()
| mihajlo-perendija/ASVSP | spark-jobs/crime_rate_by_comuntiy_area.py | crime_rate_by_comuntiy_area.py | py | 2,002 | python | en | code | 0 | github-code | 13 |
7151289819 | __author__ = "Trevor Maco <tmaco@cisco.com>"
__copyright__ = "Copyright (c) 2022 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.1"
import sys
import random
import string
from webex_bot.models.command import Command
from webex_bot.models.response import Response
from webexteamssdk import WebexTeamsAPI, ApiError
from config import *
from webex_cards import *
# Define WebexTeamsSDK entry point (BOT_TOKEN comes from config).
api = WebexTeamsAPI(access_token=BOT_TOKEN)
# Locate the configured PA Help Space among the bot's group rooms.
rooms = api.rooms.list(type='group')
roomID = ''
for room in rooms:
    if room.title == HELP_SPACE:
        roomID = room.id
        break
# The Help Space is mandatory: bail out of the process if it is missing.
if roomID == '':
    print('Error: PA Help Space room not found. Please ensure the configured room exists!')
    sys.exit(1)
# Bot avatar URL, reused on every card.
avatar = api.people.me().avatar
class HelpCommand(Command):
    """Bot command that replies with the interactive help card."""

    def __init__(self):
        super().__init__(
            help_message="Help Command",
            delete_previous_message=False
        )

    def execute(self, message, attachment_actions, activity):
        """Return a Response carrying the adaptive help card.

        The card content differs when the command is issued inside the PA
        Help Space itself.
        """
        in_help_space = attachment_actions.roomId == roomID
        response = Response()
        response.attachments = {
            "contentType": "application/vnd.microsoft.card.adaptive",
            "content": get_help_card(avatar, help_space=in_help_space)
        }
        # Fallback text for clients that cannot render adaptive cards.
        response.text = 'Something has gone wrong'
        return response
class RequestPA(Command):
    """Card-based command letting a Radiologist request help from a PA."""

    def __init__(self):
        super().__init__(
            command_keyword="request",
            help_message="Request help from PA Space",
            delete_previous_message=False
        )

    def execute(self, message, attachment_actions, activity):
        """Return the request card, unless invoked from the Help Space itself."""
        response = Response()
        # Requests are rejected inside the Help Space; the command cannot be
        # disabled globally because it still needs to be callable elsewhere.
        if attachment_actions.roomId == roomID:
            response.text = ('Error: requests not allowed in the Help Space! '
                             'Please use "help" command to see valid commands. ')
            return response
        response.attachments = {
            "contentType": "application/vnd.microsoft.card.adaptive",
            "content": get_request_card(avatar)
        }
        # Fallback text for clients that cannot render adaptive cards.
        response.text = "Something has gone wrong"
        return response
class PostToPASpace(Command):
    """Card callback ("submit"): posts a Radiologist's request to the PA Help Space."""
    def __init__(self):
        super().__init__(
            card_callback_keyword="submit",
            help_message="Post card request to PA space",
            delete_previous_message=True
        )
    def execute(self, message, attachment_actions, activity):
        """Validate the card submission and relay it to the Help Space.

        Returns a Response whose text reports success or failure; the posted
        message carries a card with the requester's details and a random id.
        """
        response = Response()
        # Reject attempts to call the reserved callback as a plain command
        # (plain messages have no card 'inputs').
        if not hasattr(attachment_actions, 'inputs'):
            response.text = 'Error: Command is reserved and not callable. Please use "help" command to see valid ' \
                            'commands. '
            return response
        # Populate the request card with the requester's information.
        person = api.people.get(attachment_actions.personId)
        details = attachment_actions.inputs['information']
        # 10-digit id used to correlate the later "accept" callback.
        request_id = ''.join(random.choices(string.digits, k=10))
        if details == '':
            details = 'N/A'
        # Adaptive-card attachment in the format Webex expects.
        attachments = [{
            "contentType": "application/vnd.microsoft.card.adaptive",
            "content": get_response_card(person.displayName, person.id, person.firstName, details, request_id, avatar)
        }]
        try:
            # Post the message with the card to the Help Space.
            api.messages.create(roomId=roomID, text='A new Radiologist request has been submitted.',
                                attachments=attachments)
            response.text = "Request successfully submitted. Once the request is accepted, a 1-1 Webex Space will " \
                            "be created between you and the accepting PA!"
        except ApiError as e:
            response.text = "There was an error with submitting the request {}. Please try again.".format(e)
        return response
class AcceptRequest(Command):
    """Card callback ("accept"): a PA accepts a request; creates a 1-1 space."""
    def __init__(self):
        super().__init__(
            card_callback_keyword="accept",
            help_message="Accept Radiologist request posted to PA space",
            delete_previous_message=True
        )
    def execute(self, message, attachment_actions, activity):
        """Create a 1-1 room between requester and accepter, then remove the bot.

        Returns a Response whose text reports success or failure.
        """
        response = Response()
        # Reject attempts to call the reserved callback as a plain command
        # (plain messages have no card 'inputs').
        if not hasattr(attachment_actions, 'inputs'):
            response.text = 'Error: Command is reserved and not callable. Please use "help" command to see valid ' \
                            'commands. '
            return response
        # Build a room title unique per request via the request id.
        requester_firstname = attachment_actions.inputs['requester_firstname']
        accepter_firstname = api.people.get(attachment_actions.personId).firstName
        request_id = attachment_actions.inputs['request_id']
        room_title = '{} - {} Help Request ({})'.format(requester_firstname, accepter_firstname, request_id)
        # Create the room, add both people, then remove the bot from it.
        try:
            new_room = api.rooms.create(title=room_title)
            accepter_id = attachment_actions.personId
            requester_id = attachment_actions.inputs['requester_id']
            # Add members
            api.memberships.create(roomId=new_room.id, personId=accepter_id)
            api.memberships.create(roomId=new_room.id, personId=requester_id)
            # Remove the bot (it created the room, so it is a member).
            members = api.memberships.list(new_room.id)
            for member in members:
                if member.personDisplayName == BOT_NAME:
                    api.memberships.delete(member.id)
            response.text = "Request {} accepted, 1-1 space successfully created.".format(request_id)
        except ApiError as e:
            response.text = "There was an error with creating or adding members to the 1-1 room {}. Please try again.".format(
                e)
        return response
| gve-sw/gve_devnet_webex_bot_help_request | app.py | app.py | py | 6,360 | python | en | code | 0 | github-code | 13 |
17050928374 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DatadigitalFincloudFinsaasTenantchannelListBatchqueryModel(object):
    """Request model for the tenant-channel list batch query.

    Carries three optional fields: channel_category, status and tenant_code.
    """

    # Field names handled by to_alipay_dict / from_alipay_dict, in
    # serialization order.
    _FIELDS = ('channel_category', 'status', 'tenant_code')

    def __init__(self):
        self._channel_category = None
        self._status = None
        self._tenant_code = None

    @property
    def channel_category(self):
        return self._channel_category

    @channel_category.setter
    def channel_category(self, value):
        self._channel_category = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def tenant_code(self):
        return self._tenant_code

    @tenant_code.setter
    def tenant_code(self, value):
        self._tenant_code = value

    def to_alipay_dict(self):
        """Serialize all truthy fields to a dict, recursing into nested models."""
        params = dict()
        for field_name in self._FIELDS:
            value = getattr(self, field_name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = DatadigitalFincloudFinsaasTenantchannelListBatchqueryModel()
        for field_name in DatadigitalFincloudFinsaasTenantchannelListBatchqueryModel._FIELDS:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/DatadigitalFincloudFinsaasTenantchannelListBatchqueryModel.py | DatadigitalFincloudFinsaasTenantchannelListBatchqueryModel.py | py | 2,016 | python | en | code | 241 | github-code | 13 |
class User():
    """A site user storing a title-cased name and birth year."""

    def __init__(self, first_name, last_name, birth_year):
        self.first_name = first_name.title()
        self.last_name = last_name.title()
        self.birth_year = birth_year
        self.full_name = self.first_name + " " + self.last_name

    def describe_user(self):
        """Print a short profile summary."""
        # BUG FIX: the label previously read "Fist-Name".
        print("First-Name: " + self.first_name + ".")
        print("Last-Name: " + self.last_name + ".")
        print("Birth-Year: " + str(self.birth_year) + ".")

    def greet_user(self):
        """Print a personalised greeting."""
        print("Hello, " + self.full_name + ".")
# Demo: build three users and exercise both reporting helpers.
user_1 = User("eason","zhu",1999)
user_2 = User('kris','wu',1990)
user_3 = User("maggie",'q',1980)
user_1.describe_user()
user_2.greet_user()
user_3.describe_user()
user_3.greet_user() | ZYC0515/LearnPython | Python编程从入门到实践/第九章学习/代码实现/test_9_3.py | test_9_3.py | py | 718 | python | en | code | 0 | github-code | 13 |
27522982511 | # coding=utf-8
import simple_salesforce
from simple_salesforce import SalesforceMalformedRequest
import logging
class SalesforceWrapper:
    """Convenience wrapper around simple_salesforce for leads, person
    accounts, opportunities (TN__c) and match objects (Match2__c).

    The does_*_exist() lookups cache the matching record ids on the
    instance; the corresponding update_*() helpers then operate on them.
    """
    def __init__(self, _email, _password, _security_token, _sandbox):
        self.sf = simple_salesforce.Salesforce(username=_email, password=_password, security_token=_security_token,
                                               sandbox=_sandbox)
        # Keep Salesforce from auto-assigning owners on created records.
        self.sf.headers['Sforce-Auto-Assign'] = 'FALSE'
        # Ids cached by the most recent does_*_exist() calls.
        self.current_lead_ids = []
        self.current_account_ids = []
        self.current_opportunity_ids = []
    @staticmethod
    def escape_query_argument(query_argument):
        """Backslash-escape SOQL special characters in *query_argument*.

        NOTE(review): the backslash replacement runs *after* the quote
        replacements, so backslashes introduced by those earlier steps get
        doubled as well -- confirm this ordering is intended.
        """
        escaped_argument = query_argument.replace("'", r"\'").replace('"', r'\"').replace('\\', r'\\\\').replace("?",
                                                                                                                r"\?").replace(
            "&", r"\&").replace("|", r"\|").replace("!", r"\!").replace("^", r"\^").replace("$", r"\$").replace("*",
                                                                                                               r"\*").replace(
            "+", r"\+").replace("-", r"\-").replace("~", r"\~")
        logging.debug("Escaping {0} to {1}...".format(query_argument, escaped_argument))
        return escaped_argument
    def does_lead_exist(self, email, expa_id):
        """True (and cache lead ids) when a lead matches *email* or *expa_id*."""
        query = "SELECT Id FROM Lead WHERE RecordTypeId = '{0}' AND (Email = '{1}' OR EXPA_ID__c = {2})".format(
            '01220000000MHoeAAG', email, expa_id)
        try:
            query_result = self.sf.query_all(query)
            if self.is_query_result_empty(query_result):
                return False
            else:
                self.current_lead_ids = []
                for record in query_result["records"]:
                    self.current_lead_ids.append(record["Id"])
                return True
        except Exception:
            # NOTE(review): on error this falls through and returns None,
            # which callers can only treat as falsy.
            logging.exception('An error has occured while searching for Salesforce leads!')
    def does_account_exist(self, email, expa_id=None):
        """True (and cache account ids) for a person account by email/EXPA id."""
        query = "SELECT Id FROM Account WHERE PersonEmail = '{0}'".format(email)
        if expa_id is not None:
            query += " OR EXPA_ID__c = {0}".format(expa_id)
        try:
            query_result = self.sf.query_all(query)
            if self.is_query_result_empty(query_result):
                return False
            else:
                self.current_account_ids = []
                for record in query_result["records"]:
                    self.current_account_ids.append(record["Id"])
                return True
        except Exception:
            logging.exception('An error has occured while searching for Salesforce accounts!')
    def does_opportunity_exist(self, expa_id):
        """True (and cache TN__c ids) when an opportunity matches *expa_id*."""
        query = "SELECT Id FROM TN__c WHERE Opportunity_ID__c = {0}".format(expa_id)
        try:
            query_result = self.sf.query_all(query)
            if self.is_query_result_empty(query_result):
                return False
            else:
                self.current_opportunity_ids = []
                for record in query_result["records"]:
                    self.current_opportunity_ids.append(record["Id"])
                return True
        except Exception:
            logging.exception('An error has occured while searching for Salesforce opportunities!')
    @staticmethod
    def is_query_result_empty(query_result):
        """True when a query_all() result holds no records."""
        return query_result is None or query_result["totalSize"] == 0
    def create_account(self, profile_dictionary):
        """Create an Account record; return its Salesforce id."""
        result = self.sf.Account.create(profile_dictionary)
        return result['id']
    def create_lead(self, profile_dictionary):
        """Create a Lead record; return its Salesforce id."""
        result = self.sf.Lead.create(profile_dictionary)
        return result['id']
    def update_lead(self, profile_dictionary):
        """Apply *profile_dictionary* to every lead cached by does_lead_exist()."""
        for record in self.current_lead_ids:
            try:
                self.sf.Lead.update(record, profile_dictionary)
            except SalesforceMalformedRequest as smr:
                logging.warning(smr)
    def update_account(self, profile_dictionary):
        """Update cached accounts after stripping fields that must not change.

        NOTE(review): mutates the caller's *profile_dictionary* in place.
        """
        profile_dictionary.pop("FirstName", None)
        profile_dictionary.pop("LastName", None)
        profile_dictionary.pop("Email", None)
        profile_dictionary.pop("OwnerId", None)
        profile_dictionary.pop("closest_city__c", None)
        profile_dictionary.pop("Minimum_Duration_of_Internship__c", None)
        profile_dictionary.pop("Maximum_Duration_of_Internship__c", None)
        profile_dictionary.pop("EXPA_SignUp_Date__c", None)
        profile_dictionary.pop("Area_of_world_interested_in_going__c", None)
        profile_dictionary.pop("specific_countries__c", None)
        for record in self.current_account_ids:
            try:
                self.sf.Account.update(record, profile_dictionary)
            except SalesforceMalformedRequest as smr:
                logging.warning(smr)
    def update_opportunity(self, opportunity_dictionary, expa_id_company):
        """Update the first cached opportunity; return its id on success."""
        for record in self.current_opportunity_ids:
            try:
                company_sf_id = self.get_company(expa_id_company)
                if company_sf_id is not None:
                    opportunity_dictionary['Account__c'] = company_sf_id
                self.sf.TN__c.update(record, opportunity_dictionary)
                # Returns after the first successful update.
                return record
            except SalesforceMalformedRequest as smr:
                logging.warning(smr)
    def create_opportunity(self, opportunity_dictionary, expa_id_company):
        """Create a TN__c record linked to the company; return its id."""
        company_sf_id = self.get_company(expa_id_company)
        if company_sf_id is not None:
            opportunity_dictionary['Account__c'] = company_sf_id
        result = self.sf.TN__c.create(opportunity_dictionary)
        return result['id']
    def get_company(self, expa_id):
        """Return the Salesforce id of the (non-person) account with *expa_id*."""
        result = None
        query = "SELECT Id FROM Account WHERE EXPA_ID__c = {0} AND IsPersonAccount = false".format(expa_id)
        try:
            query_result = self.sf.query_all(query)
            if not self.is_query_result_empty(query_result):
                for record in query_result["records"]:
                    result = record["Id"]
            return result
        except Exception:
            logging.exception('An error has occured while searching for Salesforce trainees!')
    def get_applicants(self, opportunity_id):
        """List {'expa_id', 'sf_id'} dicts of accounts applying to the opportunity."""
        result = []
        query = "SELECT Id, EXPA_EP_ID__c FROM Account WHERE Opportunity__c = '{0}'".format(opportunity_id)
        try:
            query_result = self.sf.query_all(query)
            if not self.is_query_result_empty(query_result):
                for record in query_result["records"]:
                    logging.info("record: {0}".format(record))
                    result.append({'expa_id': record["EXPA_EP_ID__c"], 'sf_id': record["Id"]})
            return result
        except Exception:
            logging.exception('An error has occured while searching for Salesforce trainees!')
    def does_match_object_exist(self, opportunity_id, applicant_id):
        """True when a Match2__c links *opportunity_id* and *applicant_id*."""
        query = "SELECT Id FROM Match2__c WHERE Opportunity__c = '{0}' AND Trainee__c = '{1}'".format(opportunity_id, applicant_id)
        try:
            query_result = self.sf.query_all(query)
            return not self.is_query_result_empty(query_result)
        except Exception:
            logging.exception('An error has occured while searching for Salesforce match objects!')
            return False
    def update_match_object(self, opportunity_id, match_data, applicant_id):
        """Refresh match/application dates on every matching Match2__c record."""
        query = "SELECT Id FROM Match2__c WHERE Opportunity__c = '{0}' AND Trainee__c = '{1}'".format(opportunity_id, applicant_id)
        try:
            query_result = self.sf.query_all(query)
            for record in query_result["records"]:
                match_dictionary = {"Match_Date__c": match_data['matched_date'], "Application_ID__c": match_data['application_id']}
                if 'realized_date' in match_data:
                    match_dictionary['Realized_Date__c'] = match_data['realized_date']
                self.sf.Match2__c.update(record['Id'], match_dictionary)
        except Exception:
            logging.exception('An error has occured while creating a Salesforce match object!')
    def create_match_object(self, opportunity_id, match_data, account_id):
        """Create a Match2__c linking trainee and opportunity.

        NOTE(review): *account_id* is overwritten by the query loop when a
        matching account exists -- confirm the parameter is only a fallback.
        """
        query = "SELECT Id FROM Account WHERE Opportunity__c = '{0}' AND EXPA_EP_ID__c = {1}".format(opportunity_id, match_data['person'])
        try:
            query_result = self.sf.query_all(query)
            for record in query_result["records"]:
                account_id = record['Id']
            match_dictionary = {"Trainee__c": account_id, "Opportunity__c": opportunity_id, "Match_Date__c": match_data['matched_date']}
            if 'realized_date' in match_data:
                match_dictionary['Realized_Date__c'] = match_data['realized_date']
            self.sf.Match2__c.create(match_dictionary)
        except Exception:
            logging.exception('An error has occured while creating a Salesforce match object!')
| NickPl/gis-sync | salesforce_wrapper.py | salesforce_wrapper.py | py | 9,017 | python | en | code | 0 | github-code | 13 |
3685907973 | import os
from setuptools import find_packages, setup
# Package version; can be overridden via the `tinynn` environment variable.
__version__ = os.getenv('tinynn', '0.1.0')
def setup_tinynn():
    """Configure and run setuptools packaging for tinynn."""
    requires = [
        'numpy'
    ]
    setup(
        name='tinynn',
        version=__version__,
        description='my deep learning study',
        python_requires='>=3.8',
        install_requires=requires,
        packages=find_packages(
            include=['tinynn', 'tinynn.*']),
    )
if __name__ == '__main__':
    setup_tinynn()
| yewentao256/TinyNN | setup.py | setup.py | py | 468 | python | en | code | 12 | github-code | 13 |
31002320074 |
from pandas import DataFrame
from pandas import read_csv, to_datetime
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
from collections import namedtuple
from pandas import concat
from operator import itemgetter
from itertools import product
from sklearn.metrics import mean_squared_error
# Sources:
# https://matplotlib.org/gallery/lines_bars_and_markers/scatter_hist.html#sphx-glr-gallery-lines-bars-and-markers-scatter-hist-py
# https://towardsdatascience.com/the-art-of-effective-visualization-of-multi-dimensional-data-6c7202990c57
# https://becominghuman.ai/linear-regression-in-python-with-pandas-scikit-learn-72574a2ec1a5
# https://nbviewer.jupyter.org/github/rayryeng/make-money-ml-course/blob/master/week2/Week_2_Make_Money_with_Machine_Learning_Homework.ipynb
def load_and_format_csv(filename='./data/AAPL.csv'):
    """Load a CSV file into a DataFrame and parse its 'Date' column.

    Parameters
    ----------
    filename : str
        Path to the CSV file to load.

    Returns
    -------
    DataFrame whose 'Date' column (if present) is datetime64.
    """
    # Interpolate the path into the error so the failure is actionable
    # (the original message never included the filename).
    assert os.path.isfile(filename), f'{filename} not found'
    df = read_csv(filename)  # loading csv
    if "Date" in df.columns:
        df['Date'] = to_datetime(df['Date'], format='%Y-%m-%d')
    return df
def split_training_testing_set(x_df=None, y_df=None, size=50, fraction=0.8):
    """Window *y_df* into samples and split them into train/test sets.

    Each sample is a window of ``size`` consecutive values whose label is
    the value immediately after the window.  The first ``fraction`` of the
    samples becomes the training set, the rest the testing set.

    Returns a namedtuple TT(x_train, y_train, x_test, y_test, size, idx,
    fraction), where ``idx`` is the train/test cut index.
    """
    assert len(x_df) == len(y_df), f'x_df must have size of y_df, now {len(x_df)} != {len(y_df)}'
    nb_samples = len(x_df) - size
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement and produces the same index dtype.
    indices = np.arange(nb_samples).astype(int)[:, None] + np.arange(size + 1).astype(int)
    data = y_df.values[indices]
    X = data[:, :-1]
    Y = data[:, -1]
    idx = int(fraction * nb_samples)
    training_testing = namedtuple('TT', 'x_train y_train x_test y_test size idx fraction')
    return training_testing(X[:idx], Y[:idx], X[idx:], Y[idx:], size, idx, fraction)
class DataCleaning:
    """Helpers for smoothing raw price series."""
    @staticmethod
    def rolling_mean(serie, window):
        # Rolling average; the first (window-1) values come back as NaN.
        return serie.rolling(window=window).mean()
class Regression:
    """Grid-search style comparison of several sklearn linear models."""
    model_params = namedtuple('MP', 'function params')
    # Change params to test if prediction is better or not
    # NOTE(review): the `normalize` kwarg was removed from these sklearn
    # estimators in recent releases -- confirm the pinned sklearn version.
    modelname_fun = {
        'LinearRegression': model_params(linear_model.LinearRegression,
                                         {'fit_intercept': [False, True],
                                          'normalize': [True, False]}),
        'Ridge': model_params(linear_model.Ridge,
                              {'fit_intercept': [False, True],
                               'normalize': [True, False],
                               'alpha': np.arange(0.1, 2., 0.2)}),
        'Lasso': model_params(linear_model.Lasso,
                              {'fit_intercept': [True, False],
                               'normalize': [True, False],
                               'alpha': np.arange(0.1, 2., 0.2),
                               }),
        'LassoLars': model_params(linear_model.LassoLars,
                                  {'fit_intercept': [True, False],
                                   'normalize': [True, False],
                                   'alpha': np.arange(0.1, 2., 0.2),
                                   }),
        'BayesianRidge': model_params(linear_model.BayesianRidge,
                                      {'fit_intercept': [True, False],
                                       'normalize': [True, False],
                                       'n_iter': [300],
                                       })
    }
    @staticmethod
    def get_model(model):
        """Return the (constructor, param-grid) pair registered for *model*."""
        try:
            return Regression.modelname_fun[model]
        except KeyError:
            raise Exception(f'Unknown model {model}')
    @staticmethod
    def linear(df, x_df, y_df, model_name, size, fraction):
        """ Find the best linear regression model comparing configurations
        """
        sets = split_training_testing_set(x_df, y_df, size=size, fraction=fraction)
        results = []
        model_params = Regression.get_model(model_name)
        # For a model, get prediction for all configurations
        config_dicts = [ dict(zip(model_params.params, v)) for v in product(*model_params.params.values())]
        for config_dict in config_dicts:
            config = [f'{k}={v}' for k,v in config_dict.items()]
            name = f'{model_name} s={size} frac={fraction}' + " ".join(config)
            model = model_params.function(**config_dict)
            model.fit(sets.x_train, sets.y_train)
            y_pred = model.predict(sets.x_test)
            # Keep only Date + predictions, aligned with the test window.
            result_df = df.copy()
            result_df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], axis=1, inplace=True)
            result_df.set_index('Date', inplace=True)
            result_df = result_df.iloc[sets.idx + sets.size:]
            result_df[name] = y_pred
            # Each record: (config name, RMSE, aligned prediction frame).
            record = (name, np.sqrt(mean_squared_error(sets.y_test, y_pred)), result_df)
            results.append(record)
        return results
def compare_rolling_mean(df, col='Adj Close'):
    """Plot *col* against rolling means of increasing window sizes.

    Side effect: adds one column per window size to *df*, then shows a
    matplotlib figure.
    NOTE(review): the range starts at 0 and recent pandas rejects
    window=0 -- confirm the first iteration is intended.
    """
    assert col in df.columns, f'missing {col} in df'
    cols = [col]
    for win_size in range(0, 50, 5):
        colname = f'window: {win_size}'
        df[colname] = DataCleaning.rolling_mean(df['Adj Close'],
                                                window=win_size)
        cols.append(colname)
    df[cols].plot(title='Impact of the rolling mean window argument on AdjClose Serie')
    plt.show()
if __name__ == '__main__':
    # Save RSME for each model + params
    model_score = {}
    # Reading source file
    df = load_and_format_csv()
    # Testing several configurations to understand impact of prediction
    sizes = [10, 15, 30, 45, 55] # windows size for prediction (in days)
    fractions = [0.7, 0.8] # sample fraction for training / testing
    dfs = []
    # Run all combinations and save rmse
    for size, fraction, modelname in product(sizes, fractions, Regression.modelname_fun.keys()):
        for (name, rmse, result_df) in Regression.linear(df, df['Date'], df['Adj Close'], modelname, size, fraction):
            model_score[name] = rmse
            # Only the very first frame keeps 'Adj Close'; later frames
            # drop it so concat() does not duplicate the column.
            if dfs:
                result_df.drop('Adj Close', axis=1, inplace=True)
            dfs.append(result_df)
    # Sort results according to RMSE (Best is the first)
    top10 = ['Adj Close'] # keep the Top 10
    for idx, (name, score) in enumerate(sorted(model_score.items(), key=itemgetter(1)), start=1):
        msg = f"[{idx}][model={name}] score={score}"
        if idx == 1:
            msg += " ** BEST"
        print(msg)
        if idx <= 10:
            top10.append(name)
    print("-"*80)
    # Display chart
    compare_df = concat(dfs, axis=1)
    top10_df = compare_df[top10]
    top10_df.plot(grid=True, title='Comparison of multiples linear models')
    plt.show()
| mathbeal/p_stock_prediction | main.py | main.py | py | 6,882 | python | en | code | 0 | github-code | 13 |
35478816328 | import logging
import os
import uvicorn
from fastapi import FastAPI, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from db.models import database, start_db
from schemas.models import (
SearchResult,
DatasetInfo,
FieldsList,
DatasetStat,
Baskets,
StatusCode,
Queries,
)
from utils.generate_models import (
generate_dataset_result,
generate_fields_result,
generate_search_result,
)
from utils.query import SEARCH_QUERY, DATASET_INFO, FIELDS_INFO
from utils.requests_utils import (
send_requests,
buy_dataset_db,
get_dataset_stat,
start_query,
stop_query_db,
get_queries_db,
get_from_basket_db,
drop_from_basket_db,
add_to_basket_db,
)
from utils.tools import get_user_id, logger_configuration
# Application-wide logger set up by the project helper.
log = logger_configuration(logging.getLogger(__name__))
app = FastAPI(
    title="DataZilla",
    version="0.0.1",
    contact={
        "name": "Полный привод",
    },
)
# Allow any origin; the endpoints below also set the CORS header manually.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# DataHub GraphQL endpoint; overridable via the GRAPHQL_URL env var.
GRAPHQL_URL = os.getenv("GRAPHQL_URL", "http://datahub.yc.pbd.ai:9002/api/graphql/")
@app.on_event("startup")
async def startup():
log.info("app startup")
start_db()
await database.connect()
@app.on_event("shutdown")
async def shutdown():
log.info("app shutdown")
await database.disconnect()
@app.get("/search/", response_model=SearchResult)
async def search(
*, query: str, start: int = 0, count: int = 10, request: Request, response: Response
):
user_id = get_user_id(request, log)
log.info(f"{user_id}: search dataset by query={query} start={start} count={count}")
data = {"query": query, "start": start, "count": count}
query = SEARCH_QUERY % data
response.headers["Access-Control-Allow-Origin"] = "*"
return await send_requests(GRAPHQL_URL, query, generate_search_result)
@app.get("/get_dataset/", response_model=DatasetInfo)
async def get_dataset(*, urn: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: get dataset info by name={urn}")
data = {"name": urn}
query = DATASET_INFO % data
response.headers["Access-Control-Allow-Origin"] = "*"
return await send_requests(GRAPHQL_URL, query, generate_dataset_result)
@app.get("/get_fields/", response_model=FieldsList)
async def get_fields(*, urn: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: get dataset fields by urn={urn}")
data = {"name": urn}
query = FIELDS_INFO % data
response.headers["Access-Control-Allow-Origin"] = "*"
return await send_requests(GRAPHQL_URL, query, generate_fields_result)
@app.post("/buy_dataset/", response_model=StatusCode)
async def buy_dataset(*, urn: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: buy dataset={urn}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await buy_dataset_db(user_id, urn)
@app.get("/get_dataset_stat/", response_model=DatasetStat)
async def get_dataset_statistic(*, urn: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: get dataset statistic by urn={urn}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await get_dataset_stat(urn, GRAPHQL_URL)
@app.post("/send_query/", response_model=StatusCode)
async def send_query(*, urn: str, query: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: send query by urn={urn}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await start_query(user_id, urn, query)
@app.get("/stop_query/", response_model=StatusCode)
async def stop_query(*, query_id: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: stop query by id={query_id}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await stop_query_db(query_id)
@app.post("/get_queries/", response_model=Queries)
async def get_queries(*, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: get queries for user_id={user_id}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await get_queries_db(user_id)
@app.post("/add_to_basket/")
async def add_to_basket(*, urn: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: add to basket userId={user_id} urn={urn}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await add_to_basket_db(user_id, urn)
@app.get("/get_from_basket/", response_model=Baskets)
async def get_from_basket(*, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: get from basket userId={user_id}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await get_from_basket_db(user_id)
@app.delete("/drop_from_basket/", response_model=StatusCode)
async def drop_from_basket(*, urn: str, request: Request, response: Response):
user_id = get_user_id(request, log)
log.info(f"{user_id}: drop from basket userId={user_id} urn={urn}")
response.headers["Access-Control-Allow-Origin"] = "*"
return await drop_from_basket_db(user_id, urn)
if __name__ == "__main__":
uvicorn.run(app, port=8000, loop="asyncio")
| Diroman/VTB_more_tech_3_2021 | backend/main.py | main.py | py | 5,562 | python | en | code | 0 | github-code | 13 |
class Solution:
    def maxIncreaseKeepingSkyline(self, grid: List[List[int]]) -> int:
        """Return the max total height increase that keeps both skylines
        (row maxima and column maxima) unchanged.

        Each cell may grow to min(row_max, col_max), which is always >= its
        current height, so summing the per-cell headroom gives the answer.
        Collecting the maxima once makes this O(n*m) instead of the
        original O(n*m*(n+m)) recomputation inside the nested loops.
        """
        row_max = [max(row) for row in grid]        # skyline seen from the side
        col_max = [max(col) for col in zip(*grid)]  # skyline seen from the front
        return sum(
            min(row_max[r], col_max[c]) - cell
            for r, row in enumerate(grid)
            for c, cell in enumerate(row)
        )
| wellslu/LeetCode-Python | medium/Max_Increase_to_Keep_City_Skyline.py | Max_Increase_to_Keep_City_Skyline.py | py | 649 | python | en | code | 3 | github-code | 13 |
16076532170 | import socket
import os, sys
import hashlib
import tqdm
import struct
import time
import shutil
# Server name/address goes in HOST; PORT is the TCP control channel.
HOST = 'localhost'
PORT = 5000
UDP_port = 9999
# Local LFU cache state: filename -> {'Filesize': bytes, 'count': uses}.
cache_dict={}
cache_size=3
# NOTE(review): `dir` shadows the builtin; mkdir raises if folder exists.
dir="./Cachefolder"
os.mkdir(dir)
def IndexGet(command):
    """Request a file index from the server ('shortlist' or 'longlist') and print it."""
    sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.connect((HOST, PORT))
    comm = command.split(' ')
    if comm[1] == 'shortlist':
        send_one_message(sckt, command.encode('utf-8'))
        rec = sckt.recv(1024)
        rec=rec.decode('utf-8')
        recvd = rec.split('\n')
        for filename in recvd:
            print (filename)
    elif (comm[1]=='longlist'):
        send_one_message(sckt, command.encode('utf-8'))
        filelist = sckt.recv(1024)
        filelist=filelist.decode('utf-8')
        filelist=filelist.split("\n")
        for i in range(0,len(filelist)):
            print(filelist[i])
    sckt.close()
    return
def listdirServer(command):
    """Print the server-side directory listing (the 'ls' command)."""
    sckt=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.connect((HOST, PORT))
    send_one_message(sckt, command.encode('utf-8'))
    listoffiles = sckt.recv(1024).decode('utf-8')
    listoffiles = listoffiles.split("\n")
    for filename in listoffiles:
        print(filename)
    sckt.close()
    return
def upload(command):
    """Upload a local file to the server.

    Command shape: 'FileUpload <filename> <TCP|UDP>'.  The control message
    always travels over the TCP socket.
    """
    comm = command.split(' ')
    mode=comm[2]
    filename = comm[1]
    sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.connect((HOST, PORT))
    send_one_message(sckt, command.encode('utf-8'))
    if mode == 'UDP':
        udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        f=open(filename,"rb")
        data = f.read(4096)
        while (data):
            # NOTE(review): the loop breaks after *reading* a short final
            # chunk, so that last chunk is never sent -- verify against
            # the server's expectations.
            if(udp.sendto(data,(HOST,UDP_port))):
                data = f.read(4096)
                if len(data) < 4096:
                    break
        udp.close()
        f.close()
    else:
        with open(filename, 'rb') as sendfile:
            # Iterates line by line; each line is framed as one message.
            for data in sendfile:
                send_one_message(sckt, data)
        print ('Upload Finished with TCP')
    sckt.close()
    return
def cacheshow():
    """Print name and size of every file currently in the local cache."""
    listoffile=os.listdir("./Cachefolder/")
    if len(listoffile) ==0 :
        print("Cache Folder is empty")
    else:
        for files in listoffile:
            filesize = os.stat("./Cachefolder/"+files).st_size
            print("Filename: ", files, " Filesize: ", filesize)
def cacheverify(command):
    """Serve a file from the LFU cache, downloading (and evicting) as needed.

    Command shape: 'Cache verify <filename>'.
    """
    comm = command.split(' ')
    filename = comm[2]
    if filename in cache_dict.keys():
        # Cache hit: bump the usage count for LFU bookkeeping.
        cache_dict[filename]['count']=cache_dict[filename]['count']+1
        print("File already present in cache")
        print("Filename: ",filename," File size: ",cache_dict[filename]['Filesize'])
    else:
        filesize=download('FileDownload'+' '+filename+' '+'TCP','cache')
        no_of_items=len(cache_dict)
        if no_of_items==cache_size:
            # Cache full: evict the least-frequently-used of the 3 entries.
            l1=list(cache_dict.keys())[0]
            l2=list(cache_dict.keys())[1]
            l3=list(cache_dict.keys())[2]
            num1=cache_dict[l1]['count']
            num2=cache_dict[l2]['count']
            num3=cache_dict[l3]['count']
            min_count=min(num1,num2,num3)
            if min_count==num1:
                cache_dict.pop(l1)
                os.remove('./Cachefolder/'+ l1)
            elif min_count==num2:
                cache_dict.pop(l2)
                os.remove('./Cachefolder/'+ l2)
            elif min_count==num3:
                cache_dict.pop(l3)
                os.remove('./Cachefolder/'+ l3)
        cache_dict[filename]={'Filesize': filesize, 'count':1}
def download(command,flag='normaldownload'):
    """Download a file from the server over TCP or UDP.

    Command shape: 'FileDownload <filename> <TCP|UDP>'.  With
    flag='cache' the TCP payload is stored under ./Cachefolder/ and the
    reported file size is returned for the cache bookkeeping.
    """
    sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.connect((HOST, PORT))
    send_one_message(sckt, command.encode('utf-8'))
    comm = command.split(' ')
    filename = comm[1]
    mode=comm[2]
    if mode=='UDP':
        udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        udp.bind((HOST,UDP_port))
        # Leading datagrams carry metadata: chunk count, size, mtime, md5.
        CountC, countaddress = udp.recvfrom(4096)
        tillC = CountC.decode('utf8')
        tillCC = int(tillC)
        C1, c1 = udp.recvfrom(4096)
        C2, c2 = udp.recvfrom(4096)
        C3, c3 = udp.recvfrom(4096)
        print('Downloading %s'%filename + ' of size -> %s B'%int(C1.decode('utf8'))+' Last modified at -> %s'%str(C2.decode('utf8')))
        print('MD5SUM => ',str(C3.decode('utf8')))
        BigC = open(filename, "wb")
        while tillCC != 0:
            ClientBData, clientbAddr = udp.recvfrom(4096)
            BigC.write(ClientBData)
            tillCC = tillCC - 1
        print("File Downloaded using UDP")
        udp.close()
        BigC.close()
    else:
        # TCP path: metadata arrives as length-prefixed messages.
        filesize = recv_one_message(sckt)
        filesize = filesize.decode()
        filetime = recv_one_message(sckt)
        filetime = filetime.decode()
        print('Downloading %s'%filename + ' of size -> %s B'%filesize+' Last modified at -> %s'%filetime)
        filehash = recv_one_message(sckt)
        filehash = filehash.decode()
        print('MD5SUM => ',filehash)
        if flag=='cache':
            with open('./Cachefolder/'+filename, 'wb') as savefile:
                while True:
                    data = sckt.recv(1024)
                    if not data:
                        break
                    savefile.write(data)
            savefile.close()
        else:
            with open(filename, 'wb') as savefile:
                while True:
                    data = sckt.recv(1024)
                    if not data:
                        break
                    savefile.write(data)
            savefile.close()
        print ('Download Finished')
    sckt.close()
    if mode=='TCP':
        return filesize
def md5sum(filename, blocksize=65536):
    """Return the hex MD5 digest of *filename*, or None if it is not a file."""
    if not os.path.isfile(filename):
        return None
    digest = hashlib.md5()
    with open(filename, "rb") as fh:
        while True:
            chunk = fh.read(blocksize)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def send_one_message(sock, data):
    """Write *data* to *sock*, prefixed with its length as a 4-byte
    big-endian header (the framing recv_one_message expects)."""
    header = struct.pack('!I', len(data))
    sock.sendall(header)
    sock.sendall(data)
def recvall(sock, count):
    """Read exactly *count* bytes from *sock*.

    Returns None if the peer closes the connection before enough data
    arrived; b'' when count is 0.
    """
    chunks = []
    remaining = count
    while remaining:
        piece = sock.recv(remaining)
        if not piece:
            return None
        chunks.append(piece)
        remaining -= len(piece)
    return b''.join(chunks)
def recv_one_message(sock):
    """Read one length-prefixed message framed by send_one_message.

    Returns None when the stream ends before a full header arrives.
    """
    header = recvall(sock, 4)
    if header is None:
        return None
    (length,) = struct.unpack('!I', header)
    return recvall(sock, length)
def Verify(command):
    """Compare a local file's MD5 with the server's ('FileHash verify <file>').

    Returns '<file> <server_hash> <mtime>' or None when the file is missing.
    """
    sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.connect((HOST, PORT))
    send_one_message(sckt, command.encode('utf-8'))
    comm = command.split(' ')
    filename = comm[2]
    server_hash = sckt.recv(1024).decode('utf-8')
    client_hash = md5sum(filename)
    if client_hash is None:
        print('Requested filename unavailable')
        sckt.close()
        return None
    lastmod = sckt.recv(1024).decode('utf-8')
    print('Hash from server = %s'%(server_hash))
    print('Hash from client = %s'%(client_hash))
    print('Last Modified ->',lastmod)
    if server_hash == client_hash:
        print('Matched')
    else:
        print('Not Matched')
    sckt.close()
    return filename+' '+server_hash+' '+lastmod
def Checkall(command):
    """Verify every server file's hash against the local copy.

    Returns a list of '<file> <server_hash> <mtime>' summary strings.
    """
    sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.connect((HOST, PORT))
    send_one_message(sckt, command.encode('utf-8'))
    comm = command.split(' ')
    storage = []
    while True:
        # None signals end-of-listing from the server.
        f = recv_one_message(sckt)
        if f is None:
            print('Completed')
            break
        f_hash_client = md5sum(f)
        if f_hash_client is None:
            print('WARNING -> Requested file missing')
        f_hash_server = recv_one_message(sckt)
        f_hash_server = f_hash_server.decode()
        f_last_mod = recv_one_message(sckt)
        f_last_mod = f_last_mod.decode()
        print('Filename -> ',f)
        print('Hash from server = %s'%(f_hash_server))
        print('Hash from client = %s'%(f_hash_client))
        print('Last Modified =>',f_last_mod)
        if f_hash_server == f_hash_client:
            print('Matched')
        else:
            print('Not Matched')
        print('-----------------------------------------------')
        storage.append(f.decode()+' '+f_hash_server+' '+f_last_mod)
    sckt.close()
    return storage
def FileHash(command):
    """Dispatch 'FileHash verify ...' / 'FileHash checkall' to the handlers."""
    comm = command.split(' ')
    if comm[1] == 'verify':
        return Verify(command)
    elif comm[1] == 'checkall':
        return Checkall(command)
def quit(command):
    """Notify the server that this client is disconnecting.

    NOTE(review): shadows the builtin quit().
    """
    sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.connect((HOST, PORT))
    send_one_message(sckt, command.encode('utf-8'))
    sckt.close()
    return
# Interactive command loop: read a command per line and dispatch it.
name = input('Enter your name :-')
print('Welcome to FTP. This is client. Start server first.')
historylist=[]
while(True):
    sys.stdout.write('%s -> ' %name)
    sys.stdout.flush()
    command = sys.stdin.readline().strip()
    historylist.append(command)
    if (command == 'quit'):
        # Tell the server, remove the cache folder, and exit the loop.
        quit(command)
        shutil.rmtree(dir)
        break
    elif(command=='history'):
        for hist in historylist:
            print(hist)
    elif (command == 'lc'):
        # 'lc' = list the *local* working directory.
        path = os.getcwd()
        dirs = os.listdir(path)
        for f in dirs:
            print(f)
    elif (command == 'ls'):
        listdirServer(command)
    else:
        # Multi-word commands: first token selects the handler.
        ip = command.split()
        if (ip[0]=='IndexGet'):
            IndexGet(command)
        elif (ip[0]=='FileHash'):
            filehashdata = FileHash(command)
        elif (ip[0]=='FileUpload'):
            upload(command)
        elif (ip[0]=='FileDownload'):
            download(command)
        elif (ip[0]=='Cache'):
            if (ip[1]=='verify'):
                cacheverify(command)
            elif (ip[1]=='show'):
cacheshow() | AnoushkaVyas/Client-Server-Networking | client.py | client.py | py | 9,865 | python | en | code | 1 | github-code | 13 |
70348935698 | import unittest
from xivo_dao.data_handler.configuration import services
from mock import patch
from hamcrest.core import assert_that
from hamcrest.core.core.isequal import equal_to
class TestConfiguration(unittest.TestCase):
    """Unit tests for the configuration service layer (dao/notifier mocked)."""
    @patch('xivo_dao.data_handler.configuration.dao.is_live_reload_enabled')
    def test_get_live_reload_status(self, is_live_reload_enabled):
        # The service wraps the dao boolean in an {'enabled': ...} payload.
        is_live_reload_enabled.return_value = False
        expected_result = {'enabled': False}
        result = services.get_live_reload_status()
        assert_that(result, equal_to(expected_result))
        is_live_reload_enabled.assert_called_once_with()
    @patch('xivo_dao.data_handler.configuration.notifier.live_reload_status_changed')
    @patch('xivo_dao.data_handler.configuration.dao.set_live_reload_status')
    @patch('xivo_dao.data_handler.configuration.validator.validate_live_reload_data')
    def test_set_live_reload_status(self, validate_live_reload_data, set_live_reload_status, live_reload_status_changed):
        # Setting the status must validate, persist, then notify.
        data = {'enabled': True}
        services.set_live_reload_status(data)
        validate_live_reload_data.assert_called_once_with(data)
        set_live_reload_status.assert_called_once_with(data)
        live_reload_status_changed.assert_called_once_with(data)
| jaunis/xivo-dao | xivo_dao/data_handler/configuration/tests/test_services.py | test_services.py | py | 1,278 | python | en | code | 0 | github-code | 13 |
15214083392 | # -*- coding: utf-8 -*-
from odoo import api, models, fields, tools, _
import re
from datetime import datetime
from lxml import etree, objectify
class AccountEdiFormat(models.Model):
_inherit = 'account.edi.format'
    def _is_compatible_with_journal(self, journal):
        # OVERRIDE: besides sale journals, also accept the Mexican
        # migration journal (code 'MIGR') for CFDI 3.3 documents.
        self.ensure_one()
        if self.code != 'cfdi_3_3':
            return super()._is_compatible_with_journal(journal)
        return (journal.type == 'sale' or journal.code == 'MIGR') and journal.country_code == 'MX'
@api.model
def _l10n_mx_edi_get_serie_and_folio(self, move):
if move.journal_id.code == 'MIGR':
name_numbers = list(re.finditer('\d+', move.ref))
serie_number = move.ref[:name_numbers[-1].start()]
folio_number = name_numbers[-1].group().lstrip('0')
return {
'serie_number': serie_number,
'folio_number': folio_number,
}
else:
return super()._l10n_mx_edi_get_serie_and_folio(move)
    def _create_invoice_from_attachment(self, attachment):
        """Create an invoice from *attachment* and record the import status
        ('done' with the created move id, or 'error') on the attachment."""
        res = super()._create_invoice_from_attachment(attachment)
        if res:
            attachment.write({'res_id': res.id, 'edi_import_status': 'done'})
            # self.env.cr.commit()
        else:
            attachment.write({'edi_import_status': 'error'})
            # self.env.cr.commit()
        return res
    def _update_invoice_from_attachment(self, attachment, invoice):
        """Update *invoice* from *attachment* and record the import status.

        NOTE(review): when the super() call returns a move without lines
        the status is set to 'error'; when it returns a falsy result no
        status is written at all -- confirm that is intended.
        """
        res = super()._update_invoice_from_attachment(attachment, invoice)
        if res:
            if res.line_ids:
                attachment.write({'edi_import_status': 'done'})
                # self.env.cr.commit()
            else:
                attachment.write({'edi_import_status': 'error'})
                # self.env.cr.commit()
        return res
def _is_cfdi_vendor_bill(self, tree):
if self.code == 'cfdi_3_3' and tree.tag == '{http://www.sat.gob.mx/cfd/3}Comprobante':
return True
    def _create_invoice_from_xml_tree(self, filename, tree):
        """Route CFDI vendor bills to the dedicated importer; otherwise
        fall back to the standard EDI XML import."""
        self.ensure_one()
        if self._is_cfdi_vendor_bill(tree):
            # Start from an empty recordset: a new move will be created.
            invoice = self._import_cfdi_vendor_bill(filename, tree, self.env['account.move'])
            return invoice
        res = super()._create_invoice_from_xml_tree(filename, tree)
        return res
    def _update_invoice_from_xml_tree(self, filename, tree, invoice):
        """Route CFDI vendor bills to the dedicated importer (updating
        *invoice*); otherwise fall back to the standard EDI XML import."""
        self.ensure_one()
        if self._is_cfdi_vendor_bill(tree):
            invoice = self._import_cfdi_vendor_bill(filename, tree, invoice)
            return invoice
        res = super()._update_invoice_from_xml_tree(filename, tree, invoice)
        return res
def _import_cfdi_vendor_bill(self, filename, tree, invoice):
""" Decodes a CFDI invoice into an invoice.
:param tree: the cfdi tree to decode.
:param invoice: the invoice to update or an empty recordset.
:returns: the invoice where the cfdi data was imported.
"""
# convert back to string, than objectify
self = self.with_user(self.env.ref('base.partner_root').id)
error_msg = '<ul class="text-danger">Importar %s no es posible:' % filename
invoice = invoice
original_invoice = self.env['account.move']
if invoice and invoice.line_ids:
original_invoice = invoice
invoice = self.env['account.move']
xml = objectify.fromstring(etree.tostring(tree))
if xml.attrib['TipoDeComprobante'] == 'I':
move_type = 'in_invoice'
elif xml.attrib['TipoDeComprobante'] == 'E':
move_type = 'in_refund'
else:
error_msg += '<li>El tipo de comprobante no es I(ngreso) o E(greso)</li></ul>'
invoice = self._post_import_error_message(error_msg, original_invoice, invoice)
return invoice
# load demo RFC if in test mode
if xml.Receptor.attrib['Rfc'] not in (self.env.company.vat, self.env.company.company_registry, 'XAXX010101000', 'XEXX010101000'):
error_msg += '<li>El receptor está mal: %s RFC: %s</li></ul>' % (xml.Receptor.attrib['Nombre'], xml.Receptor.attrib['Rfc'])
invoice = self._post_import_error_message(error_msg, original_invoice, invoice)
return invoice
success_msg = ''
journal = self.env['account.move'].with_context(default_move_type='in_invoice')._get_default_journal()
vendor = self.env['res.partner'].search([('vat', '=', xml.Emisor.attrib['Rfc'])], limit=1)
if not vendor:
vendor = self.env['res.partner'].create({
'name': xml.Emisor.attrib['Nombre'],
'zip': xml.attrib['LugarExpedicion'],
'vat': xml.Emisor.attrib['Rfc'],
'company_type': 'company',
'type': 'contact'
})
# this is not an error message
success_msg += '<p>Se ha creado un nuevo proveedor:' \
'<a href=# data-oe-model=account.move data-oe-id=%d>%s</a></br>' \
'Por favor, verifique los datos de contacto.</p>' % (vendor.id, vendor.name)
ref = ''
if xml.attrib.get('Serie', False):
ref = xml.attrib['Serie']
if xml.attrib.get('Folio', False):
ref += xml.attrib['Folio']
if hasattr(xml, 'Complemento'):
uuid = xml.Complemento.xpath(
'tfd:TimbreFiscalDigital[1]', namespaces={'tfd': 'http://www.sat.gob.mx/TimbreFiscalDigital'}
)[0].attrib.get('UUID')
if ref == '':
ref = uuid
currency = xml.attrib['Moneda']
if currency == 'XXX':
currency = 'MXN'
currency_id = self._retrieve_currency(currency)
if not currency_id:
error_msg += '<li>La moneda %s no está activada en el sistema</li></ul>' % currency
invoice = self._post_import_error_message(error_msg, original_invoice, invoice)
return invoice
invoice_date = datetime.strptime(xml.attrib['Fecha'], '%Y-%m-%dT%H:%M:%S')
# check if same ref already exists... this would be a constraint error later on creation
old_invoice = self.env['account.move'].search([
('move_type', '=', move_type),
('partner_id', '=', vendor.id),
('ref', '=', ref),
('state', '!=', 'cancel')
], limit=1)
if old_invoice:
error_msg += '<p>Ya existe una factura con la misma referencia para el proveedor %s: ' \
'<a href=# data-oe-model=account.move data-oe-id=%d>%s</a></p>' \
% (vendor.name, old_invoice[0].id, old_invoice[0].name)
invoice = self._post_import_error_message(error_msg, original_invoice, invoice)
return invoice
invoice_line_vals = []
seq = 0
for line in xml.Conceptos.Concepto:
# treat first tax ids
tax_ids = []
if hasattr(line, 'Impuestos'):
for impuesto in line.Impuestos:
if hasattr(impuesto, 'Traslados'):
for traslado in line.Impuestos.Traslados.Traslado:
if traslado.attrib['TipoFactor'] != 'Exento':
tax_group = {'001': 'ISR Traslado', '002': 'IVA Traslado', '003': 'IEPS Traslado'}. \
get(traslado.attrib['Impuesto'])
tax = self.env['account.tax'].search([
('company_id', '=', self.env.company.id),
('type_tax_use', '=', journal.type),
('tax_group_id.name', '=', tax_group),
('amount', '=', float(traslado.attrib['TasaOCuota']) * 100),
], limit=1)
if not tax:
error_msg += '<li>El impuesto TRASLADO no está configurado en el sistema: ' \
'Impuesto=%s TipoFactor=%s TasaOCuota=%s</li></ul>' \
% (traslado.attrib['Impuesto'], traslado.attrib['TipoFactor'],
traslado.attrib['TasaOCuota'])
invoice = self._post_import_error_message(error_msg, original_invoice, invoice)
return invoice
tax_ids.append(tax.id)
if hasattr(impuesto, 'Retenciones'):
for retencion in line.Impuestos.Retenciones.Retencion:
tax_group = {'001': 'ISR Retenido', '002': 'IVA Retenido', '003': 'IEPS Retenido'}. \
get(retencion.attrib['Impuesto'])
tax = self.env['account.tax'].search([
('company_id', '=', self.env.company.id),
('type_tax_use', '=', journal.type),
('tax_group_id.name', '=', tax_group),
('amount', '=', -float(retencion.attrib['TasaOCuota']) * 100),
], limit=1)
if not tax:
error_msg += '<li>El impuesto de RETENCION no está configurado en el sistema: ' \
'Impuesto=%s TipoFactor=%s TasaOCuota=%s</li></ul>' \
% (retencion.attrib['Impuesto'], retencion.attrib['TipoFactor'],
retencion.attrib['TasaOCuota'])
invoice = self._post_import_error_message(error_msg, original_invoice, invoice)
return invoice
tax_ids.append(tax.id)
seq += 1
code = False
if line.get('NoIdentificacion', False):
code = line.attrib['NoIdentificacion']
product = self._search_product(vendor, code, line.attrib['Descripcion'], line.attrib['ClaveProdServ'])
if line.get('Descuento', False):
discount = float(line.attrib['Descuento']) / float(line.attrib['Importe']) * 100
else:
discount = False
invoice_line_vals.append((0, 0, {
'sequence': seq,
'name': line.attrib['Descripcion'],
'product_id': product and product.id or False,
'product_uom_id': product and product.uom_po_id.id or False,
'quantity': float(line.attrib['Cantidad']),
'discount': discount,
'price_unit': float(line.attrib['ValorUnitario']),
'tax_ids': [(6, 0, tax_ids)],
# 'analytic_account_id': xid,
# 'analytic_tag_ids': [(6, 0, xid)],
}))
invoice_vals = {
'company_id': self.env.company.id,
'ref': ref,
'move_type': move_type,
'invoice_date': invoice_date,
'currency_id': currency_id.id,
'partner_id': vendor.id,
'invoice_payment_term_id': vendor.property_supplier_payment_term_id.id,
'journal_id': journal.id,
'invoice_line_ids': invoice_line_vals,
'narration': 'UUID: ' + uuid,
}
if invoice:
invoice.write(invoice_vals)
else:
invoice = invoice.with_context(default_move_type=move_type).create([invoice_vals])
# self.env.cr.commit()
if success_msg:
invoice.message_post(body=success_msg)
return invoice
def _post_import_error_message(self, msg, original_invoice, invoice):
if original_invoice:
original_invoice.message_post(body=msg)
return original_invoice
elif invoice:
invoice.message_post(body=msg)
return invoice
else:
new_invoice = self.env['account.move'].with_user(self.env.ref('base.partner_root').id).create({'move_type': 'in_invoice'})
new_invoice.with_user(self.env.ref('base.partner_root').id).message_post(body=msg)
return new_invoice
def _search_product(self, vendor, code, descr, clave):
product = self.env['product.product']
if code:
product = product.search([('default_code', '=', code)], limit=1)
if not product:
product = product.search([
'|',
('default_code', '=', descr),
('name', 'ilike', descr)
], limit=1)
if not product:
seller_id = self.env['product.supplierinfo'].search([
('name', '=', vendor.id),
'|',
('product_code', '=', code),
('product_name', 'ilike', descr)
], limit=1)
if seller_id:
product = seller_id.product_id
if not product:
for n in range(8, 1, -1):
product = product.search([
('l10n_mx_edi_clave_search_pattern', '!=', False),
('l10n_mx_edi_clave_search_pattern', 'like', clave[:n])
], limit=1)
if product:
break
if not product:
# TODO company check
journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1)
product = product.search([
('type', '=', 'service'),
('property_account_expense_id', '=', journal_id.default_account_id)
], limit=1)
return product
| sgrebur/e3a | integreat_mx_edi_extended/models/account_edi_format.py | account_edi_format.py | py | 13,829 | python | en | code | 0 | github-code | 13 |
17333165674 | """Module for the Code Completion Provider which handles all code completion requests."""
import os
import logging
from attr import Factory, attrib, attrs, validators
from pygls.lsp import CompletionParams, CompletionList, CompletionItem, CompletionItemKind
from pygls.server import LanguageServer
from aac.io.parser import parse
from aac.io.parser._parser_error import ParserError
from aac.lang.active_context_lifecycle_manager import get_active_context
from aac.lang.definitions.collections import get_definitions_by_root_key
from aac.lang.definitions.definition import Definition
from aac.plugins.first_party.lsp_server.providers.lsp_provider import LspProvider
SPACE_TRIGGER = " "
@attrs
class CodeCompletionProvider(LspProvider):
    """Resolve various code completion triggers to specific code completion functions.
    Attributes:
        completion_callbacks (dict[str, Callable]): The dict of trigger characters to corresponding code completion functions
    """
    completion_callbacks: dict = attrib(default=Factory(dict), validator=validators.instance_of(dict))
    def __attrs_post_init__(self):
        """Post init hook for attrs classes."""
        # Register the built-in handler for the space trigger character.
        self.completion_callbacks[SPACE_TRIGGER] = _handle_space_code_completion
    def get_trigger_characters(self) -> list[str]:
        """Return a list of the currently registered trigger characters."""
        return list(self.completion_callbacks.keys())
    def handle_request(self, language_server: LanguageServer, params: CompletionParams):
        """Resolve the trigger to the corresponding code completion function, then execute it.
        Args:
            language_server: The server whose workspace holds the document.
            params: The LSP completion request (position, document, context).
        Returns:
            Whatever the matched callback returns, or None when no callback matches.
        """
        trigger_character = params.context.trigger_character
        if not trigger_character:
            # The client did not report a trigger character; fall back to the
            # character immediately before the cursor.
            source_document_content = _get_code_completion_parent_text_file(language_server, params).split(os.linesep)
            trigger_character = source_document_content[params.position.line][params.position.character - 1]
        callback_function = self.completion_callbacks.get(trigger_character)
        if callback_function:
            return callback_function(language_server, params)
        else:
            logging.debug(
                f"Failed to find corresponding code completion function for the registered trigger: {trigger_character} in registered callbacks: {self.completion_callbacks}"
            )
def _handle_space_code_completion(language_server: LanguageServer, params: CompletionParams):
    """Offer reference completions after a space, but only on a ``type:`` line."""
    document_text = _get_code_completion_parent_text_file(language_server, params)
    current_line = document_text.split(os.linesep)[params.position.line]
    # Guard clause: anything other than a "type:" line gets no completions.
    if not current_line.strip().startswith("type:"):
        return None
    return _get_reference_completion_items(language_server, params)
def _get_reference_completion_items(language_server: LanguageServer, params: CompletionParams):
    """Build the CompletionList of type references valid after ``type:``.
    Candidates are primitive types, schemas from the active context, and
    schemas defined in the file being edited.
    """
    active_context = get_active_context()
    primitives_definition = active_context.get_primitives_definition()
    primitive_references = {}
    if primitives_definition:
        primitive_references = {field: "Primitive type" for field in active_context.get_primitive_types()}
    schema_definition_references = _convert_definitions_to_name_description_dict(
        active_context.get_definitions_by_root_key("schema")
    )
    try:
        file_definitions = parse(_get_code_completion_parent_text_file(language_server, params))
    except ParserError as error:
        # Re-raise without exception chaining so callers see a clean parser error.
        raise ParserError(error.source, error.errors) from None
    else:
        file_schema_references = _convert_definitions_to_name_description_dict(
            get_definitions_by_root_key("schema", file_definitions)
        )
        # Dict union: later operands win, so file-local schemas shadow context ones.
        available_references = primitive_references | schema_definition_references | file_schema_references
        return CompletionList(
            is_incomplete=False,
            items=[
                CompletionItem(label=name, kind=CompletionItemKind.Reference, documentation=description)
                for name, description in available_references.items()
            ],
        )
def _get_code_completion_parent_text_file(language_server: LanguageServer, params: CompletionParams):
    """Return the full source text of the document the completion request targets."""
    document_uri = params.text_document.uri
    document = language_server.workspace.documents.get(document_uri)
    return document.source
def _convert_definitions_to_name_description_dict(definitions: list[Definition]) -> dict:
    """Map each definition's name to its top-level description (empty string if absent)."""
    name_to_description = {}
    for definition in definitions:
        description = definition.get_top_level_fields().get("description") or ""
        name_to_description[definition.name] = description
    return name_to_description
| jondavid-black/AaC | python/src/aac/plugins/first_party/lsp_server/providers/code_completion_provider.py | code_completion_provider.py | py | 4,430 | python | en | code | 14 | github-code | 13 |
class ListNode:
    """A singly linked list node holding one digit value and a successor pointer."""
    def __init__(self, val=0, next=None):
        # Payload digit and (optional) next node; defaults give a detached zero node.
        self.val = val
        self.next = next
def addTwoNumbers(l1, l2):
    """Add two non-negative integers stored as reversed-digit linked lists.
    Returns the head of a new list holding the digit-wise sum (LeetCode #2).
    """
    dummy_head = ListNode(0)  # sentinel so the first real node needs no special case
    tail = dummy_head
    carry = 0
    # Keep consuming digits while either list has nodes or a carry remains.
    while l1 or l2 or carry:
        digit_sum = carry
        if l1:
            digit_sum += l1.val
            l1 = l1.next
        if l2:
            digit_sum += l2.val
            l2 = l2.next
        carry, digit = divmod(digit_sum, 10)
        tail.next = ListNode(digit)
        tail = tail.next
    return dummy_head.next
26660684376 | from samaritan import app
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
# --- Flask application configuration ---------------------------------------
# NOTE(review): database credentials and the JWT secret are hard-coded; move
# them to environment variables before deploying outside development.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://test:test@db:5432/samaritan'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
# Enable token-revocation (blacklist) checks for both token kinds.
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
db = SQLAlchemy(app)
jwt = JWTManager(app)
@app.before_first_request
def create_tables():
    """Create all SQLAlchemy-declared tables before serving the first request."""
    db.create_all()
| librowski/Samaritan | samaritan-backend/samaritan/models/db.py | db.py | py | 522 | python | en | code | 0 | github-code | 13 |
8855334879 | # -*- coding: utf-8 -*-
from darkflow.net.build import TFNet
import sys
import dlib
import cv2
from collections import Counter
# Detect objects per frame with tiny-YOLO (darkflow), track each detection with
# a dlib correlation tracker, and write an annotated video with per-label counts.
options = {"model": "cfg/tiny-yolo-voc.cfg", "load": "bin/tiny-yolo-voc.weights", "threshold": 0.7, "saveVideo":""}
tfnet = TFNet(options)
objectTrackers = {}   # object id -> dlib correlation tracker
objectNames = {}      # object id -> YOLO label
frameCounter = 0      # NOTE(review): never incremented/used
currentObjectID = 0
tracker = dlib.correlation_tracker()
cap = cv2.VideoCapture("Video.MOV")
f_width = 320
f_height = 240
out = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10.0, (f_width,f_height))
found_object = False  # NOTE(review): never used
if __name__ == '__main__':
    cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
    while(1):
        ret, frame = cap.read()
        if not ret:
            break
        reframe = cv2.resize( frame, ( f_width, f_height))
        # Advance every existing tracker to the new frame.
        # NOTE(review): trackingQuality is computed but never used, so stale
        # trackers are never removed — consider dropping low-quality ones.
        for fid in objectTrackers.keys():
            trackingQuality = objectTrackers[ fid ].update( reframe )
        objects_inframe = tfnet.return_predict(reframe)
        for object_inframe in objects_inframe:
            x0 = object_inframe['topleft']['x']
            y0 = object_inframe['topleft']['y']
            x1 = object_inframe['bottomright']['x']
            y1 = object_inframe['bottomright']['y']
            w = object_inframe['bottomright']['x'] - object_inframe['topleft']['x']
            h = object_inframe['bottomright']['y'] -object_inframe['topleft']['y']
            label = object_inframe['label']
            #calculate the centerpoint
            x_bar = x0 + (x1 - x0) * 0.5
            y_bar = y0 + (y1 - y0) * 0.5
            matchedFid = None
            #if label in 'person':
            ###################################Check object Exist ###################################################
            for fid in objectTrackers.keys():
                tracked_position = objectTrackers[fid].get_position()
                t_x = int(tracked_position.left())
                t_y = int(tracked_position.top())
                t_w = int(tracked_position.width())
                t_h = int(tracked_position.height())
                #calculate the centerpoint
                t_x_bar = t_x + 0.5 * t_w
                t_y_bar = t_y + 0.5 * t_h
                #check if the centerpoint of the object is within the
                #rectangleof a tracker region. Also, the centerpoint
                #of the tracker region must be within the region
                #detected as a object. If both of these conditions hold
                #we have a match
                if ( ( t_x <= x_bar <= (t_x + t_w)) and
                     ( t_y <= y_bar <= (t_y + t_h)) and
                     ( x0 <= t_x_bar <= (x0 + w )) and
                     ( y0 <= t_y_bar <= (y0 + h ))):
                    matchedFid = fid
            ################################### Add new object  ###################################################
            if matchedFid is None:
                print("Creating new tracker " + str(currentObjectID))
                #Create and store the tracker
                tracker = dlib.correlation_tracker()
                tracker.start_track(reframe, dlib.rectangle(x0,y0, x1, y1))
                objectTrackers[ currentObjectID ] = tracker
                objectNames[ currentObjectID ] = label
                cv2.rectangle(reframe, (x0,y0), (x1, y1), (0, 255, 0), 3, 1)
                # Save a crop of the new detection for later inspection.
                roi = reframe[y0:y0+h, x0:x0+w]
                objsave_name = label + '_' + str(currentObjectID) + '.jpg'
                cv2.imwrite(objsave_name,roi);
                #Increase the currentObjectID counter
                currentObjectID += 1
        ################################### Objects Tracking  ###################################################
        # Draw every tracked box with its "<id>.<label>" tag.
        for fid in objectTrackers.keys():
            tracked_position = objectTrackers[fid].get_position()
            t_x = int(tracked_position.left())
            t_y = int(tracked_position.top())
            t_w = int(tracked_position.width())
            t_h = int(tracked_position.height())
            rectangleColor = (0,165,255)
            cv2.rectangle(reframe, (t_x, t_y),(t_x + t_w , t_y + t_h),rectangleColor ,2)
            if fid in objectNames.keys():
                object_index =str(fid) + '.'+objectNames[fid]
                cv2.putText(reframe, object_index ,
                            (int(t_x + t_w/2), int(t_y)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 255, 255), 2)
            else:
                cv2.putText(reframe, "Detecting..." ,
                            (int(t_x + t_w/2), int(t_y)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 255, 255), 2)
        # Overlay the running per-label count and write the annotated frame.
        count_list= list(objectNames.values())
        print(Counter(count_list))
        cv2.putText(reframe, str(Counter(count_list)),(10, 10),cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (255, 255, 255), 2)
        out.write(reframe)
        cv2.imshow("image", reframe)
        # Stop on ESC (key code 27).
        if cv2.waitKey(10) == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
| leimpengpeng/ComputerVision_script | yolo_count_object.py | yolo_count_object.py | py | 5,250 | python | en | code | 1 | github-code | 13 |
20951113448 | from controller import Robot, Keyboard
# Webots controller: drive a four-wheeled robot forward, spin away from
# obstacles seen by two distance sensors, and move a linear/rotational motor
# from keyboard input.
TIME_STEP = 64
robot = Robot()
keyboard = Keyboard()
keyboard.enable(TIME_STEP)
# Two forward-facing distance sensors.
ds = []
dsNames = ['ds_right', 'ds_left']
for i in range(2):
    ds.append(robot.getDevice(dsNames[i]))
    ds[i].enable(TIME_STEP)
# Four wheel motors in velocity-control mode (position set to infinity).
wheels = []
wheelsNames = ['wheel1', 'wheel2', 'wheel3', 'wheel4']
for i in range(4):
    wheels.append(robot.getDevice(wheelsNames[i]))
    wheels[i].setPosition(float('inf'))
    wheels[i].setVelocity(0.0)
linearMotor = robot.getDevice("LinearMotor")
linearPos = 0
linearMotor.setPosition(linearPos)
rotationalMotor = robot.getDevice("RotationalMotor")
rotatePos = 0
rotationalMotor.setPosition(rotatePos)
avoidObstacleCounter = 0
while robot.step(TIME_STEP) != -1:
    leftSpeed = 1.0
    rightSpeed = 1.0
    if avoidObstacleCounter > 0:
        # Currently avoiding: spin in place until the counter runs out.
        avoidObstacleCounter -= 1
        leftSpeed = 1.0
        rightSpeed = -1.0
    else:
        # Start an avoidance turn when either sensor reads below threshold.
        for i in range(2):
            if ds[i].getValue() < 950.0:
                avoidObstacleCounter = 100
    wheels[0].setVelocity(leftSpeed)
    wheels[1].setVelocity(rightSpeed)
    wheels[2].setVelocity(leftSpeed)
    wheels[3].setVelocity(rightSpeed)
    currentKey = keyboard.getKey()
    print(currentKey)
    # Key codes 87/83/88/90 are presumably ASCII 'W'/'S'/'X'/'Z' — TODO confirm
    # against the Webots Keyboard API. W/S move the linear motor within
    # [0, 0.14]; X/Z rotate the rotational motor (unbounded).
    if currentKey == 87 and linearPos < 0.14:
        linearPos += 0.005
    if currentKey == 83 and linearPos > 0:
        linearPos -= 0.005
    if currentKey == 88:
        rotatePos += 0.01
    if currentKey == 90:
        rotatePos -= 0.01
    linearMotor.setPosition(linearPos)
    rotationalMotor.setPosition(rotatePos)
| davs28/Webots | webot_works/lesson2/controllers/four_wheeled_collision_avoidance/four_wheeled_collision_avoidance.py | four_wheeled_collision_avoidance.py | py | 1,653 | python | en | code | 0 | github-code | 13 |
72913515217 | from tabuleiro import *
# Entry point for the board game: show the menu once and dispatch the player's
# choice to the Jogo ("game") object provided by the tabuleiro module.
j = Jogo()
j.inicio()
print("Menu:\n")
print("1. Iniciar jogo;")
print("2. Visualizar tabuleiro;")
print("3. Fazer jogada;")
print("4. Retornar para jogo passado;")
print("5. Acabar partida; \n")
print ("Escolha uma numeração do menu de 1 a 5, para fazer a acao desejada.")
escolha = int(input("Numero do menu"))
print("")
# Bug fix: this branch previously tested the undefined name `escolhe`, which
# raised a NameError as soon as the comparison ran; the variable is `escolha`.
if escolha == 1:
    j.criar_tabuleiro()
    j.menu()
elif escolha == 2:
    j.visualizar_tabuleiro()
    j.menu()
elif escolha == 3:
    j.jogada()
    j.menu()
elif escolha == 4:
    # NOTE(review): the menu labels option 4 "return to previous game" and
    # option 5 "end game", but option 4 ends the game here and option 5 is
    # unhandled — confirm the intended mapping against tabuleiro's API.
    j.finalizar_jogo()
| arturgirao/CampoMinado | principal.py | principal.py | py | 622 | python | pt | code | 0 | github-code | 13 |
72106030739 | import sys
import socket,select
# Two-party relay server: the first client to connect is registered as
# 'alice', the second as 'bob'; messages addressed "@name:..." are forwarded.
port = 11111
socket_list = []  # every socket select() should watch (server + clients)
users = {}        # logical name ('alice'/'bob') -> connected socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('127.0.0.1',port))
server_socket.listen(2)
socket_list.append(server_socket)
while True:
    # Zero timeout: poll for readable sockets without blocking.
    ready_to_read,ready_to_write,in_error = select.select(socket_list,[],[],0)
    for sock in ready_to_read:
        if sock == server_socket:
            # New connection on the listening socket.
            connect, addr = server_socket.accept()
            socket_list.append(connect)
            # NOTE(review): only two users are supported; a third connection
            # would overwrite 'bob'. Each client is told its peer's name.
            if users:
                users['bob'] = connect
                connect.send("alice".encode())
                print ("User Bob added and connection is established")
                users['alice'].send("Hello".encode())
            else:
                users['alice'] = connect
                connect.send("bob".encode())
                print ("User Alice added and connection is established")
        else:
            # Expected wire format: "@recipient:ciphertext#sha512$hmac".
            data = sock.recv(2048).decode()
            if data.startswith("@"):
                print("Message Transfered to "+data[1:data.index(':')])
                print("AES Encrypted Message : " +data[data.index(':')+1:data.index('#')])
                print("SHA512 Hash : "+data[data.index('#')+1:data.index('$')])
                print("HMAC SHA256 Digest : "+data[data.index('$')+1:])
                # Forward everything after the recipient tag to the peer.
                users[data[1:data.index(':')].lower()].send(data[data.index(':')+1:].encode())
server_socket.close() | abhishekbvs/cryptography | Assignment3/server.py | server.py | py | 1,500 | python | en | code | 0 | github-code | 13 |
70657856978 | from django.contrib import admin
from django.urls import path
from .views import *
# Namespace used by {% url 'service:...' %} and reverse('service:...').
app_name = 'service'
# NOTE(review): route names mix naming styles (Service_list / Last_Service /
# about_list) — consider normalising, but existing templates reference these.
urlpatterns = [
    path('', HomeList.as_view(), name='home'),
    path('services/', ServiceList.as_view(), name='Service_list'),
    path('last/', LastServiceList.as_view(), name='Last_Service'),
    path('about/', AboutList.as_view(), name='about_list'),
    path('pricing/', PricingList.as_view(), name='pricing_list'),
    path('Services/<slug:slug>', ServiceDetail.as_view(), name='Service_detail'),
    path("contact", contact, name="contact"),
    path("quote", quote, name="quote"),
]
| AbdulrahmanElsharef/Logis_Services_Django | service/urls.py | urls.py | py | 599 | python | en | code | 0 | github-code | 13 |
15322248101 | import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
# context = np.loadtxt("../data/untolerance/Hepatitis.txt")
# rows, cols = context.shape
#
# # attributes = [i for i in range(cols-1)]
# attributes = [1, 2, 5, 8, 9, 10, 14, 15, 17]
# attributes = list(set(attributes))
# data = []
# target = []
# for i in range(rows):
# temp = []
# for j in range(len(attributes)):
# p = attributes[j]
# ret = context[i][p]
# temp.append(ret)
# data.append(temp.copy())
# target.append(context[i][cols-1])
# classes = len(list(set(target)))
# print("class nums", classes)
# train = np.array(data)
# test = target.copy()
# svc = SVC(kernel='rbf', probability=True) # svm分类器
# scores = cross_val_score(svc, train, test, cv=10, scoring='accuracy')
# print(scores)
# average_scores = sum(scores)/10
# print("svm average accuracy", average_scores * 100, "%")
#
#
# knn = KNeighborsClassifier(classes)
#
# scores = cross_val_score(knn, train, test, cv=10, scoring='accuracy')
# print(scores)
# average_scores = sum(scores)/10
# print("knn average accuracy", average_scores * 100, "%")
#
from Readfile.read_file import readFile
def get_accuracy(file, attributes):
    """Score a feature subset of a whitespace-separated dataset with 10-fold CV.

    The last column of *file* is taken as the class label; *attributes* are the
    feature column indices to keep (duplicates are dropped, order follows
    ``set`` semantics as before). An RBF-kernel SVM and a KNN classifier (with
    k = number of classes) are both evaluated; their mean accuracies are
    printed, and the SVM mean accuracy is returned.

    :param file: path to a ``numpy.loadtxt``-readable data file.
    :param attributes: iterable of feature column indices.
    :return: mean 10-fold cross-validation accuracy of the SVM.
    """
    context = np.loadtxt(file)
    attributes = list(set(attributes))
    # Vectorised column selection replaces the previous per-cell Python loops.
    train = context[:, attributes]
    target = context[:, -1].tolist()
    classes = len(set(target))
    print("class nums", classes)
    svc = SVC(kernel='rbf', probability=True)  # RBF-kernel SVM classifier
    scores = cross_val_score(svc, train, target, cv=10, scoring='accuracy')
    svm_average_scores = sum(scores) / 10
    print("svm", '%.4f' % svm_average_scores)
    # NOTE(review): n_neighbors is set to the number of classes, mirroring the
    # original behaviour — confirm that is the intended k.
    knn = KNeighborsClassifier(classes)
    scores = cross_val_score(knn, train, target, cv=10, scoring='accuracy')
    knn_average_scores = sum(scores) / 10
    print("knn", '%.4f' % knn_average_scores)
    return svm_average_scores
if __name__ == "__main__":
    # Example run: score a hand-picked feature subset of the lung-cancer data.
    file="../data/tolerance/lung_cancer.txt"
    # context = np.loadtxt()
    attributes =[1,14,45,13,9,34]
    # data = readFile(file)
print("svm",'%.4f'%get_accuracy(file,attributes)) | dejiehu/Equivalence_division | classification_accuration/Classification_accuracy2.py | Classification_accuracy2.py | py | 3,126 | python | en | code | 0 | github-code | 13 |
37964392358 | from TrigMonitorBase.TrigGenericMonitoringToolConfig import defineHistogram, TrigGenericMonitoringToolConfig
class SiTrigSpacePointFormatMonitorBase(TrigGenericMonitoringToolConfig):
    """Base monitoring config for SiTrigSpacePointFormation: books cluster and
    space-point histograms, with axis ranges scaled by the slice's eta width."""
    def __init__(self, name="SiTrigSpacePointFormatMonitorBase", type="electron"):
        super (SiTrigSpacePointFormatMonitorBase, self).__init__(name)
        # Histogram upper edge grows with the slice's eta half-width:
        # full-scan (>=3.) -> 2000, wide (>=0.2) -> 1000, narrow -> 250.
        maxclu = 250
        from InDetTrigRecExample.InDetTrigSliceSettings import InDetTrigSliceSettings
        deta = InDetTrigSliceSettings[('etaHalfWidth',type)]
        if deta>=3.:
            maxclu = 2000
        elif deta>=0.2:
            maxclu = 1000
        self.Histograms += [ defineHistogram('numSctClusters',
                                             type='TH1F',
                                             title="Number of SCT Clusters",
                                             xbins = 250, xmin=0., xmax=maxclu)]
        self.Histograms += [ defineHistogram('numPixClusters',
                                             type='TH1F',
                                             title="Number of PIXEL Clusters",
                                             xbins = 250, xmin=0., xmax=maxclu)]
        self.Histograms += [ defineHistogram('numSctSpacePoints',
                                             type='TH1F',
                                             title="Number of SCT Space Points",
                                             xbins = 250, xmin=0., xmax=maxclu)]
        self.Histograms += [ defineHistogram('numPixSpacePoints',
                                             type='TH1F',
                                             title="Number of PIXEL Space Points",
                                             xbins = 250, xmin=0., xmax=maxclu)]
        self.Histograms += [ defineHistogram('PixSPHashId',
                                             type='TH1F',
                                             title="SP hash ID in PIXEL",
                                             xbins = 200, xmin=0., xmax=2000.)]
        self.Histograms += [ defineHistogram('SctSPHashId',
                                             type='TH1F',
                                             title="SP hash ID in SCT",
                                             xbins = 200, xmin=0., xmax=8300.)]
        # 2D cluster-vs-space-point correlations only for wide/full slices.
        if (deta>=0.2):
            self.Histograms += [ defineHistogram('numSctClusters,numSctSpacePoints',
                                                 type='TH2F',
                                                 title="SCT Clusters vs Space Points (Zero)",
                                                 xbins = 50, xmin=0., xmax=50,
                                                 ybins = 50, ymin=0., ymax=50)]
            self.Histograms += [ defineHistogram('numPixClusters,numPixSpacePoints',
                                                 type='TH2F',
                                                 title="PIXEL Clusters vs Space Points (Zero)",
                                                 xbins = 50, xmin=0., xmax=50,
                                                 ybins = 50, ymin=0., ymax=50)]
#
#-- online
#
class SiTrigSpacePointFormatOnlineMonitor(SiTrigSpacePointFormatMonitorBase):
    """Online variant: same histograms, targeted at the 'Online' environment."""
    def __init__(self, name="SiTrigSpacePointFormatOnlineMonitor", type="electron"):
        super (SiTrigSpacePointFormatOnlineMonitor, self).__init__(name,type)
        self.defineTarget("Online")
#
#-- validation
#
class SiTrigSpacePointFormatValidationMonitor(SiTrigSpacePointFormatMonitorBase):
    """Validation variant: same histograms, targeted at 'Validation' running."""
    def __init__(self, name="SiTrigSpacePointFormatValidationMonitor", type="electron"):
        super (SiTrigSpacePointFormatValidationMonitor, self).__init__(name,type)
        self.defineTarget("Validation")
| rushioda/PIXELVALID_athena | athena/InnerDetector/InDetTrigRecAlgs/SiTrigSpacePointFormation/python/SiTrigSpacePointFormatMonitoring.py | SiTrigSpacePointFormatMonitoring.py | py | 3,751 | python | en | code | 1 | github-code | 13 |
35182203370 | from django.core.management.base import BaseCommand
class Command(BaseCommand):
    # NOTE(review): help text looks copied from a template — this command
    # parses diocese.txt and loads Diocese records, it does not display time.
    help = 'Displays current time'
    def handle(self, *args, **kwargs):
        """Parse ``.\\diocese.txt`` with a small line-by-line state machine
        (name -> archdiocese -> address -> city/state/zip) and create a
        Diocese record for each completed group of lines.
        NOTE(review): ``in_file`` is never closed, ``insert_rec`` is set but
        never read, ``zip`` shadows the builtin, and ``arch_obj`` may still be
        None when the Diocese is created — confirm these are acceptable.
        """
        from diocese.models import Archdiocese
        from diocese.models import Diocese
        # Postal-abbreviation -> full state name lookup for the city line.
        states_dict = {
            'AK': 'Alaska',
            'AL': 'Alabama',
            'AR': 'Arkansas',
            'AS': 'American Samoa',
            'AZ': 'Arizona',
            'CA': 'California',
            'CO': 'Colorado',
            'CT': 'Connecticut',
            'DC': 'District of Columbia',
            'DE': 'Delaware',
            'FL': 'Florida',
            'GA': 'Georgia',
            'GU': 'Guam',
            'HI': 'Hawaii',
            'IA': 'Iowa',
            'ID': 'Idaho',
            'IL': 'Illinois',
            'IN': 'Indiana',
            'KS': 'Kansas',
            'KY': 'Kentucky',
            'LA': 'Louisiana',
            'MA': 'Massachusetts',
            'MD': 'Maryland',
            'ME': 'Maine',
            'MI': 'Michigan',
            'MN': 'Minnesota',
            'MO': 'Missouri',
            'MP': 'Northern Mariana Islands',
            'MS': 'Mississippi',
            'MT': 'Montana',
            'NA': 'National',
            'NC': 'North Carolina',
            'ND': 'North Dakota',
            'NE': 'Nebraska',
            'NH': 'New Hampshire',
            'NJ': 'New Jersey',
            'NM': 'New Mexico',
            'NV': 'Nevada',
            'NY': 'New York',
            'OH': 'Ohio',
            'OK': 'Oklahoma',
            'OR': 'Oregon',
            'PA': 'Pennsylvania',
            'PR': 'Puerto Rico',
            'RI': 'Rhode Island',
            'SC': 'South Carolina',
            'SD': 'South Dakota',
            'TN': 'Tennessee',
            'TX': 'Texas',
            'UT': 'Utah',
            'VA': 'Virginia',
            'VI': 'Virgin Islands',
            'VT': 'Vermont',
            'WA': 'Washington',
            'WI': 'Wisconsin',
            'WV': 'West Virginia',
            'WY': 'Wyoming'
        }
        from django.utils.dateparse import parse_date
        # Every imported diocese gets the same fixed establishment date.
        date_str = "2018-01-21"
        in_file = open(".\diocese.txt","r")
        # State-machine accumulators; the get_* flags say what the next
        # line is expected to contain.
        name = ""
        city = ""
        state = ""
        mother_church_name = ""
        mother_church_address = ""
        mother_church_zipcode = ""
        arch_name = ""
        get_address = False
        get_city = False
        get_archdiocese = False
        establish_date = parse_date(date_str)
        insert_rec = False
        arch_obj = None
        for line in in_file:
            if "Diocese" in line:
                # A line containing "Diocese": everything after the first two
                # words is the diocese name; an archdiocese line should follow.
                parsed_line = line.split(" ")
                name = " ".join(parsed_line[2:])
                name = str(name.strip())
                self.stdout.write( "diocese name "+repr(name) )
                get_archdiocese = True
            elif get_archdiocese:
                # Expect a line of the form "arch <archdiocese name>".
                parsed_line = line.split(" ")
                if parsed_line[0] == 'arch':
                    arch_name = " ".join(parsed_line[1:])
                    arch_name = arch_name.title()
                    arch_name = arch_name.strip()
                    print ("arch name = "+repr(arch_name))
                    get_archdiocese = False
                    get_address = True
            elif get_address:
                # Street-address line; also resolve the Archdiocese record.
                self.stdout.write ("address line = "+repr(line))
                try:
                    arch_obj = Archdiocese.objects.get(name=arch_name)
                    insert_rec = True
                except Exception as e:
                    print ("Excecption "+repr(e))
                    pass
                mother_church_address = str(line)
                get_address = False
                get_city = True
            elif get_city:
                # "City, ST zip" line: parse it and create the Diocese record.
                parsed_line = line.split(" ")
                city = str(parsed_line[0])
                city = city.replace(",","")
                try:
                    state = states_dict[str(parsed_line[1])]
                    zip = str(parsed_line[2])
                    zip = zip.split("-")
                    zip = str(zip[0])
                    zip = zip.strip()
                    print ("address "+repr(mother_church_address))
                    print ("city "+repr(city))
                    print ("state "+repr(state))
                    print ("zip "+repr(zip))
                    diocese = Diocese(name = name, city=city, state=state,
                                      archdiocese = arch_obj,
                                      establish_date = establish_date)
                    '''
                    class Diocese(models.Model):
                        archdiocese = models.ForeignKey(
                            'Archdiocese',
                            on_delete=models.CASCADE,
                        )
                        name = models.CharField(max_length=200)
                        city = models.CharField(max_length=200)
                        state = models.CharField(max_length=200)
                        establish_date = models.DateField('date established')
                        created_from = models.ForeignKey('self', on_delete=models.CASCADE,blank=True,null=True)
                    '''
                    diocese.save()
                    # Reset the accumulators for the next diocese group.
                    name = ""
                    city = ""
                    state = ""
                    mother_church_name = ""
                    mother_church_address = ""
                    arch_name = ""
                    mother_church_zipcode = ""
                    get_archdiocese = False
                    get_city = False
                    arch_obj = None
                # NOTE(review): bare except silently drops malformed city
                # lines (and any save() failure) — consider logging them.
                except:
pass | jplaschke/rcc_abuse | diocese/management/commands/load_db.py | load_db.py | py | 5,505 | python | en | code | 0 | github-code | 13 |
def calculate_waiting_and_turnaround_times(processes_info, num_processes, burst_times, waiting_times, turnaround_times):
    """Fill *waiting_times* and *turnaround_times* in place for FCFS order.

    Processes run back-to-back in index order, so each process waits for the
    sum of all earlier bursts, and its turnaround is that wait plus its own
    burst. ``processes_info`` is accepted for signature parity but unused.
    """
    # FCFS algorithm: waiting times are the running prefix sums of the bursts.
    waiting_times[0] = 0
    for i in range(1, num_processes):
        waiting_times[i] = waiting_times[i - 1] + burst_times[i - 1]
    # Turnaround = waiting + own burst.
    for i, wait in enumerate(waiting_times[:num_processes]):
        turnaround_times[i] = wait + burst_times[i]
def find_waiting_and_turnaround_times_shortest_job_first(processes_info, num_processes, burst_times):
    """Non-preemptive Shortest Job First scheduling.

    Args:
        processes_info: list of (name, arrival_time, burst_time, priority) tuples.
        num_processes: number of processes (== len(processes_info)).
        burst_times: burst time per process, same order as processes_info.

    Returns:
        (waiting_times, turnaround_times) lists, indexed like processes_info.

    Bug fixes vs. the original:
    - Waiting time is now taken at the moment the job STARTS (before its burst
      is added to the clock); previously it equalled completion - arrival,
      i.e. the turnaround time, which then double-counted the burst.
    - When no pending process has arrived yet, the clock now jumps to the next
      arrival instead of breaking out of the loop with jobs still unfinished.
    """
    waiting_times = [0] * num_processes
    turnaround_times = [0] * num_processes
    remaining_burst_times = burst_times.copy()
    current_time = 0
    completed = 0
    while completed < num_processes:
        # Find the pending, already-arrived process with the shortest burst.
        min_burst_time = float('inf')
        shortest_process_index = -1
        for i in range(num_processes):
            if remaining_burst_times[i] > 0 and processes_info[i][1] <= current_time:
                if remaining_burst_times[i] < min_burst_time:
                    shortest_process_index = i
                    min_burst_time = remaining_burst_times[i]
        if shortest_process_index == -1:
            # CPU is idle: advance the clock to the earliest future arrival.
            current_time = min(processes_info[i][1]
                               for i in range(num_processes)
                               if remaining_burst_times[i] > 0)
            continue
        # Waiting time = start time - arrival time (measured BEFORE running).
        waiting_times[shortest_process_index] = current_time - processes_info[shortest_process_index][1]
        current_time += min_burst_time
        remaining_burst_times[shortest_process_index] = 0
        turnaround_times[shortest_process_index] = waiting_times[shortest_process_index] + burst_times[shortest_process_index]
        completed += 1
    return waiting_times, turnaround_times
def find_waiting_and_turnaround_times_priority_scheduling(processes_info, num_processes, burst_times):
waiting_times = [0] * num_processes
turnaround_times = [0] * num_processes
remaining_burst_times = burst_times.copy()
current_time = 0
while True:
done = True
# Find the process with the highest priority
max_priority = -1
highest_priority_process_index = -1
for i in range(num_processes):
if remaining_burst_times[i] > 0 and processes_info[i][1] <= current_time:
if processes_info[i][2] > max_priority:
highest_priority_process_index = i
max_priority = processes_info[i][2]
# If no process is found, break the loop
if highest_priority_process_index == -1:
break
# Update time and remaining burst time for the chosen process
current_time += 1
remaining_burst_times[highest_priority_process_index] -= 1
waiting_times[highest_priority_process_index] = current_time - processes_info[highest_priority_process_index][1]
turnaround_times[highest_priority_process_index] = waiting_times[highest_priority_process_index] + burst_times[highest_priority_process_index]
return waiting_times, turnaround_times
def find_waiting_and_turnaround_times_round_robin(processes_info, num_processes, burst_times, time_quantum=4):
    """Round Robin scheduling with a fixed time quantum.

    As in the original, processes are serviced in list order every round and
    arrival times are NOT used for scheduling (only in the final formulas) --
    the standard simplification when all jobs arrive near time 0.

    Args:
        processes_info: list of (name, arrival_time, burst_time, priority) tuples.
        num_processes: number of processes.
        burst_times: burst time per process, same order as processes_info.
        time_quantum: slice length per turn (default 4).

    Returns:
        (waiting_times, turnaround_times) lists, indexed like processes_info.

    Bug fix vs. the original: at completion the original set
    waiting = completion - arrival (which is the turnaround) and then
    turnaround = waiting + burst, double-counting the burst. Now
    turnaround = completion - arrival and waiting = turnaround - burst.
    """
    waiting_times = [0] * num_processes
    turnaround_times = [0] * num_processes
    remaining_burst_times = burst_times.copy()
    current_time = 0
    while True:
        done = True
        for i in range(num_processes):
            if remaining_burst_times[i] > 0:
                done = False
                # Give the process one slice (or whatever is left of its burst).
                time_slice = min(time_quantum, remaining_burst_times[i])
                current_time += time_slice
                remaining_burst_times[i] -= time_slice
                if remaining_burst_times[i] == 0:
                    turnaround_times[i] = current_time - processes_info[i][1]
                    waiting_times[i] = turnaround_times[i] - burst_times[i]
        if done:
            break
    return waiting_times, turnaround_times
def main():
    """Run all four schedulers on a fixed workload and print a comparison.

    Note: the FCFS helper fills the pre-allocated lists in place (out
    parameters), while the SJF/priority/RR helpers return fresh lists --
    hence the two calling styles below. NOTE(review): the FCFS helper
    appears to ignore arrival times entirely -- confirm that is intended.
    """
    # Process information: (Process Name, Arrival Time, Burst Time, Priority)
    processes_info = [("P1", 0, 24, 3), ("P2", 4, 3, 1), ("P3", 5, 3, 4), ("P4", 6, 12, 2)]
    num_processes = len(processes_info)
    burst_times = [process[2] for process in processes_info]
    # Initialize waiting_time and turnaround_time arrays
    waiting_times_fcfs = [0] * num_processes
    turnaround_times_fcfs = [0] * num_processes
    waiting_times_sjf = [0] * num_processes
    turnaround_times_sjf = [0] * num_processes
    waiting_times_priority = [0] * num_processes
    turnaround_times_priority = [0] * num_processes
    waiting_times_rr = [0] * num_processes
    turnaround_times_rr = [0] * num_processes
    # Calculate waiting times and turnaround times for each algorithm
    calculate_waiting_and_turnaround_times(processes_info, num_processes, burst_times, waiting_times_fcfs, turnaround_times_fcfs)
    waiting_times_sjf, turnaround_times_sjf = find_waiting_and_turnaround_times_shortest_job_first(processes_info, num_processes, burst_times)
    waiting_times_priority, turnaround_times_priority = find_waiting_and_turnaround_times_priority_scheduling(processes_info, num_processes, burst_times)
    waiting_times_rr, turnaround_times_rr = find_waiting_and_turnaround_times_round_robin(processes_info, num_processes, burst_times)
    # Calculate average waiting time and average turnaround time
    avg_waiting_time_fcfs = sum(waiting_times_fcfs) / num_processes
    avg_turnaround_time_fcfs = sum(turnaround_times_fcfs) / num_processes
    avg_waiting_time_sjf = sum(waiting_times_sjf) / num_processes
    avg_turnaround_time_sjf = sum(turnaround_times_sjf) / num_processes
    avg_waiting_time_priority = sum(waiting_times_priority) / num_processes
    avg_turnaround_time_priority = sum(turnaround_times_priority) / num_processes
    avg_waiting_time_rr = sum(waiting_times_rr) / num_processes
    avg_turnaround_time_rr = sum(turnaround_times_rr) / num_processes
    # Determine the best scheduling algorithm (lowest average turnaround time)
    best_algorithm = None
    best_avg_turnaround_time = float('inf')
    for algorithm, avg_turnaround_time in [("FCFS", avg_turnaround_time_fcfs),
                                           ("SJF", avg_turnaround_time_sjf),
                                           ("Priority Scheduling", avg_turnaround_time_priority),
                                           ("Round Robin (Time Quantum = 4)", avg_turnaround_time_rr)]:
        if avg_turnaround_time < best_avg_turnaround_time:
            best_avg_turnaround_time = avg_turnaround_time
            best_algorithm = algorithm
    # Display the winner first
    print(f"Best Scheduling Algorithm based on Average Turnaround Time: {best_algorithm}")
    print(f"Average Turnaround Time: {best_avg_turnaround_time}")
    # Then the per-algorithm, per-process breakdown
    print("First-Come, First-Served (FCFS):")
    for i in range(num_processes):
        print(f"{processes_info[i][0]} - Waiting Time: {waiting_times_fcfs[i]}, Turnaround Time: {turnaround_times_fcfs[i]}")
    print(f"Average Waiting Time: {avg_waiting_time_fcfs}")
    print(f"Average Turnaround Time: {avg_turnaround_time_fcfs}")
    print()
    print("Shortest Job First (SJF):")
    for i in range(num_processes):
        print(f"{processes_info[i][0]} - Waiting Time: {waiting_times_sjf[i]}, Turnaround Time: {turnaround_times_sjf[i]}")
    print(f"Average Waiting Time: {avg_waiting_time_sjf}")
    print(f"Average Turnaround Time: {avg_turnaround_time_sjf}")
    print()
    print("Priority Scheduling:")
    for i in range(num_processes):
        print(f"{processes_info[i][0]} - Waiting Time: {waiting_times_priority[i]}, Turnaround Time: {turnaround_times_priority[i]}")
    print(f"Average Waiting Time: {avg_waiting_time_priority}")
    print(f"Average Turnaround Time: {avg_turnaround_time_priority}")
    print()
    print("Round Robin (Time Quantum = 4):")
    for i in range(num_processes):
        print(f"{processes_info[i][0]} - Waiting Time: {waiting_times_rr[i]}, Turnaround Time: {turnaround_times_rr[i]}")
    print(f"Average Waiting Time: {avg_waiting_time_rr}")
    print(f"Average Turnaround Time: {avg_turnaround_time_rr}")
# Script entry point.
if __name__ == "__main__":
    main()
| sreevalli27/AI_industry | SE20UARI148_Assignment-3/Scheduling-1/scheduling_1.py | scheduling_1.py | py | 8,184 | python | en | code | 0 | github-code | 13 |
9050803627 | import json
import uma_dwh.utils.opsgenie as opsgenie
from uma_dwh.utils import date_diff_in_seconds
from datetime import datetime
from .mssql_db import execute_sp, get_sp_result_set, get_out_arg
from .exceptions import SPException
from .utils import execute_sp_with_required_in_args, fill_in_sp_in_args
def fetch_current_status():
    """Fetch the current ETL cycle status and return one dict per data mart.

    Each row from MWH.GET_CURRENT_ETL_CYCLE_STATUS is normalised through
    get_data_mart() before being returned.
    """
    sp_result = execute_sp(
        'MWH.GET_CURRENT_ETL_CYCLE_STATUS',
        {'FirstDataMartInCycle': 'I3_NON-MCS'}
    )
    return [get_data_mart(row) for row in sp_result[0]]
def check_current_status():
    """
    Checks the data mart statuses and sends the Opsgenie alert.

    Alerts are only sent for a data mart that (a) is currently FAILED,
    (b) finished more than an hour ago, and (c) has not already been alerted
    on today (deduplicated via MWH.MANAGE_OPS_GENIE_ALERT records).

    :return: List of data mart dicts an alert was sent for.
    NOTE(review): if no alert record exists yet for a data mart
    (len(last_alert_result) == 0) no alert is ever sent -- confirm that a
    first-ever failure is meant to stay silent.
    """
    result = execute_sp(
        'MWH.GET_CURRENT_ETL_CYCLE_STATUS',
        {
            'FirstDataMartInCycle': 'I3_NON-MCS'
        }
    )
    # Index the normalised data marts by name for the join below.
    data_marts = {}
    for data_mart_data in result[0]:
        data_mart = get_data_mart(data_mart_data)
        data_marts[data_mart['data_mart_name']] = data_mart
    last_run_result = execute_admin_console_sp(
        'MWH.UMA_WAREHOUSE_ADMIN_CONSOLE_REPORTS',
        'GET_LAST_DATAMART_RUN'
    )
    alerts_sent = []
    for data_mart_last_run in last_run_result:
        data_mart_name = data_mart_last_run['data_mart_name']
        if data_mart_name in data_marts:
            data_mart = data_marts[data_mart_name]
            done_dttm = data_mart_last_run['done_dttm']
            now_datetime = datetime.now()
            # Only alert on failures that are more than an hour (3600s) old.
            if data_mart['data_mart_status'] == 'FAILED' and date_diff_in_seconds(now_datetime, done_dttm) > 3600:
                # Positional IN args of MANAGE_OPS_GENIE_ALERT; empty strings
                # are unused slots -- the order is significant.
                last_alert_result = execute_admin_console_sp(
                    'MWH.MANAGE_OPS_GENIE_ALERT',
                    'GET DATE BY DATAMART NAME',
                    '',
                    '',
                    '',
                    '',
                    '',
                    '',
                    '',
                    data_mart_name
                )
                # Deduplicate: only alert if the last alert was before today.
                if len(last_alert_result) > 0 and last_alert_result[0]['insert_dttm'].date() < datetime.today().date():
                    alerts_sent.append(data_mart)
                    res = opsgenie.send_etl_status_alert(data_mart)
                    # Record the new alert so tomorrow's run can deduplicate.
                    execute_admin_console_sp(
                        'MWH.MANAGE_OPS_GENIE_ALERT',
                        'NEW',
                        res['id'],
                        'OPENED',
                        res['priority'],
                        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        '',
                        res['title'],
                        json.dumps(res['details']),
                        data_mart['data_mart_name']
                    )
    return alerts_sent
def fetch_servers():
    """Return the ETL servers, each with its databases and their procedures.

    Shape: [{'name': server, 'dbs': [{'name': db, 'procedures': [...]}]}]
    """
    rows = execute_admin_console_sp('MWH.UMA_WAREHOUSE_ADMIN_CONSOLE', 'GET SERVER DB LIST')
    # Group db names under their server, preserving first-seen server order.
    dbs_by_server = {}
    for row in rows:
        dbs_by_server.setdefault(row['source_server_name'], []).append(row['source_db_name'])
    servers = []
    for server_name, db_names in dbs_by_server.items():
        dbs = []
        for db_name in db_names:
            procedures = execute_admin_console_sp(
                'MWH.UMA_WAREHOUSE_ADMIN_CONSOLE',
                'GET TABLES AND STORED PROCEDURES',
                server_name,
                db_name
            )
            dbs.append({'name': db_name, 'procedures': procedures})
        servers.append({'name': server_name, 'dbs': dbs})
    return servers
def queue_stats(tables):
    """
    Queue stats on the specified tables.
    :param tables: List of dicts with 'database', 'schema' and 'table' keys.
    :type tables: list
    """
    for entry in tables:
        sp_args = {
            'VARCHAR_01': 'QUEUE',
            'VARCHAR_02': 'MLK-EDM-D-SQ02',
            'VARCHAR_03': entry['database'],
            'VARCHAR_04': entry['schema'],
            'VARCHAR_05': entry['table'],
            'VARCHAR_06': 'FULLSCAN',
        }
        execute_sp('MWH.MANAGE_STATISTICS_SP', sp_args)
def dequeue_stats(tables):
    """
    Dequeue stats on the specified tables.
    :param tables: List of dicts with 'database', 'schema' and 'table' keys.
    :type tables: list
    """
    for entry in tables:
        sp_args = {
            'VARCHAR_01': 'DEQUEUE',
            'VARCHAR_02': 'MLK-EDM-D-SQ02',
            'VARCHAR_03': entry['database'],
            'VARCHAR_04': entry['schema'],
            'VARCHAR_05': entry['table'],
            'VARCHAR_06': 'FULLSCAN',
        }
        execute_sp('MWH.MANAGE_STATISTICS_SP', sp_args)
def fetch_error(error_id):
    """
    Returns the ETL error record for the given id.
    :param error_id: Error record ID
    :type error_id: int
    """
    in_args = fill_in_sp_in_args({
        'message': 'GET_ERROR_TEXT',
        'VARCHAR_01': error_id
    })
    rows = execute_sp('MWH.UMA_WAREHOUSE_ADMIN_CONSOLE', in_args)
    # First row of the first result set holds the error record.
    return rows[0][0]
def get_data_mart(raw_data_mart):
    """
    Normalise a raw data mart row for display.

    The raw status is preserved under 'data_mart_status_internal' while
    'data_mart_status' is remapped: STOPPED! / NOT STARTED -> FAILED,
    STOPPED -> PAUSED; anything else is left untouched.

    :param raw_data_mart: Data mart data
    :type raw_data_mart: dict
    :return: dict (the input is not mutated)
    """
    status_display = {
        'STOPPED!': 'FAILED',
        'NOT STARTED': 'FAILED',
        'STOPPED': 'PAUSED',
    }
    data_mart = dict(raw_data_mart)
    raw_status = data_mart['data_mart_status']
    data_mart['data_mart_status_internal'] = raw_status
    data_mart['data_mart_status'] = status_display.get(raw_status, raw_status)
    return data_mart
def execute_admin_console_sp(*args, out_arg='sp_status_code'):
    """
    Helper function to execute an admin-console stored procedure.

    :param args: Stored procedure name followed by its positional IN args.
    :param out_arg: Name of the output status argument to inspect.
    :return: First result set of the stored procedure ([] when empty).
    :rtype: list
    :raises SPException: when the returned status code is greater than 1
        (a status of 0 or 1 is treated as success here).
    """
    results = execute_sp_with_required_in_args(*args)
    status_code = get_out_arg(results, out_arg)
    if status_code > 1:
        raise SPException(f'Stored Procedure call to "{args[0]}" failed.', status_code)
    result = get_sp_result_set(results, 0, out_arg)
    if not result:
        return []
    return result
| pcs2112/UMA-DWH | uma_dwh/db/etl.py | etl.py | py | 6,575 | python | en | code | 0 | github-code | 13 |
74261928976 | from __future__ import print_function
#
# @brief The output class (write a file to disk)
#
class Writer():
    """Emit generated C header/skeleton output from function objects.

    NOTE(review): this class uses `re` and reads `self.outputFile`, neither
    of which is defined in this chunk -- confirm `import re` and the
    attribute assignment exist elsewhere in the file/project.
    """
    def headerFile(self, functionList):
        """Write one declaration (definition + ';') per usable function.

        Functions whose definition matches a `throwaway` pattern get their
        name blanked, which causes them to be skipped below; functions with
        an empty type or name are skipped as well.

        @param functionList A list of function-objects to write
        """
        # open the output file for writing
        output = open(self.outputFile, 'w')
        # Patterns to discard (e.g. extern declarations).
        # NOTE(review): non-raw string with \s escapes -- works today but
        # should be a raw string r"..." to avoid invalid-escape warnings.
        throwaway = ["((^\s*)|(\s+))extern\s+.*\("]
        for function in functionList:
            for match in throwaway:
                if re.search(match, function.getDefinition()):
                    function.name = ''
            if function.type == '':
                continue
            elif function.name == '':
                continue
            else:
                print(function.getDefinition(), end=';\n', sep='', file=output)
        # close the file
        output.close()
    def writeHeaderBegin(self, output, functionList, includes, globalOnce, precompiler=[]):
        """Write the top of a generated file and return the open stream.

        Order: precompiler lines (prefixed '#'), '#include' lines (bare
        names get wrapped in <>), each function's 'global' template output,
        then the globalOnce blob.

        NOTE(review): mutable default `precompiler=[]` -- harmless while it
        is only read, but the list literal is shared between calls.
        """
        # write all needed includes
        for p in precompiler:
            print('#' + p, end='\n', file=output)
        print("// global includes: ", file=output)
        for i in includes:
            # Wrap bare include names in angle brackets.
            i = i if i.startswith("<") else "<" + i + ">"
            print('#include ', i, end='\n', file=output)
        print('\n', file=output)
        # write all global-Templates
        for function in functionList:
            functionVariables = self.functionVariables(function)
            for templ in function.usedTemplateList:
                if templ.output('global') != '':
                    print(templ.output('global', functionVariables), file=output)
        print("", file=output)
        # write global once from template
        print(globalOnce, file=output)
        return output
    def writeBefore(self, function, output, functionVariables):
        """Write 'before' templates in order, then 'beforeLast' templates in
        reverse order, for one function (empty outputs are skipped)."""
        # write the before-template for this function
        for templ in function.usedTemplateList:
            outputString = templ.output('before', functionVariables).strip()
            if outputString != '':
                print('\t', outputString, end='\n', sep='', file=output)
        # write the beforeLast-template for this function
        for templ in reversed(function.usedTemplateList):
            outputString = templ.output('beforeLast', functionVariables).strip()
            if outputString != '':
                print('\t', outputString, end='\n', sep='', file=output)
    def writeAfter(self, function, output, functionVariables):
        """Write 'after'/'afterLast' then 'cleanup'/'cleanupLast' template
        output for one function; the '...Last' variants run in reverse."""
        # write all after-templates for this function
        for templ in function.usedTemplateList:
            outputString = templ.output('after', functionVariables).strip()
            if outputString != '':
                print('\t', outputString, end='\n', sep='', file=output)
        for templ in reversed(function.usedTemplateList):
            outputString = templ.output('afterLast', functionVariables).strip()
            if outputString != '':
                print('\t', outputString, end='\n', sep='', file=output)
        # write all cleanup-templates for this function
        for templ in function.usedTemplateList:
            outputString = templ.output('cleanup', functionVariables).strip()
            if outputString != '':
                print('\t', outputString, end='\n', sep='', file=output)
        for templ in reversed(function.usedTemplateList):
            outputString = templ.output('cleanupLast', functionVariables).strip()
            if outputString != '':
                print('\t', outputString, end='\n', sep='', file=output)
    def functionVariables(self, function, call=""):
        """Return the template substitution map for one function."""
        return {"FUNCTION_NAME" : function.name,
                "FUNCTION_CALL" : call}
| jakobluettgau/feign | tools/gen/lib/skeletonBuilder.py | skeletonBuilder.py | py | 3,864 | python | en | code | 0 | github-code | 13 |
35441191190 | def validSSN():
s = input('Enter a Social Security number: ')
if len(s) == 11:
a = s.split('-')
if len(a[0]) == 3 and len(a[1]) == 2 or len(a[-1]) == 4:
for i in a:
if i.isdigit():
print('Valid SSN')
return True
else:
print('Invalid SSN')
validSSN()
| minzhou1003/intro-to-programming-using-python | practice3/8_1.py | 8_1.py | py | 355 | python | en | code | 0 | github-code | 13 |
38637174812 |
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import os
from model import charLM
from utilities import *
from collections import namedtuple
from test import test
def preprocess():
    """Build word/char dictionaries from the corpus files and cache them.

    Reads valid/train/test text, builds the vocabularies, appends the
    special BOW/EOW/PAD character markers, and saves everything (plus the
    reverse word map and the max word length) to cache/prep.pt.
    """
    word_dict, char_dict = create_word_char_dict("valid.txt", "train.txt", "test.txt")
    num_char = len(char_dict)
    # Special markers: begin-of-word, end-of-word and padding (index 0).
    char_dict["BOW"] = num_char + 1
    char_dict["EOW"] = num_char + 2
    char_dict["PAD"] = 0
    # dict of (int, string): maps a word index back to the word itself.
    reverse_word_dict = {index: word for word, index in word_dict.items()}
    max_word_len = max(len(word) for word in word_dict)
    objects = {
        "word_dict": word_dict,
        "char_dict": char_dict,
        "reverse_word_dict": reverse_word_dict,
        "max_word_len": max_word_len
    }
    torch.save(objects, "cache/prep.pt")
    print("Preprocess done.")
def to_var(x):
    """Wrap a tensor in a Variable, moving it to the GPU when one is available."""
    device_tensor = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_tensor)
def train(net, data, opt):
    """Train `net`, validating at the start of every epoch.

    Args:
        net: the charLM network, called as net(input, hidden).
        data: namedtuple with train/valid numpy arrays (inputs and labels).
        opt: namedtuple of hyper-parameters (lstm_seq_len, lstm_batch_size,
             max_word_len, epochs, init_lr, word_embed_dim).

    Side effects: saves the best model to cache/model.pt and cache/net.pkl,
    and halves the learning rate whenever validation perplexity improves by
    1.0 or less since the previous epoch.

    Fixes vs. the original: uses the in-place clip_grad_norm_ (the unsuffixed
    clip_grad_norm is deprecated), next() instead of .__next__(), and drops
    an unused local.
    """
    torch.manual_seed(1024)
    train_input = torch.from_numpy(data.train_input)
    train_label = torch.from_numpy(data.train_label)
    valid_input = torch.from_numpy(data.valid_input)
    valid_label = torch.from_numpy(data.valid_label)
    # Reshape inputs to [num_seq, seq_len, max_word_len+2], dropping the tail
    # that does not fill a whole sequence.
    num_seq = train_input.size()[0] // opt.lstm_seq_len
    train_input = train_input[:num_seq*opt.lstm_seq_len, :]
    train_input = train_input.view(-1, opt.lstm_seq_len, opt.max_word_len+2)
    num_seq = valid_input.size()[0] // opt.lstm_seq_len
    valid_input = valid_input[:num_seq*opt.lstm_seq_len, :]
    valid_input = valid_input.view(-1, opt.lstm_seq_len, opt.max_word_len+2)
    num_epoch = opt.epochs
    num_iter_per_epoch = train_input.size()[0] // opt.lstm_batch_size
    learning_rate = opt.init_lr
    old_PPL = 100000
    best_PPL = 100000
    # Log-SoftMax
    criterion = nn.CrossEntropyLoss()
    # word_embed_dim == hidden_size / num of hidden units
    hidden = (to_var(torch.zeros(2, opt.lstm_batch_size, opt.word_embed_dim)),
              to_var(torch.zeros(2, opt.lstm_batch_size, opt.word_embed_dim)))
    for epoch in range(num_epoch):
        ################ Validation ####################
        net.eval()
        loss_batch = []
        PPL_batch = []
        iterations = valid_input.size()[0] // opt.lstm_batch_size
        valid_generator = batch_generator(valid_input, opt.lstm_batch_size)
        vlabel_generator = batch_generator(valid_label, opt.lstm_batch_size*opt.lstm_seq_len)
        for t in range(iterations):
            batch_input = next(valid_generator)
            batch_label = next(vlabel_generator)
            # Detach so gradients never flow across batch boundaries.
            hidden = [state.detach() for state in hidden]
            valid_output, hidden = net(to_var(batch_input), hidden)
            # [num_sample-1, len(word_dict)] vs [num_sample-1]
            valid_loss = criterion(valid_output, to_var(batch_label))
            PPL = torch.exp(valid_loss.data)
            loss_batch.append(float(valid_loss))
            PPL_batch.append(float(PPL))
        PPL = np.mean(PPL_batch)
        print("[epoch {}] valid PPL={}".format(epoch, PPL))
        print("valid loss={}".format(np.mean(loss_batch)))
        print("PPL decrease={}".format(float(old_PPL - PPL)))
        # Preserve the best model
        if best_PPL > PPL:
            best_PPL = PPL
            torch.save(net.state_dict(), "cache/model.pt")
            torch.save(net, "cache/net.pkl")
        # Halve the learning rate when perplexity stops improving fast enough.
        if float(old_PPL - PPL) <= 1.0:
            learning_rate /= 2
            print("halved lr:{}".format(learning_rate))
        old_PPL = PPL
        ##################################################
        #################### Training ####################
        net.train()
        # Re-create the optimizer so the (possibly halved) lr takes effect.
        optimizer = optim.SGD(net.parameters(),
                              lr=learning_rate,
                              momentum=0.85)
        # split the first dim
        input_generator = batch_generator(train_input, opt.lstm_batch_size)
        label_generator = batch_generator(train_label, opt.lstm_batch_size*opt.lstm_seq_len)
        for t in range(num_iter_per_epoch):
            batch_input = next(input_generator)
            batch_label = next(label_generator)
            # detach hidden state of LSTM from last batch
            hidden = [state.detach() for state in hidden]
            output, hidden = net(to_var(batch_input), hidden)
            # [num_word, vocab_size]
            loss = criterion(output, to_var(batch_label))
            net.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(net.parameters(), 5, norm_type=2)
            optimizer.step()
            if (t+1) % 100 == 0:
                print("[epoch {} step {}] train loss={}, Perplexity={}".format(epoch+1,
                      t+1, float(loss.data), float(np.exp(loss.data))))
    torch.save(net.state_dict(), "cache/model.pt")
    print("Training finished.")
################################################################
if __name__ == "__main__":
    # Hyper-parameters for the character-aware language model.
    word_embed_dim = 300
    char_embedding_dim = 15
    # Build (or reuse) the cached word/char dictionaries.
    if not os.path.exists("cache/prep.pt"):
        preprocess()
    # Fixed the misspelled local ("objetcs") and the non-idiomatic
    # "... is False" checks from the original.
    prep_objects = torch.load("cache/prep.pt")
    word_dict = prep_objects["word_dict"]
    char_dict = prep_objects["char_dict"]
    reverse_word_dict = prep_objects["reverse_word_dict"]
    max_word_len = prep_objects["max_word_len"]
    num_words = len(word_dict)
    print("word/char dictionary built. Start making inputs.")
    if not os.path.exists("cache/data_sets.pt"):
        train_text = read_data("./train.txt")
        valid_text = read_data("./valid.txt")
        test_text = read_data("./test.txt")
        train_set = np.array(text2vec(train_text, char_dict, max_word_len))
        valid_set = np.array(text2vec(valid_text, char_dict, max_word_len))
        test_set = np.array(text2vec(test_text, char_dict, max_word_len))
        # Labels are next-word index in word_dict with the same length as inputs
        train_label = np.array([word_dict[w] for w in train_text[1:]] + [word_dict[train_text[-1]]])
        valid_label = np.array([word_dict[w] for w in valid_text[1:]] + [word_dict[valid_text[-1]]])
        test_label = np.array([word_dict[w] for w in test_text[1:]] + [word_dict[test_text[-1]]])
        category = {"tdata": train_set, "vdata": valid_set, "test": test_set,
                    "trlabel": train_label, "vlabel": valid_label, "tlabel": test_label}
        torch.save(category, "cache/data_sets.pt")
    else:
        data_sets = torch.load("cache/data_sets.pt")
        train_set = data_sets["tdata"]
        valid_set = data_sets["vdata"]
        test_set = data_sets["test"]
        train_label = data_sets["trlabel"]
        valid_label = data_sets["vlabel"]
        test_label = data_sets["tlabel"]
    DataTuple = namedtuple("DataTuple",
                           "train_input train_label valid_input valid_label test_input test_label")
    data = DataTuple(train_input=train_set,
                     train_label=train_label,
                     valid_input=valid_set,
                     valid_label=valid_label,
                     test_input=test_set,
                     test_label=test_label)
    print("Loaded data sets. Start building network.")
    USE_GPU = True
    cnn_batch_size = 700
    lstm_seq_len = 35
    lstm_batch_size = 20
    # cnn_batch_size == lstm_seq_len * lstm_batch_size
    net = charLM(char_embedding_dim,
                 word_embed_dim,
                 num_words,
                 len(char_dict),
                 use_gpu=USE_GPU)
    # uniform_ is the in-place initializer; nn.init.uniform is deprecated.
    for param in net.parameters():
        nn.init.uniform_(param.data, -0.05, 0.05)
    Options = namedtuple("Options", [
        "cnn_batch_size", "init_lr", "lstm_seq_len",
        "max_word_len", "lstm_batch_size", "epochs",
        "word_embed_dim"])
    opt = Options(cnn_batch_size=lstm_seq_len*lstm_batch_size,
                  init_lr=1.0,
                  lstm_seq_len=lstm_seq_len,
                  max_word_len=max_word_len,
                  lstm_batch_size=lstm_batch_size,
                  epochs=35,
                  word_embed_dim=word_embed_dim)
    print("Network built. Start training.")
    # You can stop training anytime with "ctrl+C".
    try:
        train(net, data, opt)
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early')
    torch.save(net, "cache/net.pkl")
    print("save net")
    test(net, data, opt)
74338529616 | from .IRremoteESP8266 import *
from .IRutils import *
ONCE = 0
# Constants
kHeader = 2 # Usual nr. of header entries.
kFooter = 2 # Usual nr. of footer (stop bits) entries.
kStartOffset = 1 # Usual rawbuf entry to start from.
def MS_TO_USEC(x):
    """Convert milli-seconds to micro-seconds."""
    usec = x * 1000
    return usec
# Marks tend to be 100us too long, and spaces 100us too short
# when received due to sensor lag.
kMarkExcess = 50
kRawBuf = 100 # Default length of raw capture buffer
kRepeat = UINT64_MAX
# Default min size of reported UNKNOWN messages.
kUnknownThreshold = 6
# receiver states
kIdleState = 2
kMarkState = 3
kSpaceState = 4
kStopState = 5
kTolerance = 25 # default percent tolerance in measurements.
kUseDefTol = 255 # Indicate to use the class default tolerance.
kRawTick = 2 # Capture tick to uSec factor.
RAWTICK = kRawTick # Deprecated. For legacy user code support only.
# How long (ms) before we give up wait for more data?
# Don't exceed kMaxTimeoutMs without a good reason.
# That is the capture buffers maximum value size. (UINT16_MAX / kRawTick)
# Typically messages/protocols tend to repeat around the 100ms timeframe,
# thus we should timeout before that to give us some time to try to decode
# before we need to start capturing a possible new message.
# Typically 15ms suits most applications. However, some protocols demand a
# higher value. e.g. 90ms for XMP-1 and some aircon units.
kTimeoutMs = 15 # In MilliSeconds.
TIMEOUT_MS = kTimeoutMs # For legacy documentation.
kMaxTimeoutMs = kRawTick * (UINT16_MAX / MS_TO_USEC(1))
# Use FNV hash algorithm: http:#isthe.com/chongo/tech/comp/fnv/#FNV-param
kFnvPrime32 = 16777619
kFnvBasis32 = 2166136261
# Which of the ESP32 timers to use by default. (0-3)
kDefaultESP32Timer = 3
# Hitachi AC is the current largest state size.
kStateSizeMax = kHitachiAc2StateLength
# Types
# information for the interrupt handler
class irparams_t(object):
    """Mutable state shared with the IR-receive interrupt handler.

    Fields: recvpin (pin for IR data from the detector), rcvstate (state
    machine), timer (state timer in 50uS ticks), bufsize (max. entries in
    the capture buffer), rawbuf (raw data), rawlen (entries used in rawbuf),
    overflow (buffer overflow indicator) and timeout (milliseconds before
    giving up). All start unset; IRrecv fills them in.
    """
    def __init__(self):
        self.recvpin = self.rcvstate = self.timer = None
        self.bufsize = self.rawbuf = self.rawlen = None
        self.overflow = self.timeout = None
# results from a data match
class match_result_t(object):
    """Outcome of matching raw capture data against an expected pattern:
    a success flag, the data found, and how many buffer positions were
    consumed."""
    def __init__(self):
        self.success = False
        self.data = self.used = None
# Classes
# Results returned from the decoder
class decode_results(object):
    """Results returned from the IR decoder.

    `value`/`address`/`command` carry simple-protocol results and are
    mutually exclusive with `state`, which carries multi-byte results
    (mirroring the union used in the C++ original to save memory).
    """
    def __init__(self):
        self.decode_type = None                          # e.g. NEC, SONY, RC5, UNKNOWN.
        self.value = self.address = self.command = None  # Decoded value / device address / command.
        self.state = [0] * kStateSizeMax                 # Multi-byte results.
        self.bits = None                                 # Number of bits in decoded value.
        self.rawbuf = None                               # Raw intervals in .5 us ticks.
        self.rawlen = None                               # Number of records in rawbuf.
        self.overflow = False                            # Capture buffer overflowed?
        self.repeat = False                              # Is the result a repeat code?
# main class for receiving IR
class IRrecv(object):
    def __init__(self, recvpin, bufsize=kRawBuf, timeout=kTimeoutMs, save_buffer=False): # Constructor
        """Class constructor.

        Args:
          recvpin: GPIO pin the IR receiver module's data pin is connected to.
          bufsize: Nr. of entries to have in the capture buffer. (Default: kRawBuf)
          timeout: Nr. of milli-Seconds of no signal before we stop capturing
            data. (Default: kTimeoutMs)
          save_buffer: Use a second (save) buffer to decode from. (Default: False)

        Returns:
          An IRrecv class object.
        """
        self.irparams = irparams_t()
        self.irparams.recvpin = recvpin
        self.irparams.bufsize = bufsize
        # Ensure we are going to be able to store all possible values in the
        # capture buffer: the timeout is capped at kMaxTimeoutMs.
        self.irparams.timeout = min(timeout, kMaxTimeoutMs)
        self.irparams.rawbuf = [0x0] * bufsize
        # If we have been asked to use a save buffer (for decoding), then create one.
        if save_buffer:
            self.irparams_save = irparams_t()
            self.irparams_save.rawbuf = [0x0] * bufsize
            # NOTE(review): "check we allocated the memory successfully" in the
            # C++ original has no Python equivalent -- allocation raises here.
        else:
            self.irparams_save = None
        self._unknown_threshold = kUnknownThreshold
        self._tolerance = kTolerance
def setTolerance(self, percent=kTolerance):
# Set the base tolerance percentage for matching incoming IR messages.
self._tolerance = min(percent, 100)
def getTolerance(self):
# Get the base tolerance percentage for matching incoming IR messages.
return self._tolerance
def decode(self, results, save=None):
# Decodes the received IR message.
# If the interrupt state is saved, we will immediately resume waiting
# for the next IR message to avoid missing messages.
# Note: There is a trade-off here. Saving the state means less time lost until
# we can receiving the next message vs. using more RAM. Choose appropriately.
#
# Args:
# results: A pointer to where the decoded IR message will be stored.
# save: A pointer to an irparams_t instance in which to save
# the interrupt's memory/state. NULL means don't save it.
# Returns:
# A boolean indicating if an IR message is ready or not.
# Proceed only if an IR message been received.
if self.irparams.rcvstate != kStopState:
return False
# Clear the entry we are currently pointing to when we got the timeout.
# i.e. Stopped collecting IR data.
# It's junk as we never wrote an entry to it and can only confuse decoding.
# This is done here rather than logically the best place in read_timeout()
# as it saves a few bytes of ICACHE_RAM as that routine is bound to an
# interrupt. decode() is not stored in ICACHE_RAM.
# Another better option would be to zero the entire irparams.rawbuf[] on
# resume() but that is a much more expensive operation compare to this.
self.irparams.rawbuf[self.irparams.rawlen] = 0
# If we were requested to use a save buffer previously, do so.
if save is None:
save = self.irparams_save
if not save.rawbuf:
# We haven't been asked to copy it so use the existing memory.
results.rawbuf = self.irparams.rawbuf
results.rawlen = self.irparams.rawlen
results.overflow = self.irparams.overflow
else:
self.copyIrParams(self.irparams, save) # Duplicate the interrupt's memory.
# Point the results at the saved copy.
results.rawbuf = save.rawbuf
results.rawlen = save.rawlen
results.overflow = save.overflow
# Reset any previously partially processed results.
results.decode_type = UNKNOWN
results.bits = 0
results.value = 0
results.address = 0
results.command = 0
results.repeat = False
print("Attempting Aiwa RC T501 decode")
# Try decodeAiwaRCT501() before decodeSanyoLC7461() & decodeNEC()
# because the protocols are similar. This protocol is more specific than
# those ones, so should got before them.
if self.decodeAiwaRCT501(results):
return True
print("Attempting Sanyo LC7461 decode")
# Try decodeSanyoLC7461() before decodeNEC() because the protocols are
# similar in timings & structure, but the Sanyo one is much longer than the
# NEC protocol (42 vs 32 bits) so this one should be tried first to try to
# reduce False detection as a NEC packet.
if self.decodeSanyoLC7461(results):
return True
print("Attempting Carrier AC decode")
# Try decodeCarrierAC() before decodeNEC() because the protocols are
# similar in timings & structure, but the Carrier one is much longer than the
# NEC protocol (3x32 bits vs 1x32 bits) so this one should be tried first to
# try to reduce False detection as a NEC packet.
if self.decodeCarrierAC(results):
return True
print("Attempting Pioneer decode")
# Try decodePioneer() before decodeNEC() because the protocols are
# similar in timings & structure, but the Pioneer one is much longer than the
# NEC protocol (2x32 bits vs 1x32 bits) so this one should be tried first to
# try to reduce False detection as a NEC packet.
if self.decodePioneer(results):
return True
print("Attempting NEC decode")
if self.decodeNEC(results):
return True
print("Attempting Sony decode")
if self.decodeSony(results):
return True
print("Attempting Mitsubishi decode")
if self.decodeMitsubishi(results):
return True
print("Attempting Mitsubishi AC decode")
if self.decodeMitsubishiAC(results):
return True
print("Attempting Mitsubishi2 decode")
if self.decodeMitsubishi2(results):
return True
print("Attempting RC5 decode")
if self.decodeRC5(results):
return True
print("Attempting RC6 decode")
if self.decodeRC6(results):
return True
print("Attempting RC-MM decode")
if self.decodeRCMM(results):
return True
# Fujitsu A/C needs to precede Panasonic and Denon as it has a short
# message which looks exactly the same as a Panasonic/Denon message.
print("Attempting Fujitsu A/C decode")
if self.decodeFujitsuAC(results):
return True
# Denon needs to precede Panasonic as it is a special case of Panasonic.
print("Attempting Denon decode")
if (
self.decodeDenon(results, kDenon48Bits) or
self.decodeDenon(results, kDenonBits) or
self.decodeDenon(results, kDenonLegacyBits)
):
return True
print("Attempting Panasonic decode")
if self.decodePanasonic(results):
return True
print("Attempting LG (28-bit) decode")
if self.decodeLG(results, kLgBits, True):
return True
print("Attempting LG (32-bit) decode")
# LG32 should be tried before Samsung
if self.decodeLG(results, kLg32Bits, True):
return True
# Note: Needs to happen before JVC decode, because it looks similar except
# with a required NEC-like repeat code.
print("Attempting GICable decode")
if self.decodeGICable(results):
return True
print("Attempting JVC decode")
if self.decodeJVC(results):
return True
print("Attempting SAMSUNG decode")
if self.decodeSAMSUNG(results):
return True
print("Attempting Samsung36 decode")
if self.decodeSamsung36(results):
return True
print("Attempting Whynter decode")
if self.decodeWhynter(results):
return True
print("Attempting DISH decode")
if self.decodeDISH(results):
return True
print("Attempting Sharp decode")
if self.decodeSharp(results):
return True
print("Attempting Coolix decode")
if self.decodeCOOLIX(results):
return True
print("Attempting Nikai decode")
if self.decodeNikai(results):
return True
# Kelvinator based-devices use a similar code to Gree ones, to avoid False
# matches this needs to happen before decodeGree().
print("Attempting Kelvinator decode")
if self.decodeKelvinator(results):
return True
print("Attempting Daikin decode")
if self.decodeDaikin(results):
return True
print("Attempting Daikin2 decode")
if self.decodeDaikin2(results):
return True
print("Attempting Daikin216 decode")
if self.decodeDaikin216(results):
return True
print("Attempting Toshiba AC decode")
if self.decodeToshibaAC(results):
return True
print("Attempting Midea decode")
if self.decodeMidea(results):
return True
print("Attempting Magiquest decode")
if self.decodeMagiQuest(results):
return True
# NOTE: Disabled due to poor quality.
# The Sanyo S866500B decoder is very poor quality & depricated.
# *IF* you are going to enable it, do it near last to avoid False positive
# matches.
print("Attempting Sanyo SA8650B decode")
if self.decodeSanyo(results):
return True
# Some devices send NEC-like codes that don't follow the True NEC spec.
# This should detect those. e.g. Apple TV remote etc.
# This needs to be done after all other codes that use strict and some
# other protocols that are NEC-like as well, as turning off strict may
# cause this to match other valid protocols.
print("Attempting NEC (non-strict) decode")
if self.decodeNEC(results, kNECBits, False):
results.decode_type = NEC_LIKE
return True
print("Attempting Lasertag decode")
if self.decodeLasertag(results):
return True
# Gree based-devices use a similar code to Kelvinator ones, to avoid False
# matches this needs to happen after decodeKelvinator().
print("Attempting Gree decode")
if self.decodeGree(results):
return True
print("Attempting Haier AC decode")
if self.decodeHaierAC(results):
return True
print("Attempting Haier AC YR-W02 decode")
if self.decodeHaierACYRW02(results):
return True
# HitachiAc424 should be checked before HitachiAC & HitachiAC2
print("Attempting Hitachi AC 424 decode")
if self.decodeHitachiAc424(results, kHitachiAc424Bits):
return True
# HitachiAC2 should be checked before HitachiAC
print("Attempting Hitachi AC2 decode")
if self.decodeHitachiAC(results, kHitachiAc2Bits):
return True
print("Attempting Hitachi AC decode")
if self.decodeHitachiAC(results, kHitachiAcBits):
return True
print("Attempting Hitachi AC1 decode")
if self.decodeHitachiAC(results, kHitachiAc1Bits):
return True
print("Attempting Whirlpool AC decode")
if self.decodeWhirlpoolAC(results):
return True
print("Attempting Samsung AC (extended) decode")
# Check the extended size first, as it should fail fast due to longer length.
if self.decodeSamsungAC(results, kSamsungAcExtendedBits, False):
return True
# Now check for the more common length.
print("Attempting Samsung AC decode")
if self.decodeSamsungAC(results, kSamsungAcBits):
return True
print("Attempting Electra AC decode")
if self.decodeElectraAC(results):
return True
print("Attempting Panasonic AC decode")
if self.decodePanasonicAC(results):
return True
print("Attempting Panasonic AC short decode")
if self.decodePanasonicAC(results, kPanasonicAcShortBits):
return True
print("Attempting Lutron decode")
if self.decodeLutron(results):
return True
print("Attempting MWM decode")
if self.decodeMWM(results):
return True
print("Attempting Vestel AC decode")
if self.decodeVestelAc(results):
return True
# Mitsubish112 and Tcl112 share the same decoder.
print("Attempting Mitsubishi112/TCL112AC decode")
if self.decodeMitsubishi112(results):
return True
print("Attempting Teco decode")
if self.decodeTeco(results):
return True
print("Attempting LEGOPF decode")
if self.decodeLegoPf(results):
return True
print("Attempting MITSUBISHIHEAVY (152 bit) decode")
if self.decodeMitsubishiHeavy(results, kMitsubishiHeavy152Bits):
return True
print("Attempting MITSUBISHIHEAVY (88 bit) decode")
if self.decodeMitsubishiHeavy(results, kMitsubishiHeavy88Bits):
return True
print("Attempting Argo decode")
if self.decodeArgo(results):
return True
print("Attempting SHARP_AC decode")
if self.decodeSharpAc(results):
return True
print("Attempting GOODWEATHER decode")
if self.decodeGoodweather(results):
return True
print("Attempting Inax decode")
if self.decodeInax(results):
return True
print("Attempting Trotec decode")
if self.decodeTrotec(results):
return True
print("Attempting Daikin160 decode")
if self.decodeDaikin160(results):
return True
print("Attempting Neoclima decode")
if self.decodeNeoclima(results):
return True
print("Attempting Daikin176 decode")
if self.decodeDaikin176(results):
return True
print("Attempting Daikin128 decode")
if self.decodeDaikin128(results):
return True
print("Attempting Amcor decode")
if self.decodeAmcor(results):
return True
print("Attempting Daikin152 decode")
if self.decodeDaikin152(results):
return True
print("Attempting Mitsubishi136 decode")
if self.decodeMitsubishi136(results):
return True
# Typically new protocols are added above this line.
# decodeHash returns a hash on any input.
# Thus, it needs to be last in the list.
# If you add any decodes, add them before this.
if self.decodeHash(results):
return True
# Throw away and start over
return False
def getBufSize(self):
# Obtain the maximum number of entries possible in the capture buffer.
# i.e. It's size.
return self.irparams.bufsize
def setUnknownThreshold(self, length):
# Set the minimum length we will consider for reporting UNKNOWN message types.
self._unknown_threshold = length
def match(self, measured, desired, tolerance=kUseDefTol, delta=0):
# Check if we match a pulse(measured) with the desired within
# +/-tolerance percent and/or +/- a fixed delta range.
#
# Args:
# measured: The recorded period of the signal pulse.
# desired: The expected period (in useconds) we are matching against.
# tolerance: A percentage expressed as an integer. e.g. 10 is 10%.
# delta: A non-scaling (+/-) error margin (in useconds).
#
# Returns:
# Boolean: True if it matches, False if it doesn't.
measured *= kRawTick # Convert to uSecs.
print("Matching: ")
print(self.ticksLow(desired, tolerance, delta))
print(" <= ")
print(measured)
print(" <= ")
print(self.ticksHigh(desired, tolerance, delta))
def matchMark(self, measured, desired, tolerance=kUseDefTol, excess=kMarkExcess):
# Check if we match a mark signal(measured) with the desired within
# +/-tolerance percent, after an expected is excess is added.
#
# Args:
# measured: The recorded period of the signal pulse.
# desired: The expected period (in useconds) we are matching against.
# tolerance: A percentage expressed as an integer. e.g. 10 is 10%.
# excess: Nr. of useconds.
#
# Returns:
# Boolean: True if it matches, False if it doesn't.
print("Matching MARK ")
print(measured * kRawTick)
print(" vs ")
print(desired)
print(" + ")
print(excess)
print(". ")
return self.match(measured, desired + excess, tolerance)
def matchSpace(self, measured, desired, tolerance=kUseDefTol, excess=kMarkExcess):
# Check if we match a space signal(measured) with the desired within
# +/-tolerance percent, after an expected is excess is removed.
#
# Args:
# measured: The recorded period of the signal pulse.
# desired: The expected period (in useconds) we are matching against.
# tolerance: A percentage expressed as an integer. e.g. 10 is 10%.
# excess: Nr. of useconds.
#
# Returns:
# Boolean: True if it matches, False if it doesn't.
print("Matching SPACE ")
print(measured * kRawTick)
print(" vs ")
print(desired)
print(" - ")
print(excess)
print(". ")
return self.match(measured, desired - excess, tolerance)
# These are called by decode
def _validTolerance(self, percentage):
# Convert the tolerance percentage into something valid.
return self._tolerance if percentage > 100 else percentage
@staticmethod
def copyIrParams(src, dst):
# Make a copy of the interrupt state & buffer data.
# Needed because irparams is marked as volatile, thus memcpy() isn't allowed.
# Only call this when you know the interrupt handlers won't modify anything.
# i.e. In kStopState.
#
# Args:
# src: Pointer to an irparams_t structure to copy from.
# dst: Pointer to an irparams_t structure to copy to.
# Save the pointer to the destination's rawbuf so we don't lose it as
# the for-loop/copy after this will overwrite it with src's rawbuf pointer.
# This isn't immediately obvious due to typecasting/different variable names.
dst_rawbuf_ptr = dst.rawbuf[:]
# Restore the buffer pointer
dst.rawbuf = dst_rawbuf_ptr
# Copy the rawbuf
for i in range(dst.bufsize):
dst.rawbuf[i] = src.rawbuf[i]
@staticmethod
def compare(oldval, newval):
# * -----------------------------------------------------------------------
# * hashdecode - decode an arbitrary IR code.
# * Instead of decoding using a standard encoding scheme
# * (e.g. Sony, NEC, RC5), the code is hashed to a 32-bit value.
# *
# * The algorithm: look at the sequence of MARK signals, and see if each one
# * is shorter (0), the same length (1), or longer (2) than the previous.
# * Do the same with the SPACE signals. Hash the resulting sequence of 0's,
# * 1's, and 2's to a 32-bit value. This will give a unique value for each
# * different code (probably), for most code systems.
# *
# * http:#arcfn.com/2010/01/using-arbitrary-remotes-with-arduino.html
# */
# Compare two tick values, returning 0 if newval is shorter,
# 1 if newval is equal, and 2 if newval is longer
# Use a tolerance of 20%
if newval < oldval * 0.8:
return 0
elif oldval < newval * 0.8:
return 2
else:
return 1
def ticksLow(self, usecs, tolerance=kUseDefTol, delta=0):
# Calculate the lower bound of the nr. of ticks.
#
# Args:
# usecs: Nr. of uSeconds.
# tolerance: Percent as an integer. e.g. 10 is 10%
# delta: A non-scaling amount to reduce usecs by.
# Returns:
# Nr. of ticks.
# max() used to ensure the result can't drop below 0 before the cast.
return (
max((usecs * (1.0 - self._validTolerance(tolerance) / 100.0) - delta), 0)
)
def ticksHigh(self, usecs, tolerance=kUseDefTol, delta=0):
# Calculate the upper bound of the nr. of ticks.
#
# Args:
# usecs: Nr. of uSeconds.
# tolerance: Percent as an integer. e.g. 10 is 10%
# delta: A non-scaling amount to increase usecs by.
# Returns:
# Nr. of ticks.
return (
(usecs * (1.0 + self._validTolerance(tolerance) / 100.0)) + 1 + delta
)
def matchAtLeast(self, measured, desired, tolerance=kUseDefTol, delta=0):
# Check if we match a pulse(measured) of at least desired within
# tolerance percent and/or a fixed delta margin.
#
# Args:
# measured: The recorded period of the signal pulse.
# desired: The expected period (in useconds) we are matching against.
# tolerance: A percentage expressed as an integer. e.g. 10 is 10%.
# delta: A non-scaling amount to reduce usecs by.
#
# Returns:
# Boolean: True if it matches, False if it doesn't.
measured *= kRawTick # Convert to uSecs.
print("Matching ATLEAST ")
print(measured)
print(" vs ")
print(desired)
print(". Matching: ")
print(measured)
print(" >= ")
print(self.ticksLow(min(desired, MS_TO_USEC(self.irparams.timeout)), tolerance, delta))
print(" [min(")
print(self.ticksLow(desired, tolerance, delta))
print(", ")
print(self.ticksLow(MS_TO_USEC(self.irparams.timeout), tolerance, delta))
print(")]")
# We really should never get a value of 0, except as the last value
# in the buffer. If that is the case, then assume infinity and return True.
if measured == 0:
return True
return measured >= ticksLow(min(desired, MS_TO_USEC(self.irparams.timeout)), tolerance, delta)
    def _matchGeneric(
        self,
        data_ptr,
        result_bits_ptr,
        result_bytes_ptr,
        use_bits,
        remaining,
        nbits,
        hdrmark,
        hdrspace,
        onemark,
        onespace,
        zeromark,
        zerospace,
        footermark,
        footerspace,
        atleast=False,
        tolerance=kUseDefTol,
        excess=kMarkExcess,
        MSBfirst=True
    ):
        # Match & decode a generic/typical IR message.
        # The data is stored in result_bits_ptr or result_bytes_ptr depending on flag
        # `use_bits`.
        # Values of 0 for hdrmark, hdrspace, footermark, or footerspace mean skip
        # that requirement.
        #
        # Args:
        #   data_ptr: A pointer to where we are at in the capture buffer.
        #   result_bits_ptr: A pointer to where to start storing the bits we decoded.
        #   result_bytes_ptr: A pointer to where to start storing the bytes we decoded.
        #   use_bits: A flag indicating if we are to decode bits or bytes.
        #   remaining: The size of the capture buffer are remaining.
        #   nbits: Nr. of data bits we expect.
        #   hdrmark: Nr. of uSeconds for the expected header mark signal.
        #   hdrspace: Nr. of uSeconds for the expected header space signal.
        #   onemark: Nr. of uSeconds in an expected mark signal for a '1' bit.
        #   onespace: Nr. of uSeconds in an expected space signal for a '1' bit.
        #   zeromark: Nr. of uSeconds in an expected mark signal for a '0' bit.
        #   zerospace: Nr. of uSeconds in an expected space signal for a '0' bit.
        #   footermark: Nr. of uSeconds for the expected footer mark signal.
        #   footerspace: Nr. of uSeconds for the expected footer space/gap signal.
        #   atleast: Is the match on the footerspace a matchAtLeast or matchSpace?
        #   tolerance: Percentage error margin to allow. (Def: kUseDefTol)
        #   excess: Nr. of useconds. (Def: kMarkExcess)
        #   MSBfirst: Bit order to save the data in. (Def: True)
        # Returns:
        #   A uint16_t: If successful, how many buffer entries were used. Otherwise 0.
        # If we are expecting byte sizes, check it's a factor of 8 or fail.
        if not use_bits and nbits % 8 != 0:
            return 0
        # Calculate how much remaining buffer is required.
        min_remaining = nbits * 2
        if hdrmark:
            min_remaining += 1
        if hdrspace:
            min_remaining += 1
        if footermark:
            min_remaining += 1
        # Don't need to extend for footerspace because it could be the end of message
        # Check if there is enough capture buffer to possibly have the message.
        if remaining < min_remaining:
            return 0 # Nope, so abort.
        offset = 0
        # Header
        # NOTE(review): offset is advanced BEFORE each lookup, so data_ptr[0] is
        # never examined — presumably mirrors the C++ kStartOffset of 1. Confirm
        # against the capture-buffer layout before changing.
        offset += 1
        if hdrmark and not self.matchMark(data_ptr[offset], hdrmark, tolerance, excess):
            return 0
        offset += 1
        if hdrspace and not self.matchSpace(data_ptr[offset], hdrspace, tolerance, excess):
            return 0
        # Data
        if use_bits: # Bits.
            # NOTE(review): the C++ original passed &data_ptr[offset] (a pointer);
            # here a single element is passed — verify what matchData expects.
            result = self.matchData(
                data_ptr[offset],
                nbits,
                onemark,
                onespace,
                zeromark,
                zerospace,
                tolerance,
                excess,
                MSBfirst
            )
            if not result.success:
                return 0
            # NOTE(review): result.data is an int; slicing it ([:]) and '+=' on a
            # caller-supplied "pointer" cannot emulate the C++ pointer write —
            # this looks like a broken mechanical port; verify callers.
            result_bits_ptr += result.data[:]
            offset += result.used
        else: # bytes
            data_used = self.matchBytes(
                data_ptr[offset],
                result_bytes_ptr,
                remaining - offset,
                # NOTE(review): true division yields a float byte count here;
                # the C++ original used integer division (nbits / 8 on ints).
                nbits / 8,
                onemark,
                onespace,
                zeromark,
                zerospace,
                tolerance,
                excess,
                MSBfirst
            )
            if not data_used:
                return 0
            offset += data_used
        # Footer
        offset += 1
        if footermark and not self.matchMark(data_ptr[offset], footermark, tolerance, excess):
            return 0
        # If we have something still to match & haven't reached the end of the buffer
        # NOTE(review): the elif fires even when footerspace is 0 or the buffer is
        # exhausted, unlike the nested C++ conditionals — confirm intended logic.
        if footerspace and offset < remaining and atleast:
            if not self.matchAtLeast(data_ptr[offset], footerspace, tolerance, excess):
                return 0
        elif not self.matchSpace(data_ptr[offset], footerspace, tolerance, excess):
            return 0
        offset += 1
        return offset
    def matchData(
        self,
        data_ptr,
        nbits,
        onemark,
        onespace,
        zeromark,
        zerospace,
        tolerance=kUseDefTol,
        excess=kMarkExcess,
        MSBfirst=True
    ):
        # Match & decode the typical data section of an IR message.
        # The data value is stored in the least significant bits reguardless of the
        # bit ordering requested.
        #
        # Args:
        #   data_ptr: A pointer to where we are at in the capture buffer.
        #   nbits: Nr. of data bits we expect.
        #   onemark: Nr. of uSeconds in an expected mark signal for a '1' bit.
        #   onespace: Nr. of uSeconds in an expected space signal for a '1' bit.
        #   zeromark: Nr. of uSeconds in an expected mark signal for a '0' bit.
        #   zerospace: Nr. of uSeconds in an expected space signal for a '0' bit.
        #   tolerance: Percentage error margin to allow. (Def: kUseDefTol)
        #   excess: Nr. of useconds. (Def: kMarkExcess)
        #   MSBfirst: Bit order to save the data in. (Def: True)
        # Returns:
        #   A match_result_t structure containing the success (or not), the data value,
        #   and how many buffer entries were used.
        result = match_result_t()
        result.success = False # Fail by default.
        result.data = 0
        result.used = 0
        while result.used < nbits * 2:
            # Is the bit a '1'?
            if (
                self.matchMark(data_ptr[0], onemark, tolerance, excess) and
                self.matchSpace(data_ptr[1], onespace, tolerance, excess)
            ):
                result.data = (result.data << 1) | 1
            elif (
                self.matchMark(data_ptr[0], zeromark, tolerance, excess) and
                self.matchSpace(data_ptr[1], zerospace, tolerance, excess)
            ):
                result.data <<= 1 # The bit is a '0'.
            else:
                if not MSBfirst:
                    # NOTE(review): true division gives a float bit count here;
                    # the C++ original used integer division (result.used / 2).
                    result.data = self.reverseBits(result.data, result.used / 2)
                return result # It's neither, so fail.
            result.used += 2
            # NOTE(review): this is a C pointer-advance idiom; if data_ptr is a
            # plain list, '+= 2' raises TypeError. Verify the type callers pass
            # (a custom pointer/view object, or this path is currently broken).
            data_ptr += 2
        result.success = True
        if not MSBfirst:
            result.data = self.reverseBits(result.data, nbits)
        return result
    def matchBytes(
        self,
        data_ptr,
        result_ptr,
        remaining,
        nbytes,
        onemark,
        onespace,
        zeromark,
        zerospace,
        tolerance=kUseDefTol,
        excess=kMarkExcess,
        MSBfirst=True
    ):
        # Match & decode the typical data section of an IR message.
        # The bytes are stored at result_ptr. The first byte in the result equates to
        # the first byte encountered, and so on.
        #
        # Args:
        #   data_ptr: A pointer to where we are at in the capture buffer.
        #   result_ptr: A pointer to where to start storing the bytes we decoded.
        #   remaining: The size of the capture buffer are remaining.
        #   nbytes: Nr. of data bytes we expect.
        #   onemark: Nr. of uSeconds in an expected mark signal for a '1' bit.
        #   onespace: Nr. of uSeconds in an expected space signal for a '1' bit.
        #   zeromark: Nr. of uSeconds in an expected mark signal for a '0' bit.
        #   zerospace: Nr. of uSeconds in an expected space signal for a '0' bit.
        #   tolerance: Percentage error margin to allow. (Def: kUseDefTol)
        #   excess: Nr. of useconds. (Def: kMarkExcess)
        #   MSBfirst: Bit order to save the data in. (Def: True)
        # Returns:
        #   A uint16_t: If successful, how many buffer entries were used. Otherwise 0.
        # Check if there is enough capture buffer to possibly have the desired bytes.
        if remaining < nbytes * 8 * 2:
            return 0 # Nope, so abort.
        offset = 0
        for byte_pos in range(nbytes):
            # NOTE(review): the C++ original passed &data_ptr[offset] (a pointer
            # into the buffer); passing the single element data_ptr[offset] here
            # cannot let matchData walk the buffer — verify against callers.
            result = self.matchData(
                data_ptr[offset],
                8,
                onemark,
                onespace,
                zeromark,
                zerospace,
                tolerance,
                excess,
                MSBfirst
            )
            if result.success is False:
                return 0 # Fail
            result_ptr[byte_pos] = result.data
            offset += result.used
        return offset
    def matchGeneric(
        self,
        data_ptr,
        result_ptr,
        remaining,
        nbits,
        hdrmark,
        hdrspace,
        onemark,
        onespace,
        zeromark,
        zerospace,
        footermark,
        footerspace,
        atleast=False,
        tolerance=kUseDefTol,
        excess=kMarkExcess,
        MSBfirst=True
    ):
        # Match & decode a generic/typical IR message, dispatching to
        # _matchGeneric in either bit mode or byte mode.
        #
        # NOTE(review): the C++ original had two overloads selected by the TYPE
        # of result_ptr (uint64_t* vs uint8_t*). Comparing the VALUE with 255
        # here is a mis-port: it inspects the data, not the destination kind,
        # and will misdispatch. Also 'NULL' is not a Python builtin — it must
        # be defined elsewhere in this module or this raises NameError. Verify.
        if result_ptr <= 255:
            # Byte mode: store decoded bytes into result_ptr.
            return self._matchGeneric(
                data_ptr,
                NULL,
                result_ptr,
                False,
                remaining,
                nbits,
                hdrmark,
                hdrspace,
                onemark,
                onespace,
                zeromark,
                zerospace,
                footermark,
                footerspace,
                atleast,
                tolerance,
                excess,
                MSBfirst
            )
        else:
            # Bit mode: store decoded bits into result_ptr.
            return self._matchGeneric(
                data_ptr,
                result_ptr,
                NULL,
                True,
                remaining,
                nbits,
                hdrmark,
                hdrspace,
                onemark,
                onespace,
                zeromark,
                zerospace,
                footermark,
                footerspace,
                atleast,
                tolerance,
                excess,
                MSBfirst
            )
def decodeHash(self, results):
# * Converts the raw code values into a 32-bit hash code.
# * Hopefully this code is unique for each button.
# * This isn't a "real" decoding, just an arbitrary value.
# */
# Require at least some samples to prevent triggering on noise
if results.rawlen < self._unknown_threshold:
return False
hsh = kFnvBasis32
# 'rawlen - 2' to avoid the look ahead from going out of bounds.
# Should probably be -3 to avoid comparing the trailing space entry,
# however it is left this way for compatibility with previously captured
# values.
for i in range(1, results.rawlen - 2):
value = self.compare(results.rawbuf[i], results.rawbuf[i + 2])
# Add value into the hash
hsh = (hsh * kFnvPrime32) ^ value
results.value = hsh & 0xFFFFFFFF
results.bits = results.rawlen / 2
results.address = 0
results.command = 0
results.decode_type = UNKNOWN
return True
def decodeNEC(self, results, nbits=kNECBits, strict=True):
from . import ir_NEC
return ir_NEC.decodeNEC(results, nbits, strict)
def decodeArgo(self, results, nbits=kArgoBits, strict=True):
from . import ir_Argo
return ir_Argo.decodeArgo(results, nbits, strict)
def decodeSony(self, results, nbits=kSonyMinBits, strict=False):
from . import ir_Sony
return ir_Sony.decodeSony(results, nbits, strict)
def decodeSanyo(self, results, nbits=kSanyoSA8650BBits, strict=False):
from . import ir_Sanyo
return ir_Sanyo.decodeSanyo(results, nbits, strict)
def decodeSanyoLC7461(self, results, nbits=kSanyoLC7461Bits, strict=True):
from . import ir_Sanyo
return ir_Sanyo.decodeSanyoLC7461(results, nbits, strict)
def decodeMitsubishi(self, results, nbits=kMitsubishiBits, strict=True):
from . import ir_Mitsubishi
return ir_Mitsubishi.decodeMitsubishi(results, nbits, strict)
def decodeMitsubishi2(self, results, nbits=kMitsubishiBits, strict=True):
from . import ir_Mitsubishi
return ir_Mitsubishi.decodeMitsubishi2(results, nbits, strict)
def decodeMitsubishiAC(self, results, nbits=kMitsubishiACBits, strict=False):
from . import ir_Mitsubishi
return ir_Mitsubishi.decodeMitsubishiAC(results, nbits, strict)
def decodeMitsubishi136(self, results, nbits=kMitsubishi136Bits, strict=True):
from . import ir_Mitsubishi
return ir_Mitsubishi.decodeMitsubishi136(results, nbits, strict)
def decodeMitsubishi112(self, results, nbits=kMitsubishi112Bits, strict=True):
from . import ir_Mitsubishi
return ir_Mitsubishi.decodeMitsubishi112(results, nbits, strict)
def decodeMitsubishiHeavy(self, results, nbits, strict=True):
from . import ir_MitsubishiHeavy
return ir_MitsubishiHeavy.decodeMitsubishiHeavy(results, nbits, strict)
def getRClevel(self, results, offset, used, bitTime, tolerance=kUseDefTol, excess=kMarkExcess, delta=0, maxwidth=3):
pass
def decodeRC5(self, results, nbits=kRC5XBits, strict=True):
from . import ir_RC5_RC6
return ir_RC5_RC6.decodeRC5(results, nbits, strict)
def decodeRC6(self, results, nbits=kRC6Mode0Bits,strict=False):
from . import ir_RC5_RC6
return ir_RC5_RC6.decodeRC6(results, nbits, strict)
def decodeRCMM(self, results, nbits=kRCMMBits, strict=False):
from . import ir_RCMM
return ir_RCMM.decodeRCMM(results, nbits, strict)
def decodePanasonic(self, results, nbits=kPanasonicBits, strict=False, manufacturer=kPanasonicManufacturer):
from . import ir_Panasonic
return ir_Panasonic.decodePanasonic(results, nbits, strict, manufacturer)
def decodeLG(self, results, nbits=kLgBits,strict=False):
from . import ir_LG
return ir_LG.decodeLG(results, nbits, strict)
def decodeInax(self, results, nbits=kInaxBits, strict=True):
from . import ir_Inax
return ir_Inax.decodeInax(results, nbits, strict)
def decodeJVC(self, results, nbits=kJvcBits, strict=True):
from . import ir_JVC
return ir_JVC.decodeJVC(results, nbits, strict)
def decodeSAMSUNG(self, results, nbits=kSamsungBits,strict=True):
from . import ir_Samsung
return ir_Samsung.decodeSAMSUNG(results, nbits, strict)
def decodeSamsung36(self, results, nbits=kSamsung36Bits,strict=True):
from . import ir_Samsung
return ir_Samsung.decodeSamsung36(results, nbits, strict)
def decodeSamsungAC(self, results, nbits=kSamsungAcBits,strict=True):
from . import ir_Samsung
return ir_Samsung.decodeSamsungAC(results, nbits, strict)
def decodeWhynter(self, results, nbits=kWhynterBits, strict=True):
from . import ir_Whynter
return ir_Whynter.decodeWhynter(results, nbits, strict)
def decodeCOOLIX(self, results, nbits=kCoolixBits, strict=True):
from . import ir_Coolix
return ir_Coolix.decodeCOOLIX(results, nbits, strict)
def decodeDenon(self, results, nbits=kDenonBits, strict=True):
from . import ir_Denon
return ir_Denon.decodeDenon(results, nbits, strict)
def decodeDISH(self, results, nbits=kDishBits, strict=True):
from . import ir_Dish
return ir_Dish.decodeDISH(results, nbits, strict)
def decodeSharp(self, results, nbits=kSharpBits, strict=True, expansion=True):
from . import ir_Sharp
return ir_Sharp.decodeSharp(results, nbits, strict, expansion)
def decodeSharpAc(self, results, nbits=kSharpAcBits, strict=True):
from . import ir_Sharp
return ir_Sharp.decodeSharpAc(results, nbits, strict)
def decodeAiwaRCT501(self, results, nbits=kAiwaRcT501Bits, strict=True):
from . import ir_Aiwa
return ir_Aiwa.decodeAiwaRCT501(results, nbits, strict)
def decodeNikai(self, results, nbits=kNikaiBits, strict=True):
from . import ir_Nikai
return ir_Nikai.decodeNikai(results, nbits, strict)
def decodeMagiQuest(self, results, nbits=kMagiquestBits, strict=True):
from . import ir_MagiQuest
return ir_MagiQuest.decodeMagiQuest(results, nbits, strict)
def decodeKelvinator(self, results, nbits=kKelvinatorBits, strict=True):
from . import ir_Kelvinator
return ir_Kelvinator.decodeKelvinator(results, nbits, strict)
def decodeDaikin(self, results, nbits=kDaikinBits, strict=True):
from . import ir_Daikin
return ir_Daikin.decodeDaikin(results, nbits, strict)
def decodeDaikin128(self, results, nbits=kDaikin128Bits, strict=True):
from . import ir_Daikin
return ir_Daikin.decodeDaikin128(results, nbits, strict)
def decodeDaikin152(self, results, nbits=kDaikin152Bits, strict=True):
from . import ir_Daikin
return ir_Daikin.decodeDaikin152(results, nbits, strict)
def decodeDaikin160(self, results, nbits=kDaikin160Bits, strict=True):
from . import ir_Daikin
return ir_Daikin.decodeDaikin160(results, nbits, strict)
def decodeDaikin176(self, results, nbits=kDaikin176Bits, strict=True):
from . import ir_Daikin
return ir_Daikin.decodeDaikin176(results, nbits, strict)
def decodeDaikin2(self, results, nbits=kDaikin2Bits, strict=True):
from . import ir_Daikin
return ir_Daikin.decodeDaikin2(results, nbits, strict)
def decodeDaikin216(self, results, nbits=kDaikin216Bits, strict=True):
from . import ir_Daikin
return ir_Daikin.decodeDaikin216(results, nbits, strict)
def decodeToshibaAC(self, results, nbytes=kToshibaACBits, strict=True):
from . import ir_Toshiba
return ir_Toshiba.decodeToshibaAC(results, nbytes, strict)
def decodeTrotec(self, results, nbits=kTrotecBits, strict=True):
from . import ir_Trotec
return ir_Trotec.decodeTrotec(results, nbits, strict)
def decodeMidea(self, results, nbits=kMideaBits, strict=True):
from . import ir_Midea
return ir_Midea.decodeMidea(results, nbits, strict)
def decodeFujitsuAC(self, results, nbits=kFujitsuAcBits, strict=False):
from . import ir_Fujitsu
return ir_Fujitsu.decodeFujitsuAC(results, nbits, strict)
def decodeLasertag(self, results, nbits=kLasertagBits, strict=True):
from . import ir_Lasertag
return ir_Lasertag.decodeLasertag(results, nbits, strict)
def decodeCarrierAC(self, results, nbits=kCarrierAcBits, strict=True):
from . import ir_Carrier
return ir_Carrier.decodeCarrierAC(results, nbits, strict)
def decodeGoodweather(self, results,nbits=kGoodweatherBits, strict=True):
from . import ir_Goodweather
return ir_Goodweather.decodeGoodweather(results, nbits, strict)
    def decodeGree(self, results, nbits=kGreeBits, strict=True):
        """Decode a Gree message; thin delegate to ir_Gree."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Gree
        return ir_Gree.decodeGree(results, nbits, strict)
    def decodeHaierAC(self, results, nbits=kHaierACBits, strict=True):
        """Decode a Haier A/C message; thin delegate to ir_Haier."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Haier
        return ir_Haier.decodeHaierAC(results, nbits, strict)
    def decodeHaierACYRW02(self, results, nbits=kHaierACYRW02Bits, strict=True):
        """Decode a Haier YR-W02 A/C message; thin delegate to ir_Haier."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Haier
        return ir_Haier.decodeHaierACYRW02(results, nbits, strict)
    def decodeHitachiAC(self, results, nbits=kHitachiAcBits, strict=True):
        """Decode a Hitachi A/C message; thin delegate to ir_Hitachi."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Hitachi
        return ir_Hitachi.decodeHitachiAC(results, nbits, strict)
    def decodeHitachiAC1(self, results, nbits=kHitachiAc1Bits, strict=True):
        """Decode a Hitachi AC1 message; thin delegate to ir_Hitachi."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Hitachi
        return ir_Hitachi.decodeHitachiAC1(results, nbits, strict)
    def decodeHitachiAc424(self, results, nbits=kHitachiAc424Bits, strict=True):
        """Decode a Hitachi AC424 message; thin delegate to ir_Hitachi."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Hitachi
        return ir_Hitachi.decodeHitachiAc424(results, nbits, strict)
    def decodeGICable(self, results, nbits=kGicableBits, strict=True):
        """Decode a G.I. Cable message; thin delegate to ir_GICable."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_GICable
        return ir_GICable.decodeGICable(results, nbits, strict)
    def decodeWhirlpoolAC(self, results, nbits=kWhirlpoolAcBits, strict=True):
        """Decode a Whirlpool A/C message; thin delegate to ir_Whirlpool."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Whirlpool
        return ir_Whirlpool.decodeWhirlpoolAC(results, nbits, strict)
    def decodeLutron(self, results, nbits=kLutronBits, strict=True):
        """Decode a Lutron message; thin delegate to ir_Lutron."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Lutron
        return ir_Lutron.decodeLutron(results, nbits, strict)
    def decodeElectraAC(self, results, nbits=kElectraAcBits, strict=True):
        """Decode an Electra A/C message; thin delegate to ir_Electra."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Electra
        return ir_Electra.decodeElectraAC(results, nbits, strict)
    def decodePanasonicAC(self, results, nbits=kPanasonicAcBits, strict=True):
        """Decode a Panasonic A/C message; thin delegate to ir_Panasonic."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Panasonic
        return ir_Panasonic.decodePanasonicAC(results, nbits, strict)
    def decodePioneer(self, results, nbits=kPioneerBits, strict=True):
        """Decode a Pioneer message; thin delegate to ir_Pioneer."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Pioneer
        return ir_Pioneer.decodePioneer(results, nbits, strict)
    def decodeMWM(self, results, nbits=24, strict=True):
        """Decode an MWM message; thin delegate to ir_MWM.

        NOTE(review): default is the literal 24 rather than a kXxxBits
        constant like the sibling decoders — confirm.
        """
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_MWM
        return ir_MWM.decodeMWM(results, nbits, strict)
    def decodeVestelAc(self, results, nbits=kVestelAcBits, strict=True):
        """Decode a Vestel A/C message; thin delegate to ir_Vestel."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Vestel
        return ir_Vestel.decodeVestelAc(results, nbits, strict)
    def decodeTeco(self, results, nbits=kTecoBits, strict=False):
        """Decode a Teco message; thin delegate to ir_Teco.

        Unlike most decoders here, *strict* defaults to False.
        """
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Teco
        return ir_Teco.decodeTeco(results, nbits, strict)
    def decodeLegoPf(self, results, nbits=kLegoPfBits, strict=True):
        """Decode a LEGO Power Functions message; thin delegate to ir_Lego."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Lego
        return ir_Lego.decodeLegoPf(results, nbits, strict)
    def decodeNeoclima(self, results, nbits=kNeoclimaBits, strict=True):
        """Decode a Neoclima message; thin delegate to ir_Neoclima."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Neoclima
        return ir_Neoclima.decodeNeoclima(results, nbits, strict)
    def decodeAmcor(self, results, nbits=kAmcorBits,strict=True):
        """Decode an Amcor message; thin delegate to ir_Amcor."""
        # lazy import — presumably avoids a circular import; confirm
        from . import ir_Amcor
        return ir_Amcor.decodeAmcor(results, nbits, strict)
| kdschlosser/IRDecoder | IRDecoder/IRrecv.py | IRrecv.py | py | 49,415 | python | en | code | 1 | github-code | 13 |
6838230393 | __author__ = 'Simon'
import random
import cProfile
def merge_sort(array):
    """Return a new list with the elements of *array* in ascending order.

    Classic top-down merge sort: split in half, sort each half
    recursively, then merge the two sorted halves.
    """
    # Base case must cover the empty list too: the original
    # `len(array) == 1` check recursed forever on [].
    if len(array) <= 1:
        return array
    # Floor division keeps the index an int; true division (`/`) yields
    # a float on Python 3 and breaks the slice below.
    mid = len(array) // 2
    left = merge_sort(array[:mid])
    right = merge_sort(array[mid:])
    return merge(left, right)


def merge(left, right):
    """Merge two already-sorted lists into one sorted list (stable).

    Ties are broken in favour of *left*, which preserves the relative
    order of equal elements.
    """
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these extends; the exhausted side's slice is empty.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
if __name__ == "__main__":
    # Profile a sort of 100k random ints; randint is inclusive, so
    # values fall in [0, n]. `range` works on both Python 2 and 3,
    # whereas the original `xrange` is Python 2 only.
    n = 100000
    a = [random.randint(0, n) for _ in range(n)]
    cProfile.run('merge_sort(a)')
| sosimon/python | merge_sort.py | merge_sort.py | py | 1,055 | python | en | code | 0 | github-code | 13 |
13058946855 | import os
import os.path
import numpy as np
from pdb import set_trace
class TestIOHandler:
    """Handles all file I/O for reference and test calculations.

    Attributes
    ----------
    tester : StaticTest/EOSTest etc type object
        The CONQUEST test object
    ref : bool
        Toggle between reference calculation (True) and test calculation (False)
    ref_dir : str
        Directory for reference calculations and results
    test_dir : str
        Directory for test calculations and results
    ref_path : str
        Path to reference data file
    test_path : str
        Path to test data file
    """

    def __init__(self, test_object, ref=False):
        """Constructor for TestIOHandler.

        Parameters
        ----------
        test_object : Test object
            of class StaticTest, EOSTest etc
        ref : bool (default False)
            Whether to do the reference calculation (True) or test calculation (False)
        """
        self.tester = test_object
        self.ref = ref
        self.ref_dir = "reference"
        self.test_dir = "test"
        # Fetch the name once and reuse it. (The original assigned the
        # *unbound method* `self.tester.get_name` to a dead local here.)
        name = self.tester.get_name()
        self.ref_path = os.path.join(self.ref_dir, name + ".ref")
        self.test_path = os.path.join(self.test_dir, name + ".dat")
        # Only the directory that will actually be written to is created.
        if ref:
            self.mkdir(self.ref_dir)
        else:
            self.mkdir(self.test_dir)

    def mkdir(self, dir):
        """Make a directory if it doesn't already exist."""
        if not os.path.isdir(dir):
            os.mkdir(dir)

    def chdir(self, dir=None):
        """Change directory, saving the base directory for switching back.

        With an argument, remember the current working directory and enter
        *dir*; with no argument, return to the previously saved directory.
        """
        if dir:
            self.basedir = os.getcwd()
            os.chdir(dir)
        else:
            os.chdir(self.basedir)

    def set_ion_path(self, ion_path, basis):
        """Prefix every per-species 'file' entry in *basis* with *ion_path*.

        Mutates *basis* in place; species without a 'file' key are skipped.
        """
        for species in basis:
            if 'file' in basis[species]:
                basis[species]['file'] = os.path.join(ion_path, basis[species]['file'])

    def run_test(self, grid_cutoff, xc, kpts, basis, flags=None):
        """Run the test.

        If ref = True, do the reference calculation and store the results,
        otherwise read the reference results and compare them against the
        test calculation.

        Parameters
        ----------
        grid_cutoff : float
            Grid cutoff in Hartrees
        xc : str
            Exchange-correlation functional (LDA, PBE, etc.)
        kpts : list/dictionary
            K-points in list (Monkhorst-Pack mesh) or dictionary (see ASE documentation)
        basis : dictionary/str
            Either a dictionary of Conquest flags or a basis size
            (minimal, small, medium, large)
        flags : dictionary, optional
            A dictionary of CONQUEST input flags to pass to TestObject.calculate.
            Defaults to None (no extra flags) — avoids a mutable default argument.
        """
        flags = {} if flags is None else flags
        if self.ref:
            if os.path.isfile(self.ref_path):
                print(f'{self.ref_path} exists, skipping calculation')
            else:
                self.chdir(self.ref_dir)
                self.tester.calculate(grid_cutoff, xc, kpts, basis, **flags)
                self.chdir()
                self.tester.write(self.ref_path)
        else:
            self.chdir(self.test_dir)
            self.tester.calculate(grid_cutoff, xc, kpts, basis, **flags)
            self.chdir()
            self.tester.read(self.ref_path)
            self.tester.compare()
            self.tester.write(self.test_path)
| Paraquat/ConquestTest | iohandler.py | iohandler.py | py | 3,073 | python | en | code | 0 | github-code | 13 |
30583201938 | '''
'''
from colors_definitions import *
import IMP
import IMP.display, IMP.core, IMP.atom
#
# NEEDS : kaki, light_orange, light_green
#
#
#
#
#protein_color={}
## CORE ------
#protein_color["p_8"] = IMP.display.Color(162/255.0 ,205/255.0 , 90/255.0) # DarkOliveGreen_3
#protein_color["p_52"] = IMP.display.Color(112/255.0 ,219/255.0 ,147/255.0) # Aquamarine
#protein_color["p_34"] = IMP.display.Color(152/255.0 ,245/255.0 ,255/255.0) # CadetBlue_1
#protein_color["p_44"] = IMP.display.Color( 0/255.0 ,191/255.0 ,255/255.0) # DeepSkyBlue
#protein_color["p_62"] = IMP.display.Color(123/255.0 ,104/255.0 ,238/255.0) # MediumSlateBlue
## Glue ------
#protein_color["XPB"] = IMP.display.Color(255/255.0,255/255.0,0) # yellow_1
#protein_color["XPD"] = IMP.display.Color(238/255.0,200/255.0,0) # orange-yellow
## CAK ------
#protein_color["MAT_1"] = IMP.display.Color(1.,1.,1.) # red
#protein_color["CDK_7"] = IMP.display.Color(255/255.0,0,255/255.0) # fuchsia
#protein_color["CyclinH"] = IMP.display.Color(238/255.0,106/255.0,80/255.0) # coral_2
class ModelRenderer:
    """Renders the subunits of a ModelInfo object to Pymol geometry files.

    Holds one IMP color per subunit and writes each configuration of a
    ConfigurationSet as a frame in a Pymol (.pym) file.

    NOTE: uses dict.has_key/iteritems, i.e. Python 2 only.
    """
    def __init__(self,mi):
        """
        @param mi: a ModelInfo object"""
        self.__mi = mi
        self.__colors_s = {} # subunit colors
        self.__colors_s_b = {} # subunit beads colors (currently unused)
    def set_subunit_color(self,subunit_name,imp_color):
        """Assign *imp_color* to *subunit_name*.

        Silently ignored when the subunit is unknown to the ModelInfo.
        """
        if self.__mi.has_key(subunit_name) :
            self.__colors_s[subunit_name] = imp_color
    def set_subunit_colors(self,subunit_color_dict):
        """Assign colors from a {subunit_name: imp_color} mapping."""
        for subunit_name,imp_color in subunit_color_dict.iteritems():
            self.set_subunit_color(subunit_name, imp_color)
#    def set_subunit_bead_color(self,subunit_name,bead_number,color):
    def __is_state_displayable(self):
        """
        mainly check if I have a color for each of my subunits
        """
        return (set(self.__colors_s.keys()) == set(self.__mi.keys()))
    def __init_pymol_display_rendering(self):
        """
        returns a list of hierarchy geometries corresponding to the actual state of the ModelRepresentation
        """
        geoms=[]
        for subunit_name,subunit_info in self.__mi.iteritems():
#            h = subunit_info.get_hierarchy()
            h = subunit_info.get_beads_hierarchy()
#            h = subunit_info.get_linkers_hierarchy()
            subunit_color = self.__colors_s[subunit_name]
            # IMP moved HierarchyGeometry between modules across versions;
            # fall back to IMP.display when IMP.atom lacks it.
            try :
                geom = IMP.atom.HierarchyGeometry(h)
            except :
                geom = IMP.display.HierarchyGeometry(h)
            geom.set_color(subunit_color)
            geoms.append(geom)
        return geoms
    def __init_pymol_display_rendering_linkers(self):
        """
        returns a list of hierarchy geometries for the linkers, drawn at
        half the subunit color's intensity.
        """
        geoms=[]
        for subunit_name,subunit_info in self.__mi.iteritems():
            h = subunit_info.get_linkers_hierarchy()
            # NOTE(review): the inner loop builds a geometry from the whole
            # hierarchy `h` once per child `hl` (hl itself is unused), so the
            # same geometry is duplicated once per child — confirm intent.
            for hl in h.get_children() :
#                bpl = [ IMP.core.XYZR.decorate_particle(p) for p in subunit_info.get_particles() ]
#                lpl = [ IMP.core.XYZR.decorate_particle(p) for p in subunit_info.get_linker_particles() ]
                subunit_color = self.__colors_s[subunit_name]
                subunit_color = IMP.display.Color(subunit_color.get_red()/2.,subunit_color.get_green()/2.,subunit_color.get_blue()/2.)
#                subunit_color=IMP.display.Color(255/255.,248/255.,198/255.)
                geom = IMP.display.HierarchyGeometry(h)
                geom.set_color(subunit_color)
                geoms.append(geom)
        return geoms
    def write_configuration_set_to_pymol_file(self,cs,fileName):
        """
        @param cs: a ConfigurationSet or MyConfigurationDet object
        @param fileName: guess what...
        """
        # for each of the configurations, dump it to a file to view in pymol
        gs = self.__init_pymol_display_rendering()
        gs.extend(self.__init_pymol_display_rendering_linkers())
        w= IMP.display.PymolWriter(fileName)
#        w= IMP.display.ChimeraWriter(fileName+"%1%.py")
        for i in range(0, cs.get_number_of_configurations()):
            cs.load_configuration(i)
            w.set_frame(i)
            for g in gs:
                w.add_geometry(g)
        # deleting the writer flushes and closes the output file
        del w
# w.do_close()
class PredifinedColors(dict):
def __init__(self) :
self["black"] = [0,0,0]
self["gray_0"] = [21,5,23]
self["gray_18"] = [37,5,23]
self["gray_21"] = [43,27,23]
self["gray_23"] = [48,34,23]
self["gray_24"] = [48,34,38]
self["gray_25"] = [52,40,38]
self["gray_26"] = [52,40,44]
self["gray_27"] = [56,45,44]
self["gray_28"] = [59,49,49]
self["gray_29"] = [62,53,53]
self["gray_30"] = [65,56,57]
self["gray_31"] = [65,56,60]
self["gray_32"] = [70,62,63]
self["gray_34"] = [74,67,68]
self["gray_35"] = [76,70,70]
self["gray_36"] = [78,72,72]
self["gray_37"] = [80,74,75]
self["gray_38"] = [84,78,79]
self["gray_39"] = [86,80,81]
self["gray_40"] = [89,84,84]
self["gray_41"] = [92,88,88]
self["gray_42"] = [95,90,89]
self["gray_43"] = [98,93,93]
self["gray_44"] = [100,96,96]
self["gray_45"] = [102,99,98]
self["gray_46"] = [105,101,101]
self["gray_47"] = [109,105,104]
self["gray_48"] = [110,106,107]
self["gray_49"] = [114,110,109]
self["gray_50"] = [116,113,112]
self["gray"] = [115,111,110]
self["white"] = [255,255,255]
self["blue"] = [0,0,255]
self["slate_gray_4"] = [97,109,126]
self["slate_gray"] = [101,115,131]
self["light_steel_blue_4"] = [100,109,126]
self["light_slate_gray"] = [109,123,141]
self["cadet_blue"] = [95,158,160]
self["cadet_blue_1"] = [152,245,255]
self["cadet_blue_2"] = [142,229,238]
self["cadet_blue_3"] = [119,191,199]
self["cadet_blue_4"] = [76,120,126]
self["dark_slate_gray_4"] = [76,125,126]
self["thistle_4"] = [128,109,126]
self["medium_slate_blue"] = [94,90,128]
self["medium_purple_4"] = [78,56,126]
self["midnight_blue"] = [21,27,84]
self["dark_slate_blue"] = [43,56,86]
self["dark_slate_gray"] = [37,56,60]
self["dim_gray"] = [70,62,65]
self["cornflower_blue"] = [21,27,141]
self["royal_blue_4"] = [21,49,126]
self["slate_blue_4"] = [52,45,126]
self["royal_blue"] = [43,96,222]
self["royal_blue_1"] = [48,110,255]
self["royal_blue_2"] = [43,101,236]
self["royal_blue_3"] = [37,84,199]
self["deep_sky_blue"] = [59,185,255]
self["deep_sky_blue_2"] = [56,172,236]
self["slate_blue"] = [53,126,199]
self["deep_sky_blue_3"] = [48,144,199]
self["deep_sky_blue_4"] = [37,88,126]
self["dodger_blue"] = [21,137,255]
self["dodger_blue_2"] = [21,125,236]
self["dodger_blue_3"] = [21,105,199]
self["dodger_blue_4"] = [21,62,126]
self["steel_blue_4"] = [43,84,126]
self["steel_blue"] = [72,99,160]
self["slate_blue_2"] = [105,96,236]
self["violet"] = [141,56,201]
self["medium_purple_3"] = [122,93,199]
self["medium_purple"] = [132,103,215]
self["medium_purple_2"] = [145,114,236]
self["medium_purple_1"] = [158,123,255]
self["light_steel_blue"] = [114,143,206]
self["steel_blue_3"] = [72,138,199]
self["steel_blue_2"] = [86,165,236]
self["steel_blue_1"] = [92,179,255]
self["sky_blue_3"] = [101,158,199]
self["sky_blue_4"] = [65,98,126]
self["slate_blue"] = [115,124,161]
self["slate_blue"] = [115,124,161]
self["slate_gray_3"] = [152,175,199]
self["violet_red"] = [246,53,138]
self["violet_red_1"] = [246,53,138]
self["violet_red_2"] = [228,49,127]
self["deep_pink"] = [245,40,135]
self["deep_pink_2"] = [228,40,124]
self["deep_pink_3"] = [193,34,103]
self["deep_pink_4"] = [125,5,63]
self["medium_violet_red"] = [202,34,107]
self["violet_red_3"] = [193,40,105]
self["firebrick"] = [128,5,23]
self["violet_red_4"] = [125,5,65]
self["maroon_4"] = [125,5,82]
self["maroon"] = [129,5,65]
self["maroon_3"] = [193,34,131]
self["maroon_2"] = [227,49,157]
self["maroon_1"] = [245,53,170]
self["magenta"] = [255,0,255]
self["magenta_1"] = [244,51,255]
self["magenta_2"] = [226,56,236]
self["magenta_3"] = [192,49,199]
self["medium_orchid"] = [176,72,181]
self["medium_orchid_1"] = [212,98,255]
self["medium_orchid_2"] = [196,90,236]
self["medium_orchid_3"] = [167,74,199]
self["medium_orchid_4"] = [106,40,126]
self["purple"] = [142,53,239]
self["purple_1"] = [137,59,255]
self["purple_2"] = [127,56,236]
self["purple_3"] = [108,45,199]
self["purple_4"] = [70,27,126]
self["dark_orchid_4"] = [87,27,126]
self["dark_orchid"] = [125,27,126]
self["dark_violet"] = [132,45,206]
self["dark_orchid_3"] = [139,49,199]
self["dark_orchid_2"] = [162,59,236]
self["dark_orchid_1"] = [176,65,255]
self["plum_4"] = [126,88,126]
self["pale_violet_red"] = [209,101,135]
self["pale_violet_red_1"] = [247,120,161]
self["pale_violet_red_2"] = [229,110,148]
self["pale_violet_red_3"] = [194,90,124]
self["pale_violet_red_4"] = [126,53,77]
self["plum"] = [185,59,143]
self["plum_1"] = [249,183,255]
self["plum_2"] = [230,169,236]
self["plum_3"] = [195,142,199]
self["thistle"] = [210,185,211]
self["thistle_3"] = [198,174,199]
self["lavender_blush_2"] = [235,221,226]
self["lavender_blush_3"] = [200,187,190]
self["thistle_2"] = [233,207,236]
self["thistle_1"] = [252,223,255]
self["lavender"] = [227,228,250]
self["lavender_blush"] = [253,238,244]
self["light_steel_blue_1"] = [198,222,255]
self["light_blue"] = [173,223,255]
self["light_blue_1"] = [189,237,255]
self["light_cyan"] = [224,255,255]
self["slate_gray_1"] = [194,223,255]
self["slate_gray_2"] = [180,207,236]
self["light_steel_blue_2"] = [183,206,236]
self["turquoise_1"] = [82,243,255]
self["cyan"] = [0,255,255]
self["cyan_1"] = [87,254,255]
self["cyan_2"] = [80,235,236]
self["turquoise_2"] = [78,226,236]
self["medium_turquoise"] = [72,204,205]
self["turquoise"] = [67,198,219]
self["dark_slate_gray_1"] = [154,254,255]
self["dark_slate_gray_2"] = [142,235,236]
self["dark_slate_gray_3"] = [120,199,199]
self["cyan_3"] = [70,199,199]
self["turquoise_3"] = [67,191,199]
self["pale_turquoise_3"] = [146,199,199]
self["light_blue_2"] = [175,220,236]
self["dark_turquoise"] = [59,156,156]
self["cyan_4"] = [48,125,126]
self["light_sea_green"] = [62,169,159]
self["light_sky_blue"] = [130,202,250]
self["light_sky_blue_2"] = [160,207,236]
self["light_sky_blue_3"] = [135,175,199]
self["sky_blue"] = [130,202,255]
self["sky_blue_2"] = [121,186,236]
self["light_sky_blue_4"] = [86,109,126]
self["sky_blue"] = [102,152,255]
self["light_slate_blue"] = [115,106,255]
self["light_cyan_2"] = [207,236,236]
self["light_cyan_3"] = [175,199,199]
self["light_cyan_4"] = [113,125,125]
self["light_blue_3"] = [149,185,199]
self["light_blue_4"] = [94,118,126]
self["pale_turquoise_4"] = [94,125,126]
self["dark_sea_green_4"] = [97,124,88]
self["medium_aquamarine"] = [52,135,129]
self["medium_sea_green"] = [48,103,84]
self["sea_green"] = [78,137,117]
self["dark_green"] = [37,65,23]
self["sea_green_4"] = [56,124,68]
self["forest_green"] = [78,146,88]
self["medium_forest_green"] = [52,114,53]
self["spring_green_4"] = [52,124,44]
self["dark_olive_green_4"] = [102,124,38]
self["chartreuse_4"] = [67,124,23]
self["green_4"] = [52,124,23]
self["medium_spring_green"] = [52,128,23]
self["spring_green"] = [74,160,44]
self["lime_green"] = [65,163,23]
self["spring_green"] = [74,160,44]
self["dark_sea_green"] = [139,179,129]
self["dark_sea_green_3"] = [153,198,142]
self["green_3"] = [76,196,23]
self["chartreuse_3"] = [108,196,23]
self["yellow_green"] = [82,208,23]
self["spring_green_3"] = [76,197,82]
self["sea_green_3"] = [84,197,113]
self["spring_green_2"] = [87,233,100]
self["spring_green_1"] = [94,251,110]
self["sea_green_2"] = [100,233,134]
self["sea_green_1"] = [106,251,146]
self["dark_sea_green_2"] = [181,234,170]
self["dark_sea_green_1"] = [195,253,184]
self["green"] = [0,255,0]
self["lawn_green"] = [135,247,23]
self["green_1"] = [95,251,23]
self["green_2"] = [89,232,23]
self["chartreuse_2"] = [127,232,23]
self["chartreuse"] = [138,251,23]
self["green_yellow"] = [177,251,23]
self["dark_olive_green_1"] = [204,251,93]
self["dark_olive_green_2"] = [188,233,84]
self["dark_olive_green_3"] = [160,197,68]
self["yellow"] = [255,255,0]
self["yellow_1"] = [255,252,23]
self["khaki_1"] = [255,243,128]
self["khaki_2"] = [237,226,117]
self["goldenrod"] = [237,218,116]
self["gold_2"] = [234,193,23]
self["gold_1"] = [253,208,23]
self["goldenrod_1"] = [251,185,23]
self["goldenrod_2"] = [233,171,23]
self["gold"] = [212,160,23]
self["gold_3"] = [199,163,23]
self["goldenrod_3"] = [198,142,23]
self["khaki"] = [173,169,110]
self["khaki_3"] = [201,190,98]
self["khaki_4"] = [130,120,57]
self["dark_goldenrod"] = [175,120,23]
self["dark_goldenrod_1"] = [251,177,23]
self["dark_goldenrod_2"] = [232,163,23]
self["dark_goldenrod_3"] = [197,137,23]
self["sienna_1"] = [248,116,49]
self["sienna_2"] = [230,108,44]
self["orange"] = [255,165,0]
self["orange_1"] = [255,165,0]
self["orange_2"] = [238,154,0]
self["orange_3"] = [205,133,0]
self["orange_4"] = [139,90,0]
self["dark_orange"] = [248,128,23]
self["dark_orange_1"] = [248,114,23]
self["dark_orange_2"] = [229,103,23]
self["dark_orange_3"] = [195,86,23]
self["dark_orange_4"] = [126,49,23]
self["cadmium_orange"] = [255,97,3]
self["orange_red"] = [255,69,0]
self["orange_red_1"] = [255,69,0]
self["orange_red_2"] = [238,64,0]
self["orange_red_3"] = [205,55,0]
self["orange_red_4"] = [139,37,0]
self["sienna_3"] = [195,88,23]
self["sienna"] = [138,65,23]
self["sienna_4"] = [126,53,23]
self["indian_red_4"] = [126,34,23]
self["salmon_4"] = [126,56,23]
self["dark_goldenrod_4"] = [127,82,23]
self["gold_4"] = [128,101,23]
self["goldenrod_4"] = [128,88,23]
self["light_salmon_4"] = [127,70,44]
self["chocolate"] = [200,90,23]
self["coral_3"] = [195,74,44]
self["coral_2"] = [229,91,60]
self["coral"] = [247,101,65]
self["dark_salmon"] = [225,139,107]
self["salmon_1"] = [248,129,88]
self["salmon_2"] = [230,116,81]
self["salmon_3"] = [195,98,65]
self["light_salmon_3"] = [196,116,81]
self["light_salmon_2"] = [231,138,97]
self["light_salmon"] = [249,150,107]
self["sandy_brown"] = [238,154,77]
self["hot_pink"] = [246,96,171]
self["hot_pink_1"] = [246,101,171]
self["hot_pink_2"] = [228,94,157]
self["hot_pink_3"] = [194,82,131]
self["hot_pink_4"] = [125,34,82]
self["light_coral"] = [231,116,113]
self["indian_red_1"] = [247,93,89]
self["indian_red_2"] = [229,84,81]
self["indian_red_3"] = [194,70,65]
self["red"] = [255,0,0]
self["red_1"] = [246,34,23]
self["red_2"] = [228,27,23]
self["firebrick_1"] = [246,40,23]
self["firebrick_2"] = [228,34,23]
self["firebrick_3"] = [193,27,23]
self["pink"] = [250,175,190]
self["rosy_brown_1"] = [251,187,185]
self["rosy_brown_2"] = [232,173,170]
self["pink_2"] = [231,161,176]
self["light_pink"] = [250,175,186]
self["light_pink_1"] = [249,167,176]
self["light_pink_2"] = [231,153,163]
self["pink_3"] = [196,135,147]
self["rosy_brown_3"] = [197,144,142]
self["rosy_brown"] = [179,132,129]
self["light_pink_3"] = [196,129,137]
self["rosy_brown_4"] = [127,90,88]
self["light_pink_4"] = [127,78,82]
self["pink_4"] = [127,82,93]
self["lavender_blush_4"] = [129,118,121]
self["light_goldenrod_4"] = [129,115,57]
self["lemon_chiffon_4"] = [130,123,96]
self["lemon_chiffon_3"] = [201,194,153]
self["light_goldenrod_3"] = [200,181,96]
self["light_golden_2"] = [236,214,114]
self["light_goldenrod"] = [236,216,114]
self["light_goldenrod_1"] = [255,232,124]
self["lemon_chiffon_2"] = [236,229,182]
self["lemon_chiffon"] = [255,248,198]
self["light_goldenrod_yellow"] = [250,248,204]
self["fuchsia"] = [255,0,255]
self["aquamarine"] = [127,255,212]
self["aquamarine_1"] = [127,255,212]
self["aquamarine_2"] = [118,238,198]
self["aquamarine_3"] = [102,205,170]
self["aquamarine_4"] = [69,139,116]
    def get_color_names(self):
        """ returns the names of all registered colors
        """
        return self.keys()
    def get_color_by_name(self,color_name):
        """Return the [r, g, b] triplet registered under *color_name*.

        @param color_name: name of a registered color
        @raise KeyError: if *color_name* is not registered
        """
        return self[color_name]
def insert_color(self,color_name,rgb_triplet):
""" insert a novel named color in the present dictionnary
"""
self[color_name] = rgb_triplet
return self[color_name]
# def get_color_by_channels(self,r,g,b):
# """ returns the IMP color corresponding to the provided r g b channels
# @param r: red (int in [0-255])
# @param g: green (int in [0-255])
# @param b: blue (int in [0-255])
# """
# return IMP.display.Color(r/255.,g/255.,b/255.)
def get_IMP_color(r,g,b):
    """ returns the IMP color corresponding to the provided r g b channels
    @param r: red (int in [0-255])
    @param g: green (int in [0-255])
    @param b: blue (int in [0-255])
    """
    red = r/255.
    green = g/255.
    blue = b/255.
    return IMP.display.Color(red, green, blue)
#
#
#def init_display_rendering(h):
# gs=[]
# for i in range(h.get_number_of_children()):
## color= IMP.display.get_display_color(i)
# n= h.get_child(i)
# name= n.get_name()
# color = protein_color[name]
# g= IMP.display.HierarchyGeometry(n)
# g.set_color(color)
# gs.append(g)
# return gs
#
#def init_display_rendering_detailed(h):
# gs=[]
# for i in range(h.get_number_of_children()):
## color= IMP.display.get_display_color(i)
# n= h.get_child(i)
# name= n.get_name()
# color = protein_color[name]
# balls = []
# nb_beads = n.get_number_of_children()
# for bi in range(nb_beads) :
# g= IMP.display.HierarchyGeometry( n.get_child(bi) )
# coef = 1. - (.5*bi/nb_beads)
# bcolor = IMP.display.Color(
# coef * color.get_red(),
# coef * color.get_green(),
# coef * color.get_blue(),
# )
# g.set_color(bcolor)
# gs.append(g)
# return gs
#def get_conformations_aligned_on_selection(cs,selection):
# acs = IMP.ConfigurationSet(cs.get_model(),"aligned configurations")
# particles = selection.get_selected_particles()
# return acs
#def dump_to_pymol_file(cs,gs,fileName):
# # for each of the configuration, dump it to a file to view in pymol
# w= IMP.display.PymolWriter(fileName)
# for i in range(0, cs.get_number_of_configurations()):
# cs.load_configuration(i)
# w.set_frame(i)
# for g in gs:
# w.add_geometry(g)
# w.do_close()
| dbenlopers/SANDBOX | IMP/HGM-old/display.py | display.py | py | 25,380 | python | en | code | 0 | github-code | 13 |
def team_lineup(*args):
    """Group players by country and format a roster listing.

    Each argument is a (player, country) tuple. Countries with more
    players come first; ties are broken alphabetically by country name.
    Returns a string of the form "Country:\n  -Player\n..." (empty string
    for no arguments).
    """
    players_by_country = {}
    for player, country in args:
        # setdefault creates the list on first sight of a country.
        players_by_country.setdefault(country, []).append(player)
    ordered = sorted(players_by_country.items(), key=lambda kvp: (-len(kvp[1]), kvp[0]))
    lines = []
    for country, players in ordered:
        lines.append(f"{country}:")
        lines.extend(f"  -{player}" for player in players)
    # join + trailing newline matches the original `result +=` output;
    # O(n) instead of quadratic string concatenation.
    return "\n".join(lines) + "\n" if lines else ""
# Demonstration calls: three rosters of (player, country) pairs printed
# with an empty line between the formatted line-ups.
print(team_lineup(
    ("Harry Kane", "England"),
    ("Manuel Neuer", "Germany"),
    ("Raheem Sterling", "England"),
    ("Toni Kroos", "Germany"),
    ("Cristiano Ronaldo", "Portugal"),
    ("Thomas Muller", "Germany")))
print()
print(team_lineup(
    ("Lionel Messi", "Argentina"),
    ("Neymar", "Brazil"),
    ("Cristiano Ronaldo", "Portugal"),
    ("Harry Kane", "England"),
    ("Kylian Mbappe", "France"),
    ("Raheem Sterling", "England")))
print()
print(team_lineup(
    ("Harry Kane", "England"),
    ("Manuel Neuer", "Germany"),
    ("Raheem Sterling", "England"),
    ("Toni Kroos", "Germany"),
    ("Cristiano Ronaldo", "Portugal"),
    ("Thomas Muller", "Germany"),
    ("Bruno Fernandes", "Portugal"),
    ("Bernardo Silva", "Portugal"),
    ("Harry Maguire", "England")))
| lefcho/SoftUni | Python/SoftUni - Python Advanced/Exam/team_lineup.py | team_lineup.py | py | 1,316 | python | en | code | 0 | github-code | 13 |
import speech_recognition as sr
import random

# Canned replies (user-facing, Russian): greetings and film suggestions.
hello = ["Привет", "Здравствуй", "Позвольте вас поприветствовать!", "Разрешите вас приветствовать!"]
films = ["Крепкий орешек", "Назад в будущее", "Таксист", "Леон", "Богемская рапсодия", "Город грехов", "Мементо", "Отступники", "Деревня"]

recognizer = sr.Recognizer()
while True:
    with sr.Microphone(device_index=1) as source:
        print("Скажите что-нибудь...")
        audio = recognizer.listen(source)
    # recognize_google raises UnknownValueError on unintelligible audio and
    # RequestError when the service is unreachable; the original crashed
    # here instead of continuing to listen.
    try:
        speech = recognizer.recognize_google(audio, language="ru_RU")
    except sr.UnknownValueError:
        print("Не удалось распознать речь")
        continue
    except sr.RequestError:
        print("Сервис распознавания недоступен")
        continue
    print(f"Вы сказали: {speech}")
    if speech.lower() == "привет":
        hi = random.choice(hello)
        print(hi)
    if speech.lower() == "фильм":
        randomfilm = random.choice(films)
        print(randomfilm)
| Den4ik20020/modul4 | modul2/lesson7/dz2.py | dz2.py | py | 947 | python | ru | code | 0 | github-code | 13 |
3108074456 | """
The Program receives from the USER an INTEGER
and displays if it’s an ODD or EVEN number.
"""
# START Definition of FUNCTIONS
def valutaIntPositive(numero):
    """Return True when *numero* is a digit string other than "0".

    str.isdigit rejects signs, spaces and the empty string outright,
    so negative numbers and blank input are also rejected.
    """
    return numero.isdigit() and numero != "0"
def evenOrOdd(number):
    """Return "EVEN" for even integers and "ODD" otherwise."""
    return "EVEN" if number % 2 == 0 else "ODD"
# END Definition of FUNCTIONS
# Acquisition and Control of the DATA entered by the USER:
# re-prompt until valutaIntPositive accepts the input.
numberInt = input("Enter an INTEGER number: ")
numberIntPositive = valutaIntPositive(numberInt)
while not(numberIntPositive):
    print("Incorrect entry. Try again.")
    numberInt = input("Enter an INTEGER number: ")
    numberIntPositive = valutaIntPositive(numberInt)
# Conversion STR -> INT (safe: the loop above guarantees a digit string)
numberInt = int(numberInt)
# Valuation EVEN or ODD number
typeNumber = evenOrOdd(numberInt)
# Displaying the RESULT (formatted)
print("The NUMBER " + str(numberInt) + " is " + typeNumber)
| aleattene/python-workbook | chap_02/exe_035_even_odd.py | exe_035_even_odd.py | py | 943 | python | en | code | 1 | github-code | 13 |
23242837539 | import os
import numpy as np
import pandas as pd
import joblib
from sklearn.model_selection import (
StratifiedShuffleSplit,
cross_val_score,
cross_validate,
)
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import classification_report, accuracy_score, make_scorer
from setup import PATH, CONFIG
def select_model(path):
    """Interactively pick saved models (*.pkl files) from *path*.

    Lists every pickle in *path*, reads 1-based indices from stdin
    (whitespace-separated), and returns a list of (name, model) pairs
    with the '.pkl' suffix stripped from each name. Returns [] when the
    directory holds no pickles.
    """
    pickles = [i for i in os.listdir(path) if i[-4:] == ".pkl"]
    if not pickles:
        print("No models found.")
        return []
    print(f"{len(pickles)} models found. Select models to use.")
    for i, name in enumerate(pickles):
        print(f"{i+1}. {name[:-4]}")
    # 1-based user indices -> 0-based list indices below.
    select = map(int, input("> ").split())
    select_pickles = [pickles[i - 1] for i in select]
    models = [(i[:-4], joblib.load(os.path.join(path, i))) for i in select_pickles]
    return models
def scorer(y_true, y_pred):
    """Print a per-class classification report and return overall accuracy."""
    print(classification_report(y_true, y_pred))
    return accuracy_score(y_true, y_pred)
def evaluate(models, X, y):
    """Cross-validate each (name, model) pair on (X, y) and print mean scores.

    Uses 5-fold stratified shuffle splits with a fixed seed so runs are
    reproducible.
    """
    cv = StratifiedShuffleSplit(n_splits=5, random_state=42)
    scoring = ["accuracy", "precision_micro", "recall_micro", "f1_micro", "roc_auc_ovr"]
    for name, model in models:
        print(f"Running {name}...")
        ovr = OneVsRestClassifier(model, n_jobs=-1)
        # NOTE(review): cross_validate scores the bare `model`, while the
        # accuracy run below uses the OneVsRest wrapper `ovr` — confirm
        # this asymmetry is intentional.
        score = cross_validate(model, X, y, scoring=scoring, cv=cv, n_jobs=-1,)
        accuracy = cross_val_score(ovr, X, y, cv=cv, scoring=make_scorer(scorer))
        for key, value in score.items():
            print(f"{key}: {np.mean(value):.3f}")
        print(f"Accuracy: {np.mean(accuracy):.3f}")
        print("============================")
if __name__ == "__main__":
    # Resolve data/feature/prediction directories from the project config.
    PATH_DATA = os.path.join(PATH, CONFIG["PATH"]["PATH_DATA"])
    PATH_FEATURE = os.path.join(PATH, CONFIG["PATH"]["PATH_FEATURE"])
    PATH_PREDICT = os.path.join(PATH, CONFIG["PATH"]["PATH_PREDICT"])
    # Features were joined into one CSV; labels come from the sample pickle.
    X = pd.read_csv(os.path.join(PATH_FEATURE, "joined.csv")).values
    y = pd.read_pickle(os.path.join(PATH_DATA, "sample.pkl"))
    y = y.reset_index()["failure_type"]
    model = select_model(PATH_PREDICT)
    evaluate(model, X, y)
| 2020-iuc-sw-skku/LSC-Systems | trainer/predict.py | predict.py | py | 2,081 | python | en | code | 7 | github-code | 13 |
42809883981 | from uuid import UUID
from flask import Blueprint, Response, g, jsonify, make_response, request
from flask_jwt_extended import jwt_required
from common.constants.http import HttpStatusCodeConstants
from common.schemas.response import ResponseBaseSchema
from teachers.schemas import TeacherInputSchema, TeacherOutputSchema, TeacherUpdateSchema
from teachers.services import TeacherService
teachers_bp = Blueprint('teachers', __name__, url_prefix='/teachers')
@teachers_bp.get('/')
def get_teachers() -> Response:
    """GET '/teachers' endpoint view function.

    Returns:
        http response with json data: list of Teacher model objects serialized with TeacherOutputSchema.
    """
    service = TeacherService(
        session=g.db_session,
        output_schema=TeacherOutputSchema(many=True),
    )
    teachers = service.get_teachers()
    status_code = HttpStatusCodeConstants.HTTP_200_OK.value
    payload = ResponseBaseSchema().load({
        'status': {'code': status_code},
        'data': teachers,
        'errors': [],
    })
    return make_response(jsonify(payload), status_code)
@teachers_bp.post('/')
def post_teachers() -> Response:
    """POST '/teachers' endpoint view function.

    Returns:
        http response with json data: newly created Teacher model object serialized with TeacherOutputSchema.
    """
    service = TeacherService(
        session=g.db_session,
        input_schema=TeacherInputSchema(many=False),
        output_schema=TeacherOutputSchema(many=False),
    )
    teacher = service.add_teacher(data=request.get_json())
    status_code = HttpStatusCodeConstants.HTTP_201_CREATED.value
    payload = ResponseBaseSchema().load({
        'status': {'code': status_code},
        'data': teacher,
        'errors': [],
    })
    return make_response(jsonify(payload), status_code)
@teachers_bp.get('/<uuid:id>')
def get_teacher(id: UUID) -> Response:
    """GET '/teachers/{id}' endpoint view function.

    Args:
        id: UUID of Teacher object.

    Returns:
        http response with json data: single Teacher model object serialized with TeacherOutputSchema.
    """
    service = TeacherService(
        session=g.db_session,
        output_schema=TeacherOutputSchema(many=False),
    )
    teacher = service.get_teacher_by_id(id=id)
    status_code = HttpStatusCodeConstants.HTTP_200_OK.value
    payload = ResponseBaseSchema().load({
        'status': {'code': status_code},
        'data': teacher,
        'errors': [],
    })
    return make_response(jsonify(payload), status_code)
@teachers_bp.delete('/<uuid:id>')
@jwt_required()
def delete_teacher(id: UUID) -> Response:
    """DELETE '/teachers/{id}' endpoint view function.

    Args:
        id: UUID of Teacher object.

    Returns:
        http response with no data and 204 status code.
    """
    service = TeacherService(session=g.db_session)
    service.delete_teacher(id=id)
    return make_response('', HttpStatusCodeConstants.HTTP_204_NO_CONTENT.value)
@teachers_bp.put('/<uuid:id>')
@jwt_required()
def put_teacher(id: UUID) -> Response:
    """PUT '/teachers/{id}' endpoint view function.

    Args:
        id: UUID of Teacher object.

    Returns:
        http response with json data: single updated Teacher model object serialized with TeacherOutputSchema.
    """
    service = TeacherService(
        session=g.db_session,
        input_schema=TeacherUpdateSchema(many=False),
        output_schema=TeacherOutputSchema(many=False),
    )
    teacher = service.update_teacher(id=id, data=request.get_json())
    status_code = HttpStatusCodeConstants.HTTP_200_OK.value
    payload = ResponseBaseSchema().load({
        'status': {'code': status_code},
        'data': teacher,
        'errors': [],
    })
    return make_response(jsonify(payload), status_code)
| BorodaUA/practice_api_server | teachers/routers/__init__.py | __init__.py | py | 3,930 | python | en | code | 0 | github-code | 13 |
39088968483 | import sqlalchemy as sa
from sqlalchemy.dialects import (
postgresql as postgresql_types,
)
try:
from geoalchemy2 import types as geotypes
except ImportError:
pass
from fastapi_users_db_sqlalchemy import GUID
from app.db import Base
class VehicleBreak(Base):
    """ORM model for a scheduled break attached to a hub and, optionally, a vehicle."""

    __tablename__ = "vrp_vehicle_break"
    # Surrogate primary key.
    id = sa.Column(
        postgresql_types.INTEGER(),
        primary_key=True,
        unique=True,
        nullable=False,
        autoincrement=True,
    )
    # Owning hub; the DB cascades deletes (ON DELETE CASCADE).
    # NOTE(review): autoincrement=True on a non-primary-key FK column is
    # ignored by SQLAlchemy — presumably copy-pasted from `id`; confirm.
    hub_id = sa.Column(
        postgresql_types.INTEGER(),
        sa.ForeignKey(column="vrp_hub.id", ondelete="CASCADE"),
        primary_key=False,
        unique=False,
        nullable=False,
        autoincrement=True,
    )
    # Human-readable label for the break.
    name = sa.Column(
        postgresql_types.VARCHAR(length=50),
        primary_key=False,
        unique=False,
        nullable=False,
    )
    # Optional link to a specific vehicle; nullable, cascaded on delete.
    # NOTE(review): same spurious autoincrement flag as hub_id.
    vehicle_id = sa.Column(
        postgresql_types.INTEGER(),
        sa.ForeignKey(column="vrp_vehicle.id", ondelete="CASCADE"),
        primary_key=False,
        unique=False,
        nullable=True,
        autoincrement=True,
    )
    # Break window start (time of day).
    start = sa.Column(
        postgresql_types.TIME(),
        primary_key=False,
        unique=False,
        nullable=False,
    )
    # Break window end (time of day).
    end = sa.Column(
        postgresql_types.TIME(),
        primary_key=False,
        unique=False,
        nullable=False,
    )
    # Break duration; units not stated here — presumably minutes, TODO confirm.
    service_time = sa.Column(
        postgresql_types.NUMERIC(precision=10, scale=1),
        primary_key=False,
        unique=False,
        nullable=True,
    )
    hub = sa.orm.relationship(
        "Hub",
        back_populates="vehicle_break",
        cascade="all,delete",
        passive_deletes=True,
    )
    vehicle = sa.orm.relationship(
        "Vehicle", back_populates="vehicle_break", passive_deletes=True
    )
    # Owner of the record (FastAPI Users GUID).
    user_id = sa.Column(GUID, sa.ForeignKey("users.id"))
    # user = sa.orm.relationship("User", back_populates="vehicle")
    @property
    def serialize(self):
        """Return a plain-dict view of the break for JSON responses."""
        return {
            "id": self.id,
            "name": self.name,
            "start": self.start,
            "end": self.end,
            "service_time": self.service_time,
        }
| randyaswin/route-optimization-backend | backend/app/models/vehicle_break.py | vehicle_break.py | py | 2,148 | python | en | code | 0 | github-code | 13 |
70414274257 | """USER: removed null constraints and added department field
Revision ID: a4e9af45c35b
Revises: 40e51195b3fb
Create Date: 2023-09-21 00:26:31.199284
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a4e9af45c35b'
down_revision = '40e51195b3fb'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add non-nullable ``department`` and relax name columns on ``users``.

    NOTE(review): ``fisrt_name`` (sic) mirrors the existing column name in the
    schema — do not correct the spelling here or the migration breaks.
    NOTE(review): adding a NOT NULL column with no server_default fails on a
    table that already has rows — confirm the table is empty at upgrade time.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('users', schema=None) as batch_op:
        batch_op.add_column(sa.Column('department', sa.String(length=64), nullable=False))
        batch_op.alter_column('user_id',
               existing_type=sa.VARCHAR(length=64),
               nullable=False)
        batch_op.alter_column('fisrt_name',
               existing_type=sa.VARCHAR(length=128),
               nullable=True)
        batch_op.alter_column('last_name',
               existing_type=sa.VARCHAR(length=128),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop ``department`` and restore NOT NULL on the name columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('users', schema=None) as batch_op:
        batch_op.alter_column('last_name',
               existing_type=sa.VARCHAR(length=128),
               nullable=False)
        batch_op.alter_column('fisrt_name',
               existing_type=sa.VARCHAR(length=128),
               nullable=False)
        batch_op.alter_column('user_id',
               existing_type=sa.VARCHAR(length=64),
               nullable=True)
        batch_op.drop_column('department')
    # ### end Alembic commands ###
| Sulayman-ma/IDCard | migrations/versions/a4e9af45c35b_user_removed_null_constraints_and_added_.py | a4e9af45c35b_user_removed_null_constraints_and_added_.py | py | 1,600 | python | en | code | 0 | github-code | 13 |
11582142044 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
# Author Xu Junkai
# coding=utf-8
# @Time : 2021/2/19 18:32
# @Site :
# @File : before_request.py
# @Software: PyCharm
"""
import re
import json
from globals.bp_v1_manage import bp_v1
from flask import request, current_app, g
from server.libs.redis_tools import UserAuthRds
from utils.jwt.jwt import Jwt
from utils.response_body.base_response_status.base_response_status import Forbidden
from utils.response_body.response_code_msg.response_code_msg import ResponseMessage
@bp_v1.before_app_request
def middleware():
    """Authenticate every API request before it reaches a view.

    Skips CORS OPTIONS preflights and the whitelisted auth endpoints, then:
    1. requires an ``Authorization`` JWT header,
    2. decodes it with the app's ``SIGN`` secret,
    3. rejects tokens that differ from the one cached in Redis
       (single active session per user),
    4. checks path/method against the user's permitted API list
       (enforcement currently disabled — see commented raise below),
    5. stores the user's data-scope role id on ``flask.g``.
    """
    path = request.path
    method = request.method
    if method == "OPTIONS":
        return
    # Whitelist: login/permission endpoints are reachable without a token.
    if path in ["/api/v1/LoginAuth/", '/api/v1/obtainPermission/']:
        return
    token = request.headers.get("Authorization")
    if not token:
        raise Forbidden(message=ResponseMessage.IllegalLoginErr)
    try:
        payload = Jwt.decode(token.encode("utf-8"), current_app.config["SIGN"])
    except Exception as e:
        raise Forbidden(message=ResponseMessage.TokeninvalidErr)
    id = payload['id']
    auth_user = UserAuthRds.hgetall(id)
    # Single-session check: token must match the one stored in Redis.
    if token != auth_user['token']:
        raise Forbidden(message=ResponseMessage.UserRepeatLoginErr)
    # API permission check against the user's allowed endpoint patterns.
    permission_apis = json.loads(auth_user["apis"])
    for api in permission_apis:
        if re.search(api["api"], path) and method == api['method']:
            print('有权限的接口:', api["api"])
            break
    else:
        # NOTE(review): permission enforcement is intentionally disabled —
        # unauthorised calls are only logged. Re-enable the raise to enforce.
        # raise Forbidden(message = ResponseMessage.NoPermissionApiErr)
        print('当前用户没有api的权限为:', path)
    # Data-scope setup: expose the role's data id to downstream views.
    g.role_data_id = auth_user["role_data_id"] if auth_user["role_data_id"] else ""
    return
@bp_v1.after_app_request
def after_middleware(response):
    """After-request hook; currently passes the response through unchanged."""
    return response
| shineyGuang/flask_Cli | globals/before_request.py | before_request.py | py | 1,916 | python | en | code | 0 | github-code | 13 |
25661760857 | from ast import Delete
from django.http import HttpResponse, JsonResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from rest_framework.views import APIView
from snippets.models import Snippets
from snippets.serializers import SnippetsSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status, mixins, generics
@csrf_exempt
def all_snippets_list(request, format=None):
    """List all snippets (GET) or bulk-create from a JSON *list* (POST).

    NOTE(review): kept alongside the later function-based views; presumably a
    tutorial step — verify which view the URLconf actually wires up.
    """
    print("older snippets list")
    if request.method == 'GET':
        snippets = Snippets.objects.all()
        serializer = SnippetsSerializer(snippets, many=True)
        # safe=False is required because the payload is a top-level list.
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'POST':
        data = JSONParser().parse(request)
        print(data)
        # many=True: the request body must be a JSON list of snippet objects.
        serializer = SnippetsSerializer(data=data, many=True)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(
                serializer.data,
                status=201,
                safe=False)  # safe=False needed for a list payload;
            # otherwise JsonResponse raises even though the data was saved
        return JsonResponse(serializer.errors, status=400)
@csrf_exempt
def snippets_detail(request, pk):
    """Retrieve, update or delete a single code snippet by primary key."""
    try:
        snippet = Snippets.objects.get(pk=pk)
    except Snippets.DoesNotExist:
        return HttpResponse(status=404)

    if request.method == 'GET':
        return JsonResponse(SnippetsSerializer(snippet).data)
    if request.method == 'PUT':
        payload = JSONParser().parse(request)
        serializer = SnippetsSerializer(snippet, data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=400)
        serializer.save()
        return JsonResponse(serializer.data)
    if request.method == 'DELETE':
        snippet.delete()
        return HttpResponse(status=204)
@api_view()
def get_param(request, pk):
    """Echo the captured URL parameter *pk* back as JSON (debug helper)."""
    print(pk)  # debug trace of the captured parameter
    return JsonResponse({"value": pk}, status=200)
@api_view(["GET" ,"POST"])
def snippets_list(request, pk, format=None):
# allow the api to handle urls with format given like snippets/4.json
print("Newer Snippets List")
if request.method == "GET":
snippets = Snippets.objects.all()
serializer = SnippetsSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == "POST":
serializer = SnippetsSerializer(data = request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET", "POST", "PUT"])
def snippets_list(request, pk):
try:
snippet = Snippets.objects.get(id=pk)
except Snippets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "GET":
serializer = SnippetsSerializer(snippet)
return Response(serializer.data)
elif request.method == "PUT":
serializer = SnippetsSerializer(snippet, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(data = serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
snippet.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SnippetDetail(APIView):
    """Read-only detail view for one snippet (plain APIView flavour).

    NOTE(review): immediately shadowed by the mixin-based ``SnippetDetail``
    defined below; kept for tutorial comparison only.
    """

    def get_object(self, pk):
        # Translate a missing row into an HTTP 404.
        try:
            return Snippets.objects.get(pk=pk)
        except Snippets.DoesNotExist:
            print("Here in 404 error")
            raise Http404
    def get(self, request, pk, format=None):
        """Return the serialized snippet identified by *pk*."""
        snippet = self.get_object(pk)
        serializer = SnippetsSerializer(snippet)
        return Response(serializer.data)
class SnippetDetail(mixins.RetrieveModelMixin,
                    mixins.UpdateModelMixin,
                    mixins.DestroyModelMixin,
                    generics.GenericAPIView):
    """GET/PUT/DELETE for one snippet, composed from DRF mixins.

    NOTE(review): shadows the APIView-based ``SnippetDetail`` above.
    """
    queryset = Snippets.objects.all()
    serializer_class = SnippetsSerializer
    def get(self, request, *args, **kwargs):
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        return self.update(request, *args, **kwargs)
    def delete(self,request, *args, **kwargs):
        return self.destroy(request, *args, **kwargs)
class SnippetsDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE for one snippet via DRF's concrete generic view."""
    queryset = Snippets.objects.all()
    serializer_class = SnippetsSerializer
| Rahulbeniwal26119/django-rest-tutorial | snippets/views.py | views.py | py | 4,735 | python | en | code | 0 | github-code | 13 |
34348836183 | import tensorflow as tf
import numpy as np
import pickle
import os
import GlobalParameter
from tensorflow.contrib.crf import viterbi_decode
class Predictor:
    """CRF sequence tagger backed by a frozen TensorFlow graph (.pb).

    Loads the word/tag vocabularies pickled in *map_file* and restores the
    logits and CRF transition tensors from *model_file* so :meth:`predict`
    can Viterbi-decode tag sequences for raw sentences.
    """
    def __init__(self, model_file, map_file):
        # Vocabulary maps pickled as the tuple (word2id, tag2id, id2tag).
        word2id, tag2id, id2tag = pickle.load(open(map_file, 'rb'))
        self.word2id = word2id
        self.tag2id = tag2id
        self.id2tag = id2tag
        # Restore the frozen graph; tensors were exported under the
        # "prefix" import scope.
        with tf.gfile.GFile(model_file, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def, name="prefix")
        self.graph = graph
        self.input = self.graph.get_tensor_by_name("prefix/inputs:0")
        self.length = self.graph.get_tensor_by_name("prefix/length:0")
        self.dropout = self.graph.get_tensor_by_name("prefix/dropout:0")
        self.logits = self.graph.get_tensor_by_name("prefix/project/logits:0")
        self.trans = self.graph.get_tensor_by_name("prefix/loss/transitions:0")
        self.sess = tf.Session(graph=self.graph)
        self.sess.as_default()
        self.num_class = len(self.id2tag)
    def __decode(self, logits, trans, sequence_lengths, tag_num):
        """Viterbi-decode each padded logit matrix into a tag-id path.

        A synthetic start row (large negative scores except an extra start
        state) is prepended so the transition matrix can encode start costs;
        the first decoded step is dropped before returning.
        """
        viterbi_sequences = []
        small = -1000.0
        start = np.asarray([[small] * tag_num + [0]])
        for logit, length in zip(logits, sequence_lengths):
            # Trim padding, then add the extra start-state column/row.
            score = logit[:length]
            pad = small * np.ones([length, 1])
            score = np.concatenate([score, pad], axis=1)
            score = np.concatenate([start, score], axis=0)
            viterbi_seq, viterbi_score = viterbi_decode(score, trans)
            viterbi_sequences.append(viterbi_seq[1:])
        return viterbi_sequences
    def predict(self, sentences):
        """Tag a batch of sentences.

        Args:
            sentences: iterable of token sequences (e.g. strings of chars).

        Returns:
            List of tag-name lists, one per input sentence.
        """
        inputs = []
        lengths = [len(text) for text in sentences]
        max_len = max(lengths)
        for sent in sentences:
            # Map tokens to ids, falling back to the <OOV> id, then right-pad.
            sent_ids = [self.word2id.get(w) if w in self.word2id else self.word2id.get("<OOV>") for w in sent]
            padding = [0] * (max_len - len(sent_ids))
            sent_ids += padding
            inputs.append(sent_ids)
        inputs = np.array(inputs, dtype=np.int32)
        feed_dict = {
            self.input: inputs,
            self.length: lengths,
            self.dropout: 1.0
        }
        logits, trans = self.sess.run([self.logits, self.trans], feed_dict=feed_dict)
        path = self.__decode(logits, trans, lengths, self.num_class)
        tags = [[self.id2tag.get(l) for l in p] for p in path]
        return tags
def cut(sentences):
    """Segment sentences into words using the trained B/M/E tagger.

    B = begin, M = middle, E = end of a multi-char word; any other label
    (presumably S, single-char word — TODO confirm tag set) emits the char
    alone and, NOTE(review), silently discards any unfinished word buffer.
    Model and map paths are hard-coded via GlobalParameter.
    """
    predictor = Predictor(os.path.join(GlobalParameter.MODEL_DIR, "modle.pb"), GlobalParameter.MAP_FILE)
    all_labels = predictor.predict(sentences)
    sent_words = []
    for ti, text in enumerate(sentences):
        words = []
        N = len(text)
        seg_labels = all_labels[ti]
        tmp_word = ""  # accumulates the chars of the word being built
        for i in range(N):
            label = seg_labels[i]
            w = text[i]
            if label == "B":
                # NOTE(review): a non-empty tmp_word here (B after B/M) is
                # merged into the new word rather than flushed — confirm intent.
                tmp_word += w
            elif label == "M":
                tmp_word += w
            elif label == "E":
                tmp_word += w
                words.append(tmp_word)
                tmp_word = ""
            else:
                tmp_word = ""
                words.append(w)
        # Flush a word left open by a trailing B/M label.
        if tmp_word:
            words.append(tmp_word)
        sent_words.append(words)
    return sent_words
5339604404 | import sys
import os
import random
class org:
    """Resolve the vendor/organisation name for a BSSID via the local OUI table."""

    def __init__(self, bssid=''):
        self.bssid = bssid
        self.org = self.findORG(self.bssid)

    def findORG(self, bssid):
        """Look up *bssid*'s OUI prefix in utils/macers.txt.

        Each table line looks like ``aa:bb:cc:xx:xx:xx ~ Vendor Name``.
        Returns the first whitespace-delimited token of the matching
        organisation, or 'unknown' when no entry matches.
        """
        # First 3 octets identify the vendor; the rest is wildcarded.
        prefix = bssid.lower()[0:9] + "xx:xx:xx"
        # 'with' guarantees the file is closed even if parsing raises
        # (the original leaked the handle on any exception mid-loop).
        with open(os.path.join(os.getcwd(), 'utils', 'macers.txt'), 'r') as table:
            for line in table:
                if line.strip('\n').split(' ~ ')[0].lower() == prefix:
                    return line.strip('\n').split(' ~ ')[1].split(' ')[0]
        return 'unknown'
def supports_color():
    """Best-effort check that stdout is a terminal capable of ANSI colors."""
    platform_ok = sys.platform != 'Pocket PC' and (
        sys.platform != 'win32' or 'ANSICON' in os.environ
    )
    # isatty is not always implemented, #6223.
    tty_ok = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    return platform_ok and tty_ok
def randomness(self, _max, last_num):
    """Return a random int in [1, _max] guaranteed to differ from *last_num*.

    NOTE: loops until the draw differs, so _max must allow an alternative.
    The unused ``self`` parameter is kept for caller compatibility.
    """
    candidate = last_num
    while candidate == last_num:
        candidate = random.randint(1, _max)
    return candidate
class Modes:
    """Validation helper for the tool's mode selector."""

    def get_mode(self, m):
        """Return True when *m* is one of the supported modes (1-4).

        Replaces the if/else-return-True/False anti-pattern with a direct
        membership test; behavior is unchanged.
        """
        return m in (1, 2, 3, 4)
73703989137 | import os
from keras.models import load_model
from datetime import datetime
def export_model(model, settings):
    """Save *model* into a timestamped folder under ./models together with a
    human-readable dump of *settings* and the model summary.

    Args:
        model: Keras model (anything exposing ``summary(print_fn=...)`` and
            ``save(path)``).
        settings: mapping of setting names to values.

    Returns:
        Message string naming the export folder.
    """
    foldername = datetime.now().strftime('%Y%m%d_%H-%M-%S')
    folder = "./models/" + foldername
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(folder, exist_ok=True)
    # 'with' ensures the settings file is flushed/closed even if summary() raises.
    with open(folder + "/model_settings.txt", "w+") as settings_file:
        for key, val in settings.items():
            settings_file.write(str(key) + ":\n" + str(val) + "\n")
        settings_file.write("Model summary:\n")
        model.summary(print_fn=lambda x: settings_file.write(x + '\n'))
    model.save(folder + "/SwissGermanToText.model")
    return "Exported to folder ./models/" + foldername
def import_model(path):
    """Load a previously exported Keras model from *path*."""
    return load_model(path)
| luke-z/SwissGermanToText | export_model.py | export_model.py | py | 774 | python | en | code | 1 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.