seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
44875572748 | from papaye.evolve.managers import load_model, context_from_root
@load_model('papaye.evolve.models.snapshot1')
def evolve(root, config=None):
    """Evolve the model to schema version 4: restore ``__parent__`` links.

    Walks every package, release and release file stored under the
    repository, re-attaches each child object to its container, and finally
    stamps the context as evolved to version 4.
    """
    ctx = context_from_root(root)
    repo = ctx.get('repository', tuple())
    for pkg_name in repo:
        pkg = repo[pkg_name]
        pkg.__parent__ = repo
        print('Upgrade {} package'.format(pkg.__name__))
        for rel_name in pkg.releases:
            rel = pkg[rel_name]
            rel.__parent__ = pkg
            print('\tUpgrade {} release'.format(rel.__name__))
            for file_name in rel.release_files:
                rel_file = rel[file_name]
                rel_file.__parent__ = rel
                print('\t\tUpgrade {} file'.format(rel_file.__name__))
    ctx.evolved = 4
| Tyarran/papaye | papaye/evolve/evolve4.py | evolve4.py | py | 883 | python | en | code | 10 | github-code | 13 |
34780355134 | # This file contains functions for inference phase
import numpy as np
import cv2
from .umeyama import umeyama
def get_tar_landmarks(img, landmarks_type=68):
    """Return target (canonical) landmark positions scaled to ``img``.

    Args:
        img: Detected face image; only its shape is used.
        landmarks_type: Landmark scheme; only the 68-point model is supported.

    Returns:
        List of 68 ``(row, col)`` integer tuples: the average-face landmark
        coordinates scaled to the image height and width.

    Raises:
        NotImplementedError: If ``landmarks_type`` is not 68.
    """
    img_sz = img.shape
    if landmarks_type == 68:
        # Average 68-point face landmarks, normalized to [0, 1] in (row, col).
        avg_landmarks = np.array(
            [[0.30366492, 0.02105263], [0.43979058, 0.02105263], [0.54450262, 0.03684211], [0.64921466, 0.06842105], [0.7539267 , 0.1       ],
             [0.84816754, 0.15789474], [0.90575916, 0.22105263], [0.97905759, 0.32631579], [1.0104712 , 0.46315789], [0.97905759, 0.61052632],
             [0.92146597, 0.73157895], [0.85863874, 0.81052632], [0.76963351, 0.86842105], [0.66492147, 0.9       ], [0.56020942, 0.91578947],
             [0.45549738, 0.93157895], [0.33507853, 0.94736842], [0.27225131, 0.11578947], [0.2565445 , 0.17368421], [0.2565445 , 0.23684211],
             [0.2565445 , 0.29473684], [0.28795812, 0.34210526], [0.28795812, 0.56842105], [0.27225131, 0.62631579], [0.27225131, 0.68947368],
             [0.27225131, 0.76315789], [0.28795812, 0.82631579], [0.40837696, 0.46315789], [0.4973822 , 0.46315789], [0.57591623, 0.44736842],
             [0.63350785, 0.44736842], [0.64921466, 0.36842105], [0.66492147, 0.4       ], [0.68062827, 0.46315789], [0.66492147, 0.50526316],
             [0.64921466, 0.53684211], [0.36649215, 0.20526316], [0.35078534, 0.23684211], [0.36649215, 0.29473684], [0.37696335, 0.34210526],
             [0.39267016, 0.29473684], [0.39267016, 0.23684211], [0.39267016, 0.58421053], [0.36649215, 0.62631579], [0.36649215, 0.68947368],
             [0.37696335, 0.73157895], [0.40837696, 0.68947368], [0.40837696, 0.62631579], [0.7382199 , 0.26315789], [0.7382199 , 0.32631579],
             [0.72774869, 0.41578947], [0.7382199 , 0.46315789], [0.7382199 , 0.48947368], [0.7382199 , 0.6       ], [0.7539267 , 0.67368421],
             [0.83246073, 0.6       ], [0.85863874, 0.52105263], [0.87434555, 0.46315789], [0.85863874, 0.38421053], [0.81675393, 0.32631579],
             [0.7382199 , 0.27894737], [0.7539267 , 0.38421053], [0.76963351, 0.46315789], [0.76963351, 0.52105263], [0.7539267 , 0.65789474],
             [0.81675393, 0.52105263], [0.81675393, 0.46315789], [0.81675393, 0.38421053]]
        )
    else:
        # Bug fix: the original message referenced the undefined name
        # `landmarks_pnts`, raising NameError instead of the intended error.
        raise NotImplementedError(f"Only 68 points landmarks model is provided. Received {landmarks_type}.")
    tar_landmarks = [(int(xy[0]*img_sz[0]), int(xy[1]*img_sz[1])) for xy in avg_landmarks]
    return tar_landmarks
def landmarks_match(src_im, src_landmarks, tar_landmarks, border_mode=cv2.BORDER_REPLICATE, border_value=(0,255,0)):
    """Warp ``src_im`` so its landmarks align with ``tar_landmarks``.

    A similarity transform is estimated with ``umeyama`` (which expects
    landmark coordinates in (y, x) order) and applied with
    ``cv2.warpAffine``.

    Returns the warped image and the 2x3 affine matrix.
    """
    def _to_yx(points):
        # Swap (row, col) -> (y, x) as umeyama expects; drop the 17 jawline
        # points when a full 68-point set is given.
        swapped = [(int(p[1]), int(p[0])) for p in points]
        return swapped[17:] if len(swapped) >= 68 else swapped

    src_pts = _to_yx(src_landmarks)
    dst_pts = _to_yx(tar_landmarks)
    M = umeyama(np.array(src_pts), np.array(dst_pts), True)[0:2]
    height, width = src_im.shape[0], src_im.shape[1]
    result = cv2.warpAffine(src_im, M, (width, height), borderMode=border_mode, borderValue=border_value)
    return result, M
def get_68_landmarks_edge_image(img, face, lms, eyes_binary_mask=False, apply_dilate_or_erode=True):
    """Draw a landmark edge image on a black canvas the size of ``img``.

    Currently only the nose bridge (landmarks 27..30) is drawn, in yellow
    (255, 255, 0); the stroke width scales with the face size.
    ``eyes_binary_mask`` and ``apply_dilate_or_erode`` are accepted for
    interface compatibility but are not used here.
    """
    canvas = np.zeros_like(img, np.uint8)
    # Stroke width proportional to face size, with a floor of 2 px.
    stroke_size = np.maximum(np.min(face.shape[:2]) // 50, 2)
    # Connect consecutive nose-bridge landmarks; cv2.line takes points in
    # (col, row) order, hence the swapped indices.
    for a, b in zip(range(27, 30), range(28, 31)):
        pt_a = (int(lms[a][1]), int(lms[a][0]))
        pt_b = (int(lms[b][1]), int(lms[b][0]))
        canvas = cv2.line(canvas.copy(), pt_a, pt_b, (255,255,0), stroke_size * 2)
    return canvas
def vis_parsing_maps(im, parsing_anno, stride=1):
    """Color a face-parsing annotation map.

    Each class index in ``parsing_anno`` (1..max) is mapped to an RGB color
    from ``part_colors``; class 0 (background) stays black.

    Returns the colored parsing map as a uint8 image.
    """
    # Colors for all 20 parts
    # NOTE(review): only 24 colors are listed; if the annotation ever
    # contains a class index > 23, part_colors[pi] raises IndexError.
    part_colors = [
        [255, 0, 0], [0, 0, 255], [0, 255, 255], [0, 255, 255], [255, 255, 255],
        [255, 255, 255], [255, 85, 0], [170, 255, 0], [0, 255, 85], [0, 255, 170],
        [0, 255, 0], [255, 125, 0], [255, 0, 125], [255, 75, 125], [0, 170, 255],
        [200, 200, 45], [255, 255, 85], [255, 255, 170], [255, 0, 255],
        [255, 85, 255], [255, 170, 255], [255, 170, 0], [85, 255, 255],
        [170, 255, 255]
    ]
    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    # Nearest-neighbor upscale keeps class indices intact (no interpolation).
    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3))# + 255
    num_of_class = np.max(vis_parsing_anno)
    # Paint every non-background class with its color.
    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]
    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # NOTE(review): vis_im (the blended overlay) is computed but never
    # returned or used — dead code kept for reference.
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)
    return vis_parsing_anno_color
def detect_face(im, fd, with_landmarks=True):
    """Detect the most confident face in ``im`` using detector ``fd``.

    Returns ``((x0, y0, x1, y1), landmarks)``; ``landmarks`` is None when
    ``with_landmarks`` is False. Raises AssertionError if no face is found.
    """
    def get_square_bbox(x0, x1, y0, y1, input_img):
        # Expand the bbox to a square around its center; fall back to the
        # original bbox if the square would leave the image bounds.
        center = np.array([(x0 + x1)/2, (y0 + y1)/2])
        length = (x1-x0 + y1-y0) / 2
        if ((center - length//2) < 0).any():
            return x0, x1, y0, y1
        if ((center + length//2) > input_img.shape[:2]).any():
            return x0, x1, y0, y1
        return center[0]-length/2, center[0]+length/2, center[1]-length/2, center[1]+length/2
    landmarks = None
    if with_landmarks:
        bboxes, landmarks = fd.detect_face(im, with_landmarks=with_landmarks)
    else:
        bboxes = fd.detect_face(im, with_landmarks=with_landmarks)
    assert len(bboxes) >=1, "No face detected."
    # Keep only the face with the highest confidence (last bbox element).
    if len(bboxes) > 1:
        print("Multiple faces detected. Only the most confident face will be processed.")
        most_conf_idx = sorted(
            range(len(bboxes)),
            key=lambda idx: bboxes[idx][-1],
            reverse=True)[0]
        bboxes = [bboxes[most_conf_idx]]
        try:
            landmarks = [landmarks[most_conf_idx]]
        except:
            pass
    x0, y0, x1, y1 = bboxes[0][:-1].astype(np.int32)
    # NOTE(review): get_square_bbox is declared as (x0, x1, y0, y1) but is
    # called with (x0, y0, x1, y1), and its (x0, x1, y0, y1) return value is
    # unpacked here as (x0, y0, x1, y1). The two mismatches may be deliberate
    # given the detector's bbox ordering — confirm against
    # fd.detect_face's coordinate convention before changing.
    x0, y0, x1, y1 = map(np.int32, get_square_bbox(x0, y0, x1, y1, im))
    return (x0, y0, x1, y1), landmarks
def align_image(im, x0, y0, x1, y1, landmarks):
    """Warp ``im`` so the detected face matches the canonical landmarks.

    Target landmarks are computed for the face crop and shifted back into
    full-image coordinates before the match.
    """
    face_crop = im[x0:x1, y0:y1, :]
    target_lms = np.array(get_tar_landmarks(face_crop, 68)) + [x0, y0]
    aligned_img, _ = landmarks_match(
        im,
        landmarks[0],
        target_lms,
        border_mode=cv2.BORDER_CONSTANT,
        border_value=(0,0,0))
    return aligned_img
def get_segm_mask(im, face_im, x0, y0, x1, y1, landmarks):
    """Return the 68-landmark edge image cropped to the face bounding box."""
    edges = get_68_landmarks_edge_image(
        im,
        face_im,
        landmarks[0],
        apply_dilate_or_erode=False)
    row0, row1 = int(x0), int(x1)
    col0, col1 = int(y0), int(y1)
    return edges[row0:row1, col0:col1, :]
def parse_face(im, seg_mask, fp):
    """Run face parsing on ``im`` and overlay the nose edge from ``seg_mask``.

    Pixels that are exactly yellow (255, 255, 0) in ``seg_mask`` replace the
    corresponding pixels of the colored parsing map.
    """
    raw_map = fp.parse_face(im)
    colored = vis_parsing_maps(im, raw_map[0])
    # Binary (H, W, 1) mask of exactly-yellow pixels in seg_mask.
    is_nose_tip = np.prod(seg_mask == (255,255,0), axis=-1)[..., None]
    blended = (1 - is_nose_tip) * colored + is_nose_tip * seg_mask
    return blended
def get_eyes_mask(colored_parsing_map):
    """Return a (H, W, 1) binary mask of pure-white pixels (the eye class)."""
    is_white = colored_parsing_map == (255,255,255)
    mask = np.prod(is_white, axis=-1)
    return mask[..., None]
def detect_irises(im, idet, landmarks2=None):
    """Delegate iris detection on ``im`` to the iris detector ``idet``."""
    return idet.detect_iris(im, landmarks2)
def draw_irises(colored_parsing_map, eyes_mask, eyes_lms):
    """Paint the iris polygons in gray, restricted to the eye regions.

    Irises are filled on a copy of the parsing map; the result is then
    composited so that only pixels inside ``eyes_mask`` take the iris color.
    """
    with_iris = colored_parsing_map.copy()
    for lms in eyes_lms[0]:
        # Landmark rows 8:16 (coordinates reversed to (x, y) for fillPoly)
        # form the iris polygon — presumably per the iris detector's
        # landmark layout; confirm against the detector docs.
        polygon = lms[8:16, ::-1].reshape(-1,1,2).astype(np.int32)
        with_iris = cv2.fillPoly(
            with_iris.astype(np.int32),
            [polygon],
            color=(125,125,125))
    composited = (1 - eyes_mask) * colored_parsing_map + eyes_mask * with_iris
    return composited
def auto_resize(im, max_size=768):
    """Downscale ``im`` so its longest side is at most ``max_size`` pixels.

    Images already within the limit are returned unchanged.
    """
    longest = np.max(im.shape)
    if longest <= max_size:
        return im
    scale = max_size / longest
    return cv2.resize(im, (0,0), fx=scale, fy=scale)
def get_src_inputs(fn, fd, fp, idet):
    """
    Build all source-face inputs (aligned face, parsing map, iris overlay).
    Inputs:
        fn: A string. Path to a image file.
        fd: An instance of FaceAlignmentDetector. Face detector in face_toolbox_keras.
        fp: An instance of FaceParer. Face parsing model in face_toolbox_keras.
        idet: Iris detector instance (provides detect_iris).
    Outputs:
        aligned_face: A RGB image. Aligned face image.
        parsing_map_with_iris: A RGB image.
        aligned_im: A RGB image. Aligned raw input image.
        (x0, y0, x1, y1), A tuple of integers. Bounding box coordinates.
        landmarks: A numpy array of shape (68,2). 68-points face landmarks.
    """
    # cv2 reads BGR; reverse the channel axis to get RGB.
    im = cv2.imread(fn)[..., ::-1]
    im = auto_resize(im)
    (x0, y0, x1, y1), landmarks = detect_face(im, fd)
    aligned_im = align_image(im, x0, y0, x1, y1, landmarks)
    # Apply detection & alignment twice for better face alignment result.
    (x0, y0, x1, y1), landmarks2 = detect_face(aligned_im, fd)
    aligned_face = aligned_im[x0:x1, y0:y1, :].copy()
    segm_mask = get_segm_mask(aligned_im, aligned_face, x0, y0, x1, y1, landmarks2)
    colored_parsing_map = parse_face(aligned_face, segm_mask, fp=fp)
    eyes_mask = get_eyes_mask(colored_parsing_map)
    eyes_lms = detect_irises(aligned_im, idet, landmarks2)
    # Shift iris landmarks from full-image to face-crop coordinates.
    eyes_lms = eyes_lms - np.array([[[x0, y0]]])
    parsing_map_with_iris = draw_irises(colored_parsing_map, eyes_mask, eyes_lms)
    return aligned_face, parsing_map_with_iris, aligned_im, (x0, y0, x1, y1), landmarks
def get_tar_inputs(fns, fd, fv):
    """
    Build target-face inputs: the aligned face and its average embedding.
    Inputs:
        fns: A string or a list. Path(s) to image file(s).
        fd: An instance of FaceAlignmentDetector. Face detector in face_toolbox_keras.
        fv: An instance of FaceVerifier. Face verification model in face_toolbox_keras.
    Outputs:
        aligned_face: A RGB image (the aligned face of the last input file).
        emb_avg_tar: A numpy array of shape (1, 512). Embeddings averaged over all inputs.
    Raises:
        ValueError: If fns is neither a string nor a list, or is empty.
    """
    # Normalize fns to a list; reject unsupported types early.
    if not isinstance(fns, list):
        if isinstance(fns, str):
            fns = [fns]
        else:
            # Bug fix: the original message had typos ("Received and
            # unknown", "shoulbe be").
            raise ValueError("Received an unknown filename type. fns should be a list or a string.")
    if not fns:
        # Guard: an empty list would otherwise hit an UnboundLocalError on
        # aligned_face and a division by zero below.
        raise ValueError("fns must contain at least one filename.")
    emb_avg_tar = np.zeros((1, 512))
    for fn in fns:
        im = cv2.imread(fn)[..., ::-1]  # BGR -> RGB
        im = auto_resize(im)
        (x0, y0, x1, y1), landmarks = detect_face(im, fd)
        aligned_im = align_image(im, x0, y0, x1, y1, landmarks)
        # Detect again on the aligned image for a tighter crop.
        (x0, y0, x1, y1), landmarks2 = detect_face(aligned_im, fd, with_landmarks=False)
        aligned_face = aligned_im[x0:x1, y0:y1, :].copy()
        emb_tar = fv.extract_embeddings(aligned_face)
        emb_avg_tar += emb_tar
    emb_avg_tar /= len(fns)
    return aligned_face, emb_avg_tar
def get_feather_blend_mask(im):
    """Build a feathered (Gaussian-blurred) blending mask shaped like ``im``.

    The inner ~80% of the mask is white and the border fades to black, so
    blending with this mask hides seams at the crop boundary.
    """
    h, w = im.shape[0], im.shape[1]
    fb_mask = np.zeros_like(im)
    # White interior with a ~10% margin on every side.  The negative stop
    # floor-divides, matching the original `-shape//10` indexing exactly.
    top, bottom = h // 10, (-h) // 10
    left, right = w // 10, (-w) // 10
    fb_mask[top:bottom, left:right] = 255
    # Odd kernel size, roughly 1/12 of the longest side.
    kernel_size = (np.max(fb_mask.shape) // 12 // 2) * 2 + 1
    return cv2.GaussianBlur(fb_mask, (kernel_size, kernel_size), 0)
def post_process_result(fn, fd, result, aligned_im, src, x0, y0, x1, y1, landmarks):
    """Blend the generated face back into the original (unaligned) photo.

    The generated ``result`` is feather-blended into the aligned image at the
    face bbox, the aligned image is warped back to the original pose via the
    inverse landmark match, and finally only the face bbox of the warped
    image is copied into a fresh read of the original photo.
    """
    output_im = aligned_im.copy()
    fb_mask = get_feather_blend_mask(src)
    # Feather-blend the (resized) generated face into the aligned image.
    output_im[x0:x1, y0:y1, :] = (1-fb_mask/255) * output_im[x0:x1, y0:y1, :] \
        + fb_mask/255 * cv2.resize(result, (src.shape[1], src.shape[0]))
    im = cv2.imread(fn)[..., ::-1]
    im = auto_resize(im)
    (fx0, fy0, fx1, fy1), _ = detect_face(im, fd, with_landmarks=False)
    lms_tar = get_tar_landmarks(im[fx0:fx1, fy0:fy1, :], 68)
    # Inverse warp: match canonical landmarks back to the original landmarks.
    output_im, _ = landmarks_match(
        output_im,
        np.array(lms_tar)+[fx0,fy0],
        landmarks[0],
        border_mode=cv2.BORDER_CONSTANT,
        border_value=(0,0,0))
    final_output = im.copy()
    # Replace only the detected face region of the original photo.
    final_output[fx0:fx1, fy0:fy1, :] = output_im[fx0:fx1, fy0:fy1, :]
    return final_output
| shaoanlu/fewshot-face-translation-GAN | utils/utils.py | utils.py | py | 11,851 | python | en | code | 789 | github-code | 13 |
37774946529 | import webapp2
import jinja2
import json
import os
import logging
from models.connexus_user import ConnexusUser
from models.stream import Stream
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import blobstore
# Template root: the sibling ../www directory relative to this module.
templates_dir = os.path.normpath(os.path.dirname(__file__) + '/../www/')
# Shared Jinja2 environment with autoescaping enabled for all templates.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(templates_dir),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class View(webapp2.RequestHandler):
    """GAE request handler that renders a single stream's view page.

    Fetches the stream via the internal view API and renders either the
    stream template or an error page. (Python 2 / webapp2 code: note the
    use of ``long``.)
    """
    def display_error(self, error_message):
        """Render the error page with the given message."""
        page_data = {
            'logout_url': users.create_logout_url('/'),
            'page_name': 'view',
            'error_msg': error_message
        }
        template = JINJA_ENVIRONMENT.get_template('error.html')
        self.response.write(template.render(page_data))
    def is_user_subscribed(self, stream_id):
        """Return whether the current user is subscribed to the stream."""
        user = users.get_current_user()
        stream = Stream.get_by_id(stream_id)
        return ConnexusUser.is_subscribed(user.user_id(), stream.key)
    def get(self):
        """Render the stream page for the ``id`` query parameter."""
        # NOTE(review): `user` is never used in this method.
        user = users.get_current_user()
        # NOTE(review): a missing `id` parameter makes long('') raise
        # ValueError before the not-found branch below can run.
        stream_id = long(self.request.get('id'))
        limit = int(self.request.get('limit', '8'))
        logout_url = users.create_logout_url('/')
        if not stream_id :
            self.display_error('The stream you are trying to access does not exist. It may have been removed by the owner.')
        else :
            # Fetch the stream contents from the internal view API.
            view_api_uri = '{}?id={}&offset=0&limit={}'.format(self.uri_for('api-view', _full=True), stream_id, limit)
            result = urlfetch.fetch(url = view_api_uri)
            # Toggle the subscribe link depending on current subscription.
            subscribe_url = self.uri_for('subscribe-stream', _full=True)
            if self.is_user_subscribed(stream_id):
                subscribe_url = self.uri_for('unsubscribe-stream', _full=True)
            if result.status_code == 200:
                j = json.loads(result.content)
                if not j.get('id'):
                    self.display_error('The stream you are trying to access does not exist. It may have been removed by the owner.')
                else:
                    # Blobstore upload URL for posting new photos.
                    upload_url = blobstore.create_upload_url('/upload_photo')
                    page_data = {
                        'stream': j,
                        'logout_url': logout_url,
                        'page_name': 'view',
                        'upload_url': upload_url,
                        'limit' : limit + 8,
                        'is_subscribed': self.is_user_subscribed(stream_id),
                        'subscribe_url': subscribe_url,
                    }
                    template = JINJA_ENVIRONMENT.get_template('view-single.html')
                    self.response.write(template.render(page_data))
| rayolanderos/UT-APT-MiniProject | webapp/controllers/view.py | view.py | py | 2,750 | python | en | code | 0 | github-code | 13 |
70175827538 | """
Node classification Task For Evaluation:
Full explanation for what is done can be found the survey file in our github page.
Code explanation:
For this task, one should have a labeled graph: 2 files are required: Graph edges in '.edgelist' or '.txt' format and
nodes' labels in '.txt' format. For labeled graphs examples you can enter the link in the github page. You should
insert them in the appropriate place in the main function in the file 'directed_cheap_node2vec' or
'undirected_cheap_node2vec', depends on your graph's type.
This task compares two things:
1. Compare performance of our method and regular node2vec, i.e. we do the same task with both methods, calculate
needed scores and compare between them - This would be mission 1.
2. Only for our method, compare the success of the task (measuring by several scores) for different number of nodes
in the initial projection - This would be mission 2.
For mission 1: Go to the file 'static_embedding.py' . Change 'initial' variable to a list that consists the percentage
of nodes you want in the initial projection (for example, for pubmed2, 0.975 means 100 nodes in the initial
projection). Go back to this file and run main(1).
For mission 2: Go to the file 'static_embedding.py'. Change 'initial' variable to a list that consists a number of
percentages of nodes you want in the initial projection (for example, for pubmed2 the list is [0.975, 0.905, 0.715,
0.447, 0.339], meaning run with 100 nodes in the initial projection, then with 1000, 3000, 7000 and 10000).
Go back to this file to the function 'initial_proj_vs_scores' and replace x to be equal to 'initial' list you
changed earlier. Then, you can run this file- main(2).
Notice methods to compare, graph and other parameters are all chosen in 'static_embedding.py' file.
"""
from node2vec import Node2Vec
import pandas as pd
import numpy as np
import networkx as nx
import pickle
import os
import argparse
import random
class GraphImporter(object):
    """Load graphs for the node-classification experiments.

    ``data_name`` doubles as the dataset directory and the file-name prefix.
    """

    def __init__(self, data_name):
        self.data_name = data_name

    def import_imdb_multi_graph(self):
        """Load (or build and cache) the IMDb multi-graph as a gpickle."""
        path = os.path.join(self.data_name, 'IMDb_multi_graph.gpickle')
        if os.path.exists(path):
            return nx.read_gpickle(path)
        from IMDb_data_preparation import main
        multi_gnx = main()
        nx.write_gpickle(multi_gnx, path)
        return multi_gnx

    def import_graph(self):
        """Read ``<data_name>/<data_name>.txt`` as an edge list into a MultiGraph.

        Each node carries a ``key`` attribute equal to the first character of
        its id; each edge's key is the sorted pair of its endpoints' key
        characters.
        """
        graph = nx.MultiGraph()
        path = os.path.join(self.data_name, self.data_name + '.txt')
        with open(path, 'r') as f:
            for line in f:
                items = line.strip().split()
                key_u = str(items[0][0])
                key_v = str(items[1][0])
                graph.add_node(items[0], key=key_u)
                graph.add_node(items[1], key=key_v)
                lo, hi = sorted(np.array([key_u, key_v]))
                graph.add_edge(items[0], items[1], key=str(lo) + str(hi))
        return graph
class EmbeddingCreator(object):
    """Create (or load cached) node embeddings for a graph.

    ``data_name`` is used as both the cache directory and file-name prefix.
    """
    def __init__(self, data_name=None, graph=None):
        self.data_name = data_name
        self.graph = graph

    def create_node2vec_embeddings(self):
        """Return a dict of node -> embedding vector.

        Lookup order: pickle cache, CSV cache (converted and re-cached as a
        pickle), then a fresh Node2Vec fit (dimensions=16).
        """
        pickle_path = os.path.join(self.data_name, 'Node2Vec_embedding_old.pickle')
        csv_path = os.path.join(self.data_name, 'Node2Vec_embedding_old.csv')
        if os.path.exists(pickle_path):
            with open(pickle_path, 'rb') as handle:
                dict_embeddings = pickle.load(handle)
        elif os.path.exists(csv_path):
            embedding_df = pd.read_csv(csv_path)
            dict_embeddings = embedding_df.to_dict(orient='list')
            # Bug fix: the original dumped the pickle cache to the CSV path,
            # overwriting the CSV file with binary pickle data.  Cache to
            # the pickle path instead.
            with open(pickle_path, 'wb') as handle:
                pickle.dump(dict_embeddings, handle, protocol=3)
        else:
            node2vec = Node2Vec(self.graph, dimensions=16, walk_length=30, num_walks=200, workers=1)
            model = node2vec.fit()
            nodes = list(self.graph.nodes())
            dict_embeddings = {}
            for i in range(len(nodes)):
                dict_embeddings.update({nodes[i]: np.asarray(model.wv.get_vector(nodes[i]))})
            with open(pickle_path, 'wb') as handle:
                pickle.dump(dict_embeddings, handle, protocol=3)
        return dict_embeddings

    def create_event2vec_embeddings(self):
        """Parse precomputed event2vec embeddings from a text file.

        The first line is treated as a header and skipped; each following
        line is ``<node> <v1> <v2> ...`` (values kept as strings).
        """
        data_path = self.data_name + '_e2v_embeddings.txt'
        path = os.path.join(self.data_name, data_path)
        dict_embeddings = {}
        skipped_header = False
        with open(path, 'r') as f:
            for line in f:
                if skipped_header:
                    items = line.strip().split()
                    dict_embeddings[items[0]] = items[1:]
                skipped_header = True
        return dict_embeddings
try: import cPickle as pickle
except: import pickle
from sklearn import model_selection as sk_ms
from sklearn.multiclass import OneVsRestClassifier as oneVr
from sklearn.linear_model import LogisticRegression as lr
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score, precision_score
import numpy as np
"""
Code for the node classification task as explained in GEM article. This part of the code belongs to GEM.
For more information, you can go to our github page.
"""
class TopKRanker(oneVr):
    """One-vs-rest logistic regression that predicts the top-k labels per sample."""

    def predict(self, X, top_k_list):
        """Return a binary label-indicator matrix plus the raw probabilities.

        ``top_k_list[i]`` gives how many labels to switch on for sample
        ``i`` (its k most probable classes).
        """
        assert X.shape[0] == len(top_k_list)
        probs = np.asarray(super(TopKRanker, self).predict_proba(X))
        prediction = np.zeros((X.shape[0], self.classes_.shape[0]))
        for sample_idx, k in enumerate(top_k_list):
            sample_probs = probs[sample_idx, :]
            top_labels = self.classes_[sample_probs.argsort()[-int(k):]].tolist()
            for label in top_labels:
                prediction[sample_idx, label] = 1
        return prediction, probs
def evaluateNodeClassification(X_train, X_test, Y_train, Y_test):
    """
    Fit a top-k one-vs-rest logistic regression and predict test labels.
    :param X_train: Training feature matrix (node embeddings)
    :param X_test: Test feature matrix
    :param Y_train: Training label-indicator matrix
    :param Y_test: Test label-indicator matrix; used only to derive how many
        labels (k) to predict per test sample
    :return: (prediction, probs) - binary label predictions and raw class
        probabilities for the test set
    """
    # Y_test may be a scipy sparse matrix (has .toarray) or a dense array;
    # either way, the per-row label count becomes each sample's k.
    try:
        top_k_list = list(Y_test.toarray().sum(axis=1))
    except:
        top_k_list = list(Y_test.sum(axis=1))
    classif2 = TopKRanker(lr(solver='lbfgs', max_iter=1000))
    classif2.fit(X_train, Y_train)
    prediction, probs = classif2.predict(X_test, top_k_list)
    return prediction, probs
def evaluate_node_classification(prediction, Y_test):
    """
    Score predicted labels against the ground truth.
    :param prediction: Binary label-indicator matrix of predictions
    :param Y_test: Ground-truth label-indicator matrix
    :return: (micro-F1, macro-F1, accuracy, ROC-AUC, micro precision)
    """
    accuracy = accuracy_score(Y_test, prediction)
    micro = f1_score(Y_test, prediction, average='micro', zero_division=0)
    macro = f1_score(Y_test, prediction, average='macro', zero_division=0)
    auc = roc_auc_score(Y_test, prediction)
    precision = precision_score(Y_test, prediction, average='micro')
    return micro, macro, accuracy, auc, precision
# return micro, macro, accuracy
# return micro, macro, accuracy
def expNC(X, Y, test_ratio_arr, rounds):
    """
    Node classification experiment over several train/test splits.
    :param X: Feature matrix (node embeddings)
    :param Y: Label-indicator matrix for the nodes
    :param test_ratio_arr: Iterable of test-set ratios to evaluate
    :param rounds: Number of repetitions of the whole sweep
    :return: (micro, macro, acc, auc) arrays of shape (rounds, len(test_ratio_arr))
    """
    micro = [None] * rounds
    macro = [None] * rounds
    acc = [None] * rounds
    auc = [None] * rounds
    for round_id in range(rounds):
        micro_round = [None] * len(test_ratio_arr)
        macro_round = [None] * len(test_ratio_arr)
        acc_round = [None] * len(test_ratio_arr)
        auc_round = [None] * len(test_ratio_arr)
        for i, test_ratio in enumerate(test_ratio_arr):
            # Bug fix: the original called evaluateNodeClassification(X, Y,
            # test_ratio), which does not match its 4-argument signature and
            # unpacked four values from its 2-tuple result.  Split the data,
            # predict, then score (mirrors compute_final_measures).
            X_train, X_test, Y_train, Y_test = sk_ms.train_test_split(
                X, Y, test_size=test_ratio)
            prediction, _ = evaluateNodeClassification(
                X_train, X_test, Y_train, Y_test)
            micro_round[i], macro_round[i], acc_round[i], auc_round[i], _ = \
                evaluate_node_classification(prediction, Y_test)
        micro[round_id] = micro_round
        macro[round_id] = macro_round
        acc[round_id] = acc_round
        auc[round_id] = auc_round
    micro = np.asarray(micro)
    macro = np.asarray(macro)
    acc = np.asarray(acc)
    auc = np.asarray(auc)
    return micro, macro, acc, auc
def input_for_classification(dict_proj, relevant_nodes):
    """
    Stack node embeddings into a float feature tensor for classification.
    :param dict_proj: A dictionary with keys==nodes in projection and values==projection
    :param relevant_nodes: List of node-id lists, one list per split
    :return: numpy array of shape (len(relevant_nodes), nodes_per_split, dim)
    """
    features = []
    for node_group in relevant_nodes:
        group_vectors = [dict_proj[node] for node in node_group]
        features.append(np.array(group_vectors).astype(float))
    return np.array(features)
def read_labels(K, data_name, times):
    """Sample ``times`` groups of ``K`` labeled nodes and one-hot their labels.

    Reads ``<data_name>/<data_name>_labels.txt`` (lines of ``<node> <label>``)
    and returns ``(labels, nodes)`` where labels has shape (times, K,
    num_classes) and nodes is the matching list of node-id lists.

    NOTE(review): sampling uses the global `random` without a seed, so
    results are not reproducible across runs.
    """
    labels_path = data_name + '_labels.txt'
    path = os.path.join(data_name, labels_path)
    if os.path.exists(path):
        node_label = {}
        classes = []
        with open(path, 'r') as f:
            for line in f:
                items = line.strip().split()
                node_label[items[0]] = items[1]
                if items[1] not in classes:
                    classes.append(items[1])
        # NOTE(review): this assumes labels are 1-indexed integers 1..C;
        # a 0 label would index tmp[-1] below — confirm the label files.
        num_classes = np.max(np.array(classes).astype(int))
        one_hot_vec = np.zeros(num_classes)
        keys = list(node_label.keys())
        # Sample times*K distinct node indices (index 0 is never sampled).
        random_ind = random.sample(range(1, len(keys)), times * K)
        relevant_labels = []
        relevant_nodes = []
        for j in range(times):
            relevant_labels_i = []
            relevant_nodes_i = []
            indexes = np.arange(j * K, (j + 1) * K)
            for i in indexes:
                tmp = one_hot_vec.copy()
                tmp[int(node_label[keys[random_ind[i]]])-1] = 1
                tmp = tmp.astype(int)
                relevant_labels_i.append(tmp)
                relevant_nodes_i.append(keys[random_ind[i]])
            relevant_labels.append(relevant_labels_i)
            relevant_nodes.append(relevant_nodes_i)
    else:
        # TODO
        # NOTE(review): this branch leaves relevant_labels/relevant_nodes
        # undefined, so the return below raises NameError when the labels
        # file is missing.
        pass
    return np.array(relevant_labels), relevant_nodes
def compute_final_measures(input, labels, ratio, times):
    """Train/test-split each input group and collect mean/std of the scores.

    :param input: Feature tensor, one (K, dim) matrix per group
    :param labels: One-hot label tensor matching ``input``
    :param ratio: List of train-set ratios to sweep
    :param times: Number of input/label groups evaluated per ratio
    :return: Dict of per-ratio mean and std lists for accuracy, AUC,
        micro/macro F1 and precision.

    NOTE(review): the all_* accumulators are never reset inside the ratio
    loop, so the mean/std reported for ratio j also include the results of
    all previous ratios (a cumulative running average) — confirm whether
    that is intended.
    """
    all_micro, all_macro, all_acc, all_auc, all_precision = [], [], [], [], []
    mean_acc, mean_auc, mean_micro, mean_macro, mean_precision = [], [], [], [], []
    std_acc, std_auc, std_micro, std_macro, std_precision = [], [], [], [], []
    dict_measures = {}
    for j in range(len(ratio)):
        for i in range(times):
            # ratio[j] is the TRAIN fraction; test_size is its complement.
            X_train, X_test, Y_train, Y_test = sk_ms.train_test_split(input[i], labels[i], test_size=(1-ratio[j]))
            prediction, probs = evaluateNodeClassification(X_train, X_test, Y_train, Y_test)
            micro, macro, acc, auc, precision = evaluate_node_classification(prediction, Y_test)
            # auc, precision = random.uniform(0, 1), random.uniform(0, 1)
            # micro, macro, acc= evaluate_node_classification(prediction, Y_test)
            all_acc.append(acc)
            all_auc.append(auc)
            all_micro.append(micro)
            all_macro.append(macro)
            all_precision.append(precision)
        mean_acc.append(np.mean(np.array(all_acc)))
        mean_auc.append(np.mean(np.array(all_auc)))
        mean_micro.append(np.mean(np.array(all_micro)))
        mean_macro.append(np.mean(np.array(all_macro)))
        mean_precision.append(np.mean(np.array(all_precision)))
        std_acc.append(np.std(np.array(all_acc)))
        std_auc.append(np.std(np.array(all_auc)))
        std_micro.append(np.std(np.array(all_micro)))
        std_macro.append(np.std(np.array(all_macro)))
        std_precision.append(np.std(np.array(all_precision)))
    dict_measures['Accuracy'] = mean_acc
    dict_measures['AUC'] = mean_auc
    dict_measures['Micro-f1'] = mean_micro
    dict_measures['Macro-f1'] = mean_macro
    dict_measures['Precision'] = mean_precision
    dict_measures['std_acc'] = std_acc
    dict_measures['std_auc'] = std_auc
    dict_measures['std_micro'] = std_micro
    dict_measures['std_macro'] = std_macro
    dict_measures['std_precision'] = std_precision
    return dict_measures
import matplotlib.pyplot as plt
import matplotlib as mpl
# Global matplotlib styling shared by all comparison plots.
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['axes.titlesize'] = 18
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams["font.family"] = "Times New Roman"
def plots_maker(dict_measures, ratio_arr, measure, data_name):
    """Plot one score curve per method and save it under ``<data_name>/plots``.

    :param dict_measures: Maps method name (containing 'event2vec' or
        'node2vec') to its per-ratio measure dict
    :param ratio_arr: X-axis values (train ratios)
    :param measure: Key into each method's measure dict (e.g. 'AUC')
    :param data_name: Dataset name used in the title and output path

    NOTE(review): a method name matching neither 'event2vec' nor 'node2vec'
    leaves color/marker/y_axis unset and raises UnboundLocalError; the two
    dashed series below are hardcoded values, not computed results.
    """
    x_axis = np.array(ratio_arr)
    task = 'Node Classification'
    bottom = 0.7
    top = 0.99
    keys = list(dict_measures.keys())
    plt.figure(figsize=(7, 6))
    for j in range(len(keys)):
        if 'event2vec' in keys[j]:
            color = 'red'
            marker = 'o'
            markersize = 8
            linestyle = 'solid'
            y_axis = dict_measures[keys[j]][measure]
        elif "node2vec" in keys[j]:
            color = 'green'
            marker = 's'
            markersize = 6
            linestyle = 'solid'
            y_axis = dict_measures[keys[j]][measure]
        plt.plot(x_axis, y_axis, marker=marker, linestyle=linestyle, markersize=markersize, color=color)
    # Hardcoded baseline curves (dashed) for comparison.
    plt.plot(x_axis, [0.96, 0.957, 0.963, 0.964, 0.962, 0.964, 0.965, 0.97, 0.98], marker='o', linestyle='dashed', markersize=8, color='red')
    plt.plot(x_axis, [0.939, 0.945, 0.947, 0.95, 0.95, 0.952, 0.952, 0.955, 0.958], marker='s', linestyle='dashed', markersize=6, color='green')
    plt.ylim(bottom=bottom, top=top)
    keys = ['our_node2vec', 'our_event2vec', 'event2vec', 'node2vec']
    plt.legend(keys, loc='best', ncol=3, fontsize='large')
    plt.title("{} Dataset \n {} Task - {} Score".format(data_name, task, measure))
    plt.xlabel("Percentage")
    plt.ylabel("{}".format(measure))
    plt.tight_layout()
    plt.savefig(os.path.join(data_name, "plots", "{} {} {}.png".format(data_name, task, measure)))
    plt.show()
def main():
    """
    Main function for the node classification task: load the graph and the
    embeddings, run the classification sweep for each embedding method,
    plot the AUC curves and print all scores.
    :return:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_name', default='dblp')
    args = parser.parse_args()
    number = 2500
    times = 1
    ratio_arr = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    graph_maker = GraphImporter(args.data_name)
    graph = graph_maker.import_graph()
    # nodes = graph.nodes()
    # indexes = np.linspace(0, len(nodes)-1, 5000)
    # indexes = indexes.astype(int)
    # relevant_nodes = np.array(nodes)[indexes]
    # graph = nx.subgraph(graph, relevant_nodes)
    embeddings_maker = EmbeddingCreator(args.data_name, graph)
    dict_embeddings_event2vec = embeddings_maker.create_event2vec_embeddings()
    # NOTE(review): this also calls create_event2vec_embeddings, so the
    # "node2vec" dict is identical to the event2vec one — probably meant to
    # be create_node2vec_embeddings; confirm before comparing methods.
    dict_embeddings_node2vec = embeddings_maker.create_event2vec_embeddings()
    # dict_event2vec_embeddings = embedding_model.create_event2vec_embeddings()
    # nodes = list(dict_event2vec_embeddings.keys())
    # relevant_edges = edges_to_predict(multi_graph)
    # true_edges = choose_true_edges(relevant_edges, number)
    # false_edges = choose_false_edges(multi_graph, relevant_edges, number)
    labels, relevant_nodes = read_labels(number, args.data_name, times)
    input_event2vec = input_for_classification(dict_embeddings_event2vec, relevant_nodes)
    input_node2vec = input_for_classification(dict_embeddings_node2vec, relevant_nodes)
    dict_measures_event2vec = compute_final_measures(input_event2vec, labels, ratio_arr, times)
    dict_measures_node2vec = compute_final_measures(input_node2vec, labels, ratio_arr, times)
    dict_measures = {}
    dict_measures['node2vec'] = dict_measures_node2vec
    dict_measures['event2vec'] = dict_measures_event2vec
    plots_maker(dict_measures, ratio_arr, 'AUC', 'DBLP')
    print('avg acc e2v: ', dict_measures_event2vec['Accuracy'])
    print('avg auc e2v: ', dict_measures_event2vec['AUC'])
    print('avg micro e2v: ', dict_measures_event2vec['Micro-f1'])
    print('avg macro e2v: ', dict_measures_event2vec['Macro-f1'])
    print('std acc e2v: ', dict_measures_event2vec['std_acc'])
    print('std auc e2v: ', dict_measures_event2vec['std_auc'])
    print('std micro e2v: ', dict_measures_event2vec['std_micro'])
    print('std macro e2v: ', dict_measures_event2vec['std_macro'])
    print('avg acc n2v: ', dict_measures_node2vec['Accuracy'])
    print('avg auc n2v: ', dict_measures_node2vec['AUC'])
    print('avg micro n2v: ', dict_measures_node2vec['Micro-f1'])
    print('avg macro n2v: ', dict_measures_node2vec['Macro-f1'])
    print('std acc n2v: ', dict_measures_node2vec['std_acc'])
    print('std auc n2v: ', dict_measures_node2vec['std_auc'])
    print('std micro n2v: ', dict_measures_node2vec['std_micro'])
    print('std macro n2v: ', dict_measures_node2vec['std_macro'])
    # micro, macro, acc, auc = exp_lp(X, Y, ratio_arr, 3)
    # avg_micro, avg_macro, avg_acc, avg_auc = calculate_all_avg_scores(micro, macro, acc, auc, 3)
    # all_micro.append(avg_micro)
    # all_macro.append(avg_macro)
    # all_acc.append(avg_acc)
    # all_auc.append(avg_auc)
    # fig1, fig2, fig3, fig4 = split_vs_score(all_micro[0], all_macro[0], all_micro[1], all_macro[1], all_acc[0],
    #                                         all_acc[1], all_auc[0], all_auc[1], ratio_arr)
    # plt.show()
main()
37099514326 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 3 20:26:44 2017
@author: alexander
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import numpy as np
import tensorflow as tf
import ProcessData
from sklearn import model_selection
tf.logging.set_verbosity(tf.logging.INFO)
# tf.Example parsing spec used by the serving input receiver: 50x50 float
# spectrogram patches under keys 'x' (features) and 'y'.
feature_spec = {'x': tf.FixedLenFeature(dtype=np.float32,
                                        shape=[50,50]),
                'y': tf.FixedLenFeature(dtype=np.float32,
                                        shape=[50,50])}
def serving_input_receiver_fn():
    """An input receiver that expects a serialized tf.Example."""
    serialized = tf.placeholder(dtype=tf.string,
                                shape=[1],
                                name='input_example_tensor')
    receiver_tensors = {'x': serialized}
    features = tf.parse_example(serialized, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
def cnn_model_fn(features, labels, mode):
    """Model function for CNN.

    Two conv+pool stages followed by a dense layer with dropout and a
    6-class logits layer, wired into a tf.estimator EstimatorSpec for
    TRAIN / EVAL / PREDICT modes.
    """
    # Input Layer: 50x50 single-channel patches.
    input_layer = tf.reshape(features["x"], [-1, 50, 50, 1])
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1 (50x50 -> 25x25)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2 and Pooling Layer #2 (25x25 -> 12x12)
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense Layer: flatten 12*12*64 = 9216 features.
    pool2_flat = tf.reshape(pool2, [-1,9216])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    # Dropout is only active in TRAIN mode.
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits Layer: 6 output classes.
    logits = tf.layers.dense(inputs=dropout, units=6)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=6)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=onehot_labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    """Train and evaluate the whistle classifier end to end.

    Loads and preprocesses the dataset, holds out 10% for evaluation,
    trains the CNN Estimator for 100k steps and prints eval metrics.
    """
    # lookup is unused here -- presumably a class-id mapping; TODO confirm.
    data, labels, lookup = ProcessData.readData(backgroundRatio=0.5)
    data = ProcessData.preprocess(data)
    # The Estimator input pipeline expects float32 features, int32 labels.
    data = data.astype(np.float32)
    labels = labels.astype(np.int32)
    train_data, eval_data, train_labels, eval_labels = model_selection.train_test_split(
        data,
        labels,
        test_size=0.1)
    #train_labels = np.asarray(train_labels, dtype=np.int32)
    #eval_labels = np.asarray(eval_labels, dtype=np.int32)
    # NOTE(review): hard-coded absolute model_dir ties training/checkpoints
    # to one machine -- consider making it configurable.
    whistle_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/home/alexander/devel/SoundRecog/whistle_convnet_model_normalized6")
    # Set up logging for predictions (logs softmax outputs every 50 steps).
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)
    # Train the model (num_epochs=None -> cycle the data indefinitely,
    # bounded by steps=100000 below).
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=400,
        num_epochs=None,
        shuffle=True)
    whistle_classifier.train(
        input_fn=train_input_fn,
        steps=100000,
        hooks=[logging_hook])
    # Evaluate the model and print results (single pass, no shuffling).
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = whistle_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
# Entry point: tf.app.run() parses TF flags and then invokes main().
if __name__ == "__main__":
    tf.app.run()
| lonsas/SoundRecog | whistleCNN.py | whistleCNN.py | py | 4,821 | python | en | code | 0 | github-code | 13 |
28752939278 | # ===================================
# khai bao thu vien
from time import sleep
from urllib import request
from seeed_dht import DHT
# import random as rd
# Device declaration: DHT11 sensor on GPIO 18. Currently disabled, so
# `dht` is undefined and the read loop below always hits its except path
# until this line is restored.
# dht = DHT("11",18)
# ThingSpeak channel declaration.
# NOTE(review): channel_ID is never used -- the write API key in
# post_http() selects the channel implicitly. Kept for reference.
channel_ID = "2287342"
def post_http():
    """Upload the current global humi/temp readings to ThingSpeak.

    Builds the channel-update URL with the write API key and performs a
    single HTTP GET. Relies on the module-level globals ``humi`` and
    ``temp`` being set by the caller before this is invoked.
    """
    api_key = "G3LP8MEPQ6UCKYQS"
    url = "https://api.thingspeak.com/update?api_key=%s&field1=%s&field2=%s" %(api_key,humi,temp)
    # Bug fix: the update used to be sent twice (urlopen was called two
    # times in a row); send it exactly once and close the response.
    r = request.urlopen(url)
    r.close()
    print("http send ok ")
# Main loop: read the sensor and upload every 20 s.
# NOTE(review): `dht` is only defined by the commented-out declaration
# above (`dht = DHT("11",18)`); as written, every iteration raises
# NameError and lands in the except branch. Restore that line (or the
# random test values below) for real readings.
while True:
    try:
        humi, temp = dht.read()
        # humi = rd.randint(60,100)
        # temp = rd.randint(20,40)
        post_http()
        sleep(20)
    except Exception:
        # `except Exception` instead of a bare except, so
        # KeyboardInterrupt/SystemExit can still stop the loop.
        print("error")
        sleep(1)
24595732770 | import requests
from bs4 import BeautifulSoup
import spotipy
from spotipy.oauth2 import SpotifyOAuth
import os
import tkinter as tk
def action():
    """Button callback: read a dd/mm/yyyy date from the entry widget,
    rewrite it as yyyy-mm-dd and hand it to function()."""
    parts = e.get().split('/')
    iso_date = '-'.join([parts[2], parts[1], parts[0]])
    print(iso_date)
    function(iso_date)
# ##### UI #####
window = tk.Tk()
window.config(width=200, height=200, bg='#20322e')
e = tk.Entry(width=30)
# NOTE(review): the placeholder reads 'aaaa-mm-aa' (yyyy-mm-dd) but
# action() parses the entry as dd/mm/yyyy -- confirm the intended format.
e.insert(tk.END, 'aaaa-mm-aa')
e.pack()
button = tk.Button(text="Build playlist", command=action)
button.pack()
label = tk.Label(text='')
label.pack()
# Spotify credentials come from the environment, never hard-coded.
SPOTIPY_CLIENT_SECRET = os.environ.get('SPOTIPY_CLIENT_SECRET')
SPOTIPY_CLIENT_ID = os.environ.get('SPOTIPY_CLIENT_ID')
def function(selected_date):
    """Build a private Spotify playlist from the Billboard Hot 100 chart.

    selected_date: chart date as a ``yyyy-mm-dd`` string.

    Scrapes the Billboard chart page for that date, searches Spotify for
    each of the top songs (restricted to the chart year) and creates a
    private playlist named "<date> Billboard 100" with the matches.
    """
    response = requests.get(url=f"https://www.billboard.com/charts/hot-100/{selected_date}")
    response.raise_for_status()
    data = response.text
    soup = BeautifulSoup(data, 'html.parser')
    title = soup.select(selector="li h3", class_="c-title")
    # Billboard wraps each title in a fixed run of whitespace; slice it out.
    song_list = []
    for n in title:
        if str(n.string) != 'None':
            song_list.append(str(n.string).split('\n\n\t\n\t\n\t\t\n\t\t\t\t\t')[1].split('\t\t\n\t\n')[0])
    # Chart year restricts the Spotify search (computed once; the previous
    # code derived it twice and kept an unused `playlist_id` variable).
    year = selected_date.split("-")[0]
    # Spotify Authentication
    sp = spotipy.Spotify(
        auth_manager=SpotifyOAuth(
            scope="playlist-modify-private",
            redirect_uri="http://localhost:8000",
            client_id=SPOTIPY_CLIENT_ID,
            client_secret=SPOTIPY_CLIENT_SECRET,
            show_dialog=True,
            cache_path="token.txt"
        )
    )
    user_id = sp.current_user()["id"]
    # Searching Spotify for songs by title, scoped to the chart year.
    song_uris = []
    for song in song_list[0:11]:
        result = sp.search(q=f"track:{song} year:{year}", type="track")
        try:
            uri = result["tracks"]["items"][0]["uri"]
            song_uris.append(uri)
        except IndexError:
            print(f"{song} doesn't exist in Spotify. Skipped.")
    # Creating a new private playlist in Spotify.
    # Bug fix: the name previously interpolated the split() list (e.g.
    # "['2000', '08', '12'] Billboard 100"); use the date string itself.
    playlist = sp.user_playlist_create(user=user_id, name=f"{selected_date} Billboard 100", public=False)
    # Adding songs found into the new playlist.
    sp.playlist_add_items(playlist_id=playlist["id"], items=song_uris)
    deliver(playlist["id"])
def deliver(playlist):
    """Print a shareable open.spotify.com link for the given playlist id."""
    link = f'https://open.spotify.com/playlist/{playlist}'
    print(link)
# Hand control to Tk's event loop; blocks until the window is closed.
window.mainloop()
| epbfpm/TimeCapsuleJam | main.py | main.py | py | 2,440 | python | en | code | 0 | github-code | 13 |
7544659279 | # author: Nicolo
# few edits by Agnes
# -*- coding: utf-8 -*-
from __future__ import division
import redis
from math import log
import string
import re
from nltk.stem.snowball import SnowballStemmer
from math import log
# Dutch Snowball stemmer shared by all scoring functions below.
stemmer = SnowballStemmer("dutch")
def removePunct(txt):
    """Return *txt* with every ASCII punctuation character replaced by a
    single space (string length is preserved)."""
    punct_class = '[' + re.escape(string.punctuation) + ']'
    return re.sub(punct_class, " ", txt)
def stem(x):
    """Best-effort Dutch Snowball stem of *x*; returns *x* unchanged when
    the stemmer cannot handle it (e.g. non-string input)."""
    try:
        return stemmer.stem(x)
    except Exception:
        # `except Exception` instead of a bare except so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        return x
def get_db_connections():
    """Open the five local Redis handles used by the scorers, in order:
    (index_mono, index_bi, df_mono, df_bi, db_coll).

    Note: index_bi and db_coll both point at db 5, mirroring the original
    configuration. StrictRedis connects lazily, so no I/O happens here.
    """
    db_numbers = (0, 5, 2, 3, 5)
    return tuple(redis.StrictRedis(host='localhost', port=6379, db=n)
                 for n in db_numbers)
# Shared Redis handles: (index_mono, index_bi, df_mono, df_bi, db_coll).
DB_CONNS = get_db_connections()
# NOTE(review): this queries Redis at import time -- the module cannot be
# imported without a reachable local Redis instance.
NUMBER_OF_DOCUMENTS = len(DB_CONNS[0].keys())
# Precomputed average field lengths (in tokens) for the corpus.
AVERAGE_DOCUMENT_LENGTH = dict({
    'title':3.6178696042268967,
    'description' : 82.0403598843585,
    'summary':45.95869305154022,
    'total':131.6169225401256})
# Total token counts per field, derived from the averages above.
COLLECTION_LENGTH = dict({
    'title':AVERAGE_DOCUMENT_LENGTH['title'] * NUMBER_OF_DOCUMENTS ,
    'description' : AVERAGE_DOCUMENT_LENGTH['description'] * NUMBER_OF_DOCUMENTS ,
    'summary':AVERAGE_DOCUMENT_LENGTH['summary'] * NUMBER_OF_DOCUMENTS ,
    'total':AVERAGE_DOCUMENT_LENGTH['total'] * NUMBER_OF_DOCUMENTS })
#print COLLECTION_LENGTH['total']
def linearInterpolation(query, documentID, lambdaVal):
    """Jelinek-Mercer (linear interpolation) language-model score.

    Stems and cleans *query*, looks the document descriptor up in Redis
    and returns sum(log(lambda*P(t|d) + (1-lambda)*P(t|C))) over the
    query terms. Returns float('-inf') when a term has no collection
    frequency (or the document is empty) and 0 when the descriptor is
    missing/malformed (best-effort, as before).
    """
    query = map(lambda x : stem(x), removePunct(query.lower()).split())
    connsResult = DB_CONNS[2].get(documentID)
    if connsResult is not None:
        # SECURITY: eval() on data pulled from Redis executes arbitrary
        # code if the store is ever tampered with -- consider
        # ast.literal_eval / json instead.
        descriptor = eval(connsResult)
    else:
        descriptor = {}
    score = 0
    try:
        docLength = sum(descriptor['total'].values())
        for term in query:
            if term in descriptor['total'].keys():
                termCount = descriptor['total'][term]
            else:
                termCount = 0
            cf = DB_CONNS[4].get(term)
            if cf is not None and docLength > 0:
                score += log((termCount/docLength)*lambdaVal + (int(cf)/(COLLECTION_LENGTH['total']))*(1-lambdaVal))
            else:
                # Unseen term (or empty document): mark as unrankable.
                score = float("-inf")
    except Exception:
        # Narrowed from a bare except: missing/malformed descriptors still
        # score 0 instead of aborting, but KeyboardInterrupt now escapes.
        pass
    return score
def dirichletSmoothing(query, documentID, alpha):
    """Dirichlet-smoothed query-likelihood score for one document.

    Returns, summed over the query terms,
    log((tf + alpha * P(t|C)) / (docLength + alpha)). Yields
    float('-inf') when a term is missing from the collection statistics
    and 0 when the document descriptor is absent/malformed (best-effort,
    as before).
    """
    query = map(lambda x : stem(x), removePunct(query.lower()).split())
    connsResult = DB_CONNS[2].get(documentID)
    if connsResult is not None:
        # SECURITY: eval() on Redis payloads executes arbitrary code if
        # the store is tampered with -- prefer ast.literal_eval / json.
        descriptor = eval(connsResult)
    else:
        descriptor = {}
    score = 0
    try:
        docLength = sum(descriptor['total'].values())
        for term in query:
            if term in descriptor['total'].keys():
                termCount = descriptor['total'][term]
            else:
                termCount = 0
            cf = DB_CONNS[4].get(term)
            if cf is not None:
                score += log ((termCount + alpha * ( int(cf)/COLLECTION_LENGTH['total'])) / (docLength + alpha))
            else:
                # Term absent from the collection: document is unrankable.
                score = float("-inf")
    except Exception:
        # Narrowed from a bare except: missing/malformed descriptors still
        # score 0 instead of aborting, but KeyboardInterrupt now escapes.
        pass
    return score
def KLdivergence(query, documentID):
    """KL-divergence-style ranking score between the query model and the
    Dirichlet-style smoothed document model.

    NOTE: Python 2 only -- uses dict.iteritems(); `len(query)` on the
    map() result also relies on Py2's list-returning map().
    """
    query = map(lambda x : stem(x), removePunct(query.lower()).split())
    ptq = {} # probability of a term in the query
    # Count query-term frequencies...
    for term in query:
        if term in ptq.keys():
            ptq[term] += 1
        else:
            ptq[term] = 1
    # ...then normalise the counts into a maximum-likelihood distribution.
    for k,v in ptq.iteritems() :
        ptq[k] = v / len(query)
    #descriptor = eval(DB_CONNS[2].get(documentID))
    # SECURITY: eval() on Redis payloads -- same caveat as the other
    # scorers in this module.
    connsResult = DB_CONNS[2].get(documentID)
    if connsResult != None:
        descriptor = eval(connsResult)
    else:
        descriptor = {}
    score=0
    try:
        docLength = sum(descriptor['total'].values())
        #calculating alpha
        # alpha = (1 - sum_d P(t|d)) / (1 - sum_d P(t|C)): the smoothing
        # coefficient over the document's own vocabulary.
        sumD = 0
        sumC = 0
        for term in descriptor['total'].keys():
            sumD += descriptor['total'][term] / docLength
            cf = DB_CONNS[4].get(term)
            if (cf == None):
                cf = 0
            sumC += int(cf) / COLLECTION_LENGTH['total']
        alpha = (1 - sumD) / (1 - sumC)
        # Accumulate probQ * log(P(t|d) / (alpha * P(t|C))) + log(alpha)
        # for every query term present in the document.
        for term, probQ in ptq.iteritems():
            if term in descriptor['total'].keys():
                ptd = descriptor['total'][term] / docLength
                cf = DB_CONNS[4].get(term)
                if (cf == None):
                    cf = 0
                ptc = int(cf) / COLLECTION_LENGTH['total']
                if (ptc > 0 and alpha > 0) :
                    score += probQ *log( ptd / (alpha * ptc)) + log (alpha)
    except:
        # Best-effort: malformed/missing descriptors score 0.
        pass
    return score
| clouizos/AIR | code_featureExtraction_all/LanguageModels.py | LanguageModels.py | py | 4,444 | python | en | code | 0 | github-code | 13 |
class BinarySearchTree:
    """A plain (unbalanced) binary search tree.

    Duplicates are allowed and live in the left subtree (``insert`` sends
    ``data <= self.data`` to the left). ``data is None`` on the root marks
    an emptied tree.
    """

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

    def insert(self, data):
        """Insert *data*, preserving the BST ordering invariant."""
        if data <= self.data:
            if self.left is None:
                self.left = BinarySearchTree(data)
            else:
                self.left.insert(data)
        else:
            if self.right is None:
                self.right = BinarySearchTree(data)
            else:
                self.right.insert(data)

    def delete(self, data):
        """Remove one node holding *data*; silently do nothing if absent.

        Bug fix: the previous implementation could never delete anything --
        its removal logic sat in the ``else`` branch of the search loop,
        which is unreachable (the loop condition already guarantees
        ``current.data != data``); it also rebound the local name ``self``
        (a no-op for callers) and referenced the misspelled attribute
        ``current.Left``. This version uses the standard copy-the-successor
        technique so even the root can be deleted in place.
        """
        # Locate the target node and its parent.
        node, parent = self, None
        while node is not None and node.data != data:
            parent = node
            node = node.left if data < node.data else node.right
        if node is None:
            return  # value not present: no-op
        if node.left is not None and node.right is not None:
            # Two children: copy the in-order successor's value into this
            # node, then splice the successor (which has no left child) out.
            succ_parent, succ = node, node.right
            while succ.left is not None:
                succ_parent, succ = succ, succ.left
            node.data = succ.data
            if succ_parent.left is succ:
                succ_parent.left = succ.right
            else:
                succ_parent.right = succ.right
            return
        # Zero or one child.
        child = node.left if node.left is not None else node.right
        if parent is None:
            # Deleting the root object itself: overwrite it in place,
            # since callers hold a reference to this instance.
            if child is None:
                self.data = None  # tree is now empty (sentinel)
            else:
                self.data, self.left, self.right = child.data, child.left, child.right
        elif parent.left is node:
            parent.left = child
        else:
            parent.right = child

    def display(self):
        """Print the stored values in ascending (in-order) order."""
        if self.left is not None:
            self.left.display()
        print(self.data)
        if self.right is not None:
            self.right.display()
if __name__ == '__main__':
    # Ad-hoc smoke test: build a small tree and print it in sorted order.
    bst = BinarySearchTree(8)
    bst.insert(3)
    bst.insert(10)
    bst.insert(1); bst.insert(6); bst.insert(14)
    bst.insert(4); bst.insert(7); bst.insert(13)
    bst.display()
| nssathish/python-dsa | codewithmosh-dsa/DSAProblems/DS/BinaryTrees.py | BinaryTrees.py | py | 2,954 | python | en | code | 0 | github-code | 13 |
17146084433 | import setuptools
from dyepy import (
    __name__, __author__, __email__, __github__, __version__, __desc__
)
# NOTE(review): importing __name__ from dyepy rebinds this module's own
# __name__ to the package's value, which is then passed as the
# distribution name below. Unusual but apparently intentional -- confirm
# before "fixing".
with open('README.md', 'r') as fh:
    # PyPI long description comes straight from the README.
    long_description = fh.read()
# All package metadata is sourced from dyepy's dunder constants above.
setuptools.setup(
    name=__name__,
    version=__version__,
    author=__author__,
    author_email=__email__,
    description=__desc__,
    long_description=long_description,
    long_description_content_type='text/markdown',
    url=__github__,
    license='MIT',
    py_modules=['dyepy'],
    packages=[],
    install_requires=[],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.6',
)
| SFM61319/DyePy | setup.py | setup.py | py | 718 | python | en | code | 3 | github-code | 13 |
43850741822 | #!/usr/bin/env python
import argparse
import os
import imp
import contextlib
from uuid import uuid4
import warnings
import shutil
import subprocess
from six.moves import cStringIO as StringIO
try:
from pathlib import Path, PurePath
except ImportError:
from pathlib2 import Path, PurePath
# Sentinel used by cached_property to detect "no cached value yet".
_missing = object()
INIT_FILE = PurePath('__init__.py')
# __init__.py body for shared namespace packages (pkg_resources with a
# pkgutil fallback).
NAMESPACE_PKG_TEMPLATE = u"""# -*- coding: utf-8 -*-
from __future__ import absolute_import
# this is a namespace package
try:
    import pkg_resources
    pkg_resources.declare_namespace(__name__)
except ImportError:
    import pkgutil
    __path__ = pkgutil.extend_path(__path__, __name__)
"""
# IMPORT_STAR_INIT_TEMPLATE = u"""# -*- coding: utf-8 -*-
# from __future__ import absolute_import
# __version__ = {version}
# {imports}
# """
SETUP_FILE = PurePath('setup.py')
SETUP_BDIST_WHEEL_ARGS = ['python', SETUP_FILE.name, 'bdist_wheel']
SETUP_INFO_ARGS = ['python', SETUP_FILE.name, '--name', '--version']
# setup.py skeleton for the generated package. {name}/{version}/{namespaces}
# are filled by str.format() at build time; {{cookiecutter.*}} placeholders
# are substituted when cookiecutter renders this template file.
# Bug fix: the url= line was missing its closing quote, which made every
# generated setup.py a syntax error.
SETUP_TEMPLATE = u"""
from setuptools import setup, find_packages
requirements = [
    'protobuf>=3.0.0',
    'googleapis-common-protos>=1.3.1'
]
setup(
    name='{name}-{version}',
    version='{version}',
    url='{{cookiecutter.project_url}}',
    license='{{cookiecutter.license}}',
    author='{{cookiecutter.author_name}}',
    author_email='{{cookiecutter.author_email}}',
    description='Service Models',
    long_description=__doc__,
    {% raw %}package_dir={{'': 'gen-src'}},{% endraw %}
    namespace_packages={namespaces},
    packages=find_packages('gen-src'),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=requirements,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
"""
VERSION_FILE = PurePath('__version__.py')
# __init__.py body carrying the release version ({version} receives a
# repr()'d string -- see Release._write_version_to_init).
VERSION_TEMPLATE = u"""# -*- coding: utf-8 -*-
from __future__ import absolute_import
__version__ = {version}
"""
class IOUtil(object):
    """Namespace for small filesystem helpers used by the release steps."""
    @staticmethod
    def copy(src, dest):
        """Copy *src* to *dest* (str or path-like), tracing the operation
        to stdout; returns the destination reported by shutil.copy."""
        print('copy: %s -> %s' % (src, dest))
        return shutil.copy(str(src), str(dest))
class cached_property(property):
    """A decorator that converts a function into a lazy property. The
    function wrapped is called the first time to retrieve the result
    and then that calculated result is used the next time you access
    the value::
        class Foo(object):
            @cached_property
            def foo(self):
                # calculate something important here
                return 42
    The class has to have a `__dict__` in order for this property to
    work.
    """
    # implementation detail: A subclass of python's builtin property
    # decorator, we override __get__ to check for a cached value. If one
    # choses to invoke __get__ by hand the property will still work as
    # expected because the lookup logic is replicated in __get__ for
    # manual invocation.
    def __init__(self, func, name=None, doc=None):
        # Mirror the wrapped function's metadata so introspection works.
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func
    def __set__(self, obj, value):
        # Assignment writes straight into the instance dict -- the same
        # slot the cache uses, so it also pre-seeds/overrides the cache.
        obj.__dict__[self.__name__] = value
    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        # _missing (module-level sentinel) distinguishes "not cached yet"
        # from a legitimately cached None.
        value = obj.__dict__.get(self.__name__, _missing)
        if value is _missing:
            value = self.func(obj)
            obj.__dict__[self.__name__] = value
        return value
@contextlib.contextmanager
def working_directory(path):
    """Context manager: chdir into *path* for the duration of the block
    and always restore the previous working directory afterwards."""
    previous = Path.cwd()
    try:
        os.chdir(str(path))
        yield path
    finally:
        # Runs even when the body (or the chdir itself) raises.
        os.chdir(str(previous))
class Release(object):
    """Packages generated Python protobuf sources found in *build_dir*
    into an installable, namespaced distribution under build_dir/gen-src,
    then builds a wheel to verify the result.
    """
    # Glob used to collect the generated source files from build_dir.
    py_file_pattern = '*.py'
    lang = 'python'
    def __init__(self,
                 version,
                 build_dir,
                 namespace):
        # version: release version string; build_dir: Path to the build
        # tree; namespace: dotted shared-namespace prefix (e.g. "a.b").
        self.version = version
        self.build_dir = build_dir
        self.pkg_build_dir = build_dir / PurePath('gen-src')
        self.namespace = namespace
    @cached_property
    def name(self):
        # NOTE(review): broken as written -- neither `pkg_fmt` nor
        # `self.python` is defined anywhere in this class, so accessing
        # this property raises AttributeError (likely meant a pkg_fmt
        # class attribute and lang=self.lang). Apparently never called.
        return self.pkg_fmt.format(
            lang=self.python,
            version=self.version)
    @cached_property
    def module(self):
        """Find the first generated code file and load that sourcefile into a
        temporary namespace. This module will be used to read the protobuf
        package path from its DESCRIPTOR (see module_import_path).
        """
        filepath = next(iter(self.manifest))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            # Random module name avoids clashing with real imports.
            module = imp.load_source('{}.tmp'.format(str(uuid4().hex)), str(filepath.absolute()))
        return module
    @cached_property
    def module_import_path(self):
        # Drop the last component of the protobuf package (the leaf module).
        base, _ =self.module.DESCRIPTOR.package.rsplit('.', 1)
        return base
    @cached_property
    def all_namespaces(self):
        """Expand 'a.b.c' into ['a.b.c', 'a.b', 'a'] for namespace_packages."""
        n = self.namespace
        ns = [n]
        while n.count('.') != 0:
            n, _ = n.rsplit('.', 1)
            ns.append(n)
        return ns
    @property
    def namespace_dir(self):
        # Directory corresponding to the shared namespace root.
        return self.pkg_build_dir / PurePath(self.namespace.replace('.', '/'))
    @property
    def module_dir_path(self):
        # Directory corresponding to the full module import path.
        return self.pkg_build_dir / Path(self.module_import_path.replace('.', '/'))
    @property
    def module_name(self):
        return self.module_import_path
    @cached_property
    def manifest(self):
        """All generated .py files sitting directly in build_dir."""
        return tuple(self.build_dir.glob(self.py_file_pattern))
    def __str__(self):
        return 'Release({})'.format(self.version)
    def _build_pkg_structure(self):
        # Create the directories for the module
        # eg. a.b.c.d -> build/gen-src/a/b/c/d
        path = self.module_dir_path
        path.mkdir(parents=True)
        # Walk up the directory tree, inserting empty __init__.py
        # files as needed for a proper python module. Once directory
        # that represents the shared namespace module is reached,
        # place the special __init__.py files that designate namespace
        # modules.
        reached_namespace = False
        # first = True
        while path != self.pkg_build_dir:
            if path == self.namespace_dir:
                reached_namespace = True
            init = path / INIT_FILE
            if reached_namespace:
                with init.open('w') as init_file:
                    init_file.write(NAMESPACE_PKG_TEMPLATE)
            # elif first:
            #     text = StringIO()
            #     for fi in self.manifest:
            #         if not fi.name.endswith('_pb2.py'):
            #             continue
            #         module_name = 'from .{} import *\n'.format(fi.name.replace('.py', ''))
            #         text.write(module_name)
            #     text.seek(0)
            #     with init.open('w') as init_file:
            #         init_file.write(IMPORT_STAR_INIT_TEMPLATE.format(
            #             imports=text.read()))
            #     first = False
            else:
                init.touch()
            path = path.parent
    def _copy_manifest_to_pkg(self):
        """Copy the files in the manifest from the /build to the module
        directory"""
        for srcfile in self.manifest:
            destfile = self.module_dir_path / PurePath(srcfile.name)
            IOUtil.copy(srcfile, destfile)
    def _write_version_to_init(self):
        """Write __version__ into the leaf package's __init__.py."""
        content = VERSION_TEMPLATE.format(
            version=repr(self.version))
        root_init_path = self.module_dir_path / INIT_FILE
        with root_init_path.open('w') as init_file:
            init_file.write(content)
    def _generate_setup_py(self):
        """Render SETUP_TEMPLATE into build_dir/setup.py."""
        content = SETUP_TEMPLATE.format(
            name=self.module_name,
            version=self.version,
            namespaces=self.all_namespaces)
        setup_path = self.build_dir / SETUP_FILE
        with setup_path.open('w') as setup_file:
            setup_file.write(content)
    def build(self):
        """Run the full pipeline: layout, copy, version, setup.py, verify."""
        self._build_pkg_structure()
        self._copy_manifest_to_pkg()
        self._write_version_to_init()
        self._generate_setup_py()
        self.verify()
    def verify(self):
        """Build a wheel and check setup.py reports the expected name/version.

        NOTE(review): relies on assert statements, which are stripped when
        Python runs with -O -- verification would silently pass then.
        """
        package_name = '{}-{}'.format(self.module_name, self.version)
        expected_info = {package_name, self.version}
        with working_directory(self.build_dir):
            assert subprocess.call(SETUP_BDIST_WHEEL_ARGS) == 0
            cmd = subprocess.Popen(SETUP_INFO_ARGS, stdout=subprocess.PIPE)
            info = {line.strip().decode('utf-8') for line in cmd.stdout.readlines()}
            assert cmd.wait() == 0
            print('{} == {}'.format(info, expected_info))
            assert info == expected_info
def main():
    """CLI entry point: parse arguments and run the selected action
    (only --build is currently wired up; build() calls verify() itself)."""
    args = parse_args()
    release = Release(args.model_version,
                      args.build_dir,
                      args.namespace)
    if args.action == 'build':
        release.build()
    # elif args.action == 'verify':
    #     release.verify()
def parse_args():
    """Parse the release CLI arguments.

    Required flags: --version (stored as ``model_version``), --build-dir
    (converted to a ``Path``), --namespace, plus exactly one action flag
    (only --build is exposed). Returns the populated argparse namespace.
    """
    parser = argparse.ArgumentParser(
        # Typo fix in the user-visible description ("Preapare").
        description='Prepare and Verify Python Model Releases',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version', type=str, action='store',
                        required=True,
                        dest='model_version',
                        help='Model release version')
    parser.add_argument('--build-dir', type=Path, action='store',
                        required=True,
                        dest='build_dir',
                        help='Build Directory')
    parser.add_argument('--namespace', type=str, action='store',
                        required=True,
                        dest='namespace',
                        help='Package namespace')
    # Exactly one action must be chosen (mutually exclusive, required).
    action_group = parser.add_mutually_exclusive_group(required=True)
    action_group.add_argument('--build', action='store_const', const='build', dest='action')
    # action_group.add_argument('--verify', action='store_const', const='verify', dest='action')
    args = parser.parse_args()
    return args
# Script entry point.
if __name__ == '__main__':
    main()
| dillonhicks/versioned-protobufs | {{cookiecutter.project_name}}/python/bin/release.py | release.py | py | 10,293 | python | en | code | 0 | github-code | 13 |
12797403340 | import os
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# Build a voice-command -> shell-command lookup for every application
# installed under /Applications and index it into Elasticsearch.
d = "/Applications"
records = []
apps = os.listdir(d)
print(apps)
# Pair each app with the phrase that should launch it and the actual
# `open` shell command (spaces in app names are backslash-escaped).
for app in apps:
    record = {}
    # Bug fix: a space was missing after 'open', producing voice commands
    # like "openSafari" instead of "open Safari".
    record['voice_command'] = 'open ' + app.split('.app')[0]
    record['sys_command'] = ' open ' +d +'/%s.app' %app.replace(' ','\ ')
    records.append(record)
# open the elastic search client
es = Elasticsearch(['localhost:9200'])
if es.ping():
    print("Connected to elastic search")
# NOTE(review): the bulk index below runs even when ping() fails --
# confirm whether it should be guarded by the connection check.
bulk(es, records, index = "voice_over", doc_type="text",
     raise_on_error=True)
# Queries the index built above and resolves a spoken phrase to the
# matching shell command.
def search_es(query):
    """Fuzzy-match *query* against the indexed voice commands and return
    the shell command of the best hit (IndexError when nothing matches)."""
    match_clause = {
        "voice_command": {
            "query": query,
            "fuzziness": 2
        }
    }
    res = es.search(index="voice_over", doc_type="text",
                    body={"query": {"match": match_clause}})
    print("this is the source" )
    best_hit = res['hits']['hits'][0]
    return best_hit['_source']['sys_command']
| PhelimonSarpaning/AI-Voice-Assistant | commands.py | commands.py | py | 1,246 | python | en | code | 0 | github-code | 13 |
23741722144 | import pygame
import random
class Gem(pygame.sprite.Sprite):
    """A collectible gem that spawns above the screen and falls down.

    Behaviour is driven by a ``gem_event`` controller owning the
    ``all_gems`` group, the progress bar and the game instance.
    """
    def __init__(self, gem_event):
        super().__init__()
        raw_image = pygame.image.load('assets/assets/gem-lebon.png')
        self.image = pygame.transform.smoothscale(raw_image, (50, 70))
        self.rect = self.image.get_rect()
        self.velocity = random.randint(1, 3)
        # Random horizontal position, starting somewhere above the screen.
        self.rect.x = random.randint(20, 800)
        self.rect.y = -random.randint(0, 800)
        self.gem_event = gem_event
    def remove(self):
        """Drop this gem from the event; when it was the last one, reset
        the progress bar and spawn three monsters."""
        self.gem_event.all_gems.remove(self)
        if len(self.gem_event.all_gems) == 0:
            self.gem_event.reset_percent()
            for _ in range(3):
                self.gem_event.game.spawn_monster()
    def fall(self):
        """Advance the gem one step; despawn it past y=500 and damage the
        player on contact."""
        self.rect.y += self.velocity
        if self.rect.y >= 500:
            # Reached the ground without being caught.
            self.remove()
            if len(self.gem_event.all_gems) == 0:
                print("L'évenement est fini")
                self.gem_event.reset_percent()
                self.gem_event.fall_mode2 = False
        if self.gem_event.game.check_collision(
            self, self.gem_event.game.all_players
        ):
            print("joueur touché !")
            self.remove()
            self.gem_event.game.player.kit(10)
19335251974 | """testcube URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import routers
from .core import views
from .core.api import api_registration as core_api_registration
from .core.api import client_auth
from .runner.api import api_registration as runner_api_registration
from .users import views as user_views
# Branding for the Django admin site.
admin.site.site_header = 'TestCube Administration'
admin.site.site_title = admin.site.site_header
# One DRF router shared by the core and runner apps.
router = routers.DefaultRouter()
core_api_registration(router)
runner_api_registration(router)
# URL table: admin, REST API, auth endpoints, then the site views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api/', include(router.urls), name='api'),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^client-register', client_auth.register, name='client_register'),
    # Account management (function-based views from the users app).
    url('^signin$', user_views.signin, name='signin'),
    url('^signup$', user_views.signup, name='signup'),
    url('^signout$', user_views.signout, name='signout'),
    url('^reset$', user_views.reset_password, name='reset_password'),
    url('^profile$', user_views.user_profile, name='user_profile'),
    # Site pages: dashboard, docs and the run/case/result browsers.
    url(r'^$', views.index, name='index'),
    url(r'^welcome$', views.welcome, name='welcome'),
    url(r'^docs/(?P<name>.+)$', views.document, name='docs'),
    url(r'^runs$', views.runs, name='runs'),
    url(r'^runs/(\d+)$', views.run_detail, name='run_detail'),
    url(r'^testcases$', views.cases, name='testcases'),
    url(r'^testcases/(\d+)', views.case_detail, name='testcase_detail'),
    url(r'^results/(\d+)$', views.result_detail, name='result_detail'),
    url(r'^results/(\d+)/reset$', views.result_reset, name='result_reset'),
    url(r'^results/(\d+)/analysis$', views.result_analysis, name='result_analysis'),
    url(r'^results$', views.results, name='results'),
]
# Serve uploaded media from Django itself during development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| tobyqin/testcube | testcube/urls.py | urls.py | py | 2,617 | python | en | code | 27 | github-code | 13 |
# Name: 2 Reclassify
# Description: Reclassify rasters as part of the Staging Site Selection Model
# Requirements: Spatial Analyst Extension
# Import system modules
# Bug fix: `sys` was never imported even though sys.argv is read below,
# raising NameError unless `from arcpy.sa import *` happened to re-export it.
import arcpy, os, sys
from arcpy import env
from arcpy.sa import *
arcpy.env.overwriteOutput = True
# Define root directory and define geodatabase name (relative to the
# currently open ArcMap document).
folder = os.path.dirname(arcpy.mapping.MapDocument('CURRENT').filePath)
Tooldata = os.path.join(folder, "Tooldata")
geodatabase = os.path.join(Tooldata, "ModelOutputTables.gdb")
# Set environment settings
#env.workspace = ""
# Set local variables: the extracted input rasters from step 1.
inSlope = Raster(os.path.join(geodatabase, "Extract_Slope"))
inRoads = Raster(os.path.join(geodatabase, "Extract_Roads"))
inSSURGO = Raster(os.path.join(geodatabase, "Extract_SSURGO"))
inNHD = Raster(os.path.join(geodatabase, "Extract_NHD"))
inLAND = Raster(os.path.join(geodatabase, "Extract_LAND"))
# Con() output values: 0 where the where-clause holds, 1 otherwise.
inTrueRaster = 0
inFalseConstant = 1
#SQL statements
#whereClause = "Value > 10" #Slope
#whereClause = "Value < 200 OR Value > 500" #Roads
#whereClause = "hydgrpdcd = 'B' OR hydgrpdcd = 'B/D' OR hydgrpdcd = 'A' OR hydgrpdcd = 'A/D' OR hydgrpdcd = ' '" #SSURGO
#whereClause = "Value <= 500" #NHD
#whereClause = "Value = 11 OR Value = 23 OR Value = 24 OR Value = 41 OR Value = 42 OR Value = 43 OR Value = 81 OR Value = 82 OR Value = 90 OR Value = 95" #Land
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
# Execute Con. The per-layer where clauses arrive as script-tool
# parameters: sys.argv[1]=Roads, [2]=Slope, [3]=Land, [4]=Soils, [5]=Water.
arcpy.AddMessage("Processing Slope...")
outCon = Con(inSlope, inTrueRaster, inFalseConstant, sys.argv[2])
arcpy.AddMessage("Processing Roads...")
outCon2 = Con(inRoads, inTrueRaster, inFalseConstant, sys.argv[1])
arcpy.AddMessage("Processing Soils...")
outCon3 = Con(inSSURGO, inTrueRaster, inFalseConstant, sys.argv[4])
arcpy.AddMessage("Processing Surface Water...")
outCon4 = Con(inNHD, inTrueRaster, inFalseConstant, sys.argv[5])
arcpy.AddMessage("Processing Land Classification...")
outCon5 = Con(inLAND, inTrueRaster, inFalseConstant, sys.argv[3])
arcpy.AddMessage("Saving outputs...")
# Save the outputs
outCon.save(os.path.join(geodatabase, "Reclass_Slope"))
outCon2.save(os.path.join(geodatabase, "Reclass_Roads"))
outCon3.save(os.path.join(geodatabase, "Reclass_SSURGO"))
outCon4.save(os.path.join(geodatabase, "Reclass_NHD"))
outCon5.save(os.path.join(geodatabase, "Reclass_LAND"))
arcpy.AddMessage("Done!")
| USEPA/Waste_Staging_Tool | ArcMap/Script/Reclassify.py | Reclassify.py | py | 2,352 | python | en | code | 2 | github-code | 13 |
42223743841 | # figures.py
import tkinter as tk
# Pt, Ln, Eq, Cn share
class Figure:
    """Base class for the geometric-figure widget rows (Pt, Ln, Eq, Cn):
    a packed frame holding a type label and a red delete ("X") button.
    Subclasses add their entry grids and implement get_as_str().
    """
    def __init__(self, root, idf, del_cbk, fig_text):
        # root: tk.Frame # root frame to which append the Figure widgets
        # idf: int # used when calling del_cbk
        # del_cbk: void fn() # delete_callback, for the destroy button X
        # fig_text: str # text to be shown on the label ("eq", etc.)
        self.idf = idf
        f = tk.Frame(root)
        f.pack()
        # Three equal-weight columns; subclasses place entries in them.
        for j in range(3):
            f.columnconfigure(j, weight=1)
        self.frame = f
        self.text = fig_text
        self.label = tk.Label(self.frame, text=fig_text)
        self.label.grid(row=0, column=0)
        self.x_btn = tk.Button(
            self.frame,
            text="X",
            fg="#FFFFFF",
            bg="#DB0000",
            activeforeground="#000000",
            activebackground="#FF2400",
            command=del_cbk
        )
        self.x_btn.grid(row=0, column=2) #, expand=False)
    def get_as_str(self):
        """Serialize the figure's entry values; implemented by subclasses."""
        raise NotImplementedError
def zero_if_empty(t):
    """Map the empty string to "0.0"; pass every other value through."""
    return "0.0" if t == "" else t
def format_float(f):
    """Normalize a numeric entry-field value: an empty string becomes
    "0.0", anything else is returned unchanged.

    (A disabled variant used to round through float formatting; that dead
    triple-quoted block -- which executed as a useless expression
    statement -- has been removed.)
    """
    return zero_if_empty(f)
class Pt(Figure):
    """A 3-component point: one row of three entry fields."""
    def __init__(self, root, idf, del_cbk):
        super().__init__(root, idf, del_cbk, "pt")
        self.vec = []
        for col in range(3):
            entry = tk.Entry(self.frame)
            entry.grid(row=1, column=col)
            self.vec.append(entry)
    def get_as_str(self):
        """Serialize as "x, y, z, " (note the trailing separator)."""
        parts = [format_float(entry.get()) + ", " for entry in self.vec]
        return "".join(parts)
class Ln(Figure):
    """A line segment: two rows (points) of three entry fields each."""
    def __init__(self, root, idf, del_cbk):
        super().__init__(root, idf, del_cbk, "ln")
        # pts[p][j]: entry for coordinate j of point p.
        self.pts = []
        for p in range(2):
            vec = []
            for j in range(3):
                e = tk.Entry(self.frame)
                e.grid(row=1+p, column=j)
                vec.append(e)
            self.pts.append(vec)
    def get_as_str(self):
        """Serialize as one "x, y, z, \\n" line per point."""
        s = ""
        for i in range(2):
            for j in range(3):
                s += format_float(self.pts[i][j].get()) + ", "
            s += "\n"
        #s = s[:-1]
        return s
class Eq(Figure):
    """A line equation: three coefficient entry fields in one row."""
    def __init__(self, root, idf, del_cbk):
        super().__init__(root, idf, del_cbk, "eq")
        self.vec = []
        for col in range(3):
            coef_entry = tk.Entry(self.frame)
            coef_entry.grid(row=1, column=col)
            self.vec.append(coef_entry)
    def get_as_str(self):
        """Serialize the coefficients as "a, b, c, " (trailing separator)."""
        return "".join(format_float(entry.get()) + ", " for entry in self.vec)
class Cn(Figure):
    """A conic: a 3x3 grid of entry fields (its matrix representation)."""
    def __init__(self, root, idf, del_cbk):
        super().__init__(root, idf, del_cbk, "cn")
        # mat[i][j]: entry for matrix element (i, j).
        self.mat = []
        for i in range(3):
            row = []
            for j in range(3):
                e = tk.Entry(self.frame)
                e.grid(row=i+1, column=j)
                row.append(e)
            self.mat.append(row)
    def get_as_str(self):
        """Serialize row-major, one "a, b, c, \\n" line per matrix row."""
        s = ""
        for i in range(3):
            for j in range(3):
                s += format_float(self.mat[i][j].get()) + ", "
            s += "\n"
        # s = s[:-1]
        return s
class Param:
    """A labelled slider row with editable lower/upper bound entries and a
    red delete button. *cbk* fires whenever the slider moves; *del_cbk*
    fires when the X button is pressed.
    """
    def __init__(self, rootframe, name, cbk, del_cbk):
        self.name = name
        self.frame = tk.Frame(rootframe)
        self.frame.pack(anchor=tk.S)
        self.frame.columnconfigure(1, weight=1)
        self.scale = tk.Scale(
            self.frame,
            label=self.name,
            from_=-1.0,
            to=1.0,
            digits=2,
            resolution=0.125,
            orient=tk.HORIZONTAL,
            command=cbk
        )
        self.scale.grid(row=0, column=1, sticky=tk.EW)
        self.x_btn = tk.Button(
            self.frame,
            text="X",
            fg="#FFFFFF",
            bg="#DB0000",
            activeforeground="#000000",
            activebackground="#FF2400",
            command=del_cbk
        )
        self.x_btn.grid(row=0, column=3)
        # Lower-bound entry (applied when <Return> is pressed).
        self.lo_entry = tk.Entry(self.frame, width=4)
        self.lo_entry.bind("<Return>", self.update_lo)
        self.lo_entry.insert(tk.END, "-1")
        self.lo_entry.grid(row=0, column=0, sticky=tk.SE)
        # Upper-bound entry (applied when <Return> is pressed).
        self.hi_entry = tk.Entry(self.frame, width=4)
        self.hi_entry.bind("<Return>", self.update_hi)
        self.hi_entry.insert(tk.END, "+1")
        self.hi_entry.grid(row=0, column=2, sticky=tk.SW)
    def get(self):
        """Current slider value."""
        return self.scale.get()
    def update_lo(self, *args):
        """<Return> handler: apply the lower-bound entry to the slider."""
        try:
            self.scale.configure(from_=float(self.lo_entry.get()))
        except (ValueError, tk.TclError):
            # Narrowed from a bare except: only bad numbers / widget errors
            # are swallowed, so Ctrl-C etc. now propagate.
            print("enter some numba u idiot")
    def update_hi(self, *args):
        """<Return> handler: apply the upper-bound entry to the slider."""
        try:
            self.scale.configure(to=float(self.hi_entry.get()))
        except (ValueError, tk.TclError):
            print("enter some numba u idiot")
| papanumba/projec_p2 | python/figures.py | figures.py | py | 4,952 | python | en | code | 2 | github-code | 13 |
1177370095 | from tkinter import *
from cell import Cell
from settings import *
# Main window; width/height/grid_size come from settings (star-imported).
root = Tk()
root.geometry(f'{width}x{height}')
root.configure(bg='black')
root.title("Minesweeper")
root.resizable(False, False)
# Creating the frames
top_frame = Frame(
    root,
    bg='black',
    width=width,
    height=height/4
)
top_frame.place(x=0, y=0)
left_frame = Frame(
    root,
    bg='black',
    width=width/4,
    height=height
)
left_frame.place(x=0, y=height/4)
# Board area occupies the remaining 3/4 x 3/4 of the window.
main_frame = Frame(
    root,
    bg='black',
    width=(width/4) * 3,
    height=(height/4) * 3
)
main_frame.place(x=width/4, y=height/4)
# Creating the cells: one Cell + button per (x, y) grid position.
for x in range(grid_size):
    for y in range(grid_size):
        c1 = Cell(x, y)
        c1.create_button(main_frame)
        c1.cell_button.grid(
            column=x, row=y
        )
Cell.random_mines() #Picking mines
# Debug output: prints each cell's mine flag to the console.
for c in Cell.all_cells:
    print(c.is_mine)
root.mainloop()
| alonshmueli123/Mindsweeper-Game | Minesweeper Game/main.py | main.py | py | 955 | python | en | code | 0 | github-code | 13 |
17109564906 | """
Useful functions associated with mlst.
To use:
from mlst.tools import UTIL1, UTIL2, etc...
"""
import json
from django.db import transaction
from django.db.utils import IntegrityError
from django.core.management.base import CommandError
from staphopia.utils import file_exists, read_json, timeit
from mlst.models import SequenceTypes, MLST, Report, Support
def read_table(table, header=True, sep='\t', ):
    """Parse a delimited text file into dicts keyed by its header row.

    Args:
        table: path to the delimited file.
        header: when True, treat the first line as column names.
        sep: field separator (tab by default).

    Returns:
        A single dict when the file holds exactly one data row,
        otherwise a list of dicts (one per row; empty list for no rows).
    """
    column_names = None
    parsed_rows = []
    with open(table, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.rstrip().split(sep)
            if header:
                # First line supplies the keys for every following row.
                column_names = fields
                header = False
                continue
            parsed_rows.append(dict(zip(column_names, fields)))
    # Historical quirk preserved: a single-row table collapses to a dict.
    return parsed_rows[0] if len(parsed_rows) == 1 else parsed_rows
def parse_ariba(basic_report, detailed_report):
    '''
    Basic Ariba Report (tabs not spaces)
    ST arcC aroE glpF gmk pta tpi yqiL
    22 7 6 1 5 8 8 6
    '''
    # Parse Ariba MLST output; returns [basic dict, detailed rows, #loci assigned].
    basic_report = read_table(basic_report)
    basic_report['uncertainty'] = False
    predicted_novel = False
    if basic_report['ST'].endswith('*'):
        # * indicates uncertainty in Ariba call
        basic_report['ST'] = basic_report['ST'].rstrip('*')
        basic_report['uncertainty'] = True
    if 'Novel' in basic_report['ST']:
        basic_report['ST'] = 0
        if not basic_report['uncertainty']:
            predicted_novel = True
    elif basic_report['ST'] == 'ND':
        basic_report['ST'] = 0
    # Count loci with a confident, non-zero allele call ('*' / 'ND' excluded).
    total_assigned = 0
    for key, val in basic_report.items():
        if key not in ['ST', 'uncertainty']:
            if val.endswith("*") or val == 'ND':
                pass
            elif int(val):
                total_assigned += 1
    if total_assigned == 7 and not int(basic_report['ST']):
        # See if loci pattern exists
        try:
            st = SequenceTypes.objects.get(
                arcc=int(basic_report['arcC']),
                aroe=int(basic_report['aroE']),
                glpf=int(basic_report['glpF']),
                gmk=int(basic_report['gmk']),
                pta=int(basic_report['pta']),
                tpi=int(basic_report['tpi']),
                yqil=int(basic_report['yqiL'])
            )
            basic_report['ST'] = st.st
        except SequenceTypes.DoesNotExist:
            if predicted_novel:
                # 10000 is the project's sentinel for a novel ST (see insert).
                basic_report['ST'] = 10000
    '''
    Parse Detailed Report (tabs not spaces)
    gene allele cov pc ctgs depth hetmin hets
    arcC 7 100.0 100.0 1 51.2 . .
    aroE 6 100.0 100.0 1 51.0 . .
    glpF 1 100.0 100.0 1 36.7 . .
    gmk 5 100.0 100.0 1 45.7 . .
    pta 8 100.0 100.0 1 61.5 . .
    tpi 8 100.0 100.0 1 47.9 . .
    yqiL 6 100.0 100.0 1 52.6 . .
    '''
    detailed_report = read_table(detailed_report)
    return [basic_report, detailed_report, total_assigned]
def parse_mentalist(basic_report, tie_report, vote_report):
    '''
    Basic Mentalist Report (tabs not spaces)
    Sample arcC aroE glpF gmk pta tpi yqiL ST clonal_complex
    ERX1666310 7 6 1 5 8 8 6 22
    '''
    # Parse MentaLiST MLST output; returns [basic dict, {'ties','votes'}, #loci assigned].
    basic_report = read_table(basic_report)
    total_assigned = 0
    if int(basic_report['ST']):
        # A non-zero ST implies all seven loci were called.
        total_assigned = 7
    else:
        for key, val in basic_report.items():
            if key not in ['Sample', 'ST', 'clonal_complex']:
                if int(val):
                    total_assigned += 1
    detailed_report = {
        'ties': read_table(tie_report),
        'votes': read_table(vote_report)
    }
    # Remove the ties
    total_assigned = total_assigned - len(detailed_report['ties'])
    if total_assigned == 7 and not int(basic_report['ST']):
        # See if loci pattern exists
        try:
            st = SequenceTypes.objects.get(
                arcc=int(basic_report['arcC']),
                aroe=int(basic_report['aroE']),
                glpf=int(basic_report['glpF']),
                gmk=int(basic_report['gmk']),
                pta=int(basic_report['pta']),
                tpi=int(basic_report['tpi']),
                yqil=int(basic_report['yqiL'])
            )
            basic_report['ST'] = st.st
        except SequenceTypes.DoesNotExist:
            basic_report['ST'] = 0
    return [basic_report, detailed_report, total_assigned]
def parse_blast(basic_report):
    # Parse a JSON BLAST report; the allele number is encoded in the
    # sseqid as "<locus>.<allele>".  Returns [loci dict + 'ST',
    # raw JSON report, #loci with a non-zero allele].
    detailed_report = None
    with open(basic_report, 'r') as fh:
        detailed_report = json.load(fh)
    basic_report = {
        'arcc': int(detailed_report['arcC']['sseqid'].split('.')[1]),
        'aroe': int(detailed_report['aroE']['sseqid'].split('.')[1]),
        'glpf': int(detailed_report['glpF']['sseqid'].split('.')[1]),
        'gmk': int(detailed_report['gmk']['sseqid'].split('.')[1]),
        'pta': int(detailed_report['pta']['sseqid'].split('.')[1]),
        'tpi': int(detailed_report['tpi']['sseqid'].split('.')[1]),
        'yqil': int(detailed_report['yqiL']['sseqid'].split('.')[1]),
    }
    # Determine ST based on hits
    total_assigned = 0
    try:
        st = SequenceTypes.objects.get(**basic_report)
        basic_report['ST'] = st.st
        total_assigned = 7
    except SequenceTypes.DoesNotExist:
        # Unknown allele combination: count how many loci were called.
        for key, val in basic_report.items():
            if val:
                total_assigned += 1
        basic_report['ST'] = 0
    return [basic_report, detailed_report, total_assigned]
@timeit
def insert_mlst(sample, version, files, force=False):
    """Insert mlst results and the reports."""
    # NOTE(review): when there is no 'fastq_r2', report['ariba'] stays the
    # string 'empty' and insert_mlst_results() later indexes
    # results['ariba']['ST'] — verify single-end samples don't crash there.
    st = {'ariba': 0}
    report = {'ariba': 'empty'}
    ariba_assigned = 0
    if 'fastq_r2' in files:
        # Ariba only works on paired end reads
        st['ariba'], report['ariba'], ariba_assigned = parse_ariba(
            files['mlst_ariba_mlst_report'],
            files['mlst_ariba_details']
        )
    #st['mentalist'], report['mentalist'], mentalist_assigned = parse_mentalist(
    #    files['mlst_mentalist'],
    #    files['mlst_mentalist_ties'],
    #    files['mlst_mentalist_votes']
    #)
    # MentaLiST is currently disabled (see commented call above).
    mentalist_assigned = 0
    report['mentalist'] = {}
    st['blast'], report['blast'], blast_assigned = parse_blast(
        files['mlst_blastn']
    )
    novel = {
        'ariba': ariba_assigned,
        'mentalist': mentalist_assigned,
        'blast': blast_assigned
    }
    if force:
        delete_mlst(sample, version)
    insert_mlst_results(sample, version, st, novel)
    insert_report(sample, version, report)
@transaction.atomic
def delete_mlst(sample, version):
    """Remove existing MLST calls and reports for a sample/version pair."""
    print(f'Deleting MLST calls/reports for {sample.name}')
    Report.objects.filter(sample=sample, version=version).delete()
    MLST.objects.filter(sample=sample, version=version).delete()
@transaction.atomic
def insert_mlst_results(sample, version, results, novel):
    '''Insert sequence type into database.'''
    # Consensus logic: prefer agreement between methods, then fall back
    # to individual callers in order ariba > mentalist > blast.
    st = {
        'st': 0,
        'ariba': int(results['ariba']['ST']) if results['ariba'] else 0,
        'blast': int(results['blast']['ST']),
        'mentalist': 0 # int(results['mentalist']['ST']),
    }
    # NOTE(review): results['ariba'] may be the string 'empty' (see
    # insert_mlst) — then ariba['uncertainty'] below would raise; confirm
    # the single-end path.
    ariba = results['ariba']
    if st['ariba'] == st['mentalist'] and st['mentalist'] == st['blast']:
        st['st'] = st['ariba']
    elif st['ariba'] == st['mentalist'] and st['mentalist']:
        st['st'] = st['ariba']
    elif st['ariba'] == st['blast'] and st['blast']:
        st['st'] = st['ariba']
    elif st['mentalist'] == st['blast'] and st['mentalist']:
        st['st'] = st['mentalist']
    elif st['ariba'] and st['ariba'] != 10000 and not ariba['uncertainty']:
        # 10000 is 'Novel' as considered by Ariba
        # If there is uncertainty, don't call ST soley on Ariba. Previous
        # conditions had support from other methods.
        st['st'] = st['ariba']
    elif st['mentalist']:
        st['st'] = st['mentalist']
    elif st['blast']:
        st['st'] = st['blast']
    # If ST still not determined, check if predicted to be novel
    predicted_novel = False
    if not st['st']:
        if novel['blast'] == 7: # or novel['mentalist'] == 7:
            predicted_novel = True
        elif st['ariba'] == 10000:
            predicted_novel = True
    # Get support
    ariba_support = novel['ariba']
    mentalist_support = 0 #novel['mentalist']
    blast_support = novel['blast']
    support = Support.objects.get_or_create(
        ariba=ariba_support, mentalist=mentalist_support, blast=blast_support
    )
    try:
        MLST.objects.create(
            sample=sample,
            version=version,
            predicted_novel=predicted_novel,
            support=support[0],
            **st
        )
        print(f'Inserted MLST calls for {sample.name}')
    except IntegrityError as e:
        raise CommandError(e)
@transaction.atomic
def insert_report(sample, version, reports):
    '''Insert detailed report of mlst calls into database.'''
    # reports holds the per-method raw outputs (keys: ariba, mentalist, blast).
    try:
        Report.objects.create(sample=sample, version=version, **reports)
        print(f'Inserted MLST reports for {sample.name}')
    except IntegrityError as e:
        raise CommandError(
            f'{sample.name} found, will not update unless --force is used. {e}'
        )
| staphopia/staphopia-web | mlst/tools.py | tools.py | py | 9,187 | python | en | code | 4 | github-code | 13 |
# Blender add-on metadata, read by Blender's add-on manager.
bl_info = {
    "name": "Custom Camera",
    "author": "Dave Nectariad Rome",
    "version": (0, 3, 7),
    # NOTE(review): Blender versions are (major, minor, patch), e.g.
    # (3, 5, 1); (3, 50, 1) looks like a typo — confirm intended version.
    "blender": (3, 50, 1),
    "location": "View3D > Tool Shelf > Custom Camera Add-on",
    "description": "Add a custom camera setup",
    "warning": "",
    "doc_url": "",
    "category": "Object",
}
from mathutils import Vector
import bpy
from bpy.props import EnumProperty, FloatProperty, StringProperty, BoolProperty, IntProperty
from bpy_extras.io_utils import ImportHelper
import urllib.request
import os
import subprocess
def update_camera_settings(self, context):
    """Push the add-on's property values onto the CustomCamera object.

    Used as the ``update=`` callback for every property in
    CustomCameraProperties: applies sensor/lens settings, manages the
    DOF_target empty, DOF/aperture settings, the Track To constraint, and
    the scene render resolution.
    """
    props = context.scene.custom_camera_props
    camera_object = bpy.data.objects.get("CustomCamera")
    if camera_object and camera_object.type == 'CAMERA':
        camera_data = camera_object.data
        # "CUSTOM" selections fall through to the free-form float props.
        camera_data.sensor_width = float(props.sensor_size) if props.sensor_size != "CUSTOM" else props.custom_sensor_size
        camera_data.lens = float(props.focal_length.rstrip("mm")) if props.focal_length != "CUSTOM" else props.custom_focal_length
        if props.use_depth_of_field:
            dof_target_object = bpy.data.objects.get("DOF_target")
            if not dof_target_object:
                # Create DOF_target object if it doesn't exist
                dof_target_object = bpy.data.objects.new("DOF_target", None)
                bpy.data.collections.get("Camera Collection").objects.link(dof_target_object)
                dof_target_object.location = Vector((0, 0, 0))
            camera_data.dof.focus_object = dof_target_object
            # Set the distance of the DOF target empty
            dof_target_object.location = camera_object.location + camera_object.matrix_world.to_quaternion() @ Vector((0.0, 0.0, -props.dof_target_distance))
        else:
            # Remove DOF_target object if it exists
            dof_target_object = bpy.data.objects.get("DOF_target")
            if dof_target_object:
                bpy.data.objects.remove(dof_target_object)
        camera_data.dof.use_dof = props.use_depth_of_field
        camera_data.dof.aperture_fstop = float(props.aperture_size) if props.aperture_size != "CUSTOM" else props.custom_aperture_size
        if props.bokeh_shape == "CUSTOM":
            camera_data.dof.aperture_blades = int(props.custom_bokeh_size * 10)
        else:
            camera_data.dof.aperture_blades = {
                "CIRCULAR": 0,
                "TRIANGLE": 3,
                "SQUARE": 4,
                "PENTAGON": 5,
                "HEXAGONAL": 6,
                "OCTAGONAL": 8,
                "ANAMORPHIC": 100, # Value to be replaced later
            }[props.bokeh_shape]
        # Convert aperture size to a float
        if props.bokeh_shape == "ANAMORPHIC":
            camera_data.dof.aperture_blades = 100
            camera_data.dof.aperture_ratio = 2.0
        else:
            camera_data.dof.aperture_ratio = 1.0
        if props.aperture_size != "CUSTOM":
            aperture_size_float = float(props.aperture_size.split("f/")[-1])
            camera_data.dof.aperture_fstop = aperture_size_float
        else:
            camera_data.dof.aperture_fstop = props.custom_aperture_size
        # Connect the camera to the DOF_target object via a Track To constraint
        cam_target_object = props.cam_target
        if cam_target_object:
            camera_object.constraints.clear()
            cam_track_constraint = camera_object.constraints.new(type='TRACK_TO')
            cam_track_constraint.target = cam_target_object
            cam_track_constraint.track_axis = 'TRACK_NEGATIVE_Z'
            cam_track_constraint.up_axis = 'UP_Y'
        # Update the distance of the DOF target empty based on the depth of field slider
        dof_target_object = bpy.data.objects.get("DOF_target")
        if dof_target_object:
            dof_target_object.location = camera_object.location + camera_object.matrix_world.to_quaternion() @ Vector((0.0, 0.0, -props.dof_target_distance))
    # Update the resolution properties
    context.scene.render.resolution_x = props.resolution_x
    context.scene.render.resolution_y = props.resolution_y
    # Restore DOF_Target object if depth of field is re-enabled
    # NOTE(review): if the camera check above failed, dof_target_object and
    # camera_object/camera_data are unbound here — this branch would raise
    # NameError; confirm it is only reached when the camera exists.
    if props.use_depth_of_field and not dof_target_object:
        dof_target_object = bpy.data.objects.new("DOF_target", None)
        bpy.data.collections.get("Camera Collection").objects.link(dof_target_object)
        dof_target_object.location = Vector((0, 0, 0))
        camera_data.dof.focus_object = dof_target_object
        # Set the distance of the DOF target empty
        dof_target_object.location = camera_object.location + camera_object.matrix_world.to_quaternion() @ Vector((0.0, 0.0, -props.dof_target_distance))
def update_custom_camera(self, context):
    """Download the latest add-on script from GitHub over the local copy.

    If the .blend file has unsaved changes the user is first prompted to
    save; cancelling that prompt aborts the update.  On success Blender is
    asked to quit so the updated script loads on the next start.
    """
    if bpy.data.is_dirty:
        # bpy.ops operators return a *set* of result flags, e.g.
        # {'CANCELLED'}; the old code compared it to the string
        # 'CANCELLED', which was always False.
        save_choice = bpy.ops.wm.save_mainfile('INVOKE_DEFAULT')
        if 'CANCELLED' in save_choice:
            self.report({"INFO"}, "Custom Camera add-on update cancelled.")
            return {'CANCELLED'}

    # Download the updated script.
    url = "https://raw.githubusercontent.com/mdreece/Custom-Camera-Blender-Add-on/main/custom_camera.py"
    response = urllib.request.urlopen(url)
    data = response.read()

    # Write the updated script to disk next to this file.
    script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_camera.py")
    with open(script_path, "wb") as f:
        f.write(data)

    # Prompt to save the project before closing Blender.  (The statements
    # that previously followed this return were unreachable and have been
    # removed, along with the unused save_prompt/save_options locals.)
    return bpy.ops.wm.quit_blender('INVOKE_DEFAULT')
def quit_blender(self, context):
    # NOTE(review): saves the file then calls self.quit_blender() — if
    # *self* resolves back to this module-level function this recurses
    # forever; it appears to be leftover/dead code — confirm before use.
    bpy.ops.wm.save_mainfile()
    self.quit_blender()
class CustomCameraPreferences(bpy.types.AddonPreferences):
    """Add-on preferences UI: an update button and a GitHub link."""
    bl_idname = __name__
    def draw(self, context):
        layout = self.layout
        layout.operator("customcamera.update_custom_camera", text="Update Custom Camera")
        layout.operator("wm.url_open", text="Visit GitHub Repository").url = "https://github.com/mdreece/Custom-Camera-Blender-Add-on"
class UPDATE_CUSTOMCAMERA_OT_update_custom_camera(bpy.types.Operator):
    """Operator wrapper around update_custom_camera()."""
    bl_idname = "customcamera.update_custom_camera"
    bl_label = "Update Custom Camera"
    bl_description = "Update the Custom Camera add-on with the latest version from GitHub"
    def execute(self, context):
        update_custom_camera(self, context)
        return {"FINISHED"}
class CustomCameraProperties(bpy.types.PropertyGroup):
    """All user-editable camera settings shown in the N-panel.

    Every property uses update_camera_settings as its update callback so
    changes are applied to the CustomCamera object immediately.
    """
    # Preset sensor widths in millimetres (enum value is the width).
    sensor_sizes = [
        ("6.17", "1/2.3\" (6.17 x 4.55 mm)", ""),
        ("7.6", "1/1.7\" (7.6 x 5.7 mm)", ""),
        ("17.3", "Micro Four Thirds (17.3 x 13 mm)", ""),
        ("23.5", "APS-C (23.5×15.6 mm)", ""),
        ("24.89", "Super 35 (24.89 x 18.66 mm)", ""),
        ("36", "Full-Frame (36 x 24 mm)", ""),
        ("30.7", "Red Dragon 6K (30.7 x 15.8 mm)", ""),
        ("54.12", "Arri Alexa 65 (54.12 x 25.58 mm)", ""),
        ("70", "IMAX (70 x 48.5 mm)", ""),
        ("CUSTOM", "Custom", ""),
    ]
    # Preset focal lengths; the "mm" suffix is stripped when applied.
    focal_lengths = [
        ("18mm", "18mm", ""),
        ("24mm", "24mm", ""),
        ("35mm", "35mm", ""),
        ("50mm", "50mm", ""),
        ("85mm", "85mm", ""),
        ("135mm", "135mm", ""),
        ("CUSTOM", "Custom", ""),
    ]
    sensor_size: EnumProperty(
        name="Sensor Size",
        items=sensor_sizes,
        default="36",
        update=update_camera_settings,
    )
    focal_length: EnumProperty(
        name="Focal Length",
        items=focal_lengths,
        default="50mm",
        update=update_camera_settings,
    )
    custom_sensor_size: FloatProperty(
        name="Custom Sensor Size",
        description="Set custom sensor size in millimeters",
        default=35.0,
        min=1.0,
        update=update_camera_settings,
    )
    custom_focal_length: FloatProperty(
        name="Custom Focal Length",
        description="Set custom focal length in millimeters",
        default=50.0,
        min=1.0,
        update=update_camera_settings,
    )
    use_depth_of_field: BoolProperty(
        name="Use Depth of Field",
        description="Enable or disable depth of field",
        default=False,
        update=update_camera_settings,
    )
    dof_target: bpy.props.PointerProperty(
        type=bpy.types.Object,
        name="DOF Target",
        description="Object for Depth of Field",
    )
    cam_target: bpy.props.PointerProperty(
        type=bpy.types.Object,
        name="Camera Target",
        description="Object for Camera Target",
    )
    dof_target_distance: FloatProperty(
        name="DOF Target Distance",
        description="Set distance of the DOF target empty",
        default=5.0,
        min=0.0,
        update=update_camera_settings,
    )
    camera_collection_selected: BoolProperty(
        name="Camera Collection Selected",
        description="Whether the camera collection is selected",
        default=False,
    )
    resolution_x: IntProperty(
        name="Resolution X",
        description="X resolution of the camera",
        default=1920,
        min=1,
        update=update_camera_settings,
    )
    resolution_y: IntProperty(
        name="Resolution Y",
        description="Y resolution of the camera",
        default=1080,
        min=1,
        update=update_camera_settings,
    )
    # Preset f-stops (enum value is the numeric part).
    aperture_sizes = [
        ("0.5", "f/0.5", ""),
        ("1.0", "f/1.0", ""),
        ("1.4", "f/1.4", ""),
        ("2.0", "f/2.0", ""),
        ("2.8", "f/2.8", ""),
        ("4.0", "f/4.0", ""),
        ("5.6", "f/5.6", ""),
        ("8.0", "f/8.0", ""),
        ("11", "f/11", ""),
        ("16", "f/16", ""),
        ("22", "f/22", ""),
        ("32", "f/32", ""),
        ("45", "f/45", ""),
        ("64", "f/64", ""),
        ("90", "f/90", ""),
        ("CUSTOM", "Custom", ""),
    ]
    aperture_size: EnumProperty(
        name="Aperture Size",
        items=aperture_sizes,
        default="2.8",
        update=update_camera_settings,
    )
    custom_aperture_size: FloatProperty(
        name="Custom Aperture Size",
        description="Set custom aperture size",
        default=2.8,
        min=0.5,
        max=90.0,
        update=update_camera_settings,
    )
    # Bokeh shapes map to aperture blade counts in update_camera_settings.
    bokeh_shapes = [
        ("CIRCULAR", "Circular", ""),
        ("TRIANGLE", "Triangle", ""),
        ("SQUARE", "Square", ""),
        ("PENTAGON", "Pentagon", ""),
        ("HEXAGONAL", "Hexagonal", ""),
        ("OCTAGONAL", "Octagonal", ""),
        ("ANAMORPHIC", "Anamorphic", ""),
        ("CUSTOM", "Custom", ""),
    ]
    bokeh_shape: EnumProperty(
        name="Bokeh Shape",
        items=bokeh_shapes,
        default="CIRCULAR",
        update=update_camera_settings,
    )
    custom_bokeh_size: FloatProperty(
        name="Custom Bokeh Size",
        description="Set custom bokeh size",
        default=0.1,
        min=0.0,
        max=1.0,
        update=update_camera_settings,
    )
class CUSTOMCAMERA_OT_select_camera_collection(bpy.types.Operator):
    """Select every object in the "Camera Collection"."""
    bl_idname = "customcamera.select_camera_collection"
    bl_label = "Select Camera Collection"
    bl_description = "Select all objects in the Camera Collection"
    def execute(self, context):
        camera_collection = bpy.data.collections.get("Camera Collection")
        props = context.scene.custom_camera_props
        if camera_collection:
            # Deselect all objects first
            bpy.ops.object.select_all(action='DESELECT')
            # Select objects in the camera collection
            for obj in camera_collection.objects:
                obj.select_set(True)
            props.camera_collection_selected = True
            # Disable selection for other objects
            # NOTE(review): the second assignment immediately overwrites
            # the first, so the net effect is (True, True, True) — confirm
            # whether the first line is needed at all.
            bpy.context.scene.tool_settings.mesh_select_mode[:] = (False, False, False)
            bpy.context.scene.tool_settings.mesh_select_mode[:] = (True, True, True)
            # Disable object selection
            bpy.context.scene.tool_settings.use_mesh_automerge = True
            # Change the operator name and label to "Deselect Camera Collection"
            # NOTE(review): mutating self.bl_idname/bl_label at runtime
            # likely has no effect on the registered operator — confirm.
            self.bl_idname = "customcamera.deselect_camera_collection"
            self.bl_label = "Deselect Camera Collection"
        else:
            self.report({'WARNING'}, "Camera Collection not found")
            props.camera_collection_selected = False
        return {'FINISHED'}
class CUSTOMCAMERA_OT_deselect_camera_collection(bpy.types.Operator):
    """Deselect every object in the "Camera Collection"."""
    bl_idname = "customcamera.deselect_camera_collection"
    bl_label = "Deselect Camera Collection"
    bl_description = "Deselect all objects in the Camera Collection"
    def execute(self, context):
        camera_collection = bpy.data.collections.get("Camera Collection")
        props = context.scene.custom_camera_props
        if camera_collection:
            for obj in camera_collection.objects:
                obj.select_set(False)
            props.camera_collection_selected = False
            # Enable selection for other objects
            bpy.context.scene.tool_settings.mesh_select_mode[:] = (True, True, True)
            bpy.context.scene.tool_settings.use_mesh_automerge = False
            # Change the operator name and label back to "Select Camera Collection"
            # NOTE(review): mutating self.bl_idname/bl_label at runtime
            # likely has no effect on the registered operator — confirm.
            self.bl_idname = "customcamera.select_camera_collection"
            self.bl_label = "Select Camera Collection"
        else:
            self.report({'WARNING'}, "Camera Collection not found")
        return {'FINISHED'}
class CUSTOMCAMERA_PT_main_panel(bpy.types.Panel):
    """N-panel UI exposing all CustomCameraProperties settings."""
    bl_label = "Custom Camera"
    bl_idname = "CUSTOMCAMERA_PT_main_panel"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Custom Camera"
    def draw(self, context):
        layout = self.layout
        props = context.scene.custom_camera_props
        layout.separator()
        # Sensor / lens / resolution controls; "CUSTOM" choices reveal the
        # matching free-form float field.
        row = layout.row()
        row.prop(props, "sensor_size")
        if props.sensor_size == "CUSTOM":
            row = layout.row()
            row.prop(props, "custom_sensor_size")
        row = layout.row()
        row.prop(props, "focal_length")
        if props.focal_length == "CUSTOM":
            row = layout.row()
            row.prop(props, "custom_focal_length")
        row = layout.row()
        row.prop(props, "resolution_x")
        row = layout.row()
        row.prop(props, "resolution_y")
        layout.separator()
        # Depth-of-field controls, shown only when DOF is enabled.
        row = layout.row()
        row.prop(props, "use_depth_of_field")
        if props.use_depth_of_field:
            row = layout.row()
            row.prop(props, "dof_target_distance")
            row = layout.row()
            row.prop(props, "bokeh_shape")
            if props.bokeh_shape == "CUSTOM":
                row = layout.row()
                row.prop(props, "custom_bokeh_size")
            row = layout.row()
            row.prop(props, "aperture_size")
            if props.aperture_size == "CUSTOM":
                row = layout.row()
                row.prop(props, "custom_aperture_size")
        if bpy.data.collections.get("Camera Collection"):
            row = layout.row()
            props = context.scene.custom_camera_props
            if props.camera_collection_selected:
                row.operator("customcamera.deselect_camera_collection", text="Deselect Camera Collection")
            else:
                row.operator("customcamera.select_camera_collection", text="Select Camera Collection")
        else:
            row = layout.row()
            # NOTE(review): no "customcamera.create_camera_collection"
            # operator is defined in this file — confirm this button works.
            row.operator("customcamera.create_camera_collection", text="Create Camera Collection")
        if bpy.data.objects.get("CustomCamera"):
            row = layout.row()
            row.operator("customcamera.delete_camera", text="Delete Camera", icon='CANCEL').action = 'DELETE_CAMERA'
        else:
            row = layout.row()
            row.operator("customcamera.create_camera", text="Create Custom Camera")
        layout.separator()
class CUSTOMCAMERA_OT_create_camera(bpy.types.Operator):
    """Create the CustomCamera rig: collection, camera, CAM_target (and
    optionally DOF_target), then apply the current property values."""
    bl_idname = "customcamera.create_camera"
    bl_label = "Create Custom Camera"
    bl_description = "Create a camera with custom settings"
    def execute(self, context):
        props = context.scene.custom_camera_props
        # Create Camera Collection
        camera_collection = bpy.data.collections.new("Camera Collection")
        bpy.context.scene.collection.children.link(camera_collection)
        # Create Camera
        camera_data = bpy.data.cameras.new("CustomCamera")
        camera_object = bpy.data.objects.new("CustomCamera", camera_data)
        camera_collection.objects.link(camera_object)
        # Set end clip
        camera_data.clip_end = 1000000000
        # Set Camera location
        camera_object.location = (6.5, -6.5, 4.0)
        # Create CAM_target object
        cam_target_object = bpy.data.objects.new("CAM_target", None)
        camera_collection.objects.link(cam_target_object)
        cam_target_object.location = Vector((0, 0, 0))
        # Connect camera to CAM_target object via Track To constraint
        cam_track_constraint = camera_object.constraints.new(type='TRACK_TO')
        cam_track_constraint.target = cam_target_object
        cam_track_constraint.track_axis = 'TRACK_NEGATIVE_Z'
        cam_track_constraint.up_axis = 'UP_Y'
        # Store CAM_target object in custom_camera_props
        props.cam_target = cam_target_object
        if props.use_depth_of_field:
            # Create DOF_target object
            dof_target_object = bpy.data.objects.new("DOF_target", None)
            camera_collection.objects.link(dof_target_object)
            dof_target_object.location = Vector((0, 0, 0))
            # Store the DOF_target object in the custom_camera_props
            props.dof_target = dof_target_object
            # Enable depth of field on camera
            camera_data.dof.use_dof = True
            # Connect depth of field to the DOF_target object
            camera_data.dof.focus_object = dof_target_object
            # Set the distance of the DOF target empty
            dof_target_object.location = camera_object.location + camera_object.matrix_world.to_quaternion() @ Vector((0.0, 0.0, -props.dof_target_distance))
        else:
            # Disable depth of field on camera
            camera_data.dof.use_dof = False
        # Update camera settings
        update_camera_settings(self, context)
        return {'FINISHED'}
class CUSTOMCAMERA_OT_delete_camera(bpy.types.Operator):
    """Delete the CustomCamera rig after a confirmation dialog."""
    bl_idname = "customcamera.delete_camera"
    bl_label = "Delete Camera"
    bl_description = "Delete the custom camera"
    # Set to 'DELETE_CAMERA' by the panel button before invoking.
    action: bpy.props.StringProperty()
    def execute(self, context):
        if self.action == 'DELETE_CAMERA':
            custom_camera_obj = bpy.data.objects.get("CustomCamera")
            if custom_camera_obj:
                # If the camera exists, disable Depth of Field options
                context.scene.custom_camera_props.use_depth_of_field = False
                bpy.data.objects.remove(custom_camera_obj, do_unlink=True)
                bpy.data.objects.remove(bpy.data.objects.get("CAM_target"), do_unlink=True)
                bpy.data.collections.remove(bpy.data.collections.get("Camera Collection"), do_unlink=True)
                context.scene.custom_camera_props.dof_target = None
                context.scene.custom_camera_props.cam_target = None
        return {'FINISHED'}
    def invoke(self, context, event):
        self.report({'INFO'}, "Are you sure you want to delete the camera?")
        return context.window_manager.invoke_confirm(self, event)
def on_object_selection_change(scene):
    """Depsgraph handler: mirror the Camera Collection's selection state
    into props.camera_collection_selected so the panel button stays in
    sync."""
    props = scene.custom_camera_props
    collection = bpy.data.collections.get("Camera Collection")
    is_selected = False
    if collection:
        for candidate in collection.objects:
            if candidate.select_get():
                is_selected = True
                break
    props.camera_collection_selected = is_selected
# Register the event handler when the add-on is enabled
def register():
    """Register all add-on classes, the scene property group pointer, and
    the depsgraph selection handler."""
    bpy.utils.register_class(CustomCameraProperties)
    bpy.types.Scene.custom_camera_props = bpy.props.PointerProperty(type=CustomCameraProperties)
    bpy.utils.register_class(CUSTOMCAMERA_OT_create_camera)
    bpy.utils.register_class(CUSTOMCAMERA_PT_main_panel)
    bpy.utils.register_class(CUSTOMCAMERA_OT_delete_camera)
    bpy.utils.register_class(CUSTOMCAMERA_OT_select_camera_collection)
    bpy.utils.register_class(CUSTOMCAMERA_OT_deselect_camera_collection)
    bpy.utils.register_class(UPDATE_CUSTOMCAMERA_OT_update_custom_camera)
    bpy.utils.register_class(CustomCameraPreferences)
    # Add the event handler to listen for object selection changes
    bpy.app.handlers.depsgraph_update_post.append(on_object_selection_change)
# Unregister the event handler when the add-on is disabled
def unregister():
    """Mirror of register(): remove the handler, the scene pointer, and
    every registered class."""
    # Remove the event handler
    bpy.app.handlers.depsgraph_update_post.remove(on_object_selection_change)
    bpy.utils.unregister_class(CustomCameraProperties)
    del bpy.types.Scene.custom_camera_props
    bpy.utils.unregister_class(CUSTOMCAMERA_OT_create_camera)
    bpy.utils.unregister_class(CUSTOMCAMERA_PT_main_panel)
    bpy.utils.unregister_class(CUSTOMCAMERA_OT_delete_camera)
    bpy.utils.unregister_class(CUSTOMCAMERA_OT_select_camera_collection)
    bpy.utils.unregister_class(CUSTOMCAMERA_OT_deselect_camera_collection)
    bpy.utils.unregister_class(CustomCameraPreferences)
    bpy.utils.unregister_class(UPDATE_CUSTOMCAMERA_OT_update_custom_camera)
# Allow running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
| mdreece/Custom-Camera-Blender-Add-on | custom_camera.py | custom_camera.py | py | 22,426 | python | en | code | 4 | github-code | 13 |
26370279956 | import logging
from odoo import fields, models
_logger = logging.getLogger(__name__)
class CrmPartnerActionGroup(models.Model):
    """Odoo model grouping partner actions, optionally tied to a model."""
    _name = "crm.partner.action.group"
    _description = "Action Group"
    # Human-readable group name, capped at 80 characters.
    name = fields.Char(string="Name of the Group", size=80, required=True)
    model_id = fields.Many2one(
        comodel_name="ir.model",
        string="Related Model",
        help="If possible, a warning will be shown "
        "when creating an object of this "
        "model and there are open actions "
        "for this group",
    )
    # '_company_default_get' on res.company is deprecated and shouldn't be used
    company_id = fields.Many2one(
        comodel_name="res.company",
        string="Company",
        required=True,
        default=lambda self: self.env.company,
    )
| CITOpenRep/canna-erp-third-party | crm_partner_action/models/crm_partner_action_group.py | crm_partner_action_group.py | py | 802 | python | en | code | 9 | github-code | 13 |
9235259718 | import numpy as np
from pydicom.multival import MultiValue
# This function returns the data array values mapped to 0-256 using window/level parameters
# If provided it takes into account the DICOM flags:
# - Rescale Intercept http://dicomlookup.com/lookup.asp?sw=Tnumber&q=(0028,1052)
# - Rescale Slope http://dicomlookup.com/lookup.asp?sw=Tnumber&q=(0028,1053)
# Code adapted from pydicom, requires numpy
# http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_PIL.py
def get_LUT_value(data, window, level, rescaleIntercept=0, rescaleSlope=1):
    """Apply a DICOM window/level transform, mapping *data* to 0-255.

    Honours Rescale Intercept (0028,1052) and Rescale Slope (0028,1053).
    Returns *data* unchanged if any parameter is None.
    """
    if None in [window, level, rescaleIntercept, rescaleSlope]:
        return data

    # Window/level DICOM tags may be multi-valued; use the first entry.
    # (The `or` short-circuit is deliberate: MultiValue may not resolve
    # when pydicom types are not in play.)
    if isinstance(window, list) or isinstance(window, MultiValue):
        window = window[0]
    if isinstance(level, list) or isinstance(level, MultiValue):
        level = int(level[0])

    # Some vendors store swapped rescale values; normalise to identity.
    if rescaleSlope == 0 and rescaleIntercept == 1:
        rescaleSlope = 1
        rescaleIntercept = 0

    rescaled = (data * rescaleSlope) + rescaleIntercept
    lower_cut = level - 0.5 - (window - 1) / 2
    upper_cut = level - 0.5 + (window - 1) / 2
    return np.piecewise(
        data,
        [rescaled <= lower_cut, rescaled > upper_cut],
        [0, 255,
         lambda VAL: ((((VAL * rescaleSlope) + rescaleIntercept) - (level - 0.5)) / (
             window - 1) + 0.5) * (255 - 0)])
def get_PIL_mode(ds):
    """Map a DICOM dataset's pixel format to the matching PIL image mode.

    Args:
        ds: dataset exposing BitsAllocated and SamplesPerPixel.

    Returns:
        "L" (8-bit gray), "RGB" (8-bit colour) or "I;16" (16-bit).

    Raises:
        ValueError: for unsupported combinations.  (The original code left
        ``mode`` unbound in that case and died with UnboundLocalError.)
    """
    bits = ds.BitsAllocated
    samples = ds.SamplesPerPixel
    if bits == 8 and samples == 1:
        return "L"
    if bits == 8 and samples == 3:
        return "RGB"
    if bits == 16:
        return "I;16"
    raise ValueError(
        "Unsupported pixel format: BitsAllocated=%r, SamplesPerPixel=%r"
        % (bits, samples))
def get_rescale_params(ds):
    """Return (RescaleIntercept, RescaleSlope) from *ds*.

    Missing DICOM attributes fall back to the identity transform
    (intercept 0.0, slope 1.0).
    """
    rescale_intercept = getattr(ds, 'RescaleIntercept', 0.0)
    rescale_slope = getattr(ds, 'RescaleSlope', 1.0)
    return rescale_intercept, rescale_slope
| tsaiid/femh-dicom | app/dcmconv.py | dcmconv.py | py | 1,962 | python | en | code | 2 | github-code | 13 |
39659692228 | from models.discriminator import Discriminator
from models.generator import Generator, Generator_pert
import torch
import numpy as np
import argparse
import os
from scipy.io import wavfile
from pytorch_mfcc import MFCC
import models
from torch.autograd import Variable
def default_loader(path, sample_rate=16384):
    """Load a WAV file, force it to *sample_rate* samples, scale to ~[-1, 1].

    Longer clips are truncated; shorter ones are zero-padded at the end.
    int16 samples are mapped linearly so that 32767 -> 1.0.
    """
    _, samples = wavfile.read(path)
    length = len(samples)
    if length > sample_rate:
        samples = samples[:sample_rate]
    elif length < sample_rate:
        samples = np.pad(samples, (0, sample_rate - length), "constant")
    return (2. / 65535.) * (samples.astype(np.float32) - 32767) + 1.
if __name__ == '__main__':
    # Load a trained generator, perturb one input clip into an adversarial
    # example, save it, and report the victim model's label before/after.
    parse = argparse.ArgumentParser(description="test pharse")
    parse.add_argument('--data_dir',type=str, required=True,help='the original speech to be perturbed')
    parse.add_argument('--task', choices=[0,1], default=0, help='0:speech_common_generator, 1:music_genres_generator')
    parse.add_argument('--target', type=str, required=True, default='', help='the trained model')
    parse.add_argument('--output_dir',type=str, default='./', help='the output dir of generated adversarial example')
    parse.add_argument('--checkpoint',type=str, required=True, default='checkpoints',help='the checkpoints')
    parse.add_argument('--model', choices=models.available_models, default=models.available_models[7],
                       help='model of NN(sampleCNN:0, wideResNet:7)')
    args = parse.parse_args()
    use_gpu = torch.cuda.is_available()
    G = Generator()
    # NOTE(review): mfcc_layer is only defined in the task==0 branch but is
    # used unconditionally further down — task==1 would raise NameError;
    # confirm the music-genres path.
    if args.task == 0:
        f = models.create_model(model_name=args.model, num_classes=10, in_channels=1).cuda()
        mfcc_layer = MFCC(samplerate=16384, numcep=32, nfft=2048, nfilt=32).cuda() # MFCC layer
        print("Loading a pretrained victim model ")
        checkpoint = torch.load(
            'checkpoints/wideResNet28_10.pth') #
        f.load_state_dict({k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()})
        f.eval()
        idx2classes = {0: 'yes', 1: 'no', 2: 'up', 3: 'down', 4: 'left', 5: 'right', 6: 'on', 7: 'off', 8: 'stop',
                       9: 'go'}
        generator_model_path = os.path.join(args.checkpoint, "speech_common_generator")
    else:
        f = models.create_model(model_name=args.model, num_classes=10, in_channels=1)
        print("Loading a pretrained victim model ")
        checkpoint = torch.load('checkpoints/sampleCNN.pth') #
        f.load_state_dict(checkpoint) # sampleCNN
        idx2classes = {0: 'blues', 1: 'classical', 2: 'country', 3: 'disco', 4: 'hiphop', 5: 'jazz', 6: 'metal',
                       7: 'pop', 8: 'reggae', 9: 'rock'}
        generator_model_path = os.path.join(args.checkpoint, 'music_genres_generator')
    if use_gpu:
        G = G.cuda()
        f.cuda()
    if args.checkpoint:
        print("Loading a pretrained generator model ")
        generator_model = 'generator-checkpoint-epoch-target-' + args.target + '.pth'
        checkpoint = torch.load(os.path.join(generator_model_path, generator_model))
        G.load_state_dict(checkpoint['state_dict'])
        del checkpoint
    # NOTE(review): data_dir / file / output_path below appear to be
    # leftover debugging paths — args.data_dir is what is actually loaded.
    data_dir = r'D:\data\speech_commands_data\test1'
    # file = r'down\5f814c23_nohash_0.wav'
    # file = r'go\5ff3f9a1_nohash_0.wav'
    # file = r'left\4c841771_nohash_0.wav'
    # file = r'no\9a69672b_nohash_0.wav'
    # file = r'off\3efef882_nohash_0.wav'
    # file = r'on\2c6d3924_nohash_0.wav'
    # file = r'right\9a7c1f83_nohash_0.wav'
    # file = r'stop\1fe4c891_nohash_0.wav'
    file = r'up\f428ca69_nohash_0.wav'
    # file = r'yes\6f2f57c1_nohash_0.wav'
    output_path = 'blob/demo_output/result/yes/up'
    data = default_loader(args.data_dir)
    data = torch.from_numpy(data)
    data = torch.unsqueeze(data,dim=0)
    data = torch.unsqueeze(data,dim=0).cuda()
    # label before perturb
    lengths = [16384]
    val, mfcc_lengths = mfcc_layer(torch.squeeze(data.detach(),dim=0), lengths)
    inputs = Variable(torch.unsqueeze(val, dim=1), requires_grad=True)
    outputs = f(inputs)
    outputs = torch.nn.functional.softmax(outputs, dim=1)
    pred = outputs.data.max(1, keepdim=True)[1]
    print('the original label is:', idx2classes[pred.item()])
    # Generate the adversarial example: bounded perturbation added to the
    # clip, then clamped back into the valid signal range.
    perturbation = G(data)
    adv_audio = torch.clamp(perturbation, -0.3, 0.3) + data
    fake = torch.clamp(adv_audio, -1.0, 1.0)
    (oname, extensrion) = os.path.split(os.path.basename(file))
    filename = 'fake_target_' + args.target +'_' + oname + extensrion
    # Undo the loader's normalisation and write the int16 WAV to disk.
    output = fake.cpu().data.numpy().reshape(16384, 1)
    output = (output - 1) / (2 / 65535) + 32767
    output = output.astype(np.int16)
    wavfile.write(os.path.join(args.output_dir, filename), 16384, output)
    # prediction
    val, mfcc_lengths = mfcc_layer(torch.squeeze(fake.detach(),dim=1), lengths)
    inputs = Variable(torch.unsqueeze(val, dim=1), requires_grad=True)
    outputs = f(inputs)
    outputs = torch.nn.functional.softmax(outputs, dim=1)
    pred = outputs.data.max(1, keepdim=True)[1]
    idx2classes = {0: 'yes', 1: 'no', 2: 'up', 3: 'down', 4: 'left', 5:'right',6:'on',7:'off',8:'stop',9:'go'}
    print('the perturbed label is:', idx2classes[pred.item()])
# D:\data\genres\test
# blues\blues_00000_17.wav
# classical\classical_00025_15.wav
# country\country_00005_29.wav
# disco\disco_00003_29.wav
# hiphop\hiphop_00024_16.wav
# jazz\jazz_00029_20.wav
# metal\metal_00077_6.wav
# pop\pop_00010_0.wav
# reggae\reggae_00022_24.wav
# rock\rock_00044_21.wav
| winterwindwang/SpeechAdvGan | test_gan.py | test_gan.py | py | 5,544 | python | en | code | 2 | github-code | 13 |
40240683808 | """
author:
Tomasz Sachanowski,
Aleksander Krzemiński
"""
from osobnik import Osobnik
import numpy as np
from random import sample
from random import random
class Populacja:
    """Population for a (mi + lam) evolution strategy over Osobnik weight vectors."""

    def __init__(self, lam, mi, pm, data=None):
        """
        lam - number of offspring generated per generation
        mi  - number of individuals kept in each generation
        pm  - mutation probability
        """
        self.data = data
        self.wymiar_d = data.shape[1]
        self.lam = lam
        self.mi = mi
        # pm must be given as a fraction, e.g. 0.1
        self.pm = pm
        # list comprehension creating the mi initial individuals;
        # weight vectors are drawn uniformly from [-2, 2)
        self.populacja_P = [
            Osobnik(wektor_wspol_w=wektor, data=data)
            for wektor in ((np.random.rand(mi, self.wymiar_d)-0.5)*4)]
        # offspring population; empty until one of the crossover methods runs
        self.populacja_potomkow = []
    def krzyzowanie_interpolacja(self):
        """
        Interpolation crossover: draw a in (0, 1) and build
        a*parent_A + (1-a)*parent_B for each of the lam offspring.
        """
        # clear the list so a fresh batch of offspring is created
        self.populacja_potomkow.clear()
        # loop because lam offspring must be produced
        for i in range(0, self.lam, 1):
            # pick two parents
            # NOTE(review): the original comment claimed sampling "with
            # replacement", but random.sample draws WITHOUT replacement
            # (the two parents are always distinct) -- confirm intent.
            rodzic_A, rodzic_B = sample(population=self.populacja_P, k=2)
            # draw a from the interval (0, 1)
            a = np.random.rand(1)
            wektor_interpolacji = a*rodzic_A.wektor_wspol_w + (1-a)*rodzic_B.wektor_wspol_w
            # mutation
            nowy_wektor = self.mutacja(wektor_interpolacji)
            # create the new individual
            potomek = Osobnik(wektor_wspol_w=nowy_wektor, data=self.data)
            # append to the end of the offspring list
            self.populacja_potomkow.append(potomek)
    def krzyzowanie(self):
        """
        Averaging crossover: offspring = (parent_A + parent_B) / 2.
        """
        # clear the list so a fresh batch of offspring is created
        self.populacja_potomkow.clear()
        # loop because lam offspring must be produced
        for i in range(0, self.lam, 1):
            # pick two (distinct -- random.sample is without replacement) parents
            rodzic_A, rodzic_B = sample(population=self.populacja_P, k=2)
            wektor_sumy = (rodzic_A.wektor_wspol_w + rodzic_B.wektor_wspol_w)
            wektor_sr_arytm = wektor_sumy/2
            # mutation
            nowy_wektor = self.mutacja(wektor_sr_arytm)
            # create the new individual
            potomek = Osobnik(wektor_wspol_w=nowy_wektor, data=self.data)
            self.populacja_potomkow.append(potomek)
    # This method is invoked by the crossover methods for every new offspring
    def mutacja(self, wektor_wspol):
        """
        Mutation: with probability pm, scale the given weight vector
        element-wise by factors drawn from N(1.0, 0.3); otherwise return
        it unchanged.
        """
        if random() < self.pm:
            # build a vector the same length as the weight vector,
            # holding the sampled scaling factors
            wektor_mutacji = np.random.normal(loc=1.0, scale=0.3, size=wektor_wspol.shape[0])
            wektor_wspol = wektor_wspol*wektor_mutacji
        return wektor_wspol
    def selekcja_loss_1(self):
        """
        Merge parents and offspring, sort by increasing loss computed
        with treshold_1, and keep only the best mi individuals.
        """
        tmp = self.populacja_P + self.populacja_potomkow
        tmp.sort(key=lambda osobnik: osobnik.wartosc_loss_1)
        self.populacja_P = tmp[:self.mi]
    def selekcja_loss_2(self):
        """
        Merge parents and offspring, sort by increasing loss computed
        with treshold_2, and keep only the best mi individuals.
        """
        tmp = self.populacja_P + self.populacja_potomkow
        tmp.sort(key=lambda osobnik: osobnik.wartosc_loss_2)
        self.populacja_P = tmp[:self.mi]
| Tomaszsachanowski/PSZTY | populacja.py | populacja.py | py | 4,069 | python | pl | code | 0 | github-code | 13 |
34119244680 | # ============ PROBLEM 1 ============
print("ENTERING P. 1: Fibonacci Sequence")
_ = input("Press any key when ready to continue")  # pause so the user can read the banner
def fibs_below(n):
    """Print every Fibonacci number <= n, space separated, then a newline.

    For n < 1 nothing is printed (not even the newline), matching the
    original early return. Fixes the original loop, which ran at most n
    iterations and therefore skipped the final Fibonacci value for small
    n (e.g. printed "1 1" instead of "1 1 2" for n = 2).
    """
    if n < 1:
        return
    f1, f2 = 0, 1
    # Walk the sequence until the next value would exceed n.
    while f2 <= n:
        print(f2, end=" ")
        f1, f2 = f2, f1 + f2
    print()
fibs_below(int(input("Enter n for fibonnaci sequence:")))  # read the upper bound from the user
print("EXITING P.1")
# ============ END PROBLEM 1 ============ | benhg/comp-physics | basic python/fib_seq.py | fib_seq.py | py | 520 | python | en | code | 0 | github-code | 13 |
25285150181 | from pylinal import Matrix, Vector
# Affine map example: a reflection (linear part A) followed by a
# translation (offset v).
A = Matrix([
    [-1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]
])
v = Vector([3, -1, 2])

# PEP 8 (E731): use def instead of binding lambdas to names -- same
# callables, but with proper names in tracebacks.
def reflection(x):
    """Apply the linear part (reflection matrix A)."""
    return A @ x

def motion(x):
    """Apply the translation part (+ v)."""
    return x + v

def affine(x):
    """Full affine map: translation after reflection."""
    return motion(reflection(x))

x = Vector([1, 1, 1])
# Sanity check: the composed map must equal A @ x + v.
assert affine(x) == A @ x + v
| PegasusHunter/pylinal | examples/affine.py | affine.py | py | 272 | python | en | code | 0 | github-code | 13 |
9985417220 | # -*- coding: utf-8 -*-
import sys
import protocol
def main(mds_addr, mds_port, study_state):
    """Open a Protocol session, run query1 for study_state, print the payload."""
    session = protocol.Protocol(mds_addr=mds_addr, mds_port=mds_port)
    session.launch()
    payload = session.query1(study_state)
    session.finish()
    print("receive payload:")
    print("{}".format(payload))
if __name__ == '__main__':
    # Expect exactly three arguments: address, port and study state.
    if len(sys.argv) != 4:
        print("usage: {} mds_addr mds_port study_state".format(sys.argv[0]))
        sys.exit(-1)
    mds_addr = sys.argv[1]
    mds_port = int(sys.argv[2])
    study_state = int(sys.argv[3])
    try:
        main(mds_addr, mds_port, study_state)
    except KeyboardInterrupt:
        # Exit with an error status on Ctrl-C without a traceback.
        sys.exit(-1)
| abrance/mine | wait/autotest.2020.03.16/query1.py | query1.py | py | 670 | python | en | code | 0 | github-code | 13 |
39554190538 | import pandas as pd
import quandl, math, datetime
import numpy as np
from sklearn import preprocessing, cross_validation
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pickle # serialization - arrange something in a series
# regression - takes continous data and find best fit
# features or labels?
# features are attributes of what may cause adjusted price (in 10 days or 10%)
# price is label
# X is features, y is labels
# Linear-regression forecast of Google's adjusted close price.
g_df = quandl.get('EOD/GOOGL') # Google stock, EOD sample data
df = pd.DataFrame(g_df)
# print(df.head())
# prints date, open, high, low, close, volume, ex-dividend,
# split ratio, adj.open, adj.high, adj.low, adj.close, adj.volume
# split shares denoted by 'Adj.' - more accurate measure of true stock price
# high-low shows margin of volatility for the day
# open-close shows change in price (up/down) within one day
df = df[['Adj. Open','Adj. High','Adj. Low','Adj. Close','Adj. Volume']]
# reflect split stock - more accurate
df['HL%'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100.0
# high-low percentage change, percent volatility
df['%CHANGE'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
# daily percent change
# only thing affecting price is Adj. Close, must drop it
df = df[['Adj. Close','HL%','%CHANGE','Adj. Volume']]
forecast_col = 'Adj. Close'
df.fillna(value=-99999, inplace=True)
# treated as an outlier, but does not sacrifice data by deleting column
forecast_out = int(math.ceil(0.1*len(df))) # forecast horizon in days
# rounds everything to the nearest whole number (integer form)
# predicts out 10% of the DataFrame
df['label'] = df[forecast_col].shift(-forecast_out) # prediction column
# shift column negatively, shifted up - label column for each row will be
# adjusted close price forecast_out days into the future
# NOTE(review): positional axis argument to drop is deprecated in newer
# pandas; axis=1 (columns) is what is meant here.
X = np.array(df.drop(['label', 'Adj. Close'],1))
# returns new DataFrame converted to numpy array stored as X
X = preprocessing.scale(X) # normalize
X_recent = X[-forecast_out:] # rows to predict against
# don't have y values, not trained or tested against this data
X = X[:-forecast_out]
df.dropna(inplace=True)
y = np.array(df['label'])
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# the same function lives in sklearn.model_selection.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
# testing 20% of data, take all features and labels, shuffles, fit classifiers
clf = LinearRegression(n_jobs=-1) # running as many possible threads/jobs at once
clf.fit(X_train, y_train)
with open('linearregression.pickle', 'wb') as f:
    pickle.dump(clf, f)
# saving classifier to avoid training step, dumps classifier
# NOTE(review): pickle_in is never closed; a with-block would be cleaner.
pickle_in = open('linearregression.pickle', 'rb')
clf = pickle.load(pickle_in) # don't need to train classifier every time
accuracy = clf.score(X_test, y_test) # R^2 score on the held-out split
# train and test on separate data
# predict based on X data
forecast_set = clf.predict(X_recent)
# passes single/array of values, output in same order the values
# each investment/price report is a day, means each forecast is a day later
print(forecast_set, accuracy, forecast_out) # the next forecast_out days of prices
df['Forecast'] = np.nan
last_date = df.iloc[-1].name # last date in the index
last_unix = last_date.timestamp() # last unix value
one_day = 86400 #seconds in a day
next_unix = last_unix + one_day # next day
for i in forecast_set: # have days on axis
    # iterating through forecast set taking each forecast and day
    next_date = datetime.datetime.fromtimestamp(next_unix)
    next_unix += one_day
    # setting those as the values in DataFrame, making future features NaN
    df.loc[next_date] = [np.nan for _ in range (len(df.columns)-1)] + [i]
    # references index (date), next date is a datestamp
    # if index exists, replace. if index does not exist, create it
    # all future values but Forecast are NaN (do not have that data yet)
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show() | lucyji12/Finance_ML | finance.py | finance.py | py | 4,011 | python | en | code | 0 | github-code | 13 |
70744588819 | entradas = int(input())
def fib(numero):
    """Naive recursive Fibonacci that also counts invocations.

    Every call increments the module-level ``calls`` counter as a side
    effect; the caller is expected to reset it before each query.
    """
    global calls
    calls += 1
    return numero if numero <= 1 else fib(numero - 1) + fib(numero - 2)
for e in range(entradas):
    n = int(input())
    calls = 0  # reset the global call counter before each query
    resultado = fib(n)
    # URI 1029 reports calls excluding the initial one, except for base cases
    calls = 0 if n<= 1 else calls - 1
print(f"fib({n}) = {calls} calls = {resultado}") | JoaoVMansur/uri_problems.py | 1029.py | 1029.py | py | 348 | python | pt | code | 0 | github-code | 13 |
17080736264 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.EcocheckYzPolicyCheckDetail import EcocheckYzPolicyCheckDetail
class AlipayCommerceLogisticsCheckPostpolicyQueryResponse(AlipayResponse):
    """Response model for alipay.commerce.logistics.check.postpolicy.query."""

    def __init__(self):
        super(AlipayCommerceLogisticsCheckPostpolicyQueryResponse, self).__init__()
        self._app_check_info_list = None
        self._invalid_app_id_list = None

    @property
    def app_check_info_list(self):
        return self._app_check_info_list

    @app_check_info_list.setter
    def app_check_info_list(self, value):
        # Accept a list of detail models or of plain dicts; coerce dicts.
        if isinstance(value, list):
            self._app_check_info_list = [
                item if isinstance(item, EcocheckYzPolicyCheckDetail)
                else EcocheckYzPolicyCheckDetail.from_alipay_dict(item)
                for item in value
            ]

    @property
    def invalid_app_id_list(self):
        return self._invalid_app_id_list

    @invalid_app_id_list.setter
    def invalid_app_id_list(self, value):
        if isinstance(value, list):
            self._invalid_app_id_list = list(value)

    def parse_response_content(self, response_content):
        response = super(AlipayCommerceLogisticsCheckPostpolicyQueryResponse, self).parse_response_content(response_content)
        if 'app_check_info_list' in response:
            self.app_check_info_list = response['app_check_info_list']
        if 'invalid_app_id_list' in response:
            self.invalid_app_id_list = response['invalid_app_id_list']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayCommerceLogisticsCheckPostpolicyQueryResponse.py | AlipayCommerceLogisticsCheckPostpolicyQueryResponse.py | py | 1,747 | python | en | code | 241 | github-code | 13 |
42125916512 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
from typing import List, Tuple, Union
from sklearn.metrics import roc_auc_score, roc_curve, average_precision_score, f1_score, accuracy_score
def get_stats(array):
    """Return (mean, std) of the given sequence as numpy scalars."""
    values = np.asarray(array)
    return np.mean(values), np.std(values)
def write_summary(args, config_str, stats):
    """Append one run's result block to results/<dataset>/<embedder>.txt.

    stats holds [roc, ap, f1, acc]. Uses a context manager so the file
    handle is closed even if a write fails (the original opened and
    closed the handle manually).
    """
    with open("results/{}/{}.txt".format(args.dataset, args.embedder), "a") as f:
        f.write("--------------------------------------------------------------------------------- \n")
        f.write(config_str)
        f.write("\n")
        f.write("ROC : {:.4f} || AP : {:.4f} || F1 : {:.4f} || Acc : {:.4f} ".format(stats[0], stats[1], stats[2], stats[3]))
        f.write("\n")
        f.write("--------------------------------------------------------------------------------- \n")
def write_summary_total(args, config_str, stats):
    """Append an aggregated result block to results/<dataset>/<embedder>_total.txt.

    stats holds eight values printed as four "value(value)" pairs for
    ROC/AP/F1/Acc -- presumably mean(std) per metric (confirm against the
    caller). Uses a context manager instead of manual open/close.
    """
    with open("results/{}/{}_total.txt".format(args.dataset, args.embedder), "a") as f:
        f.write("--------------------------------------------------------------------------------- \n")
        f.write(config_str)
        f.write("\n")
        f.write("ROC : {:.4f}({:.4f}) || AP : {:.4f}({:.4f}) || F1 : {:.4f}({:.4f}) || Acc : {:.4f}({:.4f}) ".format(stats[0], stats[1], stats[2], stats[3],
                                                                                                                    stats[4], stats[5], stats[6], stats[7]))
        f.write("\n")
        f.write("--------------------------------------------------------------------------------- \n")
def write_experiment(args, config_str, best_config):
f = open("results/{}/{}.txt".format(args.dataset, args.embedder), "a")
f.write("--------------------------------------------------------------------------------- \n")
f.write(config_str)
f.write("\n")
f.write(best_config)
f.write("\n")
f.write("--------------------------------------------------------------------------------- \n")
f.close()
def one_of_k_encoding(x, allowable_set):
    """Strict one-hot encode x against allowable_set; raise for unknown x."""
    if x not in allowable_set:
        raise Exception("input {0} not in allowable set{1}:".format(
            x, allowable_set))
    return [item == x for item in allowable_set]
def one_of_k_encoding_unk(x, allowable_set):
    """One-hot encode x; unknown values map to the LAST element of allowable_set."""
    if x not in allowable_set:
        x = allowable_set[-1]
    return [item == x for item in allowable_set]
def get_len_matrix(len_list):
    """Build a block-diagonal 0/1 membership matrix from segment lengths.

    Row i has ones exactly over the slice of the concatenated node axis
    belonging to segment i; the number of columns is sum(len_list).
    """
    lengths = np.array(len_list)
    total = int(np.sum(lengths))
    rows = []
    offset = 0
    for length in lengths:
        row = np.zeros(total)
        row[offset:offset + length] = 1
        rows.append(row)
        offset += length
    return np.array(rows)
def create_batch_mask(samples):
    """Build a sparse (num_graphs x num_nodes) membership mask for each of
    the two batched graphs in `samples`, using their `.batch` index vectors.
    """
    def build_mask(graph):
        batch = graph.batch.reshape(1, -1)
        positions = torch.tensor(range(batch.shape[1])).reshape(1, -1)
        indices = torch.cat([batch, positions])
        return torch.sparse_coo_tensor(
            indices, torch.ones(indices.shape[1]),
            size=(batch.max() + 1, batch.shape[1]))

    return build_mask(samples[0]), build_mask(samples[1])
class KLD(nn.Module):
    """KL divergence between softmax distributions taken along dim 0."""

    def forward(self, inputs, targets):
        log_probs = F.log_softmax(inputs, dim=0)
        target_probs = F.softmax(targets, dim=0)
        return F.kl_div(log_probs, target_probs, reduction='batchmean')
def get_roc_score(preds, labels):
    """Compute ROC-AUC, average precision, F1 and accuracy.

    Scores are binarised at 0.5 by eval_threshold for the F1/accuracy
    metrics; the raw scores feed the ranking metrics.
    """
    scores, hard_preds = eval_threshold(labels, preds)
    roc = roc_auc_score(labels, scores)
    ap = average_precision_score(labels, scores)
    f1 = f1_score(labels, hard_preds)
    acc = accuracy_score(labels, hard_preds)
    return roc, ap, f1, acc
def eval_threshold(labels_all, preds_all):
    """Binarise scores at a fixed 0.5 threshold.

    Returns (raw_scores, hard_predictions). labels_all is unused but kept
    for interface compatibility: an earlier (now removed) disabled code
    path used it to pick the ROC-optimal threshold via Youden's J.
    """
    optimal_threshold = 0.5
    preds_all_ = [1 if p >= optimal_threshold else 0 for p in preds_all]
    return preds_all, preds_all_
12310454165 | from parsers import page_rank as prp, inv_index as iip
from typing import Dict, List
from firebase_admin import db
from uuid import uuid3
from tqdm import tqdm
import uuid
import json
import db_init
def poblate_pages_and_rank(pages: List, pages_ref: db.Reference,
                           ranks_ref: db.Reference):
    """Write every (url, rank) pair under /pages and /ranks, keyed by a
    deterministic UUID3 of the url."""
    print('Poblating pages and ranks node')
    page_map, rank_map = {}, {}
    for url, rank in tqdm(pages):
        key = str(uuid3(uuid.NAMESPACE_DNS, url))
        page_map[key] = url
        rank_map[key] = rank
    pages_ref.update(page_map)
    ranks_ref.update(rank_map)
def poblate_indexes(indexes: Dict, index_ref: db.Reference,
                    pages_ref: db.Reference):
    """Upload the inverted index, keeping only links present under /pages.

    Uploads the accumulated structure every 10000 processed words and
    once more at the end, then dumps it to indexes.json as a local copy.
    """
    existent_links = {}
    counter = 0
    nextupload = 10000
    print('Poblating indexes')
    for word, links in tqdm(indexes.items()):
        existent_links[word] = {}
        for link in links:
            page_id = uuid3(uuid.NAMESPACE_DNS, link)
            page_id = str(page_id)
            link_ref = pages_ref.child(page_id)
            # NOTE(review): firebase_admin's Reference.child() normally
            # returns a Reference even for missing keys, so this None check
            # may filter nothing -- confirm against the library version.
            if link_ref is not None:
                existent_links[word][page_id] = links[link]
        counter += 1
        if counter == nextupload:
            try:
                index_ref.update(existent_links)
                nextupload += 10000
            except NameError:
                # NOTE(review): NameError is an odd exception to expect
                # here; a firebase error type was probably intended.
                print(NameError)
    try:
        index_ref.update(existent_links)
    except NameError:
        print(NameError)
    print(json.dumps(existent_links), file=open('indexes.json', 'w'))
pages = prp.get_pages(withRank=True)  # (url, rank) pairs from the page-rank parser
inv_index = iip.get_index()  # inverted index: word -> links mapping
pages_ref = db.reference('/pages')
ranks_ref = db.reference('/ranks')
index_ref = db.reference('/indexes')
# Wipe the three nodes before repopulating them.
pages_ref.set({})
ranks_ref.set({})
index_ref.set({})
poblate_pages_and_rank(pages, pages_ref, ranks_ref)
poblate_indexes(inv_index, index_ref, pages_ref)
| sharon1160/buscape | db_population.py | db_population.py | py | 2,016 | python | en | code | 1 | github-code | 13 |
7408106701 | import socket
import threading
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP socket
client.connect(('127.0.0.1', 55555))  # chat server must be listening locally
nickname = input("Choose the nickname: ")
def recieve():
    """Background loop: receive chat messages and print them.

    Answers the server's 'NICK' handshake with the chosen nickname.
    (The misspelled name is kept because the thread created at module
    level refers to it.)
    """
    while True:
        try:
            message = client.recv(1024).decode('ascii')
            if message == 'NICK':
                client.send(nickname.encode('ascii'))
            else:
                print(message)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are not swallowed; any socket/decode error
            # closes the connection and ends the loop.
            print("An error occurred!")
            client.close()
            break
def write():
    """Background loop: read stdin lines and send them prefixed with the nickname."""
    while True:
        try:
            message = f'{nickname}: {input("")}'
            client.send(message.encode("ascii"))
        except Exception:
            # Narrowed from a bare `except:`; stop on EOF, encoding or
            # socket failure without swallowing KeyboardInterrupt.
            print("Something go wrong")
            break
receive_thread = threading.Thread(target=recieve)  # reader thread
receive_thread.start()
write_thread = threading.Thread(target=write)  # writer thread
write_thread.start()
| PavelUdovichenko/test-projects | DINS/client.py | client.py | py | 908 | python | en | code | 0 | github-code | 13 |
14443279846 | from sklearn.feature_extraction import text
import numpy as np
import re, copy
from nltk.corpus import stopwords
## Import excel data of labels
from openpyxl import load_workbook
import numpy as np
## Global variables shared across the train / test / accuracy phases
heteronyms, vec, fratio, pi0, pi1 = [],[],[],[],[]
numWords = 10 # number of words to be collected from both sides of heteronyms
preNum, postNum = 3, 3 # from left/right of heteronyms
prediction = []
## Search for "word" in "List", returns indices of "word"
def searchList(List, word, start = -1):
    """Return every index of `word` in `List` strictly after `start`."""
    matches = []
    position = start
    while True:
        try:
            position = List.index(word, position + 1)
        except ValueError:
            return matches
        matches.append(position)
## Training: input = training data (.txt), labels(.xlsx)
def bayesianTrain(trainData,trainLabels):
    """Fit the per-heteronym naive-Bayes models.

    Reads <trainData>.txt (corpus) and <trainLabels>.xlsx (sense labels)
    and fills the module globals: heteronyms, vec (three CountVectorizers
    per heteronym: full context window, 3 preceding words, 3 following
    words), fratio (per-word class-0/class-1 likelihood ratios) and
    pi0/pi1 (class priors).

    NOTE(review): relies on the Python 2 `unicode` builtin; this function
    will not run unmodified under Python 3.
    """
    global heteronyms, vec, fratio, pi0, pi1, numWords, preNum, postNum
    datasetFile = trainData + '.txt'
    labelFile = trainLabels + '.xlsx'
    wb = load_workbook(filename = labelFile)
    ws = wb['Sheet1']
    labels = []
    for row in ws.rows:
        for cell in row:
            labels.append(cell.value)
    heteronyms = []
    labelAll = []
    tmpLabel = []
    # String cells name a heteronym; the numeric cells after each name are
    # the 0/1 sense labels of its successive occurrences.
    for i in range(len(labels)):
        if type(labels[i]) == unicode:
            heteronyms.append(str(labels[i]).lower())
            if i != 0:
                labelAll.append(tmpLabel)
                tmpLabel = []
        else:
            if labels[i] is not None:
                tmpLabel.append(int(labels[i]))
            if i == (len(labels) - 1):
                labelAll.append(tmpLabel)
    ## Open text files that contain the word to be trained for
    origText = open(datasetFile).read().split()
    ## Look for the word (ind = indices of words)
    ind, wordsAll = [], []
    lenText = len(origText)
    for i in range(len(heteronyms)):
        ind.append(searchList(origText,heteronyms[i])) # textlist -> filtered for screening after removing stopwords
        # Collect (numWords) # of nearby words
        words = []
        limit = 0
        for j in ind[i]:
            # Collect upto 300 entries
            limit += 1
            if limit == 301: break
            # Pad with None where the context window crosses either end
            # of the text.
            if (j - numWords <= 0) and (j + numWords < lenText):
                words.append([None]*(numWords - j) + origText[0:j+numWords+1])
            elif (j - numWords > 0) and (j + numWords >= lenText):
                words.append(origText[j-numWords:lenText] + [None]*(j+numWords-lenText+1))
            elif (j - numWords <= 0) and (j + numWords >= lenText):
                words.append([None]*(numWords - j) + origText[0:lenText] + [None]*(j+numWords-lenText+1))
            else:
                words.append(origText[j-numWords:j+numWords+1])
        wordsAll.append(words)
    ## 3 arrays!
    preWords, postWords = [], []
    for i in range(len(wordsAll)):
        pre, post = [], []
        for j in range(len(wordsAll[i])):
            pre.append(unicode(wordsAll[i][j][numWords-preNum:numWords]))
            post.append(unicode(wordsAll[i][j][numWords+1:numWords+postNum+1]))
            wordsAll[i][j] = unicode(wordsAll[i][j])
        preWords.append(pre)
        postWords.append(post)
    ## Vectorizers
    vecAll, vecPre, vecPost = [],[],[]
    fitWords, fitPre, fitPost = [],[],[]
    fitWords0, fitWords1, fitPre0, fitPre1, fitPost0, fitPost1 = [],[],[],[],[],[]
    countW0, countW1, countPre0, countPre1, countPost0, countPost1 = [],[],[],[],[],[]
    W0, W1, Pre0, Pre1, Post0, Post1 = [],[],[],[],[],[]
    fratioW, fratioPre, fratioPost = [],[],[]
    piW0, piW1, piPre0, piPre1, piPost0, piPost1 = [],[],[],[],[],[]
    for i in range(len(heteronyms)):
        if len(wordsAll[i]) != 0:
            stop_words = text.ENGLISH_STOP_WORDS.union({heteronyms[i]})
            vecAll.append(text.CountVectorizer(stop_words=stop_words,min_df=2)) # Removes stop words -> words
            vecPre.append(text.CountVectorizer()) # Keeps stop words -> preWords
            vecPost.append(text.CountVectorizer()) # Keeps stop words -> postWords
            fitWords.append(vecAll[i].fit_transform(wordsAll[i]).toarray())
            fitPre.append(vecPre[i].fit_transform(preWords[i]).toarray())
            fitPost.append(vecPost[i].fit_transform(postWords[i]).toarray())
        else: # No occurrence of heteronym
            vecAll.append(None)
            vecPre.append(None)
            vecPost.append(None)
            fitWords.append(None)
            fitPre.append(None)
            fitPost.append(None)
        # Estimate probability of each word in vocabulary
        # Split the count vectors by sense label (0 vs 1).
        k = 0
        fitWords0tmp, fitWords1tmp = [], []
        fitPre0tmp, fitPre1tmp = [], []
        fitPost0tmp, fitPost1tmp = [], []
        for j in labelAll[i]:
            if j == 0:
                fitWords0tmp.append(fitWords[i][k])
                fitPre0tmp.append(fitPre[i][k])
                fitPost0tmp.append(fitPost[i][k])
            else:
                fitWords1tmp.append(fitWords[i][k])
                fitPre1tmp.append(fitPre[i][k])
                fitPost1tmp.append(fitPost[i][k])
            k += 1
        fitWords0.append(fitWords0tmp), fitWords1.append(fitWords1tmp)
        fitPre0.append(fitPre0tmp), fitPre1.append(fitPre1tmp)
        fitPost0.append(fitPost0tmp), fitPost1.append(fitPost1tmp)
        # Add-one (Laplace) smoothed per-class word counts.
        countW0.append(np.sum(fitWords0[i],axis=0)+1.0), countW1.append(np.sum(fitWords1[i],axis=0)+1.0)
        countPre0.append(np.sum(fitPre0[i],axis=0)+1.0), countPre1.append(np.sum(fitPre1[i],axis=0)+1.0)
        countPost0.append(np.sum(fitPost0[i],axis=0)+1.0), countPost1.append(np.sum(fitPost1[i],axis=0)+1.0)
        W0.append(countW0[-1]/np.sum(countW0[-1])), W1.append(countW1[-1]/np.sum(countW1[-1]))
        Pre0.append(countPre0[-1]/np.sum(countPre0[-1])), Pre1.append(countPre1[-1]/np.sum(countPre1[-1]))
        Post0.append(countPost0[-1]/np.sum(countPost0[-1])), Post1.append(countPost1[-1]/np.sum(countPost1[-1]))
        # Compute ratio of these probabilities
        fratioW.append(W0[-1]/W1[-1])
        fratioPre.append(Pre0[-1]/Pre1[-1])
        fratioPost.append(Post0[-1]/Post1[-1])
        # Compute prior probabilities
        nW0, nW1 = len(fitWords0[-1]), len(fitWords1[-1])
        nPre0, nPre1= len(fitPre0[-1]), len(fitPre1[-1])
        nPost0, nPost1 = len(fitPost0[-1]), len(fitPost1[-1])
        piW0.append(float(nW0)/(nW0+nW1)), piW1.append(float(nW1)/(nW0+nW1))
        piPre0.append(float(nPre0)/(nPre0+nPre1)), piPre1.append(float(nPre1)/(nPre0+nPre1))
        piPost0.append(float(nPost0)/(nPost0+nPost1)), piPost1.append(float(nPost1)/(nPost0+nPost1))
    fratio = [fratioW, fratioPre, fratioPost]
    pi0, pi1 = [piW0, piPre0, piPost0], [piW1, piPre1, piPost1]
    vec = [vecAll, vecPre, vecPost]
def bayesianTest(testingData):
    """Classify every heteronym occurrence in the given text string.

    Returns (resultArr, ind): one 0/1 sense decision per occurrence
    (a vote over the three likelihood-ratio models fitted by
    bayesianTrain) and the token indices where heteronyms were found.

    NOTE(review): uses the Python 2 `unicode` builtin, like bayesianTrain.
    """
    global heteronyms, vec, fratio, pi0, pi1, numWords, preNum, postNum
    vecAll, vecPre, vecPost = vec[0], vec[1], vec[2]
    fratioW, fratioPre, fratioPost = fratio[0], fratio[1], fratio[2]
    piW0, piPre0, piPost0 = pi0[0], pi0[1], pi0[2]
    piW1, piPre1, piPost1 = pi1[0], pi1[1], pi1[2]
    # testFileName = testingData + '.txt'
    # origText = open(testFileName).read()
    origText = testingData
    sentence = re.sub("([^\w']|_)+",' ',origText).lower().split()
    ## Locate any heteronym occurrences
    ind, het, words = [], [], []
    preWords, postWords, words, wordsAll = [], [], [], []
    lenText = len(sentence)
    numSamples, sampleCt = 40, 0
    for i in range(len(heteronyms)):
        tmpInd = searchList(sentence,heteronyms[i])
        if tmpInd != []:
            # Cap at numSamples occurrences per heteronym.
            if len(tmpInd) >= numSamples:
                tmpInd = tmpInd[0:numSamples]
            ind.append(tmpInd)
            het.append(i)
        # NOTE(review): sampleCt is never incremented, so this break never
        # fires -- confirm whether an occurrence budget was intended.
        if sampleCt == numSamples: break
    for i in range(len(ind)):
        for j in ind[i]:
            # Pad with None where the context window crosses either end
            # of the text (same scheme as training).
            if (j - numWords <= 0) and (j + numWords < lenText):
                words.append([None]*(numWords - j) + sentence[0:j+numWords+1])
            elif (j - numWords > 0) and (j + numWords >= lenText):
                words.append(sentence[j-numWords:lenText] + [None]*(j+numWords-lenText+1))
            elif (j - numWords <= 0) and (j + numWords >= lenText):
                words.append([None]*(numWords - j) + sentence[0:lenText] + [None]*(j+numWords-lenText+1))
            else:
                words.append(sentence[j-numWords:j+numWords+1])
            wordsAll.append(words[-1])
            # 3 arrays!
            preWords.append(unicode(words[-1][numWords-preNum:numWords]))
            postWords.append(unicode(words[-1][numWords+1:numWords+postNum+1]))
            words[-1] = unicode(words[-1])
    ## Vectorizer
    LR = []
    fitWords, fitPre, fitPost, resultArr = [], [], [], []
    arrInd = 0
    for i in range(len(ind)):
        k = het[i]
        for j in ind[i]:
            LRtmp = []
            fitWords.append(vecAll[k].transform([words[arrInd]]).toarray().flatten())
            fitPre.append(vecPre[k].transform([preWords[arrInd]]).toarray().flatten())
            fitPost.append(vecPost[k].transform([postWords[arrInd]]).toarray().flatten())
            # Naive-Bayes likelihood ratio P(class 0)/P(class 1) per model.
            LRtmp.append(np.prod(fratioW[k]**fitWords[-1])*piW0[k]/piW1[k])
            LRtmp.append(np.prod(fratioPre[k]**fitPre[-1])*piPre0[k]/piPre1[k])
            LRtmp.append(np.prod(fratioPost[k]**fitPost[-1])*piPost0[k]/piPost1[k])
            LR.append(LRtmp)
            result, ct = 0,0
            ctLabels = ["All", "Pre", "Post"]
            # Each model votes: strong/weak evidence pushes the score
            # towards sense 0 (positive) or sense 1 (negative).
            for lr in LRtmp:
                if lr == 0.57142857142857151: # All elements = "None"
                    result += 0
                elif lr > 2:
                    result += 2
                elif (lr <= 2) & (lr > 1):
                    result += 1
                elif (lr <= 1) & (lr > 0.5):
                    result -= 1
                else:
                    result -= 2
            if result >= 0:
                result = 0
                # print heteronyms[k] +"\t["+dic[k][0]+"]"
            else:
                result = 1
                # print heteronyms[k] +"\t["+dic[k][1]+"]"
            resultArr.append(result)
            arrInd += 1
    return resultArr, ind
def bayesianAccuracy(testingLabels):
    """Compare the global `prediction` list against <testingLabels>.xlsx.

    Returns per-heteronym accuracy percentages. Hard-coded for four
    40-occurrence groups at offsets [0, 1, 3, 4] in the prediction array.

    NOTE(review): Run() assigns its own local `prediction` and never sets
    the module global, so this function only works if `prediction` is
    populated elsewhere -- confirm the calling convention.
    """
    global prediction
    ## Load labels for test data
    testingLabelFile = testingLabels + '.xlsx'
    wb = load_workbook(filename = testingLabelFile)
    ws = wb['Sheet1']
    OANC_labels = []
    for row in ws.rows:
        for cell in row:
            OANC_labels.append(cell.value)
    hetTest = []
    labelTest = []
    tmpLabel = []
    # Same layout as training labels: a string cell names the heteronym,
    # the numeric cells that follow are its sense labels.
    for i in range(len(OANC_labels)):
        if type(OANC_labels[i]) == unicode:
            hetTest.append(str(OANC_labels[i]).lower())
            if i != 0:
                labelTest.append(tmpLabel)
                tmpLabel = []
        else:
            if OANC_labels[i] is not None:
                tmpLabel.append(int(OANC_labels[i]))
        if i == (len(OANC_labels) - 1):
            labelTest.append(tmpLabel)
    del wb, ws, OANC_labels, tmpLabel
    ## Compare classification result with test data labels
    percent = []
    correct, b = 0, 0
    for a in [0,1,3,4]:
        correct = len([i for i, j in zip(prediction[a*40:a*40+40], labelTest[b]) if i == j])
        percent.append(correct/40.0*100)
        b += 1
    return percent
#testData = 'OANC'
#testLabels = 'OANC_labels'
def Run(testData):
    """Train on the bundled corpus, then classify heteronyms in testData.

    Returns (prediction, ind): the per-occurrence 0/1 sense labels and the
    token indices of each heteronym found.

    NOTE(review): `prediction` here is a local and does not update the
    module-level global that bayesianAccuracy reads -- confirm intent.
    """
    trainData = 'textDataAll'
    trainLabels = 'labels'
    bayesianTrain(trainData, trainLabels)
    prediction, ind = bayesianTest(testData)
    return prediction, ind
#percent = bayesianAccuracy(testLabels) | alex-parisi/Heteronymous-Ambiguity-Resolution | bayesian.py | bayesian.py | py | 11,873 | python | en | code | 2 | github-code | 13 |
42000202152 | #
# @lc app=leetcode.cn id=209 lang=python3
#
# [209] Minimum Size Subarray Sum
# Sliding window approach:
# 1. Initialise the minimum length and the running window sum
# 2. Maintain left and right window pointers
# 3. Accumulate nums[end] into the running sum
# 4. While the sum reaches target, record the window length and advance the left pointer
# 5. Return value: when no qualifying subarray exists, return 0
# Time: O(n), Space: O(1)
# @lc code=start
class Solution:
    def minSubArrayLen(self, target: int, nums: List[int]) -> int:
        """Length of the shortest contiguous subarray with sum >= target, or 0.

        Sliding window: grow the window on the right; while the window sum
        still reaches target, shrink it from the left and record the
        minimum length. O(n) time, O(1) extra space. (A commented-out
        equivalent two-pointer variant was removed as dead code.)
        """
        import math
        min_len, window_sum = math.inf, 0
        start = 0
        for end in range(len(nums)):
            window_sum += nums[end]
            while window_sum >= target:
                min_len = min(min_len, end - start + 1)
                window_sum -= nums[start]
                start += 1
        # math.inf means no window ever reached target.
        return 0 if min_len == math.inf else min_len
# @lc code=end
| WeiS49/leetcode | Solution/哈希表 双指针/滑动窗口/209.长度最小的子数组.py | 209.长度最小的子数组.py | py | 1,351 | python | en | code | 0 | github-code | 13 |
74564806098 | #!/usr/bin/env python
"""
_SizeBased_t_
Size based splitting test.
"""
from builtins import range
import unittest
from WMCore.DataStructs.File import File
from WMCore.DataStructs.Fileset import Fileset
from WMCore.DataStructs.Job import Job
from WMCore.DataStructs.Subscription import Subscription
from WMCore.DataStructs.Workflow import Workflow
from WMCore.JobSplitting.SplitterFactory import SplitterFactory
from WMCore.Services.UUIDLib import makeUUID
class SizeBasedTest(unittest.TestCase):
    """Unit tests for the SizeBased job splitting algorithm."""

    def setUp(self):
        """
        _setUp_

        Create three subscriptions: one with a single 1000-byte file, one
        with ten 1000-byte files, and one whose files span multiple sites.
        (The original docstring claimed only two were created.)
        """
        self.multipleFileFileset = Fileset(name = "TestFileset1")
        for i in range(10):
            newFile = File(makeUUID(), size = 1000, events = 100, locations = set(["somese.cern.ch"]))
            self.multipleFileFileset.addFile(newFile)

        self.singleFileFileset = Fileset(name = "TestFileset2")
        newFile = File("/some/file/name", size = 1000, events = 100, locations = set(["somese.cern.ch"]))
        self.singleFileFileset.addFile(newFile)

        self.multipleSiteFileset = Fileset(name = "TestFileset3")
        for i in range(5):
            newFile = File(makeUUID(), size = 1000, events = 100, locations = set(["somese.cern.ch"]))
            newFile.setLocation("somese.cern.ch")
            self.multipleSiteFileset.addFile(newFile)
        for i in range(5):
            newFile = File(makeUUID(), size = 1000, events = 100)
            newFile.setLocation(["somese.cern.ch","otherse.cern.ch"])
            self.multipleSiteFileset.addFile(newFile)

        testWorkflow = Workflow()
        self.multipleFileSubscription = Subscription(fileset = self.multipleFileFileset,
                                                     workflow = testWorkflow,
                                                     split_algo = "SizeBased",
                                                     type = "Processing")
        self.singleFileSubscription = Subscription(fileset = self.singleFileFileset,
                                                   workflow = testWorkflow,
                                                   split_algo = "SizeBased",
                                                   type = "Processing")
        self.multipleSiteSubscription = Subscription(fileset = self.multipleSiteFileset,
                                                     workflow = testWorkflow,
                                                     split_algo = "EventBased",
                                                     type = "Processing")
        return

    def tearDown(self):
        """
        _tearDown_

        Nothing to do...
        """
        pass

    def testExactEvents(self):
        """
        _testExactEvents_

        Test size based splitting when the size per job exactly matches the
        size of the single input file: exactly one job must come back.
        (Docstring corrected: this is size based, not event based.)
        """
        splitter = SplitterFactory()
        jobFactory = splitter(self.singleFileSubscription)

        jobGroups = jobFactory(size_per_job = 1000)

        # Converted from bare assert statements to unittest assertions for
        # consistency with the other test methods (same pass/fail result,
        # better failure diagnostics).
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup.")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job.")

        job = jobGroups[0].jobs.pop()
        self.assertEqual(job.getFiles(type = "lfn"), ["/some/file/name"],
                         "ERROR: Job contains unknown files.")
        return

    def testFiles1000(self):
        """
        _testFiles1000_

        With size_per_job equal to one file's size, each of the ten files
        must land in its own job.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)
        jobGroups = jobFactory(size_per_job = 1000)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 10)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 1)
        return

    def testFiles2000(self):
        """
        _testFiles2000_

        With room for exactly two files per job, ten files split evenly
        into five two-file jobs.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)
        # Test it with two files per job
        jobGroups = jobFactory(size_per_job = 2000)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 5)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 2)
        return

    def testFiles2500(self):
        """
        _testFiles2500_

        A size that doesn't divide evenly (2500) still yields five
        two-file jobs: partial files are never split.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)
        # Now test it with a size that can't be broken up evenly
        jobGroups = jobFactory(size_per_job = 2500)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 5)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 2)
        return

    def testFiles500(self):
        """
        _testFiles500_

        A size smaller than any single file falls back to one job per
        file.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)
        # Test it with something too small to handle; should return one job per file
        jobGroups = jobFactory(size_per_job = 500)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 10)
        return
if __name__ == '__main__':
unittest.main()
| dmwm/WMCore | test/python/WMCore_t/JobSplitting_t/SizeBased_t.py | SizeBased_t.py | py | 5,925 | python | en | code | 44 | github-code | 13 |
5619245946 | from .base import BaseHandler
from src.plugins import get_server_info
import requests
import json
import os
from conf import settings
from lib.security import gen_key, encrypt
import time
class AgentHandler(BaseHandler):
def cmd(self, command):
import subprocess
ret = subprocess.getoutput(command)
return ret
def handler(self):
# agent 下采集信息
info = get_server_info(handler=self)
# 根据主机名 判断 具体的操作
if not os.path.exists(settings.CERT_PATH):
# 文件不存在 新增
info['type'] = 'create'
else:
# 更新
with open(settings.CERT_PATH, 'r', encoding='utf-8') as f:
old_hostname = f.read()
hostname = info['basic']['data']['hostname'] # 最新主机名
if old_hostname == hostname:
# 更新资产
info['type'] = 'update'
else:
info['type'] = 'update_hostname'
info['old_hostname'] = old_hostname
# 汇报 api
now = time.time()
response = requests.post(
url=self.url,
params={'key': gen_key(now), 'ctime': now},
data=encrypt(json.dumps(info)),
headers={'content-type': 'application/json'},
)
ret = response.json()
if ret['status'] is True:
with open(settings.CERT_PATH, 'w', encoding='utf-8') as f1:
f1.write(ret['hostname'])
| wkiii/CMDB-oldboy | auto_client2/src/engine/agent.py | agent.py | py | 1,573 | python | en | code | 0 | github-code | 13 |
8717133337 | '''
This program aims to get the same functionality as the one described
in the book Alfresco One 5.x Developer's guide
Author: Ignacio De Bonis
Date: 19 October 2021
'''
import cmislib
import base64
from cmislib.model import CmisClient
# user credentials
userName = 'admin'
userPass = 'admin'
# connection settings
repositoryUrl = 'http://localhost:8080/alfresco/api/-default-/public/cmis/versions/1.1/atom'
def createDocument(session):
# Create a Marketing folder with a text file that has relationship with a whitepaper
# Locate document library
path = '/Sites/marketing/documentLibrary'
documentLibrary = session.getObjectByPath(path)
# Locate the marketing folder
marketingFolder = None
for child in documentLibrary.getChildren():
if child.name == 'Marketing':
marketingFolder = child
# create the marketing folder if needed
if marketingFolder == None:
marketingFolder = documentLibrary.createFolder('Marketing')
# prepare properties
filename = 'Mi documento3.txt'
properties = {'cmis:name': filename,
'cmis:objectTypeId': 'D:sc:marketingDoc'}
# prepare content
content = 'Hola Mundo 4!'
mimetype = 'text/plain; charset=UTF-8'
marketingDocument = marketingFolder.createDocument(name=filename, properties=properties,
contentFile=str(base64.b64encode(
content.encode('UTF-8')))[2:-1],
contentType=mimetype, contentEncoding='UTF-8')
# Locate the whitepaper folder
whitepaperFolder = None
for child in documentLibrary.getChildren():
if child.name == 'Whitepapers':
whitepaperFolder = child
# look for a whitepaper
if whitepaperFolder != None:
whitepaper = None
for child in whitepaperFolder.getChildren():
if child.properties['cmis:objectTypeId'] == 'D:sc:whitepaper':
whitepaper = child
# Create relationship between marketingDocument and whitepaper
if whitepaper != None:
relationship = marketingDocument.createRelationship(
whitepaper, 'R:sc:relatedDocuments')
def searchDocuments(session, maxItems=5):
# Perform cmis query and print the result properties
maxItems = str(maxItems)
results = session.query("SELECT * FROM sc:doc", maxItems=maxItems)
for hit in results:
for prop in hit.properties:
print(prop, ":", hit.properties[prop])
print('--------------------------------------')
def deleteDocuments(session):
# Delete all documents from the Marketing folder
# Locate document library
path = '/Sites/marketing/documentLibrary'
documentLibrary = session.getObjectByPath(path)
# Locate the marketing folder
marketingFolder = None
for child in documentLibrary.getChildren():
if child.name == 'Marketing':
marketingFolder = child
# Delete documents in marketing folder
if marketingFolder != None:
for child in marketingFolder.getChildren():
child.delete()
#############################################################
try:
# create session
client = CmisClient(repositoryUrl, userName, userPass)
repo = client.defaultRepository
# createDocument(repo)
searchDocuments(repo, 10)
# deleteDocuments(repo)
except Exception as exception:
print(type(exception).__name__)
| Mr-DeBonis/DevelopersGuide_AlfrescoOne | cmis/CmisClient.py | CmisClient.py | py | 3,560 | python | en | code | 1 | github-code | 13 |
34114335244 | from netqasm.logging.glob import get_netqasm_logger
from netqasm.runtime.application import default_app_instance
from netqasm.sdk import EPRSocket
from netqasm.sdk.external import NetQASMConnection, simulate_application
logger = get_netqasm_logger()
num = 10
def run_alice():
epr_socket = EPRSocket("bob")
with NetQASMConnection("alice", epr_sockets=[epr_socket]) as alice:
outcomes = alice.new_array(num)
def post_create(conn, q, pair):
q.H()
outcome = outcomes.get_future_index(pair)
q.measure(outcome)
epr_socket.create(number=num, post_routine=post_create, sequential=True)
return list(outcomes)
def run_bob():
epr_socket = EPRSocket("alice")
with NetQASMConnection("bob", epr_sockets=[epr_socket]) as bob:
outcomes = bob.new_array(num)
def post_recv(conn, q, pair):
q.H()
outcome = outcomes.get_future_index(pair)
q.measure(outcome)
epr_socket.recv(number=num, post_routine=post_recv, sequential=True)
return list(outcomes)
def test_post_epr():
app_instance = default_app_instance(
[
("alice", run_alice),
("bob", run_bob),
]
)
results = simulate_application(
app_instance, use_app_config=False, enable_logging=False
)[0]
print(results)
assert results["app_alice"] == results["app_bob"]
if __name__ == "__main__":
for _ in range(100):
test_post_epr()
| QuTech-Delft/netqasm | tests/test_external/test_sdk/test_post_epr.py | test_post_epr.py | py | 1,503 | python | en | code | 17 | github-code | 13 |
18817717495 | """Импорты и переменные(константы)"""
from datetime import datetime
import requests
URL_LIST_OPERATIONS = 'https://s3.us-west-2.amazonaws.com/secure.notion-static.com/d22c7143-d55e-4f1d-aa98' \
'-e9b15e5e5efc/operations.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED' \
'-PAYLOAD&X-Amz-Credential=AKIAT73L2G45EIPT3X45%2F20230201%2Fus-west-2%2Fs3%2Faws4_request&X' \
'-Amz-Date=20230201T112404Z&X-Amz-Expires=86400&X-Amz-Signature' \
'=85f20d35a166ca833b589f87bcbf03054c08df9df6c0e934078920f8b6b05683&X-Amz-SignedHeaders=host' \
'&response-content-disposition=filename%3D%22operations.json%22&x-id=GetObject'
def load_list_state():
"""Загрузка и фильтрация списка операций по 'state'"""
req = requests.get(URL_LIST_OPERATIONS)
data = req.json()
state = []
for i in range(len(data)):
if "state" not in data[i]:
continue
else:
state.append(data[i])
return state
def definition_operations_executed():
"""Определяет выполненные (EXECUTED) операции"""
operations = load_list_state()
operations_executed = []
for i in operations:
if i['state'] == 'EXECUTED':
operations_executed.append(i)
return operations_executed
def sort_date_operations():
"""Форматирует дату операций"""
date_operations = definition_operations_executed()
for i in date_operations:
date = i['date']
the_date = datetime.fromisoformat(date)
date_formatted = the_date.strftime('%Y-%m-%d %H:%M:%S')
i['date'] = date_formatted
return date_operations
def date_sort_operation():
"""Сортирует по дате операций"""
list_sort = sort_date_operations()
list_sort.sort(key=lambda x: datetime.strptime(x['date'], '%Y-%m-%d %H:%M:%S'), reverse=True)
return list_sort
def print_last5_operation():
"""Выводит 5 последних операций"""
last5_operation = date_sort_operation()
operation_5 = []
for i in range(5):
operation_5.append(last5_operation[i])
return operation_5
def print_operation(numb):
"""Выводит форматированное сообщение по индексу"""
operation_numb = print_last5_operation()
date = operation_numb[numb]['date']
the_date = datetime.fromisoformat(date)
date_format = the_date.strftime('%d.%m.%Y')
description = operation_numb[numb]['description']
if 'from' in operation_numb[numb]:
from1 = operation_numb[numb]['from']
if 'Maestro' in from1:
from1 = f'{from1[0:12]} {from1[13:15]}** **** {from1[-4:len(from1)]}'
elif 'Visa' in from1:
from1 = f'{from1[0:17]} {from1[14:16]}** **** {from1[-4:len(from1)]}'
elif 'Счет' in from1:
from1 = f'{from1[0:9]} {from1[9:11]}** **** {from1[-4:len(from1)]}'
else:
from1 = ''
to0 = operation_numb[numb]["to"]
to1 = f' Счет **{to0[-4:len(to0)]}'
summa = operation_numb[numb]["operationAmount"]['amount']
currency = operation_numb[numb]['operationAmount']['currency']['name']
message_text = f'{date_format} {description}\n{from1} -> {to1}\n{summa} {currency}\n'
return message_text
| Javoprav/displays_list_5_operations | utils.py | utils.py | py | 3,452 | python | en | code | 1 | github-code | 13 |
30224146009 | # lotto
# 스크랩은 정보제공을
'''
요청을 해서 응답 받는 것은 h와 .. 같다. ###$$?
일반적인 웹페이지는 HTML. # 방금 받았던 파일들?
파이썬은 json을 글자(str)로만 인지할 것.
의미를 가진 dictionary로 만들어야 한다.
'''
from flask import Flask, render_template, request
import requests
import random
url = 'https://www.dhlottery.co.kr/common.do?method=getLottoNumber&drwNo=1'
#
# requests.get('주소')
res = requests.get(url) # url에 있는 html 파일로부터 정보를 긁어와서 res 변수에 저장한다.
# requests.get 이 반환하는 자료형은 response이다. # print( type(res) )
dict_lotto = res.json() ##$$3 자료형 response으로 가져온 것 json 파일을 가져온다?
winner = []
for i in range(7):
# list에서 값을 추가하려면 .append사용!
winner.append(dict_lotto[f'drwtNo{i}']) ##$$? 3.6버젼 부터는 f' {i}' 보기 편하게 만들기 위해서 나온 게 format이라고 하는 str 자료형의 일종 :
# 'drwtno{}.format(i) (~python 3.5)
# 로또 랜덤 추천
your_lotto = random.sample(range(1,46), 6)
print(winner)
print(your_lotto)
# 1번째 코드
for w in winner:
for y in your_lotto:
if (w == y):
count = count + 1
# 2번째 코드:set을 이용해 최적화
count = 0
trial = 0
while True:
your_lotto = sorted(random.sample(range(1,46), 6))
count = len( set(your_lotto) & set(winner))
if count == 6:
print('1등')
elif count == 5:
print('3등')
elif count == 4:
print('4등')
elif count == 3:
print('5등')
else:
print('꽝')
'''
구현을 할 때, 일단 쪼개서 하나부터 푼다.
Divide and conquor***
음식 추천 하나/ 사진 보여주는 것 하나.
'''
# if __name__ == "__main__":
# app.run(debug=True) ##$$
'''
# res.json : json 파일->python Dictionary
# print(res.json()) #( res는 하나 )
# print(res.text)
# json_lotto = res.text ##$$
# dict_lotto = res.json() #Dict니까 키를 가지고 찾아볼 수 있음 ##$$
# winner.append(dict_lotto['drwtNo1'])
# winner.append(dict_lotto['drwtNo2'])
# winner.append(dict_lotto['drwtNo3'])
# winner.append(dict_lotto['drwtNo4'])
# winner.append(dict_lotto['drwtNo5'])
# winner.append(dict_lotto['drwtNo6'])
# winner.append(dict_lotto['drwtNo' + i ]) # 가장 무식하게라도 다 짜고 그 다음에 최적화를 하자!
'''
| haru77/SSAFY2 | day03/dynamic/lotto.py | lotto.py | py | 2,456 | python | ko | code | 0 | github-code | 13 |
10846728425 | import sys
input = sys.stdin.readline
num = int(input())
new_arr = []
for i in range(1000000):
sum = 0
arr = list(str(i))
sum += i
for j in arr:
sum += int(j)
if sum == num:
new_arr.append(i)
break
new_arr.sort()
if new_arr == []:
print(0)
else:
print(new_arr[0])
| woorym/python | 백준/Bronze/2231. 분해합/분해합.py | 분해합.py | py | 370 | python | en | code | 0 | github-code | 13 |
72935272658 | from typing import List
import sys
sys.setrecursionlimit(10**5)
def solution(maps: List[str]) -> List[int]:
answer = []
H, W = len(maps), len(maps[0])
visit = [[False] * W for _ in range(H)]
def dfs(r, c):
if r in [-1, H] or c in [-1, W] or visit[r][c] or maps[r][c] == 'X':
return 0
ls = [[-1, 0], [1, 0], [0, -1], [0, 1]]
days = int(maps[r][c])
visit[r][c] = True
for dr, dc in ls:
days += dfs(r + dr, c + dc)
return days
for r in range(H):
for c in range(W):
if maps[r][c] == 'X' or visit[r][c]:
continue
answer.append(dfs(r, c))
return sorted(answer) if answer else [-1]
| Zeka-0337/Problem-Solving | programmers/level_2/무인도여행.py | 무인도여행.py | py | 724 | python | en | code | 0 | github-code | 13 |
33823239581 | import itertools
from collections import ChainMap
from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase
from courses.models import Course
from courses.services import Status, StatusMessage
from courses.tests.base import DatasetMixin, JWTAuthMixin
User = get_user_model()
class RequirementStatusViewTest(DatasetMixin, JWTAuthMixin, APITestCase):
@classmethod
def setUpClass(cls):
super(RequirementStatusViewTest, cls).setUpClass()
show_gpa_patterns = [True, False]
show_username_patterns = [True, False]
rank_limit_patterns = [3]
config_keys = ['show_gpa', 'show_username', 'rank_limit']
_config_patterns = list(itertools.product(show_gpa_patterns, show_username_patterns, rank_limit_patterns))
cls.config_patterns = [dict(zip(config_keys, pattern)) for pattern in _config_patterns]
gpa_patterns = [{"gpa": 2.0, "ok": True}, {"gpa": None, "ok": False}]
username_patterns = [{"screen_name": "hoge", "ok": True}, {"screen_name": None, "ok": False}]
rank_submit_patterns = [{"rank": 3, "ok": True}, {"rank": None, "ok": False}]
cls.user_status_patterns = list(itertools.product(gpa_patterns, username_patterns, rank_submit_patterns))
cls.all_patterns = []
for config_pattern, user_status_pattern in list(itertools.product(cls.config_patterns, cls.user_status_patterns)):
expected = True
for c, u in zip(config_pattern.values(), user_status_pattern):
expected &= True if not c else u['ok']
combined = dict(**ChainMap(*user_status_pattern))
combined['ok'] = expected
cls.all_patterns.append((config_pattern, combined))
def setUp(self):
super(RequirementStatusViewTest, self).setUp()
self.user = User.objects.create_user(**self.user_data_set[0], is_active=True)
self._set_credentials()
self.course_data = self.course_data_set[0]
self.pin_code = self.course_data['pin_code']
self.course = Course.objects.create_course(**self.course_data)
self.labs = self.create_labs(self.course)
def test_all_patterns(self):
"""ユーザの状態と設定の全ての場合を網羅する"""
self.course.join(self.user, self.pin_code)
for pattern in self.all_patterns:
with self.subTest(pattern=pattern):
config_pattern, user_pattern = pattern
for k, v in config_pattern.items():
setattr(self.course.config, k, v)
self.course.config.save()
for k in ["gpa", "screen_name"]:
setattr(self.user, k, user_pattern[k])
if user_pattern['rank'] is not None:
self.submit_ranks(self.labs, self.user)
else:
self.user.rank_set.all().delete()
self.user.save(update_fields=["gpa", "screen_name"])
resp = self.client.get(f'/courses/{self.course.pk}/status/', format='json')
self.assertEqual(200, resp.status_code)
self.assertEqual(Status.OK if user_pattern['ok'] else Status.NG, resp.data['status'])
def test_no_course_joined(self):
resp = self.client.get(f'/courses/{self.course.pk}/status/', format='json')
expected = {
'status': Status.PENDING,
'status_message': StatusMessage.default_messages[Status.PENDING],
'detail': {
'gpa': False,
'screen_name': False,
'rank_submitted': False
}
}
self.assertEqual(200, resp.status_code)
self.assertEqual(expected, self.to_dict(resp.data))
| StudioAquatan/Saffron | calyx/src/courses/tests/views/test_requirement_status_view.py | test_requirement_status_view.py | py | 3,757 | python | en | code | 0 | github-code | 13 |
38027480928 | from AthenaCommon.AppMgr import ServiceMgr
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc("THistSvc")
#ServiceMgr.THistSvc.Output = ["atlasTest DATAFILE='atlasTest.muons.histo.root' OPT='RECREATE'"];
ServiceMgr.THistSvc.Output = ["truth DATAFILE='RDO_truth.root' OPT='RECREATE'"];
from AthenaCommon.AlgSequence import AlgSequence
job = AlgSequence()
from AthenaCommon import CfgGetter
from DigitizationTests.DigitizationTestsConf import DigiTestAlg,McEventCollectionTestTool,PixelRDOsTestTool,SCT_RDOsTestTool,TRT_RDOsTestTool
job += DigiTestAlg()
if DetFlags.Truth_on():
job.DigiTestAlg.DigiTestTools += [CfgGetter.getPublicTool("McEventCollectionTestTool", checkType=True)]
if DetFlags.Truth_on():
if DetFlags.pileup.any_on():
job.DigiTestAlg.DigiTestTools += [CfgGetter.getPublicTool("PileUpEventInfoTestTool", checkType=True)]
if DetFlags.pixel_on():
job.DigiTestAlg.DigiTestTools += [CfgGetter.getPublicTool("PixelRDOsTestTool", checkType=True)]
if DetFlags.SCT_on():
job.DigiTestAlg.DigiTestTools += [CfgGetter.getPublicTool("SCT_RDOsTestTool", checkType=True)]
if DetFlags.TRT_on():
job.DigiTestAlg.DigiTestTools += [CfgGetter.getPublicTool("TRT_RDOsTestTool", checkType=True)]
| rushioda/PIXELVALID_athena | athena/Simulation/Tests/DigitizationTests/share/postInclude.RDO_Plots.py | postInclude.RDO_Plots.py | py | 1,221 | python | en | code | 1 | github-code | 13 |
14274678716 | lx.eval("user.defNew scale float momentary")
#Set the label name for the popup we're going to call
lx.eval('user.def scale dialogname "Scale Factor"')
#Set the user names for the values that the users will see
lx.eval("user.def scale username {Scale Factor}")
#The '?' before the user.value call means we are calling a popup to have the user
#set the value
try:
lx.eval("?user.value scale")
userResponse = lx.eval("dialog.result ?")
except:
userResponse = lx.eval("dialog.result ?")
lx.out("Thank you for pressing %s." % userResponse)
sys.exit()
#Now that the user set the values, we can just query it
user_input = lx.eval("user.value scale ?")
lx.out('scale', user_input)
#Select all items in the scene
lx.eval('select.type item')
lx.eval('select.all')
#switch to poly component mode
lx.eval('select.type polygon')
#Scale by the user defined value
lx.eval('tool.set TransformScale on')
lx.eval('tool.attr xfrm.transform SX %f' %user_input)
lx.eval('tool.attr xfrm.transform SY %f'%user_input)
lx.eval('tool.attr xfrm.transform SZ %f'%user_input)
lx.eval('tool.doApply')
lx.eval('tool.set TransformScale off')
#freeze geo
lx.eval('!poly.freeze false false 2 true true true')
lx.eval('tool.set actr.origin off')
#Save Dialog commands
lx.eval('dialog.setup fileSave')
lx.eval('dialog.title {Save LWO}')
lx.eval('dialog.fileTypeCustom format:[$NLWO2] username:[LightWave Object] loadPattern:[] saveExtension:[lwo]')
try:
lx.eval('dialog.open')
filename = lx.eval1('dialog.result ?')
lx.eval('!!scene.saveAs {%s} $NLWO2 false' % filename) # The !! is to suppress the data loss warning dialog, remove it if you want that to show.
except:
pass
lx.out('File: %s' % filename)
| Tilapiatsu/modo-tila_customconfig | pp_toolkit/scripts/pp_scale_freeze.py | pp_scale_freeze.py | py | 1,707 | python | en | code | 2 | github-code | 13 |
7835351521 | from ipp import IPPPrinter
def imprimir_via_ipp(ip_impressora_windows, nome_impressora, mensagem):
try:
# Cria uma conexão com a impressora usando o endereço IP
ipp_printer = IPPPrinter("http://" + ip_impressora_windows + "/ipp/print")
# Define os atributos do trabalho de impressão
attributes = {
"document-format": "text/plain",
"requesting-user-name": "Flask Server",
"job-name": "Trabalho de impressao",
}
# Envia o trabalho de impressão
ipp_printer.print_job(attributes, mensagem.encode("utf-8"))
print("Impressão concluída.")
except Exception as e:
print(f"Erro ao conectar à impressora: {e}")
# Exemplo de uso:
ip_impressora_windows = "192.168.0.101" # IP da máquina com a impressora Windows
nome_impressora_compartilhada = "NomeDaImpressoraCompartilhada"
mensagem_para_imprimir = "Teste de impressão via IPP."
imprimir_via_ipp(ip_impressora_windows, nome_impressora_compartilhada, mensagem_para_imprimir)
| luis-fe/Automacao_WMS_InternoMPL | teste.py | teste.py | py | 1,045 | python | pt | code | 0 | github-code | 13 |
34995842968 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import mplfinance as mpf
import plotly.graph_objects as go
#Creating Dataframe for Historical Prices
dfForPiCycle = pd.read_csv('btcPriceHistory1.csv', index_col='Date', thousands=',', parse_dates=True)
dfForPiDate = pd.read_csv('btcPriceHistory1.csv', thousands=',', parse_dates=True)
dfForPiCycle['Date'] = pd.to_datetime(dfForPiDate['Date'])
print(dfForPiCycle)
dfForPiCycle['Close']= pd.to_numeric(dfForPiCycle['Price'])
dfForPiCycle['Open']= pd.to_numeric(dfForPiCycle['Open'])
dfForPiCycle['High']= pd.to_numeric(dfForPiCycle['High'])
dfForPiCycle['Low']= pd.to_numeric(dfForPiCycle['Low'])
ohlc = [dfForPiCycle['Open'], dfForPiCycle['High'], dfForPiCycle['Low'], dfForPiCycle['Close'], dfForPiCycle['Vol.'], dfForPiCycle['Date']]
ohlcheaders= [ 'Open', 'High', 'Low', 'Close', 'Volume', 'Date']
ohlcdf = pd.concat(ohlc, axis=1, keys=ohlcheaders)
ohlcdf = ohlcdf.iloc[::-1]
print(dfForPiCycle)
pd.set_option("display.max_rows", None, "display.max_columns", None)
dfForPiCycleReversed = dfForPiCycle.iloc[::-1]
dfForPiCycle['111MA'] = dfForPiCycleReversed['Close'].rolling(111).mean()
dfForPiCycle['350MA'] = dfForPiCycleReversed['Close'].rolling(350).mean()
dfForPiCycle['2x350'] = 2 * dfForPiCycle['350MA']
stmav = dfForPiCycle['111MA'].round(2)
test = dfForPiCycle['350MA'].round(2)
ltmav = dfForPiCycle['2x350'].round(2)
studiesData = [stmav, test, ltmav]
headers = ['111MA', '350MA', '2x350']
studies = pd.concat(studiesData, axis=1, keys = headers)
df = dfForPiCycle.iloc[::-1]
print(df)
#mpf.plot(df, type='candle' )
figure = go.Figure(
data=[
go.Candlestick(
x=df.index,
open=df['Open'],
high=df['High'],
low=df['Low'],
close=df['Close'],
)
]
)
figure.add_trace(
go.Scatter(
x=df.index,
y=df['111MA'],
line=dict(color='#e74c3c',width=1),
name='111MA'
)
)
figure.add_trace(
go.Scatter(
x=df.index,
y=df['2x350'],
line=dict(color='#263238',width=1),
name='2x350MA'
)
)
# First Pi-Cycle Cross April 6, 2013
figure.add_shape(type="line",
x0=1365231600000, y0=35, x1=1365231600000, y1=100000,
line=dict(color="RoyalBlue",width=1, dash='dash')
)
#Second Pi-Cycle Cross December 3, 2013
figure.add_shape(type="line",
x0=1386057600000, y0=290, x1=1386057600000, y1=100000,
line=dict(color="RoyalBlue",width=1, dash='dash')
)
#Third Pi-Cycle Cross December 16, 2017
figure.add_shape(type="line",
x0=1513411200000, y0=7000, x1=1513411200000, y1=100000,
line=dict(color="RoyalBlue",width=1, dash='dash')
)
#Fourth Pi-Cycle Cross April 12, 2021
figure.add_shape(type="line",
x0=1618210800000, y0=46000, x1=1618210800000, y1=100000,
line=dict(color="RoyalBlue",width=1, dash='dash')
)
figure.update_yaxes(type="log")
figure.show()
| webclinic017/stockAnalyzer-2 | CryptoAnalysis/cryptoDataAnalysisTools/piCycleIndicator.py | piCycleIndicator.py | py | 2,912 | python | en | code | 1 | github-code | 13 |
74429541457 | import csv
import os
import re
from time import time, sleep
from datetime import timedelta
from urllib.request import urlopen
from urllib.error import HTTPError
from .helpers import write_csv as write_csv_
from .helpers import read_csv, get_env
from .config import api_parameters
class LoadAlphaVantage(object):
"""
Alpha Vantage API wrapper class.
"""
def __init__(self, api_key = "demo"):
self.api_key = get_env() or api_key
def __make_url(
self, symbol, api_key, av_fun = "TIME_SERIES_DAILY", output = "compact",
interval = None):
"""
Create url string for api requests.
"""
try:
base_url = "https://www.alphavantage.co/query?"
params = "function={0}&symbol={1}&outputsize={2}&apikey={3}&datatype=csv"\
.format(av_fun, symbol, output, api_key)
if (av_fun == "TIME_SERIES_INTRADAY"):
return "".join([base_url, params, "&interval={0}".format(interval)])
return base_url + params
except NameError as e:
print("Error: {0}".format(e))
def __api_request_delay(self, start_time, request_delay):
"""
Delays API requests x seconds.
"""
delta_time = time() - start_time
sleep(request_delay - delta_time)
def __api_request(
self, symbol, av_fun = "TIME_SERIES_DAILY", output = "compact", interval = None):
"""
Make HTTP GET request to Alpha Vantage API.
Returns HTTP response object.
"""
error_msg_ = "Invalid api call with parameter '{param}'."
assert av_fun in api_parameters()["av_fun"], error_msg_.format(param = "av_fun")
assert output in api_parameters()["output"], error_msg_.format(param = "output")
if(av_fun == "TIME_SERIES_INTRADAY"):
assert interval in api_parameters()["interval"], error_msg_.format(param = "interval")
try:
url = self.__make_url(symbol, self.api_key, av_fun, output, interval)
resp = urlopen(url)
except HTTPError as e:
print("Error: {0}.".format(e))
return
return resp
def __parse_api_request(
self, symbol, av_fun = "TIME_SERIES_DAILY", output = "compact", interval = None):
"""
Parse HTTP response object.
Returns two-dimensional list or None.
"""
resp = self.__api_request(symbol, av_fun, output, interval)
parsed_response = [ line.decode("utf-8").strip("\r\n").split(",") for line in resp]
# Return None if invalid API call.
if(re.match(r'\s*"([E|e]rror)\s([M|m]essage).*"', parsed_response[1][0]) != None):
print("Invalid API call for symbol '{0}'".format(symbol))
return None
return parsed_response
def __parse_interval_overnight(self, row, av_fun):
"""
Parse interval for non-intraday data.
"""
if(av_fun.startswith("TIME_SERIES_DAILY")):
row[3] = "daily"
elif(av_fun.startswith("TIME_SERIES_WEEKLY")):
row[3] = "weekly"
elif(av_fun.startswith("TIME_SERIES_MONTHLY")):
row[3] = "monthly"
def alpha_vantage(
self, symbol, av_fun = "TIME_SERIES_DAILY", output = "compact", interval = None,
write_csv = False, directory = "."):
"""
Pull raw data from the Alpha Vantage API.
Note: Latest data point == t - 1 for daily, weekly and monthly api.
Arguments:
symbol -- Stock ticker symbol as a character string.
av_fun -- Alpha Vantage API function (default == 'TIME_SERIES_DAILY')
output -- Set 'full' or 'compact': 'full' returns complete price history,
'compact' only latest 100 data points. (default == 'compact')
interval -- Intraday data time-interval (default == None)
write_csv -- If set True, download as a csv file. (default == False)
directory -- Directory for csv file downloads. (default == current directory)
Returns a two-dimensional list by default, containing time-series stock price data.
If 'write_csv' == True, returns an empty list.
"""
# Make request and parse response
parsed_resp = self.__parse_api_request(symbol, av_fun, output, interval)
# Return None if response == None
if(parsed_resp == None):
return None
# Remove last row if empty
if(parsed_resp[-1] == []):
parsed_resp = parsed_resp[:-1]
# Insert new columns
for i, row in enumerate(parsed_resp):
x, y, z = ("symbol", "timeseries_api", "interval") if i == 0 else (symbol, av_fun, interval)
row.insert(1, z)
row.insert(1, y)
row.insert(1, x)
# parse interval column if necessary
if(av_fun != "TIME_SERIES_INTRADAY" and i != 0):
self.__parse_interval_overnight(row, av_fun)
# Keep only latest datapoint with full period information.
if(re.search(r"DAILY|MONTHLY|WEEKLY", av_fun)):
del parsed_resp[1]
if (write_csv):
path = "/".join([directory, symbol]) + ".csv"
write_csv_(path, parsed_resp)
return []
return parsed_resp
def __download(
self, symbols, request_limit = True, in_memory = True, **kwargs):
"""
Download interface.
"""
download_result = {}
symbols = symbols if isinstance(symbols, list) else [symbols]
# Download starting time
start_time = time()
# counter for errors
errors = 0
for i, symbol in enumerate(symbols):
print("Downloading {0}/{1}...".format(i + 1, len(symbols)), "\r", end = "")
# API request starting time
start_time_request = time()
data = self.alpha_vantage(symbol, **kwargs)
# Keep only good data
if (data == None):
errors += 1
elif(in_memory):
download_result[symbol] = data
# delay next request with respect to the limits
if request_limit and len(symbols) >= 5 and i < len(symbols) - 1:
self.__api_request_delay(start_time_request, request_delay)
# print relevant statistics
print("Download complete in {0}!"\
.format(timedelta(seconds = time() - start_time)))
if errors > 0:
print("{0} / {1} symbols errored.".format(errors, len(symbols)))
# Return None when writing csv files
if(not in_memory):
return
return download_result
def load_symbols(
self, symbols, av_fun = "TIME_SERIES_DAILY", output = "compact", interval = None,
request_limit = True):
"""
Wrapper for downloading multiple stock time-series.
Arguments:
symbols: Pass multiple ticker symbols as a Python list (or single symbol as str).
request_limit: Set 5 HTTP requests per minute limit. (default == True)
See 'help(LoadAlphaVantage.alpha_vantage)' for the rest of the keyword arguments.
Returns a Python dictionary containing the requested data.
"""
# initialize empty dictionary for stocks
return self.__download(symbols, request_limit = request_limit,
av_fun = av_fun, output = output, interval = interval)
def load_csv(
self, symbols, directory = ".", av_fun = "TIME_SERIES_DAILY", output = "compact",
interval = None, request_limit = True):
"""
Wrapper for multiple csv file downloads.
Arguments:
symbols -- Pass multiple ticker symbols as a Python list (or single symbol as str).
directory -- Set destination directory for the downloads. (default == current directory)
request_limit -- Set 5 HTTP requests per minute limit. (default == True)
See 'help(LoadAlphaVantage.alpha_vantage)' for other keyword arguments.
"""
self.__download(symbols, request_limit = request_limit,
in_memory = False, directory = directory, av_fun = av_fun,
output = output, interval = interval, write_csv = True)
@staticmethod
def read_symbols(path, column_n = 1, skip_rows = 1, sep = ","):
"""
Read a column vector containing stock ticker symbols from csv.
Arguments:
path -- File path as str.
column_n -- Select the column number containing ticker symbols. (default == 1)
skip_rows -- Skip n rows. (default == 1)
sep -- Set the delimiter value. (default == ",")
Returns a Python set of unique ticker symbols.
"""
def filter_fun(i, row, column_n = column_n, skip_rows = skip_rows):
# Skip header row and select column
if (i < skip_rows - 1):
return
return row[column_n - 1]
return set(read_csv(path, sep, fun = filter_fun))
| eenaveis/alpha_vantage_tools | alpha_vantage_tools/av_funcs.py | av_funcs.py | py | 9,124 | python | en | code | 2 | github-code | 13 |
38613403282 | import logging
import requests
import structlog
from flask import current_app
from application.exceptions import RasError, ServiceUnavailableException
log = structlog.wrap_logger(logging.getLogger(__name__))
def get_survey_details(survey_id):
"""
:param survey_id: The survey_id UUID to search with
:return: survey reference
"""
response = service_request(service="survey-service", endpoint="surveys", search_value=survey_id)
return response.json()
def service_request(service, endpoint, search_value):
"""
Makes a request to a different micro service
:param service: The micro service to call to
:param endpoint: The end point of the micro service
:param search_value: The value to search on
:return: response
"""
auth = (current_app.config.get("SECURITY_USER_NAME"), current_app.config.get("SECURITY_USER_PASSWORD"))
try:
service_root = {
"survey-service": current_app.config["SURVEY_URL"],
"collectionexercise-service": current_app.config["COLLECTION_EXERCISE_URL"],
"case-service": current_app.config["CASE_URL"],
"party-service": current_app.config["PARTY_URL"],
}[service]
service_url = f"{service_root}/{endpoint}/{search_value}"
log.info(f"Making request to {service_url}")
except KeyError:
raise RasError(f"service '{service}' not configured", 500)
try:
response = requests.get(service_url, auth=auth)
response.raise_for_status()
except requests.HTTPError:
raise RasError(f"{service} returned a HTTPError")
except requests.ConnectionError:
raise ServiceUnavailableException(f"{service} returned a connection error", 503)
except requests.Timeout:
raise ServiceUnavailableException(f"{service} has timed out", 504)
return response
def collection_exercise_instrument_update_request(action, exercise_id: str) -> object:
    """Notify the collection exercise service of a collection instrument change.

    :param action: the instrument action being acknowledged
    :param exercise_id: UUID of the collection exercise
    :return: the ``requests`` response object
    :raises RasError: if the service URL is unconfigured (500) or the service
        responds with an HTTP error status
    """
    basic_auth = (current_app.config.get("SECURITY_USER_NAME"), current_app.config.get("SECURITY_USER_PASSWORD"))
    payload = {"action": action, "exercise_id": str(exercise_id)}
    try:
        # A missing COLLECTION_EXERCISE_URL key raises KeyError -> RasError below.
        url = f"{current_app.config['COLLECTION_EXERCISE_URL']}/collection-instrument/link"
        log.info("Making request to collection exercise to acknowledge instruments have been changed", action=action)
        response = requests.post(url, json=payload, auth=basic_auth)
        response.raise_for_status()
    except KeyError:
        raise RasError("collection exercise service not configured", 500)
    except requests.HTTPError:
        raise RasError("collection exercise responded with an http error", response.status_code)
    return response
| ONSdigital/ras-collection-instrument | application/controllers/service_helper.py | service_helper.py | py | 2,933 | python | en | code | 2 | github-code | 13 |
34182615617 | import geopandas as gpd
import matplotlib.pyplot as plt
import streamlit as st
import plotly.express as px
from streamlit_plotly_events import plotly_events # pip install streamlit-plotly-events
import random
#plotly events inside streamlit - https://github.com/null-jones/streamlit-plotly-events
# #st.set_page_config(layout='wide')
import os
from pyproj import Transformer
import boto3
from botocore import UNSIGNED
from botocore.client import Config
import json
import laspy
import numpy as np
import pandas as pd
import plotly.graph_objects as go
def convertLatLon(lat, lon, epsgNumber):
    """Project a WGS 1984 (EPSG 4326) lat/lon pair into the given EPSG CRS.

    Returns the projected (x, y) coordinate pair.
    """
    target_crs = "epsg:{}".format(epsgNumber)
    projector = Transformer.from_crs("epsg:4326", target_crs)
    return projector.transform(lat, lon)
def getLazFile(lazfilename):
    """Read a LAZ point-cloud file into a pandas DataFrame.

    Columns: X, Y, Z (scaled to real-world units via the LAZ header
    scale/offset), intensity, classification, return_number,
    number_of_returns.
    """
    with laspy.open(lazfilename) as lz:
        las = lz.read()
        raw = np.array((las.X, las.Y, las.Z, las.intensity, las.classification,
                        las.return_number, las.number_of_returns)).transpose()
        frame = pd.DataFrame(raw, columns=['X', 'Y', 'Z', 'intens', 'class',
                                           'return_number', 'number_of_returns'])
        # Convert the stored integer coordinates into real-world units.
        for axis, idx in (('X', 0), ('Y', 1), ('Z', 2)):
            frame[axis] = frame[axis] * lz.header.scales[idx] + lz.header.offsets[idx]
    return frame
def stackTiles(lat,lon, boxSize=100, prefix ='NY_NewYorkCity/'): # 'NY_FingerLakes_1_2020/' #
    '''
    Parameters:
        lat : latitude centerpoint in WGS 1984 (EPSG 4326)
        lon : longitude centerpoint in WGS 1984 (EPSG 4326)
        boxSize : crop dimensions in X & Y units of source data, typically meters
        prefix : S3 server directory name for public usgs lidar, for available servers: https://usgs.entwine.io/
    Returns:
        lidar_df : Pandas dataframe containing selection of point cloud retrieved from S3 bucket
    '''
    low, high = 0,0
    # Anonymous (unsigned) access to the public USGS lidar bucket.
    s3 = boto3.resource('s3', config=Config(signature_version=UNSIGNED))
    bucket = s3.Bucket('usgs-lidar-public')
    # Read the EPT metadata to learn the dataset CRS and spatial bounds.
    for obj in bucket.objects.filter(Prefix= prefix + 'ept.json'):
        key = obj.key
        body = obj.get()['Body']
        eptJson = json.load(body)
        epsgNumber = eptJson['srs']['horizontal']
        span = eptJson['span']
        [xmin,ymin,zmin,xmax,ymax,zmax] = eptJson['bounds']
    # Project the query point into the dataset CRS and normalise to [0, 1]
    # within the bounds so tile indices can be derived per depth level.
    x,y = convertLatLon(lat,lon,epsgNumber)
    locatorx = ( x - xmin ) / ( xmax - xmin )
    locatory = ( y - ymin ) / ( ymax - ymin )
    try:
        os.mkdir('laz_{}/'.format(prefix))
    except:
        # Bare except: presumably only guarding "directory already exists" —
        # NOTE(review): a FileExistsError catch would be safer; confirm intent.
        pass
    # download highest level laz for entire extent
    if os.path.exists('laz_{}/0-0-0-0.laz'.format(prefix)) == False:
        lazfile = bucket.download_file(prefix + 'ept-data/0-0-0-0.laz','laz_{}/0-0-0-0.laz'.format(prefix))
    else:
        pass
    lidar_df = getLazFile('laz_{}/0-0-0-0.laz'.format(prefix))
    for depth in range(1,10):
        # Tile indices of the query point at this depth of the EPT hierarchy.
        binx = int( (locatorx * 2 ** ( depth ) ) // 1 )
        biny = int( (locatory * 2 ** ( depth ) ) // 1 )
        lazfile = prefix + 'ept-data/{}-{}-{}-'.format(depth,binx,biny)
        for obj in bucket.objects.filter(Prefix = lazfile ):
            key = obj.key
            lazfilename = key.split('/')[2]
            # download subsequent laz files and concat
            if os.path.exists('laz_{}/{}'.format(prefix,lazfilename)) == False:
                lazfile = bucket.download_file(prefix + 'ept-data/'+lazfilename,'laz_{}/{}'.format(prefix,lazfilename))
            else:
                pass
            lidar_df2 = getLazFile('laz_{}/{}'.format(prefix,lazfilename))
            if depth > 7:
                # At fine depths, derive a Z window (mean-4*std .. mean+8*std)
                # from the latest tile to drop elevation outliers below.
                low = lidar_df2['Z'].mean() - lidar_df2['Z'].std()*4
                high = lidar_df2['Z'].mean() + lidar_df2['Z'].std()*8
            else:
                low = 0
                high = 1000
            lidar_df = pd.concat([lidar_df,lidar_df2])
    # Crop to the last computed Z window and the requested box around (x, y).
    lidar_df = lidar_df[lidar_df['Z'] > low ]
    lidar_df = lidar_df[lidar_df['Z'] < high ]
    lidar_df = lidar_df[lidar_df['X'] <= x + boxSize/2 ]
    lidar_df = lidar_df[lidar_df['X'] >= x - boxSize/2 ]
    lidar_df = lidar_df[lidar_df['Y'] <= y + boxSize/2 ]
    lidar_df = lidar_df[lidar_df['Y'] >= y - boxSize/2 ]
    return lidar_df
def readGeoJSON(filepath):
    """Load a GeoJSON file and return its list of feature dictionaries."""
    with open(filepath) as geojson_file:
        document = json.load(geojson_file)
    return document["features"]
###############################################################################
st.set_page_config(
page_title="Data Extraction",
)
st.session_state["Extracted_Lidar_Data"] = None
st.markdown("<h1 style='text-align: center; color: white;'>Select A Location</h1>", unsafe_allow_html=True)
#Add the dropdown for the user to select between NYC map and US map
st.sidebar.title("Select the map to proceed")
map_selection = st.sidebar.selectbox("Select a map", ("NYC", "US"))
#Add a checkbox to the sidebar and ask if the user wants to enter a manual location
manual_location = st.sidebar.checkbox("Enter a manual location", value=False)
#If the user selects the manual location checkbox, then ask for the latitude and longitude
if manual_location:
lat = float(st.sidebar.text_input("Enter the latitude", value="40.770236377930985"))
lon = float(st.sidebar.text_input("Enter the longitude", value="-73.97408389247846"))
#If the user does not select the manual location checkbox, then ask for the address
if map_selection == "Select Map":
st.sidebar.markdown("Please select a map to proceed")
#default map is NYC
filepath = '2010 Census Tracts/geo_export_139fc905-b132-4c03-84d5-ae9e70dded42.shp'
if map_selection == "US":
filepath = 'lidarBoundaries.geojson'
#read in the geojson file
features = readGeoJSON(filepath)
#Read the file into a geopandas dataframe
gdf = gpd.GeoDataFrame.from_features(features)
#st.write(gdf.head())
# Create Plotly figure
fig = px.choropleth_mapbox(
gdf,
geojson=gdf.geometry,
locations=gdf.index,
mapbox_style='carto-positron',
center={'lat': 27.8283, 'lon':-78.5795},
hover_data=['name'],
zoom=3,
opacity=0.25)
fig.update_layout(height=800, width=1000, showlegend=False,
margin=dict(l=0,r=0,b=0,t=0),
paper_bgcolor="Black"
)
elif map_selection == "NYC":
filepath = '2010 Census Tracts/geo_export_139fc905-b132-4c03-84d5-ae9e70dded42.shp'
gdf = gpd.read_file(filepath)
# Create Plotly figure
fig = px.choropleth_mapbox(
gdf,
geojson=gdf.geometry,
locations=gdf.index,
mapbox_style='carto-positron',
center={'lat': 40.64, 'lon':-73.7},#center={'lat': 40.74949210762701, 'lon':-73.97236357852755},
zoom=10,
opacity=0.25)
fig.update_layout(height=600, width=1000, showlegend=False,
margin=dict(l=0,r=0,b=0,t=0),
paper_bgcolor="cadetblue"
)
selected_points = plotly_events(fig)
# # structure of selected_points
# [
# {
# "curveNumber": 0,
# "pointNumber": 2150,
# "pointIndex": 2150
# }
# ]
# Add a input to take in how big the box should be and limit it to 1000
boxSize_input = st.sidebar.number_input("Enter the size of the box in meters", min_value=1, max_value=500, value=100)
#Store the box size in the session state
st.session_state["boxSize"] = boxSize_input
# Add a warning if the box size is too large
if boxSize_input > 299:
st.sidebar.warning("The box size is too large. It may take a while to download the data. Please be patient.")
LidarArea = None
if (selected_points and boxSize_input) or (manual_location and boxSize_input and lat and lon):
# st.write(selected_points)
if selected_points:
single_row = gdf.iloc[selected_points[0]['pointIndex']]
# Get the latitude and longitude of the single row's geometry
point = single_row.geometry.centroid
lat, lon = point.y, point.x
st.write(f"Location : {lat,lon}")
# Print the latitude and longitude
print("Latitude:", lat)
print("Longitude:", lon)
if map_selection == "US":
name = single_row['name']
#st.write(name)
st.write(f"Count of Lidar Points in Mapped Area : {single_row['count']}")
lidarArea = '{}/'.format(name)
elif map_selection == "NYC":
lidarArea = 'NY_NewYorkCity/'
st.write(f"Selected Area : {lidarArea}")
elif manual_location:
st.write(f"Location : {lat,lon}")
lidarArea = 'NY_NewYorkCity/'
st.write(f"Selected Area : {lidarArea}")
# Add a trace to the map to show the selected point
fig.add_trace(go.Scattermapbox(
lat=[lat],
lon=[lon],
mode='markers',
marker=go.scattermapbox.Marker(
size=14
),
text=[f"Selected Point"],
))
else:
st.sidebar.warning("Please select a point on the map or enter a manual location(Only for NYC map)")
#st.plotly_chart(fig)
boxSize = boxSize_input
# Get the point cloud data
lidar_df = stackTiles(lat,lon,boxSize,prefix=lidarArea)
st.write(f"Totol Number of Points {len(lidar_df)}")
# Create 3D scatter plot using Plotly with classification-based colors
fig = px.scatter_3d(lidar_df, x='X', y='Y', z='Z', color='class',
hover_data=['X', 'Y', 'Z', 'class'])
fig.update_traces(marker=dict(size=1.2))
fig.update_layout(scene=dict(aspectmode='data'))
st.plotly_chart(fig)
st.write("Point cloud data:", lidar_df)
# Add a dpwnload button to download the lidar_df
st.download_button(
label="Download data as CSV",
data=lidar_df.to_csv(index=False),
file_name='Raw_lidarData.csv',
mime='text/csv',
)
# Add a button to open the manual location on google earth
# if manual_location:
#Create the link to open the location on google earth
link = f"Open Location on Google Earth - https://earth.google.com/web/search/{lat},{lon}"
st.sidebar.write(link)
# Save the point cloud data to the session state
st.session_state['Extracted_Lidar_Data'] = lidar_df
# # Add a reset button to refresh the page
# if st.sidebar.button("Refresh Selections"):
# selected_points = None
# manual_location = None
# lat = None
# lon = None
# boxSize_input = None
# lidar_df = None
# boxSize_input = None
# st.experimental_rerun()
| Sarang-Pramode/Vistara | pages/1_Download_Raw_Data.py | 1_Download_Raw_Data.py | py | 10,685 | python | en | code | 0 | github-code | 13 |
71808373778 | from vb2py.vbfunctions import *
# fromx vb2py.vbdebug import *
from vb2py.vbconstants import *
#import mlpyproggen.Prog_Generator as PG
import subprocess
""" M40_ShellAndWait:
~~~~~~~~~~~~~~~~~
Module Description:
~~~~~~~~~~~~~~~~~~~
This module provides a function to call external programs and wait for a certain time.
In addition the windows style (Hidden, Maximized, Minimized, ...) and the
"Ctrl Break" behavior could be defined.
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Chip Pearson
http://www.cpearson.com/excel/ShellAndWait.aspx
This module contains code for the ShellAndWait function that will Shell to a process
and wait for that process to End before returning to the caller.
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# VB2PY (CheckDirective) VB directive took path 1 on Win64
https://foren.activevb.de/forum/vba/thread-25588/beitrag-25588/VBA7-Win64-CreateProcess-WaitFo/
"""
__SYNCHRONIZE = 0x100000
# Enumeration 'ShellAndWaitResult'
Success = 0
Failure = 1
Timeout = 2
InvalidParameter = 3
SysWaitAbandoned = 4
UserWaitAbandoned = 5
UserBreak = 6
# Enumeration 'ActionOnBreak'
IgnoreBreak = 0
AbandonWait = 1
PromptUser = 2
TaskId = Long()
__STATUS_ABANDONED_WAIT_0 = 0x80
__STATUS_WAIT_0 = 0x0
__WAIT_ABANDONED = ( __STATUS_ABANDONED_WAIT_0 + 0 )
__WAIT_OBJECT_0 = ( __STATUS_WAIT_0 + 0 )
__WAIT_TIMEOUT = 258
__WAIT_FAILED = 0xFFFFFFFF
__WAIT_INFINITE = - 1
def ShellAndWait(ShellCommand, TimeOutSeconds, ShellWindowState, BreakKey):
    """Run an external command and wait for it to finish or time out.

    Python replacement for the VB ShellAndWait routine: the command is run
    synchronously via subprocess.run (shell=False).

    Parameters:
        ShellCommand    : command to execute; must be non-empty.
        TimeOutSeconds  : seconds to wait; 0 means wait forever; must be >= 0.
        ShellWindowState: one of the VbAppWinStyle constants (validated only;
                          window styling is not applied by subprocess.run).
        BreakKey        : one of IgnoreBreak / AbandonWait / PromptUser
                          (validated only; break handling lived in the dead
                          VB translation and is not implemented here).

    Returns one of the module-level ShellAndWaitResult codes:
        Success (0), Failure (1), Timeout (2), InvalidParameter (3).
    """
    # Parameter validation, mirroring the original VB checks.
    if Trim(ShellCommand) == vbNullString:
        return InvalidParameter
    if TimeOutSeconds < 0:
        return InvalidParameter
    if BreakKey not in (AbandonWait, IgnoreBreak, PromptUser):
        return InvalidParameter
    if ShellWindowState not in (vbHide, vbMaximizedFocus, vbMinimizedFocus,
                                vbMinimizedNoFocus, vbNormalFocus,
                                vbNormalNoFocus):
        return InvalidParameter
    try:
        if TimeOutSeconds == 0:
            # No timeout requested: block until the process exits.
            process = subprocess.run(ShellCommand, shell=False)
        else:
            process = subprocess.run(ShellCommand, timeout=TimeOutSeconds,
                                     shell=False)
        # Non-zero exit status maps to Failure, zero to Success.
        return Failure if process.returncode != 0 else Success
    except subprocess.TimeoutExpired:
        return Timeout
    except Exception as error:
        # Local import: this module never imports logging at the top, so the
        # original handler raised NameError instead of logging the failure.
        import logging
        logging.debug("ShellandWait - " + str(error))
        print(error)
        return Failure
# VB2PY (UntranslatedCode) Option Explicit
# VB2PY (UntranslatedCode) Option Compare Text
# VB2PY (UntranslatedCode) Private Declare PtrSafe Function WaitForSingleObject Lib "kernel32" (ByVal hHandle As LongPtr, ByVal dwMilliseconds As LongLong) As LongLong
## VB2PY (CheckDirective) VB directive took path 1 on VBA7
# VB2PY (UntranslatedCode) Private Declare PtrSafe Function OpenProcess Lib "kernel32" (ByVal dwDesiredAccess As Long, ByVal bInheritHandle As Long, ByVal dwProcessId As Long) As Long
# VB2PY (UntranslatedCode) Private Declare PtrSafe Function CloseHandle Lib "kernel32" (ByVal hObject As Long) As Long
## VB2PY (CheckDirective) VB directive took path 1 on Win64
| haroldlinke/pyMobaLedLib | python/proggen/M40_ShellandWait.py | M40_ShellandWait.py | py | 10,195 | python | en | code | 3 | github-code | 13 |
69894389458 | #Assignment: Find Characters
# Write a program that takes a list of strings and a string containing a single character, and prints a new list of all the strings containing that character.
char = 'o'
word_list = ['hello','world','my','name','is','Anna']
def findwords(x):
matching = [s for s in x if "o" in s]
return matching
# for i in x:
# if 'o' in i:
# print (i)
print(findwords(word_list))
| Jarvis2021/Coding-Dojo | python_stack1/algos/PracticeTest/test5.py | test5.py | py | 438 | python | en | code | 0 | github-code | 13 |
32213758882 | """seq2seq neural machine translation with one layer RNN."""
"""
I borrowed some code from PyTorch Tutorial
http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html.
"""
import os
import sys
import time
import pickle
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
sys.path.append('../data/')
sys.path.append('../')
from preprocess import *
from utils import *
SOS_token = 1
EOS_token = 2
UNK_token = 3
class SimpleEncoder(nn.Module):
    """Single-direction GRU encoder over embedded token ids."""

    def __init__(self, num_embeddings, embedding_size, mini_batch_size,
        hidden_size, n_layer, GPU_use):
        super(SimpleEncoder, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.n_layer = n_layer
        self.mini_batch_size = mini_batch_size
        self.GPU_use = GPU_use
        # (batch, seq) long ids -> (batch, seq, embedding_size)
        self.embedding = nn.Embedding(num_embeddings, embedding_size)
        # GRU consumes (seq, batch, embedding_size); dropout only takes
        # effect between stacked layers.
        self.gru = nn.GRU(embedding_size, hidden_size, n_layer, dropout=0.1)

    def forward(self, input_variable, hidden):
        """Embed a (batch, seq) id tensor and run it through the GRU.

        Returns (output, hidden): output is (seq, batch, hidden_size).
        """
        n_batch = input_variable.size()[0]
        n_steps = input_variable.size()[1]
        # NOTE: .view (not a transpose) reshapes to (seq, batch, emb),
        # matching the original implementation exactly.
        embedded = self.embedding(input_variable).view(n_steps, n_batch, -1)
        return self.gru(embedded, hidden)

    def initHidden(self, mini_batch_size):
        """Return a zero initial hidden state (n_layer, batch, hidden)."""
        h0 = Variable(torch.zeros(self.n_layer, mini_batch_size,
            self.hidden_size))
        if self.GPU_use:
            h0 = h0.cuda()
        return h0
class SimpleDecoder(nn.Module):
    """GRU decoder conditioned on the encoder's final state (context vector).

    The context vector is concatenated onto the previous hidden state each
    step, so the GRU runs with hidden width hidden_size*2; the extra half is
    sliced off again before the hidden state is returned.
    """
    def __init__(self, num_embeddings, embedding_size, mini_batch_size,
        hidden_size, n_layer, GPU_use):
        super(SimpleDecoder, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_size = embedding_size
        self.mini_batch_size = mini_batch_size
        self.hidden_size = hidden_size
        self.n_layer = n_layer
        self.GPU_use = GPU_use
        self.embedding = nn.Embedding(num_embeddings, embedding_size)
        # Hidden width doubled to make room for the concatenated context.
        self.gru = nn.GRU(embedding_size, hidden_size*2, n_layer)
        self.out = nn.Linear(hidden_size*2, num_embeddings)
        # NOTE(review): LogSoftmax() without dim= relies on the old default
        # axis behaviour — confirm against the PyTorch version targeted here.
        self.softmax = nn.LogSoftmax()
    def forward(self, word_inputs, prev_hidden, context_vector):
        # context_vector: 1, mini_batch_size, hidden_size * num_directions
        # prev_hidden : n_layer, mini_batch_size, hidden_size
        seq_len = word_inputs.size()[1]
        batch_size = word_inputs.size()[0]
        n_layer = prev_hidden.size()[0]
        embedded = self.embedding(word_inputs).view(seq_len, batch_size, -1)
        # self.n_layer used as the unsqueeze *dim* index; this only lines up
        # with the layer axis because n_layer == 1 in this script — TODO confirm.
        context_vector = torch.unsqueeze(context_vector, self.n_layer)
        hidden = torch.cat((prev_hidden, context_vector), 2)
        # hidden : n_layer, mini_batch_size, hidden_size*2
        output, hidden = self.gru(embedded, hidden)
        # output: seq_len, bath_size, hidden_size*2
        # Drop the context half so the next step receives plain hidden_size.
        hidden = hidden[:,:,:self.hidden_size]
        output = torch.squeeze(output, 0)
        output = self.softmax(self.out(output))
        # output: batch_size, num_embeddings
        return output, hidden
    def initHidden(self, context_vector):
        """Build the initial hidden state as tanh of the repeated context."""
        context_vector = torch.unsqueeze(context_vector, self.n_layer)
        context_vector = context_vector.repeat(self.n_layer, 1,1)
        hidden = F.tanh(context_vector)
        if self.GPU_use:
            hidden = hidden.cuda()
        return hidden
def train(encoder, decoder, encoder_optimizer, decoder_optimizer, encoder_input,
    target_variable, criterion, GPU_use):
    """Run one encoder/decoder training step and return the per-token loss.

    The decoder is fed its own greedy prediction at every step (no teacher
    forcing); decoding stops early if EOS is predicted.
    """
    mini_batch_size = encoder_input.size()[0]
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    encoder_hidden = encoder.initHidden(mini_batch_size)
    encoder_output, encoder_hidden = encoder(encoder_input, encoder_hidden)
    # encoder_output: seq_len, batch, hidden_size
    # Last encoder output acts as the fixed context vector for decoding.
    context_vector = encoder_output[-1]
    decoder_hidden = decoder.initHidden(context_vector)
    decoder_input = Variable(torch.LongTensor([SOS_token] * mini_batch_size))
    decoder_input = torch.unsqueeze(decoder_input, 1)
    # decoder_input: batch_size, seq_len
    if GPU_use:
        decoder_input = decoder_input.cuda()
    loss = 0
    # target_variable: batch_size, seq_len
    target_length = target_variable.size()[1]
    for di in range(target_length):
        target = target_variable[:,di]
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden,
            context_vector)
        topv, topi = decoder_output.data.topk(1)
        # NOTE(review): topi[0][0] is the argmax of batch element 0 only, and
        # it is fed to every batch row; fine for mini_batch_size=1 as used in
        # __main__, but wrong for larger batches — confirm before reuse.
        predicted = topi[0][0]
        decoder_input = Variable(torch.LongTensor([predicted]*mini_batch_size))
        decoder_input = torch.unsqueeze(decoder_input, 1)
        if GPU_use:
            decoder_input = decoder_input.cuda()
        target = torch.squeeze(target, 0)
        loss += criterion(decoder_output, target)
        if predicted == EOS_token:
            break
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    # loss dim?
    # NOTE(review): loss.data[0] is the legacy (pre-0.4) scalar access; on
    # modern PyTorch this would need loss.item() — confirm target version.
    return loss.data[0] / target_length
def test(encoder, decoder, input_sentence, output_lang, GPU_use,
    TEST_MAXLENGTH=30):
    """Greedy-decode one input sentence and return the predicted token ids.

    Puts both modules in eval mode; decoding stops at EOS_token or after
    TEST_MAXLENGTH-1 steps. Returns a list of output-vocabulary indices.
    """
    encoder.train(False)
    decoder.train(False)
    mini_batch_size = input_sentence.size()[0] #1
    encoder_hidden = encoder.initHidden(mini_batch_size)
    encoder_output, encoder_hidden = encoder(input_sentence, encoder_hidden)
    # Last encoder output is the context vector, as in train().
    context_vector = encoder_output[-1]
    decoder_hidden = decoder.initHidden(context_vector)
    decoder_input = Variable(torch.LongTensor([SOS_token]*1))
    decoder_input = torch.unsqueeze(decoder_input, 1)
    if GPU_use:
        decoder_input = decoder_input.cuda()
    result = []
    for i in range(TEST_MAXLENGTH-1):
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden,
            context_vector)
        topv, topi = decoder_output.data.topk(1)
        # Greedy choice: highest-scoring token becomes the next input.
        predicted = topi[0][0]
        decoder_input = Variable(torch.LongTensor([predicted]*1))
        decoder_input = torch.unsqueeze(decoder_input, 1)
        if GPU_use:
            decoder_input = decoder_input.cuda()
        result.append(predicted)
        if predicted == EOS_token:
            break
    return result
if __name__ == "__main__":
data_dir = '../data/kor-eng/kor.txt'
SEQ_MAX_LENGTH = 30
TEST_MAXLENGTH = 30
GPU_use = False
mini_batch_size = 1
learning_rate = 0.001
hidden_size = 1000
embedding_size = 1000
n_layer = 1
n_epochs = 8
print_every = 1000
plot_every = 10
train_input, train_target, test_input, test_target,input_lang, output_lang,\
train_input_lengths, train_target_lengths\
= getTrainAndTestSet(data_dir, mini_batch_size, SEQ_MAX_LENGTH, GPU_use)
print("Data Preparation Done.")
encoder = SimpleEncoder(input_lang.n_words, embedding_size, mini_batch_size,
hidden_size, n_layer, GPU_use)
decoder = SimpleDecoder(output_lang.n_words, embedding_size, mini_batch_size
, hidden_size, n_layer, GPU_use)
criterion = nn.CrossEntropyLoss()
encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=learning_rate)
if GPU_use:
encoder.cuda()
decoder.cuda()
print("Training...")
# train
start = time.time()
plot_losses = []
print_loss_total = 0
plot_loss_total = 0
total_iter = len(train_input) * n_epochs *1.
iter_cnt = 0
for epoch in range(n_epochs):
for i in range(len(train_input)):
iter_cnt += 1
input_var = train_input[i]
target_var = train_target[i]
loss = train(encoder, decoder, encoder_optimizer, decoder_optimizer,
input_var, target_var, criterion, GPU_use)
print_loss_total += loss
plot_loss_total += loss
if iter_cnt % print_every == 0:
print_loss_avg = print_loss_total / print_every*1.
print_loss_total = 0
print('%s (%d %d%%) %.4f' % ((timeSince(start,iter_cnt/total_iter)),
iter_cnt, iter_cnt/total_iter * 100, print_loss_avg))
if iter_cnt % plot_every == 0:
plot_loss_avg = plot_loss_total / (plot_every*1.)
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses, 'vanilaRNN')
print("Training done.")
#save model
torch.save(encoder.state_dict(), './rnn_encoder.pkl')
torch.save(decoder.state_dict(), './rnn_decoder.pkl')
print("Model Saved.")
print("Testing...")
# test
results = []
for s in test_input:
query = [input_lang.index2word[idx] for idx in s.data[0]]
translated_idx = test(encoder, decoder, s, output_lang, GPU_use,
TEST_MAXLENGTH)
translated = [output_lang.index2word[idx] for idx in translated_idx]
results.append((query, translated))
saveTranslatedResults(results, 'vanilaRNN_result.txt')
# turn off training mode
print("Test done.")
| SnowIsWhite/Machine-Translation-in-PyTorch | vanila_rnn/vanila_rnn.py | vanila_rnn.py | py | 9,612 | python | en | code | 0 | github-code | 13 |
17696627813 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 10 16:52:56 2019
@author: vaish
"""
import math
# Classify the entered integer as prime or composite by trial division.
n = int(input("Enter the number: "))
# Numbers below 2 are neither prime nor composite.
if n < 2:
    print("A number should be greater than 2")
    quit()
elif n == 2:
    print("It's a Prime Number")
    quit()
i = 2
# Trial division only needs to test divisors up to sqrt(n).
limit = int(math.sqrt(n))
while i <= limit:
    if n % i == 0:
        # Found a divisor, so n is composite; quit() ends the script here.
        print("its a Composite number")
        quit()
    i += 1
# No divisor found up to sqrt(n): n is prime.
print("its a prime number")
73727287057 | from sac3 import llm_models
class Evaluate:
    """Generate LLM answers for an original question and its perturbations.

    The model-dispatch logic was previously duplicated in self_evaluate and
    perb_evaluate; it now lives in the private _ask_model helper.
    """

    def __init__(self, model):
        self.model = model
        self.prompt_temp = 'Answer the following question:\n'

    def _ask_model(self, question, temperature):
        """Build the prompt for one question and route it to the configured LLM."""
        prompt = self.prompt_temp + '\nQ:' + question
        # llm model: GPTs, open-source models (falcon, guanaco)
        if self.model in ['gpt-3.5-turbo', 'gpt-4']:
            return llm_models.call_openai_model(prompt, self.model, temperature)  # openai model call
        elif self.model == 'guanaco-33b':
            return llm_models.call_guanaco_33b(prompt, max_new_tokens = 200)
        elif self.model == 'falcon-7b':
            return llm_models.call_falcon_7b(prompt, max_new_tokens = 200)
        # Other open-sourced llms would be dispatched here. The original code
        # crashed with UnboundLocalError for unknown models; fail explicitly.
        raise ValueError('unsupported model: {}'.format(self.model))

    def self_evaluate(self, self_question, temperature, self_num):
        '''
        Inputs:
        self_question - original user query
        temperature - [0,1] for LLM randomness
        self_num - how many generated responses given this question

        Outputs:
        self_responses - generated responses given this question with different temperatures
        '''
        return [self._ask_model(self_question, temperature) for _ in range(self_num)]

    def perb_evaluate(self, perb_questions, temperature):
        '''
        Inputs:
        perb_questions - perturbed questions that are semantically equivalent to the original question
        temperature - [0,1] for LLM randomness

        Outputs:
        perb_responses - generated responses given the perturbed questions
        '''
        return [self._ask_model(question, temperature) for question in perb_questions]
| intuit/sac3 | sac3/evaluator.py | evaluator.py | py | 2,350 | python | en | code | 1 | github-code | 13 |
class Biblioteka:
    """A small library inventory that tracks books and prints copy counts.

    The original kept all list state as *class* attributes, so every
    Biblioteka instance silently shared one inventory; the state now lives
    on each instance (set in __init__).
    """

    def __init__(self, limit_wypozyczen):
        # Maximum number of simultaneous loans (kept for interface parity).
        self.limit_wypozyczen = limit_wypozyczen
        # Per-instance state (previously shared class attributes — bug fix).
        self.lista_ksiazek = []
        self.lista_egzemplarzy = []
        self.lista_krotek = []
        self.lista_ostateczna = []
        self.czy_jest_w_liscie = False

    def sortuj(self, e):
        """Sort key for the summary entries: the book title."""
        return e['tytul']

    def dostepne_egzemplarze(self):
        """Print one "('tytul', 'autor', count)" line per distinct title/author."""
        for ksiazka in self.lista_ksiazek:
            # Has this title/author already been summarised?
            for egzemplarze in self.lista_egzemplarzy:
                if egzemplarze.tytul == ksiazka.tytul and egzemplarze.autor == ksiazka.autor:
                    self.czy_jest_w_liscie = True
            if not self.czy_jest_w_liscie:
                self.lista_ostateczna.append({'tytul': ksiazka.tytul, 'autor': ksiazka.autor, 'ilosc_egzemplarzy': self.liczEgzemplarze(ksiazka)})
                self.czy_jest_w_liscie = False
                self.lista_egzemplarzy.append(ksiazka)
            self.czy_jest_w_liscie = False
        self.lista_ostateczna.sort(key=self.sortuj)
        for lista in self.lista_ostateczna:
            print("('" + lista['tytul'].strip() + "'" + ", " + "'" + lista['autor'].strip() + "', " + lista['ilosc_egzemplarzy'].strip() + ")")

    def dodaj_egzemplarz_ksiazki(self, ksiazka):
        """Register one copy of *ksiazka* in the inventory."""
        self.lista_ksiazek.append(ksiazka)

    def liczEgzemplarze(self, aktualna_ksiazka):
        """Return (as a string) how many copies match this title/author."""
        wynik = 0
        for ksiazka in self.lista_ksiazek:
            if aktualna_ksiazka.tytul == ksiazka.tytul and aktualna_ksiazka.autor == ksiazka.autor:
                wynik += 1
        return str(wynik)
class Ksiazka:
    """Record describing a book: title, author and publication year."""

    def __init__(self, tytul, autor, rok):
        self.rok = rok
        self.autor = autor
        self.tytul = tytul
class Egzemplarz:
    """A physical copy of a book: publication year plus loan status."""

    def __init__(self, rok_wydania, wypozyczony):
        self.wypozyczony = wypozyczony
        self.rok_wydania = rok_wydania
class Czytelnik:
    """A library reader, identified by surname."""

    def __init__(self, nazwisko):
        self.nazwisko = nazwisko
# Read the number of book records, then one "(title, author, year)" line each.
liczba_ksiazek = input().strip()
n = int(liczba_ksiazek)
lista_ksiazek = [input().strip(' ') for ksiazka in range(n)]
splitter = []
biblioteka = Biblioteka(10)
for ksiazkaInput in lista_ksiazek:
    # Strip parentheses and double quotes, then split on ", " into title/author/year.
    usun_nawias = ksiazkaInput.replace("(", "")
    usun_nawias2 = usun_nawias.replace(")", "")
    usun_cudzyslow = usun_nawias2.replace("\"", "")
    splitter = usun_cudzyslow.split(", ")
    ksiazka = Ksiazka(tytul=splitter[0], autor=splitter[1], rok=splitter[2])
    biblioteka.dodaj_egzemplarz_ksiazki(ksiazka)
# Print the deduplicated "(title, author, copies)" summary.
biblioteka.dostepne_egzemplarze()
| uep-inz-opr/7_biblioteka1-klakalecka | main.py | main.py | py | 2,423 | python | pl | code | 0 | github-code | 13 |
40986545599 | import argparse
import os
import sys
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from argoverse.evaluation.eval_forecasting import compute_forecasting_metrics
from argoverse.evaluation.competition_util import generate_forecasting_h5
from data.argoverse.argo_csv_dataset import ArgoCSVDataset
from data.argoverse.utils.torch_utils import collate_fn_dict
from model.crat_pred import CratPred
# Make newly created directories readable, writable and descendible for everyone (chmod 777)
os.umask(0)
# Make project-local packages importable regardless of the current working directory.
root_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, root_path)
# Command line: model hyper-parameters (from CratPred) plus split and checkpoint path.
parser = argparse.ArgumentParser()
parser = CratPred.init_args(parser)
parser.add_argument("--split", choices=["val", "test"], default="val")
parser.add_argument("--ckpt_path", type=str, default="/path/to/checkpoint.ckpt")
def main():
    """Run CRAT-Pred inference on the selected Argoverse split.

    For ``val``, computes forecasting metrics against ground truth; for
    ``test``, writes a competition submission HDF5 next to the checkpoint.
    """
    args = parser.parse_args()
    if args.split == "val":
        dataset = ArgoCSVDataset(args.val_split, args.val_split_pre, args)
    else:
        dataset = ArgoCSVDataset(args.test_split, args.test_split_pre, args)
    data_loader = DataLoader(
        dataset,
        batch_size=args.val_batch_size,
        num_workers=args.val_workers,
        collate_fn=collate_fn_dict,
        shuffle=False,
        pin_memory=True,
    )
    # Load model with weights
    model = CratPred.load_from_checkpoint(checkpoint_path=args.ckpt_path)
    model.eval()
    # Iterate over dataset and generate predictions
    predictions = dict()
    gts = dict()
    cities = dict()
    for data in tqdm(data_loader):
        data = dict(data)
        with torch.no_grad():
            output = model(data)
        # Keep only the first (focal) agent's prediction per sample as numpy.
        output = [x[0:1].detach().cpu().numpy() for x in output]
        for i, (argo_id, prediction) in enumerate(zip(data["argo_id"], output)):
            predictions[argo_id] = prediction.squeeze()
            cities[argo_id] = data["city"][i]
            gts[argo_id] = data["gt"][i][0] if args.split == "val" else None
    # Evaluate or submit
    if args.split == "val":
        # NOTE(review): return values are unused — presumably
        # compute_forecasting_metrics prints/logs its results; confirm.
        results_6 = compute_forecasting_metrics(
            predictions, gts, cities, 6, 30, 2)
        results_1 = compute_forecasting_metrics(
            predictions, gts, cities, 1, 30, 2)
    else:
        generate_forecasting_h5(predictions, os.path.join(
            os.path.dirname(os.path.dirname(args.ckpt_path)), "test_predictions.h5"))
if __name__ == "__main__":
    main()
| schmidt-ju/crat-pred | test.py | test.py | py | 2,425 | python | en | code | 47 | github-code | 13 |
40566378351 | print('place holder 실습')
import tensorflow as tf
# Scalar placeholder fed at session run time; the graph computes _x * 4 + 3.
_x = tf.placeholder(tf.float32, shape=[])
three = tf.constant(3)
four = tf.constant(4)
# fix: was `tf.multiply(x, four)` — `x` is an undefined name (NameError);
# the placeholder above is bound to `_x`.
mul = tf.multiply(_x, four)
# NOTE(review): three/four are int constants while _x is float32 — TF1 may
# reject the mixed-dtype ops; confirm with a real TF1 runtime.
add = tf.add(mul, three)
sess = tf.Session()
y = sess.run(add, feed_dict={_x:10})
print(y)
| ladofa/ky2018 | tensorflow_2.py | tensorflow_2.py | py | 261 | python | en | code | 0 | github-code | 13 |
11669580680 | import json
import boto3
import uuid
import requests
from requests.auth import HTTPBasicAuth
def put_openSearch(payload):
    """Index *payload* as a document in the AWS OpenSearch ``events`` index.

    payload: JSON-serialisable dict of event fields to index.
    """
    url = 'https://search-test-ed5firxe6hyd5qkuy63q72nvsu.us-east-1.es.amazonaws.com/events/_doc'
    headers = {
        'Content-Type': 'application/json'
    }
    req_payload= json.dumps(payload)
    print(req_payload)
    # SECURITY NOTE(review): credentials are hardcoded in source — move them to
    # environment variables / Secrets Manager and rotate the exposed password.
    response = requests.post(url,auth=HTTPBasicAuth('ccbd-project', 'Lionsproj.123'),data=req_payload,headers=headers)
    print(response.text)
def join_event(user_id, event_id):
    """Record that *user_id* joined *event_id* in the cross-account
    ``user_events`` DynamoDB table.

    Returns None on success and the string "Error when joining event" on
    failure.
    NOTE(review): callers cannot reliably distinguish success from failure
    with this convention — consider raising instead; confirm callers first.
    """
    # Assume the cross-account role that grants DynamoDB access.
    sts_connection = boto3.client('sts')
    acct_b = sts_connection.assume_role(
        RoleArn="arn:aws:iam::283759418474:role/LionsDynamoRole",
        RoleSessionName="cross_acct_lambda_acess"
    )
    ACCESS_KEY = acct_b['Credentials']['AccessKeyId']
    SECRET_KEY = acct_b['Credentials']['SecretAccessKey']
    SESSION_TOKEN = acct_b['Credentials']['SessionToken']
    client = boto3.client(
        'dynamodb',
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=SECRET_KEY,
        aws_session_token=SESSION_TOKEN,
    )
    try:
        # DynamoDB item in low-level attribute-value format; the uuid is a
        # random 64-bit row key for the membership record.
        Item = {
            "uuid": {
                "S": str(uuid.uuid4().int & (1<<64)-1)
            },
            "user_id": {
                "S": str(user_id)
            },
            "item_id": {
                "S": str(event_id)
            }
        }
        response = client.put_item(TableName='user_events',Item = Item)
    except Exception as error:
        print(error)
        #send_plain_email_failure(email, event_name)
        return "Error when joining event"
def put_eventDetails(event):
    """Create an event: store it in DynamoDB, index it in OpenSearch,
    auto-join the organizer, and push it to AWS Personalize.

    event: dict with keys organizer_id, description_text, latitude, longitude
    (required by the code below) and optional start_local, end_local,
    name_text, category, online_event.
    Returns {'statusCode': 200, 'body': {'item_id': ...}} on success;
    returns None when any step raises (the error is only printed).
    """
    print(event)
    # Assume the cross-account role that grants DynamoDB access.
    sts_connection = boto3.client('sts')
    acct_b = sts_connection.assume_role(
        RoleArn="arn:aws:iam::283759418474:role/LionsDynamoRole",
        RoleSessionName="cross_acct_lambda_acess"
    )
    ACCESS_KEY = acct_b['Credentials']['AccessKeyId']
    SECRET_KEY = acct_b['Credentials']['SecretAccessKey']
    SESSION_TOKEN = acct_b['Credentials']['SessionToken']
    client = boto3.client(
        'dynamodb',
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=SECRET_KEY,
        aws_session_token=SESSION_TOKEN,
    )
    try:
        # Random 64-bit identifiers for the new event and its venue.
        event_id = str(uuid.uuid4().int & (1<<64)-1)
        venue_id = str(uuid.uuid4().int & (1<<64)-1)
        event['venue_id'] = venue_id
        # Low-level DynamoDB attribute-value item for the event_details table.
        item = {
            "item_id":{
                "S": event_id
            },
            "start_local":{
                "S": event['start_local'] if 'start_local' in event else ""
            },
            "organizer_id":{
                "N": event['organizer_id']
            },
            "name_text":{
                "S": event['name_text'] if 'name_text' in event else ""
            },
            "venue_id": {
                "N" : event['venue_id']
            },
            "start_timezone": {
                "S": "America/New_York"
            },
            "end_local": {
                "S": event['end_local'] if 'end_local' in event else ""
            },
            "description_text": {
                "S": event['description_text']
            },
            "category": {
                "S": event['category'] if 'category' in event else ""
            },
            "online_event": {
                "BOOL": (str(event['online_event']).lower() == "true") if 'online_event' in event else False
            },
            "latitude":{
                "N": str(event['latitude'])
            },
            "longitude":{
                "N": str(event['longitude'])
            }
        }
        response = client.put_item(TableName='event_details', Item=item)
        # The organizer is automatically a member of their own event.
        join_event(event['organizer_id'], event_id)
        print("response here",response)
        event_data = {}
        event_data["item_id"] = event_id
        # Build the slimmer document indexed in OpenSearch (with geo point).
        es_data={}
        Item = item
        es_data['description_text'] = Item['description_text']['S']
        es_data['category'] = Item['category']['S']
        es_data['online_event'] = str(Item['online_event']['BOOL']).lower()
        es_data['name_text'] = Item['name_text']['S']
        es_data['item_id'] = Item['item_id']['S']
        es_data['coordinate'] = {"lat":float(event['latitude']),"lon":float(event['longitude'])}
        put_openSearch(es_data)
        print(event)
        add_event_to_personalize(event_id, event)
        return {
            'statusCode': 200,
            'body': event_data
        }
    except Exception as error:
        # NOTE(review): failures are swallowed and the function returns None;
        # the Lambda caller then sees no status code — confirm this is intended.
        print("Error when storing data", error)
def add_event_to_personalize(event_id, event):
    """Push the new event into the AWS Personalize ITEMS dataset.

    Errors are caught and printed only; the caller is not notified.
    """
    try:
        personalizeRt = boto3.client(
            'personalize-events',
        )
        online_event = 'FALSE'
        print(event_id)
        if 'online_event' in event.keys() and str(event['online_event']).lower() == "true":
            online_event ='TRUE'
        # NOTE(review): `properties` below is JSON built by string concatenation;
        # any quote/brace in a field breaks it — json.dumps would be safer. It
        # also requires start_local/end_local/name_text/category/description_text
        # to be present — confirm against lambda_handler's input contract.
        response = personalizeRt.put_items(
            datasetArn='arn:aws:personalize:us-east-1:810123839900:dataset/event-rec/ITEMS',
            items=[
                {
                    'itemId': event_id,
                    'properties': "{\"onlineEvent\": \""+ online_event +"\", \"organizerId\":" + event['organizer_id'] + ", \"venueId\":" + event['venue_id'] +",\"category\":\""+ event['category'] +"\", \"nameText\": \""+ event['name_text'] +"\", \"descriptionText\": \""+ event['description_text'] +"\",\"startTimezone\": \"America/New_York\", \"startLocal\": \""+ event['start_local'] +"\", \"endLocal\": \""+ event['end_local'] +"\"}"
                }
            ]
        )
    except Exception as error:
        print("Error personalize ", error)
def lambda_handler(event, context):
    """AWS Lambda entry point: create the event described by *event*.

    Expected keys (see put_eventDetails): organizer_id, description_text,
    latitude, longitude; optional start_local, end_local, name_text,
    category, online_event.
    """
    # start_local:
    # organizer_id:
    # name_text:
    # shareable:
    # end_local:
    # summary:
    # category:
    # online_event:
    return put_eventDetails(event)
| JyothsnaKS/lions-meetup | app/stack/CreateEvents/lambda_function.py | lambda_function.py | py | 6,776 | python | en | code | 0 | github-code | 13 |
36333117285 | from collections import Counter
from itertools import combinations
def solution(orders, course):
    """2021 KAKAO "menu renewal": pick course-menu candidates.

    For each requested course size, every combination of items that appears in
    at least two orders and is tied for the highest occurrence count is kept.
    Returns the candidates as sorted strings.

    Note: each entry of *orders* is normalised (sorted) in place, as before.
    """
    answer = []
    # Sort each order so identical item sets yield identical combinations.
    for i in range(len(orders)):
        orders[i] = ''.join(sorted(orders[i]))
    for c in course:
        # Count every size-c combination across all orders.
        cnt = Counter(comb for order in orders for comb in combinations(order, c))
        if not cnt:
            continue
        # Hoisted out of the inner loop: the original recomputed
        # max(cnt.values()) once per candidate (O(k^2) overall).
        best = cnt.most_common(1)[0][1]
        if best > 1:
            answer.extend(''.join(comb) for comb, occurrences in cnt.items() if occurrences == best)
    # (leftover debug print of `answer` removed)
    return sorted(answer)
# Ad-hoc check against a hand-made example; prints the selected combinations.
orders = ['ABCDE', 'AB', 'CD', 'ADE', 'XYZ', 'XYZ', 'ACD']
course = [2, 3, 5]
print(solution(orders, course))
| bywindow/Algorithm | src/BruteForce/프로그래머스_메뉴리뉴얼_Lv2.py | 프로그래머스_메뉴리뉴얼_Lv2.py | py | 889 | python | en | code | 0 | github-code | 13 |
9580687888 | import json
import csv
import boto3
import os
import uuid
s3 = boto3.resource('s3', aws_access_key_id=os.environ['ACCESS_KEY'],
aws_secret_access_key=os.environ['SECRET_KEY'])
def main(request):
    """Summarise satellite-order outputs into a CSV and publish it to S3.

    request: Flask request whose JSON body carries ``predecessor_outputs``,
    a mapping of task id -> {name, order_url, s3url}.
    Returns a dict with the workflow output path and an HTTP-style status.
    """
    FILEPATH = os.environ['FILEPATH']
    S3_BUCKET_NAME = os.environ['S3_BUCKET_NAME']

    fargs = request.get_json(silent=True)
    pred_outs = fargs['predecessor_outputs']

    # Id of the file
    file_log = uuid.uuid4().hex
    outfile_name = file_log + '_summary.csv'

    # Write the whole CSV in a single pass. The previous version reopened the
    # file in append mode for every row and omitted newline='', which the csv
    # module requires to avoid blank lines on Windows.
    header = ["Name", "order_url", "output_url"]
    satellite_output = [value for key, value in pred_outs.items()]
    with open(FILEPATH + outfile_name, 'w', newline='') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerow(header)
        for item in satellite_output:
            name = item['name']
            order_url = item['order_url']
            s3url = item['s3url']
            print(f"name: {name} order output: {order_url} s3 output: {s3url}")
            writer.writerow([name, order_url, s3url])

    # Upload to S3 and make the object publicly readable.
    s3.meta.client.upload_file(FILEPATH + outfile_name, S3_BUCKET_NAME,
                               'output/Planet/' + outfile_name)
    print("Uploaded")
    object_acl = s3.ObjectAcl(S3_BUCKET_NAME, 'output/Planet/' + outfile_name)
    response = object_acl.put(ACL='public-read')

    print("End workflow - file at ", FILEPATH + outfile_name)
    out_dict = {
        'statusCode': 200,
        'workflow_output': 'output/' + outfile_name,
        'body': json.dumps('End of workflow.')
    }
    return out_dict
| SWEEP-Inc/SWEEP-Workflows | demo-workflows/meadows-demo/tasks/end_wf/main.py | main.py | py | 1,795 | python | en | code | 0 | github-code | 13 |
34852673181 | from urllib import response
from flask import Flask, jsonify
import requests
import json
app = Flask(__name__)
@app.route("/", methods=['GET'])
def index():
    """Fetch an Intuit developer docs page and return a constant "hello".

    NOTE(review): the fetched payload is only printed, never returned — this
    looks like exploratory/debug code; confirm before relying on it.
    """
    url = f'https://developer.intuit.com/app/developer/qbo/docs/api/accounting/most-commonly-used/invoice#read-an-invoice'
    # Local name shadows the module-level `from urllib import response` import.
    response = requests.get(url)
    data = response.content
    d = json.dumps(data.decode('utf-8'))
    print(d)
    # print(req.content)
    # print(req.json)
    # return jsonify(req.json)
    # return json.dumps(req.json)
    return "hello"
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger —
    # do not run this way in production.
    app.run(debug=True)
| najimpatel/Api_get_data | app.py | app.py | py | 596 | python | en | code | 0 | github-code | 13 |
14231622577 | import logging
from typing import Any, Dict, List, Optional, Tuple
from backend.common.cache import Cache, CacheEnum, CacheKeyPrefixEnum, cachedmethod
from backend.common.error_codes import error_codes
from backend.component import iam, resource_provider
from backend.service.models.resource import ResourceApproverAttribute
from backend.util.basic import chunked
from backend.util.url import url_join
from .constants import FETCH_MAX_LIMIT
from .models import (
ResourceAttribute,
ResourceAttributeValue,
ResourceInstanceBaseInfo,
ResourceInstanceInfo,
ResourceTypeProviderConfig,
SystemProviderConfig,
)
# 只暴露ResourceProvider,其他只是辅助ResourceProvider的
__all__ = ["ResourceProvider"]
logger = logging.getLogger("app")
class SystemProviderConfigService:
    """Provides the callback configuration of an access system."""
    @cachedmethod(timeout=60)  # cache for 1 minute
    def get_provider_config(self, system_id) -> SystemProviderConfig:
        """Fetch the system's callback info, including auth credentials and host."""
        system_info = iam.get_system(system_id, fields="provider_config")
        provider_config = system_info["provider_config"]
        return SystemProviderConfig(**provider_config)
class ResourceTypeProviderConfigService:
    """Provides the callback configuration of a resource type."""
    # TODO: the backend should offer an API to query a single resource type of a
    # system instead of this bulk query over all of the system's resource types.
    @cachedmethod(timeout=60)  # cache for 1 minute
    def _list_resource_type_provider_config(self, system_id: str) -> Dict[str, Dict]:
        """Fetch every resource type of *system_id*, keyed by id, for get_provider_config."""
        resource_types = iam.list_resource_type([system_id], fields="id,provider_config")[system_id]
        provider_config_dict = {i["id"]: i["provider_config"] for i in resource_types}
        return provider_config_dict
    def get_provider_config(self, system_id: str, resource_type_id: str) -> ResourceTypeProviderConfig:
        """Return the callback configuration of one resource type."""
        provider_config_dict = self._list_resource_type_provider_config(system_id)
        return ResourceTypeProviderConfig(**provider_config_dict[resource_type_id])
class ResourceProviderConfig:
    """Aggregated provider configuration: auth info, host and URL path."""
    def __init__(self, system_id: str, resource_type_id: str):
        self.system_id = system_id
        self.resource_type_id = resource_type_id
        self.auth_info, self.host = self._get_auth_info_and_host()
        self.path = self._get_path()
    def _get_auth_info_and_host(self) -> Tuple[Dict[str, str], str]:
        """Get the system's provider config (auth + host) from the IAM backend."""
        provider_config = SystemProviderConfigService().get_provider_config(self.system_id)
        return {"auth": provider_config.auth, "token": provider_config.token}, provider_config.host
    def _get_path(self) -> str:
        """Get the URL path used to call back for this resource type."""
        provider_config = ResourceTypeProviderConfigService().get_provider_config(
            self.system_id, self.resource_type_id
        )
        return provider_config.path
class ResourceIDNameCache:
    """Short-lived Redis cache mapping resource IDs to display names.

    Populated by list/search/fetch instance calls so that id/name validation
    shortly afterwards does not have to call the provider again.
    """

    def __init__(self, system_id: str, resource_type_id: str):
        self.system_id = system_id
        self.resource_type_id = resource_type_id
        self.cache = Cache(CacheEnum.REDIS.value, CacheKeyPrefixEnum.CALLBACK_RESOURCE_NAME.value)

    def _make_key(self, resource_id: str) -> str:
        """Build the namespaced cache key for one resource ID."""
        return f"{self.system_id}:{self.resource_type_id}:{resource_id}"

    def set(self, id_name_map: Dict[str, str]):
        """Cache id -> display_name pairs for five minutes.

        Cache trouble must not break the main flow, so failures are only logged.
        """
        payload = {self._make_key(resource_id): display_name for resource_id, display_name in id_name_map.items()}
        try:
            self.cache.set_many(payload, timeout=5 * 60)
        except Exception:  # pylint: disable=broad-except noqa
            logger.exception("set resource id:name cache fail")

    def get(self, ids: List[str]) -> Dict[str, Optional[str]]:
        """Return the cached names for *ids*; IDs without a cached name are omitted."""
        key_to_id = {self._make_key(resource_id): resource_id for resource_id in ids}
        try:
            cached = self.cache.get_many(list(key_to_id.keys()))
        except Exception:  # pylint: disable=broad-except noqa
            logger.exception("get resource id:name cache fail")
            cached = {}
        found = {}
        for cache_key, resource_id in key_to_id.items():
            display_name = cached.get(cache_key)
            if display_name is not None:
                found[resource_id] = display_name
        return found
class ResourceProvider:
    """Resource provider: wraps one system's resource callback API with caching."""
    # Attribute names with special meaning in the callback protocol.
    name_attribute = "display_name"
    approver_attribute = "_bk_iam_approver_"
    def __init__(self, system_id: str, resource_type_id: str):
        """Initialise the auth info and the callback client."""
        self.system_id = system_id
        self.resource_type_id = resource_type_id
        # Resolve auth credentials, host and URL path for this system/resource type.
        provider_config = ResourceProviderConfig(system_id, resource_type_id)
        auth_info, host, url_path = provider_config.auth_info, provider_config.host, provider_config.path
        url = url_join(host, url_path)
        self.client = resource_provider.ResourceProviderClient(system_id, resource_type_id, url, auth_info)
        # Cache service for id -> display_name lookups.
        self.id_name_cache = ResourceIDNameCache(system_id, resource_type_id)
    def _get_page_params(self, limit: int, offset: int) -> Dict[str, int]:
        """Build the pagination parameters for a callback request."""
        return {
            "page_size": limit,
            "page": (offset // limit) + 1,
            # page_size/page is the new standard; limit/offset kept for the old protocol.
            "limit": limit,
            "offset": offset,
        }
    def list_attr(self) -> List[ResourceAttribute]:
        """List the attributes of this resource type usable in permission policies."""
        return [
            ResourceAttribute(**i)
            for i in self.client.list_attr()
            # Some systems wrongly return built-in _bk_* attributes (e.g.
            # _bk_iam_path_) or "id"; filter those out defensively.
            if not i["id"].startswith("_bk_") and i["id"] != "id"
        ]
    def list_attr_value(
        self, attr: str, keyword: str = "", limit: int = 10, offset: int = 0
    ) -> Tuple[int, List[ResourceAttributeValue]]:
        """List the values of one attribute of this resource type."""
        filter_condition = {"keyword": keyword}
        page = self._get_page_params(limit, offset)
        count, results = self.client.list_attr_value(attr, filter_condition, page)
        return count, [ResourceAttributeValue(**i) for i in results]
    def list_instance(
        self,
        ancestors: List[Dict[str, str]],
        limit: int = 10,
        offset: int = 0,
        action_system_id: str = "",
        action_id: str = "",
    ) -> Tuple[int, List[ResourceInstanceBaseInfo]]:
        """List resource instances under the given ancestor chain."""
        filter_condition: Dict[str, Any] = {}
        if ancestors:
            filter_condition["ancestors"] = ancestors
            filter_condition["parent"] = {"type": ancestors[-1]["type"], "id": ancestors[-1]["id"]}
        if action_system_id and action_id:
            filter_condition["action"] = {"system": action_system_id, "id": action_id}
        page = self._get_page_params(limit, offset)
        count, results = self.client.list_instance(filter_condition, page)
        # Convert the raw dicts into the model objects we need.
        instance_results = [ResourceInstanceBaseInfo(**i) for i in results]
        # Cache the id -> name pairs just fetched.
        if instance_results:
            self.id_name_cache.set({i.id: i.display_name for i in instance_results})
        return count, instance_results
    def search_instance(
        self,
        keyword: str,
        ancestors: List[Dict[str, str]],
        limit: int = 10,
        offset: int = 0,
        action_system_id: str = "",
        action_id: str = "",
    ) -> Tuple[int, List[ResourceInstanceBaseInfo]]:
        """Search resource instances by keyword under the given ancestor chain."""
        # Note: very similar to list_instance, but the callback protocol keeps
        # them as two separate APIs so search can be extended independently.
        filter_condition: Dict = {"keyword": keyword}
        if ancestors:
            filter_condition["ancestors"] = ancestors
            filter_condition["parent"] = {"type": ancestors[-1]["type"], "id": ancestors[-1]["id"]}
        if action_system_id and action_id:
            filter_condition["action"] = {"system": action_system_id, "id": action_id}
        page = self._get_page_params(limit, offset)
        count, results = self.client.search_instance(filter_condition, page)
        # Convert the raw dicts into the model objects we need.
        instance_results = [ResourceInstanceBaseInfo(**i) for i in results]
        # Cache the id -> name pairs just fetched.
        if instance_results:
            self.id_name_cache.set({i.id: i.display_name for i in instance_results})
        return count, instance_results
    def fetch_instance_info(
        self, ids: List[str], attributes: Optional[List[str]] = None
    ) -> List[ResourceInstanceInfo]:
        """Bulk-fetch instance attributes (display_name etc.) for *ids*.

        When *attributes* is None, all attributes the system returns are kept.
        """
        # The fetch_instance_info callback has a batch-size limit, so the ids
        # are queried in chunks of FETCH_MAX_LIMIT.
        results = []
        page_ids_list = chunked(ids, FETCH_MAX_LIMIT)
        for page_ids in page_ids_list:
            filter_condition = {"ids": page_ids, "attrs": attributes} if attributes else {"ids": page_ids}
            page_results = self.client.fetch_instance_info(filter_condition)
            results.extend(page_results)
        # Convert the raw dicts into struct objects, validating "id" presence.
        instance_results = []
        for i in results:
            if "id" not in i:
                raise error_codes.RESOURCE_PROVIDER_VALIDATE_ERROR.format(
                    f"fetch_instance_info[system:{self.system_id} resource_type_id:{self.resource_type_id}"
                    + f" resource:{i}] id must not be empty"
                )
            instance_results.append(
                ResourceInstanceInfo(
                    id=i["id"],
                    # Tolerance: the system may return every attribute, so keep
                    # only the requested ones (or all, when none were requested).
                    attributes={k: v for k, v in i.items() if not attributes or k in attributes},
                )
            )
        # Populate the id/name cache when all attributes or the name attribute
        # were requested.
        if instance_results and (not attributes or self.name_attribute in attributes):
            self.id_name_cache.set(
                {
                    i.id: i.attributes[self.name_attribute]
                    # only entries that actually carry the name attribute are cached
                    for i in instance_results
                    if self.name_attribute in i.attributes
                }
            )
        return instance_results
    def fetch_instance_name(self, ids: List[str]) -> List[ResourceInstanceBaseInfo]:
        """Bulk-fetch the display names of *ids*."""
        # Serve from the cache first; only cache misses go to the provider.
        cache_id_name_map = self.id_name_cache.get(ids)
        results = [ResourceInstanceBaseInfo(id=_id, display_name=name) for _id, name in cache_id_name_map.items()]
        # Uncached ids are fetched live from the callback API.
        not_cached_ids = [_id for _id in ids if _id not in cache_id_name_map]
        not_cached_results = self.fetch_instance_info(not_cached_ids, [self.name_attribute])
        for one in not_cached_results:
            if self.name_attribute not in one.attributes:
                raise error_codes.RESOURCE_PROVIDER_VALIDATE_ERROR.format(
                    f"fetch_instance_info[system:{self.system_id} resource_type_id:{self.resource_type_id}"
                    + f" resource_id:{one.id}] attribute:{self.name_attribute} must not be empty"
                )
            results.append(ResourceInstanceBaseInfo(id=one.id, display_name=one.attributes[self.name_attribute]))
        return results
    def fetch_instance_approver(self, ids: List[str]) -> List[ResourceApproverAttribute]:
        """Bulk-fetch the instance-approver attribute of *ids*.

        Instances without an approver attribute are silently skipped.
        """
        instance_infos = self.fetch_instance_info(ids, [self.approver_attribute])
        results = []
        for one in instance_infos:
            if self.approver_attribute not in one.attributes:
                continue
            # The callback may return either a list of approvers or a single string.
            approver = one.attributes[self.approver_attribute]
            if isinstance(approver, list) and approver:
                results.append(ResourceApproverAttribute(id=one.id, approver=approver))
            elif isinstance(approver, str) and approver:
                results.append(ResourceApproverAttribute(id=one.id, approver=[approver]))
        return results
| TencentBlueKing/bk-iam-saas | saas/backend/service/resource.py | resource.py | py | 13,159 | python | en | code | 24 | github-code | 13 |
26152058978 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test.py
# @Author : ()
# @Link :
# @Date : 2/13/2019, 1:47:06 PM
import sys
from PyQt5 import QtWidgets, uic
class MainWindow(QtWidgets.QMainWindow):
    """Main window whose widgets are loaded from a Qt Designer .ui file."""
    def __init__(self, uiPath='', parent=None):
        super(MainWindow, self).__init__(parent)
        # PyQt5: load the .ui file and bind its widgets onto this window.
        self.ui = uic.loadUi(uiPath, self)
    def closeEvent(self, event):
        '''
        Override closeEvent: accept the close and terminate the interpreter.
        '''
        event.accept()
        quit()
if __name__ == '__main__':
    # app = None
    # One QApplication per process; the window loads its layout from the .ui file.
    app = QtWidgets.QApplication(sys.argv)
    w = MainWindow('./study/UI/item_test1.ui')
    w.show()
sys.exit(app.exec_()) | IvanYangYangXi/pyqt_study | study/SimpleWin.py | SimpleWin.py | py | 692 | python | en | code | 0 | github-code | 13 |
74638060816 | import sys
input = sys.stdin.readline  # fast input; deliberately shadows the builtin
# Baekjoon 11399 (ATM): with service times sorted ascending, person i's total
# wait is the prefix sum of line[:i+1]; the answer is the sum of those waits.
n = int(input())
line = list(map(int, input().split()))
line.sort()
answer = 0
# Keep a running prefix sum instead of re-summing line[:i+1] each
# iteration (the original loop was O(n^2)).
waited = 0
for i in range(n):
    waited += line[i]
    answer += waited
print(answer) | Coding-Test-Study-Group/Coding-Test-Study | kkkwp/baekjoon/11399_ATM.py | 11399_ATM.py | py | 183 | python | en | code | 4 | github-code | 13 |
8115213652 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re
from collections import namedtuple
from robofab.world import OpenFont
from fontTools.agl import AGL2UV
SMOOTH_THRESHOLD = 0.95
MySegment = namedtuple('MySegment', ('type', 'points', 'segment'))
if __name__ == "__main__":
    # Usage: smoothing.py <path-to-UFO>; writes <basename>_smoothed.ufo alongside.
    path = sys.argv[1]
    path = re.sub("{}+$".format(os.sep), "", path)  # drop trailing path separators
    basename, _ = os.path.splitext(os.path.basename(path))
    dirname = os.path.dirname(path)
    output_ufo = os.path.join(dirname, "{}_smoothed.ufo".format(basename))
    font = OpenFont(path)
    for g in font:
        #print "----- {} -----".format(g.name)
        for con in g:
            # Build MySegment records that prepend each segment with the
            # previous segment's end point, so direction vectors can be taken
            # at both ends of every joint.
            segments = []
            first_seg = None
            prev_seg = None
            for seg in con:
                if prev_seg is None:
                    first_seg = seg
                else:
                    # NOTE(review): this inner `is not None` check is redundant —
                    # it is always true inside the else branch.
                    if prev_seg is not None:
                        points = [prev_seg.points[-1]]
                        points.extend(seg.points)
                        myseg = MySegment(type=seg.type, points=points, segment=seg)
                        segments.append(myseg)
                prev_seg = seg
            # treat first segment
            if segments[-1].points[-1].x != first_seg.points[0].x or segments[-1].points[-1].y != first_seg.points[0].y:
                points = [segments[-1].points[-1]]
                points.extend(first_seg.points)
                myseg = MySegment(type=seg.type, points=points, segment=first_seg)
                segments.append(myseg)
            segments_len = len(segments)
            for i in range(segments_len):
                seg = segments[i]
                #print [(pt.x, pt.y) for pt in seg.points]
                next_seg = segments[(i+1)%segments_len]
                # Outgoing direction of this segment and incoming direction of the next.
                v1 = complex(seg.points[-1].x - seg.points[-2].x, seg.points[-1].y - seg.points[-2].y)
                v2 = complex(next_seg.points[1].x - next_seg.points[0].x, next_seg.points[1].y - next_seg.points[0].y)
                # When v1 and v2 have almost same direction, they are `smooth'.
                # i.e. the inner product of v1 and v2 are almost |v1|*|v2|.
                if v1.real*v2.real + v1.imag*v2.imag > abs(v1) * abs(v2) * SMOOTH_THRESHOLD:
                    seg.segment.smooth = True
    font.save(output_ufo)
| derwind/misc_scripts | smoothing.py | smoothing.py | py | 2,321 | python | en | code | 0 | github-code | 13 |
16472406213 | """
entrytool models.
"""
import datetime
from django.db.models import fields
from opal.core import subrecords
from django.db.models import Max, DateField, DateTimeField
from opal import models
from opal.core import lookuplists
from django.utils.translation import gettext_lazy as _
class Demographics(models.Demographics):
    """Patient demographics, extended with a unique external identifier."""
    _icon = ''
    external_identifier = fields.CharField(
        blank=True, null=True, max_length=256, unique=True,
        verbose_name=_("External Identifier")
    )
class Hospital(lookuplists.LookupList):
    """Lookup list of hospitals."""
    class Meta:
        verbose_name = _("Hospital")
        verbose_name_plural = _("Hospitals")
class SCT(models.EpisodeSubrecord):
    """A stem cell transplant performed during an episode."""
    SCT_TYPES = (
        ("Allogenic", _("Allogenic")),
        ("Autologous", _("Autologous")),
        ("Unknown", _("Unknown")),
    )
    order_by = "-sct_date"
    sct_date = fields.DateField(verbose_name=_("Date of SCT"))
    hospital = models.ForeignKeyOrFreeText(
        Hospital, verbose_name=_("Hospital")
    )
    sct_type = fields.CharField(
        max_length=12,
        verbose_name=_("Type of SCT"),
        choices=SCT_TYPES,
        null=True
    )
    class Meta:
        verbose_name = _("Stem Cell Transplant")
        verbose_name_plural = _("Stem Cell Transplants")
class PatientStatus(models.PatientSubrecord):
    """Singleton status record: deceased / lost-to-follow-up details."""
    _is_singleton = True
    DEATH_CAUSES = (
        ("Disease", _("Disease")),
        ("Complications of Disease", _("Complications of Disease")),
        ("Other", _("Other"))
    )
    deceased = fields.NullBooleanField(verbose_name=_("Deceased"), blank=True, null= True)
    lost_to_follow_up = fields.NullBooleanField(verbose_name=_("Lost to Follow-Up"), null = True, blank = True)
    death_date = fields.DateField(
        null=True, verbose_name=_("Date of Death"), blank=True
    )
    death_cause = fields.CharField(
        max_length=100,
        choices=DEATH_CAUSES,
        verbose_name=_("Cause of Death"),
        blank=True,
        null=True,
    )
    lost_to_follow_up_date = fields.DateField(
        blank=True, null=True, verbose_name=_("Lost to Follow-up")
    )
    class Meta:
        verbose_name = _("Patient status")
        verbose_name_plural = _("Patient status")
# TODO does this need to be kept? Can the line number be an attribute of the episode?
class TreatmentLine(models.EpisodeSubrecord):
    """Ordinal number of the treatment line within an episode."""
    nb = fields.IntegerField(verbose_name=_("Treatment Line"))
    class Meta:
        verbose_name = _("Treatment Line")
        verbose_name_plural = _("Treatment Lines")
class FollowUp(models.PatientSubrecord):
    """A follow-up visit with its lab measurements."""
    # NOTE(review): _sort names "followup_date" but the field below is
    # "follow_up_date" — looks like a typo; confirm against opal's sort logic.
    _sort = "followup_date"
    _icon = "fa fa-stethoscope"
    hospital = models.ForeignKeyOrFreeText(Hospital, verbose_name=_("Hospital"))
    follow_up_date = fields.DateField(verbose_name=_("Visit date"))
    LDH = fields.FloatField(blank=True, null=True, verbose_name=_("LDH"))
    beta2m = fields.FloatField(blank=True, null=True, verbose_name=_("beta2m"))
    albumin = fields.FloatField(blank=True, null=True, verbose_name=_("Albumin"))
    mprotein_urine = fields.FloatField(blank=True, null=True, verbose_name=_("MProtein Urine"))
    # NOTE(review): verbose_name below is a plain string, not wrapped in _()
    # like its siblings — presumably a missed translation call; confirm.
    mprotein_serum = fields.FloatField(blank = True, null = True ,verbose_name=("MProtein Serum"))
    mprotein_24h = fields.FloatField(blank = True, null = True, verbose_name=_("Mprotein in 24 hour urine"))
    class Meta:
        verbose_name = _("Follow-up")
        verbose_name_plural = _("Follow-ups")
class PatientLoad(models.PatientSubrecord):
    """
    A singleton that describes what created a patient whether
    it was the loaded in from an external source like a file
    or created by the front end.
    """
    _is_singleton = True
    LOADED_FROM_FILE = "Loaded From File"
    CREATED_FROM_UI = "Created From UI"
    SOURCE = (
        (LOADED_FROM_FILE, _("Loaded From File")),
        (CREATED_FROM_UI, _("Created From UI")),
    )
    # How the patient entered the system; defaults to UI creation.
    source = fields.CharField(
        blank=True,
        null=True,
        max_length=256,
        choices=SOURCE,
        verbose_name=_("source"),
        default=CREATED_FROM_UI
    )
    validated = fields.BooleanField(
        default=False, verbose_name=_("Validated"),
    )
    has_errors = fields.BooleanField(
        default=False, verbose_name=_("Has Errors"),
    )
    data_quality_reviewed = fields.BooleanField(
        default=False, verbose_name=_("Data Quality Reviewed")
    )
    data_quality_reviewed_date = fields.DateField(blank=True, null=True)
    class Meta:
        verbose_name = _("Patient Load")
        verbose_name_plural = _("Patient Loads")
def get_date_fields(subrecord):
    """
    Return the names of the subrecord's date/datetime fields,
    excluding the created/updated timestamps.
    """
    return [
        model_field.name
        for model_field in subrecord._meta.get_fields()
        if isinstance(model_field, (DateTimeField, DateField))
        and model_field.name not in ("created", "updated")
    ]
def get_max_date(patient, max_fields):
    """
    Given a list of annotated max_date field names on the patient,
    return the most recent value, or None when every field is empty.
    """
    max_dates = [
        getattr(patient, max_field) for max_field in max_fields
        if getattr(patient, max_field)
    ]
    # max(..., default=None) subsumes the previous special-casing of the
    # empty and single-element lists.
    return max(max_dates, default=None)
def sort_by_newest_to_oldest(patients):
    """
    Takes a queryset
    Annotates with all the dates connected with the patient
    excluding created/updated
    Returns a list of patients ordered by newest to oldest.
    """
    max_fields = []
    for subrecord in subrecords.subrecords():
        # PatientStatus dates (death / lost-to-follow-up) are deliberately
        # excluded from the recency ranking.
        if subrecord == PatientStatus:
            continue
        date_fields = get_date_fields(subrecord)
        for date_field in date_fields:
            # Episode subrecords hang off the episode relation; patient
            # subrecords hang directly off the patient.
            if issubclass(subrecord, models.EpisodeSubrecord):
                field = f"episode__{subrecord.__name__.lower()}__{date_field}"
            else:
                field = f"{subrecord.__name__.lower()}__{date_field}"
            max_field = f"max_{field}"
            patients = patients.annotate(**{max_field: Max(field)})
            max_fields.append(max_field)
    max_date_and_patient = [
        (get_max_date(patient, max_fields), patient) for
        patient in patients
    ]
    return [
        # Patients with no dates at all sort as datetime.min (i.e. oldest).
        i[1] for i in sorted(
            max_date_and_patient,
            key=lambda x: x[0] or datetime.datetime.min.date(),
            reverse=True
        )
    ]
| solventrix/HONEUR_eCRF | entrytool/models.py | models.py | py | 6,545 | python | en | code | 0 | github-code | 13 |
41266792484 | class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
nums.sort()
output = []
n = len(nums)
def backtrack(index, curr):
output.append(curr[:])
for i in range(index, n):
if i != index and nums[i] == nums[i-1]:
continue
else:
curr.append(nums[i])
backtrack(i + 1, curr)
curr.pop()
backtrack(0, [])
return output
| AshwinRachha/LeetCode-Solutions | 90-subsets-ii/90-subsets-ii.py | 90-subsets-ii.py | py | 539 | python | en | code | 0 | github-code | 13 |
9892001594 | import cv2
import logging
log_format = '%(created)f:%(levelname)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format) # log to file filename='example.log',
TAG = "bg-detect-app:"
def main(data_recv, results_send):
    """Receive foreground masks from *data_recv* and display them until Escape.

    data_recv / results_send: pipe-like endpoints with poll/recv_pyobj and
    send_pyobj (presumably ZeroMQ-style sockets — confirm with the caller).
    """
    logging.debug(TAG + "inside main")
    while True:
        # NOTE(review): poll()'s result is ignored, so recv_pyobj below still
        # blocks; confirm whether a timeout/poll check was intended.
        data_recv.poll()
        foreground_mask = data_recv.recv_pyobj()
        logging.debug(TAG + "received and decoded obj")
        cv2.imshow("frame", foreground_mask)
        logging.debug(TAG + "displayed image")
        if cv2.waitKey(5) == 27:  # exit on escape
            logging.debug(TAG + "received escape key")
            break
        # Send it back to proxy
        # logging.debug(TAG + "sending obj")
        # results_send.send_pyobj(foreground_mask)
| christhompson/recognizers-arch | apps/examples/bg-detect/app.py | app.py | py | 772 | python | en | code | 1 | github-code | 13 |
15791012880 | import Project5Start
game_data = Project5Start.get_project5_data()
menu_text = """
[1] Find largest total sales
[2] Find latest release
[3] Find oldest release
[4] Find highest price
[5] Add new Game
[6] Exit program
"""
while True:
print(menu_text)
choice = input("please enter the number of your selection:")
if '1' in choice:
largest_sales = 0
for game in game_data:
if largest_sales < game['total_sales']:
largest_sales = game['total_sales']
print(f"the highest money making 2d Game on steam made {largest_sales}")
elif '2' in choice:
latest_release = game_data[0]
for game in game_data:
if game['release']> latest_release['release']:
latest_release = game
print(f"{latest_release['name']} was released most recently")
elif '3' in choice:
oldest_release = game_data[0]
for game in game_data:
if game['release'] < oldest_release['release']:
oldest_release = game
print(f"{oldest_release['name']} was released the longest ago")
elif '6' in choice:
break
else:
print(f" {choice} not yet implemented, please choose something else") | jsantore/whileDemoMorning | Menudemo.py | Menudemo.py | py | 1,233 | python | en | code | 0 | github-code | 13 |
15791553570 |
def findDigits(n):
    """Return how many digits of ``n`` evenly divide ``n`` itself.

    Zero digits are skipped (division by zero is undefined); repeated
    digits are counted once per occurrence.
    """
    # Iterate the decimal representation directly instead of building a
    # list and indexing by position.
    return sum(
        1
        for digit_char in str(n)
        if digit_char != '0' and n % int(digit_char) == 0
    )
if __name__ == '__main__':
    # Bug fix: `os.environ` is used below but `os` was never imported in
    # this file; import it locally so the harness actually runs.
    import os

    # HackerRank-style harness: results go to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    t = int(input().strip())
    for t_itr in range(t):
        n = int(input().strip())
        result = findDigits(n)
        fptr.write(str(result) + '\n')
    fptr.close()
| Joshwa034/testrepo | findDigit.py | findDigit.py | py | 478 | python | en | code | 0 | github-code | 13 |
7601843296 | import argparse
import functools
import itertools
import heapq
import operator
import re
import typing
from dataclasses import dataclass
def parse_args():
    """Parse the command line; one positional argument: the puzzle ``input`` file."""
    cli = argparse.ArgumentParser()
    cli.add_argument('input')
    return cli.parse_args()
def _read_data(input_file):
    """Read the hex transmission (first line of the file) as a bit string.

    Every hex digit expands to exactly four bits, zero-padded on the left.
    """
    with open(input_file) as handle:
        first_line = handle.read().splitlines()[0]
    return ''.join(format(int(ch, 16), '04b') for ch in first_line)
class Packet:
    """Base node of the BITS packet tree.

    Attributes: version (int), packet_type (int), sub_packets (list of
    Packet). Concrete subclasses implement ``parse`` (decode one packet
    from a bit string) and ``value`` (evaluate the packet).
    """

    def __init__(self, version, packet_type, sub_packets):
        self.version = version
        self.packet_type = packet_type
        self.sub_packets = sub_packets

    @staticmethod
    def parse(
        packet_version,
        packet_type,
        stream,
    ) -> typing.Tuple['Packet', str]:
        # Bug fix: `raise NotImplemented` raises a TypeError at call time —
        # NotImplemented is a comparison sentinel, not an exception class.
        raise NotImplementedError

    def value(self):
        raise NotImplementedError
class PacketLiteral(Packet):
    """Literal-value packet (type 4): the payload is a number encoded in
    5-bit groups, most significant first; a leading 0 bit marks the last
    group."""

    def __init__(self, version, packet_type, value):
        super().__init__(version, packet_type, [])
        self.__value = value

    @staticmethod
    def parse(
        packet_version,
        packet_type,
        stream,
    ) -> typing.Tuple['Packet', str]:
        """Consume 5-bit groups until the terminating one; return (packet, rest)."""
        bits = []
        while True:
            if not stream:
                # Ran out of input without seeing a terminating group.
                raise RuntimeError
            group, stream = stream[:5], stream[5:]
            bits.append(group[1:])
            if group[0] == '0':
                break
        number = int(''.join(bits), 2)
        return (
            PacketLiteral(packet_version, packet_type, number),
            stream
        )

    def value(self):
        return self.__value
class PacketOperator(Packet):
    """Operator packet: combines the values of its sub-packets.

    The aggregation function is chosen from the packet type id per the
    BITS spec: 0 sum, 1 product, 2 min, 3 max, 5 greater-than,
    6 less-than, 7 equal.
    """

    def __init__(self, version, packet_type, sub_packets, operator_fn):
        super().__init__(version, packet_type, sub_packets)
        self.__operator_fn = operator_fn

    @staticmethod
    def __parse_operator(packet_type):
        """Return the variadic aggregation function for this packet type."""
        if packet_type == 0:
            def _sum(*values):
                return functools.reduce(operator.add, values)
            return _sum
        if packet_type == 1:
            def prod(*values):
                return functools.reduce(operator.mul, values)
            return prod
        if packet_type == 2:
            def _min(*values):
                return min(values)
            return _min
        if packet_type == 3:
            def _max(*values):
                return max(values)
            return _max
        # Comparison packets always carry exactly two sub-packets.
        if packet_type == 5:
            return operator.gt
        if packet_type == 6:
            return operator.lt
        if packet_type == 7:
            return operator.eq

    @classmethod
    def parse(
        cls,
        packet_version,
        packet_type,
        stream,
    ) -> typing.Tuple['Packet', str]:
        """Parse an operator packet body; return (packet, remaining stream).

        Length-type '0': the next 15 bits give the total bit-length of the
        sub-packets; length-type '1': the next 11 bits give their count.
        """
        operator_type = stream[0]
        stream = stream[1:]
        sub_packets = []
        if operator_type == '0':
            packet_length = int(stream[:15], 2)
            stream = stream[15:]
            content = stream[:packet_length]
            stream = stream[packet_length:]
            while content:
                sub_packet, content = parse_packet(content)
                sub_packets.append(sub_packet)
        else:
            num_sub_packets = int(stream[:11], 2)
            stream = stream[11:]
            for _ in range(num_sub_packets):
                sub_packet, stream = parse_packet(stream)
                sub_packets.append(sub_packet)
        operator_fn = cls.__parse_operator(packet_type)
        return (
            PacketOperator(
                packet_version,
                packet_type,
                sub_packets,
                operator_fn,
            ),
            stream
        )

    def value(self):
        """Evaluate sub-packets, then fold them with this packet's operator."""
        values = [p.value() for p in self.sub_packets]
        # Removed the stray debug print() calls that used to live here.
        return self.__operator_fn(*values)
def parse_packet(stream: str) -> typing.Tuple[Packet, str]:
    """Decode one packet's 6-bit header and dispatch to the matching parser.

    Returns the parsed packet plus whatever bits follow it.
    """
    version, ptype = int(stream[:3], 2), int(stream[3:6], 2)
    body = stream[6:]
    # Type 4 is a literal; every other type id is an operator packet.
    parser = PacketLiteral.parse if ptype == 4 else PacketOperator.parse
    return parser(version, ptype, body)
def sum_packet_version(packet: Packet) -> int:
    """Recursively total the version numbers of a packet and all descendants."""
    # A leaf contributes just its own version; sum() over [] is 0.
    return packet.version + sum(map(sum_packet_version, packet.sub_packets))
def run(input_file: str) -> None:
    """Parse every packet in the transmission and print both puzzle answers."""
    stream = _read_data(input_file)
    parsed = []
    versions_sum = 0
    value = 0
    # Anything shorter than 8 bits is trailing zero padding, not a packet.
    while len(stream) > 7:
        packet, stream = parse_packet(stream)
        parsed.append(packet)
        versions_sum += sum_packet_version(packet)
        value = packet.value()
    print('versions sum:', versions_sum)
    print('value:', value)
if __name__ == '__main__':
args = parse_args()
run(args.input)
| mguryev/advent_of_code_2021 | day16.py | day16.py | py | 5,192 | python | en | code | 0 | github-code | 13 |
41894401852 | import os.path
from zipfile import ZipFile
import subprocess
from subprocess import PIPE, CalledProcessError
import shutil
import sys
import json
# Guard: all relative paths below assume the repo root is the CWD.
if not os.path.exists("scripts/tests/e2e-tests.py"):
    sys.exit("This script is intended to be executed from the root folder.")
root = os.getcwd()
# BrowserGlue moved from .jsm to .sys.mjs on main; esr102 keeps the old name.
if sys.argv[1] == "esr102":
    print("Testing on esr102")
    browserGlueFile = "gecko/browser/components/BrowserGlue.jsm"
else:
    print("Testing on main")
    browserGlueFile = "gecko/browser/components/BrowserGlue.sys.mjs"
# Remove old gecko checkout so we always test against a fresh clone.
subprocess.call("rm -rf gecko".split(), cwd=root)
# First we build the extension
subprocess.call("npm run build-test".split(), cwd=root)
# then we clone gecko
subprocess.call("git clone hg::https://hg.mozilla.org/mozilla-unified gecko".split(), cwd=root)
if sys.argv[1] == "esr102":
    subprocess.call("git checkout bookmarks/esr102".split(), cwd="gecko")
# create the folder for the extension
subprocess.call("mkdir -p gecko/browser/extensions/translations/extension".split(), cwd=root)
# and extract the newly one built there
subprocess.call("unzip web-ext-artifacts/firefox_translations.xpi -d gecko/browser/extensions/translations/extension/".split(), cwd=root)
# copy the tests
subprocess.call("mkdir -p gecko/browser/extensions/translations/test/browser/".split(), cwd=root)
subprocess.call("cp scripts/tests/browser.ini gecko/browser/extensions/translations/test/browser/".split(), cwd=root)
subprocess.call("cp scripts/tests/browser_translation_test.html gecko/browser/extensions/translations/test/browser/".split(), cwd=root)
subprocess.call("cp scripts/tests/frame.html gecko/browser/extensions/translations/test/browser/".split(), cwd=root)
subprocess.call("cp scripts/tests/browser_translation_test.js gecko/browser/extensions/translations/test/browser/".split(), cwd=root)
subprocess.call("cp -r scripts/tests/esen/ gecko/browser/extensions/translations/test/browser/esen/".split(), cwd=root)
subprocess.call("cp -r scripts/tests/enes/ gecko/browser/extensions/translations/test/browser/enes/".split(), cwd=root)
subprocess.call("cp scripts/tests/jar.mn gecko/browser/extensions/translations/".split(), cwd=root)
# Register the extension directory with the Firefox build system.
with open('gecko/browser/extensions/moz.build', 'a') as fp:
    fp.write('DIRS += [ \n')
    fp.write('    "translations", \n')
    fp.write('] \n')
# let's copy bergamot-translator's wasm artifacts at right place for tests
subprocess.call("cp -r gecko/browser/extensions/translations/extension/model/static/translation/ gecko/browser/extensions/translations/test/browser/".split(), cwd=root)
# patching BrowserGlue.jsm to add the extension's version so it could be loaded
# NOTE(review): data["version"][0] is the *first character* of the version
# string, not the whole version — confirm this is intentional.
f = open("extension/manifest.json")
data = json.load(f)
extension_version = data["version"][0]
f.close()
f = open("scripts/tests/BrowserGlue.jsm")
dataBrowserGlue = f.read()
dataBrowserGlue = dataBrowserGlue.replace("{version}", extension_version)
if sys.argv[1] == "esr102":
    dataBrowserGlue = dataBrowserGlue.replace("lazy.AddonManager", "AddonManager")
f.close()
fp = open(browserGlueFile)
Lines = fp.readlines()
fp.close()
count = 0
# Splice the patched snippet in just before _setupSearchDetection, and call
# the pref monitor right after the existing _setupSearchDetection() call.
with open(browserGlueFile, 'w') as fp:
    for line in Lines:
        if len(Lines) > count + 1 and "async _setupSearchDetection() {" in Lines[count + 1]:
            fp.write(dataBrowserGlue + "\n")
        elif "this._setupSearchDetection();" in line:
            fp.write(line)
            fp.write("    this._monitorTranslationsPref(); \n")
        else:
            fp.write(line)
        count += 1
# enable our test
with open('gecko/mozconfig', 'w') as f:
    print('ac_add_options --enable-artifact-builds', file=f)
with open('gecko/browser/extensions/translations/moz.build', 'a') as f:
    print('with Files("**"):', file=f)
    print('    BUG_COMPONENT = ("Firefox", "Translations")', file=f)
    print('JAR_MANIFESTS += ["jar.mn"]', file=f)
    print('BROWSER_CHROME_MANIFESTS += [\"test/browser/browser.ini\"]', file=f)
# build and run our test
print("****** Test with faster gemm ******")
try:
    print("Building gecko")
    subprocess.check_output("./mach build", stderr=subprocess.STDOUT, shell=True, universal_newlines=True, cwd="gecko")
    print("Running test with faster gemm")
    subprocess.check_output("./mach test --setpref=fxtranslations.running.mochitest=true browser/extensions/translations/test/browser/browser_translation_test.js", stderr=subprocess.STDOUT, shell=True, universal_newlines=True, cwd="gecko")
    subprocess.check_output("./mach test --setpref=fxtranslations.running.mochitest=true --setpref=browser.translations.enable=false browser/extensions/translations/test/browser/browser_translation_test.js", stderr=subprocess.STDOUT, shell=True, universal_newlines=True, cwd="gecko")
    print("Test with faster gemm Succeeded")
except CalledProcessError as cpe:
    print(cpe.output)
    sys.exit("Tests with faster gemm failed")
# build and run test for fallback gemm
def disable_faster_gemm(engine_js_artifact_name):
    """Rewrite the named engine artifact in place so the optimized
    mozIntGemm symbol is renamed — and therefore never found — at runtime,
    forcing the fallback gemm path."""
    gemm_symbol = "mozIntGemm"
    disabled_symbol = "DISABLE_" + gemm_symbol
    artifact_path = ("gecko/browser/extensions/translations/extension/"
                     "controller/translation/" + engine_js_artifact_name)
    with open(artifact_path, "rt") as src:
        contents = src.read()
    contents = contents.replace(gemm_symbol, disabled_symbol)
    with open(artifact_path, "wt") as dst:
        dst.write(contents)
print("****** Test with fallback gemm ******")
print("Disabling faster gemm")
disable_faster_gemm("bergamot-translator-worker.js")
try:
print("Building gecko")
subprocess.check_output("./mach build", stderr=subprocess.STDOUT, shell=True, universal_newlines=True, cwd="gecko")
print("Running test with fallback gemm")
subprocess.check_output("./mach test --setpref=fxtranslations.running.mochitest=true browser/extensions/translations/test/browser/browser_translation_test.js", stderr=subprocess.STDOUT, shell=True, universal_newlines=True, cwd="gecko")
print("Test with fallback gemm Succeeded")
except CalledProcessError as cpe:
print(cpe.output)
sys.exit("Tests with fallback gemm failed")
| mozilla/firefox-translations | scripts/tests/e2e-tests.py | e2e-tests.py | py | 6,069 | python | en | code | 578 | github-code | 13 |
14087858315 | import utilities.util as util
possible_inputs = {
0: ['None',
'trichar',
'pos',
'unigrams',
'functionwords',
'synchronized_functionwords',
'avg_word',
'english',
'bipos',
'avgcapital',
'numberwords',
'punctuations',
'edit_distance',
'spelling_errors',
'country_words'],
1: ['binary','family', 'language'],
2: ['in', 'out'],
3: [x for x in range(-1, 17)]
}
'''
feature: [trichar/pos]
type: [binary/family/language]
domain: [in/out]
threads: [-1 - 16]
'''
def get_params():
    """Interactively prompt for run parameters and store them as module globals.

    Raises:
        IOError: if feature/type/domain/threads is not one of the values
            listed in ``possible_inputs``.
    """
    # Bug fix: `feature2` was missing from this global statement, so the
    # value below was assigned to a throwaway local while set_params()
    # (and the rest of the module) treat it as a module global.
    global feature, feature2, type, domain, threads, iterations, numOfFunctionwords
    feature = input('Feature? ' + str(possible_inputs[0][1:]) + ' : ')
    if feature == 'synchronized_functionwords':
        numOfFunctionwords = int(input('numOfFunctionwords?: '))
    feature2 = input('Feature 2? ' + str(possible_inputs[0]) + ' : ')
    type = input('Classification type? ' + str(possible_inputs[1]) + ' : ')
    domain = input('Domain? ' + str(possible_inputs[2]) + ' : ')
    threads = int(input('Number of threads? [-1 - 16] : '))
    iterations = int(input('Maximum iterations? [-1-1,000,000] : '))
    # Validate each answer against its slot in possible_inputs.
    for index, x in enumerate([feature, type, domain, threads]):
        if x not in possible_inputs[index]:
            raise IOError("Bad input! '{}' not in {}".format(x, possible_inputs[index]))
    get_database()
def set_params(var_feature, var_feature2, var_type, var_domain, var_threads, var_iterations, var_numOfFunctionWords=0):
    """Set the module-level run parameters programmatically (no prompts)."""
    global feature, feature2, type, domain, threads, iterations, numOfFunctionwords
    feature, feature2 = var_feature, var_feature2
    type, domain = var_type, var_domain
    threads, iterations = var_threads, var_iterations
    # The function-word count only matters for this one feature.
    if feature == 'synchronized_functionwords':
        numOfFunctionwords = var_numOfFunctionWords
    get_database()
def get_database():
    # Resolve the data directory for the currently selected feature: the base
    # path is the first line of utilities/database_dir.txt, the suffix comes
    # from the feature -> directory mapping in utilities.util.
    global database
    database = util.load_file('utilities/database_dir.txt')[0] + util.FeatureToDirectory[feature]
22196809410 | # Ref: https://projects.raspberrypi.org/en/projects/build-your-own-weather-station/5
# with wind speed, gust speed and direction
from gpiozero import Button
import time
import math
import wind_direction_byo
import statistics # used to determine wind gusts
store_speeds = []
store_directions = []
# print(adc.value)
wind_speed_sensor = Button(5)
wind_count = 0 # counts how many half-rotations
radius_cm = 8.0 # radius of Davis anemometer
wind_interval = 5 # how often (secs) to report speed. Default = 5
interval = 5 # set secs for main sampling interval for averages
# Reed-switch callback: one pulse per half-rotation of the cups.
def spin():
    """Increment the module-level pulse counter."""
    global wind_count
    wind_count += 1
CM_IN_A_KM = 100000.0
SECS_IN_A_HOUR = 3600
ADJUSTMENT = 1.32629 # anemometer factor compensation value. default 1.18
def calculate_speed(time_sec):
    """Convert the pulses accumulated over ``time_sec`` seconds to km/h.

    NOTE(review): ``wind_count / 1.0`` treats every pulse as a full rotation,
    while ``spin`` is documented as firing per half-rotation — confirm
    against the anemometer hardware before changing.
    """
    global wind_count
    cup_circumference_cm = (2 * math.pi) * radius_cm
    rotations = wind_count / 1.0
    # Distance one cup travelled, converted to km, then scaled to an hour.
    dist_km = cup_circumference_cm * rotations / CM_IN_A_KM
    km_per_hour = (dist_km / time_sec) * SECS_IN_A_HOUR
    return km_per_hour * ADJUSTMENT
wind_speed_sensor.when_pressed = spin
def reset_wind():
    """Zero the pulse counter before a new sampling window."""
    global wind_count
    wind_count = 0
# wind gust calculations
# Outer loop: one report per `interval` seconds. Inner loop: one speed sample
# per `wind_interval` seconds, while direction readings are collected
# continuously between samples.
while True:
    start_time = time.time()
    while time.time() - start_time <= interval:
        wind_start_time = time.time()
        reset_wind()
        # time.sleep(wind_interval)
        while time.time() - wind_start_time <= wind_interval:
            store_directions.append(wind_direction_byo.get_value())
        final_speed = calculate_speed(wind_interval)
        store_speeds.append(final_speed)
    # Gust = fastest sample in the window; speed = mean of the samples.
    direction_average = wind_direction_byo.get_average(store_directions)
    wind_gust = max(store_speeds)
    wind_speed = statistics.mean(store_speeds)
    store_speeds = []
    store_directions = []
    print("Wind", round(wind_speed, 2), "km/h", "Gusting to", round(wind_gust, 2), "km/h", "Average direction", round(direction_average))
38227915526 | #!/usr/bin/env/python3
# -*- coding: utf-8 -*-
__author__='drawnkid@gmail.com'
import json
def loadFile():
    """Print service names from services.sql that also appear in services.json.

    services.sql is pipe-delimited with the service name in the second
    column; the first line's name field is echoed, then every subsequent
    line's name is printed once per matching entry in services.json.
    """
    # `with` blocks fix the leaked file handles of the original version.
    with open("services.json", encoding='utf-8') as json_file:
        s = json.load(json_file)
    # Flatten the JSON once instead of re-walking it for every SQL line.
    names = [ser2['name'] for ser in s['services'] for ser2 in ser['services']]
    with open("services.sql", encoding='utf-8') as sql_file:
        l = sql_file.readline()
        print(l.split('|')[1].strip())
        for l in sql_file:
            fields = l.split('|')
            if len(fields) < 2:
                # Bug fix: the original indexed [1] unconditionally and
                # raised IndexError on the empty string readline() returns
                # at end of file.
                continue
            trip = fields[1].strip()
            for name in names:
                if trip == name:
                    print(trip)
def loadJson():
    """Print every service name found in services.json, one per line."""
    # `with` ensures the handle is closed; the original leaked it.
    with open("services.json", encoding='utf-8') as f:
        s = json.load(f)
    for ser in s['services']:
        for ser2 in ser['services']:
            print(ser2['name'])
#loadJson()
loadFile()
| BaliStarDUT/hello-world | code/python/json/diffFile.py | diffFile.py | py | 895 | python | en | code | 4 | github-code | 13 |
3299903839 | import sys
import os
if __name__ == "__main__":
if len(sys.argv) is not 2:
sys.exit()
directory = sys.argv[1]
outdir = os.path.join(directory, 'aliveCount')
if not os.path.exists(outdir):
os.mkdir(outdir)
for filename in os.listdir(directory):
if filename != 'aliveCount':
with open(os.path.join(directory, filename)) as f:
f.readline()
w = f.readline()
s = sum(map(int, w.split(',')))
f_id = os.path.basename(filename)
with open (os.path.join(outdir, f_id), "w+") as out_f:
out_f.write(str(s) + "\n")
| lamyiowce/tumor-ca | misc/aliveCellCount.py | aliveCellCount.py | py | 540 | python | en | code | 3 | github-code | 13 |
9087204802 | # Based on the pyvirtualcam 'webcam_filter' example with slight changes https://github.com/letmaik/pyvirtualcam
import argparse
import signal
import sys
import threading
import keyboard
import time
import logging
import cv2
import pyvirtualcam
from pyvirtualcam import PixelFormat
# CLI options for the physical input camera and the virtual output camera.
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--camera", type=int, default=1, help="ID of real webcam device (default: 1)")
parser.add_argument("-x", "--width", type=int, default=1280, help="Preferred width for both cameras (default: 1280)")
parser.add_argument("-y", "--height", type=int, default=720, help="Preferred height for both cameras (default: 720)")
parser.add_argument("-i", "--fps-in", type=int, default=30, help="Preferred FPS in for real camera (default: 30)")
parser.add_argument("-o", "--fps-out", type=int, default=30, help="FPS out for virtual camera (default: 30)")
parser.add_argument("-k", "--pause-key", default="#", help="Key used to pause the virtual camera (default: #)")
parser.add_argument('-m', '--mjpeg', default=True, action='store_true', help="Use MJPEG (default: True)")
parser.add_argument('--no-mjpeg', dest='mjpeg', action='store_false')
args = parser.parse_args()
# Flags shared between the camera thread, hotkey callback and signal handler.
alive = True
pause = False
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
def start_camera():
    """Capture frames from the real webcam and forward them to the virtual camera.

    Runs until the module-global `alive` flag is cleared or a frame read
    fails. While the `pause` flag is set, frames are still read but not
    sent, so the virtual camera freezes on the last forwarded frame.
    """
    # Set up webcam capture.
    vc = cv2.VideoCapture(args.camera)
    if not vc.isOpened():
        logging.error('Could not open video source')
        exit_safely(None, None)
    pref_width = args.width
    pref_height = args.height
    pref_fps_in = args.fps_in
    vc.set(cv2.CAP_PROP_FRAME_WIDTH, pref_width)
    vc.set(cv2.CAP_PROP_FRAME_HEIGHT, pref_height)
    if args.mjpeg:
        # https://stackoverflow.com/a/40067019/19020549 https://stackoverflow.com/a/65185716/19020549
        vc.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G'))
    vc.set(cv2.CAP_PROP_FPS, pref_fps_in)
    # Query final capture device values (may be different from preferred settings).
    width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps_in = vc.get(cv2.CAP_PROP_FPS)
    logging.info(f'Webcam capture started: ID: {args.camera} ({width}x{height} @ {fps_in}fps)')
    with pyvirtualcam.Camera(width, height, args.fps_out, fmt=PixelFormat.BGR) as cam:
        logging.info(f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)')
        ret, frame = None, None
        while alive:
            # Read frame from webcam.
            ret, frame = vc.read()
            if not ret:
                logging.error('Error fetching frame')
                exit_safely(None, None)
            # Send to virtual cam (skipped while paused — freezes the output).
            if not pause:
                cam.send(frame)
            # Wait until it's time for the next frame.
            cam.sleep_until_next_frame()
def listen_for_key(key_event):
    """keyboard.on_release_key callback: toggle the pause flag (argument ignored)."""
    global pause
    pause = not pause
    state = 'paused' if pause else 'un-paused'
    logging.info('Camera {}'.format(state))
camera_thread = threading.Thread(target=start_camera)
def exit_safely(signum, frame):
    """SIGINT handler / shared shutdown path: stop the capture loop and exit.

    Bug fix: the first parameter was named ``signal``, shadowing the
    imported ``signal`` module inside this function. Both arguments follow
    the signal-handler signature and are otherwise unused.
    """
    global alive
    logging.info('Shutting down')
    alive = False
    # Give the camera thread a beat to observe the flag before exiting.
    time.sleep(0.3)
    sys.exit(0)
if __name__ == '__main__':
    # https://www.pythontutorial.net/python-concurrency/python-threading/
    # https://webcamtests.com/viewer
    # Ctrl-C triggers exit_safely; the pause hotkey toggles frame forwarding.
    signal.signal(signal.SIGINT, exit_safely)
    keyboard.on_release_key(args.pause_key, listen_for_key)
    camera_thread.start()
    camera_thread.join()
| piotrpdev/CameraFreeze | camera_freeze.py | camera_freeze.py | py | 3,531 | python | en | code | 0 | github-code | 13 |
73493590417 | # Analisando se o Ano é Bissexto
from datetime import date  # provides today's date
ano = int(input('Coloque um ano para ser analisado: '))
if ano == 0:
    ano = date.today().year  # 0 means "use the machine's current year"
# Gregorian rule: divisible by 4 and not by 100, or divisible by 400.
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
    print('O ano {} é BISSEXTO'.format(ano))
else:
    print('O ano {} NÃO É BISSEXTO'.format(ano))
| damiati-a/CURSO-DE-PYTHON | Mundo 1/ex032.py | ex032.py | py | 396 | python | pt | code | 0 | github-code | 13 |
6009613554 | from django.template import Context, loader
from django.http import HttpResponse, HttpResponseRedirect
from django import forms
from baseApp.models import *
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.forms import ModelForm
from django.contrib.auth.models import User
import time
import re
# Create your views here.
def processIncomingSMS(request):
    """Parse transaction id and amount out of unread mobile-money SMS records.

    NOTE(review): the 10s sleep blocks the request thread — presumably
    waiting for the gateway to persist the SMS; confirm whether this
    belongs in a background task instead.
    """
    time.sleep(10)
    smsleft = IncomingSMS.objects.filter(read=False)
    for sms in smsleft:
        result = re.search(r'(.*)transactionid:(.*)(\d+)\namount:(.*)(\d+)', sms.message)
        if result is None:
            # Bug fix: malformed messages used to raise AttributeError on
            # .group(); skip them instead.
            continue
        sms.transactionID = result.group(2)
        sms.amount = result.group(4)
        # Bug fix: the parsed fields were never persisted.
        sms.save()
class OutgoingSMSForm(ModelForm):
    # Form for queueing an outgoing SMS; receiver/message/sent/created are
    # set programmatically, so they are excluded from user-editable fields.
    class Meta:
        model = OutgoingSMS
        exclude = ['receiver','message','sent','created']
def redeemCart(request, phoneNumber, transID='blank'):
    """Mark the consumer's unpaid cart as paid (for a known transaction id)
    and queue an SMS listing the purchased tickets.
    """
    cartRecords = Cart.objects.filter(consumerPhone = phoneNumber)
    # Bug fix: a QuerySet has no `.objects` manager; `.get()` is called on
    # the queryset itself.
    cart2redeem = cartRecords.get(paid = False)
    # `.exists()` avoids the DoesNotExist exception that `.get()` raises for
    # an unknown transaction id.
    if transID != 'blank' and IncomingSMS.objects.filter(transactionID = transID).exists():
        cart2redeem.paid = True
        cart2redeem.save()  # bug fix: the paid flag was never persisted
    ticketsBought = Ticket.objects.filter(cart__id = cart2redeem)
    phoneNumStr = 'These tickets have been sent to: ' + str(phoneNumber) + '\n'
    ticketStr = ''
    for ticket in ticketsBought:
        # Bug fix: `=` overwrote the line each iteration, so only the last
        # ticket ever made it into the SMS body.
        ticketStr += str(ticket.event) + str(ticket.pin) + str(ticket.ticketType) + '\n'
    smsBody = phoneNumStr + ticketStr
    content = OutgoingSMS(receiver=phoneNumber, message=smsBody, sent=False)
    form = OutgoingSMSForm(request.POST, instance = content)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(request.path)
    else:
        # NOTE(review): this branch returns None, which is not a valid
        # Django response, and the re-bound form is never rendered —
        # confirm the intended error handling.
        form = OutgoingSMSForm(instance = content)
14486761196 | from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
class FilmBusinessLogic:
    """Business rules for the film app, grouped as stateless static validators."""

    @staticmethod
    def validate_stock_greater_availability(stock: int, availability: int):
        """Reject availability values that exceed the stock on hand.

        Args:
            stock (int): stock of the film
            availability (int): availability of the film

        Raises:
            ValidationError: if availability is higher than stock.
        """
        if availability > stock:
            raise ValidationError({'availability': _(
                "The availability shouldn't be higher than stock")})

    @staticmethod
    def validate_film_type_equal_prequel_film_type(film_type: str,
                                                   prequel_film_type: str):
        """Reject prequels whose type differs from the film's own type.

        A missing prequel type (None) is always accepted.

        Raises:
            ValidationError: if the prequel type is set and differs.
        """
        if prequel_film_type is None:
            return
        if film_type != prequel_film_type:
            raise ValidationError(
                {'film_prequel': _('The film prequel should '
                                   'be the same type as '
                                   'the current film type. '
                                   '(Example both movie '
                                   'type)')})
15142537462 | #!/usr/bin/env python3
import json
from aws_cdk import core as cdk
from dotted.collection import DottedDict
from infra.data_masking_trigger_stack import DataMaskingTriggerStack
from infra.data_masking_process_stack import DataMaskingProcessStack
# Per-environment settings (accounts/regions), exposed with dotted access.
config = {}
with open("config.json", "r") as f:
    jdata = json.load(f)
    config = DottedDict(jdata)
app = cdk.App()
# One process-stack + trigger-stack pair per environment; only "eng" is
# currently active.
for where in [
    "eng",
    # "dev", "test", "prod"
]:
    dmp_stack = DataMaskingProcessStack(
        app,
        f"DataMaskingProcessStack-{where}",
        config={
            "prefix": where,
            "region": config[where].region
        },
        env=cdk.Environment(
            account=config[where].account,
            region=config[where].region
        )
    )
    dmt_stack = DataMaskingTriggerStack(
        app,
        f"DataMaskingTriggerStack-{where}",
        config={
            "prefix": where,
            "region": config[where].region,
            # The trigger stack needs the process state machine's ARN.
            "processFsmArn": dmp_stack.processFsmArn
        },
        env=cdk.Environment(
            account=config[where].account,
            region=config[where].region
        )
    )
    # Deploy ordering: the process stack must exist before the trigger stack.
    dmt_stack.add_dependency(dmp_stack)
app.synth()
| s4mli/salesforce-data-masking-py | app.py | app.py | py | 1,185 | python | en | code | 0 | github-code | 13 |
22946843471 | import os
from collections import defaultdict
import datetime
import logging
import iso8601
from .api import ParlisAPI
from .cache import ParlisFileCache, ParlisForceFileCache
from .subtree_parser import ParlisSubtreeParser
from .attachment_parser import ParlisAttachmentParser
from .parser import ParlisParser
from .formatter import ParlisTSVFormatter
from .compressor import ParlisZipCompressor
from .utils import get_dates, entity_to_singular, makedirs, date_to_parlis_str
logger = logging.getLogger(__name__)
class ParlisCrawler(object):
    """Crawl a Parlis API entity over a date range, write TSV output plus
    attachments, and bundle each day's results into a zip archive."""

    entity = 'Zaken'
    attribute = 'GewijzigdOp'
    start_date = None
    end_date = None
    force = False
    fetch_all = False
    force_cache = False

    def __init__(self, entity='Zaken', attribute='GewijzigdOp',
                 start_date=None, end_date=None, force=False,
                 force_cache=False, fetch_all=False):
        """Configure the crawler.

        Bug fix: the old signature used ``datetime.datetime.now().date()``
        as the default for both dates, which is evaluated once at import
        time — a long-running process would keep crawling the import date.
        ``None`` now means "today, computed at call time".
        """
        self.entity = entity
        self.attribute = attribute
        self.force = force
        self.force_cache = force_cache
        self.fetch_all = fetch_all
        if start_date is None:
            start_date = datetime.datetime.now().date()
        if end_date is None:
            end_date = datetime.datetime.now().date()
        if self.fetch_all:
            # Full crawls always start at the beginning of the archive.
            self.start_date = datetime.datetime.strptime('2008-01-01', '%Y-%m-%d').date()
        else:
            self.start_date = start_date
        self.end_date = end_date

    def _format_entities(self, entity, entity_properties, entities, relation = None, output_dir='output', order_field='GewijzigdOp'):
        """Write the parsed entities as TSV; return the produced file names
        (a list with zero or one entry)."""
        file_names = []
        file_name = ParlisTSVFormatter(entity_properties).format(
            entities,
            entity,
            relation,
            output_dir
        )
        if file_name is not None:
            file_names.append(file_name)
        return file_names

    def _fetch_attachments(self, api, contents, current_date, entity, relation=None):
        """Download any attachments referenced in ``contents`` (skipping ones
        already on disk) and return the list of their file paths."""
        file_list = []
        # fetch the attachments, if necessary
        attachment_parser = ParlisAttachmentParser()
        if relation is not None:
            entity_name = relation
        else:
            entity_name = entity
        attachments = attachment_parser.parse(entity_name, contents)
        if len(attachments) > 0:
            makedirs('output/Attachments')
            for attachment_SID in attachments:
                attachment_url = attachments[attachment_SID]
                attachment_file = 'output/Attachments/%s' % (
                    attachment_SID
                )
                if not os.path.exists(attachment_file):
                    response = api.get_request_response(
                        attachment_url, {}
                    )
                    with open(attachment_file, "wb") as att:
                        att.write(response.content)
                file_list.append(attachment_file)
        return file_list

    def run(self):
        """Perform the crawl: page through the entity in batches of 250,
        format results, pull related sub-entities and attachments, and zip
        everything per date range."""
        if self.force:
            cache = ParlisForceFileCache('.', '')
        else:
            cache = ParlisFileCache('.', '')
        api = ParlisAPI('SOS', 'Open2012', cache)
        #for current_date in get_dates(self.start_date, self.end_date):
        for current_date in [self.start_date]:
            #current_end_date = current_date + datetime.timedelta(days=1)
            current_end_date = self.end_date + datetime.timedelta(days=1)
            logging.debug('Dates: %s %s', current_date, current_end_date)
            cache.date_str = str(current_date)
            entity_count = 0
            # The API pages in chunks of 250; a shorter page means we are done.
            last_items_fetched = 250
            file_list = []
            output_dir = 'output/%s-%s' % (current_date, current_end_date)
            while (last_items_fetched >= 250):
                logger.debug(
                    'Going to fetch data for %s, filtered by %s on %s, skipping %s items',
                    self.entity, self.attribute, current_date, entity_count
                )
                if not self.fetch_all:
                    contents = api.fetch_recent(
                        self.entity,
                        None,
                        entity_count,
                        self.attribute,
                        current_date,
                        current_end_date,
                        not self.force_cache
                    )
                else:
                    contents = api.fetch_all(
                        self.entity,
                        None,
                        entity_count,
                        not self.force_cache
                    )
                entity_properties, entities = ParlisParser(
                    contents, self.entity, None
                ).parse()
                file_list += self._format_entities(
                    self.entity, entity_properties, entities, None, output_dir, self.attribute
                )
                # last_items_fetched = len(entities)
                last_items_fetched = contents.count('<entry>')
                entity_count += last_items_fetched
                logging.debug("Parsed %s items, skipped %s items", last_items_fetched, entity_count)
                file_list = file_list + self._fetch_attachments(
                    api, contents, current_date, self.entity
                )
                # fetch the subtree, if necessary
                subtree_parser = ParlisSubtreeParser()
                urls = subtree_parser.parse(self.entity, contents)
                for SID, relation in urls:
                    #relation = urls[SID][0]
                    relation_url = urls[(SID, relation)]
                    # FIXME: only get subtree items that have changed on this date?
                    relation_contents = api.get_request(
                        relation_url, {}, self.entity, relation
                    )
                    parent_name = 'SID_%s' % (entity_to_singular(self.entity), )
                    extra_attributes = {parent_name: SID}
                    relation_properties, relation_entities = ParlisParser(
                        relation_contents, self.entity, relation, [parent_name]
                    ).parse(extra_attributes)
                    file_list += self._format_entities(
                        self.entity, relation_properties, relation_entities, relation, output_dir, self.attribute
                    )
                    # add attachments
                    file_list = file_list + self._fetch_attachments(
                        api, relation_contents, current_date, self.entity, relation
                    )
            compressor = ParlisZipCompressor()
            compressor.compress('output/%s-%s-%s.zip' % (current_date, current_end_date, self.entity), list(set(file_list)))
        logger.debug('Crawling ended, fetched %s urls ..', api.num_requests)
| openstate/parlis-crawler-new | lib/parlis/crawler.py | crawler.py | py | 6,838 | python | en | code | 0 | github-code | 13 |
26414514208 | import collections
# Build the undirected cave graph: each "a-b" input line links both ways.
d = collections.defaultdict(list)
with open('input.txt') as f:
    for l in f:
        s = l.strip().split('-')
        d[s[0]].append(s[1])
        d[s[1]].append(s[0])
# Total number of distinct start->end routes found by the search below.
paths = 0
def dfs(c, p):
    """Depth-first count of routes from cave ``c`` to 'end'.

    ``p`` is the comma-joined path so far. Small (lowercase) caves may be
    revisited only while no small cave has appeared twice yet; 'start' is
    never re-entered. Each completed route increments the global ``paths``.
    """
    if c == 'end':
        global paths
        paths += 1
        return
    for nxt in d[c]:
        if nxt == 'start':
            continue
        # Has any small cave already been used twice on this path?
        # NOTE: p.count() is a substring count, which is fine as long as no
        # cave name is a substring of another (true for this puzzle input).
        small_cave_doubled = any(
            y.islower() and p.count(y) == 2 for y in p.split(',')
        )
        if (not small_cave_doubled) or nxt.isupper() or p.count(nxt) == 0:
            dfs(nxt, f'{p},{nxt}')
# Explore every route from 'start' and report the total found.
dfs('start', 'start')
print(paths)
| logangeorge01/advent-of-code-2021 | 12/2.py | 2.py | py | 570 | python | en | code | 0 | github-code | 13 |
34649900793 | import datetime
f = open("meds.txt", "a")
while True:
meds = input("What medicine did you take? :")
time_taken = int(input("enter how many minutes ago you took this: "))
when_to_take = int(input("How many minutes until you take again?: "))
current_time = datetime.datetime.now()
meds_taken = current_time - datetime.timedelta(minutes = time_taken)
take_again = meds_taken + datetime.timedelta(minutes = when_to_take)
f.write(f"The current time is {current_time}\n")
f.write(f"{meds}\n")
f.write(f"{meds} was taken at {meds_taken}\n")
f.write(f"{meds} needs to be taken again at {take_again}\n")
end = input("Would you like to continue? Y or N: ")
if end.lower() != "y":
break
total = []
with open("meds.txt") as file:
for line in file:
line = line.replace("\n","")
total.append(line)
f.write("We hope you get well soon!")
f.close() | MarkCrocker/Python | meds.py | meds.py | py | 944 | python | en | code | 0 | github-code | 13 |
9473738975 | """
Train the mybag model
"""
# pylint: disable= R0801
from ast import literal_eval
import pandas as pd
from joblib import dump
from sklearn.preprocessing import MultiLabelBinarizer
from src.classification.train import train_classifier
from src.preprocessing.preprocessing_data import preprocess_data
from src.transformation.transformer_mybag import transform_mybag_training
def read_data(filename):
    """Load a tab-separated dataset and decode the stringified tag lists.

    filename — path to the .tsv file; must contain a 'tags' column whose
    cells are Python-literal lists.
    return: pandas DataFrame with 'tags' parsed into real lists.
    """
    frame = pd.read_csv(filename, sep='\t')
    frame['tags'] = frame['tags'].map(literal_eval)
    return frame
def main():
    """Preprocess the training set, build bag-of-words features, then train
    and persist the mybag classifier together with its label binarizer.
    """
    train_frame = read_data('data/train.tsv')
    print("Start Preprocessing")
    features, labels = preprocess_data(train_frame)
    print("End Preprocessing")
    print("Start Transformation")
    features_mybag, tags_counts = transform_mybag_training(features, labels)
    print("End Transformation")
    print("Start Training")
    # Binarize the tag lists into a label matrix with a stable class order.
    binarizer = MultiLabelBinarizer(classes=sorted(tags_counts.keys()))
    labels = binarizer.fit_transform(labels)
    classifier_mybag = train_classifier(features_mybag, labels)
    dump(binarizer, 'output/mlb_mybag.joblib')
    dump(classifier_mybag, 'output/model_mybag.joblib')
    print("End Training")
if __name__ == "__main__":
main()
| Jahb/REMA_Base | src/training_classifier_mybag.py | training_classifier_mybag.py | py | 1,399 | python | en | code | 1 | github-code | 13 |
5869951361 | # coding=utf-8
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from history import orp_demo
####################### 全局变量#########################
app = QApplication(sys.argv)
class MyWindows(orp_demo.Ui_Form, QMainWindow):
    """Main window: combines the Qt-Designer-generated Ui_Form with QMainWindow."""
    def __init__(self):
        super(MyWindows, self).__init__()
        self.setupUi(self)  # build the widgets defined in the .ui file onto this window
my_windows = MyWindows()  # instantiate the main window
my_windows.show()  # show the window
sys.exit(app.exec_())  # run the Qt event loop; exit with its return code
| Eric76-Z/ORP | history/main.py | main.py | py | 448 | python | en | code | 1 | github-code | 13 |
20746436729 | # Tipo Booleana
permissoes = []  # will hold one True/False per age below
idades = [20, 14, 40]  # ages to check against the driving minimum (18)
def verifica_pode_dirigir(idades, permissoes):
    """Append, for each age in ``idades``, whether that person may drive
    (True when the age is at least 18) onto ``permissoes`` in place."""
    for idade in idades:
        permissoes.append(idade >= 18)
verifica_pode_dirigir(idades, permissoes)
print(permissoes)
# With the permissions list filled in, report for each entry whether the
# person may drive (messages printed in Portuguese).
for permissao in permissoes:
    if permissao == True:  # NOTE(review): idiomatic form would be ``if permissao:``
        print('tem permissão para dirigir')
    else:
        print('não tem permissão para digirir')
| HenryJKS/Python | Conhecendo Python/Boolean.py | Boolean.py | py | 671 | python | pt | code | 0 | github-code | 13 |
36298502266 | #IMPORTING LIBRARIES
import cv2
import numpy as np
import math
#CAPTURE THE VIDEO FROM WEBCAM
cap = cv2.VideoCapture(0)
# Main loop: detect a hand inside a fixed 200x200 window, count convexity
# defects (gaps between fingers) and display the finger count (ONE..FIVE).
while True: #to run the loop infinitely
    #READ EACH FRAME FROM THE CAPTURED VIDEO
    _, frame = cap.read() # _ is a boolean which indicates if the frame is captured successfully and then store it into a variable frame
    #GET HAND DATA FROM THE RECTANGLE SUB WINDOW
    cv2.rectangle(frame, (100, 100), (300, 300), (0, 255, 0), 1)
    crop_image = frame[100:300, 100:300]
    blur = cv2.blur(crop_image, (11, 11), 0)
    #CHANGE THE COLOR-SPACE FROM BGR TO HSV
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    # HSV bounds for skin tone — assumes typical indoor lighting; tune as needed.
    lc = np.array([0, 40, 60])
    hc = np.array([20, 150, 255])
    #CREATE MASK FOR SKIN COLOR
    mask = cv2.inRange(hsv, lc, hc)
    #MORPHOLOGICAL OPERATIONS (CLOSING)
    kernel = np.ones((7, 7), np.uint8)
    dilation = cv2.dilate(mask, kernel, iterations= 1)
    erosion = cv2.erode(dilation, kernel, iterations= 1)
    #APPLYING GAUSSIAN BLUR TO REMOVE NOISE
    filtered = cv2.GaussianBlur(erosion, (11, 11), 0)
    #FIND CONTOURS
    # NOTE(review): the 2-value unpacking matches OpenCV 4; OpenCV 3 returns 3 values.
    cont, hierarchy = cv2.findContours(filtered, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    try:
        #FIND CONTOURS OF MAX AREA i.e HAND
        max_cont = max(cont, key = lambda x: cv2.contourArea(x))
        #print("max cont:",max_cont)
        #CREATE BOUNDING RECTANGLE AROUND THE CONTOUR
        x, y, w, h = cv2.boundingRect(max_cont)
        cv2.rectangle(crop_image, (x, y), (x + w, y + h), (0, 0, 255), 0)
        #FIND CONVEX HULL
        hull = cv2.convexHull(max_cont)
        #print("asli_hull:", hull)
        #DRAW CONTOURS
        draw = np.zeros(crop_image.shape, np.uint8)
        cv2.drawContours(draw, [max_cont], -1, (0, 255, 0), 0)
        cv2.drawContours(draw, [hull], -1, (0, 255, 0), 0)
        hull = cv2.convexHull(max_cont, returnPoints = False) #to find the indexes of convex hull pts
        #print("sasta_hull:", hull)
        defects = cv2.convexityDefects(max_cont, hull) #we get starting index, ending index, farthest index, approx distance
        #print("defect", defects)
        defectshape = defects.shape[0]
        #print(defectshape)
        # #USE COSINE RULE TO FIND THE ANGLE OF THE FARTHEST PT FROM START PT AND END PT
        count_defects = 0
        for i in range(defectshape):
            s, e, f, d = defects[i][0]
            start = tuple(max_cont[s][0])
            #print(start)
            end = tuple(max_cont[e][0])
            far = tuple(max_cont[f][0])
            # Triangle side lengths for the cosine rule (a opposite the far point).
            a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
            b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
            c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
            angle = (math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 180) / 3.14
            # Angles <= 90 degrees correspond to gaps between extended fingers.
            if angle <= 90:
                count_defects += 1
                cv2.circle(crop_image, far, 1, [0, 0, 255], -1)
            cv2.line(crop_image, start, end, [0, 255, 0], 2)
        # N defects between fingers means N+1 fingers are shown.
        if count_defects == 0:
            cv2.putText(draw, 'ONE', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1)
        elif count_defects == 1:
            cv2.putText(draw, 'TWO', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1)
        elif count_defects == 2:
            cv2.putText(draw, 'THREE', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1)
        elif count_defects == 3:
            cv2.putText(draw, 'FOUR', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1)
        elif count_defects == 4:
            cv2.putText(draw, 'FIVE', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1)
    except:
        # NOTE(review): bare except hides all detection errors (e.g. no contour
        # found); it falls back to a blank canvas so the UI keeps running.
        draw = np.zeros(crop_image.shape, np.uint8)
    cv2.imshow('frame', frame) #to show the output frame
    cv2.imshow('crop_image', crop_image)
    cv2.imshow('blur', blur)
    cv2.imshow('hsv', hsv)
    cv2.imshow('mask', mask)
    cv2.imshow('dilation', dilation)
    cv2.imshow('erosion', erosion)
    cv2.imshow('filtered', filtered)
    cv2.imshow('draw', draw)
    all_img = np.hstack((draw, crop_image))
    cv2.imshow('control', all_img)
    k = cv2.waitKey(1) #will wait for the key to be pressed for a second and then if not pressed then read the next frame
    if k == 27: #ASCII for escape key
        break #if key pressed is esc then break
cap.release() #release the capture video from webacm
cv2.destroyAllWindows() #destroy all the windows
| ISA-VESIT/Image-Processing-2021 | Day2/Hand gestures.py | Hand gestures.py | py | 4,624 | python | en | code | 2 | github-code | 13 |
12870996365 | """
Problem statement:
You have a pointer at index 0 in an array of size arrLen. At each step, you can move 1 position to the left, 1 position
to the right in the array, or stay in the same place (The pointer should not be placed outside the array at any time)
Given two integers steps and arrLen, return the number of ways such that your pointer is still at index 0 after exactly
steps steps. Since the answer may be too large, return it modulo 10^9 + 7
Example 1:
Input: steps = 3, arrLen = 2
Output: 4
Explanation: There are 4 differents ways to stay at index 0 after 3 steps.
Right, Left, Stay
Stay, Right, Left
Right, Stay, Left
Stay, Stay, Stay
Example 2:
Input: steps = 2, arrLen = 4
Output: 2
Explanation: There are 2 differents ways to stay at index 0 after 2 steps
Right, Left
Stay, Stay
Example 3:
Input: steps = 4, arrLen = 2
Output: 8
Constraints:
* 1 <= steps <= 500
* 1 <= arrLen <= 10^6
Brain storming:
    * Maybe I can find a closed-form math formula for this problem?
* If I found a solution, then the A() will be use in this case
* Number of Right == Number of Left: in order to make the move still 0, and of course can have Number of Right == Number of Left == 0
* Notice: The pointer should not be placed outside the array at any time - mean that the A() at point 2 must be divided by 2
* No, in this problem, we have the another argument called arrLen :<
=> We can not solve this by math, try difference approach like DP
DP(pos, steps): Number of ways to back to origin 0 from the CURRENT position pos and the remaining steps
(pos >= 0 and steps >= 0)
Hints:
    * Try Dynamic Programming: DP(pos, steps) = number of ways to get back to position 0 using exactly "steps" moves
    * Notice that the computational complexity does not depend on "arrLen"
"""
from math import inf
class Solution:
    """Count the ways a pointer starting at index 0 of an array of length
    ``arrLen`` can be back at index 0 after exactly ``steps`` moves
    (left / right / stay), modulo 10**9 + 7."""

    def numWays_SLOW(self, steps: int, arrLen: int) -> int:
        """Top-down (memoized) reference implementation.

        Fixed: the original decorated ``DP`` with ``@cache`` without ever
        importing it (NameError at call time) and printed every visited state.
        """
        from functools import cache  # local import keeps the module namespace unchanged

        MOD = 10 ** 9 + 7

        @cache
        def DP(pos: int, step: int) -> int:
            # Ways to end on index 0 starting from ``pos`` with ``step`` moves left.
            if step == 0:
                return 1 if pos == 0 else 0
            ans = DP(pos, step - 1) % MOD                  # stay in place
            if pos > 0:
                ans = (ans + DP(pos - 1, step - 1)) % MOD  # step left
            if pos < arrLen - 1:
                ans = (ans + DP(pos + 1, step - 1)) % MOD  # step right
            return ans

        return DP(pos=0, step=steps)

    def numWays(self, steps: int, arrLen: int) -> int:
        """Bottom-up DP over (moves used, position).

        dp[i][j] = number of ways to stand on index j after i moves.
        The pointer can never stray farther than steps // 2 from index 0 and
        still return in time, so the table width is clamped to that bound.
        """
        m = steps
        n = min(steps // 2 + 1, arrLen)
        dp = [[0] * n for _ in range(m + 1)]
        dp[0][0] = 1
        mod = 10 ** 9 + 7
        for i in range(1, m + 1):
            for j in range(n):
                dp[i][j] = dp[i - 1][j]                # stayed on j
                if j > 0:
                    dp[i][j] += dp[i - 1][j - 1]       # arrived from the left
                if j < n - 1:
                    dp[i][j] += dp[i - 1][j + 1]       # arrived from the right
        return dp[m][0] % mod
# Quick manual check when run as a script.
if __name__ == '__main__':
    s = Solution()
    # print(s.numWays(steps=3, arrLen=2))
    print(s.numWays(steps=2, arrLen=4))  # expected output: 2
# print(s.numWays(steps=4, arrLen=2)) | Nacriema/Leet-Code | daily_challenges/number-of-ways-to-stay-in-the-same-place-after-some-steps.py | number-of-ways-to-stay-in-the-same-place-after-some-steps.py | py | 3,593 | python | en | code | 0 | github-code | 13 |
26890138595 | import os, datetime
from list import buildList
from file import createFile
from process import processList
cwd = os.getcwd()
# Initialise info: contributor credit and current year for output metadata.
contributor = 'Andy Willis'
year = datetime.date.today().year
# Initialise files used by the pipeline stages.
logFile = 'log.txt'
processedLogFile = 'processedFiles.txt'
listFile = 'spotmapsList.txt'
# Initialise folders — NOTE(review): backslash separators assume Windows;
# os.path.join / pathlib would make this portable.
inputFolder = f'{cwd}\\files\\input\\'
outputFolder = f'{cwd}\\files\\output\\'
# Initialise config shared by every pipeline stage.
config = {
    'contributor': contributor,
    'year': year,
    'inputFolder': inputFolder,
    'outputFolder': outputFolder,
    'logFile': logFile,
    'listFile': listFile,
    'processedLogFile': processedLogFile
}
# Pipeline: create the log files, build the work list, then process it.
createFile(f'{outputFolder}{logFile}')
createFile(f'{outputFolder}{processedLogFile}')
buildList(config)
processList(config)
| andywillis/spotmaps-pipeline | src/main.py | main.py | py | 800 | python | en | code | 0 | github-code | 13 |
25793213172 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
An implementation of Functions in sympy that allow 'anonymous'
functions that can be evaluated when 'lambdified'.
"""
import sympy
def lambdify(args, expr):
    """ Returns function for fast calculation of numerical values

    A modified version of sympy's lambdify that will find 'aliased'
    Functions and substitute them appropriately at evaluation time.

    See ``sympy.lambdify`` for more detail.

    Parameters
    ----------
    args : object or sequence of objects
       May well be sympy Symbols
    expr : expression
       The expression is anything that can be passed to the sympy
       lambdify function, meaning anything that gives valid code from
       ``str(expr)``.

    Examples
    --------
    >>> x = sympy.Symbol('x')
    >>> f = lambdify(x, x**2)
    >>> f(3)
    9
    """
    implementations = _imp_namespace(expr)
    # Work around an old sympy bug where a dict passed as the first
    # namespace to lambdify could be clobbered by later lambdify calls:
    # hand sympy a private copy of the numpy namespace.
    from sympy.utilities.lambdify import _get_namespace
    numpy_namespace = _get_namespace('numpy').copy()
    return sympy.lambdify(args, expr, modules=(implementations, numpy_namespace))
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dkct with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
"""
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if isinstance(expr, (list, tuple)):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
if hasattr(expr, 'func'):
if (isinstance(expr.func, sympy.FunctionClass) and
hasattr(expr.func, 'alias')):
name = expr.func.__name__
imp = expr.func.alias
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def aliased_function(symfunc, alias):
    """ Add implementation `alias` to symbolic function `symfunc`

    Parameters
    ----------
    symfunc : str or ``sympy.FunctionClass`` instance
       If str, then create new anonymous sympy function with this as
       name.  If `symfunc` is a sympy function, attach implementation to
       function
    alias : callable
       numerical implementation of function for use in ``lambdify``

    Returns
    -------
    afunc : sympy.FunctionClass instance
       sympy function with attached implementation
    """
    # if name, create anonymous function to hold alias
    # (``basestring`` was Python 2 only and raises NameError on Python 3).
    if isinstance(symfunc, str):
        symfunc = sympy.FunctionClass(sympy.Function, symfunc)
    # We need to attach as a method because symfunc will be a class
    symfunc.alias = staticmethod(alias)
    return symfunc
| Garyfallidis/nipy | nipy/modalities/fmri/aliased.py | aliased.py | py | 4,136 | python | en | code | null | github-code | 13 |
3719761626 | import boto3
import os
# Set your AWS credentials
# SECURITY(review): never commit real credentials; prefer environment
# variables, an AWS profile, or an IAM role and let boto3 resolve them.
aws_access_key_id = ''
aws_secret_access_key = ''
def upload_directory_to_s3(local_directory, bucket_name, s3_prefix=''):
    """Recursively upload every file below ``local_directory`` to S3.

    Object keys mirror the local layout relative to ``local_directory``,
    optionally nested under ``s3_prefix``; backslashes are normalised to
    forward slashes so Windows paths map to valid S3 keys.
    """
    client = boto3.client('s3',
                          aws_access_key_id=aws_access_key_id,
                          aws_secret_access_key=aws_secret_access_key)
    for current_dir, _subdirs, filenames in os.walk(local_directory):
        for filename in filenames:
            source_path = os.path.join(current_dir, filename)
            relative = os.path.relpath(source_path, local_directory)
            object_key = os.path.join(s3_prefix, relative).replace("\\", "/")
            client.upload_file(source_path, bucket_name, object_key)
            print(f'Uploaded {source_path} to s3://{bucket_name}/{object_key}')
# Set the bucket name and file details
bucket_name = 'pharmacy-objects'
dir_path = "./InsertGenerator/GeneratedJson"
s3_folder_key = ""  # NOTE(review): defined but never passed; s3_prefix defaults to ''
upload_directory_to_s3(dir_path, bucket_name)
| BloodyOcean/course_work_chemistry | InsertGenerator/json_to_s3.py | json_to_s3.py | py | 895 | python | en | code | 0 | github-code | 13 |
73533713296 | import requests
import argparse
import json
parser = argparse.ArgumentParser(description="test deployed api")
parser.add_argument("--img_path", required=True, help="path to image to predict")
args = parser.parse_args()

# Open the image inside a context manager so the file handle is always
# closed, even if the request raises (the original leaked the handle).
with open(args.img_path, "rb") as img_file:
    response = requests.post(
        url="http://127.0.0.1:80/predict", files={"img_file": img_file}
    )

# The API returns JSON of the form {"prediction": <label>}.
prediction = json.loads(response.content.decode())["prediction"]
print(f"Image was classified as: {prediction}")
| jsmithdlc/mlflow-cortex-deploy | test_local_api.py | test_local_api.py | py | 494 | python | en | code | 0 | github-code | 13 |
70502521298 | from django.test import TestCase
from src.shared.errors.AppError import AppError
from src.utils.error_messages import PROJECT_NOT_FOUND
from src.utils.test.create_project import create_project
from ....repositories.projects_repository import ProjectsRepository
from ..fetch_project_metrics_use_case import FetchProjectMetricsUseCase
from src.utils.test.create_project_with_employee import create_project_with_employee
class FetchProjectMetricsUseCaseTest(TestCase):
    """Integration tests for FetchProjectMetricsUseCase against the test DB."""
    def setUp(self):
        # Fresh repository + use case per test (Django resets the DB between tests).
        self.projects_repository = ProjectsRepository()
        self.use_case = FetchProjectMetricsUseCase(self.projects_repository)
    def test_fetch_project_metrics(self):
        """A project with no employees reports zero employees and its own data."""
        project = create_project()
        project_metrics = self.use_case.execute(project['id'])
        self.assertEqual(project_metrics['num_employees'], 0)
        self.assertEqual(project_metrics['id'], project['id'])
        self.assertTrue(project_metrics['remaining_hours'] >= 0)
        self.assertEqual(project_metrics['name'], project['name'])
    def test_fetch_project_metrics_if_not_exists(self):
        """An unknown project id raises AppError with the PROJECT_NOT_FOUND message."""
        with self.assertRaises(Exception) as context:
            self.use_case.execute('00000000-0000-0000-0000-000000000000')
        self.assertIsInstance(context.exception, AppError)
        self.assertEqual(context.exception.message, PROJECT_NOT_FOUND)
    def test_fetch_project_metrics_with_employee(self):
        """A project with one assigned employee reports num_employees == 1."""
        project = create_project_with_employee()
        project_metrics = self.use_case.execute(project['id'])
        self.assertEqual(project_metrics['num_employees'], 1)
        self.assertEqual(project_metrics['id'], project['id'])
        self.assertTrue(project_metrics['remaining_hours'] >= 0)
self.assertEqual(project_metrics['name'], project['name']) | alyssonbarrera/enterprise-management-api | src/modules/projects/use_cases/fetch_project_metrics/tests/test_fetch_project_metrics_use_case.py | test_fetch_project_metrics_use_case.py | py | 1,763 | python | en | code | 0 | github-code | 13 |
28053084860 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of a several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# Extracted plot function to reuse with a novel dataset by Ethan Kennerly
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
def moon_circle_line_datasets():
    """Build the three synthetic 2-D benchmarks: moons, circles, and a
    jittered linearly separable problem.  All generators are seeded, so
    the datasets are reproducible."""
    features, labels = make_classification(
        n_features=2, n_redundant=0, n_informative=2,
        random_state=1, n_clusters_per_class=1)
    # Deterministic uniform jitter makes the linear problem slightly noisy.
    rng = np.random.RandomState(2)
    features = features + 2 * rng.uniform(size=features.shape)
    return [
        make_moons(noise=0.3, random_state=0),
        make_circles(noise=0.2, factor=0.5, random_state=1),
        (features, labels),
    ]
def sample_classifiers():
    """Return a small (names, classifiers) subset for quick comparisons."""
    pairs = [
        ("Decision Tree", DecisionTreeClassifier(max_depth=5)),
        ("Nearest Neighbors", KNeighborsClassifier(3)),
        ("Random Forest",
         RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)),
    ]
    names = [label for label, _ in pairs]
    classifiers = [estimator for _, estimator in pairs]
    return names, classifiers
def all_classifiers():
    """Return the full (names, classifiers) benchmark suite.

    Each name labels the estimator at the same index.  The original paired
    the "RBF SVM" label with the linear-kernel SVC and "Linear SVM" with
    the RBF-kernel SVC; the labels now match their estimators.
    """
    max_depth = 5
    pairs = [
        ("Decision Tree", DecisionTreeClassifier(max_depth=max_depth)),
        ("Nearest Neighbors", KNeighborsClassifier(3)),
        ("Gaussian Process",
         GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True)),
        ("Random Forest",
         RandomForestClassifier(max_depth=max_depth, n_estimators=10,
                                max_features=1)),
        ("QDA", QuadraticDiscriminantAnalysis()),
        ("Neural Net", MLPClassifier(alpha=1)),
        ("Linear SVM",
         SVC(kernel="linear", C=0.025, decision_function_shape='ovr')),
        ("Naive Bayes", GaussianNB()),
        ("RBF SVM", SVC(gamma=2, C=1, decision_function_shape='ovr')),
        ("AdaBoost", AdaBoostClassifier()),
    ]
    names = [label for label, _ in pairs]
    classifiers = [estimator for _, estimator in pairs]
    return names, classifiers
def plot_comparison(datasets, names = None, classifiers = None, is_verbose=False, output_path=None):
    """Plot each dataset (first column) and each classifier's decision
    surface (remaining columns), annotating every cell with test accuracy.

    Parameters
    ----------
    datasets : sequence of (X, y) pairs
    names, classifiers : parallel lists; default to ``all_classifiers()``
    is_verbose : print per-dataset / per-classifier shape diagnostics
    output_path : save the figure there instead of showing it interactively
    """
    if not names and not classifiers:
        names, classifiers = all_classifiers()
    h = .02  # step size in the mesh
    cell_size = 3
    width = cell_size * (len(classifiers) + 1)
    height = cell_size * len(datasets)
    figure = plt.figure(figsize=(width, height))
    i = 1
    # iterate over datasets
    for ds_cnt, ds in enumerate(datasets):
        # preprocess dataset, split into training and test part
        X, y = ds
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=.4, random_state=42)
        if is_verbose:
            print('plot_classifier_comparison: index %r size %r head %r' % (
                ds_cnt, X_train.size, X_train[:2]))
        # Plot feature 0 against a second feature (or feature 0 itself when
        # the data is one-dimensional).
        feature_count = X.shape[1]
        second_index = min(2, feature_count - 1)
        x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
        y_min, y_max = X[:, second_index].min() - .5, X[:, second_index].max() + .5
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        # just plot the dataset first
        cm = plt.cm.RdBu
        cm_bright = ListedColormap(['#FF0000', '#0000FF'])
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        if ds_cnt == 0:
            ax.set_title("Input data")
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, second_index], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, second_index], c=y_test, cmap=cm_bright, alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        i += 1
        # iterate over classifiers
        for name, clf in zip(names, classifiers):
            ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
            clf.fit(X_train, y_train)
            score = clf.score(X_test, y_test)
            # Plot the decision boundary. For that, we will assign a color to each
            # point in the mesh [x_min, x_max]x[y_min, y_max].
            if 2 <= feature_count:
                mesh = np.c_[xx.ravel(), yy.ravel()]
            else:
                mesh = np.c_[xx.ravel()]
            # Prefer the margin distance; fall back to class-1 probability.
            if hasattr(clf, "decision_function"):
                Z = clf.decision_function(mesh)
            else:
                Z = clf.predict_proba(mesh)
                Z = Z[:, 1]
            # Put the result into a color plot
            if is_verbose:
                print('name %r Z.shape %r xx.shape %r size ratio %r Z head %r' % (
                    name, Z.shape, xx.shape, float(Z.size) / xx.size, Z[:2]))
            Z = Z.reshape(xx.shape)
            ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
            # Plot also the training points
            ax.scatter(X_train[:, 0], X_train[:, second_index], c=y_train, cmap=cm_bright)
            # and testing points
            ax.scatter(X_test[:, 0], X_test[:, second_index], c=y_test, cmap=cm_bright,
                       alpha=0.6)
            ax.set_xlim(xx.min(), xx.max())
            ax.set_ylim(yy.min(), yy.max())
            ax.set_xticks(())
            ax.set_yticks(())
            if ds_cnt == 0:
                ax.set_title(name)
            # Accuracy annotation in the lower-right corner (leading 0 stripped).
            ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                    size=15, horizontalalignment='right')
            i += 1
    plt.tight_layout()
    if output_path:
        print('plot_comparison: Saved figure to: %r' % output_path)
        figure.savefig(output_path)
        plt.close(figure)
    else:
        plt.show()
# Demo entry point: compare the default classifiers on the three built-in datasets.
if '__main__' == __name__:
    print(__doc__)
    datasets = moon_circle_line_datasets()
    plot_comparison(datasets)
| ethankennerly/predicting-player-retention | plot_classifier_comparison.py | plot_classifier_comparison.py | py | 7,307 | python | en | code | 0 | github-code | 13 |
5885628675 | # ------------------------------------------
#
# Program created by Maksim Kumundzhiev
#
#
# email: kumundzhievmaxim@gmail.com
# github: https://github.com/KumundzhievMaxim
# -------------------------------------------
import matplotlib.pyplot as plt
N = 1000000  # total population size
S = N - 1  # initially everyone but one individual is susceptible
I = 1  # one initially infected individual
beta = 0.6  # transmission rate per time step
sus = [] # susceptible compartment (S values over time; original comment had sus/inf swapped)
inf = [] # infected compartment (I values over time)
prob = [] # probability of infection at time t
def infection(S, I, N):
    """Advance the discrete SI epidemic model for 100 time steps.

    Appends the susceptible count, infected count, and per-step infection
    probability to the module-level ``sus``, ``inf`` and ``prob`` lists.
    """
    for _ in range(100):
        delta = beta * ((S * I) / N)  # new infections this step
        S, I = S - delta, I + delta
        sus.append(S)
        inf.append(I)
        prob.append(beta * (I / N))
infection(S, I, N)  # run the simulation, filling sus/inf/prob
figure = plt.figure()
figure.canvas.set_window_title('SI model')
# Upper panel: susceptible and infected trajectories.
figure.add_subplot(211)
inf_line, =plt.plot(inf, label='I(t)')
sus_line, = plt.plot(sus, label='S(t)')
plt.legend(handles=[inf_line, sus_line])
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) # use scientific notation
# Lower panel: infection probability over time, y axis shown as percentages.
ax = figure.add_subplot(212)
prob_line = plt.plot(prob, label='p(t)')
plt.legend(handles=prob_line)
type(ax) # matplotlib.axes._subplots.AxesSubplot (no-op, left from exploration)
# manipulate tick labels into percentages
vals = ax.get_yticks()
ax.set_yticklabels(['{:3.2f}%'.format(x*100) for x in vals])
plt.xlabel('T')
plt.ylabel('p')
plt.show() | MaxKumundzhiev/Practices-for-Engineers | NetworkScience/ModelSI.py | ModelSI.py | py | 1,256 | python | en | code | 3 | github-code | 13 |
71129843219 | import requests
import random
import json
import asyncio
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour, FSMBehaviour, State
from spade.message import Message
STATE_TWO = "STATE_TWO"  # FSM state name: searching for food
STATE_THREE = "STATE_THREE"  # FSM state name: carrying food home
SPEED = 0.1 # s / step
def choose_random_directions():
    """Pick one of the four adjacent compass-direction pairs at random.

    :return: two neighbouring directions as a list, e.g. ["north", "east"]
    """
    pairs = [["north", "east"], ["east", "south"],
             ["south", "west"], ["west", "north"]]
    return random.choice(pairs)
def decide_next_move(goal, pos):
    """Pick the compass direction that best reduces the distance to ``goal``.

    :param goal: target (x, y)
    :param pos: current (x, y)
    :return: "north", "east", "south" or "west"
    """
    dx, dy = pos[0] - goal[0], pos[1] - goal[1]
    # Close the larger axis gap first; ties fall through to the y axis.
    if abs(dx) > abs(dy):
        return "west" if dx > 0 else "east"
    return "north" if dy > 0 else "south"
def calc_distance(start, goal):
    """Manhattan (taxicab) distance between two (x, y) grid points."""
    return abs(start[0] - goal[0]) + abs(start[1] - goal[1])
class AntState(State):
    """Base FSM state: shared HTTP and XMPP helpers for the ant behaviours."""
    async def run(self):
        pass
    def send_move_request(self, move_to):
        # Ask the simulation server to move this ant; on success mirror the
        # server's view of position and food flags into the agent.
        try:
            res = requests.get(f"http://127.0.0.1:5000/move/{self.agent.name}/{move_to}")
            res = json.loads(res.text)
            if res["moved"]:
                self.agent.position = (res["position"][0], res["position"][1])
                if res["found_food"]:
                    self.agent.carry_food = True
                    self.agent.good_food_place = (res["position"][0], res["position"][1])
                if res["delivered_food"]:
                    self.agent.carry_food = False
            else:
                # Move was rejected (e.g. blocked): pick a new random heading.
                self.agent.actions = choose_random_directions()
        except Exception as e:
            print(e)
    async def inform_friends(self):
        # Broadcast the current position (a fresh food find) to every other ant.
        try:
            res = requests.get(f"http://127.0.0.1:5000/get_friends")
            res = [friend for friend in json.loads(res.text) if self.agent.name not in friend]
            for friend in res:
                msg = Message(to=friend)  # XMPP message addressed to the friend
                msg.body = json.dumps(self.agent.position)  # payload: our (x, y)
                await self.send(msg)
        except Exception as e:
            print(e)
    def decide_next_move(self, goal):
        # Axis-by-axis step selection toward ``goal`` (x axis first, then y).
        pos = self.agent.position
        if pos[0] > goal[0]:
            move_to = "west"
        elif pos[0] < goal[0]:
            move_to = "east"
        elif pos[1] < goal[1]:
            move_to = "south"
        else:
            move_to = "north"
        return move_to
class AntBehaviour(FSMBehaviour):
    """Finite-state machine driving one ant; registers it with the server on start."""
    async def on_start(self):
        res = requests.get(f"http://127.0.0.1:5000/create_ant/{self.agent.name}")
        position = json.loads(res.text)["position"]
        self.agent.position = (position[0], position[1])
        self.agent.home = (position[0], position[1])
        # Sentinel meaning "no known food place", far outside the grid.
        self.agent.good_food_place = (99999, 99999)
        self.agent.carry_food = False
        self.agent.actions = choose_random_directions()
class Searching(AntState):
    """State: wander (or home in on a known food place) until food is picked up."""
    async def run(self):
        # print("SEARCHING", self.agent.name, self.agent.position, self.agent.good_food_place)
        await asyncio.sleep(SPEED)
        # Within 30 cells of a known food place, walk straight at it;
        # otherwise keep moving along the current random heading.
        if calc_distance(self.agent.position, self.agent.good_food_place) < 30:
            move_to = decide_next_move(self.agent.good_food_place, self.agent.position)
        else:
            move_to = random.choice(self.agent.actions)
        self.send_move_request(move_to)
        if calc_distance(self.agent.position, self.agent.good_food_place) == 0:
            # Arrived at the remembered food place: reset to the sentinel.
            self.agent.good_food_place = (99999, 99999)
        if self.agent.carry_food:
            self.set_next_state(STATE_THREE)
            await self.inform_friends()
        else:
            self.set_next_state(STATE_TWO)
class CarryHome(AntState):
    """State: walk straight home and drop the carried food there."""
    async def run(self):
        # print("CARRYING", self.agent.name, self.agent.position, self.agent.good_food_place)
        await asyncio.sleep(SPEED)
        move_to = decide_next_move(self.agent.home, self.agent.position)
        self.send_move_request(move_to)
        # The server clears carry_food once the food has been delivered.
        if self.agent.carry_food:
            self.set_next_state(STATE_THREE)
        else:
            self.set_next_state(STATE_TWO)
class ReceiveMsg(CyclicBehaviour):
    """Continuously listen for friends' food-location announcements."""
    async def run(self):
        msg = await self.receive(timeout=10)
        if msg:
            # print(f"{self.agent.name} Hey I got a msg: {msg.body}")
            body = json.loads(msg.body)
            acceptable_distance = 30
            distance_to_new_food_source = calc_distance(self.agent.position, body)
            # Adopt the announced food place only if it is both closer than the
            # currently known one and within the acceptance radius.
            if distance_to_new_food_source < calc_distance(self.agent.position, self.agent.good_food_place) \
                    and distance_to_new_food_source <= acceptable_distance:
                self.agent.good_food_place = body
class AntAgent(Agent):
    """SPADE agent wiring the search/carry FSM and the message listener together."""
    async def setup(self):
        print(f"{self.name} started")
        # self.web.start(hostname="127.0.0.1", port=10000)
        # print(self.web.server, self.web.port)
        fsm = AntBehaviour()
        fsm.add_state(name=STATE_TWO, state=Searching(), initial=True)
        fsm.add_state(name=STATE_THREE, state=CarryHome())
        # All four transitions are allowed; each state decides where to go next.
        fsm.add_transition(source=STATE_TWO, dest=STATE_TWO)
        fsm.add_transition(source=STATE_TWO, dest=STATE_THREE)
        fsm.add_transition(source=STATE_THREE, dest=STATE_TWO)
        fsm.add_transition(source=STATE_THREE, dest=STATE_THREE)
        self.add_behaviour(fsm)
        cycl = ReceiveMsg()
        self.add_behaviour(cycl)
| patricklanger/multi-agent-example | ant_agent.py | ant_agent.py | py | 5,596 | python | en | code | 0 | github-code | 13 |
72773967379 | import evaluate
import argparse
import os
import glob
import json
import copy
from selfBlue import SelfBleu
# Hugging Face `evaluate` BLEU metric, loaded once at import time.
bleu = evaluate.load("bleu")
def compute_bleu(sentence, questions):
    """BLEU of one question against every other question in its group.

    A high score means ``sentence`` is very similar to the rest of the
    questions (i.e. low diversity).

    :param sentence: the question to score (a string)
    :param questions: all questions in the group, including ``sentence``
    :return: the corpus ``bleu`` score as a float
    """
    others = copy.deepcopy(questions)
    others.remove(sentence)
    # evaluate's bleu metric expects ``predictions`` to be a *list* of
    # prediction strings and ``references`` a list with one reference-list
    # per prediction; the original passed the bare string / flat list.
    result = bleu.compute(predictions=[sentence], references=[others])
    return result['bleu']
def main(args):
    """Compute per-question-type Self-BLEU over every *.jsonl file in args.data.

    Lower Self-BLEU indicates a more diverse set of generated questions.
    Results are averaged per type ("tfq", "cloze", "SAQ") across all files.
    """
    overall_results = {"tfq": [], "cloze": [], "SAQ": []}
    rel_list = glob.glob(os.path.join(args.data, "*.jsonl"))
    for rel in rel_list:
        questions = {"tfq": [], "cloze": [], "SAQ": []}
        with open(rel, "r") as f:
            for line in f.readlines():
                data = json.loads(line)
                # Multiple-choice questions are excluded from the analysis.
                if data['question_type'] != "multi_choices":
                    # QA-style datasets hold short-answer questions only.
                    if "qa" in args.data:
                        questions["SAQ"].append(data['lm_input']['message'])
                    else:
                        if data['question_type'] == "single_answer" or data['question_type'] == "multiple_answer":
                            questions["cloze"].append(data['lm_input']['message'])
                        elif "binary" in data['question_type']:
                            questions["tfq"].append(data['lm_input']['message'])
        # Self-bleu per question type for this relation file.
        for key in questions:
            if len(questions[key]) == 0:
                continue
            self_blue = SelfBleu(questions[key])
            if args.p:
                score = self_blue.get_bleu_parallel()
            else:
                score = self_blue.get_bleu()
            #print(f"rel: {rel}, type: {key}, Self-Bleu: {score}")
            overall_results[key].append(score)
    for key in overall_results:
        if len(overall_results[key]) == 0:
            continue
        print(f"Overall {args.data} {key}: ", sum(overall_results[key]) / len(overall_results[key]))
# CLI entry point: --data points at a directory of *.jsonl files; -p enables
# the parallel Self-BLEU computation.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, help="Data directory")
    parser.add_argument('-p', action='store_true', help="Parallel")
    args = parser.parse_args()
main(args) | RManLuo/llm-facteval | scripts/question_analysis/diversity.py | diversity.py | py | 2,144 | python | en | code | 6 | github-code | 13 |
43122881726 | import tkinter as tk
root = tk.Tk()
root.title("User choice")
# root.geometry('400x400')

# Tk variables must be created via the module alias (the module is imported
# as ``tk``); the bare StringVar() calls in the original raised NameError.
name = tk.StringVar()
mail = tk.StringVar()
music = tk.StringVar()


def takeuserdata():
    """Print the name currently typed into the entry field."""
    # Read the Tk variable; the original rebound the module-level ``name``
    # inside the function, which raised UnboundLocalError.
    print(name.get())


# Bind each Entry to its StringVar via ``textvariable`` so the typed values
# are actually readable (the original never connected them).  Note that
# ``.grid()`` returns None, so widget references are not kept.
tk.Label(root, text="Enter Your Name : ").grid(row=0, column=0)
tk.Entry(root, textvariable=name).grid(row=0, column=1)

tk.Label(root, text="Enter Your Email : ").grid(row=1, column=0)
tk.Entry(root, textvariable=mail).grid(row=1, column=1)

tk.Label(root, text="Enter the path of Music Lib : ").grid(row=2, column=0)
tk.Entry(root, textvariable=music).grid(row=2, column=1)

tk.Button(root, text="Submit", command=takeuserdata).grid(row=3, column=0)

root.mainloop()
| kuntal-samanta/Core_Python | Advanced_Python/Voice-Processing-Application/gui.py | gui.py | py | 655 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.