seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
12040736872 | def main():
    # Re-prompt until the fraction parses cleanly and the denominator is
    # non-zero; any other exception (e.g. IndexError for input without "/")
    # intentionally propagates.
    while True:
        try:
            fraction = input("Fraction: ")
            print (gauge(convert(fraction)))
            break
        except (ValueError, ZeroDivisionError):
            continue
def convert(fraction):
    """Turn an "X/Y" string into the rounded integer percentage X/Y * 100.

    Raises ValueError for non-numeric parts, ZeroDivisionError for Y == 0,
    and IndexError when no "/" is present.
    """
    parts = fraction.split("/")
    numerator = int(parts[0])
    denominator = int(parts[1])
    return round(numerator / denominator * 100)
def gauge(percentage):
    """Render a fuel percentage as a gauge reading.

    <= 1 reads "E" (empty), 99-100 reads "F" (full), anything in between
    reads as "<n>%".  Values above 100 yield an empty string, as before.
    """
    if percentage <= 1:
        return "E"
    if percentage < 99:
        return f"{percentage}%"
    if percentage <= 100:
        return "F"
    return ""
if __name__ == "__main__":
main() | shivang1209-dot/CS50P | pset5/test_fuel/fuel.py | fuel.py | py | 665 | python | en | code | 0 | github-code | 36 |
21518376045 | from __future__ import division
import scipy.optimize
import numpy as np
import json
import re
import cv2
_BLACK = (0, 0, 0)
_RED = (0, 0, 255)
_BLUE = (255, 0, 0)
_PURPLE = (204, 0, 153)
_ORANGE = (51, 153, 255)
_LBROWN = (0, 153, 230)
keypoint_colors = { '1': _RED, '2': _RED, '3': _RED, '4': _RED, '5': _RED,
'6': _ORANGE, '7': _ORANGE, '8': _ORANGE, '9': _ORANGE,
'10': _LBROWN, '11': _LBROWN, '12': _LBROWN, '13': _LBROWN,
'14': _BLUE, '15': _BLUE, '16': _BLUE, '17': _BLUE,
'18': _PURPLE, '19': _PURPLE, '20': _PURPLE, '21': _PURPLE
}
def bbox_iou(boxA, boxB):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes.

    Adapted from the pyimagesearch IoU tutorial with the non-overlap
    correction: disjoint boxes return the sentinel -1.0 instead of 0.
    Coordinates are treated as inclusive pixel indices, hence the +1 terms.
    """
    # Corners of the intersection rectangle.
    ix1 = max(boxA[0], boxB[0])
    iy1 = max(boxA[1], boxB[1])
    ix2 = min(boxA[2], boxB[2])
    iy2 = min(boxA[3], boxB[3])
    inter_w = ix2 - ix1 + 1
    inter_h = iy2 - iy1 + 1
    if inter_w <= 0 or inter_h <= 0:
        return -1.0
    inter_area = inter_w * inter_h
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    return inter_area / float(area_a + area_b - inter_area)
def match_bboxes(bbox_gt, bbox_pred, IOU_THRESH=0.05):
'''
Given sets of true and predicted bounding-boxes,
determine the best possible match.
Parameters
----------
bbox_gt, bbox_pred : N1x4 and N2x4 np array of bboxes [x1,y1,x2,y2].
The number of bboxes, N1 and N2, need not be the same.
Returns
-------
(idxs_true, idxs_pred, ious, labels)
idxs_true, idxs_pred : indices into gt and pred for matches
ious : corresponding IOU value of each match
labels: vector of 0/1 values for the list of detections
'''
n_true = bbox_gt.shape[0]
n_pred = bbox_pred.shape[0]
MAX_DIST = 1.0
MIN_IOU = 0.0
# NUM_GT x NUM_PRED
iou_matrix = np.zeros((n_true, n_pred))
for i in range(n_true):
for j in range(n_pred):
iou_matrix[i, j] = bbox_iou(bbox_gt[i,:], bbox_pred[j,:])
if n_pred > n_true:
# there are more predictions than ground-truth - add dummy rows
diff = n_pred - n_true
iou_matrix = np.concatenate( (iou_matrix,
np.full((diff, n_pred), MIN_IOU)),
axis=0)
if n_true > n_pred:
# more ground-truth than predictions - add dummy columns
diff = n_true - n_pred
iou_matrix = np.concatenate( (iou_matrix,
np.full((n_true, diff), MIN_IOU)),
axis=1)
# call the Hungarian matching
idxs_true, idxs_pred = scipy.optimize.linear_sum_assignment(1 - iou_matrix)
if (not idxs_true.size) or (not idxs_pred.size):
ious = np.array([])
else:
ious = iou_matrix[idxs_true, idxs_pred]
# remove dummy assignments
sel_pred = idxs_pred<n_pred
idx_pred_actual = idxs_pred[sel_pred]
idx_gt_actual = idxs_true[sel_pred]
ious_actual = iou_matrix[idx_gt_actual, idx_pred_actual]
sel_valid = (ious_actual > IOU_THRESH)
label = sel_valid.astype(int)
return idx_gt_actual[sel_valid], idx_pred_actual[sel_valid], ious_actual[sel_valid], label
def cull_annotations():
    """Filter bootstrapped hand-keypoint annotations against the validated set.

    Reads the validated ground-truth COCO file and the bootstrap COCO file,
    re-numbers all image/annotation ids from scratch, keeps only bootstrap
    detections on the +-5 neighbouring frames of each GT frame whose bounding
    box matches a GT box (Hungarian matching on IoU), blanks keypoints that
    are unlabeled in the matched GT annotation, and writes the merged result
    to "train_boostrap_filtered_validated_2.json".

    NOTE(review): indentation was reconstructed for this routine; in
    particular the placement of the two `max_img_id += 1` statements should
    be confirmed against the original file.
    """
    gt = json.load(open("train_validated_keypoints_only.json", "r"))
    boot = json.load(open("hand_keypoints_train_bootstrap.json", "r"))
    # for each image in GT, find entries in boot
    gt_images = gt["images"]
    gt_anns = {}    # file_name -> re-numbered GT annotations
    boot_anns = {}  # file_name -> bootstrap annotations
    boot_imgs = {}  # file_name -> bootstrap image record
    max_img_id = 0  # running counter used to re-number image ids
    max_ann_id = 0  # running counter used to re-number annotation ids
    # Re-number every GT image id sequentially and collect its annotations.
    for img in gt_images:
        img_id = img["id"]
        # if img_id > max_img_id:
        #     max_img_id = img_id
        img["id"] = max_img_id + 1
        gt_anns[img["file_name"]] = []
        for ann in gt["annotations"]:
            # if ann["id"] > max_ann_id:
            #     max_ann_id = ann["id"]
            if ann["image_id"] == img_id:
                # Point the annotation at the re-numbered image id.
                ann["image_id"] = img["id"]
                ann["id"] = max_ann_id
                gt_anns[img["file_name"]].append(ann)
                max_ann_id += 1
        max_img_id += 1
    # Index bootstrap images/annotations by file name.
    for img in boot["images"]:
        img_id = img["id"]
        boot_imgs[img["file_name"]] = img
        boot_anns[img["file_name"]] = []
        for ann in boot["annotations"]:
            if ann["image_id"] == img_id:
                boot_anns[img["file_name"]].append(ann)
    boot_new_images = []
    count = 0
    total = 0
    imgs_to_remove = set()
    for gt_img in gt_images:
        img_name = gt_img["file_name"]
        # File names look like "<vid_id>-NNNNNNNNN.jpg" (9-digit frame index).
        # NOTE(review): patterns should ideally be raw strings (r"-\d{9}.jpg").
        vid_id = re.sub("-\d{9}.jpg", "", img_name)
        frame = int(re.search("\d{9}", img_name).group())
        # The +-5 neighbouring frames of the current GT frame.
        boot_frames = set(["{vid_id}-{frame:09d}.jpg".format(vid_id=vid_id, frame=frame+i) for i in range(-5, 6)])
        boot_frames.remove(img_name)
        # for each entry in boot, match bboxes (x, y, w, h)
        gt_bboxes = []
        for x in gt_anns[img_name]:
            # Convert COCO [x, y, w, h] to [x1, y1, x2, y2].
            gt_bboxes.append([x["bbox"][0], x["bbox"][1], x["bbox"][0] + x["bbox"][2], x["bbox"][1] + x["bbox"][3]])
        gt_bboxes = np.array(gt_bboxes)
        for f in boot_frames:
            boxes = []
            if f not in boot_anns:
                continue
            # reassign image
            boot_imgs[f]["id"] = max_img_id + 1
            for x in boot_anns[f]:
                boxes.append([x["bbox"][0], x["bbox"][1], x["bbox"][0] + x["bbox"][2], x["bbox"][1] + x["bbox"][3]])
            boxes = np.array(boxes)
            idx_true, idxs_pred, ious, labels = match_bboxes(gt_bboxes, boxes)
            total += 1
            if len(idxs_pred) >= 1:
                count += 1
                # image_debug = cv2.imread("./bootstrap/" + f)
                # find matched boxes + corresponding annotations
                gt_using = [gt_anns[img_name][xi] for xi in idx_true]
                boot_using = [boot_anns[f][xi] for xi in idxs_pred]
                # eliminate keypoints
                remove_idxs = []
                using = []
                for b in range(len(boot_using)):
                    gt_ann = gt_using[b]
                    boot_ann = boot_using[b]
                    boot_ann["image_id"] = max_img_id + 1
                    boot_ann["id"] = max_ann_id + 1
                    keypoints = boot_ann["keypoints"]
                    # Zero out bootstrap keypoints that are unlabeled in GT
                    # (visibility flag of the matched GT keypoint is 0).
                    for i in range(21):
                        if gt_ann["keypoints"][i * 3 + 2] == 0:
                            keypoints[i * 3] = 0
                            keypoints[i * 3 + 1] = 0
                            keypoints[i * 3 + 2] = 0
                        # else:
                        #     cv2.circle(image_debug, (int(keypoints[i * 3]), int(keypoints[i * 3 + 1])), 4, keypoint_colors[str(i + 1)], -1)
                        #     cv2.putText(image_debug, str(i + 1), (int(keypoints[i * 3] - 4), int(keypoints[i * 3 + 1] - 4)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
                    if sum(keypoints) == 0:
                        # Nothing left to learn from this detection.
                        remove_idxs.append(b)
                        continue
                    using.append(boot_ann)
                    boot_ann["keypoints"] = keypoints
                    box = boot_ann["bbox"]
                    # cv2.rectangle(image_debug, (int(box[0]), int(box[1])), (int(box[2] + box[0]) , int(box[3] + box[1])), color=(0, 255, 0),thickness=3)
                    # cv2.rectangle(image_debug, (int(box[0]), int(box[1])), (int(box[2]) , int(box[3])), color=(0, 255, 0),thickness=3)
                    # fix error bbox coco error here # Error is in train _bootstrap
                    boot_ann["bbox"] = [box[0], box[1], box[2] - box[0], box[3] - box[1]] # x y w h
                    boot_ann["area"] = (box[2] - box[0]) * (box[3] - box[1])
                    max_ann_id += 1
                # remove image if no more annotations
                if len(remove_idxs) == len(boot_using):
                    imgs_to_remove.add(f)
                # boot_using = [x for i, x in enumerate(boot_using) if i not in remove_idxs]
                # add to new annotations
                boot_new_images.extend(using)
                # save image for debugging
                # cv2.imwrite("./debug_bootstrap_train_filter/debug_" + f, image_debug)
            else:
                # eliminate image
                imgs_to_remove.add(f)
                print(f)
            max_img_id += 1
    print(len(boot_new_images))
    boot["annotations"] = boot_new_images
    for v in gt_anns.values():
        boot["annotations"].extend(v)
    print(len(boot["annotations"]))
    boot["images"] = list(boot_imgs.values())
    boot["images"].extend(gt["images"])
    print(len(boot["images"]), len(imgs_to_remove), len(gt["images"]))
    removed = set()
    # Drop images flagged for removal (iterate a copy while mutating).
    imgs_cpy = list(boot["images"])
    for img in imgs_cpy:
        if img["file_name"] in imgs_to_remove:
            boot["images"].remove(img)
            removed.add(img["id"])
            if img in gt["images"]:
                print("overlap")
    print(len(boot["images"]), len(boot["annotations"]))
    # remove annotations on removed images
    cpy = list(boot["annotations"])
    for ann in cpy:
        if ann["image_id"] in removed:
            boot["annotations"].remove(ann)
            print("hi")
    json.dump(boot, open("train_boostrap_filtered_validated_2.json", "w"))
    print(count, total)
# get rid of unmatched boxes
# get rid of keypoints that aren't labled, occluded, w/in a certain distance
# write to image so you can see what they now look like
cull_annotations()
| egoodman92/semi-supervised-surgery | MULTITASK_FILES/KEYPOINTS_FILES/surgery-hand-detection-new/scripts/filter_bootstrap.py | filter_bootstrap.py | py | 8,359 | python | en | code | 0 | github-code | 36 |
34075769652 | #!/usr/bin/env python
# Scroll a greeting across the Raspberry Pi Sense HAT LED matrix forever
# (interrupt with Ctrl-C).
from sense_hat import SenseHat
sense_obj = SenseHat()
# RGB colour tuples (0-255 per channel).
red = (255,0,0)
green = (0,255,9)  # NOTE(review): blue channel of 9 looks like a typo for 0 — confirm.
blue = (0,0,255)  # defined but unused below
while True:
    sense_obj.show_message("Life is Beautiful!", text_colour=red, back_colour=green)
| akshaynawale/PiSense | sense_hat/show_text.py | show_text.py | py | 227 | python | en | code | 0 | github-code | 36 |
2434152222 | from funcs import *
import numpy as np
import matplotlib.pyplot as plt

# Count positives (sum of the 0/1 labels) or negatives (N minus that).
# NOTE(review): relies on the global N defined below; both classes have N samples.
counter = lambda List, isPositive: sum(List) if isPositive else N - sum(List)
# Sample size
N = 1000
# Class 0
mu_0 = 190
sigma_0 = 10
basketballers = np.random.normal(mu_0, sigma_0, N)
# Class 1
mu_1 = 173
sigma_1 = 12
footballers = np.random.normal(mu_1, sigma_1, N)
# Binary classifier threshold (upper bound of the sweep)
limit = 250
tpr_list, fpr_list, alpha_list, one_minus_beta, accuracy_list = [], [], [], [], []
TruePositive, FalsePositive, FalseNegative, TrueNegative = [], [], [], []
# Sweep the decision threshold and collect confusion-matrix statistics.
for lim in range(limit):
    classified_basketballers = Classification(basketballers, lim)
    classified_footballers = Classification(footballers, lim)
    TruePositive.append(counter(classified_footballers, True))
    TrueNegative.append(counter(classified_basketballers, False))
    FalsePositive.append(counter(classified_basketballers, True))
    FalseNegative.append(counter(classified_footballers, False))
    if TruePositive[lim] != 0 and (FalsePositive[lim] + TrueNegative[lim]) != 0:
        accuracy = Accuracy(TruePositive[lim], TrueNegative[lim], FalsePositive[lim], FalseNegative[lim])
        accuracy_list.append(accuracy)
        alpha = ALPHA(FalsePositive[lim], TrueNegative[lim])
        alpha_list.append(alpha)
        beta = BETA(FalseNegative[lim], TruePositive[lim])
        one_minus_beta.append(1 - beta)
        precision = Precision(TruePositive[lim], FalsePositive[lim])
        recall = Recall(TruePositive[lim], FalseNegative[lim])
        f1 = F1(precision, recall)
        tpr_list.append(TPR(TruePositive[lim], FalseNegative[lim]))
        fpr_list.append(FPR(FalsePositive[lim], TrueNegative[lim]))
    else:
        # NOTE(review): only accuracy_list gets a placeholder here, so the
        # other metric lists can end up shorter than `limit` — confirm intended.
        accuracy_list.append(0)
# Area under the constructed curve (AUC)
print(f"AUC: {S(fpr_list, tpr_list)}")
# Maximum Accuracy and the metrics at that threshold
index = accuracy_list.index(max(accuracy_list))
print(f"Порог при максимальном значении Accuracy: {index}")
TP_max = TruePositive[index]
FP_max = FalsePositive[index]
FN_max = FalseNegative[index]
TN_max = TrueNegative[index]
accuracy = Accuracy(TP_max, TN_max, FP_max, FN_max)
print(f"Accuracy = {accuracy}")
precision = Precision(TP_max, FP_max)
print(f"Precision = {precision}")
recall = Recall(TP_max, FN_max)
print(f"Recall = {recall}")
f1 = F1(precision, recall)
print(f"F1-score = {f1}")
alpha = ALPHA(FP_max, TN_max)
print(f"Alpha = {alpha}")
beta = BETA(FN_max, TP_max)
print(f"Beta = {beta}")
# ROC curve plot
fig = plt.figure()
plt.plot(alpha_list, one_minus_beta)
plt.show()
| shulayonok/ML4 | main.py | main.py | py | 2,654 | python | en | code | 0 | github-code | 36 |
2926954909 | import pandas as pd
import numpy as np
from multiprocessing.pool import ThreadPool
from multiprocessing import cpu_count
import operator
from collections import OrderedDict
import itertools
import time
from sklearn.metrics import accuracy_score
import visualization
visuals = visualization.Visualization()
class Local_Outlier_Factor:
    '''
    Local Outlier Factor (LOF) based outlier detector.

    Scores every row of DATA (or SAMPLE_DATA) by comparing its local
    reachability density with that of its K nearest neighbours (Manhattan
    distance).  Points whose LOF exceeds THRESH are labeled 1 (outlier),
    others 0.  Reduced 2-D projections of the points are accumulated in the
    module-level `visuals` object for later plotting.

    @Author: Naren Surampudi
    '''
    def __init__(self):
        self.K = 2                # number of nearest neighbours
        self.DATA = None          # pandas DataFrame input (used when DATA_FLAG)
        self.SAMPLE_DATA = None   # plain list-of-lists input
        self.DATA_FLAG = True     # True -> use DATA, False -> use SAMPLE_DATA
        self.THRESH = 1           # LOF threshold separating outliers from inliers
        self.REDUCED_POINTS = []  # 2-D projections used for visualization

    def neighborhood(self):
        """Compute per-point LRD values and the K-nearest-neighbour map.

        Returns [lrds, neighbors_dict] where neighbors_dict maps each point
        index to a list of (neighbour_index, distance) pairs.  O(n^2) in the
        number of points.
        """
        if self.DATA_FLAG:
            val_data = self.DATA.values.tolist()
        else:
            val_data = self.SAMPLE_DATA # for sample sets
        lrds = []
        reach_distances = []  # NOTE(review): never populated; appears unused.
        read_index1 = 0
        neighbors_dict = {}
        reduced_points = []  # NOTE(review): unused local (self.REDUCED_POINTS is used instead).
        for reading1 in val_data:
            self.REDUCED_POINTS.append(visuals.dimension_reduction(reading1))
            neighbors = {}
            neighbors_dict[read_index1] = []
            read_index2 = 0
            for reading2 in val_data:
                if read_index1 != read_index2:
                    print("Reading indices: " + str(read_index1) + " " + str(read_index2))
                    # Manhattan (L1) distance between the two records.
                    distance = sum(abs(np.array(list(reading1)) - np.array(list(reading2))))
                    distance = round(distance, ndigits=2)
                    neighbors[read_index2] = distance
                read_index2 = read_index2 + 1
            # Keep only the K closest neighbours.
            sorted_temp = sorted(neighbors.items(), key=lambda kv: kv[1])
            neighbors = OrderedDict(sorted_temp)
            neighbors = list(itertools.islice(neighbors.items(), 0, self.K))
            # print(neighbors)
            for n in neighbors:
                neighbors_dict[read_index1].append(n)
            lrds.append(self.LRD(neighbors, self.K))
            read_index1 = read_index1 + 1
        return [lrds, neighbors_dict]

    def K_element_dist(self, read_index1, K):
        """Distance from point `read_index1` to its K-th nearest neighbour."""
        if self.DATA_FLAG:
            val_data = self.DATA.values.tolist()
        else:
            val_data = self.SAMPLE_DATA
        k_dists = []
        reading1 = val_data[read_index1]
        read_index2 = 0
        for reading2 in val_data:
            if read_index1 != read_index2:
                distance = sum(abs(np.array(list(reading1)) - np.array(list(reading2))))
                distance = round(distance, ndigits=2)
                k_dists.append(distance)
            read_index2 = read_index2 + 1
        k_dists.sort()
        k_dists = k_dists[0:self.K]
        # print(k_dists)
        return k_dists[-1]

    def LRD(self, neighbors, K):
        """Local reachability density: |neighbours| / sum of reachability distances."""
        k_nearest_count = len(neighbors)
        reach_distance_sum = self.reach_distance(neighbors, self.K)
        lrd = k_nearest_count / reach_distance_sum
        return lrd

    def reach_distance(self, neighbors, K):
        """Sum of reachability distances to the given (index, distance) pairs."""
        rds = []
        for element in neighbors:
            # reach-dist(p, o) = max(k-distance(o), d(p, o))
            rd = max(self.K_element_dist(element[0], self.K), element[1])
            rds.append(rd)
        return sum(rds)

    def LOF(self, lrds, neighbors_dict, K):
        """Score every point and binarize against THRESH (1 = outlier)."""
        lofs = []
        # print(neighbors_dict)
        for element in neighbors_dict.keys():
            print("Calculating LOF for: " + str(element))
            neighbors = neighbors_dict[element]
            lrd_sum = 0
            reach_dist_sum = self.reach_distance(neighbors, self.K)
            for n in neighbors:
                lrd_sum = lrd_sum + lrds[n[0]]
            # reach_dist_sum = reach_dist_sum + reach_distances[n]
            lof = (lrd_sum * reach_dist_sum) / (self.K**2)
            lof = round(lof, ndigits=2)
            # specific for fraud detection
            if lof > self.THRESH:
                lof = 1
                visuals.OUTLIERS.append(self.REDUCED_POINTS[element])
            else:
                lof = 0
                visuals.NON_OUTLIERS.append(self.REDUCED_POINTS[element])
            lofs.append(lof)
        return lofs

    def container(self):
        """End-to-end run: neighbourhood statistics followed by LOF scoring."""
        lof_reqs = self.neighborhood()
        lofs = self.LOF(lof_reqs[0], lof_reqs[1], self.K)
        return lofs
if __name__ == "__main__":
    # Driver: run LOF either on the credit-card fraud dataset (DATA_FLAG=True)
    # or on a tiny hard-coded sample set (DATA_FLAG=False).
    lof_class = Local_Outlier_Factor()
    credit_data = pd.read_csv('../creditcard_nomralized.csv')
    y = credit_data['Class']
    # Feature columns: V1..V28 plus Time and Amount.
    req_cols = []
    for i in range(1, 29):
        req_cols.append('V' + str(i))
    req_cols.append('Time')
    req_cols.append('Amount')
    data = credit_data[req_cols]
    sample_data = [[0,0],[0,1],[1,1],[3,0]] # some sample data
    lof_class.DATA = data[0:10000]
    lof_class.SAMPLE_DATA = sample_data
    lof_class.DATA_FLAG = False
    if lof_class.DATA_FLAG:
        # Larger neighbourhood and threshold for the real dataset.
        lof_class.K = 20
        lof_class.THRESH = 1.5
    val_y = y[0:10000]
    pool = ThreadPool(processes=cpu_count())
    # lof_reqs = (pool.apply_async(lof_class.neighborhood)).get()
    # print(type(neighbors))
    # print(data.values.tolist()[0])
    # lofs = lof_class.LOF(lof_reqs[0], lof_reqs[1], lof_class.K)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for wall-clock benchmarking.
    start_time = time.perf_counter()
    lofs = (pool.apply_async(lof_class.container)).get()
    stop_time = time.perf_counter()
    run_time = stop_time - start_time
    # print(lofs)
    if lof_class.DATA_FLAG:
        # Ground-truth labels only exist for the real dataset.
        print("Accuracy: " + str(accuracy_score(lofs, val_y)))
    print("Time: " + str(run_time))
    visuals.outlier_plot()
| aditya-srikanth/Data-Mining-Assignment-3 | LOF.py | LOF.py | py | 5,514 | python | en | code | 0 | github-code | 36 |
12370764901 | import sys
sys.setrecursionlimit(10000)
# Read the grid size and the number of rectangles.
n , m , k = map(int, input().split()) # n = rows, m = columns
array = [[0]*m for _ in range(n)]  # 1 marks cells covered by a rectangle
check = [[0]*m for _ in range(n)]  # visited flags for the flood fill
# 4-neighbourhood offsets (up, down, left, right).
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
for i in range(k):
    x , y , x1, y1 = map(int, input().split()) # x = column, y = row
    # Mark every cell inside the rectangle rows y..y1-1, columns x..x1-1.
    for i in range(y, y1):
        for j in range(x, x1):
            array[i][j] = 1
cnt = 0
result = []
def dfs(x,y, check):
    """Flood-fill one uncovered region; global `tmp` counts its extra cells."""
    global tmp
    array[x][y] = 1
    check[x][y] = 1
    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]
        if 0 <= nx < n and 0 <= ny < m:
            if array[nx][ny] == 0 and check[nx][ny] == 0:
                tmp += 1
                dfs(nx, ny, check)
# Count the separated empty regions and record their areas.
for i in range(n):
    for j in range(m):
        if array[i][j] == 0 and check[i][j] == 0:
            tmp = 0
            dfs(i, j, check)
            result.append(tmp+1) # +1 for the starting cell itself
            cnt += 1
print(cnt)
answer = sorted(result)
for i in answer:
print(i,end=' ') | hwangstone1/Algorithm_repository | Algorithm_DFS&BFS/DFS_exercise_7.py | DFS_exercise_7.py | py | 963 | python | en | code | 0 | github-code | 36 |
34761412830 | import sys, os
import subprocess
import json
from typing import Union
from random import random
import web3
from web3 import Web3
from web3._utils.threads import Timeout
from solcx import compile_files
from eth_utils import decode_hex
# Project modules
from TextColor.color import bcolors
MGMT_CONTRACT_DB_NAME = 'database.json'
MGMT_CONTRACT_SRC_PATH = r"./contracts/ManagementContract.sol"
MGMT_CONTRACT_NAME = "ManagementContract"
BATTERY_MGMT_CONTRACT_SRC_PATH = r"./contracts/BatteryManagement.sol"
BATTERY_MGMT_CONTRACT_NAME = "BatteryManagement"
REGISTRATION_REQUIRED_GAS = 50000
def _deploy_contract_and_wait(_w3: Web3, _actor: str, _contract_src_file: str, _contract_name: str, *args):
    """
    Deploy contract to the blockchain and wait for its inclusion in a block.

    :param Web3 _w3: Web3 instance
    :param str _actor: The account sending the deployment transaction
    :param str _contract_src_file: Path to contract source code
    :param str _contract_name: Contract name
    :param list args: Contract constructor arguments
    :return: Address of the newly deployed contract
    :rtype: str
    """
    tx_hash = _deploy_contract(_w3, _actor, _contract_src_file, _contract_name, *args)
    # NOTE(review): this calls the module-level `web3.eth` with _w3 passed as
    # the first argument; the usual API is
    # _w3.eth.wait_for_transaction_receipt(tx_hash, timeout, poll_latency) —
    # confirm this works with the pinned web3 version.
    receipt = web3.eth.wait_for_transaction_receipt(_w3, tx_hash, 120, 0.1)
    return receipt["contractAddress"]
def _deploy_contract(_w3: Web3, _actor: str, _contract_src_file: str, _contract_name: str, *args):
    """
    Send a contract-deployment transaction to the blockchain.

    :param Web3 _w3: Web3 instance
    :param str _actor: The account sending the deployment transaction
    :param str _contract_src_file: Path to contract source code
    :param str _contract_name: Contract name
    :param list args: Contract constructor arguments
    :return: Hash of the deployment transaction (not the contract object)
    :rtype: HexBytes
    """
    compiled = compile_contracts(_contract_src_file)
    # solcx keys compiled units as "<path>:<contract name>".
    contract = initialize_contract_factory(_w3, compiled, _contract_src_file + ":" + _contract_name)
    tx = {'from': _actor, 'gasPrice': get_actual_gas_price(_w3)}
    return contract.constructor(*args).transact(transaction=tx)
def _wait_for_validation(_w3: Web3, _tx_dict: dict, _tmout: int = 120) -> dict:
    """
    Poll until every transaction in _tx_dict has a receipt (was mined).

    :params Web3 _w3: Web3 instance
    :params dict _tx_dict: Mapping of key -> transaction hash to wait for
    :params int _tmout: Overall timeout for inclusion in a block, in seconds
    :return: Mapping of key -> [transaction hash, receipt]
    :rtype: dict
    """
    receipts_list = {}
    for i in _tx_dict.keys():
        receipts_list[i] = [_tx_dict[i], None]
    confirmations = len(list(_tx_dict))
    # Raises the Timeout if not all receipts arrive within _tmout seconds.
    with Timeout(_tmout) as tm:
        while(confirmations > 0):
            for i in _tx_dict.keys():
                if receipts_list[i][1] is None:
                    tx_reciept = _w3.eth.getTransactionReceipt(receipts_list[i][0])
                    if tx_reciept is not None:
                        receipts_list[i][1] = tx_reciept
                        confirmations -= 1
                # Randomized back-off between polls.
                # NOTE(review): sleep placement reconstructed from a
                # whitespace-mangled source — confirm against the original.
                tm.sleep(random())
    return receipts_list
def _create_mgmt_contract_db(_contract_address: str) -> None:
    """
    Persist the management contract address into the JSON database file.

    :params str _contract_address: Management contract address in blockchain
    :return: Nothing
    :rtype: None
    """
    write_data_base({'mgmt_contract': _contract_address}, MGMT_CONTRACT_DB_NAME)
def get_actual_gas_price(_w3: Web3) -> float:
    """
    Gas price used for all transactions.

    NOTE(review): despite the name, this returns a fixed 1 gwei (expressed in
    wei) rather than querying the node for the current network gas price.

    :param Web3 _w3: Web3 instance
    :return: Gas price in wei
    :rtype: int
    """
    return _w3.toWei(1, 'gwei')
def write_data_base(_data: dict, _file_name: str) -> None:
    """
    Serialize _data as JSON into _file_name, overwriting any existing content.

    :param dict _data: Data to write
    :param str _file_name: Name of the file for writing
    :return: Nothing
    :rtype: None
    """
    with open(_file_name, 'w') as sink:
        json.dump(_data, sink)
def unlock_account(_w3: Web3, _account: str, _password: str) -> None:
    """
    Unlock an account on the geth node for signing transactions.

    :param Web3 _w3: Web3 instance
    :param str _account: Account to unlock
    :param str _password: Password for the account
    :return: Nothing
    :rtype: None
    """
    # 300-second unlock window; subsequent transactions must fit inside it.
    _w3.geth.personal.unlockAccount(_account, _password, 300)
def create_new_account(_w3: Web3, _password: str, _file_name: str) -> str:
    """
    Create new account and write it to database.

    Any existing database file is deleted first, so only the newest
    account/password pair is kept on disk.

    :param Web3 _w3: Web3 instance
    :param str _password: Password for the new account
    :param str _file_name: Name of the database file for writing
    :return: Account address wrapped in ANSI colour escapes for terminal output
    :rtype: str
    """
    if os.path.exists(_file_name):
        os.remove(_file_name)
    account = _w3.geth.personal.newAccount(_password)
    data = {"account": account, "password": _password}
    # NOTE(review): the password is stored in plain text next to the account.
    write_data_base(data, _file_name)
    return f"{bcolors.HEADER}{data['account']}{bcolors.ENDC}"
def open_data_base(_file_name: str) -> Union[dict, None]:
    """
    Load JSON content from _file_name.

    :param str _file_name: Database file name
    :return: None when the file does not exist, otherwise the parsed content
    :rtype: None/dict
    """
    if not os.path.exists(_file_name):
        return None
    with open(_file_name) as source:
        return json.load(source)
def compile_contracts(_files: Union[str, list]):
    """
    Compile Solidity contract file/files with solcx.

    :param str/list _files: Files to compile
    :return: Compiled contract metadata keyed by "<path>:<contract name>"
    :rtype: dict

    NOTE(review): if _files is neither str nor list this raises
    UnboundLocalError on `contracts`; an explicit TypeError would be clearer.
    """
    if isinstance(_files, str):
        contracts = compile_files([_files])
    if isinstance(_files, list):
        contracts = compile_files(_files)
    return contracts
def get_data_from_db(_file_name: str,_key: str) -> Union[str, None]:
    """
    Fetch a single value from the JSON database file.

    :params str _file_name: Name of the database file
    :params str _key: Key to look up
    :return: None when the database is missing, otherwise the stored value
    :rtype: None/str
    """
    content = open_data_base(_file_name)
    if content is None:
        print("Cannot access account database")
        return None
    return content[_key]
def init_management_contract(_w3: Web3):
    """
    Creates a management contract object bound to the locally stored address.

    :param Web3 _w3: Web3 instance
    :return: Management contract
    :rtype: Contract instance
    """
    compiled = compile_contracts(MGMT_CONTRACT_SRC_PATH)
    # Bind to the address previously recorded by _create_mgmt_contract_db.
    mgmt_contract = initialize_contract_factory(_w3, compiled, MGMT_CONTRACT_SRC_PATH + ":" + MGMT_CONTRACT_NAME,
                                                open_data_base(MGMT_CONTRACT_DB_NAME)["mgmt_contract"])
    return mgmt_contract
def initialize_contract_factory(_w3: Web3, _compiled_contracts, _key: str, _address: str = None):
    """
    Build a contract object from compiled metadata.

    Without an address, returns a deployable factory (abi + bytecode); with an
    address, returns a handle bound to the already-deployed instance.

    :params Web3 _w3: Web3 instance
    :params _compiled_contracts: Compiled contracts
    :params str _key: "<contract path>:<contract name>" key into the metadata
    :params str _address: Address of a deployed instance, if any
    :return: Contract instance
    :rtype: Contract
    """
    meta = _compiled_contracts[_key]
    if _address is None:
        return _w3.eth.contract(abi=meta['abi'], bytecode=meta['bin'])
    return _w3.eth.contract(abi=meta['abi'], address=_address)
def get_battery_managment_contract_addr(_w3: Web3) -> str:
    """
    Read the battery management contract address from the management contract.

    Exits the process with a coloured "Failed" message when the lookup fails.

    :params Web3 _w3: Web3 instance
    :return: Contract's address
    :rtype: str
    """
    try:
        mgmt_contract = init_management_contract(_w3)
        addr = mgmt_contract.functions.getBatteryManagmentAddr().call()
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
        # catching Exception keeps the original "fail with message" behaviour
        # for real errors only.
        sys.exit(f"{bcolors.FAIL}Failed{bcolors.ENDC}")
    return addr
def init_battery_management_contract(_w3: Web3, addr: str):
    """
    Creates a battery management contract object bound to the given address.

    :param Web3 _w3: Web3 instance
    :param str addr: Battery management contract's address
    :return: Battery management contract
    :rtype: Contract instance
    """
    compiled = compile_contracts(BATTERY_MGMT_CONTRACT_SRC_PATH)
    battery_mgmt_contract = initialize_contract_factory(_w3, compiled, BATTERY_MGMT_CONTRACT_SRC_PATH + ":" + BATTERY_MGMT_CONTRACT_NAME,
                                                        addr)
    return battery_mgmt_contract
def create_script_from_tmpl(private_key, address: str):
    """Instantiate a battery firmware script from batteryTemplate.py.

    Rewrites line 12 of the template with the given private key and saves the
    result as firmware/<first 8 hex chars of address>.py.
    """
    with open("batteryTemplate.py", 'r') as template:
        content = template.readlines()
    content[11] = f"private_key = '{private_key}'\n"
    target = f"firmware/{address[2:10]}.py"
    with open(target, 'w') as script:
        script.writelines(content)
def get_battery_info(_path: str) -> dict:
    """
    Run a battery firmware script with --get and load the JSON it produces.

    :param str _path: Path to battery's firmware script (".../<id>.py")
    :return: Battery's info (v, r, s, charges, time), or None when the
        firmware produced no data file
    :rtype: dict/None
    """
    if os.path.exists(f"{_path}"):
        # The firmware writes "<path without .py>_data.json" when run with --get.
        subprocess.run(["python", f"{_path}", "--get"])
    else:
        sys.exit(f"{bcolors.FAIL}Battery does not exist{bcolors.ENDC}")
    return open_data_base(f"{_path[:-3]}_data.json")
def verify_battery(_w3: Web3, _path: str):
    """
    Verify battery firmware against the on-chain battery management contract.

    :param Web3 _w3: Web3 instance
    :param str _path: Path to firmware
    :return: (verified flag, charge count, vendor id, vendor name)
    :rtype: tuple
    """
    verified = False
    battery_info = get_battery_info(_path)
    if battery_info is None:
        sys.exit(f"{bcolors.FAIL}The battery does not exist{bcolors.ENDC}")
    battery_mgmt_addr = get_battery_managment_contract_addr(_w3)
    battery_mgmt_contract = init_battery_management_contract(_w3, battery_mgmt_addr)
    # The firmware signs (charges, time); the contract recovers and checks
    # the vendor address from the v, r, s signature components.
    verified, vendor_address = battery_mgmt_contract.functions.verifyBattery(battery_info['v'], _w3.toBytes(hexstr=battery_info['r']),
                                                                             _w3.toBytes(hexstr=battery_info['s']), battery_info['charges'],
                                                                             battery_info['time']).call()
    mgmt_contract = init_management_contract(_w3)
    # Resolve the vendor's registered id and display name.
    vendor_id = _w3.toHex(mgmt_contract.functions.vendorId(vendor_address).call())
    vendor_name = (mgmt_contract.functions.vendorNames(vendor_id).call()).decode()
    return verified, battery_info['charges'], vendor_id, vendor_name
def change_owner(_w3: Web3, _battery_id: str, _new_owner: str, account_db_name: str) -> str:
    """
    Change the owner of battery

    :param Web3 _w3: Web3 instance
    :param str _battery_id: battery ID (hex string)
    :param str _new_owner: New owner address
    :param str account_db_name: JSON database holding the current owner's
        account and password
    :return: Status message
    :rtype: str
    """
    data = open_data_base(account_db_name)
    actor = data['account']
    # gas: 21000 base + 2204 * 68 — presumably sized for the transfer
    # payload at 68 gas/byte; confirm against the contract's actual usage.
    tx = {'from': actor, 'gasPrice': get_actual_gas_price(_w3), 'gas':2204 * 68 + 21000}
    battery_mgmt_contract_addr = get_battery_managment_contract_addr(_w3)
    battery_mgmt_contract = init_battery_management_contract(_w3, battery_mgmt_contract_addr)
    unlock_account(_w3, actor, data['password'])
    tx_hash = battery_mgmt_contract.functions.transfer(_new_owner, decode_hex(_battery_id)).transact(tx)
    # NOTE(review): module-level call with _w3 as the first argument — the
    # usual API is _w3.eth.wait_for_transaction_receipt(...); confirm against
    # the pinned web3 version.
    receipt = web3.eth.wait_for_transaction_receipt(_w3, tx_hash, 120, 0.1)
    result = receipt.status
    if result == 1:
        return "Ownership change was successfull"
    else:
        return "Ownership change failed"
| acid9reen/bas | utils.py | utils.py | py | 11,123 | python | en | code | 0 | github-code | 36 |
26409261579 | class Solution:
def distributeCandies(self, n: int, limit: int) -> int:
#第一个孩子可能得到的糖果数i min(n,limit)
#第二个孩子可能得到的糖果数j min(n-i,limit)
#第三个孩子可能得到的糖果数k n-i-j
count = 0
for i in range(min(n,limit)+1):
for j in range(min(n-i,limit)+1):
k = n-i-j
if k >=0 and k <=limit:
count += 1
return count
| lpjjj1222/leetcode-notebook | 2928. Distribute Candies Among Children 1.py | 2928. Distribute Candies Among Children 1.py | py | 495 | python | zh | code | 0 | github-code | 36 |
37952156492 | from math import sqrt
from itertools import product
import numpy as np
from scipy.special import factorial as fact
from functools import lru_cache
# The interaction matrix in desired basis
# U^{spherical}_{m1 m2 m3 m4} = \sum_{k=0}^{2l} F_k angular_matrix_element(l, k, m1, m2, m3, m4)
# H = \frac{1}{2} \sum_{ijkl,\sigma \sigma'} U_{ijkl} a_{i \sigma}^\dagger a_{j \sigma'}^\dagger a_{l \sigma'} a_{k \sigma}.
@lru_cache(maxsize=8)
# NOTE(review): lru_cache requires hashable arguments — passing a list for
# radial_integrals or a numpy array for T raises TypeError at call time;
# confirm callers pass tuples/None, or drop the decorator.
def U_matrix(l,
             radial_integrals=None,
             U_int=None,
             J_hund=None,
             basis='spherical',
             T=None):
    r"""
    Calculate the full four-index U matrix being given either radial_integrals or U_int and J_hund.

    The convention for the U matrix is that used to construct the Hamiltonians, namely:

    .. math:: H = \frac{1}{2} \sum_{ijkl,\sigma \sigma'} U_{ijkl} a_{i \sigma}^\dagger a_{j \sigma'}^\dagger a_{l \sigma'} a_{k \sigma}.

    Parameters
    ----------
    l : integer
        Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
    radial_integrals : list, optional
        Slater integrals [F0,F2,F4,..].
        Must be provided if U_int and J_hund are not given.
        Preferentially used to compute the U_matrix if provided alongside U_int and J_hund.
    U_int : scalar, optional
        Value of the screened Hubbard interaction.
        Must be provided if radial_integrals are not given.
    J_hund : scalar, optional
        Value of the Hund's coupling.
        Must be provided if radial_integrals are not given.
    basis : string, optional
        The basis in which the interaction matrix should be computed.
        Takes the values

        - 'spherical': spherical harmonics,
        - 'cubic': cubic harmonics,
        - 'other': other basis type as given by the transformation matrix T.

    T : real/complex numpy array, optional
        Transformation matrix for basis change.
        Must be provided if basis='other'.
        The transformation matrix is defined such that new creation operators :math:`b^\dagger` are related to
        the old ones :math:`a^\dagger` as

        .. math:: b_{i \sigma}^\dagger = \sum_j T_{ij} a^\dagger_{j \sigma}.

    Returns
    -------
    U_matrix : float numpy array
        The four-index interaction matrix in the chosen basis.
    """
    # Check all necessary information is present and consistent
    if radial_integrals is None and (U_int is None and J_hund is None):
        raise ValueError(
            "U_matrix: provide either the radial_integrals or U_int and J_hund."
        )
    if radial_integrals is None and (U_int is not None and J_hund is not None):
        radial_integrals = U_J_to_radial_integrals(l, U_int, J_hund)
    if radial_integrals is not None and (U_int is not None
                                         and J_hund is not None):
        if len(radial_integrals) - 1 != l:
            raise ValueError(
                "U_matrix: inconsistency in l and number of radial_integrals provided."
            )
        if (radial_integrals - U_J_to_radial_integrals(l, U_int,
                                                       J_hund)).any() != 0.0:
            print(
                "Warning: U_matrix: radial_integrals provided do not match U_int and J_hund. Using radial_integrals to calculate U_matrix."
            )
    # Full interaction matrix
    # Basis of spherical harmonics Y_{-2}, Y_{-1}, Y_{0}, Y_{1}, Y_{2}
    # U^{spherical}_{m1 m2 m3 m4} = \sum_{k=0}^{2l} F_k angular_matrix_element(l, k, m1, m2, m3, m4)
    U_mat = np.zeros(
        (2 * l + 1, 2 * l + 1, 2 * l + 1, 2 * l + 1), dtype=float)
    m_range = list(range(-l, l + 1))
    # Sum the Slater-integral contributions F_k (k = 0, 2, ..., 2l).
    for n, F in enumerate(radial_integrals):
        k = 2 * n
        for m1, m2, m3, m4 in product(m_range, m_range, m_range, m_range):
            U_mat[m1 + l, m2 + l, m3 + l, m4 +
                  l] += F * angular_matrix_element(l, k, m1, m2, m3, m4)
    # Transform from spherical basis if needed
    if basis == "cubic": T = spherical_to_cubic(l, convention='wien2k')
    if basis == "other" and T is None:
        raise ValueError("U_matrix: provide T for other bases.")
    if T is not None: U_mat = transform_U_matrix(U_mat, T)
    return U_mat
# Convert full 4-index U matrix to 2-index density-density form
def reduce_4index_to_2index(U_4index):
    r"""
    Reduce the four-index interaction matrix to the two density-density forms.

    Parameters
    ----------
    U_4index : float numpy array
        The four-index interaction matrix.

    Returns
    -------
    U : float numpy array
        Two-index matrix for parallel spins (direct minus exchange term).
    Uprime : float numpy array
        Two-index matrix for anti-parallel spins (direct term only).
    """
    size = U_4index.shape[0]  # 2l+1
    U = np.zeros((size, size), dtype=float)
    Uprime = np.zeros((size, size), dtype=float)
    for m in range(size):
        for mp in range(size):
            direct = U_4index[m, mp, m, mp].real
            exchange = U_4index[m, mp, mp, m].real
            U[m, mp] = direct - exchange
            Uprime[m, mp] = direct
    return U, Uprime
# Construct the 2-index matrices for the density-density form
@lru_cache(maxsize=8)
def U_matrix_kanamori(n_orb, U_int, J_hund):
    r"""
    Build the Kanamori U and Uprime density-density matrices.

    Diagonal of Uprime carries the intra-orbital U; off-diagonals follow the
    Kanamori rules U - 3J (parallel spins) and U - 2J (anti-parallel spins).

    Parameters
    ----------
    n_orb : integer
        Number of orbitals in basis.
    U_int : scalar
        Value of the screened Hubbard interaction.
    J_hund : scalar
        Value of the Hund's coupling.

    Returns
    -------
    U : float numpy array
        Two-index interaction matrix for parallel spins.
    Uprime : float numpy array
        Two-index interaction matrix for anti-parallel spins.
    """
    U = np.zeros((n_orb, n_orb), dtype=float)
    Uprime = np.zeros((n_orb, n_orb), dtype=float)
    for m in range(n_orb):
        for mp in range(n_orb):
            if m == mp:
                Uprime[m, mp] = U_int
            else:
                U[m, mp] = U_int - 3.0 * J_hund
                Uprime[m, mp] = U_int - 2.0 * J_hund
    return U, Uprime
@lru_cache(maxsize=8)
def U_matrix_dudarev(n_orb, U_int, J_hund):
    r"""
    Build the Dudarev two-index U and Uprime matrices.

    In the Dudarev scheme only the effective difference U - J enters the
    parallel-spin channel; the anti-parallel channel is the bare U.

    Parameters
    ----------
    n_orb : integer
        Number of orbitals in the basis.
    U_int : scalar
        Screened Hubbard interaction.
    J_hund : scalar
        Hund's coupling.

    Returns
    -------
    U : float numpy array
        Two-index matrix for parallel spins.
    Uprime : float numpy array
        Two-index matrix for anti-parallel spins.
    """
    U = np.zeros((n_orb, n_orb), dtype=float)       # same spin
    Uprime = np.zeros((n_orb, n_orb), dtype=float)  # opposite spin
    for m in range(n_orb):
        for mp in range(n_orb):
            if m == mp:
                Uprime[m, mp] = U_int
            else:
                U[m, mp] = U_int - 1.0 * J_hund
                Uprime[m, mp] = U_int
    return U, Uprime
# Get t2g or eg components
def t2g_submatrix(U, convention=''):
    r"""
    Extract the t2g submatrix of the full d-manifold two- or four-index U matrix.

    Parameters
    ----------
    U : float numpy array
        Two- or four-index interaction matrix.
    convention : string, optional
        The basis convention.
        Takes the values

        - '': basis ordered as ("xy","yz","z^2","xz","x^2-y^2"),
        - 'wien2k': basis ordered as ("z^2","x^2-y^2","xy","yz","xz").

    Returns
    -------
    U_t2g : float numpy array
        The t2g component of the interaction matrix.
    """
    # Column positions of the t2g orbitals depend on the basis ordering.
    if convention == 'wien2k':
        t2g_indices = (2, 3, 4)
    elif convention == '':
        t2g_indices = (0, 1, 3)
    else:
        raise ValueError("Unknown convention: " + str(convention))
    # Apply the same index selection along every axis of U.
    return subarray(U, len(U.shape) * [t2g_indices])
def eg_submatrix(U, convention=''):
    r"""
    Extract the eg submatrix of the full d-manifold two- or four-index U matrix.

    Parameters
    ----------
    U : float numpy array
        Two- or four-index interaction matrix.
    convention : string, optional
        The basis convention.
        Takes the values

        - '': basis ordered as ("xy","yz","z^2","xz","x^2-y^2"),
        - 'wien2k': basis ordered as ("z^2","x^2-y^2","xy","yz","xz").

    Returns
    -------
    U_eg : float numpy array
        The eg component of the interaction matrix.
    """
    # Column positions of the eg orbitals depend on the basis ordering.
    if convention == 'wien2k':
        eg_indices = (0, 1)
    elif convention == '':
        eg_indices = (2, 4)
    else:
        raise ValueError("Unknown convention: " + str(convention))
    # Apply the same index selection along every axis of U.
    return subarray(U, len(U.shape) * [eg_indices])
# Transform the interaction matrix into another basis
def transform_U_matrix(U_mat, T):
    r"""
    Transform a four-index interaction matrix into another basis.

    The transformation matrix is defined such that new creation operators
    :math:`b^\dagger` relate to the old ones :math:`a^\dagger` as

    .. math:: b_{i \sigma}^\dagger = \sum_j T_{ij} a^\dagger_{j \sigma}.

    Parameters
    ----------
    U_mat : float numpy array
        The four-index interaction matrix in the original basis.
    T : real/complex numpy array
        Transformation matrix for the basis change.

    Returns
    -------
    U_mat : float numpy array
        The four-index interaction matrix in the new basis.
    """
    T_conj = np.conj(T)
    T_t = np.transpose(T)
    # Contract each of the four legs of U with T (conjugated on the
    # creation-operator side, transposed on the annihilation side).
    return np.einsum("ij,kl,jlmo,mn,op", T_conj, T_conj, U_mat, T_t, T_t)
# Rotation matrices: complex harmonics to cubic harmonics
# Complex harmonics basis: ..., Y_{-2}, Y_{-1}, Y_{0}, Y_{1}, Y_{2}, ...
def spherical_to_cubic(l, convention=''):
r"""
Get the spherical harmonics to cubic harmonics transformation matrix.
Parameters
----------
l : integer
Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
convention : string, optional
The basis convention.
Takes the values
- '': basis ordered as ("xy","yz","z^2","xz","x^2-y^2"),
- 'wien2k': basis ordered as ("z^2","x^2-y^2","xy","yz","xz").
Returns
-------
T : real/complex numpy array
Transformation matrix for basis change.
"""
if not convention in ('wien2k', ''):
raise ValueError("Unknown convention: " + str(convention))
size = 2 * l + 1
T = np.zeros((size, size), dtype=complex)
if convention == 'wien2k' and l != 2:
raise ValueError(
"spherical_to_cubic: wien2k convention implemented only for l=2")
if l == 0:
cubic_names = ("s")
elif l == 1:
cubic_names = ("x", "y", "z")
T[0, 0] = 1.0 / sqrt(2)
T[0, 2] = -1.0 / sqrt(2)
T[1, 0] = 1j / sqrt(2)
T[1, 2] = 1j / sqrt(2)
T[2, 1] = 1.0
elif l == 2:
if convention == 'wien2k':
cubic_names = ("z^2", "x^2-y^2", "xy", "yz", "xz")
T[0, 2] = 1.0
T[1, 0] = 1.0 / sqrt(2)
T[1, 4] = 1.0 / sqrt(2)
T[2, 0] = -1.0 / sqrt(2)
T[2, 4] = 1.0 / sqrt(2)
T[3, 1] = 1.0 / sqrt(2)
T[3, 3] = -1.0 / sqrt(2)
T[4, 1] = 1.0 / sqrt(2)
T[4, 3] = 1.0 / sqrt(2)
else:
cubic_names = ("xy", "yz", "z^2", "xz", "x^2-y^2")
T[0, 0] = 1j / sqrt(2)
T[0, 4] = -1j / sqrt(2)
T[1, 1] = 1j / sqrt(2)
T[1, 3] = 1j / sqrt(2)
T[2, 2] = 1.0
T[3, 1] = 1.0 / sqrt(2)
T[3, 3] = -1.0 / sqrt(2)
T[4, 0] = 1.0 / sqrt(2)
T[4, 4] = 1.0 / sqrt(2)
elif l == 3:
cubic_names = ("x(x^2-3y^2)", "z(x^2-y^2)", "xz^2", "z^3", "yz^2",
"xyz", "y(3x^2-y^2)")
T[0, 0] = 1.0 / sqrt(2)
T[0, 6] = -1.0 / sqrt(2)
T[1, 1] = 1.0 / sqrt(2)
T[1, 5] = 1.0 / sqrt(2)
T[2, 2] = 1.0 / sqrt(2)
T[2, 4] = -1.0 / sqrt(2)
T[3, 3] = 1.0
T[4, 2] = 1j / sqrt(2)
T[4, 4] = 1j / sqrt(2)
T[5, 1] = 1j / sqrt(2)
T[5, 5] = -1j / sqrt(2)
T[6, 0] = 1j / sqrt(2)
T[6, 6] = 1j / sqrt(2)
else:
raise ValueError("spherical_to_cubic: implemented only for l=0,1,2,3")
return np.matrix(T)
# Names of cubic harmonics
def cubic_names(l):
    r"""
    Get the names of the cubic harmonics.

    Parameters
    ----------
    l : integer or string
        Angular momentum of shell being treated.
        Also takes 't2g' and 'eg' as arguments.

    Returns
    -------
    cubic_names : tuple of strings
        Names of the orbitals.

    Raises
    ------
    ValueError
        If l is not one of 0-3, 's', 'p', 'd', 'f', 't2g' or 'eg'.
    """
    if l == 0 or l == 's':
        # Bug fix: ("s") is just the string "s"; a one-element tuple
        # needs a trailing comma to match the documented return type.
        return ("s",)
    elif l == 1 or l == 'p':
        return ("x", "y", "z")
    elif l == 2 or l == 'd':
        return ("xy", "yz", "z^2", "xz", "x^2-y^2")
    elif l == 't2g':
        return ("xy", "yz", "xz")
    elif l == 'eg':
        return ("z^2", "x^2-y^2")
    elif l == 3 or l == 'f':
        return ("x(x^2-3y^2)", "z(x^2-y^2)", "xz^2", "z^3", "yz^2", "xyz",
                "y(3x^2-y^2)")
    else:
        raise ValueError("cubic_names: implemented only for l=0,1,2,3")
# Convert U,J -> radial integrals F_k
def U_J_to_radial_integrals(l, U_int, J_hund):
    r"""
    Determine the Slater radial integrals F_k from U_int and J_hund.

    Parameters
    ----------
    l : integer
        Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
    U_int : scalar
        Value of the screened Hubbard interaction.
    J_hund : scalar
        Value of the Hund's coupling.

    Returns
    -------
    radial_integrals : float numpy array
        Slater integrals [F0, F2, F4, ...].

    Raises
    ------
    ValueError
        If l is not 1, 2 or 3.
    """
    F = np.zeros((l + 1), dtype=float)
    # F0 is always the Hubbard U; the higher integrals follow the
    # standard fixed ratios for each shell.
    if l == 1:
        F[0] = U_int
        F[1] = 5.0 * J_hund
    elif l == 2:
        F[0] = U_int
        # d shell: J = (F2 + F4) / 14 with F4/F2 = 0.63.
        F[1] = J_hund * 14.0 / (1.0 + 0.63)
        F[2] = 0.630 * F[1]
    elif l == 3:
        F[0] = U_int
        # f shell: J = (286 F2 + 195 F4 + 250 F6) / 6435.
        F[1] = 6435.0 * J_hund / (
            286.0 + 195.0 * 451.0 / 675.0 + 250.0 * 1001.0 / 2025.0)
        F[2] = 451.0 * F[1] / 675.0
        F[3] = 1001.0 * F[1] / 2025.0
    else:
        raise ValueError(
            "U_J_to_radial_integrals: implemented only for l=1,2,3")
    return F
# Convert radial integrals F_k -> U,J
def radial_integrals_to_U_J(l, F):
    r"""
    Determine U_int and J_hund from the Slater radial integrals.

    This is the inverse of :func:`U_J_to_radial_integrals`.

    Parameters
    ----------
    l : integer
        Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
    F : list
        Slater integrals [F0, F2, F4, ...].

    Returns
    -------
    U_int : scalar
        Value of the screened Hubbard interaction.
    J_hund : scalar
        Value of the Hund's coupling.

    Raises
    ------
    ValueError
        If l is not 1, 2 or 3.
    """
    if l not in (1, 2, 3):
        raise ValueError(
            "radial_integrals_to_U_J: implemented only for l=1,2,3")
    U_int = F[0]  # F0 is the Hubbard U for every shell
    if l == 1:
        J_hund = F[1] / 5.0
    elif l == 2:
        J_hund = F[1] * (1.0 + 0.63) / 14.0
    else:  # l == 3
        J_hund = F[1] * (
            286.0 + 195.0 * 451.0 / 675.0 + 250.0 * 1001.0 / 2025.0) / 6435.0
    return U_int, J_hund
# Angular matrix elements of particle-particle interaction
# (2l+1)^2 ((l 0) (k 0) (l 0))^2 \sum_{q=-k}^{k} (-1)^{m1+m2+q} ((l -m1) (k q) (l m3)) ((l -m2) (k -q) (l m4))
def angular_matrix_element(l, k, m1, m2, m3, m4):
    r"""
    Calculate the angular matrix element

    .. math::
       (2l+1)^2
       \begin{pmatrix}
            l & k & l \\
            0 & 0 & 0
       \end{pmatrix}^2
       \sum_{q=-k}^k (-1)^{m_1+m_2+q}
       \begin{pmatrix}
            l & k & l \\
         -m_1 & q & m_3
       \end{pmatrix}
       \begin{pmatrix}
            l & k & l \\
         -m_2 & -q & m_4
       \end{pmatrix}.

    Parameters
    ----------
    l : integer
    k : integer
    m1 : integer
    m2 : integer
    m3 : integer
    m4 : integer

    Returns
    -------
    ang_mat_ele : scalar
        Angular matrix element.
    """
    total = 0
    for q in range(-k, k + 1):
        # Phase (-1)^(m1+m2+q); Python's % keeps parity correct for negatives.
        sign = -1.0 if (m1 + m2 + q) % 2 else 1.0
        total += (sign
                  * three_j_symbol((l, -m1), (k, q), (l, m3))
                  * three_j_symbol((l, -m2), (k, -q), (l, m4)))
    prefactor = (2 * l + 1)**2 * (three_j_symbol((l, 0), (k, 0), (l, 0))**2)
    return prefactor * total
# Wigner 3-j symbols
# ((j1 m1) (j2 m2) (j3 m3))
def three_j_symbol(jm1, jm2, jm3):
    r"""
    Calculate the Wigner three-j symbol

    .. math::
       \begin{pmatrix}
        l_1 & l_2 & l_3\\
        m_1 & m_2 & m_3
       \end{pmatrix}

    using the Racah closed-form sum over factorials.

    Parameters
    ----------
    jm1 : tuple of integers
        (j_1 m_1)
    jm2 : tuple of integers
        (j_2 m_2)
    jm3 : tuple of integers
        (j_3 m_3)

    Returns
    -------
    three_j_sym : scalar
        Three-j symbol; 0.0 when the selection rules are violated.
    """
    j1, m1 = jm1
    j2, m2 = jm2
    j3, m3 = jm3
    # Selection rules: m1+m2+m3 = 0, each |m| <= j, and the triangle
    # inequality |j1-j2| <= j3 <= j1+j2. Anything else gives zero.
    if (m1 + m2 + m3 != 0 or m1 < -j1 or m1 > j1 or m2 < -j2 or m2 > j2
            or m3 < -j3 or m3 > j3 or j3 > j1 + j2 or j3 < abs(j1 - j2)):
        return .0
    # Overall phase (-1)^(j1-j2-m3). NOTE: integer-parity test only;
    # the formula assumes integer j's as used elsewhere in this module.
    three_j_sym = -1.0 if (j1 - j2 - m3) % 2 else 1.0
    # Triangle coefficient sqrt(Delta(j1 j2 j3)).
    three_j_sym *= sqrt(
        fact(j1 + j2 - j3) * fact(j1 - j2 + j3) * fact(-j1 + j2 + j3) /
        fact(j1 + j2 + j3 + 1))
    # Square-root of the m-dependent factorial products.
    three_j_sym *= sqrt(
        fact(j1 - m1) * fact(j1 + m1) * fact(j2 - m2) * fact(j2 + m2) *
        fact(j3 - m3) * fact(j3 + m3))

    # Racah sum: t runs over all values keeping every factorial argument
    # non-negative.
    t_min = max(j2 - j3 - m1, j1 - j3 + m2, 0)
    t_max = min(j1 - m1, j2 + m2, j1 + j2 - j3)
    t_sum = 0
    for t in range(t_min, t_max + 1):
        t_sum += (-1.0 if t % 2 else 1.0) / (
            fact(t) * fact(j3 - j2 + m1 + t) * fact(j3 - j1 - m2 + t) *
            fact(j1 + j2 - j3 - t) * fact(j1 - m1 - t) * fact(j2 + m2 - t))

    three_j_sym *= t_sum
    return three_j_sym
# Clebsch-Gordan coefficients
# < j1 m1 j2 m2 | j3 m3 > = (-1)^{j1-j2+m3} \sqrt{2j3+1} ((j1 m1) (j2 m2) (j3 -m3))
def clebsch_gordan(jm1, jm2, jm3):
    r"""
    Calculate the Clebsch-Gordan coefficient

    .. math::
       \langle j_1 m_1 j_2 m_2 | j_3 m_3 \rangle = (-1)^{j_1-j_2+m_3} \sqrt{2 j_3 + 1}
       \begin{pmatrix}
        j_1 & j_2 & j_3\\
        m_1 & m_2 & -m_3
       \end{pmatrix}.

    Parameters
    ----------
    jm1 : tuple of integers
        (j_1 m_1)
    jm2 : tuple of integers
        (j_2 m_2)
    jm3 : tuple of integers
        (j_3 m_3)

    Returns
    -------
    cgcoeff : scalar
        Clebsch-Gordan coefficient.
    """
    j1, m1 = jm1
    j2, m2 = jm2
    j3, m3 = jm3
    # Phase (-1)^(j1-j2+m3). Bug fix: the original wrote
    # ``j1 - j2 + m3 % 2``, which by operator precedence evaluates
    # j1 - j2 + (m3 % 2) rather than (j1 - j2 + m3) % 2, giving the
    # wrong sign for e.g. (j1, j2, m3) = (2, 1, 1).
    phase = -1 if (j1 - j2 + m3) % 2 else 1
    norm = sqrt(2 * j3 + 1) * phase
    return norm * three_j_symbol(jm1, jm2, (j3, -m3))
# Create subarray containing columns in idxlist
# e.g. idxlist = [(0),(2,3),(0,1,2,3)] gives
# column 0 for 1st dim,
# columns 2 and 3 for 2nd dim,
# columns 0,1,2 and 3 for 3rd dim.
def subarray(a, idxlist, n=None):
    r"""
    Extract a subarray from a matrix-like object by selecting, for each
    dimension, the columns listed in ``idxlist``.

    Parameters
    ----------
    a : matrix or array
    idxlist : list of tuples
        Columns to keep for each dimension.
    n : integer, optional
        Dimension currently being processed (used internally by the
        recursion; defaults to the last dimension).

    Returns
    -------
    subarray : matrix or array

    Examples
    --------
    idxlist = [(0),(2,3),(0,1,2,3)] gives

    - column 0 for 1st dim,
    - columns 2 and 3 for 2nd dim,
    - columns 0, 1, 2 and 3 for 3rd dim.
    """
    if n is None:
        n = len(a.shape) - 1
    # Keep all earlier axes intact and fancy-index axis n, then recurse
    # towards axis 0.
    full_slices = tuple(slice(dim) for dim in a.shape[:n])
    picked = a[full_slices + (idxlist[n], )]
    if n == 0:
        return picked
    return subarray(picked, idxlist, n - 1)
| romerogroup/CondensedMatter_Jupyter | code/minimulti/electron/U_matrix.py | U_matrix.py | py | 19,979 | python | en | code | 7 | github-code | 36 |
71940065385 | # Define the Rosenbrock function with parameters a=1 and b=10
def rosenbrock(x, y, a=1, b=10):
    """Rosenbrock function f(x, y) = (x - a)^2 + b*(y - x^2)^2.

    Generalized so a and b are parameters; the defaults reproduce the
    original hard-coded a=1, b=10 behavior.
    """
    return (x - a)**2 + b * (y - x**2)**2

# Search parameters (a, b match the rosenbrock defaults above).
a = 1
b = 10
alpha = 0.004      # fixed step size
u = [-0.75, 0.7]   # starting point

# Initialize mold with a sentinel larger than any attainable value.
mold = 10**100
mnew = rosenbrock(u[0], u[1])

# Steepest descent: keep stepping while the objective strictly decreases.
while mnew < mold:
    mold = mnew
    # Analytic gradient of f at u for a=1, b=10:
    # df/dx = 4*b*x^3 - 4*b*x*y + 2*(x - a), df/dy = 2*b*(y - x^2)
    df_dx = 40*u[0]**3 - 40*u[0]*u[1] + 2*u[0] - 2
    df_dy = 20 * u[1] - 20 * u[0]**2
    # Descent direction is the negative gradient.
    h = [-df_dx, -df_dy]
    # Take one step of length alpha along h.
    u[0] = u[0] + alpha * h[0]
    u[1] = u[1] + alpha * h[1]
    mnew = rosenbrock(u[0], u[1])

# The final step did not improve the objective, so roll it back: the
# minimum found is m* = mold, attained at u* = u - alpha*h.
m_star = mold
u_star = [u[0] - alpha * h[0], u[1] - alpha * h[1]]

print("Minimum Value of Rosenbrock Function:", m_star)
print("Optimal u:", u_star)
| KuffDeSchmull/comp_sci_unilu | table41_ros.py | table41_ros.py | py | 1,031 | python | en | code | 0 | github-code | 36 |
10017544829 | from core.celery import app
from celery import Celery
import json
import subprocess
import os
from .models import Node, Offer
from django.utils import timezone
import tempfile
import redis
from .serializers import NodeSerializer, OfferSerializer
import calendar
import datetime
import requests
from api.serializers import FlatNodeSerializer
from collector.models import Node as NodeV1
pool = redis.ConnectionPool(host='redis', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
@ app.task
def v2_network_online_to_redis():
    """Serialize all currently online nodes and cache them in Redis under ``v2_online``."""
    online_nodes = Node.objects.filter(online=True)
    payload = NodeSerializer(online_nodes, many=True).data
    r.set("v2_online", json.dumps(payload, default=str))
@app.task
def v2_network_online_to_redis_flatmap():
    """Cache a flat serialization of the online v1 nodes in Redis under ``v2_online_flatmap``."""
    online_nodes = NodeV1.objects.filter(online=True)
    payload = FlatNodeSerializer(online_nodes, many=True).data
    r.set("v2_online_flatmap", json.dumps(payload))
@ app.task
def v2_cheapest_offer():
    """Cache vm offers updated within the last five minutes, ordered by
    descending monthly GLM price, under ``v2_cheapest_offer``."""
    window_start = timezone.now() - timezone.timedelta(minutes=5)
    recent_offers = Offer.objects.filter(
        runtime="vm",
        updated_at__range=(window_start, timezone.now())).order_by('-monthly_price_glm')
    payload = OfferSerializer(recent_offers, many=True).data
    r.set("v2_cheapest_offer", json.dumps(payload, default=str))
@ app.task
def latest_blog_posts():
    """Fetch the three newest posts from the Ghost blog API and cache
    the raw JSON response under ``v2_index_blog_posts``."""
    response = requests.get(
        f"https://blog.golemproject.net/ghost/api/v3/content/posts/?key={os.environ.get('BLOG_API_KEY')}&include=tags,authors&limit=3")
    r.set("v2_index_blog_posts", json.dumps(response.json()))
@ app.task
def v2_cheapest_provider():
    """Build cloud-vs-Golem price comparison tables (2/8/32/64-core tiers)
    and cache them in Redis under ``v2_cheapest_provider``.

    Cloud entries are hard-coded list prices; one Golem provider per tier
    is appended (the cheapest matching offer), priced via the current
    GLM/USD rate from CoinGecko.
    """
    # Current GLM price in USD (contract address is the GLM token).
    req = requests.get(
        "https://api.coingecko.com/api/v3/coins/ethereum/contract/0x7DD9c5Cba05E151C895FDe1CF355C9A1D5DA6429")
    data = req.json()
    price = data['market_data']['current_price']['usd']
    # All vm offers from online providers, cheapest first.
    obj = Offer.objects.filter(
        runtime="vm", provider__online=True).order_by("monthly_price_glm")
    serializer = OfferSerializer(obj, many=True)
    # Keep only providers that accept mainnet GLM payments.
    mainnet_providers = []
    for index, provider in enumerate(serializer.data):
        if "golem.com.payment.platform.erc20-mainnet-glm.address" in provider['properties']:
            mainnet_providers.append(provider)
    # Sort by thread count first, then by monthly price within each count.
    sorted_pricing_and_specs = sorted(mainnet_providers, key=lambda element: (
        float(element['properties']['golem.inf.cpu.threads']), float(element['monthly_price_glm'])))
    # Hard-coded cloud comparison rows per tier (USD list prices;
    # 'glm' is the equivalent at the current exchange rate).
    two_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                  'usd_monthly': '15', 'bandwidth': '3', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 15}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                                                                                                                              'usd_monthly': '15.23', 'bandwidth': 'Unlimited', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 15.23}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                                                                                                                                                                                                                                                        'usd_monthly': '10.37', 'bandwidth': 'Unlimited', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 10.37}, {'name': 'Azure', 'img': '/azure-logo.svg',
                                                                                                                                                                                                                                                                                                                                                                                  'usd_monthly': '15.11', 'bandwidth': '6', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 15.11}, ]
    eight_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                    'usd_monthly': '80', 'bandwidth': '6', 'cores': 8, 'memory': '16', 'disk': "320", "glm": float(price) * 80}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                                                                                                                                  'usd_monthly': '121.81', 'bandwidth': 'Unlimited', 'cores': 8, 'memory': '16', 'disk': "320", "glm": float(price) * 121.81}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                                                                                                                                                                                                                                                                'usd_monthly': '208.47', 'bandwidth': 'Unlimited', 'cores': 8, 'memory': '32', 'disk': "320", "glm": float(price) * 208.47}, {'name': 'Azure', 'img': '/azure-logo.svg',
                                                                                                                                                                                                                                                                                                                                                                                              'usd_monthly': '121.18', 'cores': 8, 'memory': '16', 'bandwidth': '6', 'disk': "320", "glm": float(price) * 121.18}]
    thirtytwo_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                        'usd_monthly': '640', 'bandwidth': '9', 'cores': 32, 'memory': '64', 'disk': "400", "glm": float(price) * 640}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                                                                                                                                         'usd_monthly': '834.24', 'bandwidth': 'Unlimited', 'cores': 32, 'memory': '64', 'disk': "400", "glm": float(price) * 834.24}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                                                                                                                                                                                                                                                                        'usd_monthly': '746.04', 'bandwidth': 'Unlimited', 'cores': 32, 'memory': '64', 'disk': "400", "glm": float(price) * 746.04}, {'name': 'Azure', 'img': '/azure-logo.svg',
                                                                                                                                                                                                                                                                                                                                                                                                       'usd_monthly': '1310.13', 'bandwidth': '1', 'cores': 32, 'memory': '64', 'disk': "256", "glm": float(price) * 1310.13}, ]
    sixtyfour_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                        'usd_monthly': '1200', 'bandwidth': '9', 'cores': 40, 'memory': '160', 'disk': "500", "glm": float(price) * 1200}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                                                                                                                                            'usd_monthly': '1638.48', 'bandwidth': 'Unlimited', 'cores': 64, 'memory': '64', 'disk': "500", "glm": float(price) * 1638.48}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                                                                                                                                                                                                                                                                             'usd_monthly': '1914.62', 'bandwidth': 'Unlimited', 'cores': 60, 'memory': '240', 'disk': "500", "glm": float(price) * 1914.62}, {'name': 'Azure', 'img': '/azure-logo.svg',
                                                                                                                                                                                                                                                                                                                                                                                                               'usd_monthly': '2688.37', 'bandwidth': '1', 'cores': 64, 'memory': '256', 'disk': "512", "glm": float(price) * 2688.37}, ]
    for obj in sorted_pricing_and_specs:
        # Build a comparison row for this Golem provider.
        provider = {}
        provider['name'] = "Golem Network"
        provider['node_id'] = obj['properties']['id']
        provider['img'] = "/golem.png"
        provider['usd_monthly'] = float(
            price) * float(obj['monthly_price_glm'])
        provider['cores'] = float(
            obj['properties']['golem.inf.cpu.threads'])
        provider['memory'] = float(obj['properties']['golem.inf.mem.gib'])
        provider['bandwidth'] = "Unlimited"
        provider['disk'] = float(
            obj['properties']['golem.inf.storage.gib'])
        provider['glm'] = float(obj['monthly_price_glm'])
        # Each tier already has 4 cloud rows; the len(...) == 4 guard adds
        # at most one Golem row per tier. The elif admits the first
        # provider with MORE threads when no exact-match provider exists
        # (offers are iterated in ascending thread/price order).
        if float(obj['properties']['golem.inf.cpu.threads']) == 2 and len(two_cores) == 4:
            two_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 2 and len(two_cores) == 4:
            two_cores.append(provider)
        if float(obj['properties']['golem.inf.cpu.threads']) == 8 and len(eight_cores) == 4:
            eight_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 8 and len(eight_cores) == 4:
            eight_cores.append(provider)
        if float(obj['properties']['golem.inf.cpu.threads']) == 32 and len(thirtytwo_cores) == 4:
            thirtytwo_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 32 and len(thirtytwo_cores) == 4:
            thirtytwo_cores.append(provider)
        if float(obj['properties']['golem.inf.cpu.threads']) == 64 and len(sixtyfour_cores) == 4:
            sixtyfour_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 64 and len(sixtyfour_cores) == 4:
            sixtyfour_cores.append(provider)
    # Sort each tier by USD price so the cheapest option comes first.
    sorted_two = sorted(two_cores, key=lambda element: (
        float(element['usd_monthly'])))
    sorted_eight = sorted(eight_cores, key=lambda element: (
        float(element['usd_monthly'])))
    sorted_thirtytwo = sorted(thirtytwo_cores, key=lambda element: (
        float(element['usd_monthly'])))
    sorted_sixtyfour = sorted(sixtyfour_cores, key=lambda element: (
        float(element['usd_monthly'])))
    data = json.dumps({'2': sorted_two, '8': sorted_eight,
                       '32': sorted_thirtytwo, '64': sorted_sixtyfour})
    r.set("v2_cheapest_provider", data)
@ app.task
def v2_offer_scraper():
    """Run the offer-listing helper script, ingest the offers it stored in
    Redis under ``offers_v2`` into Node/Offer rows, and mark nodes that no
    longer appear in the scrape as offline.
    """
    os.chdir("/stats-backend/yapapi/examples/low-level-api/v2")
    # data.config holds the shell command to run; only the LAST line is
    # used (each iteration overwrites `command`).
    with open('data.config') as f:
        for line in f:
            command = line
    proc = subprocess.Popen(command, shell=True)
    proc.wait()
    # The helper script writes its results into Redis as a JSON list of
    # JSON-encoded offer strings.
    content = r.get("offers_v2")
    serialized = json.loads(content)
    # Seconds in the current month, used to extrapolate a monthly price.
    now = datetime.datetime.now()
    days_in_current_month = calendar.monthrange(
        now.year, now.month)[1]
    seconds_current_month = days_in_current_month*24*60*60
    for line in serialized:
        data = json.loads(line)
        provider = data['id']
        wallet = data['wallet']
        obj, created = Node.objects.get_or_create(node_id=provider)
        if created:
            # First time we see this provider: create its offer row.
            offerobj = Offer.objects.create(properties=data, provider=obj,
                                            runtime=data['golem.runtime.name'])
            if data['golem.runtime.name'] == 'vm':
                # Map usage-vector labels to their coefficient positions.
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                # monthly = duration_coeff * secs + cpu_coeff * secs * cores + fixed start fee
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offerobj.monthly_price_glm = monthly_pricing
                offerobj.save()
            obj.wallet = wallet
            obj.online = True
            obj.save()
        else:
            # Known provider: upsert its offer for this runtime.
            offerobj, offercreated = Offer.objects.get_or_create(
                provider=obj, runtime=data['golem.runtime.name'])
            if data['golem.runtime.name'] == 'vm':
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offerobj.monthly_price_glm = monthly_pricing
                offerobj.save()
            offerobj.properties = data
            offerobj.save()
            obj.runtime = data['golem.runtime.name']
            obj.wallet = wallet
            obj.online = True
            obj.save()
    # Find offline providers: any previously-online node whose id does not
    # occur anywhere in the concatenated scrape output is marked offline.
    str1 = ''.join(serialized)
    fd, path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as tmp:
            # do stuff with temp file
            tmp.write(str1)
        online_nodes = Node.objects.filter(online=True)
        for node in online_nodes:
            if not node.node_id in str1:
                node.online = False
                node.computing_now = False
                node.save(update_fields=[
                    'online', 'computing_now'])
    finally:
        # Always remove the temp file, even if the DB update fails.
        os.remove(path)
@ app.task
def v2_offer_scraper_hybrid_testnet():
    """Run the testnet offer-listing script, then bulk-upsert the offers
    it stored in Redis under ``v2_offers_hybrid_testnet`` into Node/Offer
    rows, finally marking vanished nodes offline.
    """
    os.chdir("/stats-backend/yapapi/examples/low-level-api/hybrid")
    proc = subprocess.Popen(
        'export YAGNA_APPKEY=$(yagna app-key list --json | jq -r .[0].key) && python3 list-offers-testnet.py', shell=True)
    proc.wait()
    content = r.get("v2_offers_hybrid_testnet")
    serialized = json.loads(content)
    # Seconds in the current month, used to extrapolate a monthly price.
    now = datetime.datetime.now()
    days_in_current_month = calendar.monthrange(
        now.year, now.month)[1]
    seconds_current_month = days_in_current_month*24*60*60
    # Accumulators for a single bulk write at the end.
    nodes_to_create = []
    nodes_to_update = []
    offers_to_create = []
    offer_to_update = []
    # Nodes currently flagged online; ids still present in the scrape are
    # removed below, so what remains is the set to mark offline.
    offline_nodes = set(Node.objects.filter(
        online=True).values_list('node_id', flat=True))
    for line in serialized:
        data = json.loads(line)
        provider = data['id']
        wallet = data['wallet']
        obj, created = Node.objects.get_or_create(node_id=provider)
        if created:
            if data['golem.runtime.name'] == 'vm':
                # Map usage-vector labels to their coefficient positions.
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                # monthly = duration_coeff * secs + cpu_coeff * secs * cores + fixed start fee
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offers_to_create.append(
                    Offer(properties=data, provider=obj, runtime=data['golem.runtime.name'], monthly_price_glm=monthly_pricing))
            # NOTE(review): this second append runs unconditionally, so a
            # vm provider gets TWO Offer rows queued (one priced, one
            # not). Presumably it was meant as the non-vm `else` branch --
            # confirm intent.
            offers_to_create.append(
                Offer(properties=data, provider=obj, runtime=data['golem.runtime.name']))
            # NOTE(review): get_or_create above already inserted this node,
            # so bulk_create of a second Node with the same node_id looks
            # like a duplicate insert -- verify against the Node schema.
            nodeobj = Node(node_id=provider, wallet=wallet, online=True)
            nodes_to_create.append(
                nodeobj)
        else:
            # Known provider: upsert its offer for this runtime.
            offerobj, offercreated = Offer.objects.get_or_create(
                provider=obj, runtime=data['golem.runtime.name'])
            if data['golem.runtime.name'] == 'vm':
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offerobj.monthly_price_glm = monthly_pricing
            offerobj.properties = data
            offerobj.runtime = data['golem.runtime.name']
            if offercreated:
                offers_to_create.append(offerobj)
            else:
                offer_to_update.append(offerobj)
        # Runs for both branches: refresh the node's wallet/online state
        # and drop it from the offline candidate set.
        obj.wallet = wallet
        obj.online = True
        obj.updated_at = timezone.now()
        nodes_to_update.append(obj)
        if provider in offline_nodes:
            offline_nodes.remove(provider)
    # Flush all accumulated writes in four bulk operations.
    Node.objects.bulk_create(nodes_to_create)
    Node.objects.bulk_update(nodes_to_update, fields=[
        'wallet', 'online', 'updated_at', ])
    Offer.objects.bulk_create(offers_to_create)
    Offer.objects.bulk_update(offer_to_update, fields=[
        'properties', 'monthly_price_glm'])
    # mark offline nodes as offline
    Node.objects.filter(node_id__in=offline_nodes, online=True).update(
        online=False, computing_now=False, updated_at=timezone.now())
| golemfactory/golem-stats-backend | stats-backend/api2/tasks.py | tasks.py | py | 17,630 | python | en | code | 1 | github-code | 36 |
20101978227 | import cache
import timer
import lora
import websockets
import asyncio
import threading
import keyboard
import timestamp
def thread_function():
    """Worker loop: keep running the countdown timer until the global stop flag is set."""
    while not cache.stop_thread:
        timer.countdown(cache.t)
def thread_function_killer():
    """Watch the keyboard and request program termination when 'q' is pressed."""
    while not cache.stop_thread:
        events = keyboard.record(until='q')
        first_event = str(events[0])
        # keyboard.record returns events up to the trigger key; either the
        # press or the release of 'q' terminates the program.
        if first_event in ("KeyboardEvent(q down)", "KeyboardEvent(q up)"):
            print('\nYou terminated the program!\nGoodbye\n.')
            cache.stop_thread = True
            break
def thread_function_bdreg():
    """Background flusher: empties the in-memory cache either when the
    periodic timer flag is raised or when the cache grows past its
    configured maximum size."""
    while (not cache.stop_thread):
        # Timer-driven flush: another thread sets cache.time_thread.
        if cache.time_thread == True:
            time = timestamp.timestamp()
            cache_len = len(cache.cache)
            if cache_len > 0:
                print("\n[CACHE FLUSH]")
                lora.cacheClear(time)
            else:
                print("\n[CACHE FLUSH EMPTY]")
            cache.marcos_fluxos.append(time)
            cache.numero_fluxos += 1
            cache.time_thread = False
        #if cache.expire_thread == True:
        #    time = timestamp.timestamp()
        #    cache_len = len(cache.cache)
        #    if cache_len > 0:
        #        lora.reg_expire(time)
        #        cache.expire_thread = False
        # Size-driven flush: triggered when the cache reaches its cap.
        if len(cache.cache) >= cache.cache_max_size :
            time = timestamp.timestamp()
            cache_len = len(cache.cache)
            if cache_len > 0:
                print("\n[CACHE SIZE FLUSH]")
                lora.cacheClear(time)
            # NOTE(review): unlike the timer branch above, this EMPTY
            # message prints unconditionally (no `else:`) -- presumably
            # it should only print when the cache was empty; confirm.
            print("\n[CACHE SIZE FLUSH EMPTY]")
            cache.marcos_fluxos.append(time)
            cache.numero_fluxos += 1
    # Loop exits once cache.stop_thread is set.
    return
def main():
    """Start the countdown and cache-flush worker threads, then listen on
    the websocket for incoming messages and append parsed flows to the
    shared cache."""
    x = threading.Thread(target=thread_function)
    #y = threading.Thread(target=thread_function_killer)
    z = threading.Thread(target = thread_function_bdreg)
    #y.start()
    z.start()
    x.start()
    # Async coroutine that connects to the server and handles incoming messages.
    async def listen():
        # Connect to the websocket server (URL comes from the shared cache module).
        async with websockets.connect(cache.url, ping_interval=None) as ws:
            await ws.send("") # connection test message
            # Keep listening for messages until the stop flag is raised.
            while (not cache.stop_thread):
                print("Listening")
                if cache.stop_thread == True:
                    return
                msg = await ws.recv()
                # createFlow returns None for error messages; otherwise the
                # parsed flow is stored in the cache.
                fluxo = lora.createFlow(msg)
                if fluxo != None:
                    cache.cache.append(fluxo)
                    print("\nFLOW UPDATED TO CACHE:\n---------------------------")
                print(f"\n Último fluxo em: {cache.marcos_fluxos} | Fluxos registrados: {cache.numero_fluxos} | Mensagens totais: {cache.numero_mensagens}\n")
    # Run listen() to completion; both the connection and each receive are async.
    asyncio.get_event_loop().run_until_complete(listen())
if __name__ == "__main__":
main() | juliogcm/lorawan-flow | websocket_client/client.py | client.py | py | 3,331 | python | en | code | 0 | github-code | 36 |
74001078184 | from permute import recursive
# @recursive
def test_single(depth: int) -> int:
if depth == 10:
return 0
return test_single(depth+1) + 1
def sum_results(results: list[int]) -> int:
    """Return the sum of *results* (built-in ``sum`` replaces the manual loop)."""
    return sum(results)
@recursive
def test_branching(depth: int) -> int:
    """Binary recursion to depth 7; aggregates the leaf values via sum_results."""
    if depth == 7:
        return 1
    # Two recursive children per level.
    child_results: list[int] = [test_branching(depth + 1) for _ in range(2)]
    return sum_results(child_results)
| LQR471814/permute | example.py | example.py | py | 501 | python | en | code | 0 | github-code | 36 |
43104980518 | '''
Minimum Multiple
Given a collection C1 of ‘n’ positive integers and a number ‘m’ write a C program to find the minimum multiple of m in C1. If no such multiple exist in C1 then print ‘No multiple found’
For example, if there are seven elements 23, 24, 25, 12, 6, 7, 11 and m is 3 then the output should be 6.
Input Format
First line contains the number of elements in the collection C1, n
Next ‘n’ lines contain the elements in the collection C1
Next line contains the value of m
Output Format
Print the minimum multiple of ‘m’ present in C1 or ‘No multiple found’
print("h")
Input
7
23
24
25
12
6
7
11
3
Expected output
6
Your Program Output
6
H
'''
# Input format: collection size n, then n elements, then the divisor m.
n = int(input())
lst = [int(input()) for _ in range(n)]
check = int(input())

# Driver code: the minimum multiple of `check` is just the smallest
# element divisible by it -- no need to sort the whole collection
# (O(n) filter + min instead of the original O(n log n) sort + flag).
multiples = [x for x in lst if x % check == 0]
if multiples:
    print(min(multiples))
else:
    print("No multiple found")
| Sasank123k/problem-of-the-day | 2021-12-18.py | 2021-12-18.py | py | 965 | python | en | code | 0 | github-code | 36 |
34234807737 |
class Employee:
    """Employee record demonstrating regular, class and static methods.

    Class attributes:
        raise_amount -- default pay-raise multiplier shared by all instances.
        num_emps     -- running count of Employee instances created.
    """

    raise_amount = 1.04
    num_emps = 0

    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.pay = pay
        # Email is derived once from the name at construction time.
        self.email = first + '.' + last + '@company.com'
        Employee.num_emps += 1

    def fullname(self):
        """Return the employee's full name."""
        return f"{self.first} {self.last}"

    def apply_raise(self):
        """Multiply pay by raise_amount (an instance override wins over the class value)."""
        self.pay = int(self.pay * self.raise_amount)

    @classmethod
    def set_raise_amount(cls, amount):
        """Set the raise multiplier for the whole class."""
        cls.raise_amount = amount

    @classmethod
    def from_string(cls, emp_str):
        """Alternate constructor: parse an employee from 'First-Last-Pay'.

        Bug fix: pay is converted to int so that apply_raise() works on
        instances created this way (split() yields strings).
        """
        first, last, pay = emp_str.split('-')
        return cls(first, last, int(pay))

    # Regular methods receive the instance ('self'); class methods receive
    # the class ('cls'); static methods receive neither automatically --
    # they behave like plain functions but live on the class for logical
    # grouping. Use a static method when the code needs no instance or
    # class state, e.g. this date utility.
    @staticmethod
    def is_workday(day):
        """Return True if `day` is a weekday (Python weekday(): 0=Mon .. 6=Sun)."""
        if day.weekday() == 5 or day.weekday() == 6:  # Saturday or Sunday
            return False
        else:
            return True
# Demo: create two employees and check whether a sample date is a workday.
emp_1 = Employee("Corey", "Schafer", 50000)
emp_2 = Employee("Test", "User", 60000)

import datetime
my_date = datetime.date(2016, 7, 10) #prints false as it is a Sunday
print(Employee.is_workday(my_date))
| latiful-hassan/OOP | staticmethods.py | staticmethods.py | py | 1,980 | python | en | code | 0 | github-code | 36 |
41230462018 | import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from SnapOptimizer.optimization.snap_optimizer import SNAPGateOptimizer2Qubits, SNAPGateOptimizerStatePreparation
from SnapOptimizer.optimization.snap_pulse_optimizer import SNAPPulseOptimizer
import SnapOptimizer.qubit_gates as qubit_gates
from SnapOptimizer.visualize import show_state
from SnapOptimizer.encodings import Encoding
import SnapOptimizer.paths as local_paths
def optimize_SNAP_gates(
encoding: Encoding, gates: list[str], n_gates: list[int], Ns: list[int], epochs: int, output_folder: Path,
show_figure: bool = False, c: float = 0.001, averages: int = 1
):
"""
Automation for the optimization of SNAP gates
Args:
encoding: The encoding to use
gates: The gates to optimize
n_gates: How many SNAP gates to use to replicate the gate
Ns: Size of the Hilbert space in the fock basis
epochs: How many epochs to run the optimization for
output_folder: folder to save the results to
show_figure: If True the figures will pop up on screen when they are drawn. Otherwise they will only be saved
c: Parameter to control the weight of the thetas in the optimization
averages: Run the same optimization multiple times
"""
fidelity_fig, fidelity_ax = plt.subplots(1, figsize=(8, 8))
for N in Ns:
code = encoding.get_encoding(N)
snap_op = SNAPGateOptimizer2Qubits(code, c=c)
for gate_name in gates:
gate = getattr(qubit_gates, gate_name.upper(), None)
if gate is None:
print(f"The gate {gate_name} is not defined. Check your spelling and try again")
continue
for n in n_gates:
for i in range(averages):
if averages == 1:
save_to = output_folder / f"{gate_name}-{n}-gates-{N}-fockstates"
else:
save_to = output_folder / f"{gate_name}-{n}-gates-{N}-fockstates_{i+1}"
alphas, thetas, _, fidelities = snap_op.optimize_gates(gate, n, epochs, output_folder=save_to)
# Generate figures
fidelity_ax.plot(range(epochs), fidelities, label=f"{gate_name} {n} gates {N} fock")
fig, axs = plt.subplots(2, 4, figsize=(16, 8))
for i, qubit_state in enumerate(['L00', 'L01', 'L10', 'L11']):
logic_state = getattr(snap_op, qubit_state)
evolved_state = snap_op.snap_gate(alphas, thetas, logic_state)
expected_state = snap_op.transform_gate(gate) @ logic_state
show_state(evolved_state, ax=axs[0][i], title=f"Gate on {qubit_state}")
show_state(expected_state, ax=axs[1][i], title="")
fig.savefig(save_to / 'wigner_plots.png', dpi=150)
if show_figure:
plt.show()
plt.close(fig)
# Figure over the fidelities
fidelity_ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fidelity_fig.tight_layout()
fidelity_ax.set_ylabel("Fidelity")
fidelity_ax.set_xlabel("Epoch")
fidelity_ax.set_ylim([0, 1.1])
filename = output_folder / 'fidelity_plot.png'
# Handle the possibility of the image already existing. (Multiple processes running the optimization)
counter = 1
while filename.exists():
filename = output_folder / f'fidelity_plot_{counter}.png'
counter += 1
fidelity_fig.savefig(filename, dpi=150)
if show_figure:
plt.show()
def optimize_SNAP_gates_for_state_preparation(state: np.ndarray, n: int, N: int, epochs: int = 2000, output_folder: Path = None):
op = SNAPGateOptimizerStatePreparation(N=N)
ground_state = np.zeros((N, 1))
ground_state[0, 0] = 1
alphas, thetas, cost, fidelities = op.optimize_gates(state, n_gates=n, epochs=epochs, output_folder=output_folder)
_, (ax1, ax2) = plt.subplots(1, 2)
fig, ax = plt.subplots(1)
evolved_state = op.snap_gate(alphas, thetas, ground_state)
show_state(evolved_state, ax=ax1, title=f"Evolved ground state")
show_state(state, ax=ax2, title="Target state")
ax.plot(range(len(fidelities)), fidelities)
plt.show()
def optimize_SNAP_pulses(alphas: np.ndarray, thetas: np.ndarray, output_folder: Path = None):
op = SNAPPulseOptimizer(
dim_c = thetas.shape[-1],
dim_t = 2,
delta = -2.574749e6,
xi = -2*np.pi* 2.217306e6,
xip = -2*np.pi* 0.013763e6,
K = -2*np.pi* 0.002692e6,
alpha = 0,
wt = 0,
wc = 0,
max_rabi_rate = 2*np.pi* 20e6,
cutoff_frequency = 2*np.pi* 30e6,
num_drives = 1
)
op.optimize_gate_pulses(thetas, alphas, 0.7e-6, output_folder=output_folder)
if __name__ == '__main__':
# input_folder = local_paths.data('test_state_20')
# output_folder = local_paths.pulses('test_state_20')
# thetas = np.loadtxt(input_folder / 'thetas.csv', delimiter=',')
# alphas = np.loadtxt(input_folder / 'alphas.csv', delimiter=',')
# optimize_SNAP_pulses(alphas=alphas, thetas=thetas)
output_folder = local_paths.data('fock_1')
N = 12
n = 1
epochs = 3000
fock1 = np.zeros((12, 1))
fock1[1,:] = 1
print(fock1)
optimize_SNAP_gates_for_state_preparation(fock1, n, N, epochs=epochs, output_folder=output_folder)
| Paulsson99/SnapOptimizer | SnapOptimizer/optimization/automation.py | automation.py | py | 5,537 | python | en | code | 0 | github-code | 36 |
41539002397 | #!/usr/bin/python
# Imports
### System
import os
import argparse
### Python
import numpy as np
import cv2 as cv
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
### Pytorch
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
### TODO: Use WandB, Pytorch Lightning & torchsummary
import wandb
import lightning.pytorch as pl
from lightning.pytorch import LightningDataModule, LightningModule, Trainer, seed_everything
from lightning.pytorch import Callback
from lightning.pytorch.callbacks import DeviceStatsMonitor, TQDMProgressBar, ModelCheckpoint, EarlyStopping, LearningRateMonitor
from lightning.pytorch.loggers import WandbLogger
from torchsummary import summary
### Timm
import timm
import timm.optim
### Custom
from custom_dataloader import get_dataloaders
# DONE_TODO 1: Import pytorch lightning, wandb(logging), timm and use it to load a pretrained model
# Done_TODO 2: Modify the model's classifier to output 3 classes instead of X (defined by the model)
# Done_TODO 3: Train the model + Logging & Saving best ckpt for 10 epochs and report test accuracy
def get_args():
args = argparse.ArgumentParser(description='Transfer Learning')
args.add_argument('--model', '-m', type=str, default='vgg16', required=True, help='Model to use [vgg16, vgg19, resnet50, resnet101, effb4, effb5]')
args.add_argument('--epochs', '-e', type=int, default=10, help='Number of epochs to train for')
args.add_argument('--batch_size', type=int, default=32, help='Batch size')
args.add_argument('--device', '-d', type=str, default='cuda', required=True, help='Device to use [cpu, cuda:0, cuda:1, cuda]')
args.add_argument('--mode', '-md', type=str, default='train', help='Mode to run: [train, trainX, test]. train = finetune only classifier layer. trainX = finetune last few layers including the classifier. test = test the model')
args.add_argument('--ckpt_path', '-cp', type=str, default="", help='Path to checkpoint to load')
args.add_argument('--lr', '-lr', type=float, default=1e-3, help='Learning rate')
args.add_argument('--num_workers', '-nw', type=int, default=8, help='Number of workers for dataloader')
args.add_argument('--exp_name', '-en', type=str, default='generic_exp', help='Experiment name for wandb')
args.add_argument('--use_cam', action='store_true', help='Use Class Activation Maps Loss')
# args.print_help()
return args.parse_args()
def get_model(modelName):
# VGG Fam
if modelName == 'vgg16':
model = timm.create_model('vgg16', pretrained=True)
elif modelName == 'vgg19':
model = timm.create_model('vgg19', pretrained=True)
# Res Fam: Using catavgmax pooling to increase number of features
elif modelName == 'resnet50':
model = timm.create_model('resnet50', pretrained=True)
elif modelName == 'resnet101':
model = timm.create_model('resnet101', pretrained=True)
# EfficientNet Fam: Using catavgmax pooling here as well
elif modelName == 'effb4':
model = timm.create_model('efficientnet_b4', pretrained=True)
elif modelName == 'effb5':
model = timm.create_model('efficientnet_b5', pretrained=True)
return model
def check_args(args):
if args.model not in ['vgg16', 'vgg19', 'resnet50', 'resnet101', 'effb4', 'effb5']:
raise ValueError('[!] Invalid model')
if 'cuda' in args.device and not torch.cuda.is_available():
raise ValueError('[!] Cuda not available')
if 'cuda:' in args.device:
if int(args.device[-1]) >= torch.cuda.device_count():
raise ValueError('[!] Invalid cuda device. You have lesser cuda devices than the one you specified')
class LIT_TL(pl.LightningModule):
def __init__(self, model, modelName = "brrr", config: dict = None):
super().__init__()
self.save_hyperparameters(ignore=['model'])
self.modelName = modelName
self.config = config
if "vgg" in modelName:
self.num_filters = model.head.fc.in_features
elif "eff" in modelName:
self.num_filters = model.classifier.in_features
else:
self.num_filters = model.fc.in_features
layers = list(model.children())[:-1]
self.feature_extractor = nn.Sequential(*layers)
num_target_classes = config['classes']
if "vgg" in modelName: # custom classifier head for vggs ;)
self.classifier = nn.Sequential(*[
model.head.global_pool,
model.head.drop,
nn.Linear(in_features=4096, out_features=3, bias=True),
model.head.flatten,
])
else:
self.classifier = nn.Linear(self.num_filters, num_target_classes)
self.classifier.apply(self.init_xavier)
self.ce_loss = nn.CrossEntropyLoss()
# TODO: Apply Class-Activation-Maps Loss and visualize it
self.use_cam = config['use_cam']
if self.use_cam:
self.gap_fc = nn.utils.spectral_norm(nn.Linear(self.num_filters, 1, bias=False))
self.gmp_fc = nn.utils.spectral_norm(nn.Linear(self.num_filters, 1, bias=False))
self.conv1x1 = nn.Conv2d(self.num_filters * 2, self.num_filters, kernel_size=1, stride=1, bias=True)
self.conv_classifier = nn.utils.spectral_norm(
nn.Conv2d(self.num_filters, 1, kernel_size=4, stride=1, padding=0, bias=False))
self.cam_loss = nn.CrossEntropyLoss()
def init_xavier(self, m):
if type(m) == nn.Linear:
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def calculate_accuracy(self, yhat, y):
return 100. * torch.sum(yhat == y) / len(y)
def forward(self, x):
self.feature_extractor.eval()
with torch.no_grad():
rep = self.feature_extractor(x)
if self.use_cam:
gap = torch.nn.functional.adaptive_avg_pool2d(rep, 1)
gap_logit = self.gap_fc(gap.view(rep.shape[0], -1))
gap_weight = list(self.gap_fc.parameters())[0]
gap = rep * gap_weight.unsqueeze(2).unsqueeze(3)
gmp = torch.nn.functional.adaptive_max_pool2d(rep, 1)
gmp_logit = self.gmp_fc(gmp.view(rep.shape[0], -1))
gmp_weight = list(self.gmp_fc.parameters())[0]
gmp = rep * gmp_weight.unsqueeze(2).unsqueeze(3)
c_logit = torch.cat([gap_logit, gmp_logit], 1)
rep = torch.cat([gap, gmp], 1)
rep = self.leaky_relu(self.conv1x1(rep))
heatmap = torch.sum(rep, dim=1, keepdim=True)
rep = self.pad(rep)
out = self.conv_classifier(rep)
return out, c_logit, heatmap
else:
if not "vgg" in self.modelName:
rep = rep.flatten(1)
out = self.classifier(rep)
return out
def configure_optimizers(self):
opt = timm.optim.AdamW((param for param in self.classifier.parameters() if param.requires_grad),
lr=self.config['lr'],
weight_decay=self.config['decay'])
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(opt,
T_0=self.config['T_0'],
eta_min=self.config['eta_min'])
return [opt], [scheduler]
def training_step(self, batch, batch_idx):
img = batch['image'].to(self.device)
# mask = batch['mask'] # Not Needed in classification setting
y = batch['class_label'].to(self.device)
if self.use_cam:
y_hat, logits, heatmap = self.forward(img)
if 1 + batch_idx % 100 == 0:
plt.savefig(f"{self.config['model']}_{batch_idx}.png", heatmap.squeeze(0).cpu().numpy())
cam_loss = self.cam_loss(logits, y)
loss = self.ce_loss(y_hat, y) + cam_loss
else:
y_hat = self.forward(img)
loss = self.ce_loss(y_hat, y)
preds = torch.argmax(y_hat, dim=1)
# print(f"y: {y}, y_hat: {y_hat}")
# DONE_TODO: Log train metrics here
train_step_acc = self.calculate_accuracy(preds, y) # DONE_TODO: Calculate train accuracy here
self.log("train_acc", train_step_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
img = batch['image'].to(self.device)
# mask = batch['mask'] # Not Needed in classification setting
y = batch['class_label'].to(self.device)
# print(f"{i+1} | {img.shape} | {y}")
y_hat = self.forward(img)
preds = torch.argmax(y_hat, dim=1)
test_acc = self.calculate_accuracy(preds, y)
self.log("test_acc", test_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
# DONE_TODO: Add images to the logger for one batch
if (batch_idx + 1) % 10 == 0:
id2cat = {'0': 'authentic', '1': 'copy-moved', '2': 'spliced'}
caption_strs = []
for i in range(len(img)):
correct = "Misclassified" if preds[i] != y[i] else "Correct"
caption_strs.append(f"Pred: {id2cat[str(preds[i].item())]}, Label: {id2cat[str(y[i].item())]} | {correct}")
self.logger.log_image(
key=f"Validation Batch: {batch_idx + 1}",
images=[img[i] for i in range(len(img))],
caption=caption_strs,
)
def test_step(self, batch, batch_idx):
# NOTE: Same as validation loop minus the image logging
# No image logging so that export can be done easily
img = batch['image'].to(self.device)
# mask = batch['mask'] # Not Needed in classification setting
y = batch['class_label'].to(self.device)
# print(f"{i+1} | {img.shape} | {y}")
y_hat = self.forward(img)
preds = torch.argmax(y_hat, dim=1)
test_acc = self.calculate_accuracy(preds, y)
self.log("test_acc", test_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
def get_config(args):
config = {
'seed': 42,
'model': args.model,
'mode': args.mode,
'lr': args.lr,
'batch_size': args.batch_size,
'epochs': args.epochs,
'device': args.device,
'num_workers': args.num_workers,
'T_0': 50,
'eta_min': 6e-4,
'classes': 3,
'decay': 1e-3,
'exp_name': args.exp_name,
'use_cam': args.use_cam
}
return config
if __name__ == '__main__':
args = get_args()
# print("Total devices:", torch.cuda.device_count())
check_args(args) # will also set args.device properly
config = get_config(args)
seed_everything(42)
if torch.cuda.is_available():
device_name = torch.cuda.get_device_name(args.device)
else:
device_name = 'cpu'
print("--------------------------------------------\nSelected device:", device_name,"\n--------------------------------------------")
print(f"[+] Model Selected: {config['model']}")
model = get_model(config['model'])
lit_model = LIT_TL(model, config['model'], config)
train_dataloader, test_dataloader = get_dataloaders(batch_size=config['batch_size'],
num_workers=config['num_workers'])
# Callbacks
checkpoint_callback = ModelCheckpoint(monitor="test_acc",
mode="max",
save_top_k=1,
dirpath="checkpoints/",
filename=f"{config['model']}" + "_{test_acc:.3f}")
lr_monitor = LearningRateMonitor(logging_interval='step', log_momentum=True)
# early_stop_callback = EarlyStopping(monitor="loss", patience=99)
wandb.login()
if config['exp_name'] == 'generic_exp':
fnam = f"{config['model']}_GE"
else:
fnam = config['exp_name']
wandb_logger = WandbLogger(project='forgery_detection',
name=f"TL_{fnam}",
config=config,
job_type='finetuning',
log_model="all")
# call trainer
trainer = Trainer(fast_dev_run=False,
inference_mode=False, # to enable grad enabling during inference
max_epochs=config['epochs'],
accelerator="gpu" if "cuda" in config['device'] else "cpu",
devices=[int(config['device'].split(":")[-1])], # GPU ID that you selected
precision="16", # automatic mixed precision training
deterministic=True,
enable_checkpointing=True,
callbacks=[checkpoint_callback, lr_monitor],
gradient_clip_val=None,
log_every_n_steps=50,
logger=wandb_logger, # The absolute best: wandb <3
enable_progress_bar=True)
# fit model
if config['mode'] == 'train' or config['mode'] == 'trainX': # TODO: Implement trainX mode
trainer.fit(lit_model, train_dataloader, test_dataloader)
else:
# DONE_TODO: Load last checkpoint and test
if not os.exists(args.ckpt_path):
args.ckpt_path = checkpoint_callback.best_model_path
lit_model = LIT_TL.load_from_checkpoint(args.ckpt_path)
lit_model.freeze()
trainer.test(lit_model, test_dataloader)
wandb.finish()
| Aryan-Garg/Image-Forgery-Detection | transfer_learning.py | transfer_learning.py | py | 13,826 | python | en | code | 0 | github-code | 36 |
27029688469 | import torch
import numpy as np
import torch.nn as nn
class PositionalEncoding1D(nn.Module):
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
"""
:param channels: The last dimension of the tensor you want to apply pos emb to.
"""
super().__init__()
self.channels = num_pos_feats
dim_t = torch.arange(0, self.channels, 2).float()
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * np.pi
self.scale = scale
self.normalize = normalize
inv_freq = 1. / (temperature ** (dim_t / self.channels))
self.register_buffer('inv_freq', inv_freq)
def forward(self, tensor):
"""
:param tensor: A 2d tensor of size (len, c)
:return: Positional Encoding Matrix of size (len, c)
"""
if tensor.ndim != 2:
raise RuntimeError("The input tensor has to be 2D!")
x, orig_ch = tensor.shape
pos_x = torch.arange(
1, x + 1, device=tensor.device).type(self.inv_freq.type())
if self.normalize:
eps = 1e-6
pos_x = pos_x / (pos_x[-1:] + eps) * self.scale
sin_inp_x = torch.einsum("i,j->ij", pos_x, self.inv_freq)
emb_x = torch.cat((sin_inp_x.sin(), sin_inp_x.cos()), dim=-1)
emb = torch.zeros((x, self.channels),
device=tensor.device).type(tensor.type())
emb[:, :self.channels] = emb_x
return emb[:, :orig_ch]
class PositionalEncoding2D(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * np.pi
self.scale = scale
def forward(self, tensors):
x = tensors.tensors
mask = tensors.mask
assert mask is not None
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='trunc') / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
| ViTAE-Transformer/DeepSolo | adet/layers/pos_encoding.py | pos_encoding.py | py | 3,339 | python | en | code | 177 | github-code | 36 |
20760678532 | from re import T
from django.db import models
from machine.computations.examples import victorious_payment
from django.contrib.postgres.fields import ArrayField
# Create your models here.
class Machine(models.Model):
def empty_list():
return list()
# identificar la maquina que estamos usando
name = models.CharField(max_length=200, blank=True)
payments = models.JSONField()
free_spins = ArrayField(
models.IntegerField(blank=True),
size=5,
default=empty_list
)
normal_reel = ArrayField(
models.CharField(max_length=200, blank=True),
size=5,
default=empty_list
)
bonus_reel = ArrayField(
models.CharField(max_length=200, blank=True),
size=5,
default=empty_list
)
visible = ArrayField(
models.IntegerField(blank=True),
size=5,
default=empty_list
)
multiplier = models.IntegerField(default=3)
# corregir:
# ver si se puede mejorar
roi = models.FloatField(default=0)
def payment(self, roll):
return victorious_payment(self, roll)
def save(self, *args, **kwargs):
self.roi += 0.01
super(Machine, self).save(*args, **kwargs)
def __str__(self) -> str:
return self.name
| montenegrop/casinoGames | machine/models.py | models.py | py | 1,286 | python | en | code | 0 | github-code | 36 |
15760194327 | import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import random
import time
import numpy as np
import cv2
IM_WIDTH = 640
IM_HEIGHT = 480
def process_img(image, name):
print("Frame: "+str(image.frame)+", timestamp: "+str(image.timestamp))
i = np.array(image.raw_data)
i2 = i.reshape((IM_HEIGHT, IM_WIDTH, 4))
i3 = i2[:, :, :3]
cv2.imshow(name, i3)
cv2.waitKey(1)
if image.frame % 20 == 0:
image.save_to_disk('_out/%06d.png' % image.frame)
return i3/255.0
actor_list = []
try:
# 0. Set the cilent and the world
client = carla.Client('localhost', 2000) # https://carla.readthedocs.io/en/latest/core_world/#client-creation
client.set_timeout(10)
world = client.get_world()
# 1. Choose blueprint for the vehicle
blueprint_library = world.get_blueprint_library() # https://carla.readthedocs.io/en/latest/core_actors/#blueprints
vehicle = blueprint_library.find('vehicle.tesla.model3') # vehicle_bp = blueprint_library.filter('model3')[0]
vehicle.set_attribute('color', '255,0,0')
print(vehicle)
# 2. Choose spawn point
# manually
# spawn_point = carla.Transform(carla.Location(x=, y=, z=),
# carla.Rotation(pitch=, yaw=, roll=))
# automatically
spawn_point_vehicle = random.choice(world.get_map().get_spawn_points())
print(spawn_point_vehicle)
# 3. Spawn the vehicles
# spawn the actor
actor_vehicle = world.spawn_actor(vehicle, spawn_point_vehicle)
# set control mode. https://carla.readthedocs.io/en/latest/python_api/#carla.Vehicle
# vehicle.apply_control(carla.VehicleControl(throttle=0.1, steer=0.0))
actor_vehicle.set_autopilot(True) # if you just wanted some NPCs to drive.
# append to the actor_list
actor_list.append(actor_vehicle)
# 4. Get the blueprint for this sensor: https://carla.readthedocs.io/en/latest/core_sensors/
sensor = blueprint_library.find('sensor.camera.rgb')
# Change the dimensions of the image
sensor.set_attribute('image_size_x', f'{IM_WIDTH}')
sensor.set_attribute('image_size_y', f'{IM_HEIGHT}')
sensor.set_attribute('fov', '110')
# 5. Adjust sensor relative to vehicle
# choose the relative spawn point
spawn_point_sensor = carla.Transform(carla.Location(x=2.5, z=1.0), carla.Rotation(pitch=-15))
print(spawn_point_sensor)
# spawn the sensor and attach to vehicle.
actor_sensor = world.spawn_actor(sensor, spawn_point_sensor, attach_to=actor_vehicle)
# add sensor to list of actors
actor_list.append(actor_sensor)
# 6. Process the collected images: https://carla.readthedocs.io/en/latest/core_sensors/#listening
# Use the data collected by the sensor. The lambda function can be customized
actor_sensor.listen(lambda data: process_img(data, "camera1"))
# actor_sensor.listen(lambda image: image.save_to_disk('output/%06d.png' % image.frame))
finally:
print('destroying actors')
for actor in actor_list:
actor.destroy()
print('done.') | hchoi256/carla-research-project | Learning_Tasks/LT1/vehicle_camera.py | vehicle_camera.py | py | 3,348 | python | en | code | 1 | github-code | 36 |
21185857238 | import pygame
from time import time
import os
## OPTIONS LIEES A L'AFFICHAGE
screen_width, screen_height = 1280, 720 # taille de la fenetre
show_interface = False # afficher le classement
ticks_per_second = 60 # nombre de mise à jour par seconde
empty_ground_color = (210, 210, 210) # couleur d'une terre inoccupée
background_color = (73, 200, 255) # couleur du vide
port_color = (13, 143, 185)
map_height = 650 # taille de la carte en pixel
## PRECHARGEMENTS
pygame.font.init()
font = pygame.font.Font("img/Proxima Nova Font.otf", 30)
small_font = pygame.font.Font("img/Proxima Nova Font.otf", 20)
flag_img = pygame.image.load("img/flag.png")
disabled_flag_img = pygame.image.load("img/disabled_flag.png")
folder_name = "result/" + str(int(time())) + "/"
os.mkdir(folder_name)
## OPTIONS LIEES AUX MONTAGES VIDEOS
record_games = True # enregistrement des vidéos
edit_when_finished = True # faire le montage une fois la vidéo terminée
min_x, max_x, min_y, max_y = 99999, -1, 99999, -1
framerate = 30 # nombre d'image par seconde
duration = 60 # durée de la partie, en secondes
result_duration = 5 # durée d'affichage du gagnant, en secondes
width, height = 1080, 1920 # taille de la vidéo
source_width, source_height = 1280, 720 # taille des images d'origine
top_text = "Bataille de terrain" # texte affiché en haut de la vidéo
bottom_text = ["Abonnez vous et", "commentez votre", "département pour", "recevoir un boost !"] # text affiché au bas de la vidéo | JonathanOll/Pixel-War-Simulator | options.py | options.py | py | 1,495 | python | fr | code | 0 | github-code | 36 |
28508158291 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0019_auto_20150312_1008'),
]
operations = [
migrations.CreateModel(
name='LinkCategory',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('name', models.CharField(max_length=200)),
('description', models.TextField(default=None, blank=True, null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WebLink',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
('name', models.CharField(max_length=200)),
('url', models.URLField()),
('category', models.ForeignKey(to='home.LinkCategory')),
],
options={
},
bases=(models.Model,),
),
]
| micahlagrange/rmlsa.com | rmlsa/home/migrations/0020_linkcategory_weblink.py | 0020_linkcategory_weblink.py | py | 1,164 | python | en | code | 0 | github-code | 36 |
75086618662 | from dronekit import connect,LocationGlobalRelative,APIException,VehicleMode
import time
import socket
import math
import cv2, imutils, socket
import numpy as np
import base64
import cv2, imutils
import numpy as np
import base64
import torch
def arm_and_takeoff(aTargetAltitude):
"""
Arms vehicle and fly to aTargetAltitude.
"""
print("Basic pre-arm checks")
# Don't let the user try to arm until autopilot is ready
while not vehicle.is_armable:
print(" Waiting for vehicle to initialise...")
time.sleep(1)
print("Arming motors")
# Copter should arm in GUIDED mode
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
while not vehicle.armed:
print(" Waiting for arming...")
time.sleep(1)
print("Taking off!")
vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
# Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
# after Vehicle.simple_takeoff will execute immediately).
while True:
print(" Altitude: ", vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.
print("Reached target altitude")
break
time.sleep(1)
def send_ned_velocity(velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(1)
# vehicle=connectMyCopter()
connection_string="/dev/ttyAMA0"
baud_rate=57600
print("Connecting")
vehicle=connect(connection_string,baud=baud_rate,wait_ready=True)
print("Connected to drone")
vehicle.mode=VehicleMode("GUIDED")
vehicle.armed=True
print ("Autopilot Firmware version: %s" % vehicle.version)
print ("Autopilot capabilities (supports ftp): %s" % vehicle.capabilities.ftp)
print ("Global Location: %s" % vehicle.location.global_frame)
#Arm and take of to altitude of 5 meters
arm_and_takeoff(0.5)
send_ned_velocity(0.5, 0, 0, 5)
vehicle.mode=VehicleMode("LAND")
| RahulHKumar/autonomous_drone_monocular_cam_pixhawk | dro_vel.py | dro_vel.py | py | 2,854 | python | en | code | 0 | github-code | 36 |
8827399963 | from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import flask
from flask import current_app, request
class APIEndpoint(object):
MIN_API_VERSION = 3
LATEST_API_VERSION = 3
HEADER_PREFIX = "application/vnd.marv.v"
def __init__(self, name, func, url_rule, defaults=None, methods=None, version=None, acl=None):
self.name = name
version = self.MIN_API_VERSION if version is None else version
self.funcs = [(version, func)]
self.url_rules = [(url_rule, {'defaults': defaults, 'methods': methods})]
self.acl = set(acl) if acl else set()
def __call__(self, *args, **kw):
authorization = request.headers.get('Authorization')
# TODO: can authorization be '' or is None test?
if not authorization:
authorization = request.args.get('access_token')
groups = current_app.um.check_authorization(self.acl, authorization)
try:
accepted = (x[0] for x in flask.request.accept_mimetypes
if x[0].startswith(self.HEADER_PREFIX)).next()
accepted_version = int(accepted[len(self.HEADER_PREFIX):])
except (StopIteration, ValueError):
accepted_version = self.MIN_API_VERSION
try:
func = (func for version, func in self.funcs
if version <= accepted_version).next()
except StopIteration:
flask.abort(406)
return func(*args, **kw)
def init_app(self, app, url_prefix=None, name_prefix=None):
name = '.'.join(filter(None, [name_prefix, self.name]))
for url_rule, options in self.url_rules:
url_rule = ''.join(filter(None, [url_prefix, url_rule]))
app.add_url_rule(url_rule, name, self, **options)
class APIGroup(object):
def __init__(self, name, func, url_prefix=None):
self.name = name
self.func = func
self.url_prefix = url_prefix
self.endpoints = OrderedDict()
def add_endpoint(self, ep):
"""endpoints and groups are all the same (for now)"""
assert ep.name not in self.endpoints, ep
self.endpoints[ep.name] = ep
def endpoint(self, *args, **kw):
return api_endpoint(*args, registry=self.endpoints, **kw)
def init_app(self, app, url_prefix=None, name_prefix=None):
self.func(app)
name_prefix = '.'.join(filter(None, [name_prefix, self.name]))
url_prefix = '/'.join(filter(None, [url_prefix, self.url_prefix])) or None
for ep in self.endpoints.values():
ep.init_app(app, url_prefix=url_prefix, name_prefix=name_prefix)
def __repr__(self):
return '<APIGroup {} url_prefix={}>'.format(self.name, self.url_prefix)
def api_endpoint(url_rule, defaults=None, methods=None, version=None,
cls=APIEndpoint, registry=None, acl=None):
def decorator(func):
if isinstance(func, cls):
func.url_rules.append((url_rule, {'defaults': defaults, 'methods': methods}))
return func
name = func.func_name
rv = cls(name, func, url_rule=url_rule, defaults=defaults,
methods=methods, version=version, acl=acl)
rv.__doc__ = func.__doc__
if registry is not None:
assert name not in registry, name
registry[name] = rv
return rv
return decorator
def api_group(url_prefix=None, cls=APIGroup):
def decorator(func):
if isinstance(func, cls):
raise TypeError('Attempted to convert function into api group twice.')
name = func.func_name
rv = cls(name, func, url_prefix)
rv.__doc__ = func.__doc__
return rv
return decorator
| ternaris/marv | marv_webapi/tooling.py | tooling.py | py | 3,760 | python | en | code | 3 | github-code | 36 |
5791008757 | #client.py
import time
import socket
import _thread as thread
from time import sleep
import connHandle
# socket wrapper class
class sockW:
def __init__ (self, sock, ipAddr):
self.sock = sock
self.ipAddr = ipAddr
def get_ip_addr(self):
return self.ipAddr
def get_sock(self):
return self.sock
# List of addr to connect to
addr = ['192.168.1.100',
'192.168.1.101',
'192.168.1.102',
'192.168.1.103',
'192.168.1.104',
'192.168.1.105',
'192.168.1.106',
'192.168.1.107',
'192.168.1.108',
'192.168.1.109',
'192.168.1.110',
'192.168.1.111']
#addr = ['192.168.1.100']
# List of connHandle objects
conn = []
# connect to all addr
for i in addr:
    # Try a TCP connection to every known host on port 9800; hosts that
    # do not answer within 1 second are reported as [BAD] and skipped.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    try:
        sock.connect((i, 9800))
        print(i + " + [OK]")
        sock_w = sockW(sock, i)
        # Each live connection is managed by a connHandle object.
        conn_H = connHandle.connHandle(sock_w)
        conn.append(conn_H)
    except socket.error:
        print(i + " + [BAD]")
        continue
while True:
    # Interactive command loop: commands fan out to one or more connected
    # clients via their connHandle.step() method.
    msg = input()
    if msg[:4] == "all ":
        # Broadcast to every connected client.
        for conn_H in conn:
            conn_H.step(msg[4:])
    elif msg[:5] == "allH ":
        # Upper half of the client list only.
        for i in range(len(conn) // 2, len(conn)):
            conn[i].step(msg[5:])
    elif msg[:5] == "allL ":
        # Lower half of the client list only.
        for i in range(0, len(conn) // 2):
            conn[i].step(msg[5:])
    elif msg == "ls":
        print("\tCLIENT LIST\n<------------------------->\n")
        for conn_H in conn:
            print("| CLIENT: [" + conn_H.ip_addr + "] |\n")
        print("<------------------------->")
    elif msg == "help":
        # Ask the first client for its help text and echo it back.
        client = conn[0]
        client.sock.send("help".encode())
        print(client.sock.recv(1000).decode())
    elif msg[:3] == "ip ":
        # "ip <addr> <command>": send a command to one specific client.
        msg = msg[3:]
        client_ip = msg[:msg.index(' ')]
        msg = msg[msg.index(' ') + 1:]
        # BUGFIX: the original `else` was attached to the per-connection
        # `if`, so "UNKOWN CMD!!!" was printed once for EVERY client that
        # did not match the requested IP.  Report once, only on no match.
        matched = False
        for conn_H in conn:
            if conn_H.ip_addr == client_ip:
                conn_H.step(msg)
                matched = True
        if not matched:
            print("UNKNOWN CLIENT: " + client_ip)
    else:
        # Previously unrecognised commands were silently ignored.
        print("UNKNOWN CMD!!!")
| RiceShelley/RPI_Cluster | clusterCtl/multiClient/client.py | client.py | py | 2,099 | python | en | code | 0 | github-code | 36 |
10830099515 | import tkinter as tk
import json
from api import functions
class window(tk.Tk):
def __init__(self):
super().__init__()
self.geometry('767x445')
self.title('Get daily news')
self.funcs = functions()
def main_window(self):
# Creating required labels and frame
title = tk.Label(self, text="Get news", font="monospace 20")
frame = tk.Frame(self)
# Creating required variables
query = tk.StringVar()
query.set('')
# Creating entry boxes
query_box = tk.Entry(frame, textvariable=query)
# Creating label for entry box
query_label = tk.Label(frame, text="Query: ")
# Creating buttons
submit = tk.Button(frame, text="Submit", command=lambda: (self.window_result(query.get())))
# Packing everything
query_label.grid(column=1, row=1)
query_box.grid(column=2, row=1)
submit.grid(column=1, row=2)
title.pack()
frame.pack()
def show_json(self, json_data: str):
# Creating required frame
frame = tk.Frame(self.slave_window)
# Creating required variables
data = json.loads(json_data)
articles = data.get('articles')
titles = []
selected_title = tk.StringVar()
selected_title.set('Options')
# Functions which will be used
def find_selected_article(title: str, articles: list):
for article in articles:
if article.get('title') == title:
return article
return False
def see_news(selected_article: dict):
#self.slave_window.geometry('')
frame.pack_forget()
frame2 = tk.Frame(self.slave_window)
title = tk.Label(frame2, text='Title: '+selected_article.get('title'))
author = tk.Label(frame2, text='Author: '+selected_article.get('author'))
source = tk.Label(frame2, text="Source: "+selected_article.get('source').get('name'))
link = tk.Label(frame2, text='Link: '+selected_article.get('url'))
date = tk.Label(frame2, text='Published At: '+selected_article.get('publishedAt'))
content = tk.Label(frame2, text='Content: '+selected_article.get('content'))
title.grid(column=1, row=1)
author.grid(column=1, row=2)
source.grid(column=1, row=3)
link.grid(column=1, row=4)
date.grid(column=1, row=5)
content.grid(column=1, row=6)
frame2.pack()
for article in articles:
titles.append(article.get('title'))
# Creating label
label_choose = tk.Label(frame, text="Choose News: ")
# Creating option menu
entry_box = tk.OptionMenu(frame, selected_title, *titles)
# Creating buttons
submit = tk.Button(frame, text="Submit", command=lambda: (frame.pack_forget(), see_news(find_selected_article(selected_title.get(), articles))))
# Packing Everything
label_choose.grid(column=1, row=1)
entry_box.grid(column=2, row=1)
submit.grid(column=1, row=2)
frame.pack()
def window_result(self, query: str):
self.slave_window = tk.Toplevel(self)
self.slave_window.title(f'GET {query} NEWS')
self.slave_window.geometry('700x300')
json_text = self.funcs.get(query)
self.show_json(json_text)
if __name__=='__main__':
windo = window()
windo.main_window()
windo.mainloop()
| PingalPie/news-application | gui.py | gui.py | py | 3,053 | python | en | code | 0 | github-code | 36 |
74144288102 | from django.core.management.base import BaseCommand
import os
from importlib import import_module
from django.conf import settings
from django.core.management import call_command
from newapp.utils import get_app_template_path, get_app_templates
APP_TEMPLATES = [ x.get('name') for x in get_app_templates() ]
class Command(BaseCommand):
    """
    Example usage:
        python manage.py newapp mambu --template=lite --appsdir=apps
    """
    help = __doc__
    args = '<function arg arg ...>'

    def check_name_conflick(self, name):
        """Return True when `name` clashes with an existing app in ./apps,
        a reserved Django contrib app name, or any importable module."""
        apps_list = next(os.walk(os.path.join(settings.BASE_DIR, "apps")))[1]
        # BUGFIX: the original list was missing a comma between
        # 'staticfiles' and 'syndication'; implicit string concatenation
        # produced 'staticfilessyndication', so neither name was reserved.
        apps_list = apps_list + [
            'admin', 'admindocs', 'auth', 'contenttypes', 'flatpages',
            'gis', 'humanize', 'messages', 'postgres', 'redirects',
            'sessions', 'sitemaps', 'sites', 'staticfiles', 'syndication',
        ]
        if name in apps_list:
            return True
        try:
            # Importable module of the same name would shadow the new app.
            import_module(name)
        except ImportError:
            return False
        return True

    def add_arguments(self, parser):
        parser.add_argument('name', type=str)
        parser.add_argument('--apptype', '-t', dest='apptype', default='lite',
                            help='Application type')
        parser.add_argument('--appdir', '-d', type=str, dest='appdir', default='/',
                            help='Target directory')

    def handle(self, *args, **options):
        """Validate name/template, create the target directory and delegate
        to Django's `startapp` command with the chosen app template."""
        name = options['name']
        if self.check_name_conflick(name):
            self.stdout.write(self.style.ERROR("Sorry, but you can't use %s as name because this name already taken" % name))
            exit()
        apps_type = options['apptype']
        if apps_type not in APP_TEMPLATES:
            self.stdout.write(self.style.ERROR("no template with name %s" % apps_type))
            exit()
        if options['appdir'] == "/":
            app_dir = settings.BASE_DIR
            app_path = os.path.join(settings.BASE_DIR, name)
        else:
            app_dir = options['appdir'].strip("/")
            app_path = os.path.join(settings.BASE_DIR, "%s/%s" % (app_dir, name))
        if os.path.isdir(app_dir):
            os.mkdir(app_path)
        else:
            self.stdout.write(self.style.ERROR("Appdir %s not found" % app_dir))
            exit()
        template_path = get_app_template_path(apps_type)
        call_command("startapp", name, app_path, template=template_path)
        # The template ships a desc.txt used only for listing templates.
        os.unlink(os.path.join(app_path, "desc.txt"))
        self.stdout.write(self.style.SUCCESS("Congratulation apps %s successfuly created" % name))
71239927463 | '''
Author - Imanpal Singh <imanpalsingh@gmail.com>
GUI application for twitter sentiment analysis
Date created : - 02-07-2019
Date modified : - 03-07-2019
'''
#importing requierd libraries
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 8000)
import re
import tkinter as tk
from tkinter import Text
import pyperclip
#Global variable to hold score
score=0
#Machine Learning part
def Algorithm(file):
    """Train the module-level Naive Bayes model on the labelled tweet CSV
    at `file` and store its held-out accuracy in the global `score`."""
    global score
    #Loading the dataset
    dataset = pd.read_csv(file)
    #Cleaning tweets: keep letters only, drop @mentions, lowercase,
    #remove English stop words and stem each token.
    clean_tweets = []
    for i in range(len(dataset)):
        tw = re.sub('[^a-zA-Z]', ' ', dataset['tweet'][i])
        tw = re.sub('@[\w]*',' ',tw)
        tw = tw.lower()
        tw = tw.split()
        tw = [ps.stem(token) for token in tw if not token in set(stopwords.words('english'))]
        tw = ' '.join(tw)
        clean_tweets.append(tw)
    #textual encoding
    # NOTE(review): the vectorizer is fitted on ALL tweets before the
    # train/test split, so test-set vocabulary leaks into training —
    # confirm whether this is intended.
    X = cv.fit_transform(clean_tweets)
    X = X.toarray()
    y = dataset.iloc[:, 1].values
    #splitting the data
    from sklearn.model_selection import train_test_split
    X_train,X_test,y_train,y_test = train_test_split(X,y)
    #training the data
    nb.fit(X_train,y_train)
    score = nb.score(X_test,y_test)
    print("Score is : - ",score)
#Function to handle Go button event
def forward():
    """Handle the GO button: classify the tweet in the text box and
    replace its contents with the predicted sentiment and model accuracy."""
    #Getting the tweet
    tw = tweet.get("1.0","end")
    #Cleaning the tweet
    tw = re.sub('[^a-zA-Z]', ' ', tw)
    tw = re.sub('@[\w]*',' ',tw)
    tw = tw.lower()
    # NOTE(review): unlike Algorithm(), the text is split on "\n" rather
    # than whitespace, so stop-word removal and stemming see whole lines,
    # not tokens — verify this matches the training preprocessing.
    tw = tw.split("\n")
    tw = [ps.stem(token) for token in tw if not token in set(stopwords.words('english'))]
    tw = cv.transform(tw)
    tw = tw.toarray()
    #Predicting the class
    y_pred = nb.predict(tw)
    #Clearning the Entry
    tweet.delete("1.0","end")
    #Displaying the class
    if y_pred[0] == 0:
        tweet.insert("1.0","The tweet entered is normal ( model's accuracy : {}% )".format(score*100))
    else :
        tweet.insert("1.0","The tweet entered is negative ( model's accuracy : {}% )".format(score*100))
#Function to handle Paste from clipboard button event
def clippaste():
    """Handle the paste button: insert clipboard text at the top of the box."""
    tweet.insert("1.0",pyperclip.paste())
#Initialising algorithm
Algorithm('train.csv')
#GUI part
#Creating a window
Main = tk.Tk()
Main.configure(background='white')
Main.title("Twitter Sentiment analysis")
Main.geometry("1000x400+400+300")
#Adding the heading
one = tk.Label(Main,text="Twitter Sentiment analysis",fg="white",width="100",height="2")
one.configure(background="#6E97ED",font=(20))
#Adding the textbox
tweet = tk.Text(Main,height="10",width="60")
tweet.insert("1.0","Paste tweet here..")
tweet.configure(bd=0,fg="#6E97ED")
#Adding buttons
button_frame = tk.Frame(Main)
button_frame.configure(background="white")
go= tk.Button(button_frame,text="GO !",width="10",height="5",command=forward)
go.configure(background="#6E97ED",fg="white",bd=0)
paste = tk.Button(button_frame,text="Paste from clipboard",width="20",height="5",command=clippaste)
paste.configure(background="#6E97ED",fg="white",bd=0)
#Finishing up
one.pack(pady=30)
tweet.pack(side="top",padx=10,pady=20)
go.pack(side="left")
paste.pack(side="left",padx="30")
button_frame.pack(side="bottom")
#Removing resizeable feature
Main.resizable(0,0)
tk.mainloop()
| imanpalsingh/twitter-sentiment-analysis | GUI.py | GUI.py | py | 3,692 | python | en | code | 1 | github-code | 36 |
13780351709 | import sys
from collections import deque
def div_area(q):
    # Flood-fill from the cells queued in `q`, marking every reachable
    # empty cell (value 0) as outside air (-1).  Mutates the global
    # `area` grid; n x m are the global grid dimensions.
    while q:
        r, c = q.popleft()
        for x, y in [[r + 1, c], [r - 1, c], [r, c + 1], [r, c - 1]]:
            if 0<= x < n and 0<= y < m:
                if area[x][y] == 0:
                    area[x][y] = -1
                    q.append([x, y])
def next_hour():
    # Advance the melt simulation by one hour: cheese cells touching
    # outside air (-1) on two or more sides melt away; interior air
    # pockets newly exposed by the melt are merged into the outside air
    # via div_area().  Rebinds the global `cheese` cell list.
    global cheese
    new_cheese = []
    new_air = deque()
    air = []
    for i, j in cheese:
        cnt = 0   # neighbouring outside-air cells
        cnt2 = 0  # neighbouring enclosed-air cells
        for x, y in [[i - 1, j], [i + 1, j], [i, j - 1], [i, j + 1]]:
            if area[x][y] == -1:
                cnt += 1
            elif area[x][y] == 0:
                cnt2 += 1
        if cnt >= 2:
            # This cell melts; if it also bordered an enclosed pocket,
            # that pocket becomes connected to the outside.
            air.append([i, j])
            if cnt2 > 0:
                new_air.append([i, j])
        else:
            new_cheese.append([i, j])
    for i, j in air:
        area[i][j] = -1
    div_area(new_air)
    cheese = new_cheese
n, m = map(int, sys.stdin.readline().strip().split())
area = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(n)]
cheese = []
for i in range(n):
for j in range(m):
if area[i][j] == 1:
cheese.append([i, j])
area[0][0] = -1
div_area(deque([[0, 0]]))
cnt = 0
while cheese:
next_hour()
cnt += 1
print(cnt) | Yangseyeon/BOJ | 03. Gold/2638.py | 2638.py | py | 1,281 | python | en | code | 0 | github-code | 36 |
27029634669 | import copy
import logging
import os.path as osp
import numpy as np
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from pycocotools import mask as maskUtils
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import SizeMismatchError
from detectron2.structures import BoxMode
from .augmentation import RandomCropWithInstance
from .detection_utils import (annotations_to_instances, build_augmentation,
transform_instance_annotations)
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapperWithBasis"]
logger = logging.getLogger(__name__)
def segmToRLE(segm, img_size):
h, w = img_size
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm["counts"]) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = segm
return rle
def segmToMask(segm, img_size):
rle = segmToRLE(segm, img_size)
m = maskUtils.decode(rle)
return m
def filter_empty_instances(instances):
    """
    Filter out instances whose ground-truth box is empty.

    Args:
        instances (Instances): must carry a ``gt_boxes`` field.

    Returns:
        Instances: only the instances with non-empty boxes.
    """
    # BUGFIX/cleanup: the original body began with a stray `pass` left
    # over from editing, and its `if not r` guard was dead code (`r`
    # always held exactly one mask); the docstring also described
    # parameters that do not exist.  Behavior is unchanged.
    keep = instances.gt_boxes.nonempty()
    return instances[keep]
class DatasetMapperWithBasis(DatasetMapper):
"""
This caller enables the default Detectron2 mapper to read an additional basis semantic label
"""
def __init__(self, cfg, is_train=True):
super().__init__(cfg, is_train)
# Rebuild augmentations
logger.info(
"Rebuilding the augmentations. The previous augmentations will be overridden."
)
self.augmentation = build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train and cfg.MODEL.TRANSFORMER.BOUNDARY_HEAD:
self.augmentation.insert(
0,
RandomCropWithInstance(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.CROP_INSTANCE,
),
)
logging.getLogger(__name__).info(
"Cropping used in training: " + str(self.augmentation[0])
)
if cfg.INPUT.ROTATE and is_train:
if cfg.MODEL.TRANSFORMER.BOUNDARY_HEAD:
self.augmentation.insert(0, T.RandomRotation(angle=[-45, 45]))
else:
self.augmentation.insert(0, T.RandomRotation(angle=[-90, 90]))
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
try:
image = utils.read_image(
dataset_dict["file_name"], format=self.image_format
)
except Exception as e:
print(dataset_dict["file_name"])
print(e)
raise e
try:
utils.check_image_size(dataset_dict, image)
except SizeMismatchError as e:
expected_wh = (dataset_dict["width"], dataset_dict["height"])
image_wh = (image.shape[1], image.shape[0])
if (image_wh[1], image_wh[0]) == expected_wh:
print("transposing image {}".format(dataset_dict["file_name"]))
image = image.transpose(1, 0, 2)
else:
raise e
######################################################################
boxes = np.asarray(
[
BoxMode.convert(
instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS
)
for instance in dataset_dict["annotations"]
]
)
######################################################################
# aug_input = T.StandardAugInput(image)
aug_input = T.StandardAugInput(image, boxes=boxes)
transforms = aug_input.apply_augmentations(self.augmentation)
image = aug_input.image
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image.transpose(2, 0, 1))
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
dataset_dict.pop("pano_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# dataset_dict["instances"] = instances
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
| ViTAE-Transformer/DeepSolo | adet/data/dataset_mapper.py | dataset_mapper.py | py | 6,846 | python | en | code | 177 | github-code | 36 |
# Parse a vocabulary file whose lines look like:
#   english_word (phonetic) vietnamese_translation
vocabulary = []
with open('Voca_01.txt', 'r', encoding='utf-8') as file:
    for line in file:
        line = line.strip()  # Strip surplus whitespace from both ends of the line
        if not line:
            continue  # Skip blank lines
        # Split the line on the opening parenthesis; the phonetic part
        # sits inside parentheses, the translation follows them.
        parts = line.split("(")
        if len(parts) < 2:
            print("not enough infor")
            continue  # Skip lines without enough fields
        else:
            english = parts[0].strip()
            phonetic = parts[1].split(')')[0].strip()
            vietnamese = parts[1].split(')')[1].strip()
            # Entries with no translation are dropped.
            if vietnamese == "":
                continue
            else:
                vocabulary.append((english, phonetic, vietnamese))

# Print the results
for entry in vocabulary:
    print("English:", entry[0])
    print("Phonetic:", entry[1])
    print("Translation:", entry[2])
    print()
| nguyenbuitk/python-tutorial | search_vocabulary/test_split_strip.py | test_split_strip.py | py | 1,000 | python | vi | code | 0 | github-code | 36 |
22398051842 | import sys
import cv2
import numpy as np
from os import listdir
PY3 = sys.version_info[0] == 3
#Define the parameters
SIZE = 32
CLASS_NUMBER = 6
#Read the traffic sign dataset and store the dataset and labels into a list
def load_traffic_dataset():
dataset = []
labels = []
for sign_type in range(CLASS_NUMBER):
sign_list = listdir("./dataset/{}".format(sign_type))
for sign_file in sign_list:
if '.png' in sign_file:
path = "./dataset/{}/{}".format(sign_type,sign_file)
print(path)
img = cv2.imread(path,0)
img = cv2.resize(img, (SIZE, SIZE))
img = np.reshape(img, [SIZE, SIZE])
dataset.append(img)
labels.append(sign_type)
return np.array(dataset), np.array(labels)
#Deskew the images
def deskew(img):
    # Straighten a SIZE x SIZE grayscale image using its image moments:
    # estimate the skew from the second-order central moments (mu11/mu02)
    # and undo it with an affine warp.  Returns a copy when the image is
    # already (nearly) unskewed.
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11']/m['mu02']
    M = np.float32([[1, skew, -0.5*SIZE*skew], [0, 1, 0]])
    img = cv2.warpAffine(img, M, (SIZE, SIZE), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    return img
#Define a class for SVM model object
class StatModel(object):
    """Thin base class adding load/save persistence to a cv2.ml model
    stored in ``self.model`` (set by subclasses)."""
    def load(self, fn):
        self.model.load(fn) # Known bug: https://github.com/opencv/opencv/issues/4969
    def save(self, fn):
        self.model.save(fn)
class SVM(StatModel):
    """C-SVC classifier with an RBF kernel, wrapped around cv2.ml.SVM."""
    def __init__(self, C = 12.5, gamma = 0.50625):
        self.model = cv2.ml.SVM_create()
        self.model.setGamma(gamma)
        self.model.setC(C)
        self.model.setKernel(cv2.ml.SVM_RBF)
        self.model.setType(cv2.ml.SVM_C_SVC)
    def train(self, samples, responses):
        # Each row of `samples` is one feature vector.
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
    def predict(self, samples):
        # Return the predicted labels as a flat array.
        return self.model.predict(samples)[1].ravel()
def preprocess_simple(data):
    """Flatten image data into float32 row vectors scaled into [0, 1]."""
    scaled = np.float32(data) / 255.0
    return scaled.reshape(-1, SIZE * SIZE)
def get_hog() :
winSize = (20,20)
blockSize = (10,10)
blockStride = (5,5)
cellSize = (10,10)
nbins = 9
deriveAperture = 1
winSigma = -1.0
histogramNormType = 0
L2HysThreshold = 0.2
gammaCorrection = 1
nlevels = 64
signedGradient = True
hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,deriveAperture,winSigma,histogramNormType,L2HysThreshold,gammaCorrection,nlevels, signedGradient)
return hog
#Train the model
def training():
print('Loading data...')
data, labels = load_traffic_dataset()
print(data.shape)
print('Shuffling data...')
rand = np.random.RandomState(10)
shuffle = rand.permutation(len(data))
data, labels = data[shuffle], labels[shuffle]
print('Deskewing images...')
data_deskewed = list(map(deskew, data))
print('Defining HoG parameters...')
hog = get_hog()
print('Calculating HoG descriptor for every image...')
hog_descriptors = []
for img in data_deskewed:
hog_descriptors.append(hog.compute(img))
hog_descriptors = np.squeeze(hog_descriptors)
print('Training SVM model...')
model = SVM()
model.train(hog_descriptors, labels)
print('Saving SVM model...')
model.save('data_svm.dat')
return model
#Get the label of detected traffic sign using the SVM model
def getLabel(model, data):
gray = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY)
img = [cv2.resize(gray,(SIZE,SIZE))]
img_deskewed = list(map(deskew, img))
hog = get_hog()
hog_descriptors = np.array([hog.compute(img_deskewed[0])])
hog_descriptors = np.reshape(hog_descriptors, [-1, hog_descriptors.shape[1]])
return int(model.predict(hog_descriptors)[0])
| nabil053/Bangladeshi-Traffic-Sign-Detection-And-Recognition-System | classification.py | classification.py | py | 3,647 | python | en | code | 0 | github-code | 36 |
24788953369 | import sys
# 북동남서
dir_y = [-1, 0, 1, 0]
dir_x = [0, 1, 0, -1]
# 대각선들 오른쪽 위부터 시계
diagonal_y = [-1, 1, 1, -1]
diagonal_x = [1, 1, -1, -1]
N, M, K, C = list(map(int, sys.stdin.readline().strip().split()))
trees = [None] * N
killer = [[0] * N for _ in range(N)]
for i in range(N):
trees[i] = list(map(int, sys.stdin.readline().strip().split()))
def out_of_range(y, x):
    # True when (y, x) lies outside the N x N board.
    return not (0 <= y < N and 0 <= x < N)
def grow():
for i in range(N):
for j in range(N):
if trees[i][j] > 0:
count = 0
for d in range(4):
next_i, next_j = i + dir_y[d], j + dir_x[d]
if out_of_range(next_i, next_j):
continue
if trees[next_i][next_j] > 0:
count += 1
trees[i][j] += count
def spread():
cand = []
for i in range(N):
for j in range(N):
if trees[i][j] > 0:
position = []
for d in range(4):
next_i, next_j = i + dir_y[d], j + dir_x[d]
if out_of_range(next_i, next_j) or trees[next_i][next_j] != 0 or killer[next_i][next_j] > 0:
continue
position.append((next_i, next_j))
if position:
cand.append((position, trees[i][j] // len(position)))
for position, length in cand:
for p_i, p_j in position:
trees[p_i][p_j] += length
def kill():
best_y, best_x = -1, -1
best_score = 0
for i in range(N):
for j in range(N):
if trees[i][j] <= 0:
continue
count = trees[i][j]
for d in range(4):
for power in range(1, K + 1):
next_i, next_j = i + (diagonal_y[d] * power), j + (diagonal_x[d] * power)
if out_of_range(next_i, next_j) or trees[next_i][next_j] <= 0:
break
count += trees[next_i][next_j]
if best_score < count:
best_y, best_x = i, j
best_score = count
trees[best_y][best_x] = 0
killer[best_y][best_x] = C + 1
for d in range(4):
for power in range(1, K + 1):
next_i, next_j = best_y + (diagonal_y[d] * power), best_x + (diagonal_x[d] * power)
if out_of_range(next_i, next_j) or trees[next_i][next_j] == -1:
break
if trees[next_i][next_j] == 0:
killer[next_i][next_j] = C + 1
break
trees[next_i][next_j] = 0
killer[next_i][next_j] = C + 1
return best_score
def disappear():
    # Tick down the remaining lifetime of every kill-zone cell in the
    # global `killer` grid.
    for i in range(N):
        for j in range(N):
            if killer[i][j] > 0:
                killer[i][j] -= 1
def solution():
count = 0
year = 1
while year <= M:
# print(year)
# for k in killer:
# print(k)
# print()
# for t in trees:
# print(t)
# print()
grow()
spread()
# for t in trees:
# print(t)
# print()
count += kill()
# for t in trees:
# print(t)
# print()
disappear()
year += 1
return count
print(solution())
"""
5 4 4 5
1 0 0 -1 5
0 0 -1 4 0
0 0 5 4 0
0 0 5 0 0
2 0 -1 0 0
""" | inhyeokJeon/AALGGO | Python/codetree/47.py | 47.py | py | 3,407 | python | en | code | 0 | github-code | 36 |
25189150116 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
if __name__ == "__main__":
from tvb.tests.library import setup_test_console_env
setup_test_console_env()
import numpy
import unittest
from tvb.tests.library.base_testcase import BaseTestCase
from tvb.simulator.monitors import MonitorTransforms
from tvb.simulator import models, coupling, integrators, simulator
from tvb.datatypes import connectivity
from tvb.simulator.monitors import Raw, TemporalAverage
class MonitorTransformsTests(BaseTestCase):
def test_split(self):
mt = MonitorTransforms('a,c', '1,2', delim=',')
self.assertEqual(len(mt.pre), 2)
self.assertEqual(len(mt.post), 2)
mt = MonitorTransforms('a;c', 'exp(x);2.234')
self.assertEqual(len(mt.pre), 2)
self.assertEqual(len(mt.post), 2)
def test_pre_1(self):
mt = MonitorTransforms('a', 'b;c')
self.assertEqual(1, len(mt.pre))
def test_post_1(self):
mt = MonitorTransforms('a;b', 'c')
self.assertEqual(1, len(mt.post))
# def _shape_fail(self):
# MonitorTransforms('1,2,3', '2;3', delim=',')
#
# def test_shape_fail(self):
# self.assertRaises(Exception, self._shape_fail)
def _syntax_fail(self, pre, post):
MonitorTransforms(pre, post)
def test_syntax(self):
self.assertRaises(SyntaxError, self._syntax_fail, 'a=3', '23.234')
self.assertRaises(SyntaxError, self._syntax_fail, 'a+b/c*f(a,b)', 'f=23')
def test_noop_post(self):
mt = MonitorTransforms('a;b;c', '2.34*(pre+1.5);;')
self.assertEqual(len(mt.post), 3)
def _fail_noop_pre(self):
MonitorTransforms(';;', ';;')
def test_noop_pre_fail(self):
self.assertRaises(SyntaxError, self._fail_noop_pre)
def test_pre(self):
state = numpy.r_[:4].reshape((1, -1, 1))
# check expr correctly evaluated
mt = MonitorTransforms('x0**2', '')
out = mt.apply_pre(state)
self.assertEqual(out[0, -1, 0], 9)
# check correct shape
n_expr = numpy.random.randint(5, 10)
pre_expr = ';'.join([str(i) for i in range(n_expr)])
mt = MonitorTransforms(pre_expr, '')
out = mt.apply_pre(state)
self.assertEqual(n_expr, out.shape[0])
def test_post(self):
state = numpy.tile(numpy.r_[:4], (2, 1)).reshape((2, -1, 1))
state[1] *= 2
# check expr eval correct
mt = MonitorTransforms('0;0', 'mon;')
_, out = mt.apply_post((0.0, state))
self.assertEqual(3, out.flat[3])
self.assertEqual(6, out.flat[7])
mt = MonitorTransforms('0;0', 'mon;mon**2-1')
_, out = mt.apply_post((0.0, state))
self.assertEqual(3, out.flat[3])
self.assertEqual(35, out.flat[7])
# check correct shape
n_expr = numpy.random.randint(5, 10)
state = numpy.tile(numpy.r_[:4], (n_expr, 1)).reshape((n_expr, -1, 1))
post_expr = ';'.join([str(i) for i in range(n_expr)])
mt = MonitorTransforms('0', post_expr)
_, out = mt.apply_post((0.0, state))
self.assertEqual(n_expr, out.shape[0])
def test_user_tags(self):
pre = '0;0'
post = 'mon;mon**2-1'
raw = Raw(pre_expr=pre, post_expr=post)
tags = raw._transform_user_tags()
self.assertIn('user_tag_1', tags)
self.assertIn('user_tag_2', tags)
self.assertEqual(tags['user_tag_1'], pre)
self.assertEqual(tags['user_tag_2'], post)
class MonitorTransformsInSimTest(BaseTestCase):
def _run_sim(self, length, model, *mons):
sim = simulator.Simulator(
model=model,
connectivity=connectivity.Connectivity(load_default=True),
coupling=coupling.Linear(),
integrator=integrators.EulerDeterministic(),
monitors=mons)
sim.configure()
ys = []
for (t, y), in sim(simulation_length=length):
ys.append(y)
return sim, numpy.array(ys)
def test_expr_pre(self):
sim, ys = self._run_sim(5, models.Generic2dOscillator(), Raw(pre_expr='V;W;V**2;W-V',
post_expr='mon;mon;mon;mon'))
self.assertTrue(hasattr(sim.monitors[0], '_transforms'))
v, w, v2, wmv = ys.transpose((1, 0, 2, 3))
self.assertTrue(numpy.allclose(v ** 2, v2))
self.assertTrue(numpy.allclose(w - v, wmv))
def test_expr_post(self):
sim, ys = self._run_sim(5, models.Generic2dOscillator(),
Raw(pre_expr='V;W;V;W', post_expr=';;mon**2; exp(mon)'))
self.assertTrue(hasattr(sim.monitors[0], '_transforms'))
v, w, v2, ew = ys.transpose((1, 0, 2, 3))
self.assertTrue(numpy.allclose(v ** 2, v2))
self.assertTrue(numpy.allclose(numpy.exp(w), ew))
def test_expr_tim(self):
sim, ys = self._run_sim(5, models.Epileptor(), Raw(pre_expr='-y0+y3;y2', post_expr='mon;mon'))
self.assertTrue(hasattr(sim.monitors[0], '_transforms'))
lfp, slow = ys.transpose((1, 0, 2, 3))
def test_period_handling(self):
"""Test that expression application working for monitors with a period."""
sim, ys = self._run_sim(5, models.Generic2dOscillator(), TemporalAverage(pre_expr='V+W'))
def suite():
"""
Gather all the tests in a test suite.
"""
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(MonitorTransformsInSimTest))
test_suite.addTest(unittest.makeSuite(MonitorTransformsTests))
return test_suite
if __name__ == "__main__":
#So you can run tests from this package individually.
TEST_RUNNER = unittest.TextTestRunner()
TEST_SUITE = suite()
TEST_RUNNER.run(TEST_SUITE)
| suraj1074/tvb-library | tvb/tests/library/simulator/monitor_transforms_test.py | monitor_transforms_test.py | py | 7,125 | python | en | code | null | github-code | 36 |
4703541196 | #!/usr/bin/env python3
import rospy
import sounddevice as sd
import numpy as np
import queue
import sys
import sounddevice as sd
from audio_universal.msg import AudioData
'''
~output_device: use `python3 -m sounddevice` to get device list, numerical device ID or case-insensitive substrings is ok.
~channels: 1
~refresh_rate: 30
~latency: 'high'
~blocksize: 512
~dtype: 'float32'
~samplerate: 44100 48000 88200 96000 192000
'''
class audio_play:
    """ROS node that plays audio received on /audio_record_data through a
    sounddevice OutputStream, with per-message channel routing."""

    def __init__(self):
        self.initROS()
        self.q = queue.Queue()           # raw sample buffers
        self.q_connects = queue.Queue()  # per-message (src, dst) routing pairs
        self.q_channels = queue.Queue()  # per-message source channel counts
        self.stream = sd.OutputStream(
            device=self.output_device,
            samplerate=self.samplerate,
            blocksize=self.blocksize,
            dtype=self.dtype,
            latency=self.latency,
            channels=self.channels,
            callback=self.audio_callback,
        )
        # Keep the stream open for the node's whole lifetime; spin() blocks
        # until shutdown.
        with self.stream:
            rospy.spin()

    def initROS(self):
        """Read node parameters and subscribe to the audio topic."""
        rospy.init_node('audio_record', anonymous=True)
        self.output_device = rospy.get_param("~output_device", default=None)
        self.channels = rospy.get_param("~channels", default=1)
        self.refresh_rate = rospy.get_param("~refresh_rate", default=30)
        self.latency = rospy.get_param("~latency", default='high')
        self.blocksize = rospy.get_param("~blocksize", default=512)
        self.dtype = rospy.get_param("~dtype", default='float32')
        self.samplerate = rospy.get_param("~samplerate", default=48000)
        rospy.Subscriber('/audio_record_data', AudioData, self.AudioData_callback)

    def audio_callback(self, outdata, frames, time, status):
        """sounddevice output callback: route one queued buffer to outdata."""
        if status:
            rospy.logwarn(status)
        try:
            data = self.q.get_nowait()
            connects = self.q_connects.get_nowait()
            in_channels = self.q_channels.get_nowait()
            data = data.reshape(self.blocksize, in_channels)
            # Routing list must come in (source, destination) pairs.
            if len(connects) % 2 != 0:
                raise Exception
            for pair in range(len(connects) // 2):
                src = connects[pair * 2]
                dst = connects[pair * 2 + 1]
                if src in range(self.channels) and dst in range(self.channels):
                    outdata[:, dst] = data[:, src]
        except queue.Empty:
            # No buffered audio yet: emit silence.
            outdata[:] = np.zeros_like(outdata)

    def AudioData_callback(self, AudioData):
        """Queue an incoming message (parameter shadows the message class)."""
        self.q.put(np.frombuffer(AudioData.data, dtype=self.dtype))
        self.q_connects.put(np.frombuffer(AudioData.connects, dtype='uint32'))
        self.q_channels.put(AudioData.channels)
# Entry point: constructing the node blocks inside rospy.spin() until shutdown.
if __name__ == '__main__':
    audio_play()
| jsbyysheng/ros_audio_universal | scripts/audio_play.py | audio_play.py | py | 2,867 | python | en | code | 0 | github-code | 36 |
37989827251 | """
Steps to run:
python insured_info_scraper.py <account number>
Eg:
python insured_info_scraper.py 20011
Program written in Python 3
Program Output:
1 file:
Insured_Info_<account_num>.json - json file that contains the insured info details
Program Description:
Program first fetches the ASP login page parameters - __VIEWSTATE, __VIEWSTATEGENERATOR,
etc., and then submits these parameters along with the login credentials to log in.
Then the program stores the cookie info and uses it, along with the new page parameters,
to access the quotes page.
The code then accesses the account details page by sending the account number
(retrieved as a command line argument) along with other parameters to get the
account details.
The insured information is then scraped and stored into a dictionary, which is
written into a JSON file.
"""
import sys
import requests
from bs4 import BeautifulSoup
import json
# Main Function
def main():
    """Log in to FinancePro, fetch the account page for the account number
    given on the command line, scrape the insured information, and write it
    to ``Insured_Info_<account_num>.json``.
    """
    # Variable to store the json output
    insured_information = {}

    # Getting the account number from the command line
    account_num = str(sys.argv[1])
    print("Account number entered:")
    print(account_num)

    # Login credentials
    # NOTE(review): hard-coded credentials; consider loading from env/config.
    credentials = dict()
    credentials['username'] = 'samplecsrtest'
    credentials['password'] = 'Ik vaari aa---123'

    print("Getting login session parameters to login")
    # Home page URL
    home_page_url = 'https://secure.financepro.net/financepro/default.aspx?company=deltafinance'

    # Storing the session info
    session = requests.Session()
    response = session.get(home_page_url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # ASP.NET hidden form fields + credentials make up the login POST.
    login_form_parameters = _asp_form_tokens(soup)
    login_form_parameters['tblForm$txtUserName'] = credentials['username']
    login_form_parameters['tblForm$txtPassword'] = credentials['password']
    login_form_parameters['tblForm$btnLogin'] = 'Log In'
    login_form_parameters['tblForm$txtCompanyCode'] = 'deltafinance'

    # One POST through the session both logs in and stores the auth cookies.
    # (Fix: the original issued a second, session-less requests.post() with
    # the same form right after, logging in twice and discarding the result.)
    response = session.post(home_page_url, login_form_parameters)
    cookies = session.cookies.get_dict()
    print("Logged in")

    print("Accessing the accounts page session paramaters to navigate to accounts page")
    accounts_url = 'https://secure.financepro.net/financepro/account/account.aspx'
    # Sending the same session cookies
    response = session.get(accounts_url, cookies=cookies)
    soup = BeautifulSoup(response.content, 'html.parser')
    account_info_params = _asp_form_tokens(soup)
    print("Parameters retrieved")

    # Storing parameters to get account details page
    account_info_params['fndAccountSearch$hdnName'] = 'hvalue'
    # Account number sent as input here
    account_info_params['fndAccountSearch$txtAccountNumber'] = account_num
    account_info_params['fndAccountSearch$txtAccountStatus'] = '0'
    account_info_params['fndAccountSearch$txtCompanyCheckType'] = 'paid'
    account_info_params['fndAccountSearch$btnFind'] = 'Find Account'

    # POST request to get account and insured details
    print("\nAccessing Account Details Page")
    response = requests.post(accounts_url, account_info_params, cookies=cookies)
    soup = BeautifulSoup(response.content, 'html.parser')

    # All insured information is stored in span tags
    insured_name = soup.find("span", {"id": "lblInsuredName"}).text
    print("\nInsured Details are:\n")
    print("Insured name:")
    print(insured_name)
    insured_information['Name'] = insured_name

    address_dict = _parse_address(soup.find("span", {"id": "lblInsuredAddress"}))
    print("Insured address:")
    print("Address Line 1:")
    print(address_dict['Line 1'])
    print("City:")
    print(address_dict['City'])
    print("State:")
    print(address_dict['State'])
    print("Zip Code:")
    print(address_dict['Zip Code'])
    insured_information['Address'] = address_dict

    insured_telephone = soup.find("span", {"id": "lblInsuredPhone"}).text
    print("Insured telephone:")
    print(insured_telephone)
    insured_information['Telephone'] = insured_telephone

    # Writing the insured information into a json file
    file_name = 'Insured_Info_' + account_num + '.json'
    with open(file_name, 'w') as f:
        json.dump(insured_information, f)
    print("\nOutput File Created:")
    print(file_name)

    print("\nLogging off")
    # Log off page called with cookie info
    log_off_url = 'https://secure.financepro.net/financepro/logoff.aspx'
    response = requests.get(log_off_url, cookies=cookies)
    final_url = 'https://www.deltafinanceoftexas.com/'
    response = requests.get(final_url)


def _asp_form_tokens(soup):
    """Extract the ASP.NET hidden form fields (__VIEWSTATE,
    __VIEWSTATEGENERATOR, __EVENTVALIDATION) required by every POST."""
    return {
        '__VIEWSTATE': soup.select('input[name=__VIEWSTATE]')[0]['value'],
        '__VIEWSTATEGENERATOR': soup.select('input[name=__VIEWSTATEGENERATOR]')[0]['value'],
        '__EVENTVALIDATION': soup.select('input[name=__EVENTVALIDATION]')[0]['value'],
    }


def _parse_address(address_tag):
    """Split the lblInsuredAddress span (lines separated by <br/>) into
    street / city / state / zip components."""
    # Converting the <br> tag into a new line char and then splitting the
    # address into its components
    text = str(address_tag).replace("<br/>", "\n")
    text = text.replace('<span id="lblInsuredAddress">', '')
    text = text.replace('</span>', '')
    lines = text.split("\n")
    line2 = lines[1].split(" ")
    return {
        'Line 1': lines[0],
        'City': line2[0][:-1],   # strip the trailing comma after the city
        'State': line2[1],
        'Zip Code': line2[2],
    }
# Entry point of code
if __name__ == "__main__":
    main()
| tebbythomas/Freelance_Projects | Web_Data_Extraction_Projects/J10_Finance_Pro_Insured_Info_Scraper/Insured_Info/insured_info_scraper.py | insured_info_scraper.py | py | 6,297 | python | en | code | 1 | github-code | 36 |
19699984534 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base_classes as bc
from struct import pack, unpack
# FPGA Instruments
KERRIGAN = {
'nickname' : 'kerrigan',
'name' : 'Xilinx Virtex 5',
'address' : 0x55,
}
# I2C Instruments
CHEN = {
'nickname' : 'chen',
'name' : 'I2C Mux Thing',
'address' : 0x70,
}
TRAXEX = {
'nickname' : 'traxex',
'name' : 'Sensirion STS21 Temperature Sensor',
'address' : 0x4A,
'mux_address' : 0x04,
}
XIN = {
'nickname' : 'xin',
'name' : 'Sensirion STS21 Temperature Sensor',
'address' : 0x4A,
'mux_address' : 0x05,
}
# GPIB Instruments
ARCEUS = {
'nickname' : 'arceus',
'name' : 'Agilent 8753ES S-Parameter Network Analyzer',
'get_byte_order' : '',
'byte_order_little' : '',
}
MELOETTA = {
'nickname' : 'meloetta',
'name' : 'Hewlett-Packard 6623A System DC Power Supply',
'get_byte_order' : '',
'byte_order_little' : '',
}
XERNEAS = {
'nickname' : 'xerneas',
'name' : 'Hewlett-Packard 4156A Precision Semiconductor Parameter Analyzer',
'get_byte_order' : '',
'byte_order_little' : '',
}
# TCPIP Instruments
DARKRAI = {
'nickname' : 'darkrai',
'name' : 'Agilent N9020A MXA Signal Analyzer',
'socket' : ('192.168.1.5', 5025),
'get_byte_order' : '',
'byte_order_little' : '',
}
DEOXYS = {
'nickname' : 'deoxys',
'name' : 'Agilent InfiniiVision MSO7104A Mixed Signal Oscilloscope',
'socket' : ('192.168.1.10', 5025),
'get_byte_order' : ':waveform:byteorder?',
'byte_order_little' : 'LSBF',
}
GENESECT = {
'nickname' : 'genesect',
'name' : 'Agilent B2962A Power Source',
'socket' : ('192.168.1.9', 5025),
'get_byte_order' : ':format:border?',
'byte_order_little' : 'NORM',
'get_data_format' : ':format:data?',
'data_format_single': 'REAL,32',
'data_format_double': 'REAL,64',
}
GIRATINA = {
'nickname' : 'giratina',
'name' : 'Agilent B2962A Power Source',
'socket' : ('192.168.1.8', 5025),
'get_byte_order' : ':format:border?',
'byte_order_little' : 'NORM',
'get_data_format' : ':format:data?',
'data_format_single': 'REAL,32',
'data_format_double': 'REAL,64',
}
HEATRAN = {
'nickname' : 'heatran',
'name' : 'Agilent 16803A Logic Analyzer',
'socket' : ('192.168.1.11', 5025),
'get_byte_order' : '',
'byte_order_little' : '',
}
HO_OH = {
'nickname' : 'ho_oh',
'name' : 'Agilent N5182A MXG Vector Signal Generator',
'socket' : ('192.168.1.4', 5025),
'get_byte_order' : '',
'byte_order_little' : '',
}
KYUREM = {
'nickname' : 'kyurem',
'name' : 'Agilent N5183A MXG Analog Signal Generator',
'socket' : ('192.168.1.3', 5025),
'get_byte_order' : '',
'byte_order_little' : '',
}
RAYQUAZA = {
'nickname' : 'rayquaza',
'name' : 'Agilent E4443A PSA Series Spectrum Analyzer',
'socket' : ('192.168.1.2', 5025),
'get_byte_order' : '',
'byte_order_little' : '',
}
YVELTAL = {
'nickname' : 'yveltal',
'name' : 'Agilent B2902A Precision Source/Measure Unit',
'socket' : ('192.168.1.7', 5025),
'get_byte_order' : ':format:border?',
'byte_order_little' : 'NORM',
'get_data_format' : ':format:data?',
'data_format_single': 'REAL,32',
'data_format_double': 'REAL,64',
}
ZYGARDE = {
'nickname' : 'zygarde',
'name' : 'Agilent E5071C ENA Series Network Analyzer',
'socket' : ('192.168.1.6', 5025),
'get_byte_order' : '',
'byte_order_little' : '',
}
class Kerrigan(bc.FPGAInstrument):
    def __init__(self, aardvark):
        """Initialize the FPGA.

        :param Aardvark aardvark:
            An Aardvark object through which I2C commands are relayed.

        .. code-block:: python

            import microlab_instruments as mi

            aa = mi.Aardvark()
            kerrigan = mi.Kerrigan(aa)
        """
        # Static device descriptor (nickname, name, I2C address).
        self.DATA = KERRIGAN
        super(Kerrigan, self).__init__(aardvark=aardvark)
class Chen(bc.I2CMuxInstrument):
    def __init__(self, aardvark):
        """Initialize the I2C multiplexer.

        :param Aardvark aardvark:
            An Aardvark object through which I2C commands are relayed.

        .. code-block:: python

            import microlab_instruments as mi

            aa = mi.Aardvark()
            chen = mi.Chen(aa)
        """
        # Static device descriptor (nickname, name, I2C address).
        self.DATA = CHEN
        super(Chen, self).__init__(aardvark=aardvark)
class Traxex(bc.TempSensorInstrument):
    def __init__(self, aardvark, mux):
        """Initialize a Sensirion STS21 temperature sensor.

        :param Aardvark aardvark:
            An Aardvark object through which I2C commands are relayed.
        :param mux:
            The I2C multiplexer behind which this sensor sits
            (mux_address 0x04 per the TRAXEX descriptor).

        .. code-block:: python

            import microlab_instruments as mi

            aa = mi.Aardvark()
            traxex = mi.Traxex(aa)
            print traxex.read_temp()
        """
        # Static device descriptor (I2C address plus mux channel).
        self.DATA = TRAXEX
        super(Traxex, self).__init__(aardvark=aardvark, mux=mux)
class Xin(bc.TempSensorInstrument):
    def __init__(self, aardvark, mux):
        """Initialize a Sensirion STS21 temperature sensor.

        :param Aardvark aardvark:
            An Aardvark object through which I2C commands are relayed.
        :param mux:
            The I2C multiplexer behind which this sensor sits
            (mux_address 0x05 per the XIN descriptor).

        .. code-block:: python

            import microlab_instruments as mi

            aa = mi.Aardvark()
            xin = mi.Xin(aa)
            print xin.read_temp()
        """
        # Static device descriptor (I2C address plus mux channel).
        self.DATA = XIN
        super(Xin, self).__init__(aardvark=aardvark, mux=mux)
class Arceus(bc.GPIBInstrument):
    """Agilent 8753ES S-Parameter Network Analyzer (GPIB)."""
    def __init__(self):
        self.DATA = ARCEUS
        super(Arceus, self).__init__(nickname=self.DATA['nickname'])
class Meloetta(bc.GPIBInstrument):
    """Hewlett-Packard 6623A System DC Power Supply (GPIB)."""
    def __init__(self):
        self.DATA = MELOETTA
        super(Meloetta, self).__init__(nickname=self.DATA['nickname'])
class Xerneas(bc.GPIBInstrument):
    """HP 4156A Precision Semiconductor Parameter Analyzer (GPIB)."""
    def __init__(self):
        self.DATA = XERNEAS
        super(Xerneas, self).__init__(nickname=self.DATA['nickname'])
class Darkrai(bc.TCPIPInstrument):
    """Agilent N9020A MXA Signal Analyzer (TCP/IP socket)."""
    def __init__(self):
        self.DATA = DARKRAI
        super(Darkrai, self).__init__(socket_pair=self.DATA['socket'])
class Deoxys(bc.TCPIPInstrument):
    """Agilent InfiniiVision MSO7104A Mixed Signal Oscilloscope (TCP/IP)."""

    def __init__(self):
        self.DATA = DEOXYS
        super(Deoxys, self).__init__(socket_pair=self.DATA['socket'])
        # Configure waveform transfers: big-endian, 16-bit words.
        self.write(':waveform:byteorder msbfirst')
        self.write(':waveform:format word')
        self.write('*OPC')

    def _chop16(self, s):
        """A generator that, given a string, yields its 16-bit slices.

        :param str s:
            The string to be chopped
        :returns out:
            A two-character (16-bit) string.
        :rtype: str
        """
        n = 0
        while True:
            k = s[n:n+2]
            if not k:
                break
            yield k
            n += 2

    def _half_to_float(self, half):
        """Converts half-precision floating-point (16-bit) binary data to
        Python ``float``\ .

        :param str half:
            A 16-bit string to be converted to a Python float
        :returns out:
            The actual floating point number represented by the 16-bit string.
        :rtype: float

        Adapted from `fpmurphy`_

        .. _fpmurphy: http://fpmurphy.blogspot.com/2008/12/half-precision-floating-point-format_14.html

        Fixes vs. the original version: the normal-number repacking used to
        run unconditionally after the zero / subnormal / Inf / NaN branches,
        overwriting ``hpad`` and corrupting those special values; and the
        result was returned as a 1-tuple rather than a float.
        """
        # Get byte order of input
        bo = '<' if self._is_little_endian() else '>'
        # Preliminary unpacking
        fmt = '{0}H'.format(bo)
        h = unpack(fmt, half)[0]
        s = int((h >> 15) & 0x00000001)  # sign
        e = int((h >> 10) & 0x0000001F)  # exponent
        f = int(h & 0x000003FF)          # fraction
        if e == 0x00:                    # exponent is 0
            if f == 0x00:
                hpad = int(s << 31)      # signed zero
            else:
                # Subnormal: renormalize, then widen to single precision.
                while not (f & 0x00000400):
                    f <<= 1
                    e -= 1
                e += 1
                f &= ~0x00000400
                e = e + (127 - 15)
                f = f << 13
                hpad = int((s << 31) | (e << 23) | f)
        elif e == 0x1F:                  # exponent is 31
            if f == 0x00:
                hpad = int((s << 31) | 0x7F800000)              # +/- Inf
            else:
                hpad = int((s << 31) | 0x7F800000 | (f << 13))  # NaN
        else:
            # Normal number: rebias exponent, widen fraction.
            e = e + (127 - 15)
            f = f << 13
            hpad = int((s << 31) | (e << 23) | f)
        # Reinterpret the padded 32-bit pattern as a float (native order on
        # both pack and unpack, so they cancel out).
        st = pack('I', hpad)
        out = unpack('f', st)[0]
        return out

    def read_preamble(self):
        """Read the waveform preamble from Deoxys. It contains the following
        metadata about the waveform data:

        :returns out:
        :rtype: dict
        """
        # TODO Combine write, preamble and data in one function
        # TODO Read :waveform:preamble
        #      format WORD this is two bytes for each data point
        #      type :waveform:type?
        #      points :waveform:points? can be found in the header of :waveform:data?
        #      count :acquire:count? averaging for one data point, etc
        #      xincrement
        #      xorigin
        #      xreference
        #      yincrement
        #      yorigin
        #      yreference
        # TODO Read :save:waveform:start I do not know how to transfer a file
        pass

    def compose_waveform_xy(self, waveform_y, waveform_preamble):
        """Compose the (x,y) data list according to the y data and preamble
        obtained from the instrument.

        :returns out:
            A 2-column list. The first column holds the x values and the
            second column holds the y values.
        :rtype: list
        """
        # TODO Read :waveform:data
        #      :waveform:byteorder DONE
        #      :waveform:unsigned DONE
        #      :waveform:format DONE
        #      :waveform:source channel | function | math | pod | bus | sbus
        #      :system:precision
        #      0x0000 hole
        #      0x0001 clipped low
        #      0xFFFF clipped high
        # TODO Need to adjust waveform_x for special values (clipped, etc)
        # TODO Need to adjust waveform_x according to preamble
        # TODO Need to create waveform_y according to preamble
        # TODO Need to compose X and Y values
        pass

    def ask_waveform_data(self):
        """A convenience function to query the waveform preamble and waveform
        data in one call. Additionally, it also composes the (x,y) data list.

        :returns out:
            A 2-column list. The first column holds the x values and the
            second column holds the y values.
        :rtype: list
        """
        self.write(':waveform:preamble?')
        waveform_preamble = self.read_preamble()
        self.write(':waveform:data?')
        waveform_y = self.read_ieee754()
        out = self.compose_waveform_xy(waveform_y, waveform_preamble)
        return out
class Genesect(bc.TCPIPInstrument):
    """Agilent B2962A Power Source (TCP/IP)."""
    def __init__(self):
        self.DATA = GENESECT
        super(Genesect, self).__init__(socket_pair=self.DATA['socket'])
        # Transfer binary data as 32-bit reals.
        self.write(':format:data real,32')
        self.write('*OPC')
class Giratina(bc.TCPIPInstrument):
    """Agilent B2962A Power Source (TCP/IP)."""
    def __init__(self):
        self.DATA = GIRATINA
        super(Giratina, self).__init__(socket_pair=self.DATA['socket'])
        # Transfer binary data as 32-bit reals.
        self.write(':format:data real,32')
        self.write('*OPC')
class Heatran(bc.TCPIPInstrument):
    """Agilent 16803A Logic Analyzer (TCP/IP)."""
    def __init__(self):
        self.DATA = HEATRAN
        super(Heatran, self).__init__(socket_pair=self.DATA['socket'])
class Ho_oh(bc.TCPIPInstrument):
    """Agilent N5182A MXG Vector Signal Generator (TCP/IP)."""
    def __init__(self):
        self.DATA = HO_OH
        super(Ho_oh, self).__init__(socket_pair=self.DATA['socket'])
class Kyurem(bc.TCPIPInstrument):
    """Agilent N5183A MXG Analog Signal Generator (TCP/IP)."""
    def __init__(self):
        self.DATA = KYUREM
        super(Kyurem, self).__init__(socket_pair=self.DATA['socket'])
class Rayquaza(bc.TCPIPInstrument):
    """Agilent E4443A PSA Series Spectrum Analyzer (TCP/IP)."""
    def __init__(self):
        self.DATA = RAYQUAZA
        super(Rayquaza, self).__init__(socket_pair=self.DATA['socket'])
class Yveltal(bc.TCPIPInstrument):
    """Agilent B2902A Precision Source/Measure Unit (TCP/IP)."""
    def __init__(self):
        self.DATA = YVELTAL
        super(Yveltal, self).__init__(socket_pair=self.DATA['socket'])
        # Transfer binary data as 32-bit reals.
        self.write(':format:data real,32')
        self.write('*OPC')
class Zygarde(bc.TCPIPInstrument):
    """Agilent E5071C ENA Series Network Analyzer (TCP/IP)."""
    def __init__(self):
        self.DATA = ZYGARDE
        super(Zygarde, self).__init__(socket_pair=self.DATA['socket'])
| kitmonisit/microlab-instruments | microlab_instruments/microlab_instruments.py | microlab_instruments.py | py | 12,995 | python | en | code | 1 | github-code | 36 |
5001168268 | import json
import os
import boto3
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource("dynamodb")
def lambda_handler(event, context):
    """Process a batch of DynamoDB stream records and update the statistics
    table: goals and fouls per team, match results, and the match date.
    """
    logger.info(f"EVENT: {event}")
    statistics_table_name = os.environ["STATISTICS_TABLE_NAME"]
    statistics_table = dynamodb.Table(statistics_table_name)

    try:
        for record in event["Records"]:
            image = record["dynamodb"]
            if "NewImage" not in image.keys():
                continue
            new_item = image["NewImage"]

            # Extract required attributes for data processing
            event_type = new_item.get("EventType", {}).get("S")
            event_details = new_item.get("EventDetails", {}).get("S")
            team_name = new_item.get("Team", {}).get("S")
            opponent_team_name = new_item.get("Opponent", {}).get("S")
            match_id = new_item.get("MatchID", {}).get("S")
            if not event_type or not team_name or not match_id or not event_details:
                # Skip just this record.  (Fix: the original `return` aborted
                # the whole batch, silently dropping all remaining records.)
                continue

            # NOTE(review): event_details is parsed but never used below —
            # confirm whether it was meant to feed the statistics.
            event_details = json.loads(event_details)

            # Calculate statistics based on the event_type
            if event_type == "goal":
                update_statistics(statistics_table, match_id, team_name, opponent_team_name, "total_goals_scored", 1)
                update_statistics(statistics_table, match_id, opponent_team_name, team_name, "total_goals_conceded", 1)
            elif event_type == "foul":
                update_statistics(statistics_table, match_id, team_name, opponent_team_name, "total_fouls", 1)
                update_statistics(statistics_table, match_id, opponent_team_name, team_name, "total_fouls", 0)

            update_match_result(statistics_table, team_name, opponent_team_name, match_id)

            # Stamp the match date once, the first time the row is seen.
            response = statistics_table.get_item(Key={"TeamName": team_name, "MatchID": match_id}, AttributesToGet=["Date"])
            if "Item" not in response or not response.get("Item", {}):
                date = new_item.get("Timestamp", {}).get("S")
                update_date(statistics_table, team_name, match_id, date)
                update_date(statistics_table, opponent_team_name, match_id, date)
    except Exception as e:
        print(f"Error processing data: {e}")
        raise  # bare raise preserves the original traceback
def update_statistics(statistics_table, match_id, team_name, opponent_name, statistic_type, value):
    """Add *value* to *statistic_type* for (team, match), creating the row
    with the opponent name on first sight."""
    key = {"TeamName": team_name, "MatchID": match_id}
    existing = statistics_table.get_item(Key=key).get("Item")
    if not existing:
        # First event for this team/match: create a fresh row.
        statistics_table.put_item(Item={
            "TeamName": team_name,
            "MatchID": match_id,
            "Opponent": opponent_name,
            statistic_type: str(value),
        })
    else:
        # Accumulate onto the stored (stringified) counter.
        existing[statistic_type] = str(int(existing.get(statistic_type, "0")) + value)
        statistics_table.put_item(Item=existing)
def update_match_result(statistics_table, team_name, opponent_name, match_id):
    """Derive win/loss/draw from goals scored vs conceded for *team_name*
    and write the Result attribute for both the team and its opponent."""
    row = statistics_table.get_item(
        Key={"TeamName": team_name, "MatchID": match_id}
    ).get("Item", {})
    scored = int(row.get("total_goals_scored", "0"))
    conceded = int(row.get("total_goals_conceded", "0"))

    diff = scored - conceded
    if diff > 0:
        outcome = "win"
    elif diff < 0:
        outcome = "loss"
    else:
        outcome = "draw"
    opposite = {"win": "loss", "loss": "win", "draw": "draw"}[outcome]

    # Write the verdict for the team, then the mirrored one for the opponent.
    for name, verdict in ((team_name, outcome), (opponent_name, opposite)):
        statistics_table.update_item(
            Key={"TeamName": name, "MatchID": match_id},
            UpdateExpression="SET #result = :result",
            ExpressionAttributeNames={"#result": "Result"},
            ExpressionAttributeValues={":result": verdict},
        )
def update_date(table, team_name, match_id, date):
    """Set the Date attribute for (team_name, match_id) in the statistics table."""
    # Update the "Date" attribute in the StatisticsTable for the team and match_id
    table.update_item(
        Key={"TeamName": team_name, "MatchID": match_id},
        UpdateExpression="SET #dateAttr = :dateValue",
        ExpressionAttributeNames={"#dateAttr": "Date"},
        ExpressionAttributeValues={":dateValue": str(date)}
) | HeNeos/SportsAnalyticsPlatform | services/dynamodb/runtime/lambda_function.py | lambda_function.py | py | 4,924 | python | en | code | 0 | github-code | 36 |
5544666629 | import urllib.request
import datetime
import json
# Return [contest count, rating delta] for roughly the last month of
# Codeforces rated contests.
def get_CF_ContestCount(name):
    """Return ``[contests, rating_delta]`` for handle *name* over the last
    ~month, or ``[-1, -1]`` on any error.

    The rating-update timestamp is used as the contest time.  Because rating
    updates usually lag by about a day, the window is widened to 32 days.
    (Renamed the original locals ``sum``/``cnt`` — ``sum`` shadowed the
    builtin.)
    """
    apiUrl = "https://codeforces.com/api/user.rating?handle=" + name
    try:
        # NOTE(review): timeout=2000 is in seconds for urlopen — likely meant
        # to be milliseconds; confirm intent.
        page = urllib.request.urlopen(apiUrl, timeout=2000)
        s = page.read().decode('utf-8')
        contestsData = json.loads(s)['result']
        lastTime = (datetime.timedelta(days=-32) +
                    datetime.datetime.now()).timestamp()
        rating_delta = 0
        count = 0
        for contest in contestsData:
            if contest['ratingUpdateTimeSeconds'] < lastTime:
                continue
            count += 1
            rating_delta += contest['newRating'] - contest['oldRating']
        return [count, rating_delta]
    except Exception as e:
        print(str(e))
        return [-1, -1]
if __name__ == "__main__":
    # Simple manual test loop: prompt for a handle, print its stats.
    while(True):
        name=input("请输入要爬的ID:")
        print(get_CF_ContestCount(name))
| Linzecong/LPOJ | CrawlingServer/CodeForceContestCounter.py | CodeForceContestCounter.py | py | 1,067 | python | en | code | 216 | github-code | 36 |
39871891033 | # -*- coding: UTF-8 -*-
'''
Created on 2022年1月19日
@author: automan
'''
class Language(object):
    """UI string tables for PV (solar) notifications, keyed by message ID.

    - ``clanguage``: Simplified Chinese strings.
    - ``elanguage``: English strings.
    - ``jlanguage``: NOTE(review): despite the name (Japanese?), these values
      duplicate the Chinese strings — confirm whether this is a placeholder
      awaiting translation.

    ``PST_*`` entries are %-style templates filled in at notification time.
    """
    clanguage = {
        'PV_01_01' : "通知方法设定",
        'PV_01_02' : "通知 OFF/ON",
        'PV_01_03' : "通知",
        'PV_01_04' : "尚无配对的太阳能板。请先完成装置的配对。",
        'PV_01_05' : "储存",
        'PV_01_06' : "根据设定的时间每小时发送一次通知。",
        'PV_01_07' : "目标量",
        'PV_01_08' : "请输入1~9,999的数字。",
        'PV_01_09' : "低于该值时通知",
        'PV_01_10' : "开始时间",
        'PV_01_11' : "结束时间",
        'PV_01_12' : "完成",
        'PV_01_13' : "瓩⋅时",
        'PST_01_01' : "发电低下通知",
        'PST_01_02' : "太阳能发电模组 %s,今日 %s ~ %s 发电量: %s kWh。低于 %s kWh 的 %s%。",
        'PST_02_01' : "发电异常通知",
        'PST_02_02' : "太阳能发电模组 %s,过去 %s 分钟内发电状况异常,请检查设备状态。",
        'PST_03_01' : "群组发电异常通知",
        'PST_03_02' : "群组 %s 中,过去 %s 分钟内有部分太阳能发电模组发电状况异常,请检查设备状态。"
    }
    elanguage = {
        'PV_01_01' : 'PV Generation Alert',
        'PV_01_02' : 'Notification OFF/ON',
        'PV_01_03' : 'Notification',
        'PV_01_04' : 'There are no paired solar panels yet. Please complete the pairing of the appliance first.',
        'PV_01_05' : 'Save',
        'PV_01_06' : 'Send notifications every hour based on a set time.',
        'PV_01_07' : 'Goal',
        'PV_01_08' : 'Please enter a number between 1~9,999 kWh.',
        'PV_01_09' : 'Lower Bound',
        'PV_01_10' : 'Starting Time',
        'PV_01_11' : 'End Time',
        'PV_01_12' : 'Done',
        'PV_01_13' : 'kWh',
        'PST_01_01' : 'Low power generation notification',
        'PST_01_02' : "Today %s ~ %s power generation: %s kWh. Below %s% of %s kWh.",
        'PST_02_01' : 'Abnormal power generation notification',
        'PST_02_02' : 'Abnormal power generation in the past %s minutes, please check your equipments.',
        'PST_03_01' : 'Abnormal group power generation notification',
        'PST_03_02' : 'Abnormal power generation in the group, %s, in the past %s minutes, please check your equipments.'
    }
    jlanguage = {
        'PV_01_01' : "通知方法设定",
        'PV_01_02' : "通知 OFF/ON",
        'PV_01_03' : "通知",
        'PV_01_04' : "尚无配对的太阳能板。请先完成装置的配对。",
        'PV_01_05' : "储存",
        'PV_01_06' : "根据设定的时间每小时发送一次通知。",
        'PV_01_07' : "目标量",
        'PV_01_08' : "请输入1~9,999的数字。",
        'PV_01_09' : "低于该值时通知",
        'PV_01_10' : "开始时间",
        'PV_01_11' : "结束时间",
        'PV_01_12' : "完成",
        'PV_01_13' : "瓩⋅时",
        'PST_01_01' : "发电低下通知",
        'PST_01_02' : "今日 %s ~ %s 发电量: %s kWh。低于 %s kWh 的 %s%。",
        'PST_02_01' : "发电异常通知",
        'PST_02_02' : "过去 %s 分钟内发电状况异常,请检查设备状态。",
        'PST_03_01' : "群组发电异常通知",
        'PST_03_02' : "群组 %s 中,过去 %s 分钟内有部分太阳能发电模组发电状况异常,请检查设备状态。"
    }

    def __init__(self):
        '''
        Constructor
        '''
| panda109/tsfm | app/main/language.py | language.py | py | 3,797 | python | zh | code | 0 | github-code | 36 |
"""Resolve IMDb IDs for a list of movie titles and persist them as JSON."""
import imdb
import json
from tqdm import tqdm

ia = imdb.IMDb()
# Set to True to (re)query IMDb for titles; False just normalizes the cache.
DATA_NEEDED = False

with open("./imdb/movie_title-id.json", "r") as data:
    current_data = json.load(data)

with open('./movie_titles_list.json', "r") as movie_list:
    movies = json.load(movie_list)

if DATA_NEEDED:
    for i in tqdm(range(0, len(movies))):
        # searching the name
        name = movies[i]
        try:
            search = ia.search_movie(name)
            # getting the id (renamed: `id` shadowed the builtin)
            movie_id = search[0].movieID
            current_data[name] = [movie_id]
        except Exception:
            continue

# Unwrap single-element [id] lists to a bare id string.  Guard on the type:
# the original `len(...) > 0` test also matched plain strings, so re-running
# the script truncated already-unwrapped ids to their first character.
for movie_name in current_data:
    value = current_data[movie_name]
    if isinstance(value, list) and value:
        current_data[movie_name] = value[0]

# Fix: the original wrote to "./imdB/..." (capital B), a different path on
# case-sensitive filesystems than the "./imdb/..." file read above.
with open("./imdb/movie_title-id.json", "w") as data_list:
    json.dump(current_data, data_list)
| Shreneken/movie-data-getter | imdb/imdb_id.py | imdb_id.py | py | 824 | python | en | code | 0 | github-code | 36 |
70489042024 | from unittest.mock import MagicMock
from uuid import uuid4
import pytest
from pytest import raises
from pydantic import ValidationError
from api.exceptions import InvalidParameterError
from api.schemas.output import (
ConsultaProcessoOutput,
ExtractDataOutput,
ExtractDataSecondInstanceOutput,
StatusSolicitacaoOutput
)
def test_valida_numero_solicitacao_valid():
    """Check that UUID validation succeeds with a valid UUID."""
    valid_uuid = str(uuid4())
    ConsultaProcessoOutput(numero_solicitacao=valid_uuid)
@pytest.mark.parametrize("invalid_uuid", ["invalid_uuid_string"])
def test_valida_numero_solicitacao_invalid(invalid_uuid):
    """Check that UUID validation fails with an invalid string."""
    # NOTE(review): this MagicMock context manager patches nothing — it is a
    # no-op, so the test relies entirely on the model itself raising
    # InvalidParameterError.  Confirm whether mock.patch was intended here.
    with MagicMock(side_effect=InvalidParameterError("Mocked error")):
        with raises(InvalidParameterError):
            ConsultaProcessoOutput(numero_solicitacao=invalid_uuid)
def test_valid_extract_data():
    """Check that the model validates with correct data."""
    valid_data = {
        'classe': 'Penal',
        'area': 'Criminal',
        'assunto': 'Roubo',
        'data_distribuicao': 'Sorteio',
        'juiz': 'Dr. João Silva',
        'valor_acao': '5000,00',
        'partes_processo': [],
        'lista_movimentacoes': []
    }
    ExtractDataOutput(**valid_data)
def test_valid_output():
    """Check a valid output for ExtractDataSecondInstanceOutput."""
    valid_data = {
        "classe": "Recurso Penal",
        "area": "Criminal",
        "assunto": "Furto",
        "partes_processo": [],
        "lista_movimentacoes": []
    }
    output = ExtractDataSecondInstanceOutput(**valid_data)
    assert output.classe == "Recurso Penal"
    assert output.area == "Criminal"
def test_missing_required_field():
    """Check that missing required fields fail validation for
    ExtractDataSecondInstanceOutput ("area" is intentionally omitted)."""
    invalid_data = {
        "classe": "Recurso Penal",
        "assunto": "Furto",
        "partes_processo": [],
        "lista_movimentacoes": []
    }
    with raises(ValidationError):
        ExtractDataSecondInstanceOutput(**invalid_data)
def test_valid_output_first_instance_only():
    """Check a valid StatusSolicitacaoOutput with first-instance data only."""
    valid_data = {
        "numero_processo": "0113546-72.2018.8.02.0001",
        "sigla_tribunal": "TJAL",
        "status": "Na Fila",
        "first_instance": {
            "classe": "Penal",
            "area": "Criminal",
            "assunto": "Roubo",
            "data_distribuicao": "Sorteio",
            "juiz": "Dr. João Silva",
            "partes_processo": [],
            "lista_movimentacoes": []
        }
    }
    output = StatusSolicitacaoOutput(**valid_data)
    assert output.numero_processo == "0113546-72.2018.8.02.0001"
    assert output.first_instance.classe == "Penal"
def test_valid_output_both_instances():
    """Check a valid StatusSolicitacaoOutput with data for both instances."""
    valid_data = {
        "numero_processo": "0113546-72.2018.8.02.0001",
        "sigla_tribunal": "TJAL",
        "status": "Na Fila",
        "first_instance": ExtractDataOutput(
            classe="Penal",
            area="Criminal",
            assunto="Roubo",
            data_distribuicao="Sorteio",
            juiz="Dr. João Silva",
            valor_acao="5000,00",
            partes_processo=[],
            lista_movimentacoes=[]
        ),
        "second_instance": ExtractDataSecondInstanceOutput(
            classe="Recurso Penal",
            area="Criminal",
            assunto="Furto",
            valor_acao="2500,55",
            partes_processo=[],
            lista_movimentacoes=[]
        )
    }
    output = StatusSolicitacaoOutput(**valid_data)
    assert output.numero_processo == "0113546-72.2018.8.02.0001"
    assert output.sigla_tribunal == "TJAL"
    assert output.status == "Na Fila"
    assert isinstance(output.first_instance, ExtractDataOutput)
    assert isinstance(output.second_instance, ExtractDataSecondInstanceOutput)
def test_invalid_output():
    """Check an invalid StatusSolicitacaoOutput."""
    invalid_data = {
        "numero_processo": 123456  # a number instead of a string
    }
    with raises(ValidationError):
        StatusSolicitacaoOutput(**invalid_data)
| BrunoPisaneschi/JusBrasil | tests/unit/api/schemas/test_output.py | test_output.py | py | 4,369 | python | pt | code | 0 | github-code | 36 |
26094620258 | import google
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import base64
keyPath = '../firestoreKEY.json'
def initializeDb(path):
    """Initialize the Firebase app from a service-account key file at *path*
    and return a Firestore client."""
    cred = credentials.Certificate(path)
    firebase_admin.initialize_app(cred)
    db = firestore.client()
    return db
def stringToImage(string, imagePath):
    """Decode a base64-encoded image payload and write it to *imagePath*.

    Accepts ``str`` or ``bytes``.  Fix: the original called
    ``string.decode('base64')``, which does not exist in Python 3 (``str``
    has no ``decode`` and the 'base64' codec was removed); use the
    ``base64`` module instead.  The file handle is now closed via ``with``
    even if the write fails.
    """
    with open(imagePath, "wb") as fh:
        fh.write(base64.b64decode(string))
def getFieldFromDB(database, collectionName, docName, fieldName):
    """Return *fieldName* from document *docName* in *collectionName*,
    or the sentinel string 'NO SUCH DOCUMENT' when the document is missing
    or lacks the field.

    Fix: the original caught ``google.cloud.exception.NotFound`` — the
    module is ``google.cloud.exceptions`` (plural), so matching the clause
    itself raised AttributeError.  KeyError/TypeError are also caught so a
    missing field or a non-existent document (``to_dict()`` → None) falls
    back to the sentinel instead of crashing.
    """
    dbRef = database.collection(collectionName).document(docName)
    try:
        doc = dbRef.get()
        str = doc.to_dict()[fieldName]
    except (google.cloud.exceptions.NotFound, KeyError, TypeError):
        str = 'NO SUCH DOCUMENT'
    return str
def getDocsFromCol(database, collectionName):
    """Return all document snapshots in Firestore collection *collectionName*."""
    return database.collection(collectionName).get()
def main():
    """Download every document in the 'photos' collection and write its 'im'
    field out as a JPEG named after the document id.

    NOTE(review): assumes the 'im' field holds a base64-encoded image and
    that an 'images/' directory already exists — confirm both.
    """
    database = initializeDb(keyPath)
    # str = getFieldFromDB(database, u'photos', u'04, 03:59AM on December 15, 2018', u'im')
    strIngs = getDocsFromCol(database, u'photos')
    for ims in strIngs:
        docInfo = ims.to_dict()
        stringToImage(docInfo[u'im'], "images/" + ims.id + ".jpg")

if __name__ == '__main__':
    main()
| 16francej/firestoredemo | RecieveImage.py | RecieveImage.py | py | 1,190 | python | en | code | 0 | github-code | 36 |
73603683305 | from os.path import join
from django.shortcuts import render
from random import randint, randrange
from physics import physics_classes_functions as ucf
from physics import variety_lists as vl
def list_callable_functions():
    """Return the names of every callable defined in this module.

    This function MUST remain in this file to work correctly, because it
    inspects this module's own globals().

    Used by:
        modulesList
        previousNext
    """
    return [name for name, obj in globals().items()
            if callable(obj) and obj.__module__ == __name__]
def modulesList():#this list is used by views to automatically generate views!
    """Build the view-generation list via ucf.moduleListGen.

    NOTE(review): the 'd' prefix and indices (0, 1) appear to mirror the
    naming scheme of the question functions below (e.g. ``db_ca_...``) —
    confirm against ucf.moduleListGen's signature.
    """
    return ucf.moduleListGen(list_callable_functions(), 'd', 0, 1)
def module_path():
    """URL prefix under which this module's question views are served."""
    return '/physics/electricity/'
#selects a view function at random from moduleList generated list and returns everything needed to generate a view
def emfIntResSetup():
    """Draw a random emf / internal-resistance circuit and return the derived
    electrical quantities (rounded where appropriate).

    The randint() calls are made in a fixed order so that seeded runs are
    reproducible.
    """
    emf = randint(6, 18)                 # volts
    internal_r = randint(5, 25) / 10     # ohms
    load_r = randint(2, 9)               # ohms
    total_r = load_r + internal_r
    current = emf / total_r
    terminal_pd = current * load_r
    lost_pd = current * internal_r
    p_supplied = current * emf
    p_delivered = current * current * load_r
    p_wasted = current * current * internal_r
    # Second scenario: a freshly drawn current through the same cell.
    current2 = randint(1, 100) / 10
    terminal_pd2 = current2 * load_r
    load_r2 = emf / current2 - internal_r
    return (emf, internal_r, load_r, total_r, round(current, 3),
            round(terminal_pd, 3), round(lost_pd, 3), round(p_supplied, 3),
            round(p_delivered, 3), round(p_wasted, 3), round(current2, 3),
            round(terminal_pd2, 3), round(load_r2, 2))
def db_ca_resistance_current_lost_pxax8():
q = ucf.Question(ucf.currentFuncName())
q.previousQ, q.nextQ, q.currentQname, q.nextQname, q.previousQname = ucf.previousNext(list_callable_functions(), ucf.currentFuncName(), module_path(), ucf.currentFuncName()[0:2], 0, 2)
emf, intRes, resistor, totalRes, current, terminalPd, wastedPd, powerSupplied, powerDelivered, powerWasted, current2, terminalPd2, resistor2 = emfIntResSetup()
q.questionBase = [f"A battery of emf {emf}v and internal resistance of {intRes} \u03A9 is connected to a {resistor} \u03A9 resistor.","Calculate:"]
question1 = ["the total resistance of the circuit"]
question2 = ["the current through the battery"]
question3 = ["the lost pd"]
question4 = ["the pd across the cell terminals."]
marks1, marks2, marks3, marks4= 2, 2, 2, 2
answer1 = f"{totalRes} \u03A9"
answer2 = f"{current} A"
answer3 = f"{wastedPd} v"
answer4 = f"{terminalPd} v"
q.questionPartList = [
{'sub_number': 1, 'sub_question': question1, 'sub_answer': answer1, 'sub_mark': marks1},
{'sub_number': 2, 'sub_question': question2, 'sub_answer': answer2, 'sub_mark': marks2},
{'sub_number': 3, 'sub_question': question3, 'sub_answer': answer3, 'sub_mark': marks3},
{'sub_number': 4, 'sub_question': question4, 'sub_answer': answer4, 'sub_mark': marks4},
]
return q.returnAll()
def db_cb_resistance_and_power_lost_pd_pxax8():
q = ucf.Question(ucf.currentFuncName())
q.previousQ, q.nextQ, q.currentQname, q.nextQname, q.previousQname = ucf.previousNext(list_callable_functions(), ucf.currentFuncName(), module_path(), ucf.currentFuncName()[0:2], 0, 2)
emf, intRes, resistor, totalRes, current, terminalPd, wastedPd, powerSupplied, powerDelivered, powerWasted, current2, terminalPd2, resistor2 = emfIntResSetup()
q.questionBase = [f"A battery of emf {emf} v and internal resistance of {intRes} \u03A9 is connected to a {resistor} \u03A9 resistor.","Calculate:"]
question1 = ["the current"]
question2 = [")the terminal pd"]
question3 = [f"the power delivered to the {resistor} \u03A9 resistor"]
question4 = ["the power wasted in the cell."]
marks1, marks2, marks3, marks4= 2, 2, 2, 2
answer1 = f"{current} A"
answer2 = f"{terminalPd} v"
answer3 = f"{powerDelivered} w"
answer4 = f"{powerWasted} w"
q.questionPartList = [
{'sub_number': 1, 'sub_question': question1, 'sub_answer': answer1, 'sub_mark': marks1},
{'sub_number': 2, 'sub_question': question2, 'sub_answer': answer2, 'sub_mark': marks2},
{'sub_number': 3, 'sub_question': question3, 'sub_answer': answer3, 'sub_mark': marks3},
{'sub_number': 4, 'sub_question': question4, 'sub_answer': answer4, 'sub_mark': marks4},
]
return q.returnAll()
def db_cc_changing_current_impact_on_emf_pxax5():
q = ucf.Question(ucf.currentFuncName())
q.previousQ, q.nextQ, q.currentQname, q.nextQname, q.previousQname = ucf.previousNext(list_callable_functions(), ucf.currentFuncName(), module_path(), ucf.currentFuncName()[0:2], 0, 2)
emf, intRes, resistor, totalRes, current, terminalPd, wastedPd, powerSupplied, powerDelivered, powerWasted, current2, terminalPd2, resistor2 = emfIntResSetup()
q.questionBase = [f"The pd acrpss the terminals of a cell was {terminalPd} v when the current from the cell was {current} A, and {terminalPd2} v when the current was {current2} A.","Calculate:"]
question1 = ["the internal resistance of the cell"]
question2 = ["the cell's emf"]
marks1, marks2 = 2, 3
answer1 = f"{intRes} \u03A9"
answer2 = f"{emf} v"
q.questionPartList = [
{'sub_number': 1, 'sub_question': question1, 'sub_answer': answer1, 'sub_mark': marks1},
{'sub_number': 2, 'sub_question': question2, 'sub_answer': answer2, 'sub_mark': marks2},
]
return q.returnAll()
def db_cd_changing_current_effect_on_resistor_pxax5():
q = ucf.Question(ucf.currentFuncName())
q.previousQ, q.nextQ, q.currentQname, q.nextQname, q.previousQname = ucf.previousNext(list_callable_functions(), ucf.currentFuncName(), module_path(), ucf.currentFuncName()[0:2], 0, 2)
emf, intRes, resistor, totalRes, current, terminalPd, wastedPd, powerSupplied, powerDelivered, powerWasted, current2, terminalPd2, resistor2 = emfIntResSetup()
q.questionBase = [f"A battery of unknown emf and internal resistance is connected in series with an ammeter and a resistance box. The current was {current} A when the box was set at {resistor} \u03A9 and {current2} A at {resistor2} \u03A9.","Calculate:"]
question1 = ["the cell's emf"]
question2 = ["the cell's internal resistance"]
marks1, marks2 = 2, 3
answer1 = f"{emf} v"
answer2 = f"{intRes} \u03A9"
q.questionPartList = [
{'sub_number': 1, 'sub_question': question1, 'sub_answer': answer1, 'sub_mark': marks1},
{'sub_number': 2, 'sub_question': question2, 'sub_answer': answer2, 'sub_mark': marks2},
]
return q.returnAll()
| devjolt/eqg | physics/d_electricity/dbc_emf_and_internal_resistance.py | dbc_emf_and_internal_resistance.py | py | 6,642 | python | en | code | 0 | github-code | 36 |
29858390178 | import time
import pickle
from pathlib import Path
from os.path import splitext
import json
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# ASReview dependencies
from asreview.review import ReviewSimulate, ReviewOracle, MinimalReview
from asreview.utils import text_to_features
from asreview.types import is_pickle
from asreview.config import AVAILABLE_CLI_MODI, AVAILABLE_REVIEW_CLASSES,\
DEFAULT_MODEL, DEFAULT_QUERY_STRATEGY, DEFAULT_BALANCE_STRATEGY,\
DEFAULT_N_INSTANCES, DEFAULT_N_PRIOR_INCLUDED, DEFAULT_N_PRIOR_EXCLUDED,\
DEMO_DATASETS, KERAS_MODELS
from asreview.models.embedding import download_embedding, EMBEDDING_EN
from asreview.models.embedding import load_embedding, sample_embedding
from asreview.utils import get_data_home
from asreview.query_strategies import get_query_strategy
from asreview.balance_strategies import get_balance_strategy
from asreview.logging import Logger
from asreview.settings import ASReviewSettings
from asreview.models import create_lstm_base_model, lstm_base_model_defaults
from asreview.models import create_lstm_pool_model, lstm_pool_model_defaults
from asreview.models import lstm_fit_defaults
from asreview.readers import ASReviewData
def get_reviewer(dataset,
mode='oracle',
model=DEFAULT_MODEL,
query_strategy=DEFAULT_QUERY_STRATEGY,
balance_strategy=DEFAULT_BALANCE_STRATEGY,
n_instances=DEFAULT_N_INSTANCES,
n_queries=1,
embedding_fp=None,
verbose=1,
prior_included=None,
prior_excluded=None,
n_prior_included=DEFAULT_N_PRIOR_INCLUDED,
n_prior_excluded=DEFAULT_N_PRIOR_EXCLUDED,
config_file=None,
src_log_fp=None,
**kwargs
):
# Find the URL of the datasets if the dataset is an example dataset.
if dataset in DEMO_DATASETS.keys():
dataset = DEMO_DATASETS[dataset]
if src_log_fp is not None:
logger = Logger(log_fp=src_log_fp)
settings = logger.settings
else:
logger = None
settings = ASReviewSettings(model=model, n_instances=n_instances,
n_queries=n_queries,
n_prior_included=n_prior_included,
n_prior_excluded=n_prior_excluded,
query_strategy=query_strategy,
balance_strategy=balance_strategy,
mode=mode, data_fp=dataset
)
settings.from_file(config_file)
model = settings.model
if model in ["lstm_base", "lstm_pool"]:
base_model = "RNN"
else:
base_model = "other"
# Check if mode is valid
if mode in AVAILABLE_REVIEW_CLASSES:
if verbose:
print(f"Start review in '{mode}' mode.")
else:
raise ValueError(f"Unknown mode '{mode}'.")
print(f"Model: '{model}'")
# if the provided file is a pickle file
if is_pickle(dataset):
with open(dataset, 'rb') as f:
data_obj = pickle.load(f)
if isinstance(data_obj, tuple) and len(data_obj) == 3:
X, y, embedding_matrix = data_obj
elif isinstance(data_obj, tuple) and len(data_obj) == 4:
X, y, embedding_matrix, _ = data_obj
else:
raise ValueError("Incorrect pickle object.")
else:
as_data = ASReviewData.from_file(dataset)
_, texts, labels = as_data.get_data()
# get the model
if base_model == "RNN":
if embedding_fp is None:
embedding_fp = Path(
get_data_home(),
EMBEDDING_EN["name"]
).expanduser()
if not embedding_fp.exists():
print("Warning: will start to download large "
"embedding file in 10 seconds.")
time.sleep(10)
download_embedding(verbose=verbose)
# create features and labels
X, word_index = text_to_features(texts)
y = labels
embedding = load_embedding(embedding_fp, word_index=word_index)
embedding_matrix = sample_embedding(embedding, word_index)
elif model.lower() in ['nb', 'svc', 'svm']:
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer())])
X = text_clf.fit_transform(texts)
y = labels
settings.fit_kwargs = {}
settings.query_kwargs = {}
if base_model == 'RNN':
if model == "lstm_base":
model_kwargs = lstm_base_model_defaults(settings, verbose)
create_lstm_model = create_lstm_base_model
elif model == "lstm_pool":
model_kwargs = lstm_pool_model_defaults(settings, verbose)
create_lstm_model = create_lstm_pool_model
else:
raise ValueError(f"Unknown model {model}")
settings.fit_kwargs = lstm_fit_defaults(settings, verbose)
settings.query_kwargs['verbose'] = verbose
# create the model
model = KerasClassifier(
create_lstm_model(embedding_matrix=embedding_matrix,
**model_kwargs),
verbose=verbose
)
elif model.lower() in ['nb']:
from asreview.models import create_nb_model
model = create_nb_model()
elif model.lower() in ['svm', 'svc']:
from asreview.models import create_svc_model
model = create_svc_model()
else:
raise ValueError('Model not found.')
# Pick query strategy
query_fn, query_str = get_query_strategy(settings)
if verbose:
print(f"Query strategy: {query_str}")
train_data_fn, train_method = get_balance_strategy(settings)
if verbose:
print(f"Using {train_method} method to obtain training data.")
# Initialize the review class.
if mode == "simulate":
reviewer = ReviewSimulate(
X, y,
model=model,
query_strategy=query_fn,
train_data_fn=train_data_fn,
n_instances=settings.n_instances,
n_queries=settings.n_queries,
verbose=verbose,
prior_included=prior_included,
prior_excluded=prior_excluded,
n_prior_included=settings.n_prior_included,
n_prior_excluded=settings.n_prior_excluded,
fit_kwargs=settings.fit_kwargs,
balance_kwargs=settings.balance_kwargs,
query_kwargs=settings.query_kwargs,
logger=logger,
**kwargs)
elif mode == "oracle":
reviewer = ReviewOracle(
X,
model=model,
query_strategy=query_fn,
as_data=as_data,
train_data_fn=train_data_fn,
n_instances=settings.n_instances,
n_queries=settings.n_queries,
verbose=verbose,
prior_included=prior_included,
prior_excluded=prior_excluded,
fit_kwargs=settings.fit_kwargs,
balance_kwargs=settings.balance_kwargs,
query_kwargs=settings.query_kwargs,
logger=logger,
**kwargs)
elif mode == "minimal":
reviewer = MinimalReview(
X,
model=model,
query_strategy=query_fn,
train_data_fn=train_data_fn,
n_instances=settings.n_instances,
n_queries=settings.n_queries,
verbose=verbose,
prior_included=prior_included,
prior_excluded=prior_excluded,
fit_kwargs=settings.fit_kwargs,
balance_kwargs=settings.balance_kwargs,
query_kwargs=settings.query_kwargs,
logger=logger,
**kwargs)
else:
raise ValueError("Error finding mode, should never come here...")
reviewer._logger.add_settings(settings)
return reviewer
def review(*args, mode="simulate", model=DEFAULT_MODEL, save_model_fp=None,
**kwargs):
if mode not in AVAILABLE_CLI_MODI:
raise ValueError(f"Unknown mode '{mode}'.")
reviewer = get_reviewer(*args, model=model, **kwargs)
# Wrap in try expect to capture keyboard interrupt
try:
# Start the review process.
reviewer.review()
except KeyboardInterrupt:
print('\nClosing down the automated systematic review.')
# If we're dealing with a keras model, we can save the last model weights.
if save_model_fp is not None and model in KERAS_MODELS:
save_model_h5_fp = splitext(save_model_fp)[0]+".h5"
json_model = model.model.to_json()
with open(save_model_fp, "w") as f:
json.dump(json_model, f, indent=2)
model.model.save_weights(save_model_h5_fp, overwrite=True)
if not reviewer.log_file:
print(reviewer._logger._print_logs())
def review_oracle(dataset, **kwargs):
"""CLI to the interactive mode."""
review(dataset, mode='oracle', **kwargs)
def review_simulate(dataset, **kwargs):
"""CLI to the oracle mode."""
review(dataset, mode='simulate', **kwargs)
| syuanuvt/automated-systematic-review | asreview/review/factory.py | factory.py | py | 9,549 | python | en | code | null | github-code | 36 |
6432536009 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import apps.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0005_auto_20180802_1614'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', apps.users.models.MyUserManager()),
],
),
]
| eashme/Django-backend | Hello_Server/apps/users/migrations/0006_auto_20180802_1723.py | 0006_auto_20180802_1723.py | py | 449 | python | en | code | 1 | github-code | 36 |
9148386910 | #coding=utf-8
"""
这是一个关于QQ模拟(QListView的使用)的例子--模型定义!
文章链接:http://www.xdbcb8.com/archives/701.html
"""
import random
import Random_Name
from PyQt5.QtCore import QAbstractListModel, Qt, QModelIndex, QVariant, QSize
from PyQt5.QtGui import QIcon, QFont
class ListModel(QAbstractListModel):
'''
自定义模型
'''
def __init__(self):
'''
一些初始设置
'''
super().__init__()
self.ListItemData = []
# 存储每个QQ用户的列表
self.Data_init()
def data(self, index, role):
'''
子类化QAbstractListModel必须要实现的函数,主要作用就是返回index所引用项目的给定role下存储的数据。
'''
if index.isValid() or (0 <= index.row() < len(self.ListItemData)):
if role == Qt.DisplayRole:
return QVariant(self.ListItemData[index.row()]['name'])
# 文本形式呈现数据
elif role == Qt.DecorationRole:
return QVariant(QIcon(self.ListItemData[index.row()]['iconPath']))
# 以图标形式呈现装饰数据
elif role == Qt.SizeHintRole:
return QVariant(QSize(70, 80))
# 视图项目大小
elif role == Qt.TextAlignmentRole:
return QVariant(int(Qt.AlignHCenter|Qt.AlignVCenter))
# 文本对齐方式
elif role == Qt.FontRole:
font = QFont()
font.setPixelSize(20)
return QVariant(font)
# 字体设置
return QVariant()
# 非上述情况,返回为空,记住这里是QVariant()
def rowCount(self, parent = QModelIndex()):
'''
返回行数,在这里就是数据列表的大小。
'''
return len(self.ListItemData)
def Data_init(self):
'''
数据初始化
'''
randomnum = random.sample(range(26), 10)
# 从0-25个数字中随机的抽取10个不重复的数字组成一个列表
for i in randomnum:
randname = Random_Name.getname()
ItemData = {'name':'', 'iconPath':''}
ItemData['name'] = randname
ItemData['iconPath'] = "./res/"+ str(i) + ".jpg"
# 遍历这个列表randomnum,其中联系人的姓名我是随机生成的,随机的生成图标的路径;把姓名和图标路径添加到字典当中。
self.ListItemData.append(ItemData)
# append到数据列表里面。
def addItem(self, itemData):
'''
新增的操作实现
'''
if itemData:
self.beginInsertRows(QModelIndex(), len(self.ListItemData), len(self.ListItemData) + 1)
self.ListItemData.append(itemData)
self.endInsertRows()
# 结束行插入操作
def deleteItem(self, index):
'''
指定索引的数据从数据列表中删除
'''
del self.ListItemData[index]
def getItem(self, index):
'''
获得相应的项目数据
'''
if index > -1 and index < len(self.ListItemData):
return self.ListItemData[index]
| redmorningcn/PyQT5Example | PyQt5All/PyQt535、36、37/ListModel.py | ListModel.py | py | 3,281 | python | zh | code | 1 | github-code | 36 |
17929378111 | #!/usr/bin/python3
import multiprocessing
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
import math
import time
def points():
import sys
for line in open("input.txt"):
line = line.replace("position=", "").replace("velocity=", "").replace(' ', '').replace('<', '').strip()
a = line.split('>')
yield [tuple([int(y) for y in x.split(',')]) for x in a[0:2]]
def produce_state(PP, PV, T):
ret = []
for p in zip(PP, PV):
v = p[1]
p = p[0]
ret.append([p[0] + v[0] * T, p[1] + v[1] * T])
return ret
def render(PP, T):
plt.scatter([p[0] for p in PP], [p[1] for p in PP])
plt.show(block=True)
def BB(PP):
sx = 0
sy = 0
ex = 0
ey = 0
for p in PP:
if p[0] < sx: sx = p[0]
if p[0] > ex: ex = p[0]
if p[1] < sy: sy = p[1]
if p[1] > ey: ey = p[1]
return sx, sy, ex, ey
P = [point for point in points()]
# calc BB
PP = [point[0] for point in P]
PV = [point[1] for point in P]
T = 0
Emin = 0
Tmin = -1
step = 1
def update(T):
S = produce_state(PP, PV, T)
sx, sy, ex, ey = BB(S)
#render(S, T)
return (math.sqrt((sx - ex) ** 2 + (sy - ey) ** 2))
fuck = 256000
scale = 100
start = int(10518.9 * scale)
end = int((1 + 10519) * scale)
#for T in range(start, end, 1):
for i in range(1):
T = 10519
#T = T / scale
S = produce_state(PP, PV, T)
sx, sy, ex, ey = BB(S)
cunt = (math.sqrt((sx - ex) ** 2 + (sy - ey) ** 2))
print(T)
#if fuck< cunt:
# print(T)
# render(S, T)
# break
render(S, T)
fuck = cunt
| Easimer/advent-of-code-2018 | day10/day10.py | day10.py | py | 1,643 | python | en | code | 0 | github-code | 36 |
74329290664 | # import pytest
from schemaql.helpers.fileio import read_yaml
from schemaql.connectors.snowflake import SnowflakeConnector
from schemaql.connectors.bigquery import BigQueryConnector
class TestConnections(object):
def _get_connection(self, connection_name):
connections_file = "connections.yml"
connections = read_yaml(connections_file)
connection_info = connections[connection_name]
connection_type = connection_info["type"]
supported_connectors = {
"snowflake": SnowflakeConnector,
"bigquery": BigQueryConnector,
}
connector = supported_connectors[connection_type](connection_info)
return connector
def test_bigquery_connection(self):
conn = self._get_connection("tpch-snowflake")
assert conn is not None
cur = conn.engine.connect()
assert cur is not None
def test_snowflake_connection(self):
conn = self._get_connection("calogica-bq")
assert conn is not None
cur = conn.engine.connect()
assert cur is not None
| clausherther/schemaql-core | test/integration/test_connections.py | test_connections.py | py | 1,089 | python | en | code | 0 | github-code | 36 |
25759790506 | import os
from datetime import datetime
dir = os.path.dirname(os.getcwd()+'/users/')
print(dir)
try:
os.stat(dir)
except:
os.mkdir(dir)
print('make directory')
all_users = {}
for f in os.listdir(os.getcwd()):
if f.endswith(".csv"):
split = str(f).split('_')
code = split[1]
print('[{}] Processing {} - {}'.format(datetime.now(), code, f))
users = {}
with open(f, 'r', encoding='utf8') as fr:
for line in fr:
split = line.split(',')
if len(split) < 2:
continue
try:
uid = int(split[1])
except:
continue
# This file users
f_uid = users.get(uid)
if f_uid is None:
users[uid] = 1
else:
users[uid] = f_uid + 1
# All users
f_all = all_users .get(uid)
if f_all is None:
all_users[uid] = 1
else:
all_users[uid] = f_all + 1
with open(dir + '/' + str(code) + '.csv', 'w') as fw:
for uid, f_uid in users.items():
fw.write('{},{}\n'.format(uid,f_uid))
users.clear()
print('[{}] Processing all files'.format(datetime.now()))
with open(dir + '/#ALL.csv', 'w') as fw_all:
for uid, f_all in all_users.items():
fw_all.write('{},{}\n'.format(uid,f_all))
print('[{}] Program finished'.format(datetime.now())) | gunarto90/twitter-stream | iterate user id.py | iterate user id.py | py | 1,564 | python | en | code | 1 | github-code | 36 |
18233237825 | class Define:
def __init__(self, traffic_demand: int, holding_time: int, total_traffic: int,
max_route: int, avg_repaired_time: int, node_size: int, shape: float, scale: float):
self.traffic_demand = traffic_demand
self.holding_time = holding_time
self.total_traffic = total_traffic
self.max_route = max_route
self.avg_repaired_time = avg_repaired_time
self.node_size = node_size
self.shape = shape
self.scale = scale
| shugonta/simulatorV2 | define.py | define.py | py | 503 | python | en | code | 0 | github-code | 36 |
21628945974 | # VoiceOver language choices
VOICEOVER_LANGUAGE_CHOICES = [
("en", "English"),
("hi", "Hindi"),
("as", "Assamese"),
("bn", "Bengali"),
("brx", "Bodo"),
("doi", "Dogri"),
("gu", "Gujarati"),
("kn", "Kannada"),
("ks", "Kashmiri"),
("gom", "Konkani"),
("mai", "Maithili"),
("ml", "Malayalam"),
("mr", "Marathi"),
("mni", "Manipuri"),
("ne", "Nepali"),
("or", "Oriya"),
("pa", "Punjabi"),
("sa", "Sanskrit"),
("sat", "Santali"),
("sd", "Sindhi"),
("si", "Sinhala"),
("ta", "Tamil"),
("te", "Telugu"),
("ur", "Urdu"),
]
VOICEOVER_SUPPORTED_LANGUAGES = {
"English": "en",
"Assamese": "as",
"Bengali": "bn",
"Bodo": "brx",
"Gujarati": "gu",
"Hindi": "hi",
"Kannada": "kn",
"Malayalam": "ml",
"Manipuri": "mni",
"Marathi": "mr",
"Odia": "or",
"Punjabi": "pa",
"Tamil": "ta",
"Telugu": "te",
}
| AI4Bharat/Chitralekha-Backend | backend/voiceover/metadata.py | metadata.py | py | 982 | python | en | code | 18 | github-code | 36 |
28492416311 | from django.shortcuts import render, HttpResponse
from django.conf import settings
from rest_framework.decorators import api_view
from rest_framework.response import Response
import random
from .models import Planet
from .serializers import PlanetSerializer
from api.serializers import GenericSerializer
from api.views import BaseRandomView, BaseIdView
from api.utils import validate_request, set_options_response
MODEL = Planet
@api_view(['GET', 'OPTIONS'])
def index(request):
if request.method == 'OPTIONS':
return set_options_response()
result = validate_request(request)
if 'error' in result:
return Response({"error": result['error']}, status=result['status'], headers=settings.CORS_HEADERS)
name = request.GET.get('name', None)
affiliation = request.GET.get('affiliation', None)
region = request.GET.get('region', None)
page = request.GET.get('page', 0)
planets_set = Planet.objects.all().order_by('id')
if name:
planets_set = planets_set.filter(name__icontains=name)
if affiliation:
planets_set = planets_set.filter(info__affiliation__icontains=affiliation)
if region:
planets_set = planets_set.filter(info__region__icontains=region)
if page:
try:
page = int(page)
except:
page = 1;
start = settings.RESOURCE_LIMIT*(page-1)
end = settings.RESOURCE_LIMIT*(page-1)+settings.RESOURCE_LIMIT
planets_set = planets_set[start:end]
else:
planets_set = planets_set[0:settings.RESOURCE_LIMIT]
serializer = PlanetSerializer(planets_set, many=True)
# If nothing matches queries
if not serializer.data:
return Response({"error": settings.MSG_404}, status=404, headers=settings.CORS_HEADERS)
return Response(serializer.data, headers=settings.CORS_HEADERS)
class RandomPlanetView(BaseRandomView):
model = MODEL
class PlanetIdView(BaseIdView):
model = MODEL | yitchee/The-Clone-Wars-API | api/planets/views.py | views.py | py | 1,971 | python | en | code | 0 | github-code | 36 |
17453307389 | import os
import csv
import random
import pandas as pd
def trainval_split(root, suffix, save_dir):
content = []
num = 0
suf_root = os.path.join(root, suffix)
for slide in os.listdir(suf_root):
slide_dir = os.path.join(suf_root, slide)
for subslide in os.listdir(slide_dir):
subslide_dir = slide + '/' + subslide
subset = random.randint(0, 4)
content.append(dict(subslide=subslide_dir, subset=subset))
num += 1
random.shuffle(content)
df = pd.DataFrame(content, columns=['subslide', 'subset'])
df.to_csv(os.path.join(save_dir, 'trainval.csv'), index=False)
print('Number of training patches: {}'.format(num))
print('Finished!!')
def make_trainval_mini(csv_dir, save_dir, num):
df = pd.read_csv(csv_dir)
df = list(df.itertuples(index=False))
random.shuffle(df)
new_df = df[:num]
gf = pd.DataFrame(new_df, columns=['subslide', 'subset'])
gf.to_csv(os.path.join(save_dir, 'trainval_mini.csv'), index=False)
if __name__ == '__main__':
suffix = 'train'
root = '/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_sf/subslide/'
save_dir = '/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_sf/'
csv_dir = '/media/ldy/e5a10f4e-18fd-4656-80d8-055bc4078655/OSCC_sf/trainval.csv'
# trainval_split(root, suffix, save_dir)
make_trainval_mini(csv_dir, save_dir, num=120) | yida2311/OSCC_SF | dataset/data_split.py | data_split.py | py | 1,418 | python | en | code | 0 | github-code | 36 |
70617927783 | # Speech Brain Viewer app
# to accompany Hamilton, Oganian, Hall, and Chang, Cell 2021
# https://doi.org/10.1016/j.cell.2021.07.019
#
# Viewer created by Liberty Hamilton, 2021
# Email liberty.hamilton@austin.utexas.edu with questions
#
import scipy.io
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output, State, ClientsideFunction
import plotly.express as px
import pandas as pd
from plotly.subplots import make_subplots
import plotly.graph_objs as go
import time
import os
from flask_caching import Cache
from dash.exceptions import PreventUpdate
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# NOTE(review): this bare assignment has no effect at module level; it was
# likely intended to be passed to the Dash constructor, i.e.
# dash.Dash(..., suppress_callback_exceptions=True) — confirm.
suppress_callback_exceptions=True
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title='Speech Brain Viewer'
server = app.server
# Redis-backed flask cache (REDIS_URL comes from the deployment environment,
# e.g. Heroku; empty string locally).
cache = Cache(app.server, config={
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': os.environ.get('REDIS_URL', '')
})
timeout = 300
styles = {
    'pre': {
        'border': 'thin lightgrey solid',
        'overflowX': 'scroll'
    }
}

# Receptive-field (STRF) weight matrices for each encoding model, one row per
# electrode. Loaded from MATLAB files accompanying Hamilton et al., Cell 2021.
full_strf = scipy.io.loadmat('full_strf.mat')['strf']
spect_strf = scipy.io.loadmat('spect_strf.mat')['strf']
onset_strf = scipy.io.loadmat('onset_strf.mat')['strf']
peakrate_strf = scipy.io.loadmat('peakrate_strf.mat')['peakrate_strf']
phnfeat_strf = scipy.io.loadmat('phnfeat_strf.mat')['strf']
rel_strf = scipy.io.loadmat('rel_strf.mat')['strf']

# Electrode coordinates (one row per electrode: x, y, z).
elecs = scipy.io.loadmat('elecmatrix.mat')['elecmatrix']
# Model performance: unique-variance columns (uvar) stacked with the raw
# correlation columns (vcorrs), so column indices 0-? are unique r^2 values
# and later columns are full-model correlations (see create_rf docstring).
vcorrs1 = scipy.io.loadmat('vcorrs.mat')['vcorrs']
vcorrs = scipy.io.loadmat('uvar.mat')['uvar']
vcorrs = np.hstack((vcorrs, vcorrs1))

# Whole left-hemisphere pial surface mesh (vertices + triangle indices).
trivert = scipy.io.loadmat('lh_pial_trivert.mat')
v = trivert['vert']
t = trivert['tri']

# Temporal-lobe-only mesh (always shown; the full hemisphere is optional).
temporal_trivert = scipy.io.loadmat('cvs_avg_inMNI152_lh_temporal_pial.mat')
tv = temporal_trivert['vert']
tt = temporal_trivert['tri']
# Cortical curvature values used as mesh intensity (sulci vs gyri shading).
curv = scipy.io.loadmat('cvs_curv.mat')['curv']

# Per-electrode anatomy: 1-based area indices, converted to 0-based `anum`.
anatomy = scipy.io.loadmat('elecmatrix.mat')['anatomy']
anum = np.array([a[0]-1 for a in anatomy])
# Nudge electrodes in areas >= 5 slightly laterally (x - 1) for display.
elecs[anum>=5,0] = elecs[anum>=5,0]-1
anames = scipy.io.loadmat('elecmatrix.mat')['new7AreaNames']
anames2 = [a[0] for a in anames[0]]
anat_labels = [anames2[a[0]-1] for a in anatomy]
# Per-electrode RGB display colors from the 7-area color table.
clr = scipy.io.loadmat('elecmatrix.mat')['area7Cols']
clrs = [clr[a[0]-1,:].tolist() for a in anatomy]

# We have a small number in the right hem that were projected to the medial wall, lets remove
rm_elecs = np.intersect1d(np.where(elecs[:,1]<-20)[0], np.where(elecs[:,2]<-20)[0])
elec_no = np.arange(elecs.shape[0])
elecs_mask = np.ones((elecs.shape[0],), dtype=bool)
elecs_mask[rm_elecs] = False
# Apply the same mask to every per-electrode array so indices stay aligned.
elec_no = elec_no[elecs_mask]
elecs = elecs[elecs_mask,:]
vcorrs = vcorrs[elecs_mask,:]
full_strf = full_strf[elecs_mask,:,:]
onset_strf = onset_strf[elecs_mask,:,:]
spect_strf = spect_strf[elecs_mask,:,:]
peakrate_strf = peakrate_strf[elecs_mask,:]
phnfeat_strf = phnfeat_strf[elecs_mask,:,:]
rel_strf = rel_strf[elecs_mask,:,:]
anum = anum[elecs_mask]
anat_labels = [anat_labels[a] for a in elec_no]
clrs = [clrs[a] for a in elec_no]

# Electrical stimulation results (separate electrode set from the RF data).
#stim_effects = pd.read_excel(io='/Users/jsh3653/Dropbox/Heschls_STRFs/data/stim/HG_stim_summary.xlsx',
#	sheet_name='unique_for_manuscript')
stim_effects = pd.read_excel(io='stim_results.xlsx', sheet_name='Sheet1')

stim_df = pd.DataFrame(
    data={'elec_number': np.arange(len(stim_effects)),
          'x': stim_effects['x'],
          'y': stim_effects['y'],
          'z': stim_effects['z'],
          'anatomy': stim_effects['anatomy'],
          'effect': stim_effects['effect'],
          'passive_effect': stim_effects['passive_effect'],
          'repetition_effect': stim_effects['repetition_effect']},
    )
def create_figure(dropdownData='RF', elec_marker='vcorrs',
                  show_rest_of_brain=True, corr_type=20):
    '''
    Create the 3D brain figure with the electrode scatter overlaid, and
    modify the electrode colors based on the dropdown menu choices. The
    frontal lobe (whole-hemisphere mesh) will be shown or not depending
    on the value of the show_rest_of_brain switch.

    Parameters
    ----------
    dropdownData : str
        'RF' shows the receptive-field electrode set (module-level
        `elecs`/`vcorrs` arrays); any other value shows the stimulation
        dataset (`stim_df`).
    elec_marker : str
        Electrode coloring mode: 'vcorrs' (model correlation, diverging
        colorbar), 'anatomy_num' (fixed per-area colors from `clrs`), or
        'stim_eff' (stimulation effect column; only meaningful when
        dropdownData != 'RF').
    show_rest_of_brain : bool
        If True, also render the semi-transparent whole-hemisphere mesh
        in addition to the temporal-lobe mesh.
    corr_type : int
        Column of `vcorrs` used for coloring when elec_marker == 'vcorrs'
        (20 = spectrogram model; see create_rf for the full mapping).

    Returns
    -------
    plotly.graph_objs.Figure
    '''
    if dropdownData=='RF':
        chosen_elecs = np.arange(elecs.shape[0])
        df = pd.DataFrame(
            data={'elec_number': chosen_elecs,
                  'x': elecs[chosen_elecs,0],
                  'y': elecs[chosen_elecs,1],
                  'z': elecs[chosen_elecs,2],
                  'anatomy': [anat_labels[a] for a in chosen_elecs],
                  'anatomy_num': [anum[a] for a in chosen_elecs],
                  'vcorrs': vcorrs[chosen_elecs,corr_type]},
            )
    else:
        df = stim_df

    if elec_marker == 'anatomy_num':
        marker = dict(color=clrs,
                      size=6)
    elif elec_marker == 'vcorrs':
        # Symmetric color limits so the diverging RdBu colormap is
        # centered at zero.
        marker = dict(color=df['vcorrs'],
                      colorscale='RdBu_r',
                      cmin=-df['vcorrs'].max(),
                      cmax=df['vcorrs'].max(),
                      size=6, colorbar=dict(title='Corr.', thickness=20))
    elif elec_marker == 'stim_eff':
        marker = dict(color=df['effect'],
                      colorscale='RdBu_r',
                      cmin=1,
                      cmax=3,
                      size=6, colorbar=dict(title='Effect', thickness=20))
    else:
        # Fail fast with a clear message instead of an UnboundLocalError
        # further down when an unknown marker mode is passed in.
        raise ValueError('Unknown elec_marker: %s' % elec_marker)

    # Temporal-lobe mesh is always shown (this is where most electrodes are).
    fig = go.Figure(
        data = [go.Mesh3d(
            x=tv[:, 0],
            y=tv[:, 1],
            z=tv[:, 2],
            i=tt[:, 0],
            j=tt[:, 1],
            k=tt[:, 2],
            colorbar=None,
            showscale=False,
            color='rgb(200,200,200)',
            name='temporal lobe',
            opacity=0.6,
            lighting=dict(ambient=0.9, diffuse=0.9),
            intensity=curv,
            colorscale=[[0, 'white'],
                        [0.5, 'gray'],
                        [1, 'black']]
            ),
        ])

    if show_rest_of_brain:
        # Whole hemisphere rendered at low opacity so the temporal lobe
        # and electrodes remain visible through it.
        fig.add_trace(
            go.Mesh3d(
                x=v[:, 0],
                y=v[:, 1],
                z=v[:, 2],
                i=t[:, 0],
                j=t[:, 1],
                k=t[:, 2],
                colorbar=None,
                showscale=False,
                color='rgb(200,200,200)',
                name='brain',
                text=None,
                opacity=0.2,
                lighting=dict(ambient=0.9, diffuse=0.9),
                intensity=curv,
                colorscale=[[0, 'white'],
                            [0.5, 'gray'],
                            [1, 'black']]
            )
        )

    # Electrode scatter; `ids` carries the electrode index so click events
    # can map back to a row of the data frame.
    fig.add_trace(
        go.Scatter3d(
            x=df['x'],
            y=df['y'],
            z=df['z'],
            ids=df['elec_number'],
            mode='markers',
            name='electrode',
            text=df['anatomy'],
            marker=marker,
            ),
        )

    # Lateral view of the left hemisphere.
    camera = dict(
        up=dict(x=0, y=0, z=1),
        center=dict(x=0, y=0, z=0),
        eye=dict(x=-1.25, y=0.1, z=0.13),
    )

    fig.update_layout(clickmode='event+select',
                      scene=dict(
                          xaxis=dict(showticklabels=False, showgrid=False, title='L-R'),
                          yaxis=dict(showticklabels=False, showgrid=False, title='A-P'),
                          zaxis=dict(showticklabels=False, showgrid=False, title='D-V'),
                          ),
                      scene_camera=camera,
                      height=500,
                      )
    fig.update_scenes(xaxis_showbackground=False,
                      yaxis_showbackground=False,
                      zaxis_showbackground=False,
                      xaxis_showaxeslabels=False,
                      yaxis_showaxeslabels=False,
                      zaxis_showaxeslabels=False,)
    return fig
def create_rf(elec_num=310, corr_type=12):
    '''
    This creates the receptive field plot (a heat map for multi-feature
    models, a line plot for single-feature models) for the model of
    interest (based on `corr_type` number).
    For reference, those corr numbers are:
      Unique Onset: 0
      Unique Peak rate: 1
      Unique Features: 2
      Unique Abs Pitch: 3
      Unique Rel Pitch: 4
      Full phonological+pitch: 12,
      Spectrogram: 20

    Parameters
    ----------
    elec_num : int or None
        Electrode index into the module-level STRF arrays. None produces
        an empty placeholder plot prompting the user to pick an electrode.
    corr_type : int
        Which model to display (column of `vcorrs`; see mapping above).
        Values other than those listed fall through to the full model
        weights with rows reordered for display.

    Returns
    -------
    plotly.graph_objs.Figure
    '''
    if elec_num is None:
        # No selection yet: show an all-zero placeholder with no labels.
        elec_num = 310
        title = 'Please select an electrode...'
        strf = np.zeros((spect_strf.shape[1], spect_strf.shape[2]))
        yticks = []
        yticklabels = []
        ticksize = 12
        ylabel = ''
        autorange = True
    else:
        # Full model and spectrogram report raw correlation; the single
        # feature models report unique variance (r^2).
        if (corr_type == 20) or (corr_type == 12):
            title = 'Electrode %d, r=%2.2f'%(elec_num, vcorrs[elec_num,corr_type])
        else:
            title = 'Electrode %d, unique r^2=%2.2f'%(elec_num, vcorrs[elec_num,corr_type])
        # Each branch flips the time axis so that x runs from -0.6 s to 0,
        # and sets y ticks/labels appropriate to that model's features.
        if corr_type == 20:
            strf = np.fliplr(spect_strf[elec_num,:,:])
            yticks = [11, 43, 79]
            yticklabels = [0.5, 2, 8]
            ticksize = 12
            ylabel = 'Frequency (kHz)'
            autorange = True
        elif corr_type == 0: # onset
            strf = np.fliplr(onset_strf[elec_num,:,:])
            ticksize = 12
            yticks = [strf.min(), 0, strf.max()]
            ylabel = 'Onset weight (A.U.)'
            yticklabels = [np.round(strf.min()*100)/100., 0, np.round(strf.max()*100)/100.]
            autorange = True
        elif corr_type == 1: # peakrate
            strf = peakrate_strf[elec_num,:][::-1]
            ticksize = 12
            yticks = [strf.min(), 0, strf.max()]
            ylabel = 'Peak rate weight (A.U.)'
            yticklabels = [np.round(strf.min()*100)/100., 0, np.round(strf.max()*100)/100.]
            autorange = True
        elif corr_type == 2:
            strf = np.fliplr(phnfeat_strf[elec_num,:,:])
            yticks = np.arange(phnfeat_strf.shape[1])
            yticklabels = ['sonorant','obstruent','voiced',
                           'nasal','syllabic','fricative','plosive',
                           'back','low','front','high','labial',
                           'coronal','dorsal']
            ticksize = 6
            ylabel = ''
            autorange = 'reversed'
        elif corr_type == 3: # abs pitch
            # Absolute pitch weights are rows 15-24 of the full model.
            strf = np.fliplr(full_strf[elec_num,15:25,:])
            yticks = [0,9]
            yticklabels = [90, 250]
            ticksize = 12
            ylabel = 'Abs. Pitch (Hz)'
            autorange = True
        elif corr_type == 4:
            strf = np.fliplr(rel_strf[elec_num,:,:])
            # First 10 rows are relative pitch, last 10 are its derivative.
            yticks = [0, 4.5, 9, 10, 14.5, 19]
            yticklabels = [-1.9, 0, 1.9, -0.4, 0, 0.3]
            ticksize = 12
            ylabel = 'Rel. Pitch + ∆Rel. Pitch'
            autorange = True
        else:
            strf = np.fliplr(full_strf[elec_num,:,:])
            # Move the last feature row (peakRate) up next to onset so the
            # tick labels below line up with the reordered rows.
            reorder = [0,strf.shape[0]-1]+list(np.arange(1,full_strf.shape[1]-1))
            strf = strf[reorder,:]
            yticks = np.arange(full_strf.shape[1])
            yticklabels = ['onset','peakRate','sonorant','obstruent','voiced',
                           'nasal','syllabic','fricative','plosive',
                           'back','low','front','high','labial',
                           'coronal','dorsal','abs. pitch','','','',
                           '','','','','','','rel. pitch','','','',
                           '','','','','','','∆rel. pitch','','','',
                           '','','','','','']
            ticksize = 6
            ylabel = ''
            autorange = 'reversed'

    # Symmetric color limit for the diverging heat map.
    # NOTE(review): np.abs(strf.max()) takes |max| only; if the most extreme
    # weight is negative this underestimates the scale — np.abs(strf).max()
    # may have been intended. Left as-is to preserve published figures.
    smax = np.abs(strf.max())
    if smax==0:
        smax = 1
    if corr_type == 0:
        smax = 0.1
    if corr_type > 1:
        # Multi-feature models: feature x time heat map.
        fig = go.Figure(data = [
            go.Heatmap(
                x=np.linspace(-0.6,0,60),
                z=strf,
                zmin=-smax,
                zmax=smax,
                colorscale='RdBu_r',
                colorbar=dict(title='Beta<br>weight<br>(A.U.)',
                              tickvals=[-smax,0,smax],
                              ticktext=['-max','0','max']),
                )
            ]
        )
    else:
        # Onset / peak rate: single weight time series as a line plot.
        fig = go.Figure(data = [
            go.Scatter(
                x=np.linspace(-0.6,0,60),
                y=strf.ravel(),
                mode='lines',
                )
            ]
        )

    # Dashed lines separating feature groups (or octaves for spectrogram).
    if corr_type != 20:
        if corr_type == 12:
            fig.add_hline(y=0.5, line_width=1, line_color='black', line_dash='dash')
            fig.add_hline(y=1.5, line_width=1, line_color='black', line_dash='dash')
            fig.add_hline(y=15.5, line_width=1, line_color='black', line_dash='dash')
            fig.add_hline(y=25.5, line_width=1, line_color='black', line_dash='dash')
            fig.add_hline(y=35.5, line_width=1, line_color='black', line_dash='dash')
        if corr_type == 4:
            fig.add_hline(y=9.5, line_width=1, line_color='black', line_dash='dash')
    else:
        fig.add_hline(y=11, line_width=1, line_color='black', line_dash='dash')
        fig.add_hline(y=43, line_width=1, line_color='black', line_dash='dash')
        fig.add_hline(y=79, line_width=1, line_color='black', line_dash='dash')

    fig.update_layout(
        title={'text': title,
               'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
        xaxis={'title': 'Time (s)'},
        yaxis={'title': ylabel,
               'tickmode': 'array',
               'tickvals': yticks,
               'ticktext': yticklabels, 'showgrid': False,
               'autorange': autorange,
               'tickfont_size': ticksize,
               'automargin': False,
               }
        )
    return fig
# Build the module-level figures shown when the page first loads; the
# callbacks below replace them in response to user interaction.
fig = create_figure()
rf_fig = create_rf()
#fig = px.scatter(df, x="x", y="y", color="fruit", custom_data=["customdata"])
#fig.update_traces(selector=dict(name='electrode'), marker=dict(color='mediumblue', size=20), row=1, col=1)
# Static help/usage text rendered in the footer of the page.
rf_markdown = dcc.Markdown('''
Click on an electrode on the brain to see its corresponding receptive field or stimulation result on the right.
**Brain Controls:**
* Zoom in and out of the brain by scrolling
* Rotate the brain by clicking and dragging
Note that the nonlinear warping of electrodes sometimes means the electrodes will seem farther forward
or back than expected. The anatomical name that shows on hover is taken from the original (native space)
brain data. Electrodes have been projected to the nearest surface vertex for ease of clicking. For the most
accurate visualization, please see [our paper](https://doi.org/10.1016/j.cell.2021.07.019).
Brain viewer created by Liberty Hamilton 2021 using [Dash and Plotly for python](https://dash.plotly.com/).
Contact liberty.hamilton@austin.utexas.edu with any questions.
''')
# This creates the initial app in its first instantiation. This will be
# modified by user behaviors (clicking, changing menu items, etc.)
app.layout = html.Div([
html.Div([
dcc.Markdown('''
### Parallel and distributed speech encoding across human auditory cortex ###
*Citation*: [Hamilton, Oganian, Hall, and Chang. _Cell_ 2021](https://doi.org/10.1016/j.cell.2021.07.019)
This is an interactive tool to accompany our paper showing receptive fields across
multiple sub-fields of auditory cortex. Select from the Dropdown menu below to
explore receptive field findings and stimulation findings. Works best on desktop computers, tablet/mobile does not include all features. [Video Tutorial.](https://www.youtube.com/watch?v=Q0zulm4ciRI&ab_channel=LibertyHamilton)
'''),
]),
html.Div([
html.Div([
daq.BooleanSwitch(
id='show-brain',
on=True,
label="Whole brain",
labelPosition="top",
),
], className='three columns',
style={'background-color': 'lightgrey', 'padding': '10px',
'float': 'left'}),
html.Div([
html.Label('Color electrodes by:'),
dcc.RadioItems(
id='radio-color',
options=[
{'label': 'Anatomy', 'value': 'anatomy_num'},
{'label': 'Correlation', 'value': 'vcorrs'},
],
value='vcorrs'
)], className='three columns',
style={'background-color': 'lightgrey', 'padding': '10px'}, id='color-electrodes-div'),
html.Div([
html.Label('Correlation type:'),
dcc.Dropdown(
id='corr-type-dropdown',
options=[
{'label': 'Spectrogram', 'value': '20'},
{'label': 'Full phonological+pitch', 'value': '12'},
{'label': 'Unique Onset', 'value': '0'},
{'label': 'Unique Peak rate', 'value': '1'},
{'label': 'Unique Features', 'value': '2'},
{'label': 'Unique Absolute Pitch', 'value': '3'},
{'label': 'Unique Relative Pitch', 'value': '4'},
],
# options=[
# {'label': 'Onset', 'value': '0'},
# {'label': 'Full', 'value': '6'},
# {'label': 'Relative pitch', 'value': '12'},
# {'label': 'Spectrogram', 'value': '14'},
# ],
value='20'
)], className='three columns', id='corr-type-div',
style={'background-color': 'lightgrey',
'padding': '10px', 'display': 'inline-block'}),
html.Div([
html.Label('Choose results to explore:'),
dcc.Dropdown(
id='rf-stim-dropdown',
options=[
{'label': 'Receptive Fields', 'value': 'RF'},
{'label': 'Stimulation', 'value': 'ST'},
],
value='RF'
)], className='three columns',
style={'background-color': 'lightgrey',
'padding': '10px', 'display': 'inline-block',
'float': 'right'}),
],
style={'background-color': 'lightgrey', 'display': 'inline-block', 'width': '100%'}
),
html.Div([
dcc.Loading(
dcc.Graph(
id='brain-fig',
figure=fig,
),
type='circle',
),
],
style={'width': '70%', 'display': 'inline-block', 'height': '70%'}),
html.Div([
html.Div([
dcc.Graph(
id='rf',
figure=rf_fig,
),
],
id="rf_div",
style={'width': '100%', 'display': 'inline-block', 'vertical-align': 'top'},
),
html.Div([
html.H4('Stimulation effects'),
html.P('Click on an electrode to see effects of stimulation on passive \
listening and on speech perception. We recommend you turn off\
the "whole brain" switch at the top left to show the temporal lobe only.'),
html.P('Effect types: ', style={'font-weight': 'bold'}),
html.P('1 (blue): sound hallucination + no problems perceiving speech',
style={'background-color': '#0c2350', 'padding': '10px', 'color': '#ffffff'}),
html.P('2 (white): no sound hallucination + problems perceiving speech',
style={'background-color': '#f1f2f2', 'padding': '10px', 'color': '#000000'}),
html.P('3 (red): Complex response',
style={'background-color': '#73001c', 'padding': '10px', 'color': '#ffffff'}),
html.H5('', id='stim_desc'),
html.H5('', id='repet_effect')
],
id="stim_div",
style={'width': '100%', 'display': 'none', 'vertical-align': 'middle'},
)
],
id="rf_or_stim_div",
style={'width': '30%', 'display': 'inline-block', 'vertical-align': 'top'}),
html.Div([
rf_markdown,
],
style={'background-color': 'lightgrey', 'padding': '10px'}),
],
style={'max-width': '1200px'},
)
# This callback will create the receptive field figure
# based on the correlation type you choose and what you
# have clicked on the brain figure
@app.callback(
    [Output('rf', 'figure'),
     Output('stim_desc', 'children'),
     Output('repet_effect', 'children'),
     Output('corr-type-div', 'style'),
     Output('color-electrodes-div', 'style')],
    [Input('brain-fig', 'clickData'),
     Input('corr-type-dropdown', 'value'),
     Input('rf-stim-dropdown', 'value')])
def update_rf(clickData, corr_val, rf_value):
    """Update the right-hand panel after a brain click or dropdown change.

    Parameters:
        clickData - Plotly click payload from the brain figure (None until
                    the user has clicked an electrode).
        corr_val  - Selected correlation type (string-encoded integer).
        rf_value  - 'RF' for receptive fields or 'ST' for stimulation mode.

    Returns:
        (rf figure, stimulation text, repetition text, corr-type div style,
        color-electrodes div style).
    """
    ctx = dash.callback_context
    # Which component fired the callback (e.g. 'brain-fig' vs a dropdown).
    prop_id = ctx.triggered[0]['prop_id'].split('.')[0]
    try:
        elec_num = clickData['points'][0]['id']
    except (TypeError, KeyError, IndexError):
        # No click yet (clickData is None) or the payload lacks an 'id';
        # was a bare except, which also hid unrelated programming errors.
        elec_num = None
    if rf_value == 'RF':
        rf_updated = create_rf(elec_num=elec_num, corr_type=int(corr_val))
        stim_updated = ''
        rep_updated = ''
        # RF mode: show the correlation-type and coloring controls.
        corr_div_style = {'background-color': 'lightgrey',
                          'padding': '10px', 'display': 'inline-block'}
        color_elec_style = {'background-color': 'lightgrey', 'padding': '10px',
                            'display': 'inline-block'}
    else:
        # Stimulation mode: hide the RF-specific controls.
        corr_div_style = {'background-color': 'lightgrey',
                          'padding': '10px', 'display': 'none'}
        color_elec_style = {'background-color': 'lightgrey', 'padding': '10px',
                            'display': 'none'}
        if (prop_id == 'rf-stim-dropdown') or (prop_id == 'corr-type-dropdown'):
            # Mode/type just changed: reset to electrode 0 rather than keep a
            # stale (possibly None) click selection.
            elec_num = 0
            rf_updated = create_rf(elec_num=elec_num, corr_type=int(corr_val))
            stim_updated = ''
            rep_updated = ''
        else:
            passive_description = stim_df['passive_effect'][elec_num]
            repet_description = stim_df['repetition_effect'][elec_num]
            rf_updated = create_rf(elec_num=elec_num, corr_type=int(corr_val))
            stim_updated = 'Passive: ' + passive_description
            rep_updated = 'Repetition: ' + repet_description
    return rf_updated, stim_updated, rep_updated, corr_div_style, color_elec_style
# This callback will change the brain figure to show
# either receptive field data or stimulation data
# based on the dropdown values. It will also change
# the correlation type that is shown if in "RF" mode
@app.callback(
    [Output('brain-fig', 'figure'),
     Output('show-brain', 'label'),
     Output('rf_div', 'style'),
     Output('stim_div', 'style')],
    [Input('rf-stim-dropdown', 'value'),
     Input('radio-color', 'value'),
     Input('show-brain', 'on'),
     Input('corr-type-dropdown', 'value')])
# @cache.memoize(timeout=timeout) # in seconds, cache the data
def display_click_data(rf_value, radio_value, brain_value, corr_val):
    """Redraw the brain figure and toggle the RF vs. stimulation panels.

    Parameters:
        rf_value    - 'RF' (receptive fields) or 'ST' (stimulation results).
        radio_value - Electrode coloring mode ('anatomy_num' or 'vcorrs').
        brain_value - Whole-brain switch state (True = show whole brain).
        corr_val    - Correlation type (string-encoded integer).

    Returns:
        (brain figure, switch label text, rf div style, stim div style).
    """
    # The previous version queried dash.callback_context here but never used
    # the resulting prop_id/value; the dead lookups have been removed.
    if rf_value == 'ST':
        # Override elec_marker type so electrodes show stimulation effects.
        el_marker = 'stim_eff'
        stim_style = {'width': '100%', 'display': 'inline-block', 'vertical-align': 'middle'}
        rf_style = {'width': '100%', 'display': 'none', 'vertical-align': 'top'}
    else:
        el_marker = radio_value
        stim_style = {'width': '100%', 'display': 'none', 'vertical-align': 'middle'}
        rf_style = {'width': '100%', 'display': 'inline-block', 'vertical-align': 'top'}
    fig = create_figure(dropdownData=rf_value, elec_marker=el_marker,
                        show_rest_of_brain=brain_value, corr_type=int(corr_val))
    # Label the switch with what is currently displayed.
    if brain_value:
        show_brain = "Whole brain"
    else:
        show_brain = "Temporal lobe only"
    return fig, show_brain, rf_style, stim_style
if __name__ == '__main__':
    #app.run_server(processes=6)
    # Launch the Dash development server locally; debug=True enables hot
    # reloading and the in-browser error console.
    app.run_server(debug=True, host='127.0.0.1')
| libertyh/SpeechCortex | app.py | app.py | py | 24,899 | python | en | code | 2 | github-code | 36 |
18924218055 | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
class SwitchToWindow():
    """Demo of inspecting browser window handles with Selenium WebDriver."""

    def test(self):
        """Open the practice page, trigger a popup, and print every handle."""
        practice_url = "https://letskodeit.teachable.com/pages/practice"
        browser = webdriver.Firefox()
        browser.maximize_window()
        browser.get(practice_url)
        # The handle of the window we started in (the "main" window).
        main_window = browser.current_window_handle
        print("Parent Handle: " + main_window)
        # Clicking this button opens a second browser window.
        browser.find_element(By.ID, "openwindow").click()
        time.sleep(2)
        # After the click there should be two handles: main window + popup.
        for current in browser.window_handles:
            print("Handle: " + current)
# Instantiate the demo and run it when the module is executed.
# (The original final line had dataset metadata fused onto it.)
ff = SwitchToWindow()
ff.test()
import glob
import os
import os.path
import shutil
import subprocess
from os import path
# Constants
CE_DIR = "ce"  # input directory containing the *.ce files to filter
CE_FILTER_DIR = "ce_filter"  # output directory for the filtered copies
def cleanup(directory):
    """Recreate *directory* as an empty directory, removing prior contents."""
    already_there = path.exists(directory)
    if already_there:
        # Wipe everything (files and subdirectories) before recreating.
        shutil.rmtree(directory)
    os.mkdir(directory)
def main():
    """Filter each CE_DIR/*.ce file to lines matching com.axa.trx. (case-insensitive).

    For every input file, writes a same-named file into CE_FILTER_DIR that
    contains only the matching lines. CE_FILTER_DIR is emptied first.
    """
    cleanup(CE_FILTER_DIR)
    ce_files = sorted(glob.glob(f"{CE_DIR}/*.ce"))
    for in_file in ce_files:
        out_file = os.path.join(CE_FILTER_DIR, os.path.basename(in_file))
        # Run grep directly (no shell) so filenames containing spaces or
        # shell metacharacters cannot break or inject into the command line,
        # unlike the previous os.system(f"grep ... {file} > {out_file}").
        with open(out_file, "w") as out:
            # check=False: grep exits 1 when a file has no matches, which is
            # a normal outcome here (os.system ignored the status too).
            subprocess.run(["grep", "-i", r"com\.axa\.trx\.", in_file],
                           stdout=out, check=False)
# Standard script guard: run the filter only when executed directly.
if __name__ == '__main__':
    main()
| nyfd/m118 | ce-filter.py | ce-filter.py | py | 547 | python | en | code | 0 | github-code | 36 |
10495517316 | from django.test import TestCase, RequestFactory
from djlotrek.request_utils import get_host_url
class RequestUtilsTestCase(TestCase):
    """Unit tests for djlotrek.request_utils.get_host_url."""

    def test_get_host_url(self):
        """
        get_host_url builds the scheme + host string from a request object,
        returning the host url when the request is not None.
        """
        factory = RequestFactory()
        fake_request = factory.get("/path")
        fake_request.META["HTTP_HOST"] = "localhost"
        self.assertEqual(get_host_url(fake_request), "http://localhost")

    def test_get_host_url_no_request(self):
        """
        get_host_url returns None when handed no request object at all.
        """
        self.assertEqual(get_host_url(None), None)
| lotrekagency/djlotrek | tests/test_request_utils.py | test_request_utils.py | py | 797 | python | en | code | 7 | github-code | 36 |
14716003738 | from sqlalchemy import create_engine, text, MetaData, Table, Column, Integer, String, select
# Connect to the local Postgres database (user 'postgres', password '1').
engine = create_engine('postgresql://postgres:1@localhost/news_db')
meta = MetaData()
# Table object mirroring the existing "students" table schema.
students = Table(
    'students', meta,
    Column('id', Integer, primary_key=True),
    Column('first_name', String),
    Column('last_name', String))
# meta.create_all(engine)
conn = engine.connect()
# Demonstrates SQLAlchemy aliases: st behaves like "students AS students_1".
st = students.alias()
# Equivalent SQL: SELECT * FROM students AS students_1 WHERE students_1.id > 2
s = st.select().where(st.c.id > 2)
result = conn.execute(s).fetchall()
print(result)
| devabsaitov/self_study | sqlalchemy_lesson/Basic/7_using_aliases.py | 7_using_aliases.py | py | 494 | python | en | code | 0 | github-code | 36 |
38251884357 | from django.db import models
# In-memory registry of portfolio pages; filled by the add_page() calls
# below, in the order they should be displayed.
all_pages = []
# Just switched to keeping it in-memory. No real need for a model here.
# class Page(models.Model):
#     top = models.TextField(blank=True, null=True)
#     middle_link = models.TextField(blank=True, null=True)
#     middle_html = models.TextField(blank=True, null=True)
#     bottom = models.TextField(blank=True, null=True)
#     year = models.CharField(max_length=200, blank=True, null=True)
#     order = models.IntegerField(default=0)
#     active = models.BooleanField(default=True)
#     def __unicode__(self, *args, **kwargs):
#         return self.top
class BaseModel(models.Model):
    """Abstract Django base model that timestamps every save."""
    # auto_now=True: refreshed to the current time on each save().
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
    def __unicode__(self):
        # NOTE(review): relies on subclasses providing a `name` attribute;
        # BaseModel itself defines none, so a subclass without one would
        # raise AttributeError here. Confirm against concrete models.
        return "%s" % self.name
class SimplePage:
    """A plain (non-ORM) page entry rendered by the portfolio site.

    Attributes:
        top          - Headline text for the page.
        middle_link  - Single URL string shown in the middle of the page.
        middle_links - Optional list of dicts ({'url', 'text', 'class'}) used
                       instead of middle_link when several links are needed.
        bottom       - Caption/description text.
        year         - Display string for the project's year(s).
    """

    def __init__(self, top="", middle_link="", middle_links=None, bottom="", year=""):
        self.top = top
        self.middle_link = middle_link
        # Note: the "list of dicts - url, text" comment previously sat on
        # middle_link by mistake; it describes middle_links (plural).
        self.middle_links = middle_links
        self.bottom = bottom
        self.year = year

    def __unicode__(self, *args, **kwargs):
        return self.top

    # Python 3 uses __str__; alias it so str(page) yields the headline
    # instead of the default object repr.
    __str__ = __unicode__
def add_page(*args, **kwargs):
    """Construct a SimplePage from the given arguments and register it."""
    page = SimplePage(*args, **kwargs)
    all_pages.append(page)
# --- Page registry ---------------------------------------------------------
# Each add_page() call below appends one SimplePage to all_pages at import
# time, in display order. Commented-out entries are retired/hidden pages.
add_page(
    top="is enough",
    bottom="what does love mean to you?",
    middle_link="http://isenough.com",
    year="2012 (in-progress)",
)
add_page(
    top="encore",
    bottom="a digital poem",
    middle_link="http://www.encorepoem.com",
    year="2012 (in-progress)",
)
add_page(
    top="slow art",
    bottom="the anti-museum.<br/>\r\nin portland, or",
    middle_link="http://slowartpdx.com",
    year="2011+",
)
# add_page(
#     top="dear text messages,",
#     bottom="spoken word",
#     middle_link="",
#     year="2012",
# )
add_page(
    top="togetheralone",
    bottom="an experiment in community",
    middle_link="http://togetheralone.org",
    year="2012 (in-progress)",
)
add_page(
    top="the digital executioner",
    bottom="viral web idea generator",
    middle_link="http://www.thedigitalexecutioner.com",
    year="2012",
)
add_page(
    top="goodcloud",
    bottom="helping small nonprofits succeed",
    middle_link="https://www.agoodcloud.com",
    year="2011",
)
add_page(
    top="github",
    bottom="where I keep the bits and bytes",
    middle_link = "https://www.github.com/skoczen",
    # middle_links=[
    #     {
    #         "url": "https://www.github.com/skoczen",
    #         "text": "Personal",
    #         "class": ""
    #     },
    #     {
    #         "url": "https://www.github.com/GoodCloud",
    #         "text": "GoodCloud",
    #         "class": ""
    #     }
    # ],
    year="2009+",
)
add_page(
    top="sixlinks",
    bottom="sustainability you can actually do",
    middle_link="http://www.sixlinks.org",
    year="2008+",
)
add_page(
    top="the facebooks",
    bottom="yep, I'm on there",
    middle_link = "https://www.facebook.com/skoczen",
    # middle_links=[
    #     {
    #         "url": "https://www.facebook.com/skoczen",
    #         "text": "f",
    #         "class": "facebook"
    #     },
    #     {
    #         "url": "https://twitter.com/#!/skoczen",
    #         "text": "t",
    #         "class": "twitter"
    #     },
    #     {
    #         "url": "https://plus.google.com/101690366177319310091/",
    #         "text": "g+",
    #         "class": "google_plus"
    #     }
    # ],
    year="2007+",
)
# add_page(
#     top="Write Around Portland",
#     bottom="I'm proud to be a volunteer and donor for this amazing organization.",
#     middle_link="http://www.writearound.org",
#     year="2009+",
# )
add_page(
    top="30 people, 30 minutes",
    bottom="an epic way to turn 30",
    middle_link="http://www.30people30minutes.com",
    year="1999+",
)
add_page(
    top="quantum imagery",
    bottom="ye olde sole proprietorship",
    middle_link="http://www.quantumimagery.com",
    year="1999+",
)
add_page(
    top="photoblog",
    bottom="ye olde photos",
    middle_link="http://www.skoczen.net/photos",
    year="2005",
)
add_page(
    top="but i'm hungry!",
    bottom="ye olde recipe and restaurant site",
    middle_link="http://skoczen.net/food/",
    year="1999+",
)
add_page(
    top="liquid silver zen",
    bottom="early experiments in design",
    middle_link="http://liquidsilverzen.net/",
    year="2002",
)
add_page(
    top="or, just google",
    bottom="It's all me. Except for the Ohio seatbelt ticket. That's the other Steven Skoczen. (Really.)",
    middle_link="https://www.google.com/?q=Steven%20Skoczen",
    year="1997+",
)
# add_page(
#     top="birth",
#     bottom="there was no internet then",
#     middle_links=[
#         {
#             "url": "",
#             "text": "whoa",
#             "class": ""
#         },
#     ],
#     year="1980",
# )
| skoczen/skoczen | project/apps/resume/models.py | models.py | py | 4,868 | python | en | code | 1 | github-code | 36 |
8729618404 | #This script will format all images in a given directory to the same size and to gray scale using OpenCV
from scipy import ndimage, misc
import numpy as np
import cv2 as cv
from os import listdir
# Iterate through the training image directory and its per-subject subfolders.
img_directory = "/home/paok/Documents/FaceRecognition/trainImages"
# Each subdirectory of img_directory holds the images for one subject.
subjects = listdir(img_directory)
for subject in subjects:
    # Reuse img_directory instead of repeating the hard-coded path, and
    # iterate directly over names rather than range(len(...)).
    subject_dir = img_directory + "/" + subject
    for pic in listdir(subject_dir):
        path = subject_dir + "/" + pic
        #print(path)
        # Open image in directory and convert it to gray scale.
        image = cv.imread(path)
        image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        # Saturate (brighten) by 20%; promotes the array to float.
        image = 1.2 * image
        # Format as float32 for the bilateral filter.
        img = np.array(image, dtype=np.float32)
        # Edge-preserving smoothing, then resize to the common 200x200 size.
        # (The previous np.zeros((256, 256, 1)) pre-allocation was dead code:
        # it was immediately overwritten here.)
        filtered = cv.bilateralFilter(img, 2, 15, 15)
        filtered = cv.resize(filtered, (200, 200))
        # Back to 8-bit, equalize the histogram, and overwrite in place.
        wrt = np.array(filtered, dtype="uint8")
        write = cv.equalizeHist(wrt)
        cv.imwrite(path, write)
| PAOK-2001/FaceRecognition | Trainer_auxfiles/imageFormater.py | imageFormater.py | py | 1,359 | python | en | code | 0 | github-code | 36 |
15316070011 | from rng import RNG
from exceptions import InvalidValue
import time
#Blum Blum Shub Generator
class BBS(RNG):
    """Blum Blum Shub pseudo-random number generator.

    Produces 32-bit integers by repeatedly squaring the internal state
    modulo `moduo` and collecting one parity bit per squaring, then scales
    them into floats in [0, 1).
    """

    def __init__(self, seed=None, moduo=429497053):
        """
        Parameters:
            seed  - Initial state (non-negative). Defaults to the current
                    time *at call time*. (The original default of
                    int(time.time()) was evaluated once at import, so every
                    default-constructed instance shared the same seed.)
            moduo - Positive modulus used in the squaring step.

        Raises:
            InvalidValue - if seed is negative or moduo is not positive.
        """
        if seed is None:
            seed = int(time.time())
        RNG.__init__(self, seed)
        if seed < 0:
            raise InvalidValue("seed")
        if moduo <= 0:
            raise InvalidValue("moduo")
        self.__seed = seed
        self.__moduo = moduo

    def setSeed(self, seed):
        """Set the internal state; must satisfy 0 <= seed < moduo."""
        if 0 <= seed < self.__moduo:
            self.__seed = seed
        else:
            raise InvalidValue('seed')

    def getSeed(self):
        """Return the current internal state."""
        return self.__seed

    def setModuo(self, moduo):
        """Set the modulus; must be strictly positive."""
        if moduo > 0:
            self.__moduo = moduo
        else:
            raise InvalidValue('moduo')

    def getModuo(self):
        """Return the modulus."""
        return self.__moduo

    def __getParityBit(self, number):
        """Return 1 if `number` has an odd count of set bits, else 0."""
        parity = False
        while number:
            if number & 1:
                parity = not parity
            number >>= 1
        return 1 if parity else 0

    def __computeNumber(self):
        """Advance the state 32 times, packing one parity bit per step into
        a 32-bit integer (first bit produced becomes the most significant)."""
        new_number = 0
        # 32 iterations to get 32 bits.
        for _ in range(32):
            self.__seed = (self.__seed ** 2) % self.__moduo
            new_number = (new_number << 1) | self.__getParityBit(self.__seed)
        return new_number

    def random(self, size=None):
        """Return one float in [0, 1), or a list of `size` floats if given."""
        if size is None:
            return self.__randomOne()
        return self.__randomArray(size)

    def __randomOne(self):
        # 4294967296 == 2**32 maps the 32-bit integer into [0, 1).
        return self.__computeNumber() / 4294967296

    def __randomArray(self, size):
        return [self.__randomOne() for _ in range(size)]
| MihailoTim/Pseudo-Random-Number-Generators | code/bbs.py | bbs.py | py | 1,723 | python | en | code | 0 | github-code | 36 |
21841983047 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 16:57:05 2016
A file that contains steppers based on Scovel's method at various orders.
Details of the mathematical background an some example usages can be found in
Chapters 4 and 6 of Simulating Hamiltonian Dynamics.
@author: rpoolman
"""
import numpy as np
import NumericalAlgorithms.derivatives as deriv
class _ScovelsMethod:
    """
    The second order Scovel method that may be concatenated to create higher
    order methods. This is a base class and should not generally be used.
    Instead use the Composition class and set it to the order you wish to use.
    The details of this method are described in L + R section 4.5.2, pp. 94.
    """
    def __init__(self, func, M, b, Dt = None, useNumericalDerivatives = False):
        """
        Parameters
            func - Either the potential function or its first spatial
                   derivative, depending on whether useNumericalDerivatives is
                   False or True. Must take a one-dimensional three element
                   vector representing the spatial coordinates at which
                   the function will evaluate.
            M - The particle mass.
            b - A three element vector used to define the force acting on the
                system.
            Dt - The timestep size, which defaults to None. This allows
                 adaptive methods to be applied. If a value is provided then
                 the step size is fixed.
            useNumericalDerivatives: If True the first spatial derivative of
                                     the potential function is calculated by a
                                     finite difference method. If False then
                                     func must be an analytically derived
                                     first derivative of the potential
                                     function. Defaults to False.
        """
        self.m_func = func
        self.m_M = M
        if np.shape(b) != (3,):
            raise ValueError("_ScovelsMethod.__init__: Vector b must be a three-vector.")
        else:
            # m_B is the skew-symmetric cross-product matrix of b, i.e.
            # m_B @ v == np.cross(b, v).
            self.m_B = np.zeros((3,3))
            self.m_B[1, 0] = b[2]
            self.m_B[2, 0] = -b[1]
            self.m_B[0, 1] = -b[2]
            self.m_B[2, 1] = b[0]
            self.m_B[0, 2] = b[1]
            self.m_B[1, 2] = -b[0]
        # Euclidean norm of b, used throughout the Rodrigues-type formulas.
        self.b_norm = np.sqrt(b[0]**2 + b[1]**2 + b[2]**2)
        if Dt is None:
            # Adaptive mode: F and exp(Bt) must be re-evaluated per step.
            self.m_F = self._F
            self.m_expBt = self._expBt
        else:
            # Fixed-step mode: precompute F(Dt) and exp(B*Dt) once.
            self.m_Dt = Dt
            self.m_F = 1.0/self.m_M*(Dt*np.diag(np.ones(3)) + \
                (1.0 - np.cos(self.b_norm*Dt))/self.b_norm**2.0*self.m_B - \
                (np.sin(self.b_norm*Dt) - self.b_norm*Dt)/self.b_norm**3.0*np.dot(self.m_B.T, self.m_B))
            self.m_expBt = np.diag(np.ones(3)) + \
                np.sin(self.b_norm*self.m_Dt)/self.b_norm*self.m_B - \
                2*(np.sin(self.b_norm*self.m_Dt/2)/self.b_norm)**2*np.dot(self.m_B.T, self.m_B)
        # Select the stepper implementation. Adaptive steppers take the step
        # size as their first argument; fixed-step ones use self.m_Dt.
        if Dt is None and useNumericalDerivatives:
            self.m_step = self._numericScovelAdaptive
        elif Dt is None and not useNumericalDerivatives:
            self.m_step = self._analyticScovelAdaptive
        elif Dt is not None and useNumericalDerivatives:
            self.m_step = self._numericScovel
        elif Dt is not None and not useNumericalDerivatives:
            self.m_step = self._analyticScovel
    def _expBt(self, t):
        """
        Rodrigues' formula, used to calculate the exponential of a 3x3 matrix.
        Parameters:
            t - The time at which the solution is to be calculated.
        Returns:
            A 3x3 matrix exp(Bt) where t is the time and B the matrix.
        """
        return np.diag(np.ones(3)) + \
            np.sin(self.b_norm*t)/self.b_norm*self.m_B - \
            2*(np.sin(self.b_norm*t/2)/self.b_norm)**2*np.dot(self.m_B.T, self.m_B)
    def _F(self, t):
        """
        Matrix equation that forms part of the linear system solution described
        in L + R section 4.5.2, p. 95.
        Parameters:
            t - The time at which the solution is to be calculated.
        Returns:
            A 3x3 matrix that is used to calculate the coordinate at the next
            time step.
        """
        return 1.0/self.m_M*(t*np.diag(np.ones(3)) + \
            (1.0 - np.cos(self.b_norm*t))/self.b_norm**2.0*self.m_B - \
            (np.sin(self.b_norm*t) - self.b_norm*t)/self.b_norm**3.0*np.dot(self.m_B.T, self.m_B))
    def _numericScovelAdaptive(self, Dt, qn, pn):
        """
        A Scovel method time step with spatial derivatives of the potential
        calculated with finite differences.
        Parameters:
            Dt - The time step size.
            qn - The coordinate in three dimensions at the start of the step.
            pn - The momentum in three dimensions at the start of the step.
        Returns:
            The coordinate and momentum after the step.
        """
        # Half kick, drift, then a second half kick at the new position.
        pHalf = pn - Dt/2*deriv.grad(self.m_func, qn, Dt*pn)
        qn1 = qn + np.dot(self.m_F(Dt), pHalf)
        pn1 = np.dot(self.m_expBt(Dt), pHalf) - \
            Dt/2*deriv.grad(self.m_func, qn1, Dt*pn)
        return qn1, pn1
    def _analyticScovelAdaptive(self, Dt, qn, pn):
        """
        A Scovel method time step with spatial derivatives of the potential
        calculated from a pre-derived analytical function. This offers
        superior performance.
        Parameters:
            Dt - The time step size.
            qn - The coordinate in three dimensions at the start of the step.
            pn - The momentum in three dimensions at the start of the step.
        Returns:
            The coordinate and momentum after the step.
        """
        pHalf = pn - Dt/2*self.m_func(qn)
        qn1 = qn + np.dot(self.m_F(Dt), pHalf)
        pn1 = np.dot(self.m_expBt(Dt), pHalf) - Dt/2*self.m_func(qn1)
        return qn1, pn1
    def _numericScovel(self, qn, pn):
        """
        A Scovel method time step (fixed step size self.m_Dt) with spatial
        derivatives of the potential calculated with finite differences.
        Parameters:
            qn - The coordinate in three dimensions at the start of the step.
            pn - The momentum in three dimensions at the start of the step.
        Returns:
            The coordinate and momentum after the step.
        """
        pHalf = pn - self.m_Dt/2*deriv.grad(self.m_func, qn, self.m_Dt*pn)
        qn1 = qn + np.dot(self.m_F, pHalf)
        pn1 = np.dot(self.m_expBt, pHalf) - \
            self.m_Dt/2*deriv.grad(self.m_func, qn1, self.m_Dt*pn)
        return qn1, pn1
    def _analyticScovel(self, qn, pn):
        """
        A Scovel method time step (fixed step size self.m_Dt) with spatial
        derivatives of the potential calculated from a pre-derived analytical
        function. This offers superior performance.
        Parameters:
            qn - The coordinate in three dimensions at the start of the step.
            pn - The momentum in three dimensions at the start of the step.
        Returns:
            The coordinate and momentum after the step.
        """
        pHalf = pn - self.m_Dt/2*self.m_func(qn)
        qn1 = qn + np.dot(self.m_F, pHalf)
        pn1 = np.dot(self.m_expBt, pHalf) - self.m_Dt/2*self.m_func(qn1)
        return qn1, pn1
    def workPerUnitTime(self):
        """
        The work per unit time is defined as W = N*S/T, where N is the number
        of time steps, S is the number of stages and T is the period. See
        L + R, section 6.6, pp. 165 for details. I have removed the T
        dependency with N = T/Dt leading to S/Dt. For the Scovel method there
        is only one stage so W = 1/Dt.
        Return:
            A value for the work per unit time as calculated by S/Dt.
        """
        return 1/self.m_Dt
class Composition(_ScovelsMethod):
    """
    A derived class to create compositions of Scovel's 2nd order method by
    repeated application across a weighted time step. The scheme is described
    in L + R, section 6.2.2 on pp. 147.
    """
    def __init__(self, func, M, b, w, Dt=None,
                 useNumericalDerivatives=False):
        """
        Parameters:
            func - Either the potential function or its first spatial
                   derivative, depending on useNumericalDerivatives. Must take
                   a one-dimensional three element vector of spatial
                   coordinates.
            M - The mass of the particle to be simulated.
            b - A three element vector used to define the force acting on the
                system.
            w - The first half of the array of weights applied to the
                time-step size. The full array is symmetric with an odd number
                of elements: the second half mirrors the first and the central
                element is chosen so all weights sum to one.
            Dt - The timestep size; defaults to None (adaptive usage).
            useNumericalDerivatives - If True the first spatial derivative of
                                      the potential is computed by finite
                                      differences; otherwise func must be the
                                      analytic derivative. Defaults to False.
        """
        # Build the full symmetric weight vector [w, 1 - 2*sum(w), w reversed].
        # (np.int was removed in NumPy 1.24; len() already returns an int.)
        half = len(w)
        self.m_w = np.zeros(2*half + 1)
        self.m_w[0:half] = w
        self.m_w[half + 1:] = w[::-1]
        self.m_w[half] = 1 - 2*np.sum(w)
        self.m_Dt = Dt
        self.m_stages = len(self.m_w)
        # Initialise the base class in adaptive mode (base Dt=None) so that
        # m_step accepts the per-stage step size as its first argument.
        super().__init__(func, M, b, None, useNumericalDerivatives)

    def step(self, qn, pn):
        """Advance (qn, pn) by one full composed step of total size m_Dt,
        applying one second-order Scovel sub-step per weight."""
        qn1 = qn
        pn1 = pn
        for w in self.m_w:
            qn1, pn1 = self.m_step(w*self.m_Dt, qn1, pn1)
        return qn1, pn1

    def workPerUnitTime(self):
        """
        The work per unit time is defined as W = N*S/T, where N is the number
        of time steps, S is the number of stages and T is the period. See
        L + R, section 6.6, pp. 165 for details. With N = T/Dt this reduces
        to S/Dt.
        Return:
            A value for the work per unit time as calculated by S/Dt.
        """
        return self.m_stages/self.m_Dt
class Processing(Composition):
    """
    A derived class to create a post-processed composition method as described
    in L + R section 6.2.3, pp. 148. The idea is that a composition method is
    applied to z after it has undergone a transformation; when the output is
    required the inverse transformation is applied to the results. This allows
    a higher order method to be used at a smaller computational price. In this
    class Scovel's method is used for the transform.
    """
    def __init__(self, func, M, b, w, c, Dt,
                 useNumericalDerivatives=False):
        """
        Parameters:
            func - Either the potential function or its first spatial
                   derivative, depending on useNumericalDerivatives.
            M - The mass of the particle to be simulated.
            b - A three element vector used to define the force acting on the
                system.
            w - The first half of the symmetric composition weight array (see
                Composition).
            c - The coefficients applied to the coordinate transform.
            Dt - The timestep size used by integrate()/step().
            useNumericalDerivatives - If True the first spatial derivative of
                                      the potential is computed by finite
                                      differences. Defaults to False.
        """
        # Initialise the base class in adaptive mode so m_step accepts an
        # explicit step size.
        super().__init__(func, M, b, w, None, useNumericalDerivatives)
        # Need to store the step size (the base was built with Dt=None).
        self.m_Dt = Dt
        # Initialise the transform coefficients; prepend -sum(c) so that the
        # coefficients sum to zero.
        self.m_c = np.zeros(len(c) + 1)
        self.m_c[1:len(c) + 1] = c
        self.m_c[0] = -np.sum(c)

    def applyTransform(self, qn, pn):
        """
        Applies the transform to the coordinates q and momentum p, after which
        they may be integrated.
        Parameters:
            qn - The coordinate to which the transform is applied.
            pn - The momentum to which the transform is applied.
        Returns:
            The transformed coordinates and momentum, ready for integration.
        """
        qn_hat = qn
        pn_hat = pn
        # NOTE(review): applying a +c pass followed by a -c pass composes
        # steps of opposite sign; confirm against the processed-method
        # formulas in L + R section 6.2.3.
        for c in self.m_c:
            qn_hat, pn_hat = self.m_step(c*self.m_Dt, qn_hat, pn_hat)
        for c in self.m_c:
            qn_hat, pn_hat = self.m_step(-c*self.m_Dt, qn_hat, pn_hat)
        return qn_hat, pn_hat

    def applyInverseTransform(self, qn_hat, pn_hat):
        """
        Applies the inverse transform to the processed coordinates and
        momentum, returning them to the original frame for analysis.
        Parameters:
            qn_hat - The transformed coordinate.
            pn_hat - The transformed momentum.
        Returns:
            The coordinates and momentum in the original frame.
        """
        qn = qn_hat
        pn = pn_hat
        # BUG FIX: the original looped on qn_hat/pn_hat but returned the
        # untouched qn/pn, so the inverse transform was silently a no-op.
        for c in self.m_c[::-1]:
            qn, pn = self.m_step(c*self.m_Dt, qn, pn)
        for c in self.m_c[::-1]:
            qn, pn = self.m_step(-c*self.m_Dt, qn, pn)
        return qn, pn

    def integrate(self, qn0, pn0, T):
        """
        Calculate the trajectory of a particle up to time t = T and return the
        position and momentum at the first value of t > T.
        Parameters:
            qn0 - The initial coordinates.
            pn0 - The initial momentum.
            T - The time up to which the integration occurs.
        Returns:
            The coordinates and momentum of the particle at time T.
        """
        # Transform qn0 and pn0 into the processed variables.
        qn_hat, pn_hat = self.applyTransform(qn0, pn0)
        # Integrate up to time t = T. (np.int was removed in NumPy 1.24;
        # the builtin int is equivalent here.)
        N = int(np.ceil(T/self.m_Dt))
        for ii in range(N):
            qn_hat, pn_hat = self.step(qn_hat, pn_hat)
        # Inverse transform the results back to the original frame.
        return self.applyInverseTransform(qn_hat, pn_hat)
25578129495 | class Solution:
def removeKdigits(self, num: str, k: int) -> str:
#if k is equal to our larger than the length of num, return '0'
if len(num) <= k:
return '0'
for i in range(k):
j = 1
while j < len(num):
#greedy algorithm. Remove the first number when the number on the right
#is lower than the number on the left. This will leave us with the lowest
#posible number for each for loop
if num[j-1] > num[j]:
num = num[:j-1] + num[j:]
break
#if we've gotten to the end and only seen decreasing numbers, remove the last digit
elif j == len(num)-1:
num = num[:-1]
else:
j += 1
if num == '0':
return num
else:
#remove leading zeroes before returning
k = 0
while k < len(num) and num[k] == '0':
k += 1
num = num[k:]
#if the string contained only '0's, num will be an empty string at this point
return '0' if not num else num
| korynewton/code-challenges | leetcode/RemoveKDigits/solution.py | solution.py | py | 1,281 | python | en | code | 0 | github-code | 36 |
9140513276 | class bebida:
Agua = True
Gas = "w"
Sabor = "x"
Colorantes = "y"
Temperatura = "z"
def __init__ (self,a,b,c,d):
self.Gas = a
self.Sabor = b
self.Colorantes = c
self.Temperatura = d
cocacola = bebida("Con gas", "Sabor cola", "con colorantes", "Fria")
tematcha = bebida("Sin gas", "Sabor matcha", "sin colorantes", "Caliente")
agua = bebida("Sin gas", "Sin sabor", "Sin color", "Fria")
print(cocacola.Gas, cocacola.Sabor, cocacola.Colorantes, cocacola.Temperatura)
print(tematcha.Gas, tematcha.Sabor, tematcha.Colorantes, tematcha.Temperatura)
print(agua.Gas, agua.Sabor, agua.Colorantes, agua.Temperatura)
| Briusx/Inicializador | inicializador bebidas.py | inicializador bebidas.py | py | 696 | python | en | code | 0 | github-code | 36 |
30161042149 | a = str(input('Digit your full name: ')).upper().strip()
b = a.split()
count = 0
for i in b:
if i == "SILVA":
print('Your name contains "SILVA" ')
count += 1
if count != 0:
break
if count == 0:
print('Your name does not contain "SILVA" ')
| RodolfoCRS/Python | 025 - Looking for a string inside another.py | 025 - Looking for a string inside another.py | py | 283 | python | en | code | 0 | github-code | 36 |
34619647140 | import datetime
import json
import random
import requests
import numpy as np
import pandas as pd
from pkg_resources import resource_filename
class DataWrangler:
def __init__(self, wallet, start, end):
self.wallet = wallet
self.start = start
self.end = end
self.profit = None
self.summary = None
self.dataviz_data = None
def get_summary(self):
df = pd.DataFrame(self.wallet)
df = df[df['effectiveDate'] == self.end].copy(deep=True)
df['final_pct_of_total'] = round(
(df['outcome'] / sum(df['outcome'])) * 100, 2)
df.drop(columns=['no', 'mid', 'effectiveDate'], inplace=True)
df.set_index('currency', inplace=True)
self.profit = round(sum(df['profit']), 2)
self.summary = df
def get_dataviz_data(self):
df = pd.DataFrame(self.wallet)
df.rename(columns={'effectiveDate': 'date'}, inplace=True)
df = df.pivot(index='currency', columns='date', values='pct_change')
self.dataviz_data = df
class RandomCurrencies:
def __init__(self, num):
self.num = num
self._codes = set(load_currencies().keys())
@property
def num(self):
return self._num
@num.setter
def num(self, value):
if not 0 < value <= 35:
raise ValueError('Number of currencies must be between 1 and 35')
self._num = value
def _country_codes(self):
"""Generate random country codes"""
return random.sample(self._codes, self.num)
def _pct_values(self):
"""Generate random pct values that add up to 100"""
nums = np.random.random(self.num)
nums = [round(n / sum(nums) * 100) for n in nums]
if 0 in nums or sum(nums) != 100:
return self._pct_values()
return nums
def generate(self):
"""Return currency & percent value pairs (max 35)"""
return [(code, pct_value) for code, pct_value
in zip(self._country_codes(), self._pct_values())]
def first_possible_date():
date = datetime.date.today() - datetime.timedelta(days=29)
return date.strftime('%Y-%m-%d')
def load_currencies():
path = resource_filename('currency_wallet.utils', 'data/currencies.json')
with open(path, 'r') as file:
currencies = json.load(file)
return {c['code']: c['currency'] for c in currencies[0]['rates']}
def query_nbp_api(currency, start, end):
"""Get exchange rates from NBP api"""
adapter = requests.adapters.HTTPAdapter(max_retries=2)
session = requests.Session()
session.mount('http://api.nbp.pl/', adapter)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)\
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
}
url = f'http://api.nbp.pl/api/exchangerates/rates/a/{currency}/{start}/{end}/?format=json'
try:
response = session.get(url, headers=headers, timeout=3)
response.raise_for_status()
result = response.json()
except Exception as e:
print(e)
return result
| karolow/currency-wallet | currency_wallet/utils/utils.py | utils.py | py | 3,114 | python | en | code | 0 | github-code | 36 |
70190123945 | from tech_news.database import search_news
from datetime import datetime
# Requisito 6
def search_by_title(title):
find = search_news(
{"title": {"$regex": f"{title}", "$options": "i"}}
)
return [(item["title"], item["url"]) for item in find]
# Requisito 7
def search_by_date(date):
try:
find_by_date = list()
date = datetime.strptime(date, '%Y-%m-%d')
default_date = datetime.strftime(date, '%d/%m/%Y')
new_date = search_news({
'timestamp': default_date
})
for date in new_date:
find_by_date.append((date['title'], date['url']))
except ValueError:
raise ValueError('Data inválida')
return find_by_date
# Requisito 8
def search_by_tag(tag):
find = search_news(
{"tags": {"$regex": tag, "$options": "i"}}
)
return [(item["title"], item["url"]) for item in find]
# Requisito 9
def search_by_category(category):
find = search_news(
{"category": {"$regex": category, "$options": "i"}}
)
return [(item["title"], item["url"]) for item in find]
| mabiiak/tech-news | tech_news/analyzer/search_engine.py | search_engine.py | py | 1,103 | python | en | code | 0 | github-code | 36 |
37217485047 | def max_sum(A, left, right):
# A[left], ..., A[right] 중 최대 구간 합 리턴
# 아래 3줄의 수행시간: c(상수)
if left == right:
return A[left]
m = (left + right) // 2
# 아래 2줄의 수행시간: 2T(n/2)
L = max_sum(A, left, m) # 수행시간: T(n/2)
R = max_sum(A, m + 1, right) # 수행시간: T(n/2)
# 아래 14줄의 수행시간: O(n)
max_left_ps = A[m]
cur_left_ps = A[m]
for i in range(m-1, left-1, -1):
cur_left_ps += A[i]
if cur_left_ps > max_left_ps:
max_left_ps = cur_left_ps
max_right_ps = A[m+1]
cur_right_ps = A[m+1]
for i in range(m+2, right+1):
cur_right_ps += A[i]
if cur_right_ps > max_right_ps:
max_right_ps = cur_right_ps
M = max_left_ps + max_right_ps
return max(L, M, R)
A = [int(x) for x in input().split()]
sol = max_sum(A, 0, len(A)-1)
print(sol)
'''
알고리즘 설명: A를 반으로 분할하면 최대 구간 합이 존재하는 경우는 딱 3가지다.
1. A의 왼쪽 반 구간에 존재
2. A의 오른쪽 반 구간에 존재
3. A의 양쪽에 모두 걸치는 경우
A의 왼쪽 반 구간에서의 최대 구간 합을 L, A의 양쪽에 모두 걸치는 최대 구간 합을 M, A의 오른쪽 반 구간에서의 최대 구간 합을 R이라고 하자. 그러면 L, M, R 중에서 최댓값이 최종적인 최대 구간 합이 된다. L과 R은 같은 방식을 사용하여 재귀로 구하고, M은 L의 가장 끝 수부터 왼쪽으로 prefix-sum을 구하면서 가장 큰 구간을 찾고, R의 첫 수부터 오른쪽으로 prefix-sum을 구하면서 가장 큰 구간을 찾아 두 구간의 합을 더하면 구할 수 있다.
'''
'''
수행시간 분석
T(n) = 2T(n/2) + cn = O(nlogn)
'''
| shinyewon/Assignment | Algorithm/최대 구간 합.py | 최대 구간 합.py | py | 1,791 | python | ko | code | 0 | github-code | 36 |
7749021941 | import sys
import os
from Bio.Blast import NCBIWWW
from Bio.Blast.Applications import NcbiblastxCommandline
from Bio.Blast import NCBIXML
from Bio import SeqIO
E_VALUE_THRESH = 10
RESULTS_XML = "results.xml"
PROT_DB = "swissprot"
NUC_DB = "nt"
if len (sys.argv) != 5:
print("Invalid params: 1) In file path - 2) Out file path 3) Type ( --prot or --nuc ) 4) Mode ( --online --local)")
sys.exit(1)
fasta_string = open(sys.argv[1]).read()
if sys.argv[3] == "--prot":
if sys.argv[4] == '--online':
result_handle = NCBIWWW.qblast("blastp", PROT_DB, fasta_string, expect=E_VALUE_THRESH)
with open(RESULTS_XML, "w") as out_handle:
out_handle.write(result_handle.read())
result_handle = open(RESULTS_XML)
elif sys.argv[4] == '--local':
blastx_cline = NcbiblastxCommandline(cmd='blastp', query=sys.argv[1], db=PROT_DB, evalue=E_VALUE_THRESH, out=RESULTS_XML, outfmt=5)
stdout, stderr = blastx_cline()
result_handle = open(RESULTS_XML)
else:
print("Invalid Mode for blast")
sys.exit(1)
elif sys.argv[3] == "--nuc":
result_handle = NCBIWWW.qblast("blastn", NUC_DB, fasta_string)
else:
print("Invalid type for blast")
sys.exit(1)
blast_records = NCBIXML.parse(result_handle)
if os.path.exists(sys.argv[2]):
os.remove(sys.argv[2])
for blast_record in blast_records:
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
with open(sys.argv[2], "a") as f:
print("****Blast Result****", file=f)
print("sequence:", alignment.title, file = f)
print("length:", alignment.length, file = f)
print("e value:", hsp.expect, file = f)
print("gaps:", hsp.gaps, file = f)
print("identities:", hsp.identities, file = f)
print("positives:", hsp.positives, file = f)
print("score:", hsp.score, file = f)
print(hsp.query[0:75] + "...", file = f)
print(hsp.match[0:75] + "...", file = f)
print(hsp.sbjct[0:75] + "...", file = f)
if os.path.exists("results.xml"):
os.remove("results.xml") | jpalacci/bio | src/ex2.py | ex2.py | py | 1,987 | python | en | code | 0 | github-code | 36 |
20233922637 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
"""
思路一:dfs+深度优先计算
"""
def rob(self, root) -> int:
def dfs(root):
if not root:
return 0, 0
left = dfs(root.left)
right = dfs(root.right)
rob_value = left[1] + right[1] + root.val
skip_value = max(left[0], left[1]) + max(right[0], right[1])
return [rob_value, skip_value]
value = dfs(root)
return max(value[0], value[1])
| geroge-gao/Algorithm | LeetCode/python/337_打家劫舍3.py | 337_打家劫舍3.py | py | 654 | python | en | code | 26 | github-code | 36 |
3300025718 | from exif_service import ExifService
from ai_service import AiService
import base64
import os
class ImageService:
def __init__(self, app, initial_path, ai_enabled=True):
self.directory = initial_path
self.app = app
self.ai_enabled = ai_enabled
self.exif_service = ExifService()
if self.ai_enabled:
self.ai_service = AiService()
def get_current_directory(self):
return self.directory
def get_images(self):
return self.__get_images(self.directory, 10)
def __get_images(self, directory, remaining_depth=0):
images = []
for file in os.listdir(directory):
if os.path.isfile(os.path.join(directory, file)):
try:
Image.open(os.path.join(directory, file))
except IOError:
self.app.logger.warning("File %s is not an image", file)
continue
try:
images.append(self.exif_service.get_metadata(directory, file))
except LookupError:
continue
if self.ai_enabled:
images[-1]["motif"] = self.ai_service.get_image_motif(directory, file)
elif remaining_depth > 0:
images.extend(self.__get_images(os.path.join(directory, file), remaining_depth - 1))
return images
def change_directory(self, directory):
if len(directory.split(os.path.sep)) > 1:
self.directory = directory
elif directory == "..":
self.directory = os.path.dirname(self.directory)
else:
self.directory = os.path.join(self.directory, directory)
def get_image(self, file):
with open(os.path.join(self.directory, file), "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def get_files_and_directories(self):
files = [{"name": "..", "isFile": False}]
files.extend([{"name": f, "isFile": os.path.isfile(os.path.join(self.directory, f))} for f in os.listdir(self.directory)])
return files | tim0-12432/photo-analyzer | backend/image_service.py | image_service.py | py | 2,127 | python | en | code | 0 | github-code | 36 |
28524876545 | def divided_diff(x, y):
n = len(x)
coefficients = []
for i in range(n):
coefficients.append(y[i])
for j in range(1, n):
for i in range(n-1, j-1, -1):
coefficients[i] = (coefficients[i] - coefficients[i-1]) / (x[i] - x[i-j])
return coefficients
def newton_interpolation(x, y, xi):
coefficients = divided_diff(x, y)
n = len(x)
result = coefficients[n-1]
for i in range(n-2, -1, -1):
result = result * (xi - x[i]) + coefficients[i]
return result
# user input
n = int(input("Enter the number of data points: "))
x = []
y = []
for i in range(n):
x_i = float(input(f"Enter x{i+1}: "))
y_i = float(input(f"Enter y{i+1}: "))
x.append(x_i)
y.append(y_i)
# test
xi = float(input("Enter the value of xi for interpolation: "))
yi = newton_interpolation(x, y, xi)
print(f"Interpolated value at xi = {xi} is {yi}")
| umang27102001/AssignmentsMCA | assignmentNM/.py/assignment6A.py | assignment6A.py | py | 898 | python | en | code | 0 | github-code | 36 |
22753766688 | """Deep_acsauto package definition"""
from setuptools import setup, find_packages
from __init__ import __version__
# Read long description from file
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
setup(
name="DeepACSA",
version=__version__,
description=(
"Anatomical cross-sectional area evalutaion in Ultrasound images."
),
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/PaulRitsche/ACSAuto_DeepLearning",
author="Paul Ritsche",
author_email="paul.ritsche@unibas.ch",
maintainers=["Paul Ritsche", "Philipp Wirth", "Neil Cronin"],
maintainers_email=["paul.ritsche@unibas.ch",
"philipp.m.wirth@gmail.com",
"neil.cronin@jyu.fi"],
classifiers=[
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: ",
"Natural Language :: English",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Physiology",
"Topic :: Utilities",
],
entry_points={
'console_scripts': [
'deep_acsa = deep_acsa_gui:main',
],
},
keywords=[
'ultrasound',
'physiology',
'deep learning',
'muscle',
],
project_urls={
"Github": "https://github.com/PaulRitsche/DeepACSA.git",
},
packages=find_packages(),
include_package_data=True,
setup_requires=[
"setuptools_git == 1.2",
],
)
| maxull/Sharples-Lab | DeepACSA/DeepACSA-main/setup.py | setup.py | py | 1,552 | python | en | code | 2 | github-code | 36 |
8209880971 | import os
import shutil
import tarfile
from colcon_bundle.verb import logger
from colcon_bundle.verb.utilities import \
update_shebang
def create_workspace_overlay(install_base: str,
workspace_staging_path: str,
overlay_path: str):
"""
Create overlay from user's built workspace install directory.
:param str install_base: Path to built workspace install directory
:param str workspace_staging_path: Path to stage the overlay build at
:param str overlay_path: Name of the overlay file (.tar.gz)
"""
workspace_install_path = os.path.join(
workspace_staging_path, 'opt', 'built_workspace')
shutil.rmtree(workspace_staging_path, ignore_errors=True)
assets_directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'assets')
shellscript_path = os.path.join(assets_directory, 'v2_workspace_setup.sh')
# install_base: Directory with built artifacts from the workspace
os.mkdir(workspace_staging_path)
shutil.copy2(shellscript_path,
os.path.join(workspace_staging_path, 'setup.sh'))
shutil.copytree(install_base, workspace_install_path)
# This is required because python3 shell scripts use a hard
# coded shebang
update_shebang(workspace_staging_path)
recursive_tar_gz_in_path(overlay_path,
workspace_staging_path)
def create_dependencies_overlay(staging_path, overlay_path):
"""
Create the dependencies overlay from staging_path.
:param str staging_path: Path where all the dependencies
have been installed/extracted to
:param str overlay_path: Path of overlay output file
(.tar.gz)
"""
dependencies_staging_path = staging_path
dependencies_tar_gz_path = overlay_path
logger.info('Dependencies changed, updating {}'.format(
dependencies_tar_gz_path
))
assets_directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'assets')
shellscript_path = os.path.join(assets_directory, 'v2_setup.sh')
shutil.copy2(shellscript_path,
os.path.join(dependencies_staging_path, 'setup.sh'))
if os.path.exists(dependencies_tar_gz_path):
os.remove(dependencies_tar_gz_path)
recursive_tar_gz_in_path(dependencies_tar_gz_path,
dependencies_staging_path)
def recursive_tar_gz_in_path(output_path, path):
"""
Create a tar.gz archive of all files inside a directory.
This function includes all sub-folders of path in the root of the tarfile
:param output_path: Name of archive file to create
:param path: path to recursively collect all files and include in
tar.gz. These will be included with path as the root of the archive.
"""
with tarfile.open(output_path, mode='w:gz', compresslevel=5) as tar:
logger.info(
'Creating tar of {path}'.format(path=path))
for name in os.listdir(path):
some_path = os.path.join(path, name)
tar.add(some_path, arcname=os.path.basename(some_path))
| rotu/colcon-bundle | colcon_bundle/verb/_overlay_utilities.py | _overlay_utilities.py | py | 3,108 | python | en | code | null | github-code | 36 |
40706528029 | import random
# Shows the player what grades they could still possibly win (show them everytime before they make a choice)
def showAvailableGrades():
print("Here are the remaining final grades that you could receive: ")
for grades in originalPapers:
print(grades, "%")
# Shows the player what 'papers' they could still select from to eliminate
def showAvailableCases():
print("Here are the available 'papers' to choose from: ")
for paper, availability in availablePapers.items():
if availability == True:
print(paper)
## Shuffles array 'ar' in place with Fisher-Yates algorithm.
## Code from http://code.activestate.com/recipes/360461-fisher-yates-shuffle/
def shuffle(ar):
a=len(ar)
b=a-1
for d in range(b,0,-1):
e=random.randint(0,d)
if e == d:
continue
ar[d],ar[e]=ar[e],ar[d]
return ar
# Basic algorithm that generates the 'teacher's offer for the player based on my number of 'papers' left and decr value
# that gets smaller with each passing round
def callTeacher(decr):
offer = 0
numLeft = 0
for score in shufflePapers:
numLeft += 1
offer += score
return int(int(offer / numLeft) - decr)
# A check function to see whether or not the player accepts the offer from the teacher
def checkIfAccept(choice):
if choice == 'grade':
return True
elif (choice == 'no') or (choice == 'no grade'):
return False
# A check function that gets the users choice and makes sure that it is a valid choice before carrying out the removal process
def getUsersChoice(choice):
while (choice.isnumeric() == False) or (int(choice) > 11) or (int(choice) < 1) or (availablePapers[int(choice)] == False):
if (choice.isnumeric() == False) or (int(choice) > 11) or (int(choice) < 1):
choice = input("Sorry, that's not a valid response. Please choose another: ")
elif (availablePapers[int(choice)] == False):
choice = input("Sorry, that paper was already eliminated. Please choose another: ")
choice = int(choice)
availablePapers[choice] = False
originalPapers.remove(shufflePapers[choice - 1])
# A print function that just outputs to the terminal what the grade of the paper that the player chose was.
def printUsersChoice(choice):
print()
print()
print("The paper you chose had a grade of " + str(shufflePapers[int(choice) - 1]) + "%")
print()
print()
#Welcome(Intro) Messages
welcomeMsg = "Welcome to 'Grade or No Grade'!"
directionMsg = "Your objective is to get the highest grade possible by eliminating lower grades and negotiating with your 'teacher'."
firstCase = "There are 11 graded 'papers'. Let's start off by picking your personal paper."
print(welcomeMsg)
print(directionMsg)
print(firstCase)
#A+, A, A-, B+, B, B-, C+, C, C-, D, F in percent form
#Store all different grades in 2 lists (1st kept in order, 2nd to be shuffled ) and their availabilities in a dictionary
originalPapers = [100, 95, 90, 88, 85, 80, 78, 75, 70, 60, 50]
shufflePapers = [100, 95, 90, 88, 85, 80, 78, 75, 70, 60, 50]
availablePapers = {1: True, 2: True, 3: True, 4: True, 5: True, 6: True, 7: True, 8: True, 9: True, 10: True, 11: True}
shuffle(shufflePapers)
#1) Get user's first selection as their personal 'paper' and remove it from possible papers
firstSelection = input("Choose your personal paper #(1-11): ")
while (firstSelection.isnumeric() == False) or (int(firstSelection) < 1) or (int(firstSelection) > 11):
firstSelection = input("Sorry, that's not valid response. Please choose your personal paper between 1 and 11: ")
firstSelection = int(firstSelection)
availablePapers[firstSelection] = False
personalCase = shufflePapers[firstSelection - 1]
#2) Loop the program where it eliminates 2 grades at a time and then checks with "teacher"
#If offer is accepted, game is over. Otherwise, continue the game
gameIsOver = False
decr = 10
roundCount = 1
while (gameIsOver == False) and (len(originalPapers) != 2):
showAvailableGrades()
showAvailableCases()
if (roundCount == 1) and (len(originalPapers) == 3):
print("You are down to the last 2 'papers'. You will only pick one this round before you look at your personal paper.")
userChoice = input("Which paper do you choose: ")
elif roundCount == 1:
print("You will eliminate 2 'papers' at a time")
userChoice = input("Choose the 1st paper to eliminate: ")
roundCount = 2
elif roundCount == 2:
userChoice = input("Choose the 2nd paper to eliminate: ")
roundCount = 1
getUsersChoice(userChoice)
printUsersChoice(userChoice)
if (len(originalPapers) != 2) and (roundCount == 1):
teacherOffer = callTeacher(decr)
decr = decr - 3.7
print("Hang On. The teacher wants to offer you " + str(teacherOffer) + "%")
showAvailableGrades()
choice = input("So.....Grade or No Grade: ")
if (choice.lower() == 'grade') or (choice.lower() == 'no') or (choice.lower() == 'no grade'):
gameIsOver = checkIfAccept(choice.lower())
else:
choice = input("Sorry, I didn't understand that.....Grade or No Grade: ")
#Two cases
#1) GameIsOver becomes true. This only occurs when user has chosen to accept a grade from the "teacher"
#2) Length of originalPapers is 2. This means that only 1 grade is left on the board so the user gets the personal grade that they picked in the beginning.
if(gameIsOver == True):
print("Congrats, your final grade is " + str(teacherOffer) + "%")
else:
print("It seems you have now reached the last case so go ahead and look at the personal grade you picked in the beginning")
print("Congrats, your personal grade was " + str(personalCase) + "%")
| AlvinNgo123/GradeOrNoGrade | game.py | game.py | py | 5,543 | python | en | code | 0 | github-code | 36 |
74713864745 | from UM.Application import Application
from UM.Logger import Logger
from cura.CuraApplication import CuraApplication
from cura.PrinterOutputDevice import PrinterOutputDevice, ConnectionState
from PyQt5.QtNetwork import QHttpMultiPart, QHttpPart, QNetworkRequest, QNetworkAccessManager, QNetworkReply
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, pyqtSignal, QUrl, QCoreApplication
from time import time
from typing import Callable, Any, Optional, Dict, Tuple
from enum import IntEnum
from typing import List
import os # To get the username
import gzip
class AuthState(IntEnum):
    """States of the authentication handshake between Cura and a networked printer.

    Stored in NetworkedPrinterOutputDevice._authentication_state and exposed to
    QML as a plain int via the authenticationState property.
    """
    NotAuthenticated = 1        # No authentication has been attempted yet.
    AuthenticationRequested = 2 # A request for credentials has been sent to the printer.
    Authenticated = 3           # The printer accepted our credentials.
    AuthenticationDenied = 4    # The printer rejected our credentials.
    AuthenticationReceived = 5  # A response to the authentication request has arrived.
class NetworkedPrinterOutputDevice(PrinterOutputDevice):
authenticationStateChanged = pyqtSignal()
    def __init__(self, device_id, address: str, properties, parent = None) -> None:
        """Set up HTTP bookkeeping, timeout tracking and printer-type detection.

        :param device_id: Unique identifier passed through to the base output device.
        :param address: Network address (IP or host name) of the printer.
        :param properties: Mapping of byte-string keys to byte-string values
            describing the printer; the ``b"machine"`` entry is used to detect
            the printer type by its numeric prefix.
        :param parent: Optional parent object for the base class.
        """
        super().__init__(device_id = device_id, parent = parent)
        # The QNetworkAccessManager is created lazily and recreated after long
        # silences (see _update / _createNetworkManager).
        self._manager = None  # type: QNetworkAccessManager
        self._last_manager_create_time = None  # type: float
        self._recreate_network_manager_time = 30  # Seconds of silence before the manager is recreated.
        self._timeout_time = 10  # After how many seconds of no response should a timeout occur?
        self._last_response_time = None  # type: float
        self._last_request_time = None  # type: float
        self._api_prefix = ""  # Prepended to every request target by _createEmptyRequest.
        self._address = address
        self._properties = properties
        self._user_agent = "%s/%s " % (Application.getInstance().getApplicationName(), Application.getInstance().getVersion())
        # Finished-callbacks keyed by "url + operation" (see _registerOnFinishedCallback).
        self._onFinishedCallbacks = {}  # type: Dict[str, Callable[[QNetworkReply], None]]
        self._authentication_state = AuthState.NotAuthenticated
        # QHttpMultiPart objects need to be kept alive and not garbage collected during the
        # HTTP request which uses them. We hold references to these QHttpMultiPart objects here.
        self._kept_alive_multiparts = {}  # type: Dict[QNetworkReply, QHttpMultiPart]
        self._sending_gcode = False
        self._compressing_gcode = False  # Cleared externally to abort _compressGCode.
        self._gcode = []  # type: List[str]
        # Remembers the connection state before a timeout so _update can restore it.
        self._connection_state_before_timeout = None  # type: Optional[ConnectionState]
        # Detect the printer type from the numeric prefix of the advertised machine id.
        printer_type = self._properties.get(b"machine", b"").decode("utf-8")
        printer_type_identifiers = {
            "9066": "ultimaker3",
            "9511": "ultimaker3_extended"
        }
        self._printer_type = "Unknown"
        for key, value in printer_type_identifiers.items():
            if printer_type.startswith(key):
                self._printer_type = value
                break
    def requestWrite(self, nodes, file_name=None, filter_by_machine=False, file_handler=None, **kwargs) -> None:
        """Request this device to write (print) the given scene nodes.

        Abstract: concrete networked output devices must override this.

        :raises NotImplementedError: Always, in this base implementation.
        """
        raise NotImplementedError("requestWrite needs to be implemented")
def setAuthenticationState(self, authentication_state) -> None:
if self._authentication_state != authentication_state:
self._authentication_state = authentication_state
self.authenticationStateChanged.emit()
    @pyqtProperty(int, notify=authenticationStateChanged)
    def authenticationState(self) -> int:
        """The current AuthState, exposed to QML as an int (notify: authenticationStateChanged)."""
        return self._authentication_state
    def _compressDataAndNotifyQt(self, data_to_append: str) -> bytes:
        """Gzip-compress one batch of g-code text while keeping the GUI responsive.

        :param data_to_append: Text to compress (encoded as UTF-8 before gzip).
        :return: The gzip-compressed bytes.
        """
        compressed_data = gzip.compress(data_to_append.encode("utf-8"))
        self._progress_message.setProgress(-1)  # Tickle the message so that it's clear that it's still being used.
        QCoreApplication.processEvents()  # Ensure that the GUI does not freeze.
        # Pretend that this is a response, as zipping might take a bit of time.
        # If we don't do this, the device might trigger a timeout.
        self._last_response_time = time()
        return compressed_data
    def _compressGCode(self) -> Optional[bytes]:
        """Compress the buffered g-code (self._gcode) for network transfer.

        Lines are gzipped in batches of roughly 1/4 MB each to keep the GUI
        responsive; the compressed batches are concatenated into one byte string.

        :return: The compressed g-code, or None if the operation was aborted
            (self._compressing_gcode cleared externally) while compressing.
        """
        self._compressing_gcode = True
        ## Mash the data into single string
        max_chars_per_line = int(1024 * 1024 / 4) # 1/4 MB per batch.
        file_data_bytes_list = []
        batched_lines = []
        batched_lines_count = 0  # Number of characters accumulated in the current batch.
        for line in self._gcode:
            if not self._compressing_gcode:
                self._progress_message.hide()
                # Stop trying to zip / send as abort was called.
                return None
            # if the gcode was read from a gcode file, self._gcode will be a list of all lines in that file.
            # Compressing line by line in this case is extremely slow, so we need to batch them.
            batched_lines.append(line)
            batched_lines_count += len(line)
            if batched_lines_count >= max_chars_per_line:
                file_data_bytes_list.append(self._compressDataAndNotifyQt("".join(batched_lines)))
                batched_lines = []
                batched_lines_count = 0
        # Don't miss the last (partial) batch, if any.
        if len(batched_lines) != 0:
            file_data_bytes_list.append(self._compressDataAndNotifyQt("".join(batched_lines)))
        self._compressing_gcode = False
        return b"".join(file_data_bytes_list)
    def _update(self) -> bool:
        """Periodic tick: detect request timeouts and manage the network manager's lifetime.

        If no response arrived within self._timeout_time since the last request,
        the connection is marked closed (the previous state is remembered so it
        can be restored when responses resume). After an even longer silence the
        QNetworkAccessManager is recreated, which works around stale managers
        after e.g. OSX sleep.

        :return: Always True (so the caller keeps scheduling updates).
        """
        if self._last_response_time:
            time_since_last_response = time() - self._last_response_time
        else:
            time_since_last_response = 0
        if self._last_request_time:
            time_since_last_request = time() - self._last_request_time
        else:
            time_since_last_request = float("inf")  # An irrelevantly large number of seconds
        if time_since_last_response > self._timeout_time >= time_since_last_request:
            # Go (or stay) into timeout.
            if self._connection_state_before_timeout is None:
                self._connection_state_before_timeout = self._connection_state
            self.setConnectionState(ConnectionState.closed)
            # We need to check if the manager needs to be re-created. If we don't, we get some issues when OSX goes to
            # sleep.
            if time_since_last_response > self._recreate_network_manager_time:
                if self._last_manager_create_time is None:
                    self._createNetworkManager()
                if time() - self._last_manager_create_time > self._recreate_network_manager_time:
                    self._createNetworkManager()
        elif self._connection_state == ConnectionState.closed:
            # Go out of timeout: restore the state we were in before the timeout hit.
            self.setConnectionState(self._connection_state_before_timeout)
            self._connection_state_before_timeout = None
        return True
def _createEmptyRequest(self, target, content_type: Optional[str] = "application/json") -> QNetworkRequest:
url = QUrl("http://" + self._address + self._api_prefix + target)
request = QNetworkRequest(url)
if content_type is not None:
request.setHeader(QNetworkRequest.ContentTypeHeader, "application/json")
request.setHeader(QNetworkRequest.UserAgentHeader, self._user_agent)
return request
def _createFormPart(self, content_header, data, content_type = None) -> QHttpPart:
part = QHttpPart()
if not content_header.startswith("form-data;"):
content_header = "form_data; " + content_header
part.setHeader(QNetworkRequest.ContentDispositionHeader, content_header)
if content_type is not None:
part.setHeader(QNetworkRequest.ContentTypeHeader, content_type)
part.setBody(data)
return part
## Convenience function to get the username from the OS.
# The code was copied from the getpass module, as we try to use as little dependencies as possible.
def _getUserName(self) -> str:
for name in ("LOGNAME", "USER", "LNAME", "USERNAME"):
user = os.environ.get(name)
if user:
return user
return "Unknown User" # Couldn't find out username.
def _clearCachedMultiPart(self, reply: QNetworkReply) -> None:
if reply in self._kept_alive_multiparts:
del self._kept_alive_multiparts[reply]
def put(self, target: str, data: str, onFinished: Optional[Callable[[Any, QNetworkReply], None]]) -> None:
if self._manager is None:
self._createNetworkManager()
request = self._createEmptyRequest(target)
self._last_request_time = time()
reply = self._manager.put(request, data.encode())
self._registerOnFinishedCallback(reply, onFinished)
def get(self, target: str, onFinished: Optional[Callable[[Any, QNetworkReply], None]]) -> None:
if self._manager is None:
self._createNetworkManager()
request = self._createEmptyRequest(target)
self._last_request_time = time()
reply = self._manager.get(request)
self._registerOnFinishedCallback(reply, onFinished)
def post(self, target: str, data: str, onFinished: Optional[Callable[[Any, QNetworkReply], None]], onProgress: Callable = None) -> None:
if self._manager is None:
self._createNetworkManager()
request = self._createEmptyRequest(target)
self._last_request_time = time()
reply = self._manager.post(request, data)
if onProgress is not None:
reply.uploadProgress.connect(onProgress)
self._registerOnFinishedCallback(reply, onFinished)
def postFormWithParts(self, target:str, parts: List[QHttpPart], onFinished: Optional[Callable[[Any, QNetworkReply], None]], onProgress: Callable = None) -> None:
if self._manager is None:
self._createNetworkManager()
request = self._createEmptyRequest(target, content_type=None)
multi_post_part = QHttpMultiPart(QHttpMultiPart.FormDataType)
for part in parts:
multi_post_part.append(part)
self._last_request_time = time()
reply = self._manager.post(request, multi_post_part)
self._kept_alive_multiparts[reply] = multi_post_part
if onProgress is not None:
reply.uploadProgress.connect(onProgress)
self._registerOnFinishedCallback(reply, onFinished)
return reply
def postForm(self, target: str, header_data: str, body_data: bytes, onFinished: Optional[Callable[[Any, QNetworkReply], None]], onProgress: Callable = None) -> None:
post_part = QHttpPart()
post_part.setHeader(QNetworkRequest.ContentDispositionHeader, header_data)
post_part.setBody(body_data)
self.postFormWithParts(target, [post_part], onFinished, onProgress)
    def _onAuthenticationRequired(self, reply, authenticator) -> None:
        """Slot for QNetworkAccessManager.authenticationRequired; auth is
        intentionally not implemented here, so only log a warning."""
        Logger.log("w", "Request to {url} required authentication, which was not implemented".format(url = reply.url().toString()))
    def _createNetworkManager(self) -> None:
        """(Re)create the QNetworkAccessManager, disconnecting signals from
        any previous instance first to avoid duplicate callbacks."""
        Logger.log("d", "Creating network manager")
        if self._manager:
            self._manager.finished.disconnect(self.__handleOnFinished)
            self._manager.authenticationRequired.disconnect(self._onAuthenticationRequired)
        self._manager = QNetworkAccessManager()
        self._manager.finished.connect(self.__handleOnFinished)
        self._last_manager_create_time = time()
        self._manager.authenticationRequired.connect(self._onAuthenticationRequired)
        # Side effect: re-validate the printer group name whenever the
        # manager is (re)created.
        machine_manager = CuraApplication.getInstance().getMachineManager()
        machine_manager.checkCorrectGroupName(self.getId(), self.name)
    def _registerOnFinishedCallback(self, reply: QNetworkReply, onFinished: Optional[Callable[[Any, QNetworkReply], None]]) -> None:
        """Remember ``onFinished`` keyed on URL + HTTP operation so
        __handleOnFinished can dispatch to it later.
        NOTE(review): two in-flight requests to the same URL/operation share
        one slot; the later registration overwrites the earlier — confirm
        callers never rely on both."""
        if onFinished is not None:
            self._onFinishedCallbacks[reply.url().toString() + str(reply.operation())] = onFinished
    def __handleOnFinished(self, reply: QNetworkReply) -> None:
        """Central finished-slot: clean cached multiparts, track response
        time/connection state, then dispatch the registered callback."""
        # Due to garbage collection, we need to cache certain bits of post operations.
        # As we don't want to keep them around forever, delete them if we get a reply.
        if reply.operation() == QNetworkAccessManager.PostOperation:
            self._clearCachedMultiPart(reply)
        if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) is None:
            # No status code means it never even reached remote.
            return
        self._last_response_time = time()
        if self._connection_state == ConnectionState.connecting:
            self.setConnectionState(ConnectionState.connected)
        callback_key = reply.url().toString() + str(reply.operation())
        try:
            if callback_key in self._onFinishedCallbacks:
                self._onFinishedCallbacks[callback_key](reply)
        except Exception:
            # Callbacks are user code; never let them break reply handling.
            Logger.logException("w", "something went wrong with callback")
@pyqtSlot(str, result=str)
def getProperty(self, key: str) -> str:
bytes_key = key.encode("utf-8")
if bytes_key in self._properties:
return self._properties.get(bytes_key, b"").decode("utf-8")
else:
return ""
    def getProperties(self):
        """Return the raw property mapping (bytes keys/values), unconverted."""
        return self._properties
    ## Get the unique key of this machine
    #  \return key String containing the key of the machine.
    @pyqtProperty(str, constant=True)
    def key(self) -> str:
        return self._id
    ## The IP address of the printer, as stored in the ZeroConf properties
    #  (b"address" key); empty string when not advertised.
    @pyqtProperty(str, constant=True)
    def address(self) -> str:
        return self._properties.get(b"address", b"").decode("utf-8")
    ## Name of the printer (as returned from the ZeroConf properties).
    @pyqtProperty(str, constant=True)
    def name(self) -> str:
        return self._properties.get(b"name", b"").decode("utf-8")
    ## Firmware version (as returned from the ZeroConf properties).
    @pyqtProperty(str, constant=True)
    def firmwareVersion(self) -> str:
        return self._properties.get(b"firmware_version", b"").decode("utf-8")
    ## Machine type identifier of this printer.
    @pyqtProperty(str, constant=True)
    def printerType(self) -> str:
        return self._printer_type
    ## IP address of this printer, from the _address attribute set at
    #  construction time (unlike address(), which reads the properties dict
    #  -- presumably the two agree; verify against the constructor).
    @pyqtProperty(str, constant=True)
    def ipAddress(self) -> str:
        return self._address
| criscola/G-Gen | misc/zip/Cura-master/cura/PrinterOutput/NetworkedPrinterOutputDevice.py | NetworkedPrinterOutputDevice.py | py | 14,072 | python | en | code | 1 | github-code | 36 |
24304392906 | import numpy
import matplotlib
import matplotlib.pyplot as plt
from scipy.spatial import distance
from copy import deepcopy
filename1 = 'normal.txt'
filename2 = 'unbalanced.txt'
def readData(filename):
    """Parse a whitespace-separated two-column file into [[x...], [y...]]."""
    xs = []
    ys = []
    with open(filename, 'r') as handle:
        for row in handle:
            first, second = row.split()
            xs.append(float(first))
            ys.append(float(second))
    return [xs, ys]
def chooseCentroids(k, points=None):
    """Pick k random data points as the initial centroids.

    Args:
        k: number of centroids to draw (drawn with replacement).
        points: optional dataset as [[x1,...,xn], [y1,...,yn]]. Defaults to
            the module-level ``data`` for backward compatibility with the
            original global-based implementation.

    Returns:
        [[cx1,...,cxk], [cy1,...,cyk]] — coordinates of the chosen centroids.
    """
    if points is None:
        points = data  # legacy behaviour: fall back to the global dataset
    indices = numpy.random.randint(0, len(points[0]), size=k)
    centroidX = [points[0][i] for i in indices]
    centroidY = [points[1][i] for i in indices]
    return [centroidX, centroidY]
def computeError(current, previous):
    """Sum of Euclidean distances between matching centroid positions.

    Both arguments are centroid sets shaped [[x1,...,xk], [y1,...,yk]].

    Bug fix: the original iterated ``range(len(current))`` which is always 2
    (the two coordinate axes), so movement of any centroid beyond the second
    was silently ignored; iterate over the k centroids instead.
    """
    errorSum = 0
    for i in range(len(current[0])):
        point0 = [current[0][i], current[1][i]]
        point1 = [previous[0][i], previous[1][i]]
        errorSum = errorSum + distance.euclidean(point0, point1)
    return errorSum
def kMeans(data, k):
    """Run Lloyd's k-means on 2-D data until centroids stop moving.

    Args:
        data: [[x1,...,xn], [y1,...,yn]] point coordinates.
        k: number of clusters.

    Returns:
        (centroids, clusters) — final centroid coordinates and the
        per-cluster point lists.

    NOTE(review): this function is Python-2 era code. Under Python 3,
    ``map(float, ...)`` below yields map objects, so ``len(centroids[0])``
    and later indexing would fail — confirm the intended interpreter.
    NOTE(review): ``clusters.append(cluster)`` appends the same list object
    every iteration (``cluster`` is never rebound), and ``centroidNumbers``
    is never cleared between while-iterations — both look like latent bugs;
    left untouched here.
    """
    #Step 1 - Picking K random points as cluster centers,i.e centroids.
    #centroids = [[x1,..,xk], [y1,..yk]], xi,yi - centroids coordinates
    centroids = [map(float, coord) for coord in chooseCentroids(k)]
    #plt.scatter(data[0], data[1])
    #plt.scatter(centroids[0], centroids[1] , c='r')
    #plt.show()
    #prevCentroids will be used for saving centroids coordinates before
    #choosing another ones
    x = [0]
    y = [0]
    for i in range(len(centroids[0]) - 1):
        x.append(0)
        y.append(0)
    prevCentroids = [x, y]
    centroidNumbers = []
    error = computeError(centroids, prevCentroids)
    while error != 0:
        #Step 2 - Assigning each point to nearest cluster by calculating its distance to
        #each centroid.
        centrN = 0
        #data[0] = [x1,...,xn]
        #data[1] = [y1,...,yn]
        #for each point [x, y] in data - compute Euclidean distance between
        #the point and each of centroid points. Then for each point find
        #to which centroid the distance is minimal.
        #In centroidNumbers each element represents the number of the centroid
        #to which corresponding point is closest, i.e:
        #centroidNumbers[c0=1, c1=0,..., cn=2] means that first point in data
        #is closest to centroid number 1, second point in data is closest to
        # centroid number 0 and so on.
        for pointN in range(len(data[0])):
            point = [data[0][pointN], data[1][pointN]]
            centroid = [centroids[0][0], centroids[1][0]]
            minDist = distance.euclidean(point, centroid)
            for i in range(1, k):
                centroid = [centroids[0][i], centroids[1][i]]
                currDist = distance.euclidean(point, centroid)
                if minDist > currDist:
                    minDist = currDist
                    centrN = i
            centroidNumbers.append(centrN)
            centrN = 0
        #copying old centroid coordinates in prevCentroids
        prevCentroids = deepcopy(centroids)
        #Step 3 - Finding new centroids by taking the average of the assigned points.
        x = []
        y = []
        #cluster = [[x1,...,xn], [y1,...,yn]]
        cluster = [x, y]
        #points in cluster0            #points in cluster1
        #clusters = [[[xk0,..], [yk0,...]], [[xk1,...], [yk1,...]],...]
        clusters = []
        for clustN in range(0, k):
            for pointN in range(len(data[0])):
                if clustN == centroidNumbers[pointN]:
                    x.append(data[0][pointN])
                    y.append(data[1][pointN])
            centroids[0][clustN] = numpy.mean(x)
            centroids[1][clustN] = numpy.mean(y)
            clusters.append(cluster)
            x = []
            y = []
        error = computeError(centroids, prevCentroids)
    #Step 4 - Repeat Step 2 and 3.
    return centroids, clusters
# Script entry: cluster the "normal" dataset into 3 groups and plot them.
# NOTE(review): 'r' appears twice in colors, so clusters 0 and 2 draw in the
# same colour — presumably unintended; confirm before changing.
colors = ['r', 'g', 'r']
#data = 2-dimensional array coords=[[x1,...,xn], [y1,...,yn]]
# NOTE(review): under Python 3 these map() objects are lazy iterators and
# would break the indexing in kMeans(); this script looks Python-2 only.
data = [map(float, coord) for coord in readData(filename1)]
#data = [map(float, coord) for coord in readData(filename2)]
cluster_centers, ac = kMeans(data, 3)
fig, ax = plt.subplots()
for i in range(3):
    ax.scatter(ac[i][0], ac[i][1], c=colors[i])
    ax.scatter(cluster_centers[0],cluster_centers[1], s=100, c='black')
plt.plot(ax)
| nyuseinova/K-Means-Clustering | kMeans.py | kMeans.py | py | 3,806 | python | en | code | 0 | github-code | 36 |
72514606824 | import logging
import os
import sys
import tempfile
import warnings
from contextlib import contextmanager
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from itertools import chain
from subprocess import CalledProcessError
from urllib.parse import urljoin, urlsplit, urlunsplit
import requests
from yaml import load
log = logging.getLogger('keystone_light')
log.addHandler(logging.NullHandler())
# ======================================================================
# OpenStack Keystone
# ----------------------------------------------------------------------
# cloud = Cloud(CloudsYamlConfig('cloudX')) # or:
# cloud = Cloud(DirectConfig('https://domain:user:pass@...'))
# ======================================================================
class PermissionDenied(Exception):
    """Raised when a Keystone/Swift HTTP call is rejected.

    args carries (method, url, status_code, response_body).
    """

    def __init__(self, method, url, status_code, response):
        super().__init__(method, url, status_code, response)
class ObjectNotFound(Exception):
    """Raised by the_one_entry() when a lookup matches nothing."""
    pass
class MultipleObjectsFound(Exception):
    """Raised by the_one_entry() when a lookup matches more than one object."""
    pass
def the_one_entry(list_, type_, params):
    """Return the single element of ``list_``.

    ``type_`` and ``params`` only feed the error messages. Raises
    ObjectNotFound on an empty list and MultipleObjectsFound when more than
    one entry matched.
    """
    count = len(list_)
    if count == 1:
        return list_[0]
    if count == 0:
        raise ObjectNotFound(
            'lookup of {} with params {} yielded nothing'.format(
                type_, params))
    raise MultipleObjectsFound(
        'lookup of {} with params {} yielded multiple results: {}'.format(
            type_, params, list_))
class CloudsYamlConfig:
    """
    Reads ~/.config/openstack/clouds.yaml and selects one
    Example file contents::
        clouds:
          # v-- this would be the selected os_cloud='my-cloud-admin'
          my-cloud-admin:
            auth:
              auth_url: https://KEYSTONE/
              system_scope: all
              user_domain_name: DOMAIN
              username: USERNAME
              password: PASSWORD
            identity_api_version: 3
            region_name: NL1
    """
    def __init__(self, os_cloud):
        # Security fix: a plain yaml.load() without an explicit Loader can
        # instantiate arbitrary Python objects from the config file; use
        # SafeLoader, which only builds plain YAML scalars/collections.
        from yaml import SafeLoader
        with open(os.path.expanduser('~/.config/openstack/clouds.yaml')) as fp:
            clouds_yaml = load(fp.read(), Loader=SafeLoader)
        self.user_info = clouds_yaml['clouds'][os_cloud]
        assert self.user_info['identity_api_version'] == 3, self.user_info

    def get_auth_url(self):
        """Return the Keystone auth endpoint for the selected cloud."""
        return self.user_info['auth']['auth_url']

    def as_user_password(self):
        """Return the v3 password-auth payload for the selected cloud."""
        auth = self.user_info['auth']
        password = {
            'user': {
                'name': auth['username'],
                'domain': {
                    'name': auth['user_domain_name'],
                },
                'password': auth['password'],
            },
        }
        return password

    def __str__(self):
        # NOTE: includes the password; avoid logging this.
        return str(self.user_info)
class CloudConfig(CloudsYamlConfig):
    """
    Old name for CloudsYamlConfig

    Kept only for backward compatibility; emits a DeprecationWarning and
    otherwise behaves exactly like CloudsYamlConfig.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'CloudConfig is deprecated, please use CloudsYamlConfig',
            DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
class DirectConfig:
    """
    Credential source built directly from a URL of the form
    https://<DOMAIN>:<USER>:<PASS>@KEYSTONE (no clouds.yaml needed).
    """

    def __init__(self, auth_url):
        split = urlsplit(auth_url)
        # Rebuild the URL without the userinfo part.
        if split.port:
            netloc = '{}:{}'.format(split.hostname, split.port)
        else:
            netloc = split.hostname
        self._auth_url = urlunsplit(
            (split.scheme, netloc, split.path, split.query, split.fragment))
        # urlsplit puts everything before the first ':' in username (the
        # domain) and the rest in password ("<user>:<pass>").
        domain = split.username
        rest = split.password
        assert ':' not in domain, domain
        assert ':' in rest, 'expected <domain>:<user>:<pass>'
        self._user_domain_name = domain
        self._username, self._password = rest.split(':', 1)

    def get_auth_url(self):
        """Return the auth URL with the credentials stripped."""
        return self._auth_url

    def as_user_password(self):
        """Return the v3 password-auth payload for this user."""
        return {'user': {
            'name': self._username,
            'domain': {'name': self._user_domain_name},
            'password': self._password,
        }}
class CloudToken:
    """A Keystone v3 token, self-renewing shortly before expiry.

    Construct with exactly one of:
      * ``cloud_config`` — authenticate by username/password, or
      * ``unscoped_token`` — exchange an existing token for a (re)scoped one.
    ``scope`` is an optional v3 auth scope dict (system/domain/project).
    str(token) yields the token string, renewing it first if it expires
    within two minutes.
    """
    def __init__(self, unscoped_token=None, cloud_config=None, scope=None):
        self.unscoped_token = unscoped_token
        self.cloud_config = cloud_config
        self.scope = scope
        # Filled in by renew():
        self.base_url = None      # Keystone base URL the token came from
        self.data = None          # the "token" object from the response body
        self.expires_at = None    # aware datetime, or None if not provided
        self.token = None         # the X-Subject-Token header value
        self.renew()
    def renew(self):
        """POST /v3/auth/tokens and refresh token/data/expires_at.

        Raises PermissionDenied on 401; TypeError when neither credential
        source was supplied.
        """
        if self.unscoped_token:
            assert not self.cloud_config
            base_url = self.unscoped_token.base_url
            post_data = {
                'auth': {
                    'identity': {
                        'methods': ['token'],
                        'token': {
                            'id': str(self.unscoped_token),
                        },
                    },
                },
            }
        elif self.cloud_config:
            assert not self.unscoped_token
            base_url = self.cloud_config.get_auth_url()
            post_data = {
                'auth': {
                    'identity': {
                        'methods': ['password'],
                        'password': self.cloud_config.as_user_password(),
                    },
                },
            }
        else:
            raise TypeError('expect unscoped_token OR cloud_config')
        if self.scope:
            post_data['auth']['scope'] = self.scope
        # Optional "?nocatalog", but then we won't get the catalog,
        # which we need for project endpoints.
        url = urljoin(base_url, '/v3/auth/tokens')
        headers = {}
        if self.unscoped_token:
            headers['X-Auth-Token'] = str(self.unscoped_token)
        out = requests.post(url, json=post_data, headers=headers)
        if out.status_code == 401:
            raise PermissionDenied('POST', url, out.status_code, out.text)
        try:
            assert out.status_code == 201
            out_token = out.headers['X-Subject-Token']
            out_data = out.json()
        except (AssertionError, KeyError):
            # FIXME: auth leak to logging in case of errors.
            log.debug(out)
            log.debug(out.headers)
            log.debug(out.content)
            raise
        self.base_url = base_url
        self.data = out_data.pop('token')
        expires_at = self.data.get('expires_at')
        if expires_at is not None:
            # Keystone emits a trailing 'Z'; datetime.fromisoformat (pre-3.11)
            # only accepts an explicit offset.
            if expires_at.endswith('Z'):
                expires_at = expires_at[:-1] + '+00:00'
            expires_at = datetime.fromisoformat(expires_at)
        self.expires_at = expires_at
        assert not out_data, out_data
        self.token = out_token
    def will_expire_within(self, **kwargs):
        """True when the token expires within timedelta(**kwargs) from now.

        A token without an expires_at never reports as expiring.
        """
        if self.expires_at is not None:
            utcnow = datetime.now(timezone.utc)
            if utcnow + timedelta(**kwargs) > self.expires_at:
                return True
        return False
    def __str__(self):
        # Renew just-in-time so callers can hold a CloudToken long-term and
        # always get a valid header value out of str().
        if self.will_expire_within(minutes=2):
            log.debug('token will expire at %s, force renew', self.expires_at)
            self.renew()
        return self.token
class Cloud:
    """Entry point to one OpenStack cloud.

    Wraps a credential config (CloudsYamlConfig/DirectConfig) and lazily
    fetches and caches tokens, domains, groups, projects, roles and service
    endpoints. All ``get_*`` listings are cached for the object's lifetime.
    """
    def __init__(self, cloud_config):
        self.base_url = cloud_config.get_auth_url()
        self.cloud_config = cloud_config
        # Token caches: one unscoped, one system-scoped, and per-id maps
        # for domain- and project-scoped tokens.
        self._unscoped_token = None
        self._system_token = None
        self._domain_tokens = {}
        self._project_tokens = {}
        # (type, name, scope_kind, scope_id) -> endpoint list, filled as a
        # side effect of fetching scoped tokens (see update_endpoints).
        self._endpoints = {}
        self._domains = {}
    def get_roles(self):
        """List all roles (SYSTEM scope); cached after the first call."""
        if not hasattr(self, '_get_roles'):
            system_token = self.get_system_token()
            url = urljoin(self.base_url, '/v3/roles')
            out = requests.get(
                url=url, headers={'X-Auth-Token': str(system_token)})
            self._get_roles = [
                Role.from_keystone(i, cloud=self)
                for i in out.json()['roles']]
        return self._get_roles
    def get_role(self, name=None):
        """Return the single role matching ``name`` (or the only role)."""
        roles = self.get_roles()
        if name is not None:
            roles = [i for i in roles if i.name == name]
        return the_one_entry(roles, 'role', dict(name=name))
    def get_domains(self):
        """
        Get domains from SYSTEM scope
        """
        if not hasattr(self, '_get_domains'):
            system_token = self.get_system_token()
            url = urljoin(self.base_url, '/v3/domains')
            out = requests.get(
                url=url, headers={'X-Auth-Token': str(system_token)})
            for data in out.json()['domains']:
                if data['id'] not in self._domains:
                    self._domains[data['id']] = Domain.from_keystone(
                        data, cloud=self)
            self._get_domains = self._domains.values()
        return self._get_domains
    def get_domain(self, name=None, domain_id=None):
        """
        Get domains by name or id
        """
        # If we have it in cache, return immediately, or create one if
        # we have all the values.
        if domain_id in self._domains:
            return self._domains[domain_id]
        if name and domain_id:
            ret = Domain(name=name, id=domain_id, enabled=True)
            ret.cloud = self
            self._domains[domain_id] = ret
            return ret
        # Otherwise, fetch the SYSTEM domains and filter by args.
        domains = self.get_domains()
        if name is not None:
            domains = [i for i in domains if i.name == name]
        if domain_id is not None:
            domains = [i for i in domains if i.id == domain_id]
        return the_one_entry(
            domains, 'domain', dict(name=name, domain_id=domain_id))
    def get_groups(self, domain_id=None):
        """List all groups (SYSTEM scope), optionally for one domain."""
        if not hasattr(self, '_get_groups'):
            system_token = self.get_system_token()
            url = urljoin(self.base_url, '/v3/groups')
            out = requests.get(
                url=url, headers={'X-Auth-Token': str(system_token)})
            groups = [
                Group.from_keystone(i, cloud=self)
                for i in out.json()['groups']]
            groups_by_domain = defaultdict(list)
            for group in groups:
                groups_by_domain[group.domain_id].append(group)
            self._get_groups = groups_by_domain
        if domain_id:
            return self._get_groups[domain_id]
        return list(chain(*self._get_groups.values()))
    def get_group(self, name=None, domain_id=None):
        """Return the single group matching the given name/domain filters."""
        groups = self.get_groups()
        if name is not None:
            groups = [i for i in groups if i.name == name]
        if domain_id is not None:
            groups = [i for i in groups if i.domain_id == domain_id]
        return the_one_entry(
            groups, 'group', dict(name=name, domain_id=domain_id))
    def get_projects(self, domain_id=None):
        """
        Get projects from SYSTEM scope
        """
        if not hasattr(self, '_get_projects'):
            system_token = self.get_system_token()
            url = urljoin(self.base_url, '/v3/projects')
            out = requests.get(
                url=url, headers={'X-Auth-Token': str(system_token)})
            projects = [
                Project.from_keystone(i, cloud=self)
                for i in out.json()['projects']]
            projects_by_domain = defaultdict(list)
            for project in projects:
                projects_by_domain[project.domain_id].append(project)
            self._get_projects = projects_by_domain
        if domain_id:
            return self._get_projects[domain_id]
        return list(chain(*self._get_projects.values()))
    def get_current_project(self):
        """
        Get CURRENT project that belongs to this user
        """
        if not hasattr(self, '_get_current_project'):
            # We expect this in the unscoped_token.data:
            # "project": {
            #     "name": "x", "domain": {"name": "x", "id": "abc123"},
            #     "id": "abc123"}
            data = self.get_unscoped_token().data
            keystone_dict = {
                'id': data['project']['id'],
                'name': data['project']['name'],
                'enabled': True,
                'is_domain': data['is_domain'],  # not on project...?
                'domain_id': data['project']['domain']['id'],
            }
            self.get_domain(  # the get_domain() creates one in the cache
                name=data['project']['domain']['name'],
                domain_id=data['project']['domain']['id'])
            project = Project.from_keystone(keystone_dict, cloud=self)
            self._get_current_project = project
        return self._get_current_project
    def get_unscoped_token(self):
        """Password-authenticated token without a scope; cached."""
        if not self._unscoped_token:
            self._unscoped_token = CloudToken(cloud_config=self.cloud_config)
        return self._unscoped_token
    def get_system_token(self):
        """Token scoped to system/all; also records its catalog endpoints."""
        if not self._system_token:
            system_scope = {'system': {'all': True}}
            unscoped_token = self.get_unscoped_token()
            self._system_token = CloudToken(
                unscoped_token=unscoped_token, scope=system_scope)
            for catalog_row in self._system_token.data.get('catalog', []):
                type_, name = catalog_row['type'], catalog_row['name']
                self.update_endpoints(
                    (type_, name, 'system', 'all'),
                    catalog_row['endpoints'])
        return self._system_token
    def get_domain_token(self, domain_id):
        """Token scoped to one domain; cached per domain_id."""
        if domain_id not in self._domain_tokens:
            domain_scope = {'domain': {'id': domain_id}}
            unscoped_token = self.get_unscoped_token()
            domain_token = CloudToken(
                unscoped_token=unscoped_token, scope=domain_scope)
            for catalog_row in domain_token.data.get('catalog', []):
                type_, name = catalog_row['type'], catalog_row['name']
                self.update_endpoints(
                    (type_, name, 'domain', domain_id),
                    catalog_row['endpoints'])
            self._domain_tokens[domain_id] = domain_token
        return self._domain_tokens[domain_id]
    def get_project_token(self, project_id):
        """Token scoped to one project; cached per project_id."""
        if project_id not in self._project_tokens:
            project_scope = {'project': {'id': project_id}}
            unscoped_token = self.get_unscoped_token()
            project_token = CloudToken(
                unscoped_token=unscoped_token, scope=project_scope)
            for catalog_row in project_token.data.get('catalog', []):
                type_, name = catalog_row['type'], catalog_row['name']
                self.update_endpoints(
                    (type_, name, 'project', project_id),
                    catalog_row['endpoints'])
            self._project_tokens[project_id] = project_token
        return self._project_tokens[project_id]
    def update_endpoints(self, key, endpoints):
        """Record catalog endpoints under ``key``; a key may be set once."""
        # endpoints = [{"id": "c3f2..", "interface": "public",
        #  "region_id": "NL1", "url": "https://KEYSTONE/v3/", "region": "NL1"}]
        assert key not in self._endpoints, (key, self._endpoints)
        # print('<endpoints>', key, endpoints)
        self._endpoints[key] = endpoints
class Role:
    """A Keystone role (e.g. "admin")."""

    def __init__(self, name, id, domain_id):
        self.name = name
        self.id = id
        self.domain_id = domain_id

    @classmethod
    def from_keystone(cls, data, cloud):
        """Build a Role from a /v3/roles JSON entry and attach its cloud."""
        # data = {"id": "7931..", "name": "admin",
        #   "domain_id": None, "description": None, "options": {},
        #   "links": {"self": "http://KEYSTONE/v3/roles/7931.."}}
        role = cls(
            name=data['name'], id=data['id'], domain_id=data['domain_id'])
        role.cloud = cloud
        return role

    def __repr__(self):
        return '<Role({})>'.format(self.name)
class Domain:
    """A Keystone domain; normally obtained through Cloud.get_domain(s)."""

    def __init__(self, name, id, enabled):
        self.name = name
        self.id = id
        self.enabled = enabled

    @classmethod
    def from_keystone(cls, data, cloud):
        """Build a Domain from a /v3/domains JSON entry."""
        # data = {"id": "b49d...", "name": "DOMAIN", "description": "",
        #   "enabled": True, "tags": [], "options": {},
        #   "links": {"self": "http://KEYSTONE/v3/domains/b49d..."}}
        domain = cls(name=data['name'], id=data['id'], enabled=data['enabled'])
        domain.cloud = cloud
        return domain

    def get_admin_group(self):
        """
        WARNING: This is a configuration choice. We choose to have an
        admin group named DOMAIN-admins. This group should exist.
        """
        wanted = '{}-admins'.format(self.name)
        matches = [grp for grp in self.get_groups() if grp.name == wanted]
        if len(matches) != 1:
            raise ValueError(
                'expected a single {o.name}-admins group '
                'in domain {o.name} [domain_id={o.id}]'.format(o=self))
        return matches[0]

    def get_groups(self):
        """All groups in this domain (fetched via the cloud's cache)."""
        return self.cloud.get_groups(domain_id=self.id)

    def get_projects(self):
        """All projects in this domain (fetched via the cloud's cache)."""
        return self.cloud.get_projects(domain_id=self.id)

    def __repr__(self):
        return '<Domain({})>'.format(self.name)
class Group:
    """A Keystone user group, belonging to one domain."""

    def __init__(self, name, id, domain_id):
        self.name = name
        self.id = id
        self.domain_id = domain_id

    @classmethod
    def from_keystone(cls, data, cloud):
        """Build a Group from a /v3/groups JSON entry."""
        # data = {"id": "19d9..", "name": "admins", "domain_id": "default",
        #   "description": "",
        #   "links": {"self": "http://KEYSTONE/v3/groups/19d9..."}}
        group = cls(
            name=data['name'], id=data['id'], domain_id=data['domain_id'])
        group.cloud = cloud
        return group

    def __repr__(self):
        return '<Group({})>'.format(self.name)
class Project:
    """A Keystone project inside one domain."""

    def __init__(self, name, id, enabled, domain_id):
        self.name = name
        self.id = id
        self.enabled = enabled
        self.domain_id = domain_id

    @classmethod
    def from_keystone(cls, data, cloud):
        """Build a Project from a /v3/projects JSON entry."""
        # data = {"id": "d304..", "name": "admin", "domain_id": "default",
        #   "description": "Bootstrap..", "enabled": true,
        #   "parent_id": "default", "is_domain": false, "tags": [],
        #   "options": {},
        #   "links": {"self": "http://KEYSTONE/v3/projects/d304.."}}
        project = cls(
            name=data['name'], id=data['id'], enabled=data['enabled'],
            domain_id=data['domain_id'])
        project.cloud = cloud
        return project

    def __repr__(self):
        return '<Project({})>'.format(self.name)

    def get_fullname(self):
        """Return "DOMAIN:PROJECT" for display purposes."""
        return '{}:{}'.format(self.get_domain().name, self.name)

    def get_domain(self):
        """The Domain this project belongs to (via the cloud's cache)."""
        return self.cloud.get_domain(domain_id=self.domain_id)

    def get_swift(self):
        """Return the Swift object-store handle for this project's public
        endpoint."""
        key = ('object-store', 'swift', 'project', self.id)
        # Getting the project token ensures we get the endpoint in the
        # endpoints dict.
        project_token = self.cloud.get_project_token(self.id)
        del project_token
        candidates = self.cloud._endpoints[key]  # FIXME: encapsulation!
        public = [ep for ep in candidates if ep['interface'] == 'public']
        endpoint = the_one_entry(public, 'endpoints', dict())
        return Swift.from_keystone(
            endpoint, project_id=self.id, cloud=self.cloud)
# ======================================================================
# OpenStack Swift
# ----------------------------------------------------------------------
# swift = cloud.get....project().get_swift()
# ======================================================================
class SwiftFileExistsError(FileExistsError):
    """FileExistsError (errno EEXIST) whose str() is a custom message."""

    _EEXIST = 17

    def __init__(self, filename, strerror):
        super().__init__(self._EEXIST, filename)
        self._strerror = strerror

    def __str__(self):
        return self._strerror
class SwiftFileNotFoundError(FileNotFoundError):
    """FileNotFoundError (errno ENOENT) whose str() is a custom message."""

    _ENOENT = 2

    def __init__(self, filename, strerror):
        super().__init__(self._ENOENT, filename)
        self._strerror = strerror

    def __str__(self):
        return self._strerror
class Swift:
    """One project's Swift object-store endpoint.

    Obtained via Project.get_swift(); authenticates each request with the
    project-scoped token from the owning Cloud.
    """
    @classmethod
    def from_keystone(cls, data, project_id, cloud):
        """Build a Swift handle from a catalog endpoint entry."""
        # data = {"id": "8888..", "interface": "admin",
        #   "url": "https://SWIFT/v1", "region": "NL1"}
        ret = cls(id=data['id'], url=data['url'], region=data['region'])
        ret.project_id = project_id
        ret.cloud = cloud
        return ret
    def __init__(self, id, url, region):
        self.id = id
        self.url = url
        self.region = region
    def _mkurl(self, *args):
        """Join path components onto the endpoint URL."""
        if args:
            return '{}/{}'.format(self.url, '/'.join(args))
        return self.url
    def _mkhdrs(self, json=False):
        """Auth headers for this project; optionally request JSON bodies."""
        project_token = self.cloud.get_project_token(self.project_id)
        headers = {'X-Auth-Token': str(project_token)}
        if json:
            # text/plain, application/json, application/xml, text/xml
            headers['Accept'] = 'application/json'
        return headers
    def get_stat(self):
        """HEAD the account; returns the response headers (usage counters)."""
        url, hdrs = self._mkurl(), self._mkhdrs()
        out = requests.head(url, headers=hdrs)
        if out.status_code == 403:
            # "We" need to give ourselves permission, if possible.
            raise PermissionDenied('HEAD', url, out.status_code, out.text)
        return out.headers
    def get_containers(self):
        """List this project's containers as SwiftContainer objects."""
        url, hdrs = self._mkurl(), self._mkhdrs(json=True)
        out = requests.get(url, headers=hdrs)
        if out.status_code != 200:
            raise PermissionDenied('GET', url, out.status_code, out.text)
        # headers = {
        #   "X-Account-Container-Count": "0",
        #   "X-Account-Object-Count": "0",
        #   "X-Account-Bytes-Used": "0", ...}
        # out.json() = [
        #   {"name": "logbunny-test", "count": 0, "bytes": 0,
        #    "last_modified": "2020-05-16T15:02:03.684680"}, ...]
        return [SwiftContainer.from_list(i, swift=self) for i in out.json()]
    def get_container(self, name):
        """Return a handle to the named container; no existence check."""
        ret = SwiftContainer(name=name)
        ret.swift = self
        return ret
class SwiftContainer:
    """One container inside a project's Swift endpoint.

    All methods operate over HTTP using the parent Swift's auth headers.
    """
    @classmethod
    def from_list(cls, data, swift):
        """Build a container handle from a GET-account listing entry."""
        # data = {"name": "logbunny-test", "count": 0, "bytes": 0,
        #   "last_modified": "2020-05-16T15:02:03.684680"}
        ret = cls(name=data['name'])
        ret.swift = swift
        return ret
    def __init__(self, name):
        self.name = name
    def _mkurl(self, *args):
        """Join path components under this container's URL."""
        return self.swift._mkurl(self.name, *args)
    def ensure_exists(self):
        """
        Make sure the container exists by creating it if it doesn't.
        Keep in mind that it will be created with "default"
        properties/metadata.
        """
        url, hdrs = self._mkurl(), self.swift._mkhdrs()
        out = requests.head(url, headers=hdrs)
        # 200 = OK, 204 = OK (but has no content)
        # 201 = created, 202 accepted (will be creating shortly)
        if out.status_code in (200, 204):
            pass
        elif out.status_code == 404:
            out = requests.put(url, headers=hdrs)
            assert out.status_code in (201, 202), (url, out.status_code)
            out = requests.head(url, headers=hdrs)
            assert out.status_code in (200, 204), (url, out.status_code)
        else:
            assert False, (url, out.status_code)
    def list(self):
        """
        List all files in the container; returns a list of dicts
        NOTE: This interface will change in the future, as we'll want
        filtering capabilities.
        Example return value:
            [
            {"bytes": 432,
             "content_type": "application/octet-stream",
             "hash": "<md5-hash>",
             "last_modified": "2020-05-16T15:58:02.489890",
             "name": "README.rst"},
            ...
            ]
        """
        url, hdrs = self._mkurl(), self.swift._mkhdrs(json=True)
        out = requests.get(url, headers=hdrs)
        if out.status_code != 200:
            raise PermissionDenied('GET', url, out.status_code, out.text)
        # headers = {
        #   "X-Container-Object-Count": "0",
        #   "X-Container-Bytes-Used": "0",
        #   "X-Storage-Policy": "policy0", ...}
        # out.json() = [
        #   {"bytes": 432, "hash": "5a..",
        #    "name": "README.rst", "content_type": "application/octet-stream",
        #    "last_modified": "2020-05-16T15:58:02.489890"}]
        return out.json()
    def delete(self, name):
        """
        DELETE (remove) remote Swift file
        """
        url, hdrs = self._mkurl(name), self.swift._mkhdrs()
        out = requests.delete(url, headers=hdrs)
        if out.status_code == 404:
            raise SwiftFileNotFoundError(
                filename=name,
                strerror='DELETE {} {}'.format(url, out.status_code))
        if out.status_code != 204:
            raise PermissionDenied('DELETE', url, out.status_code, out.text)
        assert out.content == b'', out.content
    def get(self, name):
        """
        GET (read) remote Swift file, returns a requests.Response object
        Example usage:
            with container.get(filename) as response, \
                    open(local_filename, 'wb') as fp:
                for chunk in response.iter_content(chunk_size=8192):
                    fp.write(chunk)
        See: https://requests.readthedocs.io/en/master/api/#requests.Response
        """
        url, hdrs = self._mkurl(name), self.swift._mkhdrs()
        out = requests.get(url, headers=hdrs)
        if out.status_code == 404:
            raise SwiftFileNotFoundError(
                filename=name,
                strerror='GET {} {}'.format(url, out.status_code))
        if out.status_code != 200:
            raise PermissionDenied('GET', url, out.status_code, out.text)
        return out
    def head(self, name):
        """
        HEAD (read) remote Swift file metadata, returns a requests.Response
        object
        NOTE(review): error labels below say 'GET' although the verb is
        HEAD — looks like copy-paste; left as-is to keep messages stable.
        """
        url, hdrs = self._mkurl(name), self.swift._mkhdrs()
        out = requests.head(url, headers=hdrs)
        if out.status_code == 404:
            raise SwiftFileNotFoundError(
                filename=name,
                strerror='GET {} {}'.format(url, out.status_code))
        if out.status_code != 200:
            raise PermissionDenied('GET', url, out.status_code, out.text)
        assert out.content == b'', out.content
        return out
    def put(self, name, fp, content_type='application/octet-stream',
            check_exists_before=True, check_exists_after=True):
        """
        PUT (write) remote Swift file
        BEWARE: if you're uploading from a file of unknown size (a
        pipe/stream), you may want to wrap the fp in a
        ChunkIteratorIOBaseWrapper: instead of iterating over lines,
        it will iterate over chunks of data.
        NOTE: Right now, we do a:
        - HEAD check before PUT (to ensure we do not overwrite), and a
        - HEAD check after PUT (to ensure the file was written).
        This may prove to be more overhead than we want, so this might
        change in the future. You can disable this by setting the
        `check_exists_*` arguments to False.
        """
        url, hdrs = self._mkurl(name), self.swift._mkhdrs()
        hdrs['Content-Type'] = content_type
        if check_exists_before:
            out = requests.head(url, headers=hdrs)
            if out.status_code != 404:
                raise SwiftFileExistsError(
                    filename=name,
                    strerror='HEAD before PUT {} {}'.format(
                        url, out.status_code))
            assert out.content == b'', out.content
        out = requests.put(url, headers=hdrs, data=fp)
        if out.status_code != 201:
            raise PermissionDenied('PUT', url, out.status_code, out.text)
        assert out.content == b'', out.content
        if check_exists_after:
            out = requests.head(url, headers=hdrs)
            if out.status_code != 200:
                raise SwiftFileNotFoundError(
                    filename=name,
                    strerror='HEAD after PUT {} {}'.format(
                        url, out.status_code))
            assert out.content == b'', out.content
# ======================================================================
# Request helpers
# ======================================================================
class ChunkIteratorIOBaseWrapper:
    """
    Wrap a binary file object so iteration yields fixed-size chunks.

    Regular file objects iterate per line (readline), which is wasteful
    when the consumer -- e.g. the requests library streaming an upload of
    unknown length -- only wants raw data. This wrapper iterates in
    BUFSIZ-sized read() chunks instead, while forwarding every other
    attribute and method (fileno(), seek(), ...) to the wrapped object.

    Usage:

        requests.put(url, data=ChunkIteratorIOBaseWrapper(sys.stdin.buffer))

    See also: wsgiref.util.FileWrapper -- but that one does not forward
    calls like fileno() and seek().
    """
    BUFSIZ = 256 * 1024

    def __init__(self, fp):
        self._fp = fp

    def __iter__(self):
        # TODO: check for closed file?
        return self

    def __next__(self):
        chunk = self._fp.read(self.BUFSIZ)
        if chunk == b'':
            raise StopIteration()
        return chunk

    def __getattr__(self, attr):
        "Forward any other attribute access to the wrapped file object"
        return getattr(self._fp, attr)
class FuncOpen:
    """
    Popen "compatible" subprocess handler to be used in Popen-chains
    Because reading a HTTP stream takes userland code (we cannot
    simply read() from the socket), we spawn a subprocess to read and
    write to an anonymous pipe. (*)
    The other end of the pipe can then be read() as usual.
    Usage:
        def data_get_func(dstfp):
            "Closure, to call container.get(name) and write to dstfp"
            with container.get(name) as response:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        dstfp.write(chunk)
        # Example that loads data from a Swift container and feeds it to
        # md5sum directly:
        with FuncOpen(data_get_func) as pipe1, (
                subprocess.Popen(
                    ["md5sum"], stdin=pipe1.stdout,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)) as pipe2:
            md5out, err = pipe2.communicate()
            assert pipe2.wait() == 0 and err == b''
            void, err = pipe1.communicate()
            assert pipe1.wait() == 0 and void == b'' and err == b'', (
                pipe1.returncode, void, err)
        print(md5out)  # b'<md5-hash> -<LF>'
    (*) Why? For chunked transfer, the data is intermixed with chunk
    headers. For gzipped data, the data also needs decompression.
    """
    def __init__(self, function, stdout=None):
        # ``function(dstfp)`` runs in a forked child, writing its output to
        # dstfp; the parent reads it from self.stdout (or ``stdout`` if
        # supplied by the caller).
        if stdout is None:
            # Our pipe, our responsibility to clean up the fd.
            c2pread, c2pwrite = os.pipe()
        else:
            # Someone elses responsibility.
            c2pread, c2pwrite = None, stdout.fileno()
        errread, errwrite = os.pipe()
        pid = os.fork()
        if not pid:
            # This is the child
            handled_fds = (None, c2pread, c2pwrite)
            os.close(errread)
            if c2pread is not None:
                os.close(c2pread)
            if sys.stdin and sys.stdin.fileno() not in handled_fds:
                sys.stdin.close()
            if sys.stdout and sys.stdout.fileno() not in handled_fds:
                sys.stdout.close()
            # NOTE(review): fileno() returns an int, never None — this
            # condition is always true; presumably ``sys.stderr is not
            # None`` was intended. Left as-is.
            if sys.stderr.fileno() is not None:
                os.dup2(errwrite, sys.stderr.fileno())
            try:
                # Function is called first after the fork(), so we need
                # not worry about anyone setting CLOEXEC on its newly
                # created FDs.
                with os.fdopen(c2pwrite, 'wb') as dstfp:
                    function(dstfp)
            finally:
                try:
                    os.close(errwrite)
                except Exception:
                    pass
                # Never fall through into the parent's code path.
                os._exit(0)
        self.pid = pid
        self.returncode = None
        self._using_pipe = (c2pread is not None)
        if self._using_pipe:
            os.close(c2pwrite)
            self.stdout = os.fdopen(c2pread, 'rb')
        os.close(errwrite)
        self._stderr = errread
    def communicate(self):
        """Drain child stderr and wait; returns (b'', stderr_bytes)."""
        # Behave as much as regular Popen as possible. Note that we
        # don't cope with large amounts of stderr while stdout is still
        # (supposed) to be flowing.
        out = b''
        with os.fdopen(self._stderr, 'rb') as fp:
            err = fp.read()
        self._stderr = None
        self.wait()
        return out, err
    def wait(self):
        """Reap the child (once) and return its exit status."""
        if self.returncode is None:
            pid, returncode = os.waitpid(self.pid, 0)
            self.returncode = returncode
        if self._stderr is not None:
            os.close(self._stderr)
            self._stderr = None
        return self.returncode
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Close our read end (if we own it) and reap the child.
        if self._using_pipe:
            self.stdout.close()
        self.wait()
class FuncPipe(FuncOpen):
    """
    FuncOpen as a Pipe where we assert that there was no failure
    """
    def communicate(self):
        # A pure pipe stage must not produce stdout of its own.
        stdout_data, stderr_data = super().communicate()
        assert stdout_data in (b'', None), stdout_data
        if self.returncode == 0:
            return (None, stderr_data)
        # Child failed: surface its stderr and raise like subprocess does.
        log.debug('(stderr) %s', stderr_data.decode('ascii', 'replace'))
        raise CalledProcessError(
            cmd='{} (FuncPipe child)'.format(sys.argv[0]),
            output=stderr_data, returncode=self.returncode)
class SwiftContainerGetPipe(FuncPipe):
"""
Get data from a container in a subprocess: self.stdout iterates the data
Usage:
with SwiftContainerGetPipe(container, remote_name) as pipe1, (
subprocess.Popen(
["md5sum"], stdin=pipe1.stdout,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)) as pipe2:
md5out, err = pipe2.communicate()
pipe1.communicate()
print(md5out) # b'<md5-hash> -<LF>'
"""
def __init__(self, container, filename, stdout=None):
# Call generator enter manually and immediately: any 403/404
# will get picked up now.
self._response = response = container.get(filename).__enter__()
# The container data fetch gets to be done in the subprocess.
def getter(dstfp):
for chunk in response.iter_content(chunk_size=8192):
# In the wild, we have seen one case where an apparently empty
# write propagated to a process causing the read to be cut
# short, aborting the reader. Still not entirely sure what
# happened, because we haven't been able to reproduce. But the
# addition of this if-condition fixed the problem in both
# occurrences: don't write empty data. It may cause someone to
# think there is nothing left.
if chunk:
dstfp.write(chunk)
super().__init__(function=getter, stdout=stdout)
def __exit__(self, type, value, traceback):
self._response.__exit__(type, value, traceback)
super().__exit__(type, value, traceback)
@contextmanager
def TemporaryUntilClosedFile(filename, mode='wb'):
    """
    NamedTemporaryFile replacement that renames if there is no exception
    If there is an exception inside the context handler, the temporary file is
    deleted. If there is _no_ exception, the temporary file is renamed to the
    target filename.
    Usage:
        with TemporaryUntilClosedFile(local_name) as outfp, \\
                SwiftContainerGetPipe(
                    container, remote_name, outfp) as source:
            source.communicate()
    """
    # Use dir=directory-of-the-file, so we won't have issues with
    # cross-filesystem-moves.
    ret = tempfile.NamedTemporaryFile(
        mode=mode, dir=os.path.dirname(filename), delete=False)
    try:
        yield ret
    except Exception:
        # Close before unlink so the fd does not leak (and so Windows
        # allows the unlink at all).
        ret.close()
        os.unlink(ret.name)
        raise
    else:
        # BUG FIX: close (and thereby flush) the buffered file object
        # *before* publishing it under its final name; previously buffered
        # data could still be unflushed at rename time. os.replace() is the
        # atomic rename that also overwrites an existing target on Windows.
        ret.close()
        os.replace(ret.name, filename)
| ossobv/keystone-light | keystone_light/__init__.py | __init__.py | py | 37,297 | python | en | code | 0 | github-code | 36 |
30540305178 | #!/usr/bin/env python3
import requests
import pandas as pd
import json
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Our endpoint URL
endpoint = "http://universities.hipolabs.com/search?country=Canada"
def report_to_csv(api_url):
    """Fetch university records from *api_url* and write them, sorted
    alphabetically by university name, to ``universities.csv``.
    """
    # Making a GET request to the endpoint
    response = requests.get(api_url)
    # BUG FIX: fail loudly on HTTP errors instead of parsing an error body.
    response.raise_for_status()
    records = response.json()
    logging.info(" Data buffered successfully")
    # Build the data frame straight from the parsed JSON; the old
    # json.dumps()/pd.read_json() round trip was redundant.
    df = pd.DataFrame(records)
    # BUG FIX: len(df) is already the record count (there is no header row
    # to subtract); the old ``len(df)-1`` under-reported by one.
    logging.info(" %d records retrieved", len(df))
    logging.info(" Data converted to type JSON")
    # Sort the data frame by the 'name' column, ascending
    sorted_df = df.sort_values(by=["name"])
    logging.info(" Data Frame is now sorted alphabetically")
    # Export to CSV with a more sensible column order.
    with open("universities.csv", 'w') as uni_csv:
        sorted_df.to_csv(uni_csv, index=False, columns=['name', 'domains', 'web_pages', 'state-province', 'alpha_two_code', 'country'])
    logging.info(" Data is now written to file universities.csv")
if __name__ == "__main__":
report_to_csv(endpoint)
    logging.info(" Task finished with no error")
# 1110번 (Baekjoon "add cycle"): repeatedly form the next two-digit number
# from (last digit of current, last digit of its digit sum) and count the
# steps until the original number reappears.
n = input().zfill(2)
current = n
cycle_length = 0
while True:
    digit_sum = str(sum(int(ch) for ch in current)).zfill(2)
    cycle_length += 1
    candidate = current[1] + digit_sum[1]
    if candidate == n:
        break
    current = candidate
print(cycle_length)
37401395031 | import numpy as np
np.random.seed(1234)
from functions import *
# ------ NETWORK ARCH ------
#
# Layers = 2 (1 hidden layer)
# Epochs = 300
# LR =
# SGD = BATCH
# Loss = MSE (with added 1/2)
# Hyper parameters
numer_of_epoch = 300
learning_rate_1 = 0.01
learning_rate_2 = 0.1
# Data
x = np.array([
[0,0,1],
[0,1,1],
[1,0,1],
[1,1,1]
])
y = np.array([
[0],
[0],
[0],
[1]
])
# Variables
w1 = np.random.randn(3,5)
w2 = np.random.randn(5,1)
for iter in range(numer_of_epoch):
# Foward
layer_1 = x.dot(w1)
layer_1_act = tanh(layer_1)
layer_2 = layer_1_act.dot(w2)
layer_2_act = tanh(layer_2)
cost = MSE(layer_2_act, y)
# Backward
grad_2_part_1 = d_MSE(layer_2_act, y)
grad_2_part_2 = d_tanh(layer_2)
grad_2_part_3 = layer_1_act
grad_2 = grad_2_part_3.T.dot(grad_2_part_1 * grad_2_part_2)
grad_1_part_1 = (grad_2_part_1 * grad_2_part_2).dot(w2.T)
grad_1_part_2 = d_tanh(layer_1)
grad_1_part_3 = x
grad_1 = grad_1_part_3.T.dot(grad_1_part_1 * grad_1_part_2)
w1 -= learning_rate_1*grad_1
w2 -= learning_rate_2*grad_2
layer_1 = x.dot(w1)
layer_1_act = tanh(layer_1)
layer_2 = layer_1_act.dot(w2)
layer_2_act = tanh(layer_2)
print(layer_2_act)
| javiabellan/deep-learning | DL framework from sctrach --> javia repo/Python/nn.py | nn.py | py | 1,302 | python | en | code | 31 | github-code | 36 |
4304799902 | import os
import pickle
def save_data(data, path):
    """Pickle *data* to the file at *path* (overwriting it)."""
    with open(path, 'wb') as outfile:
        pickle.dump(data, outfile)
def load_data(path):
    """Unpickle and return the object stored at *path*.

    Raises FileNotFoundError if the file does not exist.
    """
    if not os.path.exists(path):
        # BUG FIX: raise the specific builtin (subclass of Exception, so
        # existing ``except Exception`` callers still work) and include
        # the offending path in the message.
        raise FileNotFoundError('File has not been found: %s' % path)
    with open(path, 'rb') as infile:
        return pickle.load(infile)
| EgorDm/TextSummarization | datasets/utils.py | utils.py | py | 295 | python | en | code | 0 | github-code | 36 |
25951213667 | import streamlit as st
import pickle
import numpy as np
import pandas as pd
movies_data = pickle.load(open('./books/movies.pkl','rb'))
similarities = pickle.load(open('./books/movie_similarities.pkl','rb'))
movies_df = pd.DataFrame(movies_data)
movies_title = movies_df['title'].values
def recommend(movie):
movie = movie.lower()
movie_id = movies_df[movies_df['lower_title'] == movie].index
best5 = np.argsort(similarities[movie_id])[0,-6:-1]
return movies_df['title'].values[best5]
# print(recommend('Avatar'))
st.title('Movie Recommender System')
selected_movie_name = st.selectbox(
'Choose a movie',
movies_title
)
if st.button('Recommend'):
recommendations = recommend(selected_movie_name)
for m in recommendations: # type: ignore
st.write(m)
| bheemisme/movie-recommendar-system | app.py | app.py | py | 797 | python | en | code | 0 | github-code | 36 |
41610031938 | import json
# Load the squad from players.json and report every wicket-keeper in it.
with open("players.json", 'r') as file:
    data = json.load(file)
players = data['player']
for player in players:
    # Direct key lookup replaces the old loop over every (key, value)
    # pair; .get() also tolerates players without a "role" entry.
    # (The unused ``d = {}`` scratch dict was removed.)
    if player.get("role") == "Wicket-keeper":
        print("There is one wicket-keeper in a BTeam:", "role", "Wicket-keeper")
| tangellamudimanisha/BTeam | wicket-keeper.py | wicket-keeper.py | py | 270 | python | en | code | 0 | github-code | 36 |
72170811625 | #Dazarus Chapman
import math
class Triangle:
    """Isosceles triangle described by its base and height.

    Side length, perimeter, area and the base/apex angles are derived on
    demand from those two measurements.
    """

    def __init__(self, base, height):
        self.set_base(base)
        self.set_height(height)

    def set_base(self, b):
        # Store a valid base as float; otherwise complain and keep the
        # previous value (if any).
        if b > 0:
            self.base = float(b)
        else:
            print("Base must be greater than 0.")

    def set_height(self, height):
        if height > 0:
            self.height = float(height)
        else:
            print("Height must be greater than 0.")

    def calc_side(self):
        # Slant side via Pythagoras on (base/2, height).
        return math.sqrt((self.base / 2) ** 2 + self.height ** 2)

    def calc_perimeter(self):
        return self.base + 2 * self.calc_side()

    def calc_area(self):
        return 0.5 * self.base * self.height

    def calc_alpha(self):
        # Base angle, in degrees.
        return math.degrees(math.atan((self.height * 2) / self.base))

    def calc_beta(self):
        # Apex angle, in degrees.
        return math.degrees(math.acos(self.height / self.calc_side()) * 2)

    def print_all(self) -> None:
        print(f"------------------------------")
        print(f"base     : {self.base}")
        print(f"height   : {self.height}")
        print(f"side     : {self.calc_side()}")
        print(f"perimeter: {self.calc_perimeter()}")
        print(f"area     : {self.calc_area()}")
        print(f"alpha    : {self.calc_alpha()}")
        print(f"beta     : {self.calc_beta()}")
        print(f"------------------------------")
print(f"------------------------------")
x = Triangle(2, 3)
x.print_all()
x.set_height(5)
x.print_all()
x.set_base(2)
x.set_height(3)
x.print_all()
| DAILYCODI/Fall-2023-Systems-Programming-Class | SPLabs/Lab4/triangle_dazarus_chapman.py | triangle_dazarus_chapman.py | py | 1,578 | python | en | code | 0 | github-code | 36 |
74649258984 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from rocketmq.client import Producer,Message
from utils.my_logger import logger
import time
import re
def push(num):
tt = re.findall('^\d{13}', str(time.time()).replace('.', ''))[0]
print(type(tt))
producer = Producer('PID-001')
producer.set_namesrv_addr('192.168.90.131:9876')
producer.start()
msg = Message('linkcld_tdmp_gantry')
msg.set_keys('key')
msg.set_tags('bayonet')
body = ("[{'enn':'河南惠济站'," \
"'gid':'G003041003005620010'," \
"'ipnc':'%s'," \
"'ln':'201'," \
"'marked':true," \
"'mid':'020000410101620060354820210315072344'," \
"'mt':3," \
"'pnc':'%s'," \
"'pt':%s," \
"'rt':1615767977000," \
"'vt':'2'}]"% (num,num,tt))
msg.set_body(body)
ret = producer.send_sync(msg)
print(ret.status, ret.msg_id, ret.offset)
producer.shutdown()
def main():
    """Push one warning message via MQ and log the outcome."""
    try:
        push('豫A00001_1')
    except Exception:
        logger.exception("RocketMQ推送预警失败")
        raise
    else:
        # BUG FIX: ``logger`` is not callable — the old ``logger('MQ推送成功')``
        # raised TypeError. Also removed the stray bare ``raise`` that
        # unconditionally forced the except branch (debug leftover).
        logger.info('MQ推送成功')
if __name__ == '__main__':
    logger.info('mqpush.py开始运行')
    main()
'''Compute and print the arithmetic mean of N grades.'''
qnt_notas = int(input("Informe quantas notas deseja inserir: "))
soma = 0
for _ in range(qnt_notas):
    soma += float(input("Informe a nota: "))
media = soma / qnt_notas
| jessica-lemes/exercicios-python | Ex. 24 EstruturaDeRepeticao.py | Ex. 24 EstruturaDeRepeticao.py | py | 317 | python | pt | code | 0 | github-code | 36 |
1970589964 | #!/usr/bin/env python3
import sys
from enum import Enum
class Layout:
def __init__(self, filename):
width = 0
self._data = []
with open(filename, 'r') as infile:
for line in (line.rstrip() for line in infile.readlines()):
if width == 0:
width = len(line)
elif width != len(line):
raise RuntimeError('Invalid input')
self._data.append([c for c in line])
@property
def width(self):
return len(self._data[0])
@property
def height(self):
return len(self._data)
def print(self):
for row in self._data:
for seat in row:
print(seat, end='')
print('')
def get_state(self, row, column):
if row < 0 or row >= self.width:
raise RuntimeError('Invalid row')
if column < 0 or column >= self.height:
raise RuntimeError('Invalid column')
return self._data[column][row]
def set_state(self, row, column, to):
if row < 0 or row >= self.width:
raise RuntimeError('Invalid row')
if column < 0 or column >= self.height:
raise RuntimeError('Invalid column')
if to not in ('#', 'L'):
raise RuntimeError('Invalid new state')
if self._data[column][row] not in ('#', 'L'):
raise RuntimeError('Cannot set state of floor')
self._data[column][row] = to
def adjacent_occupied(self, row, column):
if row < 0 or row >= self.width:
raise RuntimeError('Invalid row')
if column < 0 or column >= self.height:
raise RuntimeError('Invalid column')
num_occupied = 0
min_row = max(row - 1, 0)
max_row = min(row + 1, self.width - 1)
min_col = max(column - 1, 0)
max_col = min(column + 1, self.height - 1)
for c in range(min_col, max_col + 1):
for r in range(min_row, max_row + 1):
if c == column and r == row:
continue
s = self._data[c][r]
if s == '#':
num_occupied += 1
return num_occupied
def visibly_occupied(self, row, column):
width = self.width
height = self.height
if row < 0 or row >= width:
raise RuntimeError('Invalid row')
if column < 0 or column >= height:
raise RuntimeError('Invalid column')
num_occupied = 0
directions = ((-1, 0), (1, 0),
(0, -1), (0, 1),
(-1, 1), (1, 1),
(-1, -1), (1, -1))
for dr, dc in directions:
r = row + dr
c = column + dc
while r >= 0 and r < width and c >= 0 and c < height:
s = self._data[c][r]
if s != '.':
if s == '#':
num_occupied += 1
break
r += dr
c += dc
return num_occupied
def num_occupied_total(self):
count = 0
for row in self._data:
for col in row:
if col == '#':
count += 1
return count
def simulation_step_part1(layout):
    """Apply one round of the part-1 rules (direct neighbours, leave at
    >= 4 occupied). Returns True if any seat changed, False once stable.
    """
    pending = []
    # Collect all transitions first so the round is applied atomically.
    for col in range(layout.height):
        for row in range(layout.width):
            state = layout.get_state(row, col)
            if state == '.':
                continue
            occupied = layout.adjacent_occupied(row, col)
            if state == 'L' and occupied == 0:
                pending.append((row, col, '#'))
            elif state == '#' and occupied >= 4:
                pending.append((row, col, 'L'))
    for row, col, state in pending:
        layout.set_state(row, col, state)
    return bool(pending)
def simulation_step_part2(layout):
    """Apply one round of the part-2 rules (first visible seat in each
    direction, leave at >= 5 occupied). Returns True if anything changed.
    """
    pending = []
    # Collect all transitions first so the round is applied atomically.
    for col in range(layout.height):
        for row in range(layout.width):
            state = layout.get_state(row, col)
            if state == '.':
                continue
            occupied = layout.visibly_occupied(row, col)
            if state == 'L' and occupied == 0:
                pending.append((row, col, '#'))
            elif state == '#' and occupied >= 5:
                pending.append((row, col, 'L'))
    for row, col, state in pending:
        layout.set_state(row, col, state)
    return bool(pending)
def main(filename):
layout = Layout(filename)
layout.print()
#while simulation_step_part1(layout):
while simulation_step_part2(layout):
print('----')
layout.print()
print(f'Num occupied: {layout.num_occupied_total()}')
if __name__ == '__main__':
if len(sys.argv) != 2:
print(f'Usage {sys.argv[0]} <filename>', file=sys.stderr)
sys.exit(1)
main(sys.argv[1])
| koeleck/aoc | 11/solve.py | solve.py | py | 4,840 | python | en | code | 0 | github-code | 36 |
14992218159 | import calendar
from requests_html import HTMLSession
c = calendar.TextCalendar(calendar.SUNDAY)
url_list = []
for x in range(2020,2021):
for y in range(1,13):
for z in c.itermonthdays(x,y):
if z != 0:
if y < 10:
month = '0'+str(y)
else:
month = str(y)
if z < 10:
day = '0'+str(z)
else:
day = str(z)
url = 'https://us.trend-calendar.com/trend/'+str(x)+'-'+month+'-'+day+'.html'
url_list.append(url)
#print(url_list)
def get_webinfo(num,url):
try:
session = HTMLSession()
r = session.get(url)
sel1='#readmore1'
result1 = r.html.find(sel1)
sel2='#post-'+str(num)+' > div > ol:nth-child(3)'
result2 = r.html.find(sel2)
result = result1[0].text+'\n'+result2[0].text
return result.replace('\n', ',').split(',')
except Exception:
return []
def identifyHashtag(ls):
    """Filter hashtag entries out of *ls*.

    Returns the entries that do NOT start with '#' (plain names),
    preserving order.

    BUG FIX: the old index check (``i[0] == '#'``) raised IndexError on
    empty strings; str.startswith() handles them safely.
    """
    return [entry for entry in ls if not entry.startswith('#')]
start = 3282
#demo
url_list = url_list[:10]
name_ls = []
for i in url_list:
ls = get_webinfo(start,i)
start+=1
name_tem = identifyHashtag(ls)
name_ls = name_ls + name_tem
print(name_ls)
file = open("demo_trending_word.txt",'a')
for i in name_ls:
file.write(str(i));
file.write('\n');
file.close();
| KJS89/Wuduplz | Web mining/data_mining/trending_word_getter.py | trending_word_getter.py | py | 1,511 | python | en | code | 2 | github-code | 36 |
32577431418 | #!/usr/bin/env python
# Resequence back bone files for sequential load in PyMOL
# Warning: Modifies file names!
import os
seq_orig_labeled = os.listdir(os.getcwd())
seq_zero_labeled = []
def reseq(filenames=None):
    """Resequence backbone PDB files for sequential loading in PyMOL.

    Files named ``<x>-<ini><resi>-1.pdb`` are copied to
    ``<resi zero-padded to 3>-seq-<ini>.pdb`` so a lexicographic sort
    matches residue order. Warning: writes new files into the cwd.

    filenames -- iterable of candidate names; defaults to the directory
    listing taken at import time (backward compatible with reseq()).
    """
    if filenames is None:
        filenames = seq_orig_labeled
    for src in filenames:
        parts = src.split('-')
        # Only touch names of the "<x>-<ini><resi>-1.pdb" form.
        if len(parts) == 3 and '-1.pdb' in src:
            initial = parts[1][0]
            residue = int(parts[1][1:])
            dst = "%03d-seq-%s.pdb" % (residue, initial)
            # Context managers guarantee both files get closed.
            with open(src, 'r') as infile, open(dst, 'w') as outfile:
                outfile.write(infile.read())
#reseq()
| mzhKU/PyEnzyme-Screening | res-seq.py | res-seq.py | py | 694 | python | en | code | 4 | github-code | 36 |
74478447143 | import random
import pygame
from pygame.locals import QUIT
from pelota import*
from Raqueta import*
pygame.mixer.init()
VENTANA_HORI = 1200
VENTANA_VERT = 600
FPS = 160
BLANCO = (255, 255, 255)
NEGRO = (0, 0, 0)
fondo= pygame.image.load("fondo.png")
pantalla = pygame.display.set_mode((VENTANA_HORI,VENTANA_VERT))
def main():
pygame.init()
pantalla.blit(fondo,(0,0))
sonido_fondo = pygame.mixer.Sound("theme.mp3")
pygame.mixer.Sound.play(sonido_fondo, -1)
ventana = pygame.display.set_mode((VENTANA_HORI, VENTANA_VERT))
pygame.display.set_caption("Pong ")
fuente = pygame.font.Font(None, 60)
pelota = PelotaPong("ball.png")
raqueta_1 = RaquetaPong()
raqueta_1.x = 60
raqueta_2 = RaquetaPong()
raqueta_2.x = VENTANA_HORI - 60 - raqueta_2.ancho
jugando = True
while jugando:
pelota.mover()
pelota.rebotar()
raqueta_1.mover()
raqueta_2.mover_ia(pelota)
raqueta_1.golpear(pelota)
raqueta_2.golpear_ia(pelota)
ventana.blit(pygame.image.load("fondo.png"),(0,0))
ventana.blit(pelota.imagen, (pelota.x, pelota.y))
ventana.blit(raqueta_1.imagen, (raqueta_1.x, raqueta_1.y))
ventana.blit(raqueta_2.imagen, (raqueta_2.x, raqueta_2.y))
texto = f"{pelota.puntuacion} : {pelota.puntuacion_ia}"
if(pelota.puntuacion>=10):
ventana.blit(pygame.image.load("win.png"),(300,100))
if(pelota.puntuacion_ia>=10):
ventana.blit(pygame.image.load("lose.png"),(300,100))
letrero = fuente.render(texto, False, BLANCO)
ventana.blit(letrero, (VENTANA_HORI / 2 - fuente.size(texto)[0] / 2, 50))
for event in pygame.event.get():
if event.type == QUIT:
jugando = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
raqueta_1.dir_y = -5
if event.key == pygame.K_s:
raqueta_1.dir_y = 5
if event.type == pygame.KEYUP:
if event.key == pygame.K_w:
raqueta_1.dir_y = 0
if event.key == pygame.K_s:
raqueta_1.dir_y = 0
pygame.display.flip()
pygame.time.Clock().tick(FPS)
pygame.quit()
if __name__ == "__main__":
main()
| luis10dsn/Pong | Pong/main.py | main.py | py | 2,513 | python | es | code | 0 | github-code | 36 |
''' Build and display the 4 x 4 identity matrix. '''
# Start from an all-zero 4x4 matrix...
I = [[0] * 4 for _ in range(4)]
# ...and set the main diagonal to ones.
for idx in range(4):
    I[idx][idx] = 1
# Print the matrix, each row preceded by a blank line.
for r in range(4):
    print()
    for col in range(4):
        print(I[r][col], end=", ")
| AlejandroPaniagua/AlexProjPy | matrizIdentidad.py | matrizIdentidad.py | py | 436 | python | es | code | 0 | github-code | 36 |
36395547481 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from datetime import datetime
import time
class InstagramBot:
def __init__(self, username, password):
self.username = username
self.password = password
self.bot = webdriver.Firefox()
self.cont = 0
self.stopCont = 10
self.numeroSeguidores = '0'
self.codigoLocal = ''
def login(self):
bot = self.bot
bot.get('https://www.instagram.com/accounts/login/?source=auth_switcher')
time.sleep(3)
email = bot.find_element_by_name('username')
password = bot.find_element_by_name('password')
email.clear()
password.clear()
email.send_keys(self.username)
password.send_keys(self.password)
password.send_keys(Keys.RETURN)
time.sleep(3)
def encontrarFotos(self, tag):
bot = self.bot
time.sleep(2)
for i in range(1,3):
bot.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(2)
foto = bot.find_elements_by_tag_name('a')
links = [elem.get_attribute('href') for elem in foto]
for i in range(13,30):
bot.get(links[i])
try:
time.sleep(2)
if ed.verificarLike() == False:
bot.find_element_by_class_name('fr66n').click() #curtir
time.sleep(2)
if tag == seguirTag:
ed.verificarSeguindo()
time.sleep(2)
except Exception as ex:
time.sleep(6)
def seguirSeguidores(self):
bot = self.bot
bot.get('https://www.instagram.com/'+self.username)
time.sleep(2)
elemento = bot.find_elements_by_tag_name('a')
links = [elem.get_attribute('href') for elem in elemento]
for i in range(len(links)):
if links[i]=='https://www.instagram.com/'+ self.username +'/followers/':
elemento[i].click()
break
time.sleep(4)
bot2 = self.bot
seguidores = bot2.find_elements_by_class_name('wo9IH')
listaSeguidores = [seguidores.find_element_by_class_name('uu6c_') for seguidores in seguidores]
listaBotaoSeguir = [listaSeguidores.find_element_by_class_name('Pkbci') for listaSeguidores in listaSeguidores]
for listaBotaoSeguir in listaBotaoSeguir:
if listaBotaoSeguir.find_element_by_tag_name('button').text == 'Seguir':
listaBotaoSeguir.find_element_by_tag_name('button').click()
time.sleep(1)
def calcularSeguidores(self):
bot = self.bot
numeroSeguidores = bot.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/ul/li[2]/a/span').text
if self.numeroSeguidores != numeroSeguidores:
self.numeroSeguidores = numeroSeguidores
print('--------------------------\nNúmero atual de seguidores: '+numeroSeguidores)
def seguirIdeal(self):
bot = self.bot
bot.get('https://www.instagram.com/lpeventos08')
time.sleep(2)
elemento = bot.find_elements_by_tag_name('a')
links = [elem.get_attribute('href') for elem in elemento]
for i in range(len(links)):
if links[i]=='https://www.instagram.com/lpeventos08/following/':
elemento[i].click()
def verificarLike(self):
bot = self.bot
botao = bot.find_element_by_class_name('fr66n')
botao2 = botao.find_element_by_tag_name('button')
botaoFinal = botao2.find_element_by_tag_name('span')
if botaoFinal.get_attribute('class') == 'glyphsSpriteHeart__filled__24__red_5 u-__7':
return True
return False
def buscarTag(self, tag):
bot = self.bot
bot.get('https://www.instagram.com/'+self.username)
time.sleep(2)
ed.calcularSeguidores()
bot.get('https://www.instagram.com/explore/tags/'+tag)
time.sleep(2)
ed.encontrarFotos(tag)
def verificarSeguindo(self):
bot = self.bot
if ed.cont<10:
ed.cont = ed.cont+1
bot.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/article/header/div[2]/div[1]/div[2]/button').click()
else:
ed.zerarCont()
def zerarCont(self):
self.stopCont = self.stopCont - 1
if self.stopCont == 0:
self.cont = 0
self.stopCont = 10
def getCodigoCidade(self, cidade):
codigoCidades = ['213106903','405027146','248738611','112047398814697','213665608','221337983','8226756']
return codigoCidades[cidade]
def verificaCidade(self, tag):
cidades = ['curitiba', 'riodejaneiro', 'parana', 'saopaulo','londrina','assis', 'maringa']
for i in range(len(cidades)):
if tag == cidades[i]:
self.codigoLocal = ed.getCodigoCidade(i)
return True
return False
def buscarLocalizacao(self, tag):
bot = self.bot
bot.get('https://www.instagram.com/'+self.username)
time.sleep(2)
ed.calcularSeguidores()
bot.get('https://www.instagram.com/explore/locations/'+self.codigoLocal)
time.sleep(2)
ed.encontrarFotos(tag)
usuario = ''
senha = ''
print('Usuário:')
#usuario = input(usuario)
print('Senha:')
#senha = input(senha)
tags = []
tag = ''
print('PARA INICIAR A APLICAÇÃO DIGITE: start')
while True:
print('Insira uma tag:')
tag = ''
tag = input(tag)
if tag == 'start':
break
tags.append(tag)
seguirTag = 'nao seguir'
ed = InstagramBot(usuario,senha)
ed.login()
time.sleep(2)
while True:
i=0
for i in range(len(tags)):
try:
if ed.verificaCidade(tags[i]):
ed.buscarLocalizacao(tags[i])
else:
ed.buscarTag(tags[i])
except Exception as ex:
            ed.bot.get('https://www.instagram.com/'+usuario)
27941619917 | import gc
import itertools
import multiprocessing as mp
from math import ceil
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from SEMITONES._utils import _chunk_indices
from SEMITONES._utils import _linreg_get_beta
from SEMITONES._utils import _permute
from SEMITONES._utils import _std_sparse
from SEMITONES.support_funcs import pairwise_similarities
gc.enable()
def _enrichment_scoring(X, S, scale_exp, i, n_chunks=None):
"""Perform the actual enrichment scoring"""
n_chunks = 100 if n_chunks is None else n_chunks
if len(S.shape) == 2:
params = {}
for c in range(S.shape[1]):
if issparse(X):
pars = []
for chunk in _chunk_indices(X, n_chunks, axis=1):
pars.extend(np.apply_along_axis(_linreg_get_beta, 0,
X[:, chunk].A, S[:, c],
scale_exp))
params[c] = pars
else:
params[c] = np.apply_along_axis(_linreg_get_beta, 0, X,
S[:, c], scale_exp)
else:
if issparse(X):
params = []
for chunk in _chunk_indices(X, n_chunks, axis=1):
params.extend(np.apply_along_axis(_linreg_get_beta, 0,
X[:, chunk].A, S, scale_exp))
else:
params = np.apply_along_axis(_linreg_get_beta, 0, X, S, scale_exp)
return i, params
def calculate_escores(X, query, metric=None, S=None, scale_exp=None,
optim_over=None, ncpu=None, n_chunks=None,
make_copy=None):
"""Calculate the enrichment scores for all features with respect
to the reference cells in the query.
Parameters
----------
X: matrix-like object (n_samples, n_features)
An array where the rows are samples (i.e. cells) and the columns
are features (i.e. genes). Accepts pandas dataframes,
numpy arrays, and scipy compressed sparse row matrix.
query: list-like object
An iterable which contains the names or indices of the cells
with respect to which enrichment scoring should be performed. If
providing a pandas dataframe, these should be strings. If
providing a numpy array or sparse matrix, these should be
indices (int). If providing a matrix S that contains a different
metric to rank cells by and X is a dataframe, the strings may
be identifiers of the columns in S, even if these do not
correspond to any cell (i.e. sample) in X.
metric: str, optional
If S is None, this metric will be used to calculate the similarity
to the reference cell for each cell. Available metrics are those in
sklearn.metrics.pairwise_distances and
sklearn.metrics.pairwise.pairwise_kernels modules.
S: numpy array (n_samples, n_features), optional
A similarity matrix where each column represents the similarity to
the reference cell for each cell in X. The columns should be ordered
as the cells in the query.
scale_exp: boolean
Whether to scale the expression vector before performing
enrichment scores.
ncpu: int
Number of CPUs the use when using parallel processing. Defaults
to 1.
optim_over: "cols" or "rows"
Choose “cols” if enrichment scores will be computed for many
features and “rows” if there are many reference cells in the
query. Paralellization over “rows” is only beneficial if enough
memory is available.
n_chunks: int
The number of chunks to divide the feature matrix into when
processing a scipy CSR matrix. If memory is limited, choosing
a higher number of chunks might be beneficial. Defaults to
n_features * 0.01 rounded up to the first integer.
Returns
-------
A pandas dataframe of enrichment scores of size
(n_features, n_reference_cells)."""
# set the default parameters
metric = "cosine" if metric is None else metric
scale_exp = True if scale_exp is None else scale_exp
if optim_over is None:
if X.shape[1] > len(query):
optim_over = "cols"
else:
optim_over = "rows"
if n_chunks is None:
n_chunks = ceil(X.shape[1] * 0.01) if n_chunks is None else n_chunks
ncpu = 1 if ncpu is None else ncpu
make_copy = True if make_copy is None else make_copy
if make_copy is True:
X = X.copy()
if isinstance(X, pd.DataFrame):
cells, genes = X.index, X.columns
if all(i in cells for i in query):
query = [X.index.get_loc(i) for i in query]
X = X.values
if S is None:
print("Calculating pairwise similarities")
S = pairwise_similarities(X, query, metric)
else:
S = S
if ncpu > 1:
print("Start enrichment scoring using {0} CPUs".format(ncpu))
print("Creating process pool")
with mp.Pool(processes=ncpu) as pool:
if optim_over == "cols":
i_chunks = _chunk_indices(X, n=ncpu, axis=1)
mpres = [pool.apply_async(_enrichment_scoring,
args=(X[:, i], S, scale_exp, i,
n_chunks)) for i in i_chunks]
else:
i_chunks = _chunk_indices(S, n=ncpu, axis=1)
mpres = [pool.apply_async(_enrichment_scoring,
args=(X, S[:, i], scale_exp, i,
n_chunks)) for i in i_chunks]
print("Run enrichment scoring")
mpres = [r.get() for r in mpres]
pool.close()
pool.join()
print("Enrichment scoring complete")
else:
print("Start enrichment scoring")
i_chunks = _chunk_indices(X, n=2, axis=1)
mpres = [_enrichment_scoring(X[:, i], S, scale_exp, i, n_chunks)
for i in i_chunks]
print("Enrichment scoring complete")
if "cells" in locals():
rows = [list(mpres[i][0]) for i in range(len(mpres))]
rows = [genes[i] for i in itertools.chain.from_iterable(rows)]
if all(cells[item] in cells for item in query):
cols = [cells[i] for i in query]
else:
cols = query
else:
rows = [list(mpres[i][0]) for i in range(len(mpres))]
rows = list(itertools.chain.from_iterable(rows))
cols = query
scores = [pd.DataFrame(mpres[i][1]) for i in range(len(mpres))]
if (optim_over == "rows") and (ncpu > 1):
scores = pd.concat(scores, axis=1)
if "genes" in locals():
scores.index, scores.columns = genes, cols
else:
scores.columns = cols
else:
scores = pd.concat(scores, axis=0)
scores.index, scores.columns = rows, cols
return scores
def permute(X, n=None, axis=None, seed=None):
    """Permute a matrix-like object ``n`` times.

    Parameters
    ----------
    X: pandas dataframe, numpy array, or scipy CSR matrix of shape
        (n_samples, n_features).
    n: int
        Number of permutations (default 100).
    axis: 0 or 1
        0 permutes the rows (expression vectors of a feature matrix),
        1 the columns. Defaults to 0.
    seed: int
        Seed forwarded to numpy.random for reproducibility (default 42).

    Returns: an n-times permuted matrix of shape X.
    """
    return _permute(X,
                    n=100 if n is None else n,
                    axis=0 if axis is None else axis,
                    seed=42 if seed is None else seed)
def sig_interval(pscores, n_sds, query=None):
    """Compute per-cell significance cut-offs from permutation scores.

    Returns a dictionary {query cell: (lower, upper)} of enrichment
    score significance cut-offs below (lower) and above (upper) which
    scores are significant at n_sds standard deviations away from the
    mean of the permutation enrichment scores.

    Parameters
    ----------
    pscores: pandas dataframe or scipy sparse matrix
        Enrichment scores obtained from permuted expression vectors
        (e.g. from permute(X)), with permutations in rows and
        reference cells in columns.
    n_sds: int
        The number of standard deviations away from the mean of the
        pscores at which to declare significance. Defaults to 5 when
        None is given.
    query: list-like
        Labels for the columns of pscores, used as dictionary keys.
        When omitted, column labels (for dataframes) or positional
        indices are used.

    Returns
    -------
    A dictionary of the shape {cell: (lower, upper)}
    """
    n_sds = 5 if n_sds is None else n_sds
    if issparse(pscores):
        if query is None:
            print("Outputting cut-offs in order. Please provide a" +
                  " query in order if you want to use labels as keys.")
            query = list(range(pscores.shape[1]))
        if pscores.getformat() not in ["csr", "csc"]:
            # bug fix: scipy sparse matrices convert via tocsr(), the
            # previous to_csr() raised AttributeError
            pscores = pscores.tocsr()
        # ravel: sparse .mean(axis=0) yields a (1, n) matrix, which would
        # make the zip below pair whole rows instead of per-column values
        mu = np.asarray(pscores.mean(axis=0)).ravel()
        std = np.asarray(_std_sparse(pscores, axis=0, ddof=1)).ravel()
    else:
        mu, std = np.mean(pscores, axis=0), np.std(pscores, axis=0, ddof=1)
        if query is None:
            # bug fix: only derive keys when the caller gave no query;
            # previously a user-supplied query was overwritten here
            query = (pscores.columns if hasattr(pscores, "columns")
                     else list(range(pscores.shape[1])))
    return dict(zip(query, zip(mu - std * n_sds, mu + std * n_sds)))
def _min_set(X, sets, i=None):
"""Return the min value of each set as expression value"""
if issparse(X):
return i, csr_matrix([np.amin(X[:, s].A, 1) for s in sets]).T
else:
return i, csr_matrix([np.amin(X[:, s], 1) for s in sets]).T
def _max_set(X, sets, i=None):
"""Return the max value of each set as expression value"""
if issparse(X):
return i, csr_matrix([np.amax(X[:, s].A, 1) for s in sets]).T
else:
return i, csr_matrix([np.amax(X[:, s], 1) for s in sets]).T
def _median_set(X, sets, i=None):
"""Return the median value of each set as expression value"""
if issparse(X):
return i, csr_matrix([np.median(X[:, s].A, 1) for s in sets]).T
else:
return i, csr_matrix([np.median(X[:, s], 1) for s in sets]).T
def _interaction_set(X, sets, i=None):
"""Return the expression product of each set as expression value"""
if issparse(X):
return i, csr_matrix([np.prod(X[:, s].A, 1) for s in sets]).T
else:
return i, csr_matrix([np.prod(X[:, s], 1) for s in sets]).T
def _binary_set(X, sets, i=None):
if issparse(X):
gse = csr_matrix([np.sum(X[:, s].A, 1) for s in sets]).T
else:
gse = csr_matrix([np.sum(X[:, s], 1) for s in sets]).T
gse[gse > 1] = 1
return i, gse
def feature_set_values(X, sets, combtype):
    """Combines feature values for all elements in a set.

    Parameters
    ----------
    X: matrix-like
        A matrix-like object where rows are samples (i.e. cells)
        and columns are features (i.e. genes). Accepts numpy
        arrays, pandas dataframes and scipy compressed sparse
        row matrices.
    sets: iterable of tuples
        An iterable of tuples (i, ..., n) where i is the column
        index of feature 1 and n is the column index of feature n.
    combtype: str
        - "min": element-wise minimum of the feature vectors.
        - "max": element-wise maximum of the feature vectors.
        - "median": element-wise median of the feature vectors.
        - "interaction": element-wise product of the feature
          vectors.
        - "binary": 1 if at least one of the features is present
          and 0 if none are present.

    Returns
    -------
    A matrix-like object (n_samples, sets) of feature vectors
    (columns) for each set in sets.

    Raises
    ------
    ValueError
        If combtype is not one of the recognised combination types.
    """
    if isinstance(X, pd.DataFrame):
        X = X.values
    if (issparse(X)) and (X.getformat() not in ["csr", "csc"]):
        X = X.tocsr()
    print("Constructing expression set")
    # Dispatch table; previously an unknown combtype silently returned None.
    combiners = {"min": _min_set, "max": _max_set, "median": _median_set,
                 "interaction": _interaction_set, "binary": _binary_set}
    try:
        combine = combiners[combtype]
    except KeyError:
        raise ValueError("Unknown combtype {0!r}; expected one of {1}".format(
            combtype, sorted(combiners)))
    return combine(X, sets)[1]
def pvals_per_cell(escores, pscores, mt_cor=None, alpha=None, ret=None):
    """Computes the p-value with respect to a permutation null for each
    gene in each reference cell.

    The permutation scores are summarised per cell by their mean and
    sample standard deviation, and observed scores are tested against
    that normal approximation, two-sided.

    -----
    escores: matrix-like
        A np.array or pd.DataFrame that contains the enrichment scores
        for each gene (row) in each reference cell (column)
    pscores: matrix-like
        A np.array or pd.DataFrame that contains the enrichment scores
        for each permutated gene (row) in each reference cell (column).
    mt_cor: str
        The method by which to perform multiple testing correction
        (passed to statsmodels' multipletests). Defaults to "bonferroni".
    alpha: float
        Family-wise error rate. Defaults to 0.05.
    ret: str
        Whether to return p-values ("p"), corrected p-values ("q"),
        or both ("pq"). Defaults to corrected p-values ("q").
    -----
    Returns: a pd.DataFrame of p-values, corrected p-values,
    or both (a (pvals, pvals_cor) tuple when ret == "pq"). Rows are
    genes and columns are cells when escores was a DataFrame.
    """
    from scipy.stats import norm
    from statsmodels.sandbox.stats.multicomp import multipletests
    mt_cor = "bonferroni" if mt_cor is None else mt_cor
    alpha = .05 if alpha is None else alpha
    ret = "q" if ret is None else ret
    # Unpack labels before dropping to numpy; "cells"/"genes" only exist
    # in locals() when escores was a DataFrame, which the checks below use.
    if isinstance(escores, pd.DataFrame):
        genes, cells, escores = escores.index, escores.columns, escores.values
    # Per-cell null parameters from the permutation scores (sample std).
    mu, sigma = np.mean(pscores, axis=0), np.std(pscores, axis=0, ddof=1)
    # Two-sided tail probability under the normal null.
    # NOTE(review): abs(escores) with loc=mu is only a true two-sided
    # p-value when mu is ~0 -- confirm permutation scores are centred.
    pvals = 2 * (1 - norm.cdf(abs(escores), loc=mu, scale=sigma))
    if (ret == "q") or (ret == "pq"):
        # Correct within each cell: one multipletests call per column.
        pvals_cor = []
        for i in range(pvals.shape[1]):
            pvals_cor.append(multipletests(pvals[:, i], alpha=alpha,
                                           method=mt_cor)[1])
        # make corrected p-value dataframe (transpose back to genes x cells)
        if "cells" in locals():
            pvals_cor = pd.DataFrame(pvals_cor, columns=genes, index=cells).T
        else:
            pvals_cor = pd.DataFrame(pvals_cor).T
    if (ret == "pq") or (ret == "p"):
        if "cells" in locals():
            pvals = pd.DataFrame(pvals, index=genes, columns=cells)
        else:
            pvals = pd.DataFrame(pvals)
    # return the appropriate dataframe(s)
    if ret == "q":
        return pvals_cor
    elif ret == "pq":
        return pvals, pvals_cor
    else:
        return pvals
def pvals_per_gene(escores, pscores, mt_cor="bonferroni", alpha=0.05):
    """Computes a p-value per gene against the permutation null.

    Each gene's observed score distribution (across reference cells)
    is compared to its permutation distribution with a two-sample
    Kolmogorov-Smirnov test, followed by multiple-testing correction.

    -----
    escores: matrix-like
        A np.array or pd.DataFrame that contains the enrichment scores
        for each gene (row) in each reference cell (column)
    pscores: matrix-like
        A np.array or pd.DataFrame that contains the enrichment scores
        for each permutated gene (row) in each reference cell (column).
        NOTE(review): rows are assumed to align one-to-one with the
        rows of escores -- confirm with the permutation pipeline.
    mt_cor: str
        The method by which to perform multiple testing correction
        (passed to statsmodels' multipletests). Defaults to "bonferroni".
    alpha: float
        Family-wise error rate. Defaults to 0.05.
    -----
    Returns: a pd.DataFrame with columns "d" (KS statistic), "p"
    (raw p-value) and "q" (corrected p-value), indexed by gene when
    escores is a DataFrame.
    """
    from scipy.stats import ks_2samp
    from statsmodels.stats.multitest import multipletests
    # Defensive defaults in case callers explicitly pass None.
    mt_cor = "bonferroni" if mt_cor is None else mt_cor
    alpha = .05 if alpha is None else alpha
    if isinstance(escores, pd.DataFrame):
        egenes, ecells, escores = escores.index, escores.columns, escores.values
    if isinstance(pscores, pd.DataFrame):
        pgenes, pcells, pscores = pscores.index, pscores.columns, pscores.values
    n = escores.shape[0]
    ks = [ks_2samp(escores[i, :], pscores[i, :]) for i in range(n)]
    ks = pd.DataFrame(data=[(d, k) for d, k in ks],
                      columns=["d", "p"])
    if "egenes" in locals():
        ks.index = egenes
    # multipletests returns (reject, pvals_corrected, alphacSidak,
    # alphacBonf); only the corrected p-values are needed.
    ks["q"] = multipletests(ks.loc[:, "p"], alpha, method=mt_cor)[1]
    return ks
| ohlerlab/SEMITONES | src/SEMITONES/enrichment_scoring.py | enrichment_scoring.py | py | 16,684 | python | en | code | 8 | github-code | 36 |
9645302628 | '''
Find sum of all primes below N.
'''
from datetime import datetime
import math
import itertools
def summationOfPrimes(N):
    """Return the sum of all primes <= N using a sieve of Eratosthenes.

    Returns 0 for N < 2 (the original implementation raised an
    IndexError for such inputs).
    """
    if N < 2:
        return 0
    # is_prime[k] is True while k is still a prime candidate.
    is_prime = [True] * (N + 1)
    is_prime[0] = is_prime[1] = False
    # Only sieve with primes up to sqrt(N); start striking at p*p since
    # smaller multiples were already removed by smaller primes.
    for p in range(2, int(math.sqrt(N)) + 1):
        if is_prime[p]:
            for multiple in range(p * p, N + 1, p):
                is_prime[multiple] = False
    return sum(k for k, prime in enumerate(is_prime) if prime)
if __name__ == '__main__':
    # Prompt for the upper bound (Project Euler problem 10 uses 2000000).
    N = int (input('Input upper bound number to find sum of all primes (E.g. 2000000): ') )
    # Record wall-clock timestamps around the computation for a rough timing.
    start = datetime.now().strftime("%H:%M:%S")
    print('Answer: ', str(summationOfPrimes( N) ) )
    end = datetime.now().strftime("%H:%M:%S")
    print( 'Start and end time: ', start, ' ', end )
27095189897 | from django.urls import path
from .views import register, login,home, task_edit,task_delete, filter_period_view,logout
# URL routes for planner_app. Django resolves patterns top-down, so the
# more specific 'task/...' routes are listed before the catch-all slug route.
urlpatterns =[
    path("register/", register, name='register'),  # account creation
    path('', login, name='login'),  # site root is the login page
    path('logout/', logout, name='logout' ),
    path('home/', home, name='home'),  # main task list
    path('task/edit/<int:id>', task_edit, name='task_edit' ),  # edit task by pk
    path('task/delete/<int:id>', task_delete, name='task_delete'),  # delete task by pk
    path('task/<str:slug>', filter_period_view, name='filter-period'),  # filter tasks by period slug
]
26579559940 | '''
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
'''
class Solution():
    def maxProfit(self, prices):
        """Best profit from one buy followed by one sell; 0 if no
        profitable trade exists or prices is empty."""
        if not prices:
            return 0
        cheapest = prices[0]
        best = 0
        # Track the cheapest buy seen so far and the best sell against it.
        for price in prices[1:]:
            cheapest = min(cheapest, price)
            best = max(best, price - cheapest)
        return best
# Smoke tests for Solution.maxProfit.
solver = Solution()
assert solver.maxProfit([2, 1, 4]) == 3
assert solver.maxProfit([7, 1, 5, 3, 6, 4]) == 5
assert solver.maxProfit([]) == 0
| msencer/leetcode-solutions | easy/python/MaxProfit.py | MaxProfit.py | py | 717 | python | en | code | 5 | github-code | 36 |
12366385422 | #!/usr/bin/env ccp4-python
"""
Created on 2 Feb 2015
@author: jmht
"""
import logging
import os
import shutil
import sys
import uuid
from ample.util import ample_util, mtz_util
try:
from mrbump.parsers import parse_shelxe
except ImportError:
mrbumpd = os.path.join(os.environ['CCP4'], "share", "mrbump", "include", "parsers")
sys.path.insert(0, mrbumpd)
import parse_shelxe
logger = logging.getLogger(__name__)
class MRinfo(object):
    """An object to analyse Molecular Replacement solutions

    Attributes
    ----------
    work_dir : str
        Path to the working directory
    shelxe_exe : str
        Path to the SHELXE executable
    stem : str
        The name for all the SHELXE files
    originShift : list
        The origin shift of the MR pdb to native as a list of three floats
    MPE : float
        The Mean Phase Error of the MR pdb to the native pdb
    wMPE : float
        The weighted Mean Phase Error of the MR pdb to the native pdb
    """

    def __init__(self, shelxe_exe, native_pdb, native_mtz, work_dir=None):
        """Initialise from native pdb and mtz so that analyse only requires a MR pdb

        Parameters
        ----------
        shelxe_exe : str
            Path to the SHELXE executable
        native_pdb : str
            Path to the native PDB file
        native_mtz : str
            Path to the native MTZ file
        work_dir : str
            Working directory (defaults to the current directory)
        """
        self.work_dir = work_dir or os.getcwd()
        self.shelxe_exe = shelxe_exe
        # uuid-suffixed stem so concurrent runs in one directory cannot clash
        self.stem = 'shelxe-input-{}'.format(str(uuid.uuid1()))
        self.MPE = None
        self.wMPE = None
        self.originShift = None
        self.mk_native_files(native_pdb, native_mtz)

    def mk_native_files(self, native_pdb, native_mtz):
        """Create the files required by SHELXE from the native structure

        Parameters
        ----------
        native_pdb : str
            Path to the native PDB file
        native_mtz : str
            Path to the native MTZ file
        """
        # SHELXE reads <stem>.hkl (reflection data) and <stem>.ent (native model)
        mtz_util.to_hkl(native_mtz, hkl_file=os.path.join(self.work_dir, self.stem + ".hkl"))
        shutil.copyfile(native_pdb, os.path.join(self.work_dir, self.stem + ".ent"))

    def analyse(self, mr_pdb, cleanup=True):
        """Use SHELXE to analyse an MR pdb file to determine the origin shift and phase error

        This function sets the ``MPE``, ``wMPE`` and ``originShift`` attributes.

        Parameters
        ----------
        mr_pdb : str
            Path to the Molecular Replacement PDB file
        cleanup : bool
            Remove the SHELXE input/output files on completion

        Raises
        ------
        RuntimeError
            If the SHELXE run returns a non-zero exit status
        """
        os.chdir(self.work_dir)
        input_pdb = self.stem + ".pda"
        shutil.copyfile(mr_pdb, os.path.join(self.work_dir, input_pdb))
        cmd = [self.shelxe_exe, input_pdb, '-a0', '-q', '-s0.5', '-o', '-n', '-t0', '-m0', '-x']
        # Unique log name so repeated analyse() calls do not overwrite each other
        logfile = os.path.abspath('shelxe_{}.log'.format(str(uuid.uuid1())))
        ret = ample_util.run_command(cmd=cmd, logfile=logfile, directory=None, dolog=False, stdin=None)
        if ret != 0:
            raise RuntimeError("Error running shelxe - see log: {0}".format(logfile))
        sp = parse_shelxe.ShelxeLogParser(logfile)
        # Only added in later version of MRBUMP shelxe parser
        if hasattr(sp, 'MPE'):
            self.MPE = sp.MPE
            self.wMPE = sp.wMPE
        if isinstance(sp.originShift, list):
            # Negate: downstream code expects the shift with the opposite
            # sign to what the parser reports.
            self.originShift = [o * -1 for o in sp.originShift]
        if cleanup:
            for ext in ['.hkl', '.ent', '.pda', '.pdo', '.phs', '.lst', '_trace.ps']:
                try:
                    os.unlink(self.stem + ext)
                except OSError:
                    # Best-effort cleanup: not every run produces every file.
                    # (Previously a bare except: also swallowed KeyboardInterrupt.)
                    pass
            os.unlink(logfile)
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Determine origin using SHELXE', prefix_chars="-")
    parser.add_argument('--native_mtz', help='Native MTZ', required=True)
    parser.add_argument('--native_pdb', help='Native PDB', required=True)
    parser.add_argument('--mr_pdb', help='Molecular Replacement MTZ', required=True)
    parser.add_argument('--executable', help="Path to SHELXE executable")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    # Default to the SHELXE bundled with CCP4 when no executable is given
    if args.executable:
        executable = args.executable
    else:
        executable = os.path.join(os.environ['CCP4'], "bin", "shelxe" + ample_util.EXE_EXT)
    # Resolve and validate all input paths up front
    native_mtz = os.path.abspath(args.native_mtz)
    if not os.path.isfile(native_mtz):
        raise RuntimeError("Cannot find input file: {0}".format(native_mtz))
    native_pdb = os.path.abspath(args.native_pdb)
    if not os.path.isfile(native_pdb):
        raise RuntimeError("Cannot find input file: {0}".format(native_pdb))
    mr_pdb = os.path.abspath(args.mr_pdb)
    if not os.path.isfile(mr_pdb):
        raise RuntimeError("Cannot find input file: {0}".format(mr_pdb))
    mrinfo = MRinfo(executable, native_pdb, native_mtz)
    mrinfo.analyse(mr_pdb)
    # Bug fix: the stem is uuid-suffixed, so the old hard-coded
    # 'shelxe-input.hkl'/'shelxe-input.ent' names never existed and
    # os.unlink raised FileNotFoundError; analyse() normally removes
    # these itself, so only clear leftovers best-effort.
    for ext in (".hkl", ".ent"):
        try:
            os.unlink(mrinfo.stem + ext)
        except OSError:
            pass
    logger.info("Origin shift is: {0}".format(mrinfo.originShift))
    # Bug fix: originShift was previously passed as the first format
    # argument, so MPE printed the origin shift and wMPE printed MPE.
    logger.info("Phase error is MPE: {0} | wMPE: {1}".format(mrinfo.MPE, mrinfo.wMPE))
43380221313 | import openai
def interact_with_chatgpt_prova(user, system, API_KEY, maxContent, creativita):
    """Send a system+user prompt to gpt-3.5-turbo-16k.

    Returns a two-element list: [reply text, total tokens consumed].
    """
    openai.api_key = API_KEY
    conversation = [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ]
    # max_tokens bounds the reply length; temperature controls how
    # "creative" (random) the completion is.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=conversation,
        max_tokens=maxContent,
        temperature=creativita,
    )
    reply = completion['choices'][0]['message']['content']
    tokens_used = completion['usage']['total_tokens']
    return [reply, tokens_used]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.