seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
19447382035 | # -*- coding: utf-8 -*-
from CGRtools.files import SDFwrite
from pickle import load
# Module-level accumulators shared across get_set() calls.
global_result = set()   # accepted records: (sig_a, sig_b, label_1, label_2)
pairs = set()           # ordered signature pairs already processed
molecules = {}          # unused in this file; kept for interface compatibility
NUMBER = set()          # unused in this file; kept for interface compatibility
SIG = set()             # unused in this file; kept for interface compatibility
train = set()           # pairs sharing a molecule with an earlier record
test = set()
validation = set()
# Incremental indexes over global_result's first/second signatures.
# They replace the original's per-pair O(n) list rebuilds with O(1) lookups.
_seen_first = set()
_seen_second = set()
def get_set(path, number):
    """Load '{path}/{0..number-1}.pickle' and split the molecule pairs they
    contain into the module-level train/test/validation sets.

    Each pickle holds an iterable of tuples (mol_a, mol_b, x, y); bytes(mol)
    is used as the molecule's signature.  A pair whose signatures were seen
    before (in either order) is skipped.  A pair that shares a signature with
    a previously accepted record goes to ``train``; otherwise it is balanced
    between ``test`` and ``validation``.
    """
    for num in range(number):
        # 'with' guarantees the file handle is closed (original leaked it).
        with open('{}/{}.pickle'.format(path, num), 'rb') as fh:
            tuples = load(fh)
        for take_ml in tuples:
            a = take_ml[0]
            b = take_ml[1]
            sig_a = bytes(a)
            sig_b = bytes(b)
            if (sig_b, sig_a) in pairs or (sig_a, sig_b) in pairs:
                continue
            TUPLE = (sig_a, sig_b, take_ml[2], take_ml[3])
            pairs.add((sig_a, sig_b))
            # O(1) membership tests against the incremental signature indexes
            # instead of rebuilding check_a/check_b lists on every pair.
            if (sig_a in _seen_first or sig_b in _seen_second
                    or sig_a in _seen_second or sig_b in _seen_first):
                train.add(take_ml)
            else:
                if len(test) <= len(validation):
                    test.add(take_ml)
                else:
                    validation.add(take_ml)
            global_result.add(TUPLE)
            _seen_first.add(sig_a)
            _seen_second.add(sig_b)
# Process pickle files 0..number-1 for number = 4, 5, 6.  Files 0-3 are thus
# re-read on every call, but duplicate pairs are skipped via the `pairs` set,
# so re-reading only costs time, not correctness.
for number in range(4, 7):
    get_set('/home/nadia/data/True_pairs_new', number)
| Pandylandy/Practice | NN_score/prepare.py | prepare.py | py | 1,197 | python | en | code | 0 | github-code | 13 |
16129955763 | """
Purpose: Adding Logging configuration
%(asctime)s : displays the date and time of the log, in local time
%(levelname)s: the logging level of the message
%(message)s : the message
"""
import logging
logging.basicConfig(
format="%(asctime)-15s %(client_ip)s %(name)9s %(user)-8s %(message)s"
)
d = {"client_ip": "192.168.0.1", "user": "fbloggs"}
logging.info("This is info message", extra=d)
logging.error("This is error message", extra=d)
# Method 1
logging.basicConfig(
filename="logs/06_logging.log", # filemode='w',
format="%(asctime)s %(levelname)8s %(name)s %(message)s",
datefmt="%d-%b-%Y %I:%M:%S %p",
level=logging.DEBUG,
)
logging.warning("Protocol problem: %s", "connection reset", extra=d)
# Method 2
logger = logging.getLogger("TCPserver")
logger.warning("Protocol problem: %s", "connection reset", extra=d)
logger2 = logging.getLogger("UDPserver")
logger2.warning("Protocol problem: %s", "connection reset", extra=d)
logger3 = logging.getLogger("myApp")
logger3.warning("Protocol problem: %s", "connection reset", extra=d)
| udhayprakash/PythonMaterial | python3/12_Logging/a_builtin_logging/06_custom_logging_configuration.py | 06_custom_logging_configuration.py | py | 1,084 | python | en | code | 7 | github-code | 13 |
16755238195 | """Tests for diamond_norm."""
import numpy as np
from toqito.channel_metrics import diamond_norm
from toqito.channels import dephasing, depolarizing
def test_diamond_norm_same_channel():
    """The diamond norm of identical channels should yield 0."""
    first = dephasing(2)
    second = dephasing(2)
    distance = diamond_norm(first, second)
    np.testing.assert_equal(np.isclose(distance, 0, atol=1e-3), True)
def test_diamond_norm_different_channel():
    """Calculate the diamond norm of different channels."""
    dephasing_choi = dephasing(2)
    depolarizing_choi = depolarizing(2)
    distance = diamond_norm(dephasing_choi, depolarizing_choi)
    np.testing.assert_equal(np.isclose(distance, 1, atol=1e-3), True)
def test_diamond_norm_inconsistent_dims():
    """Inconsistent dimensions between Choi matrices."""
    with np.testing.assert_raises(ValueError):
        big_choi = depolarizing(4)
        small_choi = dephasing(2)
        diamond_norm(big_choi, small_choi)
def test_diamond_norm_non_square():
    """Non-square inputs for diamond norm."""
    with np.testing.assert_raises(ValueError):
        rectangular_a = np.array([[1, 2, 3], [4, 5, 6]])
        rectangular_b = np.array([[1, 2, 3], [4, 5, 6]])
        diamond_norm(rectangular_a, rectangular_b)
| vprusso/toqito | toqito/channel_metrics/tests/test_diamond_norm.py | test_diamond_norm.py | py | 1,164 | python | en | code | 118 | github-code | 13 |
75082233936 | #we're gonna take a 10 x 10 grid of squares
#obstacles are black squares
#objects defined by shape, size, color
#each square gets an x, y coordinate
#return list of occupied grids using computer vision
#find minimum path between starting object and matching object using A* search
#openCV was created by Intel, maintained by willow garage, and is open source
#it does everything image related - segmentation, motion tracking, facial recognition
import cv2
#scientific computing
import numpy as np
#for measuring time
import time
#image processing in python
#Compute the mean structural similarity index between two images.
from skimage.measure import compare_ssim as ssim #to compare 2 images
# it's the leading pathfinding algorithm
#used in games like Warcraft III.
#searching among all possible paths to the solution (goal) for the one that incurs the smallest
#cost (least distance travelled, shortest time, etc.), and among these paths it first considers
#the ones that appear to lead most quickly to the solution.
#we generate our possibilities and pick the one with the least projected cost.
#Once a possibility is generated and its cost is calculated,
#it stays in the list of possibilities until all the better nodes have been searched before it
import astarsearch
#helper module, will help us traverse the image from left to right for image processing
import traversal
def main(image_filename):
    '''
    Analyse a 10x10 grid image: locate occupied cells, detect black
    obstacles, match visually similar objects (SSIM > 0.9) and plan the
    shortest A* path between each matched pair.

    Returns:
    1 - List of tuples which is the coordinates for occupied grid.
    2 - Dictionary with information of path.
    '''
    occupied_grids = []     # coordinates of every non-white (occupied) cell
    planned_path = {}       # start cell -> [matched cell, min path, length]

    # Load the image; every grid cell is a 60x60 pixel window.
    image = cv2.imread(image_filename)
    (winW, winH) = (60, 60)     # Size of individual cropped images
    obstacles = []              # black cells that cannot be traversed
    index = [1, 1]              # 1-based coordinates of the current window

    # 10x10 array of cropped cell images, pre-filled with blank tiles.
    blank_image = np.zeros((60, 60, 3), np.uint8)
    list_images = [[blank_image for i in xrange(10)] for i in xrange(10)]
    # 10x10 occupancy matrix fed to the A* search (1 = occupied).
    maze = [[0 for i in xrange(10)] for i in xrange(10)]

    # Slide a 60x60 window across the image, one grid cell per step.
    for (x, y, window) in traversal.sliding_window(image, stepSize=60, windowSize=(winW, winH)):
        # Skip windows clipped at the image border.
        if window.shape[0] != winH or window.shape[1] != winW:
            continue

        clone = image.copy()
        cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
        crop_img = image[x:x + winW, y:y + winH]    # crop the current cell
        list_images[index[0] - 1][index[1] - 1] = crop_img.copy()

        # The cell's average colour decides its category: mostly white is
        # empty, any darker channel means occupied, near-black is an obstacle.
        average_color_per_row = np.average(crop_img, axis=0)
        average_color = np.average(average_color_per_row, axis=0)
        average_color = np.uint8(average_color)
        if (any(i <= 240 for i in average_color)):      # ie not majorly white
            maze[index[1] - 1][index[0] - 1] = 1
            occupied_grids.append(tuple(index))
        if (any(i <= 20 for i in average_color)):       # black obstacle
            obstacles.append(tuple(index))

        # Visualise the traversal step by step.
        cv2.imshow("Window", clone)
        cv2.waitKey(1)
        time.sleep(0.025)

        # Advance to the next cell; wrap to the next row after column 10.
        index[1] = index[1] + 1
        if (index[1] > 10):
            index[0] = index[0] + 1
            index[1] = 1

    # Cells holding objects: occupied but not black obstacles.
    list_colored_grids = [n for n in occupied_grids if n not in obstacles]

    # Compare each object cell with every other; an SSIM score > 0.9 marks
    # the two cells as holding the same object, so plan an A* path between
    # them on the occupancy maze.
    for startimage in list_colored_grids:
        img1 = list_images[startimage[0] - 1][startimage[1] - 1]
        for grid in [n for n in list_colored_grids if n != startimage]:
            img = list_images[grid[0] - 1][grid[1] - 1]
            # Structural similarity works on grayscale images.
            image = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            image2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            s = ssim(image, image2)
            if s > 0.9:
                result = astarsearch.astar(maze, (startimage[0] - 1, startimage[1] - 1), (grid[0] - 1, grid[1] - 1))
                list2 = []
                for t in result:
                    x, y = t[0], t[1]
                    list2.append(tuple((x + 1, y + 1)))     # back to 1-based
                result = list(list2[1:-1])      # minimum path without endpoints
                if not result:
                    # BUG FIX: the original fell through after this line and
                    # unconditionally overwrote the "NO PATH" entry.
                    planned_path[startimage] = list(["NO PATH", [], 0])
                else:
                    planned_path[startimage] = list([str(grid), result, len(result) + 1])

    # Objects with no similar partner anywhere in the grid.
    for obj in list_colored_grids:
        if not (planned_path.has_key(obj)):
            planned_path[obj] = list(["NO MATCH", [], 0])

    return occupied_grids, planned_path
if __name__ == '__main__':
    # Change the filename below to analyse a different test image.
    main("test_images/test_image1.jpg")
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| llSourcell/path_planning_demo_live | process_image.py | process_image.py | py | 5,615 | python | en | code | 65 | github-code | 13 |
9483920526 | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
import sys
from PyQt5 import QtWidgets, QtGui
def main():
    """List the available font families, then print a few QFont descriptions."""
    # A QApplication must exist before font services can be queried.
    app = QtWidgets.QApplication(sys.argv)

    database = QtGui.QFontDatabase()
    print('Font Families'.center(80, '='))
    for family in database.families():
        print(family)
    print('=' * 80)

    print('Font Test'.center(80, '='))
    default_font = QtGui.QFont()
    print(default_font.toString())
    default_font.setFixedPitch(True)
    print(default_font.toString())
    mono_font = QtGui.QFont('Monospace')
    print(mono_font.toString())
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| liuyug/code_example | pyqt/fonts.py | fonts.py | py | 572 | python | en | code | 0 | github-code | 13 |
def solution(participant, completion):
    """Return the name of the single participant who did not finish.

    `completion` equals `participant` minus exactly one runner.  The original
    implementation summed Python hash() values and looked the remainder up in
    a hash->name dict, which silently breaks on a hash collision; explicit
    per-name counting is collision-free, still O(n), and handles duplicate
    names (two runners called "mislav", one of whom finished) correctly.
    """
    counts = {}
    for name in participant:
        counts[name] = counts.get(name, 0) + 1
    for name in completion:
        counts[name] -= 1
    for name, remaining in counts.items():
        if remaining:
            return name
    return ''
73702111056 | import unittest
import pandas as pd
from src.cleaning_data.data_transformation_classes import MakeDataframesFromMovies
from src.cleaning_data.cleaning_functions import clean_movies
class TestMakeDataframesFromMovies(unittest.TestCase):
    """Unit tests for MakeDataframesFromMovies built on the cleaned movie set."""

    def setUp(self):
        cleaned = clean_movies()
        self.maker = MakeDataframesFromMovies(cleaned)

    def test_create_collections_df(self):
        collections_df = self.maker.create_collections_df()
        for expected_column in ('collection_id', 'name'):
            self.assertIn(expected_column, collections_df.columns)
        self.assertNotIn('belongs_to_collection', self.maker.df.columns)

    def test_extract_df_from_col(self):
        genre_df, junction_df = self.maker.extract_df_from_col('genres', 'genre', 'name')
        self.assertIn('genre', genre_df.columns)
        self.assertIn('film_id', junction_df.columns)

    def test_final_transformation_movies_df(self):
        self.maker.final_transformation_movies_df()
        dropped = ('genres', 'production_companies', 'production_countries', 'spoken_languages')
        for column in dropped:
            self.assertNotIn(column, self.maker.df.columns)

    def test_get_movies_df(self):
        self.assertIsInstance(self.maker.get_movies_df(), pd.DataFrame)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| JPatryk13/movie_dataset_analysis | src/tests/transforming_tests/test_movies.py | test_movies.py | py | 1,325 | python | en | code | 1 | github-code | 13 |
17555588516 | import json
from channels.generic.websocket import WebsocketConsumer
import asyncio
from django.contrib.auth import get_user_model
from channels.consumer import AsyncConsumer
#from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncWebsocketConsumer
import pandas.io.sql as sqlio
import pickle
import numpy as np
import base64
import psycopg2
import os
import cv2
from psycopg2_pgevents.trigger import install_trigger, \
install_trigger_function, uninstall_trigger, uninstall_trigger_function
from psycopg2_pgevents.event import poll, register_event_channel, \
unregister_event_channel
class MyConsumer(WebsocketConsumer):
    """Streams crowd-heatmap images from Postgres over a websocket.

    On connect it installs a trigger/notification channel on the
    ``crowd_heatmaps`` table, then polls for insert events and pushes each
    new heatmap to the client as a base64-encoded PNG.

    NOTE(review): ``query_db`` blocks forever inside ``connect``, so this
    synchronous consumer never returns from the handshake -- confirm this
    is the intended design.
    """

    def connect(self):
        # Accept the websocket, open the DB connection and start polling.
        self.accept()
        self.scaleup = 2.0      # intensity multiplier applied to the heatmap
        self.img = None         # last encoded PNG (base64 bytes)
        self.data = None        # raw pickled heatmap fetched from the DB
        self.row_id = None      # id of the newest heatmap row seen
        self.conn = psycopg2.connect(host=os.environ['db_uri'],
                                     dbname=os.environ['db_database'],
                                     user=os.environ['db_username'],
                                     password=os.environ['db_password'],
                                     port=os.environ['db_port'])
        self.table_name = 'crowd_heatmaps'
        self.query_db()

    def receive(self, text_data=None, bytes_data=None):
        # Incoming client messages are ignored; this is a push-only stream.
        pass

    def disconnect(self, close_code):
        pass

    ##### Handlers for messages sent over the channel layer

    def query_db(self):
        """Poll Postgres notification events and push new heatmaps forever."""
        if self.conn is not None:
            self.conn.autocommit = True
            install_trigger_function(self.conn)
            install_trigger(self.conn, self.table_name)
            register_event_channel(self.conn)
            try:
                print("LIStening for event...")
                while True:
                    row_ids = []
                    for evt in poll(self.conn):
                        print('New Event: {}'.format(evt))
                        row_id = vars(evt)['row_id']
                        row_ids.append(row_id)
                    if len(row_ids) > 0:
                        # Only the newest row matters for the display.
                        self.row_id = max(row_ids)
                        self.query_db_basic()
                        self.transform_and_scale()
                        if self.img is not None:
                            self.send(text_data=self.img.decode('utf-8'))
            except KeyboardInterrupt:
                print('User exit via Ctrl-C; Shutting down...')
                # BUG FIX: the original cleanup referenced the undefined
                # names `connection` and `table_name`, raising NameError
                # instead of actually unregistering the channel/triggers.
                unregister_event_channel(self.conn)
                uninstall_trigger(self.conn, self.table_name)
                uninstall_trigger_function(self.conn)
                print('Shutdown complete.')

    def query_db_basic(self):
        """Fetch the pickled heatmap for self.row_id into self.data."""
        if self.row_id is not None:
            try:
                query = "select * from {} natural join video_sensors where id={};".format(self.table_name, self.row_id)
                data = sqlio.read_sql_query(query, self.conn)
                self.data = data['heatmap'][0]
            except psycopg2.DatabaseError as error:
                print(error)
                self.data = None

    def transform_and_scale(self):
        """Turn the pickled heatmap into a base64 PNG with a JET colormap."""
        q = self.data
        if q is not None:
            q = pickle.loads(q)
            q = np.array(q[..., 0])
            # Scale intensities and clamp to the valid 8-bit range.
            q = np.clip(q * self.scaleup, 0, 255).astype(np.uint8)
            q = cv2.applyColorMap(q, cv2.COLORMAP_JET)
            self.img = base64.b64encode(cv2.imencode('.png', q)[1].tobytes())
| eric-yim/aws-django-channels | stream/consumers.py | consumers.py | py | 4,019 | python | en | code | 0 | github-code | 13 |
35103337951 | # -*- coding: utf-8 -*-
from forms_builder.forms.forms import FormForForm
from .models import FieldEntry, FormEntry
class FormForForm(FormForForm):
    """Form bound to this app's FieldEntry/FormEntry models.

    Deliberately reuses the name of the imported forms_builder FormForForm
    so it acts as a drop-in replacement that swaps in the local models.
    """
    field_entry_model = FieldEntry
    class Meta:
        model = FormEntry
        exclude = ("form", "entry_time")
| sigmacms/fluentcms-forms-builder | fluentcms_forms_builder/forms.py | forms.py | py | 271 | python | en | code | null | github-code | 13 |
41849774563 | from http import HTTPStatus
from fastapi import HTTPException
def validate_salary(salary_from: int, salary_to: int) -> None:
    """Raise HTTP 422 when the salary interval's upper bound is below its lower bound."""
    if salary_from > salary_to:
        raise HTTPException(
            status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
            detail='Интервал зарплат указан некорректно',
        )
| Flopp30/vacancy_searcher_bot | app/endpoints/validators.py | validators.py | py | 346 | python | en | code | 1 | github-code | 13 |
41882942450 | import math
def find_divisor(n):
    """Return the largest power of two dividing n -- except when n itself is
    a power of two, in which case n // 2 is returned so the divisor chain
    keeps shrinking instead of jumping straight to zero.  Odd n yields 1.
    """
    power = 1
    while n & 1 == 0:
        n >>= 1
        if n == 1:
            # n was a pure power of two: stop one doubling early.
            return power
        power <<= 1
    return power
def reduce(n, nums):
    """Append the divisor chain starting at n onto nums, as strings.

    Each step subtracts find_divisor(n) until the chain reaches 1.
    """
    nums.append(str(n))
    if n != 1:
        reduce(n - find_divisor(n), nums)
def divisor_chain(n):
    """Print the length of the divisor chain for n, then the chain itself."""
    chain = []
    reduce(n, chain)
    print(len(chain))
    # One trailing space after the last element, matching the original's
    # per-element `end=" "` output.
    print(" ".join(chain), end=" \n")
if __name__ == "__main__":
lines = int(input())
for i in range(lines):
divisor_chain(int(input()))
| danherbriley/acm4 | 01/divisor_chain.py | divisor_chain.py | py | 611 | python | en | code | 0 | github-code | 13 |
4511778510 | #
# @lc app=leetcode.cn id=133 lang=python
#
# [133] 克隆图
#
# https://leetcode-cn.com/problems/clone-graph/description/
#
# algorithms
# Medium (66.68%)
# Likes: 343
# Dislikes: 0
# Total Accepted: 58.4K
# Total Submissions: 87.6K
# Testcase Example: '[[2,4],[1,3],[2,4],[1,3]]\n[[]]\n[]'
#
# 给你无向 连通 图中一个节点的引用,请你返回该图的 深拷贝(克隆)。
#
# 图中的每个节点都包含它的值 val(int) 和其邻居的列表(list[Node])。
#
# class Node {
# public int val;
# public List<Node> neighbors;
# }
#
#
#
# 测试用例格式:
#
# 简单起见,每个节点的值都和它的索引相同。例如,第一个节点值为 1(val = 1),第二个节点值为 2(val =
# 2),以此类推。该图在测试用例中使用邻接列表表示。
#
# 邻接列表 是用于表示有限图的无序列表的集合。每个列表都描述了图中节点的邻居集。
#
# 给定节点将始终是图中的第一个节点(值为 1)。你必须将 给定节点的拷贝 作为对克隆图的引用返回。
#
#
#
# 示例 1:
#
#
#
# 输入:adjList = [[2,4],[1,3],[2,4],[1,3]]
# 输出:[[2,4],[1,3],[2,4],[1,3]]
# 解释:
# 图中有 4 个节点。
# 节点 1 的值是 1,它有两个邻居:节点 2 和 4 。
# 节点 2 的值是 2,它有两个邻居:节点 1 和 3 。
# 节点 3 的值是 3,它有两个邻居:节点 2 和 4 。
# 节点 4 的值是 4,它有两个邻居:节点 1 和 3 。
#
#
# 示例 2:
#
#
#
# 输入:adjList = [[]]
# 输出:[[]]
# 解释:输入包含一个空列表。该图仅仅只有一个值为 1 的节点,它没有任何邻居。
#
#
# 示例 3:
#
# 输入:adjList = []
# 输出:[]
# 解释:这个图是空的,它不含任何节点。
#
#
# 示例 4:
#
#
#
# 输入:adjList = [[2],[1]]
# 输出:[[2],[1]]
#
#
#
# 提示:
#
#
# 节点数不超过 100 。
# 每个节点值 Node.val 都是唯一的,1 <= Node.val <= 100。
# 无向图是一个简单图,这意味着图中没有重复的边,也没有自环。
# 由于图是无向的,如果节点 p 是节点 q 的邻居,那么节点 q 也必须是节点 p 的邻居。
# 图是连通图,你可以从给定节点访问到所有节点。
#
#
#
# @lc code=start
"""
# Definition for a Node.
class Node(object):
def __init__(self, val = 0, neighbors = None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
"""
class Solution(object):
    def __init__(self):
        # Maps each original node to its already-created clone (memo table).
        self.cl = {}

    def cloneGraph(self, node):
        """Return a deep copy of the connected undirected graph reachable
        from `node`, preserving the neighbor structure (handles cycles via
        the memo table)."""
        if node is None:
            return None
        cached = self.cl.get(node)
        if cached is not None:
            return cached
        clone = Node(node.val)
        # Register the clone before recursing so cycles terminate.
        self.cl[node] = clone
        clone.neighbors = [self.cloneGraph(neighbor) for neighbor in node.neighbors]
        return clone
# @lc code=end
| lagoueduCol/Algorithm-Dryad | 13.DFS.BFS/133.克隆图.py | 133.克隆图.py | py | 2,947 | python | zh | code | 134 | github-code | 13 |
781044414 | import os
import cv2
import pandas as pd
import numpy as np
from sklearn.cluster import MeanShift # as ms
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
from collections import Counter
keyword = input("Search: ");
PATH = "data/"
for category in os.listdir(PATH):
if category == keyword:
path = os.path.join(PATH,category)
for image in os.listdir(path):
img = cv2.imread(os.path.join(path,image))
img = cv2.resize(img,(200,200))
cv2.imshow(category, img)
cv2.waitKey(1000)
#cv2.destroyAllWindows()
# `centers` is never used below; apparently left over from a make_blobs demo.
centers = [[1,1],[5,5],[8,4]]
# Item feature matrix (columns 2,3), item ids (column 0) and display names.
dataset = pd.read_csv('person.csv')
X = dataset.iloc[:,[2,3]].values
y = dataset.iloc[:,[0]].values
name = dataset['Item_names'].tolist()
#plt.scatter(X[:,0], X[:,1])
#plt.show()
# Cluster the items with MeanShift (bandwidth estimated automatically).
ms = MeanShift()
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
#print(cluster_centers)
# Count how many items landed in each cluster label.
gg = Counter(labels)
#print(gg)
def find_max():
    """Return the cluster label with the highest member count in the global
    Counter ``gg`` (labels assumed to be 0..len(gg)-1 -- the function
    iterates ``range(len(gg))``; first maximum wins on ties).
    """
    # Renamed locals: the original shadowed the builtin `max`.
    best_count = gg[0]
    best_label = 0
    for label in range(len(gg)):
        if gg[label] > best_count:
            best_count = gg[label]
            best_label = label
    return best_label
#print(type(labels))
Y = y.tolist()
L = labels.tolist()
# Label of the most populated cluster.
max_label = find_max()
#print("max_label",max_label)
suggest = []
# Collect the ids of every item belonging to the biggest cluster.
for i in range(len(labels)):
    if max_label == L[i]:
        suggest.append(Y[i])
def stripp(rr):
    # Strip the '[' / ']' wrapping from the first len(suggest) entries of rr
    # and append them as ints to the module-level list `new`, which is also
    # returned.
    # NOTE(review): iterates len(suggest) times, not len(rr) -- if rr is ever
    # shorter than `suggest` this raises IndexError; verify the lengths are
    # guaranteed to match at the call site.
    for i in range(len(suggest)):
        p=str(rr[i]).replace('[','').replace(']','')
        new.append(int(p))
    return new
new_Y = stripp(Y)
new_name = []
# Mirror stripp() for the display names: unwrap the first len(suggest) names.
for i in range(len(suggest)):
    p=str(name[i]).replace('[','').replace(']','')
    new_name.append(p)
#print("new_y", new_Y[4])
#print("Y" ,p[0])
#print(Y, L)
#print(len(suggest))
#print("suggest array::",suggest)
n_clusters_ = len(np.unique(labels))
#print("Number of estimated clusters: ", n_clusters_)
# NOTE(review): `suggest` is rebound here from the id list to the constant
# 10, so exactly ten recommendations are printed regardless of the biggest
# cluster's size -- verify this is intentional.
suggest = 10
colors = 10*['r.','g.','b.','c.','k.','y.','m.']
#print(colors)
#print(labels)
# Scatter the items coloured by cluster, plus the cluster centres as 'x'.
for i in range(len(X)):
    plt.plot(X[i][0], X[i][1], colors[labels[i]],markersize = 10)
plt.scatter(cluster_centers[:,0],cluster_centers[:,1], marker = "x", s=150, linewidths = 5, zorder=10)
item_name = dict(zip(new_Y, new_name))
#print("item_name ", item_name)
print("Recommendations::")
for i in range(suggest):
    print("Item ID- {} Item name- {}".format(new_Y[i],new_name[i]))
plt.show()
| riti1302/AI-Based-Shopping-Assistant | main.py | main.py | py | 2,213 | python | en | code | 9 | github-code | 13 |
5091338931 | from books.models import Book
from infrastructure.book.BookRepository import BookRepository
from integration_tests.integration_test_case import IntegrationTestCase
from test_data_provider.ChefDataProvider import ChefDataProvider
from test_data_provider.RecipeDataProvider import RecipeDataProvider
class BookRepositoryTest(IntegrationTestCase):
    """Integration tests for BookRepository.

    setUp creates one chef (id 9999) and a fixture of books and recipes via
    exercise_create_book_with_recipes; each test queries the repository
    against that shared fixture.  Note: written for Python 2 -- `map(...)`
    must return a list for the assertEqual comparisons to hold.
    """
    def setUp(self):
        super(BookRepositoryTest, self).setUp()
        self.chef = ChefDataProvider.getDefault()
        self.chef.id = 9999
        self.chef.save()
        self.sut = BookRepository.new()
        self.test_material = self.exercise_create_book_with_recipes()
    def test_getBooksByCollaborator_should_returnExpected(self):
        actual = self.sut.getBooksByCollaborator(self.chef, 2)
        actual = map(lambda x: x.name, actual)
        expected = ['book-1']
        self.assertEqual(actual, expected)
    def test_get_recipe_by_chef_should_returnExpected(self):
        actual = self.sut.get_recipe_by_chef(self.chef)
        actual = map(lambda x: x.name, actual)
        expected = ['recipe-1', 'recipe-2', 'recipe-4', 'recipe-5']
        self.assertEqual(actual, expected)
    def test_get_book_by_chef_should_returnExpected(self):
        actual = self.sut.get_book_by_chef(self.chef)
        actual = map(lambda x: x.name, actual)
        expected = ['book-1', 'book-3']
        self.assertEqual(actual, expected)
    def test_get_recipe_by_books_should_return_all_recipes_in_books(self):
        actual = self.sut.get_recipe_by_books(self.test_material['books'])
        expected = self.test_material['recipes']
        self.assertEqual(actual, expected)
    def test_get_recipe_by_following_chef_should_return_all_recipe_public_of_chef_follow(self):
        actual = self.sut.get_recipe_by_following_chef(self.chef)
        actual = map(lambda x: x.name, actual)
        expected = ['recipe-5']
        self.assertEquals(actual, expected)
    def test_is_recipe_belong_to_public_book_should_returnTrue_whenRecipeBelongToPublicBook(self):
        # recipes[0] is recipe-1, which belongs to the public book-1.
        recipe = self.test_material['recipes'][0]
        actual = self.sut.is_recipe_belong_to_public_book(recipe)
        self.assertTrue(actual)
    def test_is_recipe_belong_to_public_book_should_returnFalse_whenRecipeNotBelongToPublicBook(self):
        # recipes[3] is recipe-7, which only belongs to the private book-2.
        recipe = self.test_material['recipes'][3]
        actual = self.sut.is_recipe_belong_to_public_book(recipe)
        self.assertFalse(actual)
    def test_is_collaborator_of_recipe_should_returnTrue_whenChefIsCollaboratorOfABookContainsRecipe(self):
        recipe = self.test_material['recipes'][0]
        chef_id = 1
        actual = self.sut.is_collaborator_of_recipe(chef_id, recipe)
        self.assertTrue(actual)
    def test_is_collaborator_of_recipe_should_returnFalse_whenChefIsNotCollaboratorOfABookContainsRecipe(self):
        recipe = self.test_material['recipes'][0]
        chef_id = 4
        actual = self.sut.is_collaborator_of_recipe(chef_id, recipe)
        self.assertFalse(actual)
    def exercise_create_book_with_recipes(self):
        # Build seven recipes (three inactive), four books with collaborator
        # lists encoded as '[id],[id],...' strings, and one followed chef.
        recipe1 = RecipeDataProvider.get().with_name('recipe-1').with_id(1).active().build()
        recipe2 = RecipeDataProvider.get().with_name('recipe-2').with_id(2).active().build()
        recipe3 = RecipeDataProvider.get().with_name('recipe-3').with_id(3).build()
        recipe4 = RecipeDataProvider.get().with_name('recipe-4').with_id(4).active().build()
        recipe5 = RecipeDataProvider.get().with_name('recipe-5').with_id(5).active().build()
        recipe6 = RecipeDataProvider.get().with_name('recipe-6').with_id(6).build()
        recipe7 = RecipeDataProvider.get().with_name('recipe-7').with_id(7).active().build()
        self.save([recipe1, recipe2, recipe3, recipe4, recipe5, recipe6, recipe7])
        book1 = self._saveBook('book-1', '[1],[2],[3]')
        book1.book_type = 'N'
        book1.private = False
        book2 = self._saveBook('book-2', '[2],[5],[6]')
        book3 = self._saveBook('book-3', '[3],[4],[5],[%d]' % self.chef.id)
        book4 = self._saveBook('book-4', '')
        chefFollow = ChefDataProvider.get().withEmail('chefFollow@cmail.com').withId(8888).build()
        chefFollow.save()
        self.chef.follow(chefFollow)
        book1.add_recipe(recipe1)
        book1.add_recipe(recipe2)
        book1.chef = self.chef
        book1.save()
        book2.add_recipe(recipe1)
        book2.add_recipe(recipe3)
        book2.add_recipe(recipe7)
        book2.save()
        book3.add_recipe(recipe4)
        book3.save()
        # book4 is a public, active book owned by the followed chef.
        book4.add_recipe(recipe5)
        book4.add_recipe(recipe6)
        book4.private = False
        book4.book_type = 'N'
        book4.status = 'A'
        book4.chef = chefFollow
        book4.save()
        return dict(
            books=[book1, book2, book3, book4],
            recipes=[recipe1, recipe2, recipe1, recipe7, recipe4, recipe5]
        )
    def _saveBook(self, book_name, collaborators):
        # Persist and return a Book with the given collaborator-id string.
        book = Book(name=book_name, collaborators=collaborators)
        book.save()
        return book
    def save(self, instances):
        # Persist every model instance in the given list.
        for ins in instances:
            ins.save()
| khoaanh2212/nextChef | backend_project/backend/integration_tests/tests/infrastructure/book/test_book_repository.py | test_book_repository.py | py | 5,120 | python | en | code | 0 | github-code | 13 |
22478113469 | # Importação do panda
import pandas as pd
# Load the prepared UFO-sightings CSV file.
ovnis_preparado = pd.read_csv('df_OVNI_preparado.csv')
# Bare expressions like the one below are notebook-style cell outputs; they
# are no-ops when this runs as a plain script.
ovnis_preparado
# Filter the rows for the city of Phoenix.
cidade_phoenix = ovnis_preparado[ovnis_preparado['City']=='Phoenix']
# NOTE(review): sort_values returns a new frame; the sorted result here is
# discarded, so the next steps see the unsorted data.
cidade_phoenix.sort_values(by='Sight_date')
import pandasql
# Run the SQL query and return a dataframe: sightings grouped per day.
query = '''
SELECT Sight_date ,Count(*) as Views FROM cidade_phoenix group by Sight_day, Sight_month order by Sight_date
'''
views_phoenix= pandasql.sqldf(query.lower(), locals())
views_phoenix
# Parse the date column and extract the year.
views_phoenix['Sight_date'] = pd.to_datetime(views_phoenix['Sight_date'])
views_phoenix.dtypes
views_phoenix['Sight_year'] = views_phoenix['Sight_date'].dt.year
views_phoenix
# Run the SQL query and return a dataframe: sightings grouped per year.
query = '''
SELECT Count(*) as views, Sight_year FROM views_phoenix group by Sight_year
'''
views_phoenix_per_year= pandasql.sqldf(query.lower(), locals())
views_phoenix_per_year
# Plot sightings per year as a line chart.
views_phoenix_per_year.plot.line(x='Sight_year',y='views')
class RBNode:
    """A node of a red-black tree."""
    def __init__(self, key):
        # Key, unique within the tree.
        self.key=key
        # Node colour: "RED" or "BLACK".
        # A newly inserted node always starts out RED.
        self.color="RED"
        self.left=None
        self.right=None
        # Parent node.
        self.parent=None
    def __str__(self):
        return str(self.key)
class RedBlackTree:
    """Red-black tree supporting insertion and preorder traversal."""
    def __init__(self):
        self.__root=None
        # All external (leaf) nodes are represented by one shared sentinel.
        self.__EXT = RBNode(None)
        # External nodes are always BLACK.
        self.__EXT.color="BLACK"
    def get_root(self):
        return self.__root
    def preorder_traverse(self, cur, func, *args, **kwargs):
        # Apply func to every internal node in preorder (node, left, right).
        if cur == self.__EXT:
            return
        func(cur, *args, **kwargs)
        self.preorder_traverse(cur.left, func, *args, **kwargs)
        self.preorder_traverse(cur.right, func, *args, **kwargs)
    def __left_rotate(self, n):
        # Rotate the subtree rooted at n to the left: n's right child r
        # becomes the subtree root and n becomes r's left child.
        # n's right child
        r=n.right
        # r's left child
        l=r.left
        # Make l the right child of n.
        l.parent=n
        n.right=l
        # Point n's parent at r instead.
        # If n was the root, update the tree root as well.
        if n==self.__root:
            self.__root=r
        elif n.parent.left==n:
            n.parent.left=r
        else:
            n.parent.right=r
        r.parent=n.parent
        # Make n the left child of r.
        r.left=n
        n.parent=r
    def __right_rotate(self, n):
        # Mirror image of __left_rotate.
        # n's left child
        l=n.left
        # l's right child
        r=l.right
        # Make r the left child of n.
        r.parent=n
        n.left=r
        # Point n's parent at l instead.
        # If n was the root, update the tree root as well.
        if n==self.__root:
            self.__root=l
        elif n.parent.left==n:
            n.parent.left=l
        else:
            n.parent.right=l
        l.parent=n.parent
        # Make n the right child of l.
        l.right=n
        n.parent=l
    def __insert_fix(self, n):
        # Restore the red-black invariants after inserting the RED node n:
        # climb while the parent is RED, recolouring or rotating as needed
        # (pn = parent, gn = grandparent, un = uncle).
        pn=gn=un=None
        pn=n.parent
        while pn != None and pn.color=="RED":
            gn=pn.parent
            if gn.left==pn:
                un=gn.right
                if un.color=="RED":
                    # RED uncle: recolour and continue from the grandparent.
                    gn.color="RED"
                    pn.color=un.color="BLACK"
                    n=gn
                    pn=n.parent
                else:
                    if pn.right==n:
                        # Zig-zag shape: rotate into a zig-zig first.
                        self.__left_rotate(pn)
                        n, pn = pn, n
                    # BLACK uncle: swap colours and rotate at the grandparent.
                    pn.color, gn.color=gn.color, pn.color
                    self.__right_rotate(gn)
            else:
                # Symmetric case: parent is the grandparent's right child.
                un=gn.left
                if un.color=="RED":
                    gn.color="RED"
                    pn.color=un.color="BLACK"
                    n=gn
                    pn=n.parent
                else:
                    if pn.left==n:
                        self.__right_rotate(pn)
                        n, pn = pn, n
                    pn.color, gn.color=gn.color, pn.color
                    self.__left_rotate(gn)
        # The root must always be BLACK.
        self.__root.color="BLACK"
    def insert(self, key):
        # Standard BST insert of a RED node, followed by a fix-up pass.
        new_node=RBNode(key)
        new_node.left=self.__EXT
        new_node.right=self.__EXT
        cur=self.__root
        if not cur:
            self.__root=new_node
            # The root node is BLACK.
            self.__root.color="BLACK"
            return
        while True:
            parent=cur
            if key < cur.key:
                cur=cur.left
                if cur==self.__EXT:
                    parent.left=new_node
                    # Set the node's parent link.
                    new_node.parent=parent
                    break
            else:
                cur=cur.right
                if cur==self.__EXT:
                    parent.right=new_node
                    # Set the node's parent link.
                    new_node.parent=parent
                    break
        # Restore the red-black properties around the new node.
        self.__insert_fix(new_node)
    # Convenience function for debugging output.
    def print_node(self, rbn):
        # Print a node's key, colour and neighbour keys on a single line.
        if rbn:
            print("node : {}, ".format(rbn.key), end="")
            if rbn.color=="RED":
                print("color : RED, ", end="")
            else:
                print("color : BLACK, ", end="")
            if rbn.left:
                print("left : {}, ".format(rbn.left.key), end="")
            if rbn.right:
                print("right : {}, ".format(rbn.right.key), end="")
            if rbn.parent:
                print("parent : {}".format(rbn.parent.key), end="")
            print()
if __name__=="__main__":
print('*'*100)
rbt=RedBlackTree()
for i in range(10):
rbt.insert(i)
rbt.preorder_traverse(rbt.get_root(), rbt.print_node)
print('*'*100)
| gilbutITbook/080200 | ch09/red_black_tree.py | red_black_tree.py | py | 4,896 | python | ko | code | 3 | github-code | 13 |
2741863236 | import socket
import os
TARGET_IP = "127.0.0.1"
TARGET_PORT = 5005
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
namafile="bart.png"
ukuran = os.stat(namafile).st_size
fp = open('bart.png','rb')
k = fp.read()
terkirim=0
for x in k:
k_bytes = bytes([x])
sock.sendto(k_bytes, (TARGET_IP, TARGET_PORT))
terkirim = terkirim + 1
print(k_bytes,f"terkirim {terkirim} of {ukuran} ")
| rm77/progjar | progjar2/udpfileclient.py | udpfileclient.py | py | 401 | python | en | code | 5 | github-code | 13 |
70302990417 | from django.db.models import Avg, Count
from django.db.models.functions import Round
from rest_framework import status
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from .constants import MAX_REACTION_RATE
from .models import Mood
from .serializers import MoodSerializer, MoodsDaySerializer
from ..common.decorators import authentication_required
from ..common.models import Achievement, UserProfile
# Create your views here.
class HomeList(APIView):
    """Dashboard view: per-day mood summary plus aggregate stat cards."""
    renderer_classes = [TemplateHTMLRenderer]
    template_name = 'mood_tracker/home.html'
    @authentication_required
    def get(self, request):
        # Filter by user information
        user_profile = UserProfile.objects.get(user_id=request.user.id)
        user_moods = Mood.objects.filter(user_profile_id=user_profile.id)\
            .order_by('-day_date')
        # Home table - Get user data grouped by days
        moods_by_day = user_moods.values('day_date', 'day_week')\
            .annotate(total_moods=Count('emotion'),
                      day_avg_rate=Avg('reaction_rate'))
        serializer = MoodsDaySerializer(moods_by_day, many=True)
        # Avg rate card - Get total average reaction rate of a user
        total_avg_rate = user_moods.aggregate(
            total_avg_rate=Round(Avg('reaction_rate'), precision=1)
        )['total_avg_rate']
        # Achievements card - Get total achievements accomplished by a user
        achievements = Achievement.objects.all()
        user_achievements = user_profile.achievements.aggregate(
            total_achievements=Count('title')
        )
        # Days registered
        days_registered = moods_by_day.aggregate(total_days=Count('day_date'))
        # Template context: per-day rows plus the aggregate card values.
        data = {
            'day_moods': serializer.data,
            'days_registered': days_registered['total_days'],
            'user_avg_rate': total_avg_rate,
            'max_rate': MAX_REACTION_RATE,
            'user_achievements': user_achievements['total_achievements'],
            'total_achievements': achievements.count()
        }
        return Response(data, status=status.HTTP_200_OK)
class MoodList(APIView):
    """Render every mood entry recorded by the authenticated user."""
    renderer_classes = [TemplateHTMLRenderer]
    template_name = 'mood_tracker/moods.html'

    @authentication_required
    def get(self, request):
        profile = UserProfile.objects.get(user_id=request.user.id)
        moods = Mood.objects.filter(user_profile_id=profile.id)
        payload = {'moods': MoodSerializer(moods, many=True).data}
        return Response(payload, status=status.HTTP_200_OK)

    @authentication_required
    def post(self, request):
        # Not implemented yet.
        pass
class MoodCharts(APIView):
renderer_classes = [TemplateHTMLRenderer]
template_name = 'mood_tracker/charts.html'
@authentication_required
def get(self, request):
user_profile = UserProfile.objects.get(user_id=request.user.id)
user_moods = Mood.objects.filter(user_profile_id=user_profile.id)
serializer = MoodSerializer(user_moods, many=True)
data = {
'moods': serializer.data
}
return Response(data, status=status.HTTP_200_OK)
| mario-nunez/mood_tracker | apps/mood_tracker/views.py | views.py | py | 3,341 | python | en | code | 0 | github-code | 13 |
74371289936 | from FeatureExplorer import *
import matplotlib.pyplot as plt
import scipy.io as sio
import numpy as np
import string
import time
import os
trainingdir = "E:/training results 5/"
testingdir = "E:/DATA/"
outputfile = "E:/testing results/training5.txt"
f = open(outputfile, 'w')
for dn in os.listdir(trainingdir):
# load classified points
ipoints = []
epoints = []
lpoints = []
trainingpath = os.path.join(trainingdir, dn)
if "Store" not in dn:
for fn in os.listdir(trainingpath):
if "test" not in fn:
trainingfile = os.path.join(trainingpath, fn)
point = sio.loadmat(trainingfile)
if point['type'] == 'i':
ipoints.append(point['data'].tolist())
elif point['type'] == 'e':
epoints.append(point['data'].tolist())
elif point['type'] == 'l':
lpoints.append(point['data'].tolist())
else:
print("Invalid point type.")
# classify new points
testingpath = os.path.join(testingdir, dn)
print(testingpath)
for fn in os.listdir(testingpath):
if "test" in fn:
print(fn)
testname = os.path.join(testingpath, fn)
PD, is_early = PeakDetect(testname)
dCPD = difChannelPeakDeviation(testname)[0]
#dPD = difPeakDetect(testname)
BPF = TenTwentyBPF(testname)
f1 = np.mean(PD)
f2 = dCPD
f3 = np.mean(BPF)
point = [f1, f2, f3]
i_neighbors = []
for i in ipoints:
dist = ((f1 - i[0][0])**2 + (f2 - i[0][1])**2 + (f3 - i[0][2])**2)**0.5
i_neighbors.append(dist)
e_neighbors = []
for i in epoints:
dist = ((f1 - i[0][0])**2 + (f2 - i[0][1])**2 + (f3 - i[0][2])**2)**0.5
e_neighbors.append(dist)
l_neighbors = []
for i in lpoints:
dist = ((f1 - i[0][0])**2 + (f2 - i[0][1])**2 + (f3 - i[0][2])**2)**0.5
l_neighbors.append(dist)
ins = sorted(i_neighbors)[:]
while len(ins) < 10:
ins.append(100)
ens = sorted(e_neighbors)[:]
while len(ens) < 10:
ens.append(100)
lns = sorted(l_neighbors)[:]
while len(lns) < 10:
lns.append(100)
i = 0.0
e = 0.0
l = 0.0
for j in range(0, 10):
if ins[0] < ens[0]:
if ins[0] < lns[0]:
i += 1.0
ins.pop(0)
else:
l += 1.0
lns.pop(0)
else:
if ens[0] < lns[0]:
e += 1.0
ens.pop(0)
else:
l += 1.0
lns.pop(0)
ictal = 1.0 - i/10.0
early = e/10.0
f.write(fn)
f.write(',')
f.write("{0:.1f}".format(ictal))
f.write(",{0:.1f}\n".format(early))
print("\nClassification complete.")
f.close()
| alexmcmaster/detection | Classifier.py | Classifier.py | py | 3,344 | python | en | code | 0 | github-code | 13 |
36945611349 | """
Also first task
The previous task was intentionally simplified. Usually I have more data about participants:
solutions = [
{
'date': '2021-01-01 10:00:00',
'name': 'Brad Pitt',
'email': 'Participant1@mail.com',
'phone': '+7 912-345-67-89',
'code': '...'
},
{
'date': '2021-01-01 10:01:00',
'name': 'Not Brad Pitt',
'email': 'participant2@mail.com',
'phone': '+7(912) 3456789',
'code': '...'
},
{
'date': '2021-01-01 10:02:00',
'name': 'Definitely not Brad Pitt',
'email': 'particiPANT1@mail.com',
'phone': '79001234567',
'code': '...'
},
...
]
In that "solutions" list, all 3 solutions are from 1 person: 1st and 2nd have the same phone, 1st and 3rd have the
same email.
Now, try to improve your previous algorithm to handle several contact details from every lottery participant.
I rate this variant of task as difficult for newbies. That's why I strongly advise you to discuss this algorithm with
each other. Remember that it is good to ask and give help. But it is bad to copy code without understanding how it
works.
"""
# pip install phonenumbers
import phonenumbers
def func(sols):
res = list()
emails = set()
phones = set()
for sol in reversed(sols):
phone = phonenumbers.parse(sol['phone'], None)
email = sol['email'].lower()
if phone not in phones and email not in emails:
res.append(sol)
phones.add(phone)
emails.add(email)
return res
if __name__ == '__main__':
solutions = [
{
'date': '2021-01-01 10:00:00',
'name': 'Brad Pitt',
'email': 'Participant1@mail.com',
'phone': '+7 912-345-67-89',
'code': '...'
},
{
'date': '2021-01-01 10:01:00',
'name': 'Not Brad Pitt',
'email': 'participant2@mail.com',
'phone': '+7(912) 3456789',
'code': '...'
},
{
'date': '2021-01-01 10:02:00',
'name': 'Definitely not Brad Pitt',
'email': 'particiPANT1@mail.com',
'phone': '79001234567',
'code': '...'
}
]
print(func(solutions))
| iaramer/algorithms | python/mipt/mipt_python course/homework/hw2/also_first_task.py | also_first_task.py | py | 2,298 | python | en | code | 0 | github-code | 13 |
7835200880 | import re
import subprocess
from pathlib import Path
from typing import List
import i18n
import journalist_app as journalist_app_module
import pytest
import source_app
from babel.core import Locale, UnknownLocaleError
from db import db
from flask import render_template, render_template_string, request, session
from flask_babel import gettext
from i18n import parse_locale_set
from sdconfig import DEFAULT_SECUREDROP_ROOT, FALLBACK_LOCALE, SecureDropConfig
from tests.factories import SecureDropConfigFactory
from werkzeug.datastructures import Headers
# Interlingua, per
# <https://developers.securedrop.org/en/latest/supported_languages.html>.
NEVER_LOCALE = "ia"
def create_config_for_i18n_test(
supported_locales: List[str],
default_locale: str = "en_US",
translation_dirs: Path = DEFAULT_SECUREDROP_ROOT / "translations",
) -> SecureDropConfig:
tmp_root_for_test = Path("/tmp/sd-tests/test_i18n")
tmp_root_for_test.mkdir(exist_ok=True, parents=True)
i18n_config = SecureDropConfigFactory.create(
SECUREDROP_DATA_ROOT=tmp_root_for_test,
DEFAULT_LOCALE=default_locale,
SUPPORTED_LOCALES=supported_locales,
TRANSLATION_DIRS=translation_dirs,
# For the tests in these files, the following argument / config fields are not used so
# we set them to invalid values
RQ_WORKER_NAME="",
GPG_KEY_DIR=tmp_root_for_test,
JOURNALIST_KEY="",
)
# Create an empty key file just to pass the sanity checks when starting the source or
# journalist app; the encryption code is not exercised as part of these tests
gpg_key_path = tmp_root_for_test / "private-keys-v1.d"
gpg_key_path.mkdir(exist_ok=True)
return i18n_config
def set_msg_translation_in_po_file(po_file: Path, msgid_to_translate: str, msgstr: str) -> None:
po_content = po_file.read_text()
content_to_update = f"""
msgid "{msgid_to_translate}"
msgstr ""
"""
assert content_to_update in po_content
content_with_translation = f"""
msgid "{msgid_to_translate}"
msgstr "{msgstr}"
"""
po_content_with_translation = po_content.replace(content_to_update, content_with_translation)
po_file.write_text(po_content_with_translation)
def verify_i18n(app):
not_translated = "code hello i18n"
translated_fr = "code bonjour"
for accepted in ("unknown", "en_US"):
headers = Headers([("Accept-Language", accepted)])
with app.test_request_context(headers=headers):
assert not hasattr(request, "babel_locale")
assert not_translated == gettext(not_translated)
assert hasattr(request, "babel_locale")
assert (
render_template_string(
"""
{{ gettext('code hello i18n') }}
"""
).strip()
== not_translated
)
for lang in ("fr", "fr-FR"):
headers = Headers([("Accept-Language", lang)])
with app.test_request_context(headers=headers):
assert not hasattr(request, "babel_locale")
assert translated_fr == gettext(not_translated)
assert hasattr(request, "babel_locale")
assert (
render_template_string(
"""
{{ gettext('code hello i18n') }}
"""
).strip()
== translated_fr
)
# https://github.com/freedomofpress/securedrop/issues/2379
headers = Headers([("Accept-Language", "en-US;q=0.6,fr_FR;q=0.4,nb_NO;q=0.2")])
with app.test_request_context(headers=headers):
assert not hasattr(request, "babel_locale")
assert not_translated == gettext(not_translated)
translated_cn = "code chinese"
for lang in ("zh-CN", "zh-Hans-CN"):
headers = Headers([("Accept-Language", lang)])
with app.test_request_context(headers=headers):
assert not hasattr(request, "babel_locale")
assert translated_cn == gettext(not_translated)
assert hasattr(request, "babel_locale")
assert (
render_template_string(
"""
{{ gettext('code hello i18n') }}
"""
).strip()
== translated_cn
)
translated_ar = "code arabic"
for lang in ("ar", "ar-kw"):
headers = Headers([("Accept-Language", lang)])
with app.test_request_context(headers=headers):
assert not hasattr(request, "babel_locale")
assert translated_ar == gettext(not_translated)
assert hasattr(request, "babel_locale")
assert (
render_template_string(
"""
{{ gettext('code hello i18n') }}
"""
).strip()
== translated_ar
)
with app.test_client() as c:
# a request without Accept-Language or "l" argument gets the
# default locale
page = c.get("/login")
assert session.get("locale") == "en_US"
assert not_translated == gettext(not_translated)
assert b"?l=fr_FR" in page.data
assert b"?l=en_US" not in page.data
# the session locale should change when the "l" request
# argument is present and valid
page = c.get("/login?l=fr_FR", headers=Headers([("Accept-Language", "en_US")]))
assert session.get("locale") == "fr_FR"
assert translated_fr == gettext(not_translated)
assert b"?l=fr_FR" not in page.data
assert b"?l=en_US" in page.data
# confirm that the chosen locale, now in the session, is used
# despite not matching the client's Accept-Language header
c.get("/", headers=Headers([("Accept-Language", "en_US")]))
assert session.get("locale") == "fr_FR"
assert translated_fr == gettext(not_translated)
# the session locale should not change if an empty "l" request
# argument is sent
c.get("/?l=")
assert session.get("locale") == "fr_FR"
assert translated_fr == gettext(not_translated)
# the session locale should not change if no "l" request
# argument is sent
c.get("/")
assert session.get("locale") == "fr_FR"
assert translated_fr == gettext(not_translated)
# sending an invalid locale identifier should not change the
# session locale
c.get("/?l=YY_ZZ")
assert session.get("locale") == "fr_FR"
assert translated_fr == gettext(not_translated)
# requesting a valid locale via the request argument "l"
# should change the session locale
c.get("/?l=en_US", headers=Headers([("Accept-Language", "fr_FR")]))
assert session.get("locale") == "en_US"
assert not_translated == gettext(not_translated)
# again, the session locale should stick even if not included
# in the client's Accept-Language header
c.get("/", headers=Headers([("Accept-Language", "fr_FR")]))
assert session.get("locale") == "en_US"
assert not_translated == gettext(not_translated)
with app.test_request_context():
assert render_template("locales.html") == ""
with app.test_client() as c:
c.get("/")
locales = render_template("locales.html")
assert "?l=fr_FR" in locales
assert "?l=en_US" not in locales
# Test that A[lang,hreflang] attributes (if present) will validate as
# BCP47/RFC5646 language tags from `i18n.RequestLocaleInfo.language_tag`.
if 'lang="' in locales:
assert 'lang="en-US"' in locales
assert 'lang="fr-FR"' in locales
if 'hreflang="' in locales:
assert 'hreflang="en-US"' in locales
assert 'hreflang="fr-FR"' in locales
c.get("/?l=ar")
# We have to render a template that inherits from "base.html" so that "tab_title" will be
# set. But we're just checking that when a page is rendered in an RTL language the
# directionality is correct, so it doesn't matter which template we render.
base = render_template("error.html", error={})
assert 'dir="rtl"' in base
def test_i18n():
translation_dirs = Path("/tmp/sd-tests/test_i18n/translations")
translation_dirs.mkdir(exist_ok=True, parents=True)
test_config = create_config_for_i18n_test(
supported_locales=["ar", "en_US", "fr_FR", "nb_NO", "zh_Hans"],
translation_dirs=translation_dirs,
)
i18n_dir = Path(__file__).absolute().parent / "i18n"
sources = [str(i18n_dir / "code.py"), str(i18n_dir / "template.html")]
pot = i18n_dir / "messages.pot"
subprocess.check_call(
[
"pybabel",
"extract",
"--mapping",
str(i18n_dir / "babel.cfg"),
"--output",
pot,
*sources,
]
)
subprocess.check_call(
[
"pybabel",
"init",
"--input-file",
pot,
"--output-dir",
translation_dirs,
"--locale",
"en_US",
]
)
for (locale, translated_msg) in (
("fr_FR", "code bonjour"),
("zh_Hans", "code chinese"),
("ar", "code arabic"),
("nb_NO", "code norwegian"),
("es_ES", "code spanish"),
):
subprocess.check_call(
[
"pybabel",
"init",
"--input-file",
pot,
"--output-dir",
translation_dirs,
"--locale",
locale,
]
)
# Populate the po file with a translation
po_file = translation_dirs / locale / "LC_MESSAGES" / "messages.po"
set_msg_translation_in_po_file(
po_file,
msgid_to_translate="code hello i18n",
msgstr=translated_msg,
)
subprocess.check_call(
[
"pybabel",
"compile",
"--directory",
translation_dirs,
"--locale",
locale,
"--input-file",
po_file,
]
)
# Use our config (and not an app fixture) because the i18n module
# grabs values at init time and we can't inject them later.
for app in (
journalist_app_module.create_app(test_config),
source_app.create_app(test_config),
):
with app.app_context():
db.create_all()
assert list(app.config["LOCALES"].keys()) == test_config.SUPPORTED_LOCALES
verify_i18n(app)
def test_parse_locale_set():
assert parse_locale_set([FALLBACK_LOCALE]) == {Locale.parse(FALLBACK_LOCALE)}
def test_no_usable_fallback_locale():
"""
The apps fail if neither the default nor the fallback locale is usable.
"""
test_config = create_config_for_i18n_test(
default_locale=NEVER_LOCALE, supported_locales=[NEVER_LOCALE]
)
with pytest.raises(ValueError, match="in the set of usable locales"):
journalist_app_module.create_app(test_config)
with pytest.raises(ValueError, match="in the set of usable locales"):
source_app.create_app(test_config)
def test_unusable_default_but_usable_fallback_locale(caplog):
"""
The apps start even if the default locale is unusable, as along as the fallback locale is
usable, but log an error for OSSEC to pick up.
"""
test_config = create_config_for_i18n_test(
default_locale=NEVER_LOCALE, supported_locales=[NEVER_LOCALE, FALLBACK_LOCALE]
)
for app in (journalist_app_module.create_app(test_config), source_app.create_app(test_config)):
with app.app_context():
assert NEVER_LOCALE in caplog.text
assert "not in the set of usable locales" in caplog.text
def test_invalid_locales():
"""
An invalid locale raises an error during app configuration.
"""
test_config = create_config_for_i18n_test(supported_locales=[FALLBACK_LOCALE, "yy_ZZ"])
with pytest.raises(UnknownLocaleError):
journalist_app_module.create_app(test_config)
with pytest.raises(UnknownLocaleError):
source_app.create_app(test_config)
def test_valid_but_unusable_locales(caplog):
"""
The apps start with one or more unusable, but still valid, locales, but log an error for
OSSEC to pick up.
"""
test_config = create_config_for_i18n_test(
supported_locales=[FALLBACK_LOCALE, "wae_CH"],
)
for app in (journalist_app_module.create_app(test_config), source_app.create_app(test_config)):
with app.app_context():
assert "wae" in caplog.text
assert "not in the set of usable locales" in caplog.text
def test_language_tags():
assert i18n.RequestLocaleInfo(Locale.parse("en")).language_tag == "en"
assert i18n.RequestLocaleInfo(Locale.parse("en-US", sep="-")).language_tag == "en-US"
assert i18n.RequestLocaleInfo(Locale.parse("en-us", sep="-")).language_tag == "en-US"
assert i18n.RequestLocaleInfo(Locale.parse("en_US")).language_tag == "en-US"
assert i18n.RequestLocaleInfo(Locale.parse("zh_Hant")).language_tag == "zh-Hant"
def test_html_en_lang_correct():
test_config = create_config_for_i18n_test(supported_locales=["en_US"])
app = journalist_app_module.create_app(test_config).test_client()
resp = app.get("/", follow_redirects=True)
html = resp.data.decode("utf-8")
assert re.compile('<html lang="en-US".*>').search(html), html
app = source_app.create_app(test_config).test_client()
resp = app.get("/", follow_redirects=True)
html = resp.data.decode("utf-8")
assert re.compile('<html lang="en-US".*>').search(html), html
# check '/generate' too because '/' uses a different template
resp = app.post("/generate", data={"tor2web_check": 'href="fake.onion"'}, follow_redirects=True)
html = resp.data.decode("utf-8")
assert re.compile('<html lang="en-US".*>').search(html), html
def test_html_fr_lang_correct():
"""Check that when the locale is fr_FR the lang property is correct"""
test_config = create_config_for_i18n_test(supported_locales=["fr_FR", "en_US"])
app = journalist_app_module.create_app(test_config).test_client()
resp = app.get("/?l=fr_FR", follow_redirects=True)
html = resp.data.decode("utf-8")
assert re.compile('<html lang="fr-FR".*>').search(html), html
app = source_app.create_app(test_config).test_client()
resp = app.get("/?l=fr_FR", follow_redirects=True)
html = resp.data.decode("utf-8")
assert re.compile('<html lang="fr-FR".*>').search(html), html
# check '/generate' too because '/' uses a different template
resp = app.post(
"/generate?l=fr_FR", data={"tor2web_check": 'href="fake.onion"'}, follow_redirects=True
)
html = resp.data.decode("utf-8")
assert re.compile('<html lang="fr-FR".*>').search(html), html
def test_html_attributes():
"""Check that HTML lang and dir attributes respect locale."""
test_config = create_config_for_i18n_test(supported_locales=["ar", "en_US"])
app = journalist_app_module.create_app(test_config).test_client()
resp = app.get("/?l=ar", follow_redirects=True)
html = resp.data.decode("utf-8")
assert '<html lang="ar" dir="rtl">' in html
resp = app.get("/?l=en_US", follow_redirects=True)
html = resp.data.decode("utf-8")
assert '<html lang="en-US" dir="ltr">' in html
app = source_app.create_app(test_config).test_client()
resp = app.get("/?l=ar", follow_redirects=True)
html = resp.data.decode("utf-8")
assert '<html lang="ar" dir="rtl">' in html
resp = app.get("/?l=en_US", follow_redirects=True)
html = resp.data.decode("utf-8")
assert '<html lang="en-US" dir="ltr">' in html
# check '/generate' too because '/' uses a different template
resp = app.post(
"/generate?l=ar", data={"tor2web_check": 'href="fake.onion"'}, follow_redirects=True
)
html = resp.data.decode("utf-8")
assert '<html lang="ar" dir="rtl">' in html
resp = app.post(
"/generate?l=en_US", data={"tor2web_check": 'href="fake.onion"'}, follow_redirects=True
)
html = resp.data.decode("utf-8")
assert '<html lang="en-US" dir="ltr">' in html
def test_same_lang_diff_locale():
"""
Verify that when two locales with the same lang are specified, the full locale
name is used for both.
"""
test_config = create_config_for_i18n_test(supported_locales=["en_US", "pt_BR", "pt_PT"])
app = journalist_app_module.create_app(test_config).test_client()
resp = app.get("/", follow_redirects=True)
html = resp.data.decode("utf-8")
assert "português (Brasil)" in html
assert "português (Portugal)" in html
def test_duplicate_locales():
"""
Verify that we don't display the full locale name for duplicate locales,
whether from user input or securedrop.sdconfig's enforcement of the
fallback locale.
"""
# ["en_US", "en_US"] alone will not display the locale switcher, which
# *does* pass through set deduplication.
test_config = create_config_for_i18n_test(supported_locales=["en_US", "en_US", "ar"])
app = journalist_app_module.create_app(test_config).test_client()
resp = app.get("/", follow_redirects=True)
html = resp.data.decode("utf-8")
assert "English (United States)" not in html
| freedomofpress/securedrop | securedrop/tests/test_i18n.py | test_i18n.py | py | 17,510 | python | en | code | 3,509 | github-code | 13 |
1886852216 | from __future__ import annotations
import logging
from tuxemon.tools import NamedTupleProtocol, cast_parameters_to_namedtuple
from typing import TypeVar, Generic, ClassVar, Type, Sequence, Any, TypedDict,\
TYPE_CHECKING
from tuxemon.session import Session
if TYPE_CHECKING:
from tuxemon.npc import NPC
from tuxemon.monster import Monster
logger = logging.getLogger(__name__)
ParameterClass = TypeVar("ParameterClass", bound=NamedTupleProtocol)
class ItemEffectResult(TypedDict):
success: bool
class ItemEffect(Generic[ParameterClass]):
"""ItemEffects are executed by items.
ItemEffect subclasses implement "effects" defined in Tuxemon items.
All subclasses, at minimum, must implement the following:
* The ItemEffect.apply() method
* A meaningful name, which must match the name in item file effects
By populating the "valid_parameters" class attribute, subclasses
will be assigned a "parameters" instance attribute that holds the
parameters passed to the action in the item file. It is also used
to check the syntax of effects, by verifying the correct type and
number of parameters passed.
Parameters
==========
Tuxemon supports type-checking of the parameters defined in the items.
valid_parameters may be the following format (may change):
(type, name)
* the type may be any valid python type, or even a python class or function
* type may be a single type, or a tuple of types
* type, if a tuple, may include None is indicate the parameter is optional
* name must be a valid python string
After parsing the parameters of the Item, the parameter's value
will be passed to the type constructor.
Example types: str, int, float, Monster, NPC
(int, "duration") => duration must be an int
((int, float), "duration") => can be an int or float
((int, float, None), "duration") => is optional
(Monster, "monster_slug") => a Monster instance will be created
"""
name: ClassVar[str] = "GenericEffect"
param_class: ClassVar[Type[ParameterClass]]
def __init__(
self,
session: Session,
user: NPC,
parameters: Sequence[Any],
) -> None:
self.session = session
self.user = user
# if you need the parameters before they are processed, use this
self.raw_parameters = parameters
# parse parameters
try:
if self.param_class._fields:
# cast the parameters to the correct type, as defined in cls.valid_parameters
self.parameters = cast_parameters_to_namedtuple(
parameters,
self.param_class,
)
else:
self.parameters = parameters
except ValueError:
logger.error(f"error while parsing for {self.name}")
logger.error(f"cannot parse parameters: {parameters}")
logger.error(self.param_class)
logger.error("please check the parameters and verify they are correct")
self.parameters = None
self._done = False
def apply(self, target: Monster) -> ItemEffectResult:
pass
| 26eldrpau/Tuxemon | tuxemon/item/itemeffect.py | itemeffect.py | py | 3,231 | python | en | code | null | github-code | 13 |
14646716575 | from sqlalchemy import Column, ForeignKey, Identity, Integer, Table
from . import metadata
SetupAttemptPaymentMethodDetailsCardJson = Table(
"setup_attempt_payment_method_details_cardjson",
metadata,
Column(
"three_d_secure",
ThreeDSecureDetails,
ForeignKey("ThreeDSecureDetails"),
comment="Populated if this authorization used 3D Secure authentication",
nullable=True,
),
Column("id", Integer, primary_key=True, server_default=Identity()),
)
__all__ = ["setup_attempt_payment_method_details_card.json"]
| offscale/stripe-sql | stripe_openapi/setup_attempt_payment_method_details_card.py | setup_attempt_payment_method_details_card.py | py | 566 | python | en | code | 1 | github-code | 13 |
35579941585 | from selenium import webdriver
import pytest
_driver = None
def pytest_addoption(parser):
'''添加命令行参数--browser、--host'''
parser.addoption(
"--browser", action="store", default="chrome", help="browser option: firefox or chrome"
)
'''添加host参数,设置默认测试环境地址'''
parser.addoption(
"--host", action="store", default="https://cloud.yoseenir.com/login", help="case host->https://cloud.yoseenir.com/login"
)
@pytest.fixture(scope='session')
def host(request):
'''全局host参数'''
return request.config.getoption("--host")
@pytest.fixture(scope='session')
def driver(request):
'''定义全局driver参数'''
global _driver
if _driver is None:
name = request.config.getoption("--browser")
if name == "firefox":
_driver = webdriver.Firefox()
elif name == "chrome":
_driver = webdriver.Chrome()
else:
_driver = webdriver.Chrome()
_driver.get("https://cloud.yoseenir.com/login")
print("正在启动浏览器名称:%s" % name)
def fn():
print("当全部用例执行完之后:teardown quit driver!")
_driver.quit()
request.addfinalizer(fn)
return _driver
| zxiaoxing/web_auto | conftest.py | conftest.py | py | 1,288 | python | en | code | 0 | github-code | 13 |
70994369299 | import os
from flask import Flask, flash, request, redirect, render_template
from werkzeug.utils import secure_filename
import netifaces
from flask_qrcode import QRcode
import click
app=Flask(__name__)
port_number = 5000
app.secret_key = "secret key"
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024 * 1024
QRcode(app)
# Get current path
path = os.getcwd()
# file Upload
UPLOAD_FOLDER = os.path.join(path, 'uploads')
# Make directory if uploads is not exists
if not os.path.isdir(UPLOAD_FOLDER):
os.mkdir(UPLOAD_FOLDER)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Allowed extension you can set your own
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def upload_form():
ipAddr = netifaces.ifaddresses(netifaces.gateways()['default'][netifaces.AF_INET][1])[netifaces.AF_INET][0]['addr']
return render_template('upload.html', address = 'http://{}:{}'.format(ipAddr, port_number))
@app.route('/', methods=['POST'])
def upload_file():
if request.method == 'POST':
if 'files[]' not in request.files:
flash('No file part')
return redirect(request.url)
files = request.files.getlist('files[]')
for file in files:
if file :
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
flash('File(s) successfully uploaded')
return redirect('/')
@click.command()
@click.option('--host', '-h', default='0.0.0.0', help='The interface to bind to.')
@click.option('--port', '-p', default=5000, help='The port to bind to.')
@click.option('--debug', '-d', default=False, is_flag=True, help='show a debugger in case an exception happened')
@click.option('--dev', default=False, is_flag=True, help='show a debugger in case an exception happened')
def cli(host, port, debug, dev):
global port_number
port_number = port
if(dev):
app.run(host=host,port=port,debug=debug)
else:
app.run(host=host,port=port,debug=debug)
| GLinBoy/general-uploader | app.py | app.py | py | 2,173 | python | en | code | 0 | github-code | 13 |
19337603034 | """
Use this script to upload a pypi package, require below package:
pip install setuptools -U
pip install wheel -U
pip install twine -U
This is the script to release manually, now the package can be released via Github Actions:
- https://github.com/tobyqin/xmind2testlink/actions
"""
import os
egg = 'dist'
if os.path.exists(egg):
for f in os.listdir(egg):
os.remove(os.path.join(egg, f))
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
| tobyqin/xmind2testlink | publish.py | publish.py | py | 501 | python | en | code | 105 | github-code | 13 |
74009544656 | from datetime import datetime
from flask import request
from app import db
from models import ShortUrl, short_id
class ShortUrlService:
@staticmethod
def short(short_url):
short = db.session.query(ShortUrl).\
filter_by(short_url=short_url).first()
return short
@staticmethod
def all_urls():
al = db.session.query(ShortUrl).all()
return al
@staticmethod
def long(long_url):
long = ShortUrl.query.filter_by(long_url=long_url).first()
return long
@staticmethod
def gen():
short_url = short_id(6)
return short_url
@staticmethod
def new(url, short_url):
new_link = ShortUrl(long_url=url,
short_url=short_url, time_to_create=datetime.now())
db.session.add(new_link)
db.session.commit()
short = request.host_url + short_url
return short
@staticmethod
def count_click(link):
link.clicks += 1
db.session.commit()
| AlimkhodjaevaSevinch/url-shortener | services.py | services.py | py | 1,024 | python | en | code | 0 | github-code | 13 |
1430456147 | __author__ = 'halley'
import random
from music21 import stream, note
scale = [0,2,4,5,7,9,11] #standard C-major scale
octave = 6
total_measures = 16
#possible rhythms that span half a measure
half_measure_rhythms = ([[1.5,0.5], [1.5,0.25,0.25], [1.0,1.0], [1.0,0.5,0.5], [0.5,0.5,1.0]])
durs = []
degrees = []
prev_degree = 0
for i in range(0, int(total_measures*2)):
half_measure_rhythm = random.choice(half_measure_rhythms) #choose random rhythm for 2 beats
for dur in half_measure_rhythm:
durs.append(dur)
degrees.append(prev_degree + random.choice([-1,1,-1,1,0,-2,2])) #move a small, randomly chosen amount
prev_degree = degrees[-1]
pitches = [scale[degree % 7] + (octave+(degree/7))*12 for degree in degrees] #convert degrees to pitches
#append to score
score = stream.Score()
for i in range(0, len(pitches)):
n = note.Note(pitches[i])
n.quarterLength = durs[i]
score.append(n)
score.show('midi')
| HalleyYoung/MusicTalk | random_melody2.py | random_melody2.py | py | 955 | python | en | code | 1 | github-code | 13 |
17176906452 | import os
import openai
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer, CrossEncoder, util
import os
import torch
import json
import pickle
with open('api_key.json', 'r') as f:
key = json.load(f)['key']
openai.api_key = key
ceos_table = pd.read_csv('CEOS.csv')
#We use the Bi-Encoder to encode all passages, so that we can use it with sematic search
bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
bi_encoder.max_seq_length = 256 #Truncate long passages to 256 tokens
top_k = 32 #Number of passages we want to retrieve with the bi-encoder
#The bi-encoder will retrieve 100 documents. We use a cross-encoder, to re-rank the results list to improve the quality
cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
print('loading chapters')
with open("EO_portal_demo/chapter_txt.pkl", "rb") as fp: # Unpickling
passages = pickle.load(fp)
print('loading embeddings')
corpus_embeddings = torch.from_numpy(np.load('EO_portal_demo/chapter_embeddings.npy'))
# This function will search all wikipedia articles for passages that
# answer the query
def search(query):
print("Input question:", query)
##### Sematic Search #####
# Encode the query using the bi-encoder and find potentially relevant passages
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
question_embedding = question_embedding#.cuda()
print('bi encoder search ...')
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k*2)
hits = hits[0] # Get the hits for the first query
#deduplicate too similar results
hits_df = pd.DataFrame(hits)[:32]
hits_df.score = hits_df.score.apply(lambda x: round(x,6))
hits_df_dropped = hits_df.drop_duplicates(subset='score').sort_values('score')[::-1]
hits = hits_df_dropped.to_dict(orient='records')[:top_k]
##### Re-Ranking #####
# Now, score all retrieved passages with the cross_encoder
cross_inp = [[query, passages[hit['corpus_id']]] for hit in hits]
print('reranking cross ...')
cross_scores = cross_encoder.predict(cross_inp)
# Sort results by the cross-encoder scores
for idx in range(len(cross_scores)):
hits[idx]['cross-score'] = cross_scores[idx]
# # Output of top-5 hits from bi-encoder
# print("\n-------------------------\n")
# print("Top-3 Bi-Encoder Retrieval hits")
hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
return hits
def call_GPT(model, prompt):
    """Send *prompt* to the OpenAI chat API as a single system message.

    Temperature is pinned to 0 for (near-)deterministic answers.
    Returns the raw completion object.
    """
    request = {
        "messages": [{"role": "system", "content": prompt}],
        "model": model,
        "temperature": 0,
    }
    return openai.ChatCompletion.create(**request)
if __name__ == '__main__':
    GPT_model = "gpt-3.5-turbo"
    # Example question; in a real deployment this would come from the user.
    query = "What are the spectral bands and the wavelenghts captured by sentinel-2"
    use_GPT_as_source = False # whether we want to use GPT inherent knowledge as a source as well
    # add best n sources to context for GPT
    context = ''
    hits = search(query = query)
    # Concatenate the 10 best-ranked passages into the prompt context.
    for hit in hits[0:10]:
        context = context + passages[hit['corpus_id']] + '\n\n'
    # Optionally ask GPT directly and append its answer as an extra "source".
    if use_GPT_as_source:
        gpt_answer = call_GPT(GPT_model, query)
        print('GPT answer',gpt_answer)
        context = context + '\n\n' + gpt_answer['choices'][0]['message']['content'] + '\nSOURCE: GPT chapter GPT'
    # Ask GPT to synthesize an answer strictly from the retrieved context,
    # citing its sources in a fixed format.
    prompt = f" You are a truthful assistant that helps synthesising information from multiple sources. \
    Base your answer only on the given context and show your answer is correct by listing all the different sources used to provide the answer in the format SOURCE <insert text> chapter <insert text>. \
    Not all context might be relevant.\
    The query is: {query} \n Context: '''{context}'''"
    completion = call_GPT(GPT_model,prompt)
    print(completion)
    print(completion['choices'][0]['message']['content'])
| ESA-PhiLab/TestCase_1 | EO_portal_demo/launch_query.py | launch_query.py | py | 3,949 | python | en | code | 0 | github-code | 13 |
11593109172 | #
# 따라하며 배우는 파이썬과 데이터과학(생능출판사 2020)
# LAB 6-2 n-각형을 그리는 함수 만들어보기, 154쪽
#
import turtle
t = turtle.Turtle()
# n-각형을 그리는 함수를 정의한다.
def n_polygon(n, length):
    """Draw a regular n-sided polygon with sides of the given length.

    Uses exact (float) division for the turn angle so the polygon closes
    correctly even when n does not divide 360: with the original integer
    division (360 // n), e.g. n == 7 turned only 7 * 51 = 357 degrees and
    the figure did not close.
    """
    for i in range(n):
        t.forward(length)
        t.left(360 / n)  # exterior angle; float division keeps the total turn at exactly 360°
# Draw 20 squares, turning left 30 degrees before each one, so the
# overlapping squares form a rosette pattern.
for i in range(20):
    t.left(30)
    n_polygon(4, 100)
turtle.done() | dongupak/DataSciPy | src/파이썬코드(py)/Ch06/lab_6_2.py | lab_6_2.py | py | 438 | python | ko | code | 12 | github-code | 13 |
70185317777 | import torch
import torch.nn as nn
from torch.distributions import Categorical
class Network:
    """Thin wrapper bundling a torch module, its Adam optimizer and an
    optional Polyak-averaged target copy of the same architecture.

    Args:
        network_type: zero-argument callable returning a fresh ``nn.Module``.
        lr: Adam learning rate.
        target: if True, also build a target network initialized to the
            same weights as the online network.
    """

    def __init__(self, network_type, lr, target=False):
        self.net = network_type()
        self.optim = torch.optim.Adam(self.net.parameters(), lr=lr)
        if target:
            # Target starts as an exact copy of the online network.
            self.target_net = network_type()
            self.target_net.load_state_dict(self.net.state_dict())

    def __call__(self, *args):
        """Forward pass through the online network."""
        return self.net(*args)

    def minimize(self, loss):
        """Take one optimizer step that decreases *loss*."""
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()

    def maximize(self, loss):
        """Take one optimizer step that increases *loss* (gradient ascent)."""
        self.optim.zero_grad()
        (-loss).backward()
        self.optim.step()

    def target(self, *args):
        """Forward pass through the target network (requires target=True)."""
        return self.target_net(*args)

    def soft_update_target(self, tau=0.005):
        """Polyak-average the online weights into the target network.

        Args:
            tau: interpolation factor towards the online weights; the
                previously hard-coded 0.005 is kept as the default, so
                existing callers are unaffected.
        """
        for param, target_param in zip(self.net.parameters(), self.target_net.parameters()):
            target_param.data.copy_((1.0 - tau) * target_param.data + tau * param.data)

    def log_prob(self, s, a):
        """Delegate to the wrapped network's log_prob (policy networks only)."""
        return self.net.log_prob(s, a)
#! add note about why states are 2-dimensional, actions are 4-dim...
class EnvironmentModel(nn.Module):
    """Predict the next state from a (state, action) pair.

    States are 2-dimensional and actions are scalars; each is embedded
    into a 32-dimensional feature vector before being combined.
    """

    def __init__(self):
        super().__init__()
        # Embed the 2-d state into a 32-d feature vector.
        self.s = nn.Sequential(nn.Linear(2, 32), nn.ELU())
        # Embed the scalar action into a 32-d feature vector.
        self.a = nn.Sequential(nn.Linear(1, 32), nn.ELU())
        # Map the concatenated embeddings to a predicted next state.
        self.main = nn.Sequential(nn.Linear(64, 64), nn.ELU(), nn.Linear(64, 2))

    def forward(self, s, a):
        """Return the predicted next state for states *s* and actions *a*."""
        features = torch.cat((self.s(s), self.a(a)), -1)
        return self.main(features)
class ValueNetwork(nn.Module):
    """State-value approximator: maps a 2-d state to a scalar value."""

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(2, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, s):
        """Return the estimated value of state *s*."""
        return self.main(s)
class StochasticPolicy(nn.Module):
    """Categorical policy over 4 discrete actions, conditioned on a 2-d state."""

    def __init__(self):
        super().__init__()
        # Network producing unnormalized log-probabilities (logits) per action.
        self.logits = nn.Sequential(
            nn.Linear(2, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 4)
        )

    def dist(self, s):
        """Return the categorical action distribution for state *s*."""
        return Categorical(logits=self.logits(s))

    def forward(self, s):
        """Sample an action for state *s* (returned as a float tensor).

        The original code built the distribution twice and discarded the
        first copy; building it once avoids a redundant forward pass and
        does not change sampling behavior (construction consumes no RNG).
        """
        return self.dist(s).sample().float()

    def log_prob(self, s, a):
        """Log-probability of action *a* under the policy at state *s*."""
        return self.dist(s).log_prob(a)
41243209154 | import sys
from collections import defaultdict
from copy import deepcopy
import json
import resource
import timeit
import traceback
from statistics import median_high, median_low, mean
import difflib as df
import re
import subprocess
# import multiprocessing.pool
# from multiprocessing import TimeoutError
from os.path import join, exists
from functools import partial
from pathlib import Path
# import signal
# from contextlib import contextmanager
from concurrent.futures import TimeoutError
from pebble import ProcessPool, ProcessExpired
# import tqdm
from ecpp_individual_grammar import read_grammar, fixed_lexed_prog, get_token_list, get_actual_token_list, repair_prog
# @contextmanager
# def time_limit(seconds):
# def signal_handler(signum, frame):
# raise TimeoutError("Timed out!")
# signal.signal(signal.SIGALRM, signal_handler)
# signal.alarm(seconds)
# try:
# yield
# finally:
# signal.alarm(0)
def limit_memory():
    """Shrink this process's address-space limit (RLIMIT_AS).

    Unlimited (< 0) soft/hard limits are replaced with 8 GiB / 32 GiB;
    finite limits are scaled down to 60% / 80% of their current values.
    """
    GIB = 1024 * 1024 * 1024
    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    soft = 8 * GIB if soft < 0 else soft * 6 // 10
    hard = 32 * GIB if hard < 0 else hard * 8 // 10
    resource.setrlimit(resource.RLIMIT_AS, (soft, hard))
def rate(secs, times):
    """Return the percentage of entries in *times* that are <= *secs*."""
    within = sum(1 for t in times if t <= secs)
    return within * 100.0 / len(times)
def print_results(fails, succs, bads, not_pop_bads, not_pops, accs_per_chgs, all_tok_chngs, avg_time, parse_times, tpsize, times_sizes, time_gs, user_sames, all_ls, any_ls, out_dir, results_file, repaired_tuplas, repair_csts, used_erls, max_used_erls, max_time=30):
    """Print accumulated benchmark statistics and persist them to disk.

    Writes the same report to ``out_dir/results_file`` and dumps all
    repaired program triples to ``out_dir/repaired_prog_pairs.jsonl``.
    Called repeatedly while results accumulate (see do_all_test), so every
    metric is recomputed from the running aggregates on each call.

    Args:
        fails / succs: programs that timed out vs. finished.
        bads: successfully repaired (parsed) programs.
        not_pop_bads / not_pops: same counters for "not popular" programs.
        accs_per_chgs: dict token-changes -> (parsed, total) counters.
        all_tok_chngs: token-change count per parsed program.
        avg_time: *summed* parse time over all programs (despite the name).
        parse_times: per-program parse time (timeouts included).
        tpsize: summed token length of parsed programs.
        times_sizes: (parse time, program size) pairs.
        time_gs: user_time - run_time per parsed program.
        user_sames / all_ls / any_ls: repair-quality counters.
        out_dir / results_file: destination of the textual report.
        repaired_tuplas: dicts with orig/repaired/fix program texts.
        repair_csts: repair cost per parsed program.
        used_erls: dict error-rule position -> use count.
        max_used_erls: per-program maximum used error-rule position.
        max_time: largest bucket (sec) of the accuracy-vs-time curve.

    NOTE(review): several ratios divide by succs / not_pops /
    total_erules_used, and mean()/median_* need non-empty input -- callers
    are expected to have processed at least one program of each kind first.
    """
    positives = len(list(filter(lambda dt: dt > 0, time_gs)))
    print("# Dataset size:", succs, "/", fails + succs)
    print("# Parse accuracy within time limit (%):", bads * 100.0 / succs)
    print("# Timed out (%):", fails * 100.0 / (fails + succs))
    print("# => Total parse accuracy (%):", bads * 100.0 / (fails + succs))
    print("# => Not popular parse accuracy (%):", not_pop_bads * 100.0 / not_pops)
    print("# => Mean parse time (sec):", avg_time / (fails + succs))
    print("# => Median parse time (sec):", median_low(parse_times))
    # Median parse times restricted to programs above various length thresholds.
    temp_87 = list(map(lambda x: x[0], filter(lambda d: d[1] > 87, times_sizes)))
    print("# => Median parse time (sec):", median_low(temp_87) if len(temp_87) > 0 else None , "for", len(temp_87), "programs with length > 87")
    temp_100 = list(map(lambda x: x[0], filter(lambda d: d[1] > 100, times_sizes)))
    print("# => Median parse time (sec):", median_low(temp_100) if len(temp_100) > 0 else None , "for", len(temp_100), "programs with length > 100")
    temp_250 = list(map(lambda x: x[0], filter(lambda d: d[1] > 250, times_sizes)))
    print("# => Median parse time (sec):", median_low(temp_250) if len(temp_250) > 0 else None , "for", len(temp_250), "programs with length > 250")
    temp_500 = list(map(lambda x: x[0], filter(lambda d: d[1] > 500, times_sizes)))
    print("# => Median parse time (sec):", median_low(temp_500) if len(temp_500) > 0 else None , "for", len(temp_500), "programs with length > 500")
    print("# => Avg. parse time / 50 tokens (sec):", avg_time * 50 / tpsize)
    print("# => Dataset parsed faster than user (%):", positives * 100 / succs)
    print("# => Mean parse time speedup (sec):", mean(time_gs))
    print("# => Median parse time speedup (sec):", median_high(time_gs))
    print("# => Same as user accuracy (%):", user_sames * 100.0 / (fails + succs))
    print("# => All locations fixed accuracy (%):", all_ls * 100.0 / (fails + succs))
    print("# => Any locations fixed accuracy (%):", any_ls * 100.0 / (fails + succs))
    # Weighted average of the rank positions of the error rules actually used.
    total_erules_used = 0
    for er in used_erls:
        total_erules_used += used_erls[er]
    print("# => Average used error rule position:", sum([used_erls[er] * er for er in used_erls]) / total_erules_used)
    # Accuracy-vs-time curve (printed only at 1s and every 5s up to 60s).
    rates = defaultdict(float)
    for dt in range(1, max_time + 1):
        rates[dt] = rate(dt, parse_times)
        if dt <= 60 and (dt % 5 == 0 or dt == 1):
            print(dt, "sec: Parse accuracy =", rates[dt])
    # Distribution of repair costs and of maximum used error-rule positions.
    costs_rates = defaultdict(float)
    for dt in range(1, 11):
        costs_rates[dt] = rate(dt, repair_csts)
        if dt <= 5:
            print("Cost =", str(dt) + ": Test set % =", costs_rates[dt])
    erules_rates = defaultdict(float)
    for dt in range(5, 21, 5):
        erules_rates[dt] = rate(dt, max_used_erls)
        print("Top", dt, "error rules: Test set % =", erules_rates[dt])
    # Per-token-change-count accuracy and median parse time.
    for tok_chgs in sorted(accs_per_chgs.keys())[:5]:
        parsed, total_progs = accs_per_chgs[tok_chgs]
        print(tok_chgs, "token changes accuracy (%) =", parsed * 100.0 / total_progs)
    median_times_per_tok_chngs = defaultdict(float)
    for tok_chgs in sorted(accs_per_chgs.keys()):
        temp = list(map(lambda x: x[0], filter(lambda d: d[1] == tok_chgs, zip(parse_times, all_tok_chngs))))
        median_times_per_tok_chngs[tok_chgs] = median_low(temp) if len(temp) > 0 else None
        if tok_chgs <= 5:
            print(tok_chgs, "token changes median parse time (sec) =", median_times_per_tok_chngs[tok_chgs])
    print("---------------------------------------------------")
    # Mirror the same report into the results file (overwritten each call).
    with open(join(out_dir, results_file), "w") as dataset_file:
        dataset_file.write("Dataset size: " + str(succs) + "/" + str(fails + succs) + "\n")
        dataset_file.write("Parse accuracy within time limit (%): " + str(bads * 100.0 / succs) + "\n")
        dataset_file.write("Timed out (%): " + str(fails * 100.0 / (fails + succs)) + "\n")
        dataset_file.write("=> Total parse accuracy (%): " + str(bads * 100.0 / (fails + succs)) + "\n")
        dataset_file.write("=> Not popular parse accuracy (%): " + str(not_pop_bads * 100.0 / not_pops) + "\n")
        dataset_file.write("=> Mean parse time (sec): " + str(avg_time / (fails + succs)) + "\n")
        dataset_file.write("=> Median parse time (sec): " + str(median_low(parse_times)) + "\n")
        dataset_file.write("=> Median parse time (sec): " + str(median_low(temp_87) if len(temp_87) > 0 else None) + " for " + str(len(temp_87)) + " programs with length > 87" + "\n")
        dataset_file.write("=> Median parse time (sec): " + str(median_low(temp_100) if len(temp_100) > 0 else None) + " for " + str(len(temp_100)) + " programs with length > 100" + "\n")
        dataset_file.write("=> Median parse time (sec): " + str(median_low(temp_250) if len(temp_250) > 0 else None) + " for " + str(len(temp_250)) + " programs with length > 250" + "\n")
        dataset_file.write("=> Median parse time (sec): " + str(median_low(temp_500) if len(temp_500) > 0 else None) + " for " + str(len(temp_500)) + " programs with length > 500" + "\n")
        dataset_file.write("=> Avg. parse time / 50 tokens (sec): " + str(avg_time * 50 / tpsize) + "\n")
        dataset_file.write("=> Dataset parsed faster than user (%): " + str(positives * 100 / succs) + "\n")
        dataset_file.write("=> Mean parse time speedup (sec): " + str(mean(time_gs)) + "\n")
        dataset_file.write("=> Median parse time speedup (sec): " + str(median_high(time_gs)) + "\n")
        dataset_file.write("=> Same as user accuracy (%): " + str(user_sames * 100.0 / (fails + succs)) + "\n")
        dataset_file.write("=> All locations fixed accuracy (%): " + str(all_ls * 100.0 / (fails + succs)) + "\n")
        dataset_file.write("=> Any locations fixed accuracy (%): " + str(any_ls * 100.0 / (fails + succs)) + "\n")
        dataset_file.write("=> Average used error rule position: " + str(sum([used_erls[er] * er for er in used_erls]) / total_erules_used) + "\n")
        for dt in range(1, max_time + 1):
            dataset_file.write(str(dt) + " sec: Parse accuracy = " + str(rates[dt]) + "\n")
        for dt in range(1, 11):
            dataset_file.write("Cost = " + str(dt) + ": Test set % = " + str(costs_rates[dt]) + "\n")
        for dt in range(5, 21, 5):
            dataset_file.write("Top " + str(dt) + " error rules: Test set % = " + str(erules_rates[dt]) + "\n")
        for tok_chgs in sorted(accs_per_chgs.keys()):
            parsed, total_progs = accs_per_chgs[tok_chgs]
            dataset_file.write(str(tok_chgs) + " token changes accuracy (%) = " + str(parsed * 100.0 / total_progs) + "\n")
        for tok_chgs in sorted(accs_per_chgs.keys()):
            dataset_file.write(str(tok_chgs) + " token changes median parse time (sec) = " + str(median_times_per_tok_chngs[tok_chgs]) + "\n")
    # Dump every (orig, repaired, fix) triple as one JSON object per line.
    with open(join(out_dir, "repaired_prog_pairs.jsonl"), 'w') as repaired_progs_file:
        for pair in repaired_tuplas:
            repaired_progs_file.write(json.dumps(pair) + "\n")
def get_changes(diff):
    """Classify a difflib.ndiff line sequence into per-line change records.

    Walks the ndiff output ('- ', '+ ', '? ', '  ' prefixed lines) and pairs
    adjacent deletions/additions into 'replaced' records; unpaired ones
    become 'deleted'/'added'.  Whitespace-only changed lines are ignored.
    ``line_num`` tracks positions in the *first* ("bad") input, so only
    '- ' and '  ' lines advance it.

    Returns:
        List of (change_type, line_number) tuples for every line whose
        change type is not 'no_change'.
    """
    line_changes = []
    line_num = 0
    for i, change in enumerate(diff):
        line = change[2:]  # strip the two-character ndiff prefix
        if change[0] == '-':
            # Deletion: may pair with a preceding addition into a replacement.
            if i-1 >= 0 and diff[i-1][0] == '?':
                if i-2 >= 0 and diff[i-2][0] == '+' and line_changes != [] and line_changes[-1][0] == 'added':
                    prev_line = line_changes.pop()[-2]
                    line_changes.append(('replaced', line_num, prev_line, line))
                else:
                    line_changes.append(('deleted', line_num, None, line))
            elif i-1 >= 0 and diff[i-1][0] == '+' and line_changes != [] and line_changes[-1][0] == 'added':
                prev_line = line_changes.pop()[-2]
                line_changes.append(('replaced', line_num, prev_line, line))
            elif len(re.sub(r"[\n\t\s]*", "", line)) > 0:
                line_changes.append(('deleted', line_num, None, line))
            line_num += 1
        elif change[0] == '+':
            # Addition: may pair with a preceding deletion into a replacement
            # (attributed to the previous line of the "bad" input).
            if i-1 >= 0 and diff[i-1][0] == '?':
                if i-2 >= 0 and diff[i-2][0] == '-' and line_changes != [] and line_changes[-1][0] == 'deleted':
                    prev_line = line_changes.pop()[-1]
                    line_changes.append(('replaced', line_num-1, line, prev_line))
                else:
                    line_changes.append(('added', line_num, line, None))
            elif i-1 >= 0 and diff[i-1][0] == '-' and line_changes != [] and line_changes[-1][0] == 'deleted':
                prev_line = line_changes.pop()[-1]
                line_changes.append(('replaced', line_num-1, line, prev_line))
            elif len(re.sub(r"[\n\t\s]*", "", line)) > 0:
                line_changes.append(('added', line_num, line, None))
        elif change[0] == ' ':
            # Unchanged line; blank ones are skipped but still advance line_num.
            if change[2:].strip() == '':
                line_num += 1
                continue
            line_changes.append(('no_change', line_num, line, line))
            line_num += 1
    return [(ch_type, k) for ch_type, k, _, _ in line_changes if ch_type != 'no_change']
def return_all_changes(bad, fix):
    """Return the line numbers of 'replaced' lines between *bad* and *fix*.

    *bad* and *fix* are sequences of token lines. Only in-line replacements
    are reported; whole-line additions and deletions are deliberately
    ignored (the original code had those branches commented out).
    """
    diff = list(df.ndiff(bad, fix))
    line_changes = get_changes(diff)
    # Keep only the positions of lines that were modified in place.
    return [line_no for change_kind, line_no in line_changes if change_kind == 'replaced']
def has_parse(egrammar, max_cost, tup):
    """Attempt to repair one buggy program and score the repair.

    Args:
        egrammar: base error-correcting grammar (deep-copied, never mutated).
        max_cost: maximum repair cost passed to fixed_lexed_prog.
        tup: one dataset sample -- (token string, predicted error rules,
            token-change count, user repair time, tokens of the user's fix,
            popularity flag, original program text, user's fixed program
            text, actual token string).

    Returns:
        On success: (True, run_time, user_time - run_time, token changes,
        program size, matches-user-fix flag, all-lines-correct flag,
        any-line-correct flag, popularity, {orig, repaired, fix} dict,
        repair cost, indices of the used error rules within the predicted
        list). On failure: same shape with False/None/-1 placeholders.
    """
    tokns, eruls, tok_chgs, user_time, fixed_tokns, popul, orig_prg, orig_fix, actual_tokns = tup
    # print('=' * 42 + '\n')
    # print(orig_prg.replace("\\n", '\n'))
    # print(orig_fix.replace("\\n", '\n'))
    # print(eruls)
    # print('=' * 42 + '\n')
    # Reference repair: run the user's fixed tokens through a grammar with
    # no error rules, to get the "ground truth" abstract fixed sequence.
    upd_grammar_empty = deepcopy(egrammar)
    upd_grammar_empty.update_error_grammar_with_erules([])
    abstr_orig_fixed_seq, orig_fixed_seq, _, _, _ = fixed_lexed_prog(fixed_tokns, upd_grammar_empty, max_cost)
    # Timed section: repair the buggy tokens with the predicted error rules.
    start_time = timeit.default_timer()
    upd_grammar = deepcopy(egrammar)
    # if 'Err_Colon -> Err_Tag' in eruls:
    #     eruls.remove('Err_Colon -> Err_Tag')
    upd_grammar.update_error_grammar_with_erules(eruls)
    abstr_fixed_seq, fixed_seq, fixed_seq_ops, used_erules, repair_cost = fixed_lexed_prog(tokns, upd_grammar, max_cost)
    repaired_prog = None
    if fixed_seq is None:
        bparse = False
    else:
        # Apply the repair operations back onto the concrete token stream.
        repaired_prog = repair_prog(actual_tokns, fixed_seq_ops)
        bparse = True
    # debug_out = '=' * 42 + '\n'
    # debug_out += tokns.replace('_NEWLINE_ ', '\n')
    # debug_out += '\n' + '*' * 42 + '\n'
    # debug_out += fixed_seq_ops.replace('_NEWLINE_ ', '\n')
    # debug_out += '\n' + '*' * 42 + '\n'
    # debug_out += str(eruls)
    # debug_out += '\n' + '*' * 42 + '\n'
    # debug_out += actual_tokns.replace('_NEWLINE_ ', '\n')
    # debug_out += '\n' + '*' * 42 + '\n'
    # debug_out += repaired_prog
    # debug_out += '\n' + '=' * 42 + '\n'
    # print(debug_out)
    run_time = timeit.default_timer() - start_time
    prog_size = len(tokns.split())
    if bparse:
        # Compare which lines our repair changed vs. which lines the user changed.
        tokns_lines = tokns.split('_NEWLINE_')
        fixed_orig_lines = orig_fixed_seq.split('_NEWLINE_')
        fixed_seq_lines = fixed_seq.split('_NEWLINE_')
        orig_fixed_lines = return_all_changes(tokns_lines, fixed_orig_lines)
        our_fixed_lines = return_all_changes(tokns_lines, fixed_seq_lines)
        # Translate used error rules into their rank within the predicted list.
        used_erules = [eruls.index(str(er)) for er in used_erules if str(er) in eruls]
        all_correct_lines = all(map(lambda l: l in orig_fixed_lines, our_fixed_lines)) if our_fixed_lines else True
        any_correct_lines = any(map(lambda l: l in orig_fixed_lines, our_fixed_lines)) if our_fixed_lines else True
    dt = user_time - run_time
    if bparse:
        return (bparse, run_time, dt, tok_chgs, prog_size, abstr_orig_fixed_seq == abstr_fixed_seq, all_correct_lines, any_correct_lines, popul, {"orig": orig_prg, "repaired": repaired_prog, "fix": orig_fix}, repair_cost, used_erules)
    else:
        return (bparse, run_time, dt, tok_chgs, prog_size, False, False, False, popul, None, -1, None)
def read_sample(samp):
    """Parse one ' <||> '-separated dataset line into typed fields.

    Returns a tuple: (token string, list of error rules, token-change count,
    user repair time, fixed token string, is_popular flag, original program,
    user's fix, actual token string).
    """
    parts = samp.split(" <||> ")
    erules = parts[1].split(" <++> ")
    return (parts[0], erules, int(parts[2]), float(parts[3]), parts[4],
            parts[5] == "popular", parts[6], parts[7], parts[8])
def do_all_test(grammar_file, data_dir, out_dir, top_rules_num, ecpp_max_cost, results_file, in_file):
    """Run the repair benchmark over one dataset file, in parallel.

    Reads up to 15000 samples from ``data_dir/in_file``, keeps the top
    ``top_rules_num`` predicted error rules per sample, repairs each sample
    with has_parse in a 28-worker process pool (5-minute timeout per
    program) and periodically flushes aggregate statistics via
    print_results (every 50 processed programs and once at the end).
    """
    ERROR_GRAMMAR = read_grammar(grammar_file)
    TIMEOUT = 60 * 5
    # Running aggregates, updated as results stream in from the pool.
    parses_bad = 0
    finds_all_lines = 0
    finds_any_lines = 0
    same_as_users = 0
    not_popular_parses = 0
    all_not_populars = 0
    done = 0
    failed = 0
    dataset = []
    avg_run_time = 0.0
    total_size = 0
    all_times_sizes = []
    all_tok_chngs = []
    parsed_progs_times = []
    time_gains = []
    all_tuplas = []
    repair_costs = []
    all_used_erules = defaultdict(int)
    max_used_erules = []
    accs_per_changes = defaultdict(lambda : (0, 0))
    with ProcessPool(max_workers=28, max_tasks=5) as pool:
        dataset_part_file = join(data_dir, in_file)
        if exists(dataset_part_file):
            with open(dataset_part_file, "r") as inFile:
                dataset = list(map(read_sample, inFile.read().split('\n')[:-1]))
        # Keep only the top-N predicted error rules and cap the dataset size.
        dataset = [(tokns, erules[:top_rules_num], tok_chgs, user_time, fixed_tokns, popular, orig_prog, orig_fix, actual_tkns) for tokns, erules, tok_chgs, user_time, fixed_tokns, popular, orig_prog, orig_fix, actual_tkns in dataset[:15000]]
        # Pre-fill per-token-change totals and count "not popular" samples.
        for _, _, tok_chgs, _, _, popul, _, _, _ in dataset:
            parsed, total_progs = accs_per_changes[tok_chgs]
            accs_per_changes[tok_chgs] = (parsed, total_progs + 1)
            if not popul:
                all_not_populars += 1
        print("# Syntax Errors to repair:", len(dataset))
        new_has_parse = partial(has_parse, ERROR_GRAMMAR, ecpp_max_cost)
        future = pool.map(new_has_parse, dataset, chunksize=1, timeout=TIMEOUT)
        it = future.result()
        # Consume results as they complete; timeouts/crashes surface as exceptions.
        while True:
            try:
                bruh = next(it)
                if bruh:
                    parse_bad, run_time, dt, tok_chgs, size, user_same, all_lines, any_lines, popular, tupla, repair_cst, used_erls = bruh
                    if parse_bad:
                        parses_bad += 1
                        if all_lines:
                            finds_all_lines += 1
                        if any_lines:
                            finds_any_lines += 1
                        if user_same:
                            same_as_users += 1
                        if not popular:
                            not_popular_parses += 1
                        repair_costs.append(repair_cst)
                        for er in used_erls:
                            all_used_erules[er] += 1
                        max_used_erules.append(max(used_erls))
                        parsed, total_progs = accs_per_changes[tok_chgs]
                        accs_per_changes[tok_chgs] = (parsed + 1, total_progs)
                    all_tok_chngs.append(tok_chgs)
                    avg_run_time += run_time
                    parsed_progs_times.append(run_time)
                    total_size += size
                    all_times_sizes.append((run_time, size))
                    time_gains.append(dt)
                    all_tuplas.append(tupla)
                done += 1
                if (failed + done) % 50 == 0:
                    print_results(failed, done, parses_bad, not_popular_parses, all_not_populars, accs_per_changes, all_tok_chngs, avg_run_time, parsed_progs_times, total_size, all_times_sizes, time_gains, same_as_users, finds_all_lines, finds_any_lines, out_dir, results_file, all_tuplas, repair_costs, all_used_erules, max_used_erules, max_time=TIMEOUT+5)
            except StopIteration:
                break
            except (TimeoutError, ProcessExpired):
                # Worker exceeded TIMEOUT (or died): count as a failure with
                # the full timeout as its parse time.
                failed += 1
                run_time = TIMEOUT
                avg_run_time += run_time
                parsed_progs_times.append(run_time)
                if (failed + done) % 50 == 0:
                    print_results(failed, done, parses_bad, not_popular_parses, all_not_populars, accs_per_changes, all_tok_chngs, avg_run_time, parsed_progs_times, total_size, all_times_sizes, time_gains, same_as_users, finds_all_lines, finds_any_lines, out_dir, results_file, all_tuplas, repair_costs, all_used_erules, max_used_erules, max_time=TIMEOUT+5)
            except Exception as e:
                # Unexpected worker exception: log it and treat like a timeout.
                print("WHY here?!", str(e))
                traceback.print_tb(e.__traceback__)
                failed += 1
                run_time = TIMEOUT
                avg_run_time += run_time
                parsed_progs_times.append(run_time)
                if (failed + done) % 50 == 0:
                    print_results(failed, done, parses_bad, not_popular_parses, all_not_populars, accs_per_changes, all_tok_chngs, avg_run_time, parsed_progs_times, total_size, all_times_sizes, time_gains, same_as_users, finds_all_lines, finds_any_lines, out_dir, results_file, all_tuplas, repair_costs, all_used_erules, max_used_erules, max_time=TIMEOUT+5)
    # Final flush of all statistics after the pool has drained.
    print_results(failed, done, parses_bad, not_popular_parses, all_not_populars, accs_per_changes, all_tok_chngs, avg_run_time, parsed_progs_times, total_size, all_times_sizes, time_gains, same_as_users, finds_all_lines, finds_any_lines, out_dir, results_file, all_tuplas, repair_costs, all_used_erules, max_used_erules, max_time=TIMEOUT+5)
if __name__ == "__main__":
grammarFile = sys.argv[1]
dataDir = Path(sys.argv[2])
outDir = Path(sys.argv[3])
input_file = sys.argv[4]
num_of_tops = int(sys.argv[5])
max_cost = int(sys.argv[6])
limit_memory()
do_all_test(grammarFile, dataDir, outDir, num_of_tops, max_cost, "ECPP-runtime-clean-test-top-1-repair-" + str(num_of_tops) + "-popular-cost-" + str(max_cost) + ".txt", input_file)
# # For individual testing using:
# # >>> time python run_parse_test_time_top_n_all_states.py python-grammar.txt repairs/orig_0.py repairs/fix_0.py test-set-top-20-partials-probs.txt 20
# failPath = dataDir
# goodPath = outDir
# bad = failPath.read_text()
# fix = goodPath.read_text()
# # print('*' * 42)
# # print(bad)
# # print('*' * 42)
# # print(fix)
# # print('*' * 42)
# ERROR_GRAMMAR = read_grammar(grammarFile)
# terminals = ERROR_GRAMMAR.get_alphabet()
# # erules0 = ["Err_Break_Stmt -> ", "Err_Close_Paren -> ", "Err_Colon -> ", "Err_Dedent -> ", "Err_For_Keyword -> H For_Keyword", "Err_Indent -> ", "Err_Indent -> Err_Tag", "Err_Literals -> ", "Err_Literals -> H Literals", "Err_Newline -> H Newline", "Err_Open_Paren -> ", "Err_Return_Keyword -> ", "Err_Return_Keyword -> H Return_Keyword", "InsertErr -> )", "InsertErr -> :", "InsertErr -> _NAME_", "InsertErr -> def", "InsertErr -> for", "InsertErr -> if", "InsertErr -> while"]
# # erules0 = ["Err_Dedent -> ", "Err_Indent -> ", "Err_Indent -> Err_Tag", "Err_Return_Keyword -> ", "Err_Return_Keyword -> H Return_Keyword"]
# # erules0 = ["Err_Dedent -> ", "Err_Indent -> ", "Err_Literals -> "]
# # erules0 = ["InsertErr -> )", "Err_Colon -> H Colon", "Err_Colon -> "]
# utime1 = 4532.0
# erules1 = ['Err_Break_Stmt -> ', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Colon -> ', 'Err_Dedent -> ', 'Err_Indent -> ', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Pass_Stmt -> ', 'Err_Return_Keyword -> ', 'InsertErr -> )', 'InsertErr -> :', 'InsertErr -> _NAME_', 'InsertErr -> def', 'InsertErr -> else', 'InsertErr -> for', 'InsertErr -> if', 'InsertErr -> while']
# utime2 = 10.0
# erules2 = ['Err_Arith_Op -> Err_Tag', 'Err_Close_Paren -> H Close_Paren', 'Err_Colon -> H Colon', 'Err_Comma -> Err_Tag', 'Err_Literals -> ', 'Err_Literals -> Err_Tag', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> :', 'Err_Tag -> _NUMBER_', 'Err_Tag -> _UNKNOWN_', 'Err_Tag -> else', 'Err_Vfpdef -> Err_Tag', 'InsertErr -> :', 'InsertErr -> =', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_', 'InsertErr -> _UNKNOWN_']
# utime3 = 12.0
# erules3 = ['Err_Close_Paren -> H Close_Paren', 'Err_Literals -> ', 'Err_Literals -> Err_Tag', 'Err_Literals -> H Literals', 'Err_MulDiv_Op -> H MulDiv_Op', 'Err_Newline -> H Newline', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> _NUMBER_', 'Err_Tag -> _STRING_', 'Err_Vfpdef -> Err_Tag', 'Err_Vfpdef -> H Vfpdef', 'InsertErr -> )', 'InsertErr -> +', 'InsertErr -> ,', 'InsertErr -> .', 'InsertErr -> :', 'InsertErr -> =', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> return']
# utime4 = 8.0
# erules4 = ['Err_Break_Stmt -> ', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Colon -> ', 'Err_Dedent -> ', 'Err_Endmarker -> H Endmarker', 'Err_Indent -> ', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Return_Keyword -> ', 'InsertErr -> )', 'InsertErr -> :', 'InsertErr -> _NAME_', 'InsertErr -> def', 'InsertErr -> else', 'InsertErr -> for', 'InsertErr -> if', 'InsertErr -> while']
# utime5 = 12.0
# erules5 = ['Err_Arith_Op -> Err_Tag', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Literals -> Err_Tag', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> _NUMBER_', 'Err_Tag -> _UNKNOWN_', 'Err_Unary_Op -> Err_Tag', 'InsertErr -> (', 'InsertErr -> .', 'InsertErr -> :', 'InsertErr -> _DEDENT_', 'InsertErr -> _INDENT_', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_', 'InsertErr -> _UNKNOWN_', 'InsertErr -> return']
# utime6 = 35.0
# erules6 = ['Err_Close_Curl_Bracket -> ', 'Err_Close_Paren -> Err_Tag', 'Err_Close_Paren -> H Close_Paren', 'Err_Close_Sq_Bracket -> ', 'Err_Comma -> Err_Tag', 'Err_Comma -> H Comma', 'Err_Literals -> ', 'Err_Literals -> Err_Tag', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Curl_Bracket -> ', 'Err_Open_Paren -> H Open_Paren', 'Err_Open_Sq_Bracket -> ', 'Err_Tag -> :', 'InsertErr -> (', 'InsertErr -> )', 'InsertErr -> :', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_']
# utime7 = 9.0
# erules7 = ['Err_Close_Paren -> H Close_Paren', 'Err_Colon -> H Colon', 'Err_Else_Keyword -> Err_Tag', 'Err_If_Keyword -> Err_Tag', 'Err_Literals -> ', 'Err_Literals -> Err_Tag', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Return_Keyword -> H Return_Keyword', 'Err_Tag -> _NUMBER_', 'Err_Tag -> _STRING_', 'Err_Vfpdef -> Err_Tag', 'InsertErr -> :', 'InsertErr -> _DEDENT_', 'InsertErr -> _INDENT_', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> def', 'InsertErr -> return', 'InsertErr -> try']
# utime8 = 6.0
# erules8 = ['Err_Arith_Op -> ', 'Err_Arith_Op -> H Arith_Op', 'Err_Assign_Op -> ', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Colon -> ', 'Err_Comma -> ', 'Err_Comma -> H Comma', 'Err_Literals -> H Literals', 'Err_MulDiv_Op -> ', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Open_Paren -> H Open_Paren', 'Err_Return_Keyword -> Err_Tag', 'Err_Simple_Name -> ', 'Err_Tag -> _NAME_', 'InsertErr -> (', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_']
# utime9 = 12.0
# erules9 = ['Err_Arith_Op -> ', 'Err_Arith_Op -> H Arith_Op', 'Err_Assign_Op -> ', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Comma -> ', 'Err_Comma -> H Comma', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_MulDiv_Op -> H MulDiv_Op', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> )', 'InsertErr -> )', 'InsertErr -> .', 'InsertErr -> :', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_']
# utime10 = 25.0
# erules10 = ['Err_Close_Paren -> H Close_Paren', 'Err_Colon -> H Colon', 'Err_Comp_Op -> Err_Tag', 'Err_Comp_Op -> H Comp_Op', 'Err_Literals -> ', 'Err_Literals -> Err_Tag', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Tag -> =', 'InsertErr -> +=', 'InsertErr -> ,', 'InsertErr -> <', 'InsertErr -> =', 'InsertErr -> [', 'InsertErr -> ]', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> and', 'InsertErr -> is', 'InsertErr -> return']
# utime11 = 24.0
# erules11 = ['Err_Assign_Op -> H Assign_Op', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Colon -> H Colon', 'Err_Comp_Op -> ', 'Err_Comp_Op -> Err_Tag', 'Err_Comp_Op -> H Comp_Op', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> =', 'Err_Tag -> _NAME_', 'InsertErr -> (', 'InsertErr -> =', 'InsertErr -> [', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> return']
# utime12 = 80.0
# erules12 = ['Err_Colon -> ', 'Err_Colon -> H Colon', 'Err_Comp_Op -> H Comp_Op', 'Err_If_Keyword -> Err_Tag', 'Err_In_Keyword -> ', 'Err_In_Keyword -> Err_Tag', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Tag -> _NAME_', 'Err_Tag -> for', 'Err_While_Keyword -> Err_Tag', 'InsertErr -> )', 'InsertErr -> ,', 'InsertErr -> _DEDENT_', 'InsertErr -> _INDENT_', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> for']
# utime13 = 105.0
# erules13 = ['Err_Close_Paren -> ', 'Err_Colon -> ', 'Err_Dedent -> ', 'Err_Def_Keyword -> ', 'Err_Endmarker -> H Endmarker', 'Err_Indent -> ', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Return_Keyword -> H Return_Keyword', 'Err_Simple_Name -> ', 'InsertErr -> :', 'InsertErr -> _DEDENT_', 'InsertErr -> _INDENT_', 'InsertErr -> _NAME_', 'InsertErr -> def', 'InsertErr -> for', 'InsertErr -> if', 'InsertErr -> while']
# utime14 = 15.0
# erules14 = ['Err_Close_Paren -> ', 'Err_Colon -> ', 'Err_Dedent -> ', 'Err_Dedent -> H Dedent', 'Err_Def_Keyword -> ', 'Err_Endmarker -> H Endmarker', 'Err_For_Keyword -> H For_Keyword', 'Err_In_Keyword -> Err_Tag', 'Err_Indent -> ', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Return_Keyword -> H Return_Keyword', 'Err_Simple_Name -> ', 'InsertErr -> _DEDENT_', 'InsertErr -> _INDENT_', 'InsertErr -> def', 'InsertErr -> for', 'InsertErr -> while']
# utime15 = 20.0
# erules15 = ['Err_Arith_Op -> ', 'Err_Assign_Op -> ', 'Err_Assign_Op -> Err_Tag', 'Err_Close_Paren -> ', 'Err_Close_Paren -> Err_Tag', 'Err_Comma -> ', 'Err_Comma -> Err_Tag', 'Err_Comma -> H Comma', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Open_Paren -> Err_Tag', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> (', 'Err_Tag -> _NAME_', 'Err_Tag -> _STRING_', 'InsertErr -> (', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_']
# utime16 = 30.0
# erules16 = ['Err_Arith_Op -> H Arith_Op', 'Err_Assign_Op -> ', 'Err_Assign_Op -> Err_Tag', 'Err_Close_Paren -> ', 'Err_Close_Paren -> Err_Tag', 'Err_Close_Sq_Bracket -> ', 'Err_Comma -> ', 'Err_Comma -> Err_Tag', 'Err_Comma -> H Comma', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Open_Paren -> Err_Tag', 'Err_Open_Paren -> H Open_Paren', 'Err_Open_Sq_Bracket -> H Open_Sq_Bracket', 'Err_Tag -> (', 'InsertErr -> (', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_']
# utime17 = 8.0
# erules17 = ['Err_Assign_Op -> ', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Colon -> ', 'Err_Colon -> H Colon', 'Err_Comma -> H Comma', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> _NAME_', 'InsertErr -> (', 'InsertErr -> )', 'InsertErr -> :', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_', 'InsertErr -> def', 'InsertErr -> for']
# utime19 = 196.0
# erules19 = ['Err_Break_Stmt -> ', 'Err_Close_Paren -> ', 'Err_Close_Paren -> H Close_Paren', 'Err_Colon -> ', 'Err_Dedent -> ', 'Err_Indent -> ', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Pass_Stmt -> ', 'Err_Return_Keyword -> ', 'InsertErr -> )', 'InsertErr -> :', 'InsertErr -> _NAME_', 'InsertErr -> def', 'InsertErr -> else', 'InsertErr -> for', 'InsertErr -> if', 'InsertErr -> while']
# utime20 = 81.0
# erules20 = ['Err_And_Bool_Op -> Err_Tag', 'Err_Assign_Op -> H Assign_Op', 'Err_Close_Paren -> ', 'Err_Colon -> ', 'Err_Colon -> H Colon', 'Err_Comp_Op -> ', 'Err_Comp_Op -> Err_Tag', 'Err_Comp_Op -> H Comp_Op', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> =', 'InsertErr -> (', 'InsertErr -> =', 'InsertErr -> [', 'InsertErr -> ]', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> return']
# utime21 = 25.0
# erules21 = ['Err_Assign_Op -> H Assign_Op', 'Err_Close_Paren -> ', 'Err_Colon -> H Colon', 'Err_Comp_Op -> ', 'Err_Comp_Op -> Err_Tag', 'Err_Comp_Op -> H Comp_Op', 'Err_Literals -> ', 'Err_Literals -> H Literals', 'Err_Newline -> H Newline', 'Err_Open_Paren -> ', 'Err_Open_Paren -> H Open_Paren', 'Err_Tag -> =', 'Err_Tag -> _NAME_', 'InsertErr -> (', 'InsertErr -> =', 'InsertErr -> [', 'InsertErr -> _NAME_', 'InsertErr -> _NUMBER_', 'InsertErr -> _STRING_', 'InsertErr -> return']
# utime_program8 = 48.0
# erules_program8 = ["Err_Arith_Op -> H Arith_Op", "Err_Assign_Op -> ", "Err_Assign_Op -> Err_Tag", "Err_Close_Paren -> ", "Err_Close_Paren -> Err_Tag", "Err_Comma -> ", "Err_Comma -> Err_Tag", "Err_Comma -> H Comma", "Err_Comp_Op -> Err_Tag", "Err_Literals -> H Literals", "Err_Newline -> H Newline", "Err_Open_Paren -> ", "Err_Open_Paren -> Err_Tag", "Err_Open_Paren -> H Open_Paren", "Err_Open_Sq_Bracket -> H Open_Sq_Bracket", "Err_Tag -> (", "InsertErr -> (", "InsertErr -> _NAME_", "InsertErr -> _NUMBER_", "InsertErr -> _STRING_"]
# pr = (get_token_list(bad, terminals), erules9, utime9, get_token_list(fix, terminals), True, bad, fix, get_actual_token_list(bad, terminals))
# all_names = list(set([tk[1] for tk in zip(get_token_list(bad, terminals).split(), get_actual_token_list(bad, terminals).split()) if tk[0] == '_NAME_'])) + ["1"]
# for name in all_names:
# if "def " + name in bad:
# all_names.remove(name)
# # print(all_names)
# results = has_parse(ERROR_GRAMMAR, max_cost, pr)
# # print(results)
# dct = results[-3]
# print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# print("-------------Original Buggy Program---------------")
# print(dct['orig'][:-1].replace("\\n", '\n'))
# print("-----------------Repaired Program-----------------")
# print(dct['repaired'][:-3].replace("\\n", '\n'))
# print("--------------Original Fix Program----------------")
# print(dct['fix'][:-1].replace("\\n", '\n'))
# print("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
| gsakkas/seq2parse | src/run_parse_test_time_top_n_preds_partials.py | run_parse_test_time_top_n_preds_partials.py | py | 33,281 | python | en | code | 8 | github-code | 13 |
1721828877 | import RPi.GPIO as GPIO
from lib_utils import *
import numpy as np
class GBlob():
    """Blob detection. Returns coordinates of all blobs.

    This class takes a camera image and returns the pixel coordinates of
    all blobs. It contains functions to convert the image to grayscale,
    threshold the image to separate blob pixels from background, create
    lists of indices of pixels which belong to single blobs, and calculate
    the center of each blob.
    """

    def __init__(self, side, thresh=U_BLOB_THRESH):
        """Set up a blob detector for one camera side.

        Arguments:
            side -- camera side identifier

        Keyword Arguments:
            thresh {int} -- detection threshold that separates background
                from blob pixels (default: U_BLOB_THRESH)
        """
        # Parameters
        self.side = side
        self.thresh = thresh

        # Initializations
        self.blob_size = 0             # total count of thresholded pixel coords
        self.blobs = np.zeros((2, 1))  # (m, n) centroids, one column per blob
        self.no_blobs = 0              # number of detected blobs

    def detect(self, img):
        """Runs all subfunctions for blob detection on a new image."""
        # Reset any results from a previous detection.
        self.blob_size = 0
        self.blobs = np.zeros((2, 1))
        self.no_blobs = 0

        img_gray = self._raw_to_gray(img)
        blob_pixels = self._thresholding(img_gray)
        self._continuity(blob_pixels)
        self.reflections()

    def _raw_to_gray(self, img):
        """Converts the image to grayscale.

        Only the third channel (index 2) of the raw image is kept and
        used as the grayscale intensity.
        """
        img_rgb = np.zeros((U_CAM_MRES, U_CAM_NRES, 3), dtype=np.uint8)
        img_rgb = np.array(img)
        img_gray = np.zeros((U_CAM_MRES, U_CAM_NRES))
        img_gray[:, :] = img_rgb[:, :, 2]

        return img_gray

    def _thresholding(self, img_gray):
        """Thresholds the gray image and returns blob pixels.

        Arguments:
            img_gray -- grayscale image

        Returns:
            2 x N array; row 0 holds m-indices, row 1 n-indices of all
            pixels brighter than the threshold.
        """
        blob_pixels = np.where(img_gray > self.thresh)
        blob_pixels = np.asarray(blob_pixels)

        return blob_pixels

    def _continuity(self, blob_pixels):
        """Clusters blob pixels and returns lists of single blob centroids.

        This method checks all blob pixels for continuity in x-direction.
        It then checks the subsets which are continous in x-direction for
        continuity in y-direction. It finally populates ``self.blobs``
        with the centroids of individual blobs.

        Arguments:
            blob_pixels -- array of pixels that belong to blobs
        """
        # Total amount of blob pixels. If (almost) none, return.
        self.blob_size = blob_pixels.size
        if self.blob_size < 4:
            self.blobs = np.zeros(0)
            return

        # Find pixels that are continuous in m-direction.
        m = blob_pixels[0, :]
        m_shifted = np.zeros(m.shape)
        m_shifted[1:-1] = np.copy(m[:-2])
        m_shifted[0] = -1
        m_shifted[-1] = -1
        blob_m = np.where(abs(m_shifted - m) > 1)
        blob_m = np.asarray(blob_m)
        blob_m[:, -1] += 1

        # For each continous set in m-direction, find pixels that are
        # also continuous in n-direction.
        for i in range(0, blob_m.shape[1]-1):
            m = blob_pixels[0, blob_m[0, i]:blob_m[0, i+1]]
            n = blob_pixels[1, blob_m[0, i]:blob_m[0, i+1]]
            arg_n = np.argsort(n)
            n_sorted = np.sort(n)
            n_shifted = np.zeros(n.shape)
            n_shifted[1:-1] = np.copy(n_sorted[:-2])
            n_shifted[0] = -1
            n_shifted[-1] = -1
            blob_n = np.where(abs(n_shifted - n_sorted) > 1)
            blob_n = np.asarray(blob_n)
            blob_n[:, -1] += 1

            # For pixels continuous in m- and n-direction, find centroids.
            for j in range(0, blob_n.shape[1]-1):
                # BUG FIX: np.asscalar was deprecated in NumPy 1.16 and
                # removed in NumPy 1.23; ndarray.item() is the drop-in
                # replacement.
                blob_indices = arg_n[blob_n[:, j].item():blob_n[:, j+1].item()]
                m_center = round(sum(m[blob_indices])/blob_indices.shape[0], 3)
                n_center = round(sum(n[blob_indices])/blob_indices.shape[0], 3)
                # flip image 180 degrees bcs camera mounted upside down
                m_center = U_CAM_MRES - m_center
                n_center = U_CAM_NRES - n_center
                if self.no_blobs == 0:
                    self.blobs[0, 0] = m_center
                    self.blobs[1, 0] = n_center
                else:
                    self.blobs = np.append(self.blobs, [[m_center], [n_center]], axis=1)
                self.no_blobs += 1

    def reflections(self):
        """Discard blobs that are reflected on the surface.

        NOTE(review): this keeps the TWO blobs with the largest
        m-coordinate, although the original comment said "single lowest
        blob only" -- confirm that keeping two is intended.
        """
        if self.no_blobs > 2:
            # print(self.blobs)
            blob_ind = np.argsort(self.blobs[0, :])[-2:]
            self.blobs = self.blobs[:, blob_ind]
            # print(self.blobs)
| fberlinger/blueswarm | fishfood/old_but_dont_delete/lib_globalblob.py | lib_globalblob.py | py | 4,809 | python | en | code | 2 | github-code | 13 |
24083013204 | import cv2
import pandas as pd
def processar_imagem(imagem):
    """Process an image file and extract basic statistics.

    Parameters
    ----------
    imagem : str
        Path to the image file.

    Returns
    -------
    dict
        Keys 'Imagem' (path), 'Valor Médio' (mean grayscale intensity),
        'Altura' (height in pixels) and 'Largura' (width in pixels).

    Raises
    ------
    FileNotFoundError
        If the image cannot be read.
    """
    # Load the image with OpenCV.
    img = cv2.imread(imagem)
    # BUG FIX / robustness: cv2.imread returns None (it does not raise)
    # when the file is missing or unreadable, which would otherwise make
    # cvtColor fail with a cryptic error. Fail loudly and clearly here.
    if img is None:
        raise FileNotFoundError("Could not read image: {}".format(imagem))

    # Convert to grayscale.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Mean pixel intensity of the grayscale image.
    valor_medio = img_gray.mean()

    # Image dimensions (rows, columns).
    altura, largura = img_gray.shape

    # Bundle the extracted information.
    info_imagem = {
        'Imagem': imagem,
        'Valor Médio': valor_medio,
        'Altura': altura,
        'Largura': largura
    }

    return info_imagem
# Images to process.
imagens = ['imagem1.jpg', 'imagem2.jpg', 'imagem3.jpg']

# Collect the per-image statistics.
info_imagens = [processar_imagem(imagem) for imagem in imagens]

# Tabulate the information with pandas and display it.
df = pd.DataFrame(info_imagens)
print(df)
| rodrigosiqq/processPandas | processamento_imagens.py | processamento_imagens.py | py | 1,158 | python | pt | code | 0 | github-code | 13 |
1199702475 | # 给定一个整数数组 A,返回 A 中最长等差子序列的长度。
#
# 回想一下,A 的子序列是列表 A[i_1], A[i_2], ..., A[i_k] 其中 0 <= i_1 < i_2 < ... < i_k <= A.length - 1。
# 并且如果 B[i+1] - B[i]( 0 <= i < B.length - 1) 的值都相同,那么序列 B 是等差的。
# 示例 1:
# 输入:[3,6,9,12]
# 输出:4
# 解释:
# 整个数组是公差为 3 的等差数列。
#
# 示例 2:
# 输入:[9,4,7,2,10]
# 输出:3
# 解释:
# 最长的等差子序列是 [4,7,10]。
#
# 示例 3:
# 输入:[20,1,15,3,10,5,8]
# 输出:4
# 解释:
# 最长的等差子序列是 [20,15,10,5]。
#
# 提示:
# 2 <= A.length <= 2000
# 0 <= A[i] <= 10000
from typing import List
class Solution:
    def longestArithSeqLength(self, A: List[int]) -> int:
        """Return the length of the longest arithmetic subsequence of A.

        dp[i] maps a common difference d to the length of the longest
        arithmetic subsequence ending at index i with that difference.

        BUG FIX: the previous implementation indexed a fixed-width list
        of 10000 slots by the raw difference, so negative differences
        aliased positive ones through Python's negative indexing (e.g.
        d = -1 collided with d = 9999) and d == 10000 raised IndexError.
        Dictionaries keyed by the exact difference avoid both problems.

        Time O(n^2); space O(n^2) worst case.
        """
        n = len(A)
        # With 0, 1 or 2 elements the whole array is arithmetic.
        if n <= 2:
            return n
        dp = [{} for _ in range(n)]
        res = 2
        for i in range(1, n):
            for j in range(i):
                d = A[i] - A[j]
                # Extend the best run ending at j with difference d,
                # or start a new pair (A[j], A[i]) of length 2.
                dp[i][d] = max(dp[i].get(d, 0), dp[j].get(d, 1) + 1)
                res = max(res, dp[i][d])
        return res
# Quick smoke test; expected output is 3.
solver = Solution()
sample = [9, 4, 7, 2, 10]
print(solver.longestArithSeqLength(sample))
| Lemonstars/algorithm | leetcode/1027.最长等差数列/solution.py | solution.py | py | 1,371 | python | zh | code | 0 | github-code | 13 |
class Property:
    """A purchasable board square for a Monopoly-style game.

    Attributes:
        name: Display name of the property.
        price: Purchase price.
        rent: Base rent charged to visiting players.
        color: Color group the property belongs to.
        position: Board index of the square.
        houses: Number of houses built (defaults to 0).
        owner: Owning player, or None while unowned.
    """

    # BUG FIX: the constructor was named ``init`` instead of ``__init__``,
    # so Python never called it and instances were created unconfigured.
    def __init__(self, name, price, rent, color, position, houses=0, owner=None):
        self.name = name
        self.price = price
        self.rent = rent
        self.color = color
        self.position = position
        self.houses = houses
        self.owner = owner

    # Backwards-compatible alias for any caller that invoked ``init``
    # manually as a workaround for the old name.
    init = __init__
33934705300 | import requests
import re
import pymysql
import time
from lxml import etree
import numpy.random
# Shared MySQL connection and cursor for the whole crawl session; the
# commit happens once, at the end of the __main__ block.
# NOTE(review): credentials are hard-coded -- move to config/env vars.
conn = pymysql.connect(host='localhost', user='root', passwd='123456', db='crawler', charset='utf8')
cur = conn.cursor()
# Desktop-browser User-Agent so douban.com serves the normal HTML pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'}
def get_movie_url(url):
    """Fetch one Top-250 list page and crawl every movie linked on it."""
    response = requests.get(url, headers=headers)
    print(response.status_code)
    tree = etree.HTML(response.text)
    # Each movie poster on the list page links to its detail page.
    for href in tree.xpath('//div[@class="pic"]/a/@href'):
        get_movie_info(href)
def get_movie_info(url):
    """Scrape one douban movie-detail page and persist it to MySQL.

    Extracts title, director, cast, genre, country, release date, runtime
    and score via XPath/regex and inserts one row into the ``doubanmovie``
    table (the commit is performed by the caller).

    Pages with an unexpected layout make one of the ``[0]`` lookups raise
    IndexError; those pages are logged by URL and skipped (best-effort).
    """
    html = requests.get(url, headers=headers)
    selector = etree.HTML(html.text)
    try:
        # XPath lookups for fields with a stable element structure.
        name = selector.xpath('//h1/span[1]/text()')[0]
        director = selector.xpath('//div[@id="info"]/span[1]/span[2]/a/text()')[0]
        actors = selector.xpath('//div[@id="info"]/span[3]/span[2]')[0]
        # string(.) concatenates the text of all actor sub-elements.
        actor = actors.xpath('string(.)')
        # Regex lookups for fields that are easier to grab from raw HTML.
        style = re.findall('<span property="v:genre">(.*?)</span>', html.text, re.S)[0]
        country = re.findall('<span class="pl">制片国家/地区:</span> (.*?)<br/>', html.text, re.S)[0]
        release_time = re.findall('上映日期:</span> .*?>(.*?)</span>', html.text, re.S)[0]
        runtime = re.findall('片长:</span>.*?>(.*?)</span>', html.text, re.S)[0]
        score = selector.xpath('//strong[@class="ll rating_num"]/text()')[0]
        # Parameterized insert -- values are escaped by the driver.
        cur.execute("""
        insert into doubanmovie (name,director, actor, style, country, release_time, runtime, score)
        values(%s,%s,%s,%s,%s,%s,%s,%s)
        """, (str(name), str(director), str(actor), str(style), str(country), str(release_time),
              str(runtime), str(score)))
        print(name, director, actor, '\n', style, country, release_time, runtime, score)
    except IndexError:
        # Report the page that failed to parse, then keep crawling.
        print(url)
        pass
if __name__ == '__main__':
    # The Top-250 list is paginated 25 movies at a time.
    page_urls = ['http://movie.douban.com/top250?start={}'.format(str(start))
                 for start in range(0, 250, 25)]
    for page_url in page_urls:
        get_movie_url(page_url)
        # Random pause (0-10 s) between pages to stay polite to the server.
        time.sleep(numpy.random.rand()*10)
    conn.commit()
| protheanzZ/web_crawler | douban_movie.py | douban_movie.py | py | 2,172 | python | en | code | 0 | github-code | 13 |
11742877804 | #By a mcmc I will try to sample a sin(x)^2 on the interval [0,2pi]
import math
import rosenbrock
import random
import histogram
import histo
# NOTE(review): despite the original "two-dimensional" comment, the
# function below takes three arguments; the quantity being minimized is
# two-dimensional in (mean, sigma).
def daserste(num,xstartpoint,ystartpoint):
    """Metropolis-style random walk that fits (mean, sigma) of a Gaussian.

    A reference sample of 10000 draws from N(4, 2) is histogrammed once.
    The chain state (x, y) = (candidate mean, candidate sigma) is moved
    in random unit-direction steps of length 0.1; each candidate is
    scored by the summed squared difference between the reference
    histogram and a fresh histogram of 10000 draws from N(x, y), and is
    accepted whenever the score improves, or probabilistically otherwise.

    Arguments:
        num -- number of MCMC steps to take
        xstartpoint -- initial guess for the mean
        ystartpoint -- initial guess for the sigma

    Returns:
        (xs, ys) -- lists of visited means and sigmas (length num + 1)
    """
    #print '------------'
    #print testfun.testfun(3)
    #print '------------'
    #num=50000
    #x=(range(num)/num-1./4)*20
    #P=math.exp(abs(x))*abs(2*x**2+7*x**3+x)/(x**2+2)
    #Have genetated the function wich can be sampled from
    # Draw the fixed reference sample from N(4, 2).
    datalength=10000
    datamean=4
    datasigma=2
    aa=0
    data=[]
    while aa < datalength:
        data.append(random.gauss(datamean,datasigma))
        aa+=1
    # Make a histogram of the reference data (comment was Danish: "Lav").
    interval=[min(data),max(data)]
    numbin=100
    deltabin=(max(data)-min(data))/100
    a,b=histo.histo(data,interval,deltabin,numbin)
    #for i in range(len(a)):
    #    print repr(a[i]).rjust(1), repr(b[i]).rjust(2)
    #The monte carlo step
    k=0
    xs=[]
    ys=[]
    Pxs=[]
    #Initial starting value
    x=xstartpoint
    y=ystartpoint
    # Score the starting point: histogram a fresh sample drawn from
    # N(x, y) on the SAME bins and sum the squared bin differences.
    aa=0
    kk=len(data)
    newdata=[]
    while aa < kk:
        newdata.append(random.gauss(x,y))
        aa+=1
    anew,bnew=histo.histo(newdata,interval,deltabin,numbin)
    Px=0
    for i in range(len(anew)):
        Px+=(a[i]-anew[i])**2
    #Px=rosenbrock.rosenbrock(x,y)#math.exp(-abs(x))*abs(2*x**2+7*x**3+x)/(x**2+2)#testfun.testfun(x)
    xs.append(x)
    ys.append(y)
    Pxs.append(Px)
    #Step
    while k < num:
        # Propose a move of fixed length 0.1 in a random direction.
        steplength=0.1
        xn=(random.random()-1./2)*2
        yn=(random.random()-1./2)*2
        xny=x+(xn/(xn**2+yn**2)**0.5)*steplength
        yny=y+(yn/(xn**2+yn**2)**0.5)*steplength
        # Score the proposal the same way as the starting point.
        aa=0
        kk=len(data)
        newdata=[]
        while aa < kk:
            newdata.append(random.gauss(xny,yny))
            aa+=1
        anew,bnew=histo.histo(newdata,interval,deltabin,numbin)
        Pxny=0
        for i in range(len(anew)):
            Pxny+=(a[i]-anew[i])**2
        #print '--------'
        #print Pxny
        #print 'Px'
        #print Px
        #Pxny=rosenbrock.rosenbrock(xny,yny)#math.exp(-abs(xny))*abs(2*xny**2+7*xny**3+xny)/(xny**2+2)#testfun.testfun(x)
        # Metropolis-like acceptance: always accept an improved (lower)
        # score, otherwise accept with probability Px/Pxny.
        if Pxny < Px: #max Pxny > Px
            #print 1
            x=xny
            y=yny
            Px=Pxny
        else:
            auk=random.random()
            # NOTE(review): divides by zero if Pxny == 0; also shadows
            # the histogram bin edges previously bound to ``b``.
            b=Px/Pxny #max Pxny/Px
            #print b
            #print a
            if b > auk:
                x=xny
                y=yny
                Px=Pxny
                #print 2
        xs.append(x)
        ys.append(y)
        #print Px#.append(Px)
        k+=1
    return xs,ys
#for i in range(len(xs)):
#print repr(xs[i]).rjust(1), repr(Pxs[i]).rjust(2)
| Solaro/Brave-New-World | numeric/project/daserste.py | daserste.py | py | 2,236 | python | en | code | 0 | github-code | 13 |
20296184634 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
desitarget.brightmask
=====================
Module for studying and masking bright sources in the sweeps
.. _`Tech Note 2346`: https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=2346
.. _`Tech Note 2348`: https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=2348
"""
from time import time
import fitsio
import healpy as hp
import os
import re
from glob import glob
import numpy as np
import numpy.lib.recfunctions as rfn
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from desitarget import io
from desitarget.internal import sharedmem
from desitarget.targetmask import desi_mask, targetid_mask
from desitarget.targets import encode_targetid, decode_targetid
from desitarget.gaiamatch import find_gaia_files, get_gaia_nside_brick
from desitarget.geomask import circles, cap_area, circle_boundaries, is_in_hp
from desitarget.geomask import ellipses, ellipse_boundary, is_in_ellipse
from desitarget.geomask import radec_match_to, rewind_coords, add_hp_neighbors
from desitarget.cuts import _psflike
from desitarget.tychomatch import get_tycho_dir, get_tycho_nside
from desitarget.tychomatch import find_tycho_files_hp
from desitarget.gfa import add_urat_pms
from desiutil import depend, brick
# ADM set up default logger
from desiutil.log import get_logger
# ADM fake the matplotlib display so it doesn't die on allocated nodes.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402
log = get_logger()
# ADM the data model for a bright star mask: one row per masked source.
# RA/DEC/PMRA/PMDEC: position and proper motion of the source.
# REF_CAT/REF_ID/REF_MAG: reference catalog ("G2" Gaia or "T2" Tycho),
#   its identifier, and the reference magnitude that sets the radii.
# URAT_ID: URAT reference number when URAT proper motions were
#   substituted, -1 otherwise.
# IN_RADIUS/NEAR_RADIUS: mask radii in ARCSECONDS.
# E1/E2: ellipticity components (0 for point sources).
# TYPE: morphological type (e.g. "PSF").
maskdatamodel = np.array([], dtype=[
    ('RA', '>f8'), ('DEC', '>f8'), ('PMRA', '>f4'), ('PMDEC', '>f4'),
    ('REF_CAT', '|S2'), ('REF_ID', '>i8'), ('REF_MAG', '>f4'),
    ('URAT_ID', '>i8'), ('IN_RADIUS', '>f4'), ('NEAR_RADIUS', '>f4'),
    ('E1', '>f4'), ('E2', '>f4'), ('TYPE', '|S3')
])
def get_mask_dir():
    """Convenience function to grab the MASK_DIR environment variable.

    Returns
    -------
    :class:`str`
        The directory stored in the $MASK_DIR environment variable.
    """
    # ADM return the environment variable if it's set...
    maskdir = os.environ.get('MASK_DIR')
    if maskdir is not None:
        return maskdir
    # ADM ...otherwise fail loudly.
    msg = "Set $MASK_DIR environment variable!"
    log.critical(msg)
    raise ValueError(msg)
def get_recent_mask_dir(input_dir=None):
    """Grab the most recent sub-directory of masks in MASK_DIR.

    Parameters
    ----------
    input_dir : :class:`str`, optional, defaults to ``None``
        If passed and not ``None``, then this is returned as the output.

    Returns
    -------
    :class:`str`
        `input_dir` if it is not ``None``, otherwise the most recently
        created sub-directory (with the appropriate format for a mask
        directory) in $MASK_DIR.
    """
    # ADM a user-supplied directory always takes precedence.
    if input_dir is not None:
        return input_dir
    # ADM otherwise $MASK_DIR must be set so that we can search it.
    try:
        maskdir = os.environ["MASK_DIR"]
    except KeyError:
        msg = "pass a mask directory, turn off masking, or set $MASK_DIR!"
        log.error(msg)
        raise IOError(msg)
    # ADM a fairly exhaustive list of possible mask directories.
    candidates = []
    for pattern in ["*maglim*", "*/*maglim*", "*/*/*maglim*"]:
        candidates += sorted(glob(os.path.join(maskdir, pattern)))
    if len(candidates) == 0:
        msg = "no mask sub-directories found in {}".format(maskdir)
        log.error(msg)
        raise IOError(msg)
    # ADM return the most recently created candidate.
    return max(candidates, key=os.path.getctime)
def radii(mag):
    """The relation used to set the radius of bright star masks.

    Parameters
    ----------
    mag : :class:`float` or :class:`recarray`
        Magnitude. Typically, in order of preference, G-band for Gaia
        or VT then HP then BT for Tycho.

    Returns
    -------
    :class:`recarray`
        The `IN_RADIUS`, corresponding to the `IN_BRIGHT_OBJECT` bit
        in `data/targetmask.yaml`.
    :class:`recarray`
        The `NEAR_RADIUS`, corresponding to the `NEAR_BRIGHT_OBJECT`
        bit in `data/targetmask.yaml`.
    """
    # ADM a flat 5 arcsec IN_RADIUS for everything brighter than mag 12;
    # ADM the boolean comparison zeroes the radius for fainter sources.
    inrad = 5. * (mag < 12.)
    # ADM NEAR_RADIUS is always double IN_RADIUS.
    nearrad = 2. * inrad

    return inrad, nearrad
def _rexlike(rextype):
"""If the object is REX (a round exponential galaxy)"""
# ADM explicitly checking for an empty input.
if rextype is None:
log.error("NoneType submitted to _rexlike function")
rextype = np.asarray(rextype)
# ADM in Python3 these string literals become byte-like
# ADM so to retain Python2 compatibility we need to check
# ADM against both bytes and unicode.
# ADM also 'REX' for astropy.io.fits; 'REX ' for fitsio (sigh).
rexlike = ((rextype == 'REX') | (rextype == b'REX') |
(rextype == 'REX ') | (rextype == b'REX '))
return rexlike
def max_objid_bricks(targs):
    """For a set of targets, return the maximum value of BRICK_OBJID in each BRICK_ID

    Parameters
    ----------
    targs : :class:`recarray`
        A recarray of targets as made by :mod:`desitarget.cuts.select_targets`

    Returns
    -------
    maxobjid : :class:`dictionary`
        A dictionary with keys for each unique BRICKID and values of the maximum OBJID in that brick
    """
    # ADM the maximum BRICKID in the passed target set.
    brickmax = np.max(targs["BRICKID"])

    # ADM how many OBJIDs are in each unique brick, starting from 0 and ordered on BRICKID.
    # NOTE(review): np.histogram with bins=brickmax over range [0, brickmax]
    # makes unit-width bins whose LAST bin is closed, so BRICKIDs of
    # brickmax-1 and brickmax would share a bin -- confirm no such pair
    # can occur in the inputs.
    h = np.histogram(targs["BRICKID"], range=[0, brickmax], bins=brickmax)[0]
    # ADM remove zero entries from the histogram.
    h = h[np.where(h > 0)]
    # ADM the index of the maximum OBJID in eacn brick if the bricks are ordered on BRICKID and OBJID.
    maxind = np.cumsum(h)-1

    # ADM an array of BRICKID, OBJID sorted first on BRICKID and then on OBJID within each BRICKID.
    ordered = np.array(sorted(zip(targs["BRICKID"], targs["BRICK_OBJID"]), key=lambda x: (x[0], x[1])))

    # ADM return a dictionary of the maximum OBJID (values) for each BRICKID (keys).
    return dict(ordered[maxind])
def make_bright_star_mask_in_hp(nside, pixnum, verbose=True, gaiaepoch=2015.5,
                                maglim=12., matchrad=1., maskepoch=2023.0):
    """Make a bright star mask in a HEALPixel using Tycho, Gaia and URAT.

    Parameters
    ----------
    nside : :class:`int`
        (NESTED) HEALPixel nside.
    pixnum : :class:`int`
        A single HEALPixel number.
    verbose : :class:`bool`
        If ``True`` then log informational messages.
    gaiaepoch : :class:`float`, optional, defaults to 2015.5
        Epoch of the Gaia observations (see
        :func:`~desitarget.brightmask.make_bright_star_mask`).
    maglim : :class:`float`, optional, defaults to 12.
        Faintest magnitude at which to make the mask (see
        :func:`~desitarget.brightmask.make_bright_star_mask`).
    matchrad : :class:`float`, optional, defaults to 1.
        Tycho-to-Gaia match radius in ARCSECONDS (see
        :func:`~desitarget.brightmask.make_bright_star_mask`).
        NOTE(review): this parameter is currently unused in the body --
        matching uses a hard-coded 10" margin followed by the default
        refinement radius of :func:`radec_match_to`; confirm intent.
    maskepoch : :class:`float`, optional, defaults to 2023.0
        The epoch at which the mask is built (see
        :func:`~desitarget.brightmask.make_bright_star_mask`).

    Returns
    -------
    :class:`recarray`
        The bright star mask in the form of `maskdatamodel.dtype`.

    Notes
    -----
        - Runs in a minute or so for a typical nside=4 pixel.
        - See :func:`~desitarget.brightmask.make_bright_star_mask` for
          descriptions of the output mask and the other input parameters.
    """
    # ADM start the clock.
    t0 = time()

    # ADM read in the Tycho files.
    tychofns = find_tycho_files_hp(nside, pixnum, neighbors=False)
    tychoobjs = []
    for fn in tychofns:
        tychoobjs.append(fitsio.read(fn, ext='TYCHOHPX'))
    tychoobjs = np.concatenate(tychoobjs)
    # ADM create the Tycho reference magnitude, which is VT then HP
    # ADM then BT in order of preference.
    tychomag = tychoobjs["MAG_VT"].copy()
    tychomag[tychomag == 0] = tychoobjs["MAG_HP"][tychomag == 0]
    tychomag[tychomag == 0] = tychoobjs["MAG_BT"][tychomag == 0]
    # ADM discard any Tycho objects below the input magnitude limit
    # ADM and outside of the HEALPixels of interest.
    theta, phi = np.radians(90-tychoobjs["DEC"]), np.radians(tychoobjs["RA"])
    tychohpx = hp.ang2pix(nside, theta, phi, nest=True)
    ii = (tychohpx == pixnum) & (tychomag < maglim)
    tychomag, tychoobjs = tychomag[ii], tychoobjs[ii]
    if verbose:
        log.info('Read {} (mag < {}) Tycho objects (pix={})...t={:.1f} mins'.
                 format(np.sum(ii), maglim, pixnum, (time()-t0)/60))

    # ADM read in the associated Gaia files. Also grab
    # ADM neighboring pixels to prevent edge effects.
    gaiafns = find_gaia_files(tychoobjs, neighbors=True)
    gaiaobjs = []
    cols = 'SOURCE_ID', 'RA', 'DEC', 'PHOT_G_MEAN_MAG', 'PMRA', 'PMDEC'
    for fn in gaiafns:
        if os.path.exists(fn):
            gaiaobjs.append(fitsio.read(fn, ext='GAIAHPX', columns=cols))
    gaiaobjs = np.concatenate(gaiaobjs)
    gaiaobjs = rfn.rename_fields(gaiaobjs, {"SOURCE_ID": "REF_ID"})
    # ADM limit Gaia objects to 3 magnitudes fainter than the passed
    # ADM limit. This leaves some (!) leeway when matching to Tycho.
    gaiaobjs = gaiaobjs[gaiaobjs['PHOT_G_MEAN_MAG'] < maglim + 3]
    if verbose:
        log.info('Read {} (G < {}) Gaia sources (pix={})...t={:.1f} mins'.format(
            len(gaiaobjs), maglim+3, pixnum, (time()-t0)/60))

    # ADM substitute URAT where Gaia proper motions don't exist.
    ii = ((np.isnan(gaiaobjs["PMRA"]) | (gaiaobjs["PMRA"] == 0)) &
          (np.isnan(gaiaobjs["PMDEC"]) | (gaiaobjs["PMDEC"] == 0)))
    if verbose:
        log.info('Add URAT for {} Gaia objs with no PMs (pix={})...t={:.1f} mins'
                 .format(np.sum(ii), pixnum, (time()-t0)/60))
    urat = add_urat_pms(gaiaobjs[ii], numproc=1)
    if verbose:
        log.info('Found an additional {} URAT objects (pix={})...t={:.1f} mins'
                 .format(np.sum(urat["URAT_ID"] != -1), pixnum, (time()-t0)/60))
    for col in "PMRA", "PMDEC":
        gaiaobjs[col][ii] = urat[col]
    # ADM need to track the URATID to track which objects have
    # ADM substituted proper motions.
    uratid = np.zeros_like(gaiaobjs["REF_ID"])-1
    uratid[ii] = urat["URAT_ID"]

    # ADM match to remove Tycho objects already in Gaia. Prefer the more
    # ADM accurate Gaia proper motions. Note, however, that Tycho epochs
    # ADM can differ from the mean (1991.5) by as much as 0.86 years,
    # ADM so a star with a proper motion as large as Barnard's Star
    # ADM (10.3 arcsec) can be off by a significant margin (~10").
    margin = 10.
    ra, dec = rewind_coords(gaiaobjs["RA"], gaiaobjs["DEC"],
                            gaiaobjs["PMRA"], gaiaobjs["PMDEC"],
                            epochnow=gaiaepoch)
    # ADM match Gaia to Tycho with a suitable margin.
    if verbose:
        log.info('Match Gaia to Tycho with margin={}" (pix={})...t={:.1f} mins'
                 .format(margin, pixnum, (time()-t0)/60))
    igaia, itycho = radec_match_to([ra, dec],
                                   [tychoobjs["RA"], tychoobjs["DEC"]],
                                   sep=margin, radec=True)
    if verbose:
        log.info('{} matches. Refining at 1" (pix={})...t={:.1f} mins'.format(
            len(itycho), pixnum, (time()-t0)/60))

    # ADM match Gaia to Tycho at the more exact reference epoch.
    epoch_ra = tychoobjs[itycho]["EPOCH_RA"]
    epoch_dec = tychoobjs[itycho]["EPOCH_DEC"]
    # ADM some of the Tycho epochs aren't populated.
    epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
    ra, dec = rewind_coords(gaiaobjs["RA"][igaia], gaiaobjs["DEC"][igaia],
                            gaiaobjs["PMRA"][igaia], gaiaobjs["PMDEC"][igaia],
                            epochnow=gaiaepoch,
                            epochpast=epoch_ra, epochpastdec=epoch_dec)
    # ADM catch the corner case where there are no initial matches.
    if ra.size > 0:
        _, refined = radec_match_to([ra, dec], [tychoobjs["RA"][itycho],
                                    tychoobjs["DEC"][itycho]], radec=True)
    else:
        refined = np.array([], dtype='int')

    # ADM retain Tycho objects that DON'T match Gaia.
    keep = np.ones(len(tychoobjs), dtype='bool')
    keep[itycho[refined]] = False
    tychokeep, tychomag = tychoobjs[keep], tychomag[keep]
    if verbose:
        log.info('Kept {} Tychos with no Gaia match (pix={})...t={:.1f} mins'
                 .format(len(tychokeep), pixnum, (time()-t0)/60))

    # ADM now we're done matching to Gaia, limit Gaia to the passed
    # ADM magnitude limit and to the HEALPixel boundary of interest.
    theta, phi = np.radians(90-gaiaobjs["DEC"]), np.radians(gaiaobjs["RA"])
    gaiahpx = hp.ang2pix(nside, theta, phi, nest=True)
    ii = (gaiahpx == pixnum) & (gaiaobjs['PHOT_G_MEAN_MAG'] < maglim)
    gaiakeep, uratid = gaiaobjs[ii], uratid[ii]
    if verbose:
        log.info('Mask also comprises {} Gaia sources (pix={})...t={:.1f} mins'
                 .format(len(gaiakeep), pixnum, (time()-t0)/60))

    # ADM move the coordinates forwards to the input mask epoch.
    epoch_ra, epoch_dec = tychokeep["EPOCH_RA"], tychokeep["EPOCH_DEC"]
    # ADM some of the Tycho epochs aren't populated.
    epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
    ra, dec = rewind_coords(
        tychokeep["RA"], tychokeep["DEC"], tychokeep["PM_RA"], tychokeep["PM_DEC"],
        epochnow=epoch_ra, epochnowdec=epoch_dec, epochpast=maskepoch)
    tychokeep["RA"], tychokeep["DEC"] = ra, dec
    ra, dec = rewind_coords(
        gaiakeep["RA"], gaiakeep["DEC"], gaiakeep["PMRA"], gaiakeep["PMDEC"],
        epochnow=gaiaepoch, epochpast=maskepoch)
    gaiakeep["RA"], gaiakeep["DEC"] = ra, dec

    # ADM finally, format according to the mask data model...
    gaiamask = np.zeros(len(gaiakeep), dtype=maskdatamodel.dtype)
    tychomask = np.zeros(len(tychokeep), dtype=maskdatamodel.dtype)
    for col in "RA", "DEC":
        gaiamask[col] = gaiakeep[col]
        gaiamask["PM"+col] = gaiakeep["PM"+col]
        tychomask[col] = tychokeep[col]
        tychomask["PM"+col] = tychokeep["PM_"+col]
    gaiamask["REF_ID"] = gaiakeep["REF_ID"]
    # ADM take care to rigorously convert to int64 for Tycho.
    tychomask["REF_ID"] = tychokeep["TYC1"].astype('int64')*int(1e6) + \
        tychokeep["TYC2"].astype('int64')*10 + tychokeep["TYC3"]
    gaiamask["REF_CAT"], tychomask["REF_CAT"] = 'G2', 'T2'
    gaiamask["REF_MAG"] = gaiakeep['PHOT_G_MEAN_MAG']
    tychomask["REF_MAG"] = tychomag
    gaiamask["URAT_ID"], tychomask["URAT_ID"] = uratid, -1
    gaiamask["TYPE"], tychomask["TYPE"] = 'PSF', 'PSF'
    mask = np.concatenate([gaiamask, tychomask])
    # ADM ...and add the mask radii.
    mask["IN_RADIUS"], mask["NEAR_RADIUS"] = radii(mask["REF_MAG"])

    if verbose:
        log.info("Done making mask...(pix={})...t={:.1f} mins".format(
            pixnum, (time()-t0)/60.))

    return mask
def make_bright_star_mask(maglim=12., matchrad=1., numproc=32,
                          maskepoch=2023.0, gaiaepoch=2015.5,
                          nside=None, pixels=None):
    """Make an all-sky bright star mask using Tycho, Gaia and URAT.

    Parameters
    ----------
    maglim : :class:`float`, optional, defaults to 12.
        Faintest magnitude at which to make the mask. This magnitude is
        interpreted as G-band for Gaia and, in order of preference, VT
        then HP then BT for Tycho (not every Tycho source has each band).
    matchrad : :class:`int`, optional, defaults to 1.
        Tycho sources that match a Gaia source at this separation in
        ARCSECONDS are NOT included in the output mask. The matching is
        performed rigorously, accounting for Gaia proper motions.
    numproc : :class:`int`, optional, defaults to 32.
        Number of processes over which to parallelize
    maskepoch : :class:`float`
        The mask is built at this epoch. Not all sources have proper
        motions from every survey, so proper motions are used, in order
        of preference, from Gaia, URAT, then Tycho.
    gaiaepoch : :class:`float`, optional, defaults to Gaia DR2 (2015.5)
        The epoch of the Gaia observations. Should be 2015.5 unless we
        move beyond Gaia DR2.
    nside : :class:`int`, optional, defaults to ``None``
        If passed, create a mask only in nested HEALPixels in `pixels`
        at this `nside`. Otherwise, run for the whole sky. If `nside`
        is passed then `pixels` must be passed too.
    pixels : :class:`list`, optional, defaults to ``None``
        If passed, create a mask only in nested HEALPixels at `nside` for
        pixel integers in `pixels`. Otherwise, run for the whole sky. If
        `pixels` is passed then `nside` must be passed too.

    Returns
    -------
    :class:`recarray`
        - The bright star mask in the form of `maskdatamodel.dtype`:
        - `REF_CAT` is `"T2"` for Tycho and `"G2"` for Gaia.
        - `REF_ID` is `Tyc1`*1,000,000+`Tyc2`*10+`Tyc3` for Tycho2;
          `"sourceid"` for Gaia-DR2 and Gaia-DR2 with URAT.
        - `REF_MAG` is, in order of preference, G-band for Gaia, VT
          then HP then BT for Tycho.
        - `URAT_ID` contains the URAT reference number for Gaia objects
          that use the URAT proper motion, or -1 otherwise.
        - The radii are in ARCSECONDS.
        - `E1` and `E2` are placeholders for ellipticity components, and
          are set to 0 for Gaia and Tycho sources.
        - `TYPE` is always `PSF` for star-like objects.
        - Note that the mask is based on objects in the pixel AT THEIR
          NATIVE EPOCH *NOT* AT THE INPUT `maskepoch`. It is therefore
          possible for locations in the output mask to be just beyond
          the boundaries of the input pixel.

    Notes
    -----
        - Runs (all-sky) in ~20 minutes for `numproc=32` and `maglim=12`.
        - `IN_RADIUS` (`NEAR_RADIUS`) corresponds to `IN_BRIGHT_OBJECT`
          (`NEAR_BRIGHT_OBJECT`) in `data/targetmask.yaml`. These radii
          are set in the function `desitarget.brightmask.radius()`.
        - The correct mask size for DESI is an open question.
        - The `GAIA_DIR`, `URAT_DIR` and `TYCHO_DIR` environment
          variables must be set.
    """
    log.info("running on {} processors".format(numproc))

    # ADM check if HEALPixel parameters have been correctly sent.
    io.check_both_set(pixels, nside)

    # ADM grab the nside of the Tycho files, which is a reasonable
    # ADM resolution for bright stars.
    if nside is None:
        nside = get_tycho_nside()
    # ADM BUG FIX: previously any caller-supplied `pixels` was clobbered
    # ADM by np.arange over the full sky, so the nside/pixels inputs were
    # ADM effectively ignored.
    if pixels is None:
        # ADM array of HEALPixels over which to parallelize...
        pixels = np.arange(hp.nside2npix(nside))
    else:
        # ADM copy, so that shuffling doesn't mutate the caller's array.
        pixels = np.array(pixels)
    # ADM total pixel count, needed for progress logging below.
    npixels = len(pixels)
    # ADM ...shuffle for better balance across nodes (as there are
    # ADM more stars in regions of the sky where pixels adjoin).
    np.random.shuffle(pixels)

    # ADM the common function that is actually parallelized across.
    def _make_bright_star_mx(pixnum):
        """returns bright star mask in one HEALPixel"""
        return make_bright_star_mask_in_hp(
            nside, pixnum, maglim=maglim, matchrad=matchrad,
            gaiaepoch=gaiaepoch, maskepoch=maskepoch, verbose=False)

    # ADM this is just to count pixels in _update_status.
    npix = np.zeros((), dtype='i8')
    t0 = time()

    def _update_status(result):
        """wrap key reduction operation on the main parallel process"""
        if npix % 10 == 0 and npix > 0:
            rate = (time() - t0) / npix
            log.info('{}/{} HEALPixels; {:.1f} secs/pixel...t = {:.1f} mins'.
                     format(npix, npixels, rate, (time()-t0)/60.))
        npix[...] += 1
        return result

    # ADM Parallel process across HEALPixels.
    if numproc > 1:
        pool = sharedmem.MapReduce(np=numproc)
        with pool:
            mask = pool.map(_make_bright_star_mx, pixels, reduce=_update_status)
    else:
        mask = list()
        for pixel in pixels:
            mask.append(_update_status(_make_bright_star_mx(pixel)))

    mask = np.concatenate(mask)

    log.info("Done making mask...t = {:.1f} mins".format((time()-t0)/60.))

    return mask
def plot_mask(mask, limits=None, radius="IN_RADIUS", show=True):
    """Plot a mask or masks.

    Parameters
    ----------
    mask : :class:`recarray`
        A mask, as constructed by, e.g. :func:`make_bright_star_mask()`.
    limits : :class:`list`, optional
        RA/Dec plot limits in the form [ramin, ramax, decmin, decmax].
    radius : :class:`str`, optional
        Which mask radius to plot (``IN_RADIUS`` or ``NEAR_RADIUS``).
    show : :class:`boolean`
        If ``True``, then display the plot, Otherwise, just execute the
        plot commands so it can be added to or saved to file later.

    Returns
    -------
    Nothing
    """
    # ADM make this work even for a single mask.
    mask = np.atleast_1d(mask)

    # ADM set up the plot.
    fig, ax = plt.subplots(1, figsize=(8, 8))
    plt.xlabel('RA (o)')
    plt.ylabel('Dec (o)')

    # ADM set up some default plot limits if they weren't passed.
    if limits is None:
        maskra, maskdec, tol = mask["RA"], mask["DEC"], mask[radius]/3600.
        limits = [np.max(maskra-tol), np.min(maskra+tol),
                  np.min(maskdec-tol), np.max(maskdec+tol)]
    ax.axis(limits)

    # ADM only consider a limited mask range corresponding to a few
    # ADM times the largest mask radius beyond the requested limits.
    # ADM remember that the passed mask sizes are in arcseconds.
    tol = 3.*np.max(mask[radius])/3600.
    # ADM the np.min/np.max combinations are to guard against people
    # ADM passing flipped RAs (so RA increases to the east).
    ii = ((mask["RA"] > np.min(limits[:2])-tol) &
          (mask["RA"] < np.max(limits[:2])+tol) &
          (mask["DEC"] > np.min(limits[-2:])-tol) &
          (mask["DEC"] < np.max(limits[-2:])+tol))
    if np.sum(ii) == 0:
        msg = 'No mask entries within specified limits ({})'.format(limits)
        log.error(msg)
        raise ValueError(msg)
    else:
        mask = mask[ii]

    # ADM create ellipse polygons for each entry in the mask and
    # ADM make a list of matplotlib patches for them.
    patches = []
    for i, ellipse in enumerate(mask):
        # ADM create points on the ellipse boundary.
        ras, decs = ellipse_boundary(
            ellipse["RA"], ellipse["DEC"],
            ellipse[radius], ellipse["E1"], ellipse["E2"])
        # NOTE(review): the positional True is Polygon's `closed` flag;
        # passing it positionally is deprecated in newer Matplotlib --
        # consider `closed=True` when the pinned version allows.
        polygon = Polygon(np.array(list(zip(ras, decs))), True)
        patches.append(polygon)

    p = PatchCollection(patches, alpha=0.4, facecolors='b', edgecolors='b')
    ax.add_collection(p)

    if show:
        plt.show()

    return
def is_in_bright_mask(targs, sourcemask, inonly=False):
    """Determine whether a set of targets is in a bright star mask.
    Parameters
    ----------
    targs : :class:`recarray`
        A recarray of targets, skies etc., as made by, e.g.,
        :func:`desitarget.cuts.select_targets()`.
    sourcemask : :class:`recarray`
        A recarray containing a mask as made by, e.g.,
        :func:`desitarget.brightmask.make_bright_star_mask()`
    inonly : :class:`boolean`, optional, defaults to False
        If ``True``, then only calculate the `in_mask` return but not
        the `near_mask` return, which is about a factor of 2 faster.
    Returns
    -------
    :class:`list`
        [`in_mask`, `near_mask`] where `in_mask` (`near_mask`) is a
        boolean array that is ``True`` for `targs` that are IN (NEAR) a
        mask. If `inonly` is ``True`` then this is just [`in_mask`].
    :class: `list`
        [`used_in_mask`, `used_near_mask`] where `used_in_mask`
        (`used_near_mask`) is a boolean array that is ``True`` for masks
        in `sourcemask` that contain a target at the IN (NEAR) radius.
        If `inonly` is ``True`` then this is just [`used_in_mask`].
    """
    t0 = time()
    # Initialize arrays of all False (nothing is yet in a mask).
    in_mask = np.zeros(len(targs), dtype=bool)
    near_mask = np.zeros(len(targs), dtype=bool)
    used_in_mask = np.zeros(len(sourcemask), dtype=bool)
    used_near_mask = np.zeros(len(sourcemask), dtype=bool)
    # Turn the mask and target coordinates into SkyCoord objects.
    from astropy.coordinates import SkyCoord
    from astropy import units as u
    ctargs = SkyCoord(targs["RA"]*u.degree, targs["DEC"]*u.degree)
    cmask = SkyCoord(sourcemask["RA"]*u.degree, sourcemask["DEC"]*u.degree)
    # This is the largest search radius we should need to consider:
    # the NEAR radius when NEAR results are requested, otherwise the
    # (smaller) IN radius. In the future an obvious speed up is to
    # split on radius, as large radii are rarer but take longer.
    if inonly:
        maxrad = max(sourcemask["IN_RADIUS"])*u.arcsec
    else:
        maxrad = max(sourcemask["NEAR_RADIUS"])*u.arcsec
    # Coordinate match the masks and the targets, initially
    # assuming all of the masks are circles-on-the-sky.
    idtargs, idmask, d2d, d3d = cmask.search_around_sky(ctargs, maxrad)
    # Catch the case where nothing fell in a mask.
    if len(idmask) == 0:
        if inonly:
            return [in_mask], [used_in_mask]
        return [in_mask, near_mask], [used_in_mask, used_near_mask]
    # Need to differentiate targets that are in ellipse-on-the-sky
    # masks from targets that are in circle-on-the-sky masks.
    rex_or_psf = _rexlike(sourcemask[idmask]["TYPE"]) | _psflike(
        sourcemask[idmask]["TYPE"])
    w_ellipse = np.where(~rex_or_psf)
    # Only continue if there are any elliptical masks.
    if len(w_ellipse[0]) > 0:
        idelltargs = idtargs[w_ellipse]
        idellmask = idmask[w_ellipse]
        log.info('Testing {} targets against {} elliptical masks...t={:.1f}s'
                 .format(len(set(idelltargs)), len(set(idellmask)), time()-t0))
        # To speed the calculation, make a dictionary of which
        # targets (the values) associate with each mask (the keys).
        targidineachmask = {}
        # First initiate a list for each relevant key (mask ID).
        for maskid in set(idellmask):
            targidineachmask[maskid] = []
        # Then append those lists until they contain the IDs of each
        # relevant target as the values.
        for index, targid in enumerate(idelltargs):
            targidineachmask[idellmask[index]].append(targid)
        # Loop through the masks and determine which relevant points
        # occupy them for both the IN_RADIUS and the NEAR_RADIUS.
        for maskid in targidineachmask:
            targids = targidineachmask[maskid]
            ellras, elldecs = targs[targids]["RA"], targs[targids]["DEC"]
            mask = sourcemask[maskid]
            # Refine being in a mask based on the elliptical masks.
            in_ell = is_in_ellipse(
                ellras, elldecs, mask["RA"], mask["DEC"],
                mask["IN_RADIUS"], mask["E1"], mask["E2"])
            in_mask[targids] |= in_ell
            used_in_mask[maskid] |= np.any(in_ell)
            if not inonly:
                in_ell = is_in_ellipse(ellras, elldecs,
                                       mask["RA"], mask["DEC"],
                                       mask["NEAR_RADIUS"],
                                       mask["E1"], mask["E2"])
                near_mask[targids] |= in_ell
                used_near_mask[maskid] |= np.any(in_ell)
        # Timer format fixed: "{:1f}" printed full float precision
        # rather than the one decimal place used by every other log line.
        log.info('Done with elliptical masking...t={:.1f}s'.format(time()-t0))
    # Finally, record targets in a circles-on-the-sky mask, which
    # trumps any information about just being in an elliptical mask.
    # Find separations less than the mask radius for circle masks;
    # matches meeting these criteria are in at least one circle mask.
    w_in = (d2d.arcsec < sourcemask[idmask]["IN_RADIUS"]) & (rex_or_psf)
    in_mask[idtargs[w_in]] = True
    used_in_mask[idmask[w_in]] = True
    if not inonly:
        w_near = (d2d.arcsec < sourcemask[idmask]["NEAR_RADIUS"]) & (rex_or_psf)
        near_mask[idtargs[w_near]] = True
        used_near_mask[idmask[w_near]] = True
        return [in_mask, near_mask], [used_in_mask, used_near_mask]
    return [in_mask], [used_in_mask]
def is_bright_source(targs, sourcemask):
    """Determine whether targets are, themselves, a bright source mask.
    Parameters
    ----------
    targs : :class:`recarray`
        Targets as made by, e.g., :func:`desitarget.cuts.select_targets()`.
    sourcemask : :class:`recarray`
        A recarray containing a bright source mask as made by, e.g.,
        :func:`desitarget.brightmask.make_bright_star_mask()`
    Returns
    -------
    is_mask : array_like
        ``True`` for `targs` that are, themselves, a mask.
    """
    # Reconstruct the TARGETID of each target from its brick information.
    targetid = encode_targetid(objid=targs['BRICK_OBJID'],
                               brickid=targs['BRICKID'],
                               release=targs['RELEASE'])
    # A target IS a mask exactly when its TARGETID appears among the
    # mask TARGETIDs; np.isin performs that membership test in one call.
    return np.isin(targetid, sourcemask["TARGETID"])
def generate_safe_locations(sourcemask, Nperradius=1):
    """Given a mask, generate SAFE (BADSKY) locations at its periphery.
    Parameters
    ----------
    sourcemask : :class:`recarray`
        A recarray containing a bright mask as made by, e.g.,
        :func:`desitarget.brightmask.make_bright_star_mask()`
    Nperradius : :class:`int`, optional, defaults to 1.
        Number of safe locations to make per arcsec radius of each mask.
    Returns
    -------
    ra : array_like.
        The Right Ascensions of the SAFE (BADSKY) locations.
    dec : array_like.
        The Declinations of the SAFE (BADSKY) locations.
    Notes
    -----
    - See `Tech Note 2346`_ for details.
    """
    # The radius of each mask in arcseconds with a 0.1% kick to
    # ensure that positions are beyond the mask edges.
    radius = sourcemask["IN_RADIUS"]*1.001
    # Determine the number of SAFE locations to assign to each
    # mask given the passed number of locations per unit radius.
    Nsafe = np.ceil(radius*Nperradius).astype('i')
    # Need to differentiate masks that are ellipses-on-the-sky
    # from masks that are circles-on-the-sky.
    rex_or_psf = _rexlike(sourcemask["TYPE"]) | _psflike(sourcemask["TYPE"])
    w_ellipse = np.where(~rex_or_psf)
    w_circle = np.where(rex_or_psf)
    # Collect the per-mask coordinate arrays and concatenate ONCE at the
    # end; calling np.concatenate inside the ellipse loop was quadratic
    # in the number of elliptical masks.
    ralist, declist = [], []
    # Generate the safe locations for circular masks (which is quicker).
    if len(w_circle[0]) > 0:
        circras, circdecs = circle_boundaries(sourcemask[w_circle]["RA"],
                                              sourcemask[w_circle]["DEC"],
                                              radius[w_circle], Nsafe[w_circle])
        ralist.append(circras)
        declist.append(circdecs)
    # Generate the safe locations for elliptical masks
    # (which is slower as it requires a loop).
    if len(w_ellipse[0]) > 0:
        for w in w_ellipse[0]:
            ellras, elldecs = ellipse_boundary(sourcemask[w]["RA"],
                                               sourcemask[w]["DEC"], radius[w],
                                               sourcemask[w]["E1"],
                                               sourcemask[w]["E2"], Nsafe[w])
            ralist.append(ellras)
            declist.append(elldecs)
    # Preserve the original behavior of returning empty float arrays
    # when there are no masks at all.
    if len(ralist) == 0:
        return np.array([]), np.array([])
    return np.concatenate(ralist), np.concatenate(declist)
def get_safe_targets(targs, sourcemask, bricks_are_hpx=False):
    """Get SAFE (BADSKY) locations for targs, set TARGETID/DESI_TARGET.
    Parameters
    ----------
    targs : :class:`~numpy.ndarray`
        Targets made by, e.g. :func:`desitarget.cuts.select_targets()`.
    sourcemask : :class:`~numpy.ndarray`
        A bright source mask as made by, e.g.
        :func:`desitarget.brightmask.make_bright_star_mask()`.
    bricks_are_hpx : :class:`bool`, optional, defaults to ``False``
        Instead of using bricks to calculate BRICKIDs, use HEALPixels at
        the "standard" size from :func:`gaiamatch.get_gaia_nside_brick()`.
    Returns
    -------
    :class:`~numpy.ndarray`
        SAFE (BADSKY) locations for `targs` with the same data model as
        for `targs`.
    Notes
    -----
    - `Tech Note 2346`_ details SAFE (BADSKY) locations.
    - `Tech Note 2348`_ details setting the SKY bit in TARGETID.
    - Hard-coded to create 1 safe location per arcsec of mask radius.
      The correct number (Nperradius) for DESI is an open question.
    """
    # Number of safe locations per radial arcsec of each mask.
    Nperradius = 1
    # Grab SAFE locations around masks at a density of Nperradius.
    ra, dec = generate_safe_locations(sourcemask, Nperradius)
    # Duplicate targs data model for safe locations.
    nrows = len(ra)
    safes = np.zeros(nrows, dtype=targs.dtype)
    # Return early if there are no safe locations.
    if nrows == 0:
        return safes
    # Populate the safes with the RA/Dec of the SAFE locations.
    safes["RA"] = ra
    safes["DEC"] = dec
    # Set the bit for SAFE locations in DESITARGET.
    safes["DESI_TARGET"] |= desi_mask.BAD_SKY
    # Add the brick information for the SAFE/BADSKY targets.
    if bricks_are_hpx:
        nside = get_gaia_nside_brick()
        theta, phi = np.radians(90-safes["DEC"]), np.radians(safes["RA"])
        safes["BRICKID"] = hp.ang2pix(nside, theta, phi, nest=True)
        safes["BRICKNAME"] = 'hpxat{}'.format(nside)
    else:
        b = brick.Bricks(bricksize=0.25)
        safes["BRICKID"] = b.brickid(safes["RA"], safes["DEC"])
        safes["BRICKNAME"] = b.brickname(safes["RA"], safes["DEC"])
    # Now add OBJIDs, counting backwards from the maximum possible
    # OBJID to ensure no duplication of TARGETIDs for supplemental
    # skies, which build their OBJIDs by counting forwards from 0.
    maxobjid = 2**targetid_mask.OBJID.nbits - 1
    sortid = np.argsort(safes["BRICKID"])
    _, cnts = np.unique(safes["BRICKID"], return_counts=True)
    brickids = np.concatenate([maxobjid-np.arange(i) for i in cnts])
    safes["BRICK_OBJID"][sortid] = brickids
    # Finally, update the TARGETID.
    # First, check the GAIA DR number for these skies.
    _, _, _, _, _, gdr = decode_targetid(targs["TARGETID"])
    if len(set(gdr)) != 1:
        # The "{}" placeholder was previously missing, so the offending
        # set of Data Releases was silently dropped from the message.
        msg = "Skies are based on multiple Gaia Data Releases: {}".format(set(gdr))
        log.critical(msg)
        raise ValueError(msg)
    safes["TARGETID"] = encode_targetid(objid=safes['BRICK_OBJID'],
                                        brickid=safes['BRICKID'],
                                        sky=1,
                                        gaiadr=gdr[0])
    # Return the SAFE targets (appended to targs by the caller).
    return safes
def set_target_bits(targs, sourcemask, return_masks=False):
    """Apply bright source mask to targets, return desi_target array.
    Parameters
    ----------
    targs : :class:`recarray`
        Targets as made by, e.g., :func:`desitarget.cuts.select_targets()`.
    sourcemask : :class:`recarray`
        A recarray containing a bright source mask as made by, e.g.
        :mod:`desitarget.brightmask.make_bright_star_mask` or
        :mod:`desitarget.brightmask.make_bright_source_mask`.
    return_masks : :class:`bool`
        If ``True`` also return boolean arrays of which of the
        masks in `sourcemask` contain a target.
    Returns
    -------
    :class:`recarray`
        `DESI_TARGET` column updates with bright source information bits.
    :class:`list`, only returned if `return_masks` is ``True``
        [`used_in_mask`, `used_near_mask`] where `used_in_mask`
        (`used_near_mask`) is a boolean array that is ``True`` for masks
        in `sourcemask` that contain a target at the IN (NEAR) radius.
    Notes
    -----
    - Sets ``IN_BRIGHT_OBJECT`` and ``NEAR_BRIGHT_OBJECT`` via
      matches to circular and/or elliptical masks.
    - Sets ``BRIGHT_OBJECT`` via an index match on ``TARGETID``
      (defined as in :func:`desitarget.targets.encode_targetid()`).
    See :mod:`desitarget.targetmask` for the definition of each bit.
    """
    # A BRIGHT_OBJECT match on TARGETID is only possible when the mask
    # actually carries a TARGETID column.
    has_targetids = "TARGETID" in sourcemask.dtype.names
    is_source = is_bright_source(targs, sourcemask) if has_targetids else 0
    # Positional (IN/NEAR) matches of the targets against the masks.
    (in_src, near_src), usedmasks = is_in_bright_mask(targs, sourcemask)
    # Work on a copy so the caller's DESI_TARGET column is untouched.
    desi_target = targs["DESI_TARGET"].copy()
    desi_target |= is_source * desi_mask.BRIGHT_OBJECT
    desi_target |= in_src * desi_mask.IN_BRIGHT_OBJECT
    desi_target |= near_src * desi_mask.NEAR_BRIGHT_OBJECT
    if return_masks:
        return desi_target, usedmasks
    return desi_target
def mask_targets(targs, inmaskdir, nside=2, pixlist=None, bricks_are_hpx=False):
    """Add bits for if objects occupy masks, and SAFE (BADSKY) locations.
    Parameters
    ----------
    targs : :class:`str` or `~numpy.ndarray`
        An array of targets/skies etc. created by, e.g.,
        :func:`desitarget.cuts.select_targets()` OR the filename of a
        file that contains such a set of targets/skies, etc.
    inmaskdir : :class:`str`, optional
        An input bright star mask file or HEALPixel-split directory as
        made by :func:`desitarget.brightmask.make_bright_star_mask()`
    nside : :class:`int`, optional, defaults to 2
        The nside at which the targets were generated. If the mask is
        a HEALPixel-split directory, then this helps to perform more
        efficient masking as only the subset of masks that are in
        pixels containing `targs` at this `nside` will be considered
        (together with neighboring pixels to account for edge effects).
    pixlist : :class:`list` or `int`, optional
        A set of HEALPixels corresponding to the `targs`. Only the subset
        of masks in HEALPixels in `pixlist` at `nside` will be considered
        (together with neighboring pixels to account for edge effects).
        If ``None``, then the pixels touched by `targs` is derived
        from `targs` itself.
    bricks_are_hpx : :class:`bool`, optional, defaults to ``False``
        Instead of using bricks to calculate BRICKIDs, use HEALPixels at
        the "standard" size from :func:`gaiamatch.get_gaia_nside_brick()`.
    Returns
    -------
    :class:`~numpy.ndarray`
        Input targets with the `DESI_TARGET` column updated to reflect
        the `BRIGHT_OBJECT` bits and SAFE (`BADSKY`) sky locations added
        around the perimeter of the mask.
    Notes
    -----
    - `Tech Note 2346`_ details SAFE (BADSKY) locations.
    """
    t0 = time()
    # Check if targs is a file name or the structure itself.
    if isinstance(targs, str):
        if not os.path.exists(targs):
            raise ValueError("{} doesn't exist".format(targs))
        targs = fitsio.read(targs)
    # Determine which pixels are occupied by targets.
    if pixlist is None:
        theta, phi = np.radians(90-targs["DEC"]), np.radians(targs["RA"])
        pixlist = list(set(hp.ang2pix(nside, theta, phi, nest=True)))
    else:
        # In case an integer was passed.
        pixlist = np.atleast_1d(pixlist)
    log.info("Masking using masks in {} at nside={} in HEALPixels={}".format(
        inmaskdir, nside, pixlist))
    # Widen by neighboring pixels so masks just over a pixel boundary
    # are still considered (edge effects).
    pixlistwneigh = add_hp_neighbors(nside, pixlist)
    # Read in the (potentially HEALPixel-split) mask.
    sourcemask = io.read_targets_in_hp(inmaskdir, nside, pixlistwneigh)
    ntargs = len(targs)
    log.info('Total number of masks {}'.format(len(sourcemask)))
    log.info('Total number of targets {}...t={:.1f}s'.format(ntargs, time()-t0))
    # Update the bits depending on whether targets are in a mask;
    # also grab which masks contain or are near a target.
    dt, mx = set_target_bits(targs, sourcemask, return_masks=True)
    targs["DESI_TARGET"] = dt
    # NOTE(review): nearmasks is unpacked but never used below.
    inmasks, nearmasks = mx
    # Generate SAFE locations only for masks that contain a target.
    safes = get_safe_targets(targs, sourcemask[inmasks],
                             bricks_are_hpx=bricks_are_hpx)
    # Update the bits for the safe locations depending on whether
    # they're in a mask.
    safes["DESI_TARGET"] = set_target_bits(safes, sourcemask)
    # It's possible that a safe location was generated outside of
    # the requested HEALPixels, so clip to the requested pixels.
    inhp = is_in_hp(safes, nside, pixlist)
    safes = safes[inhp]
    # Combine the targets and safe locations.
    done = np.concatenate([targs, safes])
    # Assert uniqueness of TARGETIDs.
    stargs, ssafes = set(targs["TARGETID"]), set(safes["TARGETID"])
    msg = "TARGETIDs for targets not unique"
    assert len(stargs) == len(targs), msg
    msg = "TARGETIDs for safes not unique"
    assert len(ssafes) == len(safes), msg
    msg = "TARGETIDs for safes duplicated in targets. Generating TARGETIDs"
    msg += " backwards from maxobjid in get_safe_targets() has likely failed"
    msg += " due to somehow generating a large number of safe locations."
    assert len(stargs.intersection(ssafes)) == 0, msg
    log.info('Generated {} SAFE (BADSKY) locations...t={:.1f}s'.format(
        len(done)-ntargs, time()-t0))
    # Remove any SAFE locations that are in bright masks (because they aren't really safe).
    # The "|" keeps every non-SAFE row unconditionally and drops only the
    # rows that are both BAD_SKY and IN_BRIGHT_OBJECT, i.e. SAFE
    # locations that landed inside a mask.
    ii = (((done["DESI_TARGET"] & desi_mask.BAD_SKY) == 0) |
          ((done["DESI_TARGET"] & desi_mask.IN_BRIGHT_OBJECT) == 0))
    done = done[ii]
    log.info("...of these, {} SAFE (BADSKY) locations aren't in masks...t={:.1f}s"
             .format(len(done)-ntargs, time()-t0))
    log.info('Finishing up...t={:.1f}s'.format(time()-t0))
    return done
| desihub/desitarget | py/desitarget/brightmask.py | brightmask.py | py | 42,013 | python | en | code | 17 | github-code | 13 |
39579931041 | # Path: app.py
import streamlit as st
from fastai.vision.all import *
st.title("Fruit Classification")
st.write("This is a simple image classification web app to classify fruits")
# Load the exported fastai learner (weights plus preprocessing pipeline).
model = load_learner('fruit-classifier.pkl')
# Let the user upload a JPEG image.
uploaded_file = st.file_uploader("Choose an image...", type="jpg")
if uploaded_file is not None:
    # Run the classifier on the uploaded image.
    img = PILImage.create(uploaded_file)
    pred, pred_idx, probs = model.predict(img)
    # Display the predicted class and its confidence.
    st.write("Prediction: ", pred)
    st.write("Probability: ", probs[pred_idx])
    # Render the uploaded image below the prediction.
    # NOTE(review): use_column_width is deprecated in newer Streamlit
    # releases (use_container_width) — confirm the pinned version.
    st.image(img, caption='Uploaded Image.', use_column_width=True)
    # Persist a copy of the upload to disk.
img.save('uploaded_image.jpg') | egoist000/fruit-classifier | app.py | app.py | py | 724 | python | en | code | 0 | github-code | 13 |
24554191016 | import os
import db_orm_youbike.YoubikeDAO as dao
import db_orm_youbike.YoubikeUtil as util
import threading
import time
def menu():
    """Clear the screen and print the main menu (Traditional Chinese)."""
    clear_screen()
    lines = (
        "台北市 Youbike 出借查詢系統",
        "------------------------",
        "1. 資料同步",
        "2. 列出所有站點資料",
        "3. 取出某站號或站名的資料",
        "4. 我要借 N 台",
        "5. 我要還 N 台",
        "6. 我要借 N 還 N 台",
        "------------------------",
        "0. 結束電腦選號程式",
    )
    # One print of the joined lines emits exactly the same output as the
    # original sequence of individual print calls.
    print("\n".join(lines))
def clear_screen():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    if os.name == 'nt':
        command = 'cls'
    else:
        command = 'clear'
    os.system(command)
def sched():
    # Background worker: wipe and re-import the Youbike table, forever.
    # NOTE(review): delete_all()/import_data() is not atomic, so a reader
    # hitting the DB between the two calls can observe an empty table.
    while True:
        dao.delete_all()
        dao.import_data()
        time.sleep(10) # refresh every 10 seconds
if __name__ == '__main__':
    # Run the refresher as a daemon thread: a non-daemon thread stuck in
    # sched()'s infinite loop would keep the interpreter alive even after
    # the user picks 0 and the main loop breaks.
    t = threading.Thread(target=sched, daemon=True)
    t.start()
    # Interactive menu loop; choice 0 exits.
    while True:
        menu()
        choice = int(input('請輸入您的選擇 : '))
        if choice == 1:
            # Manual full re-sync of the station data.
            dao.delete_all()
            dao.import_data()
        elif choice == 2:
            youbikes = dao.query_all()
            util.print_youbike(youbikes)
        elif choice == 3:
            youbikes = dao.query_by(input('請輸入站號或站名 : '))
            util.print_youbike(youbikes)
        elif choice == 4:
            youbikes = dao.query_sbi(int(input('我要借(台) : ')))
            util.print_youbike(youbikes)
        elif choice == 5:
            youbikes = dao.query_bemp(int(input('我要還(台) : ')))
            util.print_youbike(youbikes)
        elif choice == 6:
            sbi, bemp = input('我要借(台), 還(台): 例如:30 30 => ').split()
            youbikes = dao.query_sbi_bemp(int(sbi), int(bemp))
            util.print_youbike(youbikes)
        elif choice == 0:
            break
        input('按任意鍵返回主選單')
| vincenttuan/PythonCourse | db_orm_youbike/YoubikeMain.py | YoubikeMain.py | py | 1,811 | python | en | code | 4 | github-code | 13 |
21220393315 | from turtle import Turtle, Screen
import random
WIDTH = 900
HEIGHT = 500
screen = Screen()
screen.setup(WIDTH*2, HEIGHT*2)
screen.screensize(WIDTH*2, HEIGHT*2, 'lightblue')
screen.tracer(0)
class PlayerPad(Turtle):
    """The player's paddle: a wide square locked to the bottom of the board."""

    def __init__(self):
        super().__init__()
        self.penup()
        self.shape("square")
        # 2x8 turtle units -> a wide, flat paddle.
        self.turtlesize(2, 8)
        self.setposition(0, -HEIGHT + 120)
        # Horizontal pixels moved per key press.
        self.steps = 15

    def move_left(self):
        """Step left, clamped at the board's left wall."""
        x = self.xcor() - self.steps
        if x < -WIDTH + 180:
            x = -WIDTH + 180
        self.setx(x)

    def move_right(self):
        """Step right, clamped at the board's right wall."""
        x = self.xcor() + self.steps
        if x > WIDTH - 180:
            x = WIDTH - 180
        self.setx(x)
class Ball(Turtle):
    """The ball: advances 2 px per frame along its heading and bounces off
    the side/top walls by picking a new, randomized heading."""
    def __init__(self):
        super().__init__()
        self.penup()
        self.shape("circle")
        self.color("yellow")
        self.speed(9)
        # Start just above the paddle, headed somewhere upward.
        self.setposition(0,-HEIGHT+150)
        self.setheading(random.randint(15,165))
    def move(self):
        """Advance one step and bounce if a wall was reached."""
        self.forward(2)
        if self.xcor() >= WIDTH-110:
            self.move_left()
        elif self.xcor() <= -WIDTH+110:
            self.move_right()
        elif self.ycor() >= HEIGHT-110:
            self.move_down()
    def move_left(self):
        """Bounce off the right wall: choose a leftward heading, keeping
        the vertical sense (up stays up, down stays down)."""
        if self.heading() < 90:
            direction = random.randint(160, 200)
            self.setheading(direction)
        else:
            direction = random.randint(200,240)
            self.setheading(direction)
    def move_right(self):
        """Bounce off the left wall: choose a rightward heading."""
        if self.heading() < 180:
            direction = random.randint(20,60)
            self.setheading(direction)
        else:
            direction = random.randint(290,330)
            self.setheading(direction)
    def move_down(self):
        """Bounce off the top wall: choose a downward heading, keeping
        the horizontal sense."""
        if self.heading() < 90:
            direction = random.randint(290,330)
            self.setheading(direction)
        else:
            direction = random.randint(200,240)
            self.setheading(direction)
    def move_up(self):
        """Bounce off the paddle (or a block): choose an upward heading."""
        if self.heading() < 270:
            direction = random.randint(110,150)
            self.setheading(direction)
        else:
            direction = random.randint(20,60)
            self.setheading(direction)
class Board(Turtle):
    """Invisible turtle that draws the rectangular playing-field border."""

    def __init__(self):
        super().__init__()
        self.hideturtle()
        self.pensize(3)
        left, right = -WIDTH + 100, WIDTH - 100
        bottom, top = -HEIGHT + 100, HEIGHT - 100
        # Jump to the bottom-left corner, then trace the rectangle.
        self.penup()
        self.setposition(left, bottom)
        self.pendown()
        for corner in ((right, bottom), (right, top), (left, top), (left, bottom)):
            self.setposition(*corner)
class Block(Turtle):
    """A single breakable block with a randomly chosen color."""
    def __init__(self):
        super().__init__()
        self.colors = ["red", "blue", "purple", "green", "yellow"]
        self.color(random.choice(self.colors))
        self.shape("square")
        self.penup()
        # 1x2 turtle units -> a short, wide brick.
        self.turtlesize(1,2)
class Blocks:
    """Builds and holds the full grid of breakable blocks."""
    def __init__(self):
        self.block_list = []
        self.row()
    def row(self):
        """Lay out 10 rows x 29 columns of blocks, spaced 50 px
        horizontally and 30 px vertically, starting at the left edge."""
        y = 0
        x = -WIDTH + 200
        for i in range(10):
            for j in range(29):
                new_block = Block()
                new_block.setposition(x,y)
                self.block_list.append(new_block)
                x += 50
            # Carriage return: back to the left edge, one row up.
            x = -WIDTH + 200
            y += 30
# Build the scene and wire the arrow keys to the paddle.
board = Board()
playerpad = PlayerPad()
ball = Ball()
blocks = Blocks()
screen.listen()
screen.onkeypress(playerpad.move_left, "Left")
screen.onkeypress(playerpad.move_right, "Right")
game_on = True
# NOTE(review): game_on is never set to False, so the loop only ends
# when the window is closed.
while game_on:
    screen.update()
    ball.move()
    # Paddle hit: bounce the ball upward.
    if ball.distance(playerpad) <= 50:
        ball.move_up()
    # Ball fell below the board: respawn it just above the paddle.
    if ball.ycor() < -HEIGHT-50:
        ball.setposition(playerpad.xcor(), playerpad.ycor()+51)
    # Block hit: hide the block.
    # NOTE(review): hidden blocks are not removed from block_list, so
    # they still register distance hits on later frames.
    for block in blocks.block_list:
        if ball.distance(block) <= 20:
            block.hideturtle()
screen.exitonclick() | ismaelconejeros/break_out_game | main.py | main.py | py | 3,906 | python | en | code | 0 | github-code | 13 |
2032518490 | import sys
# Fast stdin reader for competitive-programming input.
rl = sys.stdin.readline
N = int(rl())
Card = rl().split()
M = int(rl())
Find = rl().split()
# Direct-address counting table: index = value + 10,000,000, so values
# presumably lie in [-10^7, 10^7] per the problem constraints — hence
# 20,000,001 slots.
# NOTE(review): this list costs on the order of 160 MB on a 64-bit
# build; collections.Counter would be far lighter.
Table = [0] * 20000001
for i in Card:
    Table[int(i)+10000000] += 1
for x in Find:
print(Table[int(x)+10000000], end=' ') | YeonHoLee-dev/Python | BAEKJOON/[10816] 숫자 카드 2.py | [10816] 숫자 카드 2.py | py | 235 | python | en | code | 0 | github-code | 13 |
39844753468 | #!/usr/bin/env python3
# OneTime Papa Edition Main Window
# With an overview of everything:
# key manager
# en/de-crypter
# KeyGen(r) :p
from tkinter import *
from tkinter import filedialog
import random, pickle, os, sys
def keygen():
    """Ask for a destination file and pickle a fresh 1024-byte one-time-pad
    key (a list of ints in 0..255) to it.

    Uses os.urandom() rather than the `random` module: one-time-pad key
    material must come from a cryptographically secure source, and
    `random` is explicitly not suitable for security purposes.
    """
    save_file = filedialog.asksaveasfilename()
    if save_file:
        # Same shape the rest of the app expects: 1024 ints in 0..255.
        key = list(os.urandom(1024))
        # `with` guarantees the file handle is closed (the original
        # pickle.dump(key, open(...)) leaked the handle).
        with open(save_file, 'wb') as f:
            pickle.dump(key, f)
def cipher():
    # Toggle the encrypt/decrypt side panel.
    sidewindow('OneTime_Cipher')
def manage():
    # Toggle the key-manager side panel.
    sidewindow('OneTime_Manager')
def sidewindow(thing):
    """Toggle a side panel: destroy it if one is open, otherwise import
    the named module and let it populate a new frame via its init()."""
    # Module-level toggle state shared with the main window.
    global rightbit
    global righton
    global right
    # Import the panel's module by name. Only ever called with the two
    # fixed literals 'OneTime_Cipher'/'OneTime_Manager' above; exec on
    # anything user-controlled would be unsafe.
    exec("import " + thing)
    if righton:
        # A panel is open: remove it from the canvas and tear it down.
        canvas.delete(rightbit)
        right.destroy()
        righton = 0
    else:
        # No panel open: create the frame, anchor it bottom-right, and
        # hand it to the module's init() to fill in.
        right = Frame(canvas, relief=GROOVE, borderwidth=2)
        rightbit = canvas.create_window(640,480,window=right,anchor=SE)
        exec(thing + ".init(right,path)")
        righton = 1
# Pick the per-user data directory for this platform.
user = os.getlogin()
if sys.platform == 'darwin':
    path = '/Users/%s/.dcryptpe/' % user
elif 'lin' in sys.platform:
    path = '/home/%s/.dcryptpe/' % user
else:
    # Unknown platform: fall back to asking the user for a directory.
    # (The message string contains a typo, "sepcify", left as-is here.)
    print("Error: Can't sepcify platform")
    path = str(filedialog.askdirectory() + '/.dcryptpe/')
if not os.path.isdir(path): # first-run: create the data directory
    os.mkdir(path)
# Toggle flag for the side panel (see sidewindow()).
righton = 0
# Build the fixed-size main window with three buttons over a background.
root = Tk()
root.wm_title('OneTime Papa Edition')
root.resizable(0,0)
canvas = Canvas(root, width=640, height=480)
# NOTE(review): hard-coded absolute path — this only works on the
# author's machine; the image should live next to the script.
background = PhotoImage(file="/home/andreas/Programming/python/papa/background.gif")
canvas.create_image(0,0,image=background,anchor=NW)
canvas.pack()
top = Frame(canvas, relief=GROOVE, borderwidth=2)
middle = Frame(canvas, relief=GROOVE, borderwidth=2)
bottom = Frame(canvas, relief=GROOVE, borderwidth=2)
Button(top, text='KeyGen', command=keygen).pack()
Button(middle, text='Manager', command=manage).pack()
Button(bottom, text='Cipher', command=cipher).pack()
canvas.create_window(100,100,window=top,anchor=CENTER)
canvas.create_window(100,200,window=middle,anchor=CENTER)
canvas.create_window(100,300,window=bottom,anchor=CENTER)
root.mainloop()
| agwilt/python | onetime/OneTime_Main.py | OneTime_Main.py | py | 1,974 | python | en | code | 0 | github-code | 13 |
41071665356 | from django.test import TestCase
from beerbookapp.models import Rating, Location, City, Beer, BeerType, BeerProducer, Event
from datetime import datetime
from django_countries.fields import CountryField
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.core.urlresolvers import reverse
class TestEventCataloguePage(TestCase):
    """View tests for the event catalogue page."""

    def test_event_catalogue_with_no_events(self):
        """An empty database renders a friendly message and an empty list."""
        response = self.client.get(reverse('event_catalogue'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No Events found")
        self.assertQuerysetEqual(response.context['events_list'], [])

    def test_event_catalogue_with_events_present(self):
        """A stored event appears in the rendered catalogue."""
        make_test_event_stub()
        response = self.client.get(reverse('event_catalogue'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Test Event")
        # Renamed from num_beers: this counts events, not beers.
        num_events = len(response.context['events_list'])
        self.assertEqual(num_events, 1)
class TestBeerCataloguePage(TestCase):
    """View tests for the beer catalogue page."""

    def test_beer_catalogue_with_no_beers(self):
        """An empty database renders a friendly message and an empty list."""
        response = self.client.get(reverse('beer_catalogue'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No beers matching the criteria found")
        self.assertQuerysetEqual(response.context['beer_list'], [])

    def test_beer_catalogue_with_beers(self):
        """A stored beer appears in the rendered catalogue."""
        make_test_beer_stub()
        response = self.client.get(reverse('beer_catalogue'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Test Beer")
        num_beers = len(response.context['beer_list'])
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(num_beers, 1)
class TestIndexPage(TestCase):
    """View tests for the landing page with an empty database."""
    def test_index_page_without_beers(self):
        # The 'top_beers' context entry should be empty and the page
        # should explain that no beers exist yet.
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No beers found in database.")
        self.assertQuerysetEqual(response.context['top_beers'], [])
    def test_index_page_without_events(self):
        # Likewise for upcoming events.
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Unfortunately there are no upcoming events.")
        self.assertQuerysetEqual(response.context['recent_events'], [])
class TestStubsUser(TestCase):
    """Sanity checks that each stub factory builds the expected object.

    Assertions compare values directly (assertEqual(a, b)) instead of
    the opaque assertEquals((a == b), True) pattern, so failures report
    both values.
    """

    def test_user_stub(self):
        stub_user = make_test_user_stub()
        self.assertEqual(stub_user.username, "test")

    def test_beer_type_stub(self):
        stub_beer_type = make_test_beer_type_stub()
        self.assertEqual(stub_beer_type.name, "Test Brew")

    def test_beer_producer(self):
        stub_beer_producer = make_test_beer_producer_stub()
        self.assertEqual(stub_beer_producer.name, "Test Beer Producer")

    def test_beer_stub(self):
        stub_beer = make_test_beer_stub()
        self.assertEqual(stub_beer.name, "Test Beer")

    def test_rating_stub(self):
        stub_rating = make_test_rating_stub()
        self.assertEqual(stub_rating.rating, 0)
class UserMethodTest(TestCase):
    """Tests that make_test_user copies every field onto the User."""
    username = "test"
    first_name = "Tester"
    last_name = "Testington"
    password = "test"

    def test_ensure_user_is_created(self):
        this_user = make_test_user(self.username,
                                   self.first_name,
                                   self.last_name,
                                   self.password)
        this_user.save()
        self.assertEqual(this_user.username, self.username)
        self.assertEqual(this_user.first_name, self.first_name)
        self.assertEqual(this_user.last_name, self.last_name)
        # NOTE(review): compares the raw string because make_test_user
        # assigns `password` directly (no hashing via set_password()).
        self.assertEqual(this_user.password, self.password)
class BeerTypeMethodTest(TestCase):
    """Tests for BeerType creation."""
    beer_type_name = "Test Brew"

    def test_ensure_beer_type_is_created(self):
        this_beer_type = make_test_beer_type(self.beer_type_name)
        this_beer_type.save()
        self.assertEqual(this_beer_type.name, self.beer_type_name)
class BeerProducerMethodTest(TestCase):
    """Tests for BeerProducer creation."""
    producer_name = "Test Beer Producer"

    def test_ensure_beer_producer_is_created(self):
        this_beer_producer = make_test_beer_producer(self.producer_name)
        this_beer_producer.save()
        self.assertEqual(this_beer_producer.name, self.producer_name)
class BeerMethodTest(TestCase):
    """Tests for Beer creation and automatic slug generation."""
    beer_name = "Test Beer"
    beer_introduced = datetime.now()
    beer_desc = "test beer description"
    beer_country = 'GB'

    def test_ensure_beer_is_created(self):
        beer_type = make_test_beer_type_stub()
        beer_producer = make_test_beer_producer_stub()
        # make_test_beer(name, b_type, producer, description, introduced, country)
        this_beer = make_test_beer(self.beer_name,
                                   beer_type,
                                   beer_producer,
                                   self.beer_desc,
                                   self.beer_introduced,
                                   self.beer_country)
        this_beer.save()
        self.assertEqual(this_beer.name, self.beer_name)
        self.assertEqual(this_beer.type, beer_type)
        self.assertEqual(this_beer.producer, beer_producer)
        self.assertEqual(this_beer.description, self.beer_desc)
        self.assertEqual(this_beer.introduced, self.beer_introduced)
        self.assertEqual(this_beer.country, self.beer_country)

    def test_ensure_slug_is_created(self):
        this_beer = make_test_beer_stub()
        self.assertEqual(this_beer.slug, "test-beer")
class LocationMethodTest(TestCase):
    """Tests for Location creation."""
    loc_name = "Test Place"
    loc_latitude = 10.000
    loc_longitude = 10.000
    loc_country = 'GB'

    def test_ensure_location_is_created(self):
        loc_city = make_test_city_stub()
        this_location = make_test_location(loc_city,
                                           self.loc_name,
                                           self.loc_latitude,
                                           self.loc_longitude,
                                           self.loc_country)
        this_location.save()
        self.assertEqual(this_location.name, self.loc_name)
class RatingMethodTest(TestCase):
    """Tests for the Rating model: creation, bound checks, uniqueness."""
    r_rating = 1
    r_review = "Test beer review"
    r_date = datetime.now()

    def test_ensure_rating_is_created(self):
        r_beer = make_test_beer_stub()
        r_owner = make_test_user_stub()
        this_rating = make_test_rating(self.r_rating, self.r_review, r_owner, r_beer)
        this_rating.save()
        self.assertEqual(this_rating.rating, self.r_rating)

    def test_ensure_rating_not_saved_negative_values(self):
        # NOTE(review): Model.save() returns None unconditionally, so
        # this only verifies save() did not raise for the bad value.
        r_beer = make_test_beer_stub()
        r_owner = make_test_user_stub()
        this_rating = make_test_rating(-1, self.r_review, r_owner, r_beer)
        this_test = this_rating.save()
        self.assertIsNone(this_test)

    def test_ensure_rating_not_saved_out_of_range_values(self):
        r_beer = make_test_beer_stub()
        r_owner = make_test_user_stub()
        this_rating = make_test_rating(6, self.r_review, r_owner, r_beer)
        this_test = this_rating.save()
        self.assertIsNone(this_test)

    def test_ensure_unique_together_holds(self):
        # Saving a second rating by the same owner for the same beer
        # must violate the unique_together constraint.
        r_beer = make_test_beer_stub()
        r_owner = make_test_user_stub()
        this_rating1 = make_test_rating(self.r_rating, self.r_review, r_owner, r_beer)
        this_rating1.save()
        this_rating_bad = None
        with self.assertRaises(IntegrityError):
            this_rating2 = make_test_rating(self.r_rating, self.r_review, r_owner, r_beer)
            this_rating_bad = this_rating2.save()
        self.assertIsNotNone(this_rating1)
        # save() raised before assigning, so the marker is still None.
        self.assertIsNone(this_rating_bad)
# helper methods**********************************************************************
def make_test_user(username, first_name, last_name, password):
    # Build (but do not save) a User with the given fields.
    # NOTE(review): the raw string is assigned to `password` directly,
    # so it is stored unhashed; use set_password() if hashing matters.
    c = User(username=username,
             first_name=first_name,
             last_name=last_name,
             password=password)
    return c
def make_test_user_stub():
    # Canned, saved User shared by most tests.
    username = "test"
    first_name = "Tester"
    last_name = "Testington"
    password = "test"
    c = make_test_user(username, first_name, last_name, password)
    c.save()
    return c
def make_test_city_stub():
    # Canned, saved City.
    name = "Test City"
    c = make_test_city(name)
    c.save()
    return c
def make_test_city(name):
    # Build (but do not save) a City.
    c = City(name=name)
    return c
def make_test_location(city, name, latitude, longitude, country):
    """Build (but do not save) a Location.

    Bug fix: the latitude/longitude keyword arguments were swapped
    (latitude=longitude, longitude=latitude). It went unnoticed because
    the existing tests use identical values for both coordinates.
    """
    c = Location(city=city,
                 name=name,
                 latitude=latitude,
                 longitude=longitude,
                 country=country)
    return c
def make_test_location_stub():
    # Canned, saved Location (lat/long deliberately equal: 10.00).
    city = make_test_city_stub()
    name = "Test Location"
    latitude = 10.00
    longitude = 10.00
    country = 'GB'
    c = make_test_location(city, name, latitude, longitude, country)
    c.save()
    return c
def make_test_beer(name, b_type, producer, description, introduced, country):
    # Build (but do not save) a Beer with the given fields.
    c = Beer(name=name,
             type=b_type,
             producer=producer,
             description=description,
             introduced=introduced,
             country=country
             )
    return c
def make_test_beer_stub():
    # Canned, saved Beer ("Test Beer") with stub type and producer.
    name = "Test Beer"
    b_type = make_test_beer_type_stub()
    producer = make_test_beer_producer_stub()
    description = "This is a test beer"
    introduced = datetime.now()
    country = 'GB'
    c = make_test_beer(name, b_type, producer, description, introduced, country)
    c.save()
    return c
def make_test_beer_type_stub():
    # Canned, saved BeerType ("Test Brew").
    c = make_test_beer_type("Test Brew")
    c.save()
    return c
def make_test_beer_type(name):
    # Build (but do not save) a BeerType.
    c = BeerType(name=name)
    return c
def make_test_beer_producer_stub():
    # Canned, saved BeerProducer.
    name = "Test Beer Producer"
    c = make_test_beer_producer(name)
    c.save()
    return c
def make_test_beer_producer(name):
    # Build (but do not save) a BeerProducer.
    c = BeerProducer(name=name)
    return c
def make_test_rating(rating, review, owner, rated_beer):
c = Rating(rating=rating,
review=review,
owner=owner,
rated_beer=rated_beer)
return c
def make_test_rating_stub():
rating = 0
review = "Test Review!"
owner = make_test_user_stub()
rated_beer = make_test_beer_stub()
c = make_test_rating(rating, review, owner, rated_beer)
c.save()
return c
def make_test_event(title, event_datetime, description, owner, location):
    """Build (without saving) an Event owned by *owner* at *location*."""
    return Event(title=title, datetime=event_datetime,
                 description=description, owner=owner, location=location)
def make_test_event_stub():
    """Create, save, and return the canonical test event."""
    event = make_test_event("Test Event",
                            datetime.now(),
                            "Description of Test Event",
                            make_test_user_stub(),
                            make_test_location_stub())
    event.save()
    return event
| enzoroiz/beerbook | beerbookapp/tests.py | tests.py | py | 11,242 | python | en | code | 1 | github-code | 13 |
16543787969 | from layers.dynamic_rnn import DynamicLSTM
from layers.attention import Attention
import torch
import torch.nn as nn
class AELSTM(nn.Module):
    """Aspect-Embedding LSTM for aspect-level sentiment classification.
    Every token embedding is concatenated with the mean aspect embedding,
    encoded by an LSTM, attended against the aspect vector, and projected
    to polarity logits.
    """
    def __init__(self, embedding_matrix, opt):
        # opt is expected to provide embed_dim, hidden_dim, polarities_dim
        # and device -- TODO confirm against the training driver.
        super(AELSTM, self).__init__()
        self.opt = opt
        self.n_head = 1
        self.embed_dim = opt.embed_dim
        # Pretrained embeddings, fine-tuned during training (freeze=False).
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float), freeze=False)
        # LSTM input is [token embedding ; aspect embedding] -> embed_dim*2.
        self.lstm = DynamicLSTM(opt.embed_dim*2, opt.hidden_dim, num_layers=1, batch_first=True)
        self.attention = Attention(opt.hidden_dim, score_function='mlp')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
    def forward(self, inputs):
        """Return polarity logits for a batch.
        inputs[0]: padded token index matrix; inputs[1]: padded aspect
        index matrix (index 0 is treated as padding in both).
        """
        text_raw_indices, aspect_indices = inputs[0], inputs[1]
        # Unpadded lengths, obtained by counting non-zero indices.
        x_len = torch.sum(text_raw_indices != 0, dim=-1)
        aspect_len = torch.sum(aspect_indices != 0, dim=-1)
        nonzeros_aspect = torch.tensor(aspect_len, dtype=torch.float).to(self.opt.device)
        x = self.embed(text_raw_indices)
        aspect = self.embed(aspect_indices)
        # Mean-pool aspect embeddings over the aspect tokens.
        aspect = torch.sum(aspect, dim=1)
        aspect = torch.div(aspect, nonzeros_aspect.view(nonzeros_aspect.size(0), 1))
        asp_squ = aspect.unsqueeze(dim=1)
        # Tile the pooled aspect along the sequence and append it to
        # every token embedding.
        asp_re = asp_squ.repeat(1, x.size()[1], 1)
        asp_x = torch.cat((x, asp_re), dim=-1)
        text_memory, (_, _) = self.lstm(asp_x, x_len)
        # Attend the LSTM memory with the aspect query, then classify.
        out_at = self.attention(text_memory, asp_squ).squeeze(dim=1)
        out_at = out_at.view(out_at.size(0), -1)
        out = self.dense(out_at)
        return out
| xunan0812/MIMN | models/ae_lstm.py | ae_lstm.py | py | 1,549 | python | en | code | 84 | github-code | 13 |
7133931146 | """Script to plot consumers-resource population dynamics and save to PDF"""
__author__ = 'Matthew Campos (matthew.campos19@imperial.ac.uk)'
__version__ = '0.0.1'
import scipy as sc
import scipy.integrate as integrate
def dCR_dt(pops, t=0):
    """returns the growth rate of consumer and resource population at any given time step"""
    # pops[0] is the resource density R, pops[1] the consumer density C.
    R = pops[0]
    C = pops[1]
    # Lotka-Volterra equations; r, a, z, e are module-level parameters
    # assigned below (growth, attack, mortality, conversion efficiency).
    dRdt = r * R - a * R * C
    dCdt = -z * C + e * a * R * C
    return sc.array([dRdt, dCdt])
#assign some parameter values
r = 1.    # intrinsic resource growth rate
a = 0.1   # consumer search (attack) rate
z = 1.5   # consumer mortality rate
e = 0.75  # consumer conversion efficiency
#integrate from time point 0 to 15, using 1000 sub-divisions of time
t = sc.linspace(0, 15, 1000)
#Set the initial conditions for the two populations
R0 = 10
C0 = 5
RC0 = sc.array([R0, C0])
#numerically integrate this system forward from those starting conditions
pops, infodict = integrate.odeint(dCR_dt, RC0, t, full_output=True)
print("final Consumer and Resource population values are:", pops[-1]) #prints final values
# NOTE(review): the next three expressions are evaluated but their results
# are discarded; presumably left over from interactive exploration.
type(infodict)
infodict.keys()
infodict['message']
import matplotlib.pylab as p
# Figure 1: both population densities against time.
f1 = p.figure()
p.plot(t, pops[:,0], 'g-', label='Resource density') # Plot
p.plot(t, pops[:,1] , 'b-', label='Consumer density')
p.grid()
p.legend(loc='best')
p.xlabel('Time')
p.ylabel('Population density')
p.title('Consumer-Resource population dynamics')
#p.show()# To display the figure
f1.savefig('../Results/LV_model.pdf')
# Figure 2: phase plot of consumer density against resource density.
f2 = p.figure()
p.plot(pops[:,0], pops[:,1], 'r-', label='Consumer density') # Plot
p.grid()
p.xlabel('Resource Density')
p.ylabel('Consumer density')
p.title('Consumer-Resource population dynamics')
#p.show()# To display the figure
f2.savefig('../Results/LV_second_model.pdf')
| matthewcampos/CMEECourseWork | Week7/Code/LV1.py | LV1.py | py | 1,656 | python | en | code | 0 | github-code | 13 |
20364607369 | import math
fb = math.fabs  # shorthand for absolute value
# Read N from stdin, then N points as whitespace-separated "x y" pairs.
dotes = [[int(l) for l in input().split()] for i in range(int(input()))]
# Points with |x| > |y| are echoed back later.
tdotes = []
for i in dotes:
    if fb(i[0]) > fb(i[1]):
        tdotes.append(i)
# Extreme-point search; the sentinels assume coordinates stay within
# these bounds -- TODO confirm against the task's input limits.
top, left, right, bottom = -1000000, 100000000, -10000000, 10000000
for i in dotes:
    if i[1] > top:
        dt = i
        top = i[1]
    if i[0] < left:
        dl = i
        left = i[0]
    if i[0] > right:
        dr = i
        right = i[0]
    if i[1] < bottom:
        db = i
        bottom = i[1]
for i in tdotes:
    print(f'({i[0]}, {i[1]})')
# NOTE(review): dt/dl/dr/db are unbound if the input has zero points.
print(f'left: ({dl[0]}, {dl[1]})\nright: ({dr[0]}, {dr[1]})\ntop: ({dt[0]}, {dt[1]})\n' +
      f'bottom: ({db[0]}, {db[1]})')
| Qwerty10291/lyceum | 1/homework/dotes.py | dotes.py | py | 665 | python | en | code | 0 | github-code | 13 |
23148232930 | # biblioteca para lidar com chamadas asyncronas no mongo
import os
from dotenv import load_dotenv
from pymongo.mongo_client import MongoClient
from scraper import manage_scrape
load_dotenv()
# Mongo connection configured from the environment (.env via dotenv).
uri = os.environ.get('MONGO_URL')
client = MongoClient(uri)
db = client['products_data']
products_collection = db['products']
# Maps the scraper's website slug to the display name stored in Mongo.
website_helper = {
    "mercadolivre": 'Mercado Livre',
    "buscape": 'Busca Pé'
}
async def add_product(website: str, product: str):
    """Scrape *product* listings from *website*, persist them, and return
    the freshly stored documents serialized for the API response."""
    scraped = manage_scrape(website, product)
    products_collection.insert_many(scraped)
    cursor = products_collection.find(
        {"website": website_helper[website], "product_type": product})
    return [
        {
            "id": str(item["_id"]),
            "product_type": str(item["product_type"]),
            "description": item["description"],
            "price": item["price"],
            "website": item["website"],
            "external_link": item["external_link"],
            "image_link": item["image_link"],
        }
        for item in cursor
    ]
async def get_products(website: str, product: str):
    """Return all stored products for (*website*, *product*).
    On a cache miss the site is scraped first via add_product."""
    stored = list(products_collection.find(
        {"website": website_helper[website], "product_type": product}))
    if not stored:
        return await add_product(website, product)
    return [
        {
            "id": str(item["_id"]),
            "product_type": str(item["product_type"]),
            "description": item["description"],
            "price": item["price"],
            "website": item["website"],
            "external_link": item["external_link"],
            "image_link": item["image_link"],
        }
        for item in stored
    ]
| IgorBrizack/Crawler-Web | backend/server/database.py | database.py | py | 1,875 | python | en | code | 0 | github-code | 13 |
17046519464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SettleClause import SettleClause
class AlipayTradeBatchSettleModel(object):
    """Request model for Alipay's trade batch-settle API.
    SDK-generated style: plain properties plus dict (de)serialization
    helpers used by the transport layer.
    """
    def __init__(self):
        self._biz_product = None
        self._extend_params = None
        self._out_request_no = None
        self._settle_clauses = None
        self._settle_type = None
    @property
    def biz_product(self):
        return self._biz_product
    @biz_product.setter
    def biz_product(self, value):
        self._biz_product = value
    @property
    def extend_params(self):
        return self._extend_params
    @extend_params.setter
    def extend_params(self, value):
        self._extend_params = value
    @property
    def out_request_no(self):
        return self._out_request_no
    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value
    @property
    def settle_clauses(self):
        return self._settle_clauses
    @settle_clauses.setter
    def settle_clauses(self, value):
        # Coerces each element to a SettleClause; silently ignores
        # non-list values (leaves the attribute unchanged).
        if isinstance(value, list):
            self._settle_clauses = list()
            for i in value:
                if isinstance(i, SettleClause):
                    self._settle_clauses.append(i)
                else:
                    self._settle_clauses.append(SettleClause.from_alipay_dict(i))
    @property
    def settle_type(self):
        return self._settle_type
    @settle_type.setter
    def settle_type(self, value):
        self._settle_type = value
    def to_alipay_dict(self):
        """Serialize the populated fields into a plain dict.
        Note: converts self.settle_clauses elements to dicts in place.
        """
        params = dict()
        if self.biz_product:
            if hasattr(self.biz_product, 'to_alipay_dict'):
                params['biz_product'] = self.biz_product.to_alipay_dict()
            else:
                params['biz_product'] = self.biz_product
        if self.extend_params:
            if hasattr(self.extend_params, 'to_alipay_dict'):
                params['extend_params'] = self.extend_params.to_alipay_dict()
            else:
                params['extend_params'] = self.extend_params
        if self.out_request_no:
            if hasattr(self.out_request_no, 'to_alipay_dict'):
                params['out_request_no'] = self.out_request_no.to_alipay_dict()
            else:
                params['out_request_no'] = self.out_request_no
        if self.settle_clauses:
            if isinstance(self.settle_clauses, list):
                for i in range(0, len(self.settle_clauses)):
                    element = self.settle_clauses[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.settle_clauses[i] = element.to_alipay_dict()
            if hasattr(self.settle_clauses, 'to_alipay_dict'):
                params['settle_clauses'] = self.settle_clauses.to_alipay_dict()
            else:
                params['settle_clauses'] = self.settle_clauses
        if self.settle_type:
            if hasattr(self.settle_type, 'to_alipay_dict'):
                params['settle_type'] = self.settle_type.to_alipay_dict()
            else:
                params['settle_type'] = self.settle_type
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayTradeBatchSettleModel()
        if 'biz_product' in d:
            o.biz_product = d['biz_product']
        if 'extend_params' in d:
            o.extend_params = d['extend_params']
        if 'out_request_no' in d:
            o.out_request_no = d['out_request_no']
        if 'settle_clauses' in d:
            o.settle_clauses = d['settle_clauses']
        if 'settle_type' in d:
            o.settle_type = d['settle_type']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayTradeBatchSettleModel.py | AlipayTradeBatchSettleModel.py | py | 3,735 | python | en | code | 241 | github-code | 13 |
41848550713 | import logging
from uuid import UUID
from lib.exceptions import EntityNotFoundException
from lib.ydb.mixin import YbdMixin
from modules.todo.schemas.request import TaskRequestSchema, TaskRequestUpdateSchema
logger = logging.getLogger(__name__)
class Task(YbdMixin):
    """CRUD helper for rows of the YDB ``task`` table."""
    table = 'task'
    async def create(self, task: TaskRequestSchema | TaskRequestUpdateSchema, id: UUID | None = None) -> dict:
        """Upsert *task*.
        With *id*, the existing row is overwritten; without it, a new row
        is inserted and ``created_at`` is stamped server-side.
        Returns ``{'id': <row id>}``.
        """
        payload = task.model_dump(exclude_none=True)
        if id:
            payload.update({'id': str(id).encode()})
        else:
            payload.update({
                'created_at': {
                    'val': 'Cast(CurrentUtcDatetime() as Int32)',
                    'mode': 'var'
                }
            })
        return {'id': await self.upsert(payload)}
    async def get(self, **payload) -> list[dict]:
        """Return rows matching every keyword filter (AND semantics).
        Raises EntityNotFoundException when nothing matches.
        """
        obj = await self.read(payload, 'AND')
        if obj is None or len(obj) == 0:
            raise EntityNotFoundException()
        return obj
    async def delete(self, id: str) -> bool:
        """Remove the row with the given *id*; always returns True."""
        await self.remove({
            'id': id
        })
        return True
| Gamer201760/Task-app | modules/todo/crud.py | crud.py | py | 1,144 | python | en | code | 0 | github-code | 13 |
73644105299 | from django.urls import path
from rest_framework_simplejwt.views import TokenRefreshView
from auth.views import (
MyObtainTokenPairView,
RegisterView,
UserView,
)
# JWT auth endpoints plus user list/detail routes.
urlpatterns = [
    path('login/', MyObtainTokenPairView.as_view(), name='token_obtain_pair'),
    path('login/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('register/', RegisterView.as_view(), name='auth_register'),
    path('user/', UserView.as_view({'get': 'list'}), name='auth_user'),
    # NOTE(review): this route reuses the name 'auth_user'; reverse() will
    # resolve only the last registration -- confirm that is intended.
    path('user/<int:pk>', UserView.as_view({'get': 'retrieve'}), name='auth_user'),
]
| tgardela/event_manager | auth/urls.py | urls.py | py | 586 | python | en | code | 0 | github-code | 13 |
40846227022 | # -*- Mode:python; c-file-style:"gnu"; indent-tabs-mode:nil -*- */
import shutil
import os, sys, re
from mininet.clean import sh
from mininet.examples.cluster import RemoteMixin
from mininet.log import warn
from mininet.node import Switch
from minindn.apps.application import Application
from minindn.util import scp, copyExistentFile
from minindn.helpers.nfdc import Nfdc
from minindn.minindn import Minindn
class Ndvr(Application):
    """Mini-NDN application wrapper for the NDVR routing daemon.
    Creates the router's keys/certificates and launches ``ndvrd`` on a
    node, bound to the node's Ethernet multicast faces.
    """
    BIN = '/usr/local/bin/ndvrd'
    def __init__(self, node, logLevel='NONE', network='/ndn', interval=None):
        Application.__init__(self, node)
        self.network = network
        self.interval = interval
        self.node = node
        self.parameters = self.node.params['params']
        self.prefixes = []
        # Per-node parameters override the constructor defaults.
        if self.parameters.get('ndvr-log-level', None) != None:
            logLevel = self.parameters.get('ndvr-log-level')
        if logLevel in ['NONE', 'WARN', 'INFO', 'DEBUG', 'TRACE']:
            self.envDict = {'NDN_LOG': 'ndvr.*={}'.format(logLevel)}
        else:
            # Anything else is passed through as a raw NDN_LOG filter.
            self.envDict = {'NDN_LOG': logLevel}
        if self.parameters.get('ndvr-prefixes', None) != None:
            self.prefixes = self.parameters.get('ndvr-prefixes').split(',')
        else:
            self.prefixes.append('/ndn/{}-site'.format(node.name))
        self.logFile = 'ndvr.log'
        self.routerName = '/{}C1.Router/{}'.format('%', node.name)
        self.validationConfFile = '{}/ndvr-validation.conf'.format(self.homeDir)
        possibleConfPaths = ['/usr/local/etc/ndn/ndvr-validation.conf', '/etc/ndn/ndvr-validation.conf']
        copyExistentFile(node, possibleConfPaths, self.validationConfFile)
        self.createKeysAndCertificates()
    def start(self):
        """Launch ndvrd attached to every Ethernet multicast face."""
        monitorFace = "ether://[01:00:5e:00:17:aa]"
        faces = self.listEthernetMulticastFaces(monitorFace)
        Application.start(self, '{} -n {} -r {} {} -v {} -f {} -m {} -p {}'.format(Ndvr.BIN
                                 , self.network
                                 , self.routerName
                                 , '-i {}'.format(self.interval) if self.interval else ''
                                 , self.validationConfFile
                                 , ' -f '.join(faces)
                                 , monitorFace
                                 , ' -p '.join(self.prefixes)
                             ),
                           self.logFile,
                           self.envDict)
        Minindn.sleep(0.1)
    def listEthernetMulticastFaces(self, monitorFace):
        """Return the NFD face ids whose remote URI matches *monitorFace*."""
        faces = []
        cmd = 'nfdc face list remote {}'.format(monitorFace)
        output = self.node.cmd(cmd)
        for line in output.split("\n"):
            m = re.search('^faceid=([0-9]+)', line)
            if m:
                faces.append(m.group(1))
        return faces
    def createKeysAndCertificates(self):
        """Create and install the root and per-router certificates."""
        rootName = self.network
        rootCertFile = '{}/root.cert'.format(Minindn.workDir)
        # Create root certificate (only in the first run)
        if not os.path.isfile(rootCertFile):
            sh('ndnsec-key-gen {}'.format(rootName)) # Installs a self-signed cert into the system
            sh('ndnsec-cert-dump -i {} > {}'.format(rootName, rootCertFile))
        # Create necessary certificates for each router
        shutil.copyfile(rootCertFile,
                        '{}/trust.cert'.format(self.homeDir))
        # Create router certificate
        routerName = '{}/%C1.Router/{}'.format(self.network, self.node.name)
        routerKeyFile = '{}/router.keys'.format(self.homeDir)
        routerCertFile = '{}/router.cert'.format(self.homeDir)
        self.node.cmd('ndnsec-key-gen {} > {}'.format(routerName, routerKeyFile))
        # Copy routerKeyFile from remote for ndnsec-certgen
        if isinstance(self.node, RemoteMixin) and self.node.isRemote:
            login = 'mininet@{}'.format(self.node.server)
            src = '{}:{}'.format(login, routerKeyFile)
            dst = routerKeyFile
            scp(src, dst)
        # Root key is in root namespace, must sign router key and then install on host
        sh('ndnsec-cert-gen -s {} -r {} > {}'.format(rootName, routerKeyFile, routerCertFile))
        # Copy root.cert and site.cert from localhost to remote host
        if isinstance(self.node, RemoteMixin) and self.node.isRemote:
            login = 'mininet@{}'.format(self.node.server)
            src = '{}/router.cert'.format(self.homeDir)
            src2 = '{}/root.cert'.format(self.homeDir)
            dst = '{}:/tmp/'.format(login)
            scp(src, src2, dst)
            self.node.cmd('mv /tmp/*.cert {}'.format(self.homeDir))
        # finally, install the signed router certificate
        self.node.cmd('ndnsec-cert-install -f {}'.format(routerCertFile))
| FabioSantosSantos/ndvr-pathvector | minindn/apps/ndvr.py | ndvr.py | py | 4,834 | python | en | code | 0 | github-code | 13 |
22415573768 | cluster_tokens = {}
vault_clusters = ["vault-east", "vault-west"]  # failover order
def get_secret():
    """Fetch the secret from the first reachable/authorized Vault cluster.
    Tokens are cached in the ``tokens_mem`` file; ``get_token`` and
    ``vault_call`` are presumably defined elsewhere in this module --
    confirm, they are not visible here.
    Returns the secret on success, or an error string on failure.
    """
    global cluster_tokens
    # Reload the cached tokens from disk on every call.
    with open("tokens_mem", "r") as fh_:
        cluster_tokens = json.load(fh_)
    # Pass 1: ensure at least one cluster has a token, acquiring one
    # from the first cluster that will grant it.
    for vserv in vault_clusters:
        if vserv not in cluster_tokens:
            if not get_token(vserv):
                if vserv != vault_clusters[-1]:
                    continue  # try the next cluster
                else:
                    return "Unable to Retrieve Token"
            else:
                break
        else:
            break
    # Pass 2: read the secret; refresh the token on 403, fail over on
    # connection errors, skip clusters returning 404.
    for vserv in vault_clusters:
        if vserv in cluster_tokens:
            secret = vault_call(vserv, "get")
        else:
            continue
        if secret == "Connection Failed":
            if vserv != vault_clusters[-1]:
                continue
            else:
                return "Unable to Retrieve Secret"
        elif secret == 403:
            # Token rejected: re-authenticate and retry once.
            if get_token(vserv):
                secret = vault_call(vserv, "get")
                break
            else:
                continue
        elif secret == 404:
            continue
        else:
            break
    # NOTE(review): 'secret' is unbound if no cluster was ever called in
    # the loop above -- confirm callers cannot reach that path.
    return secret
| btkrausen/hashicorp | vault/scripts/ha-script.py | ha-script.py | py | 1,145 | python | en | code | 771 | github-code | 13 |
10687625776 | import os
import time

import requests
import serial
# Serial port of the sensor board; 115200 baud.
port = "/dev/cu.usbserial-1420"
mirror = serial.Serial(port, 115200)
# Poll the board once per hour and push the parsed reading to the server.
while True:
    time.sleep(3600)  # in sec
    datas = mirror.readline()
    # Got string like
    # "ctn: 12, pump: open , humi: 12.3, temp: 12.3");
    datas = str(datas)
    # Strip the bytes-literal wrapper (b'...\r\n') produced by str().
    datas = datas.replace("b'", "").replace(r"\r\n'", "")
    splited_datas = datas.split(",")
    # Keep only the value after each "key:" pair.
    val = [splited_data.split(":")[1] for splited_data in splited_datas]
    # NOTE(review): val[4] assumes at least five fields per line, but the
    # sample line above has only four -- confirm the device output format.
    datas = {
        "cycle_number": val[4],
        "humidity": val[2],
        "temperature": val[3],
    }
    # Target server and API key come from the environment.
    SERVER_IP = os.environ.get("SERVER_IP")
    APIKEY = f"{os.environ.get('API_KEY_NAME')}={os.environ.get('API_KEY')}"
    req = f"https://{SERVER_IP}/bme280?{APIKEY}"
    ret = requests.post(req, json=datas)
    print(ret.json())
| llPekoll/aquaPoney | raspi/get_serial.py | get_serial.py | py | 795 | python | en | code | 0 | github-code | 13 |
5902683365 | from copy import deepcopy
class BackendConfig:
    """Configuration for a serve backend and its replica actors."""

    # Serve-level options that must be stripped before actor creation.
    _serve_configs = ["_num_replicas", "max_batch_size"]

    # Changing any of these fields restarts the existing replicas.
    restart_on_change_fields = ["resources", "num_cpus", "num_gpus"]

    def __init__(self,
                 num_replicas=1,
                 resources=None,
                 max_batch_size=None,
                 num_cpus=None,
                 num_gpus=None,
                 memory=None,
                 object_store_memory=None):
        """Store serve-level and ray-actor-level settings."""
        # Serve-level settings (num_replicas is validated by its setter).
        self.num_replicas = num_replicas
        self.max_batch_size = max_batch_size
        # Ray actor settings, forwarded verbatim at actor creation.
        self.resources = resources
        self.num_cpus = num_cpus
        self.num_gpus = num_gpus
        self.memory = memory
        self.object_store_memory = object_store_memory

    @property
    def num_replicas(self):
        return self._num_replicas

    @num_replicas.setter
    def num_replicas(self, val):
        # Reject zero/negative replica counts up front.
        if not (val > 0):
            raise Exception("num_replicas must be greater than zero")
        self._num_replicas = val

    def __iter__(self):
        # Expose the public name "num_replicas" for the mangled attribute.
        for attr, value in self.__dict__.items():
            public = "num_replicas" if attr == "_num_replicas" else attr
            yield public, value

    def get_actor_creation_args(self, init_args):
        """Return actor kwargs: all settings minus the serve-only ones,
        plus the constructor arguments under "args"."""
        kwargs = deepcopy(self.__dict__)
        for serve_only in self._serve_configs:
            kwargs.pop(serve_only)
        kwargs["args"] = init_args
        return kwargs
| zhuohan123/hoplite-rllib | python/ray/experimental/serve/backend_config.py | backend_config.py | py | 1,686 | python | en | code | 2 | github-code | 13 |
17109134506 | from collections import OrderedDict
import json
from django.contrib.auth.models import User
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.test import APIClient
from api.pagination import CustomReadOnlyModelViewSet
from api.queries.tags import get_samples_by_tag
from sample.models import Sample
TEST_TAG = 'public-5'  # tag whose samples drive every smoke test below
class TestsViewSet(CustomReadOnlyModelViewSet):
    """Self-test endpoints that exercise the public API end to end.
    Each ``test_*`` route replays a real API call (authenticated as the
    ``ena`` user) against the samples tagged TEST_TAG and relays the
    upstream response.
    """
    authentication_classes = ()
    permission_classes = ()
    throttle_classes = ()
    # NOTE(review): these class attributes run at import time, including
    # a database query for the 'ena' user.
    client = APIClient()
    user = User.objects.get(username='ena')
    queryset = ''
    def __init__(self, *args, **kwargs):
        super(TestsViewSet, self).__init__(*args, **kwargs)
        self.client.force_authenticate(user=self.user)
        self.samples = [s['sample_id']
                        for s in get_samples_by_tag(TEST_TAG, is_id=False)]
        # Entries ending in 'Related' are section headers, not endpoints.
        self.endpoints = [
            'Connection Related',
            'test_status',
            # Single Sample
            'Single Sample Related',
            'test_sample',
            'test_quality',
            'test_assembly',
            'test_contig',
            'test_gene',
            'test_indel',
            'test_snp',
            'test_mlst',
            'test_sccmec_primer',
            'test_sccmec_primer_predict',
            'test_sccmec_subtype',
            'test_sccmec_subtype_predict',
            # Multiple Samples
            'Multiple Sample Related',
            'test_samples',
            'test_qualities',
            'test_assemblies',
            'test_indels',
            'test_snps',
            'test_mlsts',
            'test_sccmec_primers',
            'test_sccmec_primers_predict',
            'test_sccmec_subtypes',
            'test_sccmec_subtypes_predict',
        ]
    # Issue a GET through the test client; return [parsed body, status].
    def __get(self, url):
        response = self.client.get(url)
        return [json.loads(response.content), response.status_code]
    # Issue a POST with a sample-id list; return [parsed body, status].
    def __post(self, url, ids):
        response = self.client.post(url, {'ids': ids}, format='json')
        return [json.loads(response.content), response.status_code]
    def list(self, request):
        """
        Stored metadata information for a given sample.
        """
        base_url = request.build_absolute_uri()
        urls = OrderedDict()
        for endpoint in self.endpoints:
            if endpoint.endswith('Related'):
                urls[endpoint] = ''
            else:
                urls[endpoint] = '{0}{1}/'.format(base_url, endpoint)
        return Response(urls)
    @list_route(methods=['get'])
    def test_status(self, request):
        response = self.client.get('/api/status/')
        return Response(json.loads(response.content),
                        status=response.status_code)
    @list_route(methods=['get'])
    def test_samples(self, request):
        data, status = self.__get(f'/api/sample/?tag={TEST_TAG}')
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sample(self, request):
        sample = self.samples[0]
        data, status = self.__get(f'/api/sample/{sample}/')
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_quality(self, request):
        url = '/api/sample/{0}/qc/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_qualities(self, request):
        url = '/api/sequence-quality/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_assembly(self, request):
        url = '/api/sample/{0}/assembly/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_assemblies(self, request):
        url = '/api/assembly/stat/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_contig(self, request):
        url = '/api/sample/{0}/contigs/?contig=30'.format(
            self.samples[0]
        )
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_gene(self, request):
        url = '/api/sample/{0}/genes/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status,
                                       limit=1)
    @list_route(methods=['get'])
    def test_indel(self, request):
        url = '/api/sample/{0}/indels/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status,
                                       limit=10)
    @list_route(methods=['get'])
    def test_indels(self, request):
        url = '/api/variant/indel/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status,
                                       limit=10)
    @list_route(methods=['get'])
    def test_snp(self, request):
        url = '/api/sample/{0}/snps/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status,
                                       limit=10)
    @list_route(methods=['get'])
    def test_snps(self, request):
        url = '/api/variant/snp/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status,
                                       limit=10)
    @list_route(methods=['get'])
    def test_mlst_srst2(self, request):
        url = '/api/sample/{0}/st_srst2/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_mlst_srst2_bulk(self, request):
        url = '/api/mlst/srst2/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_mlst(self, request):
        url = '/api/sample/{0}/st/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_mlsts(self, request):
        url = '/api/mlst/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_primer(self, request):
        url = '/api/sample/{0}/sccmec_primers/'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_primer_predict(self, request):
        url = '/api/sample/{0}/sccmec_primers/?predict'.format(self.samples[0])
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_primers(self, request):
        url = '/api/sccmec/primer/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_primers_predict(self, request):
        url = '/api/sccmec/primer/bulk_by_sample/?predict'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_subtype(self, request):
        url = '/api/sample/{0}/sccmec_subtypes/'.format(
            self.samples[0]
        )
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_subtype_predict(self, request):
        url = '/api/sample/{0}/sccmec_subtypes/?predict'.format(
            self.samples[0]
        )
        data, status = self.__get(url)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_subtypes(self, request):
        url = '/api/sccmec/subtype/bulk_by_sample/'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
    @list_route(methods=['get'])
    def test_sccmec_subtypes_predict(self, request):
        url = '/api/sccmec/subtype/bulk_by_sample/?predict'
        data, status = self.__post(url, self.samples)
        return self.formatted_response(data['results'], status=status)
| staphopia/staphopia-web | api/tests.py | tests.py | py | 9,077 | python | en | code | 4 | github-code | 13 |
74187775697 | """
Django settings for macPay project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
import envvars
envvars.load()
SECRET_KEY = envvars.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
THIRD_PARTY_APPS = (
    'envvars',
)
LOCAL_APPS = (
    'apps.macpayuser',
    'apps.computer',
    'apps.payment',
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
)
INSTALLED_APPS += THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'macPay.urls'
WSGI_APPLICATION = 'macPay.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# NOTE(review): no default database is configured here; presumably an
# environment-specific settings module fills this in -- confirm.
DATABASES = {
    'default': {}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    'static',
)
TEMPLATE_DIRS = (
    'templates',
)
# Skilltree integration endpoint and credential.
SKILLTREE_API_URL = "http://skilltree.andela.co/api/v1/users"
# SECURITY WARNING (review): hard-coded API token committed to source
# control; move it into envvars like SECRET_KEY above.
X_AUTH_TOKEN = "txPFk-ppyzzI0f6iAoF3jC3amGUosLsabznp26gUxn0"
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)
| emmanuel-isaac/macPay | macPay/settings/base.py | base.py | py | 2,490 | python | en | code | 1 | github-code | 13 |
26654171798 | import cv2
import numpy as np
def parse_vgg(l):
    """Parse VGG-annotator CSV lines into ``{image_name: [coord tuples]}``.

    Each line looks like ``image,<label>,x1,y1,x2,y2,...``; the first field
    is the image name and fields from index 2 onward are integer
    coordinates.

    Returns the mapping plus the image name of the last parsed line (kept
    for backward compatibility with existing callers); the latter is None
    for empty input instead of raising NameError.

    Fix: coordinates are materialized as tuples instead of one-shot
    generator objects, so callers can read each point more than once.
    """
    parsed = {}
    imagename = None
    for line in l:
        fields = line.strip().split(",")
        imagename = fields[0]
        coords = tuple(int(x) for x in fields[2:])
        # Group all point rows belonging to the same image together.
        parsed.setdefault(imagename, []).append(coords)
    return parsed, imagename
# imagelist = "MabiniD1_align.csv"
imagelist = "B_D5_v1.csv"  # VGG-annotator CSV: image,label,x1,y1,...
# foldername = imagelist.split("_")[0]
foldername = "Bol_D5"  # output directory prefix (<foldername>_translate/)
dst_points = []
startframe =""
# mat = []
# Align every frame to the first frame using a rigid transform estimated
# from the annotated point correspondences.
with open(imagelist) as imagefnames:
    filenames = imagefnames.readlines()
    # Frame id of the first image, used as the registration reference.
    startframe = filenames[0].split(",")[0].replace(".jpg","").split("_")[-1]
    res,imagedir = parse_vgg(filenames)
    for k in sorted(res.keys()):
        points = np.float32([tuple(x) for x in res[k]])
        if startframe in k:
            # Reference frame: remember its points, nothing to warp.
            dst_points = points
            continue
        else:
            img = cv2.imread(k)
            print(k)
            rows,cols,z = img.shape
            src_points = points
            # Partial affine: rotation + translation + uniform scale.
            transformation_rigid_matrix, rigid_mask = cv2.estimateAffinePartial2D(src_points,dst_points)
            dst = cv2.warpAffine(img,transformation_rigid_matrix,(cols,rows))
            img_bw = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            corr_bw = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)
            # if len(mat) == 0:
            #     mat = dst
            # else:
            #     mat+=dst
            # Save the aligned frame and a grayscale difference image.
            cv2.imwrite(foldername+"_translate/"+k.split("/")[1],dst)
            cv2.imwrite(foldername+"_translate/BW/"+k.split("/")[1],img_bw-corr_bw)
            # cv2.imwrite(foldername+"_translate/SUM_"+k.split("/")[1],mat)
# for imagename in imagefnames:
#     imagename = imagename.strip()
#     imagename = imagename.split(",")
#     imagedir = imagename[0]
#     coords = imagename[1:]
#     points = np.float32([(int(x),int(coords[idx+1])) for idx,x in enumerate(coords) if idx%2 ==0])
#     if "000" in imagedir:
#         dst_points = points
#         continue
#     else:
#         img = cv2.imread(imagedir)
#         rows,cols,z = img.shape
#         src_points = points
#         transformation_rigid_matrix, rigid_mask = cv2.estimateAffinePartial2D(src_points,dst_points)
#         dst = cv2.warpAffine(img,transformation_rigid_matrix,(cols,rows))
# cv2.imwrite("translate/"+imagedir.split("/")[1],dst) | rizarae-p/reef-stitching | translate.py | translate.py | py | 2,045 | python | en | code | 0 | github-code | 13 |
73564100816 | import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
# this is the path to the Iowa data that you will use
main_file_path = '../input/house-prices-advanced-regression-techniques/train.csv'
data = pd.read_csv(main_file_path)  # full Kaggle training table
def score_dataset(X_train, X_test, y_train, y_test):
    """Fit a default RandomForestRegressor on the training split and
    return the mean absolute error of its predictions on the test split."""
    model = RandomForestRegressor()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    return mean_absolute_error(y_test, predictions)
y = data.SalePrice
iowa_prediciones=data.drop(['SalePrice'],axis=1)
# Keep only numeric predictors (object/string columns dropped).
iowa_prediciones_numericas = iowa_prediciones.select_dtypes(exclude=['object'])
X_train, X_test, y_train, y_test = train_test_split(iowa_prediciones_numericas,
                                                    y,
                                                    train_size=0.7,
                                                    test_size=0.3,
                                                    random_state=0)
# Strategy 1: drop every column with any missing value.
cols_with_missing = [col for col in X_train.columns
                     if X_train[col].isnull().any()]
redued_original_data = X_train.drop(cols_with_missing, axis=1)
reduced_test_data = X_test.drop(cols_with_missing, axis=1)
print("El error absoluto al eliminar las columnas con datos vacios es:")
print(score_dataset(redued_original_data, reduced_test_data,y_train, y_test))
# Strategy 2: impute the missing values.
my_imputer = Imputer()
imputed_X_train = my_imputer.fit_transform(X_train)
imputed_X_test = my_imputer.transform(X_test)
print("El error absoluto con imputacion es:")
print(score_dataset(imputed_X_train,imputed_X_test,y_train,y_test))
# Strategy 3: impute, plus boolean "_was_missing" indicator columns.
imputed_X_train_plus = X_train.copy()
imputed_X_test_plus = X_test.copy()
cols_with_missing = (col for col in X_train.columns
                     if X_train[col].isnull().any())
for col in cols_with_missing:
    imputed_X_train_plus[col + '_was_missing'] = imputed_X_train_plus[col].isnull()
    imputed_X_test_plus[col + '_was_missing'] = imputed_X_test_plus[col].isnull()
my_imputer = Imputer()
imputed_X_train_plus = my_imputer.fit_transform(imputed_X_train_plus)
imputed_X_test_plus = my_imputer.transform(imputed_X_test_plus)
print("El error absoluto del ultimo metodo es: ")
print(score_dataset(imputed_X_train_plus,imputed_X_test_plus,y_train,y_test))
| jaimecuellar14/MachineLearning | HandlingMissingValues.py | HandlingMissingValues.py | py | 2,453 | python | en | code | 0 | github-code | 13 |
22268826285 | # path/to/your/python/script.py
import sys
# Read the two operands from the command line (argv[0] is the script path).
parameter1 = sys.argv[1]
parameter2 = sys.argv[2]
# Compute the sum; int() raises ValueError for non-numeric input.
result = int(parameter1) + int(parameter2)
# Write the result to stdout so the calling process can capture it.
print(result)
| matriz23/KeChengSheJi_Group2 | 后端/pyscripts/func_evaluate.py | func_evaluate.py | py | 217 | python | ja | code | 0 | github-code | 13 |
30258053564 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
    """Actor (Policy) Model: maps states to actions in [0, 1)."""

    def __init__(self, state_size, action_size, seed, fc_units_1=32, fc_units_2=32):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc_units_1 (int): Number of nodes in first hidden layer
            fc_units_2 (int): Number of nodes in second hidden layer
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc_units_1)
        self.ln_1 = nn.LayerNorm(fc_units_1)
        self.fc2 = nn.Linear(fc_units_1, fc_units_2)
        self.ln_2 = nn.LayerNorm(fc_units_2)
        self.fc3 = nn.Linear(fc_units_2, action_size)
        self.ln_3 = nn.LayerNorm(action_size)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        x = self.ln_1(F.relu(self.fc1(state)))
        x = self.ln_2(F.relu(self.fc2(x)))
        x = self.ln_3(F.relu(self.fc3(x)))
        # FIX: F.tanh is deprecated (emits a warning and was scheduled for
        # removal); torch.tanh is the supported equivalent.
        # NOTE(review): the relu before tanh limits outputs to [0, 1) —
        # confirm this matches the environment's action range.
        return torch.tanh(x)
class Critic(nn.Module):
    """Critic (Value) Model: maps (state, action) pairs to Q-values."""

    def __init__(self, state_size, action_size, seed, fcs1_units=256, fc_units_2=256, fc_units_3=128, fc_units_4=64):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fcs1_units (int): Number of nodes in the first hidden layer
            fc_units_2 (int): Number of nodes in the second hidden layer
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        # State-only pathway; the action joins the network before fc2.
        self.fcs1 = nn.Linear(state_size, fcs1_units)
        self.ln_1 = nn.LayerNorm(fcs1_units)
        self.fc2 = nn.Linear(fcs1_units + action_size, fc_units_2)
        self.ln_2 = nn.LayerNorm(fc_units_2)
        self.fc3 = nn.Linear(fc_units_2, fc_units_3)
        self.ln_3 = nn.LayerNorm(fc_units_3)
        self.fc4 = nn.Linear(fc_units_3, fc_units_4)
        self.ln_4 = nn.LayerNorm(fc_units_4)
        self.fc5 = nn.Linear(fc_units_4, 1)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        state_features = self.ln_1(F.relu(self.fcs1(state)))
        merged = torch.cat((state_features, action), dim=1)
        hidden = self.ln_2(F.relu(self.fc2(merged)))
        hidden = self.ln_3(F.relu(self.fc3(hidden)))
        hidden = self.ln_4(F.relu(self.fc4(hidden)))
        return self.fc5(hidden)
| Axel-Bravo/19_udacity_drlnd | 3_007_Project_Continuous_Control/model.py | model.py | py | 2,798 | python | en | code | 2 | github-code | 13 |
28942285793 | from sweetpea import *
# Three hand-built Stroop-style trial sequences; each dict maps a factor
# name to its per-trial levels.  Consumed by the auto-correlation tests.
samples = [
    {
        'color': ['red', 'green', 'red', 'green', 'red', 'green'],
        'word': ['red', 'green', 'red', 'green', 'red', 'red'],
        'congruency': ['con', 'con', 'inc', 'con', 'inc', 'con']
    },
    {
        'color': ['red', 'green', 'red', 'green', 'red', 'green'],
        'word': ['red', 'red', 'green', 'red', 'red', 'green'],
        'congruency': ['con', 'con', 'con', 'inc', 'con', 'con']
    },
    {
        'color': ['green', 'red', 'green', 'red', 'green', 'red'],
        'word': ['green', 'red', 'red', 'red', 'green', 'red'],
        'congruency': ['con', 'inc', 'con', 'con', 'con', 'inc']
    }]
def test_score_auto_correlation_all():
    """Without a factor filter, every factor gets an auto-correlation score."""
    # Skip silently when scikit-learn (an optional dependency) is absent.
    try:
        from sklearn.neural_network import MLPClassifier  # noqa: F401
    except ImportError:
        return
    scores = auto_correlation_scores_samples_between(samples)
    for factor in ('color', 'word', 'congruency'):
        assert factor in scores.keys()
def test_score_auto_correlation():
    """With an explicit factor list, only the requested factors are scored."""
    # Skip silently when scikit-learn (an optional dependency) is absent.
    try:
        from sklearn.neural_network import MLPClassifier  # noqa: F401
    except ImportError:
        return
    scores = auto_correlation_scores_samples_between(samples, ['color'])
    assert 'color' in scores.keys()
    assert 'word' not in scores.keys()
    assert 'congruency' not in scores.keys()
| sweetpea-org/sweetpea-py | acceptance/test_auto_correlation_score.py | test_auto_correlation_score.py | py | 1,446 | python | en | code | 10 | github-code | 13 |
10526352013 | #Universidad de el Salvador - GUI1
#GarciaHernandez_CarlosEduardo GH17045
#Determinar la mediana de tres numeros
# Read the three integers whose median will be computed below.
n1= int(input("Ingrese el primer numero"))
n2= int(input("Ingrese el segundo numero"))
n3= int(input("Ingrese el tercer numero"))
#creando funcion
def calcular_mediana(a, b, c):
    """Devuelve la mediana (el valor central) de tres numeros."""
    # Sorting the three values makes the middle element the median,
    # which replaces the original chain of pairwise comparisons.
    return sorted((a, b, c))[1]
# Report the median of the three numbers read above.
print("\nla mediana ingresada es: ",calcular_mediana(n1, n2, n3))
#Determinando si el numero es un primo
n = int(input("Ingrese un numero: "))
def evaluar_primo(n):
    """Devuelve True si ``n`` es primo y False en caso contrario.

    Correcciones sobre la version original:
    - n <= 1 ya no se reporta como primo (el original devolvia True
      para n == 0 y n == 1).
    - Usa division de prueba solo hasta la raiz cuadrada, O(sqrt(n))
      en lugar de recorrer todos los valores hasta n.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        # 2 es el unico primo par.
        return n == 2
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
# Report whether the number read above is prime.
if (evaluar_primo(n)==True):
    print("el numero es primo")
else:
    print("el numero no es primo")
13985382663 | l = ["margareta", "crizantema","lalea"," zorea , violeta, orhidee","trandafir","gerbera , iasomie","iris","crin "]
# 1
def add():
    # Read a flower name; if it already exists, move it to the end,
    # otherwise just append it (mutates the global list `l`).
    s = input()
    if s in l:
        l.remove(s)
    l.append(s)
add()
print(l)
# 2
# Normalize the list: split multi-name entries ("a , b, c") into
# individual comma-stripped names, rebuilding `l` in place.
for i in range(0, len(l)):
    el = l[0].split()
    l.remove(l[0])
    for j in el:
        if j != ",":
            l.append(j.strip(","))
print(l)
# 3
def make_list(c, li):
    """Return the elements of ``li`` that contain the substring ``c``."""
    return [item for item in li if c in item]
# Names containing the letter "l".
l_ch = make_list("l", l)
print(l_ch)
# 4
# Print the list sorted ascending, then descending (in-place sorts).
l.sort()
print(l)
l.sort(reverse=True)
print(l)
| Loila11/fmi | Licenta 2/AI/lab1/ex8.py | ex8.py | py | 590 | python | en | code | 0 | github-code | 13 |
39817490513 | import requests
from bs4 import BeautifulSoup
import time
from pymongo import MongoClient
class Events:
    """Scraper that polls education.github.com/events, stores new events
    in MongoDB and forwards them to a configurable HTTP endpoint.

    NOTE(review): relies on the exact CSS classes of the GitHub Education
    events page; a page redesign silently breaks `get_events`.
    """
    def __init__(self):
        # Initialize MongoDB connection
        self.client = MongoClient('mongodb://your_username:your_password@localhost:27017')
        self.db = self.client['your_database_name']
        self.events_collection = self.db['events']
    def get_events(self):
        """Scrape the events page and return a list of event dicts,
        or None if the request/parsing fails for any reason."""
        url = "https://education.github.com/events"
        events_data = {"events": []}
        try:
            res = requests.get(url)
            soup = BeautifulSoup(res.text, "html.parser")
            events = soup.find_all("a", class_="d-flex js-landing-page-link event-card")
            for e in events:
                tags_list = []
                title = e.find("h3", class_="h5").getText().strip()
                img = e.find("img")["src"]
                # Description is optional on the page; fall back to "".
                try:
                    desc = (
                        e.find("p", class_="my-3 short-event color-fg-muted")
                        .getText()
                        .strip()
                    )
                except:
                    desc = ""
                # First two muted paragraphs are date and location.
                base = e.find_all("p", class_="color-fg-muted text-small")
                date = base[0].getText().strip()
                loc = base[1].getText().strip()
                lang = (
                    e.find("p", class_="color-fg-muted text-small mb-3")
                    .getText()
                    .strip()
                )
                labels = e.find_all(
                    "span", class_="Label--small Label--blue-standard mr-2"
                )
                for l in labels:
                    tags_list.append(l.getText().strip())
                link = e["href"]
                events_data["events"].append(
                    {
                        "title": title,
                        "image_url": img,
                        "description": desc,
                        "date": date,
                        "location": loc,
                        "language": lang,
                        "tags": tags_list,
                        "link": link,
                    }
                )
            return events_data["events"]
        except:
            # NOTE(review): bare except hides all errors, including bugs;
            # callers only see None.
            return None
    def check_and_send_events(self):
        """
        Periodically check for new events and send them to an endpoint.
        Loops forever (10 min interval); any exception posts a "Bot Down"
        notice and stops the loop.
        """
        while True:
            try:
                current_events = self.get_events()
                # Check for new events
                new_events = [event for event in current_events if not self.events_collection.find_one(event)]
                if new_events:
                    print("New events found:")
                    for event in new_events:
                        pass # You can perform any specific actions here for new events
                    print(new_events)
                    # Replace this placeholder with your actual endpoint URL
                    ENDPOINT_URL = 'YOUR_ENDPOINT_URL_HERE'
                    try:
                        response = requests.post(ENDPOINT_URL, json=new_events)
                        response.raise_for_status() # Raise an exception if the request fails (e.g., 4xx or 5xx status codes)
                        print("Successfully sent new events to the endpoint.")
                    except requests.exceptions.RequestException as request_error:
                        print("Error sending events to the endpoint:", request_error)
                    # Insert new events into MongoDB
                    self.events_collection.insert_many(new_events)
                else:
                    print("No new events")
            except Exception as e:
                print("Error:", e)
                notice = {"title": "Bot Down", "link": "", "mode": "", "Date": ""}
                # Replace this placeholder with your actual endpoint URL
                ENDPOINT_URL = 'YOUR_ENDPOINT_URL_HERE'
                response = requests.post(ENDPOINT_URL, json=notice)
                print(response.text)
                break
            time.sleep(600)  # Sleep for 10 minutes (adjust as needed)
# Create an instance of the Events class and start checking for new events
# NOTE(review): runs at import time and blocks forever in the polling loop.
events_instance = Events()
events_instance.check_and_send_events()
| RumbleJack56/web_scrapper_hf23 | Github Education/main.py | main.py | py | 4,410 | python | en | code | null | github-code | 13 |
8951583079 | from flask import Flask, request, Response, jsonify
from flask_cors import CORS, cross_origin
import random
import re
# Flask application with permissive CORS on every route.
app = Flask(__name__)
CORS(app)
app.config['CORS_HEADERS'] = '*'
@app.route('/detectpropaganda', methods = ['POST'])
def detect_propaganda():
    """Dummy endpoint: returns a random confidence and a boolean verdict
    (True when confidence >= 0.5) for the POSTed text body."""
    body = request.data.decode("utf-8")
    # Split on sentence terminators purely for the debug length print.
    sentences = re.split('\?|\.|!|\n', body)
    print(body)
    print(len(sentences))
    confidence = random.uniform(0, 1)
    verdict = {'confidence': confidence, 'result': confidence >= 0.5}
    return jsonify(verdict)
@app.after_request
def add_headers(response):
    """Attach permissive CORS headers to every outgoing response."""
    for header_name in ('Access-Control-Allow-Origin', 'Access-Control-Allow-Headers'):
        response.headers.add(header_name, '*')
    return response
# Run the development server on all interfaces, port 80 (needs privileges).
if __name__ == '__main__':
    app.run(host= '0.0.0.0', port=80, debug = False)
39859979061 |
class MotorVehicle():
    """A simple vehicle record with class-level defaults for color and
    engine capacity shared by all instances."""

    color = 'black'
    engineCapacity = '100'

    def __init__(self, name, fuelType, yearOfManufacture):
        self.name = name
        self.fuelType = fuelType
        self.yearOfManufacture = yearOfManufacture

    def displayCarDetails(self):
        """Print the vehicle's details, one field per line."""
        details = ("Name: {}\nfuelType: {}\n"
                   "Year of Manufacture: {}\n"
                   "Color: {}\n"
                   "Engine Capacity: {}\n")
        print(details.format(self.name, self.fuelType, self.yearOfManufacture,
                             MotorVehicle.color, MotorVehicle.engineCapacity))
73492570576 | from collections import deque
class Node:
    """A binary-tree node holding ``data`` and optional child links."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def BuildTree(s):
    """Build a binary tree from a space-separated level-order string.

    The token 'N' marks a missing child.  Returns the root Node, or
    None for an empty/null tree.
    """
    tokens = s.split()
    if not tokens or tokens[0] == 'N':
        return None
    root = Node(int(tokens[0]))
    pending = deque([root])
    idx = 1
    while pending and idx < len(tokens):
        node = pending.popleft()
        # Attach the left child, if present.
        if tokens[idx] != 'N':
            node.left = Node(int(tokens[idx]))
            pending.append(node.left)
        idx += 1
        if idx >= len(tokens):
            break
        # Attach the right child, if present.
        if tokens[idx] != 'N':
            node.right = Node(int(tokens[idx]))
            pending.append(node.right)
        idx += 1
    return root
def FlattenBinaryTree(root):
    """Flatten the tree in place into a right-skewed preorder list.

    Morris-style rewiring with O(1) extra space: the right subtree is
    hooked after the rightmost node of the left subtree, then the left
    subtree is shifted to the right.  Returns the head (original root).
    """
    node = root
    while node is not None:
        if node.left is not None:
            rightmost = node.left
            while rightmost.right is not None:
                rightmost = rightmost.right
            rightmost.right = node.right
            node.right = node.left
            node.left = None
        node = node.right
    return root
def Inorder(root):
    """Print the inorder traversal (left, root, right) on one line."""
    stack = []
    node = root
    while stack or node is not None:
        # Walk as far left as possible, then emit and move right.
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        print(node.data, end=" ")
        node = node.right
def main():
    # Read a level-order tree string, print its inorder traversal,
    # flatten it, then print the inorder of the flattened list
    # (which equals the original preorder).
    s=input()
    root=BuildTree(s)
    Inorder(root)
    print()
    head=FlattenBinaryTree(root)
    Inorder(head)
    print()
if __name__=='__main__':
    main()
71702220499 | import codecs
from hacktools import common, ws
# The value in pixels that we can fit before wordwrapping
wordwrap = 206
wordwrap_angel = 136
# Speaker codes
speakercodes = {
0x00: 'Shigeru',
0x02: 'Asuka',
0x04: 'Fuyutsuki',
0x06: 'Gendo',
0x08: 'Makoto',
0x0a: 'Hikari',
0x0c: 'Kaji',
0x0e: 'Kensuke',
0x10: 'Maya',
0x12: 'Misato',
0x14: 'PenPen',
0x16: 'Rei',
0x18: 'Ritsuko',
0x1a: 'Shinji',
0x1c: 'Teacher',
0x1e: 'Toji',
}
# Ranges of strings in other bank files
fileranges = {
"bank_14.bin": [
(0x8536, 0x86a6),
(0xb715, 0xb909, 2),
(0x94a2, 0x9990),
(0xa380, 0xa388),
(0xa390, 0xa396),
(0xa3a0, 0xa3aa),
(0xa3b0, 0xa3b6),
(0xa3c0, 0xa3c6),
(0x5384, 0x5388),
(0x5394, 0x539d),
(0x53a4, 0x53ad),
(0x53b4, 0x53bd),
(0x53c4, 0x53cd),
(0x5640, 0x5643),
(0x5650, 0x5659),
(0x5660, 0x5669),
(0x5670, 0x5679),
(0x5680, 0x5689),
(0xdd74, 0xdd9a),
(0xddd8, 0xddf2),
(0xdece, 0xdef2),
(0xdf30, 0xdf44),
(0xdfc0, 0xdfd6),
(0xe020, 0xe034),
(0xe4f9, 0xe603),
],
"bank_1d.bin": [
(0xce70, 0xdad0),
]
}
# Script opcodes
# The comments are the addresses in the jump table that is used for script opcodes
# A value of -1 means the size is not known
opcodes = {
0x00: 2, # 0x7452 ?
0x02: 2, # 0x7468 ?
0x04: 2, # 0x748f ?
0x06: 2, # 0x7496 ?
0x08: 0, # 0x749d ?
0x0a: 0, # 0x74c9 String, read words until 0xffff
0x0c: 0, # 0x74d6 ?
0x0e: 0, # 0x74e3 ?
0x10: 4, # 0x74ed Portrait
0x12: 0, # 0x750a ?
0x14: 2, # 0x7513 ?
0x16: 0, # 0x7576 ?
0x18: 2, # 0x75E6 ?
0x1a: 0, # 0x764D Jumps to 0x16
0x1c: 0, # 0x7650 ?
0x1e: 0, # 0x76BC ?
0x20: 0, # 0x7723 ?
0x22: 5, # 0x77A2 ?
0x24: 7, # 0x77BD Jump?
0x26: 2, # 0x77EC ?
0x28: 4, # 0x77FF ?
0x2a: 4, # 0x7823 ?
0x2c: 4, # 0x7847 ?
0x2e: 4, # 0x786B ?
0x30: 3, # 0x788F ?
0x32: 4, # 0x78AC ?
0x34: 4, # 0x78C7 ?
0x36: 4, # 0x78E2 Jump?
0x38: 2, # 0x78FD ?
0x3a: 8, # 0x790B ?
0x3c: 4, # 0x793F ?
0x3e: 0, # 0x7987 ?
0x40: 6, # 0x7993 Jump?
0x42: 0, # 0x79B9 ?
0x44: 4, # 0x79C5 Jump?
0x46: 0, # 0x79DF ?
0x48: 2, # 0x79FD Jump?
0x4a: 4, # 0x7A07 Choice jump?
0x4c: 4, # 0x7AF4 2 jumps?
0x4e: 1, # 0x7CBA ?
0x50: 1, # 0x7CCC ?
0x52: 0, # 0x7CE0 ? call 7c2c and read bytes until 0xff
0x54: 0, # 0x7CEA ? call 7c41 and read bytes until 0xff
0x56: 0, # 0x7CF4 ? +3, call 7c58 and read bytes until 0xff
0x58: 3, # 0x7D09 Jump?
0x5a: 3, # 0x7D27 Operator jump?
0x5c: 3, # 0x7D44 Jump?
0x5e: 3, # 0x7D61 Jump?
0x60: 4, # 0x7D7D Jump?
0x62: 1, # 0x7D9D ?
0x64: 1, # 0x7DC9 ?
0x66: 3, # 0x7E16 Jump?
0x68: 3, # 0x7E37 Jump?
0x6a: 5, # 0x7E58 Jump?
0x6c: 8, # 0x7E86 Jump?
0x6e: 3, # 0x7EBE Jump?
0x70: 1, # 0x7EDC ?
0x72: 2, # 0x7F01 ?
0x74: 2, # 0x7F24 ?
0x76: 2, # 0x7F2E ?
0x78: 2, # 0x7F3E ?
0x7a: 0, # 0x7F4E ? +6, call 7c58 and read bytes until 0xff
0x7c: 0, # 0x7FBA ? +3, call 7c2c and read bytes until 0xff
0x7e: 0, # 0x7FEE ? +2, call 7c41 and read bytes until 0xff
0x80: 0, # 0x8018 ? +2, call 7c41 and read bytes until 0xff
0x82: 1, # 0x803D ?
0x84: 1, # 0x8052 ?
0x86: 2, # 0x8067 ?
0x88: 2, # 0x807D ?
0x8a: 2, # 0x8093 ?
0x8c: 2, # 0x80A9 ?
0x8e: 0, # 0x80BF ?
0x90: 3, # 0x80DA Jump?
0x92: 4, # 0x80F8 Jump?
0x94: 4, # 0x8113 Jump?
0x96: 4, # 0x812E Jump?
0x98: 2, # 0x8149 Jump?
0x9a: 6, # 0x8162 ?
0x9c: 2, # 0x818D ?
0x9e: 4, # 0x81A0 ?
0xa0: 0, # 0x81B7 ?
0xa2: 4, # 0x81C6 ?
0xa4: 0, # 0x81DD ?
0xa6: 0, # 0x81EC ?
0xa8: 5, # 0x81FB ?
0xaa: 0, # 0x8219 ?
0xac: 0, # 0x8226 ?
# The ones below are not actually used in any script
0xae: 1, # 0x8233 ?
0xb0: 1, # 0x8243 ?
0xb2: 2, # 0x8253 ?
0xb4: 1, # 0x8261 ?
0xb6: 2, # 0x8273 ?
0xb8: 2, # 0x8281 ?
0xff: 1, # END, 0xffff
}
# Repeat opcodes
repopcodes = [0x52, 0x54, 0x56, 0x7a, 0x7c, 0x7e, 0x80]
# Pointer opcodes with list of pointer offsets after the opcode
ptropcodes = {
0x24: [5],
0x36: [2],
0x40: [(4, 2)],
0x44: [(0, 2)],
0x48: [0],
0x4a: [0],
0x4c: [0, 2],
0x56: [0],
0x58: [0],
0x5a: [0],
0x5c: [0],
0x5e: [0],
0x60: [0],
0x66: [0],
0x68: [0],
0x6a: [0],
0x6c: [0],
0x6e: [0],
0x7a: [0],
0x90: [0],
0x92: [0],
0x94: [0],
0x96: [0],
0x98: [0],
}
# A list of hardcoded script pointers found in the binary file
# Format: (offset, ptr value, bank value)
# Mostly "mov word ptr ds:0x1697, value" opcodes
# and "mov ax, value / mov word ptr ds:0x149f, ax"
binptrs = [
(0x2da3+1, 0x788f, 0xf2),
(0x2dc1+1, 0x788f, 0xf2),
(0x30b7+1, 0x8e2f, 0xf2),
(0x30c1+1, 0x8f5d, 0xf2),
(0x441b+1, 0xa8d0, 0xf2),
(0x445a+1, 0xa99d, 0xf2),
(0x4499+1, 0xaa60, 0xf2),
(0x44d2+1, 0xaad7, 0xf2),
(0x44e4+1, 0xabaa, 0xf2),
(0x45ac+1, 0x938c, 0xf2),
(0x4646+1, 0x9031, 0xf2),
(0x46b1+1, 0x9296, 0xf2),
(0x46eb+1, 0x92e2, 0xf2),
(0x4703+1, 0x934b, 0xf2),
(0x47ae+1, 0xb4ba, 0xf2),
(0x4860+1, 0x93f8, 0xf2),
(0x5a73+4, 0xa69f, 0xf2),
(0x5a99+4, 0xa6f5, 0xf2),
(0x5abf+4, 0xa88e, 0xf2),
(0x8d29+1, 0x0f34, 0xf1),
(0xf029+4, 0x94cd, 0xf2),
(0xf065+4, 0x94d6, 0xf2),
(0xf12a+4, 0x970f, 0xf2),
(0xf1de+4, 0x981a, 0xf2),
(0xf292+4, 0x99b5, 0xf2),
(0xf32c+4, 0x9b7a, 0xf2),
(0xf3fc+4, 0x9dec, 0xf2),
(0xf41b+4, 0xa069, 0xf2),
(0xf443+4, 0xa08a, 0xf2),
(0xf461+4, 0xa0f4, 0xf2),
(0xf48c+4, 0xa140, 0xf2),
(0xf4bc+4, 0xa19c, 0xf2),
(0xf4ec+4, 0xa24e, 0xf2),
(0xf520+4, 0xa2de, 0xf2),
(0xf550+4, 0xa3c0, 0xf2),
(0xf56f+4, 0xa2fc, 0xf2),
(0xf599+4, 0xa30b, 0xf2),
(0xf5b5+4, 0xa411, 0xf2),
(0xf630+4, 0xa46d, 0xf2),
(0xf673+4, 0xa625, 0xf2),
(0xf6a2+4, 0xa66e, 0xf2),
(0xf72e+4, 0xa7fa, 0xf2),
(0xf746+4, 0xa853, 0xf2),
]
def getFontData(data):
    """Load the character tables and glyph metrics from the *data* folder.

    Returns (table, invtable, ccodes, glyphs), where invtable maps each
    encoded value back to its character.
    """
    fontconfig = data + "fontconfig.txt"
    with codecs.open(data + "table_input.txt", "r", "utf-8") as tablef:
        table = common.getSection(tablef, "")
    # Invert the table: encoded value -> character (last entry wins on
    # duplicates, matching the original loop's overwrite behaviour).
    invtable = {table[char][0]: char for char in table}
    with codecs.open(data + "table.txt", "r", "utf-8") as tablef:
        ccodes = common.getSection(tablef, "")
    glyphs = readFontGlyphs(fontconfig)
    return table, invtable, ccodes, glyphs
def convertChar(b1, b2, table):
    """Decode the two-byte character code (b1 high, b2 low) via *table*.

    Returns a ``(text, unknown)`` pair:
    - (mapped string, False) when the 4-digit hex key is in *table*;
    - ("UNK(xxxx)", False) when the table maps the code to "";
    - ("", False) when the table maps the code to "!";
    - ("<xxxx>", True) when the code is absent from the table.
    """
    # Idiom fix: a format spec replaces the original's manual
    # zero-padding of each byte; output is identical 4-digit lowercase hex.
    char = "{:02x}{:02x}".format(b1, b2)
    if char in table:
        charenc = table[char][0]
        if charenc == "":
            return "UNK(" + char + ")", False
        elif charenc == "!":
            return "", False
        else:
            return charenc, False
    return "<" + char + ">", True
def checkStringStart(f, table):
    """Peek at the next two 16-bit characters and guess whether a
    script string starts at the current file position.

    Reads four bytes (little-endian char pairs), rewinds, and returns
    True only when both characters decode to something meaningful:
    known control codes (``allcodes``) count as valid, and the second
    character is also accepted when the first is the <ff06> code.
    """
    lo1 = f.readByte()
    hi1 = f.readByte()
    lo2 = f.readByte()
    hi2 = f.readByte()
    f.seek(-4, 1)
    first, first_unknown = convertChar(hi1, lo1, table)
    if first_unknown and first in allcodes:
        first_unknown = False
    second, second_unknown = convertChar(hi2, lo2, table)
    if second_unknown and (second in allcodes or first == "<ff06>"):
        second_unknown = False
    if first == "" or second == "" or first == "_" or second == "_":
        return False
    return not (first_unknown or second_unknown)
def readString(f, table, binline=False, processed=None):
    """Decode a 0xffff-terminated script string from *f* into markup text.

    Control codes become tags (<ch:n>, <sp:n>, <wt:n>, <unk1>, <name>,
    <item>, <angl>, <perc>); <ch:>, <sp:> and <wt:> consume an extra
    16-bit parameter.  *processed* optionally maps single bytes in the
    0x20-0xbf range (except 0x40/0x80) to pre-decoded text.
    NOTE(review): the *binline* parameter is unused here.
    """
    readingstr = ""
    while True:
        b2 = f.readByte()
        # Single-byte shortcut: consume only one byte for known codes.
        if processed is not None and b2 >= 0x20 and b2 != 0x40 and b2 != 0x80 and b2 < 0xc0 and b2 in processed:
            readingstr += processed[b2]
            continue
        b1 = f.readByte()
        char, _ = convertChar(b1, b2, table)
        if char == "<ffff>":
            # String terminator.
            break
        elif char == "<ff06>":
            readingstr += "<ch:" + str(f.readUShort()) + ">"
        elif char == "<ff08>":
            readingstr += "<sp:" + str(f.readUShort()) + ">"
        elif char == "<ff0a>":
            readingstr += "<wt:" + str(f.readUShort()) + ">"
        elif char == "<ff0c>":
            readingstr += "<unk1>"
        elif char == "<ff0e>":
            readingstr += "<name>"
        elif char == "<ff10>":
            readingstr += "<item>"
        elif char == "<ff12>":
            readingstr += "<angl>"
        elif char == "<ff14>":
            readingstr += "<perc>"
        else:
            readingstr += char
    return readingstr
codes = {"<ch:": 0xff06, "<sp:": 0xff08, "<wt:": 0xff0a}
singlecodes = {"unk1": 0xff0c, "name": 0xff0e, "item": 0xff10, "angl": 0xff12, "perc": 0xff14}
allcodes = ["<ff06>", "<ff08>", "<ff0a>", "<ff0c>", "<ff0e>", "<ff10>", "<ff12>", "<ff14>"]
def writeString(f, s, table, ccodes, maxlen=0, usebigrams=False):
    """Encode the markup string *s* back into the script's byte format.

    Returns the number of encoded bytes, or -1 if *maxlen* > 0 and the
    encoded length exceeds it.  Tags produced by readString are written
    back as control codes; single-byte characters come from *ccodes*
    (bigram pairs preferred when present), everything else from *table*.
    NOTE(review): the *usebigrams* parameter is unused here.
    """
    s = s.replace("~", "〜")
    # x walks the markup string; i counts encoded output bytes.
    x = i = 0
    while x < len(s):
        c = s[x]
        if c == "<" and x < len(s) - 4 and s[x:x+4] in codes:
            # Parameterized tag (<ch:>, <sp:>, <wt:>): code + ushort value.
            number = int(s[x+4:].split(">", 1)[0])
            f.writeUShort(codes[s[x:x+4]])
            f.writeUShort(number)
            x += 4 + len(str(number))
            i += 4
        elif c == "<":
            # Simple tag: named single code or a raw hex code.
            code = s[x+1:].split(">", 1)[0]
            if code in singlecodes:
                f.writeUShort(singlecodes[code])
            else:
                f.writeUShort(int(code, 16))
            x += 1 + len(code)
            i += 2
        elif c == "U" and x < len(s) - 4 and s[x:x+4] == "UNK(":
            # Unknown code round-trip: write the two bytes low-first.
            code = s[x+6] + s[x+7]
            f.writeByte(int(code, 16))
            code = s[x+4] + s[x+5]
            f.writeByte(int(code, 16))
            x += 8
            i += 2
        elif c == ">" and s[x+1] == ">":
            f.writeUShort(0xff00)
            f.writeUShort(0xff04)
            x += 1
            i += 4
        elif c == "|":
            # Line break code.
            i += 2
            f.writeUShort(0xff02)
        elif c in ccodes or ord(c) < 256:
            i += 1
            if c not in ccodes:
                common.logError("Character not found:", c, "in string", s)
                c = " "
            # Prefer a bigram entry (current + next char) when available.
            if x < len(s) - 1 and c + s[x+1] in ccodes:
                f.writeByte(int(ccodes[c + s[x+1]][0], 16))
                x += 1
            else:
                f.writeByte(int(ccodes[c][0], 16))
        else:
            # Full-width character: two bytes from the main table.
            if c in table:
                f.writeUShort(int(table[c], 16))
            else:
                f.writeUShort(0)
            i += 2
        x += 1
        if maxlen > 0 and i > maxlen:
            common.logError("Line too long", str(i) + "/" + str(len(s) - x) + "/" + str(maxlen), s)
            i = -1
            break
    return i
def detectTextCode(s, i=0):
    """Return the length of the control-code token starting at ``s[i]``.

    Recognized tokens are "<...>", "UNK(...)" and "CUS(...)"; returns 0
    when no token starts at that position.
    """
    head = s[i]
    if head == "<":
        return len(s[i:].split(">", 1)[0]) + 1
    if head in ("U", "C") and i < len(s) - 4 and s[i:i + 4] in ("UNK(", "CUS("):
        return len(s[i:].split(")", 1)[0]) + 1
    return 0
def readFontGlyphs(file):
    """Parse the fontconfig file into a {char: FontGlyph} dict.

    Each glyph gets width = configured length (0 when unset) and
    advance = width + 1.
    """
    with codecs.open(file, "r", "utf-8") as f:
        fontconfig = common.getSection(f, "")
    glyphs = {}
    for char, fields in fontconfig.items():
        width = int(fields[0]) if fields[0] != "" else 0
        glyphs[char] = common.FontGlyph(0, width, width + 1)
    return glyphs
def getBerserkMap(outfolder):
    # Hardcoded 4x4 2bpp tilemap for the "berserk" graphic; the right
    # column reuses the left tiles horizontally flipped.
    map = ws.TileMap()
    map.name = outfolder + "bank_09_03.png"
    map.width = 4
    map.height = 4
    map.bpp = 2
    map.map = [
        ws.TileData(0), ws.TileData(1), ws.TileData(2), ws.TileData(0, hflip=True),
        ws.TileData(3), ws.TileData(4), ws.TileData(5), ws.TileData(3, hflip=True),
        ws.TileData(3), ws.TileData(6), ws.TileData(7), ws.TileData(3, hflip=True),
        ws.TileData(3), ws.TileData(8), ws.TileData(9), ws.TileData(3, hflip=True),
    ]
    return map
def getRamenMap(outfolder):
    # Hardcoded 18x2 2bpp tilemap for the ramen-stand sign graphic.
    map = ws.TileMap()
    map.name = outfolder + "bank_03_01.png"
    map.width = 18
    map.height = 2
    map.bpp = 2
    map.map = [
        ws.TileData(38), ws.TileData(39), ws.TileData(40), ws.TileData(41), ws.TileData(42), ws.TileData(43), ws.TileData(40), ws.TileData(40, hflip=True), ws.TileData(40), ws.TileData(40, hflip=True),
        ws.TileData(44), ws.TileData(45), ws.TileData(40), ws.TileData(46), ws.TileData(47), ws.TileData(17), ws.TileData(18), ws.TileData(19),
        ws.TileData(52), ws.TileData(53), ws.TileData(54), ws.TileData(55), ws.TileData(56), ws.TileData(57), ws.TileData(58), ws.TileData(59), ws.TileData(60), ws.TileData(61),
        ws.TileData(62), ws.TileData(63), ws.TileData(64), ws.TileData(65), ws.TileData(66), ws.TileData(52), ws.TileData(53), ws.TileData(54),
    ]
    return map
def getLanternMap(outfolder):
    # Hardcoded 2x3 2bpp tilemap for the lantern graphic.
    map = ws.TileMap()
    map.name = outfolder + "bank_03_02.png"
    map.width = 2
    map.height = 3
    map.bpp = 2
    map.map = [
        ws.TileData(27), ws.TileData(28),
        ws.TileData(48), ws.TileData(49),
        ws.TileData(67), ws.TileData(68),
    ]
    return map
| Illidanz/ShitoTranslation | game.py | game.py | py | 13,959 | python | en | code | 3 | github-code | 13 |
8764641611 | soma = 0
# Accumulators: age sum, oldest man's age/name, count of women under 20.
soma = 0
maioridade = 0
maisvelho = ' '
mulher = 0
TOTAL_PESSOAS = 2  # number of people read by the loop below
for c in range(1, TOTAL_PESSOAS + 1):
    print(f'===== PESSOA N°{c} =====')
    nome = input('Nome: ')
    sexo = input('Sexo (M/F): ').upper().strip()
    idade = int(input('Idade: '))
    soma = idade + soma
    print(' ')
    # BUGFIX: the original only seeded the oldest man when person #1 was
    # male, and a typo ('mairvelho') meant the name was never updated for
    # later people; this single condition handles every case.
    if sexo == 'M' and idade > maioridade:
        maioridade = idade
        maisvelho = nome
    if sexo == 'F' and idade < 20:
        mulher = mulher + 1
# BUGFIX: the original divided by 4 even though only TOTAL_PESSOAS
# people were read, so the reported average was wrong.
print(f'A média de idade do grupo é {soma/TOTAL_PESSOAS}')
print('O homem mais velho tem {} e se chama {}'.format(maioridade
                                                       ,maisvelho))
print('Existem {} mulheres com menos de 20 anos na lista'.format(mulher))
13897231359 | import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy.stats import norm
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
# Synthetic data: 500 x values in [0, 1) and y = x^2 plus Gaussian
# noise with sigma = 0.1.
x = np.arange(0, 1, 0.002)
y = norm.rvs(0, size=500, scale=0.1)
y = y + x**2
def rmse(y_test, y):
    """Root-mean-square error between predictions and targets.

    FIX: uses numpy directly — ``scipy.sqrt``/``scipy.mean`` were
    deprecated aliases of the numpy functions and are removed from the
    top-level scipy namespace in modern SciPy releases.
    """
    return np.sqrt(np.mean((y_test - y) ** 2))
def R2(y_test, y_true):
    """Coefficient of determination: 1 - SS_res / SS_tot."""
    ss_res = ((y_test - y_true) ** 2).sum()
    ss_tot = ((y_true - y_true.mean()) ** 2).sum()
    return 1 - ss_res / ss_tot
def R22(y_test, y_true):
    """Alternative R^2: 1 - rmse(prediction) / rmse(mean baseline)."""
    arr = np.array(y_true)
    # Baseline predictor: every entry equals the mean (same dtype as
    # the original in-place fill, so integer targets truncate alike).
    baseline = np.full_like(arr, arr.mean())
    return 1 - rmse(y_test, y_true) / rmse(baseline, y_true)
plt.scatter(x, y, s=5)
degree = [1, 2, 100]
y_test = []
y_test = np.array(y_test)
for d in degree:
    # BUGFIX: the original list was missing the comma between the two
    # pipeline steps, so the second tuple was "called" on the first one
    # and raised "TypeError: 'tuple' object is not callable" at runtime.
    clf = Pipeline([('poly', PolynomialFeatures(degree=d)),
                    ('linear', LinearRegression(fit_intercept=False))])
    clf.fit(x[:, np.newaxis], y)
    y_test = clf.predict(x[:, np.newaxis])
    print(clf.named_steps['linear'].coef_)
    print('rmse=%.2f, R2=%.2f, R22=%.2f, clf.score=%.2f' %
          (rmse(y_test, y),
           R2(y_test, y),
           R22(y_test, y),
           clf.score(x[:, np.newaxis], y)))
    plt.plot(x, y_test, linewidth=2)
plt.grid()
plt.legend(['1','2','100'], loc='upper left')
plt.show()
| FangYikaii/MachineLearning_Python | Logistic Regression/LinerRegression.py | LinerRegression.py | py | 1,344 | python | en | code | 2 | github-code | 13 |
9821493998 | # coding:utf8
"""
@author: Zhangao Lu
@contact: zlu2@laurentian.ca
@time: 2021/10/10
@description:
Display the picture with bounding box
"""
import cv2
import pandas as pd
from matplotlib import pyplot as plt
BOX_COLOR = (255, 0, 0) # Red
TEXT_COLOR = (255, 255, 255) # White
def visualize_bbox(img, bbox, class_name, color=BOX_COLOR, thickness=2):
    """Visualizes a single bounding box on the image.

    *bbox* is COCO-style (x_min, y_min, width, height); the image is
    modified in place and also returned.
    """
    x_min, y_min, w, h = bbox
    x_min, x_max, y_min, y_max = int(x_min), int(x_min + w), int(y_min), int(y_min + h)
    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
    # Filled label background sized to the rendered class name.
    ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)
    cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)
    cv2.putText(
        img,
        text=class_name,
        org=(x_min, y_min - int(0.3 * text_height)),
        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
        fontScale=0.35,
        color=TEXT_COLOR,
        lineType=cv2.LINE_AA,
    )
    return img
def visualize(image, bboxes, category_ids, category_id_to_name):
    """Draw all bounding boxes on a copy of *image* and show it with pyplot.

    :param image: BGR image array (as loaded by cv2.imread)
    :param bboxes: list
        e.g. [[586.23, 324.18, 16.15, 38.93]]  (COCO x, y, w, h)
    :param category_ids: list
        e.g. [44]
    :param category_id_to_name: dict
        e.g. {1: 'person', 2: 'bicycle', 3: 'car', ...}
    :return: None (displays the figure)
    """
    print(bboxes, category_ids, category_id_to_name)
    img = image.copy()
    for bbox, category_id in zip(bboxes, category_ids):
        class_name = category_id_to_name[category_id]
        img = visualize_bbox(img, bbox, class_name)
    plt.figure(figsize=(12, 12))
    plt.axis('off')
    plt.imshow(img)
    plt.show()
if __name__ == '__main__':
    # Demo: iterate YOLO-format label files, convert each box to COCO
    # format and display it over the matching sample image.
    import os
    from control.parse_annotations import yolo_to_coco
    image_sample_paths = {
        "val2017": "../raw_data/coco2017/images/sample_val2017/",
        "train2017": "../raw_data/coco2017/images/sample_train2017/",
        # "test2017": "../raw_data/coco2017/images/sample_test2017/"
    }
    label_paths = {
        "val2017": "../raw_data/coco2017/labels/val2017/",
        "train2017": "../raw_data/coco2017/labels/train2017/",
        # "test2017": "../raw_data/coco2017/labels/test2017/",
        "coco128": "../datasets/coco128/labels/train2017/"
    }
    label_sample_paths = {
        "val2017": "../raw_data/coco2017/labels/sample_val2017/",
        "train2017": "../raw_data/coco2017/labels/sample_train2017/",
        # "test2017": "../raw_data/coco2017/labels/sample_test2017/",
    }
    key = "train2017"
    # key = "val2017"
    image_path = image_sample_paths[key]
    label_path = label_sample_paths[key]
    # names: ["person", "car", "chair", "book", "bottle"] # class names
    category_id_to_name = {0: 'person', 1: 'car', 2: 'chair', 3: 'book', 4: 'bottle'}
    for filename in os.listdir(label_path):
        """
        The content of the text file:
        2 0.670773 0.262381 0.055859 0.101052
        2 0.94168 0.212268 0.034734 0.090887
        0 0.673516 0.596629 0.553469 0.775278
        0 0.476828 0.292237 0.352094 0.574784
        """
        df = pd.read_csv(label_path + filename, header=None, sep=" ")
        df.columns = ["category_id", "x_center", "y_center", "width", "height"]
        # print(df)
        category_ids = df["category_id"].tolist()
        # Label files are "<stem>.txt"; the image is "<stem>.jpg".
        image_file_name = filename[: len(filename)-3] + "jpg"
        current_image = cv2.imread(image_path + image_file_name)
        # Convert normalized YOLO (cx, cy, w, h) to pixel COCO (x, y, w, h).
        df["bbox"] = df.apply(lambda x: yolo_to_coco([x["x_center"], x["y_center"], x["width"], x["height"]], current_image.shape[1], current_image.shape[0]), axis=1)
        bboxes = df["bbox"].tolist()
        visualize(current_image, bboxes, category_ids, category_id_to_name)
29660271006 |
import hashlib
from django.core.cache import cache
from .constants import CACHE_TIMEOUT_IN_SECS
def store_response_in_cache(func):
    """
    Decorator that caches the wrapped view's response per (lat, lng).

    The cache key is the SHA-1 of "<function name>-<lat>-<lng>", taken
    from the second positional argument (expected to be a mapping with
    'lat'/'lng' keys).  Only HTTP 200 responses are stored, and caching
    is bypassed entirely when CACHE_TIMEOUT_IN_SECS == 0.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    @wraps(func)  # FIX: preserve the wrapped function's name/docstring
    def wrapper_func(*args, **kwargs):
        key_source = '{function_name}-{lat}-{lng}'.format(
            function_name=func.__name__,
            lat=args[1]['lat'],
            lng=args[1]['lng'],
        )
        cache_key = hashlib.sha1(key_source.encode('utf-8')).hexdigest()
        if CACHE_TIMEOUT_IN_SECS != 0:
            # FIX: single cache lookup (the original called cache.get twice).
            cached = cache.get(cache_key)
            if cached:
                return cached
        response = func(*args, **kwargs)
        if CACHE_TIMEOUT_IN_SECS != 0 and response.status_code == 200:
            cache.set(cache_key, response, CACHE_TIMEOUT_IN_SECS)
        return response
    return wrapper_func
| junior92jr/location-advisor-backend | recommendations/decorators.py | decorators.py | py | 949 | python | en | code | 0 | github-code | 13 |
13014004832 | import collections
import logging
import math
import itertools
import graphviz
import torch
import numpy as np
import mfst
from mathtools import utils
from . import semirings
logger = logging.getLogger(__name__)
class FST(mfst.FST):
EXPECTATION_USES_FB = False
def __init__(
self, *args,
display_dir='LR', float_precision=None, expectation_uses_fb=None,
**kwargs):
self.display_dir = display_dir
self.float_precision = float_precision
super().__init__(*args, **kwargs)
if expectation_uses_fb is None:
self.expectation_uses_fb = FST.EXPECTATION_USES_FB
if expectation_uses_fb:
self._sum_paths = self.sum_paths
self.sum_paths = self.sum_paths_with_fb
if self._string_mapper is not None:
self.__str__ = self.prettyPrint
def lift(self, semiring=None, converter=None):
""" Wrap mfst.FST.lift so it returns an FST of the same type as `self`. """
lifted = super().lift(semiring=semiring, converter=converter)
params = dict(
_fst=lifted._fst,
semiring_class=lifted._semiring_class,
acceptor=lifted._acceptor,
string_mapper=lifted._string_mapper
)
return type(self)(**params)
    def sum_paths_fb(self):
        """ Use forward-backward to find the pathsum of an FST in the expectation semiring. """
        assert self.semiring is semirings.ExpectationSemiringWeight
        # Lift to a copy whose weights drop the "value" component, so the
        # shortest-distance computations run over the plain real part.
        self_real = self.lift(
            semirings.ExpectationSemiringWeight,
            converter=lambda w: w.dropvalue()
        )
        # alpha[s]: total weight of paths from the start into s (forward);
        # beta[s]: total weight of paths from s to a final state (backward).
        alpha = self_real.shortest_distance()
        beta = self_real.shortest_distance(reverse=True)
        Z = beta[self_real.initial_state]
        total = semirings.ExpectationSemiringWeight(0)
        for s in self.states:
            # if s is final, then get_arcs will yield an arc to state -1 with the final-state weight
            for e in self.get_arcs(s):
                multiplier = alpha[s] * (
                    beta[e.nextstate] if e.nextstate >= 0
                    else semirings.ExpectationSemiringWeight.one
                )
                # avoid multiplying the big `e.weight` by alpha and beta separately
                total += multiplier * e.weight
        # The second element of total will now be correct, but we need to replace
        # the first element with Z. Here's a slightly hacky approach that remains
        # safe (even if total and Z are encoded with different multipliers).
        return total + (Z.dropvalue() - total.dropvalue())
def sum_paths_with_fb(self, *args, **kwargs):
if self.expectation_uses_fb and self.semiring is semirings.ExpectationSemiringWeight:
return self.sum_paths_fb(*args, **kwargs)
else:
return self._sum_paths(*args, **kwargs)
    def determinize_with_merging(self, *args, **kwargs):
        """Determinize an expectation-semiring FST, merging near-equal
        weights via aggressive quantization."""
        assert self.semiring is semirings.ExpectationSemiringWeight
        # temporarily modifies behavior of `quantize` on expectation semirings
        # (WARNING: not thread-safe)
        semirings.ExpectationSemiringWeight.aggressive_quantization = True
        # must push first for aggressive quantization to be correct
        result = self.push().determinize(*args, **kwargs)
        semirings.ExpectationSemiringWeight.aggressive_quantization = False
        return result
def create_from_observable(self, oo, alphabet, wildcard='?'):
"""
Return an FSA that accepts all sequences compatible with `oo`. The `wildcard`
symbol in `oo` is allowed to match any element of `alphabet`. The FSA
uses the same semiring and other parameters as `self`.
"""
fsa = self.constructor(acceptor=True)
start_idx = fsa.add_state()
fsa.initial_state = start_idx
prev_state_idx = start_idx
for o in oo:
state_idx = fsa.add_state()
if o == wildcard:
for x in alphabet:
fsa.add_arc(prev_state_idx, state_idx, input_label=x)
else:
fsa.add_arc(prev_state_idx, state_idx, input_label=o)
prev_state_idx = state_idx
end_idx = state_idx
fsa.set_final_weight(end_idx)
return fsa
    def toGraphviz(self, action_dict=None, state_dict=None):
        """ Render this FST as a graphviz Digraph.

        Final states are drawn as double circles (with their final weight when
        informative); the initial state is marked by an arrow from an
        invisible point node.  Arc labels show ``input[:output][/weight]``,
        with epsilon drawn as the Greek letter.

        Parameters
        ----------
        action_dict, state_dict : unused
            Accepted for API compatibility; not referenced in this method.

        Returns
        -------
        graphviz.Digraph
            When returned from an ipython cell, this will generate the FST
            visualization.
        """
        fst = graphviz.Digraph("finite state machine", filename="fsm.gv")
        fst.attr(rankdir=self.display_dir)
        # here we are actually going to read the states from the FST and generate nodes for them
        # in the output source code
        zero = self._make_weight('__FST_ZERO__')
        one = self._make_weight('__FST_ONE__')
        initial_state = self.initial_state
        for sid in range(self.num_states):
            finalW = ''
            is_final = False
            ww = self._fst.FinalWeight(sid)
            # look at the raw returned value to see if it is zero (unset)
            if ww is not None and (not isinstance(ww, str) or '__FST_ONE__' == ww):
                ww = self._make_weight(ww)
                if zero != ww:
                    is_final = True
                    # Only print the final weight when it is informative
                    # (non-one, or the state is also the initial state).
                    if not (one == ww and sid != initial_state):
                        if isinstance(ww, semirings.BooleanSemiringWeight):
                            weight_str = f"{ww}"
                        else:
                            weight_str = f'{ww:.2f}'
                        finalW = f'\n({weight_str})'
            label = f'{sid}{finalW}'
            if is_final:
                fst.node(str(sid), label=label, shape='doublecircle')
        # Choose how integer arc labels are rendered as strings.
        if self._string_mapper:
            if self._string_mapper is chr:
                def make_label(x):
                    if x == 32:
                        return '(spc)'
                    elif x < 32:
                        return str(x)
                    else:
                        return chr(x)
            else:
                def make_label(x):
                    return str(self._string_mapper(x))
        else:
            make_label = str
        fst.attr('node', shape='circle')
        for sid in range(self.num_states):
            # Group parallel arcs by destination so they share one edge.
            to = collections.defaultdict(list)
            for arc in self.get_arcs(sid):
                if arc.nextstate == -1:
                    continue
                # Make the arc label
                label = ''
                if arc.input_label == 0:
                    label += '\u03B5'  # epsilon
                else:
                    label += make_label(arc.input_label)
                if arc.input_label != arc.output_label:
                    label += ':'
                    if arc.output_label == 0:
                        label += '\u03B5'
                    else:
                        label += make_label(arc.output_label)
                if one != arc.weight:
                    if isinstance(arc.weight, semirings.BooleanSemiringWeight):
                        weight_str = f"/{arc.weight}"
                    else:
                        weight_str = f'/{arc.weight:.2f}'
                    label += weight_str
                to[arc.nextstate].append(label)
            # Draw the arc (elide all but the first two parallel labels)
            for dest, values in to.items():
                if len(values) > 3:
                    values = values[0:2] + ['. . .']
                label = '\n'.join(values)
                fst.edge(str(sid), str(dest), label=label)
        if initial_state >= 0:
            # mark the start state
            fst.node('', shape='point')
            fst.edge('', str(initial_state))
        return fst
def prettyPrint(self):
for state in self.states:
for arc in self.get_arcs(state):
in_label = self._string_mapper(arc.input_label)
out_label = self._string_mapper(arc.output_label)
print(f"{state} -> {arc.nextstate} {in_label} : {out_label} / {arc.weight}")
def _repr_html_(self):
if self.num_states == 0:
return '<code>Empty FST</code>'
# if the machine is too big, do not attempt to make ipython display it
# otherwise it ends up crashing and stuff...
if self.num_states > 1000:
string = (
f'FST too large to draw graphic, use fst.full_str()<br />'
f'<code>FST(num_states={self.num_states})</code>'
)
return string
fst = self.toGraphviz()
return fst._repr_svg_()
class FstIntegerizer(object):
    """ Mixin adding FST-friendly behavior to an Integerizer.

    Index 0 is reserved for 'epsilon' because OpenFST hardcodes label zero to
    mean an epsilon transition.  Combine with `utils.Integerizer` (or the
    unhashable variant) via the subclasses below.
    """
    def __init__(self, iterable=None, prepend_epsilon=True):
        # Fix: avoid a mutable default argument (was `iterable=[]`).
        if iterable is None:
            iterable = []
        # OpenFST hardcodes zero to represent epsilon transitions---so make sure
        # our integerizer is consistent with that.
        if prepend_epsilon:
            iterable = type(iterable)(['epsilon']) + iterable
        super().__init__(iterable)
    def updateFromSequences(self, sequences):
        """Add every symbol of every sequence to the integerizer."""
        self.update(itertools.chain.from_iterable(sequences))
    def integerizeSequence(self, sequence):
        """Map a sequence of symbols to a tuple of integer indices."""
        return tuple(self.index(x) for x in sequence)
    def deintegerizeSequence(self, index_sequence):
        """Map a sequence of integer indices back to a tuple of symbols."""
        return tuple(self[i] for i in index_sequence)
class HashableFstIntegerizer(FstIntegerizer, utils.Integerizer):
    """FstIntegerizer backed by utils.Integerizer (items must be hashable)."""
    pass
class UnhashableFstIntegerizer(FstIntegerizer, utils.UnhashableIntegerizer):
    """FstIntegerizer backed by utils.UnhashableIntegerizer (items need not be hashable)."""
    pass
class FstScorer(object):
    """ Modify arbitrary data scores using a sequence model implemented as an FST.
    You can use this mixin class to easily make a BiLSTM-CRF, for example: mix
    it into a module whose ``forward`` produces per-frame label scores; this
    class composes those scores with a bigram label-transition FST.
    Attributes
    ----------
    vocabulary : tuple
        One entry per output label / score column.
    integerizer : FstIntegerizer
        Maps labels to/from integer FST arc labels (0 reserved for epsilon).
    label_scorer : FST
        Bigram transition machine built from the weight tensors.
    """
    def __init__(
            self, *super_args,
            transition_weights=None, initial_weights=None, final_weights=None,
            train_label_seqs=None, integerizer=None, vocabulary=None, device=None,
            semiring=None, requires_grad=False,
            forward_uses_max=False, equivalent_symbols=None,
            smooth_args={}, **super_kwargs):
        """
        Parameters
        ----------
        transition_weights : torch.Tensor, optional
            Square label-transition weight matrix.  If omitted, it is
            estimated (in log space) from `train_label_seqs`.
        initial_weights, final_weights : torch.Tensor, optional
            Per-label start / end weights; default to `semiring.one` everywhere.
        train_label_seqs : iterable(iterable), optional
            Label sequences used to estimate transition statistics.
        integerizer : FstIntegerizer, optional
        vocabulary : tuple, optional
            NOTE(review): if `transition_weights` is passed without
            `vocabulary`, `len(vocabulary)` in the super().__init__ call below
            raises TypeError because the default is assigned only afterwards
            -- confirm the intended call pattern.
        device : torch.device, optional
            Defaults to `transition_weights.device`.
        semiring : semirings.AbstractSemiringWeight, optional
            Defaults to the log semiring.
        requires_grad : bool, optional
            Whether the transition/initial/final weights are trainable.
        forward_uses_max : bool, optional
            If True, `forward` defaults to the tropical (max) semiring.
        equivalent_symbols : dict, optional
            Maps symbols to canonical representatives before counting.
        smooth_args : dict, optional
            Extra keyword arguments forwarded to `smoothCounts`.
            (NOTE(review): mutable default is benign here -- never mutated.)
        """
        # Estimate transition statistics from training sequences when no
        # explicit transition matrix was given.
        if transition_weights is None and train_label_seqs is not None:
            if vocabulary is None:
                vocabulary = tuple(set(itertools.chain(*train_label_seqs)))
            if integerizer is None:
                integerizer = HashableFstIntegerizer(vocabulary, prepend_epsilon=True)
                integerizer.updateFromSequences(train_label_seqs)
            integerized_label_seqs = tuple(map(integerizer.integerizeSequence, train_label_seqs))
            if equivalent_symbols is not None:
                # Collapse equivalent symbols to a canonical representative.
                def map_key(key):
                    if key in equivalent_symbols:
                        return equivalent_symbols[key]
                    return key
                integerizer._objects = [map_key(k) for k in integerizer._objects]
            transition_probs, initial_probs, final_probs = smoothCounts(
                *countSeqs(integerized_label_seqs), **smooth_args
            )
            transition_weights = transition_probs.log()
            initial_weights = initial_probs.log()
            final_weights = final_probs.log()
        super().__init__(*super_args, dim_out=len(vocabulary), **super_kwargs)
        if device is None:
            device = transition_weights.device
        if semiring is None:
            semiring = semirings.LogSemiringWeight
        if final_weights is None:
            final_weights = torch.full(transition_weights.shape[0:1], semiring.one.value)
        if initial_weights is None:
            initial_weights = torch.full(transition_weights.shape[0:1], semiring.one.value)
        if vocabulary is None:
            vocabulary = tuple(range(transition_weights.shape[0]))
        if integerizer is None:
            integerizer = HashableFstIntegerizer(vocabulary)
        self.vocabulary = vocabulary
        # Reverse lookup: label -> column index, used by `predict`.
        self.vocabulary_idxs = {n: i for i, n in enumerate(vocabulary)}
        self.integerizer = integerizer
        self.semiring = semiring
        self.forward_uses_max = forward_uses_max
        self.device = device
        # Register semiring zero and one elements so they will be on the same
        # device as the model's params
        self._semiring_zero = torch.nn.Parameter(
            self.semiring.zero.value.to(device),
            requires_grad=False
        )
        self._semiring_one = torch.nn.Parameter(
            self.semiring.one.value.to(device),
            requires_grad=False
        )
        self.semiring.zero = self.semiring(self._semiring_zero)
        self.semiring.one = self.semiring(self._semiring_one)
        self.transition_weights = torch.nn.Parameter(
            transition_weights.to(device), requires_grad=requires_grad
        )
        self.initial_weights = torch.nn.Parameter(
            initial_weights.to(device), requires_grad=requires_grad
        )
        self.final_weights = torch.nn.Parameter(
            final_weights.to(device), requires_grad=requires_grad
        )
        self.label_scorer = fromTransitions(
            self.transition_weights, self.initial_weights, self.final_weights, self.integerizer,
            index_names=self.integerizer, semiring=self.semiring
        )
        # Lazily-built tropical-semiring copy of label_scorer (see property).
        self._label_scorer_tropical_sr = None
    def mapper(self, obj, add=False):
        """Return the integer index for `obj`; raise KeyError if absent."""
        index = self.integerizer.index(obj, add=add)
        if index is None:
            raise KeyError()
        return index
    @property
    def label_scorer_tropical_sr(self):
        """`label_scorer` lifted to the tropical semiring (weights negated), cached."""
        if self._label_scorer_tropical_sr is None:
            self._label_scorer_tropical_sr = self.label_scorer.lift(
                semiring=semirings.TropicalSemiringWeight, converter=lambda x: -(x.value)
            )
        return self._label_scorer_tropical_sr
    def forward(self, batch, use_tropical_semiring=None):
        """Score `batch` with the super class, then compose each item's score
        lattice with the label-transition FST.  Returns a tuple of decode
        graphs, one per batch item."""
        if use_tropical_semiring is None:
            use_tropical_semiring = self.forward_uses_max
        # Predict all data scores in batch mode
        batch_data_scores = super().forward(batch)
        if use_tropical_semiring:
            label_scorer = self.label_scorer_tropical_sr
            semiring = semirings.TropicalSemiringWeight
            # Tropical weights are costs, so flip the sign of the scores.
            batch_data_scores = -batch_data_scores
        else:
            label_scorer = self.label_scorer
            semiring = self.semiring
        # Construct decode graphs sequence-by-sequence
        decode_graphs = tuple(
            fromArray(
                data_scores, semiring=semiring,
                input_labels=tuple(range(1, data_scores.shape[1] + 1)),
                # output_labels=tuple(self.mapper(x) for x in self.vocabulary)
                output_labels=self.integerizer.integerizeSequence(self.vocabulary),
                string_mapper=self.integerizer.__getitem__
            ).compose(label_scorer)
            for data_scores in batch_data_scores
        )
        return decode_graphs
    def predict(self, decode_graphs):
        """Viterbi-decode each graph and stack the per-frame label indices
        into a single tensor of shape (batch, num_samples)."""
        pred_labels = tuple(argmax(decode_graph, count=1) for decode_graph in decode_graphs)
        x = torch.stack(
            tuple(
                torch.tensor([self.vocabulary_idxs[s] for s in pred_seq])
                for pred_seq in pred_labels
            )
        )
        return x
class AbstractFstSequenceModel(object):
    """ Template for sequence models factored into an observation model and a
    process (transition) model, both scored via FST composition.

    Subclasses implement the `_fit*` / `_scoreObservations` hooks and are
    expected to set `self._integerizer` during `_initialize`.
    """
    def __init__(self, *args, **kwargs):
        self._process_fst = None
        self._integerizer = None
        self._initialize(*args, **kwargs)
    def fit(
            self, label_seqs, *feat_seqs, process_only=False, observation_only=False,
            **super_kwargs):
        """Fit observation and/or process models from labeled sequences.

        NOTE(review): `_preprocessLabelSeqs` is called here but not declared
        on this class -- subclasses apparently must provide it; confirm.
        """
        if not process_only:
            self._fitObservationModel(label_seqs, *feat_seqs)
        label_seqs = self._preprocessLabelSeqs(label_seqs)
        self._integerizer.updateFromSequences(label_seqs)
        if not observation_only:
            self._process_fst = self._fitProcessModel(label_seqs)
    def score(self, *feat_seqs):
        """Compose observation scores with the process FST and return the lattice."""
        observation_scores = self._scoreObservations(*feat_seqs)
        scores = observation_scores.compose(self._process_fst)
        return scores
    def predictSeq(self, *feat_seqs):
        """Return the best-path output string for the given features."""
        score_lattice = self.score(*feat_seqs)
        shortest_path = score_lattice.shortest_path()
        predictions = shortest_path.get_unique_output_string()
        return predictions
    @property
    def num_states(self):
        # FIXME: not implemented; always returns None.
        return None
    def _initialize(self):
        raise NotImplementedError
    def _fitIntegerizer(self, *label_seqs):
        raise NotImplementedError
    def _fitObservationModel(self, *feat_seqs):
        raise NotImplementedError
    def _fitProcessModel(self, *feat_seqs):
        raise NotImplementedError
    def _scoreObservations(self, *feat_seqs):
        raise NotImplementedError
# -=( LIBRARY FUNCTIONS )==-----------------------------------------------------
def fstNLLLoss(decode_graphs, label_seqs):
    """ Compute structured negative-log-likelihood loss using FST methods.
    NOTE: Right now this library handles mini-batches by iterating over each
    item in the batch, so batch operations are not vectorized.
    Parameters
    ----------
    decode_graphs : iterable(fsm.FST)
        One decode lattice per batch item (e.g. from FstScorer.forward).
    label_seqs : torch.Tensor of int, shape (batch_size, num_samples)
        Ground-truth label indices.  They are shifted by +1 below because
        arc label 0 is reserved for epsilon.
    Returns
    -------
    loss : torch.Tensor of float, shape (batch_size,)
        Per-item NLL: -(score of ground-truth path - log partition),
        sign-adjusted for the graph's semiring.
    """
    def generateLosses(decode_graphs, label_seqs):
        for decode_graph, label_seq in zip(decode_graphs, label_seqs):
            # FIXME: We need a more consistent way of mapping to/from the arc labels
            gt_path_acceptor = FST(decode_graph.semiring).create_from_string(label_seq + 1)
            gt_path_score = decode_graph.compose(gt_path_acceptor).sum_paths().value
            total_path_score = decode_graph.sum_paths().value
            if decode_graph.semiring is semirings.LogSemiringWeight:
                yield -(gt_path_score - total_path_score)
            elif decode_graph.semiring is semirings.RealSemiringWeight:
                yield -(torch.log(gt_path_score) - torch.log(total_path_score))
            elif decode_graph.semiring is semirings.TropicalSemiringWeight:
                # Tropical weights are already costs (negated log-probs),
                # so no sign flip is needed.
                yield gt_path_score - total_path_score
            else:
                raise AssertionError(f"{decode_graph.semiring} is not supported by fstNLLLoss")
    return torch.stack(tuple(generateLosses(decode_graphs, label_seqs)))
def toArray(fst, input_labels=None, output_labels=None, array_constructor=None):
    """ Return an array representing an FST's edge weights.
    This function is meant to be used to create an instance of pytorch-struct's
    LinearChainCRF, which we can then backpropagate through during training.
    Parameters
    ----------
    fst : FST
    input_labels : iterable(int or str), optional
        If this argument is not provided, it is taken to be the input labels
        in `FST`.
    output_labels : iterable(int or str) , optional
        If this argument is not provided, it is taken to be the output labels
        in `FST`.  Ignored for acceptors.
    array_constructor : function, optional
        Called as ``array_constructor(shape, fill_value)``.  Use this to
        decide the return type of the array.  Returns ``dict`` by default.
    Returns
    -------
    weights : array_like or dict, shape (num_states, num_input_labels, num_output_labels)
        The FST's edge weights, arranged as an array.  Acceptors get a
        2-D layout (num_states, num_input_labels).
    semiring : semirings.AbstractSemiringWeight
        The FST's semiring.
    input_labels : list
    output_labels : list or None
    """
    weights_dict, _input_labels, _output_labels = traverse(fst)
    if array_constructor is None:
        # Default constructor ignores the (shape, fill) arguments and returns
        # a fresh dict keyed by index tuples.
        def array_constructor(*args):
            return {}
    if input_labels is None:
        input_labels = list(_input_labels)
    if output_labels is None and not fst._acceptor:
        output_labels = list(_output_labels)
    if fst._acceptor:
        weights = array_constructor((fst.num_states, len(input_labels)), fst.semiring_zero.value)
        for (state_index, input_label), weight in weights_dict.items():
            input_index = input_labels.index(input_label)
            weights[state_index, input_index] = weight.value
    else:
        weights = array_constructor(
            (fst.num_states, len(input_labels), len(output_labels)),
            fst.semiring_zero.value
        )
        for (state_index, input_label, output_label), weight in weights_dict.items():
            input_index = input_labels.index(input_label)
            output_index = output_labels.index(output_label)
            weights[state_index, input_index, output_index] = weight.value
    return weights, fst.semiring, input_labels, output_labels
def fromArray(
        weights, final_weight=None, semiring=None, string_mapper=None,
        input_labels=None, output_labels=None):
    """ Instantiate a linear-chain state machine from an array of weights.

    TODO: Right now this only takes input arrays that create linear-chain state
    machines, but it can be extended to create arbitrary arrays by taking an
    input with shape (num_states, num_input_labels, num_output_labels).

    Parameters
    ----------
    weights : array_like, shape (num_samples, num_inputs[, num_outputs])
        Needs to implement `.shape`, so it should be a numpy array or a torch
        tensor.  2-D input produces an acceptor; 3-D input a transducer.
    final_weight : semirings.AbstractSemiringWeight, optional
        Should have the same type as `semiring`. Default is `semiring.one`.
    semiring : semirings.AbstractSemiringWeight, optional
        Default is `semirings.LogSemiringWeight`.
    string_mapper : function, optional
    input_labels, output_labels : iterable, optional
        Default to the stringified column indices.

    Returns
    -------
    fst : fsm.FST
        A linear chain with one state per sample plus the initial state.
        Zero-weight entries produce no arc.
    """
    if semiring is None:
        semiring = semirings.LogSemiringWeight
    if final_weight is None:
        final_weight = semiring.one
    if len(weights.shape) == 3:
        is_acceptor = False
    elif len(weights.shape) == 2:
        is_acceptor = True
    else:
        raise AssertionError(f"weights have unrecognized shape {weights.shape}")
    if input_labels is None:
        input_labels = tuple(str(i) for i in range(weights.shape[1]))
    if output_labels is None and not is_acceptor:
        output_labels = tuple(str(i) for i in range(weights.shape[2]))
    fst = FST(semiring, string_mapper=string_mapper, acceptor=is_acceptor)
    init_state = fst.add_state()
    fst.set_initial_state(init_state)
    prev_state = init_state
    if is_acceptor:
        for row in weights:
            cur_state = fst.add_state()
            for i, weight in enumerate(row):
                if semiring(weight) != semiring.zero:
                    fst.add_arc(
                        prev_state, cur_state, input_label=input_labels[i],
                        weight=weight
                    )
            prev_state = cur_state
    else:
        for input_output in weights:
            cur_state = fst.add_state()
            for i, outputs in enumerate(input_output):
                for j, weight in enumerate(outputs):
                    if semiring(weight) != semiring.zero:
                        fst.add_arc(
                            prev_state, cur_state,
                            input_label=input_labels[i], output_label=output_labels[j],
                            weight=weight
                        )
            prev_state = cur_state
    # Fix: the original called set_final_weight(cur_state), which is unbound
    # (NameError) for zero-length `weights`; `prev_state` is always defined
    # and makes the empty input yield an accept-empty-string machine.
    fst.set_final_weight(prev_state, final_weight)
    fst.display_dir = 'LR'
    return fst
def fromTransitions(
        transition_weights, init_weights, final_weights, integerizer,
        index_names=None, semiring=None, as_dict=False):
    """ Instantiate a state machine from state transitions.
    Parameters
    ----------
    transition_weights : torch.Tensor or dict
        (num_states, num_states) transition weights -- or, if `as_dict`, a
        mapping (prev, cur) -> weight.
    init_weights, final_weights : torch.Tensor or dict
        Per-state initial / final weights (or mappings, if `as_dict`).
    integerizer : FstIntegerizer
        Used as the FST's string mapper and, together with `index_names`, to
        re-map arc labels.
    index_names : sequence, optional
        If given, each arc's labels become integerizer.index(index_names[state]).
    semiring : semirings.AbstractSemiringWeight, optional
        Default is `semirings.LogSemiringWeight`.
    as_dict : bool, optional
        Treat the weight arguments as dicts instead of tensors.
    Returns
    -------
    fst : FST
        A bigram machine: one FST state per model state, plus a distinguished
        initial state with epsilon arcs into the initial model states.
    """
    if semiring is None:
        semiring = semirings.LogSemiringWeight
    if as_dict:
        transitions = transition_weights.keys()
        final_states = final_weights.keys()
        init_states = init_weights.keys()
    else:
        # Only non-zero entries become arcs.
        transitions = (transition_weights != semiring.zero.value).nonzero().tolist()
        final_states = (final_weights != semiring.zero.value).nonzero().squeeze(1).tolist()
        init_states = (init_weights != semiring.zero.value).nonzero().squeeze(1).tolist()
    fst = FST(semiring, string_mapper=integerizer.__getitem__)
    init_state = fst.add_state()
    fst.set_initial_state(init_state)
    # Lazily allocate one FST state per model state as transitions are seen.
    fst_states = {}
    for (prev, cur) in transitions:
        weight = transition_weights[prev, cur]
        prev_state = fst_states.get(prev, None)
        if prev_state is None:
            prev_state = fst.add_state()
            fst_states[prev] = prev_state
        cur_state = fst_states.get(cur, None)
        if cur_state is None:
            cur_state = fst.add_state()
            fst_states[cur] = cur_state
        if index_names is not None:
            prev = integerizer.index(index_names[prev])
            cur = integerizer.index(index_names[cur])
        fst.add_arc(
            prev_state, cur_state,
            input_label=prev, output_label=cur,
            weight=weight
        )
    # NOTE(review): a state that appears only in init_states/final_states but
    # never in `transitions` raises KeyError below -- confirm inputs always
    # overlap (smoothCounts' regularizers appear to guarantee this).
    for state in init_states:
        weight = init_weights[state]
        state_idx = fst_states[state]
        fst.add_arc(
            init_state, state_idx,
            input_label=0, output_label=0, weight=weight
        )
    for state in final_states:
        weight = final_weights[state]
        state_idx = fst_states[state]
        fst.set_final_weight(state_idx, weight)
    return fst
def align(scores, label_seq):
    """ Align (ie segment) a sequence of scores, given a known label sequence.
    NOTE: I don't know if this works with score tables that have more than 9
    columns.
    Parameters
    ----------
    scores : array_like of float, shape (num_samples, num_labels)
        Log probabilities (possibly un-normalized).
    label_seq : iterable(string or int)
        The segment-level label sequence.
    Returns
    -------
    aligned_labels : tuple(int)
        One label index per sample, or None if no alignment exists.
    alignment_score : semirings.TropicalSemiringWeight
        Score of the best path through the alignment graph (possible un-normalized)
    """
    # Tropical weights are costs, so negate the log-probability scores.
    scores_fst = fromArray(-scores, semiring=semirings.TropicalSemiringWeight)
    label_fst = leftToRightAcceptor(label_seq, semiring=semirings.TropicalSemiringWeight)
    aligner = scores_fst.compose(label_fst)
    best_path_lattice = aligner.shortest_path()
    aligned_labels = best_path_lattice.get_unique_output_string()
    if aligned_labels is not None:
        # Output comes back as a string of digit characters; re-parse to ints.
        aligned_labels = tuple(int(c) for c in aligned_labels)
    # NOTE: this gives the negative log probability of the single best path,
    # not that of all paths, because best_path_lattice uses the tropical semiring.
    # In this case it's fine because we only have one path anyway. If we want
    # to marginalize over the k-best paths in the future, we will need to lift
    # best_path_lattice to the real or log semiring before calling sum_paths.
    alignment_score = -(best_path_lattice.sum_paths().value)
    return aligned_labels, alignment_score
def leftToRightAcceptor(input_seq, semiring=None, string_mapper=None):
    """ Construct a left-to-right finite-state acceptor from an input sequence.
    The input is usually a sequence of segment-level labels, and this machine
    is used to align labels with sample-level scores.
    Parameters
    ----------
    input_seq : iterable(int or string), optional
    semiring : semirings.AbstractSemiring, optional
        Default is semirings.BooleanSemiringWeight.
    string_mapper : function, optional
        A function that takes an integer as input and returns a string as output.
    Returns
    -------
    acceptor : fsm.FST
        A linear-chain acceptor where every state has a self-loop on its own
        label plus one forward transition, i.e. (self-loops omitted):
            [START] s1 --> s2 --> s3 [END]
        All edge weights are `semiring.one`.
    """
    if semiring is None:
        semiring = semirings.BooleanSemiringWeight
    acceptor = FST(semiring, string_mapper=string_mapper, acceptor=True)
    state = acceptor.add_state()
    acceptor.set_initial_state(state)
    for symbol in input_seq:
        nxt = acceptor.add_state()
        # Self-loop lets a segment label repeat; the second arc advances.
        acceptor.add_arc(nxt, nxt, input_label=symbol, weight=semiring.one)
        acceptor.add_arc(state, nxt, input_label=symbol, weight=semiring.one)
        state = nxt
    acceptor.set_final_weight(state, semiring.one)
    acceptor.display_dir = 'LR'
    return acceptor
def sequenceFsa(seqs, integerizer=None):
    """Build a deterministic, minimal tropical-semiring acceptor recognizing
    exactly the given label sequences (the union of one string machine per
    sequence, determinized and minimized)."""
    if integerizer is None:
        integerizer = HashableFstIntegerizer(tuple(itertools.chain(*seqs)))
    int_seqs = tuple(map(integerizer.integerizeSequence, seqs))

    def string_acceptor(int_seq):
        # One linear-chain machine accepting a single integerized sequence.
        return FST(
            semirings.TropicalSemiringWeight, string_mapper=lambda i: str(integerizer[i])
        ).create_from_string(int_seq)

    acceptor = string_acceptor(int_seqs[0])
    for int_seq in int_seqs[1:]:
        acceptor = acceptor.union(string_acceptor(int_seq))
    return acceptor.determinize().minimize()
# -=( HELPER FUNCTIONS )==-----------------------------------------------------
def smoothCounts(
        edge_counts, state_counts, init_states, final_states,
        # empty_regularizer=0, zero_transition_regularizer=0,
        init_regularizer=0, final_regularizer=0,
        uniform_regularizer=0, diag_regularizer=0,
        override_transitions=False, structure_only=False, as_numpy=False):
    """ Turn n-gram counts (as produced by `countSeqs`) into probability tables.

    Builds dense count tensors, applies the requested regularizers (self-loop
    boosts for initial/final states, uniform and diagonal smoothing), then
    row-normalizes transitions and normalizes the initial distribution.

    Returns
    -------
    transition_probs : torch.Tensor, shape (num_states, num_states)
    initial_probs : torch.Tensor, shape (num_states,)
    final_probs : torch.Tensor, shape (num_states,)
        Binary indicator of final states (not normalized).
    """
    num_states = max(state_counts.keys()) + 1
    bigram_counts = torch.zeros((num_states, num_states))
    for (i, j), count in edge_counts.items():
        bigram_counts[i, j] = count
    unigram_counts = torch.zeros(num_states)
    for i, count in state_counts.items():
        unigram_counts[i] = count
    initial_counts = torch.zeros(num_states)
    for i, count in init_states.items():
        initial_counts[i] = count
    final_counts = torch.zeros(num_states)
    for i, count in final_states.items():
        final_counts[i] = count
    # Regularize the heck out of these counts
    initial_states = initial_counts.nonzero()[:, 0]
    for i in initial_states:
        bigram_counts[i, i] += init_regularizer
    final_states = final_counts.nonzero()[:, 0]
    for i in final_states:
        bigram_counts[i, i] += final_regularizer
    # bigram_counts[:, 0] += zero_transition_regularizer
    # bigram_counts[0, :] += zero_transition_regularizer
    bigram_counts += uniform_regularizer
    diag_indices = np.diag_indices(bigram_counts.shape[0])
    bigram_counts[diag_indices] += diag_regularizer
    if override_transitions:
        logger.info('Overriding bigram_counts with an array of all ones')
        bigram_counts = torch.ones_like(bigram_counts)
    if structure_only:
        # Keep only the support of each distribution (0/1 indicators).
        bigram_counts = (bigram_counts > 0).float()
        initial_counts = (initial_counts > 0).float()
        final_counts = (final_counts > 0).float()
    denominator = bigram_counts.sum(1)
    transition_probs = bigram_counts / denominator[:, None]
    # Rows with zero total count divide 0/0; map the resulting NaNs to 0.
    transition_probs[torch.isnan(transition_probs)] = 0
    initial_probs = initial_counts / initial_counts.sum()
    final_probs = (final_counts > 0).float()
    if as_numpy:
        def to_numpy(x):
            return x.numpy().astype(float)
        return tuple(map(to_numpy, (transition_probs, initial_probs, final_probs)))
    return transition_probs, initial_probs, final_probs
def countSeqs(seqs):
    """ Count n-gram statistics on a collection of sequences.
    Parameters
    ----------
    seqs : iterable( iterable(Hashable) )
        Empty sequences are skipped (they contribute no states).
    Returns
    -------
    bigram_counts : collections.defaultdict((Hashable, Hashable) -> int)
    unigram_counts : collections.defaultdict(Hashable -> int)
    initial_counts : collections.defaultdict(Hashable -> int)
    final_counts : collections.defaultdict(Hashable -> int)
    """
    bigram_counts = collections.defaultdict(int)
    unigram_counts = collections.defaultdict(int)
    initial_counts = collections.defaultdict(int)
    final_counts = collections.defaultdict(int)
    for seq in seqs:
        if not seq:
            # Robustness fix: an empty sequence has no first/last element;
            # previously this raised IndexError on seq[0].
            continue
        initial_counts[seq[0]] += 1
        final_counts[seq[-1]] += 1
        for state in seq:
            unigram_counts[state] += 1
        for prev, cur in zip(seq[:-1], seq[1:]):
            bigram_counts[prev, cur] += 1
    return bigram_counts, unigram_counts, initial_counts, final_counts
def printGradNorm(grad):
    """Log type/size/norm diagnostics for a gradient tuple.

    Intended for use as an autograd hook; `grad` is indexed like a tuple of
    tensors (only grad[0] is inspected).
    """
    # Use lazy %-style logging args so the values are only formatted when the
    # INFO level is actually enabled (the originals eagerly built f-strings).
    logger.info('grad: %s', type(grad))
    logger.info('grad[0]: %s', type(grad[0]))
    logger.info('gradsize: %s', grad[0].size())
    logger.info('grad norm: %s', grad[0].norm())
def argmax(decode_graph, count=1, squeeze=True):
    """ Return the `count` best output label sequences of a decode graph.

    The graph is lifted to the tropical semiring if necessary so that
    `shortest_path` performs Viterbi (max-product) decoding.

    Parameters
    ----------
    decode_graph : FST
        Must be in the tropical, log, or real semiring.
    count : int, optional
        Number of best paths to extract.
    squeeze : bool, optional
        If True and exactly one path is found, return it directly rather
        than as a 1-tuple.

    Returns
    -------
    tuple of output paths, or a single output path when squeezed.
    """
    if decode_graph.semiring is not semirings.TropicalSemiringWeight:
        # Convert weights to tropical costs (negated log-probabilities).
        if decode_graph.semiring is semirings.LogSemiringWeight:
            def converter(weight):
                return -weight.value
        elif decode_graph.semiring is semirings.RealSemiringWeight:
            def converter(weight):
                return -weight.value.log()
        else:
            raise NotImplementedError("Conversion to tropical semiring isn't implemented yet")
        decode_graph = decode_graph.lift(
            semiring=semirings.TropicalSemiringWeight, converter=converter
        )
    lattice = decode_graph.shortest_path(count=count)
    best_paths = tuple(path.output_path for path in lattice.iterate_paths())
    if squeeze and len(best_paths) == 1:
        return best_paths[0]
    return best_paths
def traverse(fst):
    """ Traverse a transducer, accumulating edge weights and labels.
    Parameters
    ----------
    fst : FST
    Returns
    -------
    weights_dict : dict((int, int) -> semirings.AbstractSemiringWeight)
        Maps (state, input_label[, output_label]) to the (summed) arc weight.
    all_input_labels : set(int)
    all_output_labels : set(int)
    """
    # FIXME: make this a method in the FST or something
    def make_label(x):
        if x == 0:
            # Fix: return immediately for epsilon.  The original assigned
            # x = 'epsilon' and fell through to `x < 32`, which raises
            # TypeError (str < int) on Python 3 for every epsilon arc.
            return 'epsilon'
        if x == 32:
            x = '(spc)'
        elif x < 32:
            x = str(x)
        else:
            # OpenFST will encode characters as integers
            # NOTE(review): int(chr(x)) only succeeds when the decoded
            # character is a digit (as produced by fromArray's str(i)
            # labels); other characters would raise ValueError -- confirm.
            x = int(chr(x))
        return x
    weights_dict = {}
    all_input_labels = set()
    all_output_labels = set()
    # Depth-first traversal from the initial state.
    state = fst.initial_state
    to_visit = [state]
    queued_states = set(to_visit)
    while to_visit:
        state = to_visit.pop()
        for edge in fst.get_arcs(state):
            # Final weights are implemented as arcs whose inputs and outputs
            # are epsilon, and whose next state is -1 (ie an impossible next
            # state). I account for final weights using fst.get_final_weight,
            # so we can skip them here.
            if edge.nextstate < 0:
                continue
            if edge.nextstate not in queued_states:
                queued_states.add(edge.nextstate)
                to_visit.append(edge.nextstate)
            weight = edge.weight
            input_label = make_label(edge.input_label)
            output_label = make_label(edge.output_label)
            if fst._acceptor:
                edge_labels = (state, input_label)
            else:
                edge_labels = (state, input_label, output_label)
            # Parallel arcs with identical labels have their weights summed.
            if edge_labels in weights_dict:
                weights_dict[edge_labels] += weight
            else:
                weights_dict[edge_labels] = weight
            all_input_labels.add(input_label)
            all_output_labels.add(output_label)
    return weights_dict, all_input_labels, all_output_labels
# -=( DEPRECATED )==-----------------------------------------------------------
def actionTransitionFsa(seqs, integerizer, semiring=None):
    """ (DEPRECATED) Build a bigram action acceptor from label sequences.

    Transition weights are relative bigram frequencies, transformed into the
    target semiring (negated log for the default tropical semiring).
    """
    if semiring is None:
        semiring = semirings.TropicalSemiring

        def transform_weight(w):
            # Tropical weights are costs: negated log-probabilities.
            return -math.log(w)
    else:
        def transform_weight(w):
            # NOTE(review): the original defined `transform_weight` only in
            # this branch and `semiring_transform` only in the other, so the
            # function raised NameError on every code path.  Passing the raw
            # probability through is the most conservative repair -- confirm
            # the intended transform for caller-supplied semirings.
            return w
    edge_counts, state_counts, init_states, final_states = countSeqs(seqs)
    action_acceptor = FST(semiring, string_mapper=lambda i: str(integerizer[i]))
    fst_states = {}
    for (prev, cur), transition_count in edge_counts.items():
        action_id = cur
        prev_state = fst_states.get(prev, None)
        if prev_state is None:
            prev_state = action_acceptor.add_state()
            fst_states[prev] = prev_state
        cur_state = fst_states.get(cur, None)
        if cur_state is None:
            cur_state = action_acceptor.add_state()
            fst_states[cur] = cur_state
        weight = transform_weight(transition_count / state_counts[prev])
        action_acceptor.add_arc(
            prev_state, cur_state,
            input_label=action_id, output_label=action_id, weight=weight
        )
    for state in init_states:
        state_idx = fst_states[state]
        action_acceptor.set_initial_state(state_idx)
    for state in final_states:
        state_idx = fst_states[state]
        weight = transform_weight(1)
        action_acceptor.set_final_weight(state_idx, weight)
    return action_acceptor
def stateTransitionFsa(seqs, integerizer):
    """ (DEPRECATED) Build an acceptor over the actions implied by state bigrams.

    Arc labels are the actions that take one assembly state to the next
    (self-transitions map to the empty action); arc weights are negated log
    relative frequencies (tropical semiring).

    NOTE(review): assumes each element of `seqs` has an `assembly_state` set
    attribute and that consecutive states differ by exactly one action --
    confirm against the callers' state type.
    """
    edge_counts, state_counts, init_states, final_states = countSeqs(seqs)
    fst_states = {}
    action_acceptor = FST(semirings.TropicalSemiring, string_mapper=lambda i: str(integerizer[i]))
    for (prev, cur), transition_count in edge_counts.items():
        if prev == cur:
            action = ''
        else:
            # Single-element set difference: the action added between states.
            action, = cur.assembly_state - prev.assembly_state
        action_id = integerizer.index(action)
        prev_state = fst_states.get(prev, None)
        if prev_state is None:
            prev_state = action_acceptor.add_state()
            fst_states[prev] = prev_state
        cur_state = fst_states.get(cur, None)
        if cur_state is None:
            cur_state = action_acceptor.add_state()
            fst_states[cur] = cur_state
        weight = -math.log(transition_count / state_counts[prev])
        action_acceptor.add_arc(
            prev_state, cur_state,
            input_label=action_id, output_label=action_id, weight=weight
        )
    for state in init_states:
        state_idx = fst_states[state]
        action_acceptor.set_initial_state(state_idx)
    for state in final_states:
        state_idx = fst_states[state]
        weight = -math.log(1)
        action_acceptor.set_final_weight(state_idx, weight)
    return action_acceptor
| jd-jones/seqtools | seqtools/fstutils_mfl.py | fstutils_mfl.py | py | 38,842 | python | en | code | 1 | github-code | 13 |
10252628368 | from turtle import Turtle
class Paddle(Turtle):
    """A vertical pong paddle: a stretched white square that slides up and
    down in fixed 25-pixel steps along a fixed x position."""

    MOVE_STEP = 25  # pixels moved per keypress

    def __init__(self, x_coordinate):
        super().__init__()
        self.shape("square")
        self.turtlesize(stretch_wid=5, stretch_len=1)
        self.color("white")
        self.penup()
        self.goto(x_coordinate, 0)

    def up(self):
        """Slide the paddle one step upward."""
        self.goto(self.xcor(), self.ycor() + self.MOVE_STEP)

    def down(self):
        """Slide the paddle one step downward."""
        self.goto(self.xcor(), self.ycor() - self.MOVE_STEP)
| joaopulsz/pong | paddle.py | paddle.py | py | 492 | python | en | code | 0 | github-code | 13 |
32604675156 | import mysql.connector
import configparser
class MySQL:
    """Thin wrapper around a mysql.connector connection configured from the
    [MYSQL] section of an INI file (host, user, password, database)."""

    def __init__(self, config_file):
        self.config = configparser.ConfigParser()
        self.config.read(config_file)
        self.connection = None
        self.cursor = None

    def connect(self):
        """Open the connection and a buffered cursor; on failure, print the
        error and leave the wrapper in a consistent disconnected state."""
        try:
            self.connection = mysql.connector.connect(host=self.config['MYSQL']['host'],
                                                      user=self.config['MYSQL']['user'],
                                                      password=self.config['MYSQL']['password'],
                                                      database=self.config['MYSQL']['database'])
            self.cursor = self.connection.cursor(buffered=True)
            print("Connected to the database.")
        except mysql.connector.Error as error:
            # Fix: reset handles so a failed connect can't leave a half-open
            # connection lying around for get_all()/disconnect() to trip on.
            self.connection = None
            self.cursor = None
            print(f"Failed to connect to the database: {error}")

    def disconnect(self):
        """Close cursor and connection, resetting them so connect() can be
        called again safely."""
        if self.cursor:
            self.cursor.close()
            self.cursor = None
        if self.connection:
            self.connection.close()
            self.connection = None
        print("Connection to the database closed.")

    def get_all(self, query):
        """Execute `query` and return all rows.

        Raises
        ------
        RuntimeError
            If called before a successful connect() (previously this raised
            a confusing AttributeError on the None cursor).
        """
        if self.cursor is None:
            raise RuntimeError("Not connected to the database; call connect() first.")
        self.cursor.execute(query)
        return self.cursor.fetchall()
if __name__ == '__main__':
    # Smoke test: connect using config.ini, dump the users table, clean up.
    db = MySQL('config.ini')
    db.connect()
    results = db.get_all('SELECT * FROM users')
    for row in results:
        print(row)
    db.disconnect()
| liangmartin/python-myops-tool | Mysql/conn_mysql.py | conn_mysql.py | py | 1,406 | python | en | code | 0 | github-code | 13 |
25661456785 | import numpy as np
# ASCII a = 97; A = 65
# Priority a = 1; A = 27
# 97-1 = 96; 65 - 27 = 38
def get_priority(letter: str) -> int:
    """Map an item letter to its Advent-of-Code priority: a-z -> 1-26, A-Z -> 27-52.

    ASCII arithmetic: ord('a') == 97 and lowercase priorities start at 1, so
    subtract 96; ord('A') == 65 and uppercase priorities start at 27, so
    subtract 65 - 27 == 38.  (Fixed the annotation: `chr` is a builtin
    function, not a type -- the parameter is a one-character `str`.)
    """
    return (ord(letter) - 38) if letter.isupper() else (ord(letter) - 96)
def part1():
    """Sum the priorities of the item shared between each rucksack's halves."""
    lines = [line.strip() for line in open('input/day3.txt').readlines()]
    total = 0
    for line in lines:
        half = len(line) // 2
        # The puzzle guarantees exactly one item type appears in both halves.
        shared = (set(line[:half]) & set(line[half:])).pop()
        total += get_priority(shared)
    return total
def part2():
    """Sum the priorities of each three-elf group's common badge item."""
    lines = [line.strip() for line in open('input/day3.txt').readlines()]
    total = 0
    # Split the rucksacks into consecutive groups of three.
    for group in np.array_split(lines, len(lines) / 3):
        badge = (set(group[0]) & set(group[1]) & set(group[2])).pop()
        total += get_priority(badge)
    return total
# Expected answers (sample/real inputs): [157, 7766, 70, 2415]
print('part1', part1())
print('part2', part2())
| Kitri/AdventOfCodePython | 2022/day3.py | day3.py | py | 936 | python | en | code | 0 | github-code | 13 |
72829504979 | import bpy
from bpy.props import *
from ... base_types import AnimationNode
class tankSuspensionNode(bpy.types.Node, AnimationNode):
    """Animation Nodes node that positions a tank suspension wheel control.

    When the terrain offset drops below the trip value, the wheel snaps to
    its fully compressed height; otherwise it follows the shrink height.
    """
    bl_idname = "an_tankSuspensionNode"
    bl_label = "Suspension Wheel Tracker"
    bl_width_default = 200
    # message1: informational text shown in the node UI
    message1 = StringProperty("")
    # message2: error/warning text shown in the node UI
    message2 = StringProperty("")
    def create(self):
        # Declare the node's input sockets (type, UI label, identifier).
        self.newInput("Object", "Wheel Control", "wheel")
        self.newInput("Float", "Height Offset", "z_off")
        self.newInput("Float", "Shrink Height", "s_high")
        self.newInput("Float", "Fix Longitudinal", "f_long")
        self.newInput("Float", "Fix Height", "base_h")
        self.newInput("Float", "Trip Value", "max_v")
    def draw(self, layout):
        # Surface any status/error messages in the node body.
        if (self.message1 != ""):
            layout.label(self.message1, icon = "INFO")
        if (self.message2 != ""):
            layout.label(self.message2, icon = "ERROR")
    def execute(self, wheel, z_off, s_high, f_long, base_h, max_v):
        # Tint the node so configured suspension nodes stand out in the tree.
        self.use_custom_color = True
        self.useNetworkColor = False
        self.color = (0.8,0.9,1)
        if wheel is None or max_v == 0:
            # Inputs incomplete: prompt the user instead of moving anything.
            self.message2 = 'Set Parameters'
        else:
            self.message2 = ''
            # Longitudinal position is fixed in both branches; only height
            # changes depending on whether the offset exceeds the trip value.
            if z_off < -max_v:
                wheel.location.x = f_long
                wheel.location.z = (base_h - max_v)
            else:
                wheel.location.x = f_long
                wheel.location.z = s_high
| Clockmender/My-AN-Nodes | nodes/general/suspension.py | suspension.py | py | 1,419 | python | en | code | 16 | github-code | 13 |
9070879280 | from collections import deque
# def solution(n, v):
# q = deque(v)
# total_gap = [0] * (n + 1)
#
# left_total = 0
# for i in range(n + 1):
# right = v[i:]
# temp_total = left_total - sum(right)
# total_gap[i] = abs(temp_total)
#
# total_gap[-1] = sum(v)
#
# return total_gap.index(min(total_gap))
# minimum_value = min(total_gap)
#
# for i in range(n + 1):
# if total_gap[i] == minimum_value:
# answer = i
# break
#
# return answer
def solution(n, v):
    """Return the split index i (0..n) minimising |sum(v[:i]) - sum(v[i:])|.

    Runs in O(n) by maintaining running left/right sums instead of
    re-summing slices. Ties resolve to the smallest index, matching the
    original explicit scan.

    Args:
        n: Number of elements in v.
        v: List of integers to split.

    Returns:
        The first index with the minimum absolute left/right difference.
    """
    total_gap = [0] * (n + 1)
    total_left = 0
    total_right = sum(v)
    for i in range(n + 1):
        if i > 0:
            # Move element i-1 from the right partition to the left one.
            total_left += v[i - 1]
            total_right -= v[i - 1]
        total_gap[i] = abs(total_left - total_right)
    # list.index returns the first occurrence of the minimum, which is
    # exactly what the original min-then-scan loop computed.
    return total_gap.index(min(total_gap))
# Sample runs of solution() on two hand-made cases.
n1 = 5
v1 = [1, 2, 1, 2, 1]
print(solution(n1, v1))
n2 = 7
v2 = [3, 2, 3, 4, -1, -2, -3]
print(solution(n2, v2))
# def solution(n, v):
# q = deque(v)
# total_gap = [0] * (n + 1)
#
# left_total = 0
# for i in range(n):
# temp_total = left_total - sum(q)
# total_gap[i] = abs(temp_total)
# left_total += q.popleft()
#
# total_gap[-1] = sum(v)
#
# return total_gap.index(min(total_gap))
# def solution(n, v):
# answer = 0
# total_gap = [0] * (n + 1)
#
# for i in range(n + 1):
# left = v[:i]
# right = v[i:]
# temp_total = sum(left) - sum(right)
# total_gap[i] = abs(temp_total)
#
# minimum_value = min(total_gap)
#
# for i in range(n + 1):
# if total_gap[i] == minimum_value:
# answer = i
# break
#
# return answer | mins1031/coding-test | programmers/Ace1.py | Ace1.py | py | 1,924 | python | en | code | 0 | github-code | 13 |
35596788969 | from .i2cDevice import *
from ..device import pyLabDataLoggerIOError
import datetime, time, sys
import numpy as np
from termcolor import cprint
import smbus
########################################################################################################################
class h3lis331dlDevice(i2cDevice):
    """I2C driver for the ST H3LIS331DL high-g 3-axis accelerometer.

    Specify I2C bus and address on initialisation.  Optional params:
    ACCL_RANGE (100/200/400 g full scale) and ACCL_SAMPLES (number of
    reads averaged in software per query).
    """
    # Establish the connection and configure the device registers.
    def activate(self):
        assert self.params['address']
        assert self.params['bus']
        if 'name' in self.params: self.name = self.params['name']+' %i:%s' % (self.params['bus'],hex(self.params['address']))
        if not 'driver' in self.params.keys(): self.params['driver']=None
        # Three acceleration axes plus a derived magnitude channel.
        self.params['n_channels']=4
        if not 'channel_names' in self.config:
            self.config['channel_names']=['accel_X','accel_Y','accel_Z','magnitude']
        if not 'ACCL_RANGE' in self.params: self.params['ACCL_RANGE']=100. # 100 g default
        if not 'ACCL_SAMPLES' in self.params: self.params['ACCL_SAMPLES']=10. # at 50 Hz, sample for 0.2s
        self.params['ACCL_SCALING']=(2**15)/self.params['ACCL_RANGE'] # 16 bit signed output, so value is -2^15 to +2^15 in range.
        self.params['raw_units']=['g','g','g','g']
        self.config['eng_units']=['g','g','g','g']
        self.config['scale']=np.ones(self.params['n_channels'],)
        self.config['offset']=np.zeros(self.params['n_channels'],)
        if ('untitled' in self.name.lower()) or (self.name==''):
            self.name = 'H3LIS331DL Accelerometer I2C %i:%s' % (self.params['bus'],self.params['address'])
        self.bus = smbus.SMBus(self.params['bus'])
        # Control register 1 (0x20): 0x27 = power ON, 50 Hz output rate,
        # X/Y/Z axes enabled.
        self.bus.write_byte_data(self.params['address'], 0x20, 0x27)
        # Control register 4 (0x23): continuous update, full-scale select.
        if self.params['ACCL_RANGE']==100.:
            self.bus.write_byte_data(self.params['address'], 0x23, 0x00)
        elif self.params['ACCL_RANGE']==200.:
            self.bus.write_byte_data(self.params['address'], 0x23, 0x01)
        elif self.params['ACCL_RANGE']==400.:
            self.bus.write_byte_data(self.params['address'], 0x23, 0x02)
        else:
            raise ValueError("Bad ACCL_RANGE for H3LIS331DL: acceptable values are 100g, 200g, 400g.")
        time.sleep(0.01)
        return
    # Apply configuration (no runtime-configurable parameters yet).
    def apply_config(self):
        # Currently no configurable parameters.
        return
    # Read the device, averaging ACCL_SAMPLES reads; updates lastValue,
    # the timestamp, and lastScaled.
    def query(self):
        # Accumulators for the X, Y, Z axis raw counts.
        val=[0,0,0]
        for n in range(int(self.params['ACCL_SAMPLES'])):
            x=0
            # Each axis is a little-endian 16-bit pair: (LSB reg, MSB reg).
            for offset in [(0x28,0x29),(0x2a,0x2b),(0x2c,0x2d)]:
                data0 = self.bus.read_byte_data(self.params['address'], offset[0])
                data1 = self.bus.read_byte_data(self.params['address'], offset[1])
                # Combine bytes and sign-extend from 16 bits.
                accl = data1 * 256 + data0
                if accl > 32767 :
                    accl -= 65536
                val[x] += accl
                x+=1
            time.sleep(0.021) # 50 Hz refresh, so go a very little bit slower than this.
        # Average the samples and convert raw counts to g.
        self.lastValue = [a/float(self.params['ACCL_SAMPLES'])/float(self.params['ACCL_SCALING']) for a in val]
        self.lastValue.append(np.sum(np.array(self.lastValue)**2)**0.5) # geometric sum of XYZ
        self.updateTimestamp()
        self.lastScaled = np.array(self.lastValue) * self.config['scale'] + self.config['offset']
        return
    # Release the SMBus handle.
    def deactivate(self):
        del self.bus
        pass
| djorlando24/pyLabDataLogger | src/device/i2c/h3lis331dlDevice.py | h3lis331dlDevice.py | py | 3,987 | python | en | code | 11 | github-code | 13 |
28280930549 | import asyncio
async def rhythm(beats=10, delay=1):
    """Print '1' once immediately, then once per beat.

    Generalised: beat count and inter-beat delay are now parameters; the
    defaults (10 beats, 1 second apart) reproduce the original behaviour,
    so existing ``rhythm()`` callers are unaffected.

    Args:
        beats: Number of additional beats after the initial print.
        delay: Seconds to sleep between beats.
    """
    print('1')
    for _ in range(beats):
        await asyncio.sleep(delay)
        print('1')
async def bang(bangs=5, delay=3):
    """Print 'Bang' once immediately, then once per repetition.

    Generalised: repetition count and delay are now parameters; the
    defaults (5 repetitions, 3 seconds apart) reproduce the original
    behaviour, so existing ``bang()`` callers are unaffected.

    Args:
        bangs: Number of additional prints after the initial one.
        delay: Seconds to sleep between prints.
    """
    print('Bang')
    for _ in range(bangs):
        await asyncio.sleep(delay)
        print('Bang')
async def main():
    """Run bang() and rhythm() concurrently and wait for both to finish."""
    coroutines = (bang(), rhythm())
    await asyncio.gather(*coroutines)
if __name__ == '__main__':
    # Start the event loop; returns once both coroutines complete.
    asyncio.run(main())
| AndreiZherder/python-practice | async/async2.py | async2.py | py | 478 | python | en | code | 0 | github-code | 13 |
70866692819 | import base64
import hashlib
#import secrets # no, is python3.6
from random import SystemRandom
import pkg_resources
import io
# Load the bundled word list once at import time; one word per line.
stream = io.TextIOWrapper(pkg_resources.resource_stream(__package__,'words.txt'))
try:
    all_words = [line.strip() for line in stream.readlines()]
finally:
    stream.close()
def randomwords(k=4):
    """Return k words drawn (with replacement) from the bundled word list.

    Uses one SystemRandom instance for all draws; the original needlessly
    constructed a fresh instance per word. SystemRandom draws from the
    OS entropy source, so the result is cryptographically random either way.
    """
    rng = SystemRandom()
    return [rng.choice(all_words) for _ in range(k)]
def hashwords_b64(words):
    """URL-safe base64 of the SHA-256 of the space-joined words, '=' padding stripped."""
    joined = ' '.join(words).encode()
    encoded = base64.urlsafe_b64encode(hashlib.sha256(joined).digest())
    return encoded.decode('utf-8').rstrip('=')
def hashwords_hex(words):
    """Hex digest of the SHA-256 of the words joined by single spaces."""
    digest = hashlib.sha256()
    digest.update(' '.join(words).encode())
    return digest.hexdigest()
def id_hash(words):
return hashwords_b64(words)[:8] | sanderevers/80by24 | run80by24-common/run80by24/common/id_generator/__init__.py | __init__.py | py | 753 | python | en | code | 1 | github-code | 13 |
27891630874 | from __future__ import print_function
import collections
import difflib
import os
import re
import sys
import gyp_compiler
# Find chromite!
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', '..', '..'))
from chromite.lib import commandline
from chromite.lib import cros_logging as logging
# Object holding the result of a lint check.
LintResult = collections.namedtuple('LintResult', (
# The name of the linter checking.
'linter',
# The file the issue was found in.
'file',
# The message for this check.
'msg',
# The type of result -- error, warning, etc...
'type',
))
def WalkGyp(functor, gypdata):
    """Walk |gypdata| depth-first and call |functor| on every dict key/value.

    Dicts contribute their items (and are recursed into); lists and tuples
    are recursed into.  Returns the concatenation, in visit order, of every
    list |functor| returned.
    """
    def visit(node):
        findings = []
        if isinstance(node, dict):
            for key, value in node.items():
                findings.extend(functor(key, value))
                findings.extend(visit(value))
        elif isinstance(node, (tuple, list)):
            for child in node:
                findings.extend(visit(child))
        return findings
    return visit(gypdata)
def GypLintLibFlags(gypdata):
    """-lfoo flags belong in 'libraries' and not 'ldflags'."""
    def CheckNode(key, value):
        if not key.startswith('ldflags'):
            return []
        return ['-l flags should be in "libraries", not "ldflags": %s' % (flag,)
                for flag in value if flag.startswith('-l')]
    return WalkGyp(CheckNode, gypdata)
def GypLintVisibilityFlags(gypdata):
    """Packages should not change -fvisibility settings."""
    def CheckNode(key, value):
        if not key.startswith('cflags'):
            return []
        return ['do not use -fvisibility; to export symbols, use '
                'brillo/brillo_export.h instead'
                for flag in value if flag.startswith('-fvisibility')]
    return WalkGyp(CheckNode, gypdata)
def GypLintDefineFlags(gypdata):
    """-D flags should be in 'defines', not cflags."""
    def CheckNode(key, value):
        if not key.startswith('cflags'):
            return []
        return ['-D flags should be in "defines", not "%s": %s' % (key, flag)
                for flag in value if flag.startswith('-D')]
    return WalkGyp(CheckNode, gypdata)
def GypLintCommonTesting(gypdata):
    """Packages should use common_test.gypi instead of -lgtest/-lgmock."""
    def CheckNode(key, value):
        relevant = key.startswith('ldflags') or key.startswith('libraries')
        if relevant and not {'-lgtest', '-lgmock'}.isdisjoint(value):
            return ['use common-mk/common_test.gypi for tests instead of '
                    'linking against -lgtest/-lgmock directly']
        return []
    return WalkGyp(CheckNode, gypdata)
def GypLintOrderedFiles(gypdata):
    """Files should be kept sorted.

    Python's sorted() compares the raw strings byte-wise here, which
    matches how clang-format orders include files.
    """
    def CheckNode(key, value):
        if key != 'sources':
            return []
        diff = list(difflib.unified_diff(value, sorted(value), lineterm=''))
        if not diff:
            return []
        # The first three diff entries are the header; skip them.
        return ['source files should be kept sorted:\n%s' % ('\n'.join(diff[3:]),)]
    return WalkGyp(CheckNode, gypdata)
# It's not easy to auto-discover pkg-config files as we don't require a chroot
# or a fully installed sysroot to run this linter.  Plus, there's no clean way
# to correlate -lfoo names with pkg-config .pc file names.  List the packages
# that we tend to use in platform2 projects.
# Maps a raw linker flag to the pkg-config package that should replace it.
KNOWN_PC_FILES = {
    '-lblkid': 'blkid',
    '-lcap': 'libcap',
    '-lcrypto': 'libcrypto',
    '-ldbus-1': 'dbus-1',
    '-ldbus-c++-1': 'dbus-c++-1',
    '-ldbus-glib-1': 'dbus-glib-1',
    '-lexpat': 'expat',
    '-lfuse': 'fuse',
    '-lglib-2.0': 'glib-2.0',
    '-lgobject-2.0': 'gobject-2.0',
    '-lgthread-2.0': 'gthread-2.0',
    '-lminijail': 'libminijail',
    '-lpcre': 'libpcre',
    '-lpcrecpp': 'libpcrecpp',
    '-lpcreposix': 'libpcreposix',
    '-lprotobuf': 'protobuf',
    '-lprotobuf-lite': 'protobuf-lite',
    '-lssl': 'libssl',
    '-ludev': 'libudev',
    '-lusb-1.0': 'libusb-1.0',
    '-luuid': 'uuid',
    '-lz': 'zlib',
}
# Frozen view of the flag names, used for fast set intersection in linters.
KNOWN_PC_LIBS = frozenset(KNOWN_PC_FILES.keys())
def GypLintPkgConfigs(gypdata):
    """Use pkg-config files for known libs instead of manual -lfoo linkage."""
    def CheckNode(key, value):
        if not (key.startswith('ldflags') or key.startswith('libraries')):
            return []
        return [('use pkg-config instead: delete "%s" from "%s" and add '
                 '"%s" to "deps"') % (lib, key, KNOWN_PC_FILES[lib])
                for lib in KNOWN_PC_LIBS & set(value)]
    return WalkGyp(CheckNode, gypdata)
def RawLintWhitespace(data):
    """Make sure whitespace is sane: files must end with a newline."""
    if data.endswith('\n'):
        return []
    return ['missing newline at end of file']
def LineIsComment(line):
    """Whether this entire line is a comment (possibly after whitespace).

    Note: inline trailing comments are not detected -- only lines whose
    first non-blank character is '#'.  We discourage inline comments
    anyway, so that shouldn't be a problem.
    """
    return line.lstrip()[:1] == '#'
def LinesLintWhitespace(lines):
    """Make sure whitespace is sane: no tabs, no trailing space, no blank edges."""
    findings = []
    for num, line in enumerate(lines, 1):
        if line.count('\t'):
            findings.append('use spaces, not tabs: line %i: %s' % (num, line))
        if line != line.rstrip():
            findings.append('delete trailing whitespace: line %i: %s' % (num, line))
    if lines and not lines[0]:
        findings.append('delete leading blanklines')
    if lines and not lines[-1]:
        findings.append('delete trailing blanklines')
    return findings
def LinesLintDanglingCommas(lines):
    """Check for missing dangling commas on closing }/]/' lines."""
    findings = []
    for num, line in enumerate(lines, 1):
        if LineIsComment(line):
            continue
        if line.endswith(('}', ']', "'")) and len(line) > 1:
            findings.append('add a dangling comma: line %i: %s' % (num, line))
    return findings
def LinesLintSingleQuotes(lines):
    """Check for double quote usage at end of line."""
    findings = []
    for num, line in enumerate(lines, 1):
        if LineIsComment(line) or not line.endswith('"'):
            continue
        findings.append('use single quotes instead of double quotes: line %i: %s' %
                        (num, line))
    return findings
# The regex used to find gyplint directives embedded in the file being linted.
# This matches the regex pylint uses for its own inline directives.
OPTIONS_RE = re.compile(r'^\s*#.*\bgyplint:\s*([^\n;]+)', flags=re.MULTILINE)
# Immutable bag of per-file linter settings parsed from those directives.
LintSettings = collections.namedtuple('LinterSettings', (
    # Names of linter functions to skip for this file.
    'skip',
))
def ParseOptions(options, name=None):
    """Parse out the linter settings from |options|.

    Currently we support:
      disable=<linter name>[,<linter name>...]

    Args:
      options: A list of linter options (e.g. ['foo=bar']).
      name: The file we're parsing, for error messages.

    Returns:
      A LintSettings object.

    Raises:
      ValueError: if an option key is not recognised.
    """
    skip = set()
    for option in options:
        key, value = option.split('=', 1)
        key = key.strip()
        if key != 'disable':
            raise ValueError('%s: unknown gyplint option: %s' % (name, key))
        skip.update(part.strip() for part in value.split(','))
    return LintSettings(skip)
def RunLinters(prefix, name, data, settings=None):
    """Run every module-level linter named |prefix|* against |data|.

    Linters listed in |settings|.skip are not run.  Returns a list of
    LintResult errors in linter-definition order.
    """
    if settings is None:
        settings = ParseOptions([])
    ret = []
    for linter, functor in list(globals().items()):
        if not linter.startswith(prefix) or linter in settings.skip:
            continue
        ret.extend(LintResult(linter, name, msg, logging.ERROR)
                   for msg in functor(data))
    return ret
def CheckGyp(name, gypdata, settings=None):
    """Check parsed |gypdata| for common mistakes.

    Runs every GypLint* linter against the parsed AST; returns a list of
    LintResult errors.
    """
    return RunLinters('GypLint', name, gypdata, settings=settings)
def CheckGypData(name, data):
    """Check |data| (gyp file as a string) for common mistakes.

    Returns a list of LintResult errors; a parse failure yields a single
    'invalid format' result.
    """
    try:
        gypdata = gyp_compiler.CheckedEval(data)
    except Exception as e:
        return [LintResult('gyp.input.CheckedEval', name, 'invalid format: %s' % e,
                           logging.ERROR)]
    # Honour any gyplint directives embedded in the file itself.
    settings = ParseOptions(OPTIONS_RE.findall(data), name=name)
    lines = data.splitlines()
    # Style/syntax linters run on the raw text first, then the parsed AST.
    ret = RunLinters('RawLint', name, data, settings=settings)
    ret += RunLinters('LinesLint', name, lines, settings=settings)
    ret += CheckGyp(name, gypdata, settings)
    return ret
def CheckGypFile(gypfile):
    """Check |gypfile| for common mistakes.

    Reads the file and delegates to CheckGypData; returns LintResults.
    """
    with open(gypfile) as fp:
        return CheckGypData(gypfile, fp.read())
def FilterFiles(files, extensions):
    """Yield the entries of |files| whose extension is in |extensions|.

    Hidden files (basename starting with a dot) are always skipped.
    """
    for path in files:
        # os.path.splitext returns e.g. ".gyp"; drop the leading period.
        ext = os.path.splitext(path)[1][1:]
        if ext in extensions and not os.path.basename(path).startswith('.'):
            logging.debug('Checking %s', path)
            yield path
        else:
            logging.debug('Skipping %s', path)
def FilterPaths(paths, extensions):
    """Walk |paths| recursively, yielding files matching |extensions|.

    Plain files are filtered directly; directories are walked and their
    contents filtered, yielding joined paths.
    """
    regular_files = []
    directories = []
    for entry in paths:
        (directories if os.path.isdir(entry) else regular_files).append(entry)
    for match in FilterFiles(regular_files, extensions):
        yield match
    for directory in directories:
        for root, _, files in os.walk(directory):
            for match in FilterFiles(files, extensions):
                yield os.path.join(root, match)
def GetParser():
    """Return an argument parser for the gyplint command line."""
    parser = commandline.ArgumentParser(description=__doc__)
    parser.add_argument('--extensions', default='gyp,gypi',
                        help='Comma delimited file extensions to check. '
                        '(default: %(default)s)')
    parser.add_argument('files', nargs='*',
                        help='Files to run lint.')
    return parser
def main(argv):
    """Lint every requested file; return 1 if any file had findings."""
    parser = GetParser()
    opts = parser.parse_args(argv)
    if not opts.files:
        logging.warning('No files provided to lint. Doing nothing.')
        return 0
    extensions = set(opts.extensions.split(','))
    # Count files with at least one finding; drives the exit status.
    num_files = 0
    for gypfile in FilterPaths(opts.files, extensions):
        results = CheckGypFile(gypfile)
        if results:
            logging.error('**** %s: found %i issue(s)', gypfile, len(results))
            for result in results:
                logging.log(result.type, '%s: %s', result.linter, result.msg)
            num_files += 1
    if num_files:
        logging.error('%i file(s) failed linting', num_files)
    return 1 if num_files else 0
if __name__ == '__main__':
    # chromite wrapper handles argv parsing and exit-code plumbing.
    commandline.ScriptWrapperMain(lambda _: main)
| ansiwen/chromiumos-platform2 | common-mk/gyplint.py | gyplint.py | py | 10,714 | python | en | code | 0 | github-code | 13 |
73718709136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from distutils.core import setup
pkg_name = 'symodesys'
version_ = '0.0.1'
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: C",
"Programming Language :: Cython",
"Programming Language :: Fortran",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
]
if '--help'in sys.argv[1:] or sys.argv[1] in (
'--help-commands', 'egg_info', 'clean', '--version'):
cmdclass_ = {}
ext_modules=ext_modules_,
else:
from pycodeexport import pce_build_ext
from symodesys.shared._setup_shared import get_shared_pce_ext
from symodesys.odepack._setup_odepack import get_odepack_pce_ext
from symodesys.gsl._setup_gsl import get_gsl_pce_ext
from symodesys.sundials._setup_sundials import get_sundials_pce_ext
ext_modules_ = [
get_shared_pce_ext(pkg_name),
#get_odepack_pce_ext(pkg_name),
get_gsl_pce_ext(pkg_name),
get_sundials_pce_ext(pkg_name),
]
cmdclass_ = {'build_ext': pce_build_ext}
setup(
name=pkg_name,
version=version_,
author='Björn Dahlgren',
author_email='bjodah@DELETEMEgmail.com',
description='Convenience functions for use with sympy.',
license="BSD",
url='https://github.com/bjodah/'+pkg_name,
download_url='https://github.com/bjodah/'+pkg_name+'/archive/v'+version_+'.tar.gz',
packages=['symodesys', 'symodesys.helpers'],
cmdclass=cmdclass_,
ext_modules=ext_modules_,
classifiers=classifiers,
)
| bjodah/symodesys | setup.py | setup.py | py | 1,911 | python | en | code | 1 | github-code | 13 |
32994523406 | import cv2
import numpy as np
import statistics as stat
def adjust_gamma(image):
    """Mask |image| down to a fixed HSV colour range and add an alpha channel.

    NOTE(review): despite the name, no gamma correction happens here --
    this is pure HSV in-range masking.  Renaming would break callers, so
    the misnomer is only flagged.
    """
    # Hue/Saturation/Value thresholds defining which pixels survive the
    # mask.  These constants can be tuned for different target colours.
    hMin = 29  # Hue minimum
    sMin = 30  # Saturation minimum
    vMin = 0  # Value minimum (Also referred to as brightness)
    hMax = 179  # Hue maximum
    sMax = 255  # Saturation maximum
    vMax = 255  # Value maximum
    # cv2.inRange needs numpy arrays for its bounds.
    lower = np.array([hMin, sMin, vMin])
    upper = np.array([hMax, sMax, vMax])
    # Convert BGR -> HSV and build a binary mask of in-range pixels.
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    # Keep only masked pixels; everything else becomes black background.
    output = cv2.bitwise_and(image, image, mask=mask)
    # Append the last channel of the masked image as an alpha channel.
    # NOTE(review): merging a 3-channel image with a 1-channel plane --
    # presumably this yields a BGRA image; confirm against callers.
    *_, alpha = cv2.split(output)
    dst = cv2.merge((output, alpha))
    output = dst
    return output
# return background
def background(grayImage):
    """Estimate the background grey level as the mode of the per-row modes."""
    row_modes = [np.bincount(row).argmax() for row in grayImage]
    return np.bincount(row_modes).argmax()
def get_step(image1):
    """Scan a resized grayscale tab image and estimate the column where
    code starts (the indent step, in pixels of the 270-px-high image).

    Rows are binarised against the estimated background level; runs of
    rows containing non-background pixels are treated as code blocks.
    Returns the modal start column of the first tab-free block, or the
    fallback value 20 if none is found.
    """
    (H, W) = image1.shape[:2]
    h_w_ratio = W / (1.0 * H)
    # Normalise to a fixed height so the pixel step constants apply.
    h = 270
    w = int(h * h_w_ratio)
    step = 26
    image1 = cv2.resize(image1, (w, h))
    grayImage = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    bg_x = background(grayImage)
    saw_code = False
    block = []
    lines = []
    tab_count = 0
    for i, row in enumerate(grayImage):
        # Zero out every pixel that differs from the background level,
        # so zeros mark foreground (ink) pixels.
        temp = np.where(row < bg_x, 0, row)
        temp = np.where(temp > bg_x, 0, temp)
        skip_vlines = np.count_nonzero(temp == 0)
        block.append(temp)
        # print(i, skip_vlines, saw_code)
        if temp.__contains__(0) and skip_vlines > 6:
            saw_code = True
        elif saw_code:
            # A code block just ended; inspect the accumulated rows.
            saw_code = False
            segment = []
            subseg = []
            seg = []
            for j in range(len(block)):
                seg.append(block[j][0:50]) # would work for no lines
                segment.append(block[j][step]) # would work for no lines
                subseg.append(block[j][step - step // 2])
            np_segment = np.asarray(segment)
            np_subseg = np.asarray(subseg)
            # check 13s and last line
            # Block is "tab-free" when neither probe column hit ink.
            tab = not (np.any(np_segment == 0) or np.any(np_subseg == 0))
            if tab:
                seg_lines = []
                tab_count += 1
                # Find, per row, the first ink (or near-white) pixel after
                # the background has been seen; its column is the indent.
                for se in seg:
                    bg_seen = False
                    for k, s in enumerate(se):
                        if s == bg_x:
                            bg_seen = True
                        if bg_seen and s == 0 or s>240: # consider black or white background
                            seg_lines.append(k)
                            break
                # print(seg_lines)
                return stat.mode(seg_lines)
                # segment.clear()
                # subseg.clear()
                # seg.clear()
                # break
            else:
                lines.append(tab_count)
                tab_count = 0
            block.clear()
    # Fallback step when no suitable block was found.
    return 20
def get_tabs(image1, step_size):
    """Count indentation tabs for each code block found in the image.

    Uses the pixel step derived by get_step() (quantised to 26 or 20) and
    probes successive multiples of it: each tab-free probe column counts
    as one tab level.  Returns a list with one tab count per block.
    """
    (H, W) = image1.shape[:2]
    h_w_ratio = W / (1.0 * H)
    h = 270
    w = int(h * h_w_ratio)
    # Quantise the measured step to one of the two known tab widths.
    if step_size > 29:
        step = 26
    else:
        step = 20
    image1 = cv2.resize(image1, (w, h))
    grayImage = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    bg_x = background(grayImage)
    saw_code = False
    block = []
    lines = []
    tab_count = 0
    for i, row in enumerate(grayImage):
        # Zeros mark non-background (ink) pixels after this pass.
        temp = np.where(row < bg_x, 0, row)
        temp = np.where(temp > bg_x, 0, temp)
        skip_vlines = np.count_nonzero(temp == 0)
        block.append(temp)
        if temp.__contains__(0) and skip_vlines > 6:
            saw_code = True
        elif saw_code:
            # A code block just ended; probe each tab-stop column.
            saw_code = False
            segment = []
            subseg = []
            for k in range(1, 40): # upto 20 tabs
                for j in range(len(block)):
                    # print(len(block[1]))
                    #TODO temporary check -- needs be fixed
                    if (step * k) <= len(block[1]):
                        segment.append(block[j][step * k]) # would work for no lines
                    # TODO temporary check -- needs be fixed
                    if (step * k - step // 2) <= len(block[1]):
                        subseg.append(block[j][step * k - step // 2])
                np_segment = np.asarray(segment)
                np_subseg = np.asarray(subseg)
                # check 13s and last line
                # Column counts as one more tab only if neither probe hit ink.
                tab = not (np.any(np_segment == 0) or np.any(np_subseg == 0))
                if tab:
                    tab_count += 1
                    segment.clear()
                    subseg.clear()
                else:
                    lines.append(tab_count)
                    tab_count = 0
                    break
            block.clear()
    return lines
def contrast_old(img):
    """Boost local contrast by applying CLAHE to the L channel in LAB space."""
    # Work in LAB so only lightness is equalised, leaving colour intact.
    lightness, chroma_a, chroma_b = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2LAB))
    # CLAHE equalises the histogram per 8x8 tile, clipping at 3.0 to limit noise.
    equalised = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8)).apply(lightness)
    merged = cv2.merge((equalised, chroma_a, chroma_b))
    # Back to BGR for the caller.
    return cv2.cvtColor(merged, cv2.COLOR_LAB2BGR)
def contrast_bright(img, alpha, beta):
new_img = cv2.addWeighted( img, alpha, img, 0, beta)
return new_img | mhamdan91/IMAGE_2_CODE | utils.py | utils.py | py | 6,674 | python | en | code | 3 | github-code | 13 |
41139536739 | import numpy as np
from orangecontrib.associate.fpgrowth import *
from sklearn.externals import joblib
from os import listdir
from os.path import isfile, join
import os
master_index = {}
def create_graph(input_file, output_file, output_named_file, year_count, min_transactions = 2, confidence = 0.02 ):
    """Mine pairwise association rules from one year's transactions.

    Reads comma-separated item lines from |input_file|, assigns each
    distinct item a stable integer id in the module-global master_index
    (shared across calls so ids stay consistent between years), mines
    frequent itemsets/rules, and writes single-item -> single-item rules
    weighted by rule support / year_count to |output_file| (integer ids)
    and |output_named_file| (original item names).
    """
    print('Creating graph for : {0}'.format(input_file))
    with open(input_file, 'r') as fp:
        lines = fp.read()
        items = [line.replace('\"' , '').split(',') for line in lines.split('\n')]
        # Assign the next free integer id to any item not seen before.
        for item in set([element for item in items for element in item]):
            if item not in master_index:
                if not master_index.values():
                    master_index[item] = 1
                else:
                    master_index[item] = max(master_index.values()) + 1
        int_items = [[master_index[x] for x in item] for item in items]
        itemsets = frequent_itemsets(int_items, min_transactions)
        itemsets = dict(itemsets)
        # count_map maps single items to their support count (currently
        # unused in the output weighting below).
        count_map = {list(key)[0]: value for key, value in itemsets.items() if len(key) == 1}
        rules = list(association_rules(itemsets, confidence))
        # Keep only 1-item -> 1-item rules (graph edges).
        pair_rules = [rule for rule in rules if len(list(rule[0])) == 1 and len(list(rule[1])) == 1]
        with open(output_file, 'w') as fp1:
            for rule in pair_rules:
                #fp1.write('{0} {1} {2}\n'.format(list(rule[0])[0],list(rule[1])[0],rule[2]/float(count_map[list(rule[0])[0]])))
                fp1.write('{0} {1} {2}\n'.format(list(rule[0])[0],list(rule[1])[0],rule[2]/float(year_count)))
        # Reverse mapping to emit the human-readable edge list.
        master_index_rev = dict((v,k) for k,v in master_index.items())
        with open(output_named_file, 'w') as fp1:
            for rule in pair_rules:
                #fp1.write('{0} {1} {2}\n'.format(master_index_rev[list(rule[0])[0]],master_index_rev[list(rule[1])[0]],rule[2]/float(count_map[list(rule[0])[0]])))
                fp1.write('{0} {1} {2}\n'.format(master_index_rev[list(rule[0])[0]],master_index_rev[list(rule[1])[0]],rule[2]/float(year_count)))
    print('Finished creating graph..')
if __name__ == '__main__':
    # Fixed data layout on the processing host; output folders are created
    # fresh (os.makedirs raises if they already exist).
    root_folder = '/home/hduser/iit_data/ask_ubuntu_new/'
    os.makedirs(os.path.join(root_folder, 'year_wise_graphs'))
    os.makedirs(os.path.join(root_folder, 'year_wise_named_graphs'))
    os.makedirs(os.path.join(root_folder, 'models'))
    input_folder = os.path.join(root_folder, 'year_wise')
    output_folder = os.path.join(root_folder, 'year_wise_graphs')
    output_name_folder = os.path.join(root_folder, 'year_wise_named_graphs')
    model_folder = os.path.join(root_folder, 'models')
    # Per-year question counts used to normalise the rule weights.
    year_count_map = joblib.load('/home/hduser/iit_data/ask_ubuntu_new/year_qn_count.pkl')
    # One input file per year, named '<year>.<ext>'.
    for file in listdir(input_folder):
        year = file.split('.')[0].strip()
        create_graph(join(input_folder, file), join(output_folder, file), join(output_name_folder, file), year_count_map[year])
joblib.dump(master_index, join(model_folder, 'master_index.pkl'), compress=1) | vijinkp/graph-analysis | associate_rule_mining.py | associate_rule_mining.py | py | 2,713 | python | en | code | 0 | github-code | 13 |
18314464325 | """
An implementation of WORDLE(tm) designed with the visually impaired
in mind.
"""
import sqlite3
import random
import datetime
import pygame
from more_itertools import collapse
# pylint: disable=no-member
class Paterdal:
"""
The implementation
"""
def __init__(
self,
col_offset=10,
row_offset=10,
h_tile_size=80,
v_tile_size=80,
cols=5,
rows=6,
line_width=4,
line_color=(0, 0, 0),
background_color=(255, 255, 255),
):
"""Initializes the game
:param col_offset:
:param row_offset:
:param h_tile_size:
:param v_tile_size:
:param line_width:
:param line_color:
:param background_color:
"""
# Initialize pygame
pygame.init()
# 8x8 square board
self.cols = cols
self.rows = rows
self.board = {
1: ["", "", "", "", ""],
2: ["", "", "", "", ""],
3: ["", "", "", "", ""],
4: ["", "", "", "", ""],
5: ["", "", "", "", ""],
6: ["", "", "", "", ""]
}
self.col_offset = col_offset
self.row_offset = row_offset
self.h_tile_size = h_tile_size
self.v_tile_size = v_tile_size
self.line_width = line_width
self.line_color = line_color
self.back_color = background_color
self.current_try = 1
self.current_cell = 0
# Set up the PyGame window:
self.width = round(2 * self.col_offset + self.cols * self.h_tile_size)
self.height = round(2 * self.row_offset + self.rows * self.v_tile_size)
self.screen = pygame.display.set_mode((self.width, self.height),
pygame.RESIZABLE
)
# Set window title:
pygame.display.set_caption("Paterdal")
# Manage how frequently the screen updates
self.clock = pygame.time.Clock()
self.word = self.get_word_from_db()
self.letters = self.split_word(self.word)
# Draw the board
self.draw_board()
def split_word(self, word):
"""
Splits word into its individual letters and
returns that as a list.
:param word:
:return:
"""
letters = []
for letter in word:
letters.append(letter)
return letters
def get_word_from_db(self):
"""
Gets the word used from the database
:return:
"""
database = sqlite3.connect("wordlist.db")
db_cursor = database.cursor()
alpha_indices = db_cursor.execute(
"SELECT DISTINCT alpha_idx FROM words"
)
index_list = list(collapse(alpha_indices.fetchall()))
use_date = "2022-01-01"
while use_date != "":
index = random.choice(index_list)
word_data = db_cursor.execute(
"SELECT word, used_date FROM words WHERE alpha_idx = :index",
index
)
word_list = word_data.fetchall()
word, use_date = random.choice(word_list)
# mark the word as used
use_date = datetime.date.today()
db_cursor.execute(
"UPDATE words SET used_date = :use_date WHERE word = :word",
[use_date, word]
)
database.commit()
database.close()
return word
def is_alpha_key(self, event_key):
"""
Is the key pressed a letter key?
:param event_key:
:return:
"""
return event_key in [
pygame.K_a, pygame.K_b, pygame.K_c, pygame.K_d, pygame.K_e,
pygame.K_f, pygame.K_g, pygame.K_h, pygame.K_i, pygame.K_j,
pygame.K_k, pygame.K_l, pygame.K_m, pygame.K_n, pygame.K_o,
pygame.K_p, pygame.K_q, pygame.K_r, pygame.K_s, pygame.K_t,
pygame.K_u, pygame.K_v, pygame.K_w, pygame.K_x, pygame.K_y,
pygame.K_z
]
def get_letter(self, attempt, col):
"""
Returns a specific letter from a specific guess
:param attempt:
:param col:
:return:
"""
row = self.board[attempt]
return row[col]
def get_guess(self, attempt):
"""
Returns the entire list of letters from the guess
:param attempt:
:return:
"""
return self.board[attempt]
def calculate_points(self, col, row):
"""
Calculates the points of a given cell on the board, based
on the cell's col, row, and offsets.
:param col:
:param row:
:return:
"""
col_offset = round(self.col_offset)
row_offset = round(self.row_offset)
x_start = col_offset + self.h_tile_size * col
y_start = row_offset + self.v_tile_size * row
x_end = x_start + self.h_tile_size
y_end = y_start + self.v_tile_size
points = [
(x_start, y_start),
(x_end, y_start),
(x_end, y_end),
(x_start, y_end),
]
return points
def draw_board(self):
"""Displays the grid"""
self.screen.fill(self.back_color)
for row in range(self.rows):
for col in range(self.cols):
points = self.calculate_points(col, row)
pygame.draw.lines(self.screen, self.line_color, True, points,
self.line_width
)
def get_size(self,
col_offset=-1,
row_offset=-1,
cols=-1,
rows=-1,
h_tile_size=-1,
v_tile_size=-1
):
"""
Returns the calculated size of the board based on the
above parameters.
:param col_offset:
:param row_offset
:param cols:
:param rows:
:param h_tile_size:
:param v_tile_size:
:return:
"""
col_offset = self.col_offset if col_offset < 0 else col_offset
row_offset = self.row_offset if row_offset < 0 else row_offset
cols = self.cols if cols < 0 else cols
rows = self.rows if rows < 0 else rows
h_tile_size = self.h_tile_size if h_tile_size < 0 else h_tile_size
v_tile_size = self.v_tile_size if v_tile_size < 0 else v_tile_size
# Squares...
tile_size = max(h_tile_size, v_tile_size)
self.h_tile_size = tile_size
self.v_tile_size = tile_size
width = round((2 * col_offset) + cols * tile_size)
height = round((2 * row_offset) + rows * tile_size)
return width, height
def calculate_new_size(self, width, height):
"""
Determines the new size of the resized screen
:param width:
:param height:
:return:
"""
current_width, current_height = self.get_size()
width_ratio = width / current_width
height_ratio = height / current_height
self.col_offset *= width_ratio
self.row_offset *= height_ratio
self.v_tile_size *= width_ratio
self.h_tile_size *= height_ratio
return self.get_size()
    def resize(self, new_width, new_height):
        """
        Resizes the board: clamps the request to a 200x300 minimum,
        rescales the geometry, recreates the resizable display surface
        and redraws the grid.
        :param new_width: requested width in pixels
        :param new_height: requested height in pixels
        :return: None
        """
        # Clamp to the minimum usable window size.
        width = 200 if new_width < 200 else new_width
        height = 300 if new_height < 300 else new_height
        new_width, new_height = self.calculate_new_size(width, height)
        self.screen = pygame.display.set_mode((new_width, new_height),
                                              pygame.RESIZABLE
                                              )
        self.draw_board()
    def start(self):
        """PyGame event loop: runs until the window is closed or ESC is
        pressed.  Mouse, scroll-wheel and alphabetic-key branches are
        placeholders (``pass``) for future behaviour.
        """
        # Event loop:
        finished = False
        while not finished:
            # Process any new PyGame events:
            for event in pygame.event.get():
                # Mouse moved over the PyGame window:
                if event.type == pygame.MOUSEMOTION:
                    pass
                # Mouse button was released over the PyGame window:
                if event.type == pygame.MOUSEBUTTONUP:
                    pass
                # Implement scrollable screen
                # scrolls by ROWS, not incrementally
                if event.type == pygame.MOUSEWHEEL:
                    pass
                # PyGame window was resized:
                if event.type == pygame.VIDEORESIZE:
                    self.resize(event.w, event.h)
                # User closed the window:
                if event.type == pygame.QUIT:
                    finished = True
                # Some key was pressed:
                if event.type == pygame.KEYDOWN:
                    # If the pressed key was ESC, exit:
                    if event.key == pygame.K_ESCAPE:
                        finished = True
                    # If the pressed key was DOWN ARROW, scroll screen
                    # one row down
                    if event.key == pygame.K_DOWN:
                        pass
                    # If the pressed key was UP ARROW, scroll screen
                    # one row up
                    if event.key == pygame.K_UP:
                        pass
                    # Handle alpha keys here
                    # NOTE(review): is_alpha_key is defined outside this
                    # excerpt — presumably a test for letter key codes.
                    if self.is_alpha_key(event.key):
                        pass
            # Limit refresh rate to 20 frames per second:
            self.clock.tick(20)
            # Refresh the screen:
            pygame.display.update()
        # Terminate PyGame:
        pygame.quit()
if __name__ == "__main__":
    # Launch the demo only when run as a script; importing this module
    # for its classes no longer opens a window.
    P = Paterdal()
    P.start()
| SeanWH/paterdal | src/paterdal.py | paterdal.py | py | 9,844 | python | en | code | 0 | github-code | 13 |
30643291242 | from tkinter import *
def click1(event):
    """First click on the username field: re-enable editing, clear the
    placeholder text, and remove this one-shot handler."""
    login_entry.configure(state=NORMAL)
    login_entry.delete(0,END)
    login_entry.unbind("<Button-1>", clicked1)
def click2(event):
    """First click on the password field: re-enable editing, clear the
    placeholder text, and remove this one-shot handler."""
    pw_entry.configure(state=NORMAL)
    pw_entry.delete(0,END)
    pw_entry.unbind("<Button-1>", clicked2)
window = Tk()
window.title("Kick.com login window")
window_icon = PhotoImage(file="kick.png")
window.iconphoto(True, window_icon)
window.geometry("300x125")
#-----------------------------------------------------
login_label = Label(window,
text = "Kick.com",
font = ("Ariel", 30, "bold"),
bg = "black",
fg = "#39FF14")
login_label.pack()
#-----------------------------------------------------
login_entry = Entry(window,
width = 30,
bg = "black",
fg = "#39FF14")
login_entry.insert(0, "Enter Username Here")
login_entry.pack()
clicked1 = login_entry.bind("<Button-1>", click1)
#-----------------------------------------------------
pw_entry = Entry(window,
width = 30,
bg="black",
fg="#39FF14",
show = "*")
pw_entry.insert(0, "Enter Password Here")
pw_entry.pack()
clicked2 = pw_entry.bind("<Button-1>", click2)
#-----------------------------------------------------
login_button = Button(window,
text = "Login",
bg = "black",
fg = "#39FF14")
login_button.pack()
#-----------------------------------------------------
window.mainloop() | 5puki/TheBasicsPractice | Kickloginpractice.py | Kickloginpractice.py | py | 1,655 | python | en | code | 0 | github-code | 13 |
2325284451 | import numpy as np
import torch
import torch.nn as nn
from scipy.io import loadmat
from sklearn.preprocessing import MinMaxScaler
from torch.autograd import Variable
import matplotlib.pyplot as plt
"""
Author: Sheng Kuang, Yimin Yang
"""
# Load data
training_data_path = 'Xtrain.mat'
test_data_path = 'Xtest-1.mat'
raw_data = loadmat(training_data_path)['Xtrain']
test_data = loadmat(test_data_path)['Xtest']
print(raw_data.shape)
# Normalization
s = MinMaxScaler(feature_range=(0, 1))
s = s.fit(raw_data)
raw_data = s.transform(raw_data)
# Define hyper-parameters
WINDOW_SIZE = 70
PREDICT_SIZE = 1
TRAINING_DATA_PERCENTAGE = 0.99
LEARNING_RATE = 0.01
NUM_EPOCHS = 5000
INPUT_SIZE = 1
HIDDEN_SIZE = 8
NUM_LAYERS = 1
OUTPUT_SIZE = 1
SHUFFLE = False
PREDICT_STEPS = 200
# Pre-processing: scale the dataset
total_data_size = raw_data.shape[0] - WINDOW_SIZE - PREDICT_SIZE
input_data = np.zeros((total_data_size, WINDOW_SIZE))
output_data = np.zeros((total_data_size, PREDICT_SIZE))
for i_start in range(total_data_size):
input_data[i_start, :] = raw_data[i_start:i_start + WINDOW_SIZE, :].T
output_data[i_start, :] = raw_data[i_start + WINDOW_SIZE: i_start + WINDOW_SIZE + PREDICT_SIZE, :].T
# Shuffle data
if SHUFFLE:
state = np.random.get_state()
np.random.shuffle(input_data)
np.random.set_state(state)
np.random.shuffle(output_data)
# Split data into training and validation array
train_num = int(TRAINING_DATA_PERCENTAGE * total_data_size)
train_x = Variable(torch.Tensor(input_data[:train_num, :]))
train_y = Variable(torch.Tensor(output_data[:train_num, :]))
valid_x = Variable(torch.Tensor(input_data[train_num:, :]))
valid_y = Variable(torch.Tensor(output_data[train_num:, :]))
print('Training :', train_x.shape, train_y.shape, ', Validation :', valid_x.shape, valid_y.shape)
train_x = torch.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1)) # records * seq_len * input_size
valid_x = torch.reshape(valid_x, (valid_x.shape[0], valid_x.shape[1], 1)) # records * seq_len * input_size
print('Training :', train_x.shape, train_y.shape, ', Validation :', valid_x.shape, valid_y.shape)
class Model1(nn.Module):
    """Single-output LSTM regressor.

    The LSTM consumes a (batch, seq_len, input_size) window; all hidden
    states of the sequence are flattened and mapped through one linear
    layer to the prediction.
    """
    def __init__(self, v_input_size, v_hidden_size, v_num_layers, v_output_size, seq_len, batch_size):
        super(Model1, self).__init__()
        # Keep the configuration on the instance (also needed when the
        # model is pickled/unpickled via torch.save/torch.load).
        self.input_size = v_input_size
        self.hidden_size = v_hidden_size
        self.num_layers = v_num_layers
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, batch_first=True, )
        self.fc = nn.Linear(self.hidden_size * self.seq_len, v_output_size)
    def forward(self, x):
        """Map (batch, seq_len, input_size) -> (batch, output_size)."""
        # Default zero initial states; hidden/cell outputs are discarded.
        hidden_seq, _ = self.lstm(x)
        flat = hidden_seq.contiguous().view(-1, self.hidden_size * self.seq_len)
        return self.fc(flat)
    def recursive_predict(self, x, steps):
        """Autoregressively extend the series ``x`` by ``steps`` values.

        Each prediction is appended to the sequence and fed back in as
        part of the next input window.  Returns a (steps, 1) array.
        """
        window = x.shape[0]
        seq = np.zeros((steps + window, 1))
        seq[:window, :] = x
        for step in range(steps):
            frame = seq[step:step + window, :].reshape(1, -1, 1)
            pred = self.forward(Variable(torch.Tensor(frame)))
            seq[window + step, 0] = pred[0, 0]
        return seq[window:, :]
def training(v_train_x, v_train_y, v_valid_x, v_valid_y, epochs, v_input_size, v_hidden_size, v_num_layers,
             v_output_size, lr):
    """Train a Model1 LSTM with Adam on MSE loss.

    Prints a training/validation report every 100 epochs, saves the
    model to 'LSTM.pkl', and returns (model, per-epoch loss array of
    shape (epochs, 1)).
    """
    model = Model1(v_input_size, v_hidden_size, v_num_layers, v_output_size, seq_len=v_train_x.shape[1],
                   batch_size=v_train_x.shape[0])
    criterion = nn.MSELoss()  # mean squared error
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_log = np.zeros((epochs, 1))
    for epoch in range(epochs):
        # Full-batch training step.
        predictions = model(v_train_x)
        optimizer.zero_grad()
        loss = criterion(predictions, v_train_y)
        loss.backward()
        optimizer.step()
        loss_log[epoch, 0] = loss.item()
        if epoch % 100 == 0:
            # Periodic validation report.
            val_out = model(v_valid_x)
            val_loss = criterion(val_out, v_valid_y)
            print("Epoch: {0}, Training loss: {1:1.8f}, Validation loss: {2:1.8f}".format(epoch, loss.item(),
                                                                                          val_loss.item()))
    # Persist the trained model.
    torch.save(model, 'LSTM.pkl')
    return model, loss_log
"""
============ =====================================================================
Prediction
------------ ---------------------------------------------------------------------
"""
# model, log = training(train_x, train_y, valid_x, valid_y, NUM_EPOCHS, INPUT_SIZE, HIDDEN_SIZE, NUM_LAYERS, OUTPUT_SIZE,
# LEARNING_RATE)
# prediction = model.recursive_predict(raw_data[-WINDOW_SIZE:, :], PREDICT_STEPS)
model = torch.load('LSTM_final.pkl')
prediction = model.recursive_predict(raw_data[-40:, :], PREDICT_STEPS)
criterion = nn.MSELoss()
# Original
inverse_prediction = s.inverse_transform(prediction)
test_MSE = criterion(Variable(torch.Tensor(test_data)), Variable(torch.Tensor(inverse_prediction))).item()
print('Test MSE(Original): {}'.format(test_MSE))
print('Test MAE(Original): {}'.format(np.average(np.abs(test_data - inverse_prediction))))
# # Normalized
# test_MSE_n = criterion(Variable(torch.Tensor(s.transform(test_data))), Variable(torch.Tensor(prediction))).item()
# print('Test MSE(Normalized): {}'.format(test_MSE_n))
# plot predictions
fig = plt.figure(figsize=(8, 2))
plt.plot(s.inverse_transform(raw_data), label='Real data')
plt_x = np.array(range(0, PREDICT_STEPS + 1, 1)) + raw_data.shape[0] - 1
plt.plot(plt_x, s.inverse_transform(np.vstack((raw_data[-1, :].reshape((1, 1)), prediction))), label='Prediction')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.legend(loc='upper right', ncol=3, fontsize=8)
plt.show()
# comparison
fig2, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2))
ax1.plot(s.inverse_transform(prediction), label='Prediction')
ax1.set_ylabel('Amplitude')
ax1.set_ylim(-20, 250)
# ax1.legend(loc='lower right')
ax1.grid()
ax1.set_title('Predictions')
ax2.plot(test_data, label='Test data')
ax2.set_ylim(-20, 250)
# ax2.legend(loc='lower right')
ax2.grid()
ax2.set_title('Test data')
ax3.plot(np.abs(s.inverse_transform(prediction) - test_data), label='Diff')
ax3.set_ylim(-20, 250)
# ax3.legend(loc='lower right')
ax3.grid()
ax3.set_title('Absolute error')
# ax4.plot(np.abs(s.inverse_transform(prediction) - test_data), label='Diff')
# ax4.set_ylim(-20, 250)
# # ax3.legend(loc='lower right')
# ax4.grid()
# ax4.set_title('Absolute error')
plt.show()
"""
============ =====================================================================
Fine-tuning: hidden_size
------------ ---------------------------------------------------------------------
"""
# num_exps = 10
# logs = np.zeros((NUM_EPOCHS, num_exps))
#
# for i in range(num_exps):
# hidden_size = (i + 1) * 2
# model, log = training(train_x, train_y, valid_x, valid_y, NUM_EPOCHS, INPUT_SIZE, hidden_size, NUM_LAYERS, OUTPUT_SIZE, LEARNING_RATE)
# logs[:, i] = log[:, 0]
# # plot
# fig = plt.figure(figsize=(8, 3))
# for i in range(num_exps):
# plt.plot(logs[:, i], label='Hidden size={}'.format((i + 1) * 2))
#
# plt.legend(loc='upper right', ncol=3, fontsize=8)
# plt.xlabel('Epochs')
# plt.ylabel('MSE')
# # plt.ylim(0, 0.05)
# plt.yscale('log')
# plt.title('Experiment 1: Hidden size')
# plt.show()
"""
============ =====================================================================
Fine-tuning: window_size
------------ ---------------------------------------------------------------------
"""
# num_exps = 10
# logs = np.zeros((NUM_EPOCHS, num_exps))
#
# for i in range(num_exps):
# window_size = (i + 1) * 10
#
# total_data_size = raw_data.shape[0] - window_size - PREDICT_SIZE
# input_data = np.zeros((total_data_size, window_size))
# output_data = np.zeros((total_data_size, PREDICT_SIZE))
# for i_start in range(total_data_size):
# input_data[i_start, :] = raw_data[i_start:i_start + window_size, :].T
# output_data[i_start, :] = raw_data[i_start + window_size: i_start + window_size + PREDICT_SIZE, :].T
#
# # Split data into training and validation array
# train_num = int(TRAINING_DATA_PERCENTAGE * total_data_size)
# train_x = Variable(torch.Tensor(input_data[:train_num, :]))
# train_y = Variable(torch.Tensor(output_data[:train_num, :]))
# valid_x = Variable(torch.Tensor(input_data[train_num:, :]))
# valid_y = Variable(torch.Tensor(output_data[train_num:, :]))
# print('Training :', train_x.shape, train_y.shape, ', Validation :', valid_x.shape, valid_y.shape)
#
# train_x = torch.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1)) # records * seq_len * input_size
# valid_x = torch.reshape(valid_x, (valid_x.shape[0], valid_x.shape[1], 1)) # records * seq_len * input_size
# print('Training :', train_x.shape, train_y.shape, ', Validation :', valid_x.shape, valid_y.shape)
#
# model, log = training(train_x, train_y, valid_x, valid_y, NUM_EPOCHS, INPUT_SIZE, HIDDEN_SIZE, NUM_LAYERS, OUTPUT_SIZE, LEARNING_RATE)
# logs[:, i] = log[:, 0]
# # plot
# fig = plt.figure(figsize=(8, 3))
# for i in range(num_exps):
# plt.plot(logs[:, i], label='Window size={}'.format((i + 1) * 10))
#
# plt.legend(loc='upper right', ncol=3, fontsize=8)
# plt.xlabel('Epochs')
# plt.ylabel('MSE')
# # plt.ylim(0, 0.05)
# plt.yscale('log')
# plt.title('Experiment 2: Window size')
# plt.show()
"""
============ =====================================================================
Fine-tuning: learning rate
------------ ---------------------------------------------------------------------
"""
# num_exps = 5
# logs = np.zeros((NUM_EPOCHS, num_exps))
#
# for i in range(num_exps):
# learning_rate = 0.1 ** (i + 1)
# model, log = training(train_x, train_y, valid_x, valid_y, NUM_EPOCHS, INPUT_SIZE, HIDDEN_SIZE, NUM_LAYERS, OUTPUT_SIZE, learning_rate)
# logs[:, i] = log[:, 0]
# # plot
# fig = plt.figure(figsize=(8, 3))
# for i in range(num_exps):
# plt.plot(logs[:, i], label='Learning rate={}'.format(np.round(0.1 ** (i + 1), i + 1)))
#
# plt.legend(loc='upper right', ncol=3, fontsize=8)
# plt.xlabel('Epochs')
# plt.ylabel('MSE')
# # plt.ylim(0, 0.05)
# plt.yscale('log')
# plt.title('Experiment 3: Learning rate')
# plt.show()
"""
============ =====================================================================
Fine-tuning: num layers
------------ ---------------------------------------------------------------------
"""
# num_exps = 4
# logs = np.zeros((NUM_EPOCHS, num_exps))
#
# for i in range(num_exps):
# num_layers = i + 1
# model, log = training(train_x, train_y, valid_x, valid_y, NUM_EPOCHS, INPUT_SIZE, HIDDEN_SIZE, num_layers, OUTPUT_SIZE, LEARNING_RATE)
# logs[:, i] = log[:, 0]
# # plot
# fig = plt.figure(figsize=(8, 3))
# for i in range(num_exps):
# plt.plot(logs[:, i], label='Num layers={}'.format(i + 1))
#
# plt.legend(loc='upper right', ncol=3, fontsize=8)
# plt.xlabel('Epochs')
# plt.ylabel('MSE')
# # plt.ylim(0, 0.05)
# plt.yscale('log')
# plt.title('Experiment 4: Num layers')
# plt.show()
| YangYimin98/deep_learning_Assignment | Group2_assignment_1/LSTM_code/LSTM_model.py | LSTM_model.py | py | 11,508 | python | en | code | 0 | github-code | 13 |
28912303060 | import os
from centerfinder import util
from centerfinder import sky
def test_pickle():
    """Round-trip a voted Sky through pickle_sky/unpickle_sky and check
    the restored object can still run blob detection."""
    sky_ = sky.Sky(util.load_data('data/cf_mock_catalog_83C_120R.fits'), 5)
    # expected radius should be default to 108
    filename = 'dummy'
    sky_.vote(radius=108)
    util.pickle_sky(sky_, filename)
    sky_1 = util.unpickle_sky(filename)
    sky_1.blobs_thres(radius=108, blob_size=3, type_='difference')
    # Clean up the temporary pickle file.
    os.remove(filename)
def test_blob():
    """Smoke-test: voting followed by threshold blob detection."""
    sky_ = sky.Sky(util.load_data('data/cf_mock_catalog_83C_120R.fits'), 5)
    # expected radius should be default to 108
    sky_.vote(radius=108)
    sky_.blobs_thres(radius=108, blob_size=3, type_='difference')
| yliu134/center-finder | tests/test_vote.py | test_vote.py | py | 664 | python | en | code | 0 | github-code | 13 |
31969272941 | from django.urls import path , include
from django.contrib import admin
from . import views
from .views import *
from django.conf import settings
from django.conf.urls.static import static
from django.shortcuts import render
# URL routes for the site/admin views.
# NOTE(review): 'edit_topic' is registered twice (with and without an <id>
# segment) under the same route name; reverse('edit_topic') will resolve
# to only one of them — confirm the argument-less variant is intentional.
# NOTE(review): some patterns use a bare <id> (string converter) while
# others use <int:id>; presumably they should be consistent — verify.
urlpatterns = [
    path('', views.login , name='login'),
    path('index/', views.index , name='index' ),
    path('contact/', views.contact, name='contact'),
    path('del_contact/<int:id>', views.del_contact, name='del_contact'),
    path('main/', views.main , name='main' ),
    path('course/', views.course , name='course' ),
    path('add_course/', views.add_course , name='add_course' ),
    path('edit_course/<id>', views.edit_course , name='edit_course' ),
    path('delete_course/<int:id>', views.delete_course, name='delete_course'),
    path('topic/', views.topic , name='topic' ),
    path('add_topic/', views.add_topic , name='add_topic' ),
    path('edit_topic/', views.edit_topic , name='edit_topic' ),
    path('del_topic/<int:id>', views.del_topic , name='del_topic' ),
    path('edit_topic/<id>', views.edit_topic , name='edit_topic' ),
    path('user/', views.user , name='user' ),
    path('del_user/<int:id>', views.del_user , name='del_user' ),
    path('slider/', views.slider , name='slider' ),
    path('create_slider/', views.create_slider , name='create_slider' ),
    path('edit_slider/<id>', views.edit_slider , name='edit_slider' ),
    path('del_slider/<id>', views.del_slider , name='del_slider' ),
] | shakti001/Besttutorils | webadmin/urls.py | urls.py | py | 1,471 | python | en | code | 0 | github-code | 13 |
5505690475 | from discord.ext import commands
from utils.mysql import *
from utils.tools import *
from utils import checks
from utils.language import Language, lang_list
class Configuration(commands.Cog):
    """Per-guild configuration commands (role settings and language)."""
    def __init__(self, bot):
        self.bot = bot
    @checks.server_admin_or_perms(manage_guild=True)
    @commands.guild_only()
    @commands.command()
    async def config(self, ctx, type:str, *, value:str):
        """Modifies the server's local config"""
        await ctx.channel.trigger_typing()
        # Only role-valued settings are supported; the role may be given
        # by name or by id.
        if type == "mod-role" or type == "admin-role" or type == "mute-role" or type == "join-role":
            role = by_name_or_id(ctx.guild.roles, value)
            if role is None:
                await ctx.send("Could not find the role `" + value + "`")
                return
            update_data_entry(ctx.guild.id, type, role.id)
            await ctx.send(Language.get("configuration.set_success", ctx).format(type, role.name))
        else:
            await ctx.send(Language.get("configuration.invalid_set_type", ctx).format(type))
    @commands.guild_only()
    @commands.command()
    async def showconfig(self, ctx):
        """Shows the server's configuration"""
        await ctx.channel.trigger_typing()
        mod_role = id_to_name(ctx.guild.roles, read_data_entry(ctx.guild.id, "mod-role"))
        admin_role = id_to_name(ctx.guild.roles, read_data_entry(ctx.guild.id, "admin-role"))
        mute_role = id_to_name(ctx.guild.roles, read_data_entry(ctx.guild.id, "mute-role"))
        join_role = id_to_name(ctx.guild.roles, read_data_entry(ctx.guild.id, "join-role"))
        # NOTE(review): "Admin Role" is the only label not localized via
        # Language.get — looks like an oversight; confirm a
        # "configuration.admin_role" key exists before changing it.
        fields = {Language.get("configuration.mod_role", ctx):mod_role, "Admin Role":admin_role, Language.get("configuration.mute_role", ctx):mute_role, Language.get("configuration.join_role", ctx):join_role}
        embed = make_list_embed(fields)
        embed.title = Language.get("configuration.server_configuration", ctx)
        embed.color = 0xFF0000
        await ctx.send(embed=embed)
    @commands.guild_only()
    @checks.server_admin_or_perms(manage_guild=True)
    @commands.command()
    async def setlanguage(self, ctx, language:str):
        """Sets the bot's language for the server"""
        await ctx.send(Language.set_language(ctx.guild, language))
    @commands.guild_only()
    @commands.command()
    async def languages(self, ctx):
        """Lists the current bot languages"""
        await ctx.send("Current bot languages: " + ", ".join(lang_list))
def setup(bot):
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(Configuration(bot))
| script-head/deadhead | commands/configuration.py | configuration.py | py | 2,528 | python | en | code | 10 | github-code | 13 |
35219091445 | import numpy as np
import cupy as cp
from cupy import dot
def init_matvec(N, local_N, T):
    """Allocate the per-rank buffers for a distributed mat-vec product:
    this rank's row slice of A (local_N x N, on the GPU), the full result
    vector Ax (host), and this rank's slice of the result (GPU)."""
    local_A = cp.empty((local_N, N), T)
    Ax = np.empty(N, T)
    local_Ax = cp.empty(local_N, T)
    return local_A, Ax, local_Ax
def init_vecvec(local_N, T):
    """Allocate two GPU work vectors of length local_N for dot products."""
    local_a = cp.empty(local_N, T)
    local_b = cp.empty(local_N, T)
    return local_a, local_b
def mpi_matvec(local_A, x, Ax, local_Ax, comm):
    """Distributed A @ x: broadcast x, multiply this rank's row slice on
    the GPU, then gather the partial products into Ax.

    GPU arrays are staged through host copies (.get()/cp.asarray)
    because the MPI calls here operate on NumPy buffers.
    NOTE(review): Gather uses the default root — presumably only rank
    0's returned vector is meaningful; verify against the callers.
    """
    Ax = x.get()
    comm.Bcast(Ax)
    x = cp.asarray(Ax)
    local_Ax = dot(local_A, x)
    comm.Gather(local_Ax.get(), Ax)
    return cp.asarray(Ax)
def mpi_vecvec1(a, local_a, comm):
    """Distributed dot(a, a): scatter slices of a, compute the local dot
    product on the GPU, and reduce the partial sums.
    NOTE(review): Reduce uses the default root — presumably only rank
    0's returned value is meaningful; verify against the callers.
    """
    ab = np.empty(1, cp.float64)
    local_a_cpu = local_a.get()
    comm.Scatter(a.get(), local_a_cpu)
    local_a = cp.asarray(local_a_cpu)
    local_ab = dot(local_a, local_a)
    comm.Reduce(local_ab.get(), ab)
    return cp.asarray(ab)
def mpi_vecvec2(a, b, local_a, local_b, comm):
    """Distributed dot(a, b): scatter slices of both vectors, compute the
    local dot product on the GPU, and reduce the partial sums."""
    ab = np.empty(1, cp.float64)
    local_a_cpu = local_a.get()
    local_b_cpu = local_b.get()
    comm.Scatter(a.get(), local_a_cpu)
    comm.Scatter(b.get(), local_b_cpu)
    local_a = cp.asarray(local_a_cpu)
    local_b = cp.asarray(local_b_cpu)
    local_ab = dot(local_a, local_b)
    comm.Reduce(local_ab.get(), ab)
    return cp.asarray(ab)
| 5enxia/parallel-krylov | v1/processes/gpu.py | gpu.py | py | 1,233 | python | en | code | 1 | github-code | 13 |
17776956497 | import turtle as t
def rectangle(horizontal,vertical,colour):
    """Draw a filled rectangle from the turtle's current position.

    The pen is lifted again afterwards so the turtle can be moved with
    goto() between shapes without drawing.
    """
    t.pendown()
    t.pensize(1)
    t.color(colour)
    t.begin_fill()
    # Two passes of (forward, turn, forward, turn) trace all four sides.
    for counter in range(1,3):
        t.forward(horizontal)
        t.right (90)
        t.forward(vertical)
        t.right(90)
    t.end_fill()
    t.penup()
t.penup()
t.speed('slow')
t.bgcolor('lawn green')
#feet
t.goto(-100,-150)
rectangle(50,20,'purple')
t.goto(-30,-150)
rectangle(50,20,'purple')
#legs
t.goto(-25,-50)
rectangle(15,100,'goldenrod')
t.goto(-55,-50)
rectangle(-15,100,'goldenrod')
#body
t.goto(-90,100)
rectangle(100,150,'blue')
#arms
t.goto(-150,70)
rectangle(60,15,'grey')
t.goto(-150,110)
rectangle(15,40,'grey')
t.goto(10,70)
rectangle(60,15,'grey')
t.goto(55,110)
rectangle(15,40,'grey')
#neck
t.goto(-50,120)
rectangle(15,20,'yellow')
#head
t.goto(-85,170)
rectangle(80,50,'red')
#eyes
t.goto(-60,160)
rectangle(30,10,'white')
t.goto(-55,155)
rectangle(5,5,'black')
t.goto(-40,155)
rectangle(5,5,'black')
#mouth
t.goto(-65,135)
rectangle(40,5,'black')
t.hideturtle()
| YITExperiment/ppv2_level-3-kapinath | robot_builder 2.py | robot_builder 2.py | py | 1,100 | python | en | code | 0 | github-code | 13 |
19552359290 | import sys
memo = dict()
coinValues = [1, 5, 10, 25, 50]
def coin_change(i, n):
    """Count the ways to make amount ``n`` from the first ``i``
    denominations in the module-level ``coinValues`` list.

    Results are memoized in the module-level ``memo`` dict under
    (i, n) keys; the cache is shared across calls and stays valid
    because the recurrence is deterministic.

    Fixes the original memoization, which stored base-case results
    under bare-int keys (never looked up again) and never cached the
    (i, n) result itself, causing redundant recursion.
    """
    # Base cases: exactly one way to make 0; no way to make a negative
    # amount, or a positive amount with no denominations left.
    if n == 0:
        return 1
    if n < 0 or i <= 0:
        return 0
    key = (i, n)
    if key not in memo:
        # Either skip denomination i entirely, or use it at least once.
        memo[key] = coin_change(i - 1, n) + coin_change(i, n - coinValues[i - 1])
    return memo[key]
def load():
    """Yield one integer per line of standard input, stopping at EOF.

    Fix: the original looped forever on ``next(sys.stdin)``; under
    PEP 479 (Python 3.7+) the StopIteration raised at end of input is
    converted into a RuntimeError instead of ending the generator.
    Iterating the stream directly terminates cleanly.
    """
    for line in sys.stdin:
        yield int(line)
# For each amount read from stdin, warm the memo bottom-up, then print
# the number of ways to form the amount from the five denominations.
for n in load():
    for i in range(0, n):
        coin_change(5, i)
    sys.stdout.write(str(coin_change(5, n)))
sys.stdout.write("\n") | tristan-hunt/UVaProblems | coin_change.py | coin_change.py | py | 674 | python | en | code | 0 | github-code | 13 |
38319545100 | #!/usr/bin/env python3
import csv
import json
import os
import sys
from argparse import ArgumentParser
from datetime import datetime
from subprocess import list2cmdline
from typing import Dict, Tuple
from urllib.request import urljoin
import requests
session = requests.session()
session.headers["Content-Type"] = "application/json"
prefix = "https://api.ituring.com.cn/api/"
prefix_mainly = "http://www.ituring.com.cn/api/"
ebook_link = "http://www.ituring.com.cn/file/ebook/%s?type=%s"
token_path = "ituring-token.json"
def expand_paging(query):
    """Yield every item across all pages of a paged API response.

    ``query(page)`` must return a payload dict with "bookItems" (a list)
    and "pagination" (whose "isLastPage" flag ends the iteration).
    Pages are numbered from 1.
    """
    page = 1
    last_page = False
    while not last_page:
        payload = query(page)
        yield from payload["bookItems"]
        last_page = payload["pagination"]["isLastPage"]
        page += 1
def get_book_shelf():
    """Yield the authenticated user's purchased e-books, newest first."""
    def query(page):
        link = urljoin(prefix, "User/ShelfEBook")
        response = session.get(link, params={"page": page, "desc": True})
        return response.json()
    return expand_paging(query)
def get_favourite():
    """Yield the authenticated user's favourited books."""
    def query(page):
        link = urljoin(prefix, "User/Fav/Books")
        response = session.get(link, params={"page": page})
        return response.json()
    return expand_paging(query)
def get_book(book_id):
    """Fetch a book's metadata dict, or None if the id does not exist (404)."""
    link = urljoin(prefix, "Book/%s" % book_id)
    response = session.get(link)
    if response.status_code == 404:
        return None
    return response.json()
def download_book(book_id: int):
    """Yield (book_id, download_url, filename) for every e-book format
    (PDF/EPUB/MOBI) the book supports."""
    payload = get_book(book_id)
    def make_link(kind: str) -> Tuple[int, str, str]:
        # Filename: "[00042] Title.pdf"; slashes stripped from the title.
        link = ebook_link % (payload["encrypt"], kind)
        filename = "[{id:05}] {name}.{kind}".format(
            id=book_id, name=payload["name"].strip().replace("/", ""), kind=kind.lower()
        )
        return book_id, link, filename
    if payload["supportPdf"]:
        yield make_link("PDF")
    if payload["supportEpub"]:
        yield make_link("EPUB")
    if payload["supportMobi"]:
        yield make_link("MOBI")
def set_token():
    """Load the saved bearer token into the session headers, falling
    back to an interactive login if the token file is unusable."""
    try:
        with open(token_path, "r") as fp:
            token = json.load(fp)
        session.headers["Authorization"] = "Bearer %(accessToken)s" % token
    # NOTE(review): bare except also swallows KeyboardInterrupt;
    # consider narrowing to (OSError, ValueError, KeyError).
    except:
        login()
def can_refresh_token():
    """Return True when the token file is at least one day old."""
    modified_time = os.path.getmtime(token_path)
    delta_time = datetime.now() - datetime.fromtimestamp(modified_time)
    return delta_time.days >= 1
def refresh_token():
    """Exchange the stored token pair for a fresh one (at most once a
    day) and rewrite the token file with the new payload."""
    if not can_refresh_token():
        return
    token = None
    with open(token_path, "r") as fp:
        token = json.load(fp)
    response = requests.post(
        urljoin(prefix, "Account/RefreshToken"),
        json={
            "AccessToken": token["accessToken"],
            "RefreshToken": token["refreshToken"],
        },
    )
    payload = response.json()
    with open(token_path, "w") as fp:
        json.dump(payload, fp, indent=4)
def extract_book_item(item: Dict):
    """Return the id of a book item dict (used as a sort key)."""
    book_id = item["id"]
    return book_id
def make_extract_book_item(kind: str):
    """Build a mapper that normalizes a raw book item into a CSV row:
    zero-padded 5-digit id, stripped name, and the given ``kind`` tag."""
    def to_row(item):
        return {
            "id": "%(id)05d" % item,
            "name": item["name"].strip(),
            "kind": kind,
        }
    return to_row
def report():
    """Print a CSV (to stdout) of all shelf and favourite books.

    Rows are sorted by the formatted "id" field; the zero-padding from
    make_extract_book_item makes the string sort numeric.
    """
    shelf_books = map(make_extract_book_item("shelf"), get_book_shelf())
    favourite_books = map(make_extract_book_item("favourite"), get_favourite())
    writer = csv.DictWriter(sys.stdout, ["id", "name", "kind"])
    writer.writeheader()
    writer.writerows(sorted(shelf_books, key=extract_book_item))
    writer.writerows(sorted(favourite_books, key=extract_book_item))
def get_book_flags(payload: Dict):
    """Yield human-readable availability flags for a book payload,
    always in the same fixed order."""
    flag_labels = (
        ("presale", "pre-sale"),
        ("canSalePaper", "paper"),
        ("supportPdf", "pdf"),
        ("supportEpub", "epub"),
        ("supportMobi", "mobi"),
        ("supportPushMobi", "push-mobi"),
    )
    for key, label in flag_labels:
        if payload[key]:
            yield label
def all_books():
    """Walk book ids sequentially from 1 and emit a CSV of every book
    that exists, stopping after 1000 consecutive missing ids."""
    field_names = ["id", "name", "published", "flags"]
    writer = csv.DictWriter(sys.stdout, field_names)
    writer.writeheader()
    book_id = 1
    failed_count = 0
    while True:
        if failed_count > 1000:
            break
        payload = get_book(book_id)
        book_id += 1
        if payload is None:
            print("# ignored #%s" % book_id, file=sys.stderr)
            failed_count += 1
            continue
        else:
            failed_count = 0
        if payload["publishDate"]:
            # Keep only the YYYY-MM-DD part of the timestamp.
            payload["publishDate"] = payload["publishDate"][:10]
        item = {
            "id": payload["id"],
            "name": payload["name"].strip(),
            "published": payload["publishDate"],
            "flags": ", ".join(get_book_flags(payload)),
        }
        writer.writerow(item)
        sys.stdout.flush()
def push_books():
    """Print curl commands that push each shelf book to the user's
    Kindle; books with a tupubBookId use PushBook, others PushMiniBook."""
    for book in get_book_shelf():
        payload = get_book(book["id"])
        mode = "PushBook" if payload["tupubBookId"] else "PushMiniBook"
        link = urljoin(prefix_mainly, "Kindle/%s/%s" % (mode, book["id"]))
        auth = "Authorization: %(Authorization)s" % session.headers
        print(list2cmdline(["echo", "%(id)05d Push book" % book]))
        print(list2cmdline(["curl", "-H", auth, link]))
        print(list2cmdline(["echo"]))
        sys.stdout.flush()
def clean_favourite():
    """Un-favourite every book that is already on the purchased shelf."""
    shelf_books = set(map(extract_book_item, get_book_shelf()))
    favourite_books = set(map(extract_book_item, get_favourite()))
    purchased_items = shelf_books & favourite_books
    for book_id in sorted(purchased_items):
        link = urljoin(prefix, "Book/UnFav")
        session.post(link, params={"id": book_id})
        print("Unfavourite purchased book: %s" % book_id)
def fetch():
    """Print aria2c-style download entries (URL plus header/referer/out
    options) for every format of every purchased book."""
    book_ids = map(extract_book_item, get_book_shelf())
    links = (
        (book_id, link, filename)
        for book in map(download_book, sorted(book_ids))
        for book_id, link, filename in book
    )
    for book_id, link, filename in links:
        options = [
            link,
            'header="Authorization: %(Authorization)s"' % session.headers,
            "referer=https://m.ituring.com.cn/book/%s" % book_id,
            "out=ebooks/%s" % filename,
        ]
        print("\n\t".join(options))
def login():
    """Prompt for credentials, request a token pair from the API, and
    save it to the token file on success."""
    email = input("Email: ")
    password = input("Password: ")
    response = requests.post(
        urljoin(prefix, "Account/Token"), json={"email": email, "password": password}
    )
    payload = response.json()
    if response.status_code != 200:
        print(payload["message"], file=sys.stderr)
        return
    with open(token_path, "w") as fp:
        json.dump(payload, fp, indent=4)
    print("login done")
def main():
    """CLI entry point: load/refresh the token, then dispatch to the
    requested sub-command."""
    set_token()
    refresh_token()
    parser = ArgumentParser(description="ituring helper")
    subparsers = parser.add_subparsers(dest="action")
    subparsers.add_parser("login")
    subparsers.add_parser("report")
    subparsers.add_parser("fetch")
    subparsers.add_parser("clean-favourite")
    subparsers.add_parser("all-books")
    subparsers.add_parser("push-books")
    args = parser.parse_args()
    if len(sys.argv) == 1:
        # No sub-command given: show usage and exit with an error code.
        parser.print_help(sys.stderr)
        sys.exit(-1)
    actions = {
        "login": login,
        "report": report,
        "fetch": fetch,
        "clean-favourite": clean_favourite,
        "all-books": all_books,
        "push-books": push_books,
    }
    actions[args.action]()
if __name__ == "__main__":
    main()
| NiceLabs/ituring-helper | ituring.py | ituring.py | py | 7,319 | python | en | code | 0 | github-code | 13 |
16550668986 | from django import forms
from django.forms import ModelForm
from .models import *
from multiupload.fields import MultiFileField
class UserForm(ModelForm):
    """Collects only the user's email address."""
    class Meta:
        model = User
        fields = ('email',)
class LinearForm(ModelForm):
    """Full edit form for a linear (infrastructure) object."""
    class Meta:
        model = Linear
        fields = '__all__'
        widgets = {
            'techobespech_spravochnik': forms.CheckboxSelectMultiple(),
        }
class LinearFormFile(ModelForm):
    """Document upload for a linear object.

    NOTE(review): the 'multiple' attr only changes the rendered HTML;
    saving several files still requires request.FILES.getlist() in the
    view — confirm the views handle this.
    """
    class Meta:
        model = LinearData
        fields = ['document']
        widgets = {
            'document': forms.FileInput(attrs={'multiple': True}),
        }
class CivilForm(ModelForm):
    """Full edit form for a non-production (civil) object."""
    class Meta:
        model = Citizen
        fields = '__all__'
class CivilFormFile(ModelForm):
    """Document upload for a civil object (see LinearFormFile note)."""
    class Meta:
        model = CitizenData
        fields = ['document']
        widgets = {
            'document': forms.FileInput(attrs={'multiple': True}),
        }
class IndustrialForm(ModelForm):
    """Full edit form for an industrial (production) object."""
    class Meta:
        model = Industrial
        fields = '__all__'
class IndustrialFormFile(ModelForm):
    """Document upload for an industrial object (see LinearFormFile note)."""
    class Meta:
        model = IndustrialData
        fields = ['document']
        widgets = {
            'document': forms.FileInput(attrs={'multiple': True}),
        }
STAGES = (
('Инженерные изыскания и проектирование',(
('1.1','Выпуск проектной документации на основе ЦИМ'),
('1.2','Пространственная междисциплинарная координация и выявление коллизий (3D-координация)'),
('1.3','Подсчет объемов работ и оценка сметной стоимости (BIM 5D)'),
)
),
('Строительство',(
('2.1','Пространственная временная координация и выявление коллизий (4D-координация)'),
('2.2','Визуализация процесса строительства (BIM 4D)'),
('2.3','Исполнительная модель «как построено»'),
)
),
('Эксплуатация', (
('3.1', 'Управление эксплуатацией зданий и сооружений'),
('3.2', 'Информационное моделирование существующего объекта «как есть»'),
)
)
)
class BimUseForm(forms.Form):
stage = forms.MultipleChoiceField(label = "BIM-USE", widget=forms.CheckboxSelectMultiple, choices=STAGES)
OKS = (('Linear','Линейный объект'),
('Citizen','Непроизводственный объект'),
('Industrial','Производственный объект'))
class OKSForm(forms.Form):
oks = forms.ChoiceField(label = "ОКС",
widget=forms.RadioSelect, choices=OKS)
class HintForm(forms.Form):
comment = forms.CharField(label='в соответствие с таблицей КСИ СЖЦ / LCS 8)стадия жизненного цикла объектов капитального строительства;')
comment2 = forms.CharField(label='или альтернативные определения:3.2 строительство: Создание зданий, строений, сооружений (в том числе на месте сносимых объектов капитального строительства) (пункт 13 статьи 1 [2]). ')
comment3 = forms.CharField(label='3')
comment4= forms.CharField(label='4')
comment5= forms.CharField(label='5')
comment6= forms.CharField(label='6')
comment7= forms.CharField(label='7')
comment8= forms.CharField(label='8')
comment9= forms.CharField(label='9')
comment10= forms.CharField(label='10')
comment11= forms.CharField(label='11')
comment12= forms.CharField(label='12')
comment13= forms.CharField(label='13')
| tagirova33/bim-zadanie | forms.py | forms.py | py | 3,951 | python | ru | code | 0 | github-code | 13 |
9072170480 | import sys
sys.stdin = open("in_out/chapter7/in4.txt", "rt")
def dfs(L, sum):
    """Depth-first search for the fewest coins summing to ``change``.

    ``L`` is the number of coins used so far, ``sum`` their total.
    Reads the module globals ``coins`` and ``change`` and records the
    best coin count found in the global ``smaller``.
    """
    global change, smaller
    # Prune: already used at least as many coins as the current best.
    if L >= smaller:
        return
    # Overshot the target amount: dead end.
    if sum > change:
        return
    # Exact hit: record the (necessarily smaller) coin count.
    if sum == change:
        smaller = min(smaller, L)
        return
    # Otherwise, try adding one more coin of each denomination.
    for coin in coins:
        dfs(L + 1, sum + coin)
if __name__ == "__main__":
    # Input: count (unused beyond parsing), the coin denominations, and
    # the target change amount.
    n = int(input())
    coins = list(map(int, input().split()))
    # Try large denominations first so a good bound is found early.
    coins.sort(reverse=True)
    change = int(input())
    # Inflated upper bound on the answer so the first prune in dfs never
    # fires before any solution has been seen.
    smaller = (change // max(coins)) + 1000
    dfs(0,0)
    print(smaller)
| mins1031/coding-test | section6/Chapter7.py | Chapter7.py | py | 557 | python | en | code | 0 | github-code | 13 |
13342109511 | import socket, struct, math, pickle, random, copy, json, time, pygame
from matplotlib.pyplot import disconnect
from _thread import start_new_thread
from constants import *
from games_logic import TTT_Logic, Connect4_Logic
# import numpy as np
IP = "0.0.0.0"  # Address to bind to (all interfaces)
PORT = 5555  # Arbitrary non-privileged port
DEFAULT_BYTES = 1024  # max bytes to be sent in one message
# Monotonic counter; also used to mint user ids in create_user.
total_connections_so_far = 0
total_games_so_far = 0
# Shared module-level registries, mutated from every client thread (no locks).
active_users = {}  # {id:{"conn":conn,"engaged":False}}
connections = {}  # store the connections in a dict, for easy access
send_queue = {}  # queue for sending data to each user
profile_pictures = {}  # store all the profile pics or users
# NOTE(review): `pending` looks unused here; per-user "pending" dicts are
# stored on each active_users entry instead — confirm before removing.
pending = {}  # {"challenger_id":(challenged_to,game)}
games = {}  # {game_id:{players:[],game:<string>,game_details:{board:<Board>}}}
games_lookup = {
    "tic_tac_toe": TTT_Logic,
    "connect4": Connect4_Logic,
}  # maybe add more games in the future
def create_user(conn, addr):
    """Register a newly accepted client and run the initial handshake.

    Mints a user id from the global connection counter, records the user in
    the shared registries, sends the id to the client, then applies any
    profile fields the client returns. Returns (user_id, user_stats), or
    (None, None) if the client dropped before finishing the handshake.
    """
    user_id = str(total_connections_so_far)
    user_stats = {
        "id": user_id,
        "username": f"USER#{user_id}",
        "image": None,
        "color": random.choice(USER_COLORS),
        "engaged": False,
        "challenged": {},  # requests this user sent, awaiting acceptance
        "pending": {},  # requests this user still has to accept/reject
        "game": None,
        "bot": False,
    }

    # Register the user in the shared lookup tables.
    active_users[user_id] = user_stats
    connections[user_id] = conn
    send_queue[user_id] = []
    print(f"[NEW USER] {user_stats['username']} ({user_id})")

    # Hand the client its assigned id, then wait for its first packet.
    send(user_id, conn)
    first_packet = recieve_data(conn)
    if not first_packet:
        # Client vanished mid-handshake: roll back the registration.
        disconnect_user(user_id, user_stats)
        conn.close()
        return None, None

    first_packet = pickle.loads(first_packet)
    if first_packet.get("updated"):
        update_user(user_id, first_packet["updated"], send_all=False)
    return user_id, user_stats
def update_user(user_id, updated, send_all=True):
    """Apply client-supplied profile changes to ``active_users[user_id]``.

    Only keys that already exist on the user record are applied, so clients
    cannot inject arbitrary attributes. Returns the reply dict for the
    requesting client.

    send_all: when True, broadcast the applied changes to every user
    (including the sender).
    """
    # Keep only keys the user record actually has.
    applied = {k: v for k, v in updated.items() if k in active_users[user_id]}
    if not applied:
        return {"error": "Unknown keys!"}
    active_users[user_id].update(applied)
    if send_all:
        # Broadcast only the keys that were actually applied. (The old code
        # echoed the raw client payload, unknown keys included, which could
        # desync clients from the server-side record.)
        r = {"updated": {"user_id": user_id, "changed": applied}}
        send_to_all(r, user_id, True)
    print(
        f"[UPDATED STATS]: {active_users[user_id]['username']} ({user_id}) \n {applied}"
    )
    return {"message": {"title": "Updated successfully!"}}
def execute_send_queue(user_id):
    """Worker loop: drain ``send_queue[user_id]`` onto the user's socket.

    Runs (one thread per user) until the user disappears from
    ``active_users`` or sending fails, which is treated as a disconnect.
    """
    while active_users.get(user_id):
        try:
            conn = connections[user_id]
            # The queue holds batches (lists) of already-pickled payloads;
            # iterate over a copy because we remove batches as they're sent.
            for items in send_queue[user_id].copy():
                for item in items:
                    # Oversized payloads are chunked by send_huge.
                    if len(item) >= DEFAULT_BYTES:
                        send_huge(conn, item)
                    else:
                        send(item, conn, pickle_data=False)
                send_queue[user_id].remove(items)
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; any send failure means the user is gone.
        except Exception:
            break
def add_to_send_queue(user_id, items):
    """Queue one batch of pre-pickled messages for delivery to a user."""
    queue = send_queue[user_id]
    queue.append(items)
def send_to_all(
    data, curr_user_id, to_current_user=False, pickle_data=True, to_bots=True
):
    """Fan ``data`` out to every connected user's send queue.

    to_current_user: also deliver to ``curr_user_id`` itself.
    pickle_data: serialize ``data`` first (pass False for pre-pickled bytes).
    to_bots: include bot users in the broadcast.
    """
    payload = pickle.dumps(data) if pickle_data else data
    # Snapshot the values: other threads may register/remove users meanwhile.
    for user in list(active_users.values()):
        skip_self = user["id"] == curr_user_id and not to_current_user
        skip_bot = user["bot"] and not to_bots
        if skip_self or skip_bot:
            continue
        add_to_send_queue(user["id"], [payload])
def send_image_to_all(image_data, img):
    """Broadcast a profile picture to all human users: metadata dict first,
    then the raw image bytes."""
    meta = pickle.dumps(image_data)  # hoisted: identical for every recipient
    for user in list(active_users.values()):
        if user["bot"]:
            continue
        add_to_send_queue(user["id"], [meta, img])
def send_huge(conn, data_bytes):
    """Send a payload >= DEFAULT_BYTES as a batched transfer.

    Wire protocol: a normal pickled header {"message_type": "huge",
    "n_batches": N}, then N packed int16 batch lengths, then the N batches.
    """
    total = len(data_bytes)
    n_batches = math.ceil(total / DEFAULT_BYTES)
    # Every batch is DEFAULT_BYTES long except (possibly) the last one.
    batch_lengths = [DEFAULT_BYTES] * (n_batches - 1)
    batch_lengths.append(total - (n_batches - 1) * DEFAULT_BYTES)
    # Announce the transfer, then stream the length table and the batches.
    send({"message_type": "huge", "n_batches": n_batches}, conn)
    conn.sendall(struct.pack("h" * n_batches, *batch_lengths))
    for start in range(0, total, DEFAULT_BYTES):
        conn.sendall(data_bytes[start : start + DEFAULT_BYTES])
def send(data, conn, pickle_data=True):
    """Send one length-prefixed message: a 2-byte int16 size, then the payload.

    pickle_data: serialize ``data`` first; pass False for pre-pickled bytes.
    Failures are logged, not raised (best-effort delivery).
    """
    try:
        payload = pickle.dumps(data) if pickle_data else data
        # 2-byte header tells the receiver how many payload bytes follow.
        conn.sendall(struct.pack("h", len(payload)))
        conn.sendall(payload)
    except Exception as e:
        print("ERROR TRYING TO SEND DATA: ", e)
def send_all_users(user_id):
    """Queue a pickled snapshot of every active user's record for this client."""
    snapshot = pickle.dumps(active_users)
    add_to_send_queue(user_id, [snapshot])
def send_all_user_images(user_id):
    """Queue every stored profile picture (metadata dict + raw bytes) for
    delivery to this client."""
    for owner_id, details in profile_pictures.items():
        if details is None:
            continue
        meta = {
            "image": {
                "size": details["size"],
                "user_id": owner_id,
                "shape": details["shape"],
                "dtype": details["dtype"],
            }
        }
        add_to_send_queue(user_id, [pickle.dumps(meta), details["image"]])
def _recv_exact(conn, n):
    """Read exactly ``n`` bytes from ``conn``; return b"" if the peer closes early.

    socket.recv(n) may legally return fewer than n bytes, so loop until the
    full count arrives (the old code assumed one recv == one full frame).
    """
    buf = b""
    while len(buf) < n:
        chunk = conn.recv(n - len(buf))
        if not chunk:  # peer disconnected mid-message
            return b""
        buf += chunk
    return buf


def recieve_data(conn):
    """Receive one length-prefixed message from ``conn``.

    Returns the raw payload bytes, or "" if the client disconnected. If the
    payload is a pickled {"message_type": "huge", "n_batches": N} header,
    the N follow-up batches are read and returned re-assembled.
    """
    lenData = _recv_exact(conn, 2)
    if not lenData:  # user disconnected
        return ""
    # Length is padded to a signed 2-byte int by the sender.
    lenData = struct.unpack("h", lenData)[0]
    data = _recv_exact(conn, lenData)
    if not data:
        return ""
    try:
        # NOTE(security): unpickling untrusted network data can execute
        # arbitrary code; kept for protocol compatibility.
        pickled = pickle.loads(data)
        if isinstance(pickled, dict) and pickled.get("message_type") == "huge":
            n_batches = pickled["n_batches"]
            header = _recv_exact(conn, 2 * n_batches)
            if not header:
                return ""
            batch_sizes = struct.unpack("h" * n_batches, header)
            binData = b""
            for size in batch_sizes:
                batch = _recv_exact(conn, size)
                if not batch:
                    return ""  # user disconnected mid-transfer
                binData += batch
            return binData
    except Exception:
        # Payload wasn't a pickle (e.g. raw image bytes): hand it back as-is.
        pass
    return data
def disconnect_user(user_id, user_stats):
    """Tear down all state for a departing user and notify everyone else.

    Ends any game the user was in (awarding the win to the opponent),
    withdraws outstanding challenge requests in both directions, removes the
    user from every registry, and broadcasts a "disconnected" event.

    Fixes over the old version: (1) `games` was popped while iterating it
    (RuntimeError); (2) the except handler repeated non-idempotent pops,
    raising KeyError after a partial cleanup; (3) the final log read the
    module-level `addr` from the accept loop — i.e. the most recently
    connected client's address, not this user's.
    """
    user = active_users.get(user_id)
    user_name = user["username"] if user else user_id
    try:
        # If this player was in a game when they left, end the game.
        if user and user["engaged"]:
            # Snapshot the keys: we pop from `games` inside the loop.
            for game_id in list(games):
                if user_id not in games[game_id]["players"]:
                    continue
                r = {
                    "message": {"title": "Player left", "text": "Game over."},
                    "game_over": {"game_id": game_id},
                }
                # NOTE: only correct for 2-player games (as before).
                for player in games[game_id]["players"].values():
                    id = player["id"]
                    if id != user_id:
                        # The quitter forfeits: the remaining player wins.
                        active_users[id]["engaged"] = False
                        r["game_over"]["winner_id"] = id
                        add_to_send_queue(id, [pickle.dumps(r)])
                games.pop(game_id)  # delete that game
        if user:
            # Withdraw challenges this user had sent out.
            for challenged_id, game in user["challenged"].items():
                opp = active_users.get(challenged_id)
                if not opp:
                    continue
                opp["pending"].pop(user_id, None)
                r = {
                    "message": {
                        "id": f"{user_id}-{challenged_id}-{game}",
                        "title": "User disconnected.",
                        "text": user["username"],
                    }
                }
                add_to_send_queue(opp["id"], [pickle.dumps(r)])
            # Withdraw challenges other users had sent to this one.
            for pending_id, game in user["pending"].items():
                challenger = active_users.get(pending_id)
                if not challenger:
                    continue
                challenger["challenged"].pop(user_id, None)
                r = {
                    "message": {
                        "id": f"{pending_id}-{user_id}-{game}",
                        "title": "User disconnected.",
                        "text": user["username"],
                    }
                }
                add_to_send_queue(challenger["id"], [pickle.dumps(r)])
    except Exception as e:
        print(f"error trying to disconnect user {user_id}", e)
    # Registry cleanup is idempotent (pop with default), so a partial failure
    # above cannot make it crash or run twice.
    profile_pictures.pop(user_id, None)
    active_users.pop(user_id, None)
    connections.pop(user_id, None)
    send_queue.pop(user_id, None)
    # Let all remaining users know this one has disconnected (the user is
    # already unregistered, so the broadcast cannot reach them).
    send_to_all({"disconnected": user_id}, user_id, False)
    print(f"[DISCONNECTED]: {user_name} ({user_id})")
# deal with sending and recieving data from and to a user
def threaded_client(conn, addr, user_id, user_stats):
    """Per-connection worker: service one client's requests until disconnect.

    Dispatches on the keys of each pickled request dict (challenge /
    cancel_challenge / accepted / rejected / quit / move / image / updated),
    mutating the shared module-level registries, and queues a reply for the
    sender. Runs in its own thread started from the accept loop.
    NOTE(review): the shared dicts are mutated without locks — confirm this
    is acceptable under concurrent clients.
    """
    # send all active users' info to this one
    send_all_users(user_id)  # done
    if not user_stats["bot"]:
        print("yes")
        send_all_user_images(user_id)  # done
    # send all other users this user's stats
    d = {"connected": user_stats}
    send_to_all(d, user_id, to_current_user=False)  # done
    while True:
        try:
            data = recieve_data(conn)
            # client disconnected
            if not data:
                break
            data = pickle.loads(data)  # data comes in as pickle encoded bytes
            reply = {"status": "connected"}  # initiate a reply, to send to the user
            if data.get("challenge"):
                # challenge will come in as (challenged_user_id, game_name)
                challenged_user_id, game = data["challenge"]
                # deal with edge cases that could raise errors
                if challenged_user_id not in connections.keys():
                    reply["error"] = "Invalid User ID!"
                elif len(active_users[user_id]["challenged"]) > 0:
                    reply["error"] = "You have already challenged someone!"
                elif len(active_users[user_id]["pending"]) > 0:
                    reply["error"] = "You have a pending request!"
                elif active_users[user_id]["engaged"]:
                    reply["error"] = "You are in a game"
                elif (
                    active_users[challenged_user_id]["engaged"]
                    and not active_users[challenged_user_id]["bot"]
                ):
                    reply["error"] = "User is in a game!"
                elif (
                    len(active_users[challenged_user_id]["pending"])
                    and not active_users[challenged_user_id]["bot"]
                ):
                    reply["error"] = "That user has a pending request!"
                else:
                    # prepare a challenge request, to send to challenged_user
                    challenge_req = {}
                    # a unique game id
                    game_id = f"{user_id}-{challenged_user_id}-{game}"
                    # the client code knows how to deal with extra props being sent in with the message
                    # send an accept or reject button message to the challenged user
                    challenge_req["message"] = {
                        "title": f"Challenge from {active_users[user_id]['username']}: {game}",
                        "buttons": ["accept", "reject"],
                        "context": {"challenger_id": user_id, "game": game},
                        "closeable": False,
                        "id": game_id,
                    }
                    challenge_req["challenge"] = {
                        "challenger_id": user_id,
                        "game": game,
                    }
                    add_to_send_queue(challenged_user_id, [pickle.dumps(challenge_req)])
                    # set the respective flags of the players involved
                    active_users[user_id]["challenged"][challenged_user_id] = game
                    active_users[challenged_user_id]["pending"][user_id] = game
                    # send a cancel button message to the user who challenged
                    # NOTE(review): "Sent succesfully" typo is user-visible runtime text.
                    reply["message"] = {
                        "closeable": False,
                        "title": "Sent succesfully",
                        "buttons": ["cancel"],
                        "context": {"opp_id": challenged_user_id, "game": game},
                        "id": game_id,
                    }
                    print(
                        f"[CHALLENGE]: {active_users[user_id]['username']} ({user_id}) challenged {active_users[challenged_user_id]['username']} ({challenged_user_id}) for {game}"
                    )
            # this user is trying to cancel a challenge request
            if data.get("cancel_challenge"):
                opp_id = data["cancel_challenge"]["opp_id"]  # the opponents id
                game = data["cancel_challenge"][
                    "game"
                ]  # the game this user has challenged for
                if active_users[user_id]["challenged"].get(opp_id):
                    # remove the opponent from this users challenges
                    # NOTE(review): this inner check duplicates the one just above.
                    if active_users[user_id]["challenged"].get(opp_id):
                        active_users[user_id]["challenged"].pop(opp_id)
                    # remove this user from the opponent's pending requests
                    if active_users[opp_id]["pending"].get(user_id):
                        active_users[opp_id]["pending"].pop(user_id)
                    # send a cancelled message
                    reply_to_opp = {}
                    reply_to_opp["cancel"] = {"id": user_id, "game": game}
                    reply_to_opp["message"] = {
                        "id": f"{user_id}-{opp_id}-{game}",
                        "title": "Challenged cancelled",
                        "text": f"by {active_users[user_id]['username']}",
                    }
                    add_to_send_queue(opp_id, [pickle.dumps(reply_to_opp)])
                    reply["message"] = {
                        "id": f"{user_id}-{opp_id}-{game}",
                        "title": "Message",
                        "text": "Cancelled successfully.",
                    }
                    print(
                        f"[CANCELLED CHALLENGE] {active_users[user_id]['username']} ({user_id}) to {active_users[opp_id]['username']} ({opp_id})"
                    )
                else:
                    reply["error"] = "No pending challenges from that user!"
            # a challenge has been accepted, start the game
            if data.get("accepted"):
                d = data["accepted"]
                player1 = active_users.get(d["player1_id"])  # the one who challenged
                player2 = active_users[user_id]  # this user
                game = d["game"]
                # handle edge cases
                if not player1:
                    reply["error"] = "Invalid user id!"
                elif player1["engaged"]:
                    reply["error"] = "User is in a game!"
                elif player1["challenged"].get(user_id) != game:
                    reply["error"] = f"{player1['username']} hasn't challenged you!"
                elif not games_lookup.get(game):
                    reply["error"] = "Invalid game!"
                # everything's good, setup the game
                else:
                    player1["challenged"].pop(user_id)
                    player2["pending"].pop(player1["id"])
                    game_id = f"{player1['id']}-{user_id}-{game}"  # a unique game_id
                    board = games_lookup.get(game)(
                        player1, player2
                    )  # generate a game board according to the game
                    # player information (who is what) | ex: {player1_id:"X",player2_id:"O"}
                    identification_dict = board.get_identification_dict()
                    # setup the game
                    new_game = {
                        "players": {player1["id"]: player1, player2["id"]: player2},
                        "game": game,
                        "identification_dict": identification_dict,
                        "details": {"game_id": game_id, "board": board,},
                    }
                    # add this game to the existing dict of games
                    games[game_id] = new_game
                    # both these players are now in a game
                    player1["engaged"], player2["engaged"] = True, True
                    # send a message saying the game has started!
                    reply_to_player1 = {}
                    reply_to_player1["new_game"] = new_game
                    reply_to_player1["message"] = {
                        "id": game_id,
                        "title": "Game started.",
                        "text": "Have fun!",
                    }
                    add_to_send_queue(player1["id"], [pickle.dumps(reply_to_player1)])
                    reply["new_game"] = new_game
                    reply["message"] = {
                        "title": "Game started.",
                        "text": "Have fun!",
                        "id": game_id,
                    }
                    print(
                        f"[ACCEPTED CHALLENGE]: {player2['username']} ({player2['id']}) from {player1['username']} ({player1['id']})"
                    )
            # challenge rejected
            if data.get("rejected"):
                d = data["rejected"]
                player1 = active_users.get(d["player1_id"])  # the one who challenged
                player2 = active_users[user_id]  # this user
                game = d["game"]
                # check for edge cases
                if not player1:
                    reply["error"] = "Invalid user id!"
                elif not player1["challenged"].get(user_id):
                    reply["error"] = "User hasn't challenged you!"
                else:
                    # disengage both players and send respective messages
                    player1["challenged"].pop(user_id)
                    player2["pending"].pop(player1["id"])
                    reply_to_player1 = {}
                    reply_to_player1["message"] = {
                        "id": f"{player1['id']}-{user_id}-{game}",
                        "title": "Challenge rejected",
                        "text": f"for {game} by {player2['username']}",
                    }
                    add_to_send_queue(player1["id"], [pickle.dumps(reply_to_player1)])
                    print(
                        f"[REJECTED CHALLENGE]: {player2['username']} ({player2['id']}) from {player1['username']} ({player1['id']})"
                    )
            # quit a game
            if data.get("quit"):
                game_id = data["quit"]
                if games.get(game_id) and user_id in games.get(game_id)["players"]:
                    r = {}
                    r["message"] = {
                        "title": f"Game ended by {active_users[user_id]['username']}"
                    }
                    # set the respective winners
                    winner_id = None
                    # figuring out the winner
                    if len(games.get(game_id)["players"]) == 2:
                        for id in games.get(game_id)["players"].keys():
                            if id != user_id:
                                winner_id = id
                                break
                    r["game_over"] = {
                        "game_id": game_id,
                        "winner_id": winner_id,
                    }
                    # disengage all players involved in the game
                    # NOTE: THIS WILL ONLY WORK AS EXPECTED IF IT IS A 2 PLAYER GAME
                    for player in games.get(game_id)["players"].values():
                        player["engaged"] = False
                        add_to_send_queue(player["id"], [pickle.dumps(r)])
                    games.pop(game_id)  # delete this game
                    print(
                        f"[QUIT GAME]: {active_users[user_id]['username']} ({user_id}) | GAME ID: {game_id}"
                    )
                else:
                    reply["error"] = "Invalid game details!"
            # the player has made a move
            if data.get("move") is not None:  # move maybe 0
                game_id = data["move"].get("game_id")
                game = games.get(game_id)
                if not game:
                    reply["error"] = "Game does not exist!"
                else:
                    # if this move is valid, then move
                    is_valid, err = game["details"]["board"].validate(
                        user_id, data["move"].get("move")
                    )
                    if is_valid:
                        game_over, r = game["details"]["board"].move(
                            data["move"].get("move")
                        )
                        r["moved"]["game_id"] = game_id
                        # do something if the game is over
                        if game_over:
                            r["game_over"] = {
                                "game_id": game_id,
                                "winner_id": game_over["winner_id"],
                                "indices": game_over.get("indices"),
                            }
                        # broadcast the move to both players; disengage them on game over
                        for id in game["players"].keys():
                            if game_over:
                                active_users[id]["engaged"] = False
                            add_to_send_queue(id, [pickle.dumps(r)])
                    else:
                        reply["error"] = err
            # user updated profile image
            if data.get("image"):
                print(
                    f"[UPLOADING IMAGE]: {active_users[user_id]['username']} ({user_id})"
                )
                size, shape, dtype = (
                    data["image"]["size"],
                    data["image"]["shape"],
                    data["image"]["dtype"],
                )
                # max_image_size presumably comes from `constants` (star import) — TODO confirm
                if size > max_image_size:
                    error = {"error": "Image too large.", "image_allowed": False}
                    add_to_send_queue(user_id, [pickle.dumps(error)])
                    print(
                        f"[CANCELLED UPLOADING]: {active_users[user_id]['username']} ({user_id})"
                    )
                else:
                    add_to_send_queue(user_id, [pickle.dumps({"image_allowed": True})])
                    # the raw image bytes arrive as the next frame on this socket
                    full_image = recieve_data(conn)
                    # user disconnected
                    if full_image == "":
                        continue
                    # print("Total bytes recieved: ", len(full_image))
                    print(
                        f"[UPLOADED IMAGE]: {active_users[user_id]['username']} ({user_id})"
                    )
                    profile_pictures[user_id] = {
                        "size": size,
                        "user_id": user_id,
                        "shape": shape,
                        "dtype": dtype,
                        "image": full_image,
                    }
                    image_data = {
                        "image": {
                            "size": size,
                            "user_id": user_id,
                            "shape": shape,
                            "dtype": dtype,
                        }
                    }
                    send_image_to_all(image_data, full_image)
                    reply["message"] = {"title": "Uploaded successfully!"}
                    print(
                        f"[FINISHED UPLOAD]: {active_users[user_id]['username']} ({user_id})"
                    )
            # user updated username or something else
            if data.get("updated"):
                reply.update(update_user(user_id, data["updated"]))
            add_to_send_queue(user_id, [pickle.dumps(reply)])
        except Exception as e:
            print(f"error while processing data from {user_id}", e)
            try:
                print("data recieved was:", data, "length is:", len(data))
            except:
                print("no data recieved from", user_id)
            break
    # the user has disconnected
    disconnect_user(user_id, user_stats)
    # close the connection
    conn.close()
# Top-level server bootstrap: bind, listen, and hand each accepted client
# to a pair of worker threads (request handler + send-queue pump).
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # bind the socket to the host address and port
    s.bind((IP, PORT))
    print("Server started at: ", s.getsockname())
    # listen for connections
    s.listen()
    print("Server has started. waiting for connections...")
    while True:
        # accept any connection
        # a connection will come in the form a tuple
        # the connection itself with via the server and the client can communicate
        # and the address from where we are recieving the connection
        conn, addr = s.accept()
        print("[CONNECTED]: ", addr)
        total_connections_so_far += 1  # increment the total connections
        # generate a user
        # this cannot be done inside a thread because,
        # if 2 people connect at the same time, there will be an error
        user_id, user_stats = create_user(conn, addr)
        if not user_id:
            continue
        # start a thread for the new client
        start_new_thread(threaded_client, (conn, addr, user_id, user_stats))
        # start a thread to send messages to the new client
        start_new_thread(execute_send_queue, (user_id,))
| AaravGang/server-public | server.py | server.py | py | 27,113 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.