seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
40521294695 | import sys
sys.path.append("..")
from common import *
# Operator table: maps an operator token to the variadic callable stored as
# the head of each [op, lhs, rhs] AST node built by expr().
opsm = {
    "+": lambda *v: sum(v),
    "*": lambda *v: mult(v)  # `mult` comes from the star import of `common`
}
def toks(expression):
    """Tokenize *expression* into a flat list of single-digit ints and operator/paren chars.

    Renamed the parameter from `str`, which shadowed the builtin.
    """
    tokens = []
    for ch in expression:
        if ch.isnumeric():
            tokens.append(int(ch))  # single-digit operand
        else:
            tokens.append(ch)       # "+", "*", "(" or ")"
    return tokens
def atom(toks):
    """Consume and return the next atom: an int literal or a parenthesised sub-expression."""
    token = toks.pop(0)
    if isinstance(token, int):
        return token
    if token == "(":
        sub = expr(toks)
        toks.pop(0)  # discard the matching ")"
        return sub
def expr(toks, last=None):
    """Parse a token stream where '+' binds tighter than '*' (AoC 2020 day 18, part 2).

    `last`, when given, is an already-parsed left operand (used when recursing
    so the upcoming additions fold into the right operand of a '*').
    Returns a nested [op, lhs, rhs] AST consumed by eval().
    """
    # Bug fix: `if not last` treated a literal 0 operand as "no operand" and
    # re-parsed an atom; test identity against None instead.
    left = atom(toks) if last is None else last
    while len(toks) > 0 and toks[0] in opsm:
        op = opsm[toks.pop(0)]
        rhs = atom(toks)  # renamed from `next`, which shadowed the builtin
        if op == opsm["*"] and len(toks) > 0 and toks[0] == "+":
            # '+' has higher precedence: recurse so the additions attach to rhs
            left = [op, left, expr(toks, rhs)]
        else:
            left = [op, left, rhs]
    return left
def eval(ast):
    """Recursively evaluate an [op, lhs, rhs] AST node.

    Sub-trees are collapsed in place (the list is mutated), matching the
    original memoising behaviour.
    """
    op = ast[0]
    for slot in (1, 2):
        if isinstance(ast[slot], list):
            ast[slot] = eval(ast[slot])
    return op(ast[1], ast[2])
def parse(data):
    """Strip every space from one input line, tokenize it, and parse to an AST."""
    compact = data.replace(" ", "")
    return expr(toks(compact))
data = fnl(parse);
p(data);
print(sum([eval(l) for l in data])) | archanpatkar/advent2020 | Day-18/part2.py | part2.py | py | 1,099 | python | en | code | 0 | github-code | 36 |
29790427487 | import json
from flask import Flask, request
from flask_cors import CORS
from queue import Queue
from helpers.utils import utils
from helpers.db import queries
from helpers.algo import boundary
from helpers.algo import neighbours
from helpers.algo import degrees_count
from helpers.algo.new_hex_loc import*
app = Flask(__name__)
CORS(app)
def logger(val):
    """Crude debug logger: print *val* surrounded by blank lines."""
    print(f"\n{val}\n")
@app.route('/get-hex-by-name', methods=['GET', 'POST'])
# @cross_origin()
def search_hex_byName():
    """Look up a hexagon's details by name (query param ``name``)."""
    name = request.args['name']
    logger(name)
    if name:
        try:
            resp = queries.get_hex_details_by_name(name)
            logger(resp)
            return resp
        except Exception:  # narrowed from bare `except:`
            return {"err": "error"}
    # Bug fix: this used to be a set literal ({"..."} with no key), which
    # Flask cannot serialize; the trailing return was also unreachable.
    return {"err": "Please enter the name correctly to get all the details"}
@app.route('/get-hex-by-id', methods=['GET', 'POST'])
# @cross_origin()
def search_hex_byId():
    """Look up a hexagon's details by id (query param ``id``)."""
    hex_id = request.args['id']  # renamed from `id`, which shadowed the builtin
    logger(hex_id)
    if hex_id:
        try:
            resp = queries.get_hex_details_by_id(hex_id)
            logger(resp)
            return resp
        except Exception:
            # Bug fix: the old handler logged and returned `resp` here, but
            # `resp` is unbound when the query itself raised, turning any DB
            # error into a NameError.
            return {"err": "error"}
    # Bug fix: this used to be a set literal, which Flask cannot serialize.
    return {"err": "Please enter the id correctly to get all the details"}
@app.route('/get-all-coordinates', methods=['GET', 'POST'])
# @cross_origin()
def get_all_coords():
    """Return every stored hexagon location, or a network-error payload."""
    try:
        all_locations = queries.get_all_locations()
        logger(all_locations)
        return {'body': all_locations}
    except:
        return {"err": 'Network Error'}
@app.route('/add-hex', methods=['GET', 'POST'])
# @cross_origin()
def add_hex():
    """Attach a new hexagon to an existing one.

    Query params: ``src`` (origin hex name), ``new`` (new hex name),
    ``loc`` (integer border index on the origin where the new hex attaches).
    Inserts the new hex, its location and neighbour rows, links it back to the
    origin, then refreshes the surrounding neighbours.
    """
    origin_hex = request.args['src']
    new_hex = request.args['new']
    boundary_of_origin_hex = request.args['loc']
    boundary_of_origin_hex = int(boundary_of_origin_hex)
    if(origin_hex and new_hex and (boundary_of_origin_hex >= 0)):
        origin_coordinates_hex = queries.get_hex_location_by_name(origin_hex)
        origin_hex_is_active_or_not = origin_coordinates_hex.get("hexagons")[0].get(
            'is_active', '')
        # checking if the src hex is_active or not ("FALSE"/"TRUE" are stored as strings)
        if origin_hex_is_active_or_not == "FALSE":
            return {"err": "This origin hex is not active"}
        logger('-----here-----get_hex_location_by_name-origin---')
        logger(origin_coordinates_hex)
        origin_existing_neighbours = queries.get_hex_details_by_name(
            origin_hex).get("hexagons", "")[0].get("hex", "")
        # "NO" marks an empty border; anything else means the slot is taken
        if origin_existing_neighbours[utils.user_boundary_choice[boundary_of_origin_hex]] != 'NO':
            return {'err': 'already a hex exists at this boundary'}
        origin_id = origin_coordinates_hex.get("hexagons")[0].get(
            'location', '').get('hexagon_id', '')
        # Find location of the new hex
        # find neighbours around it , if present query their cluster table rows
        new_hex_loc = boundary.find_new_hex_loc(
            boundary_of_origin_hex, origin_hex, origin_coordinates_hex)  # New Hex location
        logger('-----here-----new-hex-loc-using-origin-loc-and-border---')
        logger(new_hex_loc)
        new_hex_neighbours = neighbours.find_new_hex_neighbours(
            new_hex_loc, boundary_of_origin_hex)  # Neighbours around new hex
        # insertions new hex // fetch id
        logger('-----here-----inserting-new-node---')
        insert_new_hex_resp = queries.insert_new_hex(new_hex)
        new_hexagon_id = list(map(lambda data: data.get(
            'hexagon_id'), insert_new_hex_resp))[0]
        logger(new_hexagon_id)
        # insert neighbours of new node
        logger('-----here-----inserting-new-node-neighbours---')
        new_hex_neighbours["hexagon_id"] = new_hexagon_id
        logger(new_hex_neighbours)
        column_updates = ['n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'updated_at']
        insert_new_hex_neighbours = queries.insert_hex_neighbours(
            {"data": new_hex_neighbours, "colm": column_updates})  # Inserting New hex Neighs. in cluster
        # insert location of new node (new_hex_loc assumed to be [q, r, s] cube
        # coordinates — TODO confirm against boundary.find_new_hex_loc)
        insert_new_hex_loc = queries.insert_new_hex_loc(
            new_hexagon_id, new_hex_loc[0], new_hex_loc[1], new_hex_loc[2])
        # insert neighbours of origin node: only the chosen border + timestamp change
        origin_req = {}
        origin_req[utils.user_boundary_choice[boundary_of_origin_hex]
                   ] = new_hexagon_id
        origin_req["hexagon_id"] = origin_id
        column_updates = [
            utils.user_boundary_choice[boundary_of_origin_hex], 'updated_at']
        logger({"data": origin_req, "colm": column_updates})
        update_origin_hex_neighbour = queries.insert_hex_neighbours(
            {"data": origin_req, "colm": column_updates})
        logger("----moving to update----")
        update_neighbours(new_hex_neighbours)
        return {"statusCode": 200, 'response': update_origin_hex_neighbour}
    else:
        return {'response': 'err'}
def update_neighbours(updating_neighbours):
    """Refresh the neighbour rows of the hexes referenced in *updating_neighbours*.

    *updating_neighbours* maps border names ('n1'..'n6') to neighbour ids, with
    "NO" marking empty borders (the dict also carries a 'hexagon_id' key).
    NOTE(review): the function returns from inside the loop, so only the FIRST
    active neighbour with a location is refreshed — confirm whether every
    neighbour should be processed instead.
    """
    # logger(updating_neighbours)
    for border in updating_neighbours:
        if (updating_neighbours[border] != 'NO'):
            hex_id = updating_neighbours[border]
            # logger(hex_id)
            neighbour_location_obj = queries.get_hex_location_by_id(hex_id)
            neighbour_is_active = neighbour_location_obj.get(
                'hexagons', [{'location': {}}])[0].get('is_active', '')
            if neighbour_is_active == 'TRUE':
                neighbour_location_dict = neighbour_location_obj.get(
                    'hexagons', [{'location': {}}])[0].get('location', '')
                # logger(neighbour_location_dict)
                if(neighbour_location_dict):
                    # q/r/s look like cube coordinates of the hex grid — TODO confirm
                    loc = [
                        neighbour_location_dict['q'],
                        neighbour_location_dict['r'],
                        neighbour_location_dict['s']
                    ]
                    updated_neighbours = neighbours.find_new_hex_neighbours(
                        loc, 1)
                    logger(updated_neighbours)
                    updated_neighbours["hexagon_id"] = hex_id
                    # logger(updated_neighbours)
                    column_updates = ['n1', 'n2', 'n3',
                                      'n4', 'n5', 'n6', 'updated_at']
                    insert_updated_neighbours = queries.insert_hex_neighbours(
                        {"data": updated_neighbours, "colm": column_updates})
                    return {"body": insert_updated_neighbours}
    return {"err": "error"}
@app.route('/remove-hex', methods=['GET', 'POST'])
# @cross_origin()
def delete_hex_bfs():
    """Remove the hex named by query param ``src`` if the cluster stays connected.

    A BFS over the hex's neighbourhood counts how many distinct paths lead back
    to the origin; the hex is only deleted when degree < 2 or when degree - 1
    alternative paths exist (removing it cannot split the cluster).
    """
    # Bug fix: 'n4' appeared twice in this list, which double-processed that
    # border in both the BFS entry counting and the final neighbour detach.
    borders = ['n1', 'n2', 'n3', 'n4', 'n5', 'n6']
    # Opposite side of each border (n1<->n4, n2<->n5, n3<->n6).
    border_map = {'n1': 'n4', 'n2': 'n5', 'n3': 'n6',
                  'n4': 'n1', 'n5': 'n2', 'n6': 'n3'}
    origin_hex = request.args['src']
    if origin_hex:
        try:
            neighbours_of_origin = queries.find_neighbours_by_name(origin_hex)
        except Exception:  # narrowed from bare `except:`
            return {"err": "error"}
        # The hex is already deleted or doesn't exist
        if len(neighbours_of_origin) > 0:
            neighbours_of_origin = neighbours_of_origin[0]
        else:
            return {"err": "error"}
        origin_hex_id = neighbours_of_origin.get("hex", "").get("hexagon_id", "")
        degree = degrees_count.calc_degree(neighbours_of_origin)
        if degree < 2:
            # A leaf (or isolated) hex can never disconnect the cluster.
            delete_resp = delete_hexagon_final(
                neighbours_of_origin, origin_hex, origin_hex_id, borders, border_map)
            if delete_resp:
                return {"body": "Done!"}
            else:
                return {"err": "error while removing"}
        # starting bfs
        frontier = Queue()
        frontier.put(origin_hex_id)
        # this set holds visited hexagon ids
        reached = set()
        reached.add(origin_hex_id)
        # (id, border) pairs uniquely identify the path used to reach a hex
        reached_border = []
        level = 0
        count_of_origin_hits_using_diff_path = 0
        while not frontier.empty():
            level = level + 1
            current = frontier.get()
            # fetching all the neighbour id's in a level
            details_neighbour_hex = queries.get_hex_details_by_id(
                current).get("hexagons", "")
            if len(details_neighbour_hex) > 0:
                details_neighbour_hex = details_neighbour_hex[0]
            count_of_entry_to_hex = 0
            # iterating over all the neighbours of the current id
            for border in borders:
                if details_neighbour_hex.get("hex", "").get(border, "") != "NO":
                    neighbour_id = details_neighbour_hex.get(
                        "hex", "").get(border, "")
                    if level == 1 and count_of_entry_to_hex == 0:
                        reached_border.append((current, border_map[border]))
                        # NOTE(review): no-op — the dedup result is discarded
                        list(set(reached_border))
                    count_of_entry_to_hex = count_of_entry_to_hex + 1
                    # already visited node also traversed through the same path
                    if (neighbour_id in reached) and (neighbour_id, border) in reached_border:
                        continue
                    if level > 1:
                        if ((neighbour_id not in reached) or
                                ((neighbour_id in reached) and (neighbour_id, border) not in reached_border)):
                            # the origin hex is found, but via a different path
                            if neighbour_id == origin_hex_id:
                                count_of_origin_hits_using_diff_path = count_of_origin_hits_using_diff_path + 1
                            # if degree - 1 independent paths re-reach the origin,
                            # the cluster stays connected without this hex
                            if count_of_origin_hits_using_diff_path == (degree - 1):
                                delete_resp = delete_hexagon_final(
                                    neighbours_of_origin, origin_hex, origin_hex_id, borders, border_map)
                                if delete_resp:
                                    return {"body": "Done!"}
                                else:
                                    return {"err": "error while removing"}
                    # record the neighbour and its corresponding border so we
                    # don't revisit that id from the previous path
                    frontier.put(neighbour_id)
                    reached.add(neighbour_id)
                    logger(neighbour_id)
                    logger(border)
                    if level > 1:
                        reached_border.append((current, border_map[border]))
                        # NOTE(review): no-op — the dedup result is discarded
                        list(set(reached_border))
    return {"err": "Not possible to remove"}
def delete_hexagon_final(neighbours_of_origin, origin_hex, origin_hex_id, borders, border_map):
    """Detach *origin_hex* from every neighbour, then delete it.

    For each border holding a real neighbour (value != "NO"), the neighbour's
    opposite border (via *border_map*) is reset to "NO". Returns True when the
    final delete succeeds, False otherwise.
    """
    for border in borders:
        neighbour_id = neighbours_of_origin.get("hex", "").get(border, "")
        if neighbour_id != "NO":
            origin_req = {
                "hexagon_id": neighbour_id,
                border_map[border]: "NO",
            }
            column_updates = [border_map[border], "updated_at"]
            queries.insert_hex_neighbours(
                {"data": origin_req, "colm": column_updates})
    try:
        queries.delete_hex(origin_hex, origin_hex_id)
        return True
    except Exception:  # narrowed from bare `except:`
        return False
| ricksr/cluster-anywhr | cluster/app.py | app.py | py | 11,777 | python | en | code | 1 | github-code | 36 |
13966927417 | from itertools import combinations
def make_combinations_set(order, menu_num):
    """Return every *menu_num*-character combination of *order*'s sorted chars,
    joined into strings and collected in a set."""
    sorted_menus = sorted(order)
    return {''.join(combo) for combo in combinations(sorted_menus, menu_num)}
def solution(orders, course):
    """Course-menu recommendation (Kakao blind-recruitment style problem).

    For every course size in *course*, build all candidate combinations from
    the orders, count how many orders contain each candidate, and keep the
    candidates that are ordered at least twice and tie for the maximum count.
    """
    answer = []
    comb_menu = set()
    for menu_num in course:
        # All menu_num-sized combinations appearing in any order.
        for order in orders:
            comb_menu |= make_combinations_set(order, menu_num)
        comb_menu = sorted(list(comb_menu))
        # print(comb_menu)
        count = 0
        each_count = 0
        menus_count = []
        for each_comb in comb_menu:
            for order in orders:
                # Count matching characters; bail out on the first miss, so
                # each_count == menu_num iff the order contains the whole combo.
                for each_menu in each_comb:
                    if each_menu not in order:
                        break
                    else:
                        each_count += 1
                if each_count == menu_num:
                    count += 1
                each_count = 0
            # A course candidate must appear in at least two orders.
            if count >= 2:
                menus_count.append([each_comb, count])
            count = 0
        menus_count = sorted(menus_count, key = lambda x : x[1], reverse = True)
        #print(menus_count)
        # Keep every candidate tied with the maximum count; IndexError when
        # menus_count is empty is deliberately swallowed (no candidate).
        try:
            max_count = menus_count[0][1]
            for each_menu in menus_count:
                if each_menu[1] == max_count:
                    answer.append(each_menu[0])
                else:
                    break
        except:
            pass
        comb_menu = set()
    answer = sorted(answer)
    print(answer)
return answer | Devlee247/NaverBoostCamp_AlgorithmStudy | week5/P01_myeongu.py | P01_myeongu.py | py | 1,634 | python | en | code | 1 | github-code | 36 |
74174195942 | #!/usr/bin/python3
""" square class """
from models.rectangle import Rectangle
class Square(Rectangle):
    """A square: a Rectangle whose width and height are always equal."""

    def __init__(self, size, x=0, y=0, id=None):
        """Initialise the square; `size` is used for both width and height."""
        super().__init__(size, size, x, y, id)

    def __str__(self):
        """Return ``[Square] (<id>) <x>/<y> - <size>``."""
        return "[Square] ({:d}) {:d}/{:d} - {:d}".format(self.id,
                                                         self.x, self.y, self.size)

    @property
    def size(self):
        """int: the side length (aliases the inherited width)."""
        return self.width

    @size.setter
    def size(self, value):
        """Validate and set the side length.

        The error messages intentionally say "width", matching the inherited
        Rectangle validation.
        """
        if not isinstance(value, int):
            raise TypeError("width must be an integer")
        if value <= 0:
            raise ValueError("width must be > 0")
        self.width = value

    def to_dictionary(self):
        """Return the dictionary representation of this square."""
        return {
            'id': self.id,
            'x': self.x,
            'size': self.size,
            'y': self.y
        }

    def update(self, *args, **kwargs):
        """Update attributes.

        Positional args take precedence over kwargs and are assigned in the
        order id, size, x, y; more than 4 positional args are ignored entirely
        (matching the original if/elif ladder, which had no branch for them).
        """
        attrs = ('id', 'size', 'x', 'y')
        if not args:
            for key, value in kwargs.items():
                if key in attrs:
                    setattr(self, key, value)
        elif len(args) <= 4:
            for attr, value in zip(attrs, args):
                setattr(self, attr, value)
| humeinstein/holbertonschool-higher_level_programming | 0x0C-python-almost_a_circle/models/square.py | square.py | py | 2,114 | python | en | code | 0 | github-code | 36 |
33968414347 | class Point:
MAX_COORD = 100
MIN_COORD = 0
    def __init__(self, x, y):
        """Store the coordinates without any range validation."""
        self.x = x
        self.y = y
    def set_coord(self, x, y):
        """Update both coordinates, but only when each lies within
        [MIN_COORD, MAX_COORD]; otherwise silently do nothing."""
        if self.MIN_COORD <= x <= self.MAX_COORD and self.MIN_COORD <= y <= self.MAX_COORD:
            self.x = x
            self.y = y
    def set_min_coord(self, min):
        # Creates a new attribute in the instance's own namespace; the class
        # attribute itself is not changed (demonstrates attribute shadowing).
        # NOTE(review): despite the name, this assigns MAX_COORD, not
        # MIN_COORD — looks like a typo; confirm the intent.
        self.MAX_COORD = min
    def __getattribute__(self, item):
        # Override of attribute access: reading `x` is deliberately forbidden
        # (demo of __getattribute__); all other attributes pass through.
        if item=='x':
            raise ValueError('Доступ закрыт')
        else:
            return object.__getattribute__(self,item)
# Quick demo: reading `y` works, while reading `x` would raise ValueError
# because of the __getattribute__ override above.
pt1 = Point(12,33)
pt2 = Point(1,2)
#print(pt1.__dict__)
print(pt1.y)
print(Point.__dict__)
| ivannumberone7/my_oop | 7.py | 7.py | py | 853 | python | ru | code | 0 | github-code | 36 |
13866614560 | import datetime
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from app.db.base_model import Base
class ArticleModel(Base):
    """SQLAlchemy model for the ``articles`` table, linked to its creating user."""
    __tablename__ = "articles"

    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(255))
    description = Column(String(255))
    url_font = Column(String(255))
    # FK to users.id; `creator` is eagerly joined and mirrored on the other
    # side by UserModel.articles via back_populates.
    user_id = Column(Integer, ForeignKey("users.id"))
    creator = relationship("UserModel",
                           back_populates="articles",
                           lazy="joined")
    # NOTE(review): datetime.utcnow produces naive timestamps and is deprecated
    # since Python 3.12 — consider datetime.now(timezone.utc); confirm what the
    # database/clients expect before changing.
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow)
| matheus-feu/FastAPI-JWT-Security | app/models/article.py | article.py | py | 753 | python | en | code | 3 | github-code | 36 |
14129279608 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sqlite3
from datetime import datetime
db_name = 'dbmovie{0}.db'.format(str(datetime.now())[:10].replace('-', ''))
class DbmoviePipeline(object):
    """Scrapy pipeline that persists scraped movie items into a SQLite database."""

    def process_item(self, item, spider):
        """Insert *item* into the `movies` table and return it unchanged.

        Security fix: the INSERT used to be built with str.format on scraped
        values, which both broke on titles containing quotes and was open to
        SQL injection; it now uses parameterized qmark placeholders.
        """
        if item:
            conn = sqlite3.connect(db_name)
            cursor = conn.cursor()
            try:
                cursor.execute(
                    'CREATE TABLE IF NOT EXISTS movies(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, movieName VARCHAR(50),url VARCHAR (50), directors VARCHAR(50), actors VARCHAR(200), countries VARCHAR (50), genres VARCHAR (50), languages VARCHAR (50), runtime INTEGER , udate VARCHAR (15), rate VARCHAR (5), votes INTEGER )')
                cursor.execute(
                    'insert into movies(id, movieName, url, directors, actors, countries, '
                    'genres, languages, runtime, udate, rate, votes) '
                    'VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
                    (item['movieName'], item['url'], item['directors'],
                     item['actors'], item['countries'], item['genres'],
                     item['languages'], item['runtime'], item['date'],
                     item['rate'], item['votes']))
            except sqlite3.Error as e:
                # On failure: report and close without committing.
                print(e.args[0])
                cursor.close()
                conn.close()
            else:
                conn.commit()
                cursor.close()
                conn.close()
        return item
| zenmeder/dbmovie | dbmovie/pipelines.py | pipelines.py | py | 1,434 | python | en | code | 0 | github-code | 36 |
25168770770 | #!/usr/bin/env python3
# Advent of Code 2017 day 15: duelling linear-congruential generators.
a, b = 65, 8921   # example seeds (immediately overridden below)
a, b = 703, 516   # actual seeds — presumably this puzzle input; confirm
a_fac, b_fac = 16807, 48271
div = 2147483647  # 2**31 - 1, the shared modulus
_a, _b = a, b
matches = 0
# Part 1: 40M rounds, counting pairs whose lowest 16 bits agree.
for i in range(40000000):
    _a = (_a * a_fac) % div
    _b = (_b * b_fac) % div
    match = 1 if (_a ^ _b) & 0xffff == 0 else 0
    #print(_a, _b, match)
    matches += match
print('part1', matches)
_a, _b = a, b
matches = 0
# Part 2: generator A only yields multiples of 4 and B multiples of 8;
# compare 5M such filtered pairs.
for i in range(5000000):
    _a = (_a * a_fac) % div
    while _a % 4 != 0:
        _a = (_a * a_fac) % div
    _b = (_b * b_fac) % div
    while _b % 8 != 0:
        _b = (_b * b_fac) % div
    match = 1 if (_a ^ _b) & 0xffff == 0 else 0
    matches += match
print('part2', matches, _a, _b)
| piratejon/toyproblems | adventofcode/2017/15/solve.py | solve.py | py | 651 | python | en | code | 1 | github-code | 36 |
6689643445 | import os
import uuid
import json
import minio
import logging
class storage:
    """Singleton wrapper around a Minio client (obtain via ``storage.get_instance()``)."""

    instance = None  # the shared singleton instance
    client = None    # underlying minio.Minio client

    def __init__(self):
        """Build the Minio client with a 1-second connect/read timeout.

        Raises whatever the client construction raises, after logging it.
        """
        try:
            """
            Minio does not allow another way of configuring timeout for connection.
            The rest of configuration is copied from source code of Minio.
            """
            import urllib3
            from datetime import timedelta
            timeout = timedelta(seconds=1).seconds
            mgr = urllib3.PoolManager(
                timeout=urllib3.util.Timeout(connect=timeout, read=timeout),
                maxsize=10,
                retries=urllib3.Retry(
                    total=5, backoff_factor=0.2, status_forcelist=[500, 502, 503, 504]
                )
            )
            self.client = minio.Minio(
                os.getenv("MINIO_STORAGE_CONNECTION_URL"),
                access_key=os.getenv("MINIO_STORAGE_ACCESS_KEY"),
                secret_key=os.getenv("MINIO_STORAGE_SECRET_KEY"),
                secure=False,
                http_client=mgr
            )
        except Exception as e:
            logging.info(e)
            raise e

    @staticmethod
    def unique_name(name):
        """Return *name* with a short random token inserted before the extension.

        Bug fix: the old ``name.split(".")`` raised ValueError for names with
        more than one dot (e.g. "archive.tar.gz"); ``rsplit(".", 1)`` keeps
        everything before the last dot as the stem.
        """
        stem, extension = name.rsplit(".", 1)
        return "{name}.{random}.{extension}".format(
            name=stem, extension=extension, random=str(uuid.uuid4()).split("-")[0]
        )

    def upload(self, bucket, file, filepath):
        """Upload the file at *filepath* under a randomized key; return that key."""
        key_name = storage.unique_name(file)
        self.client.fput_object(bucket, key_name, filepath)
        return key_name

    def download(self, bucket, file, filepath):
        """Download object *file* from *bucket* to local *filepath*."""
        self.client.fget_object(bucket, file, filepath)

    def download_directory(self, bucket, prefix, path):
        """Recursively download every object under *prefix* into *path*."""
        objects = self.client.list_objects(bucket, prefix, recursive=True)
        for obj in objects:
            file_name = obj.object_name
            self.download(bucket, file_name, os.path.join(path, file_name))

    def upload_stream(self, bucket, file, bytes_data):
        """Upload an in-memory BytesIO buffer under a randomized key; return the key."""
        key_name = storage.unique_name(file)
        self.client.put_object(
            bucket, key_name, bytes_data, bytes_data.getbuffer().nbytes
        )
        return key_name

    def download_stream(self, bucket, file):
        """Return the object's full content as bytes."""
        data = self.client.get_object(bucket, file)
        return data.read()

    @staticmethod
    def get_instance():
        """Return the shared storage instance, creating it on first use."""
        if storage.instance is None:
            storage.instance = storage()
        return storage.instance
| spcl/serverless-benchmarks | benchmarks/wrappers/openwhisk/python/storage.py | storage.py | py | 2,464 | python | en | code | 97 | github-code | 36 |
5213215500 | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
seen_set = set()
longest = 0
start, end = 0, 0
l = len(s)
while start < l and end < l:
if s[end] not in seen_set:
seen_set.add(s[end])
end += 1
longest = max(longest, end - start)
else:
seen_set.remove(s[start])
start += 1
return longest
| tugloo1/leetcode | problem_3.py | problem_3.py | py | 461 | python | en | code | 0 | github-code | 36 |
31167909236 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as pl
# Parse cohesive.txt (header row skipped): three whitespace-separated columns
# per line — cell count n, non-periodic energy e, periodic energy ep.
n = []
e = []
ep = []
with open('cohesive.txt') as file:
    next(file)
    for line in file:
        value = line.strip().split(' ')
        n.append(int(value[0]))
        e.append(float(value[1]))
        ep.append(float(value[2]))
n = [int(i) for i in n]  # NOTE(review): entries are already ints — redundant cast
a = 5.256*10**-10  # presumably the lattice constant; units/value to be confirmed
x = [a*i**3 for i in n]  # size derived from the cell count (a * n^3)
# Scatter both energy series against the cell count.
pl.plot(n, e, '.')
pl.plot(n, ep, '.')
pl.xticks(n)
pl.xlabel('Number of Unit Cells [^(1/3)]')
pl.ylabel('Eth [eV]')
pl.grid(b=True, which='both')
pl.tight_layout()
pl.legend(['Non-periodic', 'Periodic'])
pl.show()
pl.clf()
# Report the largest size and the final energies of each series.
print(x[-1])
print(e[-1])
print(ep[-1])
| leschultz/MSE760 | hw1/cohesiveplot.py | cohesiveplot.py | py | 640 | python | en | code | 0 | github-code | 36 |
43508294902 | import sys
from pathlib import Path
# Make the package root importable, then force a package name so that relative
# imports work when this file is run directly as a script.
# NOTE(review): sys.path entries are conventionally strings — this appends a
# Path object; confirm it resolves on the targeted Python versions.
sys.path.append(Path(__file__).resolve().parents[2])
# rel imports when in package
if __name__ == '__main__' and __package__ is None:
    __package__ = 'kuosc'
print(Path(__file__).resolve())
print(__package__)
# from kurosc.lib.plotformat import setup
| chriswilly/kuramoto-osc | Python/kurosc/kurosc/tests/pathtest.py | pathtest.py | py | 293 | python | en | code | 2 | github-code | 36 |
31556431228 | from copy import deepcopy
import numpy as np
from qode.fermion_field import occ_strings
from qode.fermion_field.state import state, dot, resolvent
from hamiltonian import hamiltonian, SD_excitations
# Energy calculated from Q-Chem 4.3:
# SCF energy in the final basis set = -2.8551604262
# CCSD total energy = -2.87178917
#
# PSI4 CISD energy = -2.8717891505016
# Orbital index lists for the CISD expansion (occupied / virtual).
occ_orbs = list(reversed([0,4]))
vrt_orbs = list(reversed([1,2,3,5,6,7]))
cisd_strings = occ_strings.CISD(occ_orbs,vrt_orbs)
num_config = len(cisd_strings)
reference = state(cisd_strings,0)  # the reference configuration (index 0)
# T: singles-and-doubles excitation operator, initially reference -> reference.
T = SD_excitations(reference, reference, cisd_strings, occ_orbs, vrt_orbs)
# Integral matrices loaded from disk — presumably one- and two-electron
# integrals respectively; confirm against the data-generation script.
h_mat = np.load('data/h_mat.npy')
V_mat = np.load('data/V_mat.npy')
H = hamiltonian(h_mat, V_mat, cisd_strings, occ_orbs, vrt_orbs)
D = deepcopy(H)
Eo = dot(reference,H(reference))  # reference-state energy expectation value
# Shift so the resolvent denominator is (H - Eo); the 1e-10 offset avoids an
# exactly singular denominator.
D.C0 -= (Eo - 1e-10)
R = resolvent(D)
debug = True
# Fixed-point refinement of T; prints the energy each sweep.
# NOTE(review): `while True` with no convergence test — this never terminates.
while True:
    accumulate = state(cisd_strings)
    accumulate.increment(H(reference),-1)
    intermediate = state(cisd_strings)
    intermediate.increment(H(T(reference)))
    intermediate.increment(T(H(reference)),-1)
    #
    E = Eo + dot(reference,intermediate)
    print("E =", E)
    if debug:
        # Variational energy of (1+T)|ref>, compared against the PSI4 CISD
        # value quoted in the header comment.
        psi = T(reference)
        psi.increment(reference)
        E = dot(psi,H(psi)) / dot(psi,psi)
        print("Evar = {} (error = {})".format(E,E+2.8717891505016))
    #
    # Alternating-sign accumulation of repeatedly T-mapped terms (3 orders).
    phase = +1
    for i in range(3):
        phase *= -1
        accumulate.increment(intermediate,phase)
        if i==2: break
        intermediate = T(intermediate)
    accumulate = R(accumulate)
    dT = SD_excitations(reference, accumulate, cisd_strings, occ_orbs, vrt_orbs)
    T.increment(dT)
| sskhan67/GPGPU-Programming- | QODE/Applications/component_tests/ccsd/attic/non_linear_opt_w_state_arithmetic.py | non_linear_opt_w_state_arithmetic.py | py | 1,599 | python | en | code | 0 | github-code | 36 |
44647926786 | import boto3
import json
from decimal import Decimal
from boto3.dynamodb.conditions import Key
dynamodb = boto3.resource('dynamodb')
attendance_table = dynamodb.Table('attendance_table_user')
# Fetch the office-attendance schedule for a specific user_id via a DynamoDB query.
def query_attendance(id):
    """Return all attendance items stored under partition key *id*."""
    result = attendance_table.query(
        KeyConditionExpression=Key('user_id').eq(id)
    )
    return result['Items']
def decimal_default_proc(obj):
    """``json.dumps`` default hook: convert DynamoDB Decimals to float."""
    if not isinstance(obj, Decimal):
        raise TypeError
    return float(obj)
def lambda_handler(event, context):
    """AWS Lambda entry point: return the caller's attendance list as JSON.

    Expects an API Gateway event with ``pathParameters.user_id``; CORS is
    opened to any origin via the response headers.
    """
    attendance = query_attendance(event['pathParameters']['user_id'])
    # print("attendance list:", attendance)
    return {
        'statusCode': 200,
        # Decimals from DynamoDB are converted to floats by the default hook.
        'body': json.dumps(attendance, default=decimal_default_proc),
        'isBase64Encoded': False,
        'headers' : {"content-type": "application/json",
                     "Access-Control-Allow-Origin": "*"}
    }
| SOICHI0826/kinikare_server | lambdafunction/get_attendance.py | get_attendance.py | py | 957 | python | en | code | 0 | github-code | 36 |
74436411945 |
import os
from pathlib import Path
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from shutil import copyfile
config = {
# General
'symbol': 'spy',
'symbol_name': 'S&P500',
'category': {'unespecified': ['spy']}, # 'gld', 'spy','xle', 'emb','dia', 'qqq', 'ewp'
'extension': '.csv',
'separator': ';',
'desired_length': 550, # for mahab states
'pattern': '_no_missing_data', # besides the name, gaps have been handled (the name it's a typo)
'cols': ['open', 'high', 'low', 'close', 'volume', 'datetime', 'gap', 'timestamp'],
'names_per_set': {
'dev': 'devset',
'train': 'train',
'mah': 'mahalanobis_state'
},
# ############ For sec level
# 'years_to_explore': ['2016', '2017', '2018', '2019', '2020'],
# 'output_path': 'C:\\Users\\suare\\data\\tmp\\spy_seeds_seconds',
# 'name': 'spy-seconds',
# 'lvl_str': 's',
# 'path': 'C:\\Users\\suare\\data\\analysis\\quantquote\\',
# 'resample': False, # resampling done in a previous script
# 'ms_field': 'timestamp', # time
# 'dt_field': 'datetime', # date
# 'desired_abs_mean_tresh': 0.01,
# 'desired_abs_min_tresh': 0.00000000000001,
# 'allowed_outliers_pct': 0.01,
# 'pattern': 'csv', # no pattern needed here by 05/04/2020
# 'prefix': 'table_'
#### get largets and get smallest applied to 1 (min and maxs)
# ############ For minute level
'years_to_explore': ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010',
'2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020'],
# IMP. don't do 2020 alone or period id == 1
'output_path': 'C:\\Users\\suare\\data\\tmp\\spy_seeds_minutes',
'name': 'spy-minutes',
# 'lvl_str': 'm',
# 'desired_abs_mean_tresh': 0.1,
'lvl_str': 'm',
'desired_abs_mean_tresh': 1,
'path': 'C:\\Users\\suare\\data\\analysis\\quantquote\\',
'resample': True, # resampling done in a previous script
'ms_field': 'timestamp', # time
'dt_field': 'datetime', # date
'desired_abs_min_tresh': 0.00000000000001,
'allowed_outliers_pct': 0.01,
'prefix': ''
}
def read_file(filename: str) -> pd.DataFrame:
    """Read the configured symbol's CSV from ``<config.path>/<filename>/``.

    Returns a DataFrame with the columns listed in config['cols'], or
    (implicitly) None when the file does not exist.
    """
    path = os.sep.join([config['path'], filename])
    filepath = os.sep.join([path, config['prefix'] + config['symbol'] + config['extension']])
    if os.path.isfile(filepath):
        print(f'Reading (unknown)')  # NOTE(review): f-string with no placeholder
        df = pd.read_csv(filepath, names=config['cols'])
        # print(df.head())
        return df
def get_pretraining_states(mahabset_df: pd.DataFrame, config: dict) -> dict:
    """Extract the three "mahalanobis state" DataFrames used for pretraining.

    Computes close-price returns and a rolling mean/std over
    config['desired_length'] rows, then delegates to identify_state() to pick
    a flat (1), falling (2) and rising (3) window, returned as {1: df, 2: df, 3: df}.

    :param mahabset_df: raw OHLCV frame with the columns listed in config['cols']
    :param config: module-level configuration dict (lengths, field names, thresholds)
    :return: dict mapping state id (1..3) to its selected DataFrame slice
    """
    mahabset_df = remove_non_trading_hours_minimised(df=mahabset_df)  # remove non trading hours
    # Generate close price returns and moving average to select the period with a mean close to 0
    log_ret = False  # otherwise percentual
    if log_ret:
        mahabset_df['close_returns'] = np.log(mahabset_df['close'] / mahabset_df['close'].shift(1))
        # mahabset_df['close_returns'] = np.log(1 + mahabset_df['close'].pct_change())
    else:
        mahabset_df['close_returns'] = mahabset_df['close'] / mahabset_df['close'].shift(1) - 1
        # mahabset_df['close_returns'] = mahabset_df['close'].pct_change(1) # same result
    mahabset_df.dropna(inplace=True)
    sma_col = f"SMA_{config['desired_length']}"
    mahabset_df[sma_col] = mahabset_df['close_returns'].rolling(window=config['desired_length']).mean()
    mahabset_df[sma_col+'_abs'] = mahabset_df[sma_col].abs()
    mahabset_df['roll_std'] = mahabset_df['close_returns'].rolling(window=config['desired_length']).std()
    # Start-of-window markers: the datetime/timestamp desired_length rows back.
    mahabset_df['SMA_start_date'] = mahabset_df[config['dt_field']].shift(config['desired_length'])
    mahabset_df['SMA_start_ms'] = mahabset_df[config['ms_field']].shift(config['desired_length'])
    # Drop first rows as the moving average is NaN
    len_with_nans = len(mahabset_df)
    # Prepare extra cols
    # mahabset_df['datetime'] = mahabset_df.index.astype(str) # maybe for min level is relevant
    mahabset_df.set_index(config['dt_field'], drop=False, inplace=True)
    mahabset_df.dropna(inplace=True)
    assert (len_with_nans - len(mahabset_df)) == config['desired_length'], 'There are non expected NaNs'
    states_dict = dict()
    for i in range(1, 4):  # States 1, 2 and 3 (hardcoded by now)
        selected_df = identify_state(config, mahabset_df, sma_col, state_id=i)
        # assert len(selected_df) == 1, "The maximum value is not a unique value."
        print(len(selected_df))
        print(selected_df)
        for idx, rw in selected_df.iterrows():
            print(f"Current selection from {rw['SMA_start_date']} to {rw[config['dt_field']]} with mean {sma_col}")
            mah_state = \
                mahabset_df[(mahabset_df['SMA_start_date'].between(rw['SMA_start_date'], rw[config['dt_field']]))]
            # avoid overwriting an state with another which len is <=35 (it has to be greater because of the indicators)
            if len(mah_state) >= 35:
                states_dict[i] = mah_state
                states_dict[i].sort_index(ascending=True, inplace=True)
    assert len(states_dict) == 3, f"State missing or diff than 3 states? states ok:{states_dict.keys()}"
    return states_dict
def identify_state(config: dict, mahabset_df: pd.DataFrame, sma_col: str, state_id: int) -> pd.DataFrame:
    """
    This function identifies the last row of a mahab state depending on a logic defined manually for that state id.
    Values 35, 20 and 20 were given manually to be able to have enough records hourly for all mahab states.
    These have been left as values by default after.

    :param config: configuration dict supplying the mean/min thresholds
    :param mahabset_df: frame already enriched with sma_col, sma_col+'_abs' and 'roll_std'
    :param sma_col: name of the rolling-mean column to rank by
    :param state_id: 1 = flat (mean ~0, high stdev), 2 = falling, 3 = rising
    :return: the candidate rows for that state
    """
    if state_id == 1:  # Select one close to 0 (the closest)
        # Select one close to 0
        # (not the closest cos there are period = 0 due to lack of liquidity at certain frequencies)
        selected_df = mahabset_df[mahabset_df[sma_col + '_abs'] <= config['desired_abs_mean_tresh']]
        # selected_df = selected_df[selected_df[sma_col + '_abs'].max() == selected_df[sma_col + '_abs']]
        # Filter by desired mean fpr the lateral movement (the one with greatest STDEV)
        # selected_df = selected_df[selected_df['roll_std'].max() == selected_df['roll_std']]
        # inst.of max, t.g.m. (take the 35 largest stdevs instead of the single max)
        selected_df = selected_df[selected_df['roll_std'].isin(list(selected_df['roll_std'].nlargest(35)))]
    elif state_id == 2:  # Select one negative (min)
        # get the min from a boundary. filter periods with 0 return at all. it may be due to lack of liquidity at s lvl
        selected_df = mahabset_df[mahabset_df[sma_col + '_abs'] >= (config['desired_abs_min_tresh'])]
        # selected_df = selected_df[selected_df[sma_col].min() == selected_df[sma_col]]
        # inst.of min, to get many (the 20 most negative rolling means)
        selected_df = selected_df[selected_df[sma_col].isin(list(selected_df[sma_col].nsmallest(20)))]
    elif state_id == 3:  # Select one positive (max)
        # selected_df = mahabset_df[mahabset_df[sma_col].nlargest(3) == mahabset_df[sma_col]]
        # inst.of max, to get many (the 20 most positive rolling means)
        selected_df = mahabset_df[mahabset_df[sma_col].isin(list(mahabset_df[sma_col].nlargest(20)))]
    else:
        # NOTE(review): `assert` vanishes under `python -O`; a raise would be safer.
        assert False, "The trend/pattern for this state has not been specified"
    return selected_df
def remove_non_trading_hours_minimised(df) -> pd.DataFrame:
    """Keep only rows inside regular market hours (09:31-15:59) on known trading dates.

    Lightweight variant of remove_non_trading_hours: no resampling, no column
    drops, no gap filling. Expects *df* to have a parseable ``datetime`` column.
    """
    trading_dates = pd.read_csv('trading_dates_Q11998_to_Q32021.csv')  # this list has format: dd/MM/yyyy
    # inverting date format (hardcoded dd/MM/yyyy -> yyyy-MM-dd)
    trading_dates = trading_dates.trading_dates.astype(str).apply(lambda x: x[6:10]+'-'+x[3:5]+'-'+x[:2])
    df.index = pd.to_datetime(df.datetime)
    df = df.between_time('09:31', '15:59')
    df = df[df.index.astype(str).str[:10].isin(trading_dates)]
    return df
def remove_non_trading_hours(df, config: dict, level: str = None) -> pd.DataFrame:
    """Restrict *df* to trading hours/days, optionally resampling to *level*.

    When config['resample'] is true the frame is OHLCV-resampled to the
    frequency prefix of *level* (e.g. "30m-..." -> "30m") and gaps are
    forward/zero filled afterwards.
    """
    # Parse cols, dates and sort
    # this may be useful at minute level
    # df['date'] = pd.to_datetime(df['date'], format='%Y%m%d').dt.strftime('%Y-%m-%d')
    # df['time'] = (pd.to_datetime(df['time'].astype(str).str.strip(), format='%H%M').dt.strftime('%H:%M'))
    # df['datetime'] = df.date.astype(str) + ' ' + df.time.astype(str)
    # trading_dates = df.datetime.str[:10].unique() # list of market days
    trading_dates = pd.read_csv('trading_dates_Q11998_to_Q32021.csv')  # this list has format: dd/MM/yyyy
    # inverting date format (hardcoded dd/MM/yyyy -> yyyy-MM-dd)
    trading_dates = trading_dates.trading_dates.astype(str).apply(lambda x: x[6:10]+'-'+x[3:5]+'-'+x[:2])
    df.index = pd.to_datetime(df.datetime)
    df.drop(columns=['date', 'time', 'datetime', 'splits', 'earnings', 'dividends'],
            errors='ignore', inplace=True)
    df.sort_index(inplace=True, ascending=True)
    # Resample (but not fill gaps. This should have been done already)
    print(f' - Original size: {len(df)}')
    if config['resample']:
        ohlc_dict = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
        df = df.resample(level.split('-')[0]).agg(ohlc_dict)
        print(f' - Size after resampling at {level}: {len(df)}')
    # Remove non-trading hours and non-trading days
    df = df.between_time('09:31', '15:59')  # This is potentially the only important line from the function
    print(f' - Size after filtering out non-market hours: {len(df)}')
    df = df[df.index.astype(str).str[:10].isin(trading_dates)]
    # (low impact..) df = df.between_time('09:31','13:00') for half days (shortened sessions / early closing)
    print(f' - Size after filtering out non-trading days: {len(df)}')
    if config['resample']:
        # Fill gaps: zero volume, carry the last close forward, and let
        # open/low/high fall back to that close.
        df['volume'] = df['volume'].fillna(0)
        df['close'] = df['close'].ffill()
        df['open'] = df['open'].fillna(df['close'])
        df['low'] = df['low'].fillna(df['close'])
        df['high'] = df['high'].fillna(df['close'])
    return df
def parse_and_save(file_dict: dict, level: str, period_id: int,
                   setid: str, setname: str, config: dict, all_files: list) -> list:
    """Clean one dataset (train/dev/mahalanobis) and persist it as CSV.

    For the ``'mah'`` set the raw file is first converted into pretraining
    "states" and every state is saved separately; any other set is cleaned
    with :func:`remove_non_trading_hours` and saved as a single file.

    :param file_dict: mapping set-id -> raw CSV path for the current period.
    :param level: frequency folder name (e.g. ``'1min'``).
    :param period_id: sequential period number used as output sub-folder.
    :param setid: set identifier (key of ``config['names_per_set']``).
    :param setname: set name used in the output file name.
    :param config: pipeline configuration (paths, separator, symbol, ...).
    :param all_files: accumulator of produced file paths (appended in place).
    :return: the updated ``all_files`` list.
    """
    file_path = file_dict[setid]
    if setid == 'mah':
        # For the mahalanobis set, it creates a moving average of x examples over the previous period to the devset.
        states_dict = get_pretraining_states(mahabset_df=pd.read_csv(file_path, sep=config['separator']),
                                             config=config)
        for k in states_dict.keys():
            # non trading hours have been removed in get_pretraining_states
            state_filepath = \
                os.sep.join([config['output_path'], level, str(period_id), f'{config["symbol"]}_{setname}_{k}.csv'])
            states_dict[k] = \
                remove_non_trading_hours(df=states_dict[k], config=config, level=level)  # for the sake of standarisation
            assert len(states_dict[k]) >= 35, "Mahalanobis set too small (must be >40 for technical indicators)"
            states_dict[k].to_csv(state_filepath, sep=';')
            all_files.append(state_filepath)
    else:
        set_filepath = os.sep.join([config['output_path'], level, str(period_id), f'{config["symbol"]}_{setname}.csv'])
        print(file_path)
        df = remove_non_trading_hours(df=pd.read_csv(file_path, sep=config['separator']), config=config, level=level)
        assert len(df) >=35 , "Dataset set too small (must be >=35 for technical indicators)"
        df.to_csv(set_filepath, sep=';')
        all_files.append(set_filepath)
    return all_files
def compute(files: dict, periods: tuple, period_id: int, files_for_indicators: list, config: dict) -> tuple:
    """Orchestrate cleaning and export of one period at every frequency level.

    Locates the raw CSVs of the train / mahalanobis / dev periods inside each
    level folder, then parses and saves every set via :func:`parse_and_save`.

    :param files: nested dict ``{period: {level: {set-id: path}}}`` (extended in place).
    :param periods: ``(train_period, mahab_period, dev_period)`` labels.
    :param period_id: sequential period number (used as output sub-folder).
    :param files_for_indicators: accumulator of produced file paths.
    :param config: pipeline configuration.
    :return: the updated ``(files, files_for_indicators)`` pair.
    """
    period, mahab_period, dev_period = periods
    files[period] = dict()
    # 1. it picks a period and changes the name.
    for level in os.listdir(config['path']):
        files[period][level] = dict()
        # Process second and minute level data. For min level, filter only files with pattern (as there are many others)
        if config['lvl_str'] in level and '.' not in level:  # and '30' in level: # IMP!! comment last condition out
            print('=========='+level+'\n'+'==========')
            lvl_path = config['path'] + level + os.sep + config['symbol_name']
            for file in os.listdir(lvl_path):  # all of these loops are not efficient at all
                if '.csv' in file and config['pattern'] in file:
                    if mahab_period in file:
                        files[period][level]['mah'] = lvl_path + os.sep + file
                    if dev_period in file:
                        files[period][level]['dev'] = lvl_path + os.sep + file
                    if period in file:
                        files[period][level]['train'] = lvl_path + os.sep + file
            # 2. Export all sets in a folder with a period number (like a seed)
            Path(os.sep.join([config['output_path'], level, str(period_id)])).mkdir(parents=True, exist_ok=True)
            for setid, setname in config['names_per_set'].items():
                print(f'set id: {setid}')
                files_for_indicators = \
                    parse_and_save(file_dict=files[period][level],
                                   level=level, period_id=period_id, setid=setid, setname=setname,
                                   config=config, all_files=files_for_indicators)
            # Debug
            print(files[period][level]['dev'])
            print(os.sep.join([config['output_path'], level, str(period_id), f'{config["symbol"]}_devset.csv']))
    return files, files_for_indicators
##########################################################
# Specific handling for second or minute level frequencies
def compute_seconds() -> tuple:
    """Iterate quarterly periods for second-level data and clean each one.

    Walks the configured years quarter by quarter, reusing the previous
    quarter as dev set and the one before as mahalanobis set (with a
    hardcoded bootstrap for the very first period), delegating the heavy
    lifting to :func:`compute`.

    :return: ``(files, files_for_indicators)`` accumulated over all periods.
    """
    mahab_m = '07'
    dev_m = '10'
    first = True
    period_id = 1
    files = dict()
    files_for_indicators = list()
    for yr in config['years_to_explore']:
        # this will need to be refactored/changed (maybe to an iterator) for min level
        for q_month in ['01', '04', '07', '10']:  # just for s level
            # it picks the prior period as a devset
            if not first:
                mahab_period = dev_period
                dev_period = period
            else:
                mahab_period = f'{int(yr) - 1}-{mahab_m}'
                dev_period = f'{int(yr) - 1}-{dev_m}'
                first = False
            period = yr + '-' + q_month
            # Compute periods in order
            files, files_for_indicators = \
                compute(files=files, periods=(period, mahab_period, dev_period), period_id=period_id,
                        files_for_indicators=files_for_indicators, config=config)
            period_id = period_id + 1
    return files, files_for_indicators
def compute_minutes() -> tuple:
    """Iterate yearly periods for minute/hour-level data and clean each one.

    Uses the previous year as dev set and the year before that as the
    mahalanobis set, delegating to :func:`compute`.

    :return: ``(files, files_for_indicators)`` accumulated over all periods.
    """
    files = dict()
    files_for_indicators = list()
    period_id = 1
    for yr in config['years_to_explore']:
        period = str(yr)
        dev_period = str(int(yr) - 1)
        mahab_period = str(int(yr) - 2)
        files[period] = dict()
        # if period_id == 6:
        # Compute periods in order
        files, files_for_indicators = \
            compute(files=files, periods=(period, mahab_period, dev_period), period_id=period_id,
                    files_for_indicators=files_for_indicators, config=config)
        period_id = period_id + 1
    return files, files_for_indicators
# Dispatch table: which period iterator to use for each frequency unit.
# Minute and hour data share the yearly iterator.
compute_func = {
    's': compute_seconds,
    'm': compute_minutes,
    'h': compute_minutes,
}
if __name__ == "__main__":
    # Difference handling periods at the second and minutes level due to data granularity and volume
    all_files_dict, file_list = compute_func[config['lvl_str']]()
    # Let's generate /txt files too in a TMP location
    pd.DataFrame({'files': file_list}).to_csv(f'tmp/files_for_indicators_lvl-{config["lvl_str"]}.csv')
    # Then trigger from here the whole convertion (technical indicators).
| cetrulin/Quant-Quote-Data-Preprocessing | src/select_mahab_series.py | select_mahab_series.py | py | 16,131 | python | en | code | 0 | github-code | 36 |
21140466023 |
# Read the list of "<mark> <name>" entries (stand-in for reading from stdin)
a = '3 Сергей', '5 Николай', '4 Елена', '7 Владимир', '5 Юлия', '4 Светлана'
lst_in = list(a)
print(lst_in)
# Split every entry into a [mark, name] pair
B = [i.split() for i in lst_in]
print(B)
# Collect just the marks (first token of every entry)
F = [i.split()[0] for i in lst_in]
print(F)
# Dictionary keyed by the distinct marks (values default to None)
d = dict.fromkeys(F)
# Draft below: group names by mark and print "mark: name1, name2"
# d_key = list(d)
# for i in range(len(d)):
#     C = []
#     for j in range(len(B)):
#         if d_key[i] == B[j][0]:
#             C.append(B[j][1])
#     d[d_key[i]] = C
# for key, value in d.items():
#     print(key, ", ".join(value), sep=': ')
947943428 | #!/usr/bin/env python3
import requests
import bs4
# Page template: pages are addressed as /page/1/, /page/2/, ...
base_url = "https://quotes.toscrape.com/page/{}/"
authors = set()
quotations = []
# NOTE(review): only page 1 is fetched (range(1, 2)); widen the range to scrape more pages.
for page_num in range(1,2):
    page = requests.get(base_url.format(page_num))
    soup = bs4.BeautifulSoup(page.text,'lxml')
    boxes = soup.select(".quote") #selected all the quotes boxes
    for box in boxes:
        # Inside each quote box: span[0] is the quote text, span[1]/small is the author.
        author = box.select('span')[1].select('small')[0].getText()
        quotation = box.select('span')[0].getText()
        authors.add(author)
        quotations.append(quotation)
print("Authors are: ")
print(authors)
print('\n')
print("Quotations are: ")
print(quotations)
print('\n')
# Top-ten tags come from the sidebar of the last fetched page.
top10 = soup.select(".tag-item")
for i in range(10 ):
    print(top10[i].select('a')[0].getText())
| SKT27182/web_scaping | get_quotes_author.py | get_quotes_author.py | py | 751 | python | en | code | 0 | github-code | 36 |
11210169685 | from django.conf import settings
from travels import models
from django.utils.html import escapejs
def project_settings(request):
    """Context processor exposing the singleton Settings row as 'project_settings'.

    NOTE(review): assumes at least one Settings row exists — IndexError otherwise.
    """
    project_settings = models.Settings.objects.all()[0]
    return { 'project_settings' : project_settings }
def settings_variables(request):
    ''' Provides base URLs for use in templates '''
    # Singleton Settings row; same existence assumption as project_settings.
    project_settings = models.Settings.objects.all()[0]
    d = {
        'APP_NAME': escapejs(settings.APP_NAME),
        'PROJECT_DESCRIPTION': escapejs(project_settings.project_description),
    }
    # Allows settings to define which variables
    # it wants to expose to templates
    # NOTE(review): raises AttributeError when CONTEXT_VARIABLES is not defined in settings.
    if settings.CONTEXT_VARIABLES:
        for var in settings.CONTEXT_VARIABLES:
            if hasattr(settings, var):
                d[var] = getattr(settings, var)
    return d
10302851450 | l = input("insert list of integers with comma")
# Strip optional brackets and split the comma-separated input into tokens.
l = l.lstrip('[').rstrip(']').split(',')
sum = 0  # NOTE(review): shadows the builtin sum()
temp = 0
# Single pass: accumulate the sum and the sum of squares.
for i in range(0, len(l)):
    l[i] = int(l[i])
    sum += l[i]
    temp += l[i] ** 2
mean = sum / len(l)
# Variance via E[X^2] - (E[X])^2
var = temp / len(l) - mean ** 2
print(f"Means: {mean}")
print(f"Variance: {var}")
'''
scores = [100, 90, 80, 55, 95, 80, 65, 75, 70, 90]
sum = 0
for i in scores:
    sum = sum + i
mean = sum/len(scores)
vsum = 0
for i in scores:
    vsum = vsum + (i - mean)**2
variance = vsum / len(scores)
print("Means :", mean)
print("Variance :", variance)
'''
14531544904 | mhb_file = open("mhb_gesamt.txt","r",encoding="utf8")
# Read the whole module-handbook dump (file handle opened above).
mhb_gesamt = mhb_file.read()
mhb_file.close()
# Each module description starts with the marker "Modulbezeichnung: ".
modDescrSeq = mhb_gesamt.split("Modulbezeichnung: ")
# Element 0 is the preamble before the first marker, so start writing at 1.
i = 1
while i<len(modDescrSeq):
    fileName = "ModDescr_" + str(i)
    file = open(fileName + ".txt","w")
    file.write(modDescrSeq[i].strip())
    file.close()
    i += 1
18190036430 | import pandas as pd
import Levenshtein
import numpy as np
from anytree.search import find
from utils.category_tree import get_category_tree
from utils.io_custom import read_pickle_object
from scipy.spatial.distance import cosine
import re
def find_node(id, tree):
    """Return the anytree node whose ``name`` equals ``id`` (None when absent)."""
    return find(tree, lambda node: node.name == id)
def get_relative_depth(id, tree):
    """Depth of category ``id`` relative to node 872901 (presumably the catalogue root — TODO confirm)."""
    return (find_node(id, tree).depth - find_node(872901, tree).depth)
def count_children(id, tree):
    """Number of direct children of category ``id`` in the tree."""
    return len(find_node(id, tree).children)
def count_descendants(id, tree):
    """Total number of descendants (the whole subtree) of category ``id``."""
    return len(find_node(id, tree).descendants)
def preprocessing_text(s):
    """
    Normalize free text: keep only alphanumeric runs (Latin and Cyrillic),
    join them with single spaces and lower-case the result.

    :param s: str - input string.
    :return: str - normalized string.
    """
    # The original wrapped the result in a redundant str() call;
    # " ".join already returns str, so it is dropped here.
    return " ".join(re.findall("[a-zA-Zа-яА-Я0-9]+", s)).lower()
def get_levenshtein_distance_between(first_line, second_line):
    """
    Return the Levenshtein (edit) distance between two strings.

    :param first_line: str - first string.
    :param second_line: str - second string.
    :return: int - Levenshtein distance between the strings.
    """
    return Levenshtein.distance(first_line, second_line)
def get_lev_dist_between_query_category(query, category):
    """
    Element-wise Levenshtein distance between two pandas Series.

    :param query: pd.Series - query strings.
    :param category: pd.Series - category strings.
    :return: np.array - distances between corresponding elements.
    """
    pairs = zip(query.values, category.values)
    return np.array([get_levenshtein_distance_between(q, c) for q, c in pairs])
def get_brands_and_products_lists(path_to_data):
    """
    Load the known brand and product names from their CSV files.

    :param path_to_data: str - folder holding unique_brands.csv and unique_products.csv.
    :return: tuple - (list of brand strings, list of product strings),
        taken from the first column of each file.
    """
    brands = pd.read_csv(path_to_data + "/unique_brands.csv")
    brands = [str(brand) for brand in brands.iloc[:, 0]]
    products = pd.read_csv(path_to_data + "/unique_products.csv")
    products = [str(product) for product in products.iloc[:, 0]]
    return brands, products
def create_data_with_features(path_to_data):
    """
    Load the training data and generate features for it.

    :param path_to_data: str - relative path to the training data folder.
    :return data: pd.DataFrame - dataframe with the full feature set.
    Kept for backward compatibility with two notebooks.
    """
    data = pd.read_csv(path_to_data + "/data_for_model.csv")
    return get_data_with_feature(data, path_to_data)
def get_cosine_dist_between_query_category(query, category, vectorizer):
    """
    Element-wise cosine distance between vectorised queries and categories.

    :param query: pd.Series - query strings.
    :param category: pd.Series - category strings.
    :param vectorizer: fitted TF-IDF vectorizer (trained on train queries/categories).
    :return: np.array - cosine distances between corresponding rows.
    """
    query_rows = vectorizer.transform(query.values)
    category_rows = vectorizer.transform(category.values)
    return np.array([
        cosine(q_row.toarray(), c_row.toarray())
        for q_row, c_row in zip(query_rows, category_rows)
    ])
def get_data_with_feature(data, path_to_data):
    """
    Generate features for the training and validation data.

    :param data: pd.DataFrame - training or validation data with the columns
        [query, category_id, category_name, is_redirect].
    :param path_to_data: str - relative path to the brand/product/tree data.
    :return data: pd.DataFrame - dataframe with text statistics, category-tree
        statistics, Levenshtein and cosine distance features.
    """
    brands, products = get_brands_and_products_lists(path_to_data)
    root = get_category_tree(path_to_data)
    # Normalise the raw text first so all string features share one representation.
    data['query'] = data['query'].apply(preprocessing_text)
    data['category_name'] = data['category_name'].apply(preprocessing_text)
    data['len_of_query'] = data['query'].apply(lambda query: len(query))
    data['num_of_word_in_query'] = data['query'].apply(
        lambda query:
        len(query.split(' '))
    )
    # NOTE(review): category_name is normalised twice (also above); harmless but redundant.
    data['category_name'] = data['category_name'].apply(preprocessing_text)
    data['len_of_category'] = data['category_name'].apply(
        lambda category:
        len(category)
    )
    data['num_of_word_in_category'] = data['category_name'].apply(
        lambda category:
        len(category.split(' '))
    )
    # Substring counts of known brand/product names inside the query.
    data['how_match_brands_name_in_query'] = data['query'].apply(
        lambda query:
        sum([True for brand in brands if query.find(brand) != -1])
    )
    data['how_match_products_name_in_query'] = data['query'].apply(
        lambda query:
        sum([True for product in products if query.find(product) != -1])
    )
    data['mean_word_len_in_category'] = data['category_name'].apply(
        lambda category_name:
        np.mean([len(word) for word in category_name.split(' ')])
    )
    data['mean_word_len_in_query'] = data['query'].apply(
        lambda query:
        np.mean([len(word) for word in query.split(' ')])
    )
    data['max_word_len_in_category'] = data['category_name'].apply(
        lambda category_name:
        np.max([len(word) for word in category_name.split(' ')])
    )
    data['max_word_len_in_query'] = data['query'].apply(
        lambda query:
        np.max([len(word) for word in query.split(' ')])
    )
    data['min_word_len_in_category'] = data['category_name'].apply(
        lambda category_name:
        np.min([len(word) for word in category_name.split(' ')])
    )
    data['min_word_len_in_query'] = data['query'].apply(
        lambda query:
        np.min([len(word) for word in query.split(' ')])
    )
    data['is_query_long'] = data['len_of_query'].apply(lambda l: int(l > 50))
    # TODO: verify the three features generated from the category tree:
    data['relative_depth'] = data['category_id'].apply(
        lambda category_id:
        get_relative_depth(category_id, root)
    )
    data['children_count'] = data['category_id'].apply(
        lambda category_id:
        count_children(category_id, root)
    )
    data['descendants_count'] = data['category_id'].apply(
        lambda category_id:
        count_descendants(category_id, root)
    )
    data['lev_dist'] = get_lev_dist_between_query_category(data['query'],
                                                           data['category_name'])
    # Vectorizer was fitted on the train split and pickled alongside the data.
    vectorizer = read_pickle_object(path_to_data + '/vectorizer.obj')
    data['cosine_dist'] = get_cosine_dist_between_query_category(data['query'],
                                                                 data['category_name'],
                                                                 vectorizer)
    # data['number_of_children_category'] = get_relative_depth(data['category_id'])
    # data['number_of_descendants_category'] = count_descendants(data['category_id'])
    # data['category_depth'] = get_relative_depth(data['category_id'])
    data = data.drop(columns=['category_id', 'query', 'category_name'])
    return data
| comptech-winter-school/online-store-redirects | utils/feature_generation.py | feature_generation.py | py | 8,101 | python | ru | code | 3 | github-code | 36 |
16018321414 | import sys
def change():
    """Read a price from stdin and return the minimum coin count for change from 1000."""
    remaining = 1000 - int(sys.stdin.readline())
    coins = 0
    # Greedy from the largest denomination down; divmod yields 0 coins
    # whenever the denomination no longer fits.
    for denom in (500, 100, 50, 10, 5, 1):
        used, remaining = divmod(remaining, denom)
        coins += used
    return coins
print(change()) | Zikx/Algorithm | Baekjoon/Greedy/change.py | change.py | py | 323 | python | en | code | 0 | github-code | 36 |
13909907482 | """Module with lagrangian decomposition methods."""
# Python packages
# Package modules
import logging as log
from firedecomp.AL import ARPP
from firedecomp.AL import ADPP
from firedecomp.fix_work import utils as _utils
from firedecomp.original import model as _model
from firedecomp.classes import problem as _problem
import time
import math
import gurobipy
import copy
###############################################################################
# CLASS LagrangianRelaxation()
###############################################################################
class AugmentedLagrangian(object):
    def __init__(
            self,
            problem_data,
            min_res_penalty=1000000,
            valid_constraints=None,
            gap=0.01,
            max_iters=100000,
            max_time=10,
            log_level="AL",
            solver_options=None,
    ):
        """Initialize the Augmented Lagrangian object.

        Args:
            problem_data (:obj:`Problem`): problem data; its time unit must
                be a period.
            min_res_penalty (:obj:`int`): penalty forwarded to the decomposed
                primal problems.
            valid_constraints: valid inequalities forwarded to the decomposed
                primal problems (None for none).
            gap (:obj:`float`): GAP tolerance for stop criteria.
                Defaults to 0.01.
            max_iters (:obj:`int`): maximum number of iterations.
            max_time (:obj:`float`): maximum cpu time (in seconds).
            log_level (:obj:`str`): logging level. Defaults to ``'AL'``.
            solver_options (:obj:`dict`): Gurobi parameters; output is
                silenced by default.
        """
        # PROBLEM DATA
        if problem_data.period_unit is False:
            raise ValueError("Time unit of the problem is not a period.")
        self.problem_data = problem_data
        self.solution_best = None  # index of DPP best solution
        self.solution_best_original = None
        # GLOBAL VARIABLES
        self.max_iters = max_iters
        self.max_time = max_time
        self.init_time = time.time()
        self.v = 1  # iterations
        # Number of relaxed constraints: 1 + one per wildfire period.
        self.NL = (1 + len(problem_data.get_names("wildfire")))  # +
        # len(problem_data.get_names("wildfire"))*len(problem_data.get_names("groups"))*2);
        # GUROBI OPTIONS
        if solver_options is None:
            solver_options = {
                'OutputFlag': 0,
                'LogToConsole': 0,
            }
        self.solver_options = solver_options
        # PARAMETERS INNER METHODS
        self.change = []
        self.beta_matrix = []
        self.lambda_matrix = []
        self.lambda_matrix_prev = []
        self.upperbound_matrix = []
        self.lobj_global = float("-inf")
        self.fobj_global = float("inf")
        self.infeas_global = float("inf")
        self.subgradient_global = []
        self.penalties_global = []
        self.index_best = -1
        # Multiplier bounds and initial value used by the subgradient step.
        self.lambda_min = 1e1
        self.lambda_max = 1e5
        self.lamdba_init = 1e3
        # Iterations a subproblem must stay stable/infeasible before being frozen.
        self.th_sol = 10
        # INITIALIZE DDecomposedPrimalProblemPP PROBLEM
        # Initialize Decomposite Primal Problem Variables
        self.problem_DPP = []
        self.N = len(self.problem_data.get_names("resources"))
        self.y_master_size = len(
            self.problem_data.get_names("wildfire") + [int(min(self.problem_data.get_names("wildfire"))) - 1])
        self.counterh_matrix = []
        # Per-y-configuration bookkeeping (objectives, infeasibility, counters).
        self.lobj_local = []
        self.lobj_local_prev = []
        self.fobj_local = []
        self.infeas_local = []
        self.subgradient_local = []
        self.subgradient_local_prev = []
        self.penalties_local = []
        self.termination_counter = []
        self.best_list_y = []
        for i in range(0, self.y_master_size):
            self.termination_counter.append(0)
            self.lobj_local.append(float("inf"))
            self.lobj_local_prev.append(float("inf"))
            self.fobj_local.append(float("inf"))
            self.infeas_local.append(float("inf"))
            self.subgradient_local.append([])
            self.penalties_local.append([])
            self.upperbound_matrix.append(float("inf"))
        self.termination_counter[self.y_master_size - 1] = self.th_sol + 1
        # INITIALIZE LAMBDA AND BETA
        for i in range(0, self.y_master_size):
            lambda_row = []
            lambda_row_prev = []
            lambda_row_inf = []
            beta_row = []
            subgradient_prev_row = []
            for j in range(0, self.NL):
                lambda_row.append(self.lamdba_init)
                lambda_row_inf.append(float("inf"))
                beta_row.append(0.3)
                lambda_row_prev.append(self.lamdba_init)
                subgradient_prev_row.append(0)
            self.subgradient_local_prev.append(subgradient_prev_row)
            self.lambda_matrix.append(lambda_row)
            self.lambda_matrix_prev.append(lambda_row_prev)
            self.beta_matrix.append(beta_row)
            self.change.append(1.0)
        # CREATE ORIGINAL Problems list
        _utils.get_initial_sol(self.problem_data)
        dict_update = self.problem_data.get_variables_solution()
        print("UPDATE original problem")
        for i in range(0, self.y_master_size - 1):
            # One DPP per y-configuration: y[p] = 0 for the last i+1 periods.
            self.y_master = dict([(p, 1) for p in range(0, self.y_master_size)])
            for p in range(self.y_master_size - (1 + i), self.y_master_size):
                self.y_master[p] = 0
            print("Create index: " + str(i) + " y: " + str(self.y_master))
            model_DPP = ADPP.DecomposedPrimalProblem(self.problem_data,
                                                     self.lambda_matrix[i], self.beta_matrix[i],
                                                     self.y_master, self.N,
                                                     min_res_penalty=min_res_penalty,
                                                     valid_constraints=valid_constraints)
            self.problem_DPP.append(model_DPP)
###############################################################################
# PUBLIC METHOD subgradient()
###############################################################################
    def subgradient(self, subgradient, subgradient_prev, lambda_vector, beta_vector, lambda_matrix_prev, ii):
        """Update multipliers (in place) with a projected subgradient step.

        Each ``lambda_vector[i]`` moves by ``beta_vector[i] * subgradient[i]``
        and is clipped to ``[lambda_min, lambda_max]``; every step size grows
        by 20% per call. When the iterate jumps from fully feasible to
        infeasible ("stuck"), still-feasible components are rolled back to
        their previous multiplier. ``subgradient_prev`` and
        ``lambda_matrix_prev`` are refreshed in place; ``ii`` (the
        y-configuration index) is currently unused.
        """
        lambda_old = lambda_vector.copy()
        beta_old = beta_vector.copy()
        stuck = 0
        if max(subgradient_prev) < 0 and max(subgradient) > 0:
            stuck = 1
        for i in range(0, self.NL):
            LRpen = subgradient[i]
            if stuck == 1 and LRpen < 0:
                new_lambda = lambda_matrix_prev[i]
            else:
                new_lambda = (lambda_old[i] + LRpen * beta_old[i])
            lambda_vector[i] = min(max(self.lambda_min, new_lambda), self.lambda_max)
            beta_vector[i] = beta_vector[i] * 1.2
            # print(str(LRpen) + " -> lambda " + str(lambda_old[i]) + " + " + str(beta_old[i] * LRpen) + " = " + str(
            #    lambda_vector[i]) + " update " + str(beta_old[i]) + " diff " + str(
            #    abs(abs(lambda_vector[i]) - abs(lambda_old[i]))) + " beta " + str(
            #    beta_vector[i]))  # + " change_per "+str(change_per) )
        # print("")
        # print("")
        for i in range(0, self.NL):
            subgradient_prev[i] = subgradient[i]
            lambda_matrix_prev[i] = lambda_old[i]
        del lambda_old
        del beta_old
###############################################################################
# PUBLIC METHOD convergence_checking()
###############################################################################
    def convergence_checking(self):
        """Decide whether the main loop must stop.

        Bumps each configuration's termination counter while it stays
        infeasible or while its Lagrangian objective changes by less than
        0.1%, and stops on: max iterations, max execution time, or every
        counter having reached ``th_sol``.

        :return: bool - True when the algorithm must stop.
        """
        stop = bool(False)
        result = 0
        optimal_solution_found = 0
        # print("TERMINATION COUNTER"+str(self.termination_counter))
        # CHECK PREVIOUS LAMBDAS CHANGES
        for i in range(0, len(self.lambda_matrix) - 1):
            # print(str(self.termination_counter[i])+" "+str(self.infeas_local[i]))
            if self.infeas_local[i] > 0:
                self.termination_counter[i] = self.termination_counter[i] + 1
            if self.termination_counter[i] < self.th_sol and self.infeas_local[i] <= 0:
                # Relative change (in %) of the Lagrangian objective.
                lobj_diff = abs(
                    (abs(self.lobj_local[i]) - abs(self.lobj_local_prev[i])) / abs(self.lobj_local[i])) * 100
                # # print(str(i) + "self.lobj_local[i] - self.lobj_local_prev[i] " + str(lobj_diff) + "% ")
                if (lobj_diff < 0.1):
                    self.termination_counter[i] = self.termination_counter[i] + 1
                else:
                    self.termination_counter[i] = 0
            self.lobj_local_prev[i] = self.lobj_local[i]
        # CHECK TERMINATION COUNTER MATRIX
        counter = 0
        all_termination_counter_finished = 0
        for i in range(0, self.y_master_size):
            if self.termination_counter[i] >= (self.th_sol):
                counter = counter + 1
        if counter == self.y_master_size:
            all_termination_counter_finished = 1
        # print("counter" + str(counter) + " termination_counter" + str(self.y_master_size))
        # STOPPING CRITERIA CASES
        current_time = time.time() - self.init_time
        # check convergence
        if (self.v >= self.max_iters):
            print("[STOP] Max iters achieved!")
            stop = bool(True)
        if (current_time >= self.max_time):
            print("[STOP] Max execution time achieved!")
            stop = bool(True)
        elif (all_termination_counter_finished == 1):
            print("[STOP] Convergence achieved, optimal local point searched!")
            stop = bool(True)
        elif (optimal_solution_found == 1):
            print("[STOP] Convergence achieved, optimal solution found!")
            stop = bool(True)
        return stop
###############################################################################
# PUBLIC METHOD solve()
###############################################################################
    def solve(self):
        """Run the augmented-Lagrangian loop over every y-configuration.

        Each iteration solves the decomposed primal problem once per resource,
        keeps the best candidate, updates the multipliers via
        :meth:`subgradient`, records any new feasible incumbent, and stops
        according to :meth:`convergence_checking`. If no feasible solution is
        ever found, it falls back to the least-infeasible single-resource
        solution.

        :return: the best problem data found (also stored in ``self.problem_data``).
        """
        print("SOLVE ALGORITHM")
        termination_criteria = bool(False)
        while termination_criteria == False:
            # (1) Solve DPP problems
            for i in range(0, self.y_master_size - 1):
                # Show iteration results
                if i == 0:
                    log.info("Iteration # mi lambda f(x) L(x,mi,lambda) penL")
                    print("\n\nIter: " + str(self.v) + " " +
                          "LR(x): " + str(self.lobj_global) + " " +
                          "f(x):" + str(self.fobj_global) + " " +
                          "penL:" + str(self.infeas_global) + " time:" +
                          str(time.time() - self.init_time) + "\n")
                if self.termination_counter[i] < self.th_sol:
                    # print("### Y -> " + str(self.problem_DPP[i].list_y))
                    DPP_sol_row = []
                    DPP_sol_unfeasible = False
                    total_obj_function = []
                    total_unfeasibility = []
                    total_subgradient = []
                    total_obj_function_pen = []
                    total_problem = []
                    self.lobj_local[i] = 0
                    self.fobj_local[i] = 0
                    self.subgradient_local[i] = []
                    for z in range(0, self.NL):
                        self.subgradient_local[i].append(float("-inf"))
                    self.penalties_local[i] = []
                    self.infeas_local[i] = 0
                    # Solve the subproblem once per resource j.
                    for j in range(0, self.N):
                        try:
                            self.problem_DPP[i].change_resource(j, self.lambda_matrix[i], self.beta_matrix[i], self.v)
                            DPP_sol_row.append(self.problem_DPP[i].solve(self.solver_options))
                            # Gurobi statuses 3/4: INFEASIBLE / INF_OR_UNBD.
                            if (DPP_sol_row[j].model.Status == 3) or (DPP_sol_row[j].model.Status == 4):
                                DPP_sol_unfeasible = True
                                break
                        except:
                            print("Error Solver: Lambda/beta error")
                            DPP_sol_unfeasible = True
                            break
                        total_problem.append(self.problem_DPP[i].problem_data.copy_problem())
                        subgradient = self.problem_DPP[i].return_LR_obj2()
                        total_obj_function_pen.append(self.problem_DPP[i].return_function_obj_total_pen())
                        total_obj_function.append(self.problem_DPP[i].return_function_obj_total())
                        total_unfeasibility.append(max(subgradient))
                        total_subgradient.append(subgradient)
                        # print(str(j) + " fobj " + str(self.problem_DPP[i].return_function_obj()) + " total " +
                        #      str(self.problem_DPP[i].return_function_obj_total()) + "unfeas " + str(max(subgradient)))
                    if DPP_sol_unfeasible:
                        # Freeze this configuration for the rest of the run.
                        self.termination_counter[i] = self.th_sol + 1
                    else:
                        bestid = self.problem_DPP[i].return_best_candidate(total_obj_function, total_unfeasibility)
                        self.lobj_local[i] = total_obj_function_pen[bestid]
                        self.fobj_local[i] = total_obj_function[bestid]
                        self.infeas_local[i] = total_unfeasibility[bestid]
                        self.subgradient_local[i] = total_subgradient[bestid]
                        #print("TOTAL" + str(i) +
                        #      " LR " + str(self.lobj_local[i]) +
                        #      " fobj " + str(self.fobj_local[i]) +
                        #      " Infeas " + str(self.infeas_local[i]))
                        self.subgradient(self.subgradient_local[i], self.subgradient_local_prev[i],
                                         self.lambda_matrix[i], self.beta_matrix[i],
                                         self.lambda_matrix_prev[i], i)
                        self.change[i] = 0
                        # New incumbent: better objective and feasible.
                        if self.fobj_global > self.fobj_local[i] and (self.infeas_local[i] <= 0):
                            self.problem_DPP[i].problem_data = total_problem[bestid]
                            self.lobj_global = self.lobj_local[i]
                            self.fobj_global = self.fobj_local[i]
                            self.subgradient_global = self.subgradient_local[i]
                            self.infeas_global = self.infeas_local[i]
                            self.solution_best_original = total_problem[
                                bestid].copy_problem()  # self.update_problem_data_sol(self.problem_DPP[i])
                            self.solution_best_original.constrvio = self.infeas_global
                            self.solution_best_original.solve_status = 2
                            # print("New Solution:")
                            # print(self.solution_best_original.get_solution_info())
                            self.change[i] = 1
                        self.problem_DPP[i].update_original_values(DPP_sol_row[bestid], self.change[i])
                    DPP_sol_row.clear()
            # (3) Check termination criteria
            termination_criteria = self.convergence_checking()
            self.v = self.v + 1
        # DESTROY DPP
        # print(self.solution_best_original.get_solution_info())
        if self.solution_best_original is None:
            # Fallback: pick the least-infeasible single-resource solution.
            # NOTE(review): `i` below leaks from the loop above on the first call — verify intended.
            self.problem_DPP[0].change_resource(0, self.lambda_matrix[i], self.beta_matrix[i], self.v)
            self.problem_DPP[0].solve(self.solver_options)
            min_fobj = self.problem_DPP[0].return_function_obj_total()
            min_feasibility = max(self.problem_DPP[0].return_LR_obj2())
            best_index = 0
            for i in range(1, self.N):
                self.problem_DPP[i].change_resource(0, self.lambda_matrix[i], self.beta_matrix[i], self.v)
                self.problem_DPP[i].solve(self.solver_options)
                fobj = self.problem_DPP[i].return_function_obj_total()
                feas = max(self.problem_DPP[i].return_LR_obj2())
                if min_feasibility >= feas:
                    if min_feasibility == feas:
                        if min_fobj > fobj:
                            best_index = i
                    else:
                        best_index = i
            self.solution_best_original = self.problem_DPP[best_index].problem_data.copy_problem()
            self.solution_best_original.constrvio = max(self.problem_DPP[i].return_LR_obj2())
            self.solution_best_original.solve_status = 2
        self.problem_data = self.solution_best_original
        return self.solution_best_original
###############################################################################
# PRIVATE extract_infeasibility()
###############################################################################
def extract_infeasibility(self, subgradient):
infeas = 0
for i in range(0, len(subgradient)):
if (subgradient[i] > 0):
infeas = infeas + subgradient[i]
return infeas
###############################################################################
# PRIVATE destroy_DPP_set()
###############################################################################
def destroy_DPP_set(self):
for i in range(0, len(self.problem_DPP)):
len_p = len(self.problem_DPP[i])
if (len_p > 0):
for j in range(0, len_p):
del self.problem_DPP[i][0]
self.problem_DPP[i] = []
self.problem_DPP = []
# print("DESTROY")
###############################################################################
# PRIVATE METHOD __log__()
###############################################################################
# def __log__(self, level="AL"):
# log.addLevelName(80, "AL")
# log.Logger.LR = logging.LR
# if level != 'AL':
# log_level = getattr(log, level)
# logger = log.getLogger('AL_logging')
# logger.setLevel(log_level)
# logger.addFilter(logging.LRFilter())
# if len(logger.handlers) == 0:
# ch = log.StreamHandler()
# ch.setLevel(log_level)
# # create formatter and add it to the handlers
# formatter = log.Formatter("%(levelname)8s: %(message)s")
# ch.setFormatter(formatter)
# logger.addHandler(ch)
# else:
# log_level = 80
# logger = log.getLogger('AL')
# logger.setLevel(log_level)
# if len(logger.handlers) == 0:
# ch = log.StreamHandler()
# ch.setLevel(log_level)
# # create formatter and add it to the handlers
# formatter = log.Formatter("%(message)s")
# ch.setFormatter(formatter)
# logger.addHandler(ch)
#
# self.log = logger
# return 1
    def update_problem_data_sol(self, solution):
        """Return a copy of the problem data populated with ``solution`` values."""
        problem = self.problem_data.copy_problem()
        problem = self.solution_to_problem(problem, solution)
        return problem
    def solution_to_problem(self, problem, solution):
        """Copy the values of a solved model into a Problem object.

        Rounds the binary variables (selection, start/use/end/work/travel/
        rest/end-rest), stores the per-group slack ``mu`` and derives the
        first contained period from the ``y`` variables.

        :param problem: Problem object to fill in (mutated and returned).
        :param solution: solved model exposing Gurobi variables
            (``.x`` values and ``.getValue()`` expressions).
        :return: the updated ``problem``.
        """
        problem.mipgap = None
        problem.mipgapabs = None
        problem.constrvio = self.infeas_global
        problem.solve_status = 2
        variables = solution
        data = problem.data
        problem.resources.update(
            {i: {'select': round(variables.z[i].getValue()) == 1}
             for i in data.I})
        # Binary schedules per resource i and period t.
        s = {(i, t): round(variables.s[i, t].x) == 1
             for i in data.I for t in data.T}
        u = {(i, t): round(variables.u[i, t].getValue()) == 1
             for i in data.I for t in data.T}
        e = {(i, t): round(variables.e[i, t].x) == 1
             for i in data.I for t in data.T}
        w = {(i, t): round(variables.w[i, t].getValue()) == 1
             for i in data.I for t in data.T}
        r = {(i, t): round(variables.r[i, t].x) == 1
             for i in data.I for t in data.T}
        er = {(i, t): round(variables.er[i, t].x) == 1
              for i in data.I for t in data.T}
        tr = {(i, t): round(variables.tr[i, t].x) == 1
              for i in data.I for t in data.T}
        problem.resources_wildfire.update(
            {(i, t): {
                'start': s[i, t],
                'use': u[i, t],
                'end': e[i, t],
                'work': w[i, t],
                'travel': tr[i, t],
                'rest': r[i, t],
                'end_rest': er[i, t]
            }
                for i in data.I for t in data.T})
        problem.groups_wildfire.update(
            {(g, t): {'num_left_resources': variables.mu[g, t].x}
             for g in data.G for t in data.T})
        # y[t] == 0 means the wildfire is contained at period t.
        contained = {t: variables.y[t].x == 0
                     for t in data.T}
        contained_period = [t for t, v in contained.items()
                            if v is True]
        if len(contained_period) > 0:
            first_contained = min(contained_period) + 1
        else:
            first_contained = data.max_t + 1
        problem.wildfire.update(
            {t: {'contained': False if t < first_contained else True}
             for t in data.T})
        return problem
    def create_init_solution(self, problem_data):
        """Solve the original model with the last y fixed to 0 to get a start point.

        Builds a y-configuration with every period active except the last,
        fixes those values as variable bounds in a fresh :class:`InputModel`
        and solves it with a short time limit.

        :param problem_data: Problem data to copy and solve.
        :return: the solved initial model.
        """
        T = problem_data.get_names("wildfire")
        min_t = int(min(T))
        list_y = dict([(p, 1) for p in range(0, len(T + [min_t - 1]))])
        list_y[len(list_y) - 1] = 0
        print(list_y)
        problem_data_copy = problem_data.copy_problem()
        init_problem = _model.InputModel(problem_data_copy)
        solver_options = {
            'OutputFlag': 1,
            'LogToConsole': 1,
            'TimeLimit': 10,
        }
        # Fix each y variable by collapsing its bounds to the chosen value.
        for i in range(0, len(list_y)):
            init_problem.y[i].UB = list_y[i]
            init_problem.y[i].LB = list_y[i]
        init_problem.m.update()
        print("COMPUTE INIT SOLUTION")
        init_problem.solve(solver_options)
        print("END COMPUTE INIT SOLUTION")
        return init_problem
| jorgerodriguezveiga/firedecomp | firedecomp/AL/AL.py | AL.py | py | 22,213 | python | en | code | 0 | github-code | 36 |
69833306343 | class detail:
def __init__(self) -> None:
self.__name="harry"
a=detail()
# print(a.__name) #cannot access directly
# print(a._detail__name) #can be accessed indirectly #NAME MANGLING
print(a.__dir__()) #from this we see all the available methods on a
| Adarsh1o1/python-initials | oops/access_specifiers.py | access_specifiers.py | py | 268 | python | en | code | 1 | github-code | 36 |
5128321881 | # Задача-2:
# Даны два произвольные списка.
# Удалите из первого списка элементы, присутствующие во втором списке.
lst_1 = [1, 3, 5, 7, 9]
lst_2 = [3, 8, 6, 5]
# Keep only the elements of lst_1 that do not occur in lst_2.
# A set makes each membership test O(1) instead of scanning lst_2.
_exclude = set(lst_2)
lst_1 = [item for item in lst_1 if item not in _exclude]
print(lst_1, lst_2)
print(lst_1, lst_2) | BocheVskiy/HW3_easy | easy_2.py | easy_2.py | py | 351 | python | ru | code | 0 | github-code | 36 |
70390276585 | from flask import Flask,render_template,request,jsonify
import utils
app = Flask(__name__)
@app.route('/') #Base API
def home():
    """Render the landing page template."""
    print('Testing Home API')
    return render_template('home.html')
@app.route('/predict', methods = ['POST'])
def prediction():
    """Predict the iris class from the four measurements posted by the form.

    Reads SepalLengthCm, SepalWidthCm, PetalLengthCm and PetalWidthCm
    from the submitted form data and renders the result template.
    """
    print('Testing prediction API')
    data = request.form
    # NOTE(review): the route only accepts POST, so the else branch below
    # is unreachable in practice.
    if request.method == 'POST':
        print('Input data is :',data)
        x1 = float(data['SepalLengthCm'])
        x2 = float(data['SepalWidthCm'])
        x3 = float(data['PetalLengthCm'])
        x4 = float(data['PetalWidthCm'])
        # presumably runs the trained model on the four features — see
        # the utils module for the actual implementation.
        prediction = utils.predict_class(x1,x2,x3,x4)
        return render_template('after.html', data=prediction)
    else:
        return jsonify({'Message':'Unsuccessful'})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=False) | PrashantBodhe/irisproject1 | interface.py | interface.py | py | 820 | python | en | code | 0 | github-code | 36 |
31062248631 | from tkinter import *
import os
import platform
### FUNCTION TO KILL
def stopProg(e):
root.destroy()
### FUCNTION THAT DOES CALCULATIONS FOR CENTER OF SCREEN
### AND THEN CENTERS THE WINDOW
def center(window, w, h):
    """Place *window*, sized w x h, in the middle of the screen."""
    screen_w = window.winfo_screenwidth()
    screen_h = window.winfo_screenheight()
    left = (screen_w / 2) - (w / 2)
    top = (screen_h / 2) - (h / 2)
    window.geometry('%dx%d+%d+%d' % (w, h, left, top))
### FUNCTION THAT ACTIVATES IF TIMESHEET ALREADY EXISTS
def ts_exists(timesheet):
    """Pop up a dialog offering to edit an already-existing timesheet.

    timesheet: filename of the existing timesheet ("LAST, FIRST CASE.txt").
    Uses the module-level Tk ``root`` and the global ``center`` helper.
    """
    ### FUNCTION TO KILL POPUP WINDOW
    def destroy_ts_exists(e):
        win.destroy()
    ### FUNCTION THAT STARTS Edit.py WITH RIGHT ARGUMENTS
    ### KILLS Main.py
    def edit(event, ts):
        # Close both the popup and the main window before handing off.
        win.destroy()
        root.destroy()
        op_s = platform.system()
        print(op_s)
        # NOTE(review): interpreter names are hard-coded per OS; macOS
        # ("Darwin") and other systems fall through and launch nothing.
        if op_s == "Windows":
            os.system("python Edit.py "+'"'+ts+'"')
        elif op_s == "Linux":
            os.system("python3.4 Edit.py "+'"'+ts+'"')
    ### CREATE AND CENTER POPUP WINDOW
    win = Toplevel()
    win.title("Timesheet Found")
    center(win, 250, 125)
    ### THERE'S THIS LINE AGAIN, PRETTY SURE IT'S OKAY NOW
    win.wm_attributes('-topmost', 1)
    ### LABELS
    Label(win, text = "Timesheet Found", background = "green").pack()
    Label(win, text = timesheet).pack()
    Label(win, text = "Edit This Timesheet?").pack()
    ### BUTTONS AND BINDINGS
    n = Button(win, text = "No")
    n.pack()
    n.bind('<Button-1>', destroy_ts_exists)
    y = Button(win, text = "Yes")
    y.pack()
    # Bind timesheet as a default arg so the current value is captured.
    y.bind('<Button-1>',
           lambda event, ts=timesheet: edit(event, ts))
### FUNCTION THAT ACTIVATES IF TIMESHEET DOES NOT EXIST
def ts_no_exist(timesheet, case):
    """Pop up a dialog offering to create a missing timesheet file.

    timesheet: filename that was not found on disk.
    case: case number (NOTE(review): currently unused in this function).
    Uses the module-level Tk ``root`` and the global ``center`` helper.
    """
    ### FUNCTION TO KILL POPUP WINDOW
    def destroy_ts_no_exist(e):
        pop.destroy()
    ### FUNCTION THAT STARTS Create.py WITH THE APPROPRIATE ARGUMENTS
    def edit(event, ts):
        # Close both the popup and the main window before handing off.
        pop.destroy()
        root.destroy()
        op_s = platform.system()
        print(op_s)
        # NOTE(review): interpreter names are hard-coded per OS; other
        # systems (e.g. macOS "Darwin") fall through and launch nothing.
        if op_s == "Windows":
            os.system("python Edit.py "+'"'+ts+'"')
        elif op_s == "Linux":
            os.system("python3.4 Edit.py "+'"'+ts+'"')
    ### CREATE AND CENTER POPUP WINDOW
    pop = Toplevel()
    pop.title("Timesheet Doesn't Exist")
    center(pop, 250, 125)
    ## THIS LINE IS A TOTAL NO-GO ---- ???? KEEPS ON TOP?? ...wtf
    pop.wm_attributes('-topmost', 1)
    ### LABELS
    Label(pop, text = "Timesheet does not exist.", background = "red").pack()
    Label(pop, text = "Create Time sheet?").pack()
    ### BUTTONS AND BINDINGS
    n = Button(pop, text = "No")
    n.pack()
    n.bind('<Button-1>', destroy_ts_no_exist)
    y = Button(pop, text = "Yes")
    y.pack()
    # Bind timesheet as a default arg so the current value is captured.
    y.bind('<Button-1>',
           lambda event, ts=timesheet: edit(event, ts))
### SUBMIT FUNCTION
### USES FIRST AND LAST NAME TO FORMAT A FILENAME
### CHECKS IF IT EXISTS, AND ACTIAVTES THE APPROPRIATE FUNCTION
def submit(e):
    """Build the timesheet filename from the form fields and dispatch.

    Opens the "already exists" dialog when the file is present on disk,
    or the "create it?" dialog when it is not.  Reads the module-level
    f_name / l_name / case_no entry widgets.
    """
    first = f_name.get()
    last = l_name.get()
    case = case_no.get()
    #format a file name from user input
    full_fn = last.upper()+", "+first.upper()+" "+case.upper()+".txt"
    if os.path.isfile(full_fn) == False:
        ts_no_exist(full_fn, case)
    elif os.path.isfile(full_fn):
        ts_exists(full_fn)
###################
### DRAWING GUI ###
###################
### CREATE MAIN WINDOW
root = Tk()
root.title("Select Timesheet")
### CENTER WINDOW
center(root, 252, 125)
### CREATING AND PLACING LABELS
Label(root,text = "First Name").grid(row = 0)
Label(root, text = "Last Name").grid(row = 1)
Label(root, text = "Case #").grid(row = 2)
### CREATING AND PLACING ENTRY BOXES
f_name = Entry(root)
f_name.grid(row = 0, column = 1)
l_name = Entry(root)
l_name.grid(row = 1, column = 1)
case_no= Entry(root)
case_no.grid(row = 2, column = 1)
### CREATING, PLACING AND BINDING BUTTONS
submit_but = Button(root, text = "Submit")
submit_but.grid(row = 3)
submit_but.bind('<Button-1>', submit)
### START MAINLOOP
root.mainloop()
| MikeVaughanG/timesheet-input-system | Main.py | Main.py | py | 3,648 | python | en | code | 0 | github-code | 36 |
19982458840 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import unittest
from tutorons.regex.extract import ApacheConfigRegexExtractor, JavascriptRegexExtractor,\
GrepRegexExtractor, SedRegexExtractor
from tutorons.common.htmltools import HtmlDocument
logging.basicConfig(level=logging.INFO, format="%(message)s")
'''
TODO consider implementing regular expression checking for these languages:
1. tcl shell
2. Python regular expression methods
3. Java methods
'''
class ExtractRegexFromModRewriteTest(unittest.TestCase):
def setUp(self):
self.extractor = ApacheConfigRegexExtractor()
def test_extract_regex_for_rewrite_rule(self):
node = HtmlDocument('\n'.join([
"<code>",
"RewriteRule ^.*$ index.php",
"</code>",
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 1)
r = regions[0]
self.assertEqual(r.node, node)
self.assertEqual(r.start_offset, 13)
self.assertEqual(r.end_offset, 16)
def test_extract_regex_for_rewrite_condition(self):
node = HtmlDocument('\n'.join([
"<code>",
"RewriteCond %{HTTP_USER_AGENT} ^Mozilla",
"</code>",
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 1)
r = regions[0]
self.assertEqual(r.start_offset, 34)
self.assertEqual(r.end_offset, 41)
def test_allow_whitespace_before_directive(self):
node = HtmlDocument('\n'.join([
"<code>",
" RewriteCond %{HTTP_USER_AGENT} ^Mozilla",
"</code>",
]))
regions = self.extractor.extract(node)
r = regions[0]
self.assertEqual(r.start_offset, 38)
self.assertEqual(r.end_offset, 45)
def test_case_insensitive_directive_detected(self):
node = HtmlDocument('\n'.join([
"<code>",
"REWRITEcOnD %{HTTP_USER_AGENT} ^Mozilla",
"</code>",
]))
regions = self.extractor.extract(node)
r = regions[0]
self.assertEqual(r.start_offset, 34)
self.assertEqual(r.end_offset, 41)
class ExtractRegexFromJavascriptTest(unittest.TestCase):
def setUp(self):
self.extractor = JavascriptRegexExtractor()
def test_extract_regex_from_variable_declaration(self):
node = HtmlDocument('\n'.join([
'<code>',
"var pattern = /regular-expression/g;",
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 1)
r = regions[0]
self.assertEqual(r.node, node)
self.assertEqual(r.start_offset, 16)
self.assertEqual(r.end_offset, 33)
def test_skip_code_that_doesnt_pass_javascript_parser(self):
node = HtmlDocument('\n'.join([
'<code>',
"<>/regex/;",
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 0)
def test_skip_regex_with_repeated_flags(self):
node = HtmlDocument('\n'.join([
'<code>',
"var pattern = /regular-expression/gg;",
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 0)
def test_skip_regex_with_invalid_flags(self):
node = HtmlDocument('\n'.join([
'<code>',
"var pattern = /regular-expression/x;",
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 0)
class ExtractRegexFromGrepTest(unittest.TestCase):
def setUp(self):
self.extractor = GrepRegexExtractor()
def test_extract_regex_from_variable_declaration(self):
node = HtmlDocument('\n'.join([
'<code>',
"grep pattern *",
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 1)
r = regions[0]
self.assertEqual(r.node, node)
self.assertEqual(r.start_offset, 6)
self.assertEqual(r.end_offset, 12)
def test_extract_same_pattern_from_multiple_greps_in_one_element(self):
node = HtmlDocument('\n'.join([
'<code>',
"grep pattern *",
"grep pattern *",
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 2)
r1 = regions[0]
self.assertEqual(r1.start_offset, 6)
self.assertEqual(r1.end_offset, 12)
r2 = regions[1]
self.assertEqual(r2.start_offset, 21)
self.assertEqual(r2.end_offset, 27)
def test_extract_pattern_containing_spaces(self):
node = HtmlDocument('\n'.join([
'<code>',
"grep 'Pattern with spaces' *",
'</code>',
]))
regions = self.extractor.extract(node)
r = regions[0]
self.assertEqual(r.start_offset, 7)
self.assertEqual(r.end_offset, 25)
def test_extract_pattern_from_option(self):
node = HtmlDocument('\n'.join([
'<code>',
"grep -e pattern *",
'</code>',
]))
regions = self.extractor.extract(node)
r = regions[0]
self.assertEqual(r.start_offset, 9)
self.assertEqual(r.end_offset, 15)
def test_extract_patterns_from_multiple_options(self):
node = HtmlDocument('\n'.join([
'<code>',
"grep -e pattern1 -e pattern2 *",
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 2)
self.assertTrue(any([r.start_offset == 9 and r.end_offset == 16 for r in regions]))
self.assertTrue(any([r.start_offset == 21 and r.end_offset == 28 for r in regions]))
class ExtractRegexFromSedTest(unittest.TestCase):
def setUp(self):
self.extractor = SedRegexExtractor()
def test_extract_regexes_from_address_range(self):
node = HtmlDocument('\n'.join([
'<code>',
'sed "/addr1/,/addr2/p" file',
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 2)
r1 = regions[0]
self.assertEqual(r1.node, node)
self.assertEqual(r1.start_offset, 7)
self.assertEqual(r1.end_offset, 11)
r2 = regions[1]
self.assertEqual(r2.start_offset, 15)
self.assertEqual(r2.end_offset, 19)
def test_ignore_addresses_that_arent_regex(self):
node = HtmlDocument('\n'.join([
'<code>',
'sed "0,1p" file',
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 0)
def test_extract_regex_from_substitute_pattern(self):
node = HtmlDocument('\n'.join([
'<code>',
'sed "s/patt/replace/" file',
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 1)
r = regions[0]
self.assertEqual(r.start_offset, 8)
self.assertEqual(r.end_offset, 11)
def test_extract_regex_from_multiple_substitute_patterns(self):
node = HtmlDocument('\n'.join([
'<code>',
'sed -e "s/patt1/replace/" -e "s/patt2/replace/" file',
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 2)
self.assertTrue(any([r.start_offset == 11 and r.end_offset == 15 for r in regions]))
self.assertTrue(any([r.start_offset == 33 and r.end_offset == 37 for r in regions]))
def test_handle_escaped_characters(self):
node = HtmlDocument('\n'.join([
'<code>',
'sed \'s/pa\/tt/replace/\' file',
'</code>',
]))
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 1)
r = regions[0]
self.assertEqual(r.start_offset, 8)
self.assertEqual(r.end_offset, 13)
self.assertEqual(r.pattern, r'pa/tt')
def test_handle_find_pattern_with_character_class(self):
'''
This test case failed earlier as we performed a regex search with the pattern found
against the original command, and it was being interpreted as a regex, and not a raw string.
'''
node = HtmlDocument('<code>sed "s/[A-Z]bc//g" file.txt</code>')
regions = self.extractor.extract(node)
self.assertEqual(len(regions), 1)
if __name__ == '__main__':
unittest.main()
| andrewhead/tutorons-server | tutorons/tests/regex/test_extractor.py | test_extractor.py | py | 8,711 | python | en | code | 6 | github-code | 36 |
16919406854 | from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.decorators import api_view
import io
from rest_framework import status
from todos.models import Task
from .serializers import TaskSerializer
@api_view(['GET'])
def get_task_list(request):
    """Return every task, serialized, with HTTP 200."""
    tasks = Task.objects.all()
    # print('>>> tasks : ',tasks)
    serializer = TaskSerializer(tasks, many=True)
    return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['GET'])
def get_task_detail(request, pk):
task = Task.objects.get(pk=pk)
serializer = TaskSerializer(task)
return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['POST'])
def create_task(request):
serializer = TaskSerializer(data=request.data, context={'request':request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE'])
def delete_task(request, pk):
task = Task.objects.get(pk=pk)
task.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['PUT', 'PATCH'])
def update_task(request, pk):
    """Update a task.

    PUT requires the full representation; PATCH accepts a partial
    payload.  Returns the updated task (200) or validation errors (400).
    """
    task = Task.objects.get(pk=pk)
    # The PUT and PATCH branches were identical except for the
    # ``partial`` flag, so fold them into a single serializer call.
    serializer = TaskSerializer(task, data=request.data,
                                partial=(request.method == 'PATCH'))
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
def completed(request,pk):
task = Task.objects.get(pk=pk)
serializer = TaskSerializer(task, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | Khot-abhishek/TODO_WITH_API | api/views.py | views.py | py | 2,318 | python | en | code | 0 | github-code | 36 |
26938917107 | # from microbit import *
# from mpu9250 import MPU9250
# from mpu9250 import MPU9250
# imu = MPU9250('X')
# while True:
# print(imu.accel.xyz)
# print(imu.gyro.xyz)
# print(imu.mag.xyz)
# print(imu.temperature)
# print(imu.accel.z)
# sleep(1000)
from PiicoDev_Unified import sleep_ms
from PiicoDev_MPU6050 import PiicoDev_MPU6050
# Example code for Motion Sensor MPU6050
# Cross-platform compatible sleep function
motion = PiicoDev_MPU6050()
while True:
# Accelerometer data
accel = motion.read_accel_data() # read the accelerometer [ms^-2]
aX = accel["x"]
aY = accel["y"]
aZ = accel["z"]
print("x:" + str(aX) + " y:" + str(aY) + " z:" + str(aZ))
# Gyroscope Data
# gyro = motion.read_gyro_data() # read the gyro [deg/s]
# gX = gyro["x"]
# gY = gyro["y"]
# gZ = gyro["z"]
# print("x:" + str(gX) + " y:" + str(gY) + " z:" + str(gZ))
# Rough temperature
# temp = motion.read_temperature() # read the device temperature [degC]
# print("Temperature: " + str(temp) + "°C")
# G-Force
# gforce = motion.read_accel_abs(g=True) # read the absolute acceleration magnitude
# print("G-Force: " + str(gforce))
sleep_ms(100)
# from microbit import *
# sleep(1000)
# print("Hello Terminal!")
| alexanderhalpern/educationalrobot | imu.py | imu.py | py | 1,299 | python | en | code | 0 | github-code | 36 |
12220208294 | from json import loads
from logging_tools import Logger
from population import create_sample
from requests import get
from requests import post
from requests.auth import HTTPBasicAuth
from string import Template
from time import sleep
from uuid import uuid1
""" Template for the whisk rest api. """
whisk_rest_api = Template(
'$APIHOST/api/v1/namespaces/$NAMESPACE/$ENDPOINT/$VALUE'
)
def evolution_parameters(function=3, instance=1, dim=3, population_size=20):
""" Returns the evolution parameters. """
return {
'id': str(uuid1()),
'problem': {
'name': 'BBOB',
'function': function,
'instance': instance,
'search_space': [-5, 5],
'dim': dim,
'error': 1e-8
},
'population': [],
'population_size': population_size,
'experiment': {
'experiment_id': 'dc74efeb-9d64-11e7-a2bd-54e43af0c111',
'owner': 'mariosky',
'type': 'benchmark'
},
'algorithm': {
'name': 'GA',
'iterations': 5,
'selection': {
'type': 'tools.selTournament',
'tournsize': 12
},
'crossover': {
'type': 'cxTwoPoint',
'CXPB': [0, 0.2]
},
'mutation': {
'type': 'mutGaussian',
'mu': 0,
'sigma': 0.5,
'indpb' : 0.05,
'MUTPB':0.5
}
}
}
def create_parameters(settings, population=None):
""" Creates the evolution parameters with a population. """
parameters = evolution_parameters(
settings.function,
settings.instance,
settings.dim,
settings.population_size
)
parameters['population'] = population or create_sample(parameters)
return parameters
def get_request_data(settings, endpoint, value, json={}):
    """Build the keyword arguments for a requests call to the Whisk REST API.

    Args:
        settings: client configuration; ``auth`` must be "user:password".
        endpoint: REST collection, e.g. 'actions' or 'activations'.
        value: action name or activation id appended to the URL.
        json: request body.  NOTE(review): mutable default argument —
            safe only while no caller mutates it; confirm before reuse.

    Returns:
        dict usable as ``requests.get(**data)`` / ``requests.post(**data)``.
    """
    auth = settings.auth.split(':')
    return {
        'url': whisk_rest_api.safe_substitute(
            APIHOST=settings.apihost,
            NAMESPACE=settings.namespace,
            ENDPOINT=endpoint,
            VALUE=value,
        ),
        'json': json,
        'params': {
            'blocking': str(settings.blocking),
            'result': 'True'
        },
        'auth': HTTPBasicAuth(auth[0], auth[1]),
        'verify': not settings.insecure
    }
def crossover_migration(pop1, pop2, key = lambda p: p['fitness']['score']):
    """Replace the worst half of *pop1* with the best half of *pop2*.

    Both populations are sorted in place, ascending by *key* (fitness
    score by default).  The upper half of *pop1* is then overwritten
    with the lower half of *pop2*, and the mutated *pop1* is returned.
    """
    pop1.sort(key=key)
    pop2.sort(key=key)
    smaller = min(len(pop1), len(pop2))
    half, remainder = divmod(smaller, 2)
    pop1[half:] = pop2[:half + remainder]
    return pop1
def request_evolution(settings, population):
""" Gets the population using blocking. """
data = get_request_data(settings, 'actions', 'gaService', population)
logger = Logger(settings.verbose)
logger.log('POST request to ' + data['url'])
response = post(**data).json()
logger.log('POST complete!')
return response
def request_evolution_id(settings, population):
""" Evolves a population and returns it's OpenWhisk activationid. """
response = request_evolution(settings, population)
return response['activationId']
def request_evolved(settings, id):
    """Poll OpenWhisk for the result of an evolution activation.

    Retries once per second for up to ``settings.timeout`` attempts.

    Args:
        settings: client configuration (auth, apihost, timeout, ...).
        id: OpenWhisk activation id returned when the job was started.

    Returns:
        The evolved population decoded from the activation's JSON result.

    Raises:
        ValueError: if the result is not available within the timeout.
    """
    data = get_request_data(settings, 'activations', id)
    logger = Logger(settings.verbose)
    logger.log('Polling activationId ' + str(id))
    for _ in range(0, settings.timeout):
        logger.log('GET request to ' + data['url'])
        response = get(**data).json()
        logger.log('GET complete!')
        # An 'error' entry presumably means the activation record is not
        # ready yet — confirm against the OpenWhisk activations API.
        if 'error' not in response:
            return loads(response['response']['result']['value'])
        sleep(1)
    raise ValueError('Timeout exception.')
def evolve(settings, population):
""" Evolves the population with the given settings. """
response = request_evolution(settings, population)
print(response)
if 'activationId' in response:
return request_evolved(settings, response['activationId'])
else:
return loads(response['value'])
| mariosky/ga_action | py_client/evolution.py | evolution.py | py | 4,228 | python | en | code | 1 | github-code | 36 |
71551898663 | from __future__ import annotations
import falcon
from app.models import Rating
from app.schemas.ratings import rating_item_schema
class RateResource:
deserializers = {"post": rating_item_schema}
def on_post(self, req: falcon.Request, resp: falcon.Response, id: int):
"""
---
summary: Add rating for movie as logged in user
tags:
- Rating
parameters:
- in: body
schema: RatingSchema
consumes:
- application/json
produces:
- application/json
responses:
201:
description: Vote successful
401:
description: Unauthorized
422:
description: Input body formatting issue
"""
db = req.context["db"]
user = req.context["user"]
rating = req._deserialized["rating"]
user_rating = Rating(rating=rating, user=user, movie_id=id)
db.session.add(user_rating)
db.session.commit()
resp.status = falcon.HTTP_CREATED
resp.media = {"message": "rating saved"}
| alysivji/falcon-batteries-included | app/resources/ratings.py | ratings.py | py | 1,133 | python | en | code | 15 | github-code | 36 |
44676432785 | # Дан список чисел. Если среди них есть ноль - вывести yes, иначе no.
my_list = [1, 2, 100, 0, 3]
# alternatively, read space-separated numbers from the user:
# my_list = [int(i) for i in input().split()]

# `in` performs the linear scan for us; no manual flag-and-break needed.
has_zero = 0 in my_list

if has_zero:
    print('yes')
else:
    print('no')
| buevichd/tms-lessons | lesson_06/cycles/task_04.py | task_04.py | py | 454 | python | ru | code | 3 | github-code | 36 |
38242299725 | import os, re, math, sys
from collections import defaultdict
VCF_CONTIG_PATT = re.compile('ID=(\w+),length=(\d+)')
PROG_NAME = 'Hycco'
DESCRIPTION = 'Hycco is an HMM based method to estimate hybrid chromosomal crossover points using distinguising SNPs from two parental genotypes'
FILE_TAG = 'crossover_regions'
DEFAULT_BIN_SIZE = 10000
DEFAULT_QUALITY = 100.0
DEFAULT_MIN_CHROMO_SIZE = 1.0
DEFAULT_NUM_ITER = 400
def info(msg, prefix='INFO'):
print('%8s : %s' % (prefix, msg))
def warn(msg, prefix='WARNING'):
print('%8s : %s' % (prefix, msg))
def fatal(msg, prefix='%s FAILURE' % PROG_NAME):
print('%8s : %s' % (prefix, msg))
sys.exit(0)
def check_file_problems(file_path):
    """Validate that *file_path* is an existing, non-empty, readable file.

    Returns a human-readable problem description, or None when the path
    passes every check.
    """
    if not os.path.exists(file_path):
        return 'File "%s" does not exist' % file_path
    if not os.path.isfile(file_path):
        return 'Location "%s" is not a regular file' % file_path
    if os.stat(file_path).st_size == 0:
        return 'File "%s" is of zero size ' % file_path
    if not os.access(file_path, os.R_OK):
        return 'File "%s" is not readable' % file_path
    return None
def test_imports():
critical = False
try:
from numpy import array
except ImportError:
critical = True
warn('Critical module "numpy" is not installed or accessible')
try:
from sklearn import cluster
except ImportError:
critical = True
warn('Critical module "sklearn" is not installed or accessible')
try:
from hmmlearn import hmm
except ImportError:
critical = True
warn('Critical module "hmmlearn" is not installed or accessible')
try:
from matplotlib import pyplot
except ImportError:
warn('Module "matplotlib" is not installed or accessible. Graphing option is not available.')
if critical:
fatal('Exiting because critial Python modules are not available')
test_imports()
import numpy as np
def read_vcf(file_path, min_qual=100):
    """
    Read a VCF file to get SNPs with a given minimum quality.
    Returns chromosome sizes and SNP positions with qualities, keyed by
    chromosome.
    """
    # Transparently support gzipped input, keyed off the file extension.
    if file_path.lower().endswith('.gz'):
        import gzip
        file_obj = gzip.open(file_path, 'rt')
    else:
        file_obj = open(file_path)
    chromo_dict = {}  # chromosome name -> length, from ##contig headers
    var_dict = defaultdict(list)  # chromosome name -> [(pos, qual), ...]
    for line in file_obj:
        if line[:9] == '##contig=':
            match = VCF_CONTIG_PATT.search(line)
            if match:
                chromo = match.group(1)
                size = int(match.group(2))
                chromo_dict[chromo] = size
        elif line[0] == '#':
            continue
        else:
            # NOTE(review): assumes exactly one sample column (10 fields
            # per data line); multi-sample VCFs would raise a ValueError
            # here.  Also shadows the module-level info() within this loop.
            chromo, pos, _id, ref, alt, qual, filt, info, fmt, xgb3 = line.split()
            qual = float(qual)
            # Keep only single-nucleotide substitutions (true SNPs).
            if len(ref) == 1 and len(alt) == 1:
                if qual >= min_qual:
                    var_dict[chromo].append((int(pos), qual))
    file_obj.close()
    # VCF may not be sorted
    sort_var_dict = {}
    for chromo in var_dict:
        sort_var_dict[chromo] = sorted(var_dict[chromo])
    return chromo_dict, sort_var_dict
def get_contig_data(chromo_dict, var_dict, bin_size=1000):
    """
    Bin log10(1+qual) scores of variant positions into one contiguous
    array per chromosome.  Each variant's score is split linearly
    between the two flanking bins.  Returns a list of arrays, one per
    chromosome in sorted name order.
    """
    bin_width = float(bin_size)
    contigs = []
    for chromo in sorted(chromo_dict):
        # One extra bin so interpolation at the far edge stays in range.
        n_bins = int(math.ceil(chromo_dict[chromo] / bin_width)) + 1
        binned = np.zeros(n_bins, float)
        for pos, qual in var_dict.get(chromo, []):
            idx = int(pos / bin_size)
            frac = (pos - (idx * bin_size)) / bin_width
            score = np.log10(1.0 + qual)
            binned[idx] += (1.0 - frac) * score
            binned[idx + 1] += frac * score
        contigs.append(binned)
    return contigs
def get_training_data(contig_data1, contig_data2):
    """
    Pair up per-chromosome score tracks from the two genome builds.

    Each chromosome's two tracks are zipped element-wise into an (n, 2)
    array; all chromosomes are then stacked into one matrix.  Returns
    the stacked matrix and the list of per-chromosome lengths.
    """
    rows = []
    sizes = []
    for idx, track_a in enumerate(contig_data1):
        track_b = contig_data2[idx]
        paired = np.array(list(zip(track_a, track_b)))
        sizes.append(len(paired))
        rows.append(paired)
    return np.concatenate(rows), sizes
def train_hybrid_crossover_hmm(vcf_paths_pairs, text_labels=('A','B'), out_dir='', bin_size=10000, min_qual=100,
min_chromo_size=1e6, num_hmm_iter=400, plot_graphs=False, covariance_type='diag'):
"""
Main function to train the HMM and plot the results
"""
from hmmlearn import hmm
# This is simply to remove worrysome messages which ough to be addressed in newer hmmlearn versions
from sklearn import warnings
def nullfunc(*args, **kw):
pass
warnings.warn = nullfunc
chromos = set()
var_pairs = []
chromo_dicts = []
n_states=2
# Read the VCF data and chromosome sizes
for vcf_path_a, vcf_path_b in vcf_paths_pairs:
chromo_dict_a, var_dict_a = read_vcf(vcf_path_a, min_qual)
chromo_dict_b, var_dict_b = read_vcf(vcf_path_b, min_qual)
chromos.update(chromo_dict_a.keys())
chromos.update(chromo_dict_b.keys())
var_pairs.append((var_dict_a, var_dict_b))
chromo_dicts += [chromo_dict_a, chromo_dict_b]
# Collate chromosome sizes, talking the largest,
# just in case there are any discrepencies and
# ignoring any that are too small to bother with
chromo_dict = {}
for chromo in chromos:
size = max([cd.get(chromo, 0) for cd in chromo_dicts])
if size >= min_chromo_size:
chromo_dict[chromo] = size
chromos = sorted(chromo_dict)
# Look through variant call pairs for each strain
if plot_graphs:
fig, axarr = plt.subplots(len(chromos), len(var_pairs))
title_text = 'Hybrid genome HMM states. Bin size = {:,} Min qual = {}'
fig.suptitle(title_text.format(bin_size, min_qual))
n_cols = len(var_pairs)
head_1 = '#HMM params - bin_size:%d min_qual:%d min_chromo_size:%d num_hmm_iter:%d\n'
head_2 = '#cols - chr\thaplotype\tregion_size\tregion_start\tregion_end\tfirst_SNP\tlast_SNP\tbin_start\tbin_end\n'
for col, (var_dict_a, var_dict_b) in enumerate(var_pairs):
file_name = '%s_%s.tsv' % (text_labels[col], FILE_TAG)
file_path = os.path.join(out_dir, file_name)
file_obj = open(file_path, 'w')
write = file_obj.write
write(head_1 % (bin_size, min_qual, min_chromo_size, num_hmm_iter))
write(head_2)
contig_data_a = get_contig_data(chromo_dict, var_dict_a, bin_size)
contig_data_b = get_contig_data(chromo_dict, var_dict_b, bin_size)
in_data, sizes = get_training_data(contig_data_a, contig_data_b)
# Setup an HMM object
model = hmm.GaussianHMM(n_components=n_states, covariance_type=covariance_type,
n_iter=num_hmm_iter)
# Run Baum-Welch to lear the HMM probabilities
model.fit(in_data, sizes)
mv = in_data.max()
i = 0
for row, chromo in enumerate(chromos):
m = sizes[row]
chrom_data = in_data[i:i+m]
i += m
# Run Forward-Backward to get state probabilities at each point
probs = model.predict_proba(chrom_data, [m])
# The order of state labels is arbitrary, so use a dot product to
# deduce which state best matches the first genome
dp1 = np.dot(probs[:,0], chrom_data[:,0])
dp2 = np.dot(probs[:,0], chrom_data[:,1])
if dp2 > dp1:
probs_a = probs[:,1]
probs_b = probs[:,0]
else:
probs_a = probs[:,0]
probs_b = probs[:,1]
# Create chromosome regios of contiguous state according to which of
# the probabilities for the binned regions was higest
prev_state = 0
region_start = 0
chromo_regions = []
for j in range(m):
pos = j * bin_size
if probs_a[j] > probs_b[j]:
state = 'A'
elif probs_a[j] < probs_b[j]:
state = 'B'
else:
state = ''
if state != prev_state:
if prev_state:
chromo_regions.append((region_start, pos, prev_state))
region_start = pos
prev_state = state
# Last region goes to the chromosome end
if state:
chromo_regions.append((region_start, min(pos, chromo_dict[chromo]), prev_state))
# Refine region edges according to precise SNP positions
# which could be before or after end of binned region
# Remove SNPs common to both genotypes
pos_counts = defaultdict(int)
for pos, qual in var_dict_a[chromo]:
pos_counts[pos] += 1
for pos, qual in var_dict_b[chromo]:
pos_counts[pos] += 1
# Get sorted positions (and corresponding states) of distinguishing SNPs
vars_state_pos = [(pos, 'A') for pos, qual in var_dict_a[chromo] if pos_counts[pos] < 2]
vars_state_pos += [(pos, 'B') for pos, qual in var_dict_b[chromo] if pos_counts[pos] < 2]
vars_state_pos.sort()
var_pos, var_states = zip(*vars_state_pos)
var_pos = np.array(var_pos)
n_var = len(var_pos)
for start, end, state in chromo_regions:
# Find transitions from A/B genotypes SNPs working away from region edge
# Report terminal snips and mid-way between transitions, where possible
# Refine beginning
idx_left = idx_right = np.abs(var_pos-start).argmin() # Closest SNP to bin boundary
while (idx_right < n_var-1) and (var_states[idx_right] != state): # Find next matching SNP
idx_right += 1
while (idx_left >= 0) and (var_states[idx_left] == state): # Find prev mismatching SNP
idx_left -= 1
vp1 = var_pos[idx_right]
if vp1 > end:
msg = 'No SNPs for HMM state "%s" found in chromosome region %s:%d-%d. '
msg += 'Probably the HMM was not able to separate states in the data as expected'
warn(msg)
vp1 = start
if idx_left < 0: # Off the chromosome start
rp1 = 0
else:
rp1 = int((var_pos[idx_left] + vp1)/2)
# Refine end
idx_left = idx_right = np.abs(var_pos-end).argmin() # Closest SNP to bin boundary
while (idx_left >= 0) and (var_states[idx_left] != state): # Find prev matching SNP
idx_left -= 1
while (idx_right < n_var) and (var_states[idx_right] == state): # Find next mismatching SNP
idx_right += 1
vp2 = var_pos[idx_left]
if vp2 < start:
vp2 = end
if idx_right < n_var:
rp2 = int((vp2 + var_pos[idx_right])/2)
else: # Off the chromosome end
rp2 = end
# Chromosome, state code, region start, region end, region length,
# first matching var position, last matching var pos
line_data = (chromo, state, rp2-rp1, rp1, rp2, vp1, vp2, start, end)
line = '%s\t%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n' % line_data
write(line)
if plot_graphs:
# Plot first probabilities at a resonable scale
probs = probs_a * 0.75 * mv
# X valuys for plot in Megabases
x_vals = np.array(range(len(chrom_data))) * bin_size / 1e6
nx = x_vals[-1]
# Plot the lines
if n_cols > 1:
ax = axarr[row, col]
else:
ax = axarr[row]
ax.plot(x_vals, chrom_data[:,0], color='#FF4000', alpha=0.4, linewidth=1.5, label='Genome A SNPs')
ax.plot(x_vals, chrom_data[:,1], color='#0080FF', alpha=0.4, linewidth=1.5, label='Genome B SNPs')
ax.plot(x_vals, probs[:], color='#808080', alpha=0.75, linewidth=1.0, label='State A probability', linestyle='-')
# Titles axes and labels at the aprropriate spots
dx = bin_size / 1e6
if row == 0:
ax.set_title(text_labels[col])
ax.set_xlim((-dx, nx+dx))
ax.set_ylim((0, 1.1*mv))
ax.set_xlabel('Chr %s Position (Mb)' % chromo, fontsize=11)
ax.axhline(0, -dx, nx+dx, color='#808080', alpha=0.5)
if col == 0:
ax.set_ylabel('$\Sigma[log_{10}(1+qual)]$')
if row == 0:
ax.legend(fontsize=11, frameon=False, ncol=3)
file_obj.close()
info('Output data written to %s' % file_path)
if plot_graphs:
plt.show()
if __name__ == '__main__':
    # Command-line entry point: parse options, validate inputs, then hand
    # the VCF pairs to the HMM training routine.
    from argparse import ArgumentParser

    epilog = 'For further help email tstevens@mrc-lmb.cam.ac.uk or garethb@mrc-lmb.cam.ac.uk'
    arg_parse = ArgumentParser(prog=PROG_NAME, description=DESCRIPTION,
                               epilog=epilog, prefix_chars='-', add_help=True)
    arg_parse.add_argument(metavar='VCF_FILE', nargs='+', dest='i',
                           help='Input VCF format files containing variant calls for each parental genotype/strain. Files should be listed sequentially in pair order. Inputs may be gzipped (this is assumed by a .gz file extension).')
    arg_parse.add_argument('-b', default=DEFAULT_BIN_SIZE, type=int, metavar='BIN_SIZE',
                           help='Binned analysis region size (in bp) for defining HMM chromosome segments (). Default: %d bp.' % DEFAULT_BIN_SIZE)
    arg_parse.add_argument('-g', default=False, action='store_true',
                           help='Specifies that graphical output will be displayed for each VCF pair using matplotlib')
    arg_parse.add_argument('-m', default=DEFAULT_MIN_CHROMO_SIZE, type=float, metavar='MIN_CHROMO_SIZE',
                           help='Minimum length (in Mb) required for a contig/chromosome (as described in VCF header) to be analysed. Default: %.2f Mb.' % DEFAULT_MIN_CHROMO_SIZE)
    arg_parse.add_argument('-n', default=DEFAULT_NUM_ITER, type=int, metavar='NUM_HMM_ITER',
                           help='Number of iterations to perform when estimating Gaussian HMM probabilities using the Baum-Welch method. Default: %d' % DEFAULT_NUM_ITER)
    arg_parse.add_argument('-o', metavar='OUT_DIR',
                           help='Optional output directory for writing results. Defaults to the current working directory.')
    arg_parse.add_argument('-q', default=DEFAULT_QUALITY, type=float, metavar='',
                           help='Minimum quality (phred-scale) for accepting a SNP call, as described in the VCF data lines. Default: %.2f' % DEFAULT_QUALITY)
    arg_parse.add_argument('-s', default=0, type=int, metavar='RANDOM_SEED',
                           help='Optional random seed value, i.e. to make repeat calculations deterministic.')
    arg_parse.add_argument('-t', nargs='+', metavar='TEXT_LABEL',
                           help='Optional text labels to describe each input pair, which are used to name output files as {NAME}_%s.tsv, in the same order as the input VCF file pairs.' % FILE_TAG)

    args = vars(arg_parse.parse_args(sys.argv[1:]))

    vcf_paths = args['i']
    plot_graphs = args['g']
    ran_seed = args['s']
    out_dir = args['o'] or './'
    text_labels = args['t']
    bin_size = args['b']
    min_qual = args['q']
    min_chromo_size = int(args['m'] * 1e6)
    num_hmm_iter = args['n']

    # Graphics are silently disabled when matplotlib is unavailable.
    try:
        from matplotlib import pyplot as plt
    except ImportError:
        plot_graphs = False

    if ran_seed:
        np.random.seed(ran_seed)

    n_paths = len(vcf_paths)
    if n_paths < 2:
        fatal('At least two VCF file paths must be specified')
    if n_paths % 2 == 1:
        fatal('An even number of VCF paths (i.e. pairs of files) must be input. %d paths were specified' % n_paths)

    # Fixed: use integer division. The original "n_paths/2" produced a float
    # under Python 3, which broke the list slice "text_labels[:n_pairs]" below.
    n_pairs = n_paths // 2

    if text_labels:
        text_labels = list(text_labels)
    else:
        text_labels = []

    # Pad with default labels so there is exactly one label per input pair.
    while len(text_labels) < n_pairs:
        label = 'pair_%d' % (1+len(text_labels))
        text_labels.append(label)

    if len(text_labels) > n_pairs:
        warn('Number of input text labels (%d) greater than the number of input pairs (%s)' % (len(text_labels), n_pairs))
        text_labels = text_labels[:n_pairs]

    abs_path = os.path.abspath(out_dir)
    if not os.path.exists(abs_path):
        # Fixed: the original left an unformatted '%d' placeholder in these
        # two messages; now the offending path is interpolated.
        fatal('Output directory "%s" does not exist' % abs_path)
    if not os.path.isdir(abs_path):
        fatal('Output path "%s" is not a directory' % abs_path)

    for vcf_path in vcf_paths:
        problem = check_file_problems(vcf_path)
        if problem:
            fatal(problem)

    # Consecutive files form a pair: (0,1), (2,3), ...
    vcf_paths_pairs = [(vcf_paths[i], vcf_paths[i+1]) for i in range(0, n_paths, 2)]
    train_hybrid_crossover_hmm(vcf_paths_pairs, text_labels, out_dir, bin_size,
                               min_qual, min_chromo_size, num_hmm_iter, plot_graphs)

    # Example ./hycco xgb3_vs12_clean.vcf.gz xgb3_vs13_clean.vcf.gz
| tjs23/hycco | hycco.py | hycco.py | py | 16,851 | python | en | code | 1 | github-code | 36 |
18180968433 | # Student Virtual Assistant
# Libraries for accessing Google Scholar and scheduling reminders
import webbrowser
import schedule
import time
# Function to access Google Scholar
def search_scholar(query):
    """Open Google Scholar in the default browser with *query* pre-filled.

    The query is URL-encoded so spaces and special characters survive the
    trip into the query string (the original interpolated the raw text).
    """
    from urllib.parse import quote_plus
    webbrowser.open(f"https://scholar.google.com/scholar?q={quote_plus(query)}")
# Function to input schedule
def input_schedule():
    """Interactively collect the user's class schedule.

    Prompts repeatedly for a class name and its time until the user types
    'done'. Returns a dict mapping class name -> time string.

    Fixed: the accumulator was previously a local named ``schedule``,
    shadowing the imported ``schedule`` module; it is renamed here.
    """
    timetable = {}
    while True:
        class_name = input("Enter class name (or type 'done' if finished): ")
        if class_name == "done":
            break
        class_time = input("Enter class time (e.g. 9:00 AM): ")
        timetable[class_name] = class_time
    return timetable
# Function to input assignments
def input_assignments():
    """Prompt for assignments until the user types 'done'; return them as a list."""
    collected = []
    entry = input("Enter assignment (or type 'done' if finished): ")
    while entry != "done":
        collected.append(entry)
        entry = input("Enter assignment (or type 'done' if finished): ")
    return collected
# Function to remind student to hydrate
def remind_hydrate():
    """Print a hydration reminder to stdout."""
    message = "Don't forget to drink water and stay hydrated!"
    print(message)
# Main function to run the virtual assistant
def run_assistant():
    """Drive the interactive assistant.

    Gathers the user's schedule and assignments, opens a Google Scholar
    search, then loops forever issuing hourly hydration reminders.

    Fixed: the schedule dict used to be stored in a local named
    ``schedule``, shadowing the imported ``schedule`` module and making
    ``schedule.every(1).hours`` below raise AttributeError.
    """
    print("Welcome to the Student Virtual Assistant")

    # Input schedule
    print("Please enter your schedule:")
    timetable = input_schedule()
    print(f"Your schedule: {timetable}")

    # Input assignments
    print("Please enter your assignments:")
    assignments = input_assignments()
    print(f"Your assignments: {assignments}")

    # Access Google Scholar
    query = input("What would you like to search on Google Scholar? ")
    search_scholar(query)

    # Schedule reminders to hydrate
    schedule.every(1).hours.do(remind_hydrate)

    # Start reminders (this loop blocks forever)
    while True:
        schedule.run_pending()
        time.sleep(1)
# Run the virtual assistant
if __name__ == "__main__":
    # Start the interactive assistant only when executed directly,
    # not when imported as a module.
    run_assistant()
| macnya/Student_virtual_assistant | Student_VA.py | Student_VA.py | py | 1,790 | python | en | code | 0 | github-code | 36 |
38777184708 | #!/usr/bin/python
import csv
import json
import pprint
import re
import sys
def replace_if_not_empty(dict, key, value):
    """Set dict[key] = value unless the key already holds a truthy value."""
    if not dict.get(key):
        dict[key] = value
def to_float_or_none(value):
    """Convert *value* to float, returning None when conversion fails.

    Catches TypeError as well as ValueError so that None (or other
    non-string, non-numeric inputs) degrade gracefully instead of raising.
    """
    try:
        return float(value)
    except (ValueError, TypeError):
        return None
def replace_slashes(str):
    """Return *str* with '/' swapped for '\\'; falsy input yields None."""
    if not str:
        return None
    return str.replace("/", "\\")
def strip_or_none(str):
    """Return str.strip() when non-empty after stripping, else None.

    Inputs without a .strip() method (e.g. None) also yield None; only
    AttributeError is caught now, so genuine bugs are no longer swallowed
    by the original bare ``except``.
    """
    try:
        stripped = str.strip()
    except AttributeError:
        return None
    return stripped if stripped else None
def write_json(dic, file_name):
    """Serialize *dic* as pretty-printed JSON to *file_name*, plus a trailing newline."""
    print(f"saving {file_name}")
    with open(file_name, mode="w") as handle:
        json.dump(dic, handle, indent=2)
        handle.write("\n")
def main(tsv_name: str):
    """Parse the TSV export and write keyboards/mousepads/mice/users JSON files."""
    print(f"reading {tsv_name}")
    with open(file=tsv_name) as tsv_file:
        # The export carries two preamble lines before the real column header.
        tsv_file.readline()
        tsv_file.readline()
        # Materialize every row once so each extractor can reuse the list.
        rows = list(csv.DictReader(tsv_file, delimiter="\t"))
        for extractor in (extract_keyboards, extract_mousepads, extract_mice, extract_users):
            extractor(rows)
def extract_keyboards(rows):
    """Collect keyboard models and their key switches into keyboards.json."""
    keyboards = {}
    for row in rows:
        model = replace_slashes(strip_or_none(row["Keyboard Model"]))
        if not model:
            continue
        # Reuse an existing entry; a missing or empty one starts fresh.
        entry = keyboards.get(model) or {}
        # Record the switch only when not already known.
        replace_if_not_empty(entry, "switch", strip_or_none(row["Key Switch"]))
        keyboards[model] = entry
    # pprint.PrettyPrinter().pprint(keyboards)
    write_json(keyboards, "keyboards.json")
def extract_mousepads(rows):
    """Collect the distinct mousepad names into mousepads.json."""
    pads = {}
    for row in rows:
        pad = replace_slashes(strip_or_none(row["Mousepad"]))
        if not pad:
            continue
        if not pads.get(pad):
            pads[pad] = {}
    # pprint.PrettyPrinter().pprint(pads)
    write_json(pads, "mousepads.json")
def extract_mice(rows):
    """Collect mouse specs (sensor, size, weight, switch) into mice.json."""
    mice = {}
    for row in rows:
        model = replace_slashes(strip_or_none(row["Mouse Model"]))
        if not model:
            continue
        # Reuse the existing spec for this model, or start a new one.
        spec = mice.get(model) or {}
        # Some cells contain a range of values; only the first is kept,
        # split off at the unit suffix ('g' for grams, 'm' for mm).
        replace_if_not_empty(spec, "sensor", row["Sensor"] if row["Sensor"] else None)
        replace_if_not_empty(spec, "weight", to_float_or_none(row["Weight"].split("g")[0]))
        replace_if_not_empty(spec, "length", to_float_or_none(row["Length"].split("m")[0]))
        replace_if_not_empty(spec, "width", to_float_or_none(row["Width"].split("m")[0]))
        replace_if_not_empty(spec, "height", to_float_or_none(row["Height"].split("m")[0]))
        replace_if_not_empty(spec, "switch", row["Mouse Switch"] if row["Mouse Switch"] else None)
        mice[model] = spec
    # pprint.PrettyPrinter().pprint(mice)
    write_json(mice, "mice.json")
def extract_users(rows):
    """Extract per-user settings from the sheet rows into users.json.

    Rows after the "Notable" marker are skipped; parsing stops entirely at
    the "Banned" marker. Fixes over the original:

    * ``win_sensitivity``/``win_acceleration`` are initialised to None for
      every row, so an empty or short OS-settings cell no longer raises
      NameError (or silently leaks the previous row's values);
    * ``is_traitor`` compares against "Traitors" — the original compared
      against the misspelling "Trators" and was therefore always False.
    """
    status = "Mouse"
    users = {}
    # iterate through rows
    for row in rows:
        if row["Rank"].startswith("Notable"):
            status = "Notable"
        elif row["Rank"].startswith("Traitors"):
            status = "Traitors"
        elif row["Rank"].startswith("Banned"):
            break
        # skip notable mentions
        if status == "Notable":
            continue
        # skip row if username is not well-formatted
        pattern = "=HYPERLINK\\(\"https:\\/\\/osu\\.ppy\\.sh\\/u\\/(\\d+)\",\"(\\w+)\"\\)"
        result = re.search(pattern, row["Name"])
        if not result or not result.group(1) or not result.group(2):
            continue
        # extract user info
        userID = result.group(1)
        userName = result.group(2)
        is_traitor = status == "Traitors"  # fixed misspelling "Trators"
        # Windows mouse settings; default to None when the cell is empty
        # or incomplete (the original left these names unbound in that case).
        win_sensitivity = None
        win_acceleration = None
        win_settings = row['=HYPERLINK("http://puu.sh/nJtmY/e2a5589f67.png","OS")'].split()
        if win_settings:
            win_sensitivity = to_float_or_none(win_settings[0].split("/")[0])
            if len(win_settings) >= 2:
                accl_setting = win_settings[1].strip().lower()
                if accl_setting.startswith("off"):
                    win_acceleration = False
                elif accl_setting.startswith("on"):
                    win_acceleration = True
        # osu! settings, parsed defensively
        osu_multiplyer = to_float_or_none(
            row["Multiplier"].strip().split("~")[0].rstrip("xX"))
        if row["Raw"].strip().lower().startswith("on"):
            osu_raw = True
        elif row["Raw"].strip().lower().startswith("off"):
            osu_raw = False
        else:
            osu_raw = None
        # hardware setup info
        screen_resolution = row["Resolution"].strip().split("~")[0].split("x")
        if len(screen_resolution) >= 2:
            screen_width = to_float_or_none(screen_resolution[0])
            screen_height = to_float_or_none(screen_resolution[1])
        else:
            screen_width = None
            screen_height = None
        mousepad = strip_or_none(row["Mousepad"])
        keyboard = strip_or_none(row["Keyboard Model"])
        # mouse playstyle info
        playstyle = strip_or_none(row["Playstyle"])
        mouse = strip_or_none(row["Mouse Model"])
        dpi = to_float_or_none(row["DPI"].strip().rstrip("dpi"))
        polling = to_float_or_none(row["Polling"].lower().rstrip("hz"))
        # possibly calculate mouse area
        if win_sensitivity and osu_multiplyer and dpi and screen_width and screen_height:
            # Per-sensitivity multiplier table indexed by sensitivity 1-11
            # (values carried over unchanged from the original sheet logic).
            m = [0.00625, 0.0125, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5]
            win_multi = m[int(win_sensitivity - 1)]
            # get play area: a 4:3 field letterboxed inside the screen
            if (screen_width / screen_height) >= (4 / 3):
                play_height = screen_height
                play_width = screen_height * (4 / 3)
            else:
                play_width = screen_width
                play_height = screen_width / (4 / 3)
            # physical area (25.4 mm per inch)
            effective_ppi = dpi * win_multi * osu_multiplyer
            area_width = round(25.4 * play_width / effective_ppi)
            area_height = round(25.4 * play_height / effective_ppi)
        else:
            area_width = None
            area_height = None
        # create new user
        users[userID] = {
            "name": userName,  # dont really need username tho
            "rank": None,
            "pp": None,
            "is_banned": False,
            "is_traitor": is_traitor,
            "windows_sensitivity": win_sensitivity,
            "windows_acceleration": win_acceleration,
            "osu_multiplyer": osu_multiplyer,
            "osu_raw": osu_raw,
            "screen_width": screen_width,
            "screen_height": screen_height,
            "playstyle": playstyle,
            "dpi": dpi,
            "polling": polling,
            "area_width": area_width,
            "area_height": area_height,
            "mouse": mouse,
            "mousepad": mousepad,
            "keyboard": keyboard,
        }
    # pprint.PrettyPrinter().pprint(users)
    write_json(users, "users.json")
if __name__ == "__main__":
    # Expect exactly one argument: the path to the TSV export file.
    if len(sys.argv) == 2:
        main(sys.argv[1])
    else:
        print("Usage: python ./parse_tsv 'tsv/file/path'")
| penguinuwu/Mousebase | backend/csv_parser/parse_csv.py | parse_csv.py | py | 7,636 | python | en | code | 1 | github-code | 36 |
646308278 | #Lista de Exercício 3 - Questão 38
#Dupla: 2020314273 - Cauã Alexandre e 2021327294 - Kallyne Ferro
#Disciplina: Programação Web
#Professor: Italo Arruda
#Um funcionário de uma empresa recebe aumento salarial anualmente: Sabe-se que:
#Esse funcionário foi contratado em 1995, com salário inicial de R$ 1.000,00;
#Em 1996 recebeu aumento de 1,5% sobre seu salário inicial;
#A partir de 1997 (inclusive), os aumentos salariais sempre correspondem ao dobro do percentual do ano anterior. Faça um programa que determine o salário atual desse funcionário. Após concluir isto, altere o programa permitindo que o usuário digite o salário inicial do funcionário.
class Salario:
    """Salary of an employee hired in 1995.

    Raise history per the exercise statement: a 1.5% raise in 1996 on the
    starting salary, then from 1997 (inclusive) each year's raise
    percentage is double the previous year's, applied through 2021.
    """

    def __init__(self, salario_inicial):
        # Starting (1995) salary in R$.
        self.salario = salario_inicial

    def calcular_salario(self):
        """Compute the salary after all yearly raises and print it."""
        try:
            # 1996: flat 1.5% raise on the starting salary.
            novo_salario = self.salario + self.salario * 0.015
            percentual_aumento = 0.03
            # 1997-2021: apply the raise, then double the percentage.
            for _ano in range(1997, 2022):
                novo_salario += novo_salario * percentual_aumento
                percentual_aumento *= 2
            print(f"O salário atual do funcionário é R$ {novo_salario:.2f}")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            print("Error")
# Read the starting salary from the user and build the Salario instance
# (the calculation itself is triggered on the following line of the script).
x = float(input("Digite o salário inicial do funcionário: "))
func1 = Salario(x)
func1.calcular_salario() | caalexandre/Revisao-Python-IFAL-2023-Caua-e-Kallyne | Lista3/l3q38CK-523.py | l3q38CK-523.py | py | 1,450 | python | pt | code | 0 | github-code | 36 |
38625939524 | from tkinter import *
from tkinter import messagebox
from tkinter import ttk #css for tkinter
from configparser import ConfigParser
# import io
# import urllib.request
# import base64
import time
import ssl
# NOTE(review): this disables TLS certificate verification for the whole
# process — convenient for development, but a security risk; confirm it is
# not intended for production use.
ssl._create_default_https_context = ssl._create_unverified_context
import requests

# URL templates for the OpenWeatherMap and Unsplash APIs.
weather_url = 'http://api.openweathermap.org/data/2.5/weather?q={},{}&appid={}'
bg_url = 'https://api.unsplash.com/search/photos?query={}&client_id={}'

# API credentials are read from config.ini at import time.
config_file = 'config.ini'
config = ConfigParser()
config.read(config_file)
weather_api_key = config['weather_api_key']['key']
unsplash_access_key = config['unsplash_api_key']['access_key']
def get_image(city):
    """Fetch the first Unsplash photo URL for *city*, or None on failure.

    Only the raw image URL is returned; rendering is left to the caller.
    Fixed: the original contained unreachable download code after the
    return statement (which also referenced ``urllib``, whose import is
    commented out at the top of the file).
    """
    image_index = 0
    result = requests.get(bg_url.format(city, unsplash_access_key))
    if not result:
        return None
    payload = result.json()
    return payload['results'][image_index]['urls']['raw']
def get_weather(city, country):
    """Query OpenWeatherMap for *city*, *country*.

    Returns a (city, country, temp_celsius, temp_fahrenheit, icon, weather)
    tuple, or None when the request fails.
    """
    result = requests.get(weather_url.format(city, country, weather_api_key))
    if not result:
        return None
    payload = result.json()
    kelvin = payload['main']['temp']
    celsius = kelvin - 273.15
    fahrenheit = celsius * 9/5 + 32
    return (
        payload['name'],
        payload['sys']['country'],
        celsius,
        fahrenheit,
        payload['weather'][0]['icon'],
        payload['weather'][0]['main'],
    )
def search():
    """Search-button callback: look up weather and photo, refresh the widgets."""
    city = city_text.get()
    country = country_text.get()
    weather = get_weather(city, country)
    photo = get_image(city)
    if not city or not country:
        messagebox.showerror('Error', 'Cannot find city: {} in country: {}'.format(city, country))
    elif weather:
        location_lbl['text'] = f'{weather[0]}, {weather[1]}'
        image['bitmap'] = f'weather_icons/{weather[4]}.png'
        weather_lbl['text'] = weather[5]
        temp_lbl['text'] = '{:.2f}°C \n {:.2f}°F'.format(weather[2], weather[3])
        url_lbl['text'] = photo
    else:
        messagebox.showerror('Error', 'Error Occured')
# --- Main window -----------------------------------------------------------
app = Tk()
app.title("Weather App")
app.geometry('900x700')

# city_image = Tk()

#Top Frame: the search form (city + country + button)
top_frame = LabelFrame(app, text='Search', padx=50, pady=5)
top_frame.pack(side='top',padx=10, pady=10)

##Search Field
city_text = StringVar()
city_entry = ttk.Entry(top_frame, textvariable=city_text)
city_entry.pack(pady=2)

##Country Field
country_text = StringVar()
country_entry = ttk.Entry(top_frame, textvariable=country_text)
country_entry.pack(pady=2)

##Search Button: triggers search(), which fills in the widgets below
search_btn = ttk.Button(top_frame, text="Search by City, Country", width=20, command=search)
search_btn.pack(pady=10)

#Bottom Frame: result widgets updated by search()
bottom_frame = LabelFrame(app, text='Details', height=500, padx=100, pady=5)
bottom_frame.pack(side='top', padx=10, pady=10)

##Location
location_lbl = ttk.Label(bottom_frame, text='--', font=('bold', 20))
location_lbl.pack()

##Image
# NOTE(review): 'bitmap' expects a named Tk bitmap; '--' (and the .png path
# later assigned in search()) may not be valid values — confirm on a live run.
image = Label(bottom_frame, bitmap='--', relief='sunken')
image.pack(pady=10)

##Weather description
weather_lbl = ttk.Label(bottom_frame, text='--')
weather_lbl.pack()

##Temperature (both Celsius and Fahrenheit)
temp_lbl = ttk.Label(bottom_frame, text='--', font=('bold', 30))
temp_lbl.pack(padx=10, pady=10)

##Raw Unsplash photo URL returned by get_image()
url_lbl = ttk.Label(bottom_frame, text='--')
url_lbl.pack(padx=10, pady=10)
def bottom():
    """Attach a status bar showing when the application was opened."""
    opened = time.asctime(time.localtime())
    statusbar = ttk.Label(app, text='Application Opened: {}'.format(opened),
                          relief='sunken', anchor='w', font=('Italic', 15))
    statusbar.pack(side='bottom', fill='x')
# Build the status bar before entering the Tk event loop.
bottom()
app.mainloop() | superduperkevin/WeatherGUI | weather_app.py | weather_app.py | py | 3,867 | python | en | code | 0 | github-code | 36 |
7952291852 | from __future__ import annotations
import logbook
from discord import Interaction
from discord import Message
from discord.ext.commands import Context
from json import dumps
from logging import Logger
from time import time
from utils import send_response
from yt_dlp.YoutubeDL import YoutubeDL
from yt_dlp import DownloadError
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from audio import Audio
log: Logger = logbook.getLogger("track")
class Track():
    """Container describing a playable audio track and where it came from."""

    def __init__(
        self,
        title: str,
        url: str,
        track_type: str,
        *,
        duration: int = 0,
        format: tuple[str, str] = ("", ""),
        guild_id: int = 0,
        url_original: str = "",
        video_id: str = ""
    ) -> None:
        # Display name and the URL actually handed to the player.
        self.title = title
        self.url = url
        # Kind of source; the factory helpers use "music", "live" and "stream".
        self.track_type = track_type
        self.duration = duration
        # (format id, stream url) pair chosen from the yt-dlp metadata.
        self.format = format
        self.guild_id = guild_id
        # Original page URL (before resolving to a direct stream URL).
        self.url_original = url_original
        self.video_id = video_id
        # Creation time, recorded when the track object is built.
        self.time_stamp = time()

    def __repr__(self) -> str:
        # Pretty-printed JSON of all attributes (vars() supplies the dict).
        return dumps(self, default=vars, indent=4, ensure_ascii=False)
async def create_local_track(audio: Audio, cim: Context | Interaction | Message, url: str, track_type: str = "music") -> None | Track:
    """Build a Track for a file on local disk (or in the cache directory)."""
    if not cim.guild:
        return
    log.info(f"{cim.guild}: Creating local track from {url}")
    if "/.Cached Tracks/" in url:
        # Cached file names carry a 16-character suffix that is cut off here.
        raw_title = url[url.rfind("/") + 1 : -16]
    elif "/" in url:
        raw_title = url[url.rfind("/") + 1:]
    else:
        raw_title = url
    # Drop the file extension. NOTE(review): if raw_title contains no ".",
    # rfind returns -1 and the last character is lost — confirm inputs
    # always carry an extension.
    title = raw_title[:raw_title.rfind(".")]
    return Track(title, url, track_type, guild_id=cim.guild.id)
async def create_stream_track(audio: Audio, cim: Context | Interaction | Message, url: str) -> None | Track:
    """Create a Track for a remote URL (currently YouTube only).

    Resolves the video id, serves an already-cached local copy when one
    exists, otherwise downloads metadata and returns either a "live" track
    (HLS streams) or a "stream" track that is also queued for background
    caching. Returns None when the guild, id or metadata is unavailable,
    or when the URL is unsupported (e.g. Spotify).
    """
    if not cim.guild: return
    log.info(f"{cim.guild}: Creating stream track from {url}")
    if url.startswith(("https://www.youtube.com/", "https://youtu.be/", "https://m.youtube.com/", "https://youtube.com/")):
        id: str | None = await _get_yt_video_id(url)
        if not id: return None
        # Prefer a previously downloaded local copy of this video's audio.
        url_local: str | None = audio.cached_tracks.get(id)
        if url_local:
            log.info(f"Found {id} in my cached tracks.")
            return await create_local_track(audio, cim, url_local, "music")
        log.info(f"{id} not found in cached tracks, downloading meta data...")
        url = f"https://www.youtube.com/watch?v={id}"  # Sanitize url
        video_info: dict | None = await _get_yt_video_info(audio, cim, url)
        if not video_info: return None
        protocol: str | None = video_info.get("protocol")
        if not protocol: return None
        log.info(f"Stream track protocol:\n{protocol}")
        if protocol == "m3u8_native":
            # HLS protocol means a live broadcast: no duration, no caching.
            log.info(f"Stream track is a live stream.")
            # NOTE(review): the slice drops the last 17 characters of the
            # live title — presumably a timestamp suffix; confirm.
            title: str = video_info.get("title")[:-17]  # type: ignore
            url_original: str = url
            url_stream: str = video_info.get("url")  # type: ignore
            return Track(title, url_stream, "live", guild_id=cim.guild.id, url_original=url_original)
        else:
            log.info(f"Stream track is a normal stream.")
            # Fall back to the configured maximum caching duration when the
            # metadata carries no duration field.
            duration: int = video_info["duration"] if video_info.get("duration") else await audio.get_audio_track_caching_duration_max()
            format: tuple[str, str] | None = await _get_yt_video_best_audio_format(video_info)
            if not format:
                await send_response(cim, "I could not find a suitable audio format to stream.")
                return None
            title: str = video_info["title"]
            url_original: str = url
            url_stream: str = format[1]
            track: Track = Track(title, url_stream, "stream", duration=duration, format=format, guild_id=cim.guild.id, url_original=url_original, video_id=id)
            # Queue the track so the downloader caches it for future plays.
            await audio.download_q.put(track)
            return track
    elif url.startswith("https://open.spotify.com/"):
        await send_response(cim, "Spotify not implemented yet.")
        return None
    return None
async def create_meme_track(audio: Audio, cim: Context | Interaction | Message, url: str) -> None | Track:
    """Map meme keywords to their canonical YouTube link ("despacito" only)."""
    if url.lower() != "despacito":
        return None
    log.info(f"Creating meme track for \"{url}\"")
    return await create_stream_track(audio, cim, "https://www.youtube.com/watch?v=kJQP7kiw5Fk")
async def _get_yt_video_id(url: str) -> str | None:
"""Get the 11 chars long video id from a Youtube link."""
if url.find("?v=") > 0:
return url[url.find("?v=") + 3 : url.find("?v=") + 14]
elif url.find("&v=") > 0:
return url[url.find("&v=") + 3 : url.find("&v=") + 14]
elif url.find(".be/") > 0:
return url[url.find(".be/") + 4 : url.find(".be/") + 15]
elif url.find("/shorts/") > 0:
return url[url.find("/shorts/") + 8 : url.find("/shorts/") + 19]
else:
return None
async def _get_yt_video_info(audio: Audio, cim: Context | Interaction | Message, url) -> dict | None:
    """Download yt-dlp metadata for *url*, reporting failures to the user.

    Returns the extracted info dict, or None when yt-dlp raises a
    DownloadError (truncated link, age gate, HTTP 403, private video, ...).
    """
    options: dict[str, str | bool | int | Logger] = {
        "no_warnings": False,
        "default_search": "auto",
        "source_address": "0.0.0.0",
        "logger": logbook.getLogger("yt-dlp"),
        "age_limit": 21
    }
    try:
        # Run the blocking yt-dlp call in an executor to keep the event
        # loop responsive.
        video_info: dict | None = await audio.maon.loop.run_in_executor(
            None, lambda: YoutubeDL(options).extract_info(url, download=False)
        )
    except DownloadError as e:
        log.error(f"{cim.guild.name}: {e.__str__()[28:]}")  # type: ignore
        # Translate the most common yt-dlp failure modes into friendlier
        # chat responses for the user.
        if "looks truncated" in e.__str__():
            await send_response(cim, "The link looks incomplete, paste it again, please.")
        elif "to confirm your age" in e.__str__():
            await send_response(cim, "The video is age gated and I couldn't proxy my way around it.")
        elif "HTTP Error 403" in e.__str__():
            await send_response(cim, "I received a `forbidden` error, I was locked out from downloading the meta data...\nYou could try again in a few seconds, though!")
        elif "Private video." in e.__str__():
            await send_response(cim, "The video has been privated and I can't view it.")
        else:
            await send_response(cim, "I could not download the video's meta data... maybe try again in a few seconds.")
        return None
    return video_info
async def _get_yt_video_best_audio_format(video_info: dict) -> tuple[str, str] | None:
    """Pick an audio format from yt-dlp metadata by hard-coded preference.

    Format ids are tried in the order 251, 140, 250, 249; the first one
    present is returned as a (format id, url) pair, otherwise None.
    """
    formats: dict[str, str] = {}
    for entry in video_info.get("formats", [video_info]):
        formats[entry.get("format_id")] = entry.get("url")  # type: ignore
    log.info(f"Found formats: {formats.keys()}")
    for preferred in ("251", "140", "250", "249"):
        if preferred in formats:
            return (preferred, formats[preferred])
    return None
| ruubytes/Maon.py | src/track.py | track.py | py | 6,962 | python | en | code | 0 | github-code | 36 |
29421162955 | import firebase_admin
from firebase_admin import credentials, firestore
import os
from gcloud import storage
from pprint import pprint
from datetime import datetime
import ast
from django import template
# Module-level counters used to build unique document / blob names.
INDEX = 1
INDEX_historic = 1
INDEX_cv = 1

# Setup the connexion to the project
cred = credentials.Certificate("./website/serviceAccountKey.json")
#cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
db = firestore.client()

# Point Google Cloud Storage at the same service-account credentials.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'./website/serviceAccountKey.json'
#os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'serviceAccountKey.json'
storage_client = storage.Client()
def increment_index_historic():
    """Advance the global history counter and return its new value.

    The counter keeps history documents unique in the database.
    """
    global INDEX_historic
    INDEX_historic = INDEX_historic + 1
    return INDEX_historic
def increment_index():
    """Advance the global job-description counter and return its new value.

    The counter keeps job-description blobs unique in the database.
    """
    global INDEX
    INDEX = INDEX + 1
    return INDEX
def increment_index_cv():
    """Advance the global CV counter and return its new value.

    The counter keeps resume blobs unique in the database.
    """
    global INDEX_cv
    INDEX_cv = INDEX_cv + 1
    return INDEX_cv
def add_CV(name_hr,url_desc,file,name,date,lieu, dict_infos):
    """Register a candidate's resume under the HR user's 'resumes' subcollection.

    The resume's storage name encodes the HR name, the job index (taken
    from *url_desc*) and the global CV counter. The actual upload to the
    'snipehr_cvs' bucket is currently commented out; only the Firestore
    document is written.

    NOTE(review): ``global INDEX_historic`` looks like a copy/paste
    leftover — this function reads INDEX_cv, not INDEX_historic; confirm.
    """
    global INDEX_historic
    #date = datetime.today().strftime('%Y-%m-%d')
    #name_file = "gs://snipehr_historic/"
    #dict_infos={"email":"norayda.nsiemo@gmail.com","phone":"+33768373205","skills":["Python","R","DevOps","MongoDB","documentation","flux","Python","R","Java","Jenkins","Airflow","Docker","Google","MySQL","MongoDB","Firebase","Tableau","documentation","flux"]}
    # e.g. for 'gs://snipehr_job_desc/1.txt' the job index is '1'.
    index_job =url_desc.split("/")[-1].split('.')[0]
    name_file = name_hr + "_" + index_job + "_" + str(INDEX_cv)
    increment_index_cv()
    #my_bucket = storage_client.get_bucket("snipehr_cvs")
    #pprint(vars(my_bucket))
    #blob = my_bucket.blob(name_file)
    #blob.upload_from_filename(file)
    #blob.upload_from_filename(file)
    db = firestore.client()
    new_cv={
        'name': name,
        'lieu' :lieu,
        'email': dict_infos['email'],
        'phone': dict_infos['phone'],
        'date' : date,
        'status': False,
        'url_resume': "gs://snipehr_cvs/" + name_file,
        'skills':dict_infos['skills']
    }
    #print(name_hr)
    db.collection('hrs').document(read_hr(name_hr).id).collection('resumes').add(new_cv)
def read_file_as_file(uri,name,date,poste):
    """Download the blob at *uri* into a local 'resumes/' PDF file.

    Idiom fix: compares against None with ``is not None``.

    NOTE(review): ``download_to_filename()`` returns None, so for a
    non-None *uri* this function always returns None (and the empty-string
    placeholder when *uri* is None) — confirm callers do not rely on a
    meaningful return value.
    """
    destination = "resumes/"+name+"_"+poste+"_"+date+'.pdf'
    blob=""
    if uri is not None:
        bucket = storage_client.get_bucket(uri.split("/")[-2])
        blob = bucket.blob(uri.split("/")[-1])
        blob = blob.download_to_filename(destination)
        #blob = blob.decode('utf-8')
    return blob
def read_file(uri):
    """Download the blob at *uri* (gs://bucket/name) and return it as text.

    Returns an empty string when *uri* is None. Idiom fix: compares
    against None with ``is not None``.
    """
    blob=""
    if uri is not None:
        bucket = storage_client.get_bucket(uri.split("/")[-2])
        blob = bucket.blob(uri.split("/")[-1])
        blob = blob.download_as_string()
        blob = blob.decode('utf-8')
    return blob
def liste_historic(name_hr,url_desc):
    """Return the list of history entries for the given job description.

    Blobs in the 'snipehr_historic' bucket are named
    '<hr>_<job-index>_<n>.txt'; each one contains the repr of a
    {"date": ..., "commentaire": ...} dict, parsed back here with
    ast.literal_eval.
    """
    liste =[]
    # e.g. for 'gs://snipehr_job_desc/1.txt' the job index is '1'.
    index_job =url_desc.split("/")[-1].split('.')[0]
    name_file = name_hr + "_" + index_job
    my_bucket = storage_client.get_bucket("snipehr_historic")
    blobs_all = list(my_bucket.list_blobs(prefix=name_file)) # fetch the blobs whose name starts with <hr>_<job-index>
    print(blobs_all)
    for blob in blobs_all:
        blob = blob.download_as_string()
        blob = blob.decode('utf-8')
        liste.append(ast.literal_eval(blob))
    return liste
def add_historic(name_hr, commentaire, url_desc):
    """Store a dated comment for a job description in the history bucket.

    The blob name encodes the HR name, the job index and the global
    history counter; the payload is the repr of a dict which
    liste_historic() parses back with ast.literal_eval.
    """
    global INDEX_historic
    date = datetime.today().strftime('%Y-%m-%d')
    #name_file = "gs://snipehr_historic/"
    index_job =url_desc.split("/")[-1].split('.')[0] # e.g. 'gs://snipehr_job_desc/1.txt' yields '1'
    my_bucket = storage_client.get_bucket("snipehr_historic")
    #pprint(vars(my_bucket))
    print(index_job)
    name = name_hr + "_" + index_job + "_" + str(INDEX_historic)+".txt"
    increment_index_historic()
    #text_file = open(name, "w")
    #n = text_file.write(commentaire)
    #text_file.close()
    historic =str({"date":date,"commentaire":commentaire})
    blob = my_bucket.blob(name)
    blob.upload_from_string(historic)
    print(blob)
def get_nb_missions_affectees(liste_jobs):
    """Return how many jobs in *liste_jobs* are marked as assigned.

    A job counts when its "status" field is truthy.
    """
    return sum(1 for job in liste_jobs if job["status"])
def set_status(name_hr,job_to_set):
    """Toggle the 'status' flag of the job description matching *job_to_set*.

    The matching document is found via get_job(); the two print() calls
    are debug output.
    """
    job =get_job(name_hr,job_to_set)
    print(job)
    print(job.to_dict()["status"])
    hr=read_hr(name_hr)
    status = job.to_dict()["status"]
    # Write back the inverted flag on the matched job document.
    db.collection('hrs').document(hr.id).collection('job_description').document(job.id).update({"status": not status})
def read_company(name_hr):
    """Return the company name of the HR user identified by *name_hr*."""
    return read_hr(name_hr).to_dict()["company"]
def chiffrement_message(message,clef):
    """Encrypt *message* with key *clef* — TODO: not implemented, always returns None."""
    return None
def dechiffrement_message(message,clef):
    """Decrypt *message* with key *clef* — TODO: not implemented, always returns None."""
    return None
def create_message(name_hr,message,nom,post):
    """Store a message document under the HR user's 'messages' subcollection.

    NOTE(review): the original description said the message is encrypted,
    but chiffrement_message() is never called — the text is stored in
    plain text. Confirm whether encryption is still intended.
    """
    db = firestore.client()
    date = datetime.today().strftime('%Y-%m-%d')
    new_message={
        'date': f'{date}',
        'message': f'{message}',
        'candidat':f'{nom}',
        'post':f'{post}'
    }
    db.collection('hrs').document(read_hr(name_hr).id).collection('messages').add(new_message)
    return None
def create_job_desc(titre,lieu, date, competences, fiche, name_hr):
    # Upload the job-description text (*fiche*) to Cloud Storage and
    # register the job document under the HR user's 'job_description'
    # subcollection.
    global INDEX
    """Fonction qui ajoute une fiche de poste dans la base de donée"""
    url_desc="gs://snipehr_job_desc/"
    url_historic="gs://snipehr_historic/"
    lieu =lieu  # no-op; kept as-is
    my_bucket = storage_client.get_bucket("snipehr_job_desc")
    #pprint(vars(my_bucket))
    # The global INDEX counter makes each description blob name unique.
    name = str(INDEX)+".txt"
    increment_index()
    #text_file = open(name, "w")
    #n = text_file.write(fiche)
    #text_file.close()
    blob = my_bucket.blob(name)
    blob.upload_from_string(fiche)
    url_desc+=name
    db = firestore.client()
    new_job_desc={
        'titre': f'{titre}',
        'lieu' :f'{lieu}',
        'date': f'{date}',
        'status': False,
        'url_desc': f'{url_desc}',
        'url_historic': f'{url_historic}',
        'skills':f'{competences}'
    }
    #print(name_hr)
    db.collection('hrs').document(read_hr(name_hr).id).collection('job_description').add(new_job_desc)
def create_hr(name_hr, email_hr, mdp_hr, company_hr):
    """Insert a new HR user document into the 'hrs' collection.

    Fixed: the last line originally called ``.collections('job_description')``
    (plural), which is not a valid subcollection accessor on a
    DocumentReference; ``.collection(...)`` is the correct call. Note that
    Firestore only materializes a subcollection when a document is added
    to it, so this reference line has no persistent effect on its own.
    """
    db = firestore.client()
    # A voir pour ajouter le doc avec un id auto généré
    new_hr = {
        'name': f'{name_hr}',
        'email': f'{email_hr}',
        'mdp': f'{mdp_hr}',
        'company': f'{company_hr}'
    }
    db.collection('hrs').add(new_hr)
    db.collection('hrs').document(read_hr(name_hr).id).collection('job_description')
def set_hr(past_name,name_hr, email_hr, company_hr):
    """Update the HR document currently named *past_name* with new details."""
    db = firestore.client()
    updated_fields = {
        'name': f'{name_hr}',
        'email': f'{email_hr}',
        'company': f'{company_hr}'
    }
    target = read_hr(past_name)
    db.collection('hrs').document(target.id).update(updated_fields)
def get_job(name_hr,job_to_set):
    """Return the Firestore job-description document matching *job_to_set*.

    Documents are compared on their ("url_desc", "date") fields only;
    returns None implicitly when no job matches. The print() calls are
    debug output.
    """
    col_jobs = db.collection('hrs').document(read_hr(name_hr).id).collection('job_description')
    jobs = col_jobs.stream()
    # Project a dict down to the subset of keys listed in y.
    dictfilt = lambda x, y: dict([(i, x[i]) for i in x if i in set(y)])
    for job in jobs:
        print(dictfilt(job.to_dict(),("url_desc","date")))
        print(dictfilt(job_to_set,("url_desc","date")))
        #print(dictfilt(job.to_dict(),("date","titre","url_desc","url_historic")))
        if dictfilt(job_to_set,("url_desc","date")) == dictfilt(job.to_dict(),("url_desc","date")):
            return job
def read_hr(name_hr):
    """Return the first HR document whose 'name' field equals *name_hr*.

    Returns None implicitly when no such document exists.
    """
    # Only get 1 document or hrs
    col_hrs = db.collection('hrs').where("name", '==', f'{name_hr}')
    hrs = col_hrs.stream()
    for hr in hrs:
        #print(f'{hr.id} => {hr.to_dict()}')
        return hr
def read_hrs():
    """Print every HR document (id and fields) from the 'hrs' collection."""
    for hr in db.collection('hrs').stream():
        print(f'{hr.id} => {hr.to_dict()}')
def test(email_hr, mdp_hr):
    """Debug helper: print the HR documents matching the given credentials.

    NOTE(review): storing and querying plain-text passwords ('mdp') is a
    security concern — confirm hashing is planned.
    """
    hr = db.collection('hrs').where("email", '==', f'{email_hr}').where("mdp", '==', f'{mdp_hr}').get()
    print(hr)
    for h in hr:
        print(f'{h.id} => {h.to_dict()}')
def read_jobs(name_hr):
    """Return the list of job-description dicts attached to an HR user."""
    docs = db.collection('hrs').document(read_hr(name_hr).id).collection("job_description").stream()
    return [doc.to_dict() for doc in docs]
def read_resumes(name_hr):
    """Return the list of resume dicts stored under the given HR user."""
    docs = db.collection('hrs').document(read_hr(name_hr).id).collection("resumes").stream()
    return [doc.to_dict() for doc in docs]
def read_messages(name_hr):
    """Return all message dicts for an HR user, each tagged with a 1-based 'index'."""
    docs = db.collection('hrs').document(read_hr(name_hr).id).collection('messages').stream()
    collected = []
    for position, doc in enumerate(docs, start=1):
        entry = doc.to_dict()
        entry['index'] = position
        collected.append(entry)
    return collected
def get_job_title(name_hr, url_resume):
    """Recover the job title matching a resume's storage URL ("" when none matches).

    The job key is the second-to-last '_'-separated token of the resume file
    name; it maps to a 'gs://snipehr_job_desc/<key>.txt' description URL.
    """
    job_key = url_resume.split("/")[-1].split("_")[-2]
    target_desc = "gs://snipehr_job_desc/" + job_key + '.txt'
    title = ""
    # Keep the title of the *last* matching job, as the original did.
    for job in read_jobs(name_hr):
        if job["url_desc"] == target_desc:
            title = job["titre"]
    return title
if __name__ == '__main__':
    # Manual smoke-test playground: the commented-out calls below are ad-hoc
    # experiments kept as usage examples; only the three assignments run.
    #create_hr('Khalida', 'test@gmail.fr', 'azerty', 'ESGI')
    #read_hr('Test')
    #test('test@test.fr', 'test')
    #jobs = read_job(read_hr("Test"))
    #print(jobs)
    #titre=""
    #date=""
    #status= True
    #url_desc=""
    #url_historic =""
    #create_job_desc(titre, date, status, url_desc, url_historic, read_hr("Khalida"))
    name_hr ="Test"
    #job_to_set = {'date': '2022-08-25', 'titre': 'Data Analyst', 'url_desc': '', 'url_historic': ''}
    #print(get_job(name_hr,{'titre': 'Data', 'date': '2022-11-10', 'url_desc': 'gs://snipehr_job_desc/1.txt', 'url_historic': 'gs://snipehr_job_desc/'}).to_dict()["status"])
    #set_status(name_hr, job_to_set)
    #print(get_job(name_hr,job_to_set).to_dict()["status"])
    #print(read_job(read_hr("Test"))["jobs"])
    #set_status("Test",{'status': False, 'date': '2022-08-25', 'titre': 'Data Analyst', 'url_desc': '', 'url_historic': ''})
    #read_file("gs://snipehr_job_desc/1.txt")
    # Sample fixture values used by the experiments above.
    commentaire ="Test des commentaires"
    url_desc = "gs://snipehr_job_desc/1.txt"
    #add_historic(name_hr, commentaire, url_desc)
    #add_historic(name_hr, commentaire, url_desc)
    #print(liste_historic(name_hr, url_desc))
    #print(get_nb_missions_affectees(read_jobs(name_hr)))
    #add_CV(name_hr,url_desc,"CVNorayda_NSIEMO.pdf","Norayda NSIEMO","2022-09-23","Paris", dict())
    #read_file_as_file("gs://snipehr_cvs/Test_1_1","Norayda NSIEMO","2022-09-23","Data Engineer")
| SnipeHR/SnipeHR-github.io | website/query_firestore.py | query_firestore.py | py | 12,180 | python | en | code | 0 | github-code | 36 |
29282945445 | sayilar=(1,2,4,8,12,50,100)
# BUG FIX: both sequences were declared as tuples, but tuples are immutable,
# so every append/insert/pop/remove/sort call below raised AttributeError.
# Re-declare them as lists (same elements) so the list methods work.
sayilar = [1, 2, 4, 8, 12, 50, 100]
harfler = ["a", "b", "h", "f", "ğ", "r"]

sonuc = min(sayilar)  # smallest value
sonuc = max(sayilar)  # largest value (overwrites the min above)
# min and max report the minimum and maximum results.

# --- adding elements ---
sayilar.append(20)     # append adds at the end
harfler.append("p")
sayilar.insert(3, 11)  # insert places the value wherever you choose
harfler.insert(4, "c")

# --- removing elements ---
sayilar.pop()          # pop removes from the end
harfler.pop()
sayilar.remove(50)     # remove deletes the value you name

# --- sorting ---
sayilar.sort()         # sort orders ascending
harfler.sort()
sayilar.reverse()      # reverse orders descending

print(sayilar.count(8))   # how many times 8 occurs
print(sayilar.index(12))  # which index 12 sits at
sayilar.clear()           # empties the sayilar list
print(sonuc) | FMDikici/Python_proje1 | formuller_list_methods.py | formuller_list_methods.py | py | 775 | python | tr | code | 0 | github-code | 36 |
13042186116 | #!/usr/bin/env python
"""
Datapath for QEMU qdisk
"""
import urlparse
import os
import sys
import xapi
import xapi.storage.api.v5.datapath
import xapi.storage.api.v5.volume
import importlib
from xapi.storage.libs.libcow.datapath import QdiskDatapath
from xapi.storage import log
def get_sr_callbacks(dbg, uri):
    """Load the SR-specific volume plugin named by the URI's netloc and
    return its Callbacks() instance.

    Note: each call prepends the plugin directory to sys.path again.
    """
    sr_name = urlparse.urlparse(uri).netloc
    plugin_dir = '/usr/libexec/xapi-storage-script/volume/org.xen.xapi.storage.' + sr_name
    sys.path.insert(0, plugin_dir)
    return importlib.import_module(sr_name).Callbacks()
class Implementation(xapi.storage.api.v5.datapath.Datapath_skeleton):
    """
    Datapath implementation: every operation resolves the SR plugin's
    callbacks from the URI and delegates to the matching QdiskDatapath
    entry point.
    """
    def activate(self, dbg, uri, domain):
        return QdiskDatapath.activate(dbg, uri, domain,
                                      get_sr_callbacks(dbg, uri))

    def attach(self, dbg, uri, domain):
        return QdiskDatapath.attach(dbg, uri, domain,
                                    get_sr_callbacks(dbg, uri))

    def deactivate(self, dbg, uri, domain):
        return QdiskDatapath.deactivate(dbg, uri, domain,
                                        get_sr_callbacks(dbg, uri))

    def detach(self, dbg, uri, domain):
        return QdiskDatapath.detach(dbg, uri, domain,
                                    get_sr_callbacks(dbg, uri))

    def open(self, dbg, uri, domain):
        return QdiskDatapath.epc_open(dbg, uri, domain,
                                      get_sr_callbacks(dbg, uri))

    def close(self, dbg, uri):
        return QdiskDatapath.epc_close(dbg, uri, get_sr_callbacks(dbg, uri))
if __name__ == "__main__":
    log.log_call_argv()
    CMD = xapi.storage.api.v5.datapath.Datapath_commandline(Implementation())
    CMD_BASE = os.path.basename(sys.argv[0])
    # Dispatch on the basename this script was invoked as (symlink-style CLI).
    handlers = {
        "Datapath.activate": CMD.activate,
        "Datapath.attach": CMD.attach,
        "Datapath.close": CMD.close,
        "Datapath.deactivate": CMD.deactivate,
        "Datapath.detach": CMD.detach,
        "Datapath.open": CMD.open,
    }
    handler = handlers.get(CMD_BASE)
    if handler is None:
        raise xapi.storage.api.v5.datapath.Unimplemented(CMD_BASE)
    handler()
| xcp-ng/xcp-ng-xapi-storage | plugins/datapath/qdisk/datapath.py | datapath.py | py | 2,192 | python | en | code | 4 | github-code | 36 |
16028301314 | import pymongo
import os
import pandas as pd
import json
# Positional-rating columns may hold strings like "64+3"/"64-3"; the list
# columns hold comma-separated strings.  Both groups are fixed by the CSV
# layout, so build them once instead of rebuilding them for every player row.
_RATING_COLUMNS = ("ls", "st", "rs", "lw", "lf", "cf", "rf", "rw", "lam",
                   "cam", "ram", "lm", "lcm", "cm", "rcm", "rm", "lwb", "ldm",
                   "cdm", "rdm", "rwb", "lb", "lcb", "cb", "rcb", "rb", "gk")
_LIST_COLUMNS = ("player_positions", "player_tags", "player_traits")


def _clean_player(player):
    """Normalise one player record in place.

    Folds "base+bonus"/"base-malus" rating strings into plain ints and splits
    comma-separated text fields into stripped lists (None is left untouched).
    """
    for column in _RATING_COLUMNS:
        value = player[column]
        if isinstance(value, str):
            if "+" in value:
                parts = value.split("+")
                player[column] = int(parts[0]) + int(parts[1])
            elif "-" in value:
                parts = value.split("-")
                player[column] = int(parts[0]) - int(parts[1])
    for column in _LIST_COLUMNS:
        if player[column] is not None:
            player[column] = [item.strip() for item in player[column].split(',')]


def main():
    """One-shot loader: import the player CSVs into MongoDB and build indexes.

    Does nothing when a 'fifa' database already exists, so re-running the
    script is a no-op.
    """
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    if "fifa" not in client.list_database_names():
        db = client["fifa"]
        players_collection = db["players"]
        ultimate_team_collection = db["ultimate_teams"]
        for file in os.listdir("data/players"):
            data = pd.read_csv(os.path.join("data", "players", file))
            # File names end in the 2-digit year, e.g. "players_22.csv" -> "2022".
            data["year"] = "20" + file.split(".")[0][-2:]
            data["gender"] = "F" if "female" in file else "M"
            # Round-trip through JSON to turn the frame into plain dicts
            # (NaN becomes None along the way).
            data_json = json.loads(data.to_json(orient='records'))
            for player in data_json:
                _clean_player(player)
            players_collection.insert_many(data_json)
            print("Successfully loaded data for", file)
        print("Creating Indices for Faster Searching")
        players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING)])
        players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING), ('short_name', pymongo.ASCENDING)])
        players_collection.create_index([('year', pymongo.ASCENDING), ('gender', pymongo.ASCENDING), ('overall', pymongo.DESCENDING)])
        ultimate_team_collection.create_index([('year', pymongo.ASCENDING), ('username', pymongo.ASCENDING), ('team_name', pymongo.ASCENDING)])
    else:
        print("Data has been previously loaded.")
if __name__ == "__main__":
main() | wconti27/DS4300_FIFA_Tool | import_data.py | import_data.py | py | 2,486 | python | en | code | 0 | github-code | 36 |
70077095145 | from types import SimpleNamespace
import random, string, sys
'''
To execute the testing function run the code with command line argument test
ex. ~$ python change_making.py test
For normal usage run the script with an amount as a command line argument
ex. ~$ python change_making.py 23.62
'''
class InvalidInputError(Exception):
    """Raised when the user-supplied amount cannot be parsed as money."""
    def __init__(self):
        super().__init__()
# Initialize denomination definitions (as integers less than 100 for precision)
# Values are coin worths in cents; insertion order (largest first) drives the
# greedy loop in make_change(), so keep it descending.
DENOMINATIONS = {
    'quarter': 25,
    'dime': 10,
    'nickel': 5,
    'penny': 1
}
def make_change(amount):
    """Greedy change-making: break `amount` into dollars, quarters, dimes,
    nickels and pennies.

    Returns a SimpleNamespace; on invalid input, `is_valid` is False and
    `change` holds an error message instead of a coin breakdown.
    """
    result = SimpleNamespace(
        is_valid=True,
        amount=amount,
        dollars=0,
        cents=0,
        change=SimpleNamespace(dollar=0, quarter=0, dime=0, nickel=0, penny=0),
    )
    result = text_process(result)  # strip an optional leading '$'
    try:
        result = validate(result)
    except InvalidInputError:
        result.change = ('Amount must be a positive real number, '
                         'and in a valid format.')
    else:
        result.change.dollar = result.dollars
        # Greedy pass: DENOMINATIONS is ordered largest-first, so taking as
        # many coins as possible at each step is optimal for US coinage.
        for denomination, value in DENOMINATIONS.items():
            coins, result.cents = divmod(result.cents, value)
            result.change.__dict__[denomination] += coins
    finally:
        # NOTE: returning from `finally` intentionally swallows any other
        # exception and hands back whatever state was reached.
        return result
def text_process(namespace):
    """Drop everything up to and including the first '$' in `namespace.amount`.

    Amounts without a '$' are left untouched.
    """
    _, dollar_sign, remainder = namespace.amount.partition('$')
    if dollar_sign:
        namespace.amount = remainder
    return namespace
def validate(namespace):
    """Validate and parse `namespace.amount` into dollars and cents.

    Rejects non-numeric strings, negative values, empty strings and decimals
    with nonzero digits beyond the hundredths place.  Flags `is_valid` and
    raises InvalidInputError on rejection.
    """
    try:
        assert float(namespace.amount) >= 0  # numeric and non-negative
        partition = namespace.amount.split('.')
        decimal_part = partition[1]
        # nonzero digits in place-values exceeding the hundredths place
        # (int('') also raises here, rejecting inputs like '23.')
        assert int(decimal_part) == 0 or len(decimal_part) <= 2
    except (ValueError, TypeError, AssertionError):
        namespace.is_valid = False
        raise InvalidInputError
    except IndexError:  # no decimal point: whole-dollar amount
        try:
            namespace.dollars = int(namespace.amount)
        except ValueError:
            # float() accepted e.g. '1e2' or 'inf', but it is not a plain
            # integer literal -> reject instead of crashing the caller.
            namespace.is_valid = False
            raise InvalidInputError from None
    else:  # input was of the form x.y or x.yz
        namespace.dollars = int(partition[0])
        # BUG FIX: '.6' means 60 cents, not 6 -> right-pad to two digits.
        namespace.cents = int(decimal_part[:2].ljust(2, '0'))
    return namespace
def display(namespace):
    """Pretty-print the result of make_change().

    Valid results print a coin breakdown; invalid ones print the error
    message stored in `namespace.change`.  Note: mutates `namespace.amount`
    to a float for formatting.
    """
    if namespace.is_valid:
        # Crude column alignment: pad each coin count by the width of the
        # dollar figure.
        length = len(str(namespace.change.dollar))
        fill = ' '*length
        namespace.amount = float(namespace.amount)
        # The {namespace...}/{fill} fields below are resolved by
        # format_map(vars()) against the local scope.
        print('''
    input : ${namespace.amount:.2f}
    output:
            {namespace.change.dollar} dollars
            {namespace.change.quarter}{fill} quarters
            {namespace.change.dime}{fill} dimes
            {namespace.change.nickel}{fill} nickels
            {namespace.change.penny}{fill} pennies
    '''.format_map(vars()))
    else:
        print('''
    input : {namespace.amount}
    output: {namespace.change}'''.format_map(vars()))
def test():
    '''
    Builds six pseudo-random inputs -- one per input-validation class -- and
    runs each through make_change()/display().

    - test_1 : character input > error
    - test_2 : negative real number input > error
    - test_3 : the empty string > error
    - test_4 : nonzero digits in place values exceeding hundredths > error
    - test_5 : the integer zero > a multiset that is the empty set
    - test_6 : a pseudorandom nonnegative real number less than or equal to one
        million. > a populated multiset
    '''
    test_1 = random.choice(string.ascii_letters + string.punctuation)
    test_2 = -1 * (random.randint(0, 1000000) + round(random.random(), 2))
    test_3 = ''
    test_4 = random.randint(0, 1000000) + random.random()
    test_5 = 0
    test_6 = random.randint(0, 1000000) + round(random.random(), 2)
    # normalize test case data types (make_change expects strings)
    cases = list(map(str, [test_1, test_2, test_3, test_4, test_5, test_6]))
    # add set membership information for each case
    sets = ['non-numeric character', 'negative real number', 'empty string',
            'nonzero digits in place-values exceeding hundredths',
            'zero integer',
            'a correct input value']
    cases = list(zip(cases, sets))
    # process test cases
    for index, case in enumerate(cases):
        amount, set_membership = case[0], case[1]
        test_number = index +1
        # execute the algorithm
        namespace = make_change(amount)
        # the {test_number}/{set_membership} fields resolve via format_map(vars())
        print('\n[Test Case {test_number} ({set_membership})]:'.format_map(
            vars()))
        display(namespace)
def main():
    """CLI entry point: 'test' runs the test-case battery, anything else is
    treated as an amount to break into change."""
    if len(sys.argv) < 2:
        print('Must enter an amount.')
        return
    amount = sys.argv[1]
    if amount == 'test':
        test()
        return
    display(make_change(amount))
if __name__ == '__main__':
    # Run the CLI only when executed directly (not on import).
    main()
| chris-hamberg/algorithms_python | greedy.py | greedy.py | py | 5,526 | python | en | code | 0 | github-code | 36 |
43429079723 | # coding: utf-8
class Queue:
    """Minimal FIFO queue backed by a plain list.

    Note: dequeue() uses list.pop(0), which is O(n); the backing list is kept
    (rather than collections.deque) because the caller below inspects
    `tree_list` directly.
    """

    def __init__(self):
        self.tree_list = []

    def enqueue(self, tree):
        """Append an item at the back of the queue."""
        self.tree_list.append(tree)

    def dequeue(self):
        """Remove and return the front item (raises IndexError when empty)."""
        return self.tree_list.pop(0)

    def is_empty(self):
        """Return True when the queue holds no items.

        (This was an unimplemented `pass` stub that always returned None.)
        """
        return not self.tree_list
if __name__ == '__main__':
    # Contest scratch work: reads n, then n-1 pairs from stdin and repeatedly
    # tries to splice pairs into longer chains.  NOTE(review): the while loop
    # only terminates via exit() after ~12 unproductive rounds; the algorithm
    # looks unfinished (see the commented-out re-enqueue conditions).
    tree_list = []
    queue = Queue()
    n = int(input())
    for i in range(1, n):
        f, s = map(int, input().split())
        queue.enqueue([f, s])
    c = 0
    while True:
        tree = queue.dequeue()
        next_tree = queue.dequeue()
        # Case 1: the head of `tree` appears in `next_tree` -> merge.
        if tree[0] in next_tree:
            k = next_tree.index(tree[0])
            queue.enqueue(sorted(next_tree[k-1:], reverse=True) + tree)
            # if k != 0 and k != len(tree) and queue.tree_list not in tree:
            #     queue.enqueue(tree)
            print(queue.tree_list)
            continue
        # Case 2: the tail of `tree` appears in `next_tree` -> merge.
        if tree[1] in next_tree:
            k = next_tree.index(tree[1])
            queue.enqueue(sorted(tree, reverse=True) + next_tree[:k])
            # if k != 0 and k != len(tree) and queue.tree_list not in tree:
            #     queue.enqueue(tree)
            print(queue.tree_list)
            continue
        # No overlap: put both back and count an unproductive round.
        queue.enqueue(tree)
        queue.enqueue(next_tree)
        if c > 10:
            exit()
        c += 1
| oamam/atcoder_amama | python/beginner/20140913/20140913D.py | 20140913D.py | py | 1,297 | python | en | code | 0 | github-code | 36 |
28228486901 | lines="hi hello hi hello"
# Expected output:
#   ['hi', 'hello', 'hi', 'hello']
#   {'hi': 2, 'hello': 2}
# Split the sentence into words, then count occurrences with a plain dict.
words = lines.split(" ")
print(words)
dic = {}
for word in words:
    # dict.get() folds "first time seen" and "seen before" into one line.
    dic[word] = dic.get(word, 0) + 1
print(dic) | Jesta398/project | collections'/dictionary/wordcount.py | wordcount.py | py | 256 | python | en | code | 0 | github-code | 36 |
22729381839 | import re
from General.utilities import *
from General import feedparser
def getContentExtend(RssUrl, Pattern, FetchNumber=None):
    """Parse the RSS feed at RssUrl, fetch up to FetchNumber entry pages, and
    attach the part of each page matched by Pattern as entry["htmlContent"].

    Pattern's first capture group is used when it defines one, otherwise the
    whole match.  <script> blocks are stripped before matching.
    """
    feed = feedparser.parse(cachedFetch(RssUrl))
    content_re = re.compile(Pattern, re.M | re.S | re.U)
    # Raw string: the old "<\/script>" spelling is an invalid escape warning
    # on modern Python; '/' needs no escaping in a Python regex.
    script_re = re.compile(r"<script.*?</script>", re.M | re.S | re.U | re.I)
    fetch_list = []
    for num, entry in enumerate(feed.entries, start=1):
        fetch_list.append(entry["link"])
        if FetchNumber is not None and FetchNumber == num:
            break
    fetch_result = MultiFetch(fetch_list)
    for entry in feed.entries:
        # FIX: dict.has_key() was removed in Python 3; `in` works everywhere.
        if entry["link"] in fetch_result:
            html_content = ConvertToUnicode(fetch_result[entry["link"]])
            html_content = " ".join(script_re.split(html_content))
            match = content_re.search(html_content)
            if match:
                if match.groups():
                    entry["htmlContent"] = match.group(1)
                else:
                    entry["htmlContent"] = match.group(0)
    return feed
# vim:tabstop=2 expandtab shiftwidth=2
| YuJianrong/GAEProjects | FeedsToolsBox/ControlCenter/ContentExtend.py | ContentExtend.py | py | 1,065 | python | en | code | 0 | github-code | 36 |
20053563450 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 28 15:42:03 2018
@author: tf
"""
from numpy import *
import operator
import os, sys
#2.1 a simple kNN classifier
def classify0(inX, dataSet, labels, k):
    """Classic kNN: return the majority label among the k training points of
    `dataSet` closest (Euclidean distance) to `inX`.

    Ties are broken in favour of the label seen first among the neighbours,
    matching the original stable-sort behaviour.
    """
    m = dataSet.shape[0]
    deltas = tile(inX, (m, 1)) - dataSet
    distances = ((deltas ** 2).sum(axis=1)) ** 0.5
    votes = {}
    for neighbor_idx in distances.argsort()[:k]:
        label = labels[neighbor_idx]
        votes[label] = votes.get(label, 0) + 1
    # max() returns the first maximal entry in insertion order.
    return max(votes.items(), key=operator.itemgetter(1))[0]
#2.2 Helen dating
# Text label -> numeric class used by the dating-data loader.
_LABEL_CODES = {'largeDoses': 1, 'smallDoses': 2, 'didntLike': 3}


def file2matrix(filename):
    """Parse a tab-separated dating-data file.

    Each line holds three numeric features followed by a text label.  Returns
    an (n, 3) feature matrix and a parallel list of integer class labels
    (unknown labels map to 0).

    FIX: the file handle is now closed (the original leaked it).
    """
    with open(filename) as fr:
        lines = fr.readlines()
    features = zeros((len(lines), 3))
    class_labels = []
    for idx, line in enumerate(lines):
        fields = line.strip().split('\t')
        features[idx, :] = fields[0:3]
        class_labels.append(_LABEL_CODES.get(fields[-1], 0))
    return features, class_labels
def autoNorm(dataSet):
    """Column-wise min-max scaling: (value - min) / (max - min).

    Returns the normalised matrix, the per-column ranges and the per-column
    minimums (needed to normalise new samples the same way).
    """
    col_min = dataSet.min(0)
    col_max = dataSet.max(0)
    ranges = col_max - col_min
    # Broadcasting applies the per-column shift/scale to every row, replacing
    # the explicit tile() copies.
    normalized = (dataSet - col_min) / ranges
    return normalized, ranges, col_min
def datingClassTest():
    """Hold-out evaluation of classify0 on the dating data set: train on the
    last 90% of rows, test on the first 10%, printing each prediction and
    the overall error rate."""
    hold_out_ratio = 0.1
    features, labels = file2matrix('datingTestSet.txt')
    norm_feats, _, _ = autoNorm(features)
    total = norm_feats.shape[0]
    num_test = int(total * hold_out_ratio)
    errors = 0
    for i in range(num_test):
        predicted = classify0(norm_feats[i, :], norm_feats[num_test:total, :],
                              labels[num_test:total], 4)
        print('the classifier came back with: %d, the real answer is: %d' %(predicted, labels[i]))
        if predicted != labels[i]:
            errors += 1
    print('the total error rate is: %f' %(errors / num_test))
#2.3 handwriting recognizing
def img2vector(filename):
    """Read a 32x32 text bitmap of '0'/'1' characters into a (1, 1024) row
    vector, in row-major order.

    FIX: the file handle is closed via a context manager (it used to leak).
    """
    vector = zeros((1, 1024))
    with open(filename) as fr:
        for i in range(32):
            row = fr.readline()
            for j in range(32):
                vector[0, 32 * i + j] = int(row[j])
    return vector
def handwritingClassTest():
    """Evaluate classify0 on the digit bitmaps: train on 'trainingDigits',
    test on 'testDigits', printing each prediction and the error rate.

    File names look like '<digit>_<sample>.txt'; the digit before the first
    '_' is the ground-truth label.
    """
    hwLabels = []
    trainingFileList = os.listdir('trainingDigits')
    trainingMat = zeros((len(trainingFileList), 1024))
    for i, filename in enumerate(trainingFileList):
        hwLabels.append(int(filename.split('.')[0].split('_')[0]))
        trainingMat[i, :] = img2vector('trainingDigits/%s' % filename)
    testFileList = os.listdir('testDigits')
    errorCnt = 0
    mTest = len(testFileList)
    for filename in testFileList:
        classNum = int(filename.split('.')[0].split('_')[0])
        # BUG FIX: test vectors were read from 'trainingDigits', i.e. the
        # classifier was being evaluated on its own training files.
        vecUnderTest = img2vector('testDigits/%s' % filename)
        classifierResult = classify0(vecUnderTest, trainingMat, hwLabels, 3)
        print('the classifier came back with: %d, the real answer is: %d' %(classifierResult, classNum))
        if classifierResult != classNum:
            errorCnt += 1
    errorRate = errorCnt / mTest
    print('\nthe total number of error rate is: %f' % errorRate)
| Cjh327/Machine-Learning-in-Action | kNN/kNN.py | kNN.py | py | 4,174 | python | en | code | 2 | github-code | 36 |
19970778695 | import eventlet
import msgpack
import random
from copy import copy
from datetime import datetime
from . import cmds
from . import msgs
import os
log_file = open(os.path.join(os.getcwd(), 'client.log'), 'w')
def write_log(msg):
    """Append a timestamped line to the module-level client log file."""
    # `log_file` is only read here, so no `global` declaration is needed.
    log_file.write("{0} - {1}\n".format(datetime.now(), str(msg)))
class RaftClient(object):
    """Client for a Raft cluster: registers itself, then issues sequenced
    update/query commands, retrying across servers until it finds the leader.
    """
    def __init__(self, server_address_list):
        # List of (host, port) tuples of all known servers.
        self.server_address_list = server_address_list
        self._leader_address = None
        self._leader_sock = None
        self.status = 'init'
        # Per-client command sequence number; reset on register().
        self.cmd_seq = 0
        self.client_id = None
    @classmethod
    def select_server(cls, server_address_list):
        """Pick a random server to probe for leadership."""
        return random.choice(server_address_list)
    def register(self):
        """Register with the cluster and remember the assigned client id."""
        cmd = cmds.get_client_register_cmd()
        cmd_msg = msgs.get_client_register_req_msg(cmd)
        self.cmd_seq = 0
        ret = self.execute_command(cmd_msg)
        # resp layout: the client id sits at index 1 — see msgs/cmds modules.
        self.client_id = ret['resp'][1]
        return ret
    def get_next_seq(self):
        """Return the next monotonically increasing command sequence number."""
        self.cmd_seq += 1
        return self.cmd_seq
    def send_command_req(self, command_msg):
        """Send the raw (already msgpack-encoded) request to the leader."""
        self._leader_sock.sendall(command_msg)
    def set_value(self, key, value):
        """Issue an update command setting `key` to `value`."""
        cmd = cmds.get_client_update_cmd(
            key,
            value,
        )
        cmd_msg = msgs.get_client_update_req_msg(
            self.client_id,
            self.get_next_seq(),
            cmd,
        )
        return self.execute_command(cmd_msg)
    def get_value(self, key):
        """Issue a query command reading `key`."""
        cmd = cmds.get_client_query_cmd(key)
        cmd_msg = msgs.get_client_query_req_msg(
            self.client_id,
            self.get_next_seq(),
            cmd,
        )
        return self.execute_command(cmd_msg)
    def wait_command_ret(self):
        """Read from the leader socket until one msgpack object is decoded.

        Returns None when the peer closes the connection first.
        """
        unpacker = msgpack.Unpacker()
        while True:
            chunk = self._leader_sock.recv(1024)
            if len(chunk) == 0:
                break
            unpacker.feed(chunk)
            try:
                # StopIteration just means "need more bytes"; keep reading.
                return unpacker.next()
            except StopIteration:
                pass
        return None
    def execute_command(self, command_msg):
        """Send a command, following leader hints and retrying other servers.

        Cycles through the server list (refilling it when exhausted) until a
        fresh, successful response arrives.  Each attempt is bounded by a
        2-second eventlet timeout.
        """
        s_addr_list = copy(self.server_address_list)
        while len(s_addr_list) > 0:
            try:
                if self._leader_address is None:
                    self._leader_address = RaftClient.select_server(s_addr_list)
                    write_log(
                        "selected server {0}".format(self._leader_address))
                if self._leader_sock is None:
                    self._leader_sock = eventlet.connect(self._leader_address)
                timeout = eventlet.Timeout(2)
                try:
                    self.send_command_req(command_msg)
                    write_log(
                        "sent {0} - cmd: {1}".format(
                            self._leader_address,
                            msgpack.unpackb(command_msg),
                        )
                    )
                    ret = self.wait_command_ret()
                finally:
                    timeout.cancel()
                if ret is not None:
                    if ret['success']:
                        # Stale reply for an older command: retry same leader.
                        if ret['resp'][2] < self.cmd_seq:
                            continue
                        return ret
                    else:
                        if 'leader_hint' in ret:
                            self._leader_sock.close()
                            self._leader_sock = None
                            # NOTE(review): the hint's port is offset by 1000,
                            # presumably the client-facing port — confirm
                            # against the server's listener configuration.
                            self._leader_address = (
                                ret['leader_hint'][0],
                                ret['leader_hint'][1] + 1000,
                            )
                            continue
            except (eventlet.timeout.Timeout, Exception) as e:
                # eventlet.Timeout derives from BaseException, so it must be
                # listed explicitly alongside Exception.
                write_log("hit exception:\n {0}".format(str(e)))
                pass
            # The attempted server failed: drop it and start over elsewhere.
            if self._leader_address in s_addr_list:
                s_addr_list.remove(self._leader_address)
            if len(s_addr_list) == 0:
                s_addr_list = copy(self.server_address_list)
            self._leader_address = None
            self._leader_sock = None
| jason-ni/eventlet-raft | eventlet_raft/client.py | client.py | py | 4,133 | python | en | code | 3 | github-code | 36 |
19406180140 | #
# @lc app=leetcode id=202 lang=python3
#
# [202] Happy Number
#
# @lc code=start
class Solution:
    """LC 202: a number is "happy" when repeatedly replacing it by the sum of
    its squared digits eventually reaches 1."""

    def isHappy(self, n: int) -> bool:
        visited = set()
        while n != 1:
            n = sum(int(digit) ** 2 for digit in str(n))
            if n in visited:
                # Hit a previously-seen value: we are in a cycle that can
                # never reach 1.
                return False
            visited.add(n)
        return True
# @lc code=end
| Matthewow/Leetcode | vscode_extension/202.happy-number.py | 202.happy-number.py | py | 409 | python | en | code | 2 | github-code | 36 |
70562629544 | import sys
input = sys.stdin.readline
def get_primenumber_under(n):
    """Sieve of Eratosthenes: return a list of n+1 flags where flags[i] is
    True exactly when i is prime (0 <= i <= n)."""
    flags = [False, False] + [True] * (n - 1)
    for candidate in range(2, int(n ** 0.5) + 1):
        if flags[candidate]:  # only primes need their multiples cleared
            for multiple in range(candidate * 2, n + 1, candidate):
                flags[multiple] = False
    return flags
def is_prime(n):
    """Primality test backed by the global `primenumbers` sieve.

    Values inside the sieve are answered by direct lookup; larger values fall
    back to trial division by the sieved primes.
    """
    # BUG FIX: use >= — n == len(primenumbers) is already out of range for
    # the lookup branch (valid indices are 0 .. len-1), so it must take the
    # trial-division path instead of raising IndexError.
    if n >= len(primenumbers):
        for num, prime_flag in enumerate(primenumbers):
            if prime_flag:
                if n % num == 0:
                    return False
                if num > n // 2:  # sieve flags are in ascending order
                    break
    else:
        if not primenumbers[n]:
            return False
    return True
T = int(input())
# Goldbach's conjecture: every even number greater than 4 can be written as
# the sum of two primes.
# 2 -> impossible
# 3 -> impossible
# even sum -> always YES
# odd sum  -> an odd sum needs even + odd, and the only even prime is 2,
#             so it suffices to check whether (sum - 2) is prime, which
#             is done by trial division against the sieved primes.
primenumbers = get_primenumber_under(2*(10**6))
for t in range(T):
    A, B = map(int, input().rstrip().split())
    sum_AB = A+B
    if sum_AB == 2 or sum_AB == 3:
        print('NO')
        continue
    elif sum_AB % 2 == 0:
        print('YES')
        continue
    else:
print('YES') if is_prime(sum_AB-2) else print('NO') | zsmalla/algorithm-jistudy-season1 | src/chapter2/4_기초수학(2)/임지수/15711_python_임지수.py | 15711_python_임지수.py | py | 1,493 | python | ko | code | 0 | github-code | 36 |
20851705442 | # Take the code from the How To Decode A Website exercise
# (if you didn’t do it or just want to play with some different code, use the code from the solution),
# and instead of printing the results to a screen, write the results to a txt file.
# In your code, just make up a name for the file you are saving to.
# Extras:
# Ask the user to specify the name of the output file that will be saved.
import requests
from bs4 import BeautifulSoup
base_url = 'http://www.nytimes.com'
r = requests.get(base_url)
# NOTE(review): BeautifulSoup is called without an explicit parser; recent
# bs4 versions warn and the chosen parser can vary by platform — consider
# BeautifulSoup(r.text, "html.parser").
soup = BeautifulSoup(r.text)
titles = []
# Collect the text of every linked story heading; headings without a link
# are printed instead of saved.
for story_heading in soup.find_all(class_="story-heading"):
    if story_heading.a:
        # print(story_heading.a.text.replace("\n", " ").strip())
        titles.append(story_heading.a.text.replace("\n", " ").strip())
    else:
        print(story_heading.contents[0].strip())
name_file = input('Rename the file please ')
# The with-statement closes the output file automatically.
with open('{}.txt'.format(name_file), 'w') as open_file:
open_file.write(str(titles)) | ismsadek/python-basics | Ex 21.py | Ex 21.py | py | 973 | python | en | code | 1 | github-code | 36 |
31527008003 | Test_case = int(input())
for t in range(Test_case):
    N = int(input())
    # count[i] holds the length of the i-th strictly-increasing run.
    count = [1] * N
    idx = 0
    carrot = list(map(int, input().split()))
    # Extend the current run while values keep growing; otherwise start a
    # new run.
    for i in range(1, N):
        if carrot[i] > carrot[i-1]:
            count[idx] += 1
        else:
            idx += 1
    # Longest run via the builtin (replaces a manual scan ending in a dead
    # `else: continue`); default=0 keeps the old behaviour for N == 0.
    max_count = max(count, default=0)
print(f'#{t+1} {max_count}') | Ikthegreat/TIL | Algorithm/0203/9367.py | 9367.py | py | 434 | python | en | code | 0 | github-code | 36 |
30844385629 | """
USC Spring 2020
INF 553 Foundations of Data Mining
Assignment 3
Student Name: Jiabin Wang
Student ID: 4778-4151-95
"""
from pyspark import SparkConf, SparkContext, StorageLevel
from trainAuxiliary import *
'''
import os
import re
import json
import time
import sys
import math
import random
import itertools
'''
if __name__ == "__main__":
    # Usage: spark-submit task3train.py <train_review.json> <model_file>
    #        <item_based|user_based>
    # NOTE(review): time/sys/itemBasedTrain/... appear to come from
    # trainAuxiliary's star import — confirm against that module.
    time_start = time.time()
    # Get the input parameters
    input_file_path = sys.argv[1] #"./Dataset/train_review.json"
    model_file_path = sys.argv[2]
    cf_type = sys.argv[3]
    # Configure the Spark
    conf = (
        SparkConf()
        .setAppName("task3")
        .set("spark.driver.memory", "4g")
        .set("spark.executor.memory", "4g")
    )
    sc = SparkContext(conf=conf)
    sc.setLogLevel("ERROR")
    # Train and persist the chosen collaborative-filtering model.
    if cf_type == "item_based":
        pairCollection = itemBasedTrain(sc, input_file_path, model_file_path)
        itemBasedOutputModel(model_file_path, pairCollection)
    if cf_type == "user_based":
        result = userBasedTrain(sc, input_file_path, model_file_path)
        userBasedOutputModel(model_file_path, result)
    time_end = time.time()
    print("Duration: ", time_end - time_start, "s")
| jiabinwa/DSCI-INF553-DataMining | Assignment-3/task3train.py | task3train.py | py | 1,215 | python | en | code | 0 | github-code | 36 |
3853684048 | # 死锁:一直等待对方释放锁的情景叫做死锁
# 需求:多线程同时根据下标在列表中取值,要保证同一时刻只能有一个线程去取值
import threading
lock = threading.Lock()
def get_value(index):
    """Print my_list[index] under the mutex; report out-of-range indexes.

    BUG FIX: the failure branch returned *before* releasing the lock (the
    release call sat unreachable after `return`), so the first out-of-range
    index deadlocked every later thread.  The lock is now released on both
    paths before returning.
    """
    # acquire the lock
    lock.acquire()
    my_list = [1, 4, 6]
    if index >= len(my_list):
        print('下标越界')
        # even when the lookup fails, release the mutex so the remaining
        # threads can proceed
        lock.release()
        return
    else:
        print(f'value = {my_list[index]}')
        lock.release()
if __name__ == '__main__':
    # Spawn ten worker threads; indexes 3..9 exercise the out-of-range path.
    for i in range(10):
        value_thread = threading.Thread(target=get_value, args=(i,))
        value_thread.start()
| Edward-Lengend/python | PycharmProjects/16多任务编程/02线程/py_07_死锁.py | py_07_死锁.py | py | 701 | python | zh | code | 0 | github-code | 36 |
19476289451 | from tkinter import *
from piece import piece
window = Tk()
# 24 rows x 10 columns of Label widgets form the board; the flat list is
# indexed as row * 10 + column.
labels=[0]*240
for r in range(24):
    for c in range(10):
        labels[r*10+c]=Label(window, bg='black', height=1, width=2)
        labels[r *10+c].grid(row=r,column=c, sticky=S+N+E+W)
# Drop the first piece at the top of the board.  NOTE(review): the meaning
# of the trailing 0, 10, 5 arguments should be confirmed against
# piece.__init__ in piece.py.
firstSquare=piece(window, labels, 0, 10,5)
window.mainloop()
| gegoff/tetris | board.py | board.py | py | 311 | python | en | code | 0 | github-code | 36 |
69839957223 | from django.conf.urls.defaults import *
from django.contrib import admin
import os.path
# Register installed apps' ModelAdmin classes with the admin site.
admin.autodiscover()
# Media served by Django itself — a development-only pattern.  NOTE(review):
# `patterns`/string views come from django.conf.urls.defaults, i.e. a
# Django <= 1.3 code base.
MEDIA_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), "media")
urlpatterns = patterns('',
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^admin/', include(admin.site.urls)),
    (r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': MEDIA_ROOT}),
    # Object exploration / comparison endpoints of the `air` app.
    url(r'^p/(\w+)$', 'air.views.object', name='object'),
    url(r'^compare/(\w+)$', 'air.views.compareTo', name='compareTo'),
    url(r'^compare/(\w+)/(\w+)$', 'air.views.compare', name='compare'),
    url(r'^categories/$', 'air.views.categories', name='categories'),
    url(r'^category/$', 'air.views.category', name='category'),
    url(r'^projection/(\w+)/(\w+)$', 'air.views.projection', name='projection'),
    url(r'^add/$', 'air.views.add', name='add'),
    url(r'^reset/$', 'air.views.reset', name='reset'),
    url(r'^$', 'air.views.explore', name='explore'),
)
| friendofrobots/ice-divisi | explore/urls.py | urls.py | py | 995 | python | en | code | 1 | github-code | 36 |
28890711101 | """Tool for processing pytd files.
pytd is a type declaration language for Python. Each .py file can have an
accompanying .pytd file that specifies classes, argument types, return types
and exceptions.
This binary processes pytd files, typically to optimize them.
Usage:
pytd_tool [flags] <inputfile> <outputfile>
"""
import argparse
import sys
from pytype import utils
from pytype.imports import builtin_stubs
from pytype.pyi import parser
from pytype.pytd import optimize
from pytype.pytd import pytd_utils
def make_parser():
  """Build the argparse parser for the pytd tool's command line."""
  arg_parser = argparse.ArgumentParser(
      usage="%(prog)s [options] infile.pytd [outfile.pytd]")
  # Positional input and (optional) output files.
  arg_parser.add_argument("input", help="File to process")
  arg_parser.add_argument("output", nargs="?",
                          help=("Output file (or - for stdout). If output is omitted, "
                                "the input file will be checked for errors."))
  # Optimization switches.
  arg_parser.add_argument(
      "-O", "--optimize", action="store_true",
      dest="optimize", default=False,
      help="Optimize pytd file.")
  arg_parser.add_argument(
      "--lossy", action="store_true",
      dest="lossy", default=False,
      help="Allow lossy optimizations, such as merging classes.")
  arg_parser.add_argument(
      "--max-union", type=int, action="store",
      dest="max_union", default=4,
      help="Maximum number of objects in an 'or' clause.\nUse with --lossy.")
  arg_parser.add_argument(
      "--use-abcs", action="store_true",
      dest="use_abcs", default=False,
      help="Inject abstract bases classes for type merging.\nUse with --lossy.")
  arg_parser.add_argument(
      "--remove-mutable", action="store_true",
      dest="remove_mutable", default=False,
      help="Remove mutable parameters.")
  # Target version and output formatting.
  arg_parser.add_argument(
      "-V", "--python_version", type=str, action="store",
      dest="python_version", default=None,
      help=("Python version to target (\"major.minor\", e.g. \"3.10\")"))
  arg_parser.add_argument(
      "--multiline-args", action="store_true",
      dest="multiline_args", default=False,
      help="Print function arguments one to a line.")
  return arg_parser
def main():
  """Parse a .pytd file, optionally optimize it, and write or check it."""
  argument_parser = make_parser()
  opts = argument_parser.parse_args()
  # Target version: explicit flag, else the interpreter's own version.
  if opts.python_version:
    python_version = utils.version_from_string(opts.python_version)
  else:
    python_version = sys.version_info[:2]
  try:
    utils.validate_version(python_version)
  except utils.UsageError as e:
    sys.stderr.write(f"Usage error: {e}\n")
    sys.exit(1)
  options = parser.PyiOptions(python_version=python_version)
  with open(opts.input) as fi:
    sourcecode = fi.read()
  try:
    parsed = parser.parse_string(
        sourcecode, filename=opts.input, options=options)
  except parser.ParseError as e:
    sys.stderr.write(str(e))
    sys.exit(1)
  if opts.optimize:
    # Optimize against the combined builtins + typing stubs.
    parsed = optimize.Optimize(
        parsed,
        pytd_utils.Concat(*builtin_stubs.GetBuiltinsAndTyping(options)),
        lossy=opts.lossy,
        use_abcs=opts.use_abcs,
        max_union=opts.max_union,
        remove_mutable=opts.remove_mutable,
        can_do_lookup=False)
  # With no output argument the run is a pure syntax/error check.
  if opts.output is not None:
    out_text = pytd_utils.Print(parsed, opts.multiline_args)
    if opts.output == "-":
      sys.stdout.write(out_text)
    else:
      with open(opts.output, "w") as out:
        out.write(out_text)
main()
| google/pytype | pytype/pytd/main.py | main.py | py | 3,389 | python | en | code | 4,405 | github-code | 36 |
15991432175 | """Point-wise Spatial Attention Network"""
import torch
import torch.nn as nn
up_kwargs = {'mode': 'bilinear', 'align_corners': True}
norm_layer = nn.BatchNorm2d
class _ConvBNReLU(nn.Module):
    """Conv2d -> norm -> ReLU block (bias-free conv; ReLU6 when relu6=True)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu6=False, norm_layer=norm_layer):
        super(_ConvBNReLU, self).__init__()
        # Submodule names (conv/bn/relu) are kept so checkpoints stay loadable.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                              padding, dilation, groups, bias=False)
        self.bn = norm_layer(out_channels)
        self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class PSAHead(nn.Module):
    """Point-wise Spatial Attention segmentation head.

    Runs PSA over the selected backbone feature map, post-processes the
    attention features, concatenates them with the input and projects to
    per-class logits.
    """
    def __init__(self, in_channels=768, num_classes=6, norm_layer=norm_layer, in_index=3):
        super(PSAHead, self).__init__()
        # Which backbone output(s) to consume (int or list of ints).
        self.in_index = in_index
        # psa_out_channels = crop_size // stride_rate ** 2
        # NOTE(review): (512 // 32) ** 2 == 256 hard-codes a 512 crop with
        # stride 32 — confirm these match the training configuration.
        psa_out_channels = (512 // 32) ** 2
        self.psa = _PointwiseSpatialAttention(in_channels, psa_out_channels, norm_layer)
        self.conv_post = _ConvBNReLU(psa_out_channels, in_channels, 1, norm_layer=norm_layer)
        self.project = nn.Sequential(
            _ConvBNReLU(in_channels * 2, in_channels // 2, 3, padding=1, norm_layer=norm_layer),
            nn.Dropout2d(0.1, False),
            nn.Conv2d(in_channels // 2, num_classes, 1))
    def _transform_inputs(self, inputs):
        # Select one feature map (int index) or several (list/tuple of indexes).
        if isinstance(self.in_index, (list, tuple)):
            inputs = [inputs[i] for i in self.in_index]
        elif isinstance(self.in_index, int):
            inputs = inputs[self.in_index]
        return inputs
    def forward(self, inputs):
        x = self._transform_inputs(inputs)
        global_feature = self.psa(x)
        out = self.conv_post(global_feature)
        # Fuse the original features with the attention features channel-wise.
        out = torch.cat([x, out], dim=1)
        out = self.project(out)
        return out
class _PointwiseSpatialAttention(nn.Module):
    """Runs the 'collect' and 'distribute' attention branches of PSANet and
    concatenates their feature maps along the channel axis."""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d):
        super(_PointwiseSpatialAttention, self).__init__()
        reduced_channels = out_channels // 2
        self.collect_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer)
        self.distribute_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer)

    def forward(self, x):
        return torch.cat([self.collect_attention(x), self.distribute_attention(x)], dim=1)
class _AttentionGeneration(nn.Module):
    """Generate a per-position attention map and aggregate reduced features
    with it via batched matrix multiplication."""
    def __init__(self, in_channels, reduced_channels, out_channels, norm_layer):
        super(_AttentionGeneration, self).__init__()
        # 1x1 reduction of the input features.
        self.conv_reduce = _ConvBNReLU(in_channels, reduced_channels, 1, norm_layer=norm_layer)
        # Two-layer 1x1 conv stack producing the raw attention logits.
        self.attention = nn.Sequential(
            _ConvBNReLU(reduced_channels, reduced_channels, 1, norm_layer=norm_layer),
            nn.Conv2d(reduced_channels, out_channels, 1, bias=False))
        self.reduced_channels = reduced_channels
    def forward(self, x):
        reduce_x = self.conv_reduce(x)
        attention = self.attention(reduce_x)
        n, c, h, w = attention.size()
        attention = attention.view(n, c, -1)
        reduce_x = reduce_x.view(n, self.reduced_channels, -1)
        # bmm of (n, reduced, h*w) with (n, c, h*w) is only shape-valid when
        # c == h*w; PSAHead arranges this by setting out_channels to
        # (crop // stride) ** 2 -- TODO confirm for other input sizes.
        # softmax(dim=1) normalizes over the attention-channel dimension.
        fm = torch.bmm(reduce_x, torch.softmax(attention, dim=1))
        fm = fm.view(n, self.reduced_channels, h, w)
        return fm
| zyxu1996/Efficient-Transformer | models/head/psa.py | psa.py | py | 3,539 | python | en | code | 67 | github-code | 36 |
29045421069 | # Python
class Solution(object):
    def createTargetArray(self, nums, index):
        """Build the target array by inserting nums[i] at position index[i],
        processing the pairs in order (LeetCode 1389).

        :type nums: List[int]
        :type index: List[int]
        :rtype: List[int]
        """
        target = []
        for position, insert_at in enumerate(index):
            target.insert(insert_at, nums[position])
        return target
| richard-dao/Other | LeetCode-Problems/Easy/Target-Array-In-Order.py | Target-Array-In-Order.py | py | 345 | python | en | code | 0 | github-code | 36 |
12080387469 | from tinydb import TinyDB, Query, where
# Open (or create) the JSON-backed TinyDB store, pretty-printed with indent=4.
db = TinyDB("data.json", indent=4)
# Set Patrick's score to 10.
db.update({"score": 10}, where ("name") == "Patrick")
# No condition given: this assigns the "Junior" role to EVERY document.
db.update({"roles": ["Junior"]})
# Overwrite Patrick's roles with "Expert".
db.update({"roles": ["Expert"]}, where("name") == "Patrick")
# Update Pierre if he exists, otherwise insert him.
db.upsert({"name": "Pierre", "score": 120, "roles": ["Senior"]}, where("name") == "Pierre")
# Delete every document whose score is 0.
db.remove(where("score") == 0)
# db.truncate() # supprime tout le contenu de la db | yunus-gdk/python_beginner | tiny-db/maj.py | maj.py | py | 402 | python | en | code | 0 | github-code | 36 |
6877383992 | #from IPython.display
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from IPython.display import display
from IPython.core.display import HTML
import json
from pprint import pprint
import os
import time
#import md5
import hashlib
import os
from aliyunsdkcore.profile import region_provider
from aliyunsdkcore.client import AcsClient
import base64
import aliyunsdkimagesearch.request.v20190325.AddImageRequest as AddImageRequest
import aliyunsdkimagesearch.request.v20190325.DeleteImageRequest as DeleteImageRequest
import aliyunsdkimagesearch.request.v20190325.SearchImageRequest as SearchImageRequest
import os
import time
from pprint import pprint
def list_images(image_folder):
    """Collect .jpg/.png files directly inside *image_folder*.

    :param image_folder: directory to scan (non-recursive)
    :return: dict mapping the relative file path to the bare file name
    """
    found = {}
    for entry in os.listdir(image_folder):
        # str.endswith accepts a tuple of suffixes.
        if entry.endswith((".jpg", ".png")):
            found[os.path.join(image_folder, entry)] = entry
    return found
def get_Piccontent_from_file(image_path):
    """Return the base64-encoded content of the file at *image_path*.

    Bug fixed: the file was opened in text mode and the resulting ``str``
    passed to ``base64.b64encode``, which raises ``TypeError`` on Python 3
    (and text-decoding an image can fail outright).  The file is now read
    in binary mode, and a ``with`` block replaces the manual try/finally.

    :param image_path: path of the (binary) file to encode
    :return: base64-encoded bytes of the file content
    """
    with open(image_path, 'rb') as file_object:
        return base64.b64encode(file_object.read())
def my_image_preview(image_path, box, cate, color="red"):
    """Open the image and, if *box* is given, draw the bounding box and the
    category label on it.

    :param box: comma-separated region string; the index mapping below
        (x0=bb[0], x1=bb[1], y0=bb[2], y1=bb[3]) suggests the format is
        "left,right,top,bottom" as returned by the image-search Region
        field -- TODO confirm against the API response
    :param cate: category text drawn at the box's top-left corner
    :return: the (possibly annotated) PIL image
    """
    #img1 = Image(filename = image_path, width=100, height=100)
    img1 = Image.open(image_path)
    if box is not None and box != '':
        draw = ImageDraw.Draw(img1)
        bb = box.split(",")
        x0 = float(bb[0])
        y0 = float(bb[2])
        x1 = float(bb[1])
        x2 = float(bb[3])
        draw.rectangle([(x0, y0), (x1, x2)], outline=color)
        if cate is not None and cate != "":
            draw.text((x0, y0), cate, fill=color)
    img = img1
    return img
###########################################################
###########################################################
def match_cate_desc(cate_id):
    """Translate an image-search category id into its display name.

    :param cate_id: numeric category id returned by the API
    :return: category name, or 'Other' for any unknown id
    """
    id_to_name = {
        0: 'Tops', 1: 'Dress', 2: 'Bottoms', 3: 'Bag', 4: 'Shoes',
        5: 'Accessories', 6: 'Snack', 7: 'Makeup', 8: 'Bottle',
        9: 'Furniture', 20: 'Toy', 21: 'Underwear', 22: 'Digital device',
        88888888: 'Other',
    }
    return id_to_name.get(cate_id, 'Other')
def my_image_upload_base(requestClient, endpoint, instanceName, ProductId, image_name, image_path, cate_id, cate_desc, obj_region):
    """Upload one image to the Alibaba Cloud Image Search instance.

    The original metadata (name, path, category, region) is stored in the
    record's CustomContent as JSON so searches can recover it later.

    :return: the parsed JSON response of the AddImage call
    """
    # load file
    request = AddImageRequest.AddImageRequest()
    request.set_endpoint(endpoint)
    request.set_InstanceName(instanceName)
    image_content = {'name': image_name, 'path': image_path, 'cate_id':cate_id, 'cate_desc':cate_desc, 'obj_region':obj_region}
    request.set_CustomContent(json.dumps(image_content))
    request.set_ProductId(ProductId)
    request.set_PicName(image_name)
    #if cate_id is not None:
    request.set_CategoryId(cate_id)
    print("=======", cate_id, image_name)
    # Image bytes are sent base64-encoded.
    with open(image_path, 'rb') as imgfile:
        encoded_pic_content = base64.b64encode(imgfile.read())
        request.set_PicContent(encoded_pic_content)
    response = requestClient.do_action_with_exception(request)
    r = json.loads(response)
    # print(response)
    return r
#def my_image_upload_for_category():
def my_image_upload_for_similarity_search(requestClient, endpoint, instanceName, ProductId, image_name, image_path, cate_id, cate_desc, obj_region):
    """Upload an image and enrich the response with the detected category
    description, category id and object region under flat keys.

    :return: the AddImage response dict extended with 'cate_desc',
        'cate_id' and 'obj_region'
    """
    r = my_image_upload_base(requestClient, endpoint, instanceName, ProductId, image_name, image_path, cate_id, cate_desc, obj_region)
    #print("== image upload return result ==")
    #pprint(r)
    cate_desc = match_cate_desc(r['PicInfo']['CategoryId'])
    r['cate_desc'] = cate_desc
    r['cate_id'] = r['PicInfo']['CategoryId']
    r['obj_region'] = r['PicInfo']['Region']
    #pprint(r)
    #display(my_image_preview(image_path, r['obj_region'], r['cate_desc']))
    #print(image_path, ' | found category_desc: ', r['cate_desc'], r['cate_id'], ' | found category_id: ', r['cate_id'], ' | found region: ', r['obj_region'])
    return r
###########################################################
###########################################################
def my_image_search_base(requestClient, instanceName, image_path):
    """Run a similarity search with the image at *image_path* as the query.

    :return: the parsed JSON response of the SearchImage call
    """
    request = SearchImageRequest.SearchImageRequest()
    request.set_InstanceName(instanceName)
    # Query image bytes are sent base64-encoded.
    with open(image_path, 'rb') as imgfile:
        encoded_pic_content = base64.b64encode(imgfile.read())
        request.set_PicContent(encoded_pic_content)
    response = requestClient.do_action_with_exception(request)
    r = json.loads(response)
    #pprint(r)
    return r
def my_image_search_for_category_detection(requestClient, instanceName, image_path):
    """Search with the image and resolve the detected category id to its
    display name using the AllCategories list from the response.

    :return: the search response dict extended with 'cate_desc',
        'cate_id' and 'obj_region'
    """
    r = my_image_search_base(requestClient, instanceName, image_path)
    #r = json.loads(r)
    #pprint(r)
    category_desc = ''
    # Look the detected CategoryId up in the response's category table.
    for c in r['PicInfo']['AllCategories']:
        if r['PicInfo']['CategoryId'] == c['Id']:
            category_desc = c['Name']
    r['cate_desc'] = category_desc
    r['cate_id'] = r['PicInfo']['CategoryId']
    r['obj_region'] = r['PicInfo']['Region']
    return r
def my_image_search_for_category_detection_display(requestClient, instanceName, image_path):
    """Like my_image_search_for_category_detection, but also renders the
    query image with its detected region/category (IPython display)."""
    r = my_image_search_for_category_detection(requestClient, instanceName, image_path)
    display(my_image_preview(image_path, r['obj_region'], r['cate_desc']))
    #print(image_path, ' | found category_desc: ', r['cate_desc'], r['cate_id'], ' | found category_id: ', r['cate_id'], ' | found region: ', r['obj_region'])
    return r
###########################################################
###########################################################
def my_image_search_for_similarity(requestClient, instanceName, image_path):
    """Search for the most similar indexed image and render the query image
    and the hit side by side with the similarity score (IPython display).

    NOTE(review): the hit at Auctions[1] (the second result) is used --
    presumably because Auctions[0] is the query image itself when it is in
    the index; confirm against the instance's content.

    :return: the raw search response dict
    """
    r = my_image_search_base(requestClient, instanceName, image_path)
    #r = json.loads(r)
    #pprint(r)
    category_desc = ''
    for c in r['PicInfo']['AllCategories']:
        if r['PicInfo']['CategoryId'] == c['Id']:
            category_desc = c['Name']
    #pprint(r)
    #print(image_path, 'found category_desc: ', category_desc, r['PicInfo']['Category'], 'found category_id: ', r['PicInfo']['Category'], 'found region: ', r['PicInfo']['Region'])
    #image_similar_name = r['Auctions'][1]['PicName']
    #image_similar_path = r['Auctions'][1]['CustomContent']
    # Metadata stored at upload time is recovered from CustomContent JSON.
    image_similar_name = json.loads(r['Auctions'][1]['CustomContent'])['name']
    image_similar_path = json.loads(r['Auctions'][1]['CustomContent'])['path']
    image_similar_score = r['Auctions'][1]['SortExprValues']
    category_desc = json.loads(r['Auctions'][1]['CustomContent'])['cate_desc']
    obj_region = json.loads(r['Auctions'][1]['CustomContent'])['obj_region']
    print(image_path)
    print("similar score: ", image_similar_score, "similar image: ", image_similar_path)
    #print(r['Auctions']['Auction'][1])
    img1 = my_image_preview(image_path, obj_region, category_desc)
    img2 = my_image_preview(image_similar_path, '0,0,0,0', 'most_similart_to', 'green')
    # Compose both previews onto one canvas wide enough for a 40px gutter.
    img_height = img1.size[1]
    if img1.size[1] < img2.size[1]:
        img_height = img2.size[1]
    img = Image.new('RGB', (img1.size[0]+img2.size[0]+40, img_height), "white")
    img.paste(img1, (0, 0))
    img.paste(img2, (img1.size[0]+40, 0))
    #print(img)
    # Annotate the gutter between the two images.
    draw = ImageDraw.Draw(img)
    draw.text((img1.size[0]+20, img_height/2), '=>', fill="green")
    draw.text((img1.size[0]+20, img_height/2+10), 'most', fill="red")
    draw.text((img1.size[0]+20, img_height/2+20), 'similar', fill="red")
    draw.text((img1.size[0]+20, img_height/2+30), 'to', fill="red")
    draw.text((img1.size[0]+20, img_height/2+40), '=>', fill="green")
    sim_score = "{0:.0%}".format(float(image_similar_score.split(';')[0]))
    draw.text((img1.size[0]+20, img_height/2+50), image_similar_score, fill="red")
    #display(img1)
    #display(img2)
    display(img)
    return r
| jhs2jhs/AlibabaCloud_ImageSearch_Demo_py2 | myutil.py | myutil.py | py | 8,204 | python | en | code | 1 | github-code | 36 |
15770209264 |
# coding: utf-8
# In[ ]:
class Weighted_Graph():
    """Weighted graph represented as an edge list.

    Attributes:
        :self.v: int - number of vertices
        :self.graph: list - list of edges (u, v, w) with 0-based endpoints

    Methods:
        :add_edge(u,v,w): adds an edge of weight w between vertices u and v
            (vertices given 1-based)
        :ford_bellman_algo(s=0): list - minimum distances from vertex s to the
            others; entries affected by negative-weight edges are marked 'x'
        :list_to_dict(Oriented=False): converts the representation from an
            edge list to a dict of dicts
        :components_of_connection: int - number of connected components; only
            works with the dict-of-dicts representation
    """
    def __init__(self, vert):
        self.v = vert
        self.graph = []
    def add_edge(self, u, v, w=1):
        # Input vertices are 1-based; they are stored 0-based.
        self.graph.append((u-1, v-1, w))
    def ford_bellman_algo(self, s = 0):
        m = len(self.graph)
        d = [None]*self.v # distance array for the results
        d[s] = 0 # the distance to the start vertex is 0
        b = float('inf')
        # Standard Bellman-Ford: v-1 relaxation phases.
        for i in range(self.v - 1):
            for u, v, w in self.graph:
                if d[u] is not None:
                    d[v] = min(b if d[v] is None else d[v], d[u] + w) # relaxation
        graph = list(d)
        # extra phase to detect edges still relaxable (negative cycles)
        for u, v, w in self.graph: # run one more phase
            #print('--->', u,v,w)
            if d[u] is not None:
                d[v] = min(b if d[v] is None else d[v], d[u] + w)
        # compare whether any path cost changed after the extra phase
        # NOTE(review): vertices that are unreachable (d[i] is None) are also
        # marked 'x' here -- confirm that is the intended contract.
        for i in range(self.v):
            if d[i] != graph[i] or d[i] == None:
                d[i] = 'x'
        return d
    def list_to_dict(self, Oriented = False):
        # Convert the edge list to {u: {v: w, ...}, ...}; for undirected
        # graphs every edge is mirrored.
        graph = {i:{} for i in range(self.v)}
        for u, v, w in self.graph:
            graph[u].update({v: w})
            if not Oriented:
                graph[v].update({u: w})
        self.graph = graph
    def components_of_connection(self):
        visited = [False]*self.v # visited flags of length n, initially all False
        def dfs(u: int):
            visited[u] = True
            for v in self.graph[u].keys():
                if not visited[v]:
                    dfs(v)
        n_comp = 0
        # NOTE(review): the loop starts at vertex 1, so vertex 0 is never used
        # as a DFS root and a component containing only vertex 0 would not be
        # counted -- confirm whether range(self.v) was intended.
        for i in range(1, self.v):
            if not visited[i]:
                dfs(i)
                n_comp+=1
        return n_comp
if __name__ == "__main__":
    # Bug fix: this demo previously called FindSubString, which is not
    # defined anywhere in this module (copy-paste from a string-matching
    # script), so running the file always raised NameError.  Exercise the
    # Weighted_Graph / Bellman-Ford code defined above instead.
    n = int(input('Enter the number of vertices '))
    m = int(input('Enter the number of edges '))
    graph = Weighted_Graph(n)
    for _ in range(m):
        # Edges are entered 1-based as "u v w".
        u, v, w = map(int, input('Enter an edge as "u v w" ').split())
        graph.add_edge(u, v, w)
    print(' '.join(map(str, graph.ford_bellman_algo())))
| annykay/problrms_ROSALND | Bellman_FordAlgo.py | Bellman_FordAlgo.py | py | 3,407 | python | ru | code | 0 | github-code | 36 |
73679903143 | import telebot
from telebot import types
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
bot = telebot.TeleBot("") # Replace with your own bot token!
# Firebase Realtime Database URL (empty placeholder -- fill in before running).
DB_URL = ''
# NOTE(review): user_dict is never populated in this module -- confirm it is
# still needed before removing.
user_dict = {}
class User:
    """Per-user form state container.

    NOTE(review): this class is not instantiated anywhere in the visible
    module -- confirm it is still needed.
    """
    def __init__(self, tgid):
        self.tgid = tgid
        # Full name, filled in by the form flow.
        self.fio_name = ''
        # Faculty, filled in by the form flow.
        self.fac = ''
@bot.message_handler(commands=['start'])
def send_welcome(message):
    """Handle /start: show the two-button main menu and route the next
    message to markup_handler."""
    #print(user_id_tg)
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    markup.add('✏️ | Заполнить заявку', '📗 | О нас')
    msg = bot.send_message(message.chat.id, 'Привет!👋🏾 Выбери нужный тебе пункт меню! ⬇️⬇️⬇️', reply_markup=markup)
    #print(msg.chat.username)
    bot.register_next_step_handler(msg, markup_handler)
def markup_handler(message):
    """Route the main-menu choice: start the application form, or show the
    'about us' text and loop back to the menu."""
    if message.text == '✏️ | Заполнить заявку':
        msg = bot.send_message(message.chat.id, 'Как тебя зовут?©️')
        bot.register_next_step_handler(msg, fio_handler)
    elif message.text == '📗 | О нас':
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
        markup.add('⬅️ | Вернуться')
        # NOTE(review): the next-step handler is registered on the incoming
        # message before the "about" text is sent below -- confirm this is
        # the intended ordering.
        bot.register_next_step_handler(message, send_welcome)
        msg = bot.send_message(message.chat.id,
                           'Привет! 🤟🏿 Я первый киберспортивный 🤖бот-помощник,\n'
                           'который проведет тебя в мир игр! 👾\n'
                           'С моей помощью ты сможешь найти новых друзей,🤝\n'
                           'научить или научиться чему-то новому!\n'
                           'Преодолеть все границы и стать настоящим победителем! 🏆\n\n'
                           'С уважением, команда ODIN⚡️', reply_markup=markup)
def handle_return(message):
    """Re-show the main menu and re-register the menu routing handler."""
    send_welcome(message)
    bot.register_next_step_handler(message, markup_handler)
def fio_handler(message):
    """Store the user's name plus Telegram ids and ask for the faculty."""
    user_info = {
        'tg_id': message.from_user.id,
        'username' : message.chat.username,
        'fio' : message.text
    }
    msg = bot.send_message(message.chat.id, 'На каком факультете ты обучаешься?💎')
    # user_info is threaded through the remaining form steps as an extra arg.
    bot.register_next_step_handler(msg, faculty_handler, user_info)
def faculty_handler(message, user_info):
    """Store the faculty and ask for the game discipline (keyboard choice)."""
    user_info['faculty'] = message.text
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    markup.add('CS2','Dota 2', 'LoL', 'Valorant')
    msg = bot.send_message(message.chat.id, 'В какой дисциплине ты хочешь принимать участие?⚖️',reply_markup=markup)
    bot.register_next_step_handler(msg, disciplines_handler, user_info)
def disciplines_handler(message, user_info):
    """Store the chosen discipline and ask for achievements."""
    user_info['disc'] = message.text
    msg = bot.send_message(message.chat.id, 'Кратко расскажи о своих достижениях 📝')
    bot.register_next_step_handler(msg, achievements_handler, user_info)
def achievements_handler(message, user_info):
    """Store the achievements, confirm to the user, loop back to the menu
    and persist the completed application."""
    user_info['achi'] = message.text
    # Debug output of the completed application.
    print(user_info)
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    markup.add('⬅️ | Вернуться')
    bot.send_message(message.chat.id, 'Спасибо! Твой запрос обработан и скоро будет рассмотрен!🔔',reply_markup=markup)
    bot.register_next_step_handler(message,send_welcome)
    save_to_database(user_info)
def save_to_database(user_info):
    """Initialise the Firebase app once (credentials from admin.json) and
    persist the application."""
    if not firebase_admin._apps:
        cred = credentials.Certificate('admin.json')
        firebase_admin.initialize_app(cred, {'databaseURL': DB_URL})
    # Write the user data to the Realtime Database.
    write_user_data(user_info)
# Write the collected user data to the Firebase Realtime Database and
# notify the admin.
def write_user_data(user_info):
    # One record per Telegram user id under the "Telegram" node.
    ref = db.reference('Telegram/' + str(user_info['tg_id']))
    ref.set({
        '6 - достижения': user_info['achi'],
        '5 - дисциплины': user_info['disc'],
        '4 - факультет': user_info['faculty'],
        '3 - ФИО': user_info['fio'],
        '2 - Nickname': "@" + user_info['username'],
        '1 - TelegramID': user_info['tg_id']
    })
    #716578611
    #428571723
    # NOTE(review): 999999999 looks like a placeholder admin chat id (real
    # ids are kept in the comments above) -- confirm before deploying.
    send_notific(999999999, user_info)
def send_notific(ADMIN_ID, user_info):
    """Send the admin a MarkdownV2-formatted notification with a link to the
    applicant and the submitted form fields."""
    text = 'Пользователь ['+user_info['fio']+'](https://t.me/'+user_info['username']+') оставил\(а\) заявку:\nфакультет: '+user_info['faculty']+'\nдисциплины: '+user_info['disc']+'\nдостижения: '+user_info['achi']
    bot.send_message(ADMIN_ID, text, parse_mode='MarkdownV2')
# Start the bot's long-polling loop (blocks the main thread).
bot.polling()
8639664963 | # -*- coding: utf-8 -*-
#
# Virtual Satellite 4 - FreeCAD module
#
# Copyright (C) 2019 by
#
# DLR (German Aerospace Center),
# Software for Space Systems and interactive Visualization
# Braunschweig, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
from json_io.products.json_product import AJsonProduct
from json_io.json_definitions import JSON_ELEMNT_CHILDREN, PRODUCT_IDENTIFIER, PART_IDENTIFIER, \
_get_combined_name_uuid, JSON_ELEMENT_NAME, JSON_ELEMENT_UUID
from json_io.products.json_product_child import JsonProductChild
from json_io.json_spread_sheet import FREECAD_PART_SHEET_NAME
from freecad.active_document import ActiveDocument
from itertools import compress
import FreeCAD
import os
from A2plus.a2p_importpart import updateImportedParts
Log = FreeCAD.Console.PrintLog
class JsonProductAssembly(AJsonProduct):
    '''
    This class represents an assembly, which consists of several children
    which basically reference the parts to be imported to this assembly.
    The parts/children contain information about their position and orientation.
    This information is processed to correctly place the parts in the assembly.
    The assembly itself can also have a referenced part. But his one does
    not contain information about its position and rotation. In the current assembly,
    this part is supposed to be imported in the current origin of the assembly.
    In case this assembly is a sub assembly it may have a position and rotation.
    Nevertheless in this particular case, the whole assembly is supposed to be positioned
    and rotated in the super assembly. Actually this assembly is than a child product of
    the super assembly.
    '''
    def _parse_position_and_rotation_from_json(self, json_object):
        '''
        An assembly does not have a position or orientation. If it has these properties
        than it is a sub assembly which has to be processed as a child of the containing
        super assembly.
        '''
        pass
    def parse_from_json(self, json_object):
        '''
        This time the parse method follows the convention
        to not parse the position and orientation. Actually it gets called
        in the super method which refers to the protected method for
        importing position and orientation. This method is overridden in this
        class without implementation. Additionally this method starts parsing
        the children.

        Returns self when at least one child was parsed, otherwise None.
        '''
        super().parse_from_json(json_object)
        # Get all children from the json and try to parse them
        # into JsonProductChild objects
        json_object_children = list(json_object[JSON_ELEMNT_CHILDREN])
        self.children = []
        for json_object_child in json_object_children:
            json_product_child = JsonProductChild().parse_from_json(json_object_child)
            self.children.append(json_product_child)
        # Don't hand back an assembly if there are no children
        if len(self.children) > 0:
            return self
        else:
            return None
    def parse_to_json(self, isRoot=False):
        '''
        Serializes this assembly (and its children) back to a JSON dict.
        The root assembly only exposes name and uuid; any child that
        references the same part as the assembly itself is skipped.
        '''
        if(isRoot):
            json_dict = {
                JSON_ELEMENT_NAME: self.name,
                JSON_ELEMENT_UUID: self.uuid
            }
        else:
            json_dict = super().parse_to_json()
        children_dicts = []
        for child in self.children:
            if(isRoot):
                children_dicts.append(child.parse_to_json())
            else:
                # ignore part of product assembly
                if(not child.get_unique_name() == self.get_unique_name()):
                    children_dicts.append(child.parse_to_json())
        json_dict[JSON_ELEMNT_CHILDREN] = children_dicts
        return json_dict
    def write_to_freecad(self, active_document):
        '''
        Writes this assembly into the given FreeCAD document: existing
        products are updated in place, new ones created, and products no
        longer present in the JSON are removed from the document.
        '''
        # This assembly may refer to a part as well
        # hence if there is a partUuid and if there is a part name, than
        # it should be written to the FreeCAD document as well.
        old_products = self.get_products_of_active_document(active_document)
        old_product_names = [o[0].Label for o in old_products]
        # store if a product has to be deleted
        # (because it doesn't exist in the new imported JSON file)
        delete_products = [True] * len(old_product_names)
        update_count = 0
        if self.is_part_reference():
            name = _get_combined_name_uuid(self.part_name, self.part_uuid)
            if(name in old_product_names):
                # update
                update_count += 1
                super().write_to_freecad(active_document, create=False)
                delete_products[old_product_names.index(name)] = False
            else:
                # create
                super().write_to_freecad(active_document)
        # And now write the children, they decide on their own if they reference
        # part or a product
        for child in self.children:
            name = child.get_unique_name()
            if(name in old_product_names):
                # update
                update_count += 1
                child.write_to_freecad(active_document, create=False)
                delete_products[old_product_names.index(name)] = False
            else:
                # create
                child.write_to_freecad(active_document)
        # delete remaining old products
        old_products = list(compress(old_products, delete_products))
        for old_product in old_products:
            active_document.app_active_document.removeObject(old_product[0].Name)
            active_document.app_active_document.removeObject(old_product[1].Name)
        # only if there were updates instead of creates
        if(update_count > 0):
            # update already read in parts
            updateImportedParts(active_document.app_active_document)
    def read_from_freecad(self, active_document, working_output_directory, part_list, freecad_object=None, freecad_sheet=None):
        """
        Reads an ProductAssembly from FreeCAD
        Then calls read_from_freecad of his children (either another assembly or a ProductChild)
        """
        products_with_sheets = self.get_products_of_active_document(active_document)
        # read the assembly
        super().read_from_freecad(active_document, working_output_directory, part_list, freecad_object, freecad_sheet)
        self.children = []
        # read the children
        for product, sheet in products_with_sheets:
            name, label = product.Name, product.Label
            # use the source file of a2plus part
            # then get the file name (.split(os.path.sep)[-1]) and ignore the FreeCAD file ending ([:-6])
            child_file_name = product.sourceFile.split(os.path.sep)[-1][:-6]
            # open the document for this child
            child_document = ActiveDocument(working_output_directory).open_set_and_get_document(child_file_name)
            if(PRODUCT_IDENTIFIER in name):
                Log(f"Read ProductAssembly '{label}'\n")
                child = JsonProductAssembly()
            else:
                Log(f"Read Product '{label}'\n")
                child = AJsonProduct()
            child.read_from_freecad(child_document, working_output_directory, part_list, freecad_object=product, freecad_sheet=sheet)
            child_document.close_active_document(child_file_name)
            self.children.append(child)
    def get_products_of_active_document(self, active_document):
        """
        Accesses, sorts and filters objects of the current document.
        NOTE: A document always contains productAssemblies or productChild as long as it is an assembly itself
        Only a document that references one part, thus contains the PART_IDENTIFIER in it's name, references a part
        Returns a list of found products (that have a sheet) and the corresponding sheets
        """
        products, sheets = [], []
        for obj in active_document.app_active_document.Objects:
            name, label = obj.Name, obj.Label
            Log("Object: {}, {}\n".format(name, label))
            if(FREECAD_PART_SHEET_NAME in name):
                sheets.append(obj)
                Log("Object is sheet\n")
            elif(PRODUCT_IDENTIFIER in name or PART_IDENTIFIER in name):
                products.append(obj)
                Log("Object is product\n")
        # Pair every product with its sheet by matching labels.
        products_with_sheets = []
        for product in products:
            for sheet in sheets:
                if(product.Label in sheet.Label):
                    products_with_sheets.append((product, sheet))
        Log(f"Found products with sheets: '{[(p.Label, s.Label) for p, s in products_with_sheets]}'\n")
        return products_with_sheets
    def get_product_unique_name(self):
        # Unique document name: product prefix + "name_uuid".
        return PRODUCT_IDENTIFIER + _get_combined_name_uuid(self.name, self.uuid)
| virtualsatellite/VirtualSatellite4-FreeCAD-mod | VirtualSatelliteCAD/json_io/products/json_product_assembly.py | json_product_assembly.py | py | 9,732 | python | en | code | 9 | github-code | 36 |
70911138665 | #М8О-301Б-19
#Цыкин Иван
#Вариант 5
#Эллипсойд
from OpenGL.GLUT import * #Подключение библиотек
from OpenGL.GL import *
from OpenGL.GLU import *
import math
# constants
# NOTE(review): ngon/angle_step/r1_step/r2_step/delta/theta1 are not
# referenced by the functions visible in this file -- confirm before removing.
ngon=60
angle_step=2*math.pi/ngon
r1_step = 0.005
r2_step = 0.001
delta=0.6
theta1 = 2*math.pi/ngon
# current rotation angles around the X and Y axes (degrees)
xrot = 0.2
yrot = 0.0
# tessellation subdivision counts, doubled by F4/F5
d1 = 1
d2 = 1
# ellipsoid semi-axes, grown by F1/F2/F3
a = 0.5
b = 0.5
c = 0.5
def init():
    glClearColor(0.9, 0.9, 0.9, 1.0) # grey background for the initial clear
    gluOrtho2D(-1.0, 1.0, -1.0, 1.0) # set the drawing bounds horizontally and vertically
    glEnable ( GL_DEPTH_TEST ) # configure depth buffering
    glDepthMask ( GL_TRUE )
    glDepthFunc ( GL_LEQUAL )
def Draw(uistacks, uislices, fA, fB, fC):
    """Draw the filled surface of an ellipsoid with semi-axes fA, fB, fC.

    The surface is built as bands of quad strips over the parametric form
    x = fA*cos(t)*cos(s), y = fB*cos(t)*sin(s), z = fC*sin(t).

    :param uistacks: subdivisions along s within a band
    :param uislices: number of bands along t
    """
    tstep = math.pi/uislices
    sstep = math.pi/uistacks
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
    for i in range(2*uislices):
        t = tstep*i
        # Bug fix: glBegin was passed GL_POLYGON_BIT, which is a glPushAttrib
        # mask, not a primitive.  It only rendered because GL_QUAD_STRIP has
        # the same numeric value (0x0008); use the correct constant for the
        # strip of quads emitted below (behavior is unchanged).
        glBegin(GL_QUAD_STRIP)
        for j in range(uistacks+1):
            s = sstep*j
            glVertex3f(fA * math.cos(t) * math.cos(s), fB * math.cos(t) * math.sin(s), fC * math.sin(t))
            glVertex3f(fA * math.cos(t+tstep) * math.cos(s), fB *math.cos(t+tstep) * math.sin(s), fC * math.sin(t+tstep))
        glEnd()
def Draw2(uistacks, uislices, fA, fB, fC):
    """Draw the wireframe (grid lines) of the same ellipsoid as Draw().

    :param uistacks: subdivisions along s within a band
    :param uislices: number of bands along t
    """
    tstep = math.pi/uislices
    sstep = math.pi/uistacks
    # Line mode turns each quad of the strip into its outline.
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
    for i in range(2*uislices):
        t = tstep*i
        # Bug fix: GL_POLYGON_BIT (attrib mask, value 0x08) replaced with the
        # intended primitive GL_QUAD_STRIP (same numeric value, so the
        # rendering is unchanged).
        glBegin(GL_QUAD_STRIP)
        for j in range(uistacks+1):
            s = sstep*j
            glVertex3f(fA * math.cos(t) * math.cos(s), fB * math.cos(t) * math.sin(s), fC * math.sin(t))
            glVertex3f(fA * math.cos(t+tstep) * math.cos(s), fB *math.cos(t+tstep) * math.sin(s), fC * math.sin(t+tstep))
        glEnd()
def display(): # render callback
    global xrot, yrot, zrot
    # NOTE(review): zrot is declared global but never defined or used -- the
    # declaration itself is harmless; confirm before removing.
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    # Camera at z=10 looking at the origin, Y up.
    gluLookAt(0.0, 0.0, 10.0,0.0, 0.0, 0.0,0.0, 1.0, 0.0)
    glRotatef(xrot, 1.0, 0.0, 0.0)
    glRotatef(yrot, 0.0, 1.0, 0.0)
    # Filled surface in green...
    glPointSize(1)
    glLineWidth(1)
    glColor3f(0.37, 0.83, 0.61)
    Draw(d1, d2, a, b, c)
    # ...with a black wireframe on top.
    glPointSize(3)
    glLineWidth(3)
    glColor3f(0.0, 0.0, 0.0)
    Draw2(d1, d2, a, b, c)
    glFlush()
    glutSwapBuffers()
def specialkeys(key, x, y): # keyboard handler
    global xrot
    global yrot
    global a
    global b
    global c
    global d1
    global d2
    # Arrow-key handlers
    if key == GLUT_KEY_UP: xrot -= 2.0 # decrease the rotation angle around X
    if key == GLUT_KEY_DOWN: xrot += 2.0 # increase the rotation angle around X
    if key == GLUT_KEY_LEFT: yrot -= 2.0 # decrease the rotation angle around Y
    if key == GLUT_KEY_RIGHT: yrot += 2.0 # increase the rotation angle around Y
    # F1..F3 grow the semi-axes, wrapping back to 0.5.
    if key == GLUT_KEY_F1:
        if a<3.5: a += 0.3
        else: a = 0.5
    if key == GLUT_KEY_F2:
        if b<3.5: b += 0.3
        else: b = 0.5
    if key == GLUT_KEY_F3:
        if c<3.5: c += 0.3
        else: c = 0.5
    # F4/F5 double the tessellation counts, wrapping back to 1.
    if key == GLUT_KEY_F4:
        if d1<128: d1*=2
        else: d1 = 1
    if key == GLUT_KEY_F5:
        if d2<128: d2*=2
        else: d2 = 1
    glutPostRedisplay()
def resize(*args):
    """GLUT reshape callback: rebuild the projection for the new window size.

    :param args: (width, height) as supplied by glutReshapeFunc
    """
    width, height = args[0], args[1]
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glViewport(0, 0, width, height)
    # Bug fix: the aspect ratio used to be computed as width / width, which
    # is always 1.0 and distorts the scene in non-square windows; use
    # width / height (guarding against a zero height on minimize).
    gluPerspective(45.0, 1.0 * width / max(height, 1), 1.0, 100.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
# main: create the window, register the callbacks and enter the GLUT loop.
glutInit(sys.argv)
glutInitWindowPosition(50, 50)
glutInitWindowSize(1200, 800)
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE)
glutCreateWindow("CG3")
glutDisplayFunc(display)
glutReshapeFunc(resize)
glutSpecialFunc(specialkeys)
init()
glutMainLoop()
38230641429 | from __future__ import division, print_function
import numpy as np
import scipy.linalg
from MatrixIO import load, store
import click
def lqr(A, B, Q, R):
    """Solve the continuous time lqr controller.

    dx/dt = A x + B u
    cost = integral x.T*Q*x + u.T*R*u

    Returns (K, P): the feedback gain and the Riccati solution.
    """
    # ref Bertsekas, p.151: solve the continuous algebraic Riccati equation.
    riccati_solution = scipy.linalg.solve_continuous_are(A, B, Q, R)
    P = np.matrix(riccati_solution)
    # LQR gain: K = R^-1 B^T P
    K = np.matrix(scipy.linalg.inv(R) * (B.T * P))
    return K, P
def dlqr(A,B,Q,R):
    """Solve the discrete time lqr controller.

    x[k+1] = A x[k] + B u[k]
    cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]

    Returns (K, P): the feedback gain and the Riccati solution.
    """
    #ref Bertsekas, p.151
    #first, try to solve the ricatti equation
    P = np.matrix(scipy.linalg.solve_discrete_are(A, B, Q, R))
    # Bug fix: the gain formula referenced an undefined name ``X`` (a
    # copy-paste remnant), so calling dlqr always raised NameError.  Use the
    # Riccati solution ``P`` computed above: K = (B'PB + R)^-1 B'PA.
    K = np.matrix(scipy.linalg.inv(B.T*P*B+R)*(B.T*P*A))
    return K, P
@click.command()
@click.option('-A', type=click.Path(exists=True))
@click.option('-B', type=click.Path(exists=True))
@click.option('-Q', type=click.Path(exists=True))
@click.option('-R', type=click.Path(exists=True))
@click.option('-Kout', type=click.Path())
@click.option('-Pout', type=click.Path())
def run_lqr(a, b, q, r, kout, pout):
    """CLI entry point: load A, B, Q, R matrices from files, solve the
    continuous-time LQR problem and store K and P to the output paths.

    (click lower-cases the option names, hence the a/b/q/r parameters.)
    """
    A = load(a)
    B = load(b)
    Q = load(q)
    R = load(r)
    K, P = lqr(A,B,Q,R)
    store(kout, K)
    store(pout, P)
if __name__ == '__main__':
    run_lqr()
| Zomega/thesis | Wurm/Stabilize/LQR/python/LQR.py | LQR.py | py | 1,450 | python | en | code | 0 | github-code | 36 |
34887554995 | import django_filters
from .models import *
from django_filters import DateFilter, CharFilter, NumberFilter
from django.forms.widgets import TextInput, NumberInput, DateInput, SelectDateWidget
# class TitleFilter(django_filters.FilterSet):
# title = CharFilter(field_name='title', lookup_expr='icontains',
# widget=TextInput(attrs={
# 'p laceholder': "Znajdź swoją roślinę!",
# 'class': "form-control me-2 button-search-right"}))
#
# class Meta:
# model = Offer
# fields = ['title']
class OfferFilter(django_filters.FilterSet):
    # Filter set for Offer listings: posting-date range and price range.
    # Offers posted on or after this date.
    start_date = DateFilter(field_name='date_posted', lookup_expr='gte', label='Data od:',
                            widget=SelectDateWidget(empty_label=('rok', 'miesiąc', 'dzień'), attrs={
                                'class': "form-control me-2 button-search-right",
                                'style': 'width: auto; display: inline-block;'}))
    # Offers posted on or before this date.
    end_date = DateFilter(field_name='date_posted', lookup_expr='lte', label='Data do:',
                          widget=SelectDateWidget(empty_label=('rok', 'miesiąc', 'dzień'), attrs={
                              'class': "form-control me-2 button-search-right",
                              'style': 'width: auto; display: inline-block;'}))
    # Minimum price (inclusive).
    cheapest = NumberFilter(field_name='price', lookup_expr='gte', label='Cena od',
                            widget=NumberInput(attrs={
                                'class': "form-control me-2 button-search-right",
                                'style': 'width: auto; display: inline-block; margin: 4px'}))
    # Maximum price (inclusive).
    expensive = NumberFilter(field_name='price', lookup_expr='lte', label='Cena do',
                             widget=NumberInput(attrs={
                                 'class': "form-control me-2 button-search-right",
                                 'style': 'width: auto; display: inline-block; margin: 4px'}))
    class Meta:
        model = Offer
        # NOTE(review): declaring both ``fields`` and ``exclude`` on a
        # FilterSet Meta is discouraged by django-filter -- confirm this
        # combination works on the project's django-filter version.
        fields = '__all__'
        exclude = ['seller', 'description', 'date_posted', 'title', 'price', ]
| viginti23/project-home-gardens | home/filters.py | filters.py | py | 2,126 | python | en | code | 0 | github-code | 36 |
932080591 | import argparse
import sys
from FitsToPNG import main_run
from FitsMath import calibration_compute_process
from JsonConvert import JsonConvert
def argument_handling():
    """Parse the command line arguments.

    :return: path to the json configuration file (``-j``/``--json``)
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-j', '--json',
                            required=True,
                            type=str,
                            help='Insert json file path')
    parsed = arg_parser.parse_args()
    return parsed.json
def fits_to_png_proc(path_arr: list):
    """Run the FITS-to-PNG conversion.

    :param path_arr: two-element sequence [fits_path, png_path]
    """
    fits_path, png_path = path_arr
    main_run(fits_path, png_path)
def validate_files(bias_file, dark_file, flats_file, light_file) -> dict:
    """Collect the non-empty calibration frame lists under their type names.

    :param bias_file: list of bias frame paths (may be empty)
    :param dark_file: list of dark frame paths (may be empty)
    :param flats_file: list of flat frame paths (may be empty)
    :param light_file: list of light frame paths (may be empty)
    :return: dict mapping the frame-type name ('bias'/'dark'/'flat'/'light')
        to its list; empty lists are omitted entirely
    """
    # Table-driven instead of four copy-pasted if-blocks; key order matches
    # the original (bias, dark, flat, light).
    named_lists = (('bias', bias_file), ('dark', dark_file),
                   ('flat', flats_file), ('light', light_file))
    return {name: files for name, files in named_lists if len(files) > 0}
if __name__ == '__main__':
    # sys.argv = ['main.py', '-j', './stam.json']
    json_file_path = argument_handling()
    # The JSON config drives either a plain FITS->PNG conversion or the full
    # calibration pipeline.
    data = JsonConvert(json_file_path)
    fits_to_png, bias, dark, flats, light, output_master_bias, output_master_dark, output_master_flat, output_calibration_file, output_calibration_folder, solve_stars_plate = data.load_data()
    if fits_to_png:
        fits_to_png_proc(fits_to_png)
    else:
        # Drop empty frame lists, then run the calibration computation.
        files = validate_files(bias, dark, flats, light)
        calibration_compute_process(files, output_master_bias, output_master_dark, output_master_flat,
                                    output_calibration_file, output_calibration_folder, solve_stars_plate)
| AstroPhotometry/AstroPhotometry | python/main.py | main.py | py | 1,913 | python | en | code | 1 | github-code | 36 |
5145048204 | from .range_borders import Date, FromInfinity, ToInfinity
from datetime import timedelta
class DateRange:
    """
    This class implements date ranges that support open borders,
    so it is possible to create date ranges that contain all dates up to
    a specific date or all dates from a specific date on. Strict ranges with
    specific dates as borders are supported as well.
    The implementation does not support any kind of daytime measurement.
    """

    # DateRange supports all formats that are supported by the datetime module
    # '%Y-%m-%d' is the predefined format
    DATE_FORMAT: str = '%Y-%m-%d'

    #######################################################################################
    # the following methods can be called on every DateRange instance (infinite and finite)

    def __init__(self, date_from: str or None, date_to: str or None):
        """
        Creates a new DateRange. date_to must be greater or equal to date_from.

        :param date_from: None value represents an open border
        :param date_to: None value represents an open border
        :raises ValueError: if both borders are given and date_to < date_from
        """
        if date_from is None:
            self._date_from = FromInfinity()
        else:
            self._date_from = Date(date_from, DateRange.DATE_FORMAT)
        if date_to is None:
            self._date_to = ToInfinity()
        else:
            self._date_to = Date(date_to, DateRange.DATE_FORMAT)

        # is set in the first call of the __len__ function
        self._length = None
        self._is_infinite = date_from is None or date_to is None

        if not self._is_infinite:
            # Fixed defect: the original compared the raw strings
            # (date_to < date_from).  Lexicographic string order only matches
            # chronological order for formats like the default '%Y-%m-%d';
            # for e.g. '%d-%m-%Y' it silently accepts invalid ranges.
            # Comparing the parsed border objects works for every format.
            if self._date_to < self._date_from:
                raise ValueError(f"date_to must be equal or greater than date_form. "
                                 f"{self.__repr__()}")

    def __repr__(self):
        return f"DateRange({self._date_from.to_string(DateRange.DATE_FORMAT)}, " \
               f"{self._date_to.to_string(DateRange.DATE_FORMAT)})"

    def __contains__(self, item: str):
        """Return True if the date string lies inside the range (borders inclusive)."""
        date = Date(item, DateRange.DATE_FORMAT)
        return self._date_from <= date <= self._date_to

    def intersects(self, date_from: str or None, date_to: str or None) -> bool:
        """Return True if at least one date is contained in both ranges."""
        date_range = DateRange(date_from=date_from, date_to=date_to)
        return not (self._date_to < date_range._date_from or date_range._date_to < self._date_from)

    def is_infinite(self) -> bool:
        """Return True if at least one border is open."""
        return self._is_infinite

    ##########################################################################
    # the following methods raise exceptions if called on infinite DateRanges

    def __iter__(self):
        if self._is_infinite:
            raise ValueError(f"infinite date ranges are not iterable. date_range: {self.__repr__()}")
        else:
            self._current = self._date_from.date
            return self

    def __next__(self):
        if self._current > self._date_to.date:
            raise StopIteration
        else:
            ret = self._current.strftime(DateRange.DATE_FORMAT)
            self._current += timedelta(1)
            return ret

    def __len__(self):
        if self._is_infinite:
            raise ValueError(f"length infinite date ranges is not defined. date_range: {self.__repr__()}")
        # length has to be calculated and set only once because the
        # length of a date range can not change
        # !!!---if you want to implement the borders of date ranges to be changeable
        # this method must be reimplemented---!!!
        if self._length is None:
            counter = 0
            # __iter__ can safely be used because __len__ requires a finite date range as well
            for _ in self.__iter__():
                counter += 1
            self._length = counter
        return self._length
| tlie03/OpenDateRange | src/openDateRange/date_range.py | date_range.py | py | 3,833 | python | en | code | 0 | github-code | 36 |
32366680678 | n=input("insertar la secuencia a ser identificada : ")
# Number of candidate sequences to read from the user.
m=int(input("insertar la cantidad de secuencias candidatas"))
def hamming(n, *n1):
    """Print and return the Hamming distance between ``n`` and each candidate.

    Candidates may be passed either as separate positional arguments or as a
    single list/tuple of sequences.  Only positions present in both sequences
    are compared.

    Fixed defect: the original loop (``while i <= len(n)`` with the increment
    before the comparison) always indexed one past the end of the sequences
    and crashed with IndexError.

    :param n: reference sequence (e.g. a string)
    :param n1: candidate sequence(s)
    :return: list with one distance per candidate
    """
    # Accept both hamming(n, "abc", "abd") and hamming(n, ["abc", "abd"]).
    candidates = []
    for arg in n1:
        if isinstance(arg, (list, tuple)):
            candidates.extend(arg)
        elif arg is not None:
            candidates.append(arg)
    distances = []
    for cand in candidates:
        suma = 0
        for i in range(min(len(n), len(cand))):
            if n[i] != cand[i]:
                suma += 1
        print(suma)
        distances.append(suma)
    return distances
def lista(m):
    """Read ``m`` candidate sequences from the user and return them.

    Fixed defect: the original collected the inputs but fell off the end
    without a return statement, so the caller always received None.

    :param m: number of sequences to read
    :return: list with the m strings entered
    """
    valores = []
    for _ in range(m):
        valores.append(input("valores"))
    return valores
# Read the m candidate sequences, then report their Hamming distance to n.
n1=lista(m)
hamming(n,n1)
| masteronprime/python-codigos-diversos | comrparar.py | comrparar.py | py | 452 | python | es | code | 2 | github-code | 36 |
73118895784 | import socket
import pickle
from task11_2_user import User
class ClientUser:
    """TCP client that sends a pickled User to the server and prints the reply."""

    def __init__(self, host, port):
        self.host = host      # server address
        self.port = port      # server port
        self._socket = None   # created lazily in run()

    def run(self):
        """Connect, prompt for the user data, send it and print the response."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as self._socket:
            self._socket.connect((self.host, self.port))
            name = input("Введите имя: ")
            age = input("Введите возраст: ")
            payload = pickle.dumps(User(name, age))
            self._socket.send(payload)
            reply = self._socket.recv(1024)
            print(reply.decode())
if __name__ == "__main__":
client = ClientUser(host='127.0.0.1', port=5432)
client.run()
| IlyaOrlov/PythonCourse2.0_September23 | Practice/julmakarova/task11_2_client.py | task11_2_client.py | py | 657 | python | en | code | 2 | github-code | 36 |
25450914107 | from rest_framework import serializers
from taggit.serializers import TagListSerializerField, TaggitSerializer
from accounts.models import Profile
from ...models import Post, Category
class CategorySerializer(serializers.ModelSerializer):
    """Nested representation of a Category: its name plus a read-only id."""
    class Meta:
        model = Category
        fields = ("name", "id")
        read_only_fields = ("id",)
class AuthorSerializer(serializers.ModelSerializer):
    """Read-only nested representation of a post author's Profile."""
    class Meta:
        model = Profile
        fields = ("first_name", "last_name", "image")
        read_only_fields = ("first_name", "last_name", "image")
class PostSerializer(TaggitSerializer, serializers.ModelSerializer):
    """Serializer for Post objects.

    List responses carry a snippet plus the relative/absolute URLs; detail
    responses (pk present in the URL) carry the full content instead — see
    to_representation().
    """
    tags = TagListSerializerField()
    snippet = serializers.ReadOnlyField(source="get_snippet")
    relative_url = serializers.URLField(source="get_absolute_api_url", read_only=True)
    absolute_url = serializers.SerializerMethodField()
    # category = serializers.SlugRelatedField(slug_field='name', many=True, queryset=Category.objects.all())
    # category = CategorySerializer(many=True)
    class Meta:
        model = Post
        fields = (
            "id",
            "author",
            "image",
            "title",
            "content",
            "snippet",
            "relative_url",
            "absolute_url",
            "category",
            "tags",
            "counted_view",
            "published_date",
        )
        read_only_fields = ("id", "author", "counted_view")
    def get_absolute_url(self, post):
        """Build a URI for the post from the current request.

        NOTE(review): build_absolute_uri(post.pk) resolves the pk relative to
        the current request path — confirm this produces the intended detail
        URL for list views as well.
        """
        request = self.context.get("request")
        return request.build_absolute_uri(post.pk)
    def to_representation(self, instance):
        """Expand related objects and tailor the payload per view type."""
        request = self.context.get("request")
        rep = super().to_representation(instance)
        # Replace the raw foreign keys with nested serializations.
        rep["category"] = CategorySerializer(instance.category, many=True).data
        rep["author"] = AuthorSerializer(instance.author).data
        if request.parser_context.get("kwargs").get("pk"):
            # Detail view: drop the list-only summary fields.
            rep.pop("snippet", None)
            rep.pop("relative_url", None)
            rep.pop("absolute_url", None)
        else:
            # List view: drop the heavy full content.
            rep.pop("content", None)
        return rep
    def create(self, validated_data):
        # Force the author to the authenticated user's profile, ignoring any
        # client-supplied author value.
        request = self.context.get("request")
        validated_data["author"] = Profile.objects.get(user=request.user.id)
        return super().create(validated_data)
| AmirhosseinRafiee/Blog | mysite/blog/api/v1/serializers.py | serializers.py | py | 2,319 | python | en | code | 0 | github-code | 36 |
20681022248 | __author__ = 'elmira'
import numpy as np
from lxml import etree
from collections import Counter
from matplotlib import pyplot as plt
from matplotlib import mlab
def open_corpus(fname):
    """Parse a mystem XML file and return per-sentence part-of-speech counts.

    :param fname: path to the mystem output file
    :return: list with one collections.Counter per sentence, mapping each
        word's first POS tag to its frequency in that sentence
    """
    parser = etree.HTMLParser()  # create an HTML parser
    # feed the mystem XML to the parser, take the body tag and everything inside it
    tree = etree.parse(fname, parser).getroot()[0]
    sents = [Counter([w[0].attrib['gr'].split('=')[0].split(',')[0] for w in se if len(w) != 0]) for se in tree]
    # the comprehension above turns every sentence into a Counter of the POS
    # tags of its words:
    # # if a word is ambiguous, the very first analysis is taken
    # # if mystem failed to analyze a word, the word is ignored
    return sents
def make_features(data):
    """Build an (n_sentences, 5) feature matrix from POS counters.

    Columns: adjectives (A), nouns (S), verbs (V), adverbs (ADV), pronouns
    (noun-, adjective- and adverb-like pronouns pooled together).
    """
    rows = []
    for counts in data:
        pronouns = counts['SPRO'] + counts['APRO'] + counts['ADVPRO']
        rows.append((counts['A'], counts['S'], counts['V'], counts['ADV'], pronouns))
    return np.array(rows)
def main():
    """Run the PCA-based genre comparison: Shakespeare sonnets vs. Anna Karenina."""
    sonets = open_corpus('corpus1.txt')
    anna = open_corpus('corpus2.txt')
    sonets_data = make_features(sonets)
    anna_data = make_features(anna)
    data = np.vstack((sonets_data, anna_data))
    # NOTE(review): matplotlib.mlab.PCA was removed in matplotlib 3.1 — this
    # script needs an old matplotlib to run.
    p = mlab.PCA(data, True)
    N = len(sonets_data)
    print(p.Wt)
    plt.plot(p.Y[N:,0], p.Y[N:,1], 'og', p.Y[:N,0], p.Y[:N,1], 'sb')
    # green dots are Anna Karenina, blue squares are the sonnets
    # Question: is there a linear combination of the features (i.e. the value
    # along the first principal axis) and a threshold such that more than 70%
    # of the sentences of each genre fall on one side of the threshold?
    # Judging by the scatter plot, the answer appears to be yes.
    print('Линейная комбинация и пороговое значение, при которых больше 70% текстов каждого жанра находятся с одной стороны от порогового значения, существуют.')
    # plt.savefig('result.png')
    plt.show()
    # Pick a threshold by eye from the plot such that more than 70% of the
    # Anna Karenina sentences lie to its right and more than 70% of the
    # sonnet sentences lie to its left, for example:
    print('Пороговое значение: -4.2')
    print(sum(p.Y[N:,0]>-4.2)/len(p.Y[N:,0])*100, '- процент предложений "Анны Карениной", которые лежат справа от порога')
    print(sum(p.Y[:N,0]<-4.2)/len(p.Y[:N,0])*100, '- процент предложений сонетов, которые лежат слева от порога')
if __name__ == "__main__":
    main()
class Solution(object):
    def minWindow(self, s, t):
        """Return the smallest substring of ``s`` containing every character
        of ``t`` (with multiplicity), or "" when none exists.

        Sliding window: grow the right edge until the window covers all of
        ``t``, then shrink from the left while it still does, remembering the
        best window seen.  O(len(s) + len(t)) time.

        Fixed defect: the original initialized the best-window record as
        ``float("inf"), int, int`` — using the builtin type objects as
        placeholders, which is confusing and fragile; a plain numeric tuple
        is used instead (behavior unchanged).

        :type s: str
        :type t: str
        :rtype: str
        """
        need = {}
        for ch in t:
            need[ch] = need.get(ch, 0) + 1
        required = len(need)
        window = {}
        formed = 0          # how many distinct chars currently meet their quota
        left = 0
        # (window length, left index, right index) of the best window so far;
        # float("inf") marks "no valid window found yet".
        best = (float("inf"), 0, 0)
        for right, ch in enumerate(s):
            window[ch] = window.get(ch, 0) + 1
            if ch in need and window[ch] == need[ch]:
                formed += 1
            # Shrink from the left while the window still covers all of t.
            while left <= right and formed == required:
                if right - left + 1 < best[0]:
                    best = (right - left + 1, left, right)
                window[s[left]] -= 1
                if s[left] in need and window[s[left]] < need[s[left]]:
                    formed -= 1
                left += 1
        if best[0] == float("inf"):
            return ""
        return s[best[1]:best[2] + 1]
# Quick manual check: expected output is "BANC".
s = Solution()
print(s.minWindow("ADOBECODEBANC","ABC"))
| emirgit/Leetcode | Solutions - Python/Minimum Window Substring.py | Minimum Window Substring.py | py | 1,262 | python | en | code | 0 | github-code | 36 |
43062682888 | import json
import scrapy
from scrapy.crawler import CrawlerProcess
def decodeEmail(e):
    """Decode a Cloudflare-style obfuscated email string.

    The first hex byte is an XOR key; every following hex byte is one
    character XORed with that key.
    """
    key = int(e[:2], 16)
    decoded_chars = [
        chr(int(e[pos:pos + 2], 16) ^ key)
        for pos in range(2, len(e) - 1, 2)
    ]
    return "".join(decoded_chars)
# Static request headers so ufcstats.com serves pages as it would to a
# regular desktop browser.
headers = {
    'Host': 'ufcstats.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'en-IN,en;q=0.9',
}
# NOTE(review): appears unused in this file (and the name is misspelled).
uniqe_clssses = []
class WEbCrawlerInS(scrapy.Spider):
    """Spider that walks every completed UFC event and scrapes each
    fighter's detail page from ufcstats.com.
    """
    name = 'example'
    # Previously scraped fighters keyed by their profile URL.
    # NOTE(review): evaluated at class-definition (import) time, so
    # player_data.json must already exist — confirm the bootstrap order.
    player_map = {x["url"]: x for x in json.loads(open('player_data.json', 'r', encoding='utf-8-sig').read())}
    def start_requests(self):
        # Single entry point: the page listing all completed events.
        yield scrapy.Request(
            url='http://ufcstats.com/statistics/events/completed?page=all',
            headers=headers, callback=self.parse,
        )
    def parse(self, response):
        # One table row per event; follow each event's link.
        for i in response.css('table.b-statistics__table-events tr.b-statistics__table-row'):
            try:
                event_url = i.css('td.b-statistics__table-col:nth-child(1) a::attr(href)').get(default="").strip()
                yield scrapy.Request(
                    url=event_url,
                    headers=headers, callback=self.parse_events
                )
            except:
                # Rows without an event link (headers etc.) are skipped silently.
                pass
    def parse_events(self, response):
        # Each fight row links to the two fighters' profile pages.
        for i in response.css('tbody.b-fight-details__table-body tr.b-fight-details__table-row'):
            for k in i.css('td:nth-child(2) > p > a::attr(href)').getall():
                yield scrapy.Request(
                    url=k,
                    headers=headers, callback=self.parse_fighter_data
                )
    def parse_fighter_data(self, response):
        """Extract a fighter's name, W-L-D record and the stat-box key/value pairs."""
        def getClaenList(lst):
            # Join the non-empty, stripped text fragments of a selector result.
            return "".join([x.strip() for x in lst if x.strip() != ""])
        # Record has the form "Record: W-L-D".
        record = response.css('body > section > div > h2 > span.b-content__title-record::text').get(default="").replace(
            'Record:', '').strip()
        wins,losses,draws = record.split('-')
        dataset = {'name': response.css('body > section > div > h2 > span.b-content__title-highlight::text').get(
            default="").strip(),
                   'Record': record, "url": response.url,"wins":wins,"losses":losses,"draws":draws}
        # Every stat box list item contributes one "<label>: <value>" pair.
        for i in response.css('ul li.b-list__box-list-item.b-list__box-list-item_type_block'):
            key = i.css('i::text').get(default="").replace(':', '').strip()
            val = getClaenList(i.css('::text').getall()).replace(key, '').replace(':', '').strip()
            dataset[key] = val
        yield dataset
if __name__ == "__main__":
settings = {
# 'FEED_EXPORT_ENCODING': 'utf-8-sig',
# 'FEED_EXPORT_BATCH_ITEM_COUNT': 100000,
'FEED_FORMAT': 'json', # csv, json, xml
'FEED_URI': "player_data.json", #
'ROBOTSTXT_OBEY': False,
# Configure maximum concurrent requests performed by Scrapy (default: 16)
'CONCURRENT_REQUESTS': 5,
'CONCURRENT_REQUESTS_PER_DOMAIN': 2500,
'RETRY_ENABLED': False,
'COOKIES_ENABLED': True,
'LOG_LEVEL': 'INFO',
'DOWNLOAD_TIMEOUT': 700,
# 'DOWNLOAD_DELAY': 0.15,
'RETRY_TIMES': 10,
'HTTPCACHE_ENABLED': True,
'HTTPCACHE_EXPIRATION_SECS': 0,
'HTTPCACHE_DIR': 'httpcache_new',
'HTTPCACHE_IGNORE_HTTP_CODES': [int(x) for x in range(399, 600)],
'HTTPCACHE_STORAGE': 'scrapy.extensions.httpcache.FilesystemCacheStorage'
}
c = CrawlerProcess(settings)
c.crawl(WEbCrawlerInS)
c.start()
| frankamania/Scrapers | ufcstats.com/ufcstats.com_parse_player_data.py | ufcstats.com_parse_player_data.py | py | 3,749 | python | en | code | 0 | github-code | 36 |
10877583563 | import sys
import os
from tqdm.rich import tqdm
import pandas as pd
import datetime
import tables
from pathlib import Path
from typing import TypedDict
class TrialSummary(TypedDict):
    """One output row: the trial count for a (subject, task, step) table."""
    subject: str
    task: str
    step: str
    n_trials: int
# Scan every subject .h5 file in the Autopilot data folder.
data_path = Path.home() / "Dropbox" / "lab" / "autopilot" / "data"
subjects = list(data_path.glob('*.h5'))
# counting manually because the table structure of
# the subject file changed in v0.5.0 and automatically
# recreates a new file which can take awhile and this
# is almost as easy
summaries = []
for subject in tqdm(subjects):
    subject_id = subject.stem
    h5f = tables.open_file(str(subject), 'r')
    # Each Table under /data is one step; its parent nodes carry the
    # step and task names respectively.
    for table in h5f.walk_nodes('/data', classname="Table"):
        summary = TrialSummary(
            subject = subject_id,
            step = table._v_parent._v_name,
            task = table._v_parent._v_parent._v_name,
            n_trials = table.nrows
        )
        summaries.append(summary)
    h5f.close()
# One CSV row per (subject, task, step) with its trial count.
df = pd.DataFrame(summaries)
df.to_csv('./trial_counts.csv', index=False)
print(f"Total subjects: {len(subjects)}\nTotal trials: {df['n_trials'].sum()}")
41935148033 | import json
import re
from datetime import datetime
from newspaper import Article
i = 0 # id number
file_path="./MK.json"
# Collected articles, grouped under the 'MK' key of the output JSON.
news_format_json = {}
news_format_json['MK'] = []
# Walk candidate article ids; the ranges are half-open, so this covers
# y=2020, m=1 (January 2020) and article ids 0..10000 only.
for y in range(2020, 2021):
    for m in range(1, 2):
        for n in range(0, 10001):
            url = "https://www.mk.co.kr/news/economy/view/{}/{:02d}/{}/".format(y, m, n) # "economy" is meaningless because article shown is determined by 'n'
            art = Article(url, keep_article_html=True)
            try:
                art.download()
                art.parse()
                art2 = art.text.split()
            except:
                # Download/parse failure: skip this article id.
                print("***** error article *****")
                continue
            if len(art2) == 0:
                # Article page exists but has no body text.
                print("***** blank article *****")
                continue
            print(i)
            # print("\n{}, {}, {}\n".format(y, m, n))
            # print(art.title)
            # print(art.authors)
            # print(art.text)
            # Publication date is scraped from the raw HTML as YYYY.MM.DD.
            # NOTE(review): re.search may return None when the pattern is
            # absent, which would raise AttributeError on .group().
            match = re.search("\d{4}\.\d{2}\.\d{2}", art.html)
            dt = datetime.strptime(match.group(), "%Y.%m.%d")
            news_format_json['MK'].append({
                "id": i,
                "title": art.title,
                "text": art.text,
                "timestamp": [dt.year, dt.month, dt.day],
                "html": art.article_html
            })
            i += 1
# Dump everything collected as pretty-printed UTF-8 JSON.
with open(file_path, 'w', encoding='utf-8') as outfile:
    json.dump(news_format_json, outfile, indent=4, ensure_ascii=False)
print(news_format_json)
| hyeonoir/Stocksnet | MK.py | MK.py | py | 1,571 | python | en | code | 0 | github-code | 36 |
9748891676 | from flask import request, Blueprint, abort
from models.alarm import Alarm
from models.response import ResponseJSON
alarm_routes = Blueprint("alarm", __name__, url_prefix="/server/alarm")
@alarm_routes.route("/", methods=["POST"])
def add_alarm():
if not request.json or 'name' not in request.json:
abort(400)
alarm = Alarm(request.json["name"]).save()
return ResponseJSON(True, alarm.serialize(), None).serialize(), 201
@alarm_routes.route("/<int:aid_alarm>", methods=["DELETE"])
def delete_alarm(aid_alarm):
alarm = Alarm.query.get(aid_alarm)
if not alarm:
return ResponseJSON(False, None, "Does not exist").serialize(), 404
alarm.delete()
return ResponseJSON(True, None, None).serialize(), 200
@alarm_routes.route("/<int:aid_alarm>", methods=["PUT"])
def update_alarm(aid_alarm):
if len(request.args) == 0 or not request.args.get("name") and not request.args.get("desc"):
abort(400)
alarm = Alarm.query.get(aid_alarm)
if not alarm:
return ResponseJSON(False, None, "Does not exist").serialize(), 404
if request.args.get("name"):
if not request.json or "name" not in request.json:
abort(400)
alarm.name = request.json["name"]
elif request.args.get("desc"):
if not request.json or "description" not in request.json:
abort(400)
alarm.description = request.json["description"]
alarm.update()
return ResponseJSON(True, alarm.serialize(), None).serialize(), 200
@alarm_routes.route("/<int:aid_alarm>", methods=["GET"])
def get_alarm(aid_alarm):
alarm = Alarm.query.get(aid_alarm)
if not alarm:
return ResponseJSON(False, None, "Does not exist").serialize(), 404
return ResponseJSON(True, alarm.serialize(), None).serialize(), 200
| byUNiXx/kivy_flask_gps | server/src/routes/alarm.py | alarm.py | py | 1,817 | python | en | code | 0 | github-code | 36 |
#set encoding=utf-8
# Guillemet quote characters used when rendering the company name.
entities = dict(
    laquo = u'\u00AB',
    raquo = u'\u00BB')
# Company banking and contact requisites used on invoices/documents.
# Values are deliberately in Russian — they are rendered verbatim.
REQUISITES = dict(
    name = u"ООО «Издательский дом «Практика»",
    INN = "7705166992",
    BIK = "044525225",
    KPP = "",
    correspondentAccount = "30101810400000000225",
    beneficiaryAccount = "40702810138040103580",
    bankName = (u"Сбербанк России ОАО, г. Москва, Московский банк Сбербанка России ОАО"),
    address=(u"121471, Москва г, Рябиновая ул, дом №44, пом.1, "
    u"тел. (499) 391-48-04"),
    manager = u"Ананич Сергей Владимирович",
    accountant = u"Гохберг Валерия Лазоревна"
    )
| temaput/practica.ru | practica/practica/requisites.py | requisites.py | py | 856 | python | ru | code | 0 | github-code | 36 |
6994057460 | from lib.cuckoo.common.abstracts import Signature
class AndroidGooglePlayDiff(Signature):
    """Flags APKs whose Google Play listing declares permissions that the
    manifest does not."""
    name = "android_google_play_diff"
    description = "Application Permissions On Google Play Differ (Osint)"
    severity = 3
    categories = ["android"]
    authors = ["Check Point Software Technologies LTD"]
    minimum = "2.0"

    def on_complete(self):
        """Compare manifest permissions against the Play Store listing."""
        manifest_perms = [
            perm["name"]
            for perm in self.get_apkinfo("manifest", {}).get("permissions", [])
        ]
        play_perms = [perm for perm in self.get_googleplay("permissions", [])]
        extra_perms = list(set(play_perms) - set(manifest_perms))
        if extra_perms:
            self.mark(permissions=extra_perms)
            return True
| cuckoosandbox/community | modules/signatures/android/android_google_play_diff.py | android_google_play_diff.py | py | 867 | python | en | code | 312 | github-code | 36 |
36319285982 | from random import randint
from Savedata import Savedata
from entity.Entity import EntInt
from entity.Player import Player
from entity.Enemy.Boss import Boss
from entity.Enemy.Malicious import Malicious
from entity.Enemy.SimpleEnemy import SimpleEnemy
from entity.item.Shield import Shield
from entity.item.CadenceUp import CadUp
import pygame
class Level():
    """One playable level: loads its wave script, spawns enemies wave by
    wave and drives the per-frame update/draw cycle."""

    # Class-level state.
    # NOTE(review): 'enemy' is a mutable class attribute shared by all Level
    # instances and appears unused in this file — confirm before removing.
    enemy=[]
    end=False

    def __init__(self,screen,screenw,screenh,savedata : Savedata, level_number) -> None:
        """Set up fonts, the entity manager and the wave script.

        :param screen: pygame display surface to draw on
        :param screenw: screen width in pixels
        :param screenh: screen height in pixels
        :param savedata: persistent save data (total kill count, ...)
        :param level_number: index of the wave file Levels/<n>.txt
        """
        self.bigarial=pygame.font.Font('C:/Windows/Fonts/arial.ttf',100)
        self.arial=pygame.font.Font('C:/Windows/Fonts/arial.ttf',20)
        self.entint=EntInt()
        self.screen=screen
        self.screenw=screenw
        self.screenh=screenh
        self.savedata=savedata
        self.entint.killcount=0
        self.nb_player=1
        self.number=level_number
        self.waves=self.get_waves()
        self.current_wave=-1          # -1 so the first update() starts wave 0
        self.win=False                # all waves cleared
        self.pre_next=False           # win banner shown, cooldown started
        self.next=False               # cooldown elapsed, advance to next level
        self.cooldown_until_next=1000 # ms to display the "Level Cleared" banner
        self.start_cooldown=0

    def spawn_enemys_in_wave(self,wave : str):
        """Spawn the enemies encoded in one wave string.

        The string is a sequence of <digit><type> pairs, e.g. '3R1B':
        R = SimpleEnemy, M = Malicious, B = Boss.  Each enemy appears at a
        random x position near the top of the screen.
        """
        for i in range(len(wave)//2):
            nb=int(wave[2*i])
            type=wave[2*i+1]
            if(type=='R'):
                for j in range(nb):
                    SimpleEnemy(self.entint,self.screen,(randint(0,self.screenw-1),50))
            elif(type=='M'):
                for j in range(nb):
                    Malicious(self.entint,self.screen,(randint(0,self.screenw-1),50))
            elif(type=='B'):
                for j in range(nb):
                    Boss(self.entint,self.screen,(randint(0,self.screenw-1),50))

    def get_waves(self):
        """Read Levels/<number>.txt and return its '/'-separated wave list.

        Fixed defect: the original left the file handle open; the 'with'
        block now closes it deterministically.
        """
        with open("Levels/"+str(self.number)+".txt") as text:
            content=text.read()
        return content.split('/')

    def start(self):
        """Create the player(s) if none exist yet, and the frame clock."""
        if(self.entint.players.__len__()==0):
            Player(self.entint,scr=self.screen,pos=(self.screenw/2, self.screenh/2),key=(pygame.K_s,pygame.K_z,pygame.K_q,pygame.K_d,pygame.K_g))
            if(self.nb_player==2):
                Player(self.entint,scr=self.screen,pos=(self.screenw/2, self.screenh/2),key=(pygame.K_DOWN,pygame.K_UP,pygame.K_LEFT,pygame.K_RIGHT,pygame.K_KP2))
        self.clock=pygame.time.Clock()

    def update(self):
        """Advance the level by one frame: entities, waves, win/lose UI."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): this local assignment has no effect outside
                # this method — confirm how quitting is supposed to work.
                run = False
        dt = self.clock.tick(120)
        self.entint.update(dt)
        if(len(self.entint.players)==0):
            # All players dead: show the game-over banner.
            game_over=self.bigarial.render("Game Over",False,(0,0,0))
            self.screen.blit(game_over,((self.screenw-game_over.get_width())/2,self.screenh/2))
        if((len(self.entint.enemys)==0)&(self.current_wave<len(self.waves))):
            # Current wave cleared: start the next one, or flag the win.
            self.current_wave+=1
            if(self.current_wave>=len(self.waves)):
                self.win=True
            else:
                self.spawn_enemys_in_wave(self.waves[self.current_wave])
        if(self.win):
            level_cleared=self.bigarial.render("Level Cleared",False,(0,0,0))
            self.screen.blit(level_cleared,((self.screenw-level_cleared.get_width())/2,self.screenh/2))
            if(not self.pre_next):
                # Start the end-of-level cooldown on the first winning frame.
                self.start_cooldown=pygame.time.get_ticks()
                self.pre_next=True
            else:
                if(pygame.time.get_ticks()-self.start_cooldown>self.cooldown_until_next):
                    self.next=True
        self.info_print()
        if(pygame.key.get_pressed()[pygame.K_ESCAPE]):
            # Escape: persist the kill count and leave the level.
            self.savedata.totalkillcount+=self.entint.killcount
            self.savedata.save()
            self.end=True

    def info_print(self):
        """Draw the FPS and kill counters at the bottom of the screen."""
        fps=self.arial.render("fps :" + str(int(self.clock.get_fps())),False,(0,0,0),(255,255,255))
        self.screen.blit(fps,(self.screenw-70,self.screenh-30))
        kc=self.arial.render("killcount :" + str(self.entint.killcount),False,(0,0,0),(255,255,255))
        self.screen.blit(kc,(0,self.screenh-30))
| 2doupo/Shooter | Levels/Level.py | Level.py | py | 5,657 | python | en | code | 0 | github-code | 36 |
13491582898 | from datetime import datetime
import csv, os
class File:
    """Appends log/sensor records to per-day files under ./files."""
    def __init__(self, name, date):
        # Reader/writer/append handles; only self.fw is actually used below,
        # and it is re-opened per call rather than kept open.
        self.fr = None
        self.fw = None
        self.fa = None
        # Default CSV target, e.g. ./files/<name>_<date>.csv
        self.filename = f"./files/{name}_{date}.csv"
    def filename_change(self, filename):
        # Redirect subsequent writes to a different path.
        self.filename = filename
    def file_write(self, title, data):
        # NOTE(review): assumes self.fw is already an open handle, but every
        # other method closes the handle after use, so calling this on its
        # own fails — confirm the intended call order.
        self.fw.write(f'[{title}] data = {data}')
    def file_write_time(self, title, address, data):
        # Append one "[title] address / data / timestamp" line to the file.
        self.fw = open(self.filename, "a")
        self.fw.write(f'[{title}] address = {address} / data = {data} / datetime = {datetime.now()}\n')
        self.fw.close()
    def file_write_data(self, data):
        # NOTE(review): writes to the absolute path /files/<filename>.txt,
        # where self.filename already contains './files/' and '.csv' — this
        # looks unintended; verify against callers.
        self.fw = open(f"/files/{self.filename}.txt", "a")
        self.fw.write(data)
        self.fw.close()
    def file_write_csv(self, data):
        # Append one sensor row; a header row is written first when the file
        # does not exist yet.  The current clock time is appended to the row.
        data.append(str(datetime.now())[10:19])
        # Existence check builds a Windows-style path from the cwd.
        if os.path.isfile(os.path.join(os.getcwd(),self.filename.replace("./","").replace("/","\\"))):
            f = open(self.filename, "a", newline='')
            wr = csv.writer(f)
            wr.writerow(data)
            f.close()
        else :
            f = open(self.filename, "w", newline='')
            wr = csv.writer(f)
            wr.writerow(["spo2", "spo2 confidence", "hr", "hr confidence", "walk", "run",
                         "motion flag", "activity", "battery", "scd", "acc x", "acc y", "acc z",
                         "gyro x", "gyro y", "gyro z", "fall detect", "temp", "pressure", "time"])
            wr.writerow(data)
            f.close()
    def file_write_close(self):
        # Close the writer handle (only valid while one is open).
        self.fw.close()
    def return_today(self):
        # Current time as "YYYY-MM-DD HH-MM-SS" (dashes keep it filename-safe).
        today = str(datetime.now())
        return f"{today[:10]} {today[11:13]}-{today[14:16]}-{today[17:19]}"
30167619288 | from itertools import combinations
import random
# If true, extra inforamtion will appear
DEBUG = False
def greedy(v, w, W):
    """Greedy 0/1-knapsack heuristic, O(n log n).

    Items are considered in order of decreasing value density (v/w); each is
    taken while it fits, and the scan stops at the first item that does not.

    :param v: item values
    :param w: item weights
    :param W: knapsack capacity
    :return: boolean take/leave list, one flag per item
    """
    n = len(w)
    chosen = [False] * n
    # Item indices ordered by value-per-weight, best first (stable on ties).
    by_density = sorted(range(n), key=lambda i: v[i] / w[i], reverse=True)
    remaining = W
    for i in by_density:
        if w[i] > remaining:
            return chosen
        chosen[i] = True
        remaining -= w[i]
    return chosen
def dynamic(v, w, W):
    """Exact 0/1-knapsack via dynamic programming, O(nW) time and space.

    :param v: item values
    :param w: item weights
    :param W: knapsack capacity (non-negative integer)
    :return: boolean take/leave list achieving the optimal value
    """
    n = len(v)
    # table[i][j] = best value using the first i items within capacity j.
    table = [[0] * (W + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, W + 1):
            without_item = table[i - 1][j]
            if w[i - 1] <= j:
                with_item = v[i - 1] + table[i - 1][j - w[i - 1]]
                table[i][j] = max(with_item, without_item)
            else:
                table[i][j] = without_item
    # Walk the table backwards to recover which items were taken.
    chosen = [False] * n
    i, j = n, W
    while i > 0 and j > 0:
        if table[i][j] != table[i - 1][j]:
            chosen[i - 1] = True
            j -= w[i - 1]
        i -= 1
    return chosen
def bruteforce(v, w, W):
    """Exact 0/1-knapsack by exhaustive search over all item subsets, O(2^n).

    Fixed defect: the original mapped chosen (weight, value) tuples back to
    item indices with list.index(), which always returns the *first*
    occurrence — so with duplicate items the same index was marked twice and
    the result was wrong.  Iterating over index combinations removes the
    ambiguity.

    :param v: item values
    :param w: item weights
    :param W: knapsack capacity
    :return: boolean take/leave list for the best subset found, or []
        when no item subset fits (matching the original's empty result)
    """
    n = len(v)
    best_x = []
    best_value = 0
    for subset_size in range(1, n + 1):
        for combo in combinations(range(n), subset_size):
            weight = sum(w[i] for i in combo)
            value = sum(v[i] for i in combo)
            if value > best_value and weight <= W:
                best_value = value
                best_x = [False] * n
                for i in combo:
                    best_x[i] = True
    return best_x
# Genetic-algorithm heuristic; roughly O(MAX_GEN * POP_SIZE * N) fitness work.
def genetic(v, w, W, POP_SIZE=10, MAX_GEN=200):
    """Approximate the 0/1 knapsack with a simple genetic algorithm.
    Individuals are 0/1 lists of length N (1 = item taken).  Each generation
    keeps the best 40% as parents (plus a random 10% of the rest), mutates
    some of them, and refills the population with single-point-crossover
    children.  Results are stochastic (module-level random, no fixed seed).
    :param v: item values
    :param w: item weights
    :param W: knapsack capacity
    :param POP_SIZE: individuals per generation
    :param MAX_GEN: number of generations to run
    :return: best individual found as 0/1 flags, or an all-False list when
        nothing feasible was found
    """
    N = len(v)
    PARENTS_PERCENTAGE = 0.4
    MUTATION_CHANCE = 0.2
    PARENT_CHANCE = 0.1
    def fitness(perm):
        # Total value of the selection, or 0 when it exceeds the capacity.
        value = 0
        weight = 0
        index = 0
        for i in perm:
            if index >= N:
                break
            if (i == 1):
                value += v[index]
                weight += w[index]
            index += 1
        if weight > W: return 0
        else: return value
    def generate_population(number_of_individuals):
        # Random 0/1 genomes of length N.
        return [[random.randint(0,1) for x in range (0,N)] for x in range (0,number_of_individuals)]
    def mutate(perm):
        # Flip one random gene in place.
        r = random.randint(0,len(perm)-1)
        if (perm[r] == 1): perm[r] = 0
        else: perm[r] = 1
    def evolve(perm):
        # 'perm' is the population sorted best-first; build the next generation.
        parents_length = int(PARENTS_PERCENTAGE*len(perm))
        parents = perm[:parents_length]
        nonparents = perm[parents_length:]
        # A small random share of the weaker individuals survives as well.
        for np in nonparents:
            if PARENT_CHANCE > random.random():
                parents.append(np)
        # Mutate some survivors in place.
        for p in parents:
            if MUTATION_CHANCE > random.random():
                mutate(p)
        children = []
        desired_length = len(perm) - len(parents)
        while len(children) < desired_length :
            # Single-point crossover between two randomly picked genomes.
            # NOTE(review): the picks index 'perm' (the sorted population),
            # not 'parents', so only the best len(parents) genomes can be
            # picked and the random survivors never reproduce — confirm intent.
            m = perm[random.randint(0,len(parents)-1)]
            f = perm[random.randint(0,len(parents)-1)]
            half = round(len(m)/2)
            child = m[:half] + f[half:]
            if MUTATION_CHANCE > random.random():
                mutate(child)
            children.append(child)
        parents.extend(children)
        return parents
    generation = 1
    population = generate_population(POP_SIZE)
    for g in range(0,MAX_GEN):
        if DEBUG: print (f"Generation {generation} with {len(population)}")
        population = sorted(population, key=lambda x: fitness(x), reverse=True)
        if DEBUG:
            for i in population:
                print(f"{i}, fit: {fitness(i) }")
        population = evolve(population)
        generation += 1
    # NOTE(review): the population is not re-sorted after the final evolve()
    # call, so population[0] is the best of the previous generation's
    # parents, not necessarily of the last one — confirm whether a final
    # sort was intended.
    if (fitness(population[0]) == 0): return [False for i in range(N)]
    else: return population[0]
41903865929 | import sys
from django.db.models import Avg, Variance
from django.shortcuts import render
from rest_framework import generics
from rest_framework.decorators import api_view, parser_classes
from rest_framework.generics import ListCreateAPIView
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework import status
import json
from rest_framework.views import APIView
import collections
from .models import ExchangeRate, Currency
from .model_serializer import ExchangeRateSerializer, ExchangeRateListSerializer
query_set = ExchangeRate.objects.all()
serializer_class = ExchangeRateSerializer
@api_view(['GET'])
def test_get(request):
    """Smoke-test endpoint: log the 'q' query parameter and say hello."""
    q_value = request.query_params.get("q")
    print("request:", q_value)
    return Response({"message": "Hello, world!"})
@api_view(['POST'])
@parser_classes((JSONParser,))
def crate_exchange_rate(request):
    """Create or update the exchange rate for one (date, from, to) triple.
    Expects a JSON body with date, from_currency, to_currency, rate_value.
    NOTE(review): the function name is misspelled ('crate') but is probably
    referenced from the URL conf, so it is left unchanged.
    """
    request_data = request.data
    date = request_data['date']
    from_currency = request_data['from_currency']
    to_currency = request_data['to_currency']
    rate_value = request_data['rate_value']
    # Upsert: reuse the existing row for this triple when there is one.
    exchange_rate_ls = ExchangeRate.objects.filter(date=date, from_currency=from_currency, to_currency=to_currency)
    exchange_rate = ExchangeRate()
    if exchange_rate_ls:
        exchange_rate = exchange_rate_ls[0]
        exchange_rate.rate_value = rate_value
    else:
        # New row: resolve the currency codes to Currency instances.
        from_curr = Currency.objects.get(currency_code=from_currency)
        to_curr = Currency.objects.get(currency_code=to_currency)
        exchange_rate.date = date
        exchange_rate.from_currency = from_curr
        exchange_rate.to_currency = to_curr
        exchange_rate.rate_value = rate_value
    exchange_rate.save()
    return Response(ExchangeRateSerializer(exchange_rate).data, status=status.HTTP_200_OK)
def get_rate_average(rate_date, previous_date, from_curr, to_curr):
    """Average rate_value for a currency pair over [previous_date, rate_date].

    :return: aggregate dict of the form {'rate_value__avg': ...}
    """
    from django.db.models import Avg
    rates = ExchangeRate.objects.filter(
        date__range=(previous_date, rate_date),
        from_currency=from_curr,
        to_currency=to_curr,
    )
    return rates.aggregate(Avg('rate_value'))
from collections import OrderedDict
def ordered_dict_prepend(dct, key, value, dict_setitem=dict.__setitem__):
    """Insert (or overwrite) ``key`` in ``dct`` and move it to the front.

    Fixed defect: the original reached into the pure-Python OrderedDict
    internals (``_OrderedDict__root`` / ``_OrderedDict__map``), which do not
    exist on the C implementation used by CPython since 3.5, so it raised
    AttributeError.  ``OrderedDict.move_to_end(key, last=False)`` performs
    the same operation portably.

    :param dct: collections.OrderedDict to modify in place
    :param key: key to insert or update
    :param value: value to store
    :param dict_setitem: retained for signature compatibility; no longer
        used (bypassing OrderedDict.__setitem__ would corrupt the ordering
        bookkeeping of the C implementation)
    """
    dct[key] = value
    dct.move_to_end(key, last=False)
class GetExchangeLIst(ListCreateAPIView):
    """List/create endpoint over all ExchangeRate rows.
    NOTE(review): the class name is misspelled but is likely referenced from
    the URL conf, so it is kept as-is.
    """
    # Placeholder; the real queryset comes from get_queryset() below.
    queryset = ExchangeRate.objects.none()
    serializer_class = ExchangeRateSerializer
    def get_queryset(self):
        # Return every ExchangeRate row, overriding the none() placeholder.
        queryset = ExchangeRate.objects.all()
        return queryset
@api_view(['GET'])
def get_exchange_track(request):
    """List all rates for ?date=YYYY-MM-DD, each annotated with its average
    over the preceding ?offset days."""
    print("request_data", request.query_params)
    rate_date = request.query_params.get("date")
    if not rate_date:
        return Response('Invalid Date parameter', status=status.HTTP_404_NOT_FOUND)
    offset = request.query_params.get("offset")
    if not offset:
        return Response('Invalid Offset parameter', status=status.HTTP_404_NOT_FOUND)
    from datetime import datetime, timedelta
    # Window start: 'offset' days before the requested date.
    previous_date = datetime.strptime(rate_date, "%Y-%m-%d").date() - timedelta(days=int(offset))
    exchange_list = ExchangeRate.objects.filter(date=rate_date)
    response_serializer = ExchangeRateSerializer(exchange_list, many=True)
    # Per currency pair: average rate over the window, keyed "FROM-TO".
    average_dict = {er.from_currency.currency_code+'-'+er.to_currency.currency_code: get_rate_average(rate_date, previous_date, er.from_currency,
                                                                                                      er.to_currency) for er in exchange_list}
    print(response_serializer.data)
    result_list =[]
    for d in response_serializer.data:
        # Attach the computed average to each serialized row.
        d['average_val'] = average_dict[d['from_currency']+'-'+d['to_currency']]['rate_value__avg']
        result_list.append(d)
    return Response(result_list)
@api_view(['GET'])
def get_exchange_average(request):
    """Return the latest ?offset rates for a currency pair plus their
    average and variance.
    The response is a dict keyed 1..n for the serialized rows, with one
    extra final key holding {'average_val': ..., 'variance': ...}.
    """
    from_currency = request.query_params.get("from_currency")
    if not from_currency:
        return Response('Invalid from_currency parameter', status=status.HTTP_404_NOT_FOUND)
    to_currency = request.query_params.get("to_currency")
    if not to_currency:
        # NOTE(review): message says "from_currency" but this check is for
        # to_currency.
        return Response('Invalid from_currency parameter', status=status.HTTP_404_NOT_FOUND)
    offset = request.query_params.get("offset")
    if not offset:
        return Response('Invalid Offset parameter', status=status.HTTP_404_NOT_FOUND)
    from_curr = Currency.objects.get(currency_code=from_currency)
    to_curr = Currency.objects.get(currency_code=to_currency)
    # NOTE(review): _vals is never used.
    _vals ={'from_currency':from_curr,'to_currency':to_curr}
    from django.db.models import Q
    # Most recent 'offset' rows for the pair, newest first.
    exchange_rate_list = ExchangeRate.objects.filter(Q(from_currency=from_curr) & Q(to_currency=to_curr)).order_by('-date')[:int(offset)]
    response_serializer = ExchangeRateSerializer(exchange_rate_list, many=True)
    avr_val = exchange_rate_list.aggregate(Avg('rate_value'))
    variance = exchange_rate_list.aggregate(Variance('rate_value'))
    aver_var = collections.OrderedDict()
    aver_var['average_val'] =avr_val
    aver_var['variance'] =variance
    data = response_serializer.data
    i =0
    dict_response ={}
    for d in data:
        # Number the serialized rows starting at 1.
        i=i+1
        dict_response[i]=d
    # The final slot holds the aggregate statistics.
    dict_response[i+1]=aver_var
    print(data)
    return Response(dict_response)
@api_view(['DELETE'])
def delete_exchange(request):
    """Delete the exchange rate(s) for a currency pair on a given date.

    Query params: from_currency, to_currency, date (YYYY-MM-DD).
    Responds 404 when a parameter is missing.
    """
    from_currency = request.query_params.get("from_currency")
    if not from_currency:
        return Response('Invalid from_currency parameter', status=status.HTTP_404_NOT_FOUND)
    to_currency = request.query_params.get("to_currency")
    if not to_currency:
        # Bug fix: this message previously said "from_currency".
        return Response('Invalid to_currency parameter', status=status.HTTP_404_NOT_FOUND)
    date = request.query_params.get("date")
    if not date:
        return Response('Invalid Date parameter', status=status.HTTP_404_NOT_FOUND)
    # Bug fix: `import datetime` followed by `datetime.strptime(...)` raised
    # AttributeError — strptime lives on the datetime *class*.
    from datetime import datetime
    date_to_delete = datetime.strptime(date, "%Y-%m-%d").date()
    from_curr = Currency.objects.get(currency_code=from_currency)
    to_curr = Currency.objects.get(currency_code=to_currency)
    from django.db.models import Q
    # Bug fix: `ExchangeRate.objects.objects.filter` raised AttributeError.
    ExchangeRate.objects.filter(
        Q(from_currency=from_curr) & Q(to_currency=to_curr) & Q(date=date_to_delete)).delete()
    return Response("Deleted data", status=status.HTTP_200_OK)
71654885545 | import sys
import re
import ranges
def read_cleanup_file(filename, full_overlaps_only):
    """Count overlapping assignment pairs in an AoC 2022 day-4 input file.

    Each line has the form "a-b,c-d", describing two inclusive ranges.

    :param filename: path to the input file
    :param full_overlaps_only: when True, count only pairs where one range
        fully contains the other; otherwise count any overlap.
    :return: number of matching pairs
    """
    # Renamed from `sum`, which shadowed the builtin of the same name.
    count = 0
    with open(filename, 'r') as fp:
        for line in fp:
            toks = re.split(',|-', line.strip())
            if len(toks) != 4:
                raise Exception('wrong line format. tokens: %s' % toks)
            r1 = ranges.Range(int(toks[0]), int(toks[1]), include_end=True)
            r2 = ranges.Range(int(toks[2]), int(toks[3]), include_end=True)
            # union() yields None for disjoint ranges; it equals one of the
            # inputs exactly when that input contains the other.
            r12 = r1.union(r2)
            if (full_overlaps_only and (r12 == r1 or r12 == r2)) \
                    or (not full_overlaps_only and r12 is not None):
                count += 1
    return count
if __name__=='__main__':
    if len(sys.argv) != 2:
        print('usage: python main.py INPUT')
        # Bug fix: previously execution fell through and crashed with an
        # IndexError on sys.argv[1]; exit with a non-zero status instead.
        sys.exit(1)
    full_overlaps = read_cleanup_file(sys.argv[1], True)
    print('total full overlaps: %d' % full_overlaps)
    partial_overlaps = read_cleanup_file(sys.argv[1], False)
    print('total partial overlaps: %d' % partial_overlaps)
| dakopoulos/aoc22 | day4/main.py | main.py | py | 1,004 | python | en | code | 0 | github-code | 36 |
14258883967 | import string
priorities = dict(zip(string.ascii_lowercase + string.ascii_uppercase, range(1,53)))
def puzzle_one():
    """Sum the priorities of the item type shared by both compartments
    of each rucksack (AoC 2022 day 3, part 1)."""
    total = 0
    for line in open("input.txt"):
        sack = line.strip()
        mid = len(sack) // 2
        # The shared item types are exactly the set intersection of the halves.
        shared = set(sack[:mid]) & set(sack[mid:])
        total += sum(priorities[item] for item in shared)
    print(f"Part 1: {total}")
puzzle_one()
def puzzle_two():
    """Sum the priorities of each three-elf group's common badge item
    (AoC 2022 day 3, part 2)."""
    total = 0
    group = []
    for line in open("input.txt"):
        group.append(set(line.strip()))
        if len(group) == 3:
            # The badge is the item present in all three rucksacks.
            badge = next(iter(group[0] & group[1] & group[2]))
            total += priorities[badge]
            group = []
    print(f"Part 2: {total}")
puzzle_two() | Villarrealized/advent-of-code | 2022/03/main.py | main.py | py | 1,005 | python | en | code | 0 | github-code | 36 |
72040624744 | import numpy as np
import pandas as pd
from scipy.linalg import svd
def centerData(X):
    """Subtract the overall mean of X from every entry and return the result.

    Note: np.mean(X) is the mean over *all* entries (a single scalar, which
    is also printed), not a per-column mean.
    """
    overall_mean = np.mean(X)
    print(overall_mean)
    return X - overall_mean
def compute_F_Mat(X_c, fullMatrices=False):
    '''
    Compute the factor matrix F = P @ D from the SVD X_c = P D Q^T.

    :param X_c: centered data matrix, shape (rows, cols)
    :param fullMatrices: forwarded to np.linalg.svd; when True, P is square
        and D is zero-padded to (rows, cols)
    :return: F matrix (equal to both P @ D and X_c @ Q)
    '''
    Rows, Cols = X_c.shape
    print (Rows, Cols)
    # SVD: X_c = P @ D @ Q_t, with Dvec holding the singular values.
    P, Dvec, Q_t = np.linalg.svd(X_c, full_matrices=fullMatrices, compute_uv=True)
    Q = Q_t.T
    # Build the (possibly rectangular) D matrix from the singular values; see
    # https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.linalg.svd.html
    if fullMatrices:
        D = np.zeros((Rows, Cols))
        D[:Cols, :Cols] = np.diag(Dvec)
    else:
        D = np.diag(Dvec)
    print(Q.shape)
    # F = P @ D.  (The original also computed the equivalent X_c @ Q into a
    # second, never-used variable; that duplicate work has been removed.)
    return P @ D
def probability_Estimate(F, Y, n=11):
    """Sliding-window class-probability estimate along one factor column.

    Rows are sorted by their F value, then each class column of Y is
    averaged over a window of *n* consecutive rows.

    :param F: 1-D array of factor scores, length m
    :param Y: (m, 8) class-indicator matrix
    :param n: window size (generalized; it was previously hard-coded to 11)
    :return: (m - 2*(n//2), 9) array; column 0 holds the window-center F
        values, columns 1..8 the windowed class averages
    """
    rows = F.shape[0]
    half = int(n / 2)
    n_out_rows = rows - half - half
    n_classes = 8
    prob_Matrix = np.zeros((n_out_rows, n_classes + 1))
    base_Matrix = np.zeros((rows, n_classes + 1))
    base_Matrix[:, 0] = F
    base_Matrix[:, 1:] = Y
    # Sort rows by F so the window slides along increasing factor values.
    base_Matrix_Sorted = base_Matrix[base_Matrix[:, 0].argsort()]
    for i in range(n_out_rows):
        for j in range(n_classes):
            prob_Matrix[i, j + 1] = np.average(base_Matrix_Sorted[i:i + n, j + 1])
    # Keep only the F values that have a full window around them.
    prob_Matrix[:, 0] = base_Matrix_Sorted[half:rows - half, 0]
    return prob_Matrix
def main():
    """Load the data set, compute the factor matrix F, and estimate a
    sliding-window probability table for each of the first K factors."""
    # ======= Data set 1: features are columns 2..21 of the spreadsheet =======
    X_input = pd.read_excel("Clean Data.xlsx")
    X = X_input.values
    X = X[:,2:22]
    Rows, Cols = X.shape
    print("Size of X",X.shape)
    Y_input = pd.read_excel("classes_example.xlsx")
    # Number of factor columns (F's) to analyze.
    K = 5
    # --- TEST-ONLY scaffolding: replicate the single example class column ---
    # --- into all 8 class columns. Remove for a real run (see below). -------
    Y = np.zeros((87,8))
    Y[:,0] = Y_input.values.T
    Y[:,1] = Y_input.values.T
    Y[:,2] = Y_input.values.T
    Y[:,3] = Y_input.values.T
    Y[:,4] = Y_input.values.T
    Y[:,5] = Y_input.values.T
    Y[:,6] = Y_input.values.T
    Y[:,7] = Y_input.values.T
    print("Shape of Y",Y.shape)
    # --- Real-data path: use the class matrix as loaded. --------------------
    # Uncomment for a real test:
    #Y = Y_input
    # ------------------------------------------------------------------------
    X_c = centerData(X)
    F = compute_F_Mat(X_c)
    P_model = []
    for i in range(K):
        # One probability table per factor column.
        probability_Matrix = probability_Estimate(F[:,i],Y)
        P_model.append(probability_Matrix)
    print("==============F===========")
    print(F)
    print("==============Probability===========")
    print(P_model)
if __name__ == "__main__":
main() | ChavezE/Parallel_PFVA | SVD_Probaility_Tables.py | SVD_Probaility_Tables.py | py | 3,354 | python | en | code | 0 | github-code | 36 |
23315150792 | import math
field = [line.strip() for line in open("input.txt").readlines()]
def traverse(field, side_step, height_step, pos=0, hit_stuff=""):
for counter in range(0, len(field), height_step):
hit_stuff += field[counter][pos]
pos = (pos + side_step) % len(field[0])
return hit_stuff.count("#")
# Part 1: slope right 3, down 1.
print(traverse(field, 3, 1))
# Part 2: product of tree counts over all five listed (right, down) slopes.
steps = ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2))
print(math.prod(traverse(field, right, height) for right, height in steps))
| g-clef/advent_of_code_2020 | day 3/day3.py | day3.py | py | 478 | python | en | code | 0 | github-code | 36 |
35456769757 | from generator import Generator
from enviroment import Enviroment
import sys
# Sweep utilization from the initial to the final value in fixed steps,
# generating Enviroment.ROUNDS task sets per utilization level.
utilization = round(float(Enviroment.INITAL_UTILIZATION), 2)
finalUtilization = round(float(Enviroment.FINAL_UTILIZATION), 2)
processorsNumber = int(Enviroment.PROCESSORS_NUMBER)
generationNumber = int(Enviroment.GENERATION_INITIAL_NUMBER)
decimalPlaces = int(Enviroment.DECIMAL_PLACES)
step = round(float(Enviroment.STEP), 2)
print("Quantidade de processadores: ", processorsNumber)
while utilization <= finalUtilization:
    for i in range(Enviroment.ROUNDS):
        # Each generated set gets a unique, monotonically increasing id.
        Generator(utilization, processorsNumber, generationNumber, decimalPlaces).generate()
        generationNumber = generationNumber + 1
    # round() keeps the accumulated float utilization at two decimal places.
    utilization = round(utilization + step, 2)
print("Tarefas geradas com sucesso")
20288196012 | # cmd_xp.py
import discord
from user import User
from lang.lang import Lang
from cmds.cmd import ServerCmd
async def cmd_user_xp_get_self(server, userid, channel, message):
    """Send the calling user's own XP total to *channel*.

    Raises a plain Exception when *userid* is not a member of this guild;
    callers are expected to have resolved membership beforehand.
    """
    if userid not in server.members.keys():
        raise Exception(f'Cannot self display user xp ({userid}) : user id not found in this guild')
    await channel.send(Lang.get('CMD_XP_SELF', server.lang).format(User.get_at_mention(userid), server.members[userid].xp))
    return True
async def cmd_user_xp_get_other(server, userid, channel, message):
    """Show the XP total of every mentioned user that belongs to this guild.

    Returns False (after a syntax hint) when no users are mentioned,
    True otherwise.
    """
    if not message.mentions:
        await channel.send(f"{Lang.get('CMD_WRONG_SYNTAX', server.lang)}\r\n`{server.cmd_prefix}xp <users>`")
        return False
    lines = [
        f"{Lang.get('CMD_XP_OTHER', server.lang)}\r\n".format(User.get_at_mention(m.id), server.members[m.id].xp)
        for m in message.mentions
        if m.id in server.members.keys()
    ]
    await channel.send("".join(lines))
    return True
async def cmd_user_xp_give(server, userid, channel, message):
    """Grant XP to every mentioned guild member.

    Expected syntax: "<prefix>xp <users> <value>", where the message's last
    token is the amount. Returns True on success, False on bad syntax.
    """
    if len(message.mentions) < 1:
        await channel.send(f"{Lang.get('CMD_WRONG_SYNTAX', server.lang)}\r\n`{server.cmd_prefix}xp <users> <value>`")
        return False
    # Robustness fix: a non-numeric last token used to raise an unhandled
    # ValueError instead of reporting bad syntax.
    try:
        amount_to_give = int(message.content.split()[-1])
    except ValueError:
        await channel.send(f"{Lang.get('CMD_WRONG_SYNTAX', server.lang)}\r\n`{server.cmd_prefix}xp <users> <value>`")
        return False
    display = ""
    for mention in message.mentions:
        if mention.id in server.members.keys():
            server.members[mention.id].xp += amount_to_give
            display += f"{Lang.get('CMD_XP_GIVE', server.lang)}\r\n".format(User.get_at_mention(mention.id), amount_to_give, User.get_at_mention(userid), server.members[mention.id].xp)
    await channel.send(display)
    return True
async def cmd_xp(server, userid, channel, message):
    """Dispatch the "xp" command based on the shape of the message.

    "<prefix>xp <users> <value>" -> give XP to the mentioned users
    "<prefix>xp <users>"         -> display the mentioned users' XP
    "<prefix>xp"                 -> display the caller's own XP
    """
    split_message = message.content.split()
    # Exactly two non-mention tokens (the command word plus the amount)
    # indicates a give request; mentions alone means a display request.
    if len(split_message) > 1 and len(split_message) - len(message.mentions) == 2:
        return await XpGiveCmd.run_cmd(server, userid, channel, message)
    elif len(split_message) > 1 and len(message.mentions) > 0:
        return await XpDisplayOtherCmd.run_cmd(server, userid, channel, message)
    else:
        return await XpDisplaySelfCmd.run_cmd(server, userid, channel, message)
XpDisplaySelfCmd = ServerCmd('xpself', cmd_user_xp_get_self)
XpDisplaySelfCmd.required_perks = ['cmd.xp', 'cmd.xp.display', 'cmd.xp.self']
XpDisplayOtherCmd = ServerCmd('xpother', cmd_user_xp_get_other)
XpDisplayOtherCmd.required_perks = ['cmd.xp', 'cmd.xp.display', 'cmd.xp.display.other']
XpGiveCmd = ServerCmd('xpgive', cmd_user_xp_give)
XpGiveCmd.required_perks = ['cmd.xp', 'cmd.xp.give']
XpCmd = ServerCmd('xp', cmd_xp)
XpCmd.required_perks = ['cmd.xp', 'cmd.xp.give', 'cmd.xp.display', 'cmd.xp.display.other', 'cmd.xp.display.self']
| shoko31/InKeeperBot | cmds/cmd_xp.py | cmd_xp.py | py | 2,704 | python | en | code | 0 | github-code | 36 |
16310369333 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Now there are 8 patterns of packages of Oreo. How many packages should you buy so that you can get all 8 patterns? (calculate the expectation of the number.)
More generally, if there are N patterns, what is the result?
'''
import random
import sys
def findN(n=8):
    """Draw uniformly random patterns (1..n) until every one of the n
    patterns has been seen at least once; return the number of draws."""
    seen = set()
    draws = 0
    while len(seen) < n:
        seen.add(random.randint(1, n))
        draws += 1
    return draws
def MC(n=8, rep=1e6):
    """Monte-Carlo estimate of the expected number of draws needed to
    collect all *n* patterns, averaged over *rep* independent trials.
    Prints a progress marker every 10,000 trials."""
    n = int(n)
    rep = int(rep)
    total = 0
    for trial in range(rep):
        total += findN(n)
        if trial % 1e4 == 0:
            print(trial)
    return total / rep
if __name__=='__main__':
print(MC(*sys.argv[1:]))
| yanglyuxun/my-python-codes | Oreo.py | Oreo.py | py | 662 | python | en | code | 0 | github-code | 36 |
26022845612 | """
Defina una función areaTriangulo, que
consuma un lado y la altura perpendicular a
este y entregue como salida el área del
triángulo. Busque la fórmula para calcularla.
"""
def areaTriangulo():
    """Prompt for a triangle's base and height, then print its area
    (rounded to the nearest integer), using area = base * height / 2."""
    # Generalization: float() instead of int() accepts decimal side
    # lengths as well; integer inputs behave exactly as before.
    base = float(input('Ingrese la base del Triangulo: '))
    altura = float(input('Ingrese la altura del Triangulo: '))
    area = (base * altura) / 2
    print('El area del triangulo es:',round(area))
areaTriangulo() | Boris1409/Python-semestre-1 | PYTHON SEMESTRE 2/Tareas/AreaTriangulo.py | AreaTriangulo.py | py | 427 | python | es | code | 0 | github-code | 36 |
73112544743 | import csv
import re
import numpy as np
class DataUtils(object):
    """
    Loads the raw character-level text-classification data (AG News style)
    and converts each text into a fixed-length vector of character indices.
    """

    def __init__(
        self,
        data_source: str,
        *,
        alphabet: str = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
        batch_size=128,
        input_size: int = 1014,
        num_of_classes: int = 4
    ):
        """
        Initialize the data loader.
        :param data_source: path to the raw CSV data
        :param alphabet: character vocabulary used for indexing
        :param input_size: input feature length (the l0 from the paper)
        :param num_of_classes: number of target classes
        """
        self.alphabet = alphabet
        self.alphabet_size = len(self.alphabet)
        self.batch_size = batch_size
        self.data_source = data_source
        self.length = input_size
        self.num_of_classes = num_of_classes
        # Map each character to a 1-based integer index (0 means "unknown").
        self.char_dict = {}
        for idx, char in enumerate(self.alphabet):
            self.char_dict[char] = idx + 1

    def get_batch_to_indices(self, batch_num=0):
        """
        Return one batch of index vectors and one-hot class labels.
        :param batch_num: zero-based batch number into the shuffled data
        :return: (data, classes)
        """
        data_size = len(self.data)
        start_index = batch_num * self.batch_size
        # Never read past the end of the data set.
        end_index = (
            data_size
            if self.batch_size == 0
            else min((batch_num + 1) * self.batch_size, data_size)
        )
        batch_texts = self.shuffled_data[start_index:end_index]
        # One-hot encoding for the class labels.
        one_hot = np.eye(self.num_of_classes, dtype="int64")
        batch_indices, classes = [], []
        for c, s in batch_texts:
            batch_indices.append(self.str_to_indexes(s))
            # The class number minus one is the one-hot row index.
            classes.append(one_hot[int(c) - 1])
        return np.asarray(batch_indices, dtype="int64"), classes

    def get_length(self):
        """
        Return the number of samples in the data set.
        :return:
        """
        return len(self.data)

    def load_data(self):
        """
        Load the raw data from self.data_source (CSV: label, text columns).
        Returns: None
        """
        data = []
        with open(self.data_source, "r", encoding="utf-8") as f:
            rdr = csv.reader(f, delimiter=",", quotechar='"')
            for row in rdr:
                txt = ""
                for s in row[1:]:
                    # NOTE(review): the replacement string "%1" is literal text,
                    # not a backreference (that would be r"\1") — confirm intent.
                    txt = (
                        txt + " " + re.sub("^\s*(.-)\s*$", "%1", s).replace("\\n", "\n")
                    )
                data.append((int(row[0]), txt))
        self.data = np.array(data)
        self.shuffled_data = self.data
        print("Data loaded from " + self.data_source)

    def shuffle_data(self):
        """
        Shuffle the data set (stored in self.shuffled_data; self.data keeps
        the original order).
        :return:
        """
        data_size = len(self.data)
        shuffle_indices = np.random.permutation(np.arange(data_size))
        self.shuffled_data = self.data[shuffle_indices]

    def str_to_indexes(self, s):
        """
        Convert a string into character indices using the char dictionary.
        :param s: the string to convert
        :return: numpy.ndarray of length self.length
        """
        # Per the paper, case can be ignored for sufficiently large data sets.
        s = s.lower()
        # Never index more than input_size (1014 here) characters.
        max_length = min(len(s), self.length)
        # Zero-initialized so short texts are padded with the "unknown" index.
        str2idx = np.zeros(self.length, dtype="int64")
        for i in range(1, max_length + 1):
            # Characters are mapped in reverse order (last char first).
            c = s[-i]
            if c in self.char_dict:
                str2idx[i - 1] = self.char_dict[c]
        return str2idx
if __name__ == "__main__":
train_data_ins = DataUtils(data_source="./ag_news_csv/train.csv")
train_data_ins.load_data()
train_data_ins.shuffle_data()
batch_indices, classes = train_data_ins.get_batch_to_indices()
print(classes)
exit()
with open("test.vec", "w") as fo:
for i in range(len(train_data_ins.data)):
# 类别
c = train_data_ins.data[i][0]
# 文本
txt = train_data_ins.data[i][1]
# 生成向量
vec = ",".join(map(str, train_data_ins.str_to_indexes(txt)))
fo.write("{}\t{}\n".format(c, vec))
| howie6879/pylab | src/papers/character_level_convolutional_networks_for_text_classification/data_utils.py | data_utils.py | py | 4,334 | python | en | code | 49 | github-code | 36 |
43296825934 | """
Enums.
"""
from pypy.module._cffi_backend import misc
from pypy.module._cffi_backend.ctypeprim import (W_CTypePrimitiveSigned,
W_CTypePrimitiveUnsigned)
class _Mixin_Enum(object):
    """Shared behavior for C enum ctypes: maps enumerator names to values
    and back, and pretty-prints enum cdata. (PyPy RPython code — note the
    Python-2 style dict.iteritems().)"""
    _mixin_ = True

    def __init__(self, space, name, size, align, enumerators, enumvalues):
        self._super.__init__(self, space, size, name, len(name), align)
        self.enumerators2values = {}   # str -> int
        self.enumvalues2erators = {}   # int -> str
        # Iterate in reverse so that, for duplicate values/names, the
        # *earliest* entry wins in both mappings.
        for i in range(len(enumerators)-1, -1, -1):
            self.enumerators2values[enumerators[i]] = enumvalues[i]
            self.enumvalues2erators[enumvalues[i]] = enumerators[i]

    def _fget(self, attrchar):
        # Attribute-style accessors used by the ctype object.
        if attrchar == 'e':     # elements: app-level {value: name} dict
            space = self.space
            w_dct = space.newdict()
            for enumvalue, enumerator in self.enumvalues2erators.iteritems():
                space.setitem(w_dct, space.newint(enumvalue),
                              space.newtext(enumerator))
            return w_dct
        if attrchar == 'R':     # relements: app-level {name: value} dict
            space = self.space
            w_dct = space.newdict()
            for enumerator, enumvalue in self.enumerators2values.iteritems():
                space.setitem(w_dct, space.newtext(enumerator),
                              space.newint(enumvalue))
            return w_dct
        return self._super._fget(self, attrchar)

    def extra_repr(self, cdata):
        # "value: NAME" when the value maps to an enumerator, else just the value.
        value = self._get_value(cdata)
        try:
            s = self.enumvalues2erators[value]
        except KeyError:
            return str(value)
        else:
            return '%s: %s' % (value, s)

    def string(self, cdataobj, maxlen):
        # The enumerator name for this cdata's value (or the value as text).
        with cdataobj as ptr:
            value = self._get_value(ptr)
        try:
            s = self.enumvalues2erators[value]
        except KeyError:
            s = str(value)
        return self.space.newtext(s)
class W_CTypeEnumSigned(_Mixin_Enum, W_CTypePrimitiveSigned):
    """Enum ctype backed by a signed integer primitive."""
    _attrs_ = ['enumerators2values', 'enumvalues2erators']
    _immutable_fields_ = ['enumerators2values', 'enumvalues2erators']
    kind = "enum"
    _super = W_CTypePrimitiveSigned

    def _get_value(self, cdata):
        # returns a signed long
        assert self.value_fits_long
        return misc.read_raw_long_data(cdata, self.size)
class W_CTypeEnumUnsigned(_Mixin_Enum, W_CTypePrimitiveUnsigned):
    """Enum ctype backed by an unsigned integer primitive."""
    _attrs_ = ['enumerators2values', 'enumvalues2erators']
    _immutable_fields_ = ['enumerators2values', 'enumvalues2erators']
    kind = "enum"
    _super = W_CTypePrimitiveUnsigned

    def _get_value(self, cdata):
        # returns an unsigned long
        assert self.value_fits_ulong
        return misc.read_raw_ulong_data(cdata, self.size)
| mozillazg/pypy | pypy/module/_cffi_backend/ctypeenum.py | ctypeenum.py | py | 2,759 | python | en | code | 430 | github-code | 36 |
7052172062 |
from django.core.context_processors import csrf
from django.shortcuts import render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from forms import RegistrationForm, VerificationForm
from .models import User
from twilio.rest import Client
# from django_otp.models import Device
# from django_otp.oath import TOTP
# Create your views here.
def otp_register(request):
    """Register a new user and SMS them their one-time token via Twilio.

    POST: validate the RegistrationForm, create the user, text the token to
    the submitted phone number, then redirect to /otp/verify/<user id>.
    GET (or invalid POST): render the registration form.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        # SECURITY: live Twilio credentials are hard-coded in source control.
        # They should be rotated and loaded from configuration/environment.
        account_sid = 'ACb78b2fd3a07bfb51bc243bbc8b1a08f5' # Found on Twilio Console Dashboard
        auth_token = 'd3892a335ca6e3a1a1d6dc80dddb81b8' # Found on Twilio Console Dashboard
        # Phone number you used to verify your Twilio account
        TwilioNumber = '+13182257674' # Phone number given to you by Twilio
        client = Client(account_sid, auth_token)
        if form.is_valid():
            user = form.save()
            phone_number = form.cleaned_data.get('phone_number')
            token_number = user.token_number
            if user.id:
                # Send the token to the user's phone.
                client.api.account.messages.create(
                    to=phone_number,
                    from_=TwilioNumber,
                    body='I sent a text message from Python!'+str(token_number))
                # user.twiliosmsdevice_set.create(name='SMS',key=token_number, number=phone_number)
                # device = user.twiliosmsdevice_set.get()
                # device.generate_challenge()
            return HttpResponseRedirect('/otp/verify/'+str(user.id))
    else:
        form = RegistrationForm()
    # Invalid POSTs fall through here and re-render the bound form with errors.
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('register.html', context)
def otp_login(request):
    """Log a user in with username/password.

    On success, redirect to the "next" POST field when present; otherwise
    show a link to the verification-status page.
    """
    if request.method == 'POST':
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            # NOTE(review): the literal string 'None' is compared here —
            # presumably the login template renders a missing "next" value
            # as the text "None"; confirm against login.html.
            if request.POST.get('next') != 'None':
                return HttpResponseRedirect(request.POST.get('next'))
            return HttpResponse('User ' + user.username + ' is logged in.' +
                                '<p>Please <a href="/otp/status/">click here</a> to check verification status.</p>')
        else:
            return HttpResponse('User is invalid!' +
                                '<p>Please <a href="/otp/login/">click here</a> to login.</p>')
    else:
        form = AuthenticationForm()
        context = {}
        # Preserve the "next" redirect target through the login form.
        context['next'] = request.GET.get('next')
        context.update(csrf(request))
        context['form'] = form
        return render_to_response('login.html', context)
@login_required(login_url='/otp/login')
def otp_verify(request,pk):
    """Verify the one-time token for user *pk*.

    POST: compare the submitted token against the stored token_number and,
    on a match, mark the user verified. Otherwise render the verify form.
    NOTE(review): User.objects.filter(pk=pk)[0] raises IndexError for an
    unknown pk, and int(token) raises ValueError on non-numeric input —
    both would surface as 500s; confirm whether that is acceptable.
    """
    user_data = User.objects.filter(pk=pk)[0]
    username = user_data.username
    token_number = user_data.token_number
    if request.method == 'POST':
        form = VerificationForm(request.POST)
        token = form.getToken()
        if token:
            user = User.objects.get_by_natural_key(request.user.username)
            # token_number = form.cleaned_data.get('token_number')
            # device = user.twiliosmsdevice_set.get()
            # device = django_otp.devices_for_user(user)
            if user:
                # status = device.verify_token(status)
                # if status:
                if int(token_number) == int(token):
                    user.is_verified = True
                    user.save()
                    return HttpResponse('User: ' + username + '\n' + 'Verified.' +
                                        '<p>Please <a href="/otp/logout/">click here</a> to logout.</p>')
                else:
                    return HttpResponse('User: ' + username + '\n' + 'could not be verified.' +
                                        '<p><a href="/otp/verify/'+str(pk)+'">Click here to generate new token</a></P>')
            else:
                # NOTE(review): "Worng" typo lives in a runtime string; fixing
                # it would change user-visible output.
                return HttpResponse('User: ' + username + ' Worng token!' +
                                    '<p><a href="/otp/verify/'+str(pk)+'">Click here to generate new token</a></P>')
        else:
            # NOTE(review): this else appears to pair with `if token:`, so a
            # plain GET leaves `form` undefined below — verify intended flow.
            form = VerificationForm()
    context = {}
    context.update(csrf(request))
    context['form'] = form
    return render_to_response('verify.html', context)
@login_required(login_url='/otp/login')
def otp_token(request):
    """Intended to (re)generate a token for the logged-in user; the device
    logic is commented out, so currently this only looks the user up and
    redirects to the verification page.
    NOTE(review): other views redirect to '/otp/verify/<pk>'; this one
    omits the pk — confirm the URLconf accepts it.
    """
    user = User.objects.get_by_natural_key(request.user.username)
    # device = user.twiliosmsdevice_set.get()
    # device.generate_challenge()
    return HttpResponseRedirect('/otp/verify')
def otp_status(request):
    """Report whether the current user's account is verified, or prompt
    for login when no user is authenticated."""
    if not request.user.username:
        return HttpResponse('<p>Please <a href="/otp/login/">login</a> to check verification status.</p>')
    user = User.objects.get_by_natural_key(request.user.username)
    if user.is_verified:
        return HttpResponse(user.username + ' is verified.' +
                            '<p>Please <a href="/otp/logout/">click here</a> to logout.</p>')
    return HttpResponse(user.username + ' is not verified.' +
                        '<p><a href="/otp/verify/' + str(user.id) + '">Click here to generate new token</a></P>')
def otp_logout(request):
    """Log the current user out and link back to the login page."""
    logout(request)
    return HttpResponse('You are logged out.<p>Please <a href="/otp/login/">click here</a> to login.</p>')
| sawardekar/Django_OTP | otpapp/views.py | views.py | py | 5,575 | python | en | code | 1 | github-code | 36 |
5049967399 | import array
import struct
from contextlib import contextmanager
from typing import List, Tuple
from cuda import cudart
from cuda.cudart import cudaError_t
from .mapping import Mapping
def _raise_if_error(error: cudaError_t):
    """Raise RuntimeError unless the CUDA runtime call returned cudaSuccess."""
    if error != cudaError_t.cudaSuccess:
        raise RuntimeError(error)
@contextmanager
def peer_access(mapping: Mapping):
    """Context manager that enables CUDA peer access across *mapping*'s
    tensor-parallel group and disables it on exit, even if the body raises."""
    set_peer_access(mapping, True)
    try:
        yield
    finally:
        set_peer_access(mapping, False)
def set_peer_access(mapping: Mapping, enabled: bool = True):
    """Enable or disable CUDA peer access between this rank's device and
    every other device in its tensor-parallel group.

    Raises RuntimeError when a device pair cannot access each other, or when
    the enable/disable call fails with anything other than the
    already-enabled / not-enabled status codes.
    """
    src_node = mapping.rank
    for dest_node in mapping.tp_group:
        if dest_node == src_node:
            continue
        error, result = cudart.cudaDeviceCanAccessPeer(src_node, dest_node)
        _raise_if_error(error)
        if result == 0:
            raise RuntimeError(
                f"Can't enable access between nodes {src_node} and {dest_node}")
        if enabled:
            cudart.cudaDeviceEnablePeerAccess(dest_node, 0)
        else:
            cudart.cudaDeviceDisablePeerAccess(dest_node)
        error = cudart.cudaGetLastError()[0]
        # Toggling peer access twice in the same direction reports
        # "already enabled" / "not enabled"; both are harmless here.
        if error not in [
                cudaError_t.cudaSuccess,
                cudaError_t.cudaErrorPeerAccessAlreadyEnabled,
                cudaError_t.cudaErrorPeerAccessNotEnabled
        ]:
            raise RuntimeError(error)
class IpcMemory():
    """A GPU buffer allocated on every device of a tensor-parallel group and
    shared between the group's processes via CUDA IPC handles."""
    # Sizes (bytes) used by callers of this class.
    IPC_BUFFERS_SIZE = 50331648
    IPC_BARRIERS_SIZE_PER_GPU = 25 * 4  # Max all reduce blocks * sizeof(float)

    def __init__(self, mapping, size):
        # Allocate *size* bytes on this rank's GPU and open IPC pointers to
        # the corresponding buffers of every peer in the TP group.
        self.mapping = mapping
        self.peer_ptrs, self.local_ptr = IpcMemory.open_ipc_memory(
            self.mapping, size, True)

    def __del__(self):
        # Free the local allocation and close the peer IPC mappings.
        IpcMemory.close_ipc_memory(self.mapping, self.peer_ptrs)

    def serialize(self) -> List[int]:
        # Pack the raw peer pointers ("P": native pointer) and re-read the
        # bytes as unsigned 64-bit ints ("Q"), returning plain Python ints.
        buffer = bytes(0)
        for ptr in self.peer_ptrs:
            buffer += struct.pack("P", ptr)
        return array.array("Q", buffer).tolist()

    @staticmethod
    def open_ipc_memory(mapping: Mapping,
                        size: int,
                        set_to_zero: bool = False) -> Tuple[List[int], int]:
        """ Allocates a buffer with the given *size* on each GPU. Then, enables IPC communication between TP groups.
        Returns a list of buffer pointers, buffers[i] is a handle to the corresponding buffer residing on GPU #i.
        Call close_ipc_handle with the *buffer*.
        """
        from mpi4py import MPI
        # Sub-communicator containing only this rank's TP group.
        comm = MPI.COMM_WORLD.Split(mapping.pp_rank, mapping.tp_rank)
        error, local_ptr = cudart.cudaMalloc(size)
        _raise_if_error(error)
        if set_to_zero:
            _raise_if_error(cudart.cudaMemset(local_ptr, 0, size)[0])
        error, local_handle = cudart.cudaIpcGetMemHandle(local_ptr)
        _raise_if_error(error)
        # Exchange the opaque IPC handle bytes with every peer in the group.
        handles_reserved = comm.allgather(local_handle.reserved)
        handles = []
        for reserved in handles_reserved:
            handle = cudart.cudaIpcMemHandle_t()
            handle.reserved = reserved
            handles.append(handle)
        peer_ptrs = []
        for node, handle in enumerate(handles):
            if node == mapping.tp_rank:
                # Our own buffer: use the local device pointer directly.
                peer_ptrs.append(local_ptr)
            else:
                error, ptr = cudart.cudaIpcOpenMemHandle(
                    handle, cudart.cudaIpcMemLazyEnablePeerAccess)
                _raise_if_error(error)
                peer_ptrs.append(ptr)
        return peer_ptrs, local_ptr

    @staticmethod
    def close_ipc_memory(mapping: Mapping, peer_ptrs: List[int]):
        """Free the local buffer and close every peer's IPC mapping."""
        for node, ptr in enumerate(peer_ptrs):
            if node == mapping.tp_rank:
                _raise_if_error(cudart.cudaFree(ptr)[0])
            else:
                _raise_if_error(cudart.cudaIpcCloseMemHandle(ptr)[0])
| NVIDIA/TensorRT-LLM | tensorrt_llm/_ipc_utils.py | _ipc_utils.py | py | 3,753 | python | en | code | 3,328 | github-code | 36 |
20533018449 | import pandas as pd
import numpy as np
import warnings
def FeatureNormalization(X):
    """Standardize each feature column of X to zero mean and unit variance.

    :param X: (m, n) design matrix (m examples, n features)
    :return: (mu, sigma, X_norm) where mu and sigma have shape (1, n)
        and X_norm = (X - mu) / sigma

    Bug fixes vs. the original: np.ones(m, 1) is an invalid call (the
    shape must be a tuple/list, and 1 was interpreted as a dtype), sigma
    was a single scalar over all entries, the "sigma" matrix was built
    from the *mean* matrix, and the result multiplied instead of divided.
    """
    m = np.size(X, axis=0)  # number of training examples
    n = np.size(X, axis=1)  # number of features
    mu = np.reshape(np.mean(X, axis=0), [1, n])     # per-feature mean
    sigma = np.reshape(np.std(X, axis=0), [1, n])   # per-feature std
    X_norm = (X - mu) / sigma                       # broadcasts over rows
    return mu, sigma, X_norm
### Import and clean the train data: keep numeric columns, drop Id, zero-fill NaNs
train = pd.read_csv('data/train.csv')
print('Shape of the train data with all features:', train.shape)
train = train.select_dtypes(exclude=['object']) # Excludes columns with strings
print("")
print('Shape of the train data with numerical features:', train.shape)
train.drop('Id',axis = 1, inplace = True) # Drop first column with name 'Id'
train.fillna(0,inplace=True) # Fill up NaN cells with (0)
print("")
print("List of features contained our dataset:",list(train.columns))
#warnings.filterwarnings('ignore')  # would silence warnings globally (disabled)
# Possibly useful later: feature-name bookkeeping without the target column.
#col_train = list(train.columns)
#col_train_bis = list(train.columns)
#col_train_bis.remove('SalePrice')
mat_train = np.matrix(train)
X = np.matrix(train.drop('SalePrice',axis = 1)) # feature matrix
#mat_y = np.array(train.SalePrice).reshape((1314,1))
Y = np.matrix(train.SalePrice) # target (sale price), shape (1, m)
print("Shape of X: ", np.shape(X))
print("Shape of Y: ", np.shape(np.transpose(Y)))
# Feature normalization
mu, sigma, X_norm = FeatureNormalization(X)
| Neomius/MachineLearning | LinearRegressionGradientDecent.py | LinearRegressionGradientDecent.py | py | 1,502 | python | en | code | 0 | github-code | 36 |
501279690 | import pandas as pd
import numpy as np
class GetAngles(object):
    """
    Computes, for a set of 2-D landmark coordinates, each point's angle
    relative to a chosen origin point (measured from the positive y-axis
    via arccos) together with the point magnitudes.
    """
    def __init__(self):
        self.angle = []            # arccos angles, filled by calculate_angle()
        self.magnitude_list = []   # unused placeholder, kept for compatibility
        self.col_names = []        # unused placeholder, kept for compatibility
        self.origin = 8            # index into self.coor used as the origin
        self.relative_coor = []    # (x, y) pairs relative to the origin
        self.x_coor = []           # column vector of x coordinates, shape (m, 1)
        self.y_coor = []           # column vector of y coordinates, shape (m, 1)
        self.x_relative_coor = []
        self.y_relative_coor = []
        self.coor = []             # stacked (m, 2) coordinates
    # (A commented-out duplicate of get_relative_coordinate was removed.)
    def get_relative_coordinate(self):
        '''
        Sets the selected coordinate to be the origin and re-expresses every
        other coordinate relative to it.
        '''
        origin = self.coor[self.origin]
        dim = np.shape(self.coor)
        ones = np.ones([dim[0], 1])
        x_origin = origin[0]
        y_origin = origin[1]
        x_origin_list = ones*x_origin
        y_origin_list = ones*y_origin
        self.x_relative_coor = self.x_coor - x_origin_list
        # Image y grows downward, so flip it to get a conventional y-up frame.
        self.y_relative_coor = y_origin_list-self.y_coor
        self.relative_coor = np.array([[x[0], y[0]] for x, y in zip(self.x_relative_coor, self.y_relative_coor)])
    def calculate_angle(self):
        """
        Calculates the magnitude and the angle (arccos of y/magnitude, i.e.
        measured from the +y axis) of each relative point.
        Note: the origin point itself has magnitude 0 and yields nan (0/0).
        :return:
        """
        x = self.relative_coor[:, 0]
        y = self.relative_coor[:, 1]
        squared_x = np.square(x)
        squared_y = np.square(y)
        self.mag = np.sqrt(squared_x + squared_y)
        inside = y / self.mag
        self.angle = np.arccos(inside)
    def run(self):
        """Run the full pipeline and return the per-point angles."""
        self.get_relative_coordinate()
        self.calculate_angle()
        return self.angle
class RunGetAngles(object):
    """
    Drives GetAngles over a CSV of per-image landmark coordinates and
    writes the resulting angles (and class labels) to CSV files.
    """
    def __init__(self):
        # Bug fix: self.getangles was assigned twice; one redundant
        # GetAngles() construction has been removed.
        self.getangles = GetAngles()
        self.coor_csv_path = r"C:\Users\nguye\Documents\GitHub\asl_sentence_classification_project\data_csv\coor_orig_test_images_0.5.csv"
        self.angles_save_path = r"C:\Users\nguye\Documents\GitHub\asl_sentence_classification_project\data_csv\test_angle_o9_0.5.csv"
        self.classes_save_path = r"C:\Users\nguye\Documents\GitHub\asl_sentence_classification_project\data_csv\test_class_o9_0.5.csv"
        self.x_coor = []
        self.y_coor = []
        self.coor = []
        self.col_names_angles = []
        self.col_names_points = []
        self.coor_df = pd.DataFrame()
        self.angles = []
        self.classes = []
        self.angles_dict = {}   # row index -> angle vector
        self.angle_df = []
        self.n = 8              # angle column dropped on save ("angle8")
        self.numbers = range(0,67)
    def generate_col_names_angle(self):
        """
        Creates column names ("angle0".."angle66") for each computed angle.
        :return:
        """
        numbers = range(0,67)
        self.col_names_angle = [f'angle{n}' for n in numbers]
    def generate_col_names_point(self):
        """
        Creates column names ("point_<n>_x"/"point_<n>_y") for each point.
        :return:
        """
        self.col_names_points = [f"point_{n}_{_}" for _ in ["x" , "y"] for n in self.numbers]
    def read_csv(self):
        """
        Read the tab-separated coordinates .csv file.
        :return: None
        """
        self.coor_df = pd.read_csv(self.coor_csv_path, sep="\t")
    def get_coor(self, row):
        """
        Reformats one row's point data as (x, y) column vectors.
        :param row: CSV row holding a file's 67 point coordinates.
        :return: the (67, 2) coordinate array
        """
        self.generate_col_names_point()
        self.x_coor = [[row[f"point_{j}_x"]] for j in range(67)]
        self.y_coor = [[row[f"point_{j}_y"]] for j in range(67)]
        self.coor = np.hstack((self.x_coor, self.y_coor))
        return self.coor
    def run_get_angles(self):
        """
        Run GetAngles on every row of the coordinates dataframe.
        :return: None
        """
        for i in range(len(self.coor_df.index)):
            row = self.coor_df.iloc[i]
            self.get_coor(row)
            self.getangles.coor = self.coor
            self.getangles.x_coor = self.x_coor
            self.getangles.y_coor = self.y_coor
            angles = self.getangles.run()
            self.angles_dict[i] = np.array(angles)
        self.classes = self.coor_df["CLASS"]
    def save_to_csv(self):
        """
        Save the angle results and their classes to the configured .csv files,
        dropping the origin point's angle column ("angle" + str(self.n)).
        :return:
        """
        self.generate_col_names_angle()
        self.angle_df = pd.DataFrame.from_dict(self.angles_dict, orient="index", columns=self.col_names_angle)
        self.angle_df = self.angle_df.drop(columns=["angle" + str(self.n)])
        self.angle_df.to_csv(self.angles_save_path, index=False)
        self.classes_df = pd.DataFrame(self.classes, columns=["CLASS"])
        self.classes_df.to_csv(self.classes_save_path, index=False)
    def run(self):
        """
        Full pipeline: read the CSV, compute angles, save the results.
        :return: None
        """
        self.generate_col_names_point()
        self.read_csv()
        self.run_get_angles()
        self.save_to_csv()
if __name__=="__main__":
rungetangles = RunGetAngles()
rungetangles.coor_csv_path = r"C:\Users\nguye\Documents\GitHub\asl_sentence_classification_project\data_csv\coor_train_all.csv"
rungetangles.angles_save_path = r"C:\Users\nguye\Documents\GitHub\asl_sentence_classification_project\data_csv\train_all_angle_o9_0.59.csv"
rungetangles.classes_save_path = r"C:\Users\nguye\Documents\GitHub\asl_sentence_classification_project\data_csv\train_all_class_o9_0.59.csv"
rungetangles.run() | janguyen86/asl_sentence_classification_project | code/angle_calculation.py | angle_calculation.py | py | 5,527 | python | en | code | 1 | github-code | 36 |
2847540723 | # QUs:https://leetcode.com/problems/cousins-in-binary-tree/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def isCousins(self, root, x, y):
        """
        Return True when the nodes holding values x and y are cousins:
        same depth, different parents (LeetCode 993).

        :type root: TreeNode
        :type x: int
        :type y: int
        :rtype: bool
        """
        if x == y:
            return False
        info = {}  # node value -> (depth, parent node)

        def dfs(node, parent, level=0):
            if node is None:
                return
            info[node.val] = (level, parent)
            dfs(node.left, node, level + 1)
            dfs(node.right, node, level + 1)

        dfs(root, None)
        # Robustness fix: a value absent from the tree is simply not a
        # cousin (the original raised KeyError here).
        if x not in info or y not in info:
            return False
        x_level, x_parent = info[x]
        y_level, y_parent = info[y]
        return x_parent is not y_parent and x_level == y_level
| mohitsinghnegi1/CodingQuestions | leetcoding qus/Cousins in Binary Tree.py | Cousins in Binary Tree.py | py | 997 | python | en | code | 2 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.