seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
43252746752 | import pyautogui
import pygame
from pynput.mouse import Button
from protocols.my_protocol import send as my_send
def convert_button(button):
    """Map a pynput mouse ``Button`` to the string name the remote peer expects.

    Any button other than left/right (e.g. the wheel) is reported as 'middle'.
    """
    if button == Button.left:
        return 'left'
    if button == Button.right:
        return 'right'
    return 'middle'
class Mouse:
    """Streams local mouse events to a remote peer over ``client_socket``.

    Intended as a pynput mouse-listener callback object: each handler
    serialises the event as a comma-terminated text command ("pos", "click",
    "release mouse", "scroll") via the custom protocol's ``send``.
    Returning False from a handler stops the pynput listener.
    """
    def __init__(self, client_socket):
        # Socket already connected to the viewing/controlling peer.
        self.client_socket = client_socket
        # Local screen size; currently unused by the handlers below.
        self.display = pyautogui.size()
    def on_move(self, x, y):
        """Forward the current pointer position as "pos x y,"."""
        try:
            # Prefer pygame's window-relative position over the pynput screen
            # coordinates; silently keep (x, y) if pygame is not initialised.
            x, y = pygame.mouse.get_pos()
        except Exception:
            pass
        try:
            my_send(self.client_socket, f"pos {x} {y},".encode())
        except OSError:
            # Socket is gone -- stop the pynput listener.
            return False
    def on_click(self, x, y, button, pressed):
        """Forward press ("click") / release ("release mouse") events."""
        try:
            x, y = pygame.mouse.get_pos()
        except Exception:
            pass
        try:
            if pressed:
                my_send(self.client_socket,f"click {x} {y} {convert_button(button)},".encode())
            else:
                my_send(self.client_socket, f"release mouse {x} {y} {convert_button(button)},".encode())
        except OSError:
            return False
    def on_scroll(self, x, y, dx, dy):
        """Forward scroll deltas (scaled x10) while the pygame window is active."""
        if pygame.display.get_active():
            try:
                my_send(self.client_socket, f"scroll {dx * 10} {dy * 10},".encode())
            except OSError:
                return False
| leosegol/TeamViewer | mouse_funcs/mouse.py | mouse.py | py | 1,421 | python | en | code | 2 | github-code | 13 |
31230015126 | import unittest
from unittest.mock import patch
import src.margin.margin_lookup
from src.margin.margin_processor import MarginProcessor
from src.tests.fixtures.margin.test_utils import TestUtils
class TestMarginProcess(unittest.TestCase):
    """Unit tests for MarginProcessor.process."""
    def setUp(self) -> None:
        # Fixed processing date (YYYYMMDD as an int) shared by all tests.
        processing_date = 20230429
        self.obj = MarginProcessor(processing_date)
    def test_process(self):
        """With the real lookup, the first row keeps clientNumber 1234."""
        row = TestUtils.input_row()
        input_df = TestUtils.input_df()
        actual = self.obj.process(input_df)
        row1 = actual.iloc[0]
        self.assertEqual(0, row1['sequence_number']) # add as
        self.assertEqual(1234, row1['clientNumber']) # a
    @patch('src.margin.margin_lookup.MarginLookup.get_client')
    def test_process_with_patch(self, mock_get_client):
        """With the client lookup mocked, clientNumber comes from the mock (4567)."""
        mock_get_client.return_value = {'clientType': 'FX', 'clientNumber': 4567}
        processing_date = 20230429
        self.obj = MarginProcessor(processing_date)
        row = TestUtils.input_row()
        input_df = TestUtils.input_df()
        actual = self.obj.process(input_df)
        # self.assertEqual(1234, 1234) # add as
        row1= actual.iloc[0]
        self.assertEqual(0, row1['sequence_number']) # add as
        self.assertEqual(4567, row1['clientNumber']) # a
if __name__ == '__main__':
unittest.main()
| mxcheung/margin-py | src/tests/margin/test_margin_processor.py | test_margin_processor.py | py | 1,315 | python | en | code | 0 | github-code | 13 |
14890228911 | import heapq
BUY = 0
SELL = 1
GOLD = 0
SILVER = 1
def solution(req_id, req_info):
    """Simulate a gold-for-silver order book.

    req_id[i] names the trader who placed order req_info[i]; each order is
    [type, amount, price] with type BUY (0) or SELL (1).  Pending buy orders
    live in a max-heap (prices stored negated), pending sell orders in a
    min-heap; trades execute at the resting/ask price.  Returns per-trader
    "name +gold_delta +silver_delta" strings, sorted lexicographically.
    """
    reqs = [[], []]  # reqs[BUY] / reqs[SELL]: unmatched resting orders
    diffs = {}       # trader name -> [gold delta, silver delta]
    for id in req_id:
        diffs[id] = [0, 0]
    for i, req in enumerate(req_info):
        # Register a sell order
        if req[0] == SELL:
            type_, sell_amount, sell_price = req
            while sell_amount > 0:
                # No buyers left -- park the remainder
                if len(reqs[BUY]) == 0:
                    heapq.heappush(reqs[SELL], (sell_price, i, sell_amount))
                    break
                buy_price, buyer_id, buy_amount = heapq.heappop(reqs[BUY])
                buy_price *= -1  # stored negated for max-heap behaviour
                # Best bid below ask -- park both orders, stop matching
                if buy_price < sell_price:
                    heapq.heappush(reqs[BUY], (-buy_price, buyer_id, buy_amount))
                    heapq.heappush(reqs[SELL], (sell_price, i, sell_amount))
                    break
                amount = min(buy_amount, sell_amount)
                silver_amount = amount * sell_price
                diffs[req_id[i]][GOLD] -= amount
                diffs[req_id[i]][SILVER] += silver_amount
                diffs[req_id[buyer_id]][GOLD] += amount
                diffs[req_id[buyer_id]][SILVER] -= silver_amount
                sell_amount -= amount
                buy_amount -= amount
                if buy_amount > 0:
                    heapq.heappush(reqs[BUY], (-buy_price, buyer_id, buy_amount))
        elif req[0] == BUY:
            type_, buy_amount, buy_price = req
            while buy_amount > 0:
                # No sellers left -- park the remainder
                if len(reqs[SELL]) == 0:
                    heapq.heappush(reqs[BUY], (-buy_price, i, buy_amount))
                    break
                sell_price, seller_id, sell_amount = heapq.heappop(reqs[SELL])
                # Best ask above bid -- park both orders, stop matching
                if buy_price < sell_price:
                    heapq.heappush(reqs[BUY], (-buy_price, i, buy_amount))
                    heapq.heappush(reqs[SELL], (sell_price, seller_id, sell_amount))
                    break
                amount = min(buy_amount, sell_amount)
                silver_amount = amount * sell_price  # trade at the resting ask price
                diffs[req_id[i]][GOLD] += amount
                diffs[req_id[i]][SILVER] -= silver_amount
                diffs[req_id[seller_id]][GOLD] -= amount
                diffs[req_id[seller_id]][SILVER] += silver_amount
                sell_amount -= amount
                buy_amount -= amount
                if sell_amount > 0:
                    heapq.heappush(reqs[SELL], (sell_price, seller_id, sell_amount))
    answer = []
    for k, [gold, silver] in diffs.items():
        answer.append(" ".join([k, ('+' if gold > 0 else '') + str(gold), ('+' if silver > 0 else '') + str(silver)]))
    answer.sort()
    return answer
req_id = ["William", "Andy", "Rohan", "Rohan", "Louis", "Andy"]
req_info = [[1, 7, 20], [0, 10, 10], [1, 10, 40], [1, 4, 25], [0, 5, 11], [0, 20, 30]]
print(solution(req_id, req_info)) | jkjan/PS | Line/6.py | 6.py | py | 2,996 | python | en | code | 0 | github-code | 13 |
70502494738 | from django.test import TestCase
from src.shared.errors.AppError import AppError
from src.utils.error_messages import DEPARTMENT_NOT_FOUND ,DEPARTMENT_ALREADY_EXISTS
from ....repositories.departments_repository import DepartmentsRepository
from ...update_department.update_department_use_case import UpdateDepartmentUseCase
class UpdateDepartmentUseCaseTest(TestCase):
    """Django tests for UpdateDepartmentUseCase (happy path and error cases)."""
    def setUp(self):
        self.departments_repository = DepartmentsRepository()
        self.use_case = UpdateDepartmentUseCase(self.departments_repository)
    def test_update_department(self):
        """Updating an existing department changes its fields and sets updated_at."""
        data = {
            'name': 'Department Test 1',
            'description': 'Department Test Description 1',
        }
        data_update = {
            'name': 'Department Test 2',
            'description': 'Department Test Description 2',
        }
        create_department = self.departments_repository.create(data).to_dict()
        updated_department = self.use_case.execute(create_department['id'], data_update)
        self.assertTrue(updated_department['updated_at'] is not None)
        self.assertEqual(updated_department['name'], "Department Test 2")
    def test_update_department_if_not_exists(self):
        """Updating an unknown id raises AppError with DEPARTMENT_NOT_FOUND."""
        data = {
            'name': 'Department Test 1',
            'description': 'Department Test Description 1',
        }
        with self.assertRaises(Exception) as context:
            self.use_case.execute('00000000-0000-0000-0000-000000000000', data)
        self.assertIsInstance(context.exception, AppError)
        self.assertEqual(context.exception.message, DEPARTMENT_NOT_FOUND)
    def test_update_department_with_name_already_exists(self):
        """Renaming to another department's name raises DEPARTMENT_ALREADY_EXISTS."""
        data_one = {
            'name': 'Department Test 1',
            'description': 'Department Test Description 1',
        }
        data_two = {
            'name': 'Department Test 2',
            'description': 'Department Test Description 2',
        }
        data_update = {
            'name': 'Department Test 1',
            'description': 'Department Test Description 1',
        }
        self.departments_repository.create(data_one)
        create_department_two = self.departments_repository.create(data_two).to_dict()
        with self.assertRaises(Exception) as context:
            self.use_case.execute(create_department_two['id'], data_update)
        self.assertIsInstance(context.exception, AppError)
        self.assertEqual(context.exception.message, DEPARTMENT_ALREADY_EXISTS)
14399435639 | from enum import Enum
class Cmp(Enum):
    """Three-way comparison outcome used by `less` for packet ordering."""
    LESS = 1
    EQUAL = 2
    GREATER = 3
class Signal:
    """Wraps a parsed packet so nested lists sort via the `less` rules.

    Only __lt__ and __eq__ are defined, which is all list.sort needs.
    NOTE: defining __eq__ without __hash__ makes instances unhashable.
    """
    def __init__(self, signal):
        self.signal = signal
    def __lt__(self, other):
        return less(self.signal, other.signal) == Cmp.LESS
    def __eq__(self, other):
        return self.signal == other.signal
def less(left, right):
    """Three-way compare two packet values (AoC 2022 day 13 rules).

    Lists compare element-wise with the first non-equal element deciding,
    then by length; a bare int is promoted to a one-element list when
    compared against a list; two ints compare numerically.
    Returns a Cmp member.
    """
    left_is_list = isinstance(left, list)
    right_is_list = isinstance(right, list)
    if left_is_list and right_is_list:
        for left_item, right_item in zip(left, right):
            outcome = less(left_item, right_item)
            if outcome != Cmp.EQUAL:
                return outcome
        # All shared positions equal: the shorter list is the lesser one.
        if len(left) < len(right):
            return Cmp.LESS
        if len(left) > len(right):
            return Cmp.GREATER
        return Cmp.EQUAL
    if left_is_list:
        return less(left, [right])
    if right_is_list:
        return less([left], right)
    # Both are ints.
    if left < right:
        return Cmp.LESS
    if left > right:
        return Cmp.GREATER
    return Cmp.EQUAL
def parse_part_one(filename):
    """Read non-blank lines pairwise and eval each into a nested Python list.

    WARNING: eval() executes arbitrary code from the file; acceptable here
    only because the puzzle input is trusted.
    """
    pairs = []
    with open(filename) as file:
        lines = [line for line in file.readlines() if line != "\n"]
        for (left, right) in zip(lines[:-1:2], lines[1::2]):
            pairs.append((eval(left), eval(right)))
    return pairs
def part_one():
    """Sum the 1-based indices of pairs already in the right order and print it."""
    pairs = parse_part_one("input.txt")
    counter = 0
    for (index, pair) in enumerate(pairs):
        if less(pair[0], pair[1]) == Cmp.LESS:
            counter += index + 1
    print("Part one: ", counter)
def part_two():
    """Sort all packets plus the [[2]]/[[6]] dividers; print the product of the dividers' 1-based positions."""
    with open("input.txt") as file:
        signals = [Signal(eval(line)) for line in file.readlines() if line != "\n"]
    signals += [Signal([[2]]), Signal([[6]])]
    signals.sort()
    ixs = [ix + 1 for (ix, sig) in enumerate(signals) if sig.signal == [[2]] or sig.signal == [[6]]]
    score = 1
    for ix in ixs:
        score *= ix
    print("Part two: ", score)
if __name__ == "__main__":
part_one()
part_two()
| batconjurer/adventofcode | aoc2022/day13/main.py | main.py | py | 2,115 | python | en | code | 0 | github-code | 13 |
37846586695 | import math
import sys
import pandas as pd
import yfinance as yf
''' The original equation for finding the intrinsic value of a stock (Benjamin Graham) '''
def intrinsic_value_equation_original(eps, growth_rate, current_corporate_bonds_yield):
    """Benjamin Graham's original intrinsic-value formula.

    V = EPS * (8.5 + 2g) * 4.4 / Y, where 8.5 is the base P/E for a
    no-growth company, g the expected growth rate (in percent), 4.4 the
    historical average AAA corporate bond yield and Y the current yield.
    """
    BASE_PE_NO_GROWTH = 8.5
    AVG_AAA_BOND_YIELD = 4.4
    graham_value = eps * (BASE_PE_NO_GROWTH + 2 * growth_rate)
    return graham_value * AVG_AAA_BOND_YIELD / current_corporate_bonds_yield
''' The updated equation for finding the intrinsic value of a stock '''
def intrinsic_value_equation_updated(eps, growth_rate, current_corporate_bonds_yield):
    """Revised (more conservative) Graham formula.

    V = EPS * (7 + g) * 4.4 / Y -- lower base P/E (7) and a 1x growth
    multiplier instead of the original 8.5 and 2x.
    """
    BASE_PE_NO_GROWTH = 7
    AVG_AAA_BOND_YIELD = 4.4
    graham_value = eps * (BASE_PE_NO_GROWTH + 1 * growth_rate)
    return graham_value * AVG_AAA_BOND_YIELD / current_corporate_bonds_yield
def buy_or_sell_position(stock_current_price, stock_intrinsic_value):
    """Print a BUY/SELL verdict: BUY when the price is below intrinsic value
    minus a 35% margin of safety, SELL otherwise.  Always returns 0.
    """
    MARGIN_OF_SAFETY = 35
    difference = stock_current_price / stock_intrinsic_value * 100
    acceptable_buy_price = (100 - MARGIN_OF_SAFETY) / 100 * stock_intrinsic_value
    verdict = "BUY" if stock_current_price < acceptable_buy_price else "SELL"
    print("----------------------------------------------------------------")
    print("Intrinsic Value of Stock:\t{0:.2f}".format(stock_intrinsic_value))
    print("Current Stock Price:\t\t{0:.2f}".format(stock_current_price))
    print("Difference:\t\t\t{0:.2f}".format(difference))
    print("Margin of Safety:\t\t{0:.2f}".format(MARGIN_OF_SAFETY))
    print("Acceptable Buy Price:\t\t{0:.2f}".format(acceptable_buy_price))
    print("Buy or Sell?:\t\t\t{0}".format(verdict))
    return 0
def single_stock():
    """Prompt for a ticker, fetch fundamentals via yfinance, and print the
    intrinsic value under both formulas plus a BUY/SELL verdict.  Returns 0.
    """
    stock_name = input("Please enter a stock ID: ").upper()
    stock = yf.Ticker(stock_name).info
    eps = stock['trailingEps']
    growth_rate = stock['revenueGrowth'] * 100  # fraction -> percent
    # VCIT used as a proxy for the current corporate bond yield.
    corporate_bond = yf.Ticker("VCIT").info
    # NOTE(review): assumes bondRatings[2] carries the 'aaa' weight as a
    # fraction -- this indexing is fragile against yfinance schema changes.
    current_corporate_bonds_yield = corporate_bond['bondRatings'][2]['aaa'] * 100
    original_intrinsic_value = intrinsic_value_equation_original(eps=eps, growth_rate=growth_rate, current_corporate_bonds_yield=current_corporate_bonds_yield)
    new_intrinsic_value = intrinsic_value_equation_updated(eps=eps, growth_rate=growth_rate, current_corporate_bonds_yield=current_corporate_bonds_yield)
    print("----------------------------------------------------------------")
    print("Stock:\t\t\t{0}".format(stock_name))
    print("Original equation:\t{0:.2f}".format(original_intrinsic_value))
    print("Updated equation:\t{0:.2f}".format(new_intrinsic_value))
    print("----------------------------------------------------------------")
    stock_current_price = stock['currentPrice']
    print("\n----------------------------------------------------------------")
    print("\t\tOriginal Equation")
    buy_or_sell_position(stock_current_price=stock_current_price, stock_intrinsic_value=original_intrinsic_value)
    print("\n----------------------------------------------------------------")
    print("\t\tNew Equation")
    buy_or_sell_position(stock_current_price=stock_current_price, stock_intrinsic_value=new_intrinsic_value)
    return 0
def multiple_stocks():
    """Collect up to 10 ticker symbols interactively and print the list.

    NOTE(review): the collected list is only printed, never analysed --
    presumably a batch version of single_stock() was planned; confirm.
    """
    stocks = []
    print("Please enter up to 10 stocks (if you want to stop type 'stop', 'STOP', or '')")
    print("----------------------------------------------------------------")
    for i in range(10):
        stock = input("Please enter a stock ID: ").upper()
        # .upper() means any case of 'stop' matches; '' also ends input.
        if stock == 'STOP' or stock == '':
            break
        else:
            stocks.append(stock)
    print("----------------------------------------------------------------")
    print(stocks)
    return 0
if __name__ == "__main__":
single_stock()
#multiple_stocks() | damonsward/intrinsic_value_calculator | calculator.py | calculator.py | py | 3,597 | python | en | code | 0 | github-code | 13 |
18541104797 | import scrapy
from scrapy.http import FormRequest
from ..items import FooddataspiderItem
class QuoteSpider(scrapy.Spider):
    """Logs in to quotes.toscrape.com, then scrapes quote/author/tag items."""
    name = 'quotes'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = [
        'http://quotes.toscrape.com'
    ]
    def parse(self, response, **kwargs):
        """Extract the CSRF token from the login form and submit credentials.

        NOTE(review): the quote/text/author loop below is dead code -- the
        values are overwritten each iteration and never used; the same data
        is re-extracted in start_scraping.
        """
        quotes = response.xpath("//div[@class='quote']")
        for quote in quotes:
            text = quote.xpath(
                ".//span[@class='text']/text()").extract_first()
            author = quote.xpath(
                ".//small//text()"
            ).extract_first()
        # First form input value on the page is the CSRF token.
        token = response.css('form input::attr(value)').extract_first()
        # The demo site accepts any username/password combination.
        return FormRequest.from_response(response, formdata={
            'csrf_token':token,
            'username':'someone',
            'password':'something'
        }, callback = self.start_scraping)
    def start_scraping(self, response):
        """Yield one item per quote on the post-login page.

        NOTE(review): a single FooddataspiderItem instance is reused and
        mutated across iterations -- verify the pipeline copies items.
        """
        items = FooddataspiderItem()
        all_div_quotes = response.css("div.quote")
        for quotes in all_div_quotes:
            title = quotes.css("span.text::text").extract()
            author = quotes.css('.author::text').extract()
            tag = quotes.css('.tag::text').extract()
            items['title'] = title
            items['author'] = author
            items['tag'] = tag
            yield items
yield items | naruavi/zomatoSpider | foodDataSpider/foodDataSpider/spiders/quote_spider.py | quote_spider.py | py | 1,323 | python | en | code | 0 | github-code | 13 |
40690809582 | #!/usr/bin/python
"""
Add docstring here
"""
from flask import request
from flask_restful_swagger_2 import Resource, swagger
from mongoalchemy.exceptions import ExtraValueException
from qube.src.api.decorators import login_required
from qube.src.api.swagger_models.omartestpy import omartestpyModel # noqa: ignore=I100
from qube.src.api.swagger_models.omartestpy import omartestpyModelPost # noqa: ignore=I100
from qube.src.api.swagger_models.omartestpy import omartestpyModelPostResponse # noqa: ignore=I100
from qube.src.api.swagger_models.omartestpy import omartestpyModelPut # noqa: ignore=I100
from qube.src.api.swagger_models.parameters import (
body_post_ex, body_put_ex, header_ex, path_ex, query_ex)
from qube.src.api.swagger_models.response_messages import (
del_response_msgs, ErrorModel, get_response_msgs, post_response_msgs,
put_response_msgs)
from qube.src.commons.error import omartestpyServiceError
from qube.src.commons.log import Log as LOG
from qube.src.commons.utils import clean_nonserializable_attributes
from qube.src.services.omartestpyservice import omartestpyService
EMPTY = ''
get_details_params = [header_ex, path_ex, query_ex]
put_params = [header_ex, path_ex, body_put_ex]
delete_params = [header_ex, path_ex]
get_params = [header_ex]
post_params = [header_ex, body_post_ex]
class omartestpyItemController(Resource):
    """REST resource for a single omartestpy item: GET/PUT/DELETE by id.

    Service and validation errors are mapped to (ErrorModel, HTTP status)
    tuples; successful PUT/DELETE return an empty body with 204.
    """
    @swagger.doc(
        {
            'tags': ['omartestpy'],
            'description': 'omartestpy get operation',
            'parameters': get_details_params,
            'responses': get_response_msgs
        }
    )
    @login_required
    def get(self, authcontext, entity_id):
        """Return the omartestpy item with the given id.
        """
        try:
            LOG.debug("Get details by id %s ", entity_id)
            data = omartestpyService(authcontext['context'])\
                .find_by_id(entity_id)
            # Drop DB/internal fields so the payload fits the swagger model.
            clean_nonserializable_attributes(data)
        except omartestpyServiceError as e:
            LOG.error(e)
            return ErrorModel(**{'error_code': str(e.errors.value),
                                 'error_message': e.args[0]}), e.errors
        except ValueError as e:
            LOG.error(e)
            return ErrorModel(**{'error_code': '400',
                                 'error_message': e.args[0]}), 400
        return omartestpyModel(**data), 200
    @swagger.doc(
        {
            'tags': ['omartestpy'],
            'description': 'omartestpy put operation',
            'parameters': put_params,
            'responses': put_response_msgs
        }
    )
    @login_required
    def put(self, authcontext, entity_id):
        """
        Update an existing omartestpy item from the JSON request body.
        """
        try:
            model = omartestpyModelPut(**request.get_json())
            context = authcontext['context']
            omartestpyService(context).update(model, entity_id)
            return EMPTY, 204
        except omartestpyServiceError as e:
            LOG.error(e)
            return ErrorModel(**{'error_code': str(e.errors.value),
                                 'error_message': e.args[0]}), e.errors
        except ValueError as e:
            LOG.error(e)
            return ErrorModel(**{'error_code': '400',
                                 'error_message': e.args[0]}), 400
        except Exception as ex:
            # Catch-all boundary handler: report as 500 after logging.
            LOG.error(ex)
            return ErrorModel(**{'error_code': '500',
                                 'error_message': ex.args[0]}), 500
    @swagger.doc(
        {
            'tags': ['omartestpy'],
            'description': 'omartestpy delete operation',
            'parameters': delete_params,
            'responses': del_response_msgs
        }
    )
    @login_required
    def delete(self, authcontext, entity_id):
        """
        Delete the omartestpy item with the given id.
        """
        try:
            omartestpyService(authcontext['context']).delete(entity_id)
            return EMPTY, 204
        except omartestpyServiceError as e:
            LOG.error(e)
            return ErrorModel(**{'error_code': str(e.errors.value),
                                 'error_message': e.args[0]}), e.errors
        except ValueError as e:
            LOG.error(e)
            return ErrorModel(**{'error_code': '400',
                                 'error_message': e.args[0]}), 400
        except Exception as ex:
            LOG.error(ex)
            return ErrorModel(**{'error_code': '500',
                                 'error_message': ex.args[0]}), 500
class omartestpyController(Resource):
    """REST resource for the omartestpy collection: GET (list) and POST (create)."""
    @swagger.doc(
        {
            'tags': ['omartestpy'],
            'description': 'omartestpy get operation',
            'parameters': get_params,
            'responses': get_response_msgs
        }
    )
    @login_required
    def get(self, authcontext):
        """
        Return all omartestpy items visible to the caller's context.
        """
        LOG.debug("Serving Get all request")
        list = omartestpyService(authcontext['context']).get_all()
        # normalize the name for 'id'
        return list, 200
    @swagger.doc(
        {
            'tags': ['omartestpy'],
            'description': 'omartestpy create operation',
            'parameters': post_params,
            'responses': post_response_msgs
        }
    )
    @login_required
    def post(self, authcontext):
        """
        Create an omartestpy item from the JSON body; 201 with Location header.
        """
        try:
            model = omartestpyModelPost(**request.get_json())
            result = omartestpyService(authcontext['context'])\
                .save(model)
            # Copy only the fields declared on the response swagger model.
            response = omartestpyModelPostResponse()
            for key in response.properties:
                response[key] = result[key]
            return (response, 201,
                    {'Location': request.path + '/' + str(response['id'])})
        except ValueError as e:
            LOG.error(e)
            # BUG FIX: plain ValueError has no `.errors` attribute -- the
            # original `str(e.errors.value)` raised AttributeError here.
            # Use the literal '400' code, consistent with the item controller.
            return ErrorModel(**{'error_code': '400',
                                 'error_message': e.args[0]}), 400
        except ExtraValueException as e:
            LOG.error(e)
            return ErrorModel(**{'error_code': '400',
                                 'error_message': "{} is not valid input".
                                format(e.args[0])}), 400
        except Exception as ex:
            LOG.error(ex)
            return ErrorModel(**{'error_code': '500',
                                 'error_message': ex.args[0]}), 500
| qubeomar/omartestpy | qube/src/api/omartestpycontroller.py | omartestpycontroller.py | py | 6,437 | python | en | code | 0 | github-code | 13 |
42091982349 | import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
numNeurons = 50
steps = 1
dtMat = .025 *np.ones((numNeurons, 1), dtype=np.float32)
aMat = .02*np.ones((numNeurons, 1), dtype=np.float32)
bMat = .2*np.ones((numNeurons, 1), dtype=np.float32)
cMat = -65. * np.ones((numNeurons, 1), dtype=np.float32)
dMat = 2. * np.ones((numNeurons, 1), dtype=np.float32)
vMat = -65. * np.ones((numNeurons, 1), dtype=np.float32)
#vMat = -65 *np.random.randint(2, size=(numNeurons, 1))
print('vMat:\n', vMat)
uMat = 0. * np.random.randint(2, size=(numNeurons, 1))
dtN = np.ones((numNeurons, 1), dtype=np.float32)
vN = np.ones((numNeurons, 1), dtype=np.float32)
uN = np.ones((numNeurons, 1), dtype=np.float32)
connect = np.random.randint(2, size=(numNeurons, numNeurons))
#connect = np.random.random((numNeurons, numNeurons))
print('con:\n', connect)
def getI(vMat, connect, i):
    """Synaptic input current for neuron i: count of outgoing connections
    counted only while the neuron's own potential exceeds 25, plus a
    constant bias of 2.

    NOTE(review): the threshold test reads vMat[i] (the neuron itself),
    not vMat[j] (the presynaptic neuron) -- presumably the latter was
    intended; confirm before changing, behaviour is preserved here.
    """
    I = 0
    # Hoist the loop-invariant membrane test; iterate over the actual row
    # length instead of the hidden module-level `numNeurons` global.
    if vMat[i] > 25:
        for j in range(len(connect[i])):
            if connect[i][j] == 1:
                I += 1
    return I + 2
spikes = np.zeros((numNeurons, int(np.ceil(steps/.025))))
t = 0
while t < steps:
if isinstance(t, int):
print()
print(t,':', end=" ")
for i in range(0, numNeurons):
I = getI(vMat, connect, i)
if isinstance(t, int):
print(I, end=" ")
u = uMat[i]
v = vMat[i]
a = aMat[i]
b = bMat[i]
c = cMat[i]
d = dMat[i]
dt = dtMat[i]
if v > 30:
spikes[i][t] = 1
v = c
u = u + d
dv = (.04 *( v ** 2)) + (5 * v) + 140 - u + 5
du = a * ((b*v) - u)
v = v + dt*dv
u = u + dt*du
uN[i] = u
vN[i] = v
#print('new vMat:\n', vMat)
uMat = uN
vMat = vN
#print(vMat)
t += dt
G=nx.from_numpy_matrix(connect)
print('graph')
nx.draw(G)
plt.show()
print()
print(vMat)
| kwcooper/CogSimulations | neuralNets/izhikevichModel/network/ichNetKV2.py | ichNetKV2.py | py | 1,946 | python | en | code | 3 | github-code | 13 |
11586324994 | #!/bin/env python3
# Evan Widloski - 2020-03-28
# test registration on upscaled AIA data
from skimage.transform import resize
from mas.strand_generator import StrandVideo, get_visors_noise
from mas.tracking import guizar_multiframe, correlate_and_sum, shift_and_sum, guizar_upsample
from mas.misc import combination_experiment
from html_slider.html_slider import render_pandas
import numpy as np
from imageio import imread
resolution_ratio = 2
fov_ratio = 2
scene = imread('scene.bmp')
size = np.array((750, 750))
scene = resize(scene, size * resolution_ratio * fov_ratio)
def experiment(*, max_count, background_noise, drift_velocity, frame_rate):
    """Run one strand-video registration/coaddition trial.

    Keyword-only sweep parameters are supplied by combination_experiment.
    Returns a dict with the first frame and the coadded reconstruction
    (both downsampled to 300x300) plus estimated vs. true drift.
    """
    # noise
    noise_model = get_visors_noise(background=background_noise)
    sv = StrandVideo(
        ccd_size=size,
        start=((1400, 1300)),
        scene=scene,
        # drift_velocity is passed in scaled units -- assumes StrandVideo
        # expects the value x1e-3 of the sweep parameter; TODO confirm units.
        max_count=max_count,
        noise_model=noise_model,
        drift_velocity=drift_velocity * 1e-3,
        resolution_ratio=resolution_ratio,
        fov_ratio=fov_ratio,
        frame_rate=frame_rate
    )
    # register: cross-correlate frames and estimate inter-frame drift
    corr_sum = correlate_and_sum(sv.frames)
    drift, _ = guizar_multiframe(corr_sum)
    # drift_u, _ = guizar_upsample(corr_sum)
    # reconstruct: shift frames along the estimated drift and sum
    coadded = shift_and_sum(sv.frames, drift, mode='crop')
    coadded = resize(coadded, (300, 300))
    frame = resize(sv.frames[0], (300, 300))
    return {
        'coadded': coadded,
        'frame': frame,
        'est_drift': drift,
        # 'est_drift_u': drift_u,
        'true_drift': sv.true_drift,
        # 'd': d
    }
# %% experiment
results = combination_experiment(
experiment,
# max_count=[20, 40, 80],
# background_noise=[25, 50, 100, 200],
# drift_velocity=[0.05, 0.1, 0.2, 0.4],
# frame_rate=[4, 6, 8]
max_count=[80],
background_noise=[100],
drift_velocity=[0.2],
frame_rate=[4]
)
# results.to_pickle('results.pkl')
# %% slider
render_pandas(
results,
output='noisy_frame',
slider_cols=['max_count', 'background_noise', 'drift_velocity', 'frame_rate'],
slider_defaults=[0, 2, 2, 0],
im_col='frame'
)
render_pandas(
results,
output='coadded',
slider_cols=['max_count', 'background_noise', 'drift_velocity', 'frame_rate'],
slider_defaults=[0, 2, 2, 0],
indicator_cols=['true_drift', 'est_drift'],
im_col='coadded'
)
| UIUC-SINE/old_website | content/reports/2020-03-28_aia/main.py | main.py | py | 2,332 | python | en | code | 1 | github-code | 13 |
def DataBundlePurchase(true_pin, balance):
    """Entry point: authenticate via PIN, then show the account menu.

    Returns:
        (True, balance) when the menu reports a successful purchase (1),
        (False, None) when authentication succeeded but no purchase happened,
        (message, 'account_locked') after three failed PIN attempts,
        False on an unexpected error.
    """
    print('Welcome to The Phone People!')
    try:
        # Evaluate the three PIN attempts exactly once.  The original code
        # called pin_attempts() again in an elif branch, silently granting
        # the user up to six attempts before locking the account.
        if pin_attempts(true_pin):
            if menu(balance) == 1:
                return True, balance
            return False, None
        return ('Your account has been locked, please contact us at 020 123 456.',
                'account_locked')
    except Exception as e:
        # The original had per-type handlers whose bodies were bare string
        # expressions (no print) -- silent no-ops; log the error instead.
        print(e)
        return False
# Main pin checking function, calling 'check_pin which asks for pin
# Main pin checking function, calling 'check_pin' which asks for the pin
def pin_attempts(true_pin):
    """Allow up to three PIN attempts; True on success, False after three
    failures or an unexpected error.

    The original repeated the check/print pair three times; a loop keeps
    the prompt sequence identical without the duplication.
    """
    try:
        for retry_message in (None,
                              '\nPlease try again (second attempt)',
                              '\nPlease try again (final attempt)'):
            if retry_message is not None:
                print(retry_message)
            if check_pin(true_pin):
                return True
        return False
    except Exception as e:
        print(e)
        return False
# Pin checking
# Pin checking
def check_pin(true_pin):
    """Prompt once for the PIN; True on an exact match, False otherwise.

    A wrong-length attempt raises ValueError which is caught locally so the
    4-digit hint is printed before returning False.
    """
    attempt = input('Enter your pin: ')
    try:
        if attempt == true_pin:
            return True
        elif attempt != true_pin:
            if len(attempt) != 4:
                raise ValueError('\n(Hint: the pin is 4 digits long)')
            return False
        else:
            # Unreachable: the two branches above are exhaustive.
            raise Exception('Oops something happened whilst checking your pin, please wait for assistance')
    except Exception as e:
        print(e)
        return False
def menu(balance):
    """Show the account menu and act on the user's 1/2 selection.

    NOTE(review): this function always returns None, yet the caller
    compares the result to 1 -- a success return value appears to be
    missing; confirm the intended contract before adding one.
    """
    print('Would you like to:\n1 Check balance\n2 Purchase data bundle\n')
    try:
        selection = int(input('Enter 1/2: '))
    except Exception as e:
        print(e)
    else:
        if selection == 1:
            print('You have {} remaining in your account'.format(balance))
        elif selection == 2:
            # Call mob_num_check exactly once.  The original re-called it in
            # an elif on failure, forcing the user through the whole number
            # entry twice before the failure message appeared.
            if mob_num_check():
                # FUNCTION FOR TOP UP still to be implemented.
                print('func for top up to come')
            else:
                print('Mobile number verification failed')
            # RETURN TO MAIN MENU?
def mob_num_check():
    """Ask for a mobile number twice and confirm the two entries match.

    Returns True when both entries are equal; returns None (falsy) after a
    handled error.  NOTE(review): the length checks enforce 4 digits, but
    the prompt and TODO comments suggest 11 was intended -- confirm.
    """
    try:
        # Sentinels chosen unequal so the loop body runs at least once.
        mob1 = 1111
        mob2 = 2222
        print('*****')
        # CURRENTLY THE ERRORS DO NOT WORK RE LENGTH
        while mob1 != mob2:
            mob1 = int(input('Enter your mobile number: '))
            if len(str(mob1)) == 4:
                mob2 = int(input('Confirm your mobile number: '))
            if len(str(mob1)) != 4:
                # CHANGE ABOVE TO 11!
                raise ValueError('\n(Hint: please enter 11 digits)')
        if mob1 == mob2:
            return True
    except (TypeError, ValueError):
        print('Oops something went wrong!')
    except Exception as e:
        print(e)
| mabely/module3 | ch02_validation/data_bundle_validation.py | data_bundle_validation.py | py | 3,076 | python | en | code | 0 | github-code | 13 |
32812281281 | import numpy as np
import time
import string
import torch
import os
from main.objects.Config import Config
from main.objects.Batcher import Batcher
from main.objects.Vocab import Vocab
from main.objects.Tokenizer import Char
from main.objects.Scorer import Scorer
from main.objects.Writer import Writer
'''
Evaluator object which scores the model and write the model predictions
'''
class Evaluator(object):
    """Scores a model on dev/test batches and writes test predictions.

    In "dev" mode it tracks the best MAP seen and checkpoints the model;
    in "test" mode it additionally writes predictions via Writer.
    Scoring is skipped entirely for sharded outputs (no labels available).
    """
    def __init__(self, config, vocab, tokenizer, input_type, exp_dir, list_k, labeled_file=None, output_file=None):
        '''
        param config: configuration to use for evaluation
        param vocab: vocabulary to use
        param tokenizer: tokenizer to use
        param input_type: input type of either dev/test
        param exp_dir: experiment directory to save output
        param list_k: list of k to evaluate hits@k
        param labeled_file: labeled file to use for labels (default is specified in config )
        param output_file: output file to use for writing prediction
        '''
        self.batcher = Batcher(config, input_type, tokenizer, labeled_file)
        self.input_type = input_type
        self.list_k = list_k
        if self.input_type == "dev":
            self.best_dev_score = 0
            self.score_filename = os.path.join(exp_dir, "dev_scores.json")
            self.best_model_filename = os.path.join(exp_dir, "best_model")
        elif self.input_type == "test":
            if output_file is not None:
                self.test_file = output_file
            else:
                self.test_file = os.path.join(exp_dir, "test.predictions")
            self.score_filename = os.path.join(exp_dir, "test_scores.json")
            self.writer = Writer(self.test_file)
        self.output_file = None
        if output_file:
            self.output_file = output_file
        # "shard" output files carry no labels, so scoring is disabled.
        self.score = True
        if self.output_file is not None and "shard" in self.output_file:
            self.score = False
    def evaluate(self, model, train_num_batches):
        '''
        Evaluates the model by scoring it and writing its predictions
        param train_num_batches: number of batches the model has trained on
        '''
        if self.score == True:
            scorer = Scorer(self.list_k, self.score_filename, train_num_batches)
        # Score the model batch by batch
        for qry_tk, qry, cnd_tk, cnd, labels, end_block in self.batcher.get_dev_test_batches():
            scores = model.score_dev_test_batch(qry_tk, cnd_tk)
            # Move scores to CPU and flatten the (batch, 1) tensor to a list.
            scores = list(scores.cpu().data.numpy().squeeze(1))
            # Adds the batch of scores to Scorer
            if self.score == True:
                scorer.add_batch_pred_scores(qry_tk, scores, labels, end_block)
            # Adds the batch of predictions to Writer
            if self.input_type == "test":
                self.writer.add_batch_pred_lab(qry, cnd, labels, scores)
        # Calculate the scores and save if best so far
        if self.score == True:
            map_score = scorer.calc_scores()
            if self.input_type == "dev":
                if map_score > self.best_dev_score:
                    torch.save(model, self.best_model_filename)
                    self.best_dev_score = map_score
| iesl/stance | src/main/objects/Evaluator.py | Evaluator.py | py | 3,250 | python | en | code | 32 | github-code | 13 |
def slices(series, length):
    """Return every contiguous substring of `series` with the given `length`.

    Special case: length == 0 returns the int 1 (not a list) -- kept because
    largest_product relies on it as the empty-product sentinel.
    Raises ValueError for an empty series, a negative length, or a length
    longer than the series.
    """
    results = []
    if length == 0:
        return 1
    elif len(series) == 0 or length < 0 or len(series) < length:
        raise ValueError("The series needs to have more than one element, \
        the size needs to be a positive number and shorter than \
        the length of the series. Please try again.")
    for i in range(0, len(series) - (length - 1)):
        my_slice = series[i:i+length]
        results.append(my_slice)
    return results
def largest_product(series, length):
    """Return the largest product of any `length` consecutive digits of
    `series`.  By convention the empty product (length 0) is 1.
    Input validation is delegated to slices().
    """
    windows = slices(series, length)
    if windows == 1:
        # slices() signals length == 0 with the sentinel int 1.
        return 1
    best = None
    for window in windows:
        product = 1
        for digit in window:
            product *= int(digit)
        if best is None or product > best:
            best = product
    return best
| CatalinPetre/Exercism | python/largest-series-product/largest_series_product.py | largest_series_product.py | py | 829 | python | en | code | 0 | github-code | 13 |
74564217938 | #!/usr/bin/env python
"""
_SetPhEDExStatus_
MySQL implementation of DBSBufferFiles.SetPhEDExStatus
"""
from WMCore.Database.DBFormatter import DBFormatter
class SetPhEDExStatus(DBFormatter):
    """Set the in_phedex flag on dbsbuffer_file rows identified by LFN."""

    sql = "UPDATE dbsbuffer_file SET in_phedex = :status WHERE lfn = :lfn"

    def execute(self, lfns, status, conn = None, transaction = False):
        """Run the update for every LFN (a single LFN is also accepted).

        No-op when the LFN list is empty.
        """
        if not isinstance(lfns, list):
            lfns = [lfns]
        if not lfns:
            return
        binds = [{"lfn": lfn, "status": status} for lfn in lfns]
        self.dbi.processData(self.sql, binds, conn = conn,
                             transaction = transaction)
        return
| dmwm/WMCore | src/python/WMComponent/DBS3Buffer/MySQL/DBSBufferFiles/SetPhEDExStatus.py | SetPhEDExStatus.py | py | 696 | python | en | code | 44 | github-code | 13 |
33178352537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import dlite
thisdir = os.path.abspath(os.path.dirname(__file__))
class Person:
    """Plain data holder for a person, mirrored by the DLite Person entity."""

    def __init__(self, name, age, skills):
        self.name = name
        self.age = age
        self.skills = skills

    def __repr__(self):
        return f'Person({self.name!r}, {self.age!r}, {list(self.skills)!r})'
url = 'json://' + thisdir + '/Person.json'
print('-- create: ExPerson')
ExPerson = dlite.classfactory(Person, url=url)
print('-- create: person1')
person1 = Person('Jack Daniel', 42, ['distilling', 'tasting'])
print('-- create: person2')
person2 = ExPerson('Jack Daniel', 42, ['distilling', 'tasting'])
person2.dlite_inst.save('json', 'persons.json', 'mode=w')
# Print json-representation of person2 using dlite
print(person2.dlite_inst.asjson(indent=2))
person3 = dlite.loadfactory(Person, 'json://persons.json')
person4 = dlite.objectfactory(person1, meta=person2.dlite_meta)
| DanielHoeche/dlite-MOM | bindings/python/tests/test_factory.py | test_factory.py | py | 950 | python | en | code | 0 | github-code | 13 |
8591163514 | # %%
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import pandas as pd
import matplotlib.pyplot as plt
# %%
iris = load_iris()
y = pd.DataFrame(dict(label=iris.target))
X = pd.DataFrame(iris.data, columns=iris.feature_names)
y_train, y_test, X_train, X_test = train_test_split(y, X, test_size=0.8)
print(iris.DESCR)
plt.hist(y["label"])
X.describe().transpose()
# %%
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train["label"])
y_train_hat = rfc.predict(X_train)
confusion_matrix(y_train_hat, y_train["label"])
y_test_hat = rfc.predict(X_test)
confusion_matrix(y_test_hat, y_test["label"])
# %%
import lime
import lime.lime_tabular
explainer = lime.lime_tabular.LimeTabularExplainer(
training_data=X_train.to_numpy(),
feature_names=X.columns,
verbose=True,
mode="classification",
# kernel_width=2,
)
# %%
x = X.sample(1, random_state=42).to_numpy()[0]
exp = explainer.explain_instance(data_row=x, predict_fn=rfc.predict_proba)
# %%
exp.show_in_notebook(show_table=True)
exp.as_list()
rfc.predict_proba(x.reshape(1, -1))
import numpy as np
# NOTE(review): `lr` is never defined in this file — only `rfc`, a
# RandomForestClassifier, is fitted above. These lines presumably refer to a
# LogisticRegression from an earlier notebook session and will raise
# NameError if the script is run top-to-bottom; confirm and fix.
names = X.columns.to_list() + ["intercept"]
values = np.concatenate((x, np.array([1]))) * np.concatenate((lr.coef_, np.array([lr.intercept_])))
pd.DataFrame(dict(coef_name=names, coef_value=values))
sum(x * lr.coef_) + lr.intercept_
| greysweater42/cookbook | scratchpad/lime/iris.py | iris.py | py | 1,449 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/python3
"""
Purpose: Calculating the HCF/GCD between two numbers
HCF - Highest Common Factor
"""
from math import gcd

first_number = int(input("Enter the first number: "))
second_number = int(input("Enter the second number: "))
# math.gcd runs in O(log(min(a, b))) and, unlike the previous countdown
# loop, also terminates for zero or negative inputs (the old loop hung or
# raised ZeroDivisionError for those).
hcf = gcd(first_number, second_number)
print(
    f"""Greatest Common Divisor between \
{first_number} and {second_number} is {hcf}"""
)
| udhayprakash/PythonMaterial | python3/03_Language_Components/09_Loops/p_hcf.py | p_hcf.py | py | 440 | python | en | code | 7 | github-code | 13 |
# Program that reads an integer and reports whether it is prime.
# Divisors of n are printed in yellow, non-divisors in red.
n = int(input('Digite um número: '))
while n < 0:
    print('Digite um número válido: ')
    n = int(input('Digite um número inteiro: '))
c = 0  # number of divisors found
for x in range(1, n+1):
    if n % x == 0:
        print('\033[33m', end='')  # yellow: x divides n
        c += 1
    else:
        print('\033[31m', end='')  # red: x does not divide n
    print('{} '.format(x), end='')
print('\033[m')
# A prime has exactly two divisors (1 and itself). BUGFIX: the original
# special-cased n == 1 by pre-incrementing the counter, which wrongly
# classified 1 as prime; 1 has a single divisor and is not prime.
if c == 2:
    print('O número {} é primo!'.format(n))
else:
    print('O número {} não é primo!'.format(n))
42007443638 | import argparse
name = "anagram"
class AnagramTester:
    """Checks whether two strings are anagrams of each other, ignoring
    case and any non-alphanumeric characters."""

    def anagram_test(self, string1: str, string2: str) -> bool:
        """Return True if string1 and string2 are anagrams."""
        cleaned1 = self._remove_non_alphanumerics(string1.lower())
        cleaned2 = self._remove_non_alphanumerics(string2.lower())
        # Two strings are anagrams iff their character histograms match.
        return self._count_chars(cleaned1) == self._count_chars(cleaned2)

    def _count_chars(self, text: str) -> dict:
        """Build a character -> occurrence-count histogram for text."""
        counts = {}
        for ch in text:
            counts[ch] = counts.get(ch, 0) + 1
        return counts

    def _remove_non_alphanumerics(self, input_string: str) -> str:
        """Strip every character that is not a letter or digit."""
        return "".join(ch for ch in input_string if ch.isalnum())
def main():
    """CLI entry point: read two strings from argv and report whether they
    are anagrams of each other."""
    parser = ArgumentParser()
    parser.add_argument("string1", type=str, help="First string")
    parser.add_argument("string2", type=str, help="Second string")
    args = parser.parse_args()
    anagram_tester = AnagramTester()
    result = anagram_tester.anagram_test(args.string1, args.string2)
    # Echo the inputs so the verdict below has context.
    print(f"Inputs were: \n{args.string1}\n{args.string2}")
    if result:
        print("They are anagrams.")
    else:
        print("They are not anagrams.")


if __name__ == "__main__":
    main()
| LaurenJWeber/problem-solving | anagram-tester/anagram.py | anagram.py | py | 1,452 | python | en | code | 0 | github-code | 13 |
10006321213 | #!/usr/bin/env python
'''
DAVID LETTIER
(C) 2015.
http://www.lettier.com/
Slackotron
'''
import sys
import os
import subprocess
import signal
import time
from lib.scribe import Scribe
class DashboardManager(Scribe, object):
  """Starts and stops the dashboard web server as a child process.

  Mixes in Scribe for the info()/error() logging helpers.
  """
  def start(self):
    """Launch server.py (located next to this file) as a subprocess, with
    this directory prepended to PYTHONPATH so the server can import us."""
    cwd = os.path.dirname(os.path.realpath(__file__))
    sys.path.insert(0, cwd + '/')
    env = os.environ.copy()
    env['PYTHONPATH'] = ":".join(sys.path)
    self.info('Starting dashboard.')
    self.server_process = subprocess.Popen(
        ['python', cwd + '/server.py'],
        shell=False,
        env=env
    )
  def stop(self):
    """Stop the dashboard: SIGINT the server's direct children first, then
    the server itself, and block until the server process exits."""
    self.info('Stopping dashboard.')
    if hasattr(self, 'server_process'):
      try:
        # pgrep -P lists the direct children of the server process.
        cpids = subprocess.check_output(
            ['pgrep', '-P', str(self.server_process.pid)]
        ).splitlines()
        for cpid in cpids:
          self.info('Killing child process PID %s.' % cpid)
          os.kill(int(cpid), signal.SIGINT)
      except Exception as e:
        # Best effort: pgrep exits non-zero when there are no children.
        self.error(e)
      self.server_process.send_signal(signal.SIGINT)
      # Wait for the server to actually terminate before reporting success.
      while self.server_process.poll() is None:
        time.sleep(1)
      self.info('Dashboard stopped.')
| lettier/slackotron | src/dashboard/dashboard_manager.py | dashboard_manager.py | py | 1,153 | python | en | code | 16 | github-code | 13 |
13092916170 | from __future__ import annotations
import string
import wx
class NumberValidator(wx.Validator):
    """wx validator restricting text input to characters that can appear in
    a signed decimal number, and validating that the final value parses as
    a float."""
    def __init__(self) -> None:
        super(NumberValidator, self).__init__()
        # Filter keystrokes as they arrive.
        self.Bind(wx.EVT_CHAR, self.OnChar_)
    def Clone(self) -> NumberValidator:
        # Required by wx: every validator must be cloneable.
        return NumberValidator()
    def Validate(self, ignored: wx.Window) -> bool:
        """Return True if the associated window's value parses as a float."""
        value = self.GetWindow().GetValue()
        try:
            float(value)
        except ValueError:
            return False
        return True
    def OnChar_(self, wxEvent: wx.CommandEvent) -> None:
        """Consume (optionally with a bell) any printable character that is
        not a digit, '.' or '-'; let control keys and digits propagate."""
        keyCode = wxEvent.GetKeyCode()
        # Control/navigation keys and codes beyond Latin-1 pass through.
        if keyCode < wx.WXK_SPACE or keyCode == wx.WXK_DELETE or keyCode > 255:
            wxEvent.Skip()
            return
        if chr(keyCode) in string.digits or chr(keyCode) in ('.', '-'):
            # Allow this character to propagate
            wxEvent.Skip()
            return
        # Disallowed character: not calling Skip() consumes the event.
        if not wx.Validator.IsSilent():
            wx.Bell()
| JiveHelix/pex | python/pex/wx/utility/number_validator.py | number_validator.py | py | 961 | python | en | code | 0 | github-code | 13 |
71329041617 | from logging.config import valid_ident
from flask import request, render_template
import ttlsap.fab_proc as fab_proc
import ttlsap.edc_data as edc_data
import ttlsap.edc_dim as edc_dim
import ttlsap.spc_dim as spc_dim
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from tools.redis_db import RedisDb, CacheType
from tools.logger import Logger
import tools.reset_config as reset_config
from tools.cache_key import get_cache_key
from tools.response_handler import *
from grpc_cust.clientapival_client import get_clientapikey, get_verified_apikey
from tools.response_handler import InvalidUsage
def process_login(**kwargs):
    """Authenticate a client and return its API key.

    Credentials (clientId / password) are read from the request headers,
    falling back to query-string parameters.

    :returns: JSNResponse wrapping the API key on success, or a 401
        JSNError when the credentials are rejected.
    """
    if "clientId" in request.headers:
        client_id = request.headers["clientId"]
    else:
        client_id = request.args.get('clientId')
    if "password" in request.headers:
        password = request.headers["password"]
    else:
        password = request.args.get('password')
    apikey = get_clientapikey(client_id, password)
    # The expiry "1900-01-01" appears to be the backend's sentinel for
    # invalid credentials — TODO confirm against the grpc client.
    if apikey.expiry == "1900-01-01":
        Logger.log(f"Client id {client_id} fail on process_login")
        # NOTE(review): the plaintext password is echoed back in this error
        # message; consider removing it.
        return JSNError(f"Client Id {client_id} or password {password} is not correct.",status_code=401)
    Logger.log(f'Issue request: {client_id}, {request.method}, {request.url}')
    return JSNResponse(apikey.apikey)
def dummy_check_and_log():
    """Stub authenticator: always reports success with a placeholder key."""
    result = dict(status=True, apikey="given_token")
    return result
def check_and_log(ignore_token=False):
    """Validate the request's API token and log the request.

    The token is taken from the "apikey" header, overridden by a "token"
    query parameter when present. The verified token's `assertion` field
    appears to be a colon-separated "client_id:client_type:registry"
    string (inferred from the split(":") indexing below — confirm).

    :param ignore_token: when True, skip validation entirely and return True.
    :returns: True when ignore_token; on success a dict with status True,
        the apikey and the client_id; on failure a dict with a numeric
        status code and an "error_msg" InvalidUsage. NOTE(review): when the
        apikey comparison or assertion check falls through, the function
        logs a denial and implicitly returns None — callers must handle it.
    """
    if ignore_token:
        Logger.log(f'Issue request: {request.method}, {request.url}')
        return True
    # NOTE(review): if neither the "apikey" header nor the "token" query
    # parameter is present, given_token is never assigned and the
    # `given_token is not None` test below raises NameError.
    if "apikey" in request.headers:
        given_token = request.headers["apikey"]
    if request.args.get('token'):
        given_token = request.args.get('token')
    if given_token is not None:
        # Tolerate clients that send the token wrapped in double quotes.
        given_token = given_token.strip('"')
    token_info = get_verified_apikey(given_token)
    if token_info.assertion == 'False':
        return {"status":1,"error_msg":InvalidUsage('tokern incorrect',payload="Given token is not correct! (無法辨識是哪一個 client id)",status_code=401)}
    client_type = token_info.assertion.split(":")[1]
    if client_type == "BLOCK":
        return {"status":9,"error_msg":InvalidUsage('blocked',payload="Given token has found the client is blocked from requesting data services",status_code=401)}
    if token_info.apikey == given_token:
        if token_info.assertion is not None:
            registry = token_info.assertion.split(":")[2]
            granted = validate_ds_permission(registry,request.url)
            Logger.log(f'Issue request: @{given_token}, {request.method}, {request.url}, {token_info.assertion}')
            if granted == "Permit":
                client_id = token_info.assertion.split(":")[0]
                # NOTE(review): client_prev is assigned but never used.
                client_prev = token_info.assertion.split(":")[2]
                return {"status":True,"apikey":given_token, "client_id":client_id}
            else:
                return {"status":2,"error_msg":InvalidUsage('has not the data permission',payload="Given token has not the permission for requesting data services (該 client id 沒有此資料服務的使用權限)",status_code=403)}
    Logger.log(f'Deny request: {request.method}, {request.url}')
def validate_ds_permission(registry, url):
    """Check whether a client's permission registry grants access to a
    data-service URL.

    :param registry: comma-separated list of endpoint prefixes; an entry
        starting with "-" explicitly denies its prefix
    :param url: full request URL; only the part after the first "/ds" is
        matched against the registry entries
    :returns: "Permit" when at least one positive entry matches and no
        negative entry matches, otherwise "No Permit"
    """
    nde = url.partition("/ds")[2]
    if not registry.strip():
        return "No Permit"
    permit = []
    for reg in registry.split(","):
        if reg.startswith("-"):
            # Negative entry: a prefix match is an explicit denial.
            if nde.startswith(reg.partition("-")[2]):
                permit.append(False)
        elif nde.startswith(reg):
            permit.append(True)
    # Deny when anything was explicitly denied or nothing matched at all.
    if False in permit or not permit:
        return "No Permit"
    return "Permit"
#先檢查cache,有則回傳,沒有則建立
def process_req_fab_proc():
if not check_and_log()["status"]:
return JSNError("Token is missing or token is not correct. Please call Login API to get a new token.")
cache_type = RedisDb.cache_type()
if cache_type == CacheType.AUTO:
list = find_cache()
if list is None:
list = get_fab_proc()
set_cache(list)
elif cache_type == CacheType.READONLY:
list = find_cache()
elif cache_type == CacheType.IGNORE:
list = get_fab_proc()
elif cache_type == CacheType.RENEW:
list = get_fab_proc()
set_cache(list)
elif cache_type == CacheType.BUILD:
list = get_fab_proc()
set_cache(list)
list = []
return JSNResponse(list)
def get_fab_proc():
if request.endpoint == 'get_fab_list':
list = fab_proc.get_fab_list()
else:
fab = request.args.get('fab')
list = fab_proc.get_proc_list(fab)
return list
#依據cache type存取資料
def process_req(fab=None, process=None):
if not check_and_log()["status"]:
return JSNError("Token is missing or token is not correct. Please call login API to get a new token.")
cache_type = get_cache_type()
if cache_type == CacheType.AUTO:
list = find_cache()
if list is None:
list = get_list_from_db(fab, process)
set_cache(list)
elif cache_type == CacheType.READONLY:
list = find_cache()
if list is None: list = []
elif cache_type == CacheType.IGNORE:
list = get_list_from_db(fab, process)
elif cache_type == CacheType.RENEW:
list = get_list_from_db(fab, process)
set_cache(list)
elif cache_type == CacheType.BUILD:
list = get_list_from_db(fab, process)
set_cache(list)
list = [0]*len(list)
return JSNResponse(list)
def get_list_from_db(fab, process):
func_name = request.endpoint
args = get_args()
if 'spc' in func_name:
func = getattr(spc_dim, func_name)
else:
func = getattr(edc_dim, func_name)
list = func(fab, process, **args)
return list
#不使用cache
def process_req_no_cache(fab=None, process=None, equipment=None):
if not check_and_log()["status"]:
return JSNError("Token is missing or token is not correct. Please call login API to get a new token.")
func_name = request.endpoint
args = get_args()
func = getattr(edc_data, func_name)
if equipment: rv = func(fab, equipment=equipment, **args) #沒有process
else: rv = func(fab, process=process, **args) #equipment在args
return JSNResponse(rv)
def process_req_ui():
check_and_log(ignore_token=True)
if request.endpoint == 'get_main_menu':
return render_template('default.html', description='Main Functions:', action_url='/reset-config', action_name='Reset Config')
if request.endpoint == 'reset_config':
reset_config.reset_config()
return render_template('default.html', message='Config refreshed!', action_url='/home', action_name='Main Menu')
#region 讀寫cache
def find_cache():
    """Look up the cached result for the current request path.

    :returns: None on a cache miss, [] when an empty list was cached,
        otherwise the cached comma-separated string split into a list.
    """
    key = get_cache_key(request.full_path)
    redis = RedisDb.default()
    # NOTE(review): `list` shadows the builtin within this function.
    list = redis.get(key)
    if list is None: return None
    if list == '': return []
    return list.split(',')
def set_cache(list):
    """Cache a list of strings for the current request path, joined with
    commas. Failures are logged and swallowed (caching is best-effort)."""
    try:
        key = get_cache_key(request.full_path)
        redis = RedisDb.default()
        expiry_hours=get_cache_expiry_hours()
        redis.set(key, ','.join(list), expiry_hours=expiry_hours)
    except Exception as err:
        logger = Logger.default()
        logger.error(f'"{err.args[0]}" on set_cache() in request_handler.py', exc_info=True)
#endregion
#region 取得查詢參數裡的cache選項
#優先採用request.headers裡的cacheType,若沒有或無法解析再使用系統的cache_type
def get_cache_type():
if "cacheType" in request.headers:
try:
return CacheType[request.headers["cacheType"]]
except:
pass
#200514:查詢eqpt,只使用cache,不查詢資料庫,即使當月
#200515:只限CNVR
#elif request.endpoint == "get_eqpt_list" and request.view_args['process'] == 'CNVR':
#200520:不限CNVR
elif request.endpoint == "get_eqpt_list" and "month" in request.args:
return CacheType.READONLY
return RedisDb.cache_type()
#採用request.headers裡的expiryHours,若沒有或無法解析回傳None(會使用預設的 expiry_hours),若小於等於0則cache不會timeout
def get_cache_expiry_hours():
if "expiryHours" in request.headers:
try:
return float(request.headers["expiryHours"])
except:
pass
#ch200513:如果查詢的區間是by month且是當月,非eqpt清單,cache一天
elif 'month' in request.args and request.args['month'] == datetime.today().strftime('%Y-%m') and request.endpoint != "get_eqpt_list":
return 24.0
return None
#endregion
# Convert the query parameters into a plain dictionary, remap parameter
# names, and pre-process the date range.
def get_args():
    """Build the keyword-argument dict passed to the data-access helpers.

    - camelCase query parameters are remapped to snake_case keys;
    - for POST requests, EdcItems/SpcItems from the JSON body are merged in;
    - "month"/"start"/"end" are normalised into a "start"/"end" date range,
      capped at one month.
    """
    # NOTE(review): `dict` shadows the builtin throughout this function.
    dict = request.args.to_dict()
    # name mapping: camelCase query params -> snake_case kwargs
    if "ownerCode" in dict: dict["owner_code"] = dict.pop("ownerCode")
    if "chamberCode" in dict: dict["chamber_code"] = dict.pop("chamberCode")
    if "withTypeCode" in dict: dict["with_type_code"] = dict.pop("withTypeCode")=="true"
    if "buildSqlOnly" in dict: dict["build_sql_only"] = dict.pop("buildSqlOnly")=="true"
    if "dataShapeWide" in dict: dict["data_shape_wide"] = dict.pop("dataShapeWide")=="true"
    if "prfxOpraEqpt" in dict: dict["prfx_opra_eqpt"] = dict.pop("prfxOpraEqpt")=="true"
    if "spcOperation" in dict: dict["spc_operation"] = dict.pop("spcOperation")
    if "measureType" in dict: dict["measure_type"] = int(dict.pop("measureType"))
    # posted data: merge item lists from the JSON body
    if request.method == 'POST':
        posted = request.get_json()
        if posted:
            if "EdcItems" in posted: dict["edc_items"] = posted["EdcItems"]
            if "SpcItems" in posted: dict["spc_items"] = posted["SpcItems"]
    # Determine the data date range: start and end
    if "month" in dict:
        month = dict.pop("month") # eg. '2020-04'
        start_date = datetime.strptime(month, "%Y-%m") # eg. datetime.datetime(2020, 4, 1, 0, 0)
    elif "start" in dict:
        start = dict.pop("start")
        # Accept either "YYYY-MM-DD" or "YYYY-MM".
        start_date = datetime.strptime(start, "%Y-%m-%d") if len(start) >= 8 else datetime.strptime(start, "%Y-%m")
    else:
        start_date = datetime.today()
    if "end" in dict:
        end = dict.pop("end")
        end_date = datetime.strptime(end, "%Y-%m-%d") if len(end) >= 8 else datetime.strptime(end, "%Y-%m")
        # Cap the requested range at one month.
        if end_date - start_date > timedelta(31):
            end_date = start_date + relativedelta(months = 1) - relativedelta(days = 1)
    else:
        end_date = start_date + relativedelta(months = 1) - relativedelta(days = 1)
    # ch200514: get_edc_spc_data needs an extra spc_end parameter; SPC data
    # may lag EDC by two to three months.
    if "spc_operation" in dict and "operation" in dict:
        spc_end_date = start_date + relativedelta(months = 3)
        dict["spc_end"] = spc_end_date.strftime("%Y-%m-%d")
    dict["start"] = start_date.strftime("%Y-%m-%d")
    dict["end"] = end_date.strftime("%Y-%m-%d")
    return dict
| eslywadan/dataservice | tools/request_handler.py | request_handler.py | py | 11,164 | python | en | code | 0 | github-code | 13 |
27301434115 | import json
import math
import os
import torch
from argparse import ArgumentParser
from datasets import load_metric
from helpers.asr import (
configure_lm,
configure_w2v2_for_training,
DataCollatorCTCWithPadding,
dataset_from_dict,
get_metrics_computer,
preprocess_text,
process_data
)
from transformers import (
EarlyStoppingCallback,
logging,
Trainer,
TrainingArguments
)
parser = ArgumentParser(
prog='train_asr-by-w2v2-ft',
description='Train an ASR model by fine-tuning a pre-trained wav2vec 2.0 model',
)
parser.add_argument('repo_path_or_name', help = "Pre-trained wav2vec 2.0 model, local path or HuggingFace repo name")
parser.add_argument('output_dir', help = "The output directory where the model predictions and checkpoints will be written")
parser.add_argument('train_tsv', help = "Training data. Two-column tab-separated file with 'path' (path to wav file) and 'sentence' (transcription)")
parser.add_argument('eval_tsv', help = "Evaluation data. Two-column tab-separated file with 'path' (path to wav file) and 'sentence' (transcription)")
parser.add_argument('--use_target_vocab', default=True, help='Use a vocabulary created from target transcriptions (training and evaluation)')
parser.add_argument('--lm_arpa', default=None, help='Path to language model .arpa file (optional)')
parser.add_argument('--hft_logging', default=40, help='HuggingFace Transformers verbosity level (40 = errors, 30 = warnings, 20 = info, 10 = debug)')
args = parser.parse_args()
# Turns out bool('False') evaluates to True in Python (only bool('') is False)
args.use_target_vocab = False if args.use_target_vocab == 'False' else True
logging.set_verbosity(args.hft_logging)
# For debugging
# args.repo_path_or_name = "facebook/wav2vec2-large-robust-ft-swbd-300h"
# args.train_tsv = 'data/train-asr/train.tsv'
# args.eval_tsv = 'data/train-asr/test.tsv'
# args.output_dir = 'data/asr-temp'
# args.use_target_vocab = False
os.makedirs(args.output_dir, exist_ok=True)
dataset = dataset_from_dict({
'train': args.train_tsv,
'eval' : args.eval_tsv
})
w2v2_config = {
"feature_extractor" : {
"return_attention_mask" : True
},
"model_kwargs" : {
"mask_time_prob" : 0,
"gradient_checkpointing" : True,
"ctc_loss_reduction" : "mean"
}
}
dataset, vocab_dict = preprocess_text(dataset)
model, processor = configure_w2v2_for_training(dataset, args, vocab_dict, w2v2_config)
if args.lm_arpa is not None:
processor = configure_lm(processor, args.lm_arpa, args.output_dir)
dataset = process_data(dataset, processor)
# Set logging to 'INFO' or else progress bar gets hidden
logging.set_verbosity(20)
n_epochs = 50
batch_size = 32
# How many epochs between evals?
eps_b_eval = 5
# Save/Eval/Logging steps
sel_steps = int(math.ceil(len(dataset['train']) / batch_size) * eps_b_eval)
training_args = TrainingArguments(
output_dir=args.output_dir,
group_by_length=True,
per_device_train_batch_size=batch_size,
gradient_accumulation_steps=1,
evaluation_strategy="steps",
num_train_epochs=n_epochs,
fp16=True if torch.cuda.is_available() else False,
seed=7135,
save_steps=sel_steps,
eval_steps=sel_steps,
logging_steps=sel_steps,
learning_rate=1e-4,
# Warm up: 100 steps or 10% of total optimisation steps
warmup_steps=min(100, int(0.1 * sel_steps * n_epochs)),
report_to="none",
# 2022-03-09: manually set optmizier to PyTorch implementation torch.optim.AdamW
# 'adamw_torch' to get rid of deprecation warning for default optimizer 'adamw_hf'
optim="adamw_torch",
metric_for_best_model="wer",
save_total_limit=5,
load_best_model_at_end = True,
# Lower WER is better
greater_is_better=False
)
trainer = Trainer(
model=model,
data_collator=DataCollatorCTCWithPadding(processor=processor, padding=True),
args=training_args,
compute_metrics=get_metrics_computer(processor=processor),
train_dataset=dataset['train'],
eval_dataset=dataset['eval'],
tokenizer=processor.feature_extractor,
callbacks = [EarlyStoppingCallback(early_stopping_patience=3)]
)
print("Training model ...")
trainer.train()
| CoEDL/vad-sli-asr | scripts/train_asr-by-w2v2-ft.py | train_asr-by-w2v2-ft.py | py | 4,228 | python | en | code | 18 | github-code | 13 |
14463317752 | import time
from abc import ABC, abstractclassmethod, abstractmethod
from copy import copy
from typing import Iterable, Union
import numpy as np
from neuroaiengines.utils.angles import wrap_pi
from neuroaiengines.utils.transforms import *
from numpy import cos, pi, sin
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter1d
from scipy.signal import lfilter
class Metric(ABC):
    """Base class for simulation metrics: accumulates cumulative timestamps
    and delegates per-step recording to subclasses."""

    def __init__(self):
        # Cumulative timestamps, one entry per recorded step. The first
        # entry is always 0 (the first dt is intentionally discarded).
        self._dts = []

    @abstractmethod
    def _record(self, action, state, observation, policy):
        """Subclass hook: capture whatever the metric needs for this step."""
        ...

    def record(self, dt, action, state, observation, policy):
        """Append the cumulative timestamp for this step, then delegate."""
        stamp = self._dts[-1] + dt if self._dts else 0
        self._dts.append(stamp)
        self._record(action, state, observation, policy)

    @property
    @abstractmethod
    def result(self):
        """The metric's final value; computed by subclasses."""
        raise NotImplementedError
class BasicSimulator(object):
    """Runs a fixed-timestep closed loop between an environment and a
    policy, recording metrics at every step."""
    def __init__(self, env, policy):
        self.env = env
        self.policy = policy
    def run(self, t : float, metrics : Union[Metric,Iterable[Metric]] , dt=0.001, initial_action=None, initial_state=None) -> Iterable:
        """
        Runs an experiment with metrics.
        :param t:
            The time in seconds to run the experiment
        :param metrics:
            A metric or an iterable of metric objects.
        :param dt:
            Timestep
        :param initial_action:
            The initial action. Defaults to vector zero in the action space
        :param initial_state:
            The initial state. Defaults to vector zero in the state space
        :returns:
            An iterable of metric objects with data recorded
        """
        if isinstance(metrics, Metric):
            metrics = [metrics]
        T = np.linspace(0,t, int(t/dt)+1)
        # NOTE(review): `x or default` on a numpy array raises (ambiguous
        # truth value) for multi-element initial_action/initial_state —
        # pass lists or None; confirm intended usage.
        action = initial_action or np.zeros(self.env.nA)
        state = initial_state or np.zeros(self.env.nS)
        observation = self.env.observe(0, state)
        env = self.env
        policy = self.policy
        # Closed loop: policy acts on the latest observation, environment
        # advances, metrics record the new step.
        for t in T:
            action = policy.step(dt, observation)
            state = env.dynamics(dt, action)
            observation = env.observe(dt, state)
            for metric in metrics:
                metric.record(dt, action, state, observation, policy)
        return [metric.result for metric in metrics]
def validate_action(func):
    """
    Decorator to validate that the action is of the right size
    """
    def validator(env, dt, action, *args, **kwargs):
        # A valid action is a flat vector whose length equals the
        # environment's action-space dimension.
        assert action.shape == (env.nA,)
        return func(env, *args, dt=dt, action=action, **kwargs)
    return validator
def validate_state(func):
    """
    Decorator to validate that the state is of the right size
    """
    def validator(env, dt, state, *args, **kwargs):
        # A valid state is a flat vector whose length equals the
        # environment's state-space dimension.
        assert state.shape == (env.nS,)
        return func(env, *args, dt=dt, state=state, **kwargs)
    return validator
DEFAULT_DT = 0.001


class Env(ABC):
    """
    A basic environment, without any state or observation dynamics. Don't use directly
    """
    def __init__(self, initial_state, action_size, renderer=None, **renderer_args):
        """
        :param initial_state: 1-D vector giving the starting state
        :param action_size: dimensionality of the action space
        :param renderer: optional renderer class; when given it is
            instantiated with this env and **renderer_args and its
            render() is called on every step
        """
        # State
        initial_state = np.array(initial_state, dtype=np.float32)
        assert len(initial_state.shape) == 1, "Initial state must be a single vector"
        self.state = initial_state
        self.nS = len(initial_state)
        self.nA = action_size
        # BUGFIX: _render was previously assigned only when a renderer was
        # supplied, so step() raised AttributeError for renderer=None.
        self._render = renderer is not None
        if self._render:
            self._renderer = renderer(self, **renderer_args)
        # Wall-clock time of the previous step; None until the first step.
        self._t = None

    def step(self, t, action):
        """
        Steps the dynamics given an action, returns an observation of the state.

        :param t: absolute time of this step; dt is derived from the
            previous call (DEFAULT_DT on the very first step)
        :param action: action vector forwarded to dynamics()
        """
        # Calculate the timestep
        if self._t is None:
            dt = DEFAULT_DT
        else:
            dt = t - self._t
        self._t = t
        state = self.dynamics(dt, action)
        observation = self.observe(dt, state)
        if self._render:
            self._renderer.render()
        return observation

    @abstractmethod
    def dynamics(self, dt, action):
        """Advance the state by dt under the given action; returns the state."""
        raise NotImplementedError

    @abstractmethod
    def observe(self, dt, state):
        """Produce an observation of the given state."""
        raise NotImplementedError
class KinematicPoint(Env):
"""
An environment that moves a point around an empty plane
state:
[x, y, theta, vx, vtheta]
x : the x position of the agent
y : the y position of the agent
theta: the orientation of the agent ([0, 2*pi], counterclockwise is positive)
vx: linear velocity along agent's forward axis
vtheta: angular velocity
observation:
returns the full state
action:
[vx, vtheta]
vx: the desired linear speed of the agent
vtheta: the desired angular velocity of the agent
"""
def __init__(self, x: float, y: float, theta: float, vx: float, vtheta: float, *args, **kwargs):
"""
Creates a kinematic point environment. This is comparable to a two wheel robot operating in a single z plane.
Controlled with [vx, vtheta].
:param x,y: initial position
:param theta: initial angle
:param vx: initial forward velocity
:param vtheta: initial angular velocity
:returns: KinematicPoint environment
"""
initial_state = [x, y, theta, vx, vtheta]
# noise initialization
self.process_noise = None
self.input_noise = None
self.observation_noise = None
if "process_noise" in kwargs.keys():
self.process_noise = kwargs["process_noise"]
if "input_noise" in kwargs.keys():
self.input_noise = kwargs["input_noise"]
if "observation_noise" in kwargs.keys():
self.observation_noise = kwargs["observation_noise"]
super().__init__(initial_state, action_size=2, *args, **kwargs)
@validate_action
def dynamics(self, dt: float, action: np.array):
"""
Computes the dynamics (kinematics in this case) of the point
:param dt: timestep
:param action: action in form [vx, vtheta]
:returns: The full state of the object
"""
vx, vtheta = action
if self.input_noise != None:
vx = vx + self.process_noise.sample()
vtheta = vtheta + self.process_noise.sample()
theta = self.theta
x = self.x
y = self.y
dx = vx * np.cos(self.theta)
dy = vx * np.sin(self.theta)
dtheta = vtheta
self.state[0] = x + dx * dt
self.state[1] = y + dy * dt
self.state[2] = theta + dtheta * dt
self.state[3] = vx
self.state[4] = vtheta
# add process noise
if self.process_noise != None:
self.state[0] = self.state[0] + self.process_noise.sample() * dt
self.state[1] = self.state[1] + self.process_noise.sample() * dt
self.state[2] = self.state[2] + self.process_noise.sample() * dt
self.state[2] = wrap_pi(self.state[2])
return self.state
@validate_state
def observe(self, dt, state):
"""
The observation function based on the state. Can be overloaded!
:param dt: timestep
:param state: full state
:returns observation: observation of the state
"""
observation = state
return observation
@property
def x(self):
return self.state[0]
@property
def y(self):
return self.state[1]
@property
def theta(self):
return self.state[2]
@property
def vx(self):
return self.state[3]
@property
def vtheta(self):
return self.state[4]
class KinematicPointWithLandmarks(KinematicPoint):
    """
    An environment that moves a point around with some landmarks.
    state:
        [x, y, theta, vx, vtheta]
        x : the x position of the agent
        y : the y position of the agent
        theta: the orientation of the agent ([0, 2*pi], counterclockwise is positive)
        vx: linear velocity along agent's forward axis
        vtheta: angular velocity
    observation:
        [vtheta, a1, a2...]
        the angular velocity followed by the angles of the landmarks
        relative to the agent
    action:
        [vx, vtheta]
        vx: the desired linear speed of the agent
        vtheta: the desired angular velocity of the agent
    """
    def __init__(self, landmark_pos: list, *args, **kwargs):
        """
        :param landmark_pos: Nx2 array of landmark positions
        """
        super().__init__(*args, **kwargs)
        landmark_pos = np.array(landmark_pos)
        assert len(landmark_pos.shape) == 2, "Incorrect shape of landmark_pos"
        assert landmark_pos.shape[1] == 2, "Incorrect shape of landmark_pos"
        self.nL = landmark_pos.shape[0]
        self._landmark_pos = landmark_pos
    def get_landmark_angles(self, state):
        """Build the observation: entry 0 is the (optionally noisy) angular
        velocity, entries 1..nL are egocentric landmark bearings.

        :param state: full state [x, y, theta, vx, vtheta]
        """
        observation = np.zeros(self.nL + 1)
        x = state[0]
        y = state[1]
        theta = state[2]
        for i, (lpx, lpy) in enumerate(self._landmark_pos,1):
            # Optional positional noise applied to the landmark location
            # before computing the bearing.
            angpos_noise = [0, 0]
            if self.observation_noise is not None:
                angpos_noise = [self.observation_noise.sample(),
                                self.observation_noise.sample()]
            ang = np.arctan2(lpy + angpos_noise[0] - y,
                             lpx + angpos_noise[1] - x) - theta
            observation[i] = ang
        # adding angular velocity aspect of observation
        angvel_noise = 0
        if self.observation_noise is not None:
            # BUGFIX: the sampled noise used to be bound to `vel_noise` and
            # never applied, so observation[0] was always noise-free.
            angvel_noise = self.observation_noise.sample()
        observation[0] = state[4] + angvel_noise
        return observation
    def observe(self, dt, state):
        obs = self.get_landmark_angles(state)
        return obs
class RecorderMetric(Metric):
def __init__(self):
super().__init__()
self._data = []
@property
def result(self):
ns = len(self._dts)
nr = len(self._data[0])
res = np.zeros((ns, nr+1))
# append time sequence to data
res[:,0] = self._dts
res[:,1:] = self._data
return res
class StatMetric(RecorderMetric):
@abstractclassmethod
def _calculate(self):
" Used to calculate statistic based on accumulated data"
return
@property
def result(self):
res = self._calculate()
return res
class StateRecorderMetric(RecorderMetric):
def _record(self, action, state, observation, policy):
#print(state)
self._data.append(copy(state))
class ActionRecorderMetric(RecorderMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy(action))
class ObservationRecorderMetric(RecorderMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy(observation))
class PolicyStateRecorderMetric(RecorderMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy(policy.get_state()))
# auxilliary metrics for debugging
class PolicyStateSquaredErrorRecorderMetric(RecorderMetric):
def _record(self, action, state, observation, policy):
#print(policy.get_state()[0]-copy(state[2]), policy.get_state()[0], state[2])
self._data.append(copy([(policy.get_state()[0] - state[2])**2]))
class ClockTimeRecorderMetric(RecorderMetric):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cur_time = time.time()
def _record(self, action, state, observation, policy):
self._data.append(copy([time.time() - self.cur_time]))
self.cur_time = time.time()
class AngUpdateStatMetric(StatMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy([state[2]]))
def _calculate(self):
sout = np.array(self._data)[:,0]
sout_unwrapped = np.unwrap(sout, discont = np.pi)
dyaw_vals = np.diff(sout_unwrapped)
return dyaw_vals
# metrics for baseline comparisons
class RMSEStatMetric(StatMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy([policy.get_state()[0], state[2]]))
def _calculate(self):
pout, sout = [np.array(self._data)[:,0], np.array(self._data)[:,1]]
sout_unwrapped = np.unwrap(np.ravel(sout), discont = np.pi)
squared_out = (pout - sout_unwrapped) ** 2
calc = np.sqrt(np.mean(squared_out))
return calc
class RunTimeStatMetric(StatMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy([time.time()]))
def _calculate(self):
calc = np.ptp(np.ravel(self._data))
return calc
class TrialLengthStatMetric(StatMetric):
def _record(self, action, state, observation, policy):
pass
def _calculate(self):
calc = np.ptp(np.ravel(self._dts))
return calc
class AngRangeStatMetric(StatMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy([state[2]]))
def _calculate(self):
calc = min(2*np.pi, np.ptp(np.unwrap(np.ravel(self._data), discont = np.pi)))
#print(np.unwrap(np.ravel(self._data).tolist(), discont = np.pi))
return calc
class NRMSEStatMetric(StatMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy([policy.get_state()[0], state[2]]))
def _calculate(self):
# N-rmse = rmse/x_range
# calculating numerator
pout, sout = [np.array(self._data)[:,0], np.array(self._data)[:,1]]
sout_unwrapped = np.unwrap(np.ravel(sout), discont = np.pi)
squared_out = (pout - sout_unwrapped) ** 2
num_calc = np.sqrt(np.mean(squared_out))
# calculating denmoninator
denom_calc = min(2*np.pi, np.ptp(np.unwrap(np.ravel(np.array(self._data)[:,1]), discont = np.pi)))
return num_calc/denom_calc
class PolicyCovarianceRecorderMetric(RecorderMetric):
def _record(self, action, state, observation, policy):
self._data.append(copy(policy.get_cov()))
class CurrentTrajectoryNode(object):
    """
    An environment node that generates a trajectory, keeps track of agent position, and generates ring neuron inputs at every timestep
    """
    def __init__(self, landmark_pos,n_landmarks, n_features, vtx=None, i_pos=None, i_theta=0, start_after=0.2, use_vel=True, trajectory_seed=0, rn_bump_size=pi/18, total_rns=27,dropout_mask=None, **kwargs):
        """
        parameters:
        -----------
        landmark_pos: np.array((n_landmarks,2))
            the [x,y] positions of landmarks
        n_landmarks: int
            number of landmarks
        n_features: int
            number of features per landmark
        vtx: [float vt,float vx]
            angular/linear velocity override, for generating rotation only/circle trajectories
        i_pos: [float x, float y]
            initial position
        i_theta: float
            initial heading
        start_after: float
            time at the beginning of the run where only the EPGs get input (for bump stabilization)
        use_vel: bool
            Use velocity (for landmark only runs)
        trajectory_seed: int
            seed for trajectory generation
        rn_bump_size: float
            fwhm of the RN bump
        total_rns: int
            total number of RNs per landmark
        dropout_mask: iterable<bool>
            a mask where each element is a timestep. If the mask is true at a timestep, the ring neuron activations will be 0
        """
        self.vx = 0
        # Need to be reset
        self.poss = []
        self.angles = []
        # Set this here, because if you put a default in the function definition, the object persists on redefinition
        if i_pos is None:
            self.pos = np.array([0.,0.])
            self.i_pos = np.array([0.,0.])
        else:
            self.pos = np.array(i_pos)
            self.i_pos = np.array(i_pos)
        self.num_steps = 0
        self.theta = i_theta
        self.i_theta = i_theta
        self.last_t = None
        self.landmark_pos = landmark_pos
        self.start = start_after
        self.use_vel = use_vel
        self.offset = 0
        self.seed = trajectory_seed
        self.rng = np.random.RandomState(self.seed)
        # Generate activation
        # TODO make this based on the length of the trial, or update if it gets larger than this
        # N timesteps
        T = 10000
        if dropout_mask is not None:
            self.dropout_mask = dropout_mask
        else:
            # dtype=bool: the np.bool alias was deprecated in NumPy 1.20 and
            # removed in 1.24; the builtin bool is the documented replacement.
            self.dropout_mask = np.zeros(T, dtype=bool)
        if vtx is None:
            # Trajectory generation based on von mises function
            vm = self.rng.vonmises(0, 100, T)
            self.rotation = lfilter([1.0], [1, -.5], vm)
            self.rotation = gaussian_filter1d(self.rotation * 100, sigma=100)*5
            x = np.linspace(0, 1, int(T / 50))
            y = self.rng.rand(int(T / 50)) * (.15)
            f = interp1d(x, y, kind='cubic')
            xnew = np.linspace(0, 1, T, endpoint=True)
            self.acceleration = f(xnew)*10.5**3
        else:
            # constant trajectory
            vt = vtx[0]
            vx = vtx[1]
            self.rotation = np.ones(T)*vt
            self.acceleration = np.ones(T)*vx
        self.n_landmarks = n_landmarks
        self.n_features = n_features
        # Create the RN activation fn
        self.rn_activation_fn = create_activation_fn(fwhm=rn_bump_size, num_neurons=total_rns)
        self.rn_slice = generate_middle_slice(total_rns, n_features)
        # Make slices for the output so everything is sync'd
        # Access different members of the step function to ensure output
        # ex:
        # out = traj_node.step(t)
        # vx = out[traj_node.velocity_slice]
        nrn = n_features*n_landmarks
        nepg = 18
        nrnepg = nrn+nepg
        self.size_out = nrnepg+2
        self.velocity_slice = slice(0,1)
        self.rn_activation_slice = slice(1,1+nrn)
        self.epg_activation_slice = slice(1+nrn, 1+nrnepg)
        self.linear_velocity_slice = slice(1+nrnepg,2+nrnepg)
        self.actual_angles = []
    ###
    def step(self,t):
        """
        t: float
            time
        """
        #a = angle wrt world
        #vt = angular velocity
        #vx = linear velocity
        #dt = time passed
        #keep track of how many steps so we draw from the distribution correctly
        self.num_steps += 1
        # Calculate dt
        if self.last_t is None:
            dt = 0.001
        else:
            dt = t - self.last_t
        self.last_t = t
        # Calculate time after initialization period
        ot = t - self.start
        ot = ot if ot > 0 else 0
        # Approximate the number of steps into the trajectory
        # (local variable; intentionally distinct from the self.num_steps counter)
        num_steps = int((ot-self.offset)*1000)
        generate_activation = self.rn_activation_fn
        output = np.zeros(self.size_out)
        # if we've started yet
        if (t - self.offset) >= self.start:
            vt = self.rotation[num_steps]
            self.vx = self.acceleration[num_steps]
            #get position based on linear velocity
            self.pos[0] += cos(self.theta)*self.vx*dt
            self.pos[1] += sin(self.theta)*self.vx*dt
            #get new angle wrt world based on angular velocity
            self.theta += vt*dt
            a = wrap_pi(self.theta)
            angles = []
            if self.use_vel:
                output[self.velocity_slice] = vt
            for lpos in self.landmark_pos:
                angle = angle_wrt_home(self.pos, a, lpos)
                angles.append(angle)
            if not self.dropout_mask[num_steps]:
                output[self.rn_activation_slice] = generate_activation(angles, slc=self.rn_slice).ravel()
            self.actual_angles.append(self.theta)
        else:
            # Still in initialization period
            a = wrap_pi(self.theta)
            angles = []
            for lpos in self.landmark_pos:
                angle = angle_wrt_home(self.pos, a, lpos)
                angles.append(angle)
        output[self.epg_activation_slice] = get_epg_activation(self.theta).ravel()
        output[self.linear_velocity_slice] = self.vx
        self.poss.append(self.pos.copy())
        self.angles.append(self.theta)
        return output
    def reset(self, time=0):
        """Restore the initial agent state so the node can run another trial
        starting at simulation time *time*."""
        self.poss = []
        self.angles = []
        self.pos = self.i_pos.copy()
        self.vx = 0
        self.num_steps = 0
        self.theta = self.i_theta
        self.last_t = None
        self.offset = time
| aplbrain/seismic | neuroaiengines/optimization/simulator.py | simulator.py | py | 21,181 | python | en | code | 0 | github-code | 13 |
327717541 | from pathlib import Path
from operator import attrgetter
class Node:
    """A height-map cell used as a Dijkstra vertex."""

    def __init__(self, height_char):
        """Create a node from its height character ('a'..'z')."""
        self.height = ord(height_char) - ord("a")
        self.tentative_distance = float("inf")
        self.connections = []

    def add_connection(self, node):
        """Link to *node* unless it is more than one step lower.

        Edges are walked backwards from the end point, so a neighbour is
        reachable when its height is at least our height minus one.
        """
        if node.height < self.height - 1:
            return
        self.connections.append(node)
self.connections.append(node)
class Graph:
    """Dijkstra search over the height map, run backwards from the end node."""

    def __init__(self, candidate_start_nodes, end_node, unvisited_set):
        self.candidate_start_nodes = candidate_start_nodes
        self.end_node = end_node
        self.unvisited_set = unvisited_set

    def get_closest_node(self):
        """Return the unvisited node with the smallest tentative distance."""
        return min(self.unvisited_set, key=lambda node: node.tentative_distance)

    def find_shortest_path(self):
        """Relax edges outward from the end node until every node is settled,
        then return the distance to the nearest candidate start node."""
        self.end_node.tentative_distance = 0
        while self.unvisited_set:
            current = self.get_closest_node()
            self.unvisited_set.discard(current)
            for neighbour in current.connections:
                if neighbour not in self.unvisited_set:
                    continue
                candidate = current.tentative_distance + 1
                if candidate < neighbour.tentative_distance:
                    neighbour.tentative_distance = candidate
        # Shortest distance from any of the candidate start nodes
        return min(node.tentative_distance for node in self.candidate_start_nodes)
def parse_input(text):
    """Build the search graph from the puzzle's height-map text.

    Every 'a'/'S' cell is a candidate start node; 'E' is the end node.
    Edges are added in the reverse travel direction so the Dijkstra search
    can run backwards from the end node.
    NOTE(review): assumes the input contains exactly one 'E'; end_node would
    be unbound otherwise — confirm against the puzzle input.
    """
    unvisited_set = set()
    candidate_start_nodes = set()
    node_grid = []
    for line in text.splitlines():
        row = []
        for char in line:
            if char in ["S", "a"]:
                # 'S' has height 'a'; both are possible starting points
                node = Node("a")
                candidate_start_nodes.add(node)
            elif char == "E":
                # 'E' has height 'z' and is where the search begins
                node = Node("z")
                end_node = node
            else:
                node = Node(char)
            row.append(node)
            unvisited_set.add(node)
        node_grid.append(row)
    # wire each node to its four in-grid neighbours (Node.add_connection
    # applies the height rule and may reject the edge)
    for y, row in enumerate(node_grid):
        for x, node in enumerate(row):
            if x > 0:
                node.add_connection(row[x - 1])
            if x < len(row) - 1:
                node.add_connection(row[x + 1])
            if y > 0:
                node.add_connection(node_grid[y - 1][x])
            if y < len(node_grid) - 1:
                node.add_connection(node_grid[y + 1][x])
    return Graph(candidate_start_nodes, end_node, unvisited_set)
if __name__ == "__main__":
text = Path("input.txt").read_text()
graph = parse_input(text)
print(graph.find_shortest_path()) | grey-area/advent-of-code-2022-copilot | day12/part2.py | part2.py | py | 2,561 | python | en | code | 1 | github-code | 13 |
31681547664 | # Homework No.14 Exercise No.2
# File Name: echo_server.py
# Programmer: Kostyantyn Shumishyn
# Date: December 3, 2017
#
# Problem Statement: Create a Server Class
# Imports
import socket
from hw14project2.Stack import *
# Reverses a string by pushing every character onto a stack and popping
# them all back off — LIFO order yields the reversed sequence.
def reverseString(myString):
    # collect the characters on the stack
    chars = Stack()
    for ch in myString:
        chars.push(ch)
    # pop until empty; characters come out in reverse order
    reversed_chars = []
    while not chars.isEmpty():
        reversed_chars.append(chars.pop())
    return ''.join(reversed_chars)
def Main():
    """Run a single-client echo server on localhost:12345 that replies to
    each message with the reversed string, until the client disconnects."""
    # Host is this Machine
    host = '127.0.0.1'
    # Random Non-Privileged Port
    port = 12345
    # Creates new Socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Binds Host and Port to Socket
    s.bind((host, port))
    # Listens for only 1 Connection at a time
    s.listen(1)
    # Receives a Connection Socket and an Address
    connection, address = s.accept()
    # Gives Feedback of Connection
    print('\nConnection from: ', str(address) + "\n")
    # While talking to client
    while True:
        # Data received from Client
        data = connection.recv(1024).decode('utf-8')
        # If the connection is closed by client
        if not data:
            break
        # Prints feedback to user
        # print("Data from connected user: " + data)
        data = reverseString(data)
        # print("Sending: " + data + "\n")
        connection.send(data.encode('utf-8'))
    # Closes Socket
    connection.close()
    # NOTE(review): the listening socket `s` is never closed — consider s.close()


if __name__ == '__main__':
    Main()
| Kshumishyn/Python | Homework #14/hw14project2/echo_server.py | echo_server.py | py | 1,763 | python | en | code | 0 | github-code | 13 |
33116697734 | import uvicorn
from api.v1.routes import router
from core.auth import security_jwt_local, security_jwt_remote
from fastapi import Depends, FastAPI
# FastAPI application; served by uvicorn in the __main__ block below
app = FastAPI()


@app.get('/')
async def root():
    """Trivial liveness/landing endpoint."""
    return {'message': 'Hello World'}


# The same router is mounted twice: once with local JWT decoding, and once
# delegating token verification to the auth service over gRPC.
app.include_router(
    router,
    prefix='/api/v1',
    tags=['Пользователи из сервиса авторизации по gRPC (авторизация декодированием токена в этом же сервисе)'],
    dependencies=[Depends(security_jwt_local)],
)

app.include_router(
    router,
    prefix='/api/v1/remote_auth',
    tags=['Пользователи из сервиса авторизации по gRPC (авторизация походом в сервис авторизации по gRPC)'],
    dependencies=[Depends(security_jwt_remote)],
)

if __name__ == '__main__':
    uvicorn.run(
        'main:app', host='0.0.0.0', port=8085, reload=True,
    )
| RomanAVolodin/AuthServiceFastAPI_gRPCServer | api_simple/main.py | main.py | py | 944 | python | ru | code | 0 | github-code | 13 |
43263485332 | from itertools import product
# AtCoder ARC083 A: mix water (packs of 100a/100b grams) and sugar (packs of
# c/d grams) into at most f grams total, with density at most e sugar per
# 100g water; output the densest achievable (total mass, sugar mass).
a, b, c, d, e, f = map(int, input().split())
# every reachable water amount (grams) within the capacity f
lima = [100 * a * i for i in range(f // (100 * a) + 1)]
limb = [100 * b * i for i in range(f // (100 * b) + 1)]
setw = set([i + j for i, j in product(lima, limb) if 0 < i + j <= f])
# every reachable sugar amount (grams)
limc = [c * i for i in range(f // c + 1)]
limd = [d * i for i in range(f // d + 1)]
sets = set([i + j for i, j in product(limc, limd) if 0 < i + j <= f])
# best density so far; fall back to pure water (max water, zero sugar)
cnt, ans = 0, (max(setw), 0)
for w in setw:
    for s in sets:
        # density must not regress, must stay dissolvable (<= e per 100g
        # water), and the total mass must fit in the container
        if (cnt <= 100 * s / w <= e) and (s + w <= f):
            cnt = 100 * s / w
            ans = s + w, s
else:
    # for/else on a loop without break: this branch always runs exactly once
    print(*ans)
| Shirohi-git/AtCoder | arc081-/arc083_a.py | arc083_a.py | py | 611 | python | en | code | 2 | github-code | 13 |
39597951545 | import math
import random
import argparse
import sys
import os
import xml.etree.ElementTree as xtree
__author__ = 'Christian Rosentreter'
__version__ = '1.7'
__all__ = ['SVGArcPathSegment']
class SVGArcPathSegment():
    """An 'arc' SVG path segment.

    str() renders the segment as SVG path data: one elliptical-arc command
    for sweeps under 360 degrees, two half-circle arcs for a full circle,
    and an empty string for a zero sweep.
    """

    def __init__(self, offset=0.0, angle=90.0, radius=1.0, x=0.0, y=0.0):
        self.offset = offset
        self.angle = angle
        self.radius = radius
        self.x = x
        self.y = y

    def __str__(self):
        if not self.angle:
            return ''
        if abs(self.angle) < 360:
            # partial sweep: single arc command from start to end angle
            path_format = (
                'M {sx} {sy} '
                'A {rd} {rd} 0 {fl} 1 {dx} {dy}'
            )
            ts = (self.offset - 180.0) * math.pi / -180.0
            td = (self.offset + self.angle - 180.0) * math.pi / -180.0
        else:
            path_format = (
                'M {sx} {sy} '
                'A {rd} {rd} 0 0 1 {dx} {dy} '  # essentially a circle formed by…
                'A {rd} {rd} 0 1 1 {sx} {sy} '  # … two 180° arcs
                'Z'
            )
            ts = 0
            td = math.pi
        sin, cos = math.sin, math.cos
        values = {
            'sx': round(self.x + self.radius * sin(ts), 9),
            'sy': round(self.y + self.radius * cos(ts), 9),
            'rd': round(self.radius, 9),
            # large-arc flag: set when the swept angle exceeds half a turn
            'fl': int(abs(ts - td) > math.pi),
            'dx': round(self.x + self.radius * sin(td), 9),
            'dy': round(self.y + self.radius * cos(td), 9),
        }
        return path_format.format(**values)
def main():
    """Command-line entry point: parse the options, concentrically arrange
    randomly sized arcs into a disc, and emit the result as SVG on stdout or
    rasterize it into a PNG file."""
    ap = argparse.ArgumentParser(
        description=('Concentrically arranges randomly sized arcs into a pretty disc shape. Output is '
                     'generated as a set of vector shapes in Scalable Vector Graphics (SVG) format and printed '
                     'on the standard output stream.'),
        epilog='Report bugs, request features, or provide suggestions via https://github.com/the-real-tokai/macuahuitl/issues',
        add_help=False,
    )

    g = ap.add_argument_group('Startup')
    g.add_argument('-V', '--version', action='version', help="show version number and exit", version='%(prog)s {}'.format(__version__), )
    g.add_argument('-h', '--help', action='help', help='show this help message and exit')

    g = ap.add_argument_group('Algorithm')
    g.add_argument('--circles', metavar='INT', type=int, help='number of concentric arc elements to generate inside the disc [:21]', default=21)
    g.add_argument('--stroke-width', metavar='FLOAT', type=float, help='width of the generated strokes [:6]', default=6.0)
    g.add_argument('--gap', metavar='FLOAT', type=float, help='distance between the generated strokes')
    g.add_argument('--inner-radius', metavar='FLOAT', type=float, help='setup inner disc radius to create an annular shape')
    g.add_argument('--hoffset', metavar='FLOAT', type=float, help='shift the whole disc horizontally [:0.0]', default=0.0)
    g.add_argument('--voffset', metavar='FLOAT', type=float, help='shift the whole disc vertically [:0.0]', default=0.0)
    g.add_argument('--color', metavar='COLOR', type=str, help='SVG compliant color specification or identifier [:black]', default='black')
    g.add_argument('--random-seed', metavar='INT', type=int, help='fixed initialization of the random number generator for predictable results')
    g.add_argument('--randomize', action='store_true', help='generate truly random disc layouts; other algorithm values provided via command line parameters are utilized as limits')

    g = ap.add_argument_group('Miscellaneous')
    g.add_argument('--separate-paths', action='store_true', help='generate separate <path> elements for each arc; automatically implied when animation support is enabled')
    g.add_argument('--outline-mode', help='generate bounding outline circles [:both]', choices=['both', 'outside', 'inside', 'none'], default='both')
    g.add_argument('--background-color', metavar='COLOR', type=str, help='SVG compliant color specification or identifier; adds a background <rect> to the SVG output')
    g.add_argument('--disc-color', metavar='COLOR', type=str, help='SVG compliant color specification or identifier; fills the background of the generated disc by adding an extra <circle> element')
    g.add_argument('--animation-mode', help='enables SVG <animateTransform> support', choices=['random', 'bidirectional', 'cascade-in', 'cascade-out'])
    g.add_argument('--animation-duration', metavar='FLOAT', type=float, help='defines base duration of one full 360° arc rotation (in seconds); negative inputs switch to counter-clockwise base direction [:6.0]', default=6.0)
    g.add_argument('--animation-offset', metavar='FLOAT', type=float, help='offset the animation (in seconds) to support rendering to frame sequences for frame based animation formats. [:0]', default=0.0)

    g = ap.add_argument_group('Output')
    g.add_argument('-o', '--output', metavar='FILENAME', type=str, help='optionally rasterize the generated vector paths and write the result into a PNG file (requires the `svgcairo\' Python module)')
    g.add_argument('--output-size', metavar='INT', type=int, help='force pixel width and height of the raster image; if omitted the generated SVG viewbox dimensions are used')

    user_input = ap.parse_args()

    # Initialize…
    #
    chaos = random.Random(user_input.random_seed)
    circles = user_input.circles
    stroke = abs(user_input.stroke_width) if user_input.stroke_width else 1.0
    gap = user_input.gap if (user_input.gap is not None) else stroke
    radius = abs(user_input.inner_radius) if (user_input.inner_radius is not None) else stroke
    x = user_input.hoffset
    y = user_input.voffset
    color = user_input.color

    if user_input.randomize:
        # user-provided values act as upper limits for the random layout
        circles = chaos.randrange(0, circles) if circles else 0
        stroke = chaos.uniform(0, stroke)
        stroke = 1.0 if stroke == 0 else stroke
        gap = chaos.uniform(0, gap)
        radius = chaos.uniform(0, radius)
        x = chaos.uniform(-x, x) if x else 0.0
        y = chaos.uniform(-y, y) if y else 0.0
        color = '#{:02x}{:02x}{:02x}'.format(chaos.randrange(0, 255), chaos.randrange(0, 255), chaos.randrange(0, 255))
        # TODO: randomize background and disc color too when the respective parameters are used
        #       (needs to respect color harmonies)

    if radius < stroke:
        radius = stroke

    # Generate data…
    #
    outlines = []
    arcs = []

    if user_input.outline_mode in ('both', 'inside'):
        outlines.append({'x':x, 'y':y, 'r':radius})
        radius += (gap + stroke)

    for _ in range(circles):
        # Calculate angular space requirement for the "round" stroke caps to avoid some overlapping
        sqrd2 = 2.0 * math.pow(radius, 2.0)
        theta = ((2.0 * math.acos((sqrd2 - math.pow((stroke / 2.0), 2.0)) / sqrd2)) * (180.0 / math.pi))
        arcs.append(SVGArcPathSegment(offset=chaos.uniform(0, 359.0), angle=chaos.uniform(0, 359.0 - theta), radius=radius, x=x, y=y))
        radius += (gap + stroke)

    if user_input.outline_mode in ('both', 'outside'):
        outlines.append({'x':x, 'y':y, 'r':radius})
    else:
        radius -= (gap + stroke)

    # Generate SVG/XML…
    #
    def _f(v, max_digits=9):
        # compact float formatting helper for SVG attribute values
        if isinstance(v, float):
            v = round(v, max_digits)
        return v if isinstance(v, str) else str(v)

    vb_dim = (radius + (stroke * 0.5)) * (256.0 / (256.0 - 37.35))  # 37px border for 256x256; a golden ratio in there… somewhere…
    vb_off = _f(vb_dim * -1.0, 2)
    vb_dim = _f(vb_dim * 2.0, 2)

    config = {'stroke':color, 'stroke-width':_f(stroke), 'fill':'none'}

    svg = xtree.Element('svg', {'width':'100%', 'height':'100%', 'xmlns':'http://www.w3.org/2000/svg', 'viewBox':'{o} {o} {s} {s}'.format(o=vb_off, s=vb_dim)})
    title = xtree.SubElement(svg, 'title')
    title.text = 'A Comitl Artwork'

    if user_input.background_color:
        xtree.SubElement(svg, 'rect', {'id':'background', 'x':vb_off, 'y':vb_off, 'width':vb_dim, 'height':vb_dim, 'fill':user_input.background_color})

    svg_m = xtree.SubElement(svg, 'g', {'id':'comitl-disc'})

    if user_input.disc_color:
        xtree.SubElement(svg_m, 'circle', {'id':'disc-background', 'cx':_f(x), 'cy':_f(y), 'r':_f(radius), 'fill':user_input.disc_color})

    if arcs:
        if user_input.separate_paths or user_input.animation_mode:
            svg_ga = xtree.SubElement(svg_m, 'g', {'id':'arcs'})
            for aid, a in enumerate(arcs):
                svg_arc = xtree.SubElement(svg_ga, 'path', {'id':'arc-{}'.format(aid+1), 'stroke-linecap':'round', **config})
                shift = 0.0
                if user_input.animation_mode:
                    if user_input.animation_mode == 'cascade-out':
                        d = user_input.animation_duration * ((aid+1) * 0.25)  # TODO: 1/4 decay value could be configurable
                    elif user_input.animation_mode == 'cascade-in':
                        d = user_input.animation_duration * ((len(arcs)-aid+1) * 0.25)
                    else:
                        # limits duration range into a 50% variation window to avoid super fast arcs with values closer to 0
                        d = chaos.uniform(abs(user_input.animation_duration) * 0.5, abs(user_input.animation_duration))  # TODO: variation could be configurable
                        if user_input.animation_duration < 0:
                            d *= -1  # restore user direction
                        if (user_input.animation_mode == 'bidirectional') and (chaos.random() < 0.5):
                            d *= -1  # switch direction randomly
                    shift = (360.0 / d) * user_input.animation_offset
                    xtree.SubElement(svg_arc, 'animateTransform', {
                        'attributeName': 'transform',
                        'type': 'rotate',
                        'from': '{} {} {}'.format(360 if d < 0 else 0, x, y),
                        'to': '{} {} {}'.format(  0 if d < 0 else 360, x, y),
                        'dur': '{}s'.format(abs(d)),
                        'repeatCount': 'indefinite'
                    })
                a.offset += shift
                svg_arc.set('d', str(a))
        else:
            xtree.SubElement(svg_m, 'path', {'id':'arcs', 'd':''.join(map(str, arcs)), 'stroke-linecap':'round', **config})

    if outlines:
        svg_go = xtree.SubElement(svg_m, 'g', {'id':'outlines'})
        for oid, o in enumerate(outlines):
            xtree.SubElement(svg_go, 'circle', {'id':'outline-{}'.format(oid+1), 'cx':_f(o['x']), 'cy':_f(o['y']), 'r':_f(o['r']), **config})

    svg.append(xtree.Comment(' Generator: comitl.py {} (https://github.com/the-real-tokai/macuahuitl) '.format(__version__)))
    rawxml = xtree.tostring(svg, encoding='unicode')

    # Send happy little arcs out into the world…
    #
    if not user_input.output:
        print(rawxml)
    else:
        try:
            from cairosvg import svg2png
            svg2png(bytestring=rawxml,
                write_to=os.path.realpath(os.path.expanduser(user_input.output)),
                output_width=user_input.output_size,
                output_height=user_input.output_size
            )
        except ImportError as e:
            print('Couldn\'t rasterize nor write a PNG file. Required Python module \'cairosvg\' is not available: {}'.format(str(e)), file=sys.stderr)
if __name__ == "__main__":
main()
| the-real-tokai/macuahuitl | comitl.py | comitl.py | py | 10,705 | python | en | code | 73 | github-code | 13 |
35598830763 | # interface with ESP-32 maze stepper motor board over Serial
import serial
import yaml
from utils import *
import time
class motor_interface(object):
    """Serial bridge to the ESP-32 stepper-motor board of the maze."""

    # class-level defaults; the connection-related ones are overwritten
    # per instance in __init__
    esp32 = None
    target = [0, 0]
    angle_string = '<0,0>'
    motor_on = '1'
    motor_off = '0'
    conf_file = config_files['serial']
    conn_settings = None
    port = None
    baud = None
    timeout = None

    def __init__(self) -> None:
        """Load serial settings from the YAML config and open the port."""
        with open(self.conf_file, 'r') as file:
            self.conn_settings = yaml.safe_load(file)
        self.port = self.conn_settings['port']
        self.baud = self.conn_settings['baudrate']
        self.timeout = self.conn_settings['timeout']
        self.esp32 = serial.Serial(port=self.port, baudrate=self.baud, timeout=self.timeout)

    def send_angle(self):
        """Transmit the current '<x,y>' target string to the board."""
        self.esp32.write(self.angle_string.encode('utf-8'))

    def motor_enable(self):
        """Send the enable command ('1') to the board."""
        self.esp32.write(self.motor_on.encode('utf-8'))

    def motor_disable(self):
        """Send the disable command ('0') to the board."""
        self.esp32.write(self.motor_off.encode('utf-8'))

    def set_angle(self, new_target):
        """Store *new_target* (x, y), rounded to two decimals, and refresh
        the serialized '<x,y>' command string."""
        rounded_x = round(new_target[0], ndigits=2)
        rounded_y = round(new_target[1], ndigits=2)
        self.target = (rounded_x, rounded_y)
        self.angle_string = f'<{rounded_x},{rounded_y}>'

    def set_angle_and_send(self, new_target):
        """Convenience wrapper: update the target, then transmit it."""
        self.set_angle(new_target)
        self.send_angle()
# targets cycled through by the standalone motor test below
test_angles = [ [0, 0], [1, 0], [-1, 0], [0, 1], [0, -1] ]

# test motor control by cycling through some target values
def main():
    """Endlessly cycle the controller through test_angles, printing and
    sending each serialized target (2s pause between targets)."""
    controller = motor_interface()
    while True:
        for a in test_angles:
            controller.set_angle(a)
            # print(a)
            print(controller.angle_string)
            controller.send_angle()
            time.sleep(2)

if __name__ == "__main__":
    main()
| Andrey-Korn/marble-maze | src/motor_interface.py | motor_interface.py | py | 1,731 | python | en | code | 0 | github-code | 13 |
22031549453 | #
# SPDX-License-Identifier: Apache-2.0
#
import logging
import socket
import os
from random import sample
from django.core.exceptions import ObjectDoesNotExist
from api.models import Port, Node, Agent
# lowest port handed out to clusters; everything below is left untouched
CLUSTER_PORT_START = int(os.getenv("CLUSTER_PORT_START", 7050))
# how many times find_available_ports re-rolls before giving up
MAX_RETRY = 100

LOG = logging.getLogger(__name__)
def port_is_free(ip=None, port=0):
    """Return True when nothing accepts TCP connections on (ip, port).

    A successful connect means the port is taken; any failure (connection
    refused, 1s timeout, bad address, ...) is treated as "free".
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.settimeout(1)
    try:
        probe.connect((ip, int(port)))
        probe.shutdown(socket.SHUT_RDWR)
    except Exception:
        return True
    else:
        return False
    finally:
        probe.close()
def port_picker(agent_id=None, request_count=1, exclude_ports=None):
    """Pick *request_count* random candidate ports for an agent.

    Ports already recorded for the agent's nodes and any *exclude_ports*
    are skipped.  Note: *exclude_ports* is extended in place on purpose —
    find_available_ports relies on the accumulated exclusions across retries.
    """
    if exclude_ports is None:
        exclude_ports = []
    used_ports = Port.objects.values_list("external").filter(
        node__agent__id=agent_id
    )
    exclude_ports += [port[0] for port in used_ports]
    # set for O(1) membership; the list scan below covers ~58k candidates,
    # so 'i not in list' would be O(n*m)
    taken = set(exclude_ports)
    return sample(
        [
            i
            for i in range(CLUSTER_PORT_START, 65535)
            if i not in taken
        ],
        request_count,
    )
def find_available_ports(
    ip=None,
    node_id=None,
    agent_id=None,
    request_count=1,
    exclude_ports=None,
    retry=MAX_RETRY,
):
    """Pick *request_count* ports on an agent that are both unrecorded in
    the database and actually closed on *ip*, re-rolling up to *retry*
    times.  Returns [] when node/agent is missing or retries are exhausted.
    """
    if node_id is None or agent_id is None or retry == 0:
        return []
    all_port_is_free = True
    if exclude_ports is None:
        exclude_ports = []
    ports = port_picker(agent_id, request_count, exclude_ports)
    for port in ports:
        if not port_is_free(ip, port):
            # remember the busy port so the next attempt avoids it
            exclude_ports.append(port)
            all_port_is_free = False
    if not all_port_is_free:
        retry -= 1
        # recurse with the accumulated exclusions until all picks are free
        return find_available_ports(
            ip, node_id, agent_id, request_count, exclude_ports, retry
        )
    # Removed these lines of code bc they can produce port objects with 0 internal port number.
    # try:
    #     node = Node.objects.get(id=node_id)
    # except ObjectDoesNotExist:
    #     return []
    # else:
    #     port_objects = [Port(external=port, node=node) for port in ports]
    #     Port.objects.bulk_create(port_objects)
    return ports
def set_ports_mapping(node_id=None, mapping=None, new=False):
    """Persist external->internal port mappings for a node.

    mapping: list of {'external': int, 'internal': int} dicts.
    With new=True the Port rows are bulk-created; otherwise existing rows
    matched by external port number are updated in place.
    """
    if mapping is None:
        mapping = []
    if new:
        try:
            node = Node.objects.get(id=node_id)
        except ObjectDoesNotExist:
            LOG.error("Node not found")
        else:
            port_objects = [
                Port(
                    external=port.get("external"),
                    internal=port.get("internal"),
                    node=node,
                )
                for port in mapping
            ]
            Port.objects.bulk_create(port_objects)
    else:
        for port in mapping:
            Port.objects.filter(
                node__id=node_id, external=port.get("external")
            ).update(internal=port.get("internal"))
def get_available_ports(
    agent_id=None,
    request_count=1,
):
    """Reserve *request_count* random unused ports on an agent and persist
    them in the agent's free_ports list.

    Fixes over the previous revision: the agent object was immediately
    replaced by its own ``free_ports`` attribute and then dereferenced a
    second time (AttributeError at runtime), and ``list.append(ports)``
    nested the new ports as a single element while its None return value was
    written back to ``agent.free_ports``.
    """
    agent = Agent.objects.get(id=agent_id)
    used_ports = agent.free_ports or []
    # set for O(1) membership during the candidate scan
    taken = set(used_ports)
    ports = sample(
        [
            i
            for i in range(CLUSTER_PORT_START, 65535)
            if i not in taken
        ],
        request_count,
    )
    # extend (not append) so the stored list stays flat, and assign the
    # updated list itself rather than the None returned by the mutator
    used_ports.extend(ports)
    agent.free_ports = used_ports
    agent.save()
    return ports
| hyperledger/cello | src/api-engine/api/utils/port_picker.py | port_picker.py | py | 3,289 | python | en | code | 862 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlCollectionCreateDebtDTO import AlCollectionCreateDebtDTO
from alipay.aop.api.domain.AlCollectionReceiveBaseInfoDTO import AlCollectionReceiveBaseInfoDTO
from alipay.aop.api.domain.ContractInfoDTO import ContractInfoDTO
from alipay.aop.api.domain.OppositeSubjectDTO import OppositeSubjectDTO
from alipay.aop.api.domain.OurSubjectInfoDTO import OurSubjectInfoDTO


class AntLinkeAlcollectioncenterCreateModel(object):
    """Request model for the ant.linke.alcollectioncenter.create API
    (auto-generated Alipay SDK style: private fields behind properties,
    DTO coercion in the setters, and dict (de)serialization through
    to_alipay_dict / from_alipay_dict)."""

    def __init__(self):
        self._al_collection_create_debt = None
        self._app_name = None
        self._base_info = None
        self._contract_info = None
        self._operator = None
        self._opposite_subject_dto = None
        self._our_subject_info = None
        self._tenant = None

    @property
    def al_collection_create_debt(self):
        return self._al_collection_create_debt

    @al_collection_create_debt.setter
    def al_collection_create_debt(self, value):
        # accept either a ready DTO instance or a plain dict to convert
        if isinstance(value, AlCollectionCreateDebtDTO):
            self._al_collection_create_debt = value
        else:
            self._al_collection_create_debt = AlCollectionCreateDebtDTO.from_alipay_dict(value)
    @property
    def app_name(self):
        return self._app_name

    @app_name.setter
    def app_name(self, value):
        self._app_name = value
    @property
    def base_info(self):
        return self._base_info

    @base_info.setter
    def base_info(self, value):
        if isinstance(value, AlCollectionReceiveBaseInfoDTO):
            self._base_info = value
        else:
            self._base_info = AlCollectionReceiveBaseInfoDTO.from_alipay_dict(value)
    @property
    def contract_info(self):
        return self._contract_info

    @contract_info.setter
    def contract_info(self, value):
        if isinstance(value, ContractInfoDTO):
            self._contract_info = value
        else:
            self._contract_info = ContractInfoDTO.from_alipay_dict(value)
    @property
    def operator(self):
        return self._operator

    @operator.setter
    def operator(self, value):
        self._operator = value
    @property
    def opposite_subject_dto(self):
        return self._opposite_subject_dto

    @opposite_subject_dto.setter
    def opposite_subject_dto(self, value):
        if isinstance(value, OppositeSubjectDTO):
            self._opposite_subject_dto = value
        else:
            self._opposite_subject_dto = OppositeSubjectDTO.from_alipay_dict(value)
    @property
    def our_subject_info(self):
        return self._our_subject_info

    @our_subject_info.setter
    def our_subject_info(self, value):
        if isinstance(value, OurSubjectInfoDTO):
            self._our_subject_info = value
        else:
            self._our_subject_info = OurSubjectInfoDTO.from_alipay_dict(value)
    @property
    def tenant(self):
        return self._tenant

    @tenant.setter
    def tenant(self, value):
        self._tenant = value

    def to_alipay_dict(self):
        # serialize only truthy fields; nested DTOs delegate to their own
        # to_alipay_dict when they provide one
        params = dict()
        if self.al_collection_create_debt:
            if hasattr(self.al_collection_create_debt, 'to_alipay_dict'):
                params['al_collection_create_debt'] = self.al_collection_create_debt.to_alipay_dict()
            else:
                params['al_collection_create_debt'] = self.al_collection_create_debt
        if self.app_name:
            if hasattr(self.app_name, 'to_alipay_dict'):
                params['app_name'] = self.app_name.to_alipay_dict()
            else:
                params['app_name'] = self.app_name
        if self.base_info:
            if hasattr(self.base_info, 'to_alipay_dict'):
                params['base_info'] = self.base_info.to_alipay_dict()
            else:
                params['base_info'] = self.base_info
        if self.contract_info:
            if hasattr(self.contract_info, 'to_alipay_dict'):
                params['contract_info'] = self.contract_info.to_alipay_dict()
            else:
                params['contract_info'] = self.contract_info
        if self.operator:
            if hasattr(self.operator, 'to_alipay_dict'):
                params['operator'] = self.operator.to_alipay_dict()
            else:
                params['operator'] = self.operator
        if self.opposite_subject_dto:
            if hasattr(self.opposite_subject_dto, 'to_alipay_dict'):
                params['opposite_subject_dto'] = self.opposite_subject_dto.to_alipay_dict()
            else:
                params['opposite_subject_dto'] = self.opposite_subject_dto
        if self.our_subject_info:
            if hasattr(self.our_subject_info, 'to_alipay_dict'):
                params['our_subject_info'] = self.our_subject_info.to_alipay_dict()
            else:
                params['our_subject_info'] = self.our_subject_info
        if self.tenant:
            if hasattr(self.tenant, 'to_alipay_dict'):
                params['tenant'] = self.tenant.to_alipay_dict()
            else:
                params['tenant'] = self.tenant
        return params

    @staticmethod
    def from_alipay_dict(d):
        # build a model from a response/request dict; missing keys stay None
        if not d:
            return None
        o = AntLinkeAlcollectioncenterCreateModel()
        if 'al_collection_create_debt' in d:
            o.al_collection_create_debt = d['al_collection_create_debt']
        if 'app_name' in d:
            o.app_name = d['app_name']
        if 'base_info' in d:
            o.base_info = d['base_info']
        if 'contract_info' in d:
            o.contract_info = d['contract_info']
        if 'operator' in d:
            o.operator = d['operator']
        if 'opposite_subject_dto' in d:
            o.opposite_subject_dto = d['opposite_subject_dto']
        if 'our_subject_info' in d:
            o.our_subject_info = d['our_subject_info']
        if 'tenant' in d:
            o.tenant = d['tenant']
        return o
16988329194 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This file is released under BSD 2-clause license.
from __future__ import unicode_literals
import os
import datetime
import shutil
import yaml
import config
class PostManager(object):
    """Reads Jekyll-style posts (files opening with a '---' YAML front-matter
    block) from *post_root* and derives tag/category statistics from them."""

    def __init__(self, post_root):
        self.post_root = post_root

    def tryLoadMeta(self, filePath):
        """Try to load the meta data from filePath.

        Returns the parsed front-matter mapping, or None when the file has
        no front-matter block or the YAML is invalid.
        """
        with open(filePath, 'rb') as f:
            firstLine = f.readline()
            if firstLine and firstLine.rstrip() == b'---':
                header = []
                line = None
                for line in f:
                    line = line.rstrip()
                    if line == b'---':
                        break
                    header.append(line)
                if line != b'---':
                    # closing '---' never found: not a valid front-matter block
                    return
                header = b'\n'.join(header)
                try:
                    # safe_load: front matter is plain data; yaml.load without
                    # an explicit Loader can execute arbitrary constructors
                    # and raises TypeError on PyYAML >= 6
                    return yaml.safe_load(header)
                except Exception:
                    return

    def getTags(self):
        """Get tag list according to MRU order."""
        tags = {}
        names = sorted(os.listdir(self.post_root), reverse=True)
        for idx, name in enumerate(names):
            meta = self.tryLoadMeta(os.path.join(self.post_root, name))
            # .get(): tolerate posts whose front matter lacks a 'tags' key
            # (plain meta['tags'] raised KeyError for those)
            if meta and meta.get('tags') and isinstance(meta['tags'], list):
                for t in meta['tags']:
                    tags.setdefault(t, 0)
                    # newer posts (earlier in the reverse-sorted list) weigh more
                    tags[t] += (len(names) - idx)
        return sorted(tags.keys(), key=lambda k: (-tags[k], k))

    def getCategories(self):
        """Get category list according to MRU order."""
        categories = {}
        names = sorted(os.listdir(self.post_root), reverse=True)
        for idx, name in enumerate(names):
            meta = self.tryLoadMeta(os.path.join(self.post_root, name))
            # .get(): tolerate posts without a 'category' key
            if meta and meta.get('category'):
                c = meta['category']
                categories.setdefault(c, 0)
                categories[c] += 1
        return sorted(categories.keys(), key=lambda k: (-categories[k], k))

    def listPosts(self):
        """List names of posts (files carrying a valid front-matter block)."""
        names = sorted(os.listdir(self.post_root), reverse=True)
        ret = []
        for name in names:
            if self.tryLoadMeta(os.path.join(self.post_root, name)) is not None:
                ret.append(name)
        return ret

    def deleteFile(self, name):
        """Remove the post file *name* from the post directory."""
        os.remove(os.path.join(self.post_root, name))
class UploadManager(object):
    """Manages uploaded static files below *upload_root*, grouped into
    'YYYY/MM' subdirectories by upload date."""

    def __init__(self, upload_root):
        self.upload_root = upload_root

    def addFile(self, name, sourceFile):
        """Copy *sourceFile* to '<upload_root>/YYYY/MM/<name>'.

        On a name clash, '-1', '-2', ... is inserted before the extension
        until a free name is found.  Returns the relative name stored.
        """
        name = '%s/%s' % (datetime.datetime.now().strftime('%Y/%m'), name)
        mainName, extName = os.path.splitext(name)
        retry = 0
        while True:
            dstPath = os.path.join(self.upload_root, name)
            if not os.path.exists(dstPath):
                break
            retry += 1
            name = '%s-%d%s' % (mainName, retry, extName)
        dstDir = os.path.dirname(dstPath)
        if not os.path.isdir(dstDir):
            os.makedirs(dstDir)
        shutil.copyfile(sourceFile, dstPath)
        return name

    def deleteFile(self, name):
        """Delete *name* and prune its parent directories if now empty."""
        target = os.path.join(self.upload_root, name)
        targetDir = os.path.dirname(target)
        os.remove(target)
        try:
            # removedirs raises when the directory is not empty; that is fine
            os.removedirs(targetDir)
        except Exception:
            pass
posts = PostManager(os.path.join(config.BLOG_ROOT, '_posts'))
uploads = UploadManager(os.path.join(config.BLOG_ROOT, 'upload'))
| Nehzilrz/nehzilrz.github.io | tools/manager.py | manager.py | py | 3,683 | python | en | code | 1 | github-code | 13 |
3047232031 | import core.tree as tree
import lib.lza.lza as l
import core.acl as acl
import core.users as users
from schema.schema import getMetaType
from core.translation import lang, t
from utils.utils import dec_entry_log
from core.transition import httpstatus
@dec_entry_log
def getContent(req, ids):
user = users.getUserFromRequest(req)
if "lza" in users.getHideMenusForUser(user):
req.setStatus(httpstatus.HTTP_FORBIDDEN)
return req.getTAL("web/edit/edit.html", {}, macro="access_error")
v = {}
v['error'] = ""
nodes = []
for id in ids:
node = tree.getNode(id)
access = acl.AccessData(req)
if not access.hasWriteAccess(node):
req.setStatus(httpstatus.HTTP_FORBIDDEN)
return req.getTAL("web/edit/edit.html", {}, macro="access_error")
nodes.append(node)
if "createlza" in req.params:
# remove old file if existing
for f in node.getFiles():
if f.getType()=="lza":
node.removeFile(f)
# create new file
for f in node.getFiles():
if f.getType() in ("original", "document"):
try:
archive = l.LZA(f.retrieveFile())
schema = node.getSchema()
# test for lza export mask
if (getMetaType(schema).getMask("lza")):
m = getMetaType(schema).getMask("lza")
meta = l.LZAMetadata(m.getViewHTML([node], 8))
else:
# generate error message
meta = l.LZAMetadata("""
<?xpacket begin="\xef\xbb\xbf" id="mediatum_metadata"?>
<lza:data>
<lza:error>-definition missing-</lza:error>
</lza:data><?xpacket end="w"?>
""")
archive.writeMediatumData(meta)
node.addFile(tree.
FileNode(archive.buildLZAName(),"lza", f.getMimeType()))
except l.FiletypeNotSupported:
v['error'] = "edit_lza_wrongfiletype"
elif "removelza" in req.params:
for f in node.getFiles():
if f.getType()=="lza":
node.removeFile(f)
v['id'] = req.params.get("id","0")
v['tab'] = req.params.get("tab", "")
v['ids'] = ids
v['nodes'] = nodes
v['t'] = t
v['language'] = lang(req)
meta = {}
for id in ids:
node = tree.getNode(id)
for f in node.getFiles():
if f.getType()=="lza":
try:
archive = l.LZA(f.retrieveFile(), f.getMimeType())
meta[id] = archive.getMediatumData()
except IOError:
v['error'] = "edit_lza_ioerror"
v['meta'] = meta
return req.getTAL("web/edit/modules/lza.html", v, macro="edit_lza")
| hibozzy/mediatum | web/edit/modules/lza.py | lza.py | py | 3,083 | python | en | code | null | github-code | 13 |
10669465587 | import unittest
from selenium import webdriver
from selenium.webdriver.firefox import firefox_binary
from multiprocessing import Process
import socket
import shutil
import os
import gnupg
import urllib2
import sys
os.environ['SECUREDROP_ENV'] = 'test'
import config
import source
import journalist
import test_setup
import urllib2
import signal
import traceback
from datetime import datetime
class FunctionalTest():
def _unused_port(self):
s = socket.socket()
s.bind(("localhost", 0))
port = s.getsockname()[1]
s.close()
return port
def _create_webdriver(self):
log_file = open('tests/log/firefox.log', 'a')
log_file.write('\n\n[%s] Running Functional Tests\n' % str(datetime.now()))
log_file.flush()
firefox = firefox_binary.FirefoxBinary(log_file=log_file)
return webdriver.Firefox(firefox_binary=firefox)
def setUp(self):
signal.signal(signal.SIGUSR1, lambda _, s: traceback.print_stack(s))
test_setup.create_directories()
self.gpg = test_setup.init_gpg()
test_setup.init_db()
source_port = self._unused_port()
journalist_port = self._unused_port()
self.source_location = "http://localhost:%d" % source_port
self.journalist_location = "http://localhost:%d" % journalist_port
def start_source_server():
source.app.run(port=source_port,
debug=True,
use_reloader=False)
def start_journalist_server():
journalist.app.run(port=journalist_port,
debug=True,
use_reloader=False)
self.source_process = Process(target = start_source_server)
self.journalist_process = Process(target = start_journalist_server)
self.source_process.start()
self.journalist_process.start()
self.driver = self._create_webdriver()
# Poll the DOM briefly to wait for elements. It appears .click() does
# not always do a good job waiting for the page to load, or perhaps
# Firefox takes too long to render it (#399)
self.driver.implicitly_wait(1)
self.secret_message = 'blah blah blah'
def tearDown(self):
test_setup.clean_root()
self.driver.quit()
self.source_process.terminate()
self.journalist_process.terminate()
| emccallum/securedrop | securedrop/tests/functional/functional_test.py | functional_test.py | py | 2,396 | python | en | code | null | github-code | 13 |
7684036250 | '''
Proyecto [Python]
-----------------------------
Autor: Damian Safdie
Version: 1.0
'''
import csv
def generar_id(archivo):
with open(archivo, 'r') as csvarch:
data = list(csv.DictReader(csvarch))
if len(data) > 0:
ultima_fila = data[-1]
ultimo_id = int(ultima_fila.get('id'))
else:
ultimo_id = 0
return ultimo_id + 1
def ingresar_nuevo_usuario():
# Solicitar valores al usuario
id = generar_id("usuarios.csv")
while True:
# nombre = str(input('Nombre: '))
nombre = " "
# apellido = str(input('Apellido: '))
apellido = " "
usuario = str(input('Usuario: '))
# contra = str(input('Password: '))
contra = " "
lista_apuesta = ""
for i in range(64):
lista_apuesta += "."
with open("usuarios.csv", 'r') as csvarch:
data = list(csv.DictReader(csvarch))
existe = False
for i in range(len(data)):
if usuario == data[i].get("usuario"):
existe = True
if existe:
print("usario existenete")
else:
break
# Construir usuario insertar en nuestro usuarios.csv
nuevo = {
"id": id,
"nombre": nombre,
"apellido": apellido,
"usuario": usuario,
"contra": contra,
"apuesta": lista_apuesta
}
# Abrir archivo CSV y agregar el nuevo usuario
with open('usuarios.csv', 'a', newline='') as csvfile:
header = ['id', 'nombre', 'apellido', 'usuario', 'contra', 'apuesta']
writer = csv.DictWriter(csvfile, header)
writer.writerow(nuevo)
pass
def muestro_partidos(opcion, apuesta):
with open("partidos.csv", 'r') as csvarch:
partidos = list(csv.DictReader(csvarch))
for i in range(64):
un_partido = partidos[i]
equipo1 = un_partido.get('equipo1')
equipo2 = un_partido.get('equipo2')
if opcion == 2:
resul = un_partido.get('resultado')
else:
resul = apuesta[i]
if resul == "L":
apu = equipo1
elif resul == "E":
apu = "EMPATE"
elif resul == "V":
apu = equipo2
else:
apu = "--------"
num = str(i+1).zfill(2)
if opcion == 1:
print (num, equipo1.ljust(15), "VS", equipo2.ljust(15), "tu apuesta: ", apu)
else:
print (num, equipo1.ljust(15), "VS", equipo2.ljust(15), "Resultado: ", apu)
def valido_usuario():
id = 0
apuesta = ""
while True:
with open("usuarios.csv", 'r') as csvarch:
data = list(csv.DictReader(csvarch))
existe = False
if len(data) > 0:
quien = input("ingrese nombre de usuario: ")
for i in range(len(data)):
if quien == data[i].get("usuario"):
existe = True
id = data[i].get("id")
apuesta = data[i].get("apuesta")
break
if existe:
break
else:
print("usario inexistenete")
else:
print ("no hay usuarios generados")
break
return id, apuesta
def refresco_usuario(id):
with open("usuarios.csv", 'r') as csvarch:
data = list(csv.DictReader(csvarch))
for i in range(len(data)):
if id == data[i].get("id"):
apuesta = data[i].get("apuesta")
return id, apuesta
def guardo_apuesta(id_usuario, i_partido, pronostico):
with open("usuarios.csv", 'r') as csvarch:
data = list(csv.DictReader(csvarch))
for i in range(len(data)):
if id_usuario == data[i].get("id"):
lista = data[i].get("apuesta")
nueva_lista = ""
for j in range(64):
if j != i_partido:
nueva_lista += lista[j]
else:
nueva_lista += pronostico
data[i]['apuesta'] = nueva_lista
break
with open('usuarios.csv', 'w', newline='') as csvfile:
header = ['id', 'nombre', 'apellido', 'usuario', 'contra', 'apuesta']
writer = csv.DictWriter(csvfile, fieldnames=header)
writer.writeheader()
writer.writerows(data)
def generar_apuestas(id_usuario, apuesta):
muestro_partidos(1, apuesta)
while True:
while True:
op = input("Ingrese el numero del partido a pronosticar: ")
if op.isdigit():
break
opcion = int(op)
if opcion > 0 and opcion < 65:
break
while True:
if opcion < 49: # fase de grupos
pronostico = input ("Ingrese L/E/V (Local,Empate,Visitante): ")
p = pronostico.upper()
if p == "L" or p == "E" or p == "V":
break
else:
pronostico = input ("Ingrese L/V (Local,Visitante): ")
p = pronostico.upper()
if p == "L" or p == "V":
break
pronostico = p
guardo_apuesta(id_usuario, opcion-1, pronostico)
return pronostico
if __name__ == '__main__':
while True:
menu = '''\n
Seleccione una opcion:
1. Generar un nuevo usuario
2. Registrar pronosticos
3. Registrar resultados de los partidos
4. Ver tabla de posiciones
5. Salir
Opcion elegida: '''
while True:
opcion = input(menu)
if opcion.isdigit():
break
else:
print("Opcion incorrecta")
opcion = int(opcion)
if opcion == 1:
# nuevo usuario
ingresar_nuevo_usuario()
elif opcion == 2:
id_usuario, apuesta = valido_usuario()
if id_usuario != 0:
while True:
generar_apuestas(id_usuario, apuesta)
while True:
otro = input("Desea Hacer otro pronostico [S/N]: ")
op = otro.upper()
if op == "N" or op == "S":
break
if op == "N":
break
id_usuario, apuesta = refresco_usuario(id_usuario)
elif opcion == 3:
# registar resultados de los partidos del mundial
muestro_partidos(2, "")
while True:
while True:
op = input("Ingrese el numero del partido: ")
if op.isdigit():
break
opcion = int(op)
if opcion > 0 and opcion < 65:
break
if opcion < 49: # fase de grupos
while True:
resultado = input ("Ingrese L/E/V (Local,Empate,Visitante): ")
p = resultado.upper()
if p == "L" or p == "V" or p == "E":
break
else:
while True:
resultado = input ("Ingrese L/V (Local,Visitante): ")
p = resultado.upper()
if p == "L" or p == "V":
break
with open("partidos.csv", 'r') as csvarch:
data = list(csv.DictReader(csvarch))
data[opcion-1]['resultado'] = resultado
with open('partidos.csv', 'w', newline='') as csvfile:
header = ['id', 'fecha', 'grupo', 'hora', 'equipo1', 'equipo2', 'gol1', 'gol2', 'resultado']
writer = csv.DictWriter(csvfile, fieldnames=header)
writer.writeheader()
writer.writerows(data)
elif opcion == 4:
# genera tabla de posiciones
with open("partidos.csv", 'r') as csvarch:
resultados = []
data = list(csv.DictReader(csvarch))
for i in range(len(data)):
resultado = data[i].get("resultado")
resultados.append(resultado)
with open("usuarios.csv", 'r') as csvarch:
data = list(csv.DictReader(csvarch))
posiciones = []
for i in range(len(data)):
usuario = data[i].get("usuario")
apuesta = data[i].get("apuesta")
puntos = 0
for p in range(64):
if apuesta[p] == resultados[p]:
puntos += 1
apostador = (str(puntos).zfill(4), usuario)
posiciones.append(apostador)
tabla = sorted(posiciones, reverse=True)
print("-------------------")
print("TABLA DE POSICIONES ")
print("-------------------")
for i in range(len(tabla)):
item = tabla[i]
usuario = item[1]
puntos = int(item[0])
print(usuario.ljust(15, "."), puntos)
elif opcion == 5:
break
| Damisaf/proyecto | qatar.py | qatar.py | py | 9,152 | python | es | code | 0 | github-code | 13 |
2363528371 | #!/usr/bin/env python
# encoding: utf-8
# @author: liusir
# @file: run_all_cases.py
# @time: 2020/5/8 9:37 下午
import os
import time
import unittest
# import HTMLTestRunner
from utils import HTMLTestReportCN
from utils.config_utils import local_config
def get_testsuite():
discover = unittest.defaultTestLoader.discover(start_dir='./testcases',
pattern='*_testcase.py',
top_level_dir='./testcases')
all_suite = unittest.TestSuite()
all_suite.addTest(discover)
return all_suite
# 生成报表方式一
# now = time.strftime('%Y_%m_%d_%H_%M_%S')
# html_report = os.path.join(local_config.report_path, 'api_result_%s.html' % now)
# file =open(html_report,'wb')
# runner = HTMLTestRunner.HTMLTestRunner(stream=file,
# title='api测试',
# description='接口测试描述',
# )
# runner.run(get_testsuite())
# file.close()
# 生成报表方式二
report_dir = HTMLTestReportCN.ReportDirectory(local_config.report_path+'/')
report_dir.create_dir('api测试')
dir_path = HTMLTestReportCN.GlobalMsg.get_value('dir_path')
report_path = HTMLTestReportCN.GlobalMsg.get_value('report_path')
fp = open(report_path,'wb')
runner = HTMLTestReportCN.HTMLTestRunner(stream=fp,
title='api测试',
description='接口测试描述',
tester='xiaochao')
runner.run(get_testsuite())
fp.close()
| chaoabc/Api_Test_Line_Frame | run_all_cases.py | run_all_cases.py | py | 1,644 | python | en | code | 0 | github-code | 13 |
26693553485 | #데이터 불러오기 (menu_clean.csv)
from modules import get_csv_to_list
data = get_csv_to_list("menu_new")
print(len(data))
drink = [
"망고에이드",
"유자에이드",
"아몬드우유",
"레몬에이드",
"청포도에이드",
"딸기에이드",
"자두에이드",
"오미자에이드",
"파워에이드",
"블루레몬에이드",
"청귤에이드",
"체리에이드",
"배쥬스",
"사과쥬스",
"복숭아아이스티",
"오미자쥬스",
"블루베리쥬스",
"알로에쥬스",
"캐플쥬스",
"토마토쥬스",
"요거풋풋사과쥬스",
"복숭아쥬스",
"청귤쥬스",
"감귤쥬스",
"요거상큼복숭아쥬스",
"파인쥬스",
"포도쥬스",
"오렌지쥬스",
"샤인머스캣쥬스",
"석류쥬스",
"갈아만든배쥬스",
"파인애플쥬스",
"매실쥬스",
"복분자쥬스",
"망고쥬스",
"자몽쥬스",
"청포도쥬스",
"게토레이",
"이프로",
"보리차",
"수정과",
"유자민트아이스티",
"초코우유",
"우유",
"블루베리스무디",
"레몬아이스티",
"갈아만든배",
"아몬드브리즈",
"포카리스웨트",
"요구르트"
]
salad = []
cereals = [
"오레오오즈",
"그래놀라카카오",
"초코첵스",
"그래놀라코코아호두",
"그래놀라",
"그래놀라코코",
"아몬드콘후레이크",
"초코크런치",
"그래놀라다이제",
"초코후레이크",
"후르트링",
"후르츠링",
"허니오즈",
"코코볼",
"오레오즈",
"우리쌀링"
]
fruits = [
'멜론',
'사과',
'베',
'리치',
'청포도',
'파인애플',
'감귤',
'자몽',
'귤',
'열대과일',
'바나나',
'키위',
'자두',
'메론',
'딸기',
'포도',
'참외',
'샤인머스캣',
'망고',
'수박',
'골드키위',
'석류',
'토마토',
'오렌지'
]
data = [x for x in data if x not in drink]
for d in data:
if "샐러드" in d:
salad.append(d)
data = [x for x in data if x not in salad]
data = [x for x in data if x not in cereals]
data = [x for x in data if x not in fruits]
data_new = []
for d in data:
data_new.append(str(d).replace("&", " "))
print(len(data_new))
from modules import write_csv
write_csv(data_new, "menu_new2")
| yuni0725/school-meal-analysis | classify.py | classify.py | py | 2,294 | python | ko | code | 0 | github-code | 13 |
17053946554 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class JsonParamDemo(object):
def __init__(self):
self._array_param = None
self._bool_param = None
self._date_param = None
self._datetime = None
self._num_param = None
self._price = None
@property
def array_param(self):
return self._array_param
@array_param.setter
def array_param(self, value):
if isinstance(value, list):
self._array_param = list()
for i in value:
self._array_param.append(i)
@property
def bool_param(self):
return self._bool_param
@bool_param.setter
def bool_param(self, value):
self._bool_param = value
@property
def date_param(self):
return self._date_param
@date_param.setter
def date_param(self, value):
self._date_param = value
@property
def datetime(self):
return self._datetime
@datetime.setter
def datetime(self, value):
self._datetime = value
@property
def num_param(self):
return self._num_param
@num_param.setter
def num_param(self, value):
self._num_param = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
def to_alipay_dict(self):
params = dict()
if self.array_param:
if isinstance(self.array_param, list):
for i in range(0, len(self.array_param)):
element = self.array_param[i]
if hasattr(element, 'to_alipay_dict'):
self.array_param[i] = element.to_alipay_dict()
if hasattr(self.array_param, 'to_alipay_dict'):
params['array_param'] = self.array_param.to_alipay_dict()
else:
params['array_param'] = self.array_param
if self.bool_param:
if hasattr(self.bool_param, 'to_alipay_dict'):
params['bool_param'] = self.bool_param.to_alipay_dict()
else:
params['bool_param'] = self.bool_param
if self.date_param:
if hasattr(self.date_param, 'to_alipay_dict'):
params['date_param'] = self.date_param.to_alipay_dict()
else:
params['date_param'] = self.date_param
if self.datetime:
if hasattr(self.datetime, 'to_alipay_dict'):
params['datetime'] = self.datetime.to_alipay_dict()
else:
params['datetime'] = self.datetime
if self.num_param:
if hasattr(self.num_param, 'to_alipay_dict'):
params['num_param'] = self.num_param.to_alipay_dict()
else:
params['num_param'] = self.num_param
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = JsonParamDemo()
if 'array_param' in d:
o.array_param = d['array_param']
if 'bool_param' in d:
o.bool_param = d['bool_param']
if 'date_param' in d:
o.date_param = d['date_param']
if 'datetime' in d:
o.datetime = d['datetime']
if 'num_param' in d:
o.num_param = d['num_param']
if 'price' in d:
o.price = d['price']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/JsonParamDemo.py | JsonParamDemo.py | py | 3,675 | python | en | code | 241 | github-code | 13 |
70556271698 | import os
import re
from Action import Action
from common.VerificationSuite import VerificationSuiteAction
# CoreMark
class A117215(VerificationSuiteAction):
def __init__(self, data):
VerificationSuiteAction.__init__(self, data)
self.actionid = 117215
self.name = "CoreMark"
def verify_template(self):
return "ovt_%s" % self.version.split('.')[-1]
def tests(self):
if self.verify_template().split('_')[1] == "vhdl":
return [ "cqdxmark1" ]
elif self.verify_template().split('_')[1] == "fpga":
return [ "fcqdxmark1" ]
else:
assert False
def post_process(self):
publish_txt = os.path.join(self.getWorkPath(), ".publish.txt")
file = open(publish_txt)
log = file.read()
file.close()
match = re.search("Thread 0: 'Ticks_S \d+', 'Ticks_E \d+', 'Ticks (\d+)', 'Active (\d+)', 'Idle (\d+)' and 'Speed (\d+)'", log)
if match:
ticks, active, idle, speed = match.group (1, 2, 3, 4)
results = {'Timer Ticks' : ticks,
'Active Cycles' : active,
'Idle Cycles' : idle,
'Speed' : speed,
}
return self.testsuiteSubmit(self.tests()[0], True, results)
else:
return self.error("Could not find CoreMark results")
| MIPS/overtest | action/A117215.py | A117215.py | py | 1,286 | python | en | code | 0 | github-code | 13 |
33566349959 | import subprocess
import os
import sys
import locale
# extra supybot libs.
import supybot.conf as conf
# supybot libs.
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('GitPull')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class GitPull(callbacks.Plugin):
"""Add the help for "@plugin help GitPull" here
This should describe *how* to use this plugin."""
threaded = True
def updateplugin(self, irc, msg, args, repodir):
"""[<plugin name>|<all>]
Do a git pull on a plugin directory. Specify plugin name or all for each.
Ex: updateplugin GitPull OR updateplugin all
"""
# make our "dict" of valid plugins/dictionaries
plugindirs = conf.supybot.directories.plugins()[:] # list of valid dirs.
dirdict = {} # dict for our output. key = PluginName, value = directory.
for plugindir in plugindirs: # for each "plugin" dir.
dirfiles = os.listdir(plugindir) # get a list of files in each dir.
for dirfile in dirfiles: # iterate over each dir.
if os.path.isdir(os.path.join(plugindir, dirfile, '.git')): # if they're directories
dirdict[dirfile] = os.path.join(plugindir, dirfile) # add into the dict.
# validate input.
if repodir == "all":
workdirs = [f for (f, v) in dirdict.items()] # copy all keys
elif repodir not in dirdict:
irc.reply("ERROR: '{0}' is an invalid plugin. Valid: {1}".format(repodir, " | ".join(sorted(dirdict.keys()))))
return
else: # this means its not all but repodir is in dirdict.
workdirs = [repodir] # we inject the single key into the list.
# we're valid from here on.
for workdir in workdirs:
# we're valid from here on.
cwd = dirdict[workdir] # cwd has to be the dir (in the value)
command = ["git", "pull"]
# run the command.
pipe = subprocess.Popen(command, shell=False, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, error) = pipe.communicate()
return_code = pipe.wait()
# check if command worked or not.
if return_code == 0: # command worked.
# work around for py2/py3
if sys.version_info[0] == 2: # py2 and above.
outlines = out.split('\n') # split on newlines into list.
else: # py3 and above.
encoding = locale.getdefaultlocale()[1] # default encoding (should be utf-8)
outlines = out.decode(encoding).split('\n')
# now continue
if len(outlines) > 6: # more than six lines.
output = " ".join([i for i in outlines]) # make it all one line, separated by a space.
output = utils.str.normalizeWhitespace(output) # have no doublespaces.
irc.reply("{0} :: {1}".format(workdir, output))
else: # less than six lines.
for outline in outlines: # output each line.
if outline != '': # don't output blank lines:
irc.reply("{0} :: {1}".format(workdir, outline.strip()))
else: # error.
error = error.replace('\n', ' ') # replace newlines to spaces.
error = utils.str.normalizeWhitespace(error) # make sure no doublespaces.
irc.reply("ERROR :: {0} :: {1}".format(repodir, error))
updateplugin = wrap(updateplugin, [('checkCapability', 'owner'), ('somethingWithoutSpaces')])
Class = GitPull
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| andrewtryder/GitPull | plugin.py | plugin.py | py | 3,994 | python | en | code | 0 | github-code | 13 |
39118294951 | import logging
from pprint import pprint # noqa
from banal import ensure_list
from aleph.core import db
from aleph.model import DocumentRecord, Document
from aleph.index.entities import index_operation
from aleph.index.indexes import entities_read_index, entities_index_list
from aleph.index.util import INDEX_MAX_LEN, BULK_PAGE
from aleph.index.util import query_delete, bulk_actions
log = logging.getLogger(__name__)
def index_document(document, shallow=False, sync=False):
log.info("Index document [%s]: %s", document.id, document.name)
operations = generate_document(document, shallow=shallow)
bulk_actions(operations, sync=sync)
def delete_document(document_id, sync=False):
"""Delete all records associated with the given document."""
q = {'term': {'document_id': document_id}}
schemata = (DocumentRecord.SCHEMA_PAGE,
DocumentRecord.SCHEMA_ROW,
Document.SCHEMA)
query_delete(entities_read_index(schemata), q, sync=sync)
def generate_document(document, shallow=False):
"""Generate bulk index actions for all records and the main document."""
data = document.to_dict()
data['text'] = ensure_list(data.get('text'))
total_len = sum((len(t) for t in data['text']))
if document.supports_records:
q = db.session.query(DocumentRecord)
q = q.filter(DocumentRecord.document_id == document.id)
for idx, record in enumerate(q.yield_per(BULK_PAGE)):
texts = list(record.texts)
if total_len < INDEX_MAX_LEN:
total_len += sum((len(t) for t in texts))
data['text'].extend(texts)
record = record.to_dict()
record['collection_id'] = document.collection_id
record['created_at'] = document.created_at
record['updated_at'] = document.updated_at
record['text'] = texts
if not shallow:
entity_id, index, body = index_operation(record)
yield {
'_id': entity_id,
'_index': index,
'_source': body
}
if idx > 0 and idx % 1000 == 0:
log.info("Indexed [%s]: %s records...", document.id, idx)
# log.debug("Text length [%s]: %s", document.id, total_len)
entity_id, index, body = index_operation(data)
for other in entities_index_list(Document.SCHEMA):
if other != index:
yield {
'_id': entity_id,
'_index': other,
'_op_type': 'delete'
}
yield {
'_id': entity_id,
'_index': index,
'_source': body
}
def generate_collection_docs(collection):
q = Document.by_collection(collection.id)
q = q.order_by(Document.id.asc())
for idx, document in enumerate(q.yield_per(BULK_PAGE)):
try:
log.info("Index [%s]: %s", document.id, document.name)
yield from generate_document(document)
except Exception:
log.exception("Cannot index [%s]: %s", document.id, document.name)
if idx % 1000 == 0:
db.session.expunge_all()
| ATADDATALOG/test-repo | aleph/index/documents.py | documents.py | py | 3,166 | python | en | code | 0 | github-code | 13 |
26172457354 | from functools import reduce
# TimeComplexity wiki
# https://wiki.python.org/moin/TimeComplexity
def array_of_array_products_brute(ary: list) -> list:
res: list = list()
arr_len = len(ary)
if arr_len == 0 or arr_len == 1:
return []
for i in range(0, len(ary)):
product = 1
for j in range(0, len(ary)):
if i == j:
continue
else:
product *= ary[j]
res.append(product)
return res
def array_of_array_products_div(ary: list) -> list:
res = list()
arr_len = len(ary)
if arr_len == 0 or arr_len == 1:
return []
product: int = reduce(lambda x, y: x * y, ary)
for i in ary:
res.append(product // i)
return res
def array_of_array_products_calc(ary: list) -> list:
product_arr = []
arr_len = len(ary)
if arr_len == 0 or arr_len == 1:
return []
product = 1
for i in range(0, arr_len):
product_arr.append(product)
product *= arr[i]
product = 1
for i in range(arr_len - 1, -1, -1):
product_arr[i] *= product
product *= arr[i]
return product_arr
if __name__ == "__main__":
# arr: list = [8, 10, 2]
arr: list = [2, 7, 3, 4]
# result = array_of_array_products_div(arr)
result = array_of_array_products_calc(arr)
print(result)
| drewnix/algo | algo/py/pramp/array_of_array_products/array_of_array_products.py | array_of_array_products.py | py | 1,370 | python | en | code | 1 | github-code | 13 |
38439285067 | import os
import json
import requests
import traceback
from libs.config import config
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
class Microsoft:
def __init__(self):
self.name = 'Microsoft'
self.end_point = config.MICROSOFT_END_POINT
self.api_key = config.MICROSOFT_API_KEY
self.client = ComputerVisionClient(self.end_point, CognitiveServicesCredentials(self.api_key))
def predict(self, image_id_list : list):
total = len(image_id_list)
counter = 0
output = {}
for id in image_id_list:
counter += 1
print(f'Predict {counter} from {total}')
image_url = f'{config.BASE_URL}{id}'
image_path = f'{config.BASE_FOLDER}{id}'
try:
response = self.client.tag_image(image_url)
image_output = []
for tag in response["tags"]:
print('%12s: %.2f' % (tag['name'], tag['confidence']))
image_output.append({
'name' : tag['name'],
'score': tag['confidence']
})
output[id] = image_output
except KeyboardInterrupt:
return
except Exception:
print('Error', id, traceback.format_exc())
with open(f'predictions/{self.name}.json', 'w') as f:
f.write(json.dumps(output))
| mobiusml/benchmark_competition | libs/microsoft.py | microsoft.py | py | 1,535 | python | en | code | 2 | github-code | 13 |
10789116590 | import json
from flask_classful import FlaskView, route
from flask import render_template, jsonify, request, session, redirect, url_for, flash
from source.WarstwaBiznesowa.PosrednikBazyDanych import PosrednikBazyDanych
from source.WarstwaBiznesowa.KontroleryModeli.KontrolerModeluInterface import TypModelu
from source.WarstwaBiznesowa.ModeleBLL.KontoPrywatneBLL import KontoPrywatneBLL
from source.WarstwaBiznesowa.SystemMailingowy.SystemMailingowy import SystemMailingowy
from source.WarstwaBiznesowa.ModeleBLL.RezerwacjaBLL import RezerwacjaBLL
from source.WarstwaBiznesowa.ModeleBLL.SalaBLL import SalaBLL
import os
from datetime import datetime
from source.WarstwaBiznesowa.wyjatki import *
class WidokKontaPrywatnego(FlaskView):
"""
Klasa odpowiedzialna za renderowanie widokow dla interfejsu konta prywatnego
"""
@route('/')
def stronaPoczatkowa(self):
"""
Wyswietla strone poczatkowa jestli uzytkownik prywatny jest zalogowany
w przeciwnym wypadku przekirowuje do logowanie do konta prywatnego
:return: Szablon html strony poczatkowej interfejsu prywatnego lub przekierowanie do logowania
"""
if "email" in session.keys() and "konto_id" in session.keys():
email = session.get('email')
kontoid = session.get('konto_id')
return render_template('WidokKontaPrywatnego/index.html', email=email, id=kontoid)
else:
return redirect(url_for("WidokStronyStartowej:Logowanie_doKontaPrywatnego"))
@route("/daneKonta/", methods=["GET", "POST"])
def daneKonta(self):
"""
Pokazuje strone z danymi uzytkownika do edycji (Metoda GET),
Metoda POST (gdy w polu methodid post'a jest "edytuj") - aktualizuje dane konta prywatnego na podstawie zmian w formularzu
Metoda POST (gdy w polu methodid jest "usun") - usuwa konto prywatne
:return:
"""
if 'konto_id' in session.keys():
bazadanych = PosrednikBazyDanych(TypModelu.KontoPrywatne)
user: KontoPrywatneBLL = bazadanych.pobierzObiekt(session['konto_id'])
user.Rezerwacje = None
if request.method == "GET":
return render_template("WidokKontaPrywatnego/daneKonta.html", text_lists=user.toDict())
else:
if request.form.get('methodid') == "edytuj":
nowe_dane = request.form
for k, v in nowe_dane.items():
setattr(user, k, v)
try:
bazadanych.zaktualizujObiekt(session['konto_id'], user)
except BladZapisuDoBazyDanych as e:
flash("Nie udalo sie zapisac twoich danych")
return redirect(url_for('WidokKontaPrywatnego:daneKonta'))
session.pop("email")
session.pop("konto_id")
flash("Zaktualizowano dane konta. Zaloguj sie ponownie!")
return redirect(url_for('WidokStronyStartowej:Logowanie_doKontaPrywatnego'))
elif request.form.get('methodid') == "usun":
try:
bazadanych.usunObiekt(session['konto_id'])
except BladWBazieDanychError as e:
flash(str(e))
return redirect(url_for('WidokKontaPrywatnego:daneKonta'))
session.pop("email")
session.pop("konto_id")
flash("Usunięto twoje konto :( ")
return redirect(url_for('WidokStronyStartowej:StronaPoczatkowa'))
else:
return redirect(url_for("WidokStronyStartowej:Logowanie_doKontaPrywatnego"))
@route("/mojeRezerwacje/")
def mojeRezerwacje(self):
"""
Wyswietla widok wszystkich rezerwacji uzytkownika zalogowanego
:return: renderuje szablon html dla listy rezerwacji lub przekierowanie do logowania
"""
if 'konto_id' in session.keys():
rezerwacje = PosrednikBazyDanych(TypModelu.Rezerwacje).pobierzWszystkieObiekty()
moje_rezerwacje = [r for r in rezerwacje if r.Id_Rezerwujacego == session['konto_id']]
moje_rezer = []
sale_db = PosrednikBazyDanych(TypModelu.Sale)
firmy_db = PosrednikBazyDanych(TypModelu.KontoFirmowe)
terminy_db = PosrednikBazyDanych(TypModelu.Terminy)
for r in moje_rezerwacje:
try:
termin = terminy_db.pobierzObiekt(r.Id_Terminu)
if not termin:
continue
sala = sale_db.pobierzObiekt(r.Id_Sali)
firma = firmy_db.pobierzObiekt(sala.Id_Wynajmujacego)
moje_rezer.append({"id_rezerwacji": r.Id_Rezerwacji, "firma": firma.Nazwa_Firmy,
"cena": sala.Cena, "Adres": sala.Adres,
"terminod": datetime.strftime(termin.Data_i_godzina_Rozpoczecia, "%d.%m.%Y %H:%M"),
"termindo": datetime.strftime(termin.Data_i_godzina_Zakonczenia, "%d.%m.%Y %H:%M")})
except BladWKontrolerzeModeliError as e:
flash("Coś poszło nie tak w pobieraniu twoich rezerwacji, sprobuj ponownie")
return redirect(url_for("WidokKontaPrywatnego:stronaPoczatkowa"))
return render_template("WidokKontaPrywatnego/mojeRezerwacje.html", mojerezer=moje_rezer)
else:
return redirect(url_for("WidokStronyStartowej:Logowanie_doKontaPrywatnego"))
@route("/ofertySal/")
def ofertySal(self):
"""
Wyswietla liste dostepnych ofert sal
:return: szablon html dla widoku ofert sal
"""
if 'konto_id' in session.keys():
database = PosrednikBazyDanych(TypModelu.Sale)
firmDb = PosrednikBazyDanych(TypModelu.KontoFirmowe)
sale_obj = database.pobierzWszystkieObiekty()
sale = [s.toDict() for s in sale_obj if s.Wolna and s]
for s in sale:
try:
s['id_wynajmujacego'] = firmDb.pobierzObiekt(s['id_wynajmujacego']).Nazwa_Firmy
except BladWKontrolerzeModeliError as e:
flash("Coś poszło nie tak w trakcie wydobywania danych sal")
return redirect(url_for("WidokKontaPrywatnego:stronaPoczatkowa"))
return render_template("WidokKontaPrywatnego/ofertySal.html",
text_lists={'sale': sale}, oferty_sal=sale)
else:
return redirect(url_for("WidokStronyStartowej:Logowanie_doKontaPrywatnego"))
@route("/ofertySal/<int:id>/", methods=['GET', 'POST'])
def szczegolyOferty(self, id):
"""
Wyswietla szczegoly oferty o id podanym w url. (dla metody GET)
dla metody POST, po wybraniu terminu tworzy obiekt rezerwacji dla wybranego terminu w sali wyswietlanej wczesniej
:param id: identyfikator wybranej sali
:return: szablon html wyswietlajacy szczeguly sali, lub redirect do tej samej strony dla GET
"""
if 'konto_id' in session.keys():
if request.method == "GET":
saleDB = PosrednikBazyDanych(TypModelu.Sale)
sala = saleDB.pobierzObiekt(id)
salaDict = sala.toDict()
terminy = salaDict.get('terminy')
try:
firma = PosrednikBazyDanych(TypModelu.KontoFirmowe).pobierzObiekt(salaDict['id_wynajmujacego'])
salaDict['id_wynajmujacego'] = firma.Nazwa_Firmy
except BladWKontrolerzeModeliError as e:
flash("Nie znaleziono podanej sali")
return render_template("WidokKontaPrywatnego/ofertySal.html")
if salaDict.get("wolna"):
return render_template("WidokKontaPrywatnego/szczegolyOferty.html", sala=salaDict, terminy=terminy)
else:
return redirect(url_for("WidokKontaPrywatnego:ofertySal"))
else:
id_sali = int(request.form['idsali'])
id_terminu = request.form['termin']
sala: SalaBLL = PosrednikBazyDanych(TypModelu.Sale).pobierzObiekt(id_sali)
rezerwacjeDB = PosrednikBazyDanych(TypModelu.Rezerwacje)
system_mailingowy = SystemMailingowy(rezerwacjeDB.kontrolerModelu)
try:
rezerwacja = RezerwacjaBLL(0, float(sala.Cena), int(session['konto_id']), int(id_sali), int(id_terminu))
rezerwacjeDB.dodajObiekt(rezerwacja)
except (TypeError, ValueError, BladZapisuDoBazyDanych) as e:
flash("Błąd modelu rezerwacji!")
return render_template("WidokKontaPrywatnego/ofertySal.html")
flash("Zarezewowano poprawnie salę.")
return redirect(url_for("WidokKontaPrywatnego:szczegolyOferty", id=id_sali))
else:
return redirect(url_for("WidokStronyStartowej:Logowanie_doKontaPrywatnego"))
@route("/szczegolyRezerwacji/<int:id>/", methods=["GET", "POST"])
def szczegolyRezerwacji(self, id):
"""
Methoda wyswietlajaca szczegoly rezerwacji (Metoda GET) lub usuwajaca rezerwacje dla metody POST
:param id: id rezerwacji w bazie danych
:return: szablon html ze szczegolami sali, redirect do strony z lista rezerwacji
"""
if 'konto_id' in session.keys():
if request.method == "GET":
try:
rezerwacje_db = PosrednikBazyDanych(TypModelu.Rezerwacje)
rezer = rezerwacje_db.pobierzObiekt(id).toDict()
sala = PosrednikBazyDanych(TypModelu.Sale).pobierzObiekt(rezer.get('Id_Sali')).toDict()
firma = PosrednikBazyDanych(TypModelu.KontoFirmowe).pobierzObiekt(sala.get('id_wynajmujacego')).toDict()
termin = PosrednikBazyDanych(TypModelu.Terminy).pobierzObiekt(rezer.get('Id_Terminu')).toDict()
if not termin:
return redirect(url_for("WidokKontaPrywatnego:mojeRezerwacje"))
rezer['sala'] = sala
rezer['firmaWynajmujaca'] = firma.get('Nazwa_Firmy')
rezer['terminOd'] = termin.get('Data_i_godzina_Rozpoczecia')
rezer['terminDo'] = termin.get('Data_i_godzina_Zakonczenia')
return render_template("WidokKontaPrywatnego/szczegolyRezerwacji.html", rezer=rezer)
except BladWKontrolerzeModeliError as e:
flash("Coś się popsuło w kontrolerze w serwerze! Spróbuj ponownie")
return redirect(url_for("WidokKontaPrywatnego:mojeRezerwacje"))
else:
posrednik = PosrednikBazyDanych(TypModelu.Rezerwacje)
mail = SystemMailingowy(posrednik.kontrolerModelu)
try:
posrednik.usunObiekt(id)
flash("Pomyślnie dokonano rezygnacji")
return redirect(url_for("WidokKontaPrywatnego:mojeRezerwacje"))
except (BladWBazieDanychError, BladWKontrolerzeModeliError) as e:
flash("Nie można zrezygnować z oferty na mniej niż 2 dni przed!")
return redirect(url_for("WidokKontaPrywatnego:szczegolyRezerwacji", id=id))
else:
return redirect(url_for("WidokStronyStartowej:Logowanie_doKontaPrywatnego"))
@route("/wylogujsie/")
def wylogujsie(self):
"""
Metoda wylogowuje uzytkownika z konta (usuwa z sesji jego dane)
:return: przekierowanie do strony poczatkowej dla niezalogowanego uzytkownika
"""
if 'email' in session.keys() and 'konto_id' in session.keys():
session.pop("email")
session.pop("konto_id")
flash('Wylogowano sie z konta prywatnego')
return redirect(url_for("WidokStronyStartowej:StronaPoczatkowa")) | danielswietlik/WypozyczalniaSalKonferencyjnych | source/WarstwaPrezentacji/KontroleryWidokow/WidokKontaPrywatnego.py | WidokKontaPrywatnego.py | py | 12,165 | python | pl | code | 0 | github-code | 13 |
31279392769 | # Configurations de la base de données
# os gives us filesystem-path helpers for building the DB location.
import os
# Absolute path of the directory containing this config module.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Full path of the SQLite file kept inside the sibling "static" folder.
DATABASE_PATH = os.path.join(BASE_DIR, '../static/your_database.db')
# SQLAlchemy connection URI for that SQLite database.
SQLALCHEMY_DATABASE_URI = f'sqlite:///{DATABASE_PATH}'
# Disable SQLAlchemy's modification-tracking signal (saves memory).
SQLALCHEMY_TRACK_MODIFICATIONS = False
| Math94550/Projet_vente_avion_CMMP | config/database.py | database.py | py | 539 | python | fr | code | 0 | github-code | 13 |
30634521649 | '''
Write a script in python to find factorial of a number
4! = 1 X 2 X 3 X 4 = 24
7! = 1 X 2 X 3 X 4 X 5 X 6 X 7 =
'''
# Read the target number, then accumulate the running product 1 * 2 * ... * num.
num = int(input("Enter a number ? "))
fact = 1
i = 1
while i <= num:
    fact *= i
    i += 1
print(f"Factorial of {num} is {fact}")
| ajaybhatia/programming-in-python-session-2020 | ex04.py | ex04.py | py | 250 | python | en | code | 1 | github-code | 13 |
42630006369 | import rosbag
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
bag = rosbag.Bag('2020-10-27-16-32-15.bag') # open the recorded rosbag
topics = bag.get_type_and_topic_info()[1].keys() # topic names present in the bag
#print(topics) #uncomment to see the topic. Should be /myo_ros/myo_emg
dataCol = [] # column headers for the output table
dataCol.append('time') # first column is the timestamp
for i in range(0,8): # one column per EMG channel: EMG0 ... EMG7
    dataCol.append('EMG'+str(i))
df = pd.DataFrame(columns=dataCol) # empty frame with those columns
for topic, msg, t in bag.read_messages(topics=['/myo_raw/myo_emg']): # iterate EMG messages
    tmp = [t.to_sec()] # rosbag timestamp converted to seconds
    tmp.extend(msg.data) # append the 8 channel samples after the timestamp
    tmpSeries = pd.Series(tmp, index=df.columns) # one row, labelled by column
    # NOTE(review): DataFrame.append is deprecated in modern pandas (removed in
    # 2.0) and is O(n) per call; collecting rows in a list and building the
    # frame once would be preferable.
    df = df.append(tmpSeries, ignore_index=True) # add the row to the frame
bag.close() # release the bag file handle
time_start = df.get('time')[0] # absolute wall-clock time of the first sample
df['time'] = df.get('time')-time_start # rebase timestamps so recording starts at 0
#Save dataframe to CSV
df.to_csv('47.csv', index=False)
#print(df) #print to see what the data frame looks like now
##Plot the data from the bag file into 8 windows - one single figure
#fig, axs = plt.subplots(4,2)
#for idx, ax in enumerate(axs.flat):
# ax.plot(df['time'],df['EMG'+str(idx)],label='EMG'+str(idx))
# ax.set_ylim(0, 2000)
# ax.set_xlabel('Time [s]')
# ax.set_ylabel('EMG signal')
# ax.legend()
# ax.label_outer()
#
#plt.show()
| paulavillafulton/EMG_MYO_ROS_NeuroInspiredSystemEngineering | paula-myo-ros/data/bagToCSV.py | bagToCSV.py | py | 1,877 | python | en | code | 0 | github-code | 13 |
def firstDuplicate(a):
    """Return the value whose second occurrence comes earliest in *a*, or -1.

    Single pass with a seen-set: the first value already seen is, by
    construction, the duplicate with the minimal second-occurrence index.
    The original scanned the whole list even after the answer was fixed,
    kept a redundant `min_idx` (the loop index is strictly increasing, so
    the first hit always wins), and allocated an O(n) flag list that only
    worked for values in 1..n.
    """
    seen = set()
    for value in a:
        if value in seen:
            return value  # earliest second occurrence -> the answer
        seen.add(value)
    return -1  # no value occurs twice
def firstNotRepeatingCharacter(s):
    """Return the first character of *s* that occurs exactly once, or '_'.

    Two passes: count every character, then scan *s* in order for the first
    count-1 character.  The original maintained a separate first-seen-order
    list plus a manual while/flag search; scanning *s* itself gives the same
    first-seen order for free.
    """
    counts = {}
    for c in s:
        counts[c] = counts.get(c, 0) + 1
    return next((c for c in s if counts[c] == 1), '_')
def rotateImage(a):
    """Rotate the square matrix *a* 90 degrees clockwise in place; return it."""
    # Reversing the row order and transposing is a 90-degree clockwise turn;
    # assigning through a[:] updates the caller's list object.
    a[:] = [list(row) for row in zip(*a[::-1])]
    return a
def sudoku2(grid):
    """Return True iff the partially filled 9x9 *grid* violates no Sudoku rule.

    Cells hold digit characters or '.' for empty.  Checks every row, every
    column and every 3x3 block for a repeated digit.  The original built
    unused row/column slices (`ws`, `wc`) on every cell, accumulated a dead
    `ts` sum, and indexed per-row flag lists; plain seen-sets do the same
    duplicate test without assuming the cell values are digits 1..9.
    """
    n = len(grid)
    # Rows and columns in one sweep: row i and column i share the loop.
    for i in range(n):
        seen_row = set()
        seen_col = set()
        for j in range(n):
            v_row = grid[i][j]
            v_col = grid[j][i]
            if v_row != '.':
                if v_row in seen_row:
                    return False
                seen_row.add(v_row)
            if v_col != '.':
                if v_col in seen_col:
                    return False
                seen_col.add(v_col)
    # 3x3 blocks, anchored at every (bi, bj) multiple of 3.
    for bi in range(0, n, 3):
        for bj in range(0, n, 3):
            seen = set()
            for i in range(3):
                for j in range(3):
                    v = grid[bi + i][bj + j]
                    if v != '.':
                        if v in seen:
                            return False
                        seen.add(v)
    return True
def isCryptSolution(crypt, solution):
    """Check a cryptarithm: does crypt[0] + crypt[1] == crypt[2] under *solution*?

    *solution* is a list of [letter, digit-string] pairs.  A decoded number
    with a leading zero (and more than one digit) invalidates the solution.
    """
    mapping = {letter: digit for letter, digit in solution}
    decoded = []
    for word in crypt:
        num = encrypt(word, mapping)
        if len(num) > 1 and num[0] == '0':
            return False  # leading zero is not a valid number
        decoded.append(int(num))
    return decoded[0] + decoded[1] == decoded[2]


def encrypt(w, d_sol):
    """Map every letter of *w* through *d_sol* and join into a digit string."""
    return ''.join(d_sol[c] for c in w)
# ====================================
# Inline self-tests for the solutions above (run on import).
# firstDuplicate: earliest second occurrence / no duplicate.
a = [2, 3, 3, 1, 5, 2]
assert firstDuplicate(a) == 3
a = [2, 4, 3, 5, 1]
assert firstDuplicate(a) == -1
# firstNotRepeatingCharacter: unique char exists / all repeated.
s = "abacabad"
assert firstNotRepeatingCharacter(s) == 'c'
s = "abacabaabacaba"
assert firstNotRepeatingCharacter(s) == '_'
# rotateImage: 90-degree clockwise rotation.
a = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
assert rotateImage(a) == [[7, 4, 1],
                          [8, 5, 2],
                          [9, 6, 3]]
# sudoku2: a consistent partial grid ...
grid = [['.', '.', '.', '1', '4', '.', '.', '2', '.'],
        ['.', '.', '6', '.', '.', '.', '.', '.', '.'],
        ['.', '.', '.', '.', '.', '.', '.', '.', '.'],
        ['.', '.', '1', '.', '.', '.', '.', '.', '.'],
        ['.', '6', '7', '.', '.', '.', '.', '.', '9'],
        ['.', '.', '.', '.', '.', '.', '8', '1', '.'],
        ['.', '3', '.', '.', '.', '.', '.', '.', '6'],
        ['.', '.', '.', '.', '.', '7', '.', '.', '.'],
        ['.', '.', '.', '5', '.', '.', '.', '7', '.']]
assert sudoku2(grid) == True
# ... and one with a duplicated 7 in row 2 (and column 4).
grid = [['.', '.', '.', '.', '2', '.', '.', '9', '.'],
        ['.', '.', '.', '.', '6', '.', '.', '.', '.'],
        ['7', '1', '.', '.', '7', '5', '.', '.', '.'],
        ['.', '7', '.', '.', '.', '.', '.', '.', '.'],
        ['.', '.', '.', '.', '8', '3', '.', '.', '.'],
        ['.', '.', '8', '.', '.', '7', '.', '6', '.'],
        ['.', '.', '.', '.', '.', '2', '.', '.', '.'],
        ['.', '1', '.', '2', '.', '.', '.', '.', '.'],
        ['.', '2', '.', '.', '3', '.', '.', '.', '.']]
assert sudoku2(grid) == False
# isCryptSolution: the classic SEND + MORE = MONEY instance ...
crypt = ["SEND", "MORE", "MONEY"]
solution = [['O', '0'],
            ['M', '1'],
            ['Y', '2'],
            ['E', '5'],
            ['N', '6'],
            ['D', '7'],
            ['R', '8'],
            ['S', '9']]
assert isCryptSolution(crypt, solution) == True
# ... and an invalid mapping (TEN decodes with a leading zero).
crypt = ["TEN", "TWO", "ONE"]
solution = [['O', '1'],
            ['T', '0'],
            ['W', '9'],
            ['E', '5'],
            ['N', '4']]
assert isCryptSolution(crypt, solution) == False | donrebel/codefights | interview/arrays.py | arrays.py | py | 4,802 | python | vi | code | 0 | github-code | 13 |
class Solution:
    def getCommon(self, nums1: List[int], nums2: List[int]) -> int:
        """Return the smallest value present in both lists, or -1 if none."""
        common = set(nums1) & set(nums2)
        return min(common) if common else -1
70726885459 | #! python3
#findUrls.py -> find urls that begins with http:// ans https:// in a clipboard then clean it and sort it
import re, pyperclip
# urls finder Regex
# URL matcher: scheme followed by one or more dot-separated domain labels.
# The old pattern's `[\w+\.]?` allowed at most ONE extra character, so it
# truncated multi-label hosts (e.g. news.bbc.co.uk matched only news.bbc).
urlsRegex = re.compile(
    r'''
    (http[s]?://        # scheme: http:// or https://
     (?:[\w-]+\.)+      # domain labels, each followed by a dot
     [A-Za-z]{2,}       # top-level domain
    )
    ''', re.I | re.VERBOSE)
# url finder function
def findUrls(text):
    """Return every URL substring of *text* matched by urlsRegex.

    findall() already returns the captured group for each match, so the
    original's copy-into-`matches` loop and its debug print were redundant.
    """
    return urlsRegex.findall(text)
# getting the text from the clipboard
text = pyperclip.paste()
# ex of random text with random emails:
# Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse aliquam
# https://www.google.com velit vel suscipit facilisis. Integer posuere, http://www.example.com ipsum eu
# viverra tincidunt, velit ante blandit elit, a mollis mi est at velit. Nam vel urna vel odio http://news.bbc.co.uk
# blandit euismod. Aliquam id varius eros. Integer https://www.reddit.com tempor lacus euismod semper facilisis.
# Proin id erat https://github.com sed magna rutrum congue. Sed eget nulla eget nibh convallis dictum. Nunc tempor dolor
# a massa hendrerit, a congue velit http://www.wikipedia.org faucibus. In euismod orci a metus commodo, at
# elementum https://stackoverflow.com velit tincidunt. Nulla facilisi. Sed http://www.apple.com vel aliquet odio. Aenean ac
# magna quis https://www.amazon.com ipsum congue dapibus id a nibh. Morbi
# at http://www.microsoft.com nisl nisi.
# Copy all found URLs back to the clipboard, newline-separated.
# NOTE(review): findUrls() is called twice here, scanning the text twice;
# the result could be computed once and reused.
if len(findUrls(text)) > 0: #check if the return is greater than 0
    pyperclip.copy('\n'.join(findUrls(text))) #copying the returned list as a text to the clipboard
    print("\nFound URLs in clipboard:")
else:
    print("\nNo URLs found in clipboard.")
#-------------------result
# https://www.google
# http://www.example
# http://news.bbc
# https://www.reddit
# https://github.com
# http://www.wikipedia
# https://stackoverflow.com
# http://www.apple
# https://www.amazon
# http://www.microsoft | ELGHAZAL-SAID/Make-it-easy-with-Python | RegexSmallProjects/findUrls.py | findUrls.py | py | 2,140 | python | en | code | 2 | github-code | 13 |
34795653249 | #!/usr/bin/env/python
# File name : server.py
# Production : PiCar-C
# Website : www.adeept.com
# Author : William
# Date : 2019/11/21
import socket
import threading
import time
import os
import LED
import move
import servo
import switch
import RPIservo
# --- hardware bring-up -------------------------------------------------
servo.servo_init()
switch.switchSetup()
switch.set_all_switch_off()
# Status LED strip; initial colour shows the server is booting.
Led = LED.LED()
Led.colorWipe(80,255,0)
# --- shared command state read by the worker threads -------------------
step_set = 1
speed_set = 100          # motor PWM duty used for all drive commands
rad = 0.6
direction_command = 'no'
turn_command = 'no'
servo_command = 'no'
pos_input = 1
catch_input = 1
cir_input = 6
servo_speed = 5          # step size for camera-servo nudges
# Background LED-effect thread (police/rainbow patterns).
ledthread = LED.LED_ctrl()
ledthread.start()
# Servo controllers: scGear steers; P_sc / T_sc drive the camera --
# presumably pan and tilt respectively (TODO confirm against RPIservo).
scGear = RPIservo.ServoCtrl()
scGear.moveInit()
P_sc = RPIservo.ServoCtrl()
P_sc.start()
T_sc = RPIservo.ServoCtrl()
T_sc.start()
class Servo_ctrl(threading.Thread):
    """Worker thread that keeps applying the global `servo_command` to the
    camera servos until stopped.

    pause() halts the loop between moves, resume() releases it, and stop()
    ends the thread for good.  Uses `Event.is_set()` instead of the
    deprecated camelCase alias `isSet()` (deprecated since Python 3.10).
    """

    def __init__(self, *args, **kwargs):
        super(Servo_ctrl, self).__init__(*args, **kwargs)
        # __flag gates each loop iteration (cleared == paused).
        self.__flag = threading.Event()
        self.__flag.set()
        # __running stays set for the thread's whole lifetime.
        self.__running = threading.Event()
        self.__running.set()

    def run(self):
        # Poll the module-level servo_command and translate it into moves.
        while self.__running.is_set():
            self.__flag.wait()
            if servo_command == 'lookleft':
                servo.lookleft(servo_speed)
            elif servo_command == 'lookright':
                servo.lookright(servo_speed)
            elif servo_command == 'up':
                servo.up(servo_speed)
            elif servo_command == 'down':
                servo.down(servo_speed)
            time.sleep(0.03)

    def pause(self):
        self.__flag.clear()

    def resume(self):
        self.__flag.set()

    def stop(self):
        # Release a paused run() so it can observe the cleared running flag.
        self.__flag.set()
        self.__running.clear()
self.__running.clear()
def app_ctrl():
    """Publish the app-server address settings and start the servo worker.

    The address/buffer values are declared global because appconnect() and
    the module-level receive loop read them as module globals; the original
    bound them as locals, so those readers raised NameError.  NOTE(review):
    the module body still calls appconnect() before app_ctrl() ever runs,
    so the import-time flow needs a separate fix.
    """
    global servo_move, app_HOST, app_PORT, app_BUFSIZ, app_ADDR
    app_HOST = ''            # bind on every interface
    app_PORT = 10123
    app_BUFSIZ = 1024
    app_ADDR = (app_HOST, app_PORT)

    servo_move = Servo_ctrl()
    servo_move.start()
    servo_move.pause()       # idle until a command arrives
def ap_thread():
    # Fallback network: spawn a WiFi access point named "Groovy".
    # create_ap blocks for its lifetime, hence this runs in a daemon thread.
    os.system("sudo create_ap wlan0 eth0 Groovy 12345678")
def setup():
    # Initialise the motor-driver GPIO pins.
    move.setup()
def appCommand(data_input):
    """Dispatch one newline-terminated command string from the mobile app.

    Drive commands ('forwardStart\\n', ...) are matched exactly; the
    corresponding Stop commands are matched by substring.  Three separate
    if/elif ladders handle driving, camera servos, and LED/switch toggles.
    """
    global direction_command, turn_command, servo_command
    # --- driving ---------------------------------------------------------
    if data_input == 'forwardStart\n':
        direction_command = 'forward'
        move.motor_left(1, 0, speed_set)
        move.motor_right(1, 0, speed_set)
        # RL.both_on()
    elif data_input == 'backwardStart\n':
        direction_command = 'backward'
        move.motor_left(1, 1, speed_set)
        move.motor_right(1, 1, speed_set)
        # RL.red()
    elif data_input == 'leftStart\n':
        # Steering servo to +30 degrees while both motors drive forward.
        turn_command = 'left'
        scGear.moveAngle(2,30)
        move.motor_left(1, 0, speed_set)
        move.motor_right(1, 0, speed_set)
    elif data_input == 'rightStart\n':
        turn_command = 'right'
        scGear.moveAngle(2,-30)
        move.motor_left(1, 0, speed_set)
        move.motor_right(1, 0, speed_set)
        # RL.both_off()
        # RL.turnRight()
    elif 'forwardStop' in data_input:
        # Only stop when not mid-turn (the turn branch keeps driving).
        if turn_command == 'no':
            move.motorStop()
    elif 'backwardStop' in data_input:
        if turn_command == 'no':
            move.motorStop()
    elif 'leftStop' in data_input:
        # Re-centre the steering servo and stop.
        turn_command = 'no'
        # servo.turnMiddle()
        # move.motorStop()
        scGear.moveAngle(2, 0)
        move.motorStop()
    elif 'rightStop' in data_input:
        turn_command = 'no'
        # servo.turnMiddle()
        # move.motorStop()
        scGear.moveAngle(2, 0)
        move.motorStop()
    # --- camera pan/tilt servos -----------------------------------------
    if data_input == 'lookLeftStart\n':
        P_sc.singleServo(1, 1, 7)
    elif data_input == 'lookRightStart\n':
        P_sc.singleServo(1,-1, 7)
    elif data_input == 'downStart\n':
        T_sc.singleServo(0,-1, 7)
    elif data_input == 'upStart\n':
        T_sc.singleServo(0, 1, 7)
    elif 'lookLeftStop' in data_input:
        P_sc.stopWiggle()
    elif 'lookRightStop' in data_input:
        P_sc.stopWiggle()
    elif 'downStop' in data_input:
        T_sc.stopWiggle()
    elif 'upStop' in data_input:
        T_sc.stopWiggle()
    # --- LED effects (a/b toggle) and power switches (c/d) ---------------
    if data_input == 'aStart\n':
        if LED.ledfunc != 'police':
            LED.ledfunc = 'police'
            ledthread.resume()
        elif LED.ledfunc == 'police':
            LED.ledfunc = ''
            ledthread.pause()
    elif data_input == 'bStart\n':
        if LED.ledfunc != 'rainbow':
            LED.ledfunc = 'rainbow'
            ledthread.resume()
        elif LED.ledfunc == 'rainbow':
            LED.ledfunc = ''
            ledthread.pause()
    elif data_input == 'cStart\n':
        switch.switch(1,1)
        switch.switch(2,1)
        switch.switch(3,1)
    elif data_input == 'dStart\n':
        switch.switch(1,0)
        switch.switch(2,0)
        switch.switch(3,0)
    elif 'aStop' in data_input:
        pass
    elif 'bStop' in data_input:
        pass
    elif 'cStop' in data_input:
        pass
    elif 'dStop' in data_input:
        pass
    # Echo every received command for debugging.
    print(data_input)
def appconnect():
    """Accept one TCP connection from the mobile app.

    First discovers the LAN address via a throwaway UDP socket (connecting
    a UDP socket sends no packets but exposes the local interface address);
    if that fails -- typically no network -- it falls back to spawning a
    WiFi access point and animates the LED strip while the AP comes up.

    Fixes vs. original: the fallback called lowercase `led`, but the
    module-level LED object is `Led`, so the whole AP path died with
    NameError; the bare `except:` (which also swallowed KeyboardInterrupt/
    SystemExit) is narrowed to `except Exception:`.
    """
    global AppCliSock, AppAddr
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("1.1.1.1", 80))
        ipaddr_check = s.getsockname()[0]
        s.close()
        print(ipaddr_check)
        AppSerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        AppSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        AppSerSock.bind(app_ADDR)
        AppSerSock.listen(5)
        print('waiting for App connection...')
        AppCliSock, AppAddr = AppSerSock.accept()
        print('...App connected from :', AppAddr)
    except Exception:
        ap_threading = threading.Thread(target=ap_thread)  # Define a thread for AP Mode
        ap_threading.setDaemon(True)  # daemon: dies when the main loop exits
        ap_threading.start()  # Thread starts
        # Progress animation while the access point comes up.
        Led.colorWipe(0, 16, 50)
        time.sleep(1)
        Led.colorWipe(0, 16, 100)
        time.sleep(1)
        Led.colorWipe(0, 16, 150)
        time.sleep(1)
        Led.colorWipe(0, 16, 200)
        time.sleep(1)
        Led.colorWipe(0, 16, 255)
        time.sleep(1)
        Led.colorWipe(35, 255, 35)
        AppSerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        AppSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        AppSerSock.bind(app_ADDR)
        AppSerSock.listen(5)
        print('waiting for App connection...')
        AppCliSock, AppAddr = AppSerSock.accept()
        print('...App connected from :', AppAddr)
# NOTE(review): this runs at import time, but app_ADDR / app_BUFSIZ are only
# assigned inside app_ctrl(), which is never called before this point -- as
# written, appconnect() raises NameError here.  The `if __name__` guard below
# is also unreachable because the receive loop never terminates.
appconnect()
setup()
app_threading=threading.Thread(target=appconnect) #Define a thread for app connection
app_threading.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
app_threading.start() #Thread starts
# Main receive loop: read newline-terminated commands and dispatch them.
while 1:
    data = ''
    data = str(AppCliSock.recv(app_BUFSIZ).decode())
    if not data:
        continue
    appCommand(data)
    pass
if __name__ == '__main__':
app_ctrl() | adeept/adeept_picar-b | server/appserver.py | appserver.py | py | 7,496 | python | en | code | 21 | github-code | 13 |
# Process q queries over a multiset of integers:
#   1 x    -> insert x
#   2 x k  -> remove up to k copies of x
#   3      -> print max - min of the current values
query_count = int(input())
values = []
for _ in range(query_count):
    query = [int(tok) for tok in input().split()]
    op = query[0]
    if op == 1:
        values.append(query[1])
    elif op == 2:
        target, limit = query[1], query[2]
        removals = min(limit, values.count(target))
        for _ in range(removals):
            values.remove(target)
    else:
        print(max(values) - min(values))
| ZicsX/CP-Solutions | C_-_Max_-_Min_Query.py | C_-_Max_-_Min_Query.py | py | 262 | python | en | code | 0 | github-code | 13 |
22196335972 | import pytest
from linkml.generators.sqlddlgen import SQLDDLGenerator
@pytest.mark.parametrize(
    "dialect", ["mssql+pyodbc", "sqlite+pysqlite", "mysql+pymysql", "postgresql+psycopg2"]
)
def test_sqlddlgen(dialect, input_path, snapshot):
    """Generated DDL for issue_273.yaml matches the stored snapshot, per dialect."""
    PATH = input_path("issue_273.yaml")
    gen = SQLDDLGenerator(PATH, dialect=dialect)
    ddl = gen.serialize()
    # Snapshot file name encodes the dialect, with '+' made filename-safe.
    assert ddl == snapshot(f'issue_273_{dialect.replace("+","_")}.sql')
| linkml/linkml | tests/test_issues/test_issue_273.py | test_issue_273.py | py | 431 | python | en | code | 228 | github-code | 13 |
41331635025 | from collective.grok import gs
from Products.CMFCore.utils import getToolByName
from zc.relation.interfaces import ICatalog
from zope.component import getUtility
from zope.app.intid.interfaces import IIntIds
from z3c.relationfield.event import _relations, updateRelations
# -*- extra stuff goes here -*-
@gs.upgradestep(title=u'Upgrade wcc.books to 2',
                description=u'Upgrade wcc.books to 2',
                source='1', destination='2',
                sortkey=1, profile='wcc.books:default')
def to2(context):
    """Upgrade step 1 -> 2: re-run the profile and refresh book relations.

    For every book (all languages) re-resolves each relation's integer id
    from its target object, skipping targets missing from the intid
    catalog, then rebuilds the relation catalog entries.
    """
    setup = getToolByName(context, 'portal_setup')
    setup.runAllImportStepsFromProfile('profile-wcc.books.upgrades:to2')
    portal_catalog = getToolByName(context, 'portal_catalog')
    intids = getUtility(IIntIds)
    for b in portal_catalog(
            portal_type=['wcc.books.book'],
            Language='all'):
        obj = b.getObject()
        for name, relation in _relations(obj):
            try:
                relation.to_id = intids.getId(relation.to_object)
            except KeyError:
                # Target not registered with the intid utility; leave as-is.
                continue
        updateRelations(obj, None)
@gs.upgradestep(title=u'Upgrade wcc.books to 3',
                description=u'Upgrade wcc.books to 3',
                source='*', destination='3',
                sortkey=1, profile='wcc.books:default')
def to3(context):
    """Upgrade step -> 3: re-run the profile and coerce book prices to str."""
    setup = getToolByName(context, 'portal_setup')
    setup.runAllImportStepsFromProfile('profile-wcc.books.upgrades:to3')
    portal_catalog = getToolByName(context, 'portal_catalog')
    for b in portal_catalog(
            portal_type=['wcc.books.book'],
            Language='all'):
        obj = b.getObject()
        try:
            obj.price = str(obj.price)
        except KeyError:
            # NOTE(review): plain attribute access raises AttributeError, not
            # KeyError -- confirm which exception the storage layer actually
            # raises here; this handler may never fire.
            continue
| oikoumene/wcc.books | wcc/books/upgrades/handlers.py | handlers.py | py | 1,742 | python | en | code | 0 | github-code | 13 |
# Read contestant count, winning threshold, and number of score events.
num_players, threshold, num_events = (int(tok) for tok in input().split())

scores = {}
winners = []

# Register every contestant with a starting score of zero.
for _ in range(num_players):
    scores[input()] = 0

# Apply each score event; record a contestant the first time they reach
# the threshold (and never again).
for _ in range(num_events):
    name, points = input().split()
    scores[name] += int(points)
    if scores[name] >= threshold and name not in winners:
        winners.append(name)

if winners:
    for name in winners:
        print(name + " wins!")
else:
    print("No winner!")
| afnanmmir/Kattis | src/ArcadeBasketball.py | ArcadeBasketball.py | py | 623 | python | en | code | 0 | github-code | 13 |
24141771061 | from typing import Tuple, List
def parse_instruction(inst: str) -> Tuple[str, int]:
    """Split an instruction string into (opcode, operand).

    'acc +1' -> ('acc', 1)
    'jmp -3' -> ('jmp', -3)
    """
    op, _, operand = inst.partition(' ')
    return op, int(operand)
def nop(accum: int, pos: int, *args) -> Tuple[int, int]:
    """Leave the accumulator untouched and step to the next instruction.

    Extra positional arguments (the operand) are accepted and ignored so
    all three handlers share one calling convention.
    """
    next_pos = pos + 1
    return accum, next_pos
def acc(accum: int, pos: int, value) -> Tuple[int, int]:
    """Add *value* to the accumulator and step to the next instruction."""
    new_accum = accum + value
    return new_accum, pos + 1
def jmp(accum: int, pos: int, value) -> Tuple[int, int]:
    """Keep the accumulator; jump *value* instructions relative to *pos*."""
    target = pos + value
    return accum, target
def update_accumulator_and_pos(accum: int, pos: int, inst_str: str) -> Tuple[int, int]:
    """Execute one instruction string; return the new (accumulator, position).

    The original rebuilt a dispatch dict on every call and, for an unknown
    opcode, crashed with an opaque "'NoneType' object is not callable"
    TypeError; direct branching avoids the per-call allocation and raises
    an explicit KeyError instead.

    :raises KeyError: if the opcode is not nop/jmp/acc
    """
    inst, val = parse_instruction(inst_str)
    if inst == 'nop':
        return nop(accum, pos, val)
    if inst == 'jmp':
        return jmp(accum, pos, val)
    if inst == 'acc':
        return acc(accum, pos, val)
    raise KeyError(inst)
def acc_val_bfr_frst_recurring_inst_or_prg_term(inst_set: List[str]) -> Tuple[int, List[Tuple[int, str]], bool]:
    """Run the program until normal termination or just before a loop repeats.

    :param inst_set: program as a list of instruction strings
    :return: (accumulator value, executed (position, instruction) pairs,
        True if the program ran off the end / False if a loop was detected)
    """
    accumulator = 0
    position = 0
    # Set instead of the original list: O(1) loop detection per step
    # instead of O(n) `in`-scans.
    visited = {0}
    inst_mem = []
    program_terminated = True
    while position < len(inst_set):
        pos_inst_tup = position, inst_set[position]
        inst_mem.append(pos_inst_tup)
        accumulator, position = update_accumulator_and_pos(accumulator, *pos_inst_tup)
        if position in visited:
            # About to execute an instruction a second time: infinite loop.
            program_terminated = False
            break
        visited.add(position)
    return accumulator, inst_mem, program_terminated
def swap_jmp_nop(inst: str) -> str:
    """Return *inst* with a 'jmp' opcode swapped for 'nop' (and vice versa).

    Any other opcode is returned unchanged.  The original parsed the
    instruction twice (via parse_instruction for the test, then
    str.replace on the whole string -- which would also corrupt an operand
    that happened to contain the opcode text); a single partition does both
    jobs safely.
    """
    op, sep, operand = inst.partition(' ')
    if op == 'jmp':
        return 'nop' + sep + operand
    if op == 'nop':
        return 'jmp' + sep + operand
    return inst
def fix_wrong_inst_and_ret_accum_val(inst_set: List[str]) -> int:
    """Try flipping each executed jmp/nop; return the accumulator of the
    variant that terminates normally.

    Implicitly returns None when no single swap fixes the program.

    :param inst_set: program as a list of instruction strings
    :return: accumulator value after the repaired program terminates
    """
    # Trace the looping run once; only instructions actually executed can
    # be responsible for the loop, so only those are candidates.
    _, inst_set_mem, _ = acc_val_bfr_frst_recurring_inst_or_prg_term(inst_set)
    jmp_nop_inst = [inst for inst in inst_set_mem if parse_instruction(inst[1])[0] in ['jmp', 'nop']]
    for pos, inst in jmp_nop_inst:
        # Copy so each candidate swap starts from the pristine program.
        inst_set_fixed = inst_set.copy()
        inst_set_fixed[pos] = swap_jmp_nop(inst)
        accumulator, inst_set_mem, prg_term = acc_val_bfr_frst_recurring_inst_or_prg_term(inst_set_fixed)
        if prg_term:
            return accumulator
# %%
# Advent of Code 2020, day 8.  NOTE(review): sample_input is defined but
# never used below -- kept for interactive experimentation.
sample_input = [
    'nop +0',
    'acc +1',
    'jmp +4',
    'acc +3',
    'jmp -3',
    'acc -99',
    'acc +1',
    'jmp -4',
    'acc +6'
]
# Load the puzzle input, one instruction per line.
filename = "day_8/input.txt"
with open(filename) as f:
    input_data = f.readlines()
input_data = [(x.strip()) for x in input_data]
#%%
# part 1: accumulator value just before any instruction repeats
ans, _, _ = acc_val_bfr_frst_recurring_inst_or_prg_term(input_data)
print(f'Value in the accumulator immediately before first recurring instruction: {ans}')
#%%
# part 2: accumulator value after repairing the one faulty jmp/nop
ans = fix_wrong_inst_and_ret_accum_val(input_data)
print(f'Value in the accumulator after program terminates: {ans}')
| ajaysgowda/adventofcode2020 | day_8/day_8.py | day_8.py | py | 3,409 | python | en | code | 0 | github-code | 13 |
5125445334 | import sys
# BOJ 2565-style problem: each input row is a (left-pole, right-pole) wire
# pair; the minimum number of wires to remove so none cross equals
# N - (longest increasing subsequence of right endpoints after sorting by
# left endpoint).  Classic O(N^2) LIS below; the caller prints N - answer.
N = int(input().strip())
a : list = []
dp = [0]*N
answer = -1
for i in range(N):
    a.append(list(map(int, sys.stdin.readline().split())))
# Sort by left endpoint so only right endpoints need to increase.
a.sort(key=lambda x:x[0])
for i in range(N):
    for j in range(i):
        # dp[i] becomes the best LIS length ending strictly before i
        # among compatible (non-crossing) wires.
        if a[i][1] > a[j][1] and dp[i] < dp[j]:
            dp[i] = dp[j]
    dp[i]+=1
    if answer < dp[i]:
        answer = dp[i]
print(N-answer) | JeongHooon-Lee/ps_python_rust | 2022_2/2565.py | 2565.py | py | 371 | python | en | code | 0 | github-code | 13 |
30313997390 | from flask_menu.classy import register_flaskview
from wazo_ui.helpers.plugin import create_blueprint
from wazo_ui.helpers.view import register_listing_url
from .service import (
ConfBridgeGeneralSettingsService,
FeaturesGeneralSettingsService,
IaxGeneralSettingsService,
PJSIPDocService,
PJSIPGlobalSettingsService,
PJSIPSystemSettingsService,
SCCPDocService,
SCCPGeneralSettingsService,
TimezoneService,
VoicemailGeneralSettingsService,
)
from .view import (
ConfBridgeGeneralSettingsView,
FeaturesGeneralSettingsView,
IaxGeneralSettingsView,
PJSIPDocListingView,
PJSIPGlobalSettingsView,
PJSIPSystemSettingsView,
SCCPDocListingView,
SCCPGeneralSettingsView,
TimezoneListingView,
VoicemailGeneralSettingsView,
)
general_settings = create_blueprint('general_settings', __name__)
class Plugin:
    """wazo-ui plugin wiring for the general-settings pages.

    load() injects a wazo_confd-backed service into each view class,
    registers the view on the shared `general_settings` blueprint, and
    exposes JSON listing URLs where other pages need lookups.
    """

    def load(self, dependencies):
        core = dependencies['flask']
        clients = dependencies['clients']
        # PJSIP documentation listing (JSON only, consumed by other forms).
        PJSIPDocListingView.service = PJSIPDocService(clients['wazo_confd'])
        PJSIPDocListingView.register(
            general_settings, route_base='/list_json_by_section'
        )
        register_listing_url(
            'pjsip_doc', 'general_settings.PJSIPDocListingView:list_json_by_section'
        )
        # SCCP documentation listing (service takes no confd client).
        SCCPDocListingView.service = SCCPDocService()
        SCCPDocListingView.register(general_settings, route_base='/sccp_documentation')
        register_listing_url(
            'sccp_doc', 'general_settings.SCCPDocListingView:list_json'
        )
        # Settings pages: same pattern for each protocol/feature area.
        PJSIPGlobalSettingsView.service = PJSIPGlobalSettingsService(
            clients['wazo_confd']
        )
        PJSIPGlobalSettingsView.register(
            general_settings, route_base='/pjsip_global_settings'
        )
        register_flaskview(general_settings, PJSIPGlobalSettingsView)
        PJSIPSystemSettingsView.service = PJSIPSystemSettingsService(
            clients['wazo_confd']
        )
        PJSIPSystemSettingsView.register(
            general_settings, route_base='/pjsip_system_settings'
        )
        register_flaskview(general_settings, PJSIPSystemSettingsView)
        IaxGeneralSettingsView.service = IaxGeneralSettingsService(
            clients['wazo_confd']
        )
        IaxGeneralSettingsView.register(
            general_settings, route_base='/iax_general_settings'
        )
        register_flaskview(general_settings, IaxGeneralSettingsView)
        SCCPGeneralSettingsView.service = SCCPGeneralSettingsService(
            clients['wazo_confd']
        )
        SCCPGeneralSettingsView.register(
            general_settings, route_base='/sccp_general_settings'
        )
        register_flaskview(general_settings, SCCPGeneralSettingsView)
        VoicemailGeneralSettingsView.service = VoicemailGeneralSettingsService(
            clients['wazo_confd']
        )
        VoicemailGeneralSettingsView.register(
            general_settings, route_base='/voicemail_general_settings'
        )
        register_flaskview(general_settings, VoicemailGeneralSettingsView)
        FeaturesGeneralSettingsView.service = FeaturesGeneralSettingsService(
            clients['wazo_confd']
        )
        FeaturesGeneralSettingsView.register(
            general_settings, route_base='/features_general_settings'
        )
        register_flaskview(general_settings, FeaturesGeneralSettingsView)
        ConfBridgeGeneralSettingsView.service = ConfBridgeGeneralSettingsService(
            clients['wazo_confd']
        )
        ConfBridgeGeneralSettingsView.register(
            general_settings, route_base='/confbridge_general_settings'
        )
        register_flaskview(general_settings, ConfBridgeGeneralSettingsView)
        # Timezone listing doubles as a page and a JSON lookup source.
        TimezoneListingView.service = TimezoneService(clients['wazo_confd'])
        TimezoneListingView.register(general_settings, route_base='/timezones_listing')
        register_flaskview(general_settings, TimezoneListingView)
        register_listing_url(
            'timezone', 'general_settings.TimezoneListingView:list_json'
        )
        # Finally attach everything to the Flask app.
        core.register_blueprint(general_settings)
| wazo-platform/wazo-ui | wazo_ui/plugins/general_settings/plugin.py | plugin.py | py | 4,149 | python | en | code | 4 | github-code | 13 |
39153178781 | import math
import fractions
# Напишите программу, которая принимает две строки вида “a/b” - дробь с числителем и знаменателем.
# Программа должна возвращать сумму и произведение* дробей. Для проверки своего кода используйте модуль fractions.
# Пример:
# Ввод:
# 1/2
# 1/3
# Вывод:
# 5/6 1/6
# Index of the numerator / denominator inside a parsed [a, b] fraction.
PURPOSE_NUMERATOR = 0
PURPOSE_DENOMINATOR = 1
# A valid "a/b" input splits into exactly two parts.
LEN_NUM_SPLIT = 2
def get_number() -> list:
    """Prompt until the user enters a fraction formatted "a/b"; return [a, b].

    The original validated with an `or`-chain that short-circuited as soon
    as the split had two parts, so malformed input such as "1/2/3" was
    silently accepted (extra parts dropped).  Here the part count is
    checked explicitly before conversion.
    """
    while True:
        try:
            num = input('Введите дробь в формате "a/b": ')
            parts = num.split('/')
            if len(parts) != LEN_NUM_SPLIT:
                # Wrong number of '/'-separated parts.
                raise IndexError
            return [int(parts[0]), int(parts[1])]
        except ValueError:
            print('ValueError: Неверный ввод. Попробуйте еще раз.')
        except IndexError:
            print('IndexError: Неверный ввод. Попробуйте еще раз.')
def calculate_amount(first_fraction: list,
                     second_fraction: list) -> None:
    """Print the reduced sum of two fractions given as [numerator, denominator]."""
    numerator_one = first_fraction[0]
    numerator_two = second_fraction[0]
    denominator_one = first_fraction[1]
    denominator_two = second_fraction[1]
    # Common denominator via the least common multiple.
    lcm_num = math.lcm(denominator_one, denominator_two)
    numerator_final = ((lcm_num // denominator_one) * numerator_one
                       + (lcm_num // denominator_two) * numerator_two)
    # Reduce the resulting fraction by its GCD.
    calc_fraction = get_gcd(numerator_final, lcm_num)
    print(
        f'\n{numerator_one}/{denominator_one} '
        f'+ {numerator_two}/{denominator_two} '
        f'= {calc_fraction[0]}/{calc_fraction[1]}')
def calculate_multiplication(first_fraction: list,
                             second_fraction: list) -> None:
    """Print the reduced product of two fractions given as [numerator, denominator]."""
    numerator_one = first_fraction[0]
    numerator_two = second_fraction[0]
    denominator_one = first_fraction[1]
    denominator_two = second_fraction[1]
    # Multiply straight across, then reduce by the GCD.
    numerator_final = numerator_one * numerator_two
    denominator_final = denominator_one * denominator_two
    calc_fraction = get_gcd(numerator_final, denominator_final)
    print(
        f'{numerator_one}/{denominator_one} '
        f'* {numerator_two}/{denominator_two} '
        f'= {calc_fraction[0]}/{calc_fraction[1]}')
# Reduce a fraction to lowest terms via the greatest common divisor.
def get_gcd(numerator: int, denominator: int) -> list:
    """Return [numerator, denominator] divided by their GCD."""
    divisor = math.gcd(numerator, denominator)
    return [numerator // divisor, denominator // divisor]
# Read two fractions, then verify the hand-rolled arithmetic against the
# stdlib fractions module.
fraction_1 = get_number()
fraction_2 = get_number()
check_amount = (fractions.Fraction(fraction_1[0], fraction_1[1])
                + fractions.Fraction(fraction_2[0], fraction_2[1]))
check_multiplication = (fractions.Fraction(fraction_1[0], fraction_1[1])
                        * fractions.Fraction(fraction_2[0], fraction_2[1]))
calculate_amount(fraction_1, fraction_2)
print(f'Проверка сложения: {check_amount}\n')
calculate_multiplication(fraction_1, fraction_2)
print(f'Проверка умножения: {check_multiplication}\n')
| AngelinaSl/Python_lessons | Homeworks/Homework_2/Task_2.py | Task_2.py | py | 3,447 | python | ru | code | 0 | github-code | 13 |
71823343379 | from django.shortcuts import render_to_response, redirect
from django.http import JsonResponse
from .helpers import cheсk_login
from django.core.context_processors import csrf
from proposal.models import Tip, Vajnost, Status, User, Tema, ConfugurationOneC
from proposal.serializers import TipSeriz, VajnostSeriz, StatusSeriz, UserSerializer, TemaSeriz, ConfugurationOneC
def home(request):
    """Render the home page for regular (non-staff) users.

    Anonymous visitors are redirected to the login page, staff users to
    the proposal list.  AJAX requests receive the reference data (types,
    priorities, statuses, specialists and topics) serialized as JSON.
    """
    # NOTE(review): "cheсk_login" contains a Cyrillic "с" — it matches the
    # import from helpers as-is; do not "fix" the spelling here.
    user = cheсk_login(request)
    if not user:
        return redirect('/auth/login/')
    if user.is_staff:
        return redirect('/proposal/list/')
    if not request.is_ajax():
        context = {'user': user}
        context.update(csrf(request))
        return render_to_response('pages/home.html', context)
    # Reference data for the AJAX front end.
    specialists = User.objects.filter(is_staff=True)
    topics = Tema.objects.filter(configuration_1c__in=user.otdel.configuration_1c.all())
    payload = {
        'user': UserSerializer(user).data,
        'types': TipSeriz(Tip.objects.all(), many=True).data,
        'importants': VajnostSeriz(Vajnost.objects.all(), many=True).data,
        'updates': StatusSeriz(Status.objects.all(), many=True).data,
        'specs': UserSerializer(specialists, many=True).data,
        'tems': TemaSeriz(topics, many=True).data
    }
    return JsonResponse(payload)
31214386309 | # Exercicio um
# Python programa de linhas de uma sequência Fibonacci
# Espiral desenhada usando Turtle
import turtle
import math
# Função principal do desenho do Fibonacci
def fiboPlot(n):
    """Draw n Fibonacci squares and the matching quarter-circle spiral.

    Uses the module-level turtle ``x`` and scale ``factor``; assumes the
    pen starts at the origin heading east.
    """
    a = 0
    b = 1
    quadrado_a = a
    quadrado_b = b
    # Set the pen colour for the squares
    x.pencolor("pink")
    # Draw the first (unit) square
    x.forward(b*factor)
    x.left(90)
    x.forward(b*factor)
    x.left(90)
    x.forward(b*factor)
    x.left(90)
    x.forward(b*factor)
    # Advance the Fibonacci series
    aux = quadrado_b
    quadrado_b = quadrado_b + quadrado_a
    quadrado_a = aux
    # Draw the remaining squares, each attached to the previous one
    for i in range(1, n):
        x.backward(quadrado_a * factor)
        x.right(90)
        x.forward(quadrado_b * factor)
        x.left(90)
        x.forward(quadrado_b * factor)
        x.left(90)
        x.forward(quadrado_b * factor)
        # Advance the Fibonacci series
        aux = quadrado_b
        quadrado_b = quadrado_b + quadrado_a
        quadrado_a = aux
    # Move the pen back to the starting point
    x.penup()
    x.setposition(factor, 0)
    x.seth(0)
    x.pendown()
    # Pen colour for the spiral
    x.pencolor("blue")
    # Fibonacci spiral: each quarter circle is approximated by 90 short
    # forward steps of fdwd with a 1-degree turn between them
    x.left(90)
    for i in range(n):
        print(b)
        fdwd = math.pi * (b*factor/2)
        fdwd /= 90
        for j in range(90):
            x.forward(fdwd)
            x.left(1)
        aux = a
        a = b
        b = aux + b
# ``factor`` is the multiplier applied to the length of every drawn line
factor = 5
# Ask the user how many squares/arcs to draw
num = int(input("Entre com um número para interações (maior que > 1): "))
# The Fibonacci spiral lines will be drawn according to the number entered
if num > 0:
    print("Série de Fibonacci de", num, "elementos :")
    x = turtle.Turtle()
    x.speed(100)
    fiboPlot(num)
    turtle.done()
else:
    print("Número tem que ser maior que > 0")
| danisimas/estcmp060 | fibonacci_exercicio_um.py | fibonacci_exercicio_um.py | py | 2,003 | python | pt | code | 0 | github-code | 13 |
18850013737 | import tcod as T
from items.Equipment import Equipment
from common.modifiers.mod import Mod
from common.utils import rand
from common.modifiers.attrib_mod import *
class Amulet(Equipment):
    # Base class for all neck-slot items; ABSTRACT keeps it out of random drops.
    ABSTRACT = True
    slot = 'n'
    art = 'amulet'
    glyph = '\'', T.gold
class RavenAmulet(Amulet):
    """Low-tier amulet that rolls one of four random minor enchantments.

    suffix() renames the item; only when it succeeds are the modifier and
    glyph colour actually applied.
    """
    name = 'amulet'
    ABSTRACT = True

    def __init__(self):
        super().__init__()
        roll = rand(1, 4)
        # (suffix word, deferred modifier factory, glyph colour).  The
        # factory defers the inner rand() call until after suffix(), so
        # the RNG call order matches a branch-by-branch implementation.
        if roll == 1:
            enchant = ("wind", lambda: Mod('speed', 1), T.white)
        elif roll == 2:
            enchant = ("sun", lambda: Mod('radius', 1), T.light_yellow)
        elif roll == 3:
            enchant = ("wolf", lambda: AddMaxLife(rand(3, 9)), T.light_red)
        else:
            enchant = ("spirit", lambda: AddMaxMana(rand(5, 11)), T.light_blue)
        word, make_modifier, colour = enchant
        if self.suffix(word):
            self.modifier += make_modifier()
            self.glyph = '\'', colour
class WispAmulet(Amulet):
    # Mid-tier amulet: rolls one of three random enchantments.  suffix()
    # renames the item and gates whether the bonus is actually applied.
    name = 'amulet'
    ABSTRACT = True
    def __init__(self):
        super().__init__()
        v = rand(1, 3)
        if v == 1:
            if self.suffix("reflections"):
                self.modifier += Mod('reflect_damage_bonus', 25)
                self.glyph = '\'', T.light_pink
        elif v == 2:
            if self.suffix("tiger"):
                self.modifier += AddMaxLife(rand(10, 24))
                self.glyph = '\'', T.light_red
        else:
            if self.suffix("star"):
                self.modifier += AddMaxMana(rand(12, 29))
                self.glyph = '\'', T.light_blue
class WardAmulet(Amulet):
    """High-tier amulet: rolls one of three random enchantments.

    suffix() renames the item and gates whether the bonus is applied.
    """
    name = 'amulet'
    ABSTRACT = True
    def __init__(self):
        super().__init__()
        v = rand(1, 3)
        if v == 1:
            # Fix: the suffix word was misspelled "mirros"; the item is
            # meant to read "amulet of mirrors" (cf. "of reflections" on
            # WispAmulet, which carries the same reflect bonus).
            if self.suffix("mirrors"):
                self.modifier += Mod('reflect_damage_bonus', 50)
                self.glyph = '\'', T.light_pink
        elif v == 2:
            if self.suffix("whale"):
                self.modifier += AddMaxLife(rand(25, 45))
                self.glyph = '\'', T.light_red
        else:
            if self.suffix("rainbow"):
                self.modifier += AddMaxMana(rand(30, 50))
                self.glyph = '\'', T.light_blue
class RubyAmulet(Amulet):
    # Fixed-bonus amulet: always magical, always grants +75 max life.
    ABSTRACT = True
    name = 'ruby amulet'
    art = 'ruby_amulet'
    glyph = '\'', T.red
    magical = True
    def __init__(self):
        super().__init__()
        self.modifier += AddMaxLife(75)
| devapromix/troll-temple | src/items/amulets.py | amulets.py | py | 2,559 | python | en | code | 2 | github-code | 13 |
25682245102 | from cmu_112_graphics import *
################################################################################
# Player object controls player score and player input
################################################################################
class Player(object):
    """Tracks one player's score and currently-held lane inputs.

    Keys '1'..'9' map to lanes 1-9 and '0' stands in for lane 10 (for
    ergonomics).  At most two lanes can be held simultaneously.
    """
    def __init__(self, app, numKeys, cellWidth):
        self.numKeys = numKeys
        #inputs will be a list of 2 x-positions
        self.inputs = set()
        self.score = 0
        self.oldInputs = []
        #keyPressImg image obtained from https://osu.ppy.sh/community/forums/topics/807428?n=1
        keyPressImg = app.loadImage('keyPressImage.png')
        self.keyPressImage = keyPressImg.resize((int(cellWidth) + 20//numKeys, int(app.height)*2), Image.ANTIALIAS)
    def getInputs(self):
        return self.inputs
    #Function creates a set of keys that are held by the player#################
    def holdKey(self, event):
        key = event.key
        #Checks that the key pressed is:
        # a digit
        # will have no more than 2 inputs
        # will be within the bounds of numKeys
        if key.isdigit() and len(self.inputs) <2 and int(key) <= self.numKeys:
            if key == '0' and self.numKeys >= 10: #For ergonomics, 0 = 10
                self.inputs.add(10)
            elif key == '0':
                pass
            else:
                self.inputs.add(int(key))
    #removes elements from the set with keys are released#######################
    def releaseKey(self, event):
        key = event.key
        if key == '0' and 10 in self.inputs: #If the key released is 0, remove 10
            self.inputs.remove(10)
        #if the key released is a digit and the digit is in the set of inputs, remove the input
        if key.isdigit() and int(key) in self.inputs:
            self.inputs.remove(int(key))
    def drawScore(self, canvas):
        canvas.create_text(50, 50, font = "Arial 15 bold", text = str(self.score), fill = "white")
    #Draws key press inputs#####################################################
    def drawInputs(self, canvas, height, lBorder, cellWidth):
        y = height * 0.95
        for x in self.inputs:
            x -= 1
            # Centre of the lane's cell, offset from the left border.
            x = lBorder + x * cellWidth + cellWidth/2
            canvas.create_image(x, y, image=ImageTk.PhotoImage(self.keyPressImage))
    #Stores old inputs in a list and pops the 0th elem once it holds 5 inputs###
    #Special Use: for Down Note#################################################
    def updateOldInputs(self, newInputs):
        inputs = tuple(newInputs)
        self.oldInputs.append(inputs)
        if len(self.oldInputs) > 5:
            self.oldInputs.pop(0)
    #Adds score to player score#################################################
    def updateScore(self, score):
        if score != None:
            self.score += score
41642650135 | # Method: Replace each element with (num + rev(num)) and then Use Hashmap to count frequency
# TC: O(n)
# SC: O(n)
from typing import List
from collections import defaultdict
class Solution:
    """LeetCode 1814: count pairs (i, j), i < j, with
    nums[i] + rev(nums[j]) == nums[j] + rev(nums[i]).

    Rearranged, the condition is nums[i] - rev(nums[i]) ==
    nums[j] - rev(nums[j]), so pairs are counted by hashing that
    difference.  TC: O(n * d) for d digits per number; SC: O(n).
    """
    def countNicePairs(self, nums: List[int]) -> int:
        """Return the number of nice pairs, modulo 10**9 + 7.

        Fix: the original overwrote nums[i] in place as a scratch
        buffer, clobbering the caller's list; the key is now computed
        into a local instead.
        """
        freq_map = defaultdict(int)
        res = 0
        MOD = 10 ** 9 + 7
        for num in nums:
            key = num - self.rev(num)
            # Every earlier number sharing this difference forms a pair.
            res = (res + freq_map[key]) % MOD
            freq_map[key] += 1
        return res

    def rev(self, num: int) -> int:
        """Return *num* with its decimal digits reversed (rev(120) == 21)."""
        res = 0
        while num:
            res = res * 10 + num % 10
            num //= 10
        return res
26963876965 | # -*- coding: utf-8 -*-
"""#####################################################################"""
import gym
env = gym.make("Taxi-v3").env
env.render()
"""#####################################################################"""
print("Total de Ações {}".format(env.action_space))
print("Total de Estados {}".format(env.observation_space))
"""#####################################################################"""
state = env.encode(3, 1, 2, 1)
print("Número do estado:", state)
env.s = state
env.render()
env.s = 369
print("Número do estado:", env.s)
env.render()
"""#####################################################################"""
env.P[329]
"""#####################################################################"""
env.s = 329 # começa no estado do exemplo acima
epochs = 0 # total de ações realizadas
penalties = 0 # quantidade de punições recebidas por pegar ou largar no lugar errado
frames = [] # usado para fazer uma animação
done = False
while not done:
action = env.action_space.sample() # escolhe aleatoriamente uma ação
state, reward, done, info = env.step(action) # aplica a ação e pega o resultado
if reward == -10: # conta uma punição
penalties += 1
# Quarda a sequência para poder fazer a animação depois
frames.append({
'frame': env.render(mode='ansi'),
'state': state,
'action': action,
'reward': reward
}
)
epochs += 1
print("Total de ações executadas: {}".format(epochs))
print("Total de penalizações recebidas: {}".format(penalties))
"""#####################################################################"""
from IPython.display import clear_output
from time import sleep
def print_frames(frames):
    """Replay a recorded episode, printing one frame every 0.1 s."""
    for step, frame in enumerate(frames, start=1):
        clear_output(wait=True)
        print(f"Timestep: {step}")
        print(f"State: {frame['state']}")
        print(f"Action: {frame['action']}")
        print(f"Reward: {frame['reward']}")
        sleep(.1)
##print_frames(frames)
"""#####################################################################"""
import numpy as np

# Initialise the Q-value table with zeros (states x actions).
q_table = np.zeros([env.observation_space.n, env.action_space.n])

import random
from IPython.display import clear_output

# Hyperparameters
alpha = 0.1 # learning rate
gamma = 0.6 # discount factor
epsilon = 0.1 # chance of a random (exploratory) action
# Overall totals of actions taken and penalties received during learning.
# NOTE(review): in this loop `epochs` increments once per EPISODE (it sits
# outside the while), so the "ações executadas" message overstates nothing
# but counts episodes — confirm that is intended.
epochs, penalties = 0,0
for i in range(1, 100001): # run 100000 different instances of the problem
    state = env.reset() # random initialisation of the environment
    done = False
    while not done:
        if random.uniform(0, 1) < epsilon:
            action = env.action_space.sample() # explore: pick a random action
        else:
            action = np.argmax(q_table[state]) # exploit: pick the best known action
        next_state, reward, done, info = env.step(action) # apply the action
        old_value = q_table[state, action] # value of the chosen action in the current state
        next_max = np.max(q_table[next_state]) # best value in the next state
        # Update the Q value using the core Q-Learning formula
        new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)
        q_table[state, action] = new_value
        if reward == -10: # count penalties for wrong pickup/dropoff
            penalties += 1
        state = next_state # move to the next state
    epochs += 1
print("Total de ações executadas: {}".format(epochs))
print("Total de penalizações recebidas: {}".format(penalties))
"""#####################################################################"""
env.s = 329
env.render()
q_table[329]
"""#####################################################################"""
state = 329
epochs, penalties = 0, 0
done = False
while not done:
action = np.argmax(q_table[state])
state, reward, done, info = env.step(action)
if reward == -10:
penalties += 1
epochs += 1
print("Total de ações executadas: {}".format(epochs))
print("Total de penalizações recebidas: {}".format(penalties))
"""#####################################################################"""
total_epochs, total_penalties = 0, 0
episodes = 100
for i in range(episodes):
state = env.reset()
epochs, penalties, reward = 0, 0, 0
done = False
while not done:
action = np.argmax(q_table[state])
state, reward, done, info = env.step(action)
if reward == -10:
penalties += 1
epochs += 1
total_penalties += penalties
total_epochs += epochs
print(f"Resultados depois de {episodes} simulações:")
print(f"Média de ações por simulação: {total_epochs / episodes}")
print(f"Média de penalidades: {total_penalties / episodes}")
| Lawniet/Atividades-de-IA | Agents/Q-tables/Q_learning.py | Q_learning.py | py | 5,053 | python | pt | code | 0 | github-code | 13 |
41005348588 | import requests
from common.phrases import SEND_FAILED
from config import settings
class MailerController:
    """Thin HTTP client for the mailer micro-service.

    All endpoints are best-effort: HTTP failures print SEND_FAILED and
    network errors are printed and swallowed, preserving the original
    never-raise contract.
    """

    def _call(self, endpoint: str, params: dict):
        """GET *endpoint* on the mailer service with *params*.

        Returns the response object, or None when the request raised.
        Extracted to remove the triplicated request/error-handling code
        of the three public send_* methods.
        """
        try:
            result = requests.get(
                f"{settings.URL_MAILER}/{endpoint}", params=params
            )
            if result.status_code != 200:
                print(SEND_FAILED)
            return result
        except Exception as e:
            print(e)

    def send_url(self, email: str, url: str):
        """Send the confirmation-code e-mail containing *url*."""
        return self._call("send_code", {"email": email, "url": url})

    def send_greeting(self, email: str, fullname: str):
        """Send the welcome e-mail addressed to *fullname*."""
        return self._call("greeting", {"email": email, "fullname": fullname})

    def send_warn_signin(self, email: str):
        """Send the suspicious sign-in warning e-mail."""
        return self._call("warning_signin", {"email": email})
| Vaynbaum/simple-messenger | backend/auth/controllers/mailer_controller.py | mailer_controller.py | py | 1,192 | python | en | code | 1 | github-code | 13 |
19067325383 | import random
import pickle
import numpy as np
from collections import deque
class Useful_Memory():
    """Replay buffer shared by synchronously trained agents.

    All agents are trained in lockstep, so every agent should see the
    same replay transitions.
    """

    def __init__(self, memory_size, batch_size):
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.memory = self.create_memory_pool()
        self.memory_counter = 0

    def create_memory_pool(self):
        """Create the bounded FIFO buffer backing the replay memory."""
        return deque(maxlen=self.memory_size)

    def store_memory(self, obs, actions, rewards, obs_next, done):
        """Append one transition; the deque drops the oldest when full."""
        self.memory.append([obs, actions, rewards, obs_next, done])
        # The counter saturates at memory_size once the pool is full.
        self.memory_counter = min(self.memory_counter + 1, self.memory_size)

    def get_useful_memory(self):
        """Sample a random mini-batch and split it into per-field lists."""
        batch = random.sample(self.memory, self.batch_size)
        # Transpose [transition][field] into [field][transition]; the
        # `or` branch keeps the original 5-empty-lists result for an
        # empty batch.
        fields = [list(column) for column in zip(*batch)] or [[], [], [], [], []]
        obs, actions, rewards, new_obs, done = fields
        return obs, actions, rewards, new_obs, done

    def save(self, save_path):
        """Pickle the buffer next to *save_path* (filename kept as-is)."""
        with open(save_path + "useful_memeory_file.pkl", 'wb') as f:
            pickle.dump(self.memory, f)
        print("useful_memory file saved!")

    def load(self, load_path):
        """Restore a previously pickled buffer."""
        with open(load_path + "useful_memeory_file.pkl", 'rb') as f:
            self.memory = pickle.load(f)
19601720872 | """
Aula 3
Expressões Lambda (Funções Anônimas)
"""
"""
def funcSum(a, b):
print(a + b)
lambdaSum = lambda a, b: a + b
lambda parameter_list: expression
funcSum(2,5)
print(lambdaSum(5,33))
"""
# Sample data: [name, age] pairs used by the sorting examples below.
lista = [
    ['nome1', 22],
    ['nome2', 32],
    ['nome3', 52],
    ['nome4', 12],
    ['nome5', 72],
]
def arruma(item):
    """Sort key: extract the age (second element) from a [name, age] pair."""
    _, idade = item
    return idade
# Sort in place by age, ascending; the lambda is equivalent to key=arruma.
lista.sort(key=lambda item: item[1], reverse=False)
print(lista)
# Takes the whole list and returns the first entry's age plus one.
maisUm = lambda idade: idade[0][1] + 1
print(maisUm(lista))
| joaoo-vittor/estudo-python | intermediario/aula-3.py | aula-3.py | py | 469 | python | pt | code | 0 | github-code | 13 |
36520120704 | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose, Concatenate
from keras.models import Model
from keras.datasets import mnist
from keras.losses import mse, binary_crossentropy, mean_absolute_error, mae
from keras.optimizers import Adam
from keras.utils import plot_model
from keras import backend as K
from keras.callbacks import ModelCheckpoint, Callback
from keras.utils import Sequence
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import glob
import cv2
import sys
from tqdm import tqdm, trange
from scipy.signal import savgol_filter
# Loss weights used later when assembling the VAE loss.
rp_l2_loss_weight = 1.
s_entropy_loss_term = 0.5

# Pretrained denoising autoencoder: 84x84x3 frames -> 100-d code -> frames.
input_shape=(84, 84, 3)
latent_dim = 100
inputs = Input(shape=input_shape, name='denoising_encoder_input')
x_inputs = Conv2D(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(inputs)
x_inputs = Conv2D(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(x_inputs)
x_inputs = Conv2D(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_inputs)
x_inputs = Conv2D(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_inputs)
x_inputs = Flatten()(x_inputs)
z = Dense(latent_dim, name='z', activation='linear')(x_inputs)

# instantiate encoder model
encoder = Model(inputs, z, name='denoising_encoder')

# build decoder model
latent_inputs = Input(shape=(latent_dim,))
x_decoder = Dense(6 * 6 * 64, activation='relu')(latent_inputs)
x_decoder = Reshape((6, 6, 64))(x_decoder)
x_decoder = Conv2DTranspose(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
x_decoder = Conv2DTranspose(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
x_decoder = Conv2DTranspose(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
x_decoder = Conv2DTranspose(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
x_decoder = Conv2DTranspose(filters=3, kernel_size=1, strides=1, activation='linear', padding='same')(x_decoder)
# Upsampling overshoots 84 (96x96); crop back to the input size.
x_decoder = Lambda(lambda x: x[:, :84, :84, :])(x_decoder)

# instantiate decoder model
decoder = Model(latent_inputs, x_decoder, name='denoising_decoder')

# instantiate VAE model
denoising = Model(inputs, decoder(encoder(inputs)), name='denoising')
denoising.load_weights("denoising_autoencoder.h5")
denoising.save("full_denoising_autoencoder.h5")

# Rename layers so they don't clash with the VAE layers built below.
# NOTE(review): assigning layer.name works on this Keras version; newer
# versions make the attribute read-only — confirm before upgrading.
for layer in denoising.layers:
    layer.name += "_denoising"
# reparameterization trick
# instead of sampling from Q(z|X), sample eps = N(0,I)
# then z = z_mean + sqrt(var)*eps
def sampling(args):
    """Reparameterization trick: sample from an isotropic unit Gaussian.

    Instead of sampling from Q(z|X), draw eps ~ N(0, I) and return
    z = z_mean + exp(0.5 * z_log_var) * eps.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, 84, 84, 3))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
def sampling_np(args):
    """NumPy twin of :func:`sampling`: z = mean + exp(0.5*log_var) * eps.

    Fixes the original, which could never run: it called the nonexistent
    ``np.random_normal`` and used the Keras backend ``K.exp`` inside a
    NumPy function.

    # Arguments
        args (sequence): mean and log of variance of Q(z|X), as arrays
            broadcastable with shape (batch, 84, 84, 3)

    # Returns
        z (ndarray): sampled latent tensor, shape (batch, 84, 84, 3)
    """
    z_mean, z_log_var = args
    batch = np.shape(z_mean)[0]
    # numpy.random.normal defaults to mean=0, std=1.
    epsilon = np.random.normal(size=(batch, 84, 84, 3))
    return z_mean + np.exp(0.5 * z_log_var) * epsilon
# network parameters
# 32 latent dims: the first rp_dim=30 are "reward-predictive" (temporal
# smoothness enforced), the remaining s_dim=2 are style dims.
latent_dim = 32
rp_dim = 30
s_dim = latent_dim - rp_dim
input_shape = (84, 84, 3)

# build encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x_inputs = Conv2D(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(inputs)
x_inputs = Conv2D(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(x_inputs)
x_inputs = Conv2D(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_inputs)
x_inputs = Conv2D(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_inputs)
x_inputs = Flatten()(x_inputs)
x_inputs = Dense(256, activation='relu')(x_inputs)
z_mean = Dense(latent_dim, name='z_mean', activation='linear')(x_inputs)
z_log_var = Dense(latent_dim, name='z_log_var', activation='linear')(x_inputs)

# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var], name='encoder')
encoder.summary()

# build decoder model: takes mean and log-var, concatenated.
input_z_mean = Input(shape=(latent_dim,))
input_z_log_var = Input(shape=(latent_dim,))
latent_inputs = Concatenate()([input_z_mean, input_z_log_var])
x_decoder = Dense(256, activation='relu')(latent_inputs)
x_decoder = Dense(6 * 6 * 64, activation='relu')(x_decoder)
x_decoder = Reshape((6, 6, 64))(x_decoder)
x_decoder = Conv2DTranspose(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
x_decoder = Conv2DTranspose(filters=64, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
x_decoder = Conv2DTranspose(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
x_decoder = Conv2DTranspose(filters=32, kernel_size=4, activation='relu', strides=2, padding='same')(x_decoder)
# 6 output channels: per-pixel mean (3) and log-variance (3).
x_decoder = Conv2DTranspose(filters=6, kernel_size=1, strides=1, activation='linear', padding='same')(x_decoder)
x_decoder = Lambda(lambda x: x[:, :84, :84, :])(x_decoder)

# instantiate decoder model
decoder = Model([input_z_mean, input_z_log_var], x_decoder, name='decoder')
decoder.summary()

# instantiate VAE model: outputs = [decoded image params, z_mean, z_log_var]
encoder_outputs = encoder(inputs)
outputs = [decoder([encoder_outputs[0], encoder_outputs[1]]), encoder_outputs[0], encoder_outputs[1]]
vae = Model(inputs, outputs, name='vae')
for layer in vae.layers:
    layer.name += "_vae"

# Frozen feature extractor (denoising encoder) for the perceptual loss.
denoising_encoder = Model(denoising.inputs, denoising.layers[-2].outputs)
for layer in denoising_encoder.layers:
    layer.trainable = False
def load_small_dataset():
    """Load the frames of two hard-coded episodes (104 and 111) for evaluation.

    Returns (x_train, y_train, x_test, y_test, image_size, input_shape);
    the label arrays are empty placeholders.

    NOTE(review): cv2.imread returns None for unreadable files, which would
    poison np.asarray/astype here — confirm the fixtures always exist.
    """
    imgs = np.asarray([cv2.imread(x) for x in tqdm(glob.glob("training_observations2/obs_104_*.png") + glob.glob("training_observations2/obs_111_*.png"))])
    x_train = imgs[:, :]
    # x_test skips the first 128 frames; x_train keeps everything.
    x_test = imgs[128:, :]
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    y_train = np.array([])
    y_test = np.array([])
    image_size = imgs.shape[1]
    input_shape = (image_size, image_size, 3)
    return x_train, y_train, x_test, y_test, image_size, input_shape
x_train, y_train, x_test, y_test, image_size, input_shape = load_small_dataset()
def recon_loss(y_true, y_pred):
    """Perceptual reconstruction loss in the frozen denoising encoder's feature space.

    `y_pred` is expected to be the list `vae.outputs` = [decoded image
    params (mean and log-var stacked on channels), z_mean, z_log_var];
    `y_true` is unused.
    """
    output = y_pred[0]
    mean_output = output[:, :, :, :3]
    log_var_output = output[:, :, :, 3:]
    sampled_reconstruction = sampling([mean_output, log_var_output])
    # Compare denoising-encoder features of the input vs. the sampled output.
    reconstruction_loss = K.square(denoising_encoder(inputs) - denoising_encoder(sampled_reconstruction))
    reconstruction_loss = K.mean(reconstruction_loss, axis=-1)
    return reconstruction_loss
def rp_loss(y_true, y_pred):
    """L2 penalty on frame-to-frame change of the first rp_dim latent dims.

    Each 720-frame batch holds two consecutive 360-frame episodes; the
    two slices below penalise adjacent frames within each episode while
    skipping the boundary between episodes.  `y_true` is unused.
    """
    mean_latent = y_pred[1]
    log_var_latent = y_pred[2]
    rp_l2_loss = rp_l2_loss_weight * (K.mean(K.square(mean_latent[1:360, :rp_dim] -
                                                      mean_latent[0:359, :rp_dim])) +
                                      K.mean(K.square(log_var_latent[1:360, :rp_dim] -
                                                      log_var_latent[0:359, :rp_dim])))
    rp_l2_loss += rp_l2_loss_weight * (K.mean(K.square(mean_latent[361:720, :rp_dim] -
                                                       mean_latent[360:719, :rp_dim])) +
                                       K.mean(K.square(log_var_latent[361:720, :rp_dim] -
                                                       log_var_latent[360:719, :rp_dim])))
    return rp_l2_loss
def s_loss(y_true, y_pred):
    """L2 penalty matching per-dimension batch statistics of the style latents.

    Compares the mean (over frames) of each style dimension between the
    first and second episode in the 720-frame batch.  `y_true` is unused;
    currently excluded from the total loss (see vae_loss below).
    """
    mean_latent = y_pred[1]
    log_var_latent = y_pred[2]
    s_l2_loss = s_entropy_loss_term * K.mean(K.square(
                    K.mean(mean_latent[0:360, rp_dim:], axis=0) - \
                    K.mean(mean_latent[360:720, rp_dim:], axis=0)))
    s_l2_loss += s_entropy_loss_term * K.mean(K.square(
                     K.mean(log_var_latent[:360, rp_dim:], axis=0) - \
                     K.mean(log_var_latent[360:, rp_dim:], axis=0)))
    return s_l2_loss
# Assemble the total VAE loss: perceptual reconstruction + KL + temporal RP.
output = vae.outputs[0]
mean_output = output[:, :, :, :3]
log_var_output = output[:, :, :, 3:]
reconstruction_loss = recon_loss(None, vae.outputs)
rp_l2_loss = rp_loss(None, vae.outputs)
s_l2_loss = s_loss(None, vae.outputs)

beta = 1.
recon_loss_weight = 1.
# NOTE(review): the standard Gaussian KL term is
# 1 + log_var - square(mean) - exp(log_var); here mean and log-var appear
# swapped, and it is computed on the decoder's per-pixel outputs rather
# than the latent — confirm this is intentional.
kl_loss = 1 + mean_output - K.square(log_var_output) - K.exp(mean_output)
kl_loss = K.mean(kl_loss, axis=[-1, -2, -3])
kl_loss *= -0.5
# The style-matching term is deliberately commented out of the total.
vae_loss = K.mean(recon_loss_weight * reconstruction_loss + beta * kl_loss) + K.mean(rp_l2_loss)# + K.mean(s_l2_loss)
vae.add_loss(vae_loss)
learning_rate = 1e-4
adam = Adam(lr=learning_rate)
vae.compile(optimizer=adam)
vae.summary()
class DataSequence(Sequence):
    """Keras Sequence yielding two consecutive 360-frame episodes (720 images) per batch."""

    def __init__(self):
        self.num_episodes = 2500
        self.num_frames = 360
        self.filenames = [["training_observations2/obs_" + str(i) + "_" + str(j) + ".png" for j in range(self.num_frames)] for i in range(self.num_episodes)]
        self.image_size = 84
        # Rolling cursor: __getitem__ ignores `idx` and walks episodes in
        # order, so this generator is NOT safe to shuffle or parallelise.
        self.curr_episode = 0
        # self.on_epoch_end()

    def on_epoch_end(self):
        pass

    def __len__(self):
        return self.num_episodes

    def _read_episode(self, episode):
        """Load every readable frame of one episode (unreadable files are skipped)."""
        frames = []
        for name in self.filenames[episode]:
            img = cv2.imread(name)
            if img is not None:
                frames.append(img)
        return frames

    def __getitem__(self, idx):
        """Return (batch, None) for the next episode pair.

        Fix: the original first decoded all 360 frames of the episode with
        cv2 into an array that was immediately discarded, wasting a full
        episode of image decoding on every batch.  Episodes with missing
        frames are skipped until a complete 720-frame pair is found.
        """
        batch_x = []
        while len(batch_x) != 720:
            batch_x = self._read_episode(self.curr_episode)
            batch_x += self._read_episode(self.curr_episode + 1)
            batch_x = np.asarray(batch_x)
            batch_x = batch_x.astype('float32') / 255.
            self.curr_episode = (self.curr_episode + 2) % self.num_episodes
        return batch_x, None
epochs = 10
checkpoint = ModelCheckpoint('temporal_vae_checkpoint.h5', monitor='loss', verbose=0, save_best_only=True, mode='min', save_weights_only=True)
# Training path (disabled): fit on the episode generator, then save.
if False:
    # vae.load_weights('temporal_vae_l2_big_rp.h5')
    img_generator = DataSequence()
    history = vae.fit_generator(img_generator, epochs=epochs, validation_data=(x_train, None))
    vae.save_weights('temporal_vae_l2_big_rp.h5')

# Plot path (disabled) vs. evaluation path (active).
if False:
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.legend(['train', 'test'])
    plt.show()
else:
    # vae.load_weights('temporal_vae_l2_big_rp.h5')
    vae.load_weights('darla_vae.h5')
    # vae.save("full_darla_vae.h5")
    predicted_outputs = vae.predict(x_train)
    # predicted_imgs = predicted_outputs[0][:, :, :, :3]
    cv2.imwrite("original.png", x_train[35] * 255.)
    predicted_means = predicted_outputs[1]
    predicted_log_vars = predicted_outputs[2]
    if True:
        # Decode, denoise and report the pixel-space reconstruction error.
        predicted_imgs = decoder.predict([predicted_means, predicted_log_vars])[:, :, :, :3]
        denoised_predicted = denoising.predict(predicted_imgs)
        denoised_imgs = np.clip(denoised_predicted, 0., 1.)
        recon_loss = np.mean((denoised_imgs - x_train)**2)
        print("RECONSTRUCTION_LOSS", recon_loss)
        # NOTE(review): this slices the first 16 latent dims although
        # rp_dim is 30 above — confirm which width is intended.
        rp_l2_loss = rp_l2_loss_weight * np.mean((predicted_means[0:359, :16] - predicted_means[1:360, :16])**2) + np.mean((predicted_means[360:719, :16] - predicted_means[361:720, :16])**2)
        print("RP L2 LOSS", rp_l2_loss)
    # Latent sweep (disabled): vary each latent over 21 steps and dump frames.
    if False:
        for j in trange(0, 32):
            step_size = (predicted_means[:, j].max() - predicted_means[:, j].min()) / 20.
            predicted_min = predicted_means[:, j].min()
            # NOTE(review): this is a VIEW, not a copy — the restore at the
            # bottom of the loop writes back the already-overwritten values;
            # should be predicted_means[:, j].copy() if re-enabled.
            predicted_originals = predicted_means[:, j]
            for i in range(21):
                # Change the RP
                v = (i * step_size) + predicted_min
                predicted_means[:, j] = v
                # Predict, decode, denoise, and write to file
                predicted_imgs = decoder.predict([predicted_means, predicted_log_vars])[:, :, :, :3]
                denoised_predicted = denoising.predict(predicted_imgs)
                denoised_imgs = np.clip(denoised_predicted[35], 0., 1.)
                str_i = str(i)
                if i < 10:
                    str_i = "0" + str_i
                str_j = str(j)
                if j < 10:
                    str_j = "0" + str_j
                cv2.imwrite("sweep/denoised_temporal" + str_j + "_" + str_i + ".png", cv2.resize(denoised_imgs * 255., (512, 512)))
            predicted_means[:, j] = predicted_originals

# for i in range(32):
    # plt.plot(savgol_filter(predicted_means[:, i], 201, 3), color='r', alpha=0.5)
    # plt.plot(predicted_means[:, i], color='r', alpha=0.5)
# plt.show()
# for i in range(rp_dim):
    # plt.plot(predicted_means[:, i], color='r', alpha=0.5)
    # plt.plot(savgol_filter(predicted_means[:, i], 101, 3), color='r', alpha=0.5)
# for i in range(rp_dim, 32):
    # plt.plot(predicted_means[:, i], color='b', alpha=0.5)
    # plt.plot(savgol_filter(predicted_means[:, i], 101, 3), color='b', alpha=0.5)
# plt.show()
| joshnroy/TransferLearningThesis | deepmindLab/temporal_vae.py | temporal_vae.py | py | 13,281 | python | en | code | 0 | github-code | 13 |
# Count the characters of every sufficiently long line in people.txt.
# Lines shorter than 3 characters are reported on screen and appended to
# errors.log.txt; their length is excluded from the total.
#
# Fix: the original raised and caught BaseException inside a
# `while True`/`for` tangle as ordinary control flow (BaseException also
# covers KeyboardInterrupt/SystemExit) and managed the files by hand; a
# plain conditional inside `with` blocks produces identical output.
line_count = 0
sym_sum = 0
with open('people.txt', 'r') as people_file, \
        open('errors.log.txt', 'w') as errors_file:
    for i_line in people_file:
        line_count += 1
        length = len(i_line)
        if i_line.endswith("\n"):
            length -= 1  # do not count the trailing newline
        if length < 3:
            print('Длина {} строки меньше 3 символов'.format(line_count))
            errors_file.write('\nПроблема заключается в {} строке'.format(line_count))
        else:
            sym_sum += length
    print('Количество символов равно: ', sym_sum)
''' 2. Procure o atributo idade para o usuário 1 e caso não tenha adicione o valor 30
nesse campo.'''
dic1 = {'user1':{'nome': 'Mioshi', 'sobrenome': 'Kanashiro', 'apelido': 'Japa'},
        'user2':{'nome': 'Sergei', 'sobrenome': 'Ivanov', 'apelido': 'Russo'},
        'user3':{'nome': 'Alfredo', 'sobrenome': 'Constâncio', 'apelido': 'Portuga'}}

# Fix: the exercise asks to touch ONLY user1, but the original looped over
# every user and gave all of them an age.  setdefault adds the key only
# when it is missing, exactly as requested.
dic1['user1'].setdefault('idade', 30)
print(dic1)
13842831592 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
""" 4kuma comic crawler
crawl 4kuma comic
"""
URL = "http://www.shufu.co.jp/contents/4kuma/"
TEST_MODE = False
FILE_DIR = "./save"
LOG_FILE = "rirakkuma.log"
DETAIL_PATH = 'detail/detail.txt'
NOTIFICATION = True
import os
import re
import urllib2
from datetime import datetime
from HTMLParser import HTMLParser
import tweet
def main():
    """Fetch today's 4-koma comic image and record it.

    Reads the index page (or a local fixture in TEST_MODE), extracts the
    comic image path, downloads it into FILE_DIR unless already recorded,
    then logs, stores the detail entry and optionally tweets.

    NOTE(review): this is Python 2 code (urllib2); save_path is opened in
    text mode "w" for binary GIF data, which only works on py2/Unix.
    """
    if not os.path.exists(FILE_DIR):
        os.mkdir(FILE_DIR)
    if TEST_MODE:
        f = open("rirakkuma.html", "r")
        p = f.read()
        f.close()
    else:
        req = urllib2.Request(URL)
        response = urllib2.urlopen(req)
        p = response.read()
    rh = RilakkumaHTML()
    rh.feed(p)
    img_path = rh.get_img()
    # img_path looks like "images/NNN.gif"; keep only the file name.
    img_file = img_path.split("/")[1]
    filename = datetime.now().strftime("%Y%m%d.gif")
    save_path = os.path.join(FILE_DIR, filename)
    if __has_image(img_file):
        __log("Already img file in save dir", img_file)
        return
    url = URL + img_path
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    page = response.read()
    f = open(save_path, "w")
    f.write(page)
    f.close()
    __log("Fetch new image", img_file)
    __save_detail(img_file)
    if NOTIFICATION:
        tweet.main()
    return
def __has_image(img_file):
    """Return True when *img_file* is already recorded in DETAIL_PATH."""
    if not os.path.exists(DETAIL_PATH):
        return False
    with open(DETAIL_PATH, 'r') as f:
        return any(img_file == line.replace('\n', '') for line in f)
def __log(msg, append=""):
    """Append a timestamped "[ts] msg: append" entry to LOG_FILE."""
    now = datetime.now().strftime("%Y%m%d%H%M%S")
    with open(LOG_FILE, "a") as f:
        f.write("[%s] %s: %s\n" % (now, msg, append))
def __save_detail(image_name):
    """Append *image_name* to the detail file, creating its directory if needed.

    >>> __save_detail('test')
    """
    parent = os.path.dirname(DETAIL_PATH)
    if not os.path.exists(parent):
        os.makedirs(parent)
    with open(DETAIL_PATH, 'a') as f:
        f.write('{name}\n'.format(name=image_name))
class RilakkumaHTML(HTMLParser):
    """Parser that remembers the last <img> whose src matches images/<digits>.gif."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.imgs = ""

    def handle_starttag(self, tag, attrs):
        if tag != "img":
            return
        for name, value in attrs:
            if name == "src" and re.match(r"images\/[0-9]+\.gif", value):
                self.imgs = value

    def get_img(self):
        """Return the matched image path, or '' if none was seen."""
        return self.imgs
if __name__ == "__main__":
main()
# import doctest
# doctest.testmod()
| pyohei/rirakkuma-crawller | main.py | main.py | py | 2,596 | python | en | code | 0 | github-code | 13 |
1954954844 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 21:24:25 2021
Problem 59: XOR decryption
https://projecteuler.net/problem=59
@author: kuba
"""
# Importing encrypted values
PATH_TO_KEYS = "keys.txt"

encrypted_values = []
with open(PATH_TO_KEYS) as p:
    encrypted_values = p.readlines()
# The cipher text is a single line of comma-separated byte values;
# after the split, encrypted_values is a list of digit strings.
encrypted_values = encrypted_values[0]
encrypted_values = encrypted_values.replace(",", " ").split()
def most_common(lst):
    """Return the element of *lst* with the highest number of occurrences."""
    counts = {}
    for item in lst:
        counts[item] = counts.get(item, 0) + 1
    return max(counts, key=counts.get)
def solution():
    """Break the 3-character lowercase XOR cipher and print the sum of the
    decoded ASCII values of the message."""
    total = 0
    # The key repeats every 3 characters: bucket the ciphertext by key slot.
    buckets = [
        [int(c) for c in encrypted_values[pos::3]]
        for pos in range(3)
    ]
    for bucket in buckets:
        # For each byte, every lowercase key candidate yields one plain value;
        # the key byte that appears most often across candidates wins.
        candidates = []
        for value in bucket:
            candidates.extend({value ^ k for k in range(97, 123)})
        key = most_common(candidates)
        total += sum(value ^ key for value in bucket)
    print(total)
solution()
| KubiakJakub01/ProjectEuler | src/Problem59/Problem59.py | Problem59.py | py | 1,130 | python | en | code | 0 | github-code | 13 |
class Solution:
    def numSplits(self, s: str) -> int:
        """Count the indices where splitting s into two non-empty parts
        leaves both parts with the same number of distinct characters."""
        # Occurrences still to the right of the sweep position.
        remaining = {}
        for ch in s:
            remaining[ch] = remaining.get(ch, 0) + 1
        left_seen = set()
        right_distinct = len(remaining)
        splits = 0
        for ch in s:
            left_seen.add(ch)
            remaining[ch] -= 1
            if remaining[ch] == 0:
                right_distinct -= 1
            if len(left_seen) == right_distinct:
                splits += 1
        return splits
# class Solution:
# def numSplits(self, s: str) -> int:
# n = len(s)
# lcnt, rcnt = [], []
# lcur, rcur = {}, {}
# for i in range(n):
# if s[i] in lcur:
# lcur[s[i]] += 1
# else:
# lcur[s[i]] = 1
# lcnt.append({k: v for k, v in lcur.items()})
# if s[n - 1 - i] in rcur:
# rcur[s[n - 1 - i]] += 1
# else:
# rcur[s[n - 1 - i]] = 1
# rcnt.append({k: v for k, v in rcur.items()})
# ans = 0
# for i in range(1, n):
# if len(lcnt[i - 1]) == len(rcnt[n - 1 - i]):
# ans += 1
# return ans
if __name__ == '__main__':
    # Ad-hoc smoke test against the LeetCode examples.
    solu = Solution()
    print(solu.numSplits('aacaba'))
    print(solu.numSplits('abcd'))
    print(solu.numSplits('aaaaa'))
    print(solu.numSplits('acbadbaada'))
| wylu/leetcodecn | src/python/contest/leetcode31/5458.字符串的好分割数目.py | 5458.字符串的好分割数目.py | py | 1,456 | python | en | code | 3 | github-code | 13 |
9303717425 | # Nikita Akimov
# interplanety@interplanety.org
#
# GitHub
# https://github.com/Korchy/blender_dev_tools_int
#
# Script for easy reinstalling Blender 3D add-ons from source directory
#
# This version is for Blender 2.7
#
import tempfile
import os
import shutil
import glob
import bpy
import sys
# --- required custom parameters ------------
# Fill these in before running from Blender's text editor.
addon_name = ''  # add-on name == its source directory name, e.g. 'my_addon'
source_path = ''  # full path to the directory containing the add-on, e.g. '/dev/blender/'
files_mask = ['*.py', 'LICENSE', 'README.md']  # masks for files taken from the add-on directory
submodule_mask = ['*.py', 'LICENSE', 'README.md']  # masks for files taken from git submodules
release_path = ''  # optional directory to copy the release zip to, e.g. '/dev/blender/releases/'
# -------------------------------------------
def install_addon():
    """Package the add-on sources into a zip and (re)install it in Blender 2.7.

    Collects files matching ``files_mask`` from the add-on directory (plus
    any git submodule files matching ``submodule_mask``), zips them in a
    temporary directory, optionally copies the archive to ``release_path``,
    then disables, removes, reinstalls and re-enables the add-on via bpy
    operators.  The operator calls happen inside the TemporaryDirectory
    block, while the zip still exists on disk.
    """
    addon_path = os.path.join(source_path, addon_name)
    # files from main add-on source directory
    files = []
    add_path_by_mask(addon_path, files_mask, files)
    # git submodules: read each "path = <dir>" entry from .gitmodules
    if os.path.exists(os.path.join(source_path, addon_name, '.gitmodules')):
        with open(os.path.join(source_path, addon_name, '.gitmodules'), 'r') as submodules_file:
            for line in submodules_file.readlines():
                if 'path = ' in line:
                    submodule_dir = line.split(' = ')[1].strip()
                    add_path_by_mask(os.path.join(addon_path, submodule_dir), submodule_mask, files)
    # create archive (layout: <tmp>/<addon>/<addon>/... so the zip root is the add-on folder)
    with tempfile.TemporaryDirectory() as temp_dir:
        addon_folder_to_files = os.path.join(temp_dir, addon_name, addon_name)
        addon_folder_to_zip = os.path.join(temp_dir, addon_name)
        os.makedirs(addon_folder_to_files)
        for file in files:
            current_file_path = os.path.join(addon_folder_to_files, os.path.relpath(file, addon_path))
            if not os.path.exists(os.path.dirname(current_file_path)):
                os.makedirs(os.path.dirname(current_file_path))
            shutil.copy(file, current_file_path)
        shutil.make_archive(addon_folder_to_zip, 'zip', addon_folder_to_zip)
        addon_zip_path = addon_folder_to_zip + '.zip'
        # copy release archive for custom directory
        if release_path:
            shutil.copy(addon_zip_path, release_path)
        # remove old add-on version
        bpy.ops.wm.addon_disable(module=addon_name)
        bpy.ops.wm.addon_remove(module=addon_name)
        # remove from memory so the freshly installed code is re-imported
        for module in list(sys.modules.keys()):
            if hasattr(sys.modules[module], '__package__'):
                if sys.modules[module].__package__ == addon_name:
                    del sys.modules[module]
        # install add-on
        bpy.ops.wm.addon_install(filepath=addon_zip_path, overwrite=True)
        # activate add-on
        bpy.ops.wm.addon_enable(module=addon_name)
def add_path_by_mask(root_path, masks_list, file_list):
    """Append to *file_list* every path under *root_path* matching any mask."""
    for pattern in masks_list:
        file_list.extend(glob.glob(os.path.join(root_path, pattern)))
if __name__ == '__main__':
    # Build, (re)install and enable the add-on, then print a separator.
    install_addon()
    print('-'*50)
| Korchy/blender_dev_tools_int | 2.7/addon_reinst.py | addon_reinst.py | py | 3,228 | python | en | code | 4 | github-code | 13 |
25507387137 | import requests
import sqlite3
import creating_table
import telebot
# NOTE(review): hard-coded credentials checked into source — move the bot
# token to an environment variable or config file before publishing.
token = '788927932:AAFhYxhg5aLYtgDlU11yW15-PDMjiysOjHI'
URL = 'https://api.telegram.org/bot' + token + '/'
bot = telebot.TeleBot(token)
def get_translation(text, lang):
    """Translate *text* via the Yandex Translate API.

    Returns the decoded JSON response as a dict.

    Fixed: the original returned ``eval(r.text)`` — evaluating a network
    payload executes arbitrary expressions from the remote server (and
    chokes on JSON literals like true/false/null).  ``Response.json()``
    parses it safely.
    """
    url = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
    key = 'trnsl.1.1.20190423T190040Z.697d4ef188750baf.65f665b5bb9' \
          '06c53747e64b89b18344475f5a71d'
    r = requests.post(url, data={'key': key, 'text': text, 'lang': lang})
    return r.json()
# Create the lookup table (language name -> Yandex language code) on start-up.
with sqlite3.connect('TypingExchange.db') as conn:
    creating_table.create_table(conn.cursor())
    creating_table.insert_codes(conn.cursor())
def get_updates():
    """Fetch pending updates from the Telegram Bot API as parsed JSON."""
    response = requests.get(URL + 'getupdates')
    return response.json()
def get_message():
    """Return the chat id and text of the most recent incoming message."""
    updates = get_updates()
    last = updates['result'][-1]['message']
    return {'chat_id': last['chat']['id'],
            'text': last['text']}
def send_message(chat_id, text):
    """Send *text* to *chat_id* via the Bot API.

    Fixed: the query string was built by string interpolation, so messages
    containing spaces, '&', '#' or non-ASCII characters produced a broken
    URL.  Passing ``params=`` lets requests URL-encode the values.
    """
    requests.get(URL + 'sendmessage',
                 params={'chat_id': chat_id, 'text': text})
@bot.message_handler(commands=['start'])
def welcome():
    # NOTE(review): registered as a /start handler but also called directly
    # from main(); as a telebot handler the return value is ignored.
    return get_message()
def main():
    """Main bot loop: greet the user, then repeatedly read a text and a
    target language and reply with the translation.

    NOTE(review): waits for new messages by busy-polling getUpdates in
    tight while-loops; consider telebot's built-in polling instead.
    """
    chat_id = welcome()['chat_id']
    send_message(chat_id,
                 'Type your language in English starting with a capital letter')
    update_id = get_updates()['result'][-1]['update_id']
    while True:
        send_message(chat_id, "Type your text, please")
        while update_id == get_updates()['result'][-1]['update_id']:
            pass
        user_text = get_message()['text']
        update_id = get_updates()['result'][-1]['update_id']
        send_message(chat_id, "Choose language")
        while update_id == get_updates()['result'][-1]['update_id']:
            pass
        user_language = get_message()['text']
        update_id = get_updates()['result'][-1]['update_id']
        with sqlite3.connect('TypingExchange.db') as conn:
            cur = conn.cursor()
            # Parameterized query: interpolating user input into the SQL
            # string (the original .format call) allowed SQL injection.
            cur.execute(
                'SELECT code FROM TypingExchange WHERE natural_language = ?',
                (user_language,))
            row = cur.fetchone()
            try:
                if row is None:
                    raise LookupError(user_language)
                send_message(chat_id,
                             *get_translation(user_text, row[0])['text'])
            except Exception:
                # Best-effort, matching the original bare except: any
                # lookup/translation failure just reports "not found".
                send_message(chat_id, "Language is not found!")
if __name__ == '__main__':
    # Start the bot's manual polling loop.
    main()
| anyklaude/Translator | Translate.py | Translate.py | py | 2,538 | python | en | code | 0 | github-code | 13 |
11383999521 | import boto3
import functions
from functions import TWO_DAYS_AGO
from pprint import pprint
client = boto3.client('ec2')
def get_instances(region):
    """Return every EC2 instance in *region*, annotated with its region name."""
    regional_client = boto3.client('ec2', region_name=region)
    described = regional_client.describe_instances()
    found = []
    for reservation in described['Reservations']:
        for inst in reservation['Instances']:
            inst['Region'] = region
            found.append(inst)
    return found
instances=functions.aggregate_region_resources(get_instances)
def do_not_delete(instance):
    """True if the instance carries the protection tag do_not_delete=true."""
    return any(
        tag.get('Key', '') == 'do_not_delete' and tag.get('Value', '') == 'true'
        for tag in instance.get('Tags', [])
    )
def should_delete(instance):
    """Return True when the instance is NOT protected by a do_not_delete tag."""
    return not do_not_delete(instance)
# Instances older than TWO_DAYS_AGO and without the protection tag get deleted.
print("These instances will be deleted. Since they expired and no do_not_delete tag")
delete_instances = list(map(lambda x: {'InstanceId': x['InstanceId'],
                                       'KeyName': x.get('KeyName', ''),
                                       'Tags': x.get('Tags', []),
                                       'Region': x['Region'],
                                       'LaunchTime': x['LaunchTime']},
                            filter(should_delete,
                                   filter(lambda x: x['LaunchTime'] < TWO_DAYS_AGO, instances))))
pprint(delete_instances)
print("\n\n\n\n")
# Expired but tagged instances are only reported, never terminated.
print("These are the ec2 instances more than 2 days. But not going to delete them, because people tag them with do_not_delete:")
not_delete_instances = list(map(lambda x: {'InstanceId': x['InstanceId'],
                                           'KeyName': x.get('KeyName', ''),
                                           'Tags': x.get('Tags', []),
                                           'Region': x['Region'],
                                           'LaunchTime': x['LaunchTime']},
                               filter(do_not_delete,
                                      filter(lambda x: x['LaunchTime'] < TWO_DAYS_AGO, instances))))
pprint(not_delete_instances)
print("Terminating ec2 instances.................:")
# Each instance is terminated through a client for its own region.
for instance in delete_instances:
    client = boto3.client("ec2", region_name = instance['Region'])
    client.terminate_instances(
        InstanceIds = [instance['InstanceId']],
        DryRun=False
    )
| datianshi/aws_usage_reports | clean_ec2.py | clean_ec2.py | py | 2,233 | python | en | code | 0 | github-code | 13 |
"""Read Personal_info.txt (absolute path) and print only the names."""
# 'with' guarantees the file is closed even if reading fails; the original
# also shadowed the builtin name `list` and iterated a needless copy slice.
with open("C:\\Users\\ducog\\PycharmProjects\\pythonProject\\Personal_info.txt",
          "r") as file:
    lines = file.read().split('\n')

for line in lines:
    # Lines look like "Name:<value>" — keep the part after the colon.
    if 'Name' in line:
        print(line.split(':')[1])
| 1000hyehyang/Advanced-Python-Programming | dummy/03.py | 03.py | py | 322 | python | ko | code | 0 | github-code | 13 |
34339517832 | import os
import sys
import textwrap
from StringIO import StringIO
import wx
import Marvin
import Marvin.restriction
import Marvin.app.predictors
import Marvin.app.preferences as preferences
from Marvin.app.emboss import EmbossFrame
class ProteinPage(wx.Panel):
    """Notebook page for working with the translated protein sequence.

    Hosts the toolbar, the forward/reverse primer option widgets (overhangs,
    restriction-site / LIC-sequence selectors, start/stop codon checkboxes)
    and the scrolled ProteinCanvas that draws the sequence and predictor
    tracks.  The active cloning mode ('Restrict', 'LIC' or 'Mutate') decides
    which widgets are enabled.
    """

    def __init__(self, parent, topframe, mode='Restrict'):
        wx.Panel.__init__(self, parent)
        # This is a stub until I find a better way of doing it.
        have_emboss = False

        self.topframe = topframe
        #self._mode = mode
        self._mode = ''
        self._predictions = []

        self.hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.hbox.Add(wx.Button(self, 20, 'Translate'), 0, wx.ALL, 5)
        self.hbox.Add(wx.Button(self, 23, 'Run Predictors'), 0, wx.ALL, 5)
        self.hbox.Add(wx.Button(self, wx.ID_SAVE, 'Save'), 0, wx.ALL, 5)
        self.hbox.Add(wx.Button(self, wx.ID_CLEAR, 'Clear'), 0, wx.ALL, 5)
        self.hbox.Add(wx.Button(self, 22, 'Clear Positions'), 0, wx.ALL, 5)
        if have_emboss:
            self.hbox.Add(wx.Button(self, 25, 'EMBOSS'), 0, wx.ALL, 5)

        self.Bind(wx.EVT_BUTTON, self.OnTranslate, id=20)
        self.Bind(wx.EVT_BUTTON, self.OnRun, id=23)
        self.Bind(wx.EVT_BUTTON, self.OnSave, id=wx.ID_SAVE)
        if have_emboss:
            self.Bind(wx.EVT_BUTTON, self.OnEmboss, id=25)

        # Forward
        self.t1 = wx.StaticText(self, -1, 'Forward Overhang:')
        self.fw_overhang = wx.TextCtrl(self, 21, '', style=wx.TE_LEFT)
        self.fw_overhang.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL))
        self.t2 = wx.StaticText(self, -1, 'Restriction Site:')
        self.fw_rs_entry = wx.TextCtrl(self)
        self.fw_rs_btn = wx.Button(self, label='Select')
        #self.fwrs = wx.ComboBox(self, wx.ID_ANY, style=wx.CB_READONLY | wx.CB_SORT)
        self.fw_lic = wx.ComboBox(self, wx.ID_ANY, style=wx.CB_READONLY | wx.CB_SORT)
        self.fw_start = wx.CheckBox(self, -1, 'Add start codon', style=wx.ALIGN_RIGHT)

        # Reverse
        self.t4 = wx.StaticText(self, -1, 'Reverse Overhang:')
        self.rv_overhang = wx.TextCtrl(self, 24, '', style=wx.TE_LEFT)
        self.rv_overhang.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL))
        self.t5 = wx.StaticText(self, -1, 'Restriction Site:')
        #self.rvrs = wx.ComboBox(self, 25, style=wx.CB_READONLY | wx.CB_SORT)
        self.rv_rs_entry = wx.TextCtrl(self)
        self.rv_rs_btn = wx.Button(self, label='Select')
        self.rv_lic = wx.ComboBox(self, 25, style=wx.CB_READONLY | wx.CB_SORT)
        self.rv_stop = wx.CheckBox(self, -1, 'Add stop codon', style=wx.ALIGN_RIGHT)

        self.win = ProteinCanvas(self, topframe)

        fwbox = wx.BoxSizer(wx.HORIZONTAL)
        fwbox.Add(self.t1, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)
        fwbox.Add(self.fw_overhang, 0, wx.ALL, 5)
        fwbox.Add(self.t2, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)
        #fwbox.Add(self.fwrs, 0, wx.ALL, 5)
        fwbox.Add(self.fw_rs_entry, 0, wx.ALL, 5)
        fwbox.Add(self.fw_rs_btn, 0, wx.ALL, 5)
        fwbox.Add(wx.StaticText(self, label='LIC sequence:'), 0, wx.ALIGN_CENTRE_VERTICAL)
        fwbox.Add(self.fw_lic, 0, wx.ALL, 5)
        fwbox.Add(self.fw_start, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)

        rvbox = wx.BoxSizer(wx.HORIZONTAL)
        rvbox.Add(self.t4, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)
        rvbox.Add(self.rv_overhang, 0, wx.ALL, 5)
        rvbox.Add(self.t5, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)
        #rvbox.Add(self.rvrs, 0, wx.ALL, 5)
        rvbox.Add(self.rv_rs_entry, 0, wx.ALL, 5)
        rvbox.Add(self.rv_rs_btn, 0, wx.ALL, 5)
        rvbox.Add(wx.StaticText(self, label='LIC sequence:'), 0, wx.ALIGN_CENTRE_VERTICAL)
        rvbox.Add(self.rv_lic, 0, wx.ALL, 5)
        rvbox.Add(self.rv_stop, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)

        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add((-1, 5))
        vbox.Add(self.hbox, 0, wx.LEFT | wx.RIGHT, 5)
        vbox.Add((-1, 5))
        vbox.Add(fwbox, 0, wx.LEFT | wx.RIGHT, 5)
        vbox.Add(rvbox, 0, wx.LEFT | wx.RIGHT, 5)
        vbox.Add((-1, 5))
        vbox.Add(self.win, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 5)
        self.SetSizer(vbox)

        self.Bind(wx.EVT_BUTTON, self.OnClear, id=wx.ID_CLEAR)
        self.Bind(wx.EVT_BUTTON, self.win.OnClearPositions, id=22)
        self.Bind(wx.EVT_COMBOBOX, self.OnComboChange)
        self.Bind(wx.EVT_TEXT, self.OnTextChange)
        self.Bind(wx.EVT_CHECKBOX, self.OnCheck)
        self.Bind(wx.EVT_BUTTON, self.OnSelectEnzyme, self.fw_rs_btn)
        self.Bind(wx.EVT_BUTTON, self.OnSelectEnzyme, self.rv_rs_btn)

        self.set_mode(mode)

    def OnClear(self, event):
        """Discard stored predictions and wipe the drawing canvas."""
        self._predictions = []
        self.win.ClearCanvas(event)

    def OnSave(self, event):
        """Prompt for a path and write the protein plus every prediction
        track as aligned, line-wrapped text columns."""
        #wild = 'Text files (*.txt)|*.txt'
        dia = wx.FileDialog(self, message='Save file as...',
                            defaultDir=os.getcwd(),
                            defaultFile='',
                            style=wx.SAVE | wx.OVERWRITE_PROMPT)
        if dia.ShowModal() == wx.ID_OK:
            path = dia.GetPath()
            try:
                f_out = open(path, 'w')
            except IOError:
                # NOTE(review): open failures are silently ignored — the user
                # gets no feedback that nothing was saved.
                pass
            else:
                kwds = dict(width=50,
                            break_on_hyphens=False,
                            initial_indent='',
                            subsequent_indent='')
                protseq = self.topframe.builder.protein_seq.tostring()
                ps = [textwrap.wrap(protseq, **kwds)]
                names = ['Protein']
                for predictor in self._predictions:
                    name = predictor.name()
                    p = ''.join(predictor.prediction())
                    ps.append(textwrap.wrap(p, **kwds))
                    names.append(name)
                # Pad every label to the longest name so columns line up.
                N = max(len(n) for n in names)
                for x in zip(*ps):
                    for i, y in enumerate(x):
                        f_out.write('{0:{2}}{1}\n'.format(names[i], y, N))
                    f_out.write('\n')
                f_out.close()
        dia.Destroy()

    def OnTranslate(self, event):
        """Translate the nucleotide page's sequence and draw the protein."""
        nucseq = self.topframe.nucPage.text.GetValue()
        if nucseq:
            self.topframe.builder.nucleotide_seq = nucseq
            protseq = self.topframe.builder.protein_seq
            if self.mode == 'Restrict':
                pass
                #self.SetRestrictionSites()
            else:
                self.SetLICSequences()
            self.win.DrawProteinSequence(protseq)
        else:
            self.topframe.statusbar.Warn(
                'You must enter a nucleotide sequence!')

    def OnRun(self, event):
        """Run every predictor enabled in the preferences, in the stored
        order, drawing one track per predictor.
        """
        protseq = self.topframe.builder.protein_seq
        prefs = preferences.get_preferences()
        # NB, predictors is the module name, DON'T overwrite it.
        d = [(o, (p, c, u)) for p, (o, c, u) in prefs['predictors'].iteritems()]
        d.sort()
        ordered_predictors = [(p, (c, u)) for (o, (p, c, u)) in d]
        for predictor, (classname, use) in ordered_predictors:
            if use:
                self.topframe.SetStatusText('Running {0}'.format(predictor))
                #C = getattr(__import__(__name__), classname)
                C = getattr(Marvin.app.predictors, classname)
                p = C(protseq)
                self._predictions.append(p)
                self.win.DrawPrediction(p)
        self.topframe.SetStatusText('')

    def get_mode(self):
        return self._mode

    def set_mode(self, mode):
        """Switch cloning mode, enabling only the widgets that apply."""
        if mode == 'Restrict':
            self._mode = mode
            self.fw_lic.Enable(False)
            #self.fw_rs_entry.Enable(True)
            self.fw_rs_entry.Enable(False)
            self.fw_rs_btn.Enable(True)
            self.rv_lic.Enable(False)
            #self.rv_rs_entry.Enable(True)
            self.rv_rs_entry.Enable(False)
            self.rv_rs_btn.Enable(True)
        if mode == 'LIC':
            self._mode = mode
            self.fw_lic.Enable(True)
            self.fw_rs_entry.Enable(False)
            self.fw_rs_btn.Enable(False)
            self.rv_lic.Enable(True)
            self.rv_rs_entry.Enable(False)
            self.rv_rs_btn.Enable(False)
            self.SetLICSequences()
        if mode == 'Mutate':
            self._mode = mode
            self.fw_lic.Enable(False)
            self.fw_rs_entry.Enable(False)
            self.fw_rs_btn.Enable(False)
            self.rv_lic.Enable(False)
            self.rv_rs_entry.Enable(False)
            self.rv_rs_btn.Enable(False)

    mode = property(get_mode, set_mode, None, None)

    def SetLICSequences(self):
        """Repopulate both LIC combo boxes from the preferences."""
        prefs = preferences.get_preferences()
        self.fw_lic.Clear()
        self.rv_lic.Clear()
        self.fw_lic.Append('')
        self.rv_lic.Append('')
        for name, sequence in prefs['lic_sequences']:
            # Ignore anything that doesn't have a sequence.
            if sequence:
                if name:
                    self.fw_lic.Append(name)
                    self.rv_lic.Append(name)
                else:
                    self.fw_lic.Append(sequence)
                    self.rv_lic.Append(sequence)

    def OnSelectEnzyme(self, event):
        """Open the non-cutter picker and store the chosen enzyme on the
        builder (forward or reverse, depending on which button fired)."""
        # There must be a sequence before calling this dialog
        sequence = self.topframe.builder.nucleotide_seq
        if sequence:
            dia = RestrictionDialog(self, sequence)
            dia.ShowModal()
            Id = event.GetEventObject().GetId()
            if Id == self.fw_rs_btn.GetId():
                if dia.enzyme is not None:
                    self.fw_rs_entry.Clear()
                    self.fw_rs_entry.AppendText(str(dia.enzyme))
                    #s = Marvin.restriction.sequence(dia.enzyme)
                    #self.topframe.builder.forward_cloning_seq = str(s)
                    self.topframe.builder.forward_enzyme = dia.enzyme
            elif Id == self.rv_rs_btn.GetId():
                if dia.enzyme is not None:
                    self.rv_rs_entry.Clear()
                    self.rv_rs_entry.AppendText(str(dia.enzyme))
                    # s = Marvin.restriction.sequence(dia.enzyme)
                    # s.reverse_complement()
                    # self.topframe.builder.reverse_cloning_seq = str(s)
                    self.topframe.builder.reverse_enzyme = dia.enzyme
            else:
                pass
        else:
            dia = wx.MessageDialog(self, 'You need to enter a sequence to work on',
                                   'foo', wx.OK | wx.ICON_ERROR)
            dia.ShowModal()
            dia.Destroy()

    def OnComboChange(self, event):
        """Store the picked LIC sequence on the builder (LIC mode only)."""
        obj = event.GetEventObject()
        value = obj.GetValue()
        if self.topframe.builder.mode == 'LIC':
            prefs = preferences.get_preferences()
            if obj.GetId() == self.fw_lic.GetId():
                for i, j in prefs['lic_sequences']:
                    if i == value:
                        #self.topframe.builder.forward_cloning_seq = j
                        self.topframe.builder.forward_lic_seq = j
            elif obj.GetId() == self.rv_lic.GetId():
                for i, j in prefs['lic_sequences']:
                    if i == value:
                        #self.topframe.builder.reverse_cloning_seq = j
                        self.topframe.builder.reverse_lic_seq = j
            else:
                pass

    def OnTextChange(self, event):
        """Validate an overhang as it is typed (red background on non-ATGC)
        and push it to the builder."""
        obj = event.GetEventObject()
        value = obj.GetValue()
        oid = obj.GetId()
        if oid == self.fw_overhang.GetId() or oid == self.rv_overhang.GetId():
            if set(value.lower()).issubset('atgc'):
                obj.SetBackgroundColour('WHITE')
            else:
                obj.SetBackgroundColour('RED')
        if obj.GetId() == self.fw_overhang.GetId():
            self.topframe.builder.forward_overhang = value
        elif obj.GetId() == self.rv_overhang.GetId():
            self.topframe.builder.reverse_overhang = value
        else:
            pass

    def OnCheck(self, event):
        """Handler for the start/stop codon checkboxes."""
        obj = event.GetEventObject()
        if obj.GetId() == self.fw_start.GetId():
            self.topframe.builder.insert_start_codon = obj.GetValue()
        elif obj.GetId() == self.rv_stop.GetId():
            self.topframe.builder.insert_stop_codon = obj.GetValue()

    def OnEmboss(self, event):
        """Open the EMBOSS tools frame (only bound when EMBOSS is present)."""
        e = EmbossFrame(self, self.topframe)
        e.Show(True)
class CanvasObject:
    """One residue square on the protein canvas.

    Tracks its bounding box, the residue letter currently shown, its
    1-based sequence number and a state flag ('start', 'stop', 'mutate'
    or None) that determines the fill colour.
    """

    # state -> fill colour used by get_brush; anything else renders white.
    _STATE_COLOURS = {'start': 'green', 'stop': 'red', 'mutate': 'YELLOW'}

    def __init__(self, x, y, w, h, residue, number, state):
        self.x, self.y, self.w, self.h = x, y, w, h
        self.residue = residue
        self.number = number
        self.state = state
        # Remembered so a site mutation can be reverted (site-mut mode).
        self.original_residue = residue

    def get_brush(self):
        colour = self._STATE_COLOURS.get(self.state, 'white')
        return wx.Brush(colour, wx.SOLID)

    brush = property(get_brush, None, None, None)
class ProteinCanvas(wx.ScrolledWindow):
    """Scrolled canvas drawing the protein as clickable residue squares
    plus one horizontal track per predictor.

    Left click toggles a 'start' position, right click a 'stop' position
    (Restrict/LIC modes); in Mutate mode left click opens the residue
    picker.  Drawing is buffered through a wx.PseudoDC so squares can be
    redrawn individually by id.
    """

    def __init__(self, parent, topframe):
        wx.ScrolledWindow.__init__(self, parent, style=wx.SUNKEN_BORDER)
        self.topframe = topframe
        #self.SetBackgroundColour('WHITE')
        self.SetVirtualSize((5000, 5000))
        self.SetScrollRate(20, 20)
        self.dc = wx.PseudoDC()
        # Maps PseudoDC object ids -> CanvasObject squares.
        self.objids = {}
        self.width = 25
        self.height = 25
        self.indent = 100
        # y coordinate where the next prediction track is drawn.
        self.current_prediction_line = 150
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)

    def ConvertEventCoords(self, event):
        """Translate window mouse coordinates into virtual canvas coords."""
        vx, vy = self.GetViewStart()
        dx, dy = self.GetScrollPixelsPerUnit()
        return (event.GetX() + (vx * dx), event.GetY() + (vy * dy))

    def OnMouse(self, event):
        """Dispatch clicks on residue squares according to the builder mode."""
        hitradius = 1
        dc = self.dc
        dc.BeginDrawing()
        builder = self.topframe.builder
        mode = builder.mode
        if mode == 'Restrict' or mode == 'LIC':
            if event.LeftDown():
                # Left click toggles a construct start position.
                x, y = self.ConvertEventCoords(event)
                L = dc.FindObjects(x, y, hitradius)
                if L:
                    id = L[0]
                    obj = self.objids[id]
                    dc.ClearId(id)
                    dc.SetId(id)
                    if obj.state == 'start':
                        obj.state = None
                        builder.remove_start_position(obj.number)
                    else:
                        obj.state = 'start'
                        builder.add_start_position(obj.number)
                        builder.remove_stop_position(obj.number)
                    r = dc.GetIdBounds(id)
                    x, y, w, h = r.Get()
                    self.DrawSquare(dc, obj)
                    self.Refresh(True)
            if event.RightDown():
                # Right click toggles a construct stop position.
                x, y = self.ConvertEventCoords(event)
                L = dc.FindObjects(x, y, hitradius)
                if L:
                    id = L[0]
                    obj = self.objids[id]
                    dc.ClearId(id)
                    dc.SetId(id)
                    if obj.state == 'stop':
                        obj.state = None
                        builder.remove_stop_position(obj.number)
                    else:
                        obj.state = 'stop'
                        builder.add_stop_position(obj.number)
                        builder.remove_start_position(obj.number)
                    r = dc.GetIdBounds(id)
                    x, y, w, h = r.Get()
                    self.DrawSquare(dc, obj)
                    self.Refresh(True)
        if mode == 'Mutate':
            #if event.MiddleDown():
            if event.LeftDown():
                # Left click toggles a site mutation on the residue.
                x, y = self.ConvertEventCoords(event)
                L = dc.FindObjects(x, y, hitradius)
                if L:
                    id = L[0]
                    obj = self.objids[id]
                    dc.ClearId(id)
                    dc.SetId(id)
                    if obj.state == 'mutate':
                        builder.remove_site(obj.number, obj.original_residue, obj.residue)
                        obj.state = None
                        obj.residue = obj.original_residue
                    else:
                        obj.state = 'mutate'
                        mutate_dialog = MutateResidue(self)
                        mutate_dialog.ShowModal()
                        if mutate_dialog.residue:
                            obj.residue = mutate_dialog.residue
                            builder.add_site(obj.number, obj.original_residue, obj.residue)
                    r = dc.GetIdBounds(id)
                    x, y, w, h = r.Get()
                    self.DrawSquare(dc, obj)
                    self.Refresh(True)
        dc.EndDrawing()

    def ClearCanvas(self, event):
        """Reset the builder's positions and erase everything drawn."""
        if isinstance(self.topframe.builder, Marvin.ConstructBuilder):
            self.topframe.builder.remove_start_position()
            self.topframe.builder.remove_stop_position()
        else:
            self.topframe.builder.remove_site()
        self.dc.RemoveAll()
        self.Refresh(True)

    def OnClearPositions(self, event):
        """Clear every square's state/mutation and redraw, keeping the
        sequence itself on screen."""
        dc = self.dc
        dc.BeginDrawing()
        for id, obj in self.objids.iteritems():
            dc.ClearId(id)
            dc.SetId(id)
            obj.state = None
            obj.residue = obj.original_residue
            r = dc.GetIdBounds(id)
            x, y, w, h = r.Get()
            self.DrawSquare(dc, obj)
            self.Refresh(True)
        dc.EndDrawing()
##         if self.topframe.builder.mode in ['Restrict', 'LIC']:
##             self.topframe.builder = Marvin.ConstructBuilder()
##         else:
##             self.topframe.builder = Marvin.SiteBuilder()
        self.topframe.builder.clear_positions()

    def OnPaint(self, event):
        """Blit the PseudoDC contents, clipped to the damaged region."""
        dc = wx.BufferedPaintDC(self)
        self.PrepareDC(dc)
        bg = wx.Brush(self.GetBackgroundColour())
        dc.SetBackground(bg)
        dc.Clear()
        xv, yv = self.GetViewStart()
        dx, dy = self.GetScrollPixelsPerUnit()
        x, y = (xv * dx, yv * dy)
        rgn = self.GetUpdateRegion()
        rgn.Offset(x, y)
        r = rgn.GetBox()
        self.dc.DrawToDCClipped(dc, r)

    def GetExtent(self, text):
        """Return the pixel extent of *text* using the window's DC."""
        tmpdc = wx.PaintDC(self)
        self.PrepareDC(tmpdc)
        return tmpdc.GetTextExtent(text)

    def DrawSquare(self, dc, obj):
        """Draw individual square (residue).

        It would make more sense for this to be a method of CanvasObject, but
        it calls GetExtent() that calls PrepareDC() which is a method of the
        base class so this isn't possible.
        """
        dc.SetPen(wx.BLACK_PEN)
        dc.SetBrush(obj.brush)
        dc.DrawRectangle(obj.x, obj.y, obj.w, obj.h)
        dc.SetPen(wx.BLACK_PEN)
        dc.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL))
        #width, height = self.GetExtent(obj.residue)
        width, height = 15, 15
        # Centre the residue letter inside the square.
        x1 = obj.x + (obj.w / 2.0) - (width / 2.0)
        y1 = obj.y + (obj.h / 2.0) - (height / 2.0)
        dc.DrawText(obj.residue, x1, y1)

    def DrawProteinSequence(self, protseq):
        """Draw the full protein as a row of squares plus position rulers
        every 10 residues, then grow the virtual size to fit."""
        self.objids = {}
        dc = self.dc
        dc.BeginDrawing()
        x, y, w, h = self.indent, 80, self.width, self.height
        dc.SetPen(wx.BLACK_PEN)
        dc.SetFont(wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL))
        dc.DrawText('Protein', 10, y+5)
        for i, residue in enumerate(protseq):
            id = wx.NewId()
            dc.SetId(id)
            obj = CanvasObject(x,y,w,h,residue,i+1,None)
            self.DrawSquare(dc, obj)
            r = wx.Rect(x, y, w, h)
            dc.SetIdBounds(id, r)
            self.objids[id] = obj
            x += w
        dc.SetId(wx.NewId())
        x, y = self.indent, 80
        for i in range(len(protseq)):
            if i == 0 or (i+1) % 10 == 0:
                _x = x + (w * i)
                dc.DrawLine(_x, y, _x, y - 40)
                dc.DrawLine(_x, y - 40, _x + 10, y - 40)
                dc.DrawText(str(i+1), _x + 15, y - 50)
        dc.EndDrawing()
        self.Refresh(True)
        self.SetVirtualSize((len(protseq) * w + 500, 5000))

    def DrawPrediction(self, predictor):
        """Draw one predictor track below the previous one, delegating to
        the predictor's own Draw() when it defines one."""
        x = self.indent
        width, height = self.width, self.height
        dc = self.dc
        dc.BeginDrawing()
        y = self.current_prediction_line
        id = wx.NewId()
        dc.SetId(id)
        dc.SetPen(wx.BLACK_PEN)
        dc.SetBrush(wx.Brush('WHITE', wx.SOLID))
        dc.DrawText(predictor.name(), 10, y+5)
        if hasattr(predictor, 'Draw'):
            f = getattr(predictor, 'Draw')
            f(self, x, width, height, y)
        else:
            for pred in predictor.prediction():
                # NOTE(review): DrawSquare takes (dc, obj); this call still
                # uses an older argument list and would raise TypeError —
                # confirm whether every predictor defines its own Draw().
                self.DrawSquare(dc, x, y, width, height, pred, wx.Brush('white', wx.SOLID))
                x += width
        dc.EndDrawing()
        self.Refresh(True)
        self.current_prediction_line += (height + 20)
class MutateResidue(wx.Dialog):
    """Modal dialog with one button per amino acid.

    After ShowModal() returns, ``self.residue`` holds the chosen residue's
    one-letter code, or None if the user cancelled.
    """

    # Three-letter button labels -> one-letter amino acid codes.
    three_to_one = {'Ala': 'A', 'Arg': 'R', 'Asn': 'N', 'Asp': 'D',
                    'Cys': 'C', 'Glu': 'E', 'Gln': 'Q', 'Gly': 'G',
                    'His': 'H', 'Ile': 'I', 'Leu': 'L', 'Lys': 'K',
                    'Met': 'M', 'Phe': 'F', 'Pro': 'P', 'Ser': 'S',
                    'Thr': 'T', 'Trp': 'W', 'Tyr': 'Y', 'Val': 'V'}

    def __init__(self, parent):
        wx.Dialog.__init__(self, parent, title='Mutate Residue')
        self.residue = None
        gbox = wx.GridSizer(5, 5, 2, 2)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Ala'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Cys'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Asp'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Glu'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Phe'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Gly'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'His'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Ile'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Lys'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Leu'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Met'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Asn'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Pro'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Gln'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Arg'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Ser'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Thr'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Val'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Trp'), 0)
        gbox.Add(wx.Button(self, wx.ID_ANY, 'Tyr'), 0)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(wx.StaticText(self, wx.ID_ANY, label='Select residue to mutate to'),
                 0, wx.ALL, 20)
        vbox.Add(gbox, 0, wx.LEFT | wx.RIGHT, 20)
        vbox.Add(wx.Button(self, wx.ID_CANCEL, 'Cancel'), 0, wx.ALIGN_CENTRE | wx.ALL, 20)
        self.SetSizerAndFit(vbox)
        self.Centre()
        self.Bind(wx.EVT_BUTTON, self.OnClick)

    def OnClick(self, event):
        """Translate the clicked button's label; Cancel (not in the map)
        leaves residue as None."""
        label = event.EventObject.GetLabelText()
        try:
            self.residue = MutateResidue.three_to_one[label]
        except KeyError:
            self.residue = None
        self.Destroy()
from Bio import Restriction
class RestrictionDialog(wx.Dialog):
    """Modal dialog for picking a restriction enzyme that does not cut the
    given nucleotide sequence.

    After ShowModal() the chosen Bio.Restriction enzyme object is in
    ``self.enzyme`` ('' if nothing was picked, None after OnCancel).
    """

    def __init__(self, parent, nucleotide_seq):
        wx.Dialog.__init__(self, parent, title='Select a restriction enzyme')
        self.sequence = nucleotide_seq
        self.enzyme = ''
        enzymes = Marvin.restriction.do_not_cut(nucleotide_seq)
        self.noncutters = [str(x) for x in enzymes]
        self.search = wx.SearchCtrl(self, style=wx.TE_PROCESS_ENTER)
        self.lst = wx.ListBox(self, style=wx.LB_SINGLE)
        self.lst.InsertItems(sorted(self.noncutters), 0)
        self.select = wx.Button(self, wx.ID_OK, label='OK')
        self.cancel = wx.Button(self, wx.ID_CANCEL, label='Cancel')
        #self.clear = wx.Button(self, wx.ID_CLEAR, label='Clear')
        buttons = wx.BoxSizer(wx.HORIZONTAL)
        buttons.Add(self.cancel, 0, wx.ALIGN_RIGHT)
        #buttons.Add(self.clear, 0, wx.ALIGN_RIGHT)
        buttons.Add(self.select, 0, wx.ALIGN_RIGHT)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.search, 0, wx.EXPAND | wx.ALL, 5)
        sizer.Add(self.lst, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
        sizer.Add(buttons, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
        self.SetSizer(sizer)
        self.Centre()
        self.Bind(wx.EVT_TEXT, self.DoOnSearch, self.search)
        self.Bind(wx.EVT_BUTTON, self.OnSelect, self.select)
        #self.Bind(wx.EVT_BUTTON, self.OnClear, self.clear)

    def DoOnSearch(self, event):
        """Filter the list to enzymes whose name starts with the search text.

        Fixed: the candidate names are compared lower-cased, so the typed
        term must be lower-cased too — previously any capital letter in
        the search box (e.g. 'Eco') matched nothing.
        """
        term = self.search.GetValue().lower()
        matches = [item for item in self.noncutters
                   if item.lower().startswith(term)]
        self.lst.Clear()
        self.lst.InsertItems(sorted(matches), 0)

    def OnSelect(self, event):
        """Resolve the highlighted name to a Bio.Restriction enzyme object."""
        index = self.lst.GetSelection()
        value = self.lst.GetString(index)
        #self.enzyme = value
        self.enzyme = getattr(Restriction, value)
        self.Destroy()

    # def OnClear(self, event):
    #     self.enzyme = ''
    #     self.Destroy()

    def OnCancel(self, event):
        """Close without choosing an enzyme."""
        self.enzyme = None
        self.Destroy()
| jje42/marvin | Marvin/app/protein.py | protein.py | py | 25,790 | python | en | code | 4 | github-code | 13 |
17058433474 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PubChannelDTO(object):
def __init__(self):
self._ext_info = None
self._pub_channel = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def pub_channel(self):
return self._pub_channel
@pub_channel.setter
def pub_channel(self, value):
self._pub_channel = value
def to_alipay_dict(self):
params = dict()
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.pub_channel:
if hasattr(self.pub_channel, 'to_alipay_dict'):
params['pub_channel'] = self.pub_channel.to_alipay_dict()
else:
params['pub_channel'] = self.pub_channel
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PubChannelDTO()
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'pub_channel' in d:
o.pub_channel = d['pub_channel']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/PubChannelDTO.py | PubChannelDTO.py | py | 1,367 | python | en | code | 241 | github-code | 13 |
23943178929 | from scipy.stats import shapiro
from scipy.stats import mannwhitneyu
def shapiro_test(data):
    """Write Shapiro-Wilk normality results for every wind station/month
    to WindSpeedShapiro.txt.

    NOTE(review): *data* is never used and ``month_list`` is not defined in
    this module's visible scope — confirm where these come from.
    """
    print(shapiro(month_list))
    # 'with' guarantees the file is closed even if shapiro() raises
    # (the original open/close pair leaked the handle on error).
    with open("WindSpeedShapiro.txt", "w") as file1:
        for i in range(len(all_wind_dicts)):
            file1.write("\n" + wind_station_names[i] + " Shapiro-Wilk test results\n")
            for j in range(len(months_names)):
                results = shapiro(all_wind_dicts[i][months_names[j]])
                file1.write(months_names[j] + " " + str(results) + "\n")
def mannwhitneyu_test(data):
    """Write Mann-Whitney U results comparing temperature stations 2 and 3
    per month to TemperatureMannWhitney.txt.

    NOTE(review): *data* is unused; the function reads the module globals
    all_temp_dicts, temp_station_names and months_names instead — confirm.
    """
    # 'with' guarantees the file is closed even if mannwhitneyu() raises.
    with open("TemperatureMannWhitney.txt", "w") as file1:
        file1.write(temp_station_names[2] + " + " + temp_station_names[3] + "\n")
        for j in range(len(months_names)):
            results = mannwhitneyu(all_temp_dicts[2][months_names[j]],
                                   all_temp_dicts[3][months_names[j]])
            file1.write(months_names[j] + " : " + str(results) + "\n")
| changsteph/CITRUS-June2022 | statistics.py | statistics.py | py | 884 | python | en | code | 0 | github-code | 13 |
3480445964 | import datetime
import os
import keras.backend as K
import numpy as np
from keras.layers import Conv2D, Lambda, Input, Flatten, Dense, Multiply
from keras.models import Model, load_model
from keras.optimizers import RMSprop
from agents.constants import ATARI_INPUT_SHAPE
def build_model(obs_shape, action_size):
    """Build a small convolutional Q-network over stacked frames.

    Takes a frames input plus an action-mask input; multiplying the Q-value
    head by the mask zeroes every output except the masked actions', so the
    Huber loss is computed only on those entries.
    """
    frames_input = Input(obs_shape, name='frames')
    actions_input = Input((action_size,), name='actions')
    # Scale raw pixel bytes into [0, 1].
    normalized = Lambda(lambda x: x / 255.0, name='normalization')(frames_input)
    conv_1 = Conv2D(16, 8, strides=4, activation='relu')(normalized)
    conv_2 = Conv2D(32, 4, strides=2, activation='relu')(conv_1)
    conv_flatten = Flatten()(conv_2)
    hidden = Dense(256, activation='relu')(conv_flatten)
    output = Dense(action_size)(hidden)
    filtered = Multiply(name='q_val')([output, actions_input])
    model = Model(inputs=[frames_input, actions_input], outputs=filtered)
    model.summary()
    optimizer = RMSprop(lr=0.00025, rho=0.95, epsilon=0.01)
    model.compile(optimizer, loss=huber_loss)
    return model
def build_larger_model(obs_shape, action_size):
    """Build a larger convolutional Q-network (3 conv layers, 512-unit head).

    Same masked-output construction as :func:`build_model`: the one-hot
    ``actions`` input multiplies the Q head so only the selected action
    contributes to the loss.
    """
    frames_input = Input(obs_shape, name='frames')
    actions_input = Input((action_size,), name='actions')
    # scale raw pixel bytes [0, 255] into [0, 1]
    x = Lambda(lambda x: x / 255.0, name='normalization')(frames_input)
    x = Conv2D(32, 8, strides=4, activation='relu')(x)
    x = Conv2D(64, 4, strides=2, activation='relu')(x)
    x = Conv2D(64, 3, strides=1, activation='relu')(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    output = Dense(action_size)(x)
    # zero out Q-values of non-selected actions via the one-hot mask
    filtered = Multiply(name='q_val')([output, actions_input])
    model = Model(inputs=[frames_input, actions_input], outputs=filtered)
    model.summary()
    optimizer = RMSprop(lr=0.00025, rho=0.95, epsilon=0.01)
    model.compile(optimizer, loss=huber_loss)
    return model
def build_dense_model(obs_size, action_size):
    """Build a small fully-connected Q-network for low-dimensional observations.

    Uses the same masked-output construction as the convolutional variants:
    the one-hot ``actions`` input multiplies the Q head so only the selected
    action's Q-value contributes to the loss.
    """
    obs = Input((obs_size,), name='input')
    actions = Input((action_size,), name='actions')
    hidden = Dense(obs_size, activation='relu')(obs)
    hidden = Dense(16, activation='relu')(hidden)
    q_values = Dense(action_size)(hidden)
    masked_q = Multiply(name='q_val')([q_values, actions])
    model = Model(inputs=[obs, actions], outputs=masked_q)
    model.summary()
    model.compile(RMSprop(lr=0.00025, rho=0.95, epsilon=0.01), loss=huber_loss)
    return model
def get_curr_datetime():
    """Return the current local date formatted as 'YYYY-MM-DD'.

    NOTE: despite the name, only the date is included — that is all the
    '%Y-%m-%d' format produces.
    """
    # strftime already returns a str, so the former str(...) wrapper was redundant
    return datetime.datetime.now().strftime("%Y-%m-%d")
def get_model_path(name):
    """Build the dated weights-file path for *name* under ./weights/."""
    filename = '{}_{}.h5'.format(name, get_curr_datetime())
    return './weights/' + filename
def model_exists(name):
    """Return True if today's weights file for *name* already exists.

    Bug fix: the original called ``get_model_path()`` with no argument even
    though it requires ``name``, so this function always raised TypeError.
    The required parameter is added here to make the call well-formed.
    """
    return os.path.isfile(get_model_path(name))
def save_model(model, name):
    """Persist *model* to the dated ./weights/ path for *name*."""
    target = get_model_path(name)
    model.save(target)
def load_latest_model(name):
    """Load the most recent saved model whose filename starts with *name*.

    Relies on the 'name_YYYY-MM-DD.h5' naming scheme produced by
    ``get_model_path``: reverse lexicographic order on the date suffix puts
    the newest file first.

    Raises:
        FileNotFoundError: if no matching .h5 file exists in ./weights/
            (the original crashed with an opaque IndexError instead).
    """
    candidates = [
        fname for fname in os.listdir('./weights/')
        if fname.startswith(name) and fname.endswith('.h5')
    ]
    if not candidates:
        raise FileNotFoundError(
            "no saved model matching '{}*.h5' in ./weights/".format(name))
    candidates.sort(reverse=True)
    print("Found candidate", candidates[0])
    # re-bind the custom loss so Keras can deserialize the compiled model
    return load_model('./weights/' + candidates[0],
                      custom_objects={'huber_loss': huber_loss})
def load_model_by_filename(name):
    """Load a Keras model from an explicit path, re-binding the custom huber_loss."""
    return load_model(name, custom_objects={'huber_loss': huber_loss})
def huber_loss(y_true, y_pred):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # Quadratic for |error| <= 1, linear beyond: keeps gradients bounded for
    # large TD errors, which stabilizes Q-learning updates.
    error = y_true - y_pred
    quad_term = error*error / 2
    lin_term = abs(error) - 0.5
    # element-wise mask selecting entries in the linear regime
    use_linear = (abs(error) > 1.0)
    use_linear = K.cast(use_linear, 'float32')
    # blend: linear term where mask is 1, quadratic term where mask is 0
    return use_linear * lin_term + (1 - use_linear) * quad_term
| srajabi/rl | agents/model.py | model.py | py | 3,656 | python | en | code | 0 | github-code | 13 |
if __name__ == "__main__":
    # Min-cost path DP (BOJ "RGB street"-style): each row has three column
    # costs and adjacent rows must pick different columns; the answer per
    # test case is the cheapest total over all column choices for the last row.
    t = int(input())
    ans = []
    for i in range(t):
        n = int(input())
        a = []
        # NOTE: the inner loop reuses the name ``i``, shadowing the test-case
        # index — harmless here because the outer ``i`` is never read again.
        for i in range(n):
            l = list(map(int, input().split()))
            m = []
            if i == 0:
                m = l
            else:
                # cost of ending row i at column k = this row's cost at k plus
                # the cheaper of the other two columns from the previous row
                m = [min(a[i - 1][2], a[i - 1][1]) + l[0], min(a[i - 1][0], a[i - 1][2]) + l[1],
                     min(a[i - 1][0], a[i - 1][1]) + l[2]]
            a.append(m)
        ans.append(min(a[-1]))
for i in ans:
print(i) | yzgqy/myacm | acm/kt4/t4.py | t4.py | py | 526 | python | en | code | 0 | github-code | 13 |
73492681616 | import logging
from dataclasses import dataclass, field
from queue import PriorityQueue
from typing import Dict, List, Optional
from .schedule_common import (
ExecutorIndex,
ScheduleExecutor,
ScheduleOperation,
Scheduler,
SchedulerMinibatchSpec,
)
@dataclass(order=True)
class CompleteEvent:
    """Priority-queue entry marking an operation finishing at ``completion_time``.

    Ordering (used by the scheduler's event queue) considers only the
    timestamp; the payload fields are excluded from comparison.
    """
    completion_time: float
    op: ScheduleOperation = field(compare=False)
    executor: ScheduleExecutor = field(compare=False)
class WaitFreeExecutor(ScheduleExecutor):
    """Abstract executor that launches operations as soon as they are ready.

    ``available_queue`` holds operations that are ready but not yet launched;
    subclasses implement the three hooks below.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.available_queue: List[ScheduleOperation] = []
    def add_operation(self, op: ScheduleOperation):
        """Queue *op* as ready to run on this executor."""
        raise NotImplementedError
    def try_execute(self, current_time):
        """Try to launch the next available op.

        Expected to return ``(end_time, launched_op, trace_events)`` with
        ``launched_op`` falsy when nothing could be started (see
        ``WaitFreeScheduler.schedule``).
        """
        raise NotImplementedError
    def finish_execute(self):
        """Bookkeeping hook invoked when the currently running op completes."""
        raise NotImplementedError
class WaitFreeScheduler(Scheduler):
    """Discrete-event scheduler that runs operations as early as possible.

    Drives a set of :class:`WaitFreeExecutor` instances with a global event
    queue ordered by completion time; communication executors are always
    polled before computation executors.
    """
    def __init__(
        self,
        minibatch_spec: SchedulerMinibatchSpec,
        include_memory_stats: bool = True,
        memory_limit: float = float("inf"),
        logger: Optional[logging.Logger] = None,
    ) -> None:
        super().__init__(
            minibatch_spec,
            include_memory_stats,
            memory_limit,
            logger=logger,
        )
        self._initialize()
        # global simulated-time event queue, ordered by completion time
        self._pending_events: PriorityQueue[CompleteEvent] = PriorityQueue()
        self.executors: Dict[ExecutorIndex, WaitFreeExecutor]
    def _get_executor(
        self,
        executor_id,
        thread_id,
        n_orig_layers,
        assigned_stages,
        is_comm_stage,
        include_memory_stats,
        memory_limit=float("inf"),
    ):
        # overrides Scheduler
        raise NotImplementedError
    def _init_executors(self, n_microbatches, **kwargs):
        """Reset all executors and partition them into comm/compute groups."""
        for executor in self.executors.values():
            executor.reset()
        self.communication_executors = [
            executor
            for executor in self.executors.values()
            if executor.is_comm_stage
        ]
        self.computation_executors = [
            executor
            for executor in self.executors.values()
            if not executor.is_comm_stage
        ]
        return True
    def _inject_microbatches(
        self, microbatch_offset: int, n_microbatches: int
    ):
        """Seed stage 0 of each microbatch into its assigned executor."""
        for microbatch_id in range(
            microbatch_offset, microbatch_offset + n_microbatches
        ):
            executor = self.executors[
                self.minibatch_spec.flattened_executor_assignment[0]
            ]
            op = self._get_op(0, microbatch_id)
            executor.add_operation(op)
    def _on_op_finish(self, executor: WaitFreeExecutor, op: ScheduleOperation):
        """Mark *op* done and enqueue its successor stage (if any)."""
        executor.finish_execute()
        if op.flattened_stage < self.n_flattened_stages - 1:
            next_layer = op.flattened_stage + 1
            next_executor = self.minibatch_spec.flattened_executor_assignment[
                next_layer
            ]
            self.executors[next_executor].add_operation(
                self._get_op(next_layer, op.microbatch)
            )
    def _push_end_event(self, op, executor, end_time):
        # record the op's completion in the global event queue
        self._pending_events.put(CompleteEvent(end_time, op, executor))
    def schedule(self, **kwargs):
        """Run the event-driven simulation and return chrome trace events.

        Returns None if executor initialization fails.  On success the final
        simulated time is stored in ``self.makespan`` and every executor is
        asserted to have drained its queue.
        """
        n_microbatches = len(self.minibatch_spec.microbatches)
        status = self._init_executors(n_microbatches, **kwargs)
        if not status:
            return None
        self._inject_microbatches(0, n_microbatches)
        trace_events = self._get_trace_events()
        current_time = 0
        def __try_execute():
            # priortize communication executors
            for executor in (
                self.communication_executors + self.computation_executors
            ):
                end_time, launched_op, events = executor.try_execute(
                    current_time
                )
                if launched_op:
                    self._push_end_event(launched_op, executor, end_time)
                    trace_events["traceEvents"].extend(events)
        while True:
            __try_execute()
            if self._pending_events.empty():
                break
            else:
                next_event = self._pending_events.get()
                current_time = next_event.completion_time
                ready_events = [next_event]
                while not self._pending_events.empty():
                    # try to process all events that finish at the same time
                    another_event = self._pending_events.get()
                    if another_event.completion_time <= current_time + 1e-6:
                        ready_events.append(another_event)
                    else:
                        self._pending_events.put(another_event)
                        break
                for event in ready_events:
                    self._on_op_finish(event.executor, event.op)
        self.makespan = current_time
        # make sure all executors are empty
        for executor_idx, executor in self.executors.items():
            if hasattr(executor, "available_queue"):
                assert len(executor.available_queue) == 0, (
                    f"Executor {executor_idx} has non-empty ready queue "
                    f"at end of scheduling: {executor.available_queue}"
                )
            if hasattr(executor, "next_op_idx"):
                assert executor.next_op_idx == len(executor.operator_order), (
                    f"Executor {executor_idx} has not finished all operations "
                    f"at end of scheduling: {executor.available_queue}"
                )
        return trace_events
| awslabs/optimizing-multitask-training-through-dynamic-pipelines | dynapipe/schedule_opt/wait_free_schedule.py | wait_free_schedule.py | py | 5,650 | python | en | code | 1 | github-code | 13 |
# 0. Import packages
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
import numpy as np
# 1. Set up and load the data (toy 2-feature, 2-class example)
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])
# 2. Load the model (data-mining technique)
clf = QuadraticDiscriminantAnalysis()
# 3. Train the model
clf.fit(X, y)
# 4. Test the model (predict, score)
print(clf.predict([[-0.8, -1]])) | KSJ0128/Data | 12주차_test2.py | 12주차_test2.py | py | 430 | python | ko | code | 0 | github-code | 13 |
71042909137 | '''
To render html web pages
'''
import random
from django.http import HttpResponse
from django.template.loader import render_to_string
from articles.models import Article
def home_view(request):
    '''
    Render the home page with one randomly chosen article plus the full
    article queryset.

    Take in a request (Django sends request)
    Return as a response (We pick to return the response)
    '''
    # assumes articles with ids 1..5 exist — Article.objects.get raises
    # DoesNotExist otherwise; TODO confirm fixture data
    number = random.randint(1, 5)
    # from database (removed an unused leftover local ``name`` here)
    article_obj = Article.objects.get(id=number)
    article_queryset = Article.objects.all()
    context = {
        'object_list': article_queryset,
        'title': article_obj.title,
        'id': article_obj.id,
        'content': article_obj.content,
    }
    html_string = render_to_string('home_view.html', context=context)
    return HttpResponse(html_string)
return HttpResponse(HTML_STRINGS) | devbobnwaka/try-django2 | trydjango/views.py | views.py | py | 770 | python | en | code | 0 | github-code | 13 |
17055420674 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LogicalRuleItemDTO(object):
    """Value object for a logical crowd-rule item in the Alipay open API.

    All five fields are optional; :meth:`to_alipay_dict` serializes only the
    truthy ones and :meth:`from_alipay_dict` rebuilds an instance from such a
    dict (returning None for a falsy input).
    """
    # serialized field names, shared by both conversion directions
    _FIELD_NAMES = ('crowd_name', 'ext_crowd_key', 'gmt_expired_time',
                    'schedule_type', 'type')

    def __init__(self):
        self._crowd_name = None
        self._ext_crowd_key = None
        self._gmt_expired_time = None
        self._schedule_type = None
        self._type = None

    @property
    def crowd_name(self):
        return self._crowd_name

    @crowd_name.setter
    def crowd_name(self, value):
        self._crowd_name = value

    @property
    def ext_crowd_key(self):
        return self._ext_crowd_key

    @ext_crowd_key.setter
    def ext_crowd_key(self, value):
        self._ext_crowd_key = value

    @property
    def gmt_expired_time(self):
        return self._gmt_expired_time

    @gmt_expired_time.setter
    def gmt_expired_time(self, value):
        self._gmt_expired_time = value

    @property
    def schedule_type(self):
        return self._schedule_type

    @schedule_type.setter
    def schedule_type(self, value):
        self._schedule_type = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize truthy fields, delegating to nested DTOs when present."""
        params = dict()
        for key in self._FIELD_NAMES:
            value = getattr(self, key)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[key] = value.to_alipay_dict()
                else:
                    params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a LogicalRuleItemDTO from a dict; None for falsy input."""
        if not d:
            return None
        o = LogicalRuleItemDTO()
        for key in LogicalRuleItemDTO._FIELD_NAMES:
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/LogicalRuleItemDTO.py | LogicalRuleItemDTO.py | py | 2,979 | python | en | code | 241 | github-code | 13 |
# RGB distance (BOJ 1149): paint n houses red/green/blue at given costs so
# that adjacent houses get different colours, minimising the total cost.
n = int(input())
prices = [[0]]
# dp[i][c] = cheapest cost to paint houses 1..i with house i in colour c
dp = [[0] * 3 for _ in range(10001)]
for _ in range(n):
    prices.append(list(map(int, input().split())))
for i in range(1, n+1):
    # house i in colour c pays its own price plus the cheaper of the two
    # other colours from house i-1
    dp[i][0] = min(dp[i-1][1], dp[i-1][2]) + prices[i][0]
    dp[i][1] = min(dp[i-1][0], dp[i-1][2]) + prices[i][1]
    dp[i][2] = min(dp[i-1][0], dp[i-1][1]) + prices[i][2]
print(min(dp[n][0], dp[n][1], dp[n][2])) | hyebinnn/Algorithm | BOJ/P_1149.py | P_1149.py | py | 394 | python | en | code | 0 | github-code | 13 |
7782159376 | import os
from flask import Blueprint, render_template, redirect, url_for, flash, \
request, jsonify
from flask import current_app as app
from werkzeug.utils import secure_filename
from celery.result import AsyncResult
from clustermgr.extensions import db, wlogger, celery
from clustermgr.models import LDAPServer, AppConfiguration, KeyRotation, \
OxauthServer
from clustermgr.forms import AppConfigForm, KeyRotationForm, SchemaForm
from clustermgr.tasks.all import rotate_pub_keys
from clustermgr.core.utils import encrypt_text
from clustermgr.core.utils import generate_random_key
from clustermgr.core.utils import generate_random_iv
index = Blueprint('index', __name__)
@index.route('/')
def home():
    """Render the dashboard, or the intro page when no LDAP server exists.

    The dashboard receives per-role server counts (provider/consumer) along
    with the full server list and the app configuration.
    """
    servers = LDAPServer.query.all()
    config = AppConfiguration.query.first()
    if not servers:
        return render_template('intro.html')
    role_counts = {"provider": 0, "consumer": 0}
    for server in servers:
        if server.role in role_counts:
            role_counts[server.role] += 1
    return render_template('dashboard.html', data=role_counts, servers=servers,
                           conf=config)
@index.route('/configuration/', methods=['GET', 'POST'])
def app_configuration():
    """Show and update the application configuration and LDAP schema uploads.

    One page hosts two forms: the app-config form (replication DN, password,
    certificate folder) and a schema-file upload form; the submit button's
    data decides which branch handles the POST.
    """
    conf_form = AppConfigForm()
    sch_form = SchemaForm()
    config = AppConfiguration.query.first()
    schemafiles = os.listdir(app.config['SCHEMA_DIR'])
    if conf_form.update.data and conf_form.validate_on_submit():
        if not config:
            config = AppConfiguration()
        # store the DN fully qualified; the form only holds the cn value
        config.replication_dn = "cn={},o=gluu".format(
            conf_form.replication_dn.data)
        config.replication_pw = conf_form.replication_pw.data
        config.certificate_folder = conf_form.certificate_folder.data
        db.session.add(config)
        db.session.commit()
        flash("Gluu Replication Manager application configuration has been "
              "updated.", "success")
        if request.args.get('next'):
            return redirect(request.args.get('next'))
    elif sch_form.upload.data and sch_form.validate_on_submit():
        f = sch_form.schema.data
        filename = secure_filename(f.filename)
        # de-duplicate: append a numeric suffix when a similarly named
        # schema file already exists
        if any(filename in s for s in schemafiles):
            name, extension = os.path.splitext(filename)
            matches = [s for s in schemafiles if name in s]
            filename = name + "_" + str(len(matches)) + extension
        f.save(os.path.join(app.config['SCHEMA_DIR'], filename))
        schemafiles.append(filename)
        flash("Schema: {0} has been uploaded sucessfully.".format(filename),
              "success")
    # pre-fill the config form for GET (or unhandled POST) requests,
    # stripping the cn=/o=gluu wrapper stored above
    if config and config.replication_dn:
        conf_form.replication_dn.data = config.replication_dn.replace(
            "cn=", "").replace(",o=gluu", "")
        conf_form.replication_pw.data = config.replication_pw
        conf_form.certificate_folder.data = config.certificate_folder
    return render_template('app_config.html', cform=conf_form, sform=sch_form,
                           config=config, schemafiles=schemafiles,
                           next=request.args.get('next'))
@index.route("/key_rotation", methods=["GET", "POST"])
def key_rotation():
    """View and update the oxAuth key-rotation settings.

    On a valid POST the settings (interval, type, oxEleven URL/token,
    appliance inum) are stored — the token encrypted with a freshly
    generated key/IV — and an immediate rotation is queued via Celery.
    """
    kr = KeyRotation.query.first()
    form = KeyRotationForm()
    oxauth_servers = [server for server in OxauthServer.query]
    if request.method == "GET" and kr is not None:
        # pre-fill the form from the stored configuration
        form.interval.data = kr.interval
        form.type.data = kr.type
        form.oxeleven_url.data = kr.oxeleven_url
        form.inum_appliance.data = kr.inum_appliance
    if form.validate_on_submit():
        if not kr:
            kr = KeyRotation()
        kr.interval = form.interval.data
        kr.type = form.type.data
        kr.oxeleven_url = form.oxeleven_url.data
        kr.inum_appliance = form.inum_appliance.data
        kr.oxeleven_token_key = generate_random_key()
        kr.oxeleven_token_iv = generate_random_iv()
        # NOTE(review): b"{}".format(...) is a Python 2 idiom — bytes has no
        # .format() on Python 3; confirm the target interpreter version.
        kr.oxeleven_token = encrypt_text(
            b"{}".format(form.oxeleven_token.data),
            kr.oxeleven_token_key,
            kr.oxeleven_token_iv,
        )
        db.session.add(kr)
        db.session.commit()
        # rotate the keys immediately
        rotate_pub_keys.delay()
        return redirect(url_for("key_rotation"))
    return render_template("key_rotation.html",
                           form=form,
                           rotation=kr,
                           oxauth_servers=oxauth_servers)
@index.route("/api/oxauth_server", methods=["GET", "POST"])
def oxauth_server():
    """REST endpoint for oxAuth servers: POST registers one, GET lists all.

    POST returns 201 with the created record, or 400 when hostname is
    missing; GET returns a JSON array of all registered servers.
    """
    if request.method == "POST":
        hostname = request.form.get("hostname")
        gluu_server = request.form.get("gluu_server")
        gluu_version = request.form.get("gluu_version")
        # the checkbox arrives as the string "true"; any other value means a
        # non-Gluu server, for which the version field is blanked
        if gluu_server == "true":
            gluu_server = True
        else:
            gluu_server = False
            gluu_version = ""
        if not hostname:
            return jsonify({
                "status": 400,
                "message": "Invalid data",
                "params": "hostname can't be empty",
            }), 400
        server = OxauthServer()
        server.hostname = hostname
        server.gluu_server = gluu_server
        server.gluu_version = gluu_version
        db.session.add(server)
        db.session.commit()
        return jsonify({
            "id": server.id,
            "hostname": server.hostname,
            "gluu_server": server.gluu_server,
            "get_version": server.get_version,
        }), 201
    servers = [{
        "id": srv.id,
        "hostname": srv.hostname,
        "version": srv.get_version,
        "gluu_server": srv.gluu_server,
    } for srv in OxauthServer.query]
    return jsonify(servers)
@index.route("/api/oxauth_server/<id>", methods=["POST"])
def delete_oxauth_server(id):
    """Delete the oxAuth server with the given primary key; always 204."""
    target = OxauthServer.query.get(id)
    if target:
        db.session.delete(target)
        db.session.commit()
    return jsonify({}), 204
@index.route('/log/<task_id>')
def get_log(task_id):
    """Return buffered log messages and the Celery state for a background task.

    When the task has reached a terminal state its message buffer is
    discarded after this response.
    """
    msgs = wlogger.get_messages(task_id)
    result = AsyncResult(id=task_id, app=celery)
    # Bug fix: Celery's terminal failure state is 'FAILURE', not 'FAILED',
    # so the original comparison never matched and failed tasks leaked
    # their buffered messages forever.
    if result.state in ('SUCCESS', 'FAILURE'):
        wlogger.clean(task_id)
    log = {'task_id': task_id, 'state': result.state, 'messages': msgs}
    return jsonify(log)
| GuillaumeSmaha/cluster-mgr | clustermgr/views/index.py | index.py | py | 6,299 | python | en | code | 0 | github-code | 13 |
37086107466 | import collections
import itertools
import functools
import json
import flowws
from flowws import Argument as Arg
import keras_gtar
import numpy as np
LoadResult = collections.namedtuple(
'LoadResult', ['att_model', 'train_data', 'val_data', 'batch_size', 'type_map'])
@functools.lru_cache(maxsize=1)
def load_model(fname, path_substitutions):
    """Rebuild the saved training workflow and return its trained pieces.

    Re-runs the stored flowws workflow up to (and including) its Train stage
    with ``epochs`` forced to 0 so no actual training happens, restores the
    saved weights, and returns a :class:`LoadResult`.

    ``path_substitutions`` must be a hashable tuple of (old, new) pairs so
    the lru_cache key works; it is converted back to a dict here.
    """
    path_substitutions = dict(path_substitutions)
    with keras_gtar.Trajectory(fname, 'r') as traj:
        weights = traj.get_weights()
        workflow_json = traj.handle.readStr('workflow.json')
        w = flowws.Workflow.from_JSON(json.loads(workflow_json))
        stages = []
        for stage in w.stages:
            stage_json = stage.to_JSON()
            stages.append(stage)
            # rewrite string-valued stage arguments through the substitution
            # map so workflows recorded on another machine find their data
            for (name, val) in stage_json['arguments'].items():
                if isinstance(val, str):
                    stage.arguments[name] = path_substitutions.get(
                        stage.arguments[name], stage.arguments[name])
            if stage_json['type'] == 'Train':
                # stop collecting stages here and disable actual training
                stage.arguments['epochs'] = 0
                break
        new_w = flowws.Workflow(stages)
        scope = new_w.run()
        model = scope['model']
        model.set_weights(weights)
        batch_size = new_w.stages[-1].arguments['batch_size']
        train_xs, train_ts = scope['x_train']
        type_map = scope['type_map']
        attention_model = scope['attention_model']
    return LoadResult(
        attention_model, (train_xs, train_ts), scope['validation_data'],
        batch_size, type_map)
@flowws.add_stage_arguments
class MoleculeAttentionViewer(flowws.Stage):
    """Plot pairwise attention for molecules.
    This module creates a plato scene to visualize atoms in a molecule
    and the attention map of a trained model. It loads a gtar file
    containing a trained model and flowws workflow definition (using
    `MD17`, for example) and enables visualization of the atomic
    coordinates and pairwise attention.
    Workflows run on different systems will likely require use of the
    `path_substitutions` argument for their data-loading modules.
    """
    ARGS = [
        Arg('filename', '-f', str,
            help='Saved model to open'),
        Arg('path_substitutions', '-p', [(str, str)],
            help='Paths to replace in stage descriptions'),
        Arg('frame', None, int, 0,
            help='Frame (data index) to visualize'),
        Arg('particle', None, int, 0,
            help='Particle index to focus on'),
        Arg('additive_rendering', '-a', bool, True,
            help='If True, use additive rendering'),
        Arg('fast_antialiasing', None, bool, False,
            help='Use Fast Approximate Antialiasing (FXAA)'),
        Arg('ambient_occlusion', None, bool, False,
            help='Use Screen Space Ambient Occlusion (SSAO)'),
        Arg('filter_value', None, float,
            help='Filter bonds by minimum attention value'),
        Arg('value_scale', None, float, 0.,
            help='Factor to modify bond values by (after filter_value; in log-space)'),
        Arg('diameter_scale', '-d', float, 1.,
            help='Size scale for atomic diameters'),
        Arg('clip_min', None, float, 0.,
            help='Minimum value of attention weights (after scaling by value_scale) to clip to'),
        Arg('clip_max', None, float, 1.,
            help='Maximum value of attention weights (after scaling by value_scale) to clip to'),
        Arg('cmap_s', None, float, 0., valid_values=flowws.Range(-3., 3., True),
            help='Bond colormap s argument (0, 1, 2) -> (blue, red, green)'),
        Arg('cmap_h', None, float, 1.2, valid_values=flowws.Range(0., 2., True),
            help='Bond colormap h argument controlling saturation'),
        Arg('cmap_r', None, float, 1., valid_values=flowws.Range(0., 8., True),
            help='Bond colormap r argument controlling number of rotations to make'),
    ]
    def run(self, scope, storage):
        """Load the saved model/data and register this stage as a visual."""
        loaded = load_model(
            self.arguments['filename'], tuple(self.arguments['path_substitutions']))
        self.attention_model = loaded.att_model
        self.train_data = loaded.train_data
        self.val_data = loaded.val_data
        self.batch_size = loaded.batch_size
        type_map = loaded.type_map
        # RGBA colors keyed by atomic number (H, C, N, O); the fallback
        # magenta flags any element not listed here
        element_colors = {
            1: (.8, .8, .8, 1),
            6: (.5, .5, .5, 1),
            7: (.5, .7, .8, 1),
            8: (.8, .5, .5, 1),
        }
        type_color_map = [element_colors.get(i, (1., 0, 1, 1.)) for (i, j) in sorted(type_map.items())]
        self.type_color_map = np.array(type_color_map)
        # per-element radii; unknown elements default to 1
        element_radii = {
            1: .25,
            6: .7,
            7: .65,
            8: .6,
        }
        type_radius_map = [element_radii.get(i, 1) for (i, j) in sorted(type_map.items())]
        self.type_diameter_map = np.array(type_radius_map)*2
        # clamp the interactive argument ranges to the loaded dataset size
        self.arg_specifications['frame'].valid_values = flowws.Range(
            0, len(self.train_data[0]), (True, False))
        self.arg_specifications['particle'].valid_values = flowws.Range(
            0, len(self.train_data[0][1]), (True, False))
        scope.setdefault('visuals', []).append(self)
    def draw_plato(self):
        """Build and return the plato scene for the selected frame/particle."""
        import plato, plato.draw as draw
        train_xs, train_ts = self.train_data
        i, j = self.arguments['frame'], self.arguments['particle']
        features = {}
        if self.arguments.get('additive_rendering', True):
            features['additive_rendering'] = True
        if self.arguments.get('fast_antialiasing', False):
            features['fxaa'] = True
        if self.arguments.get('ambient_occlusion', False):
            features['ssao'] = True
        prim = draw.Spheres()
        edges = draw.Lines(cap_mode=1)
        scene = draw.Scene([prim, edges], zoom=3.5, features=features)
        xs = train_xs[i].copy()
        # drop zero-padded rows, then center the molecule at the origin
        filt = np.any(xs != 0, axis=-1)
        xs = xs[filt]
        xs -= np.mean(xs, axis=0, keepdims=True)
        # attention row for the focused particle j in frame i
        attention = self.attention_model.predict(
            (train_xs[i:i+1], train_ts[i:i+1]), batch_size=self.batch_size)[0, j]
        start_points = []
        end_points = []
        colors = []
        for (ii, jj) in itertools.product(range(len(xs)), range(len(xs))):
            if self.arguments.get('filter_value', 0):
                if attention[ii, jj] < self.arguments['filter_value']:
                    continue
            start_points.append(xs[ii])
            end_points.append(xs[jj])
            colors.append(attention[ii, jj])
        # scale (log-space factor), clip, then map values through cubehelix
        colors = np.array(colors)[:, 0]*np.exp(self.arguments['value_scale'])
        colors = np.clip(colors, self.arguments['clip_min'], self.arguments['clip_max'])
        colors = plato.cmap.cubehelix(
            colors, s=self.arguments['cmap_s'], h=self.arguments['cmap_h'],
            r=self.arguments['cmap_r'])
        edges.start_points = start_points
        edges.end_points = end_points
        edges.widths = .125
        edges.colors = colors
        types = np.argmax(train_ts[i, :len(xs)], axis=-1)
        prim.positions = xs
        colors = self.type_color_map[types]
        prim.colors = colors
        prim.diameters = self.type_diameter_map[types]*self.arguments['diameter_scale']
        return scene
| klarh/flowws-keras-geometry | flowws_keras_geometry/viz/MoleculeAttentionViewer.py | MoleculeAttentionViewer.py | py | 7,204 | python | en | code | 6 | github-code | 13 |
71077810259 | from data_transform import *
from sklearn import preprocessing
# Remove the class with only 1 member so class statistics / splits are valid
df = df[df['vote_average'] != 9]
# Extract target vector
y = df['vote_average'].values.astype(int)
# Extract feature matrix; "* 1" converts boolean columns to 0/1 ints
# (axis=1 keyword replaces the deprecated positional axis argument)
df_X = df.drop(['revenue', 'vote_average', 'vote_count', 'popularity'], axis=1) * 1
X = preprocessing.scale(df_X.values)
# (removed a stray no-op expression statement ``X`` and a dead first
# assignment to attributeNames that was immediately overwritten)
attributeNames = list(df_X.columns.values)
N, M = df_X.shape
classNames = list(np.array(range(11)))
# Compute values of C.
C = len(classNames)
| maersk96/02450_machine_learning | MovieDB/scripts/Project 2/data_prepare_logistic.py | data_prepare_logistic.py | py | 577 | python | en | code | 1 | github-code | 13 |
578387883 | import numpy as np
import pandas as pd
import argparse
def main(args):
    """Summarize a per-task test-log CSV over the Meta-Dataset test sources.

    For each dataset with logged tasks, prints mean accuracy, loss, a family
    of calibration errors (GCE/ECE/ACE/TACE/SCE/RMSCE) with 95% confidence
    half-widths, task statistics and wall time; then overall averages and,
    when ``args.print_latex`` is set, LaTeX table rows for the calibration
    metrics.
    """
    test_set = ["omniglot", "aircraft", "cu_birds", "dtd", "quickdraw", "fungi", "traffic_sign", "mscoco"]
    df = pd.read_csv(args.log_path)
    all_top1, all_loss, all_time = [], [], []
    all_gce, all_ece, all_ace, all_tace, all_sce, all_rmsce= [], [], [], [], [], []
    gce_latex = "GCE & "
    ece_latex = "ECE & "
    ace_latex = "ACE & "
    tace_latex = "TACE & "
    sce_latex = "SCE & "
    rmsce_latex = "RMSCE & "
    for dataset_name in test_set:
        tot_tasks = len(df.loc[df["dataset"]==dataset_name])
        if(tot_tasks>0):
            print("Dataset:", dataset_name)
            print("Tot-Tasks:", tot_tasks)
            if("task-top1" in df.columns):
                top1 = df.loc[df["dataset"]==dataset_name]["task-top1"]
                top1_mean = top1.mean()
                # 95% half-width: 196*std(x/100)/sqrt(n) == 1.96*std(x)/sqrt(n)
                top1_confidence = (196.0 * np.std(top1/100.)) / np.sqrt(len(top1))
                all_top1.append(top1_mean)
                print(f"TOP-1: {top1_mean:.2f} +- {top1_confidence:.2f}")
            if("task-loss" in df.columns):
                loss = df.loc[df["dataset"]==dataset_name]["task-loss"]
                loss_mean = loss.mean()
                loss_confidence = (196.0 * np.std(loss/100.)) / np.sqrt(len(loss))
                all_loss.append(loss_mean)
                print(f"Loss: {loss_mean:.5f} +- {loss_confidence:.2f}")
            # the calibration metrics below repeat the same pattern:
            # mean + 95% half-width, accumulated for the overall summary and
            # (optionally) appended as a LaTeX table cell
            if("task-gce" in df.columns):
                gce = df.loc[df["dataset"]==dataset_name]["task-gce"]
                gce_mean = gce.mean()
                gce_confidence = (196.0 * np.std(gce/100.)) / np.sqrt(len(gce))
                all_gce.append(gce_mean)
                if(args.print_latex): gce_latex += str(round(gce_mean,1))+"$\pm$"+str(round(gce_confidence,1)) + " & "
                print(f"GCE: {gce_mean:.2f} +- {gce_confidence:.2f}")
            if("task-ece" in df.columns):
                ece = df.loc[df["dataset"]==dataset_name]["task-ece"]
                ece_mean = ece.mean()
                ece_confidence = (196.0 * np.std(ece/100.)) / np.sqrt(len(ece))
                all_ece.append(ece_mean)
                if(args.print_latex): ece_latex += str(round(ece_mean,1))+"$\pm$"+str(round(ece_confidence,1)) + " & "
                print(f"ECE: {ece_mean:.2f} +- {ece_confidence:.2f}")
            if("task-ace" in df.columns):
                ace = df.loc[df["dataset"]==dataset_name]["task-ace"]
                ace_mean = ace.mean()
                ace_confidence = (196.0 * np.std(ace/100.)) / np.sqrt(len(ace))
                all_ace.append(ace_mean)
                if(args.print_latex): ace_latex += str(round(ace_mean,1))+"$\pm$"+str(round(ace_confidence,1)) + " & "
                print(f"ACE: {ace_mean:.2f} +- {ace_confidence:.2f}")
            if("task-tace" in df.columns):
                tace = df.loc[df["dataset"]==dataset_name]["task-tace"]
                tace_mean = tace.mean()
                tace_confidence = (196.0 * np.std(tace/100.)) / np.sqrt(len(tace))
                all_tace.append(tace_mean)
                if(args.print_latex): tace_latex += str(round(tace_mean,1))+"$\pm$"+str(round(tace_confidence,1)) + " & "
                print(f"TACE: {tace_mean:.2f} +- {tace_confidence:.2f}")
            if("task-sce" in df.columns):
                sce = df.loc[df["dataset"]==dataset_name]["task-sce"]
                sce_mean = sce.mean()
                sce_confidence = (196.0 * np.std(sce/100.)) / np.sqrt(len(sce))
                all_sce.append(sce_mean)
                if(args.print_latex): sce_latex += str(round(sce_mean,1))+"$\pm$"+str(round(sce_confidence,1)) + " & "
                print(f"SCE: {sce_mean:.2f} +- {sce_confidence:.2f}")
            if("task-rmsce" in df.columns):
                rmsce = df.loc[df["dataset"]==dataset_name]["task-rmsce"]
                rmsce_mean = rmsce.mean()
                rmsce_confidence = (196.0 * np.std(rmsce/100.)) / np.sqrt(len(rmsce))
                all_rmsce.append(rmsce_mean)
                if(args.print_latex): rmsce_latex += str(round(rmsce_mean,1))+"$\pm$"+str(round(rmsce_confidence,1)) + " & "
                print(f"RMSCE: {rmsce_mean:.2f} +- {rmsce_confidence:.2f}")
            if("task-tot-images" in df.columns):
                values = df.loc[df["dataset"]==dataset_name]["task-tot-images"]
                values_mean = values.mean()
                print(f"Avg-Images: {values_mean:.1f}")
            if("task-way" in df.columns):
                values = df.loc[df["dataset"]==dataset_name]["task-way"]
                values_mean = values.mean()
                print(f"Avg-Way: {values_mean:.1f}")
            if("task-avg-shot" in df.columns):
                values = df.loc[df["dataset"]==dataset_name]["task-avg-shot"]
                values_mean = values.mean()
                print(f"Avg-Shot: {values_mean:.1f}")
            if("time" in df.columns):
                tot_time = df.loc[df["dataset"]==dataset_name]["time"].sum()
                all_time.append(tot_time)
                print(f"Time: {tot_time/60.0:.1f} min")
            print("")
    # Finished, printing overall statistics
    print("-------------------")
    if(len(all_top1)>0): print(f"TOP-1 ... {np.mean(all_top1):.1f}%")
    if(len(all_loss)>0): print(f"Loss .... {np.mean(all_loss):.5f}")
    if(len(all_gce)>0): print(f"GCE ..... {np.mean(all_gce):.1f}%")
    if(len(all_ece)>0): print(f"ECE ..... {np.mean(all_ece):.1f}%")
    if(len(all_ace)>0): print(f"ACE ..... {np.mean(all_ace):.1f}%")
    if(len(all_tace)>0): print(f"TACE .... {np.mean(all_tace):.1f}%")
    if(len(all_sce)>0): print(f"SCE ..... {np.mean(all_sce):.1f}%")
    if(len(all_rmsce)>0): print(f"RMSCE ... {np.mean(all_rmsce):.1f}%")
    if(len(all_time)>0): print(f"Time .... {np.sum(all_time)/60.0:.1f} min, {(np.sum(all_time)/60.0)/60.0:.1f} hour")
    print("-------------------")
    if(args.print_latex):
        # Removing last char and adding new-line symbol
        gce_latex = gce_latex[:-2] + "\\" + "\\"
        ece_latex = ece_latex[:-2] + "\\" + "\\"
        ace_latex = ace_latex[:-2] + "\\" + "\\"
        tace_latex = tace_latex[:-2] + "\\" + "\\"
        sce_latex = sce_latex[:-2] + "\\" + "\\"
        rmsce_latex = rmsce_latex[:-2] + "\\" + "\\"
        print("\nLatex strings:")
        print(gce_latex)
        print(ece_latex)
        print(ace_latex)
        print(tace_latex)
        print(sce_latex)
        print(rmsce_latex)
if __name__ == "__main__":
    # CLI: --log_path points at the test CSV; --print_latex adds LaTeX rows.
    parser = argparse.ArgumentParser()
    parser.add_argument("--log_path", default="./log.csv", help="Path to CSV file with the test log.")
    parser.add_argument('--print_latex', dest='print_latex', action='store_true', help="Print latex strings.")
    args = parser.parse_args()
    main(args)
| mpatacchiola/contextual-squeeze-and-excitation | printer.py | printer.py | py | 6,905 | python | en | code | 21 | github-code | 13 |
17525610965 | import logging
from multiprocessing import shared_memory
import rumps
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
class TranslaterApp(rumps.App):
    """macOS menu-bar app selecting the active translation backend.

    The chosen backend name is published to other processes through a
    fixed-width slot at the start of the shared-memory segment named
    "Translator".
    """

    # width in bytes of the shared-memory slot holding the backend name;
    # the memoryview slice assignment requires exactly this many bytes
    _SLOT_SIZE = 10

    def __init__(self) -> None:
        super(TranslaterApp, self).__init__(
            name="Translater", quit_button=None, icon="icon.png"
        )
        logging.debug("Starting systray icon")
        # attach to the existing segment; assumes the consumer process
        # created it first — TODO confirm creation order
        self.shm = shared_memory.SharedMemory(name="Translator")
        self._write_backend("Yandex")
        self.yandex = rumps.MenuItem(title="Yandex")
        self.yandex.state = True
        self.google = rumps.MenuItem(title="Google")
        self.google.state = False
        self.promt = rumps.MenuItem(title="Promt")
        self.promt.state = False
        self.exit_button = rumps.MenuItem(title="Exit")
        self.menu = [
            self.yandex,
            self.google,
            self.promt,
            rumps.separator,
            self.exit_button,
        ]

    def _write_backend(self, name: str) -> None:
        """Publish *name*, space-padded to the fixed slot width."""
        # ljust guarantees the exact byte count the slice assignment needs,
        # replacing the hand-padded string literals of the original
        self.shm.buf[:self._SLOT_SIZE] = name.ljust(self._SLOT_SIZE).encode()

    def _select(self, name: str, sender) -> None:
        """Shared handler body: publish *name* and update menu checkmarks.

        Factored out of the three near-identical click handlers.
        """
        logging.debug("You selected %s for default translator", name)
        for item in (self.yandex, self.google, self.promt):
            item.state = False
        self._write_backend(name)
        sender.state = True

    @rumps.clicked("Yandex")
    def yandex_onoff(self, sender) -> None:
        """
        For select yandex
        """
        self._select("Yandex", sender)

    @rumps.clicked("Google")
    def google_onoff(self, sender) -> None:
        """
        For select google
        """
        self._select("Google", sender)

    @rumps.clicked("Promt")
    def promt_onoff(self, sender) -> None:
        """
        For select promt
        """
        self._select("Promt", sender)

    @rumps.clicked("Exit")
    def exit_button_click(self, sender) -> None:
        """
        For exit button
        """
        logging.debug("The application is shutting down")
        # unlink destroys the segment so readers must tolerate its absence
        self.shm.unlink()
        rumps.quit_application()
| r4hx/Translator | systray.py | systray.py | py | 2,316 | python | en | code | 2 | github-code | 13 |
31319546294 | # Ex097
def escreva(texto):
tamanho = len(texto) + 10
print('-' * tamanho)
print(f' {texto}')
print('-' * tamanho)
escreva('O RATO ROEU A ROUPA DO REI DE ROMA, E A RAINHA COM RAIVA ROEU O RESTO')
escreva('CLAYTON GARCIA DA SILVA')
escreva('GUANABARA ENXADRISTA')
| Claayton/PythonExercises | Exercício feitos pela primeira vez/ex097.py | ex097.py | py | 285 | python | pt | code | 1 | github-code | 13 |
73771757139 | import json
import logging
import os
import uuid
from itertools import count
from random import randint
import base62
from testgen.data_schema_parser import DataSchemaParser
from testgen.exceptions import DataGenerationException
log = logging.getLogger("testgen.generator")
class DataGenerator:
    """Generate JSON test data from a parsed data schema.

    Each output line is a JSON object whose values come from per-field
    strategy objects (each exposing a ``run()`` method).
    """

    def generate(self, options):
        """Parse the schema and emit data according to *options*.

        When ``options.files_count`` is 0 a single JSON line is printed to
        stdout; otherwise ``files_count`` files of ``data_lines`` JSON lines
        each are written under ``options.path_to_save_files``.
        """
        log.info(f'Start to generate with options={options}')
        strategies = DataSchemaParser().read_schema(options.data_schema_json)
        if options.files_count == 0:
            print(self.generate_file(strategies))
            return
        filename_func = self.create_filename_func(
            options.file_name, options.file_prefix)
        if options.clear_path:
            self.cleanup(options.path_to_save_files, options.file_name)
        generated = {}
        for _ in range(options.files_count):
            filename = filename_func()
            rows = []
            for _ in range(options.data_lines):
                rows.append(self.generate_file(strategies))
                rows.append('\n')
            # NOTE(review): a 'random' prefix may (rarely) repeat, silently
            # replacing an earlier entry in this dict — confirm acceptable
            generated[filename] = rows
            # bug fix: the original logged the mangled literal
            # "Generated (unknown)"; log the actual file name instead
            log.info(f'Generated {filename}')
        self.store_files(options.path_to_save_files, generated)

    def generate_file(self, strategies):
        """Run every field strategy once; return one JSON object as a string."""
        return json.dumps(
            {name: strategy.run() for name, strategy in strategies.items()})

    def store_files(self, path_to_save_files, generated_files):
        """Write each generated file's rows to disk under *path_to_save_files*."""
        for filename, content_rows in generated_files.items():
            full_path = os.path.join(path_to_save_files, filename)
            with open(full_path, 'w') as file:
                file.writelines(content_rows)
        log.info('All files are stored. File generation is done.')

    def cleanup(self, path, file_name):
        """Delete previously generated files in *path* matching *file_name*."""
        for file in os.listdir(path):
            if file.startswith(file_name):
                os.remove(os.path.join(path, file))
                log.info(f'Cleanup: {file} is removed')

    def create_filename_func(self, file_name, file_prefix):
        """Return a zero-argument callable yielding unique file names.

        *file_prefix* selects the suffix scheme: 'count' (1, 2, ...),
        'random' (base62-encoded random int) or 'uuid'.

        Raises:
            DataGenerationException: for any other *file_prefix* value.
        """
        counter = count(1)
        # dispatch table replaces the original nested-def if/elif chain
        suffix_makers = {
            'count': lambda: str(next(counter)),
            'random': lambda: base62.encode(randint(1, 1000000000)),
            'uuid': lambda: str(uuid.uuid4()),
        }
        if file_prefix not in suffix_makers:
            raise DataGenerationException("file_prefix has wrong value")
        make_suffix = suffix_makers[file_prefix]
        return lambda: file_name + '_' + make_suffix()
| gryabov/gridu_python_basics_course | testgen/generator.py | generator.py | py | 2,831 | python | en | code | 0 | github-code | 13 |
39132931242 | #2884 알람시계 - 구글 참고
# BOJ 2884: set the alarm 45 minutes earlier, wrapping around midnight.
hour, minute = map(int, input().split())
minute -= 45
if minute < 0:
    # Borrow one hour; hour 0 wraps back to 23.
    minute += 60
    hour = 23 if hour == 0 else hour - 1
print(hour, minute)
32956381702 | # https://www.hackerrank.com/challenges/lonely-integer/problem
if __name__ == '__main__':
    # XOR of all values cancels the pairs, leaving the lonely integer.
    n = int(input().strip())
    tokens = input().split(" ")
    lonely = 0
    for idx in range(n):
        lonely ^= int(tokens[idx])
    print(lonely)
33516528468 | """Functions to attribute IGO points with attributes related to land cover types and land use intensity within the riverscape.
Jordan Gilbert
Dec 2022
"""
import sqlite3
import rasterio
import numpy as np
from osgeo import gdal
from rasterio.mask import mask
from rscommons import Logger, VectorBase, GeopackageLayer
from rscommons.database import SQLiteCon
def igo_vegetation(windows: dict, landuse_raster: str, out_gpkg_path: str):
    """Summarize land cover classes within each DGO polygon and store the
    per-class cell counts/areas in the DGOVegetation table of the geopackage.

    Args:
        windows: analysis windows keyed by IGO — not read in this function;
            presumably kept for interface symmetry with sibling attributors
            (TODO confirm with callers).
        landuse_raster: path to the land use / land cover raster.
        out_gpkg_path: path to the output geopackage with a DGOGeometry layer
            and a DGOVegetation table.

    Raises:
        Exception: if any insert into DGOVegetation fails.
    """
    log = Logger('IGO Land Use')
    log.info('Summarizing land cover classes within each DGO')

    dataset = gdal.Open(landuse_raster)
    geo_transform = dataset.GetGeoTransform()
    # Convert one metre into raster units so cell areas come out in m^2.
    conversion_factor = VectorBase.rough_convert_metres_to_raster_units(landuse_raster, 1.0)
    cell_area = abs(geo_transform[1] * geo_transform[5]) / conversion_factor**2

    with rasterio.open(landuse_raster) as src, \
            GeopackageLayer(out_gpkg_path, 'DGOGeometry') as dgo_lyr:
        veg_counts = []
        for dgo_ftr, *_ in dgo_lyr.iterate_features():
            dgoid = dgo_ftr.GetFID()
            dgo_ogr = dgo_ftr.GetGeometryRef()
            dgo_g = VectorBase.ogr2shapely(dgo_ogr)
            dgo_geom = dgo_g.buffer(geo_transform[1] / 2)  # buffer by raster resolution to ensure we get all cells
            try:
                raw_raster = mask(src, [dgo_geom], crop=True)[0]
                mask_raster = np.ma.masked_values(raw_raster, src.nodata)
                # Tally the cell count for every land cover class present.
                for oldvalue in np.unique(mask_raster):
                    if oldvalue is not np.ma.masked:
                        cell_count = np.count_nonzero(mask_raster == oldvalue)
                        veg_counts.append([dgoid, int(oldvalue), cell_count * cell_area, cell_count])
            except Exception as ex:
                log.warning(f'Error obtaining land cover raster values for DGO ID {dgoid}')
                log.warning(ex)

    with SQLiteCon(out_gpkg_path) as database:
        errs = 0
        # (Removed an unused batch_count accumulator from the original loop.)
        for veg_record in veg_counts:
            if int(veg_record[1]) != -9999:
                try:
                    database.conn.execute('INSERT INTO DGOVegetation (DGOID, VegetationID, Area, CellCount) VALUES (?, ?, ?, ?)', veg_record)
                except sqlite3.IntegrityError:
                    # This is likely a constraint error.
                    errstr = "Integrity Error when inserting records: DGOID: {} VegetationID: {}".format(veg_record[0], veg_record[1])
                    log.error(errstr)
                    errs += 1
                except sqlite3.Error as err:
                    # This is any other kind of error
                    errstr = "SQL Error when inserting records: DGOID: {} VegetationID: {} ERROR: {}".format(veg_record[0], veg_record[1], str(err))
                    log.error(errstr)
                    errs += 1
        if errs > 0:
            raise Exception('Errors were found inserting records into the database. Cannot continue.')
        database.conn.commit()

    log.info('IGO land use summary complete')
| Riverscapes/riverscapes-tools | packages/anthro/anthro/utils/igo_vegetation.py | igo_vegetation.py | py | 3,051 | python | en | code | 10 | github-code | 13 |
29284087952 | import pygame
import random
import sys
pygame.init()
# Arguments: window dimensions and drawing colours (RGB).
WIDTH = 800
HEIGHT = 600
RED = (255, 0, 0)  # player colour
BLUE = (0, 255, 0)  # enemy colour; NOTE(review): value is green RGB, not blue — confirm naming
YELLOW = (255, 255, 0)  # score text colour
BACKGROUND_COLOR = (0, 0, 50)
playerSize = 50
playerPos = [WIDTH/2, HEIGHT - 2 * playerSize]  # start near the bottom centre
enemySize = 50
enemy_pos = [random.randint(0, WIDTH-enemySize), 0]  # first enemy at a random x on the top edge
enemy_list = [enemy_pos]
SPEED = 10  # enemy fall speed in pixels per frame (rescaled by set_level)
screen = pygame.display.set_mode((WIDTH, HEIGHT))
game_over = False
score = 0
clock = pygame.time.Clock()
myFont = pygame.font.SysFont('monospace', 35)  # used to render the score label
def set_level(score, SPEED):
    """Return the enemy fall speed for the current score.

    The SPEED argument mirrors the call site but is never read; the result
    is always derived from *score* alone.
    """
    thresholds = ((40, 5), (80, 10), (140, 15), (200, 20))
    for limit, level_speed in thresholds:
        if score < limit:
            return level_speed
    return 30
# Spawn logic: maybe add one enemy at a random x along the top edge.
def dorp_enemies(enemy_list):
    """Randomly (p < 0.3 per frame) append a new enemy while fewer than 10 exist."""
    chance = random.random()  # always drawn, keeping the RNG stream identical
    if chance < 0.3 and len(enemy_list) < 10:
        spawn_x = random.randint(0, WIDTH - enemySize)
        enemy_list.append([spawn_x, 0])
# Render helper for the enemy squares.
def draw_enemies(enemy_list):
    """Draw every enemy as a solid square on the global screen surface."""
    for x_pos, y_pos in enemy_list:
        pygame.draw.rect(screen, BLUE, (x_pos, y_pos, enemySize, enemySize))
# Advance every enemy one step; remove off-screen enemies and award score.
def update_enemy_position(enemy_list, score):
    """Move each enemy down by SPEED; drop those past the bottom, +1 score each.

    Returns the updated score; enemy_list is updated in place.

    Fix: the original popped items from enemy_list while iterating it with
    enumerate, which skips the element following each removal and can pop a
    shifted (wrong) index. Rebuild the survivor list instead and assign it
    back through a slice so the caller's reference still sees the update.
    """
    survivors = []
    for enemy_pos in enemy_list:
        if 0 <= enemy_pos[1] < HEIGHT:
            enemy_pos[1] += SPEED
            survivors.append(enemy_pos)
        else:
            # Enemy reached the bottom without hitting the player.
            score += 1
    enemy_list[:] = survivors
    return score
# Collision query over the whole enemy list.
def collider(enemy_list, playerPos):
    """Return True if any enemy overlaps the player square."""
    return any(detect_collision(enemy_pos, playerPos) for enemy_pos in enemy_list)
# Axis-aligned bounding-box overlap test between two squares.
def detect_collision(playerPos, enemy_pos):
    """Return True when the two squares overlap on both the x and y axes.

    Note: apart from which size constant bounds each interval, the test is
    symmetric in its two arguments.
    """
    p_x, p_y = playerPos[0], playerPos[1]
    e_x, e_y = enemy_pos[0], enemy_pos[1]
    overlap_x = (p_x <= e_x < p_x + playerSize) or (e_x <= p_x < e_x + enemySize)
    overlap_y = (p_y <= e_y < p_y + playerSize) or (e_y <= p_y < e_y + enemySize)
    return overlap_x and overlap_y
# Game loop: one iteration per frame, capped at 30 FPS.
while not game_over:
    # Player movement: one grid step (playerSize pixels) per arrow-key press.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            x = playerPos[0]
            y = playerPos[1]
            if event.key == pygame.K_LEFT:
                x -= playerSize
            elif event.key == pygame.K_RIGHT:
                x += playerSize
            playerPos = [x, y]
    screen.fill(BACKGROUND_COLOR)
    # Drawing player
    pygame.draw.rect(screen, RED, (playerPos[0], playerPos[1], playerSize, playerSize))
    # Spawn, move and score enemies; fall speed scales with the score.
    dorp_enemies(enemy_list)
    score = update_enemy_position(enemy_list, score)
    SPEED = set_level(score, SPEED)
    text = "Score: " + str(score)
    label = myFont.render(text, 1, YELLOW)
    screen.blit(label, (WIDTH-200, HEIGHT-40))
    if collider(enemy_list, playerPos):
        game_over = True
    draw_enemies(enemy_list)
    print(SPEED)  # NOTE(review): debug print left in — consider removing
    # Update display
    clock.tick(30)
    pygame.display.update()
| KarolProgramista/Blocke-The-Game | game.py | game.py | py | 3,284 | python | en | code | 1 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.