seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
72680641938 | import sublime
from .debugger_info import Debugger
import os
from os.path import dirname
import sys
from subprocess import Popen, PIPE
import subprocess
from os import path
import shlex
import select
import re
class LLDBDebugger(Debugger):
    """
    LLDBDebugger debug cpp programs with
    lldb

    Drives an interactive ``lldb`` subprocess over pipes: compiles the file
    with g++, launches the program with its output redirected to output.txt,
    and parses lldb's console chatter character-by-character to track the
    process state (running / stopped / crashed) and locate crash lines.
    """
    # Priority used when several debuggers could handle the same file.
    RUN_PRIOR = 0.5
    # Maximum number of characters of program output read back (512 KiB).
    READ_LIMIT = 2 ** 19

    class LLDBAnalyzer(object):
        """docstring for LLDBAnalyzer

        Incremental parser for lldb console output.  ``add_out`` feeds raw
        characters; ``analyze`` advances a small state machine:
        LAUNCHING -> RUNNING -> (STOPPED | CRASHED -> FINDING_CRASHLINE
        -> CRASHLINE_FOUND).  ``analyze`` returns 'NEED_MORE' while the
        buffered output is not yet sufficient to advance.
        """
        # Template for spotting "<file>:<line>" in a backtrace; the file
        # name is regex-escaped and substituted before compiling.
        STR_REGEX_CRASH_LINE = '((?:{name}:))(\d+)'
        # Pulls the signal/code name out of lldb's "(code=...)" annotation.
        REGEX_RT_CODE = re.compile('(?:\(code=)([A-Za-z0-9_]+)')
        # Pulls the token after "stop reason = " out of lldb output.
        REGEX_STOP_REASON = re.compile('(?:stop reason = )([a-zA-Z_]+)')

        def __init__(self, on_status_change):
            super(LLDBDebugger.LLDBAnalyzer, self).__init__()
            # Full transcript of everything lldb printed.
            self.data = ''
            # Sliding buffer, cleared whenever a state transition consumes it.
            self.data_buff = ''
            self.status = 'LAUNCHING'
            # Human-readable process state reported via the callback.
            self.proc_state = 'LAUNCHING'
            self.change_status = on_status_change
            self.change_status(self.status)

        def add_out(self, out):
            """Append raw lldb output to both the transcript and the buffer."""
            self.data += out
            self.data_buff += out

        def encode_save(self, str):
            """Backslash-escape regex metacharacters in a file name."""
            unsave = '.\\{}()[]'
            rez = ''
            for x in str:
                if x in unsave:
                    rez += '\\'
                rez += x
            return rez

        def analyze(self):
            """Advance the state machine using the buffered lldb output.

            Returns 'NEED_MORE' (and leaves state unchanged) when the buffer
            does not yet contain the text the current state is waiting for.
            """
            status = self.status
            buff = self.data_buff
            self.change_status(self.proc_state)
            if status == 'LAUNCHING':
                # Wait for lldb's "Process <pid> launched" line.
                p_ind = buff.find('Process')
                if p_ind == -1:
                    return 'NEED_MORE'
                self.pid = int(buff[p_ind:].split()[1])
                self.status = 'RUNNING'
                self.proc_state = 'RUNNING'
                self.data_buff = ''
            elif status == 'RUNNING':
                # Wait for "Process <pid> stopped" or "Process <pid> exited ...".
                p_ind = buff.find('Process')
                if p_ind == -1:
                    return 'NEED_MORE'
                self.pid = int(buff[p_ind:].split()[1])
                state = buff[p_ind:].split()[2]
                if state == 'stopped':
                    self.status = 'CRASHED'
                    self.proc_state = 'CRASHED'
                    # NOTE(review): 228 is a placeholder return code; the
                    # real code is extracted later in FINDING_CRASHLINE.
                    self.rtcode = 228
                    # print('FINALLY CRASHED')
                else:
                    # Word 6 of the "exited with status = N" line.
                    self.rtcode = buff[p_ind:].split()[6]
                    self.status = 'STOPPED'
                    self.proc_state = 'STOPPED'
                    # print('rtcode -> ', self.rtcode)
            elif status == 'FINDING_CRASHLINE':
                # print('finding crash_line')
                # NOTE(review): this local is unused; find_crashline already
                # stored the file name in self._file_crash.
                file = path.split(self._file_crash)[1]
                # self.regex_crash_line = re.compile('\\.cpp:(\d+)')
                self.crash_line = self.regex_crash_line.search(self.data_buff)
                if self.crash_line is None:
                    # print('REASON crashline NOT FOUND')
                    return 'NEED_MORE'
                self.crash_line = int(self.crash_line.group(2))
                # print(self.crash_line)
                self.rtcode = self.REGEX_RT_CODE.search(self.data_buff)
                if self.rtcode is None:
                    self.rtcode = '-'
                else:
                    self.rtcode = self.rtcode.group(1)
                self.stop_reason = self.REGEX_STOP_REASON.search(self.data_buff)
                if self.stop_reason is None:
                    # print('REASON stop reason NOT FOUND')
                    return 'NEED_MORE'
                self.stop_reason = self.stop_reason.group(1)
                self.proc_state = 'CRASHED, stop reason = %s' % self.stop_reason
                self.status = 'CRASHLINE_FOUND'
                self.data_buff = ''
                self.change_status(self.proc_state)

        def proc_stopped(self):
            """True once the debuggee has either crashed or exited."""
            return self.status in {'CRASHED', 'STOPPED'}

        def find_crashline(self, file):
            """Switch to crash-line hunting for the given source file."""
            self.status = 'FINDING_CRASHLINE'
            self._file_crash = path.split(file)[1]
            self.regex_crash_line = re.compile( \
                self.STR_REGEX_CRASH_LINE.format(name=self.encode_save(self._file_crash)))
            # print(self.STR_REGEX_CRASH_LINE.format(name=self.encode_save(self._file_crash)))

    # File extensions this debugger accepts.
    supported_exts = ['cpp']

    def __init__(self, file):
        # super(LLDBDebugger, self).__init__(file)
        self.file = file
        # Input typed by the user before the process is RUNNING is buffered
        # here and flushed once the debuggee starts (see write()).
        self.in_buff = ''
        self.on_status_change = None

    # NOTE(review): defined without `self` — apparently called on the class
    # (LLDBDebugger.is_runnable()); confirm, or mark as a @staticmethod.
    def is_runnable():
        return sublime.platform() == 'osx'

    def has_var_view_api(self):
        """This debugger supports the variable-view API."""
        return True

    def compile(self):
        """Compile self.file with g++ (-g) into ./main next to the source.

        Returns (returncode, combined stdout+stderr text).
        """
        cmd = 'g++ -std=gnu++11 -g -o main "%s"' % self.file
        PIPE = subprocess.PIPE
        # print(dir(self))
        if self.on_status_change is not None:
            self.on_status_change('COMPILING')
        #cwd=os.path.split(self.file)[0], \
        p = subprocess.Popen(cmd, \
            shell=True, stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT, \
            cwd=os.path.split(self.file)[0])
        p.wait()
        return (p.returncode, p.stdout.read().decode())

    def run(self, args=' -debug'):
        """Start lldb on ./main and begin listening to its output.

        Program stdout goes to output.txt (truncated first); lldb's own
        console output is parsed asynchronously by __process_listener.
        """
        self.analyzer = LLDBDebugger.LLDBAnalyzer(self.on_status_change)
        cmd = 'lldb main'
        PIPE = subprocess.PIPE
        #cwd=os.path.split(self.file)[0], \
        process = subprocess.Popen(cmd, \
            shell=True, stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT, \
            cwd=os.path.split(self.file)[0])
        self.process = process
        # Count of echoed characters to skip (lldb echoes what we write).
        self.miss_cnt = 0
        # Truncate the program-output capture file.
        out_file = path.join(path.split(self.file)[0], 'output.txt')
        f = open(out_file, 'w')
        f.write('')
        f.close()
        cmd = 'process launch -o output.txt -- %s\n' % args
        # cmd = 'process launch -- %s\n' % args
        process.stdin.write(cmd.encode('utf-8'))
        process.stdin.flush()
        # self.miss_cnt += len(cmd)
        self.need_out = True
        # self.write('123\n')
        sublime.set_timeout_async(self.__process_listener)
        # process.stdin.write('123\n'.encode('utf-8'))
        # process.stdin.flush()
        # return "END"
        # return process.stdout.read(4096).decode()

    def __on_out(self, s):
        """Handle one character of lldb output; act on completed lines."""
        # if self.miss_cnt > 0:
        #     self.miss_cnt -= 1
        #     return None
        analyzer = self.analyzer
        proc = self.process
        # print(s, end='')
        self.analyzer.add_out(s)
        if s == '\n':
            self.analyzer.analyze()
            if self.analyzer.status == 'RUNNING':
                # Flush any user input buffered before launch completed.
                self.write(self.in_buff)
                self.in_buff = ''
            elif self.analyzer.proc_stopped():
                if analyzer.status == 'CRASHED':
                    # Ask lldb for a backtrace so the crash line appears.
                    analyzer.find_crashline(self.file)
                    proc.stdin.write('bt\n'.encode('utf-8'))
                    proc.stdin.flush()
            if analyzer.status == 'CRASHLINE_FOUND':
                # Crash fully diagnosed: kill the debuggee, quit lldb and
                # report the captured output plus the crash line.
                self.need_out = False
                proc.stdin.write('process kill\n'.encode('utf-8'))
                proc.stdin.flush()
                proc.stdin.write('exit\n'.encode('utf-8'))
                proc.stdin.flush()
                proc.wait()
                # print(proc.stdout.read().decode())
                # print(analyzer.crash_line)
                file_out = open(path.join(path.dirname(self.file), 'output.txt'))
                output = file_out.read(self.READ_LIMIT)
                # print('Hello i am here the out size ->', len(output))
                file_out.close()
                # print('out -> ', output)
                self.on_out(output)
                self.on_stop(analyzer.rtcode, crash_line=analyzer.crash_line)
            elif analyzer.status == 'STOPPED':
                # Normal exit: tear lldb down and report the output.
                proc.terminate()
                proc.kill()
                # print(self.file)
                output = open(path.join(path.dirname(self.file), 'output.txt')).read(self.READ_LIMIT)
                # NOTE(review): read(READ_LIMIT) already caps the length, so
                # this truncation branch appears unreachable — confirm intent.
                if len(output) > self.READ_LIMIT:
                    output = "<to big>" + output[-self.READ_LIMIT:]
                self.on_out(output)
                self.on_stop(analyzer.rtcode)
                #sys.stdout.flush()

    def __process_listener(self):
        """Blocking read loop over lldb's stdout, one character at a time."""
        proc = self.process
        while proc.returncode is None:
            s = proc.stdout.read(1).decode()
            if s and self.need_out:
                # Skip characters lldb echoes back from our own writes.
                if self.miss_cnt > 0:
                    self.miss_cnt -= 1
                    continue
                self.__on_out(s)
            else:
                return None

    def set_calls(self, on_out, on_stop, on_status_change):
        '''
        please set calls
        and calls on program out or stop
        sample
        on_out(s, is_err=False)
        on_stop(rtcode, crash_line=None)
        '''
        # print('!!!!! CALLS SET')
        self.on_out = on_out
        self.on_stop = on_stop
        self.on_status_change = on_status_change

    def write(self, s):
        """Send user input to the debuggee, or buffer it until it runs."""
        if self.analyzer.status == 'RUNNING':
            # Our input will be echoed back; arrange to skip the echo.
            self.miss_cnt += len(s)
            self.process.stdin.write(s.encode('utf-8'))
            self.process.stdin.flush()
        else:
            self.in_buff += s
        # print(self.in_buff)

    def terminate(self):
        """Interrupt lldb with SIGINT (signal 2)."""
        proc = self.process
        proc.send_signal(2)
| Jatana/FastOlympicCoding | debuggers/Cpp_OSX_Debugger.py | Cpp_OSX_Debugger.py | py | 7,219 | python | en | code | 365 | github-code | 13 |
1235522412 | from secrets import access_key, secret_access_key
import boto3
import os
# S3 client built from credentials kept out of source control (secrets.py).
client = boto3.client('s3',
                      aws_access_key_id=access_key,
                      aws_secret_access_key=secret_access_key)

# Upload every Python source file in the current directory under "python/".
for file in os.listdir():
    # endswith() instead of a substring test: `'.py' in file` also matched
    # names like "x.pyc" or "my.python.txt".
    if file.endswith('.py'):
        upload_file_bucket = 'youtube-dummy-bucket'
        upload_file_key = 'python/' + str(file)
        client.upload_file(file, upload_file_bucket, upload_file_key)
| Derrick-Sherrill/DerrickSherrill.com | automatic_s3_uploader.py | automatic_s3_uploader.py | py | 446 | python | en | code | 307 | github-code | 13 |
9071040680 | import sys
sys.stdin = open("in_out/section2/chapter2/in3.txt", "rt")
def sol(n, s, e, k):
    """Return the k-th smallest value inside n[s..e] (1-based, inclusive)."""
    window = sorted(n[s - 1:e])
    return window[k - 1]
# Number of test cases on the first input line.
case = int(input())
for i in range(case):
    # n_count is the declared list length; only s, e, k are used below.
    n_count, s, e, k = map(int, input().split())
    n = list(map(int, input().split()))
    # NOTE(review): SWEA-style answers are usually printed as "#<case>" with
    # the case number starting at 1; this prints the 0-based index — confirm.
    print("#%d %d" %(i, sol(n, s, e, k)))
    print(i)
| mins1031/coding-test | section2/KNum.py | KNum.py | py | 328 | python | en | code | 0 | github-code | 13 |
25588243243 | from random import randint, choice as randchoice
from time import time as current_time
from datetime import timedelta
from input import Box
from constants import Colours
import pygame
import pygame.freetype
import sys
class Generate:
    """Generates a solved 9x9 sudoku grid plus a partially-blanked copy."""

    def __init__(self):
        # Modify table into sudoku
        self.solved = self.make_grid()
        # Remove some cells so that player must fill puzzle
        self.unsolved = self.unsolve()

    def make_grid(self, table=None):
        """Fill a 9x9 grid with a valid sudoku solution.

        Works row by row, choosing a random legal value for each cell; when a
        row dead-ends it is reset and the whole routine recurses with the
        partially-filled table.
        NOTE(review): each dead-end adds a stack frame, so pathological runs
        could grow deep — confirm this stays within the recursion limit.
        """
        # If table not generated (not passed as argument, 1st recursion)
        # Generate blank 9x9 grid, grid will be modified into sudoku puzzle
        if table is None:
            table = [[0 for j in range(9)] for i in range(9)]

        # Intuitive algorithm to find 3x3 unit from x & y coordinates
        def get_unit(x, y):
            x_start = x // 3 * 3
            y_start = y // 3 * 3
            x_range = range(x_start, x_start + 3)
            y_range = range(y_start, y_start + 3)
            return [table[i][j] for j in y_range for i in x_range]

        # Returns all possible values for cell
        def avail_vals(x, y):
            row = table[x]  # Values in row
            col = [table[e][y] for e in range(9)]  # Values in column
            unit = get_unit(x, y)  # Values in 3x3 unit
            sections = [row, col, unit]  # Saving code from too many "if and"s

            # Return number if number is not in any conflicting sections
            def conflicting(i):
                return any(i in section for section in sections)
            return [i for i in range(1, 10) if not conflicting(i)]

        # Iterate through every cell in blank soduku
        for i, row in enumerate(table):
            for j, cell in enumerate(row):
                available_values = avail_vals(i, j)
                # If no available values and row not full, recurse & reset row
                if not available_values and not all(row):
                    # Reset current row
                    table[i] = [0 for _ in range(9)]
                    return self.make_grid(table)
                # Else if cell is 0 (not assigned a value)
                if not table[i][j]:
                    # Assign a random available value to cell
                    table[i][j] = randchoice(available_values)
        return table

    def unsolve(self):
        """Return a copy of the solved grid with ~half the cells blanked (0)."""
        # Returns value to fill cell with.
        # If random number is 1, cell stays solved
        # If random number is 0, cell becomes unsolved (0), player must fill in
        def chance(cell):
            return cell if randint(0, 1) else 0
        # Run chance function for each cell in sudoku puzzle
        return [[chance(cell) for cell in row] for row in self.solved]
class Gui:
    """Pygame front-end: renders the sudoku board and its input boxes."""

    def __init__(self, screen, unsolved_grid, solved_grid):
        self.screen = screen
        # Soduku game grids
        self.unsolved_grid = unsolved_grid
        self.solved_grid = solved_grid
        self.boxes = self.init_boxes()  # Initialize input boxes
        # Colour constants (R, G, B) & Rendering constants
        self.colours = Colours()
        self.GAME_FONT = pygame.freetype.SysFont('arial', 34)
        # Game variables
        self.start_time = current_time()

    def start(self):
        """Main event loop; never returns (sys.exit on window close)."""
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
            self.update()  # Update screen every loop

    def update(self):
        """Redraw one frame: clear, clock, board, flip."""
        self.screen.fill(self.colours.BLACK)
        self.display_clock()  # Display time elapsed
        self.draw_board()  # Draw Sudoku board
        pygame.display.flip()

    def display_clock(self):
        """Render the elapsed time (mm:ss) in the board's header area."""
        # Function for converting number of seconds to formatted version.
        # I.E. 65 seconds = 1:05
        def format_time(seconds):
            return str(timedelta(seconds=seconds))
        # Formatted number of seconds
        formatted = format_time(int(current_time() - self.start_time))
        self.GAME_FONT.render_to(self.screen,
                                 (305, 25),
                                 f"{formatted[2::]} elapsed",
                                 self.colours.WHITE)

    def draw_board(self):
        """Draw the grid lines and every input box, clearing wrong answers."""
        # Draw each square as 78 pixels, lines as 10px, every third line as 14px
        # How do I can't even explain this algorithm with comments
        # I like to do math instead of too many if statements to avoid slow code
        # Trust me it works though
        def draw_lines():
            # x/y/z below branchlessly pick 14px for every third line and
            # 10px otherwise — presumably intentional; verify against layout.
            width = 0
            start = 0
            for i in range(10):
                x = (i % 3 - 1) * 2 + 1
                y = abs((x / abs(x) - 1) / 2) * 14
                z = ((x / abs(x) + 1) / 2) * 10
                width = int(y + z)
                rectangle = pygame.Rect(start, 82, width, 900)
                pygame.draw.rect(self.screen, self.colours.GREY, rectangle)
                rectangle = pygame.Rect(0, start + 82, 900, width)
                pygame.draw.rect(self.screen, self.colours.GREY, rectangle)
                start += (78 + width)

        def draw_boxes():
            for i, row in enumerate(self.boxes):
                for j, box in enumerate(row):
                    box.update()  # Update box
                    # If player inputs incorrect answer into cell
                    if box.content is not None\
                            and box.content != self.solved_grid[i][j]:
                        box.content = None
        draw_lines()
        draw_boxes()

    def init_boxes(self):
        """Create the 9x9 grid of input Box widgets, one per cell."""
        boxes = list()

        # Function that assists in returning where each box
        # should be placed on board
        def get_width(val):
            x = (val % 3 - 1) * 2 + 1
            y = abs((x / abs(x) - 1) / 2) * 14
            z = ((x / abs(x) + 1) / 2) * 10
            width = int(y + z)
            return width

        # Function that returns where each box should be placed on board
        def next_coords(val):
            x = (val % 3 - 1) * 2 + 1
            y = abs((x / abs(x) - 1) / 2) * 14
            z = ((x / abs(x) + 1) / 2) * 10
            width1 = int(y + z)
            return 78 + width1

        start1 = 0
        start2 = 0
        # Generate one input box for each cell
        for i in range(9):
            width1 = get_width(i)
            boxes.append([])
            for j in range(9):
                width2 = get_width(j) + 82
                rect = pygame.Rect(start1 + width1, start2 + width2, 78, 78)
                # Respective value of soduku cell at index
                value = self.unsolved_grid[i][j]
                boxes[i].append(Box(self.screen, rect, value))
                start2 += next_coords(j)
            start1 += next_coords(i)
            start2 = 0
        return boxes
| antony-c/sudoku | sudoku.py | sudoku.py | py | 7,005 | python | en | code | 1 | github-code | 13 |
74907160337 | import cv2 as cv
import numpy as np
img = cv.imread("../../resource/chapter8/opencv-logo.png")
def rotate_bond(img, angle):
    """Rotate a 3-channel image by `angle` degrees without clipping corners,
    then display both the original and the rotated result."""
    height, width, _ = img.shape
    center = (width // 2, height // 2)
    matrix = cv.getRotationMatrix2D(center, -angle, 1.0)
    # The rotated bounding box is derived from the matrix's cos/sin terms.
    abs_cos = np.abs(matrix[0, 0])
    abs_sin = np.abs(matrix[0, 1])
    new_w = int(height * abs_sin + width * abs_cos)
    new_h = int(height * abs_cos + width * abs_sin)
    # Shift the translation so the image stays centred in the new canvas.
    matrix[0, 2] += (new_w / 2) - center[0]
    matrix[1, 2] += (new_h / 2) - center[1]
    rotated = cv.warpAffine(img, matrix, (new_w, new_h))
    cv.imshow("origin", img)
    cv.imshow("res", rotated)
    cv.waitKey()
# Demo: rotate the loaded logo by 45 degrees and display it.
rotate_bond(img, 45)
| codezzzsleep/records2.1 | robot-and-vision/test/chapter8/demo03.py | demo03.py | py | 555 | python | en | code | 0 | github-code | 13 |
31392690189 | from pathlib import Path
def advance(line, pos=0, right=3):
pos = (pos + right) % 31
is_tree = True if line[pos] == "#" else False
if is_tree:
return 1, pos
else:
return 0, pos
def go_down(lines, down=1, right=3):
    """Count trees hit descending the map `down` rows / `right` cells a step."""
    trees = 0
    pos = 0
    for row_index, row in enumerate(lines):
        # Skip the starting row and any row not on this slope's stride.
        if row_index == 0 or row_index % down:
            continue
        hit, pos = advance(row, pos, right)
        trees += hit
    return trees
class Solution:
    """Loads the puzzle input and writes both answers next to this file."""

    def __init__(self):
        # Read the map once; each element is one row of the grid.
        with open(Path(__file__).parent / "input", "r") as f:
            self.input = f.readlines()

    def solve_part_1(self):
        """Trees hit on the single 'right 3, down 1' slope."""
        answer = go_down(self.input, 1, 3)
        print(answer)
        return answer

    def solve_part_2(self):
        """Product of tree counts over the five prescribed slopes."""
        answer = 1
        for down, right in ((1, 1), (1, 3), (1, 5), (1, 7), (2, 1)):
            answer *= go_down(self.input, down, right)
        print(answer)
        return answer

    def save_results(self):
        """Write each part's answer to a sibling file ('part1' / 'part2')."""
        for name, solver in (("part1", self.solve_part_1),
                             ("part2", self.solve_part_2)):
            with open(Path(__file__).parent / name, "w") as opened_file:
                opened_file.write(str(solver()))
if __name__ == "__main__":
    # Script entry point: compute both answers and persist them to disk.
    solution = Solution()
    solution.save_results()
| Gramet/adventofcode | 2020/day3/solution.py | solution.py | py | 1,421 | python | en | code | 0 | github-code | 13 |
8310410816 | from flask_restful import Resource, reqparse
from api.models import Booking
from api.utils import get_customer_data
from bson import ObjectId
# Shared request parser for booking payloads; every field is mandatory.
parser = reqparse.RequestParser()
# customer_info must carry at least the customer's id; the full record is
# fetched from the customer service via get_customer_data().
parser.add_argument('customer_info', type=dict, required=True, help='Customer information must be a dictionary')
parser.add_argument('booking_date', type=str, required=True)
parser.add_argument('booking_time', type=str, required=True)
parser.add_argument('booking_place', type=str, required=True)
parser.add_argument('service', type=str, required=True)
parser.add_argument('status', type=str, required=True)
class BookingCreate(Resource):
    """POST endpoint that creates a booking for an existing customer."""

    def post(self):
        args = parser.parse_args()
        customer_id = args.get('customer_info').get('id')
        customer_data = get_customer_data(customer_id)
        # Guard clause: refuse to create a booking for an unknown customer.
        if not customer_data:
            return {'error': 'Failed to fetch customer data'}, 400
        booking = Booking(
            customer=customer_data,  # full customer record, not just the id
            booking_date=args.get('booking_date'),
            booking_time=args.get('booking_time'),
            booking_place=args.get('booking_place'),
            service=args.get('service'),
            status=args.get('status'),
        )
        return {'_id': str(booking._id)}
class BookingDetail(Resource):
    """GET/PUT/DELETE endpoints for a single booking, addressed by its id."""

    def get(self, _id):
        """Return the booking as a JSON-serializable dict, or 404."""
        booking = Booking.get_by_id(_id)
        if booking:
            booking['_id'] = str(booking['_id'])  # ObjectId is not JSON-serializable
            return booking
        else:
            return {'error': 'Booking not found'}, 404

    def put(self, _id):
        """Replace the booking's fields with the request payload."""
        booking = Booking.get_by_id(_id)
        if not booking:
            return {'error': 'Booking not found'}, 404
        args = parser.parse_args()
        customer_info = args.get('customer_info')
        customer_data = get_customer_data(customer_info.get('id'))
        if not customer_data:
            return {'error': 'Failed to fetch customer data'}, 400
        # Bug fix: get_by_id() returns a plain dict (see get()/delete() in this
        # class), so the previous attribute assignments (booking.customer = ...)
        # and booking.save() could never work on it.  Rebuild a Booking
        # instance the same way delete() does, then persist it.
        # NOTE(review): assumes Booking.save() upserts by _id — confirm in model.
        booking_instance = Booking(_id=booking['_id'],
                                   customer=customer_data,
                                   booking_date=args.get('booking_date'),
                                   booking_time=args.get('booking_time'),
                                   booking_place=args.get('booking_place'),
                                   service=args.get('service'),
                                   status=args.get('status'))
        booking_instance.save()
        return {'message': 'Booking updated successfully'}

    def delete(self, _id):
        """Delete the booking if it exists."""
        booking = Booking.get_by_id(_id)
        if booking:
            # Rehydrate a Booking instance so the model's delete() can run.
            booking_instance = Booking(_id=booking['_id'],
                                       customer=booking['customer'],
                                       booking_date=booking['booking_date'],
                                       booking_time=booking['booking_time'],
                                       booking_place=booking['booking_place'],
                                       service=booking['service'],
                                       status=booking['status'])
            booking_instance.delete()  # Call delete() method on the instance
            return {'message': 'Booking deleted successfully'}
        else:
            return {'error': 'Booking not found'}, 404
class BookingList(Resource):
    """GET endpoint returning every booking."""

    def get(self):
        all_bookings = list(Booking.get_all())
        # Stringify each ObjectId so the payload is JSON-serializable.
        for booking in all_bookings:
            booking['_id'] = str(booking['_id'])
        return all_bookings
| rohteemie/transafe-booking-service | api/controllers.py | controllers.py | py | 3,752 | python | en | code | 1 | github-code | 13 |
33774429726 | import argparse
import requests
from lxml import etree as ET
class Preprocess():
    """Builds and sends SOAP requests to the HP Service Manager (SM) API."""

    def __init__(self):
        # Severity mappings cache; not populated in this module.
        self.severities = {}

    def Parser(self):
        """Build the CLI argument parser holding all SM request defaults.

        NOTE(review): user/password defaults are embedded here — confirm
        these are test-only credentials and not production secrets.
        """
        parser_arg = argparse.ArgumentParser()
        parser_arg.add_argument('-A', '--action', help='Action for SM Create/Update default Create', default='Create')
        parser_arg.add_argument('-u', '--url', help='Address API SM', default='10.121.204.68')
        parser_arg.add_argument('-p', '--port', help='Port API SM', default='13085')
        parser_arg.add_argument('-s', '--secure', help='https or http default http', default='http')
        parser_arg.add_argument('-U', '--user', help='USER for API SM', default='int_zabbix')
        parser_arg.add_argument('-P', '--passw', help='PASSWORD for API SM', default='123456')
        parser_arg.add_argument('-num', '--number', default='')
        parser_arg.add_argument('-des', '--description', default='Default TAG for Description')
        parser_arg.add_argument('-ser', '--service', default='CI01810729')
        parser_arg.add_argument('-tem', '--TemplateID', default='10000795')
        parser_arg.add_argument('-pri', '--priority', default='5. Низкий')
        parser_arg.add_argument('-ass', '--assignmentGroup', default='СБТ ДК ОСА Группа мониторинга (Прутских С.С.) (00010285)')
        parser_arg.add_argument('-ini', '--initiator', default='zabbix (00642903)')
        parser_arg.add_argument('-cal', '--callbackContact', default='zabbix (00642903)')
        parser_arg.add_argument('-asi', '--assignee', default='zabbix (00642903)')
        parser_arg.add_argument('-dev', '--device', default='CI00894355')
        parser_arg.add_argument('-res', '--resolution', default='')
        parser_arg.add_argument('-res_cod', '--resolutionCode', default='Решено полностью')
        parser_arg.add_argument('-dea', '--deadlineBreachCause', default='')
        parser_arg.add_argument('-typ', '--type', default='Тестовый')
        parser_arg.add_argument('-dom', '--domen', default='ALPHA')
        parser_arg.add_argument('-act', '--activ', default='Создание учетной записи и оповещений (Zabbix)')
        parser_arg.add_argument('-sev', '--severity', default='Не знаю')
        parser_arg.add_argument('-tag', '--tag', default='')
        return parser_arg

    def XMLrequest(self, action, number, description, service, TemplateID, priority, assignmentGroup, initiator, callbackContact, assignee, device, resolution, resolutionCode, deadlineBreachCause, type, domen, activ, severity, tag):
        """Assemble the SOAP envelope for a <action>SBAPI_SBRequestRequest.

        Returns the lxml Element for the envelope; the caller serializes it.
        The template questions (q1..q5) are embedded as a CDATA form blob.
        """
        # XML namespaces used by the SM SOAP contract.
        soapenv = 'http://schemas.xmlsoap.org/soap/envelope/'
        pws = 'http://servicecenter.peregrine.com/PWS'
        com = 'http://servicecenter.peregrine.com/PWS/Common'
        Env = ET.Element('{%s}Envelope' % (soapenv), nsmap={"soapenv":"http://schemas.xmlsoap.org/soap/envelope/", "pws":"http://servicecenter.peregrine.com/PWS", "com":"http://servicecenter.peregrine.com/PWS/Common"})
        Hea = ET.SubElement(Env, '{%s}Header' % (soapenv))
        Bod = ET.SubElement(Env, '{%s}Body' % (soapenv))
        # The request element name embeds the action, e.g. CreateSBAPI_...
        actio = "{%s}" + action + "SBAPI_SBRequestRequest"
        Act = ET.SubElement(Bod, actio % (pws))
        mod = ET.SubElement(Act, '{%s}model' % (pws))
        key = ET.SubElement(mod, '{%s}keys' % (pws))
        num = ET.SubElement(key, '{%s}number' % (pws))
        num.text = u''+ number +''
        ins = ET.SubElement(mod, '{%s}instance' % (pws))
        des = ET.SubElement(ins, '{%s}description' % (pws))
        des.text = u''+ description +''
        ser = ET.SubElement(ins, '{%s}service' % (pws))
        ser.text = u''+ service +''
        tpl = ET.SubElement(ins, '{%s}tplID' % (pws))
        tpl.text = u''+ TemplateID +''
        pri = ET.SubElement(ins, '{%s}priority' % (pws))
        pri.text = u''+ priority +''
        asi = ET.SubElement(ins, '{%s}assignmentGroup' % (pws))
        asi.text = u''+ assignmentGroup +''
        opt = ET.SubElement(ins, '{%s}options' % (pws))
        opt.text = ET.CDATA(u'<form><select id="q1" label="Класс среды объектов мониторинга" mandatory="true" sbmodify="true" style="combo" visible="true">' + type + '</select><select id="q2" label="Домен" mandatory="true" sbmodify="true" style="combo" visible="true">' + domen + '</select><select id="q3" label="Тип запроса" mandatory="true" sbmodify="true" style="combo" visible="true">' + activ + '</select><select id="q4" label="Уровень критичности ИТ-услуги" mandatory="true" sbmodify="true" style="combo" visible="true">' + severity + '</select><text id="q5" label="Тэг" mandatory="false" sbmodify="true" sbtype="string" visible="true">' + tag + '</text></form>')
        res_cod = ET.SubElement(ins, '{%s}resolutionCode' % (pws))
        res_cod.text = u''+ resolutionCode +''
        res = ET.SubElement(ins, '{%s}resolution' % (pws))
        res.text = u''+ resolution +''
        ini = ET.SubElement(ins, '{%s}initiator' % (pws))
        ini.text = u''+ initiator +''
        cal = ET.SubElement(ins, '{%s}callbackContact' % (pws))
        cal.text = u''+ callbackContact +''
        asi = ET.SubElement(ins, '{%s}assignee' % (pws))
        asi.text = u''+ assignee +''
        dev = ET.SubElement(ins, '{%s}device' % (pws))
        dev.text = u''+ device +''
        dea = ET.SubElement(ins, '{%s}deadlineBreachCause' % (pws))
        dea.text = u''+ deadlineBreachCause +''
        return Env

    def Send(self, session, secure, url, port, user, passw, action, XML, cooki):
        """POST the serialized envelope to the SM endpoint; return the response.

        A non-empty `cooki` reuses an existing JSESSIONID server session.
        """
        link = secure + '://' + url + ':' + port + '/sc62server/ws'
        if cooki == '':
            head = {'Connection': 'Keep-Alive', 'SOAPAction': action, 'Accept-Encoding': 'text/xml;charset=UTF-8', 'Content-Type': 'text/xml;charset=UTF-8'}
        else:
            head = {'Connection': 'Keep-Alive', 'SOAPAction': action, 'Accept-Encoding': 'text/xml;charset=UTF-8', 'Content-Type': 'text/xml;charset=UTF-8', 'Cookie': 'JSESSIONID='+ cooki}
        API_SM = session.post(url=link, auth=(user, passw), headers=head, data=XML.encode('utf-8'))
        return API_SM
def create_zno(msg, env_type='Тестовый'):
    """Create a ZNO request in Service Manager, then drive it to completion.

    Args:
        msg: description text for the request.
        env_type: value for the environment-class option; defaults to
            'Тестовый' (matching the Parser() default), which also makes the
            existing one-argument call sites in this module work.

    Returns:
        (True, request_number) on success,
        (False, http_status_or_None) on failure.
    """
    with requests.Session() as session:
        # Initialized up front: if Send() itself raises, `code` must still
        # exist when the except-branch returns it (previously a NameError
        # masked the real failure).
        code = None
        try:
            parser = ET.XMLParser(remove_blank_text=True)
            # Lifecycle actions applied after creation, in order.
            EndZNO = ['StartWork', 'Complete']
            cooki = '56ECA4D7C381FF33730B3EE7B781B9AE'
            answer = ''
            prepro = Preprocess()
            parser_arg = prepro.Parser()
            args = parser_arg.parse_args([])
            args.type = env_type
            # Create
            XML = prepro.XMLrequest(args.action, args.number, msg, args.service, args.TemplateID, args.priority, args.assignmentGroup, args.initiator, args.callbackContact, args.assignee, args.device, args.resolution, args.resolutionCode, args.deadlineBreachCause, args.type, args.domen, args.activ, args.severity, args.tag)
            XML = ET.tostring(XML, encoding='utf-8').decode('utf-8')
            answer = prepro.Send(session, args.secure, args.url, args.port, args.user, args.passw, args.action, XML, cooki)
            code = answer.status_code
            # cooki = answer.cookies['JSESSIONID']
            answer = ET.XML(answer.text, parser)
            # The created request number is nested deep in the response body.
            for event in answer:
                args.number = event[0][0][0][0].text
            for modify in EndZNO:
                args.action = modify
                if modify == 'Complete':
                    args.resolution = 'Выполнено'
                XML = prepro.XMLrequest(args.action, args.number, msg, args.service, args.TemplateID, args.priority, args.assignmentGroup, args.initiator, args.callbackContact, args.assignee, args.device, args.resolution, args.resolutionCode, args.deadlineBreachCause, args.type, args.domen, args.activ, args.severity, args.tag)
                XML = ET.tostring(XML, encoding='utf-8').decode('utf-8')
                answer = prepro.Send(session, args.secure, args.url, args.port, args.user, args.passw, args.action, XML, cooki)
                answer = ET.XML(answer.text, parser)
        except Exception:
            # Narrowed from BaseException so Ctrl-C / SystemExit propagate.
            return False, code
        else:
            return True, args.number
if __name__ == '__main__':
create_zno('test_zno') | userpy/selfportal-back | scripts/create_zno.py | create_zno.py | py | 8,283 | python | en | code | 0 | github-code | 13 |
34003351327 | #WHILE LOOP
#WHILE(CONDITION):
#BODY OF THE LOOP
'''
i=1
s=int(input("enter the limit"))
sum=0
while i<=s:
if i%2==0:
sum=sum+i
i=i+1
print(sum)
'''
#print didits
# n=int(input("enter the number"))
# num=n
# sum=0
# while n>0:
# d=n%10
# print(d)
# n=n//10
# sum=sum+d**3
# print("sumof digits:",sum)
#Armstrong number check:
#an Armstrong number equals the sum of the cubes of its digits (3-digit case)
'''
n=int(input("enter the number"))
num=n
sum=0
while n>0:
d=n%10
n=n//10
sum=sum+d**3
if num==sum:
print("amstrong")
else:
print("not amstrong")
'''
#AMTRONG NUMBER OF MULTIPLE DIGIT
# Armstrong check for numbers with any digit count: the number must equal
# the sum of each digit raised to the power of the digit count.
value = int(input("enter a number to check"))
remaining = value
digit_count = 0
# First pass: count the digits.
while remaining > 0:
    remaining //= 10
    digit_count += 1
# Second pass: sum each digit raised to the digit count.
remaining = value
total = 0
while remaining > 0:
    total += (remaining % 10) ** digit_count
    remaining //= 10
print("amstrong" if total == value else "not amstrong")
| mhdsulaimzed/Pycharm-Practice | pythonProject/luminartech/day 1-25/whileloop.py | whileloop.py | py | 846 | python | en | code | 0 | github-code | 13 |
25102975123 | '''
Common utilities
'''
import os
import pwd
import socket
import threading as th
import traceback
import http.client
import logging
import errno
import itertools
import re
import select
import shlex
import psutil
import subprocess
import argparse
from paramiko import SSHClient, AutoAddPolicy
logger = logging.getLogger(__name__)
class Drain(th.Thread):
    """Background thread pumping items from a generator into a sink.

    The sink is anything answering to put() (e.g. a queue).  Call close()
    to stop after the item currently being delivered.
    """

    def __init__(self, source, destination):
        super(Drain, self).__init__()
        self.source = source
        self.destination = destination
        self._interrupted = th.Event()

    def run(self):
        items = iter(self.source)
        while True:
            try:
                item = next(items)
            except StopIteration:
                break
            self.destination.put(item)
            # Interruption is honoured only after the current item lands.
            if self._interrupted.is_set():
                break

    def close(self):
        """Request the drain loop to stop (takes effect after current put)."""
        self._interrupted.set()
class SecuredShell(object):
    """Thin paramiko wrapper: run commands and move files over SSH/SFTP."""

    def __init__(self, host, port, username, timeout):
        self.host = host
        self.port = port
        self.username = username
        # Connection timeout in seconds, passed to paramiko's connect().
        self.timeout = timeout

    def connect(self):
        """Open and return a connected paramiko SSHClient.

        On a ValueError (a known PyCrypto CTR-mode incompatibility) the
        Crypto.Cipher.AES.new factory is monkeypatched and the connection
        is retried once; see the linked article in the warning below.
        """
        logger.debug(
            "Opening SSH connection to {host}:{port}".format(
                host=self.host, port=self.port))
        client = SSHClient()
        client.load_system_host_keys()
        # NOTE(review): AutoAddPolicy blindly trusts unknown host keys —
        # acceptable for monitoring agents, but confirm this is intended.
        client.set_missing_host_key_policy(AutoAddPolicy())
        try:
            client.connect(
                self.host,
                port=self.port,
                username=self.username,
                timeout=self.timeout, )
        except ValueError as e:
            logger.error(e)
            logger.warning(
                """
                Patching Crypto.Cipher.AES.new and making another attempt.
                See here for the details:
                http://uucode.com/blog/2015/02/20/workaround-for-ctr-mode-needs-counter-parameter-not-iv/
                """)
            client.close()
            import Crypto.Cipher.AES
            orig_new = Crypto.Cipher.AES.new

            def fixed_AES_new(key, *ls):
                # Drop the bogus IV argument for CTR mode, then delegate.
                if Crypto.Cipher.AES.MODE_CTR == ls[0]:
                    ls = list(ls)
                    ls[1] = ''
                return orig_new(key, *ls)

            Crypto.Cipher.AES.new = fixed_AES_new
            client.connect(
                self.host,
                port=self.port,
                username=self.username,
                timeout=self.timeout, )
        return client

    def execute(self, cmd):
        """Run `cmd` remotely; return (stdout_bytes, stderr_bytes, exit_code)."""
        logger.info("Execute on %s: %s", self.host, cmd)
        with self.connect() as client:
            _, stdout, stderr = client.exec_command(cmd)
            output = stdout.read()
            errors = stderr.read()
            err_code = stdout.channel.recv_exit_status()
        return output, errors, err_code

    def rm(self, path):
        """Remove a remote file (rm -f)."""
        return self.execute("rm -f %s" % path)

    def rm_r(self, path):
        """Remove a remote tree recursively (rm -rf)."""
        return self.execute("rm -rf %s" % path)

    def mkdir(self, path):
        """Create a remote directory, including parents (mkdir -p)."""
        return self.execute("mkdir -p %s" % path)

    def send_file(self, local_path, remote_path):
        """Upload a local file to the remote host via SFTP."""
        logger.info(
            "Sending [{local}] to {host}:[{remote}]".format(
                local=local_path, host=self.host, remote=remote_path))
        with self.connect() as client, client.open_sftp() as sftp:
            result = sftp.put(local_path, remote_path)
        return result

    def get_file(self, remote_path, local_path):
        """Download a remote file to the local machine via SFTP."""
        logger.info(
            "Receiving from {host}:[{remote}] to [{local}]".format(
                local=local_path, host=self.host, remote=remote_path))
        with self.connect() as client, client.open_sftp() as sftp:
            result = sftp.get(remote_path, local_path)
        return result

    def async_session(self, cmd):
        """Start `cmd` in a non-blocking AsyncSession and return it."""
        return AsyncSession(self, cmd)
def check_ssh_connection():
    """CLI helper: attempt an SSH connection and print the result of `ls -l`."""
    logging.basicConfig(
        level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
    logging.getLogger("paramiko.transport").setLevel(logging.DEBUG)
    arg_parser = argparse.ArgumentParser(
        description='Test SSH connection for monitoring.')
    arg_parser.add_argument(
        '-e', '--endpoint', default='example.org', help='which host to try')
    arg_parser.add_argument(
        '-u', '--username', default=pwd.getpwuid(os.getuid())[0],
        help='SSH username')
    arg_parser.add_argument('-p', '--port', default=22, type=int, help='SSH port')
    options = arg_parser.parse_args()
    logging.info(
        "Checking SSH to %s@%s:%d", options.username, options.endpoint, options.port)
    shell = SecuredShell(options.endpoint, options.port, options.username, 10)
    print(shell.execute("ls -l"))
class AsyncSession(object):
    """Non-blocking wrapper around a paramiko channel running one command."""

    def __init__(self, ssh, cmd):
        self.client = ssh.connect()
        transport = self.client.get_transport()
        self.session = transport.open_session()
        self.session.get_pty()
        self.session.exec_command(cmd)

    def send(self, data):
        """Forward data to the remote command's stdin."""
        self.session.send(data)

    def close(self):
        """Tear down both the channel and the underlying SSH client."""
        self.session.close()
        self.client.close()

    def finished(self):
        """True once the remote command has exited."""
        return self.session.exit_status_ready()

    def read_maybe(self):
        """Return up to 4 KiB of pending output, or None if nothing is ready."""
        return self.session.recv(4096) if self.session.recv_ready() else None
# HTTP codes
HTTP = http.client.responses
# Extended list of HTTP status codes(WEBdav etc.)
# HTTP://en.wikipedia.org/wiki/List_of_HTTP_status_codes
WEBDAV = {
102: 'Processing',
103: 'Checkpoint',
122: 'Request-URI too long',
207: 'Multi-Status',
226: 'IM Used',
308: 'Resume Incomplete',
418: 'I\'m a teapot',
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
425: 'Unordered Collection',
426: 'Upgrade Required',
444: 'No Response',
449: 'Retry With',
450: 'Blocked by Windows Parental Controls',
499: 'Client Closed Request',
506: 'Variant Also Negotiates',
507: 'Insufficient Storage',
509: 'Bandwidth Limit Exceeded',
510: 'Not Extended',
598: 'network read timeout error',
599: 'network connect timeout error',
999: 'Common Failure',
}
HTTP.update(WEBDAV)
# NET codes: symbolic names for network-level result codes.  These are the
# standard Linux errno strings (0-131), plus 999 as a generic catch-all.
NET = {
    0: "Success",
    1: "Operation not permitted",
    2: "No such file or directory",
    3: "No such process",
    4: "Interrupted system call",
    5: "Input/output error",
    6: "No such device or address",
    7: "Argument list too long",
    8: "Exec format error",
    9: "Bad file descriptor",
    10: "No child processes",
    11: "Resource temporarily unavailable",
    12: "Cannot allocate memory",
    13: "Permission denied",
    14: "Bad address",
    15: "Block device required",
    16: "Device or resource busy",
    17: "File exists",
    18: "Invalid cross-device link",
    19: "No such device",
    20: "Not a directory",
    21: "Is a directory",
    22: "Invalid argument",
    23: "Too many open files in system",
    24: "Too many open files",
    25: "Inappropriate ioctl for device",
    26: "Text file busy",
    27: "File too large",
    28: "No space left on device",
    29: "Illegal seek",
    30: "Read-only file system",
    31: "Too many links",
    32: "Broken pipe",
    33: "Numerical argument out of domain",
    34: "Numerical result out of range",
    35: "Resource deadlock avoided",
    36: "File name too long",
    37: "No locks available",
    38: "Function not implemented",
    39: "Directory not empty",
    40: "Too many levels of symbolic links",
    41: "Unknown error 41",
    42: "No message of desired type",
    43: "Identifier removed",
    44: "Channel number out of range",
    45: "Level 2 not synchronized",
    46: "Level 3 halted",
    47: "Level 3 reset",
    48: "Link number out of range",
    49: "Protocol driver not attached",
    50: "No CSI structure available",
    51: "Level 2 halted",
    52: "Invalid exchange",
    53: "Invalid request descriptor",
    54: "Exchange full",
    55: "No anode",
    56: "Invalid request code",
    57: "Invalid slot",
    58: "Unknown error 58",
    59: "Bad font file format",
    60: "Device not a stream",
    61: "No data available",
    62: "Timer expired",
    63: "Out of streams resources",
    64: "Machine is not on the network",
    65: "Package not installed",
    66: "Object is remote",
    67: "Link has been severed",
    68: "Advertise error",
    69: "Srmount error",
    70: "Communication error on send",
    71: "Protocol error",
    72: "Multihop attempted",
    73: "RFS specific error",
    74: "Bad message",
    75: "Value too large for defined data type",
    76: "Name not unique on network",
    77: "File descriptor in bad state",
    78: "Remote address changed",
    79: "Can not access a needed shared library",
    80: "Accessing a corrupted shared library",
    81: ".lib section in a.out corrupted",
    82: "Attempting to link in too many shared libraries",
    83: "Cannot exec a shared library directly",
    84: "Invalid or incomplete multibyte or wide character",
    85: "Interrupted system call should be restarted",
    86: "Streams pipe error",
    87: "Too many users",
    88: "Socket operation on non-socket",
    89: "Destination address required",
    90: "Message too long",
    91: "Protocol wrong type for socket",
    92: "Protocol not available",
    93: "Protocol not supported",
    94: "Socket type not supported",
    95: "Operation not supported",
    96: "Protocol family not supported",
    97: "Address family not supported by protocol",
    98: "Address already in use",
    99: "Cannot assign requested address",
    100: "Network is down",
    101: "Network is unreachable",
    102: "Network dropped connection on reset",
    103: "Software caused connection abort",
    104: "Connection reset by peer",
    105: "No buffer space available",
    106: "Transport endpoint is already connected",
    107: "Transport endpoint is not connected",
    108: "Cannot send after transport endpoint shutdown",
    109: "Too many references: cannot splice",
    110: "Connection timed out",
    111: "Connection refused",
    112: "Host is down",
    113: "No route to host",
    114: "Operation already in progress",
    115: "Operation now in progress",
    116: "Stale NFS file handle",
    117: "Structure needs cleaning",
    118: "Not a XENIX named type file",
    119: "No XENIX semaphores available",
    120: "Is a named type file",
    121: "Remote I/O error",
    122: "Disk quota exceeded",
    123: "No medium found",
    124: "Wrong medium type",
    125: "Operation canceled",
    126: "Required key not available",
    127: "Key has expired",
    128: "Key has been revoked",
    129: "Key was rejected by service",
    130: "Owner died",
    131: "State not recoverable",
    999: 'Common Failure',
}
def log_stdout_stderr(log, stdout, stderr, comment=""):
    """
    Poll *stdout* and *stderr* streams (non-blocking, zero timeout) and
    write any available contents to *log*.

    stdout contents are logged at DEBUG, stderr contents at WARNING.
    *stderr* may be None, in which case only stdout is polled.
    """
    readable = select.select([stdout], [], [], 0)[0]
    exceptional = select.select([stderr], [], [], 0)[0] if stderr else []
    log.debug("Selected: %s, %s", readable, exceptional)
    # Fixed: the original removed items from `readable`/`exceptional` while
    # iterating over them, which skips every second ready handle; the
    # removal served no purpose since both lists are locals.
    for handle in readable:
        line = handle.read()
        if line:
            log.debug("%s stdout: %s", comment, line.strip())
    for handle in exceptional:
        line = handle.read()
        if line:
            # logger.warn() is deprecated; warning() is the supported name.
            log.warning("%s stderr: %s", comment, line.strip())
def expand_to_milliseconds(str_time):
    """
    Convert a duration string like ``1d2s`` into an integer number of
    milliseconds (see expand_time for the accepted units).
    """
    return expand_time(str_time, 'ms', 1000)
def expand_to_seconds(str_time):
    """
    Convert a duration string like ``1d2s`` into an integer number of
    seconds (see expand_time for the accepted units).
    """
    return expand_time(str_time, 's', 1)
def expand_time(str_time, default_unit='s', multiplier=1):
    """
    Convert a duration string like ``1d2s`` into an integer number of
    *multiplier*-scaled seconds.

    Bare numbers take *default_unit*.  Units (case-insensitive): ms, s,
    m, h, d, w.  Raises ValueError for any other unit.
    """
    # Seconds per unit; a lookup table replaces the original elif chain.
    unit_seconds = {
        'ms': 0.001,
        's': 1,
        'm': 60,
        'h': 60 * 60,
        'd': 60 * 60 * 24,
        'w': 60 * 60 * 24 * 7,
    }
    # Raw string so the \d escape is explicit and warning-free.
    parser = re.compile(r'(\d+)([a-zA-Z]*)')
    result = 0.0
    for value, unit in parser.findall(str_time):
        unit = unit.lower() or default_unit
        try:
            result += int(value) * unit_seconds[unit]
        except KeyError:
            raise ValueError(
                "String contains unsupported unit %s: %s" % (unit, str_time))
    return int(result * multiplier)
def pid_exists(pid):
    """Check whether pid exists in the current process table.

    A zombie process is reported as not existing.
    """
    if pid < 0:
        return False
    try:
        # Signal 0 probes for existence without actually signalling.
        os.kill(pid, 0)
    except OSError as exc:
        logging.debug("No process[%s]: %s", exc.errno, exc)
        # EPERM means the process exists but we lack permission to signal it.
        return exc.errno == errno.EPERM
    else:
        # NOTE(review): the process can exit between the kill() probe and
        # this lookup, in which case psutil raises NoSuchProcess -- confirm
        # callers tolerate that.  Also, `status` is an attribute in old
        # psutil releases and a method in newer ones; verify the pinned
        # version matches this usage.
        p = psutil.Process(pid)
        return p.status != psutil.STATUS_ZOMBIE
def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
    """
    Wrapper for Popen: run *cmd* to completion and return
    (returncode, stdout, stderr).

    :param cmd: command string or argv list; strings are shlex-split
        unless shell=True
    :param shell: run the command through the shell
    :param poll_period: unused, kept for interface compatibility
    :param catch_out: capture stdout/stderr via pipes instead of
        inheriting the parent's streams
    """
    log = logging.getLogger(__name__)
    log.debug("Starting: %s", cmd)
    stdout = ""
    stderr = ""
    # NOTE(review): basestring is Python 2 only; this needs str on Python 3.
    if not shell and isinstance(cmd, basestring):
        cmd = shlex.split(cmd)
    if catch_out:
        process = subprocess.Popen(
            cmd,
            shell=shell,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            close_fds=True)
    else:
        process = subprocess.Popen(cmd, shell=shell, close_fds=True)
    # communicate() waits for the process and drains the pipes (if any).
    stdout, stderr = process.communicate()
    if stderr:
        log.error("There were errors:\n%s", stderr)
    if stdout:
        log.debug("Process output:\n%s", stdout)
    returncode = process.returncode
    log.debug("Process exit code: %s", returncode)
    return returncode, stdout, stderr
def splitstring(string):
    """
    Split *string* on whitespace, keeping double-quoted phrases intact.

    Quoted phrases are moved (with their quotes) to the end of the result:

    >>> splitstring('apple orange "banana tree" green')
    ['apple', 'orange', 'green', '"banana tree"']
    """
    patt = re.compile(r'"[\w ]+"')
    # Fixed: the original kept only the FIRST quoted phrase while sub()
    # removed all of them, silently dropping any further quoted phrases.
    quoted_items = patt.findall(string)
    if quoted_items:
        newstring = patt.sub('', string)
        return newstring.split() + quoted_items
    return string.split()
def pairs(lst):
    """
    Iterate over consecutive non-overlapping pairs of *lst*:
    (lst[0], lst[1]), (lst[2], lst[3]), ...  A trailing odd element is
    silently dropped.
    """
    # NOTE: itertools.izip is Python 2 only; use zip() when porting to 3.
    return itertools.izip(lst[::2], lst[1::2])
def update_status(status, multi_key, value):
    """Set *value* in the nested dict *status* at the key path *multi_key*,
    creating intermediate dicts as needed."""
    node = status
    for key in multi_key[:-1]:
        node = node.setdefault(key, {})
    node[multi_key[-1]] = value
class AddressWizard:
    """Resolve "host", "host:port", "[v6addr]" or "[v6addr]:port" strings
    into usable socket addresses, optionally verifying candidates with a
    TCP connection test."""

    def __init__(self):
        # Kept as attributes so tests can inject fakes.
        self.lookup_fn = socket.getaddrinfo
        self.socket_class = socket.socket

    def resolve(self, address_str, do_test=False, explicit_port=False):
        """
        :param address_str: address, optionally bracketed (IPv6) and/or
            with a ``:port`` suffix
        :param do_test: try a TCP connect to each candidate, returning the
            first that works
        :param explicit_port: deprecated port override
        :return: tuple of boolean, string, int - isIPv6, resolved_ip, port (may be null), extracted_address
        """
        if not address_str:
            raise RuntimeError("Mandatory option was not specified: address")
        logger.debug("Trying to resolve address string: %s", address_str)
        port = None
        # NOTE(review): not raw strings; the escapes happen to work but
        # r"..." would be safer.
        braceport_pat = "^\[([^]]+)\]:(\d+)$"
        braceonly_pat = "^\[([^]]+)\]$"
        if re.match(braceport_pat, address_str):
            # "[v6addr]:port" form.
            logger.debug("Braces and port present")
            match = re.match(braceport_pat, address_str)
            logger.debug("Match: %s %s ", match.group(1), match.group(2))
            address_str, port = match.group(1), match.group(2)
        elif re.match(braceonly_pat, address_str):
            # "[v6addr]" form, no port.
            logger.debug("Braces only present")
            match = re.match(braceonly_pat, address_str)
            logger.debug("Match: %s", match.group(1))
            address_str = match.group(1)
        else:
            # Unbracketed: "host" or "host:port"; more than one colon means
            # a bare IPv6 address, which is left untouched.
            logger.debug("Parsing port")
            parts = address_str.split(":")
            if len(parts) <= 2:  # otherwise it is v6 address
                address_str = parts[0]
                if len(parts) == 2:
                    port = int(parts[1])
        if port is not None:
            port = int(port)
        try:
            resolved = self.lookup_fn(address_str, port)
            logger.debug("Lookup result: %s", resolved)
        except Exception as exc:
            logger.debug(
                "Exception trying to resolve hostname %s : %s", address_str,
                traceback.format_exc(exc))
            msg = "Failed to resolve hostname: %s. Error: %s"
            raise RuntimeError(msg % (address_str, exc))
        # Walk the getaddrinfo candidates; the first acceptable (and, with
        # do_test, connectable) one wins.
        for (family, socktype, proto, canonname, sockaddr) in resolved:
            is_v6 = family == socket.AF_INET6
            parsed_ip, port = sockaddr[0], sockaddr[1]
            if explicit_port:
                logger.warn(
                    "Using phantom.port option is deprecated. Use phantom.address=[address]:port instead"
                )
                port = int(explicit_port)
            elif not port:
                port = 80
            if do_test:
                try:
                    self.__test(family, (parsed_ip, port))
                except RuntimeError as exc:
                    logger.warn(
                        "Failed TCP connection test using [%s]:%s", parsed_ip,
                        port)
                    continue
            return is_v6, parsed_ip, int(port), address_str
        msg = "All connection attempts failed for %s, use phantom.connection_test=0 to disable it"
        raise RuntimeError(msg % address_str)

    def __test(self, af, sa):
        # Probe sockaddr *sa* with a 5-second TCP connect; raise
        # RuntimeError on failure, always closing the socket.
        test_sock = self.socket_class(af)
        try:
            test_sock.settimeout(5)
            test_sock.connect(sa)
        except Exception as exc:
            logger.debug(
                "Exception on connect attempt [%s]:%s : %s", sa[0], sa[1],
                traceback.format_exc(exc))
            msg = "TCP Connection test failed for [%s]:%s, use phantom.connection_test=0 to disable it"
            raise RuntimeError(msg % (sa[0], sa[1]))
        finally:
            test_sock.close()
class Chopper(object):
    """Flatten an iterable of iterables into a single stream of items."""

    def __init__(self, source):
        self.source = source

    def __iter__(self):
        return (item for chunk in self.source for item in chunk)
| Alcereo/LoadTestingToolsCentos | tank/tank_src/yandextank/common/util.py | util.py | py | 18,274 | python | en | code | 0 | github-code | 13 |
40502832385 | from pwn import *
import math
from Crypto.Util.number import inverse
# Connect to the challenge service and request a problem instance: a
# comma-separated list of consecutive LCG outputs.
r = remote('134.209.237.231', 4242, level='debug')
r.sendline('challenge')
r.recvuntil('Problem:')
s = r.recvline()
s = s.split(b',')
s = [int(x) for x in s]
# The outputs follow x_{i+1} = a*x_i + c (mod m).  For any four
# consecutive outputs, (x1-x0)*(x3-x2) - (x2-x1)^2 is a multiple of m,
# so the gcd over all such windows recovers the modulus.
m = 0
for i in range(3, len(s)):
    x0 = s[i-3]
    x1 = s[i-2]
    x2 = s[i-1]
    x3 = s[i]
    tmp = (x1-x0)*(x3-x2) - (x2-x1) * (x2-x1)
    if i == 3:
        m = tmp
    else:
        m = math.gcd(m, tmp)
# Multiplier a = (x2-x1) * (x1-x0)^-1 mod m; increment c from one step.
a = (s[2] - s[1]) * inverse(s[1] - s[0], m) % m
c = (s[1] - a * s[0]) % m
# Predict the next LCG output and submit it as the answer.
nonce = (s[-1] * a + c) % m
r.sendline(str(nonce))
print("s: ", s)
print("m: ", m)
print("a: ", a)
print("c: ", c)
r.interactive()
| forward0606/CTF | CCUISC/Crypto/Crypto/SCIST_LCG/exploit.py | exploit.py | py | 649 | python | en | code | 2 | github-code | 13 |
31704323998 | import random
def makegame(range):
    """Guess-the-number game: draw a random int in [0, range] (inclusive)
    and prompt the player (in Dutch) until they guess it."""
    # NOTE(review): the parameter shadows the builtin range(); rename when
    # refactoring.
    randomnumb = random.randint(0, range)
    while True:
        guess = int(input('Geef een willekeurig getal: '))
        if guess == randomnumb:
            print('Goed geraden!')
            break
        else:
            print('Verkeerd')
range = int(input('Geef een bereik: '))
makegame(range) | LukaHerrmann/ProjectC_Opdr | Structured_Programming/ForOpdr1/a/randomgame.py | randomgame.py | py | 338 | python | nl | code | 0 | github-code | 13 |
72544325458 | from __future__ import absolute_import
import pprint
import logging
import importlib
from itertools import repeat
import boto3
from botocore.client import Config
from botocore.exceptions import ClientError
from .models import TaskMeta
log = logging.getLogger(__name__)
def import_class(qualname):
    """Import and return the object named by the dotted path *qualname*
    (e.g. ``"package.module.ClassName"``)."""
    module_path, _, class_name = qualname.rpartition(".")
    return import_action(from_name=module_path, import_name=class_name)
def import_action(from_name, import_name=None):
    """
    Import and return the module *from_name*; when *import_name* is given,
    return that attribute of the module instead.  Failures are logged
    before being re-raised.
    """
    try:
        module = importlib.import_module(from_name)
        return module if import_name is None else getattr(module, import_name)
    except (ImportError, AttributeError) as err:
        log.exception(err)
        raise
def get_client():
    """Return a boto3 SWF client with enlarged timeouts.

    The 70s read timeout is presumably chosen to outlast SWF's 60-second
    long polls so poll calls aren't cut off client-side -- confirm.
    """
    boto_config = Config(connect_timeout=50, read_timeout=70)
    swf = boto3.client('swf', config=boto_config)
    return swf
def get_workflow_data(workflow_class):
    """
    Collect SWF registration data from a decorated workflow class:
    its domain, task list, workflow type name/versions (from methods
    marked is_entrypoint) and activity types (from methods marked
    is_activity on the activities object).
    """
    domain = workflow_class.domain
    tasklist = workflow_class.tasklist
    workflow_type_versions = []
    activities = []
    # workflow_class.name is set with @workflow() from activities.utils
    workflow_type_name = "{0}.{1}".format(
        workflow_class.name, workflow_class.activities.name
    )
    # get the entrypoint versions for our workflow types
    # NOTE(review): dict.iteritems is Python 2 only.
    for name, method in workflow_class.__dict__.iteritems():
        if not hasattr(method, "is_entrypoint"):
            continue
        workflow_type_versions.append(method.version)
    for name, method in workflow_class.activities.__class__.__dict__.iteritems():
        if not hasattr(method, "is_activity"):
            continue
        activities.append((method.swf_name, method.swf_version))
    # namedtuple might be better here
    return {
        "domain": domain,
        "tasklist": tasklist,
        "workflows": zip(repeat(workflow_type_name), workflow_type_versions),
        "activities": activities
    }
def create_resources(workflow_class):
    """Register the SWF domain, workflow types and activity types declared
    by *workflow_class* (idempotent: already-registered ones are skipped)."""
    client = get_client()
    data = get_workflow_data(workflow_class)
    domain = data["domain"]
    tasklist = data["tasklist"]
    workflows = data["workflows"]
    activities = data["activities"]
    create_domain(client, domain)
    for name, version in workflows:
        create_workflow(client, domain, name, version, tasklist)
    for name, version in activities:
        create_activity(client, domain, name, version, tasklist)
def create_domain(client, domain, description="", retention_period=1):
    """Register an SWF domain, ignoring "already exists" errors.

    :param retention_period: execution history retention, in days
    """
    log.debug("Creating SWF Domain: '%s'", domain)
    try:
        client.register_domain(
            name=domain,
            description=description,
            workflowExecutionRetentionPeriodInDays=str(retention_period)
        )
    except ClientError as e:
        # NOTE(review): ANY ClientError is treated as "already exists";
        # permission/throttling failures are swallowed too -- consider
        # checking the error code before ignoring.
        code = e.response.get("Error", {}).get("Code")
        log.debug("Domain already exists '%s'", code)
def create_workflow(client, domain, workflow, version, tasklist, description="", max_execution_length=(86400 * 365)):
    """Register an SWF workflow type, ignoring "already exists" errors.

    :param max_execution_length: defaultExecutionStartToCloseTimeout in
        seconds (defaults to one year)
    """
    log.debug(
        "Creating SWF Workflow: '%s:%s@%s' on task list: '%s'",
        workflow, version, domain, tasklist
    )
    try:
        client.register_workflow_type(
            domain=domain,
            name=workflow,
            version=version,
            description=description,
            defaultExecutionStartToCloseTimeout=str(max_execution_length),
            defaultTaskStartToCloseTimeout="NONE",
            defaultChildPolicy="TERMINATE",
            defaultTaskList={"name": tasklist}
        )
    except ClientError as e:
        # NOTE(review): any ClientError is assumed to mean "type exists".
        code = e.response.get("Error", {}).get("Code")
        log.debug("Workflow already exists '%s'", code)
def create_activity(client, domain, activity, version, tasklist, description=""):
    """Register an SWF activity type, ignoring "already exists" errors."""
    log.debug(
        "Creating SWF Activity: '%s:%s@%s' on task list: '%s'",
        activity, version, domain, tasklist
    )
    try:
        client.register_activity_type(
            domain=domain,
            name=activity,
            version=version,
            description=description,
            defaultTaskStartToCloseTimeout="NONE",
            defaultTaskList={"name": tasklist}
        )
    except ClientError as e:
        # NOTE(review): any ClientError is assumed to mean "type exists".
        code = e.response.get("Error", {}).get("Code")
        log.debug("Activity '%s:%s' already exists '%s'", activity, version, code)
def schedule_later(client, task_token, seconds, timer_id, payload=None):
    """Complete the current decision task by starting an SWF timer that
    fires after *seconds*; *payload*, if given, rides along in the
    timer's ``control`` field."""
    decision = {
        "timerId": timer_id,
        "startToFireTimeout": str(seconds)
    }
    if payload is not None:
        decision["control"] = payload
    client.respond_decision_task_completed(
        taskToken=task_token,
        decisions=[{
            "decisionType": "StartTimer",
            "startTimerDecisionAttributes": decision
        }]
    )
def schedule_activity(
        client, task_token, name, version, activity_id,
        tasklist, payload="", close_timeout="NONE", start_timeout="10",
        timeout="10", heartbeat_timeout="NONE", priority=0, attempt=0):
    """Complete the current decision task by scheduling one activity task.

    The timeout parameters are SWF duration strings (seconds or "NONE");
    *attempt* is appended to *activity_id* so retries get unique ids.
    """
    client.respond_decision_task_completed(
        taskToken=task_token,
        decisions=[{
            "decisionType": "ScheduleActivityTask",
            "scheduleActivityTaskDecisionAttributes": {
                "activityId": "{0}-{1}".format(activity_id, attempt),
                "input": payload,
                "taskPriority": str(priority),
                "scheduleToCloseTimeout": timeout, # maximum duration for this task
                "scheduleToStartTimeout": start_timeout, # maximum duration the task can wait to be assigned to a worker
                "startToCloseTimeout": close_timeout, # maximum duration a worker may take to process this task
                "heartbeatTimeout": heartbeat_timeout, # maximum time before which a worker processing a task of this type must report progress
                "activityType": {
                    "name": name,
                    "version": version
                },
                "taskList": {
                    "name": tasklist
                },
            }
        }]
    )
def schedule_activity_later(client, task_token, payload, timer_id):
    """Defer the current decision by 5 seconds via an SWF timer carrying
    *payload* in its control field (used to retry scheduling later)."""
    later = 5
    schedule_later(
        client=client,
        task_token=task_token,
        seconds=later,
        payload=payload,
        timer_id=timer_id
    )
    log.info("Scheduled task for later: '%ss' with payload '%s' %s'", later, payload, timer_id)
def cancel_workflow(client, task_token, reason=""):
    """Complete the current decision task by cancelling the workflow
    execution, recording *reason* in the decision details."""
    client.respond_decision_task_completed(
        taskToken=task_token,
        decisions=[{
            "decisionType": "CancelWorkflowExecution",
            "cancelWorkflowExecutionDecisionAttributes": {
                "details": reason
            }
        }]
    )
def complete_activity(client, task_token, result=None):
    """Report the activity identified by *task_token* as completed,
    optionally attaching a *result* payload."""
    client.respond_activity_task_completed(
        taskToken=task_token,
        result=result
    )
def fail_activity(client, task_token, reason, details=""):
    """Report the activity identified by *task_token* as failed."""
    client.respond_activity_task_failed(
        taskToken=task_token,
        reason=reason,
        details=details
    )
def fail_workflow(client, task_token, reason, details=""):
    """Complete the current decision task by failing the whole workflow
    execution."""
    client.respond_decision_task_completed(
        taskToken=task_token,
        decisions=[{
            "decisionType": "FailWorkflowExecution",
            "failWorkflowExecutionDecisionAttributes": {
                "reason": reason,
                "details": details
            }
        }]
    )
def complete_workflow(client, task_token, result="success"):
    """Complete the current decision task by finishing the workflow
    execution with *result*."""
    client.respond_decision_task_completed(
        taskToken=task_token,
        decisions=[{
            "decisionType": "CompleteWorkflowExecution",
            "completeWorkflowExecutionDecisionAttributes": {
                "result": result
            }
        }]
    )
def poll_for_decision_task(client, domain, identity, tasklist, next_page_token=None):
    """
    Long-poll SWF for a decision task on *tasklist*.

    Returns the raw task dict, or None on poll timeout (SWF responds
    without a taskToken), on request failure, or when the task carries no
    events.  *next_page_token* continues a paginated event history.
    """
    params = {
        "domain": domain,
        "taskList": {"name": tasklist},
        "identity": identity,
        "reverseOrder": False
    }
    if next_page_token:
        params["nextPageToken"] = next_page_token
    try:
        task = client.poll_for_decision_task(**params)
    except ClientError as e:
        # NOTE(review): exception.message is Python 2 only.
        log.error(e.message)
        return None
    log.debug("Received new decision task: \n%s", pprint.pformat(task))
    if "taskToken" not in task:
        log.debug("Poll timed out, no new task.")
        return None
    if "events" not in task:
        log.info("No events found in new task")
        return None
    return task
def poll_for_activity_task(client, domain, identity, tasklist):
    """
    Long-poll SWF for an activity task on *tasklist*.

    Returns the raw task dict, or None when the poll timed out (SWF
    responds without a taskToken) or the request failed, so the worker
    loop can simply poll again.
    """
    params = {
        "domain": domain,
        "taskList": {"name": tasklist},
        "identity": identity,
    }
    try:
        task = client.poll_for_activity_task(**params)
    except ClientError as e:
        # Removed a leftover debug print("WORKER FAILURE"); log the error
        # instead.  Formatting the exception (rather than the Python 2-only
        # e.message attribute) keeps this consistent with the logging
        # elsewhere in the module.
        log.error("Activity poll failed: %s", e)
        return None
    log.debug("Received new activity task: \n%s", pprint.pformat(task))
    if "taskToken" not in task:
        log.debug("Poll timed out, no new task.")
        return None
    return task
def get_task_meta(task, domain, tasklist):
    """Extract the identifying fields of a polled SWF task into a
    TaskMeta record."""
    task_token = task["taskToken"]
    run_id = task["workflowExecution"]["runId"]
    workflow_id = task["workflowExecution"]["workflowId"]
    meta = TaskMeta(
        task_token=task_token,
        run_id=run_id,
        workflow_id=workflow_id,
        domain=domain,
        tasklist=tasklist
    )
    return meta
| blitzagency/flowbee | flowbee/utils.py | utils.py | py | 9,489 | python | en | code | 0 | github-code | 13 |
29200447785 | import pprint
import random
w, h = 4, 5;
# Hard-coded 4x5 grid: 0 = blocked cell, 1 = walkable cell.
# NOTE(review): w/h are only used by the commented-out random generator.
grid = [
    [1, 1, 0, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 1],
    [1, 1, 1, 0],
    [1, 0, 1, 1]
]
# Alternative: generate a random w x h grid of 0s and 1s instead.
# grid = [[random.randint(0, 1) for x in range(w)] for y in range(h)]
# pprint.pprint(grid)
| SunandanBose/practiceDS | DataStructure/src/main/java/com/raj/robo_nav/robo_nav.py | robo_nav.py | py | 311 | python | en | code | 1 | github-code | 13 |
6662462914 | from typing import NamedTuple, Optional
from datetime import date
from flask import session
from .db import query
from .tables.listings import Listing
from .consts import AMENITIES_CHOICES
def suggest_price(listing: Listing, simulate_extra_amenities=[]):
    """
    Estimate a rental price for *listing* as an amenity-overlap-weighted
    mean over other listings: each other listing's price is weighted by
    how many amenities it shares with this one.

    :param simulate_extra_amenities: amenities to pretend the listing also
        has (used by suggest_amenities for what-if pricing)
    """
    # NOTE(review): mutable default argument; harmless here because the
    # default is never mutated, but a None default would be safer.
    existing_amenities = listing.amenities.split(', ')
    if simulate_extra_amenities:
        existing_amenities.extend(simulate_extra_amenities)
    # Mean (computed as weighted average) of rental price per existing amenity.
    # This builds only named placeholders (%(amenity_i)s); the amenity
    # values themselves are passed as query parameters below.
    common_amenities_count_sql = (
        ' + '.join(f'(amenities LIKE %(amenity_{idx})s)' for idx in range(len(existing_amenities)))
    )
    return query(
        f'''
        WITH
        A AS (
            SELECT L.id AS listing_id, L.amenities, A.rental_price
            FROM Availability A
            JOIN BookingSlots S ON S.id = A.slot_id
            JOIN Listings L ON L.id = S.listing_id
            WHERE L.id <> %(listing_id)s
        ),
        WeightedSums AS (
            SELECT listing_id, SUM(A.rental_price * ({common_amenities_count_sql})) AS value
            FROM A
            GROUP BY listing_id
        ),
        Sums AS (
            SELECT listing_id, SUM({common_amenities_count_sql}) AS value
            FROM A
            GROUP BY listing_id
        )
        SELECT SUM(WeightedSums.value) / SUM(Sums.value) AS expected_price
        FROM WeightedSums, Sums
        WHERE WeightedSums.listing_id = Sums.listing_id
        ''',
        listing_id=listing.id,
        **{
            f'amenity_{idx}': f'%{amenity}%'
            for (idx, amenity) in enumerate(existing_amenities)
        }
    ).fetchall()[0].expected_price
def suggest_amenities(listing: Listing):
    """Return amenity suggestions for *listing*, best first.

    For every amenity the listing lacks, estimate (via suggest_price's
    what-if mode) how the expected rental price would change if it were
    added, and return only the positive-impact amenities sorted by
    expected revenue increase, descending.
    """
    class AmenitySuggestion(NamedTuple):
        amenity: str
        expected_revenue_increase: float

    existing_amenities = listing.amenities.split(', ')
    expected_current_revenue = suggest_price(listing)
    # (Removed a leftover debug print of the current expected revenue.)
    return sorted(
        (
            suggestion
            for suggestion in (
                AmenitySuggestion(
                    amenity=amenity,
                    expected_revenue_increase=(
                        suggest_price(listing, simulate_extra_amenities=[amenity])
                        - expected_current_revenue
                    ),
                )
                for amenity in AMENITIES_CHOICES
                if amenity not in existing_amenities
            )
            if suggestion.expected_revenue_increase > 0
        ),
        key=lambda suggestion: suggestion.expected_revenue_increase,
        reverse=True,
    )
| ThatsJustCheesy/C43-project | mybnb/host_toolkit.py | host_toolkit.py | py | 2,726 | python | en | code | 0 | github-code | 13 |
70587607058 |
import cv2
import json
from pathlib import Path
def check(data_dir, target_dir):
    """Overlay point annotations on every PNG in *data_dir* and write the
    annotated copies into *target_dir*.

    Expects <data_dir>/annotation/<image>.json files mapping point names
    to [x, y] pixel coordinates.
    """
    anno_dir = Path(data_dir) / 'annotation'
    for img_path in Path(data_dir).glob('./*.png'):
        img_name = img_path.stem
        anno_file = anno_dir / f'{img_name}.json'
        with open(str(anno_file), 'r') as file:
            anno = json.load(file)
        image = cv2.imread(str(img_path))
        for name, pos in anno.items():
            x, y = pos
            # Mark each annotated point with a thick green circle.
            cv2.circle(image, center=(x,y), radius=5, color=(0, 255, 0), thickness=5)
        target_path = Path(target_dir) / f'{img_name}.png'
        cv2.imwrite(str(target_path), image)
check('/home/andrii/Desktop/diploma_data/GRpRGZHJFhU', '/home/andrii/Desktop/DEBUG') | dnmca/vfr | src/tools/check_annotation.py | check_annotation.py | py | 733 | python | en | code | 0 | github-code | 13 |
38298409767 | import einstellungen
import pygame
import random
# Bilder
gras_bild = pygame.image.load("bilder/gras.png")
game_over_bild = pygame.image.load("bilder/game_over.png")
game_over_oberfläche = pygame.transform.scale(
game_over_bild,
(einstellungen.BILDSCHIRM_BREITE // 2, einstellungen.BILDSCHIRM_HÖHE // 2),
)
gras_oberfläche = pygame.transform.scale(
gras_bild, (einstellungen.BILDSCHIRM_BREITE, einstellungen.BILDSCHIRM_HÖHE)
)
# Auf "links-rechts"-Achse vom Bildschirm soll die Schlange genau auf der Hälfte sein
schlange_x = einstellungen.BILDSCHIRM_BREITE // 2
# Auf "unten-oben"-Achse vom Bildschirm soll die Schlange auch genau auf der Hälfte sein
schlange_y = einstellungen.BILDSCHIRM_HÖHE // 2
# Schlangen-Position ist Kombi aus schlange_x und schlange_y
schlangen_kopf = [schlange_x, schlange_y]
schlangen_körper_liste = [
schlangen_kopf
] # Schlangenkopf wird in Schlangenkörperliste gespeichert
apfel_x = einstellungen.BILDSCHIRM_BREITE - 10 * einstellungen.PIXEL_PRO_TICK
apfel_y = einstellungen.BILDSCHIRM_HÖHE - 10 * einstellungen.PIXEL_PRO_TICK
apfel_pos_liste = [apfel_x, apfel_y]
# Bildschirm mit Maßen erstellen
bildschirm = pygame.display.set_mode(
(einstellungen.BILDSCHIRM_BREITE, einstellungen.BILDSCHIRM_HÖHE)
)
schlangen_oberfläche = pygame.Surface(
(einstellungen.PUNKT_DURCHMESSER, einstellungen.PUNKT_DURCHMESSER)
)
schlangen_oberfläche.fill(einstellungen.SCHLANGEN_FARBE)
apfel_oberfläche = pygame.Surface(
(einstellungen.PUNKT_DURCHMESSER, einstellungen.PUNKT_DURCHMESSER)
)
apfel_oberfläche.fill(einstellungen.APFEL_FARBE)
RICHTUNG = "RECHTS"
def wandcrash_prüfen():
    """Return True (and trigger game over) if the snake head has left the
    board; False otherwise."""
    global schlange_x
    global schlange_y
    if (
        (schlange_x >= einstellungen.BILDSCHIRM_BREITE)
        or (schlange_x <= 0)
        or (schlange_y <= 0)
        or (schlange_y >= einstellungen.BILDSCHIRM_HÖHE)
    ):
        game_over()
        return True
    return False
def selbstcrash_prüfen():
    """Return True (and trigger game over) if the head collides with any
    other body segment; False otherwise."""
    global schlangen_kopf
    global schlangen_körper_liste
    anzahl_körperteile = len(schlangen_körper_liste)
    for i in range(anzahl_körperteile):
        körperteil = schlangen_körper_liste[i]
        # The last list entry is the head itself -- skip the self-comparison.
        if i == anzahl_körperteile - 1:
            pass
        elif schlangen_kopf == körperteil:
            game_over()
            return True
    return False
def apfel_essen_prüfen():
    """Return True when the snake head is exactly on the apple."""
    global schlangen_kopf
    global apfel_pos_liste
    if schlangen_kopf == apfel_pos_liste:
        return True
    return False
def game_over():
    """Show the game-over image centred on the screen and pause 5 seconds."""
    global bildschirm
    bildschirm.blit(
        game_over_oberfläche,
        (einstellungen.BILDSCHIRM_BREITE // 4, einstellungen.BILDSCHIRM_HÖHE // 4),
    )
    pygame.display.flip()
    pygame.time.wait(5000)
def schlange_bewegen():
    """Advance the snake one tick in the current RICHTUNG (direction),
    growing by one segment when the head lands on the apple."""
    global schlange_x
    global schlange_y
    global schlangen_kopf
    global schlangen_körper_liste
    # Move the head one step in the current direction
    # (OBEN/RECHTS/LINKS/UNTEN = up/right/left/down).
    if RICHTUNG == "OBEN":
        schlange_y = schlange_y - einstellungen.PIXEL_PRO_TICK
    elif RICHTUNG == "RECHTS":
        schlange_x = schlange_x + einstellungen.PIXEL_PRO_TICK
    elif RICHTUNG == "LINKS":
        schlange_x = schlange_x - einstellungen.PIXEL_PRO_TICK
    elif RICHTUNG == "UNTEN":
        schlange_y = schlange_y + einstellungen.PIXEL_PRO_TICK
    schlangen_kopf = [schlange_x, schlange_y]
    schlangen_körper_liste.append(schlangen_kopf)
    # Unless the head is on the apple, drop the tail so the length stays
    # constant; on the apple, the tail is kept and the snake grows.
    if schlangen_kopf != apfel_pos_liste:
        del schlangen_körper_liste[0]
def events_prüfen():
    """Process pending pygame events; return False when the window was
    closed, True otherwise.  Arrow keys set the global direction."""
    global RICHTUNG
    # Scan the event queue for a quit request or key presses.
    for event in pygame.event.get():
        # Window close button: signal the game loop to stop.
        if event.type == pygame.QUIT:
            return False
        # Otherwise, on a key press update the movement direction.
        elif event.type == pygame.KEYDOWN:
            # NOTE(review): reversing straight into the body (e.g. LEFT
            # while moving RIGHT) is not prevented -- confirm intended.
            if event.key == pygame.K_UP:
                RICHTUNG = "OBEN"
            elif event.key == pygame.K_RIGHT:
                RICHTUNG = "RECHTS"
            elif event.key == pygame.K_LEFT:
                RICHTUNG = "LINKS"
            elif event.key == pygame.K_DOWN:
                RICHTUNG = "UNTEN"
    return True
def bildschirm_rendern():
    """Redraw the frame: grass background, every snake segment, the apple."""
    global bildschirm
    global schlangen_körper_liste
    global apfel_pos_liste
    global schlangen_oberfläche
    global apfel_oberfläche
    # "Clean" the screen by blitting the background over everything.
    bildschirm.blit(gras_oberfläche, (0, 0))
    # Draw the snake and the apple onto the screen.
    # schlangen_körper_liste = [[x, y], [x, y], ...]
    for körperteil_pos_liste in schlangen_körper_liste:
        bildschirm.blit(schlangen_oberfläche, körperteil_pos_liste)
    bildschirm.blit(apfel_oberfläche, apfel_pos_liste)
    # Flip the back buffer onto the display.
    pygame.display.flip()
def apfel_teleportieren():
    """Move the apple to a random grid-aligned position on the board."""
    global apfel_pos_liste
    # Pick a random cell index and scale it back to pixel coordinates.
    # NOTE(review): the cell count uses // 10 while coordinates are scaled
    # by PUNKT_DURCHMESSER -- confirm both equal 10, otherwise the apple
    # can land off the snake's movement grid.
    apfel_x = (
        random.randint(
            0,
            (einstellungen.BILDSCHIRM_BREITE // 10) - 1,
        )
        * einstellungen.PUNKT_DURCHMESSER
    )
    apfel_y = (
        random.randint(
            0,
            (einstellungen.BILDSCHIRM_HÖHE // 10) - 1,
        )
        * einstellungen.PUNKT_DURCHMESSER
    )
    apfel_pos_liste = [apfel_x, apfel_y]
| paulutsch/snake | logik.py | logik.py | py | 6,133 | python | de | code | 0 | github-code | 13 |
31944979469 | from config import *
from functions import *
from graphic_functions import *
from colorset import *
from classes import Teilchen, Sun
import pygame
import matplotlib.pyplot as plt
# Force history of particle 0, collected for plotting after the run.
test_pos = []
# Build 100 particles, then overwrite their state: zero velocity and a
# random position within +-100 * 5e9 units of the origin.
liste_teilchen = init_particle_list(100,width,height)
for i in liste_teilchen:
    i.velocity = np.array([0,0])
    i.x = np.random.randint(-100,100) *5e9
    i.y = np.random.randint(-100,100) *5e9
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption(TITLE)
massen = color_list(liste_teilchen)
hmass = massen[0]
mmass = massen[1]
# Main loop: render the particles, then integrate one physics step.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # graphics: clear, draw active particles, present the frame
    screen.fill(background_colour)
    for particle in liste_teilchen:
        if particle.active == True:
            draw(particle, screen)
    pygame.display.update()
    # Recompute the mass extremes used for colour mapping.
    massen = color_list(liste_teilchen)
    hmass = massen[0]
    mmass = massen[1]
    # physics: pairwise forces, then move and recolour each particle
    force_matrix = get_force_matrix(liste_teilchen)
    for a, particle in enumerate(liste_teilchen):
        particle.force = total_force_per_part(force_matrix,a)# + get_force(particle, sonne)
        particle.move()
        particle.colour = fcolorset(particle, hmass, mmass)
        if a == 0:
            test_pos.append([particle.force[0], particle.force[1]])
# After the window closes, plot particle 0's force history.
plt.plot(test_pos)
plt.show()
| Tyyyr/manythingsfloatinspace | test.py | test.py | py | 1,476 | python | en | code | 0 | github-code | 13 |
40545267325 | """
Base module for the waifu.im API. The API documentation can be found at
https://waifu.im/docs/
"""
import typing
import requests
from anime_api import exceptions
from .types import ImageTag, SearchSort, ImageOrientation
from .objects import Image, _ImageDimensions
class WaifuImAPI:
    """
    Thin client for the waifu.im image API.

    Docs: https://waifu.im/docs/
    """

    endpoint = "https://api.waifu.im"

    def __init__(self, endpoint: typing.Optional[str] = None):
        # Allow overriding the API endpoint (e.g. for a mirror or proxy).
        self.endpoint = endpoint or self.endpoint

    def __get_random_images(
        self,
        many: bool = False,
        tags: typing.Optional[
            typing.List[typing.Union[ImageTag.SFW, ImageTag.NSFW]]
        ] = None,
        excluded_tags: typing.Optional[
            typing.List[typing.Union[ImageTag.SFW, ImageTag.NSFW]]
        ] = None,
        included_files: typing.Optional[typing.List[str]] = None,
        excluded_files: typing.Optional[typing.List[str]] = None,
        is_nsfw: typing.Optional[bool] = None,
        is_gif: bool = False,
        order_by: typing.Optional[SearchSort] = SearchSort.RANDOM,
        orientation: typing.Optional[ImageOrientation] = None,
    ) -> typing.List[Image]:
        # Shared implementation behind get_random_image and
        # get_many_random_images: build the query parameters, call the
        # /search/ endpoint and map the JSON payload onto Image objects.
        # NOTE(review): passing order_by=None would crash on .value below;
        # the public wrappers default it to SearchSort.RANDOM -- confirm
        # before exposing this parameter more widely.
        params = {
            "included_tags": ",".join([tag.value for tag in tags]) if tags else None,
            "excluded_tags": ",".join([tag.value for tag in excluded_tags])
            if excluded_tags
            else None,
            "is_nsfw": is_nsfw,
            "gif": is_gif,
            "order_by": order_by.value,
            "orientation": orientation.value if orientation else None,
            "included_files": ",".join(included_files) if included_files else None,
            "excluded_files": ",".join(excluded_files) if excluded_files else None,
            "many": many,
        }
        # Placeholder: no headers are currently sent.
        headers = {}
        response = requests.get(
            f"{self.endpoint}/search/", params=params, headers=headers
        )
        if response.status_code != 200:
            raise exceptions.ServerError(
                response.status_code, msg=response.json().get("detail")
            )
        return [
            Image(
                id=image["image_id"],
                signature=image["signature"],
                extension=image["extension"],
                favorites=image["favorites"],
                dominant_color=image["dominant_color"],
                source=image["source"],
                uploaded_at=image["uploaded_at"],
                is_nsfw=image["is_nsfw"],
                dimens=_ImageDimensions(
                    width=image["width"],
                    height=image["height"],
                ),
                url=image["url"],
                preview_url=image["preview_url"],
                tags=[ImageTag().get_tag(tag["name"]) for tag in image["tags"]],
            )
            for image in response.json()["images"]
        ]

    def get_random_image(
        self,
        tags: typing.Optional[
            typing.List[typing.Union[ImageTag.SFW, ImageTag.NSFW]]
        ] = None,
        excluded_tags: typing.Optional[
            typing.List[typing.Union[ImageTag.SFW, ImageTag.NSFW]]
        ] = None,
        selected_file: typing.Optional[typing.List[str]] = None,
        excluded_files: typing.Optional[typing.List[str]] = None,
        is_nsfw: typing.Optional[bool] = None,
        is_gif: bool = False,
        order_by: typing.Optional[SearchSort] = SearchSort.RANDOM,
        orientation: typing.Optional[ImageOrientation] = None,
    ) -> Image:
        """
        Get a single random image matching the given filters.

        NOTE(review): *selected_file* is forwarded as included_files; the
        name is inconsistent with get_many_random_images.
        """
        return self.__get_random_images(
            many=False,
            tags=tags,
            excluded_tags=excluded_tags,
            included_files=selected_file,
            excluded_files=excluded_files,
            is_nsfw=is_nsfw,
            is_gif=is_gif,
            order_by=order_by,
            orientation=orientation,
        )[0]

    def get_many_random_images(
        self,
        tags: typing.Optional[
            typing.List[typing.Union[ImageTag.SFW, ImageTag.NSFW]]
        ] = None,
        excluded_tags: typing.Optional[
            typing.List[typing.Union[ImageTag.SFW, ImageTag.NSFW]]
        ] = None,
        included_files: typing.Optional[typing.List[str]] = None,
        excluded_files: typing.Optional[typing.List[str]] = None,
        is_nsfw: typing.Optional[bool] = None,
        is_gif: bool = False,
        order_by: typing.Optional[SearchSort] = SearchSort.RANDOM,
        orientation: typing.Optional[ImageOrientation] = None,
    ) -> typing.List[Image]:
        """
        Get a list of random images matching the given filters.
        """
        return self.__get_random_images(
            many=True,
            tags=tags,
            excluded_tags=excluded_tags,
            included_files=included_files,
            excluded_files=excluded_files,
            is_nsfw=is_nsfw,
            is_gif=is_gif,
            order_by=order_by,
            orientation=orientation,
        )
| Nekidev/anime-api | anime_api/apis/waifu_im/__init__.py | __init__.py | py | 4,973 | python | en | code | 115 | github-code | 13 |
42150680839 | from random import shuffle
def draw():
    """Draw six distinct balls from 1-59 and return them in ascending order."""
    pool = list(range(1, 60))
    shuffle(pool)
    return sorted(pool[:6])
def checkResults(ticket, draw):
    """Return True when the drawn numbers exactly match the ticket."""
    is_winner = (draw == ticket)
    return is_winner
if __name__ == '__main__':
    # Simulate one million draws against a single ticket.
    # Bug fix: the original did `won = checkResults(...)` on every
    # iteration, so `won` only reflected the *last* draw; a win anywhere
    # earlier in the simulation was silently discarded.  Latch the win
    # and stop early instead.
    won = False
    ticket = draw()  # generate our ticket
    for _ in range(1000000):
        if checkResults(ticket, draw()):
            won = True
            break
    if won:
        print("You won!")
    else:
        print("Better luck next time!")
| tliesnham/python-lottery | lottery.py | lottery.py | py | 502 | python | en | code | 0 | github-code | 13 |
41643204275 | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
seenMap = {}
maxLength = 0
l = 0
for r in range(len(s)):
if s[r] in seenMap:
l = max(l, seenMap[s[r]]+1)
maxLength = max(maxLength, r-l+1)
seenMap[s[r]] = r
return maxLength
| ibatulanandjp/Leetcode | #3_LongestSubstringWithoutRepeatingCharacters/solution1.py | solution1.py | py | 354 | python | en | code | 1 | github-code | 13 |
# 7. In a one-dimensional array of integers, find the two smallest
# elements.  They may be equal to each other (both being the minimum)
# or differ.
import random
# Build a random sample to search for its two smallest values.
SIZE = 10  # number of elements; must be >1 so two minima exist
lst = [random.randint(0, 10) for _ in range(SIZE)]  # values in [0, 10]; duplicates allowed
print(lst)
# Solution 1: single O(N) pass
def swap_if(a, b):
    """Return the pair (a, b) reordered so the smaller value comes first."""
    return (b, a) if a > b else (a, b)
# Track the two smallest values seen so far, seeded with the list maximum
# so the first elements can only lower them.
min1, min2 = [max(lst)] * 2
for elem in lst:
    # Keep the invariant min1 <= min2 after folding in each element.
    min2, elem = swap_if(min2, elem)
    min1, min2 = swap_if(min1, min2)
print(min1, min2)
# Solution 2: O(N*logN), but shorter:
lst.sort()
print(*lst[:2])
| 1frag/alg_and_data | geekbrains/lesson3/7.py | 7.py | py | 748 | python | ru | code | 0 | github-code | 13 |
75041571856 | """Total Ways to Sum - 18"""
from math import ceil
combinations = []  # accumulates every distinct partition found by split()


def split(num, other_parts=None):
    """Collect into ``combinations`` every way of writing ``num`` as a sum of
    two or more positive integers (with the already-fixed summands
    ``other_parts`` appended to each result).

    Fixes over the original:
    * ``other_parts`` no longer uses a mutable default argument, which was
      shared across top-level calls.
    * the loop bound is ``num // 2`` inclusive -- ``range(1, ceil(num/2.0))``
      skipped the symmetric split of even numbers (e.g. 4 never produced
      [2, 2]).
    * a leftover module-level debug ``print(combinations)`` was removed.
    """
    if other_parts is None:
        other_parts = []
    for a in range(1, num // 2 + 1):
        current_combination = sorted([a, num - a] + other_parts)
        if current_combination not in combinations:
            combinations.append(current_combination)
            # Recurse into each half that can itself still be split.
            if a != 1:
                split(a, [num - a] + other_parts)
            if num - a != 1:
                split(num - a, [a] + other_parts)
def main():
    """Solve the contract for a total of 18 and report the partitions found."""
    target = 18
    split(target)
    print(combinations)
    print(len(combinations))


if __name__ == '__main__':
    main()
| Cynthia7979/bitburner | contract-224193.cct.py | contract-224193.cct.py | py | 601 | python | en | code | 0 | github-code | 13 |
15316892501 | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# from sklearn.datasets.samples_generator import make_blobs
# Load csv & visualize it
# Load the customer CSV and take a first look.
cust_df = pd.read_csv("Cust_Segmentation.csv")
print(cust_df.head())
# Preprocess it.
# Address is categorical; k-means relies on Euclidean distance, which is
# meaningless for discrete labels, so drop the column.
df = cust_df.drop('Address', axis=1)
print(df.head())
# Normalize the features so variables with different magnitudes and
# distributions are weighted equally (column 0 is the customer id).
X = df.values[:, 1:]
X = np.nan_to_num(X)
Clus_dataSet = StandardScaler().fit_transform(X)
print(Clus_dataSet)
# Run k-means on the (unscaled) feature matrix and inspect the labels.
clusterNum = 3
k_means = KMeans(init="k-means++", n_clusters=clusterNum, n_init=12)
k_means.fit(X)
labels = k_means.labels_
print(labels)
# Assign each row its cluster label.
df["Clus_km"] = labels
print(df.head(5))
# Centroid values: mean of each feature per cluster.
print(df.groupby('Clus_km').mean())
# Distribution of customers by age and income.
# Fix: np.float was a deprecated alias for the builtin float and was
# removed in NumPy 1.24 -- use float directly.
area = np.pi * (X[:, 1])**2
plt.scatter(X[:, 0], X[:, 3], s=area, c=labels.astype(float), alpha=0.5)
plt.xlabel('Age', fontsize=16)
plt.ylabel('Income', fontsize=16)
plt.show()
# Same data on a 3D plot (education / age / income).
fig = plt.figure(1, figsize=(8, 6))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
ax.set_xlabel('Education')
ax.set_ylabel('Age')
ax.set_zlabel('Income')
ax.scatter(X[:, 1], X[:, 0], X[:, 3], c=labels.astype(float))
plt.show()
| elsheikh21/clustering-techniques | k-means-clustering.py | k-means-clustering.py | py | 1,896 | python | en | code | 0 | github-code | 13 |
3345649973 | '''
https://github.com/DFIR-ORC/dfir-orc/tree/main/src/OrcLib
'''
import ndk, ptypes
from ptypes import *
from ndk.datatypes import *
class _REPARSE_DATA_BUFFER(pstruct.type):
    """NTFS reparse-point payload (REPARSE_DATA_BUFFER) with the
    mount-point/symlink name fields inlined after the header."""
    def __PathBuffer(self):
        # Size the trailing name buffer from the declared data length.
        # NOTE(review): on disk ReparseDataLength is a byte count; here it is
        # used directly as a wstring character count -- confirm against real
        # reparse records.
        length = self['ReparseDataLength'].li.int()
        return dyn.clone(pstr.wstring, length=length)
    _fields_ = [
        (ULONG, 'ReparseTag'),
        (USHORT,'ReparseDataLength'),
        (USHORT,'Reserved'),
        (USHORT,'SubstituteNameOffset'),
        (USHORT,'SubstituteNameLength'),
        (USHORT,'PrintNameOffset'),
        (USHORT,'PrintNameLength'),
        (ULONG,'Flags'),
        (__PathBuffer, 'PathBuffer'),
    ]
# copied from http://www.writeblocked.org/resources/NTFS_CHEAT_SHEETS.pdf
# redefined based on https://raw.githubusercontent.com/REhints/Publications/master/Conferences/RECON'2013/RECON_2013.pdf
class _BIOS_PARAMETER_BLOCK(pstruct.type):
    '''BPB @ EBPB'''
    # BIOS parameter block followed by the NTFS extended BPB, as laid out in
    # the volume boot sector.
    _fields_ = [
        (WORD, 'BytesPerSector'),
        (BYTE, 'SecPerCluster'),
        (WORD, 'ReservedSectors'),
        (dyn.array(BYTE, 5), 'Reserved'), # 0x000000xxxx
        (BYTE, 'MediaDescriptorID'),
        (WORD, 'Reserved2'), # 0x0000
        (WORD, 'SectorsPerTrack'),
        (WORD, 'NumberOfHeads'),
        (DWORD, 'HiddenSectors'),
        (dyn.array(DWORD, 2), 'Reserved3'),
        # Extended BIOS Parameter Block
        (LONGLONG, 'TotalSectors'),
        (LONGLONG, 'StartingCluster'), # $MFT
        (LONGLONG, 'MFTMirrStartingCluster'), # $MFTMirror
        (DWORD, 'ClustersPerMFTRecord'),
        (DWORD, 'ClustersPerIndexBuffer'),
        (LONGLONG, 'VolumeSerialNumber'),
        (DWORD, 'Reserved4'), # checksum
    ]
class Boot(pstruct.type):
    '''NTFS volume boot record: jump stub, OEM id, BPB, boot code, 0xAA55.'''
    _fields_ = [
        (dyn.block(3), 'branch'),
        (ULONGLONG, 'oem'),
        (_BIOS_PARAMETER_BLOCK, 'bios parameter block'),
        (dyn.block(426), 'code'),
        (USHORT, 'sentinel'), # 0xaa55
    ]
class Descriptor(pstruct.type):
    '''File Record Segment Header'''
    # Header of a single MFT file record ("FILE" record).
    # NOTE(review): '$LogFile Sequence Number' is declared ULONG here; the
    # on-disk LSN is 8 bytes -- confirm whether the following fields line up
    # on real records.
    _fields_ = [
        (ULONG, 'Signature'), # "FILE"
        (USHORT, 'Update Sequence Array Offset'),
        (USHORT, 'Update Sequence Array Size'),
        (ULONG, '$LogFile Sequence Number'),
        (USHORT, 'Sequence Number'),
        (USHORT, 'Hard Link Count'),
        (USHORT, '1st Attribute Offset'),
        (USHORT, 'Flags'),
        (ULONG, 'Used size'),
        (ULONG, 'Allocated size'),
        (ULONGLONG, 'File reference'),
        (USHORT, 'Next Attribute Id'),
        #(USHORT, 'Update Sequence Number'),
        #(dyn.array(ptype.undefined, 0), 'Update Sequence Array'),
        #(dyn.block(2), 'unused'),
        #(ULONG, 'MFT record number'),
        #(dyn.clone(pint.uinteger_t, length=6), 'Default Location of Update Sequence Array'),
        #(dyn.clone(pint.uinteger_t, length=10), 'Reserved for Update Sequence Array'),
        #(ULONGLONG, 'Reserved for Sequence Array'),
        #(ULONGLONG, 'Common Location of 1st Attribute'),
    ]
### MFT Attributes
### MFT Attributes
class MFT_Attribute(ptype.definition):
    # Registry mapping attribute type ids to record types; concrete
    # attribute classes self-register via the @MFT_Attribute.define decorator
    # keyed on their 'id' attribute.
    cache, attribute = {}, 'id'
class MFT_Attribute_Id(pint.enum, ULONG):
    '''Enumeration of the standard NTFS attribute type codes.'''
    _values_ = [
        ('$STANDARD_INFORMATION', 0x10),
        ('$ATTRIBUTE_LIST', 0x20),
        ('$FILE_NAME', 0x30),
        ('$OBJECT_ID', 0x40),
        ('$SECURITY_DESCRIPTOR', 0x50),
        ('$VOLUME_NAME', 0x60),
        ('$VOLUME_INFORMATION', 0x70),
        ('$DATA', 0x80),
        ('$INDEX_ROOT', 0x90),
        ('$INDEX_ALLOCATION', 0xa0),
        ('$BITMAP', 0xb0),
        ('$REPARSE_POINT', 0xc0),
        ('$EA_INFORMATION', 0xd0),
        ('$PROPERTY_SET', 0xe0),
        ('$LOGGED_UTILITY_STREAM', 0x100),
    ]
# The classes below register each attribute type with MFT_Attribute.
# ptype.undefined subclasses are placeholders whose layout is not decoded yet.
@MFT_Attribute.define
class Attribute_List(ptype.undefined):
    '''$ATTRIBUTE_LIST (layout not decoded)'''
    id = 0x20
@MFT_Attribute.define
class Attribute_ObjectID(ptype.undefined):
    '''$OBJECT_ID (layout not decoded)'''
    id = 0x40
@MFT_Attribute.define
class Attribute_SecurityDescriptor(ptype.block):
    '''$SECURITY_DESCRIPTOR'''
    id = 0x50
@MFT_Attribute.define
class Attribute_VolumeName(ptype.undefined):
    '''$VOLUME_NAME (layout not decoded)'''
    id = 0x60
@MFT_Attribute.define
class Attribute_VolumeInformation(ptype.undefined):
    '''$VOLUME_INFORMATION (layout not decoded)'''
    id = 0x70
@MFT_Attribute.define
class Attribute_Data(ptype.block):
    '''$DATA'''
    id = 0x80
@MFT_Attribute.define
class Attribute_IndexRoot(pstruct.type):
    '''$INDEX_ROOT'''
    id = 0x90
    _fields_ = [
        (ULONG, 'Type'),
        (ULONG, 'Collation Rule'),
        (ULONG, 'Allocation Index Entry Size'),
        (BYTE, 'Clusters per Index Record'),
        (dyn.align(8), 'Padding'), # FIXME: padding, not alignment
    ]
@MFT_Attribute.define
class Attribute_IndexAllocation(ptype.undefined):
    '''$INDEX_ALLOCATION (layout not decoded)'''
    id = 0xa0
@MFT_Attribute.define
class Attribute_Bitmap(ptype.undefined):
    '''$BITMAP'''
    id = 0xb0
@MFT_Attribute.define
class Attribute_ReparsePoint(ptype.undefined):
    '''$REPARSE_POINT (layout not decoded; see _REPARSE_DATA_BUFFER)'''
    id = 0xc0
@MFT_Attribute.define
class Attribute_LoggedToolStream(ptype.undefined):
    '''$LOGGED_UTILITY_STREAM (layout not decoded)'''
    id = 0x100
class Attribute(pstruct.type):
    '''A single MFT attribute record: type id, common header, residency-
    specific header, then the resident value or non-resident data run.'''
    class Header(pstruct.type):
        '''Non-Resident and Resident Attribute Header'''
        class _Form_Code(pint.enum, BYTE):
            _values_ = [('Resident', 0x00), ('Non-Resident', 0x01)]
        class _Flags(pbinary.flags):
            _fields_ = [
                (1, 'Sparse'),
                (1, 'Encrypted'),
                (6, 'Unused'),
                (8, 'Compressed'),
            ]
        _fields_ = [
            (ULONG, 'Attribute Length'),
            (BYTE, 'Form Code'),
            (BYTE, 'Name Length'),
            (BYTE, 'Name Offset'),
            (_Flags, 'Flags'),
            (USHORT, 'Attribute Id'),
        ]
    def __Header(self):
        # Choose the residency-specific header from the form code.
        # .li forces the common header to be loaded before it is inspected.
        res = self['Header'].li
        return NonResident_Header if res['Form Code'].int() else Resident_Header
    def __Attribute(self):
        res = self['Header'].li
        if res['Form Code'].int():
            # Non-resident attributes carry a run list instead of a value.
            return DataRun
        # Resident attribute: dispatch on the type id registered above.
        res, h = self['Id'].li, self['Residency'].li
        t = MFT_Attribute.lookup(res.int())
        if issubclass(t, ptype.block):
            return dyn.clone(t, length=h.Length())
        return t
    def __Space(self):
        # Filler between the fixed fields and the value/run-list offset.
        res = self['Residency'].li
        cb = sum(self[fld].li.size() for fld in ['Id', 'Header', 'Residency'])
        return dyn.block(res.Offset() - cb)
    def __Extra(self):
        # Slack between the decoded value and the declared value length.
        res = self['Residency'].li
        attribute = self['Attribute'].li
        cb = res.Length() - attribute.size()
        return dyn.block(max(0, cb))
    _fields_ = [
        (MFT_Attribute_Id, 'Id'),
        (Header, 'Header'),
        (__Header, 'Residency'),
        (__Space, 'Space'),
        (__Attribute, 'Attribute'),
        (__Extra, 'Extra'),
    ]
### Residency headers
class Resident_Header(pstruct.type):
    '''Resident Attribute Header: value length, value offset, indexed flag.'''
    _fields_ = [
        (ULONG, 'Length'),
        # Bug fix: the field was declared as 'Offet' (typo), so Offset()
        # below raised a KeyError on every resident attribute.
        (USHORT, 'Offset'),
        (BYTE, 'Indexed Flag'),
        (BYTE, 'Padding'),
    ]
    def Offset(self):
        '''Return the offset (from the attribute start) of the value.'''
        return self['Offset'].int()
    def Length(self):
        '''Return the length in bytes of the resident value.'''
        return self['Length'].int()
class DataRun(pstruct.type):
    '''One mapping-pair entry of a non-resident attribute's run list: a
    header nibble pair giving the byte widths of the two variable-length
    integers that follow.'''
    class InfoSize(pbinary.struct):
        _fields_ = [
            (4, 'Cluster Length'),
            (4, 'Offset Length'),
        ]
    def __SizedInteger(field):
        # Build a loader whose integer width comes from the header nibble
        # named by `field`.
        def SizedInteger(self, name=field):
            res = self['info'].li
            return dyn.clone(pint.uinteger_t, length=res[name])
        return SizedInteger
    _fields_ = [
        (InfoSize, 'info'),
        (__SizedInteger('Cluster Length'), 'Size'),
        (__SizedInteger('Offset Length'), 'Offset'),
        (dyn.align(8), 'Padding'), # FIXME: This shouldn't be alignment, but rather padding to make the size a multiple of 8
    ]
class NonResident_Header(pstruct.type):
    '''Non-resident Attribute Header'''
    # Describes where the attribute's data lives on disk (VCN range,
    # run-list offset, and the three size fields).
    _fields_ = [
        (ULONGLONG, 'Start virtual cluster number'),
        (ULONGLONG, 'End virtual cluster number'),
        (BYTE, 'Runlist Offset'),
        (BYTE, 'Compression Unit Size'),
        (dyn.align(8), 'Padding'), # FIXME: This should be padding, not alignment (but I'm lazy)
        (ULONGLONG, 'Size of attribute content'),
        (ULONGLONG, 'Size on disk of attribute content'),
        (ULONGLONG, 'Initialized size of attribute content'),
    ]
    def Offset(self):
        '''Return the offset of the run list within the attribute record.'''
        res = self['Runlist Offset'].li
        return res.int()
    def Length(self):
        # This is the minimum size of a Data Run to calculate its real size
        return 1
### MFT Attribute Types
### MFT Attribute Types
class Standard_Flags(pbinary.flags):
    '''DOS/NTFS file attribute flag bits shared by $STANDARD_INFORMATION
    and $FILE_NAME.'''
    _fields_ = [
        (17, 'Reserved'),
        (1, 'Encrypted'),
        (1, 'Not Indexed'),
        (1, 'Offline'),
        (1, 'Compressed'),
        (1, 'Reparse Point'),
        (1, 'Sparse File'),
        (1, 'Temporary'),
        (1, 'Normal'),
        (1, 'Device'),
        (1, 'Archive'),
        (2, 'Unused'),
        (1, 'System'),
        (1, 'Hidden'),
        (1, 'Read-only'),
    ]
@MFT_Attribute.define
class Standard_Information(pstruct.type):
    '''$STANDARD_INFORMATION'''
    # Timestamps, flags and versioning of the file record itself.
    id = 0x10
    _fields_ = [
        (ULONGLONG, 'Date Created'),
        (ULONGLONG, 'Date Modified'),
        (ULONGLONG, 'Date MFT Modified'),
        (ULONGLONG, 'Date Accessed'),
        (Standard_Flags, 'Flags'),
        (ULONG, 'Max Versions'),
        (ULONG, 'Version Number'),
        #(ULONG, 'Class Id'),
        #(ULONG, 'Owner Id'),
        #(ULONG, 'Security Id'),
        #(ULONGLONG, 'Quota Charged'),
        #(ULONGLONG, 'Update Sequence Number'),
        #(dyn.block(8), 'unused'),
    ]
@MFT_Attribute.define
class File_Name(pstruct.type):
    '''$FILE_NAME'''
    # Per-namespace file name plus a duplicate of the timestamps/sizes.
    id = 0x30
    class _Name_Type(pint.enum, BYTE):
        _values_ = [
            ('POSIX', 0),
            ('Win32', 1),
            ('DOS', 2),
            ('7DOS', 3),
        ]
    _fields_ = [
        (ULONGLONG, 'Parent Directory'),
        (ULONGLONG, 'Date Created'),
        (ULONGLONG, 'Date Modified'),
        (ULONGLONG, 'Date MFT Modified'),
        (ULONGLONG, 'Date Accessed'),
        (ULONGLONG, 'Logical Size'),
        (ULONGLONG, 'Physical Size'),
        (Standard_Flags, 'Flags'),
        (ULONG, 'Reparse Value'),
        (BYTE, 'Name Length'),
        (_Name_Type, 'Name Type'),
        # NOTE(review): 'Name Length' is a character count of a UTF-16 name
        # on disk, while dyn.block sizes in bytes -- confirm this decodes
        # the full name on real records.
        (lambda self: dyn.block(self['Name Length'].li.int()), 'Name'),
    ]
### FIXME: integrate this into the deviceiocontrol context manager, maybe fix the names too
class USNRecord(pstruct.type):
@pbinary.littleendian
class _reason(pbinary.flags):
_fields_ = [
(1, 'CLOSE'),
(9, 'RESERVED'),
(1, 'STREAM_CHANGE'),
(1, 'REPARSE_POINT_CHANGE'),
(1, 'OBJECT_ID_CHANGE'),
(1, 'ENCRYPTION_CHANGE'),
(1, 'COMPRESSION_CHANGE'),
(1, 'HARD_LINK_CHANGE'),
(1, 'BASIC_INFO_CHANGE'),
(1, 'INDEXABLE_CHANGE'),
(1, 'RENAME_NEW_NAME'),
(1, 'RENAME_OLD_NAME'),
(1, 'SECURITY_CHANGE'),
(1, 'EA_CHANGE'),
(1, 'FILE_DELETE'),
(1, 'FILE_CREATE'),
(1, 'unknown(7)'),
(1, 'NAMED_DATA_TRUNCATION'),
(1, 'NAMED_DATA_EXTEND'),
(1, 'NAMED_DATA_OVERWRITE'),
(1, 'unknown(3)'),
(1, 'DATA_TRUNCATION'),
(1, 'DATA_EXTEND'),
(1, 'DATA_OVERWRITE'),
]
@pbinary.littleendian
class _fileAttributes(pbinary.flags):
_fields_ = [
(14, 'RESERVED'),
(1, 'NO_SCRUB_DATA'),
(1, 'VIRTUAL'),
(1, 'INTEGRITY_STREAM'),
(1, 'ENCRYPTED'),
(1, 'NOT_CONTENT_INDEXED'),
(1, 'OFFLINE'),
(1, 'COMPRESSED'),
(1, 'REPARSE_POINT'),
(1, 'SPARSE_FILE'),
(1, 'TEMPORARY'),
(1, 'NORMAL'),
(1, 'DEVICE'),
(1, 'ARCHIVE'),
(1, 'DIRECTORY'),
(1, 'unknown'),
(1, 'SYSTEM'),
(1, 'HIDDEN'),
(1, 'READONLY'),
]
@pbinary.littleendian
class _referenceNumber(pbinary.struct):
_fields_ = [
(16, 'sequenceNumber'),
(48, 'entryNumber'),
]
def __filenameOffset(self):
length = self['filenameLength'].li
t = dyn.clone(pstr.wstring, length=length.int())
return dyn.rpointer(t, self, USHORT)
_fields_ = [
(USHORT, 'majorVersion'),
(USHORT, 'minorVersion'),
(_referenceNumber, 'fileReferenceNumber'),
(_referenceNumber, 'parentFileReferenceNumber'),
(ULONGLONG, 'usn'),
(ULONGLONG, 'timestamp'), # might be an ndk.FILETIME from epoch
(_reason, 'reason'),
(ULONG, 'sourceInfo'),
(ULONG, 'securityId'),
(_fileAttributes, 'fileAttributes'),
(USHORT, 'filenameLength'),
(__filenameOffset, 'filenameOffset'), # this is an rpointer to a utf16 pstr.swstring
]
class USNJournal(parray.block):
class Record(pstruct.type):
def __content(self):
res, fields = self['length'].li, ['length', 'record']
return dyn.block(max(0, res - sum(self[fld].li.size() for fld in fields)))
_fields_ = [
(ULONG, 'length'),
(USNRecord, 'record'),
(__content, 'content'),
]
_object_ = Record
| arizvisa/syringe | template/fs/ntfs.py | ntfs.py | py | 13,363 | python | en | code | 35 | github-code | 13 |
9548607140 |
from rest_framework.decorators import api_view
from rest_framework.response import Response
from base.models import Room
from base.api.serializer import RoomSerializer
@api_view(['GET'])
def getRoutes(request):
    """Return a plain list of the API's available routes (self-documentation)."""
    routes = [
        'GET /api',
        # typo fix: was 'GET / api/rooms' (stray space after the slash)
        'GET /api/rooms',
        'GET /api/room/:id'
    ]
    return Response(routes)
@api_view(['GET'])
def getRooms(request):
    """Return every room, serialized as JSON.

    NOTE(review): ``Room.room`` looks like a custom manager name (instead of
    the default ``objects``) -- confirm it is declared on the model.
    """
    rooms = Room.room.all()
    roomsSerialized = RoomSerializer(rooms,many = True)
    return Response(roomsSerialized.data)
@api_view(['GET'])
def getRoom(request,pk):
    """Return a single room by primary key.

    NOTE(review): ``Room.room.get(id=pk)`` raises DoesNotExist (a 500) for an
    unknown id -- consider get_object_or_404 if a 404 is intended.
    """
    room = Room.room.get(id=pk)
    roomSerialized = RoomSerializer(room,many=False)
    return Response(roomSerialized.data)
28081561200 | import os
from os.path import join as opj
from scipy import spatial
import copy
import numpy as np
import cv2
import pickle
from math import *
def parse_pt(pt_file):
    """Parse a MOT-style tracking file into
    ``{frame_id: [[track_id, x1, y1, x2, y2], ...]}``.

    Each input line is ``frame,track,x,y,w,h,...``; the x/y/w/h box is
    converted to corner form (x1, y1, x2, y2).
    """
    # 'with' guarantees the file handle is closed.  The box is parsed into a
    # real list: the original Python-2 code kept the result of map(), which
    # on Python 3 is a lazy iterator that cannot be indexed or mutated.
    with open(pt_file) as f:
        lines = f.readlines()
    img_rects = dict()
    for line in lines:
        parts = line.strip().split(',')
        fid, tid = int(parts[0]), int(parts[1])
        rect = [int(float(x)) for x in parts[2:6]]
        rect[2] += rect[0]  # width  -> x2
        rect[3] += rect[1]  # height -> y2
        if fid not in img_rects:
            img_rects[fid] = list()
        rect.insert(0, tid)
        img_rects[fid].append(rect)
    return img_rects
if __name__ == '__main__':
    # Merge per-camera tracking results with the cross-camera cluster map
    # and emit the challenge submission file.
    # NOTE(review): this script is Python 2 -- it relies on map() returning a
    # list and writes str to a file opened in 'wb'; it fails as-is on
    # Python 3.
    data_dir = '../data/Filter_MOT/'
    roi_dir = '../data/roi/'
    scene_name = ['S02', 'S05']
    scene_cluster = [[6,7,8,9], [10,16,17,18,19,20,21,22,23,24,25,26,27,28,29,33,34,35,36]]
    # (camera_id, local_track_id) -> global track id
    map_tid = pickle.load(open('test_cluster.data', 'rb'))['cluster']
    f_w = open('track1.txt', 'wb')
    # Sort result files by the 3-digit camera id embedded in the name.
    txt_paths = os.listdir(data_dir)
    txt_paths = sorted(txt_paths, key=lambda x: int(x.split('.')[0][-3:]))
    for txt_path in txt_paths:
        cid = int(txt_path.split('.')[0][-3:])
        roi = cv2.imread(opj(roi_dir, '{}.jpg'.format(txt_path.split('.')[0])), 0)
        height, width = roi.shape
        img_rects = parse_pt(opj(data_dir, txt_path))
        for fid in img_rects:
            tid_rects = img_rects[fid]
            for tid_rect in tid_rects:
                tid = tid_rect[0]
                rect = tid_rect[1:]
                # Box center and extent from the corner-form rectangle.
                cx = 0.5*rect[0] + 0.5*rect[2]
                cy = 0.5*rect[1] + 0.5*rect[3]
                w = rect[2] - rect[0]
                h = rect[3] - rect[1]
                # Back to x,y,w,h and clamp the origin to the image.
                rect[2] -= rect[0]
                rect[3] -= rect[1]
                rect[0] = max(0, rect[0])
                rect[1] = max(0, rect[1])
                # Expand the box by a 20px margin, clipped to the frame.
                x1, y1 = max(0, cx - 0.5*w - 20), max(0, cy - 0.5*h - 20)
                x2, y2 = min(width-x1, w + 40), min(height-y1, h + 40)
                new_rect = map(int, [x1, y1, x2, y2])
                rect = map(int, rect)
                # Only tracks that were matched across cameras are emitted.
                if (cid, tid) in map_tid:
                    new_tid = map_tid[(cid, tid)]
                    f_w.write(str(cid) + ' ' + str(new_tid) + ' ' + str(fid) + ' ' + ' '.join(map(str, new_rect)) + ' -1 -1' '\n')
    f_w.close()
32611798190 | def main():
height = get_height()
for i in range(height):
# to print space
for k in range(height - i - 1):
print(' ', end="")
# to print # and do not break to new line
for l in range(i + 1):
print("#", end="")
# to print a new line
print("")
def get_height():
    """Prompt until the user enters an integer height between 1 and 8."""
    while True:
        try:
            n = int(input("Height: "))
        except ValueError:
            print("That's not an integer!")
            continue
        if 0 < n < 9:
            return n
main()
| minhngocda/cs50_2022 | sentimental-mario-less/mario.py | mario.py | py | 558 | python | en | code | 0 | github-code | 13 |
26018000246 | from absl.testing import parameterized
import tensorflow as tf
from nucleus7.coordinator.predictors import predict_using_predictor
from nucleus7.coordinator.predictors import (
represent_predictor_through_nucleotides)
from nucleus7.core.nucleotide import Nucleotide
from nucleus7.utils import nest_utils
class TestPredictors(parameterized.TestCase):
    """Unit tests for the predictor helpers in nucleus7.coordinator.predictors."""
    def test_represent_predictor_through_nucleotides(self):
        """Fetch/feed tensor dicts should map onto Nucleotide names and keys."""
        class PredictorMock(object):
            # Minimal stand-in exposing the fetch_tensors / feed_tensors
            # properties the function under test reads.
            # NOTE(review): _feed_tensors is never set in __init__ and is
            # assigned from outside below -- intentional for this mock.
            def __init__(self_):
                self_._fetch_tensors = None
            @property
            def fetch_tensors(self_):
                return self_._fetch_tensors
            @property
            def feed_tensors(self_):
                return self_._feed_tensors
        tf.reset_default_graph()
        predictor = PredictorMock()
        fetch_tensors = {
            'nucleotide1': {'output1': 'value1',
                            'output2': 'value2'},
            'nucleotide2': {'output3': 'value3',
                            'output4': 'value4'}
        }
        feed_tensors = {
            "data1": tf.placeholder(tf.float32),
            "data2": tf.placeholder(tf.float32),
            "parameter1": tf.placeholder_with_default(10, [])
        }
        fetch_tensors_flatten = nest_utils.flatten_nested_struct(fetch_tensors)
        predictor._fetch_tensors = fetch_tensors_flatten
        predictor._feed_tensors = feed_tensors
        nucleotides = represent_predictor_through_nucleotides(predictor)
        # Expected: one nucleotide per fetch-dict entry, generated keys from
        # its outputs, incoming keys from the placeholder feeds.
        nucleotide1_must = Nucleotide(name='nucleotide1')
        nucleotide1_must.generated_keys = ['output1', 'output2']
        nucleotide1_must.incoming_keys = ['data1', 'data2']
        nucleotide2_must = Nucleotide(name='nucleotide2')
        nucleotide2_must.generated_keys = ['output3', 'output4']
        nucleotide2_must.incoming_keys = ['data1', 'data2']
        nucleotides_must = [nucleotide1_must, nucleotide2_must]
        for nucleotide, nucleotide_must in zip(nucleotides, nucleotides_must):
            self.assertEqual(nucleotide_must.name,
                             nucleotide.name)
            self.assertSetEqual(set(nucleotide_must.generated_keys),
                                set(nucleotide.generated_keys))
            self.assertSetEqual(set(nucleotide_must.incoming_keys),
                                set(nucleotide.incoming_keys))
    @parameterized.parameters({"with_model_parameters": True},
                              {"with_model_parameters": False})
    def test_predict_using_predictor(self, with_model_parameters):
        """predict_using_predictor should feed only the predictor's inputs and
        unflatten the '*_out' results (plus any model parameters)."""
        class PredictorMock(object):
            # Callable mock echoing every input back with an '_out' suffix.
            def __init__(self_, fetch_tensors, feed_tensors):
                self_.fetch_tensors = fetch_tensors
                self_.feed_tensors = feed_tensors
            def __call__(self_, inputs: dict):
                return {k + '_out': v for k, v in inputs.items()}
        data = {'node1': {'out1': 10, 'out2': 20},
                'node2': {'out3': 30, 'out4': 40},
                'node3': {'out5': 50}}
        # node3 is not in feed_tensors, so it must be ignored by the call.
        data_for_predictor = {k: v for k, v in data.items()
                              if k in ['node1', 'node2']}
        if with_model_parameters:
            model_parameters = {"nucleotide1": {"parameter1": 10},
                                "nucleotide3": {"parameter2": 20,
                                                "parameter3": [30, 40]}}
        else:
            model_parameters = None
        data_for_predictor_flatten = nest_utils.flatten_nested_struct(
            data_for_predictor, flatten_lists=False)
        feed_tensors = nest_utils.flatten_nested_struct(
            data_for_predictor, flatten_lists=False)
        predictor_out_flatten_must = {
            k + '_out': v for k, v in data_for_predictor_flatten.items()}
        predictor_out_must = nest_utils.unflatten_dict_to_nested(
            predictor_out_flatten_must)
        if with_model_parameters:
            predictor_out_must.update(
                {"nucleotide1": {"parameter1_out": 10},
                 "nucleotide3": {"parameter2_out": 20,
                                 "parameter3_out": [30, 40]}})
        predictor = PredictorMock(feed_tensors=feed_tensors,
                                  fetch_tensors=None)
        if with_model_parameters:
            result = predict_using_predictor(predictor, inputs=data,
                                             model_parameters=model_parameters)
        else:
            result = predict_using_predictor(predictor, inputs=data)
        self.assertDictEqual(predictor_out_must, result)
    def test_predictor_from_load_config(self):
        # TODO: not implemented yet.
        pass
| audi/nucleus7 | tests/coordinator/predictors_test.py | predictors_test.py | py | 4,628 | python | en | code | 35 | github-code | 13 |
28598008840 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 14:22:19 2022
@author: Dartoon
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import glob, pickle
from galight.tools.astro_tools import plt_fits
from galight.data_process import DataProcess
# Load a saved galight fit for one target/band, show the host image after
# point-source subtraction, and redo the subtraction on the full FoV.
ID = 10004
band = 'I'
folder = 'fit_result/'
file_ = glob.glob(folder+"{0}-{1}.pkl".format(ID, band))
data_folder = '/Volumes/Seagate_Expansion_Drive/data_backup/Liu19_catalog/'
# data_folder = './'
if file_:  # truthiness instead of the `!= []` comparison
    file = file_[0]
    # context managers so neither the pickle nor the catalog handle leaks
    # (the original never closed either file)
    with open(file, 'rb') as pkl_f:
        fit_run = pickle.load(pkl_f)
    print(fit_run.final_result_galaxy)
    host_image = fit_run.flux_2d_out['data'] - fit_run.image_ps_list[0]
    plt_fits(host_image)
    # Recover the target id and band back from the result filename.
    ID = file.split('/')[1].split('-')[0]
    band = file.split('-')[1][0]
    fits_name = glob.glob(data_folder+'gfarm_data_download/{0}_HSC-{1}.fits'.format(ID,band)) + glob.glob(data_folder+
                'online_data_download/{0}/*cutout*-{1}-*.fits'.format(ID,band) )
    # Look up the target's RA/Dec in the plain-text catalog.
    with open("catalog.txt", "r") as f:
        string = f.read()
    lines = string.split('\n')  # Split in to \n
    line = [lines[i] for i in range(len(lines)) if lines[i].split(' ')[0] == ID][0]
    _, Ra, Dec = line.split(' ')
    QSO_RA, QSO_DEC = float(Ra), float(Dec)
    fitsFile = pyfits.open(fits_name[0])
    file_header0 = fitsFile[0].header
    zp = 27.0
    data_process = DataProcess(fov_image = fitsFile[1].data, fov_noise_map = fitsFile[3].data ** 0.5, target_pos = [QSO_RA, QSO_DEC],
                               pos_type = 'wcs', header = fitsFile[1].header,
                               rm_bkglight = True, if_plot=False, zp = zp)
    fov_image = data_process.fov_image
    fit_run.targets_subtraction(sub_qso_list=[0], org_fov_data=fov_image, header=data_process.header, target_pos=data_process.target_pos)
    fov_image_targets_sub = fit_run.fov_image_targets_sub  # Host image after sub PSF.
    plt_fits(fit_run.fov_image_targets_sub)
7740380885 | import numpy as np
import gym # import from Gym
class OpenAIEnvironment(object):
"""This class serves as an interface between the standardized environments of OpenAI Gym and the PS agent.
You must install the gym package and make sure that the programm can access it (ie provide the path) to run this code."""
## NEED TO SET UP A NUMBER OF METHODS FOR INTERNAL USE FIRST
    def decompose_variable_base(self, total, base_list):
        """Given an integer total (between 0 and np.prod(base_list)-1)
        and a list base_list of integers >=0, this function returns a list coefs of integers in the interval 0 <= coefs[i] < base_list[i] such that
        total = sum( [ coefs[position]*np.prod(base_list[:position]) for position in range(len(base_list)) ] ).
        If base_list = [B]*length, this yields the representation of total in the base B
        (with the i-th entry in the lists multiplying B**i, ie the ordering is the opposite of the usual way of writing multi-digit numbers)."""
        if total >= np.prod(base_list):
            raise Exception('Number too large for decomposition in given base.')
        remainder = total
        coefs=[]
        # Repeated divmod: peel off the least-significant digit each round.
        for position in range(len(base_list)-1):
            coefs.append( int( remainder % base_list[position] ) )
            remainder = (remainder - coefs[-1]) / base_list[position]
        coefs.append(int(remainder))
        return coefs
    def set_up_space_simple(self, space):
        """Given a space (percept or action space) of the type Discrete(single integer) or Box(tuple of integers) used in OpenAIGym,
        this function returns two flat lists and an integer:
        - cardinality runs over categories (ie independently discretized variables) and encodes how many discrete values the variable can take
        - discretization runs over simple spaces (each one is either Discrete or Box). For Discrete, it is None;
        for Box, it is a list of two arrays encoding offset and slope for translating discrete into continuous values.
        - space_type is 0 for Discrete and 1 for Box."""
        if isinstance(space, gym.spaces.Discrete):
            cardinality_list_local = [space.n]
            discretization_local = [None] # None - Discrete
            space_type = 0
        elif isinstance(space, gym.spaces.Box):
            #memo: make a global variable self.discretization_num_bins
            # Each Box dimension gets the same number of uniform bins;
            # self.discretization_num_bins is set in __init__.
            cardinality_list_local = space.low.size * [self.discretization_num_bins] #for Box(N1,N2,...), this yields a flat list of length N1*N2*... and all entries equal to discretization_num_bins
            discretization_local = [[space.low, (space.high-space.low)/self.discretization_num_bins]]
            space_type = 1
        return cardinality_list_local, discretization_local, space_type
    def set_up_space_generic(self, space):
        """Given a space (percept or action space) of the type Discrete, Box or Tuple,
        this function returns two flat lists and an integer:
        - cardinality runs over categories (ie independently discretized variables) and encodes how many discrete values the variable can take
        - discretization runs over simple spaces (each one is either Discrete or Box). For Discrete, it is None;
        for Box, it contains two arrays encoding offset and slope for translating discrete into continuous values.
        - space_type is 0 for a single Discrete, 1 for a single Box and 2 for Tuple."""
        if isinstance(space, gym.spaces.Discrete) or isinstance(space, gym.spaces.Box):
            cardinality_list, discretization_list, space_type = self.set_up_space_simple(space)
        elif isinstance(space, gym.spaces.Tuple):
            # Concatenate the flat descriptions of every factor space.
            cardinality_list = []
            discretization_list = []
            for factor_space in space.spaces:
                cardinality_list_local, discretization_local, space_type = self.set_up_space_simple(factor_space)
                cardinality_list += cardinality_list_local
                discretization_list += discretization_local
            space_type = 2
        return cardinality_list, discretization_list, space_type
    def observation_preprocess_simple(self, observation, discretize_observation): # preparing a discretized observation
        """Turns a raw observation from a space of type Discrete or Box
        into a one-dimensional list of integers."""
        if type(observation) == np.ndarray:
            observation_discretized = ((observation - discretize_observation[0])/discretize_observation[1]).astype(int) #element-wise: (raw observation - offset) / slope; casting as integers automatically acts like floor function
            observation_discretized[observation_discretized >= self.discretization_num_bins] = self.discretization_num_bins - 1
            #safeguard against the case where the value of an observation is exactly at the upper bound of the range, which would give a discretized value outside the allowed range(self.discretization_num_bins)
            observation_preprocessed = list(observation_discretized.flatten())
        else:
            # Discrete observation: already a single integer.
            observation_preprocessed = [observation]
        return observation_preprocessed
    def observation_preprocess_generic(self, observation): # preparing a discretized observation
        """Turns a raw observation, which may be an array or tuple of arbitrary shapes containing continuous values,
        into a one-dimensional list of integers."""
        if self.percept_space_type == 0 or self.percept_space_type == 1:
            observation_preprocessed = self.observation_preprocess_simple(observation, self.discretize_percepts_list[0])
        elif self.percept_space_type == 2:
            # Tuple space: concatenate the flattened factors in order.
            observation_preprocessed = []
            for tuple_index in range(len(self.env.observation_space.sample())):
                observation_preprocessed += self.observation_preprocess_simple(observation[tuple_index],self.discretize_percepts_list[tuple_index])
        return observation_preprocessed
    def action_postprocess_simple(self, action_flattened, discretize_action):
        """For a single simple space (Discrete or Box), given a flat list of integer action indices that runs over the categories in that space
        and the appropriate discretization information (None or [array_offset, array_slope], respectively),
        this method returns an integer (for Discrete) or an array of continuous variables (for Box)."""
        if discretize_action == None:
            action = action_flattened[0] #This takes just the integer, not a list
        else:
            action_reshaped = np.array(action_flattened).reshape(discretize_action[0].shape) #use offset array to get the right shape
            # Map each bin index to the midpoint of its continuous bin.
            action = discretize_action[0] + (action_reshaped + 0.5) * discretize_action[1] #offset + (discrete+1/2)*slope
        return action
    def action_postprocess_generic(self, action_index):
        """Given a single integer action index, this function unpacks it back into an integer (for a single Discrete space),
        an array of continuoues values (for a single Box space), or a tuple of several of those (in the case of a Tuple space)."""
        #decompose a single action index into indices for the different categories, whose cardinalities are given in self.num_actions_list
        action_flattened = self.decompose_variable_base(action_index, self.num_actions_list)
        if self.action_space_type == 0 or self.action_space_type == 1:
            action = self.action_postprocess_simple (action_flattened, self.discretize_actions_list[0])
        elif self.action_space_type == 2:
            category_index = 0 #runs over categories, viz elements of action_flattened
            action = () #This tuple will collect the actions (generally arrays of continuous variables) from all Discretes / Boxes
            for tuple_index in range(len(self.env.action_space.sample())):
                # Each factor space consumes as many entries of
                # action_flattened as it has categories.
                if self.discretize_actions_list[tuple_index] == None:
                    categories_in_subspace = 1
                else:
                    categories_in_subspace = len(self.discretize_actions_list[tuple_index][0])
                action_subset = action_flattened[category_index:category_index+categories_in_subspace]
                print(self.action_postprocess_simple(action_subset, self.discretize_actions_list[tuple_index]))
                action += tuple([self.action_postprocess_simple(action_subset, self.discretize_actions_list[tuple_index])])
                category_index += categories_in_subspace
        return action
    ## METHODS TO BE USED FROM OUTSIDE: INIT, RESET, MOVE
    def __init__(self, openai_env_name, discretization_num_bins=10):
        """Initialize an environment, specified by its name, given as a string.
        A list of existing environments can be found at https://gym.openai.com/envs/;
        examples include 'CartPole-v1' and 'MountainCar-v0'.
        Optional argument: discretization_num_bins, for the case of continuous percept spaces."""
        self.env = gym.make(openai_env_name)
        self.discretization_num_bins = discretization_num_bins
        # Precompute the flat percept/action descriptions used by the
        # pre/post-processing helpers above.
        self.num_percepts_list, self.discretize_percepts_list, self.percept_space_type = self.set_up_space_generic(self.env.observation_space)
        self.num_actions_list, self.discretize_actions_list, self.action_space_type = self.set_up_space_generic(self.env.action_space)
        self.num_actions = np.prod(self.num_actions_list)
def reset(self):
"""Reset environment and return (preprocessed) new percept."""
observation = self.env.reset()
return self.observation_preprocess_generic(observation)
def move(self, action):
"""Given an action (single integer index), this method uses action_postprocess to put it in the format
expected by the OpenAIGym environment, applies the action and returns the resulting new percept, reward and trial_finished.
The percept is again preprocessed into a one-dimensional list of integers."""
observation, reward, trial_finished, info = self.env.step(self.action_postprocess_generic(action))
discretized_observation = self.observation_preprocess_generic(observation)
return discretized_observation, reward, trial_finished
| EazyReal/Quantum-Machine-Learning-2020-fall | HW2/environments/env_openai.py | env_openai.py | py | 10,307 | python | en | code | 1 | github-code | 13 |
31515843768 | import copy
import hydra
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
from agent.dreamer import DreamerAgent, stop_gradient
import agent.dreamer_utils as common
class RND(nn.Module):
    """Random Network Distillation: the prediction error of a trained predictor
    against a frozen, randomly initialized target network serves as a novelty
    (exploration) signal."""
    def __init__(self,
                 obs_dim,
                 hidden_dim,
                 rnd_rep_dim,
                 encoder,
                 aug,
                 obs_shape,
                 obs_type,
                 clip_val=5.):
        super().__init__()
        self.clip_val = clip_val  # observations are clamped to +/- clip_val after normalization
        self.aug = aug
        # Channel-wise normalizer; affine=False keeps it a pure running-stats layer.
        if obs_type == "pixels":
            self.normalize_obs = nn.BatchNorm2d(obs_shape[0], affine=False)
        else:
            self.normalize_obs = nn.BatchNorm1d(obs_shape[0], affine=False)
        self.predictor = nn.Sequential(encoder, nn.Linear(obs_dim, hidden_dim),
                                       nn.ReLU(),
                                       nn.Linear(hidden_dim, hidden_dim),
                                       nn.ReLU(),
                                       nn.Linear(hidden_dim, rnd_rep_dim))
        # The target gets its own deep-copied encoder so predictor training
        # can never update it through shared parameters.
        self.target = nn.Sequential(copy.deepcopy(encoder),
                                    nn.Linear(obs_dim, hidden_dim), nn.ReLU(),
                                    nn.Linear(hidden_dim, hidden_dim),
                                    nn.ReLU(),
                                    nn.Linear(hidden_dim, rnd_rep_dim))
        # Target stays frozen for the lifetime of the module.
        for param in self.target.parameters():
            param.requires_grad = False
        self.apply(utils.weight_init)

    def forward(self, obs):
        """Return the per-sample squared prediction error, shape (..., 1)."""
        # was `type(obs) == dict`: isinstance is the idiomatic check and also
        # accepts dict subclasses (e.g. OrderedDict)
        if isinstance(obs, dict):
            img = obs['observation']
            img = self.aug(img)
            img = self.normalize_obs(img)
            img = torch.clamp(img, -self.clip_val, self.clip_val)
            obs['observation'] = img
        else:
            obs = self.aug(obs)
            obs = self.normalize_obs(obs)
            obs = torch.clamp(obs, -self.clip_val, self.clip_val)
        prediction, target = self.predictor(obs), self.target(obs)
        # detach() is redundant given requires_grad=False above, but documents intent
        prediction_error = torch.square(target.detach() - prediction).mean(
            dim=-1, keepdim=True)
        return prediction_error
class RNDDreamerAgent(DreamerAgent):
    """Dreamer agent whose task reward is replaced by an RND intrinsic reward
    (reward-free exploration)."""
    def __init__(self, rnd_rep_dim, rnd_scale, **kwargs):
        super().__init__(**kwargs)
        self.reward_free = True
        self.rnd_scale = rnd_scale  # scale factor applied to the normalized intrinsic reward
        self.obs_dim = self.wm.embed_dim
        self.hidden_dim = self.wm.embed_dim
        self.aug = nn.Identity()
        # NOTE(review): observation shape is hard-coded to 3x64x64 pixels — confirm
        # it matches cfg for non-default observation sizes.
        self.obs_shape = (3,64,64)
        self.obs_type = self.cfg.obs_type
        # RND gets its own copy of the world-model encoder so RND training
        # does not perturb the world model's weights.
        encoder = copy.deepcopy(self.wm.encoder)
        self.rnd = RND(self.obs_dim, self.hidden_dim, rnd_rep_dim,
                       encoder, self.aug, self.obs_shape,
                       self.obs_type).to(self.device)
        # Running mean/std used to normalize the intrinsic reward magnitude.
        self.intrinsic_reward_rms = utils.RMS(device=self.device)
        # optimizers
        self.rnd_opt = common.Optimizer('rnd', self.rnd.parameters(), **self.cfg.model_opt, use_amp=self._use_amp)
        self.rnd.train()
        self.requires_grad_(requires_grad=False)
    def update_rnd(self, obs, step):
        """One gradient step on the RND predictor; returns logging metrics."""
        metrics = dict()
        prediction_error = self.rnd(obs)
        loss = prediction_error.mean()
        metrics.update(self.rnd_opt(loss, self.rnd.parameters()))
        metrics['rnd_loss'] = loss.item()
        return metrics
    def compute_intr_reward(self, obs):
        """Intrinsic reward = scaled prediction error / running std (variance-normalized)."""
        prediction_error = self.rnd(obs)
        _, intr_reward_var = self.intrinsic_reward_rms(prediction_error)
        reward = self.rnd_scale * prediction_error / (
            torch.sqrt(intr_reward_var) + 1e-8)
        return reward
    def update(self, data, step):
        """Train RND on the batch, swap in intrinsic rewards, then run the
        standard world-model + behavior updates. Returns (state, metrics)."""
        metrics = {}
        B, T, _ = data['action'].shape
        obs_shape = data['observation'].shape[2:]
        if self.reward_free:
            temp_data = self.wm.preprocess(data)
            # Flatten (B, T, ...) -> (B*T, ...) so RND sees one observation per row.
            temp_data['observation'] = temp_data['observation'].reshape(B*T, *obs_shape)
            with common.RequiresGrad(self.rnd):
                with torch.cuda.amp.autocast(enabled=self._use_amp):
                    metrics.update(self.update_rnd(temp_data, step))
            # Reward computation must not build a graph: it only relabels data.
            with torch.no_grad():
                intr_reward = self.compute_intr_reward(temp_data).reshape(B, T, 1)
            data['reward'] = intr_reward
        state, outputs, mets = self.wm.update(data, state=None)
        metrics.update(mets)
        start = outputs['post']
        # Behavior learning starts from posterior states treated as constants.
        start = {k: stop_gradient(v) for k,v in start.items()}
        reward_fn = lambda seq: self.wm.heads['reward'](seq['feat']).mean #.mode()
        metrics.update(self._task_behavior.update(
            self.wm, start, data['is_terminal'], reward_fn))
        return state, metrics
12861904350 | print('Task 1 of 3: Function "Create your list"')
def fun_list():
    """fun_list makes the list of the length = n and of the item maximum value = m"""
    n = int(input("Input the number of items in your list:\n"))
    m = int(input("Input the maximum value of the item in your list:\n"))
    q = input("Do you want to set the values of the list items by yourself? (Y/N)\n")
    # range(m - n, m) is used only to allocate a list with exactly n slots;
    # every placeholder value is overwritten below.
    your_list = list(range(m - n, m))
    i = 0
    if q == 'Y' or q == 'y':
        while i < n:
            print("Input the value of item", i + 1, "not greater than maximum value:")
            your_list[i] = int(input())
            # Advance only when the entry respects the maximum; otherwise the
            # same slot is prompted for again (simple retry loop).
            if your_list[i] <= m:
                i = i + 1
    else:
        import random
        while i < n:
            your_list[i] = random.randint(0, m)
            i = i + 1
    print("Your list:", your_list)
    return your_list
L = fun_list()
print('\nTask 2 of 3: Function "Items greater than a given number"')
def fun_greater(your_list):
    """fun_greater creates new list from items that are greater than a given number"""
    lim = int(input("Input the lower limit (will not be included) of your new list item value (for example, 7):\n"))
    # Keep only items strictly above the threshold, preserving original order.
    new_list = [x for x in your_list if x > lim]
    print("Your new list:", new_list)
    return new_list
q = input("Do you want to use the list from the previous task? (Y/N)\n")
if q == 'Y' or q == 'y':
New_L = fun_greater(L)
else:
New_L = fun_greater(fun_list())
print('\nTask 3 of 3: Function "The list of the common items of two other lists"')
def fun_intersect(your_list_1, your_list_2):
    """fun_intersect creates a list of common items of two other lists"""
    intersect_list = []
    # Walk the first list in order; whenever a value from the second list matches
    # and is not yet collected, record it — so the output has no duplicates.
    for x in your_list_1:
        for y in your_list_2:
            if x == y and y not in intersect_list:
                intersect_list.append(y)
    print("The list of the common items:", intersect_list)
    return intersect_list
q = input("Do you want to use lists from the Task 1 and the Task 2? (Y/N)\n")
if q == 'Y' or q == 'y':
print("Your first list:", L)
print("Your second list:", New_L)
I_L = fun_intersect(L, New_L)
else:
print("Let's create the first list:")
L1 = fun_list()
print("\nLet's create the second list:")
L2 = fun_list()
print("\nYour first list:", L1)
print("Your second list:", L2)
I_L = fun_intersect(L1, L2) | nestelementary/Contacts | G117_Nesteruk_DZ_4_Function_3_in_1.py | G117_Nesteruk_DZ_4_Function_3_in_1.py | py | 2,439 | python | en | code | 0 | github-code | 13 |
17160653382 | # 접을 수 있느냐?
# 없다면 전에 접었던 것은 성공했느냐 했다면 return/ 못했다면 하나 더 지우는 것으로 가자
# 원소 두개 남긴다면 length C length-2 가 경우의 수, 길이는 2
def getResult(erase):
    # NOTE(review): this function looks unfinished — the final else path falls
    # through and returns None, it reads the module globals `length` and `count`,
    # and the call site below invokes getResult() with no argument (TypeError).
    # Confirm the intended algorithm before relying on it.
    global visited
    if length - erase == 2:
        # Two elements remain: 2 folds, C(length, 2) ways (per the header notes).
        return 2, length*(length-1)/2
    elif length <= 1:
        return 0, 1
    else:
        # Can it not be folded? (translated from Korean)
        if (length - erase)%2 == 1:
            if count != 0:
                return length-erase+1, count
            else:
                visited = [False for i in range(length)]
    # getResult
# Global working state shared with getResult().
visited = []
length = 0
count = 0
lst = []
T = int(input())  # number of test cases
for t in range(1, T+1):
    length = int(input())
    visited = [False for i in range(length)]
    count = 0
    input_str = input().split(' ')
    lst = [int(i) for i in input_str]
    # NOTE(review): getResult is defined with one required parameter, so this
    # call raises TypeError as written — presumably getResult(0) was intended
    # (see the commented-out call below); confirm.
    slong, num = getResult()
    print(f'#{t} {slong} {num}')
# getResult(0)
print(visited)
| jiyong1/problem-solving | swea/fold_sequence.py | fold_sequence.py | py | 976 | python | ko | code | 2 | github-code | 13 |
14386456385 | #
# @lc app=leetcode.cn id=78 lang=python3
#
# [78] 子集
#
from typing import List
count = 0
# @lc code=start
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return all subsets of nums (the power set), in DFS preorder."""
        res = []
        track = []  # the partial subset along the current DFS path

        def backtrack(track, nums, start):
            # Every node of the DFS tree is itself a valid subset.
            res.append(track[:])  # copy: track keeps mutating below
            for i in range(start, len(nums)):
                track.append(nums[i])
                # Only elements after index i may follow -> no duplicate subsets.
                backtrack(track, nums, i + 1)
                track.pop()

        # (fixed: removed the module-global `count` indent counter that existed
        # only for a commented-out debug print)
        backtrack(track, nums, 0)
        return res
# @lc code=end
Solution().subsets([1, 2, 3])
| largomst/leetcode-problem-solution | 78.子集.2.py | 78.子集.2.py | py | 753 | python | en | code | 0 | github-code | 13 |
69798141137 |
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
# Top-level URL routing: each app manages its own URLConf via include().
urlpatterns = [
    path('accounts/',include('accounts.urls')),
    path('eatin/',include('feed.urls')),
    path('cart/',include('myCart.urls')),
    path('cook/',include('cook.urls')),
    path('payment/',include('payment.urls')),
    path('admin/', admin.site.urls),
]

# In development (DEBUG=True) let Django itself serve uploaded media;
# in production the web server is expected to handle MEDIA_URL instead.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| limorB/EatIn | eatin/urls.py | urls.py | py | 506 | python | en | code | 0 | github-code | 13 |
19420522287 | import subprocess
import shutil
import numpy as np
import os
from dataclasses import dataclass
from multiprocessing import Pool
from rnamake_ens_gen import logger, wrapper
log = logger.get_logger("score")
@dataclass(frozen=True, order=True)
class Opts:
    # Options bundle for one simulation batch.
    # NOTE(review): wrapper.Opts() is evaluated once at class-definition time,
    # so every Opts() shares that single default instance. Safe only while
    # wrapper.Opts is immutable (it is declared frozen elsewhere) — a
    # field(default_factory=wrapper.Opts) would be the stricter form; confirm.
    wrapper_opts: wrapper.Opts = wrapper.Opts()
    output_num: int = 0  # suffix of the shared 'ires.<n>.csv' ensemble file
    runs: int = 1  # simulations averaged per construct
    threads: int = 1  # worker processes for the multiprocessing Pool
    build_files_path: str = ""  # directory holding per-topology build CSVs
def write_ensemble_file(ens_mems, output_num):
    """Write ensemble members to 'ires.<output_num>.csv' in the current directory.

    Each member must expose .path, .end_1 and .end_2; the end_2 column keeps a
    trailing comma to match the downstream reader's expected format.
    """
    # 'with' guarantees the handle is closed even if a write fails
    # (the original opened the file and leaked the handle on error).
    with open(f"ires.{output_num}.csv", "w") as f:
        f.write("path,end_0,end_1,end_2\n")
        for ens_mem in ens_mems:
            f.write(f"{ens_mem.path},{ens_mem.end_1},{ens_mem.end_2},\n")
def simulate(df, wrapper, opts):
    """Score each construct row in df with the RNAMake wrapper.

    Runs each sequence `opts.runs` times and averages; returns a list of
    scores aligned with df's rows. If any wrapper call fails (returns -1)
    the whole batch is reported as zeros.
    """
    scores = []
    for i, row in df.iterrows():
        # Drops 8 characters from each end — presumably constant flanking
        # sequence; TODO confirm the flank length with the data pipeline.
        seq = row["sequence"][8:-8]
        build_file = f"{opts.build_files_path}/{row['topology']}.csv"
        ens_file = os.path.abspath(f"ires.{opts.output_num}.csv")
        avg = 0
        # NOTE(review): this inner `i` shadows the row index from iterrows();
        # harmless here since the outer i is not used afterwards.
        for i in range(opts.runs):
            score = wrapper.run(seq, build_file, ens_file, opts.wrapper_opts)
            if score == -1:  # wrapper signals failure with -1
                log.warn('wrapper returned an error!')
                return np.zeros(len(df))
            avg += score
        # NOTE(review): this is (avg / runs) + 1, not avg / (runs + 1) —
        # presumably a +1 offset is intended; confirm.
        scores.append(avg / opts.runs + 1)
    return scores
def simulate_single(df, wrapper, opts):
    """Score only df's first row; other entries of the returned array stay 0.

    Same wrapper protocol as simulate(): a -1 from the wrapper aborts and
    yields an all-zero array.
    """
    scores = np.zeros(len(df))
    row = df.iloc[0]
    # Drops 8 characters from each end — presumably constant flanks; confirm.
    seq = row["sequence"][8:-8]
    build_file = f"{opts.build_files_path}/{row['topology']}.csv"
    ens_file = os.path.abspath(f"ires.{opts.output_num}.csv")
    avg = 0
    for i in range(opts.runs):
        score = wrapper.run(seq, build_file, ens_file, opts.wrapper_opts)
        if score == -1:  # wrapper signals failure with -1
            log.warn('wrapper returned an error!')
            return np.zeros(len(df))
        avg += score
    # (avg / runs) + 1, matching the offset used in simulate()
    scores[0] = avg / opts.runs + 1
    return scores
class Simulater(object):
    """Scores a dataframe of constructs against a candidate ensemble,
    optionally fanning out across a multiprocessing Pool."""
    def __init__(self, construct_df, opts: Opts):
        self.opts = opts
        self.construct_df = construct_df
        # Fail fast if the dataframe is missing required columns.
        # NOTE(review): exit() inside a library class kills the whole process;
        # raising ValueError would be the conventional choice — confirm intent.
        for col in ["sequence", "topology", "exp_score"]:
            if col not in self.construct_df:
                log.error(f"{col} must be included in construct dataframe")
                exit()
    def setup(self):
        """Create the wrapper and the worker pool. Must be called before score()."""
        self.wrapper = wrapper.BuildMotifGraphWrapper()
        self.wrapper.setup()
        # NOTE(review): the Pool is never closed/joined — consider terminating
        # it when the Simulater is done.
        self.p = Pool(processes=self.opts.threads)
    def score(self, ens_members):
        """Write the shared ensemble file, then simulate every construct.

        Returns one score per dataframe row (see simulate()).
        """
        write_ensemble_file(ens_members, self.opts.output_num)
        #init_scores = simulate_single(self.construct_df, self.wrapper, self.opts)
        #if init_scores[0] < 200:
        # return init_scores
        if self.opts.threads == 1:
            scores = simulate(self.construct_df, self.wrapper, self.opts)
        else:
            # Split the frame into one chunk per worker; each worker gets the
            # same wrapper/opts references via starmap.
            wrappers = [self.wrapper for _ in range(self.opts.threads)]
            opts = [self.opts for _ in range(self.opts.threads)]
            dfs = np.array_split(self.construct_df, self.opts.threads)
            score_arrays = self.p.starmap(simulate, zip(dfs, wrappers, opts))
            scores = []
            for score_a in score_arrays:
                scores.extend(score_a)
        return scores
# print(scores)
| jyesselm/rnamake_ens_gen | rnamake_ens_gen/simulate.py | simulate.py | py | 3,109 | python | en | code | 0 | github-code | 13 |
35386366491 | from distutils.core import setup, Extension
from sys import platform
import os
libraries = [];
# Pick platform-specific link libraries / linker flags for GLFW + OpenGL.
if platform == 'darwin':
    libraries.append('glfw');
    os.environ['LDFLAGS'] = '-framework Cocoa -framework OpenGL -framework IOKit -framework CoreFoundation -framework CoreVideo';
elif platform == 'win32':
    libraries.append('glfw3');
    libraries.append('opengl32');
elif platform == 'linux':
    # NOTE(review): sys.platform is 'linux' on Python 3 but was 'linux2' on
    # Python 2 — this branch assumes Python 3; confirm supported interpreters.
    libraries.append('glfw3');
    libraries.append('GL');
# C extension bundling gl3w, nanovg and an embedded font alongside PyGeom2.c.
PyGeom2 = Extension('PyGeom2',
                    include_dirs = ['gl3w/include','/usr/local/include'],
                    libraries = libraries,
                    library_dirs = ['/usr/local/lib'],
                    sources = ['PyGeom2.c', 'gl3w/src/gl3w.c', 'nanovg/src/nanovg.c', 'fonts/dejavu_sans_mono.c']
                    )
setup (name = 'PyGeom2',
       version = '1.0',
       description = 'Interactive 2D geometry visualization tool',
       ext_modules = [PyGeom2])
1011948550 | from django.db import models
class Course(models.Model):
    """
    Regular (non-degree) course.
    """
    title = models.CharField(max_length=32)
class DegreeCourse(models.Model):
    """
    Degree course.
    """
    title = models.CharField(max_length=32)
class PricePolicy(models.Model):
    """Price policy, relatable to either course type via a hand-rolled generic
    relation: table_name stores the target table, object_id the row's PK."""
    price = models.IntegerField()
    period = models.IntegerField()
    # BUG FIX: CharField requires max_length — without it Django's system check
    # fails (fields.E120) and migrations cannot be generated.
    table_name = models.CharField(verbose_name='关联的表名称', max_length=32)
    object_id = models.CharField(verbose_name='关联表中的数据行的ID', max_length=32)
| FatPuffer/Django-ContentType | contenttype/app01/models_bak.py | models_bak.py | py | 505 | python | en | code | 0 | github-code | 13 |
class Query:
    """One parsed phone-book query: 'add <number> <name>', 'del <number>' or 'find <number>'."""

    def __init__(self, query):
        self.type = query[0]
        self.number = int(query[1])
        if self.type == 'add':
            # Only 'add' queries carry a contact name; other types leave the
            # attribute unset.
            self.name = query[2]
class HashTable:
    """Separate-chaining hash table mapping keys to values.

    Keys are hashed with a polynomial rolling hash (base 263) of their string
    form, reduced modulo a large prime and the bucket count.
    """

    def __init__(self, size):
        self.size = size
        self.table = [[] for _ in range(self.size)]
        self.prime = 10000001

    def _hash_func(self, key):
        """Polynomial hash of the string key, reduced to a bucket index."""
        hashed = 0
        for ch in key:
            hashed = (hashed * 263 + ord(ch)) % self.prime
        return hashed % self.size

    def _bucket(self, key):
        """Return the chain (list of (key, value) pairs) that key maps to."""
        return self.table[self._hash_func(str(key))]

    def add(self, key, value):
        """Insert key -> value, overwriting any existing entry for key."""
        bucket = self._bucket(key)
        for idx, (stored_key, _stored_value) in enumerate(bucket):
            if stored_key == key:
                bucket[idx] = (key, value)
                return
        bucket.append((key, value))

    def remove(self, key):
        """Delete the entry for key if present; do nothing otherwise."""
        bucket = self._bucket(key)
        for idx, (stored_key, _stored_value) in enumerate(bucket):
            if stored_key == key:
                del bucket[idx]
                return

    def find(self, key):
        """Return the value stored under key, or None when absent."""
        for stored_key, stored_value in self._bucket(key):
            if stored_key == key:
                return stored_value
        return None
def read_queries():
    """Read n from stdin, then n whitespace-separated queries, one per line."""
    n = int(input())
    # input() already returns str, so the original's str(input()) was redundant
    return [Query(input().split()) for _ in range(n)]
def write_responses(result):
    """Print one line per lookup result; a None entry renders as 'not found'."""
    lines = ["not found" if r is None else str(r) for r in result]
    print('\n'.join(lines))
def process_queries(queries):
    """Run the queries in order; return the answers of the 'find' queries."""
    contacts = HashTable(100000)
    answers = []
    for q in queries:
        if q.type == 'add':
            contacts.add(q.number, q.name)
        elif q.type == 'del':
            contacts.remove(q.number)
        else:  # 'find'
            answers.append(contacts.find(q.number))
    return answers
if __name__ == '__main__':
    # stdin -> parsed queries -> hash-table processing -> stdout
    write_responses(process_queries(read_queries()))
| DA-testa/phone-book-221RDB020 | main.py | main.py | py | 1,847 | python | en | code | 0 | github-code | 13 |
21039192316 | import io
import requests
import json
from PIL import Image
api_url = "API_URL"  # placeholder: set to the deployed endpoint before running
payload = {"img_url":"https://raw.githubusercontent.com/pjreddie/darknet/master/data/person.jpg",
          "search_query":"horse"}
# POST the image URL plus the text query; the service answers with a bounding box.
response = requests.post(api_url, data = json.dumps(payload))
if response.status_code == 200:
    # NOTE(review): box is indexed as (left, top, right, bottom) below —
    # presumably [x_min, y_min, x_max, y_max]; confirm against the service.
    bbox = response.json()['bbox']
    img_bytes = requests.get(payload['img_url']).content
    img = Image.open(io.BytesIO(img_bytes))
    img.show()
    crop = img.crop((bbox[0], bbox[1], bbox[2], bbox[3]))
    crop.show()
else:
    print(response.status_code)
| bismillahkani/AWS-Serverless-AI | SAM/clip_crop/app/testapi.py | testapi.py | py | 577 | python | en | code | 1 | github-code | 13 |
def solve_by_recuisive(weights, values, bagsize):
    """Solve 0/1 knapsack by brute-force recursion over items [0, len-1].

    (Name kept as-is, typo included, because callers use it.)
    """
    last = len(weights) - 1
    return recursive(weights, values, 0, last, bagsize)
def recursive(weights, values, left, right, rest):
    """Best total value using items left..right with remaining capacity `rest`.

    :param weights: item weights
    :param values: item values
    :param left: index of the first still-undecided item
    :param right: index of the last item
    :param rest: remaining knapsack capacity
    :return: maximum achievable value
    """
    # Base cases: no items left to decide, or no capacity remaining.
    if left > right or rest <= 0:
        return 0
    # Option A: skip item `left`.
    skip_value = recursive(weights, values, left + 1, right, rest)
    # Option B: take item `left`, but only if it still fits.
    if weights[left] <= rest:
        take_value = values[left] + recursive(weights, values, left + 1, right, rest - weights[left])
        return max(take_value, skip_value)
    return skip_value
def solve_dp(weights, values, bag):
    """Solve 0/1 knapsack bottom-up.

    dp[i][j] = best value using items i..n-1 with capacity j; the answer is
    dp[0][bag]. Self-contained: an extra all-zero sentinel row replaces the
    module-level `pick` bounds helper, and the table is built with a
    comprehension instead of nested append loops. Also returns 0 (instead of
    crashing) for an empty item list.
    """
    n = len(weights)
    # (n+1) x (bag+1); row n is the base case "no items left -> value 0".
    dp_table = [[0] * (bag + 1) for _ in range(n + 1)]
    # Fill bottom-up (rows n-1 .. 0), left to right over capacities.
    for i in range(n - 1, -1, -1):
        for j in range(bag + 1):
            skip_value = dp_table[i + 1][j]
            if j >= weights[i]:
                take_value = values[i] + dp_table[i + 1][j - weights[i]]
                dp_table[i][j] = max(take_value, skip_value)
            else:
                dp_table[i][j] = skip_value
    return dp_table[0][bag]
def pick(dp, i, j):
    """Bounds-checked table lookup: dp[i][j], or 0 when row i is past the end."""
    return dp[i][j] if i < len(dp) else 0
if __name__ == '__main__':
    weights = [3, 2, 4, 7, 3, 1, 7]
    values = [5, 6, 3, 19, 12, 4, 2]
    # NOTE(review): the two assignments below discard the 7-item instance
    # above — presumably leftover from debugging; confirm which instance
    # was intended before shipping.
    weights = [5]
    values = [5]
    bag = 15
    # Both solvers should agree on the optimum.
    print(solve_by_recuisive(weights, values, bag))
    print(solve_dp(weights, values, bag))
24417006396 | import sys
class Redirection(object):
    """Tee-like stream wrapper: every line read from `input` is echoed to
    `output` before being returned."""

    def __init__(self, in_obj, out_obj):
        self.input = in_obj
        self.output = out_obj

    def read_line(self):
        """Read one line from the input stream, mirror it to output, return it."""
        line = self.input.readline()
        self.output.write(line)
        return line
if __name__ == '__main__':
    # When stdin is piped rather than a terminal, wrap it so everything read
    # is echoed to stdout — the prompts and piped answers then interleave
    # the way an interactive session would look.
    if not sys.stdin.isatty():
        sys.stdin = Redirection(in_obj=sys.stdin, out_obj=sys.stdout)
    a = input('Enter a string: ')
    b = input('Enter another string: ')
    print ('Entered strings are: ', repr(a), 'and', repr(b))
| PacktPublishing/Mastering-Python-Scripting-for-System-Administrators- | Chapter04/redirection.py | redirection.py | py | 461 | python | en | code | 178 | github-code | 13 |
32485454353 | import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import pandas as pd
from config import CLIENT_ID, CLIENT_SECRET
client_credentials_manager = SpotifyClientCredentials(client_id=CLIENT_ID, client_secret=CLIENT_SECRET)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
# user_id = "1218881629"
def search_spotify(query: str = None):
    """Search Spotify for tracks matching `query`; return the raw track items."""
    # NOTE(review): spotipy URL-encodes query parameters itself, so replacing
    # spaces with '+' here is presumably unnecessary — confirm before changing.
    q = query.replace(" ", "+")
    # NOTE(review): the Spotify Web API caps `limit` at 50 for /search, so
    # limit=100 may be rejected by the server — verify against the API docs.
    results = sp.search(q, limit=100)
    r = results["tracks"]["items"]
    return r
def get_new_released_albums(country="US"):
    """Return the list of newly released album objects for the given country code."""
    response = sp.new_releases(country=country)
    return response["albums"]["items"]
def get_audio_features_for_album(album_id):
    """Build a DataFrame of Spotify audio features, one row per album track,
    with a human-readable 'name' column and duration in minutes."""
    df = pd.DataFrame(
        columns=[
            "id",
            "danceability",
            "energy",
            "key",
            "loudness",
            "speechiness",
            "acousticness",
            "instrumentalness",
            "liveness",
            "valence",
            "tempo",
            "duration_ms",
        ]
    )
    album_tracks = sp.album_tracks(album_id)
    track_id_to_name = {track["id"]: track["name"] for track in album_tracks["items"]}
    track_ids = [track["id"] for track in album_tracks["items"]]
    feature_objects_array = sp.audio_features(tracks=track_ids)
    # Copy each feature dict into the frame. At this point df's columns are
    # exactly the names declared above, so df.keys() is a safe iteration set
    # ('name'/'duration_min' are only added afterwards).
    for i in range(0, len(feature_objects_array)):
        feature = feature_objects_array[i]
        # audio_features may return None for tracks without analysis; such
        # rows are skipped entirely (no row i is created for them).
        if feature:
            for key in df.keys():
                df.loc[i, key] = feature[key]
    df["name"] = df["id"].apply(lambda x: track_id_to_name[x])
    df["duration_min"] = df["duration_ms"] / 1000 / 60
    df = df.drop(columns=["duration_ms"])
    return df
def get_avg_album_scores(album_tuples):
    """Average per-track audio features for each (feature_df, album_name) tuple.

    Returns a DataFrame with one row per album: the album name plus the mean
    of every numeric feature column.
    """
    numeric_columns = [
        "danceability",
        "energy",
        "loudness",
        "speechiness",
        "acousticness",
        "instrumentalness",
        "liveness",
        "valence",
        "tempo",
        "duration_min",
    ]
    df = pd.DataFrame(columns=["album_name"] + numeric_columns)
    for row_index, (feature_df, album_name) in enumerate(album_tuples):
        df.loc[row_index, "album_name"] = album_name
        for column in numeric_columns:
            df.loc[row_index, column] = feature_df[column].mean()
    return df
| jmoussa/spotify-visualizer | utils.py | utils.py | py | 2,483 | python | en | code | 0 | github-code | 13 |
18218834827 | # Tuple is a data type made up of collection of items
# !!!!!!!! Tuples are immutable !!!!!!!!
# There are 2 ways to declare a tuple
# Declaring a tuple with tuple() function implies that the argument is an iterable one (list or string)
# Tuple Declaration is as follows
tuple_1 = ("a", "b", "c", "d", "e")  # homogeneous items
tuple_2 = (2.718, False, [1, 2, 3])  # mixed types are fine, including a mutable list
tuple_3 = (1, 1, 0, 0, 0)  # duplicates are allowed
tuple_5 = tuple([3.14, 2.205, 10])  # tuple() from a list
tuple_6 = tuple("edcba")  # tuple() from a string -> one character per item
print(tuple_5)
print(tuple_6)
# Tuples are hashable, so they can serve as dictionary keys.
occupations = {("Angus", "Young"): "musician",
               ("Narendra", "Modi"): "prime minister",
               ("Richard", "Branson"): "entrepreneur",
               ("Quentin", "Tarantino"): "filmmaker"
               }
print(type(occupations))
# Iterating through a tuple is the same as iterating through a list
major_cities = ("Tokyo", "London", "New York", "Shangai", "Mumbay")
for element in major_cities:
    print(element)
counter = 0
while counter < len(major_cities):
    print(major_cities[counter])
    counter += 1
# How to step in a tuple: slicing with a stride
ints = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
print(ints[::3]) # stride length of 3: every third item from the start
print(ints[1::2]) # even values only
print(ints[7::-1]) # backwards from 8
print(ints[8::-2]) # odd values only, backwards from 9
| eagledeath85/Python_Courses | python_for_beginners/tuples.py | tuples.py | py | 1,225 | python | en | code | 0 | github-code | 13 |
19481071488 | import sys
from cx_Freeze import setup, Executable
base = None
# 'Win32GUI' makes cx_Freeze build a GUI executable on Windows
# (no console window opens when the program runs).
if sys.platform == "win32":
    base = "Win32GUI"
setup(
        name = "xtsi",
        version = "1",
        description = "xtsi",
        options = {"build_exe" : {"includes" : "atexit" }},
        executables = [Executable("xtsi.py", base = base)])
| dxe4/xls_to_mysql_insert | setup.py | setup.py | py | 319 | python | en | code | 4 | github-code | 13 |
21271882607 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from proj.models import XssProject, XssItem
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import auth
# Create your views here.
@login_required
def index(request):
    """List the logged-in user's XSS projects, newest first."""
    project_list = request.user.xssproject_set.order_by('-create_date')
    return render(request, 'proj/index.html', {'error': False, 'project_list': project_list})
@login_required
def detail(request, proj_id):
    """Show one project and its received XSS items (newest item first).

    Filtering get_object_or_404 by owner=request.user doubles as the access
    check: other users' project IDs yield 404 rather than a data leak.
    """
    project_list = request.user.xssproject_set.order_by('-create_date')
    selected_project = get_object_or_404(XssProject, owner = request.user, pk = proj_id)
    item_list = selected_project.xssitem_set.order_by('-recv_date')
    return render(
        request,
        'proj/detail.html',
        {
            'error': False,
            'project_list': project_list,
            'proj_id': proj_id,
            'item_list': item_list
        }
    )
| xiaoxiaoleo/xsstry | proj/views.py | views.py | py | 1,003 | python | en | code | 2 | github-code | 13 |
70336819858 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
import re
# The master toctree document.
master_doc = 'index'
# -- Project information -----------------------------------------------------
project = 'FLIRT'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
def find_version():
    """Extract __version__ from flirt/__init__.py without importing the package."""
    # Use a context manager so the file handle is closed deterministically
    # (the original left the open() handle dangling).
    with open('../flirt/__init__.py') as fh:
        source = fh.read()
    result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__"), source)
    return result.group(1)
version = find_version()
# The full version, including alpha/beta/rc tags.
release = version
# General information about the project.
def find_author():
    """Extract __author__ from flirt/__init__.py without importing the package."""
    # Context manager closes the handle (the original leaked an open() handle).
    with open('../flirt/__init__.py') as fh:
        source = fh.read()
    result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__author__"), source)
    return str(result.group(1))
author = find_author()
copyright = "2020, " + author
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_rtd_theme',
'm2r2'
]
autodoc_default_options = {
'undoc-members': True,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai' # 'default', 'monokai'
# nbsphinx_codecell_lexer = 'default' # Doesn't do anything :/
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
# 'logo_only': True,
'display_version': False,
'style_nav_header_background': '#343131',
}
html_logo = 'img/flirt-white.png'
html_favicon = 'img/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'flirtdoc'
# -- Options for LaTeX output ------------------------------------------
pdf_title = u'FLIRT'
author_field = u'Official Documentation'
latex_elements = {
'sphinxsetup': r"""
VerbatimColor={RGB}{38,50,56},
verbatimwithframe=false,
"""
# Background color of chunks
# '
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc,
'flirt.tex',
pdf_title,
author_field,
'manual'),
]
# Other
add_module_names = True # so functions aren’t prepended with the name of the package/module
add_function_parentheses = True # to ensure that parentheses are added to the end of all function names
# Member kinds whose docstrings are checked during the build.
members_to_watch = ['function', ]

def warn_undocumented_members(app, what, name, obj, options, lines):
    """Sphinx autodoc-process-docstring hook: flag watched members with no docstring."""
    # BUG FIX: the original tested `len(lines) is 0` — `is` compares object
    # identity, not value; comparing to an int literal that way is unreliable
    # (and a SyntaxWarning on modern CPython). An empty list is simply falsy.
    if what in members_to_watch and not lines:
        # warn to terminal during build
        print("Warning: " + what + " is undocumented: " + name)
        # and annotate the rendered output so the omission is visible
        lines.append(".. Warning:: %s '%s' is undocumented" % (what, name))
def setup(app):
    # Sphinx extension entry point: run the undocumented-member check on
    # every docstring autodoc processes.
    app.connect('autodoc-process-docstring', warn_undocumented_members)
| im-ethz/flirt | docs/conf.py | conf.py | py | 5,329 | python | en | code | 48 | github-code | 13 |
17055329214 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LinkTypeResult(object):
    """Model object for one link-type record.

    Serialization mirrors the generated-SDK wire format: to_alipay_dict()
    emits only truthy fields (so a 0/None/'' value is omitted, as in the
    original), recursing into values that are themselves models.
    """

    # All serializable fields, in wire order.
    FIELDS = ("level", "link_type_code", "link_type_name",
              "parent_code", "state", "tnt_inst_id")

    def __init__(self):
        for field_name in self.FIELDS:
            setattr(self, "_" + field_name, None)

    def _define_property(field_name):
        # Class-body helper that builds the conventional _<name> property pair.
        def getter(self):
            return getattr(self, "_" + field_name)

        def setter(self, value):
            setattr(self, "_" + field_name, value)

        return property(getter, setter)

    level = _define_property("level")
    link_type_code = _define_property("link_type_code")
    link_type_name = _define_property("link_type_name")
    parent_code = _define_property("parent_code")
    state = _define_property("state")
    tnt_inst_id = _define_property("tnt_inst_id")
    del _define_property  # class-body helper only; not part of the API

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields to a plain dict."""
        params = dict()
        for field_name in self.FIELDS:
            value = getattr(self, field_name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field_name] = value.to_alipay_dict()
                else:
                    params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a LinkTypeResult from a dict; returns None for falsy input."""
        if not d:
            return None
        o = LinkTypeResult()
        for field_name in LinkTypeResult.FIELDS:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/LinkTypeResult.py | LinkTypeResult.py | py | 3,380 | python | en | code | 241 | github-code | 13 |
23951364709 | #With the use of Sparse matrix (sparse matrix is implemented by using dictionary)
from math import *
import numpy as np
from datetime import datetime
def loadMovieLens(path='.', file='/Train.dat'):
# Load data
prefs={}
for line in open(path+file):
(user,movieid,rating)=line.split(':')
prefs.setdefault(int(user)-1,{})
prefs[int(user)-1][int(movieid)-1]=float(rating)
return prefs
def loadGenre(path='.', file='/genrelist.dat'):
G=np.zeros((3952,18))
for line in open(path+file):
(movieid,genre)=line.split('::')
if(movieid != 'movieid') :
l=[]
l=genre.split(' ')
for i in range(18) :
l[i]=int(l[i])
G[int(movieid)-1]=l
return G
def matrix_factorization(R, genre, K, N, M, steps=50, alpha=0.0002, beta=0.02):
P = np.random.rand(N,K)
Q = np.random.rand(M,K)
G = np.random.rand(18,K)
print("makePQG_time=", datetime.now()-st)
# print(P)
# print(Q)
Q = Q.T
for step in range(steps):
e = 0
for i in R :
for j in R[i] :
count=0
for i1 in range(18) :
if genre[j][i1]==1 :
count+=1;
li=np.dot(genre[j],G)
for i1 in range(K) :
li[i1]=li[i1]/count; #average for jth item
eij=R[i][j] - np.dot(P[i,:],Q[:,j]+li.T)
for k in range(K) :
#P[i][k] = 5+5+5
#Q[k][j] = 5+5+5
P[i][k] = P[i][k] + alpha * (2 * eij * (Q[k][j] + li[k]) - beta * P[i][k])
Q[k][j] = Q[k][j] + alpha * (2 * eij * P[i][k] - beta * Q[k][j])
for i1 in range(18) :
if genre[j][i1]==1 :
G[i1][k] = G[i1][k] + alpha * (2 * eij * P[i][k] - beta * Q[k][j])
return P, Q.T, G
if __name__=='__main__':
st=datetime.now()
trainPrefs = loadMovieLens(file="/Train.dat")
testPrefs = loadMovieLens(file='/Test.dat')
genre = loadGenre(file='/genrelist.dat')
# for i in range(3952):
# for j in range(18) :
# print(genre[i,j],end=' ')
# print("\n")
print("load_time=", datetime.now()-st)
print("makeR_time=", datetime.now()-st)
N=6040
M=3952
K=5
steps=5
nP, nQ, nG = matrix_factorization(trainPrefs, genre, K, N, M, steps, alpha=0.0002, beta=0.02)
print("MF_time=", datetime.now()-st)
# print(nP)
# print(nQ)
total_err=[]
#calculating error (MAE)
for i in testPrefs :
for j in testPrefs[i] :
count=0
for i1 in range(18) :
if genre[j][i1]==1 :
count+=1;
li=np.dot(genre[j],nG)
for i1 in range(K) :
li[i1]=li[i1]/count; #average for jth item
diff=fabs(np.dot(nP[i,:],nQ.T[:,j]+li.T)-testPrefs[i][j])
total_err.append(diff)
print(K,steps)
print("MAE=%lf" % (sum(total_err)/len(total_err)))
print("time=", datetime.now()-st)
#MAE=0.933934,(k=1, steps=1)
#MAE=0.968972, time=0:00:44.945159 (K=10, steps=5)
| jaydeep1997/Cross_Domain_Recommender_System | matrix_factorization/single_domain/100K/3single_domain_genre/5testing_sparse_1m_genre.py | 5testing_sparse_1m_genre.py | py | 3,293 | python | en | code | 3 | github-code | 13 |
29423220139 | import json
from models import User, NewsItem, EmailSubscription
from __main__ import app
@app.route('/news/<int:news_item_id>/subscribe', methods=['GET'])
def subscribe_to_news_item(news_item_id):
member = User.get_signed_in_user()
subscribed = 'n/a'
if member is None:
status = '401'
else:
news_item = NewsItem.query.filter_by(id=news_item_id).first()
if news_item is None:
status = '404'
if member and news_item:
subscription = EmailSubscription.subscribe(member=member, news_item=news_item)
if subscription:
status = '200'
subscribed = 'y'
return json.dumps({
'status': status,
'subscribed': subscribed
})
| javilm/msx-center | routes/subscriptions/subscribe_to_news_item.py | subscribe_to_news_item.py | py | 648 | python | en | code | 0 | github-code | 13 |
72343775058 | import os
import numpy as np
import scipy.sparse as sp
import random
from copy import deepcopy
from collections import OrderedDict
from random import shuffle
import torch
import networkx as nx
import dgl
from torch.nn import functional as F
import time
import datetime
import argparse
from collections import defaultdict
import gc
from termcolor import colored, cprint
import itertools
from load_data import load
from encoder import Encoder
from decoder import Decoder
from train import train_unshared, train_shared
from tool import bpr_loss, l1_loss, BCELoss, early_stopping
from evaluation import evaluate
from tensorboardX import SummaryWriter
writer = SummaryWriter() #使用时间命名保存在runs/文件夹下
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs of training')
parser.add_argument('--batchSize', type=int, default=64, help='batch size of users for cross-domain training')
parser.add_argument('--batch_links', type=int, default=128, help='batch size of links for single-domain link prediction training')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--decay', type=float, default=0.0, help='weight deacy of adam optimizer')
parser.add_argument('--bpr_reg', type=float, default=5e-4, help='weight deacy of adam optimizer')
parser.add_argument('--hidden', nargs='+', type=int, default=[64,64], help='hidden layers')
parser.add_argument('--n_layers', type=int, default=2, help='number of hidden layers')
parser.add_argument('--out_dim_encode', type=int, default=32, help='dimension of output of encoder')
parser.add_argument('--lambda_l1', type=float, default=1.0, help='weight of L1 loss')
parser.add_argument('--lambda_gan', type=float, default=1.0, help='weight of GAN loss')
parser.add_argument('--lambda_hsic', type=float, default=1.0, help='weight of hsic loss')
parser.add_argument('--beta', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--topk', type=list, default=[5,10,20], help='Evaluation topk results.')
parser.add_argument('--activation_gcn', type=str, default='leaky_relu_', help='Activation function of GCN.')
parser.add_argument('--activation_mlp', type=str, default='tanh', help='Activation function of mlp layers.')
parser.add_argument('--user_emb_mode', type=str, default='cml', help='mode of initial user embedding.')
parser.add_argument('--cold_ratio', type=float, default=0.5, help='Cold ratio.')
parser.add_argument('--source', type=str, default='Book', help='source domain name.')
parser.add_argument('--target', type=str, default='CD', help='target domain name.')
parser.add_argument('--test', action='store_true', help='Testing model')
parser.add_argument('--normalize', action='store_true', help='normalize initial embedding')
parser.add_argument('--load_model', action='store_true', help='Load saved model')
parser.add_argument('--device', type=str, default='0', help='use GPU computation')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.device
if __name__ == '__main__':
if args.source == 'Book':
path = 'path to back2cd'
else:
path = 'path to cd2movie'
config = {
'user_file_1':path + 'user_ids_' + args.source + '.csv', #
'item_file_1':path + 'item_ids_' + args.source + '.csv',
'user_file_2':path + 'user_ids_' + args.target + '.csv',
'item_file_2':path + 'item_ids_' + args.target + '.csv',
'user_item_npy_1':path + 'user_item_dict_' + args.source + '.npy', # dict, key是user id, value 是该user 连接的 positive items (CD domain, source domain)
'user_item_npy_2':path + 'user_item_dict_' + args.target + '.npy', # dict , key是user id, value 是该user 连接的 positive items (Movie domain, target domain)
'user_item_pair_npy_1':path + 'user_item_pair_dict_' + args.source + '.npy', # dict, key 是user id,value是该user和其所有 positive items组成的pair,例如: user1:[[user1,pos_item1],[user1,pos_item2]]
'user_item_pair_npy_2':path + 'user_item_pair_dict_' + args.target + '.npy',
'cold_user_ids_file':path + 'cold_user_ids_' + args.source + '_and_' + args.target + '_cold' + str(args.cold_ratio) +'.csv', # 从overlapped user中选出部分用于测试集中的冷启动用户
'warm_user_ids_file':path + 'warm_user_ids_' + args.source + '_and_' + args.target + '_cold' + str(args.cold_ratio) +'.csv', # overlapped user 中去除cold-start user之后的user
'source_name':args.source,
'target_name':args.target
}
# 准备数据
# 数据编号顺序: shared warm users -> shared cold start users -> unshared users -> items
start = datetime.datetime.now()
# load source domain data, 这里的trainX 和 valX用于单个domain的prediction,trainX[:,0]=userIdx, trainX[:,1]=posIdx, trainX[:,2]=negIdx
num_userX, num_itemX, GX, warm_user_ids, cold_user_ids, unshared_uidsX, train_user_item_dictX, trainX = load(config['user_file_1'],config['item_file_1'],config['warm_user_ids_file'],config['cold_user_ids_file'],config['user_item_npy_1'],config['user_item_pair_npy_1'],domain='source')
# load target domain data
num_userY, num_itemY, GY, warm_user_ids, cold_user_ids, unshared_uidsY, train_user_item_dictY, test_user_item_dict,trainY, train_cross_dict, val_cross_dict = load(config['user_file_2'],config['item_file_2'],config['warm_user_ids_file'],config['cold_user_ids_file'],config['user_item_npy_2'],config['user_item_pair_npy_2'],domain='target')
end = datetime.datetime.now()
cprint('Prepare data cost: '+ str(end-start), 'yellow')
n_warm = len(warm_user_ids)
n_cold = len(cold_user_ids)
path_emb = 'path to input embed'
if args.user_emb_mode == 'mean':
user_emb_X = np.load(path_emb + 'CDs_and_Vinyl_mean_user_emb.npy')
item_emb_X = np.load(path_emb + 'CDs_and_Vinyl_description_emb.npy')
user_emb_Y = np.load(path_emb + 'Movies_and_TV_mean_user_emb.npy')
item_emb_Y = np.load(path_emb + 'Movies_and_TV_description_emb.npy')
elif args.user_emb_mode == 'sum':
user_emb_X = np.load(path_emb + 'CDs_and_Vinyl_sum_user_emb.npy')
item_emb_X = np.load(path_emb + 'CDs_and_Vinyl_description_emb.npy')
user_emb_Y = np.load(path_emb + 'Movies_and_TV_sum_user_emb.npy')
item_emb_Y = np.load(path_emb + 'Movies_and_TV_description_emb.npy')
elif args.user_emb_mode == 'random':
user_emb_X = np.random.randn(num_userX, 64)
item_emb_X = np.random.randn(num_itemX, 64)
user_emb_Y = np.random.randn(num_userY, 64)
item_emb_Y = np.random.randn(num_itemY, 64)
else:
cprint('Loading pretrained embedding...')
if args.source == 'CD':
path_emb = 'path to pretrained cd2movie emb'
user_emb_X = np.load(path_emb + 'user_CDs_and_Vinyl.npy')
item_emb_X = np.load(path_emb + 'item_CDs_and_Vinyl.npy')
user_emb_Y = np.load(path_emb + 'user_Movie_and_TV.npy')
item_emb_Y = np.load(path_emb + 'item_Movie_and_TV.npy')
elif args.source=='Book':
path_emb = 'path to pretrained book2cd emb'
user_emb_X = np.load(path_emb + 'user_book.npy')
item_emb_X = np.load(path_emb + 'item_book.npy')
user_emb_Y = np.load(path_emb + 'user_cd.npy')
item_emb_Y = np.load(path_emb + 'item_cd.npy')
featureX = np.concatenate((user_emb_X,item_emb_X),0)
# target domain 中把cold-start user拿掉
featureY = np.concatenate((user_emb_Y[:n_warm,:], user_emb_Y[(n_warm+n_cold):,:], item_emb_Y),0)
featureX_cuda = torch.FloatTensor(featureX).cuda()
featureY_cuda = torch.FloatTensor(featureY).cuda()
if args.normalize:
featureX_cuda = F.normalize(featureX_cuda, dim=1)
featureY_cuda = F.normalize(featureY_cuda, dim=1)
n_user_uniqueX = len(unshared_uidsX) + len(cold_user_ids) #
n_user_uniqueY = len(unshared_uidsY)
n_shared = n_shared = len(warm_user_ids)
in_dimX = user_emb_X.shape[1]
in_dimY = user_emb_Y.shape[1]
GX_dgl = dgl.DGLGraph()
GX_dgl.from_networkx(GX)
GY_dgl = dgl.DGLGraph()
GY_dgl.from_networkx(GY)
encoderX2Y = Encoder(GX_dgl, featureX_cuda, in_dimX, args.hidden, args.out_dim_encode, args.n_layers, F.leaky_relu_, 0.0).cuda()
encoderY2X = Encoder(GY_dgl, featureY_cuda, in_dimY, args.hidden, args.out_dim_encode, args.n_layers, F.leaky_relu_, 0.0).cuda()
in_dim = args.out_dim_encode
out_dimX_1 = args.hidden[-1]
out_dimX_2 = in_dimY
out_dimY_1 = args.hidden[-1]
out_dimY_2 = in_dimX
decoderX2Y = Decoder(in_dim, out_dimX_1, out_dimX_2).cuda()
decoderY2X = Decoder(in_dim, out_dimY_1, out_dimY_2).cuda()
in_dim_disX = in_dimX + out_dimX_2
in_dim_disY = in_dimY + out_dimY_2
optim_pre_X = torch.optim.Adam(encoderX2Y.parameters(), lr=args.lr, betas=(args.beta, 0.999))
optim_pre_Y = torch.optim.Adam(encoderY2X.parameters(), lr=args.lr, betas=(args.beta, 0.999))
optim_pre_X2Y = torch.optim.Adam(itertools.chain(encoderX2Y.parameters(), decoderX2Y.layers.parameters()), lr=args.lr,betas=(args.beta,0.999),weight_decay=args.decay)
optim_pre_Y2X = torch.optim.Adam(itertools.chain(encoderY2X.parameters(), decoderY2X.layers.parameters()), lr=args.lr,betas=(args.beta,0.999),weight_decay=args.decay)
optim_auto_X = torch.optim.Adam(itertools.chain(encoderX2Y.parameters(),decoderY2X.parameters()), lr=args.lr, betas=(args.beta,0.999),weight_decay=args.decay)
optim_auto_Y = torch.optim.Adam(itertools.chain(encoderY2X.parameters(),decoderX2Y.parameters()), lr=args.lr, betas=(args.beta,0.999),weight_decay=args.decay)
optim_superX2Y = torch.optim.Adam(itertools.chain(encoderX2Y.parameters(),decoderX2Y.parameters()), lr=args.lr, betas=(args.beta,0.999),weight_decay=args.decay)
optim_superY2X = torch.optim.Adam(itertools.chain(encoderY2X.parameters(),decoderY2X.parameters()), lr=args.lr, betas=(args.beta,0.999),weight_decay=args.decay)
optims = [optim_pre_X, optim_pre_Y, optim_pre_X2Y, optim_pre_Y2X, optim_auto_X, optim_auto_Y, optim_superX2Y,optim_superY2X]
if args.load_model:
encoderX2Y.load_state_dict(torch.load('./models/encoderX2Y.pkl'))
encoderY2X.load_state_dict(torch.load('./models/encoderY2X.pkl'))
decoderX2Y.load_state_dict(torch.load('./models/decoderX2Y.pkl'))
decoderY2X.load_state_dict(torch.load('./models/decoderY2X.pkl'))
save_encoderX2Y_name = './models/encoderX2Y.pkl'
save_encoderY2X_name = './models/encoderY2X.pkl'
save_decoderX2Y_name = './models/decoderX2Y.pkl'
save_decoderY2X_name = './models/decoderY2X.pkl'
result_path = './result/' + config['source_name'] + '2'+ config['target_name'] + '/'
result_file = result_path + 'DAN_' + datetime.datetime.now().strftime('%Y-%m-%d')+'.txt'
if not os.path.exists(result_path):
os.makedirs(result_path)
ofile = open(result_file,'a')
ofile.write('Time: '+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'\n')
ofile.write('Config: \n')
for key, value in vars(args).items():
ofile.write('%s:%s\n'%(key, value))
print(key, value)
##### Training #####
print('Single-domain X prediction')
batchSize = args.batchSize
batch_links = args.batch_links
stopping_step = 0
flag_step = 20
cur_best_ndcg = 0.0
for epoch in range(args.n_epochs):
random.shuffle(warm_user_ids)
print('Epoch: ',epoch)
start = datetime.datetime.now()
print('Single-domain X prediction')
n_batchX = len(trainX) // batch_links + 1
print('{:d} batchs in domain X'.format(n_batchX))
for batch in range(n_batchX):
batchX = trainX[batch*batch_links:(batch+1)*batch_links,:]
userIdx_X = torch.LongTensor(batchX[:,0]).cuda()
pos_itemIdx_X = torch.LongTensor(batchX[:,1]).cuda()
neg_itemIdx_X = torch.LongTensor(batchX[:,2]).cuda()
loss_X,loss_reg_X, loss_pre_X = train_unshared(args, encoder=encoderX2Y,optim_pre=optim_pre_X ,userIdx=userIdx_X, posIdx=pos_itemIdx_X, negIdx=neg_itemIdx_X, reg=args.bpr_reg)
writer.add_scalars('Train Loss',{'loss_X':loss_X}, epoch*n_batchX + batch)
if batch % 400 == 0:
print('batch {:d},loss_X: {:.4f} | loss_reg_X: {:.4f} | Loss_pre_X : {:.4f}'.format(batch,loss_X,loss_reg_X,loss_pre_X))
gc.collect()
print('Single-domain Y prediction')
n_batchY = len(trainY) // batch_links + 1
print('{:d} batchs in domain Y'.format(n_batchY))
for batch in range(n_batchY):
batchY = trainY[batch*batch_links:(batch+1)*batch_links,:]
userIdx_Y = torch.LongTensor(batchY[:,0]).cuda()
pos_itemIdx_Y = torch.LongTensor(batchY[:,1]).cuda()
neg_itemIdx_Y = torch.LongTensor(batchY[:,2]).cuda()
loss_Y,loss_reg_Y, loss_pre_Y = train_unshared(args, encoder=encoderY2X,optim_pre=optim_pre_Y, userIdx=userIdx_Y, posIdx=pos_itemIdx_Y, negIdx=neg_itemIdx_Y,reg=args.bpr_reg)
writer.add_scalars('Train Loss',{'loss_Y':loss_Y},epoch*n_batchY + batch)
if batch % 200 == 0:
print('batch: {:d}, loss_Y: {:.4f}| loss_reg_Y: {:.4f} | loss_pre_Y : {:.4f}'.format(batch,loss_Y,loss_reg_Y,loss_pre_Y))
gc.collect()
# shared user
n_batch = n_warm // batchSize + 1
print('{:d} batchs in cross domain'.format(n_batch))
for batch in range(n_batch):
batch_users = warm_user_ids[batch* batchSize : (batch+1) * batchSize]
batch_users = np.array(batch_users)
inputsX = featureX[batch_users,:]
inputsY = featureY[batch_users,:]
batch_users = torch.LongTensor(batch_users).cuda()
inputsX_cuda = torch.FloatTensor(inputsX).cuda()
inputsY_cuda = torch.FloatTensor(inputsY).cuda()
loss_all, loss_preX, loss_regX,loss_preY,loss_regY,loss_preX2Y, loss_preY2X,loss_regX2Y,loss_regY2X,\
loss_autoX, loss_autoY,loss_superX2Y,loss_superY2X = train_shared(args, optims,inputsX_cuda, inputsY_cuda,\
encoderX2Y, encoderY2X, decoderX2Y, decoderY2X,\
batch_users, train_user_item_dictX,\
train_user_item_dictY, train_cross_dict,reg=args.bpr_reg)
writer.add_scalars('Train Loss',{'loss_preX': loss_preX, 'loss_preY': loss_preY,'loss_preX2Y':loss_preX2Y,'loss_preY2X':loss_preY2X,'loss_regX':loss_regX,'loss_regY':loss_regY,'loss_regX2Y':loss_regX2Y,'loss_regY2X':loss_regY2X,'loss_all':loss_all,'loss_autoX':loss_autoX,'loss_autoY':loss_autoY,'loss_superX2Y':loss_superX2Y,'loss_superY2X':loss_superY2X},epoch*n_batch+batch)
if batch % 80 == 0:
print('batch {:d} \n loss_all: {:.4f} | loss_preX2Y: {:.4f} | loss_preY2X: {:.4f}\n loss_regX2Y:{:.4f} | loss_regY2X: {:.4f} \n loss_autoX: {:.4f} | loss_autoY: {:.4f} \n loss_superX2Y:{:.4f} | loss_superY2X: {:.4f}'.format(batch, loss_all, loss_preX2Y, loss_preY2X,loss_regX2Y,loss_regY2X, loss_autoX, loss_autoY, loss_superX2Y,loss_superY2X))
gc.collect()
if epoch==0:
end = datetime.datetime.now()
cprint('Training one epoch cost: ' + str(end - start), 'yellow', attrs=['bold'])
# evaluation
start = datetime.datetime.now()
warm_users_cuda = torch.LongTensor(warm_user_ids).cuda()
hit_rate, ndcg = evaluate(args, encoderX2Y, encoderY2X, decoderX2Y, warm_users_cuda, val_cross_dict)
if epoch == 0:
end = datetime.datetime.now()
cprint('Evaluating one epoch cost: '+ str(end - start), 'yellow', attrs=['bold'])
for i,k in enumerate(args.topk):
print('Hit@{:d}: {:.4f} | NDCG@{:d}: {:.4f}'.format(k, hit_rate[i], k, ndcg[i]))
# early stopping
cur_best_ndcg, stopping_step, should_stop = early_stopping(ndcg[2], cur_best_ndcg, stopping_step, flag_step)
# 保存取得最好性能时候的模型
if ndcg[2] == cur_best_ndcg:
torch.save(encoderX2Y.state_dict(), save_encoderX2Y_name)
torch.save(encoderY2X.state_dict(), save_encoderY2X_name)
torch.save(decoderX2Y.state_dict(), save_decoderX2Y_name)
torch.save(decoderY2X.state_dict(), save_decoderY2X_name)
if should_stop:
cprint('Early stopping at epoch '+ str(epoch), 'magenta')
print('Early stopping at epoch ', epoch, file=ofile)
cprint('Best ndcg@20: ' + str(cur_best_ndcg), 'magenta')
print('Best ndcg@20: ',cur_best_ndcg, file=ofile)
break
# test
if args.test:
start = datetime.datetime.now()
cprint('Testing...','yellow')
print('Testing...', file=ofile)
encoderX2Y.load_state_dict(torch.load(save_encoderX2Y_name))
encoderY2X.load_state_dict(torch.load(save_encoderY2X_name))
decoderX2Y.load_state_dict(torch.load(save_decoderX2Y_name))
cold_users_cuda = torch.LongTensor(cold_user_ids).cuda()
hit_rate, ndcg = evaluate(args, encoderX2Y, encoderY2X, decoderX2Y, cold_users_cuda, test_user_item_dict,test=True)
end = datetime.datetime.now()
print('Testing cost: ',end - start)
for i,k in enumerate(args.topk):
print('Hit@{:d}: {:.4f} | NDCG@{:d}: {:.4f}'.format(k, hit_rate[i], k, ndcg[i]))
print('Hit@{:d}: {:.4f} | NDCG@{:d}: {:.4f}'.format(k, hit_rate[i], k, ndcg[i]), file=ofile)
ofile.write('==================================\n\n')
ofile.close()
| bbjy/DAN | src/main_version.py | main_version.py | py | 16,761 | python | en | code | 1 | github-code | 13 |
20747633887 | #!/usr/bin/env python3
###################################################################################
# This script will detect unecessary includes in header files. #
###################################################################################
import os
import sys
import subprocess
with open("CheckIncludesIntermediate.txt", "w") as f:
subprocess.call([
sys.executable,
'C:/Python38/Scripts/cppclean', #cppclean
'./Source',
'--include-path=Source',
'--include-path=cocos2d',
'--include-path=cocos2d/cocos',
'--include-path=proj.win32',
'--include-path=proj.linux',
'--include-path=proj.mac'
], stdout=f)
ignored_words = ['Squally.dir', 'cocos2d.dir', 'spriter2dx.dir', ' static data ', 'precheader', 'asmjit', 'asmtk', 'libudis86']
with open("CheckIncludesIntermediate.txt", "r") as input, open("CheckIncludes.txt", "w") as output:
for line in input:
if not any(ignored_word in line for ignored_word in ignored_words):
output.write(line)
os.remove("CheckIncludesIntermediate.txt")
| 0xff7/Squally | CheckIncludes.py | CheckIncludes.py | py | 1,131 | python | en | code | null | github-code | 13 |
15302546648 | import os
def create_dir_if_exists(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def download_data():
filenames = ["train.csv", "test.csv", "enhanced_train.csv"]
download_url = "https://rhdzmota-cloud-storage.herokuapp.com/temporal-link/dropbox-files?file_name={}&file_path={}"
for file in filenames:
if os.path.isfile("data/" + file):
continue
r = requests.get(download_url.format(file, "/kaggle/nyc-taxi"))
f = requests.get(r.json().get("url"))
with open("data/"+file, "wb") as code:
code.write(f.content)
if __name__ == '__main__':
create_dir_if_exists('data')
create_dir_if_exists('output')
download_data()
| RHDZMOTA/PAP-ML-17 | HW02/setup.py | setup.py | py | 727 | python | en | code | 0 | github-code | 13 |
20102402314 | from ursina import *
app = Ursina()
me = Animation('assets\player',collider ='box',y=1,)
Sky()
camera.orthographic = True
camera.fov = 20
Entity(
model = 'quad',
texture = 'assets\BG',
scale=50,z=5
)
def update():
me.y += held_keys['w']*6*time.dt
me.y += held_keys['s']*6*time.dt
app.run() | salihslx/Games | game1.py | game1.py | py | 324 | python | en | code | 0 | github-code | 13 |
19856224373 | from zipfile import ZipFile
from io import BytesIO
from openpyxl.xml.constants import (
ARC_CORE,
ARC_WORKBOOK,
ARC_STYLE,
ARC_THEME,
SHARED_STRINGS,
EXTERNAL_LINK,
)
from openpyxl.workbook.properties import DocumentProperties, read_properties
from openpyxl.workbook.names.external import detect_external_links
from openpyxl.workbook.names.named_range import read_named_ranges
from openpyxl.reader.strings import read_string_table
from openpyxl.reader.style import read_style_table
from openpyxl.reader.workbook import (
read_content_types,
read_excel_base_date,
detect_worksheets,
read_rels,
read_workbook_code_name,
)
from openpyxl.reader.worksheet import read_worksheet
from openpyxl.reader.comments import read_comments, get_comments_file
from patch_reader.drawings import read_drawings, get_drawings_file
def _load_workbook(wb, archive, filename, read_only, keep_vba):
valid_files = archive.namelist()
# If are going to preserve the vba then attach a copy of the archive to the
# workbook so that is available for the save.
if keep_vba:
try:
f = open(filename, 'rb')
s = f.read()
f.close()
except:
pos = filename.tell()
filename.seek(0)
s = filename.read()
filename.seek(pos)
wb.vba_archive = ZipFile(BytesIO(s), 'r')
if read_only:
wb._archive = ZipFile(filename)
# get workbook-level information
try:
wb.properties = read_properties(archive.read(ARC_CORE))
except KeyError:
wb.properties = DocumentProperties()
wb._read_workbook_settings(archive.read(ARC_WORKBOOK))
# what content types do we have?
cts = dict(read_content_types(archive))
rels = dict
strings_path = cts.get(SHARED_STRINGS)
if strings_path is not None:
if strings_path.startswith("/"):
strings_path = strings_path[1:]
shared_strings = read_string_table(archive.read(strings_path))
else:
shared_strings = []
try:
wb.loaded_theme = archive.read(ARC_THEME) # some writers don't output a theme, live with it (fixes #160)
except KeyError:
assert wb.loaded_theme == None, "even though the theme information is missing there is a theme object ?"
style_table, color_index, cond_styles = read_style_table(archive.read(ARC_STYLE))
wb.shared_styles = style_table
wb.style_properties = {'dxf_list':cond_styles}
wb.cond_styles = cond_styles
wb.properties.excel_base_date = read_excel_base_date(xml_source=archive.read(ARC_WORKBOOK))
# get worksheets
wb.worksheets = [] # remove preset worksheet
for sheet in detect_worksheets(archive):
sheet_name = sheet['title']
worksheet_path = sheet['path']
if not worksheet_path in valid_files:
continue
if not read_only:
new_ws = read_worksheet(archive.read(worksheet_path), wb,
sheet_name, shared_strings, style_table,
color_index=color_index,
keep_vba=keep_vba)
else:
new_ws = read_worksheet(None, wb, sheet_name, shared_strings,
style_table,
color_index=color_index,
worksheet_path=worksheet_path)
new_ws.sheet_state = sheet.get('state') or 'visible'
wb._add_sheet(new_ws)
if not read_only:
# load comments into the worksheet cells
comments_file = get_comments_file(worksheet_path, archive, valid_files)
if comments_file is not None:
read_comments(new_ws, archive.read(comments_file))
drawings_file = get_drawings_file(worksheet_path, archive, valid_files)
if drawings_file is not None:
read_drawings(new_ws, drawings_file, archive, valid_files)
wb._named_ranges = list(read_named_ranges(archive.read(ARC_WORKBOOK), wb))
wb.code_name = read_workbook_code_name(archive.read(ARC_WORKBOOK))
if EXTERNAL_LINK in cts:
rels = read_rels(archive)
wb._external_links = list(detect_external_links(rels, archive))
| jchuahtacc/openpyxl-imagereader-patch | patch_reader/excel.py | excel.py | py | 4,287 | python | en | code | 1 | github-code | 13 |
10255536366 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 19:06:48 2020
@author: sarroutim2
"""
import torch.nn as nn
import torch
from .base_rnn import BaseRNN
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class Classifier(nn.Module):
#define all the layers used in model
def __init__(self, vocab_size, embedding_dim, embedding, hidden_dim, output_dim, num_layers,
bidirectional, dropout,rnn_cell='LSTM'):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
if self.embedding is not None:
self.embedding.weight=nn.Parameter(self.embedding.weight, requires_grad=True)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
self.linear = nn.Linear(hidden_dim, 5)
self.dropout = nn.Dropout(0.2)
#activation function
self.act = nn.Softmax()
def forward(self, x, l):
x = self.embedding(x)
x = self.dropout(x)
x_pack = pack_padded_sequence(x, l, batch_first=True)
lstm_out, (ht, ct) = self.lstm(x_pack)
dense_outputs=self.linear(ht[-1])
#Final activation function
outputs=self.act(dense_outputs)
return outputs | sarrouti/multi-class-text-classification-pytorch | models/classifier.py | classifier.py | py | 1,337 | python | en | code | 3 | github-code | 13 |
16132904003 | #!/usr/bin/env python3
from jinja2 import Template
# We can use raw, endraw markers to escape
# Jinja delimiters.
data = """
{% raw %}
His name is {{ name }}
{% endraw %}
"""
tm = Template(data)
msg = tm.render(name="Peter")
print(msg)
| udhayprakash/PythonMaterial | python3/jinja_templating/e_jinja_rawdata.py | e_jinja_rawdata.py | py | 240 | python | en | code | 7 | github-code | 13 |
74009936016 | import os
from unittest import TestCase
from models import db, User, Message, Follows
# BEFORE we import our app, let's set an environmental variable
# to use a different database for tests (we need to do this
# before we import our app, since that will have already
# connected to the database
os.environ['DATABASE_URL'] = "postgresql://postgres:postgres@localhost:5433/warbler-test"
# Now we can import app
from app import app
# Create our tables (we do this here, so we only create the tables
# once for all tests --- in each test, we'll delete the data
# and create fresh new clean test data
db.drop_all()
db.create_all()
class MessageModelTestCase(TestCase):
"""Test views for messages."""
def setUp(self):
"""Create test client, add sample data."""
User.query.delete()
Message.query.delete()
Follows.query.delete()
self.user = User.signup(
username='testuser',
email='test@user.com',
password='password',
image_url=None
)
db.session.add(self.user)
db.session.commit()
self.client = app.test_client()
def test_message_model(self):
"""Does basic model work?"""
m = Message(
text="This is a test message",
timestamp=None,
user_id=self.user.id
)
db.session.add(m)
db.session.commit()
self.assertEqual(m.user, self.user)
| tylerfelsted/warbler | test_message_model.py | test_message_model.py | py | 1,531 | python | en | code | 0 | github-code | 13 |
38138107286 | import requests
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
url = 'https://www.realtor.com/realestateagents/catherine-qian_los-altos_ca_549162_150199108'
url = 'https://www.realtor.com/realestateagents/ben-caballero_addison_tx_560046_970894102'
options = Options()
# options.add_argument("--window-size=1920,1080")
# options.add_argument("--disable-extensions")
# options.add_argument("--proxy-server='direct://'")
# options.add_argument("--proxy-bypass-list=*")
options.add_argument("--start-maximized")
# options.add_argument('--headless')
# options.add_argument('--disable-gpu')
# options.add_argument('--disable-dev-shm-usage')
# options.add_argument('--no-sandbox')
# options.add_argument('--ignore-certificate-errors')
driver = webdriver.Chrome(options=options)
# driver = webdriver.Chrome()
driver.get(url)
#listings arrow down
WebDriverWait(driver, 25).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="review"]/div[2]/i')))
driver.find_element_by_xpath('//*[@id="review"]/div[2]/i').click()
time.sleep(2)
count_onmarket = 4
while(True):
try:
driver.find_element_by_xpath('//*[@id="collapseOne5"]/div[2]/div/div[3]/p').click()
time.sleep(2)
count_onmarket += 4
print("cliked on new onmarket")
if(count_onmarket>=8):
break
except:
break
print("count_onmarket: "+str(count_onmarket))
#on market
onmarket_url_list=[]
for i in range(1,count_onmarket+1):
try:
driver.find_element_by_xpath('//*[@id="collapseOne5"]/div[2]/div/div[2]/div['+str(i)+']/div/div[1]/div').click()
time.sleep(2)
driver.switch_to.window(driver.window_handles[1])
onmarket_url_list.append(driver.current_url)
driver.close()
driver.switch_to.window(driver.window_handles[0])
time.sleep(1)
except:
break
print(onmarket_url_list)
count_offmarket = 4
while(True):
try:
driver.find_element_by_xpath('//*[@id="collapseOne5"]/div[3]/div/div[3]/p').click()
time.sleep(2)
count_offmarket += 4
print("cliked on new onmarket")
if(count_offmarket>=8):
break
except:
break
print("count_offmarket: "+str(count_offmarket))
#off market
offmarket_url_list=[]
for i in range(1,count_offmarket+1):
try:
driver.find_element_by_xpath('//*[@id="collapseOne5"]/div[3]/div/div[2]/div['+str(i)+']/div/div[1]/div').click()
time.sleep(2)
driver.switch_to.window(driver.window_handles[1])
offmarket_url_list.append(driver.current_url)
driver.close()
driver.switch_to.window(driver.window_handles[0])
time.sleep(1)
except:
break
print(offmarket_url_list)
| manojnilanga/scrape_realstate | check_realstate.py | check_realstate.py | py | 3,129 | python | en | code | 0 | github-code | 13 |
37933917452 | import random
class Healthy():
def __init__(self, row, col, title='0'):
self.row = row
self.col = col
self.title = title
class Sick(Healthy):
def __init__(self, row, col, IR, MR, title='I'):
super().__init__(row, col, title)
self.title = title
self.IR = IR
self.MR = MR
def infect(self, visual):
try:
if random.randint(0, 100) < self.IR and visual[self.row - 1][self.col - 1].title == '0':
visual[self.row - 1][self.col - 1] = Sick(self.row - 1, self.col - 1, self.IR, self.MR)
if random.randint(0, 100) < self.IR and visual[self.row - 1][self.col].title == '0':
visual[self.row - 1][self.col] = Sick(self.row - 1, self.col, self.IR, self.MR)
if random.randint(0, 100) < self.IR and visual[self.row - 1][self.col + 1].title == '0':
visual[self.row - 1][self.col + 1] = Sick(self.row - 1, self.col + 1, self.IR, self.MR)
if random.randint(0, 100) < self.IR and visual[self.row][self.col - 1].title == '0':
visual[self.row][self.col - 1] = Sick(self.row, self.col - 1, self.IR, self.MR)
if random.randint(0, 100) < self.IR and visual[self.row][self.col + 1].title == '0':
visual[self.row][self.col + 1] = Sick(self.row, self.col + 1, self.IR, self.MR)
if random.randint(0, 100) < self.IR and visual[self.row + 1][self.col - 1].title == '0':
visual[self.row + 1][self.col - 1] = Sick(self.row + 1, self.col - 1, self.IR, self.MR)
if random.randint(0, 100) < self.IR and visual[self.row + 1][self.col].title == '0':
visual[self.row + 1][self.col] = Sick(self.row + 1, self.col, self.IR, self.MR)
if random.randint(0, 100) < self.IR and visual[self.row + 1][self.col + 1].title == '0':
visual[self.row + 1][self.col + 1] = Sick(self.row + 1, self.col + 1, self.IR, self.MR)
if random.randint(0, 100) < self.MR:
visual[self.row][self.col] = Dead(self.row, self.col, self.IR)
except IndexError:
pass
class Dead(Sick):
def __init__(self, row, col, IF, title='X'):
super().__init__(row, col, IF, title)
self.IR = 0
self.title = title
class Simmulation():
def __init__(self, size, visual=[]):
self.size = size
self.visual = visual
def generateVisual(self, IR, MR):
col = []
r = 0
annoying = 1
for pos, val in enumerate(range(self.size)):
if random.randint(0, 100) < IR:
col.append(Sick(len(self.visual),
len(col),
IR, MR))
else:
col.append(Healthy(len(self.visual),
len(col)))
if annoying % 100 == 0:
self.visual.append(col)
col = []
r += 1
annoying += 1
self.visual.append(col)
def showVisual(self):
for i in self.visual:
for j in i:
print(j.title, end='')
print()
def dayPass(self):
for i in self.visual:
for j in i:
if j.title == 'I':
j.infect(self.visual)
print("Welcome to the epidemic spread map")
size = int(input("What would you like the population size to be: "))
IR = int(input("What presentage is the infection rate: "))
MR = int(input("What percentage is the mortality rate: "))
sim = Simmulation(size)
sim.generateVisual(IR, MR)
sim.showVisual()
day = 1
while True:
print("AFTER " + str(day) + " DAYS")
input("Press any key to cycle another day")
sim.dayPass()
sim.showVisual()
day += 1
| RiskyClick/40Challenges | EpidemicOutbreakTerminal.py | EpidemicOutbreakTerminal.py | py | 3,803 | python | en | code | 0 | github-code | 13 |
42628693650 | """
Forms for reservation:
* Creation
* Update
* Delete
"""
import datetime
from django import forms
from roomalloc.models import Profile, Reservation
from roomalloc.util import validation
time_start_help_text = "\
<ul> \
<li> Minute should be 30 or 0 </li>\
<li> Second should be 0 </li>\
<li> Date should be in future </li>\
</ul>"
time_end_help_text = "\
<ul> \
<li> Minute should be 30 or 0 </li>\
<li> Second should be 0 </li>\
<li> Date should be in future </li>\
<li> Occur after time_start </li>\
<li> In 1 day with time_start </li>\
</ul>"
amount_help_text = "\
<ul> \
<li> Estimated amount of people </li>\
<li> Greater than 0 </li>\
<li> Fit within room max-capacity </li>\
</ul>"
class ReserveCreationForm(forms.ModelForm):
"""
Reserve Creation Form
"""
# instance variable: Room model
room = None
user = None
def __init__(self, *args, **kwargs):
# get key
if 'room' in kwargs:
self.room = kwargs.pop('room')
if 'user' in kwargs:
self.user = kwargs.pop('user')
# run super.init
super(ReserveCreationForm, self).__init__(*args, **kwargs)
self.fields['amount'].validators = \
[validation.validate_room_amount(self.room),]
self.fields['time_start'].validators = \
[validation.validate_time_round_up(self.user, self.room),]
self.fields['time_end'].validators = \
[validation.validate_time_round_up(self.user, self.room),]
# time_start
time_start = forms.DateTimeField(
help_text = time_start_help_text,
widget = forms.DateTimeInput(
attrs = {'class' : 'form-control datetime-input'}
),
)
# time_end
time_end = forms.DateTimeField(
help_text = time_end_help_text,
widget = forms.DateTimeInput(
attrs = {'class' : 'form-control datetime-input'}
),
)
# amount
amount = forms.IntegerField(
help_text = amount_help_text,
widget = forms.NumberInput(
attrs = {'class' : 'form-control'}
)
)
# reason
reason = forms.CharField(
help_text = "Short line of reason",
widget = forms.TextInput(
attrs = {'class' : 'form-control'}
)
)
class Meta:
model = Reservation
fields = ('time_start', 'time_end', 'amount', 'reason')
def clean_time_end(self):
"""
Auto-run
check: time_end > time_start
check: time_end, time_start in 1 day
check: overlapping with others
"""
# get time submit
time_start = self.cleaned_data.get("time_start")
time_end = self.cleaned_data.get("time_end")
# get room reservations in date submit
# cur res should not overlapped with other reservations
room_res = Reservation.objects.filter(
time_start__date=time_start.date(),
room=self.room
)
# error_lit
errors = []
# check time_end > time_start
if (time_start > time_end):
errors.append(forms.ValidationError(
"time_start greater than time_end",
code="error_time_start_greater"
))
# check time_start, time_end on same day
if (time_start.date() != time_end.date()):
errors.append(forms.ValidationError(
"time_start, time_end on different day",
code="error_diff_day"
))
# check time_end <= time_start + 3h
if (time_end > time_start + datetime.timedelta(hours=3)):
errors.append(forms.ValidationError(
"Event should less than 3 hours",
code="error_too_long"
))
# check overlapping, 2d collision :'(
for res in room_res:
if (time_start < res.time_end
and time_end > res.time_start):
errors.append(forms.ValidationError(
"Overlapping reservations",
code="error_overlap"
))
break
# total errors
if (len(errors) > 0):
raise forms.ValidationError(errors)
return time_end | cnguyenm/RoomAlloc | roomalloc/form/reservation.py | reservation.py | py | 4,515 | python | en | code | 0 | github-code | 13 |
70476114258 | t=int(input())
for v in range(t):
binary = input()
length = len(binary)
if binary == '1'*length:
print('0'*length)
continue
number = int(binary,2)
number+=1
res=bin(number).replace('0b', '')
while len(res)<length:
res='0'+res
print(res) | baquyptit2001/ctdl-gt | nhi_phan_ke_tiep.py | nhi_phan_ke_tiep.py | py | 293 | python | en | code | 0 | github-code | 13 |
30612183888 | import numpy as np
import pandas as pd
class QLearningAgents:
def __init__(self, n_agents, action_space, gamma=0.0):
self.gamma = gamma
self.n_agents = n_agents
self.agents = [QLearningTable(action_space, gamma=self.gamma) for _ in range(self.n_agents)]
def select_action(self, obs):
action = [self.agents[i].choose_action(obs) for i in range(self.n_agents)]
return np.array(action)
def learn(self, s, a, r, s_):
if self.n_agents == 1:
self.agents[0].learn(s, a, r, s_)
else:
for i in range(self.n_agents):
self.agents[i].learn(s, a[i], r[i], s_)
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, gamma=0.0, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = gamma
self.epsilon = e_greedy
self.n_steps = 0
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
self.n_steps += 1
if self.epsilon > 0.1:
self.epsilon = 0.9993 ** self.n_steps
else:
self.epsilon = 0.05
if np.random.uniform() > self.epsilon:
# choose best action
state_action = self.q_table.loc[observation, :]
# some actions may have the same value, randomly choose on in these actions
action = np.random.choice(state_action[state_action == np.max(state_action)].index)
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
q_predict = self.q_table.loc[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.loc[s_, :].max() # next state is not terminal
else:
q_target = r # next state is terminal
self.q_table.loc[s, a] += self.lr * (q_target - q_predict) # update
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(
pd.Series(
[0]*len(self.actions),
index=self.q_table.columns,
name=state,
)
)
| liangyancang/agent-based-modeling-in-electricity-market-using-DDPG-algorithm | algorithm/QLearning.py | QLearning.py | py | 2,423 | python | en | code | 20 | github-code | 13 |
3726597100 | import functools
import numpy as np
from garage.experiment import deterministic
from garage.sampler import DefaultWorker
from iod.utils import get_np_concat_obs
class OptionWorker(DefaultWorker):
def __init__(
self,
*, # Require passing by keyword, since everything's an int.
seed,
max_path_length,
worker_number,
sampler_key):
super().__init__(seed=seed,
max_path_length=max_path_length,
worker_number=worker_number)
self._sampler_key = sampler_key
self._max_path_length_override = None
self._cur_extras = None
self._cur_extra_idx = None
self._cur_extra_keys = set()
self._render = False
self._deterministic_initial_state = None
self._deterministic_policy = None
def update_env(self, env_update):
if env_update is not None:
if isinstance(env_update, dict):
for k, v in env_update.items():
setattr(self.env, k, v)
else:
super().update_env(env_update)
def worker_init(self):
"""Initialize a worker."""
if self._seed is not None:
deterministic.set_seed(self._seed + self._worker_number * 10000)
def update_worker(self, worker_update):
if worker_update is not None:
if isinstance(worker_update, dict):
for k, v in worker_update.items():
setattr(self, k, v)
if k == '_cur_extras':
if v is None:
self._cur_extra_keys = set()
else:
if len(self._cur_extras) > 0:
self._cur_extra_keys = set(self._cur_extras[0].keys())
else:
self._cur_extra_keys = None
else:
raise TypeError('Unknown worker update type.')
def get_attrs(self, keys):
attr_dict = {}
for key in keys:
attr_dict[key] = functools.reduce(getattr, [self] + key.split('.'))
return attr_dict
def start_rollout(self):
"""Begin a new rollout."""
if 'goal' in self._cur_extra_keys:
goal = self._cur_extras[self._cur_extra_idx]['goal']
reset_kwargs = {'goal': goal}
else:
reset_kwargs = {}
env = self.env
while hasattr(env, 'env'):
env = getattr(env, 'env')
if hasattr(env, 'fixed_initial_state') and self._deterministic_initial_state is not None:
env.fixed_initial_state = self._deterministic_initial_state
self._path_length = 0
self._prev_obs = self.env.reset(**reset_kwargs)
self._prev_extra = None # last option/xi
if 'lstm_xi' in self._cur_extra_keys:
hidden, cell_state = self._cur_extras[self._cur_extra_idx]['lstm_xi']
self.agent.reset(hidden_cell_state_tuples=(hidden[:, None, :], cell_state[:, None, :]))
else:
self.agent.reset()
def step_rollout(self):
"""Take a single time-step in the current rollout.
Returns:
bool: True iff the path is done, either due to the environment
indicating termination of due to reaching `max_path_length`.
"""
cur_max_path_length = self._max_path_length if self._max_path_length_override is None else self._max_path_length_override
if self._path_length < cur_max_path_length:
if 'option' in self._cur_extra_keys:
cur_extra_key = 'option'
elif 'step_xi' in self._cur_extra_keys:
cur_extra_key = 'step_xi'
else:
cur_extra_key = None
if cur_extra_key is None:
agent_input = self._prev_obs
else:
if isinstance(self._cur_extras[self._cur_extra_idx][cur_extra_key], list):
cur_extra = self._cur_extras[self._cur_extra_idx][cur_extra_key][self._path_length]
if cur_extra is None:
# Reuse previous option/xi
cur_extra = self._prev_extra
self._cur_extras[self._cur_extra_idx][cur_extra_key][self._path_length] = cur_extra
else:
cur_extra = self._cur_extras[self._cur_extra_idx][cur_extra_key]
agent_input = get_np_concat_obs(
self._prev_obs, cur_extra,
)
self._prev_extra = cur_extra
if self._deterministic_policy is not None:
self.agent._force_use_mode_actions = self._deterministic_policy
a, agent_info = self.agent.get_action(agent_input)
if self._render:
next_o, r, d, env_info = self.env.step(a, render=self._render)
else:
next_o, r, d, env_info = self.env.step(a)
self._observations.append(self._prev_obs)
self._rewards.append(r)
self._actions.append(a)
for k, v in agent_info.items():
self._agent_infos[k].append(v)
for k in self._cur_extra_keys:
if isinstance(self._cur_extras[self._cur_extra_idx][k], list):
self._agent_infos[k].append(self._cur_extras[self._cur_extra_idx][k][self._path_length])
else:
self._agent_infos[k].append(self._cur_extras[self._cur_extra_idx][k])
for k, v in env_info.items():
self._env_infos[k].append(v)
self._path_length += 1
self._terminals.append(d)
if not d:
self._prev_obs = next_o
return False
self._terminals[-1] = True # Set the last done to True
self._lengths.append(self._path_length)
self._last_observations.append(self._prev_obs)
return True
def rollout(self):
"""Sample a single rollout of the agent in the environment.
Returns:
garage.TrajectoryBatch: The collected trajectory.
"""
if self._cur_extras is not None:
self._cur_extra_idx += 1
self.start_rollout()
while not self.step_rollout():
pass
return self.collect_rollout()
| jaekyeom/IBOL | garagei/sampler/option_worker.py | option_worker.py | py | 6,427 | python | en | code | 28 | github-code | 13 |
6680862619 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gdmapstool', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='gdmap',
name='scenarios',
field=models.ManyToManyField(to='flooding_lib.Scenario', blank=True),
),
]
| nens/flooding-public | flooding_lib/tools/gdmapstool/migrations/0002_auto_20200928_1652.py | 0002_auto_20200928_1652.py | py | 427 | python | en | code | 0 | github-code | 13 |
17703721349 |
from django.core.management.base import BaseCommand, CommandError
from apps.orders.models import Order, OrderItem
from apps.shipments.models import ShipmentLog, Shipment
from django.utils import timezone
import requests
import json
import datetime
class Command(BaseCommand):
def handle(self, *args, **options):
order_id = input("Input order id: ")
try:
int(order_id)
except ValueError:
print("Wrong order id type")
else:
check_order = Order.objects.filter(pk=order_id).first()
if not check_order:
print("Order is not exist")
else:
params = {
"invoice_id": Shipment.objects.values_list('invoice_id', flat=True).get(public_id=order_id),
"delivery_address": Shipment.objects.values_list('delivery_address', flat=True).get(
public_id=order_id),
"user_name": Order.objects.values_list('user', flat=True).get(
public_id=order_id),
"post_branch": Shipment.objects.values_list('delivery_address', flat=True).get(public_id=order_id),
"status_change_date": str(datetime.datetime.now()),
"shipment_status": Shipment.objects.values_list('shipment_status', flat=True).get(
public_id=order_id)
}
response = requests.post('http://localhost:8000/delivery_api/', json=params, headers={'format': 'json'}).text
delivery_api_log = ShipmentLog()
delivery_api_log.send_date = timezone.now()
delivery_api_log.log_field = response
delivery_api_log.request = params
if response:
delivery_api_log.is_processed = True
else:
delivery_api_log.is_processed = False
delivery_api_log.save()
print(response)
| oshevelo/sep_py_shop | FirstShop/apps/shipments/management/commands/create.py | create.py | py | 1,915 | python | en | code | 0 | github-code | 13 |
21498501659 | ############HELPER CLASS AND FUNCTIONS######################
#Class that defines an operation (i.e one line in the input)
class Operation:
def __init__(self,line,w=0,x=0,y=0,z=0):
self.x=x
self.w=w
self.y=y
self.z=z
op_parts = line.split(" ")
self.operand = op_parts[0]
self.arg1 = op_parts[1]
self.arg2 = op_parts[2].strip()
def set_variables(self,w,x,y,z):
self.w=w
self.x=x
self.y=y
self.z=z
def get_op_result(self):
if self.operand=="mul":
return self.format_result(self.arg1,self.str_to_var(self.arg1)*self.str_to_var(self.arg2))
elif self.operand=="add":
return self.format_result(self.arg1,self.str_to_var(self.arg1)+self.str_to_var(self.arg2))
elif self.operand=="div":
return self.format_result(self.arg1,self.str_to_var(self.arg1)//self.str_to_var(self.arg2))
elif self.operand=="mod":
return self.format_result(self.arg1,self.str_to_var(self.arg1)%self.str_to_var(self.arg2))
elif self.operand=="eql":
return self.format_result(self.arg1,1 if self.str_to_var(self.arg1)==self.str_to_var(self.arg2) else 0)
def format_result(self,arg,value):
if arg=="w":
return value,self.x,self.y,self.z
elif arg=="x":
return self.w,value,self.y,self.z
elif arg=="y":
return self.w,self.x,value,self.z
elif arg=="z":
return self.w,self.x,self.y,value
def str_to_var(self,arg):
if arg=="x":
return self.x
elif arg=="w":
return self.w
elif arg=="y":
return self.y
elif arg=="z":
return self.z
else:
return int(arg)
#Execute one block of operations (there are 14 in the )
def execute_operations_one_block(op_block,w,x,y,z):
op_block[0].set_variables(w,x,y,z)
new_w,new_x,new_y,new_z=op_block[0].get_op_result()
for i in range(1,len(op_block)):
op_block[i].set_variables(new_w,new_x,new_y,new_z)
new_w,new_x,new_y,new_z=op_block[i].get_op_result()
return new_x,new_y,new_z
#How the complete Alu would be executed (not used though)
def execute_alu(operations,w_array):
new_x,new_y,new_z=0,0,0
for i in range(len(w_array)):
new_x,new_y,new_z=execute_operations_one_block(operations[i],w_array[i],new_x,new_y,new_z)
return new_z
##############MAIN PART###########################
#Read the input
operations=[] #2d array, 13xY, 13 is the size of the input, Y the number of operations done per number
with open("input_day_24","r") as fp:
lines = fp.readlines()
current_operation=[]
for i,line in enumerate(lines):
if line.startswith("inp"):
if i!=0:
operations.append(current_operation)
current_operation=[]
else:
current_operation.append(Operation(line))
operations.append(current_operation)
#Do the alu in order.
#We see that in order to be 0 at the end, z needs to be smaller than 26 in the last operation block
#Z can only become smaller when dividing it by 26, otherwise it will become greater or equal.
#Thus, z values need to be smaller than 26**(14-i-1) for block i
#In practice, we found that z stays always under 100'000. (Otherwise the programm takes around 30 seconds, like this only 3-4 seconds)
nr_ops=14
z_upper_bound=100000
possibilities=set([(0,0,0)])
poss=set() #(turn,w,res_x,res_y,res_z,inp_x,inp_y,inp_z)
for i in range(nr_ops):
print(i)
new_possibilities=set()
for w in range(1,10):
for x,y,z in possibilities:
next_x,next_y,next_z = execute_operations_one_block(operations[i],w,x,y,z)
if next_z<z_upper_bound: #This should be 26**(14-i-1)
new_possibilities.add((next_x,next_y,next_z))
poss.add((i,w,next_x,next_y,next_z,x,y,z))
possibilities=set(new_possibilities)
#Arrange the possibilities in a usable form (per turn)
poss_per_turn=dict()
for i in range(nr_ops):
for a in poss:
if a[0]==i:
if i in poss_per_turn:
poss_per_turn[i].append((a[1],a[2],a[3],a[4],a[5],a[6],a[7]))
else:
poss_per_turn[i] = [(a[1],a[2],a[3],a[4],a[5],a[6],a[7])]
#Take only the possibilities per turn that will lead to a z=0 after the last op
valid_poss_per_turn=dict()
valid_last=[a for a in poss_per_turn[nr_ops-1] if a[3]==0]
for val in valid_last:
if nr_ops-1 in valid_poss_per_turn:
valid_poss_per_turn[nr_ops-1].append(val)
else:
valid_poss_per_turn[nr_ops-1] = [val]
for i in range(nr_ops-2,-1,-1):
for val in poss_per_turn[i]:
for val_last in valid_last:
if val[1]==val_last[4] and val[2]==val_last[5] and val[3]==val_last[6]:
if i in valid_poss_per_turn:
valid_poss_per_turn[i].append(val)
else:
valid_poss_per_turn[i] = [val]
break
valid_last=valid_poss_per_turn[i]
#Part 1:
numbers=[]
for i in range(nr_ops):
numbers.append(str(max([a[0] for a in valid_poss_per_turn[i]])))
print("Solution part 1:")
print("".join(numbers))
#Part 2:
numbers=[]
for i in range(nr_ops):
numbers.append(str(min([a[0] for a in valid_poss_per_turn[i]])))
print("Solution part 2:")
print("".join(numbers))
#Solution part 1:
#65984919997939
#Solution part 2:
#11211619541713
| shaefeli/AdventOfCode2021 | day24/day24.py | day24.py | py | 5,506 | python | en | code | 0 | github-code | 13 |
34300679615 | # Data processing workflow
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
# pt_dict below maps atomic number -> element symbol; it was generated once from RDKit's periodic table via this (now commented-out) code:
# from rdkit import Chem
# pt = Chem.GetPeriodicTable()
# pt_dict = dict()
# for i in range(1,109):
# pt_dict[i] = pt.GetElementSymbol(i)
# Atomic number (1..108) -> element symbol, identical to RDKit's periodic table
# for Z = 1..108; built from a compact symbol list instead of a literal dict.
pt_dict = dict(enumerate(
    ("H He Li Be B C N O F Ne "
     "Na Mg Al Si P S Cl Ar K Ca "
     "Sc Ti V Cr Mn Fe Co Ni Cu Zn "
     "Ga Ge As Se Br Kr Rb Sr Y Zr "
     "Nb Mo Tc Ru Rh Pd Ag Cd In Sn "
     "Sb Te I Xe Cs Ba La Ce Pr Nd "
     "Pm Sm Eu Gd Tb Dy Ho Er Tm Yb "
     "Lu Hf Ta W Re Os Ir Pt Au Hg "
     "Tl Pb Bi Po At Rn Fr Ra Ac Th "
     "Pa U Np Pu Am Cm Bk Cf Es Fm "
     "Md No Lr Rf Db Sg Bh Hs").split(),
    start=1))
def extract(cont,beginl,endl,num): #extract and process xyz and SCF energy information in IRC out
    """Parse one IRC frame out of Gaussian output lines cont[beginl:endl+1].

    Scans the slice for the Cartesian-coordinates table, collects the atom
    rows, then looks for the following "SCF Done" line to read the energy.
    """
    # cont::list/["line_1","line_2"..."line_x"] the content of file to be processed
    # beginl::int/ begin line of the target part
    # endl::int/ end line of the target part
    # num::int/ The index of the irc structure to be processed
    # Gaussian output args: the dashed separator around the geometry table and
    # the table's column-header line (exact spacing matters for the == match)
    geom_segl = "---------------------------------------------------------------------"
    geom_begin = "Number     Number       Type             X           Y           Z"
    # Args define
    raw_xyz = []           # raw atom table rows as found in the output
    processed_xyz = []     # atom rows rewritten in formal .xyz style
    scf = None             # stays None if no "SCF Done" line is found in range
    geom_flag = 0          # 0 = before table, 1 = inside table, 2 = after table
    idx = beginl
    # detect the line of information (xyz and scf) with a small state machine
    while idx<=endl:
        temp = cont[idx].strip("\n").strip()
        if geom_flag==0: # in front of xyz
            if temp==geom_begin:
                geom_flag = 1 # now receiving atom rows
                # extra increment: together with the one at the end of the loop
                # this skips the dashed separator line right after the header
                idx+=1
        elif geom_flag==1: # in xyz
            if temp!=geom_segl:
                raw_xyz.append(temp)
            else:
                # closing dashed separator -> table finished
                geom_flag=2
        else: # behind xyz: look for the SCF energy of this frame
            if temp[0:8]=="SCF Done":
                # line looks like "SCF Done:  E(...) =  <value>  A.U. ..."
                scf = float(temp.split()[4])
                break
        idx+=1
    # process xyz: convert each raw row "idx Z type x y z" into "<symbol> x y z"
    ele_id_lis = [] # record the order of the elements (written in numbers in out file)
    for item in raw_xyz:
        temp = item.split()
        ele = pt_dict[int(temp[1])]   # field 1 is the atomic number
        ele_id_lis.append(ele)
        new_lis = [ele]+temp[3:]+["\n"]
        processed_xyz.append(" ".join(new_lis))
    # prepend the two formal .xyz header lines (atom count, frame label)
    processed_xyz = [str(len(raw_xyz))+"\n","IRC:"+str(num)+"\n"]+processed_xyz
    # raw_xyz::list/ raw xyz information in out file
    # processed_xyz::list/ xyz information in the formal .xyz style
    # ele_id_lis::list/ elements with order of the molecule
    return raw_xyz,processed_xyz,scf,ele_id_lis
def compute_internal(xyz_lis,args,numpy_flag=False): # compute internal arguments for a given xyz(numpy based)
    """Compute internal coordinates (bond lengths, angles, dihedrals) of a geometry.

    xyz_lis::list or numpy.ndarray/ the geometry, either formal .xyz lines
        (numpy_flag=False: line 0 = atom count, line 1 = comment, atoms from
        line 2) or an (n_atoms, 3) coordinate matrix (numpy_flag=True)
    args::list/[(i,j), (i,j,k), (i,j,k,l)] 1-indexed atom tuples; a 2-tuple
        yields a distance, a 3-tuple an angle in degrees (vertex = middle
        atom), a 4-tuple a dihedral in degrees
    numpy_flag::bool/ False: formal .xyz style; True: numpy matrix
    Returns: list of values in the same order as args (np.nan for tuples of
        any other length).

    Fix: coordinates are now fetched through one helper, so requesting an
    angle/dihedral with numpy_flag=False no longer raises a NameError when no
    distance argument was computed first (previously the .xyz line list was
    only prepared inside the distance branch).
    """
    def _coord(atom_id):
        # Cartesian coordinates of a 1-indexed atom as a float vector.
        if numpy_flag:
            return xyz_lis[atom_id - 1]
        # formal .xyz: atom i lives on line i+1 (count + comment lines first)
        return np.array(list(map(float, xyz_lis[atom_id + 1].split()[-3:])))

    res = []
    for item in args:
        if len(item) == 2:  # Bond Distance
            res.append(np.linalg.norm(_coord(item[0]) - _coord(item[1])))
        elif len(item) == 3:  # Bond Angle, vertex at the middle atom
            vec1 = _coord(item[0]) - _coord(item[1])
            vec2 = _coord(item[2]) - _coord(item[1])
            cos_a = vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
            res.append(np.arccos(cos_a) / np.pi * 180)
        elif len(item) == 4:  # Dihedral Angle
            vec1 = _coord(item[0]) - _coord(item[1])  # 2->1
            vec3 = _coord(item[3]) - _coord(item[2])  # 3->4
            # NOTE(review): the two input modes historically built the central
            # bond vector in opposite directions, which flips the dihedral's
            # sign; preserved as-is -- confirm which convention is intended.
            if numpy_flag:
                vec2 = _coord(item[1]) - _coord(item[2])
            else:
                vec2 = _coord(item[2]) - _coord(item[1])  # 2->3
            cross1 = np.cross(vec1, vec2)
            cross2 = np.cross(-vec2, vec3)
            cross3 = np.cross(vec1, vec3)
            dihe = np.sign(cross3.dot(vec2)) * np.arccos(
                cross1.dot(cross2) / (np.linalg.norm(cross1) * np.linalg.norm(cross2))) / np.pi * 180
            res.append(dihe)
        else:  # malformed tuple -> placeholder, keeps output aligned with args
            res.append(np.nan)
    # information of the bond args in corresponding order
    return res
def xyz2mat(xyz): # convert xyz information to numpy matrix
    """Convert formal .xyz lines to an (n_atoms, 3) float coordinate matrix.

    xyz::list/ formal .xyz content: line 0 = atom count, line 1 = comment,
        subsequent lines = "<element> x y z"
    Returns: numpy.ndarray of shape (n_atoms, 3).
    """
    # last three whitespace-separated fields of each atom line are x, y, z;
    # np.array consumes the nested list directly (no extra copy needed)
    coords = [list(map(float, line.split()[-3:])) for line in xyz[2:]]
    return np.array(coords)
def irc_processor(filenames,irc_bond_arg=[],write_xyz=True,write_rescsv=True,\
        special_file=False): #!!The core function: Generate res_DF using irc.out
    """Parse one or more Gaussian IRC .out/.log files into a per-frame DataFrame.

    Concatenates the files, splits them at the IRC separator banner, extracts
    each frame's geometry and SCF energy via extract(), computes the requested
    internal coordinates, and optionally writes a multi-frame .xyz and a .csv.
    Returns (res_DF, ele_id_lis).
    """
    # filenames::list/["str1","str2"...] the file(files) to be processed
    # irc_bond_arg::list/[(arg1),(arg2)] The bond args to be monitored along IRC
    #     NOTE(review): mutable default argument; it is only read here, never
    #     mutated, so this is safe today -- but a None default would be safer.
    # write_xyz::bool/ whether or not generate a frame-wise .xyz file of IRC
    # write_rescsv::bool/ whether or not generate a .csv file containing all the information
    # special_file::bool/
    # it could work to run the program in a ugly way when there are more than 1 IRC file describing the irc process
    # (I turned it to True when I did a debug)
    # Gaussian 16 output file line arguments
    irc_segl = "IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC-IRC"
    # specify filename and do reading; all files are concatenated into cont
    assert type(filenames)==list
    cont = []
    # NOTE(review): assumes a Windows-style path like "dir\name.out" with
    # exactly one backslash -- confirm against how callers build filenames
    basename = filenames[0].split("\\")[1].split(".")[0]
    # file_segl marks where the second file starts in the concatenated cont
    file_segl = -1
    for filename in filenames: #!!!!!!!
        assert filename.split(".")[-1].lower() in ["out","log"]
        file = open(filename,"r")
        file_segl = file_segl+1 if file_segl<0 else len(cont)
        cont = cont+file.readlines()
        file.close() #!!!!!!
    # Read the irc geom segmentation position (indices of the IRC banner lines)
    count =0
    seg_idx_list = []
    for i in range(len(cont)):
        cont[i] = cont[i].strip("\n").strip()
        if cont[i]==irc_segl:
            count+=1
            seg_idx_list.append(i)
    seg_idx_list=seg_idx_list[1:-1] # The first would be the redundant line
    # create and fill a list: irc_idx_list=[[irc.No,[raw_xyz,processed_xyz,scf]]*n]
    irc_idx_list = []
    num=0
    for i in range(len(seg_idx_list)):
        idx = seg_idx_list[i]
        if i>0: # The frame of ts is an exlusive frame
            # look a few lines above the banner for the "Point Number ..."
            # summary line that carries the frame index and direction
            info_flag = True
            for j in range(2,5):
                try:
                    cont[idx-j].split()[0]
                except:
                    continue
                if cont[idx-j].split()[0]=="Point":
                    temp = cont[idx-j]
                    info_flag = False
                    break
            if info_flag:
                # no "Point" line found near this banner -> skip the segment
                continue
            # FORWARD frames get positive indices, REVERSE negative
            flag = temp.split()[4]
            try:
                num = int(temp.split()[2]) if flag=="FORWARD" else -1*int(temp.split()[2])
            except:
                # fields fuse together once the point number reaches 3 digits,
                # so re-read direction/number from the shifted positions
                flag = temp.split()[3]
                try: # abs(num)>=100
                    num = int(temp.split()[1][-3:]) if flag=="FORWARD" else -1*int(temp.split()[1][-3:])
                except:
                    break
            if special_file and idx<file_segl:
                # frames from the first file get fractional indices so they
                # sort before the second file's frames
                num = num/100
            # NOTE(review): seg_idx_list[i+1] will raise IndexError on the last
            # banner unless that segment always exits via continue/break above
            # -- confirm with a real multi-file run
            result = extract(cont,seg_idx_list[i],seg_idx_list[i+1],num=num)
            irc_idx_list.append([num,result])
        else:
            # first segment is the transition state itself -> index 0
            result = extract(cont,seg_idx_list[i],seg_idx_list[i+1],num=0)
            irc_idx_list.append([0,result])
    # order frames along the reaction coordinate (negative -> 0 -> positive)
    irc_idx_list.sort(key=lambda x:x[0])
    # write the output multi-frame xyz trajectory file
    if write_xyz:
        with open("./irc_analysis_out/output_"+basename+".xyz","w") as f:
            for item in irc_idx_list:
                f.writelines(item[1][1])
    # Bond args processing: one row per frame, keyed by the IRC index
    irc_arg_dict = dict()
    for item in irc_idx_list:
        irc_arg_dict[item[0]] = {"spe":float(item[1][2])}
        arg_res = compute_internal(item[1][1],irc_bond_arg)
        irc_arg_dict[item[0]]["xyz_mat"] = xyz2mat(item[1][1])
        for i in range(len(irc_bond_arg)):
            irc_arg_dict[item[0]][str(irc_bond_arg[i])] = arg_res[i]
    # generate res_DF
    res_DF = pd.DataFrame.from_dict(irc_arg_dict,orient = "index")
    # convert res_df to csv file
    if write_rescsv:
        res_DF.to_csv("./irc_analysis_out/infoCSV_"+basename+".csv")
    # res_DF::pandas.Dataframe/ result Dataframe
    # irc_idx_list[0][1][-1]::list/ ele_id_lis
    return res_DF,irc_idx_list[0][1][-1]
def cal_rmsd(coord_1, coord_2):
    """Root-mean-square deviation between two coordinate arrays.

    Computed element-wise, with NO prior alignment/superposition.
    coord_1::numpy.array/ coordinate (vector) No.1
    coord_2::numpy.array/ coordinate (vector) No.2
    Returns: float
    """
    squared_dev = (coord_1 - coord_2) ** 2
    return np.sqrt(squared_dev.mean())
def fitting(df, arg1, arg2, kind="cubic", axis=0):
    """Build a 1-D interpolator mapping df[arg1] -> df[arg2].

    df::pandas.DataFrame/ the data
    arg1::str/ x label; the literal "index" (case-insensitive) uses df.index
    arg2::str/ y label; same "index" convention
    kind::any/ forwarded to scipy.interpolate.interp1d (interpolation scheme)
    axis::int/ forwarded to scipy.interpolate.interp1d
    Returns: scipy.interpolate.interp1d callable
    """
    def _axis_values(label):
        # resolve a label to either the index or the column's values
        if label.lower() == "index":
            return df.index
        return np.array(list(df[label]))

    return interp1d(_axis_values(arg1), _axis_values(arg2), kind=kind, axis=axis)
def arr_eval(lis, print_arr=False): # Describe a list quickly
    """Print descriptive statistics of *lis* to stdout.

    lis::list/ numeric values to summarize
    print_arr::bool/ when True, additionally print the whole array first
    Returns: None (output goes to stdout only).
    """
    arr = np.array(lis)
    # measures of dispersion; locals renamed so the builtins min/max/range
    # are no longer shadowed (the printed text is unchanged)
    minimum = np.amin(arr)
    maximum = np.amax(arr)
    mean = np.average(arr)
    value_range = np.ptp(arr)
    variance = np.var(arr)
    sd = np.std(arr)
    if print_arr:
        print("Array =", arr)
    # the summary itself always prints; print_arr only gates the raw array
    print("Measures of arr")
    print("Arr Length =", len(lis))
    print("Mean =", mean)
    print("Minimum =", minimum)
    print("Maximum =", maximum)
    print("Range =", value_range)
    print("Variance =", variance)
    print("Standard Deviation =", sd)
def generate_xyz(frames, ele_id_lis, filename="generate_xyz.xyz", path_flag=True): # Generate a xyz file
    """Write one geometry (or a Series of geometries) to an .xyz file.

    frames::pd.core.series.Series or list or numpy.array/ coordinate rows, or a
        Series whose elements are each a set of coordinate rows
    ele_id_lis::list/ elements with order of the molecule
    filename::str/ the filename of the output file
    path_flag::bool/ True -> ./irc_analysis_out/, False -> ./compute_inpout/
    """
    out_lines = []

    def _append_frame(frame):
        # one .xyz block: atom count, blank comment line, then one
        # "<element> x y z" line per atom (header only for non-empty frames)
        for atom_idx, coords in enumerate(frame):
            if atom_idx == 0:
                out_lines.append(str(len(frame)) + "\n")
                out_lines.append("\n")
            fields = [ele_id_lis[atom_idx]] + [str(c) for c in list(coords)]
            out_lines.append(" ".join(fields) + "\n")

    if type(frames) == pd.core.series.Series:
        # multi-frame trajectory: one block per Series element
        for i in range(len(frames)):
            _append_frame(frames[i])
    else:
        # single geometry given as a list or numpy array of rows
        _append_frame(frames)

    out_dir = "./irc_analysis_out/" if path_flag else "./compute_inpout/"
    with open(out_dir + filename, "w") as fh:
        fh.writelines(out_lines)
    return
def generate_gjf(frames, ele_id_lis, filename="generate_gjf", path_flag=True,
                 suffix="", chg_spin="0 1 "):  # generate gjf files for each frame
    """Write frame geometries as Gaussian .gjf sections into one output file.

    frames::pd.core.series.Series or list or numpy.array/ xyz information
    ele_id_lis::list/ elements with order of the molecule
    filename::str/ the filename of output file (".gjf" is appended)
    path_flag::bool/ True writes under ./irc_analysis_out/, False under ./compute_inpout/
    suffix::str/ optional line emitted after each geometry block
    chg_spin::str/ charge and spin line for each .gjf section
    """
    out = []

    def _emit_frame(frame):
        for atom_idx in range(len(frame)):
            if atom_idx == 0:
                # per-frame header: route line, blank, title, blank, charge/spin
                # NOTE: 'scriipt' spelling kept verbatim from the original output
                out.append("#! generated by scriipt\n")
                out.append("\n")
                out.append("Title\n")
                out.append("\n")
                out.append(chg_spin + "\n")
            coords = [str(c) for c in list(frame[atom_idx])]
            out.append(" ".join([ele_id_lis[atom_idx]] + coords) + "\n")
        # trailer after every frame: blank, suffix line, two blank lines
        out.append("\n")
        out.append(suffix + "\n")
        out.extend(["\n"] * 2)

    if type(frames) == pd.core.series.Series:
        for i in range(len(frames)):
            _emit_frame(frames[i])
    else:
        _emit_frame(frames)
    out_dir = "./irc_analysis_out/" if path_flag else "./compute_inpout/"
    with open(out_dir + filename + ".gjf", "w") as f:
        f.writelines(out)
    return
def dicho_solve(func,limit,bond_arg,bond_id,ang=False,len_thresh=0.001,ang_thresh=0.1):
    # use Bisection method to find corresponding xyz geom in fitted line
    """Bisection search along a fitted IRC path for a target internal coordinate.

    func::scipy.interpolate._interpolate.interp1d/ the fitted path function
    limit::[lower limit,upper limit]/ range in which to search for a solution
    bond_arg::float/ target value for the bond argument
    bond_id::tuple/ the investigated bond/angle atom indices
    ang::bool/ whether we are talking about an angle
    len_thresh::float/ convergence threshold for bond lengths
    ang_thresh::float/ convergence threshold for angles
    Returns the path parameter of the converged bracket, or None when the
    final bracket does not straddle bond_arg.
    (Cleanup vs. original: removed the unused local `gap` and the redundant
    recomputation of beg_value/end_value at the top of the loop, which
    duplicated the values already computed before the loop / at its bottom.)
    """
    beg = limit[0]
    end = limit[1]
    thresh = ang_thresh if ang else len_thresh
    beg_value = compute_internal(func(beg),[bond_id],numpy_flag=True)[0]
    end_value = compute_internal(func(end),[bond_id],numpy_flag=True)[0]
    while abs(end_value-beg_value)>thresh:
        mid = (end+beg)/2
        mid_value = compute_internal(func(mid),[bond_id],numpy_flag=True)[0]
        # keep the half-interval whose endpoint values still bracket bond_arg
        if mid_value>bond_arg:
            if end_value>bond_arg:
                end = mid
            else:
                beg = mid
        else:
            if end_value>bond_arg:
                beg = mid
            else:
                end = mid
        beg_value = compute_internal(func(beg),[bond_id],numpy_flag=True)[0]
        end_value = compute_internal(func(end),[bond_id],numpy_flag=True)[0]
    # NOTE(review): the original comment called the result an xyz matrix, but
    # `beg` is the path parameter; callers presumably apply func(result)
    # themselves -- confirm against call sites
    if np.sign(bond_arg-beg_value) != np.sign(bond_arg-end_value):
        result = beg
        return result
    else:
        print("Optimization ERROR!!!!!!")
        return None
44765823526 | import json
import xml.etree.ElementTree as ET
def readXML(xmlFile):
    """Parse an XML file's root attributes into a dict.

    Keys are prefixed with the root tag name ("<tag>_<attr>") so they stay
    distinct across roots; attribute values that look like list literals
    ("[a, b]") are converted to real lists of whitespace-stripped strings.
    Returns the dict, or None when xmlFile is None.
    """
    if xmlFile is None:
        return None
    tree = ET.parse(xmlFile)
    root = tree.getroot()
    prefix = root.tag
    # save all the variables in a dictionary
    variables = root.attrib
    # used to make the keys of the variables all different between each other
    attributes = {}
    for v in variables:
        key = prefix + "_" + v
        attributes[key] = variables[v]
        if variables[v].count("[") > 0:
            # the value is a list literal: drop the brackets and split it
            variables[v] = variables[v].replace("[", "").replace("]", "")
            # BUGFIX: the original removed and re-appended elements while
            # iterating the same list, which skipped every other element and
            # left some entries unstripped; a comprehension strips them all
            attributes[key] = [element.strip() for element in variables[v].split(",")]
    return attributes
def myLogic(tests, data=None):
    """Recursively evaluate a json-logic style rule tree against data.

    tests::dict/ a {"op": [args...]} node, or a primitive (returned as-is)
    data::dict/ variable bindings resolved by the "var"/"rule" operations
    Fixes vs. original: `tests.keys()[0]` and the bare `reduce` builtin are
    Python-2-only; `next(iter(...))` and `functools.reduce` work on both.
    """
    from functools import reduce  # builtin on py2, lives in functools on py3
    # You've recursed to a primitive, stop!
    if tests is None or type(tests) != dict:
        return tests
    data = data or {}
    # the node's single operator key (dict.keys() is not subscriptable on py3)
    op = next(iter(tests))
    values = tests[op]
    operations = {
        "==" : (lambda a, b: a == b),
        "!=" : (lambda a, b: a != b),
        ">"  : (lambda a, b: a > b),
        ">=" : (lambda a, b: a >= b),
        "<"  : (lambda a, b, c=None: a < b if (c is None) else (a < b) and (b < c)),
        "<=" : (lambda a, b, c=None: a <= b if (c is None) else (a <= b) and (b <= c)),
        "!"  : (lambda a: not a),
        "and": (lambda *args: reduce(lambda total, arg: total and arg, args, True)),
        "or" : (lambda *args: reduce(lambda total, arg: total or arg, args, False)),
        # binary only (unlike and/or, which chain over n operands)
        "xor": (lambda a, b: (a and (not b)) or ((not a) and b)),
        # binary only: logical implication
        "->" : (lambda a, b: (not a) or b),
        # binary only: logical equivalence
        "<->": (lambda a, b: ((a and b) or ((not a) and (not b)))),
        # membership test; returns False when b is not a container
        "in" : (lambda a, b: a in b if "__contains__" in dir(b) else False),
        # NOTE(review): this key function takes two args but sorted() passes
        # one, so invoking "+++" raises TypeError; kept verbatim from original
        "+++": (lambda x: sorted(x, key=(lambda a, b: a + b))),
        # dotted-path lookup into data (dicts by key, lists/tuples by index)
        "var": (lambda a, not_found=None:
                reduce(lambda data, key: (data.get(key, not_found)
                                          if type(data) == dict
                                          else data[int(key)]
                                          if (type(data) in [list, tuple] and
                                              str(key).lstrip("-").isdigit())
                                          else not_found),
                       str(a).split("."),
                       data)
                ),
        # same lookup as "var"; separate name only to distinguish references
        # to previously computed simple rules
        "rule": (lambda a, not_found=None:
                 reduce(lambda data, key: (data.get(key, not_found)
                                           if type(data) == dict
                                           else data[int(key)]
                                           if (type(data) in [list, tuple] and str(key).lstrip("-").isdigit())
                                           else not_found),
                        str(a).split("."),
                        data)
                 ),
        "min"  : (lambda *args: min(args)),
        "max"  : (lambda *args: max(args)),
        "count": (lambda *args: sum(1 if a else 0 for a in args)),
    }
    if op not in operations:
        raise RuntimeError("Unrecognized operation %s" % op)
    # Easy syntax for unary operators, like {"var": "x"} instead of strict
    # {"var": ["x"]}
    if type(values) not in [list, tuple]:
        values = [values]
    # Recursion!
    values = [myLogic(val, data) for val in values]
    return operations[op](*values)
def evaluateRules(rule_data_file, data_for_rules):
    """Evaluate all rules from a JSON rule file against the given data.

    rule_data_file::str/ path to a JSON file with "basic_rules" and
        "complex_rules" sections
    data_for_rules::dict/ variable bindings referenced by the rules.
        NOTE(review): this dict is mutated below (basic-rule results are
        update()d into it); confirm callers expect that side effect.
    Returns a dict mapping rule name -> evaluation result.
    (Cleanup vs. original: dropped the redundant explicit close() inside
    the `with` block, which already closes the file.)
    """
    with open(rule_data_file) as json_rules:
        json_closes = json.load(json_rules)
    # first compute the simple rules (the ones not composed of other rules)
    rules_results = {}
    for c in json_closes:
        if (c == "basic_rules"):
            for rule in json_closes[c]:
                rules_results[rule] = myLogic(json_closes[c][rule], data_for_rules)
    # merge the simple-rule results with the raw data so that complex rules
    # can reference both by name
    mixed_data_dictionary = data_for_rules
    mixed_data_dictionary.update(rules_results)
    for c in json_closes:
        if (c == "complex_rules"):
            for rule in json_closes[c]:
                rules_results[rule] = myLogic(json_closes[c][rule], mixed_data_dictionary)
    return rules_results
| paolocesa/c4aEnginePaolo | controller/mini_planner/Logica.py | Logica.py | py | 5,303 | python | en | code | 0 | github-code | 13 |
846669462 | # The `serializers.py` file is responsible for defining serialization
# and deserialization logic to convert complex data types, such as
# model instances, into JSON or other formats for API responses and vice versa.
# Create your serializers here.
from rest_framework import serializers
from apps.todo.models import TodoList
class TodoListSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing the ``id`` and ``title`` of a TodoList."""

    class Meta:
        model = TodoList
        fields = ('id', 'title')
        # id is database-generated, so it is serialized out but never
        # accepted as input
        extra_kwargs = {
            'id': {'read_only': True},
        }
| MentorMate/mentormate-django-cookiecutter-template | {{cookiecutter.project_name}}/apps/todo/api/v1/serializers.py | serializers.py | py | 533 | python | en | code | 0 | github-code | 13 |
7600562856 | #!/usr/bin/env python
"""Defines common algorithms over FSTs"""
from pyfoma.fst import FST, State, Transition
import pyfoma.private.partition_refinement as partition_refinement
import heapq, operator, itertools, functools
from collections import deque
from typing import Dict, Callable
# region Function Wrappers
def _copy_param(func):
"""Automatically uses a copy of the FST parameter instead of the original value, in order to avoid mutating the
object. Use on any method that returns a modified version of an FST."""
@functools.wraps(func)
def wrapper_decorator(fst: 'FST', *args, **kwargs):
return func(fst.__copy__(), *args, **kwargs)
return wrapper_decorator
def _harmonize_alphabet(func):
"""A wrapper for expanding .-symbols when operations of arity 2 are performed.
For example, if calculating the union of FSM1 and FSM2, and both contain
.-symbols, the transitions with . are expanded to include the symbols that
are present in the other FST."""
@functools.wraps(func)
def wrapper_decorator(fst1: 'FST', fst2: 'FST', **kwargs):
for A, B in [(fst1, fst2), (fst2, fst1)]:
if '.' in A.alphabet and (A.alphabet - {'.'}) != (B.alphabet - {'.'}):
Aexpand = B.alphabet - A.alphabet - {'.', ''}
if A == fst2:
A, _ = fst2.copy_filtered()
fst2 = A # Need to copy to avoid mutating other
for s, l, t in list(A.all_transitions(A.states)):
if '.' in l:
for sym in Aexpand:
newl = tuple(lbl if lbl != '.' else sym for lbl in l)
s.add_transition(t.targetstate, newl, t.weight)
newalphabet = fst1.alphabet | fst2.alphabet
value = func(fst1, fst2, **kwargs)
# Do something after
value.alphabet = newalphabet
return value
return wrapper_decorator
# endregion
# region Algorithms
@_copy_param
def trimmed(fst: 'FST') -> 'FST':
"""Returns a modified FST, removing states that aren't both accessible and coaccessible."""
return filtered_coaccessible(filtered_accessible(fst))
@_copy_param
def filtered_accessible(fst: 'FST') -> 'FST':
"""Returns a modified FST, removing states that are not on a path from the initial state."""
explored = { fst.initialstate }
stack = deque([fst.initialstate])
while stack:
source = stack.pop()
for label, transition in source.all_transitions():
if transition.targetstate not in explored:
explored.add(transition.targetstate)
stack.append(transition.targetstate)
fst.states = explored
fst.finalstates &= fst.states
return fst
@_copy_param
def filtered_coaccessible(fst: 'FST') -> 'FST':
"""Returns a modified FST, removing states and transitions to states that have no path to a final state."""
explored = {fst.initialstate}
stack = deque([fst.initialstate])
inverse = {s: set() for s in fst.states} # store all preceding arcs here
while stack:
source = stack.pop()
for target in source.all_targets():
inverse[target].add(source)
if target not in explored:
explored.add(target)
stack.append(target)
stack = deque([s for s in fst.finalstates])
coaccessible = {s for s in fst.finalstates}
while stack:
source = stack.pop()
for previous in inverse[source]:
if previous not in coaccessible:
coaccessible.add(previous)
stack.append(previous)
coaccessible.add(fst.initialstate) # Let's make an exception for the initial
for s in fst.states: # Need to also remove transitions to non-coaccessibles
s.remove_transitions_to_targets(fst.states - coaccessible)
fst.states &= coaccessible
fst.finalstates &= fst.states
return fst
def scc(fst: 'FST') -> set:
"""Calculate the strongly connected components of an FST.
This is a basic implementation of Tarjan's (1972) algorithm.
Tarjan, R. E. (1972), "Depth-first search and linear graph algorithms",
SIAM Journal on Computing, 1 (2): 146–160.
Returns a set of frozensets of states, one frozenset for each SCC."""
index = 0
S = deque([])
sccs, indices, lowlink, onstack = set(), {}, {}, set()
def _strongconnect(state):
nonlocal index, indices, lowlink, onstack, sccs
indices[state] = index
lowlink[state] = index
index += 1
S.append(state)
onstack.add(state)
targets = state.all_targets()
for target in targets:
if target not in indices:
_strongconnect(target)
lowlink[state] = min(lowlink[state], lowlink[target])
elif target in onstack:
lowlink[state] = min(lowlink[state], indices[target])
if lowlink[state] == indices[state]:
currscc = set()
while True:
target = S.pop()
onstack.remove(target)
currscc.add(target)
if state == target:
break
sccs.add(frozenset(currscc))
for s in fst.states:
if s not in indices:
_strongconnect(s)
return sccs
@_copy_param
def pushed_weights(fst: 'FST') -> 'FST':
"""Returns a modified FST, pushing weights toward the initial state. Calls dijkstra and maybe scc."""
potentials = {s:dijkstra(fst, s) for s in fst.states}
for s, _, t in fst.all_transitions(fst.states):
t.weight += potentials[t.targetstate] - potentials[s]
for s in fst.finalstates:
s.finalweight = s.finalweight - potentials[s]
residualweight = potentials[fst.initialstate]
if residualweight != 0.0:
# Add residual to all exits of initial state SCC and finals in that SCC
mainscc = next(s for s in scc(fst) if fst.initialstate in s)
for s, _, t in fst.all_transitions(mainscc):
if t.targetstate not in mainscc: # We're exiting the main SCC
t.weight += residualweight
for s in mainscc & fst.finalstates: # Add res w to finals in initial SCC
s.finalweight += residualweight
return fst
@_copy_param
def mapped_labels(fst: 'FST', map: dict) -> 'FST':
"""Returns a modified FST, relabeling the transducer with new labels from dictionary mapping.
Example: ``map_labels(myfst, {'a':'', 'b':'a'})``"""
for s in fst.states:
newlabelings = []
for lbl in s.transitions.keys():
if any(l in lbl for l in map):
newlabel = tuple(map[lbl[i]] if lbl[i] in map else lbl[i] for i in range(len(lbl)))
newlabelings.append((lbl, newlabel))
for old, new in newlabelings:
s.rename_label(old, new)
fst.alphabet = fst.alphabet - map.keys() | set(map.values()) - {''}
return fst
def epsilon_removed(fst: 'FST') -> 'FST':
"""Returns a modified FST, creating new epsilon-free FSM equivalent to original."""
# For each state s, figure out the min-cost w' to hop to a state t with epsilons
# Then, add the (non-e) transitions of state t to s, adding w' to their cost
# Also, if t is final and s is not, make s final with cost t.finalweight ⊗ w'
# If s and t are both final, make s's finalweight s.final ⊕ (t.finalweight ⊗ w')
eclosures = {s:epsilon_closure(fst, s) for s in fst.states}
if all(len(ec) == 0 for ec in eclosures.values()): # bail, no epsilon transitions
return fst.__copy__()
newfst, mapping = fst.copy_filtered(labelfilter = lambda lbl: any(len(sublabel) != 0 for sublabel in lbl))
for state, ec in eclosures.items():
for target, cost in ec.items():
# copy target's transitions to source
for label, t in target.all_transitions():
if all(len(sublabel) == 0 for sublabel in label): # is epsilon: skip
continue
mapping[state].add_transition(mapping[t.targetstate], label, cost + t.weight)
if target in fst.finalstates:
if state not in fst.finalstates:
newfst.finalstates.add(mapping[state])
mapping[state].finalweight = 0.0
mapping[state].finalweight += cost + target.finalweight
return newfst
def epsilon_closure(fst: 'FST', state) -> dict:
"""Finds, for a state the set of states reachable by epsilon-hopping."""
explored, cntr = {}, itertools.count()
q = [(0.0, next(cntr), state)]
while q:
cost, _, source = heapq.heappop(q)
if source not in explored:
explored[source] = cost
for target, weight in source.all_epsilon_targets_cheapest().items():
heapq.heappush(q, (cost + weight, next(cntr), target))
explored.pop(state) # Remove the state where we started from
return explored
def dijkstra(fst: 'FST', state) -> float:
"""The cost of the cheapest path from state to a final state. Go Edsger!"""
explored, cntr = {state}, itertools.count() # decrease-key is for wusses
Q = [(0.0, next(cntr), state)] # Middle is dummy cntr to avoid key ties
while Q:
w, _ , s = heapq.heappop(Q)
if s == None: # First None we pull out is the lowest-cost exit
return w
explored.add(s)
if s in fst.finalstates:
# now we push a None state to signal the exit from a final
heapq.heappush(Q, (w + s.finalweight, next(cntr), None))
for trgt, cost in s.all_targets_cheapest().items():
if trgt not in explored:
heapq.heappush(Q, (cost + w, next(cntr), trgt))
return float("inf")
@_copy_param
def labelled_states_topology(fst: 'FST', mode = 'BFS') -> 'FST':
"""Returns a modified FST, topologically sorting and labelling states with numbers.
Keyword arguments:
mode -- 'BFS', i.e. breadth-first search by default. 'DFS' is depth-first.
"""
cntr = itertools.count()
Q = deque([fst.initialstate])
inqueue = {fst.initialstate}
while Q:
s = Q.popleft() if mode == 'BFS' else Q.pop()
s.name = str(next(cntr))
for label, t in s.all_transitions():
if t.targetstate not in inqueue:
Q.append(t.targetstate)
inqueue.add(t.targetstate)
return fst
def words_nbest(fst: 'FST', n) -> list:
"""Finds the n cheapest word in an FST, returning a list."""
return list(itertools.islice(words_cheapest(fst), n))
def words_cheapest(fst: 'FST'):
"""A generator to yield all words in order of cost, cheapest first."""
cntr = itertools.count()
Q = [(0.0, next(cntr), fst.initialstate, [])]
while Q:
cost, _, s, seq = heapq.heappop(Q)
if s is None:
yield cost, seq
else:
if s in fst.finalstates:
heapq.heappush(Q, (cost + s.finalweight, next(cntr), None, seq))
for label, t in s.all_transitions():
heapq.heappush(Q, (cost + t.weight, next(cntr), t.targetstate, seq + [label]))
@_copy_param
def determinized_unweighted(fst: 'FST') -> 'FST':
"""Returns a modified FST, determinized with all zero weights."""
return determinized(fst, staterep = lambda s, w: (s, 0.0), oplus = lambda *x: 0.0)
def determinized_as_dfa(fst: 'FST') -> 'FST':
"""Returns a modified FST, determinized as a DFA with weight as part of label, then apply unweighted det."""
newfst = fst.copy_mod(modlabel = lambda l, w: l + (w,), modweight = lambda l, w: 0.0)
determinized = determinized_unweighted(newfst) # run det, then move weights back
return determinized.copy_mod(modlabel = lambda l, _: l[:-1], modweight = lambda l, _: l[-1])
def determinized(fst: 'FST', staterep = lambda s, w: (s, w), oplus = min) -> 'FST':
"""Returns a modified FST, by weighted determinization of FST."""
newfst = FST(alphabet = fst.alphabet.copy())
firststate = frozenset({staterep(fst.initialstate, 0.0)})
statesets = {firststate:newfst.initialstate}
if fst.initialstate in fst.finalstates:
newfst.finalstates = {newfst.initialstate}
newfst.initialstate.finalweight = fst.initialstate.finalweight
Q = deque([firststate])
while Q:
currentQ = Q.pop()
collectlabels = {} # temp dict of label:all transitions {(src1, trans1),...}
for s, _ in currentQ:
for label, transitions in s.transitions.items():
for t in transitions:
collectlabels[label] = collectlabels.get(label, set()) | {(s, t)}
residuals = {s:r for s, r in currentQ}
for label, tset in collectlabels.items():
# wprime is the maximum amount the matching outgoing arcs share -
# some paths may therefore accumulate debt which needs to be passed on
# and stored in the next state representation for future discharge
wprime = oplus(t.weight + residuals[s] for s, t in tset)
# Note the calculation of the weight debt we pass forward, reused w/ finals below
newQ = frozenset(staterep(t.targetstate, t.weight + residuals[s] - wprime) for s, t in tset)
if newQ not in statesets:
Q.append(newQ)
newstate = State()
statesets[newQ] = newstate
newfst.states.add(statesets[newQ])
#statesets[newQ].name = {(s.name, w) if w != 0.0 else s.name for s, w in newQ}
else:
newstate = statesets[newQ]
statesets[currentQ].add_transition(newstate, label, wprime)
if any(t.targetstate in fst.finalstates for _, t in tset):
newfst.finalstates.add(newstate)
# State was final, so we discharge the maximum debt we can
newstate.finalweight = oplus(t.targetstate.finalweight + t.weight + \
residuals[s] - wprime for s, t in tset if t.targetstate in fst.finalstates)
return newfst
def minimized_as_dfa(fst: 'FST') -> 'FST':
"""Returns a modified FST, minimized as a DFA with weight as part of label, then apply unweighted min."""
newfst = fst.copy_mod(modlabel = lambda l, w: l + (w,), modweight = lambda l, w: 0.0)
minimized_fst = minimized(newfst) # minimize, and shift weights back
return minimized_fst.copy_mod(modlabel = lambda l, _: l[:-1], modweight = lambda l, _: l[-1])
@_copy_param
def minimized(fst: 'FST') -> 'FST':
"""Returns a modified FST, minimized by constrained reverse subset construction, Hopcroft-ish."""
reverse_index = create_reverse_index(fst)
finalset, nonfinalset = fst.finalstates.copy(), fst.states - fst.finalstates
initialpartition = [x for x in (finalset, nonfinalset) if len(x) > 0]
P = partition_refinement.PartitionRefinement(initialpartition)
Agenda = {id(x) for x in (finalset, nonfinalset) if len(x) > 0}
while Agenda:
S = P.sets[Agenda.pop()] # convert id to the actual set it corresponds to
for label, sourcestates in find_sourcestates(fst, reverse_index, S):
splits = P.refine(sourcestates) # returns list of (A & S, A - S) tuples
Agenda |= {new for new, _ in splits} # Only place A & S on Agenda
equivalenceclasses = P.astuples()
if len(equivalenceclasses) == len(fst.states):
return fst # we were already minimal, no need to reconstruct
return merging_equivalent_states(fst, equivalenceclasses)
def merging_equivalent_states(fst: 'FST', equivalenceclasses: set) -> 'FST':
"""Merge equivalent states given as a set of sets."""
eqmap = {s[i]:s[0] for s in equivalenceclasses for i in range(len(s))}
representerstates = set(eqmap.values())
newfst = FST(alphabet = fst.alphabet.copy())
statemap = {s:State() for s in fst.states if s in representerstates}
newfst.initialstate = statemap[eqmap[fst.initialstate]]
for s, lbl, t in fst.all_transitions(fst.states):
if s in representerstates:
statemap[s].add_transition(statemap[eqmap[t.targetstate]], lbl, t.weight)
newfst.states = set(statemap.values())
newfst.finalstates = {statemap[s] for s in fst.finalstates if s in representerstates}
for s in fst.finalstates:
if s in representerstates:
statemap[s].finalweight = s.finalweight
return newfst
def find_sourcestates(fst: 'FST', index, stateset):
"""Create generator that yields sourcestates for a set of target states.
Yields the label, and the set of sourcestates."""
all_labels = {l for s in stateset for l in index[s].keys()}
for l in all_labels:
sources = set()
for state in stateset:
if l in index[state]:
sources |= index[state][l]
yield l, sources
def create_reverse_index(fst: 'FST') -> dict:
"""Returns dictionary of transitions in reverse (indexed by state)."""
idx = {s:{} for s in fst.states}
for s, lbl, t in fst.all_transitions(fst.states):
idx[t.targetstate][lbl] = idx[t.targetstate].get(lbl, set()) | {s}
return idx
def minimized_brz(fst: 'FST') -> 'FST':
"""Returns a modified FST, minimized through Brzozowski's trick."""
return determinized(reversed_e(determinized(reversed_e(epsilon_removed(fst)))))
def kleene_closure(fst: 'FST', mode = 'star') -> 'FST':
"""Returns a modified FST, applying self*. No epsilons here. If mode == 'plus', calculate self+."""
q1 = {k:State() for k in fst.states}
newfst = FST(alphabet = fst.alphabet.copy())
for lbl, t in fst.initialstate.all_transitions():
newfst.initialstate.add_transition(q1[t.targetstate], lbl, t.weight)
for s, lbl, t in fst.all_transitions(fst.states):
q1[s].add_transition(q1[t.targetstate], lbl, t.weight)
for s in fst.finalstates:
for lbl, t in fst.initialstate.all_transitions():
q1[s].add_transition(q1[t.targetstate], lbl, t.weight)
q1[s].finalweight = s.finalweight
newfst.finalstates = {q1[s] for s in fst.finalstates}
if mode != 'plus' or fst.initialstate in fst.finalstates:
newfst.finalstates |= {newfst.initialstate}
newfst.initialstate.finalweight = 0.0
newfst.states = set(q1.values()) | {newfst.initialstate}
return newfst
def kleene_star(fst: 'FST') -> 'FST':
"""Returns a modified FST, applying self*."""
return kleene_closure(fst, mode='star')
def kleene_plus(fst: 'FST') -> 'FST':
"""Returns a modified FST, applying self+."""
return kleene_closure(fst, mode='plus')
@_copy_param
def added_weight(fst: 'FST', weight) -> 'FST':
"""Returns a modified FST, adding weight to the set of final states in the FST."""
for s in fst.finalstates:
s.finalweight += weight
return fst
@_copy_param
def optional(fst: 'FST') -> 'FST':
"""Returns a modified FST, calculated as T|'' ."""
if fst.initialstate in fst.finalstates:
return fst
newinitial = State()
for lbl, t in fst.initialstate.all_transitions():
newinitial.add_transition(t.targetstate, lbl, t.weight)
fst.initialstate = newinitial
fst.states.add(newinitial)
fst.finalstates.add(newinitial)
newinitial.finalweight = 0.0
return fst
@_harmonize_alphabet
def concatenate(fst1: 'FST', fst2: 'FST') -> 'FST':
"""Concatenation of T1T2. No epsilons. May produce non-accessible states."""
ocopy, _ = fst2.copy_filtered() # Need to copy since self may equal other
q1q2 = {k:State() for k in fst1.states | ocopy.states}
for s, lbl, t in fst1.all_transitions(q1q2.keys()):
q1q2[s].add_transition(q1q2[t.targetstate], lbl, t.weight)
for s in fst1.finalstates:
for lbl2, t2 in ocopy.initialstate.all_transitions():
q1q2[s].add_transition(q1q2[t2.targetstate], lbl2, t2.weight + s.finalweight)
newfst = FST()
newfst.initialstate = q1q2[fst1.initialstate]
newfst.finalstates = {q1q2[f] for f in ocopy.finalstates}
for s in ocopy.finalstates:
q1q2[s].finalweight = s.finalweight
if ocopy.initialstate in ocopy.finalstates:
newfst.finalstates |= {q1q2[f] for f in fst1.finalstates}
for f in fst1.finalstates:
q1q2[f].finalweight = f.finalweight + ocopy.initialstate.finalweight
newfst.states = set(q1q2.values())
return newfst
@_harmonize_alphabet
def cross_product(fst1: 'FST', fst2: 'FST', optional: bool = False) -> 'FST':
"""Perform the cross-product of T1, T2 through composition.
Keyword arguments:
optional -- if True, calculates T1:T2 | T1."""
newfst_a = fst1.copy_mod(modlabel = lambda l, _: l + ('',))
newfst_b = fst2.copy_mod(modlabel = lambda l, _: ('',) + l)
if optional == True:
return union(compose(newfst_a, newfst_b), fst1)
else:
return compose(newfst_a, newfst_b)
@_harmonize_alphabet
def compose(fst1: 'FST', fst2: 'FST') -> 'FST':
"""Composition of A,B; will expand an acceptor into 2-tape FST on-the-fly."""
def _mergetuples(x: tuple, y: tuple) -> tuple:
if len(x) == 1:
t = x + y[1:]
elif len(y) == 1:
t = x[:-1] + y
else:
t = x[:-1] + y[1:]
if all(t[i] == t[0] for i in range(len(t))):
t = (t[0],)
return t
# Mode 0: allow A=x:0 B=0:y (>0), A=x:y B=y:z (>0), A=x:0 B=wait (>1) A=wait 0:y (>2)
# Mode 1: x:0 B=wait (>1), x:y y:z (>0)
# Mode 2: A=wait 0:y (>2), x:y y:z (>0)
newfst = FST()
Q = deque([(fst1.initialstate, fst2.initialstate, 0)])
S = {(fst1.initialstate, fst2.initialstate, 0): newfst.initialstate}
while Q:
A, B, mode = Q.pop()
currentstate = S[(A, B, mode)]
currentstate.name = "({},{},{})".format(A.name, B.name, mode)
if A in fst1.finalstates and B in fst2.finalstates:
newfst.finalstates.add(currentstate)
currentstate.finalweight = A.finalweight + B.finalweight # TODO: oplus
for matchsym in A.transitionsout.keys():
if mode == 0 or matchsym != '': # A=x:y B=y:z, or x:0 0:y (only in mode 0)
for outtrans in A.transitionsout.get(matchsym, ()):
for intrans in B.transitionsin.get(matchsym, ()):
target1 = outtrans[1].targetstate # Transition
target2 = intrans[1].targetstate # Transition
if (target1, target2, 0) not in S:
Q.append((target1, target2, 0))
S[(target1, target2, 0)] = State()
newfst.states.add(S[(target1, target2, 0)])
# Keep intermediate
# currentstate.add_transition(S[(target1, target2)], outtrans[1].label[:-1] + intrans[1].label, outtrans[1].weight + intrans[1].weight)
newlabel = _mergetuples(outtrans[1].label, intrans[1].label)
currentstate.add_transition(S[(target1, target2, 0)], newlabel, outtrans[1].weight + intrans[1].weight)
for outtrans in A.transitionsout.get('', ()): # B waits
if mode == 2:
break
target1, target2 = outtrans[1].targetstate, B
if (target1, target2, 1) not in S:
Q.append((target1, target2, 1))
S[(target1, target2, 1)] = State()
newfst.states.add(S[(target1, target2, 1)])
newlabel = outtrans[1].label
currentstate.add_transition(S[(target1, target2, 1)], newlabel, outtrans[1].weight)
for intrans in B.transitionsin.get('', ()): # A waits
if mode == 1:
break
target1, target2 = A, intrans[1].targetstate
if (target1, target2, 2) not in S:
Q.append((target1, target2, 2))
S[(target1, target2, 2)] = State()
newfst.states.add(S[(target1, target2, 2)])
newlabel = intrans[1].label
currentstate.add_transition(S[(target1, target2, 2)], newlabel, intrans[1].weight)
return newfst
@_copy_param
def inverted(fst: 'FST') -> 'FST':
"""Returns a modified FST, calculating the inverse of a transducer, i.e. flips label tuples around."""
for s in fst.states:
s.transitions = {lbl[::-1]:tr for lbl, tr in s.transitions.items()}
return fst
def ignore(fst1: 'FST', fst2: 'FST') -> 'FST':
"""A, ignoring intervening instances of B."""
newfst = FST.re("$^output($A @ ('.'|'':$B)*)", {'A': fst1, 'B': fst2})
return newfst
def rewritten(fst: 'FST', *contexts, **flags) -> 'FST':
"""Returns a modified FST, rewriting self in contexts in parallel, controlled by flags."""
defs = {'crossproducts': fst}
defs['br'] = FST.re("'@<@'|'@>@'")
defs['aux'] = FST.re(". - ($br|#)", defs)
defs['dotted'] = FST.re(".*-(.* '@<@' '@>@' '@<@' '@>@' .*)")
defs['base'] = FST.re("$dotted @ # ($aux | '@<@' $crossproducts '@>@')* #", defs)
if len(contexts) > 0:
center = FST.re("'@<@' (.-'@>@')* '@>@'")
lrpairs = ([l.ignore(defs['br']), r.ignore(defs['br'])] for l,r in contexts)
defs['rule'] = center.context_restrict(*lrpairs, rewrite = True).compose(defs['base'])
else:
defs['rule'] = defs['base']
defs['remrewr'] = FST.re("'@<@':'' (.-'@>@')* '@>@':''") # worsener
worseners = [FST.re(".* $remrewr (.|$remrewr)*", defs)]
if flags.get('longest', False) == 'True':
worseners.append(FST.re(".* '@<@' $aux+ '':('@>@' '@<@'?) $aux ($br:''|'':$br|$aux)* .*", defs))
if flags.get('leftmost', False) == 'True':
worseners.append(FST.re(\
".* '@<@':'' $aux+ ('':'@<@' $aux* '':'@>@' $aux+ '@>@':'' .* | '':'@<@' $aux* '@>@':'' $aux* '':'@>@' .*)", defs))
if flags.get('shortest', False) == 'True':
worseners.append(FST.re(".* '@<@' $aux* '@>@':'' $aux+ '':'@>@' .*", defs))
defs['worsen'] = functools.reduce(lambda x, y: x.union(y), worseners).determinize_unweighted().minimize()
defs['rewr'] = FST.re("$^output($^input($rule) @ $worsen)", defs)
final = FST.re("(.* - $rewr) @ $rule", defs)
newfst = final.map_labels({s:'' for s in ['@<@','@>@','#']}).epsilon_remove().determinize_as_dfa().minimize()
return newfst
@_copy_param
def context_restricted(fst: 'FST', *contexts, rewrite = False) -> 'FST':
"""Returns a modified FST, where self only allowed in the context L1 _ R1, or ... , or L_n _ R_n."""
for fsm in itertools.chain.from_iterable(contexts):
fsm.alphabet.add('@=@') # Add aux sym to contexts so they don't match .
fst.alphabet.add('@=@') # Same for self
if not rewrite:
cs = (FST.re("$lc '@=@' (.-'@=@')* '@=@' $rc", \
{'lc':lc.copy_mod().map_labels({'#': '@#@'}),\
'rc':rc.copy_mod().map_labels({'#': '@#@'})}) for lc, rc in contexts)
else:
cs = (FST.re("$lc '@=@' (.-'@=@')* '@=@' $rc", {'lc':lc, 'rc':rc}) for lc, rc in contexts)
cunion = functools.reduce(lambda x, y: x.union(y), cs).determinize().minimize()
r = FST.re("(.-'@=@')* '@=@' $c '@=@' (.-'@=@')* - ((.-'@=@')* $cunion (.-'@=@')*)",\
{'c':fst, 'cunion':cunion})
r = r.map_labels({'@=@':''}).epsilon_remove().determinize_as_dfa().minimize()
for fsm in itertools.chain.from_iterable(contexts):
fsm.alphabet -= {'@=@'} # Remove aux syms from contexts
r = FST.re(".? (.-'@#@')* .? - $r", {'r': r})
newfst = r.map_labels({'@#@':''}).epsilon_remove().determinize_as_dfa().minimize()
return newfst
@_copy_param
def projected(fst: 'FST', dim = 0) -> 'FST':
"""Returns a modified FST, by projecting fst. dim = -1 will get output proj regardless of # of tapes."""
sl = slice(-1, None) if dim == -1 else slice(dim, dim+1)
newalphabet = set()
for s in fst.states:
newtransitions = {}
for lbl, tr in s.transitions.items():
newtransitions[lbl[sl]] = newtransitions.get(lbl[sl], set()) | tr
for t in tr:
t.label = lbl[sl]
newalphabet |= {sublabel for sublabel in lbl[sl]}
s.transitions = newtransitions
fst.alphabet = newalphabet
return fst
def reversed(fst: 'FST') -> 'FST':
    """Returns a modified FST, reversing the FST, epsilon-free.

    A fresh initial state stands in for all old final states: every arc
    into an old final state also gets a copy leaving the new initial state
    (with the old final weight folded into the arc weight), so no epsilon
    transitions are needed. The old initial state becomes the final state.
    """
    newfst = FST(alphabet = fst.alphabet.copy())
    newfst.initialstate = State()
    # One fresh state per old state; arcs are re-added in reverse below.
    mapping = {k:State() for k in fst.states}
    newfst.states = set(mapping.values()) | {newfst.initialstate}
    newfst.finalstates = {mapping[fst.initialstate]}
    if fst.initialstate in fst.finalstates:
        # The old initial was also final, so the reversal accepts the empty path.
        newfst.finalstates.add(newfst.initialstate)
        newfst.initialstate.finalweight = fst.initialstate.finalweight
    mapping[fst.initialstate].finalweight = 0.0
    for s, lbl, t in fst.all_transitions(fst.states):
        # Reverse the arc: target -> source in the new machine.
        mapping[t.targetstate].add_transition(mapping[s], lbl, t.weight)
        if t.targetstate in fst.finalstates:
            # Arcs into old finals also start from the new initial state.
            newfst.initialstate.add_transition(mapping[s], lbl, t.weight + \
                t.targetstate.finalweight)
    return newfst
def reversed_e(fst: 'FST') -> 'FST':
    """Returns a modified FST, reversing the FST, using epsilons.

    A new initial state connects via epsilon arcs (weighted by the old
    final weights) to the images of all old final states; every original
    arc is then re-added in the opposite direction.
    """
    newfst = FST(alphabet = fst.alphabet.copy())
    newfst.initialstate = State(name = tuple(k.name for k in fst.finalstates))
    mapping = {k:State(name = k.name) for k in fst.states}
    for t in fst.finalstates:
        # Epsilon arc from the new initial state to each old final state.
        newfst.initialstate.add_transition(mapping[t], ('',), t.finalweight)
    for s, lbl, t in fst.all_transitions(fst.states):
        # Reverse the arc: target -> source in the new machine.
        mapping[t.targetstate].add_transition(mapping[s], lbl, t.weight)
    newfst.states = set(mapping.values()) | {newfst.initialstate}
    # The old initial state is the single (mapped) final state of the reversal.
    newfst.finalstates = {mapping[fst.initialstate]}
    mapping[fst.initialstate].finalweight = 0.0
    return newfst
@_harmonize_alphabet
def union(fst1: 'FST', fst2: 'FST') -> 'FST':
    """Epsilon-free calculation of union of fst1 and fst2.

    A fresh initial state receives copies of both old initial states'
    outgoing transitions, so no epsilon transitions are introduced.
    """
    mapping = {k:State() for k in fst1.states | fst2.states}
    newfst = FST() # Get new initial state
    newfst.states = set(mapping.values()) | {newfst.initialstate}
    # Copy all transitions from old initial states to new initial state
    for lbl, t in itertools.chain(fst1.initialstate.all_transitions(), fst2.initialstate.all_transitions()):
        newfst.initialstate.add_transition(mapping[t.targetstate], lbl, t.weight)
    # Also add all transitions from old FSMs to new FSM
    for s, lbl, t in itertools.chain(fst1.all_transitions(fst1.states), fst2.all_transitions(fst2.states)):
        mapping[s].add_transition(mapping[t.targetstate], lbl, t.weight)
    # Make old final states final in new FSM. (A redundant set-comprehension
    # rebuild of newfst.finalstates that followed this loop was removed; the
    # loop already produces exactly that set.)
    for s in fst1.finalstates | fst2.finalstates:
        newfst.finalstates.add(mapping[s])
        mapping[s].finalweight = s.finalweight
    # If either initial state was final, make new initial final w/ weight min(f1w, f2w)
    if fst1.initialstate in fst1.finalstates or fst2.initialstate in fst2.finalstates:
        newfst.finalstates.add(newfst.initialstate)
        newfst.initialstate.finalweight = min(fst1.initialstate.finalweight, fst2.initialstate.finalweight)
    return newfst
def intersection(fst1: 'FST', fst2: 'FST') -> 'FST':
    """Intersection of self and other. Uses the product algorithm."""
    # A product state is final only when both component states are final,
    # weights combine additively, and only labels shared by both sides are
    # followed.
    def follow_shared(lhs, rhs):
        return lhs & rhs
    return product(fst1, fst2, finalf=all, oplus=operator.add, pathfollow=follow_shared)
def difference(fst1: 'FST', fst2: 'FST') -> 'FST':
    """Returns self-other. Uses the product algorithm."""
    # Keep paths accepted by fst1 but not fst2; weights come from fst1 alone,
    # and traversal follows fst1's outgoing labels only.
    def final_in_first_only(flags):
        return flags[0] and not flags[1]
    return product(fst1, fst2, finalf=final_in_first_only,
                   oplus=lambda w1, w2: w1, pathfollow=lambda a, b: a)
@_harmonize_alphabet
def product(fst1: 'FST', fst2: 'FST', finalf = any, oplus = min, pathfollow = lambda x,y: x|y) -> 'FST':
    """Generates the product FST from fst1, fst2. The helper functions by default
    produce fst1|fst2.

    finalf decides finality of a pair state from its pair of finality flags,
    oplus combines weights, and pathfollow selects which labels to expand
    from the two states' outgoing label sets. A side lacking a chosen label
    falls through to a dead state with infinite final weight.
    """
    newfst = FST()
    # Worklist of unexpanded state pairs and the pair -> new-state map.
    Q = deque([(fst1.initialstate, fst2.initialstate)])
    S = {(fst1.initialstate, fst2.initialstate): newfst.initialstate}
    dead1, dead2 = State(finalweight = float("inf")), State(finalweight = float("inf"))
    while Q:
        t1s, t2s = Q.pop()
        currentstate = S[(t1s, t2s)]
        currentstate.name = (t1s.name, t2s.name,)
        if finalf((t1s in fst1.finalstates, t2s in fst2.finalstates)):
            newfst.finalstates.add(currentstate)
            currentstate.finalweight = oplus(t1s.finalweight, t2s.finalweight)
        # Get all outgoing labels we want to follow
        for lbl in pathfollow(t1s.transitions.keys(), t2s.transitions.keys()):
            # When a side has no arc with this label, substitute a single
            # transition to its dead state (weight inf).
            for outtr in t1s.transitions.get(lbl, (Transition(dead1, lbl, float('inf')), )):
                for intr in t2s.transitions.get(lbl, (Transition(dead2, lbl, float('inf')), )):
                    if (outtr.targetstate, intr.targetstate) not in S:
                        Q.append((outtr.targetstate, intr.targetstate))
                        S[(outtr.targetstate, intr.targetstate)] = State()
                        newfst.states.add(S[(outtr.targetstate, intr.targetstate)])
                    currentstate.add_transition(S[(outtr.targetstate, intr.targetstate)], lbl, oplus(outtr.weight, intr.weight))
    return newfst
# endregion
# Defines a list of functions that should be added as instance methods to the FST class dynamically
_algorithms_to_add: Dict[str, Callable] = {
'trim': trimmed,
'filter_accessible': filtered_accessible,
'filter_coaccessible': filtered_coaccessible,
'scc': scc,
'push_weights': pushed_weights,
'map_labels': mapped_labels,
'epsilon_remove': epsilon_removed,
'epsilon_closure': epsilon_closure,
'dijkstra': dijkstra,
'label_states_topology': labelled_states_topology,
'words_nbest': words_nbest,
'words_cheapest': words_cheapest,
'determinize_unweighted': determinized_unweighted,
'determinize_as_dfa': determinized_as_dfa,
'determinize': determinized,
'minimize_as_dfa': minimized_as_dfa,
'minimize': minimized,
'merge_equivalent_states': merging_equivalent_states,
'find_sourcestates': find_sourcestates,
'create_reverse_index': create_reverse_index,
'minimize_brz': minimized_brz,
'kleene_closure': kleene_closure,
'add_weight': added_weight,
'optional': optional,
'concatenate': concatenate,
'cross_product': cross_product,
'compose': compose,
'invert': inverted,
'ignore': ignore,
'rewrite': rewritten,
'context_restrict': context_restricted,
'project': projected,
'reverse': reversed,
'reverse_e': reversed_e,
'union': union,
'intersection': intersection,
'difference': difference,
'product': product
} | mhulden/pyfoma | src/pyfoma/algorithms.py | algorithms.py | py | 34,910 | python | en | code | 25 | github-code | 13 |
10173364835 | import numpy as np
def simple_predict(x, theta):
    """
    Computes the prediction vector y_hat from two non-empty numpy.array.
    Args:
        x: has to be an numpy.array, a matrix of dimension m * n.
        theta: has to be an numpy.array, a vector of dimension (n + 1) * 1.
    Return:
        y_hat as a numpy.array, a vector of dimension m * 1.
        None if x or theta are empty numpy.array.
        None if x or theta dimensions are not matching.
        None if x or theta is not of expected type.
    Raises:
        This function should not raise any Exception.
    """
    if not isinstance(x, np.ndarray) or not isinstance(theta, np.ndarray):
        print("Invalid input: arguments of ndarray type required")
        return None
    if not x.ndim == 2:
        print("Invalid input: wrong shape of x", x.shape)
        return None
    # Accept a flat theta of length n+1 and promote it to a column vector.
    if theta.ndim == 1 and theta.size == x.shape[1] + 1:
        theta = theta.reshape(x.shape[1] + 1, 1)
    elif not (theta.ndim == 2 and theta.shape == (x.shape[1] + 1, 1)):
        print("Invalid input: wrong shape of theta ", theta.shape)
        return None
    # Vectorized hypothesis: y_hat = theta0 + X @ theta[1:]. Equivalent to the
    # previous per-element Python loops but computed as one matrix product.
    return (theta[0] + x @ theta[1:]).astype(float)
def ex1():
    """Run the subject's four example thetas against a 4x3 input matrix."""
    x = np.arange(1,13).reshape((4,-1))
    print("x:", x, x.shape)
    # Example 1: only the bias term is non-zero, so every prediction is 5.
    theta1 = np.array([5, 0, 0, 0]).reshape((-1, 1))
    print("\nexample1:")
    print(simple_predict(x, theta1)) # Ouput: array([[5.], [5.], [5.], [5.]])
    # Example 2: weight 1 on the first feature only, so y_hat == x[:,0].
    theta2 = np.array([0, 1, 0, 0]).reshape((-1, 1))
    print("\nexample2:")
    print(simple_predict(x, theta2)) # Output: array([[ 1.], [ 4.], [ 7.], [10.]])
    # Example 3:
    theta3 = np.array([-1.5, 0.6, 2.3, 1.98]).reshape((-1, 1))
    print("\nexample3:")
    print(simple_predict(x, theta3)) # Output: array([[ 9.64], [24.28], [38.92], [53.56]])
    # Example 4:
    theta4 = np.array([-3, 1, 2, 3.5]).reshape((-1, 1))
    print("\nexample4:")
    print(simple_predict(x, theta4)) # Output: array([[12.5], [32. ], [51.5], [71. ]])
def ex2():
    """Check simple_predict across varying feature counts (2, 3 and 4 columns)."""
    x = np.arange(1,13).reshape(-1,2)
    theta = np.ones(3).reshape(-1,1)
    print(simple_predict(x, theta)) # Ouput: array([[4.], [ 8.], [12.], [16.], [20.], [24.]])
    x = (np.arange(1,13)).reshape(-1,3)
    theta = np.ones(4).reshape(-1,1)
    print(simple_predict(x, theta)) # Ouput: array([[ 7.], [16.], [25.], [34.]])
    x = (np.arange(1,13)).reshape(-1,4)
    theta = np.ones(5).reshape(-1,1)
    print(simple_predict(x, theta)) # Ouput: array([[11.], [27.], [43.]])
if __name__ == "__main__":
ex2()
| jmcheon/ml_module | 02/ex00/prediction.py | prediction.py | py | 2,584 | python | en | code | 0 | github-code | 13 |
25530206403 | import os
from linebot import LineBotApi, WebhookParser
from linebot.models import MessageEvent, TextMessage, TextSendMessage, TemplateSendMessage, ButtonsTemplate, MessageTemplateAction, ImageCarouselColumn, ImageCarouselTemplate, CarouselTemplate, CarouselColumn
channel_access_token = os.getenv("LINE_CHANNEL_ACCESS_TOKEN", None)
def send_text_message(reply_token, text):
    """Reply to a LINE event with a plain text message."""
    api = LineBotApi(channel_access_token)
    message = TextSendMessage(text=text)
    api.reply_message(reply_token, message)
    return "OK"
# def send_button_message(reply_token):
# line_bot_api = LineBotApi(channel_access_token)
# line_bot_api.reply_message( # 回復傳入的訊息文字
# reply_token,
# TemplateSendMessage(
# alt_text='Buttons template',
# template=ButtonsTemplate(
# thumbnail_image_url='https://i.imgur.com/FBvQEoq.png',
# title='Menu',
# text='Choose function',
# actions=[
# MessageTemplateAction(
# label='search player',
# text='search player'
# ),
# MessageTemplateAction(
# label='show hottest player',
# text='show hottest player'
# ),
# ]
# )
# )
# )
# return "OK"
def send_button_message(reply_token, img, title, uptext, labels, texts):
    """Reply with a buttons template (thumbnail image, title and body text).

    labels and texts are parallel lists: labels[i] is the button caption and
    texts[i] is the message sent back to the bot when that button is tapped.
    """
    line_bot_api = LineBotApi(channel_access_token)
    acts = []
    for i, lab in enumerate(labels):
        acts.append(
            MessageTemplateAction(
                label=lab,
                text=texts[i]
            )
        )
    message = TemplateSendMessage(
        alt_text='Buttons template',
        template=ButtonsTemplate(
            thumbnail_image_url=img,
            title=title,
            text=uptext,
            actions=acts
        )
    )
    line_bot_api.reply_message(reply_token, message)
    return "OK"
def send_button_message2(reply_token, title,url, uptext, labels, texts):
    """Reply with a buttons template (no thumbnail) plus a follow-up text message containing url.

    labels and texts are parallel lists: labels[i] is the button caption and
    texts[i] is the message sent back to the bot when that button is tapped.
    """
    line_bot_api = LineBotApi(channel_access_token)
    acts = []
    for i, lab in enumerate(labels):
        acts.append(
            MessageTemplateAction(
                label=lab,
                text=texts[i]
            )
        )
    message = TemplateSendMessage(
        alt_text='Buttons template',
        template=ButtonsTemplate(
            title=title,
            text=uptext,
            actions=acts
        )
    )
    # Both messages go out in a single reply: the template first, then the url.
    replyarr=[]
    replyarr.append(message)
    replyarr.append(TextSendMessage(text=url))
    line_bot_api.reply_message(reply_token, replyarr)
    return "OK"
def send_button_message3(reply_token, title, uptext, labels, texts):
    """Reply with a buttons template (no thumbnail, no follow-up url message).

    NOTE(review): duplicates send_button_message2 minus the url text message —
    consider consolidating if both are kept long-term.
    """
    line_bot_api = LineBotApi(channel_access_token)
    acts = []
    for i, lab in enumerate(labels):
        acts.append(
            MessageTemplateAction(
                label=lab,
                text=texts[i]
            )
        )
    message = TemplateSendMessage(
        alt_text='Buttons template',
        template=ButtonsTemplate(
            title=title,
            text=uptext,
            actions=acts
        )
    )
    line_bot_api.reply_message(reply_token, message)
    return "OK"
def send_image_carousel(reply_token, imglinks, labels, texts):
    """Reply with an image carousel: one column per image url.

    imglinks, labels and texts are parallel lists; tapping column i sends
    texts[i] back to the bot.
    """
    line_bot_api = LineBotApi(channel_access_token)
    cols = []
    for i, url in enumerate(imglinks):
        cols.append(
            ImageCarouselColumn(
                image_url=url,
                action=MessageTemplateAction(
                    label=labels[i],
                    text=texts[i]
                )
            )
        )
    message = TemplateSendMessage(
        alt_text='ImageCarousel template',
        template=ImageCarouselTemplate(columns=cols)
    )
    line_bot_api.reply_message(reply_token, message)
    return "OK"
| wuyibang/linebottest | utils.py | utils.py | py | 4,217 | python | en | code | 0 | github-code | 13 |
71648261457 | import logging
from qark.issue import Severity, Issue
from qark.scanner.plugin import ManifestPlugin
log = logging.getLogger(__name__)
TASK_REPARENTING_DESCRIPTION = (
"This allows an existing activity to be reparented to a new native task i.e task having the same affinity as the "
"activity. This may lead to UI spoofing attack on this application."
"https://www.usenix.org/system/files/conference/usenixsecurity15/sec15-paper-ren-chuangang.pdf"
)
class TaskReparenting(ManifestPlugin):
    """Flags manifests that set android:allowTaskReparenting="true" (UI spoofing risk)."""

    def __init__(self):
        super(TaskReparenting, self).__init__(category="manifest", name="android:allowTaskReparenting='true' found",
                                              description=TASK_REPARENTING_DESCRIPTION)
        self.severity = Severity.WARNING

    def run(self):
        """Scan the manifest line by line and record an Issue for each occurrence."""
        with open(self.manifest_path, "r") as manifest_file:
            # start=1 so reported line numbers match editor numbering;
            # the previous 0-based enumerate made reports off by one.
            for line_number, line in enumerate(manifest_file, start=1):
                if 'android:allowTaskReparenting="true"' in line:
                    self.issues.append(Issue(
                        category=self.category, severity=self.severity, name=self.name,
                        description=self.description,
                        file_object=self.manifest_path,
                        line_number=line_number)
                    )
plugin = TaskReparenting()
| linkedin/qark | qark/plugins/manifest/task_reparenting.py | task_reparenting.py | py | 1,330 | python | en | code | 3,071 | github-code | 13 |
73868994577 | from datetime import datetime
from json import loads
def process_time(time):
time = time.strftime('%Y-%m-%dT%H:%M:%S')
return time
def convert_dict_to_string(message):
    """Join the dict's values with the 'cs631separator' token; falsy values become 'NA'.

    May not use this function as dictionary seems to be working fine.
    """
    # 'cs631separator' is used because ',' or ';' may appear in the tweet itself.
    parts = [(value if value else "NA") + "cs631separator"
             for value in message.values()]
    return ''.join(parts)
def get_products_to_track(return_dict = 0):
    """Read products_to_track.json and return the flat list of product names.

    When return_dict == 1, also return the raw {company: [products]} mapping
    as a second value.
    """
    # load and read json file
    with open("products_to_track.json") as f:
        data = loads(f.read())
    # get products list
    products_to_track = []
    for company in data:
        products_to_track += data[company]
    if return_dict == 1:
        return products_to_track, data
    return products_to_track
def get_associated_company_and_product(tweet):
    """Find which tracked company/product a tweet mentions.

    Returns a (company, product) tuple:
      ("none", "none") when no tracked product appears in the tweet,
      ("mix", "mix")   when more than one match is found,
      (company, product) for exactly one match.
    """
    tweet = tweet.lower()
    # load json data
    products_to_track, data = get_products_to_track(return_dict=1)
    companies = [company for company in data]
    company_found = 0
    associated_company = ""
    associated_product = ""
    for product in products_to_track:
        # Exact (case-insensitive) substring match; the tracked product names
        # are explicit enough that the token-by-token fallback previously
        # sketched here (commented out) was removed as dead code.
        if product.lower() in tweet:
            for company in companies:
                if product in data[company]:
                    associated_company = company
                    associated_product = product
                    company_found += 1
    # NOTE(review): two matching products of the *same* company also yield
    # "mix" — confirm that is the intended behaviour.
    if company_found > 1:
        return ("mix", "mix")
    elif company_found == 0:
        return ("none", "none")
    elif company_found == 1:
        return associated_company, associated_product
#print(get_associated_company_and_product("Samsung Galaxy Buds Pro see 1-day discount down to $165 (Sav"))
| chrisbombino/cs631-project | scripts/helper.py | helper.py | py | 3,117 | python | en | code | 8 | github-code | 13 |
6656684898 | with open("input.txt") as f:
input = f.read().splitlines()
draw_numbers = input.pop(0).split(",")
boards = []
new_board = []
for line in input:
if len(line.strip()) == 0:
if new_board != []:
boards.append(
{
"board": new_board,
"row": [0, 0, 0, 0, 0],
"col": [0, 0, 0, 0, 0],
"marked": [],
}
)
new_board = []
else:
new_board.append(line.split())
boards.append(
{"board": new_board, "row": [0, 0, 0, 0, 0], "col": [0, 0, 0, 0, 0], "marked": []}
)
def part_one():
    """Play bingo with the drawn numbers; print the score of the first board to win.

    Score = sum of the board's unmarked numbers * the number just drawn.
    Reads the module-level `boards` and `draw_numbers`.
    """
    winning_board = None
    for number in draw_numbers:
        if winning_board:
            break
        for board in boards:
            for row_index, row in enumerate(board["board"]):
                if number in row:
                    # Mark the hit: bump the row/column counters and record the value.
                    board["row"][row_index] += 1
                    board["col"][row.index(number)] += 1
                    board["marked"].append(int(number))
                    if 5 in board["row"] or 5 in board["col"]:
                        # NOTE(review): if several boards complete on the same
                        # draw, the last one checked wins here — confirm that
                        # is acceptable for the puzzle input.
                        winning_board = board
                        winning_number = int(number)
    flat_board = [int(number) for row in winning_board["board"] for number in row]
    unmarked_numbers = list(
        filter(lambda num: num not in winning_board["marked"], flat_board)
    )
    print(sum(unmarked_numbers) * winning_number)
def part_two():
    """Keep playing until every board has won; print the score of the last board to win.

    Reads the module-level `boards` and `draw_numbers`.
    """
    winning_boards = []
    for number in draw_numbers:
        # Stop once every board has completed.
        if len(winning_boards) == len(boards):
            break
        for board in boards:
            for row_index, row in enumerate(board["board"]):
                if number in row:
                    board["row"][row_index] += 1
                    board["col"][row.index(number)] += 1
                    board["marked"].append(int(number))
                    if 5 in board["row"] or 5 in board["col"]:
                        # Record each board only once, at the draw it first wins.
                        if board not in winning_boards:
                            winning_boards.append(board)
                            winning_number = int(number)
    last_winning_board = winning_boards.pop()
    flat_board = [int(number) for row in last_winning_board["board"] for number in row]
    unmarked_numbers = list(
        filter(lambda num: num not in last_winning_board["marked"], flat_board)
    )
    print(sum(unmarked_numbers) * winning_number)
part_one()
part_two()
| RuairidhCa/aoc2021 | 04/04.py | 04.py | py | 2,362 | python | en | code | 0 | github-code | 13 |
74675180816 | import io
from mstk.topology import Molecule, Topology, UnitCell
from mstk.forcefield import ForceField, ZftTyper
from mstk.simsys import System
from mstk.wrapper import Packmol
definition = '''
TypeDefinition
h_1 [H][CX4]
c_4 [CX4]
c_4h2 [CX4;H2]
c_4h3 [CX4;H3]
HierarchicalTree
h_1
c_4
c_4h2
c_4h3
'''
typer = ZftTyper(io.StringIO(definition))
# Build a butane molecule from SMILES and assign atom types with the ZFT typer
butane = Molecule.from_smiles('CCCC butane')
typer.type(butane)
# Load force field parameters from ZFF file
ff = ForceField.open('alkane.zff')
# Initialize a topology with periodic boundary condition
# For now, it contains only one butane molecule
top = Topology([butane], cell=UnitCell([3, 3, 3]))
# Assign atomic charges based on the charge parameters in the force field
ff.assign_charge(top)
# Call Packmol to build a configuration containing 100 butane molecules
packmol = Packmol(r'/path/of/packmol')
top.scale_with_packmol([100], packmol=packmol)
# Associate force field parameters to the topology
# And then export input files for simulation engines
system = System(top, ff)
system.export_gromacs()
system.export_lammps()
system.export_namd()
| z-gong/mstk | docs/examples/export.py | export.py | py | 1,125 | python | en | code | 7 | github-code | 13 |
25969066453 | import streamlit as st
import sklearn
import pickle
import pandas as pd
import numpy as np
iris_data = pickle.load(open("irismodel.sav", 'rb'))
st.title('Iris Data prediction app')
#adding images
from PIL import Image
setosa = Image.open("iris_setosa.jpg")
virginica = Image.open("Iris_virginica.jpg")
versicolor = Image.open("iris_versicolor.jpg")
def user_report():
    """Collect iris measurements from sidebar sliders; return them as a 1-row DataFrame."""
    # st.slider(label, min_value, max_value, value): the last positional arg is
    # the default value. NOTE(review): 0.1 is below several sliders' minimums —
    # confirm whether a step size was intended instead.
    sepal_length = st.sidebar.slider('sepal.length', 4.3, 10.0, 0.1)
    sepal_width = st.sidebar.slider('sepal.width',2.0 ,10.0, 0.1 )
    petal_length = st.sidebar.slider('petal.length', 1.0,10.0, 0.1 )
    petal_width = st.sidebar.slider('petal.width', 0.1,10.0, 0.1 )
    user_report_data= {
        'sepal.length' : sepal_length,
        'sepal.width':sepal_width,
        'petal.length': petal_length,
        'petal.width': petal_width
    }
    # Column names match the training data's columns; index=[0] makes one row.
    user_report_data = pd.DataFrame(user_report_data, index=[0])
    return user_report_data
user_data = user_report()
st.header('Iris data')
st.write(user_data)
iris = iris_data.predict(user_data)
st.subheader('iris prediction')
if (iris == 0):
st.image(setosa, width = 350, caption='Setosa')
elif (iris == 1 ):
st.image(versicolor, width = 350, caption='Versicolor')
else:
st.image(virginica, width = 350, caption='Virginica')
| SimeonIfalore/Iris_prediction_app | iris.py | iris.py | py | 1,291 | python | en | code | 0 | github-code | 13 |
9370249293 |
#This function is mainly responsible for setting up the host server for the multipl-screen version of the game
#Check CitedCode for specific citations
import socket
import threading
from queue import Queue
IP = socket.gethostbyname(socket.gethostname())
HOST = str(IP) # put your IP address here if playing on multiple computers
PORT = 50003
BACKLOG = 4
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST,PORT))
server.listen(BACKLOG)
print("This is the IP : %s"%(IP))
print("looking for connection")
def handleClient(client, serverChannel, cID, clientele):
    """Reader loop for one connected client (runs in its own thread).

    Accumulates bytes from the socket, splits complete newline-terminated
    messages off the buffer, and forwards each to serverChannel tagged with
    this client's id. Returns (ending the thread) when the socket fails.
    """
    client.setblocking(1)
    msg = ""
    while True:
        try:
            msg += client.recv(10).decode("UTF-8") #Recieves the messages, decode it, and splits on \n
            command = msg.split("\n")
            while (len(command) > 1):
                readyMsg = command[0]
                msg = "\n".join(command[1:])#Takes first message of all the messages
                serverChannel.put(str(cID) + " " + readyMsg) #Puts it on the server channel and tells everyone it was you who sent it
                command = msg.split("\n") #This will be put into the serverthread and distributed
        except:
            # we failed — treat any error as a disconnect and end the thread.
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
            return
def serverThread(clientele, serverChannel):
    """Dispatcher loop: pull "<senderID> <instruction> <details>" messages off
    serverChannel and route them.

    'status' and 'continue' update the module-level playerStatus/requestStatus
    dicts and broadcast a summary to everyone; any other instruction is
    relayed verbatim to all clients except the sender.
    """
    while True:
        msg = serverChannel.get(True, None) #Gets a message from the server channel (messages added by handleClient)
        print("msg recv: ", msg)
        msgList = msg.split(" ")
        senderID = msgList[0] #Gets the senderID (added in handleClient)
        instruction = msgList[1] #Instructions
        if instruction == 'status':
            print('status update recieved')
            global playerStatus
            playerStatus[senderID] = msgList[2]
            sendMsg = 'status Player One Ready : ' + playerStatus['playerOne'] + ' Player Two Ready : ' + playerStatus['playerTwo'] + '\n'
            for cID in clientele:
                clientele[cID].send(sendMsg.encode())
            serverChannel.task_done()
            continue
        if instruction == 'continue':
            print('Continue request recieved!')
            global requestStatus
            print(requestStatus)
            requestStatus[senderID] = msgList[2]
            sendMsg = 'continue Player One Continue : ' + requestStatus['playerOne'] + ' Player Two Continue : ' + requestStatus['playerTwo'] + '\n'
            for cID in clientele:
                clientele[cID].send(sendMsg.encode())
            # Once both players agreed (or either asked to close), reset the flags.
            if requestStatus['playerOne'] == 'True' and requestStatus['playerTwo'] == 'True':
                requestStatus['playerOne'] = 'False'
                requestStatus['playerTwo'] = 'False' #Setting the request status back to original
            if requestStatus['playerOne'] == 'close' or requestStatus['playerTwo'] == 'close':
                requestStatus['playerOne'] = 'False'
                requestStatus['playerTwo'] = 'False' #Setting the request status back to original
            serverChannel.task_done()
            continue
        # Any other instruction: relay to every client except the sender.
        details = " ".join(msgList[2:])
        if (details != ""):
            for cID in clientele:
                if cID != senderID:
                    sendMsg = instruction + " " + senderID + " " + details + "\n" #Sends it to all other members
                    clientele[cID].send(sendMsg.encode())
                    print("> sent to %s:" % cID, sendMsg[:-1])
                    print()
        serverChannel.task_done()
requestStatus = {'playerOne':'False', 'playerTwo':'False'} #Continuing or not
playerStatus = {'playerOne':'False', 'playerTwo':'False'} #Starting or not
clientele = dict()
playerNum = 0 #Looking for new player to join
serverChannel = Queue(100)
threading.Thread(target = serverThread, args = (clientele, serverChannel)).start()
names = ['playerOne', 'playerTwo']
while True:
client, address = server.accept()
# myID is the key to the client in the clientele dictionary
myID = names[playerNum] #Gives new player a name
print(myID, playerNum)
for cID in clientele:
print (repr(cID), repr(playerNum))
clientele[cID].send(("newPlayer %s\n" % myID).encode()) #Informs other player that new player has joined
client.send(("newPlayer %s\n" % cID).encode()) #Sends existing players to new player
clientele[myID] = client #Adds new client into clientele
client.send(("myIDis %s \n" % myID).encode()) #Sends client his own name
print("connection recieved from %s" % myID)
threading.Thread(target = handleClient, args =
(client ,serverChannel, myID, clientele)).start()
playerNum += 1
| ryanyxw/GomokuAI | Host.py | Host.py | py | 4,354 | python | en | code | 1 | github-code | 13 |
40912726702 | import pandas as pd
from openpyxl.workbook import Workbook
from openpyxl.worksheet.table import TableStyleInfo, Table
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Alignment, DEFAULT_FONT
from startup_file_manage import FileManager
class ExcelManipulator:
def __init__(self):
self.file_manager = FileManager()
self.chartsearch = self.file_manager.excel_manipulator_locations()[0]
self.alphabet = {
"1": "A",
"2": "B",
"3": "C",
"4": "D",
"5": "E",
"6": "F",
"7": "G",
"8": "H",
"9": "I",
"10": "J",
"11": "K",
"12": "L",
"13": "M",
"14": "N",
"15": "O",
"16": "P",
"17": "Q",
"18": "R",
"19": "S",
"20": "T",
"21": "U",
"22": "V",
"23": "W",
"24": "X",
"25": "Y",
"26": "Z"
}
self.toggle_path = self.file_manager.excel_manipulator_locations()[1]
self.column_lists = {
'empty_inserted_columns': ['Current Week Client Comments',
'Age',
'Prior Week Client Comments',
'RAI Reconciliation Comments'],
'left_columns': ['DOS',
'Account #',
'MRN',
'Patient Name',
'Carrier'],
'hackensack_columns': ['DOS',
'Account #',
'MRN',
'Patient Name',
'Carrier',
'Department'],
'singe_uac': ['UAC Reason - Provider(DOS)',
'UAC Reason'],
'multiple_uac': ['UAC Reason 1 - Provider(DOS)',
'UAC Reason 1',
'UAC Reason 2 - Provider(DOS)',
'UAC Reason 2'],
'right_columns': ['Current Week Client Comments',
'Age',
'Pro Date Sent To Client',
'Prior Week Client Comments',
'RAI Reconciliation Comments'],
'lhi_search_list': ['Status',
'Comments']
}
    def pandas_column_rearrange(self):
        """Read the chartsearch workbook and return a DataFrame with columns reordered.

        Layout is driven by two JSON toggles: "Toggle LHI Search List" picks the
        LHI layout vs. the default RAI layout, and "Toggle Department" switches
        the left columns to the Hackensack set (with Department).
        """
        self.toggle_dict = self.file_manager.json_dict(self.toggle_path)
        self.lhi_format = self.toggle_dict.get("Toggle LHI Search List")
        self.department_format = self.toggle_dict.get("Toggle Department")
        print(self.toggle_dict)
        print(self.lhi_format)
        print(self.department_format)
        excel_chartsearch = pd.read_excel(self.chartsearch)
        # Append the empty placeholder columns required by the chosen layout.
        if self.lhi_format is False:
            for col in self.column_lists['empty_inserted_columns']:
                excel_chartsearch[col] = ""
        elif self.lhi_format is True:
            for col in self.column_lists['lhi_search_list']:
                excel_chartsearch[col] = ""
        else:
            print("Something went wrong...")
        name_col = excel_chartsearch.columns.tolist()
        # Pick the UAC column set based on whether the source has 1 or 2 reasons.
        if 'UAC Reason 1' in name_col and 'UAC Reason 2' in name_col:
            middle_columns = self.column_lists['multiple_uac']
        else:
            middle_columns = self.column_lists['singe_uac']
        if self.department_format is False:
            left_columns_chartsearch = excel_chartsearch[self.column_lists['left_columns']] # noqa: E501
        elif self.department_format is True:
            left_columns_chartsearch = excel_chartsearch[self.column_lists['hackensack_columns']] # noqa: E501
            print("hackensack used")
        else:
            # NOTE(review): quit() terminates the whole process from library
            # code — consider raising ValueError instead.
            quit("wrong input, exiting...")
        middle_columns_chartsearch = excel_chartsearch[middle_columns]
        if self.lhi_format is False:
            right_excel_chartsearch = excel_chartsearch[self.column_lists['right_columns']] # noqa: E501
        elif self.lhi_format is True:
            right_excel_chartsearch = excel_chartsearch[self.column_lists['lhi_search_list']] # noqa: E501
        else:
            quit("wrong input, exiting...")
        # Stitch left / middle / right sections back together side by side.
        concatenated_excel_file = pd.concat([left_columns_chartsearch,
                                             middle_columns_chartsearch,
                                             right_excel_chartsearch],
                                            axis=1)
        return concatenated_excel_file
    def openpyxl_format_workbook(self, concatenated_excel_file):
        """Build a formatted openpyxl Workbook ("RAI Report") from the DataFrame.

        Writes the rows, wraps them in a styled Excel table, inserts DATEDIF
        age formulas (non-LHI layout only), centers key columns and shrinks
        the default font. Uses self.alphabet, so layouts are limited to 26
        columns (A-Z).
        """
        alphabet = self.alphabet
        wb = Workbook()
        worksheet = wb.active
        worksheet.title = "RAI Report"
        for row in dataframe_to_rows(concatenated_excel_file, index=False, header=True):
            worksheet.append(row)
        col_names = []
        for col in concatenated_excel_file.columns:
            col_names.append(col)
        # Table spans A1 through <last column letter><data rows + header>.
        col_num = len(concatenated_excel_file.axes[1])
        col_num_str = str(col_num)
        row_num = len(concatenated_excel_file.axes[0])
        row_num_str = str(row_num + 1)
        col_to_letter = alphabet.get(col_num_str)
        table_dimension = "A1:" + col_to_letter + row_num_str
        excel_dimensions = (#col_names,
                            "Total columns:", col_num,
                            "Total rows:", row_num_str,
                            "Table Data:", table_dimension)
        print(excel_dimensions)
        if self.lhi_format is False:
            # Locate the Age and date columns (1-based) to place formulas/alignment.
            age_index = col_names.index('Age')
            date_index = col_names.index('Pro Date Sent To Client')
            age_index = str(age_index + 1)
            date_index = str(date_index + 1)
            col_to_letter_age = alphabet.get(age_index)
            #print("Age column", col_to_letter_age)
            col_to_letter_date = alphabet.get(date_index)
            #print("Date column", col_to_letter_date)
            # age_range tracks row_num in lockstep (both start at 2), so each
            # formula references column A of its own row.
            age_range = 2
            for row_num in range(age_range, int(row_num_str) + 1):
                worksheet[col_to_letter_age + '{}'.format(row_num)] = '=datedif(a{},today(),"D")'.format(str(age_range)) # noqa: E501
                age_range += 1
        col_to_letter = alphabet.get(str(col_num))
        table = Table(displayName = "table", ref = table_dimension)
        # Change table style to normal format
        style = TableStyleInfo(name = "TableStyleMedium2", showRowStripes = True)
        # Attatched the styles to table
        table.tableStyleInfo = style
        if self.lhi_format is False:
            for cell in worksheet[col_to_letter_date]:
                cell.alignment = Alignment(horizontal='center')
            for cell in worksheet[col_to_letter_age]:
                cell.alignment = Alignment(horizontal='center')
        for cell in worksheet['A']:
            cell.alignment = Alignment(horizontal='center')
        for cell in worksheet['B']:
            cell.alignment = Alignment(horizontal='center')
        for cell in worksheet['C']:
            cell.alignment = Alignment(horizontal='left')
        # Attach table to worksheet
        worksheet.add_table(table)
        # Shrink the workbook-wide default font for dense reports.
        DEFAULT_FONT.size = 8
return wb | Mike-Durning/work_ui | pyqt_proj/src/excel_manipulation.py | excel_manipulation.py | py | 8,294 | python | en | code | 0 | github-code | 13 |
31805809623 | from abc import ABC, abstractmethod
import datetime
import glob
import itertools
from joblib import dump, load
import os
import pickle
import time
import uuid
import sys
# Manipulating, analyzing and processing data
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy as sp
from scipy.stats.stats import pearsonr, f_oneway, zscore
from scipy.stats.mstats import winsorize
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.preprocessing import OneHotEncoder, PowerTransformer
from category_encoders import TargetEncoder, LeaveOneOutEncoder
from sklearn.neighbors import LocalOutlierFactor
# Feature and model selection and evaluation
from sklearn.feature_selection import RFECV, SelectKBest
from sklearn.feature_selection import VarianceThreshold, f_regression
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.inspection import permutation_importance
# Regression based estimators
from sklearn.linear_model import ElasticNet, ElasticNetCV, Lasso, LassoCV, LinearRegression, Ridge, RidgeCV
# Tree-based estimators
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
# Visualizing data
import seaborn as sns
import matplotlib.pyplot as plt
from tabulate import tabulate
cmap = sns.dark_palette("#69d", reverse=True, as_cmap=True)
palette = sns.color_palette("dark:#124683")
sns.set_theme(palette=palette, style="whitegrid")
# Utilities
from utils import notify, PersistEstimator, comment, print_dict, print_dict_keys
from utils import print_list
# Data Source
from data import AmesData
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
pd.set_option('mode.chained_assignment', None)
random_state = 6589
# =========================================================================== #
# 0. SCORING #
# =========================================================================== #
def RMSE(y_true, y_pred):
    """Return the root-mean-squared error between ``y_true`` and ``y_pred``."""
    return np.sqrt(mean_squared_error(y_true, y_pred))
# Negated scorer: sklearn model selection maximizes, so lower RMSE scores higher.
rmse = make_scorer(RMSE, greater_is_better=False)
# =========================================================================== #
# 1. MODELS #
# =========================================================================== #
# Model configuration registry.  Structure:
#   {group: {name: {"Estimator": estimator instance,
#                   "Parameters": grid-search parameter dict}}}
# NOTE(review): Lasso, Ridge and ElasticNet are instantiated below but are
# not among the visible imports (only LassoCV/RidgeCV/ElasticNetCV are) --
# verify they are imported, otherwise this module raises NameError on load.
baseline_group = {
    "Regressors": {
        "Linear Regression": {
            "Estimator": LinearRegression(),
            "Parameters": {"normalize": [False],"n_jobs": [4],"copy_X": [True]}
        },
        "Lasso": {
            "Estimator": Lasso(),
            "Parameters": {
                "alpha": [1.0]}
        },
        "Ridge": {
            "Estimator": Ridge(),
            "Parameters": {
                "alpha": [1.0]}
        },
        "ElasticNet": {
            "Estimator": ElasticNet(),
            "Parameters": {
                "alpha": [1.0],
                # NOTE(review): np.arange(0.5) == [0.0] (a single value, and
                # l1_ratio=0 is pure ridge) -- confirm this is intended.
                "l1_ratio": np.arange(0.5)}
        }
    },
    "Ensembles": {
        "Random Forest": {
            "Estimator": RandomForestRegressor(),
            "Parameters": {
                "n_estimators": [100],
                # NOTE(review): criterion="mse" is deprecated in newer
                # scikit-learn (renamed "squared_error") -- verify version.
                "criterion": ["mse"],
                "max_features": ["auto"],
                "n_jobs": [4]}
        },
        "AdaBoost": {
            "Estimator": AdaBoostRegressor(),
            "Parameters": {
                "base_estimator": [DecisionTreeRegressor()],
                "n_estimators": [100]}
        },
        "Extra Trees": {
            "Estimator": ExtraTreesRegressor(),
            "Parameters": {
                "n_estimators": [100],
                "max_features": ["auto"],
                "n_jobs": [4]}
        },
        "Gradient Boosting": {
            "Estimator": GradientBoostingRegressor(),
            "Parameters": {
                "n_estimators": [100],
                "criterion": ["friedman_mse"],
                "max_features": ["auto"]}
        }
    }
}
# Same estimators with broad hyperparameter grids for tuning runs.
optimized_group = {
    "Regressors": {
        "Linear Regression": {
            "Estimator": LinearRegression(),
            "Parameters": {"normalize": [False],"n_jobs": [4],"copy_X": [True]}
        },
        "Lasso": {
            "Estimator": Lasso(),
            "Parameters": {
                "alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0]}
        },
        "Ridge": {
            "Estimator": Ridge(),
            "Parameters": {
                "alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0]}
        },
        "ElasticNet": {
            "Estimator": ElasticNet(),
            "Parameters": {
                "alpha": [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.25, 0.50, 0.75, 1.0],
                "l1_ratio": np.arange(0.0,1.0,0.1)}
        }
    },
    "Ensembles": {
        "Random Forest": {
            "Estimator": RandomForestRegressor(),
            "Parameters": {
                "n_estimators": [50,100],
                "max_depth": [2,3,4,5,6],
                "criterion": ["mse"],
                "min_samples_split": [0.005, 0.01, 0.05, 0.10],
                "min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
                "max_features": ["auto"],
                "n_jobs": [4]}
        },
        "AdaBoost": {
            "Estimator": AdaBoostRegressor(),
            "Parameters": {
                "base_estimator": [DecisionTreeRegressor()],
                "n_estimators": [50,100],
                "learning_rate": [0.001, 0.01, 0.05, 0.1, 0.25, 0.50, 0.75, 1.0]}
        },
        "Extra Trees": {
            "Estimator": ExtraTreesRegressor(),
            "Parameters": {
                "n_estimators": [50,100],
                "max_depth": [2,3,4,5,6],
                "min_samples_split": [0.005, 0.01, 0.05, 0.10],
                "min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
                "max_features": ["auto"],
                "n_jobs": [4]}
        },
        "Gradient Boosting": {
            "Estimator": GradientBoostingRegressor(),
            "Parameters": {
                "learning_rate": [0.15,0.1,0.05,0.01,0.005,0.001],
                "n_estimators": [50,100],
                "max_depth": [2,3,4,5,6],
                "criterion": ["friedman_mse"],
                "min_samples_split": [0.005, 0.01, 0.05, 0.10],
                "min_samples_leaf": [0.005, 0.01, 0.05, 0.10],
                "max_features": ["auto"]}
        }
    }
}
# Registry consumed by Pipeline.fit: group label -> model group config.
model_groups = {"Baseline": baseline_group, "Optimized": optimized_group}
# =========================================================================== #
# 3. FEATURE METADATA #
# =========================================================================== #
# Integer-valued features (counts, years, months).
discrete = ["Year_Built","Year_Remod_Add","Bsmt_Full_Bath","Bsmt_Half_Bath",
    "Full_Bath","Half_Bath","Bedroom_AbvGr","Kitchen_AbvGr","TotRms_AbvGrd",
    "Fireplaces","Garage_Cars","Mo_Sold","Year_Sold", "Garage_Yr_Blt"]
# Real-valued features (areas in square feet, dollar values).
continuous = ["Lot_Frontage","Lot_Area","Mas_Vnr_Area","BsmtFin_SF_1","BsmtFin_SF_2",
    "Bsmt_Unf_SF","Total_Bsmt_SF","First_Flr_SF","Second_Flr_SF","Low_Qual_Fin_SF",
    "Gr_Liv_Area","Garage_Area","Wood_Deck_SF","Open_Porch_SF","Enclosed_Porch",
    "Three_season_porch","Screen_Porch","Pool_Area","Misc_Val"]
numeric = discrete + continuous
# Total count of category levels across the nominal features (documentation only).
n_nominal_levels = 191
# Unordered categorical features.
nominal = ['MS_SubClass', 'MS_Zoning', 'Street', 'Alley', 'Land_Contour', 'Lot_Config', 'Neighborhood',
    'Condition_1', 'Condition_2', 'Bldg_Type', 'House_Style', 'Roof_Style', 'Roof_Matl',
    'Exterior_1st', 'Exterior_2nd', 'Mas_Vnr_Type', 'Foundation', 'Heating', 'Central_Air',
    'Garage_Type', 'Misc_Feature', 'Sale_Type', 'Sale_Condition']
# Ordered categorical features (quality/condition scales).
ordinal = ['BsmtFin_Type_1', 'BsmtFin_Type_2', 'Bsmt_Cond', 'Bsmt_Exposure',
    'Bsmt_Qual', 'Electrical', 'Exter_Cond', 'Exter_Qual', 'Fence', 'Fireplace_Qu',
    'Functional', 'Garage_Cond', 'Garage_Finish', 'Garage_Qual', 'Heating_QC', 'Kitchen_Qual',
    'Land_Slope', 'Lot_Shape', 'Overall_Cond', 'Overall_Qual', 'Paved_Drive', 'Pool_QC', 'Utilities']
class FeatureMetadata:
    """Registry of dataset features: type, provenance, and active status.

    Backing store ``fm_`` is a DataFrame with columns:
      * Feature   -- column name
      * Type      -- Continuous / Discrete / Nominal / Ordinal
      * Source    -- "Original" (raw data) or "Derived" (engineered later)
      * Active    -- whether the feature currently participates in modeling
      * Signature -- name of the caller that last modified the entry
    """
    def __init__(self, discrete=discrete, continuous=continuous,
                 nominal=nominal, ordinal=ordinal):
        """Build the registry from the four feature-name lists."""
        self._discrete = discrete
        self._continuous = continuous
        self._nominal = nominal
        self._ordinal = ordinal
        self.load_features()
    def load_features(self):
        """(Re)initialize ``fm_`` from the configured feature lists."""
        columns = {"Continuous": self._continuous, "Discrete": self._discrete,
                   "Nominal": self._nominal, "Ordinal": self._ordinal}
        frames = [pd.DataFrame(data={"Feature": features, "Type": ftype,
                                     "Source": "Original", "Active": True,
                                     "Signature": "FeatureMetadata: load_features"})
                  for ftype, features in columns.items()]
        self.fm_ = pd.concat(frames, axis=0)
    def get_feature(self, feature):
        """Return the registry row(s) for ``feature``.

        Bug fix: the original filtered ``Type`` against an undefined name
        ``coltype`` (NameError); filter on the feature name instead.
        """
        return self.fm_[self.fm_["Feature"] == feature]
    def get_features(self, feature_type=None):
        """Returns all features or all features of the requested feature type."""
        if feature_type:
            return list(self.fm_[(self.fm_["Type"] == feature_type)]["Feature"].values)
        return list(self.fm_["Feature"].values)
    def get_categorical_features(self):
        """Returns a list of active nominal and ordinal features."""
        return (self.get_active_features("Nominal") +
                self.get_active_features("Ordinal"))
    def get_numeric_features(self):
        """Returns a list of active discrete and continuous features."""
        return (self.get_active_features("Discrete") +
                self.get_active_features("Continuous"))
    def get_original_features(self, feature_type=None):
        """Returns original (non-derived) features, optionally by type."""
        mask = self.fm_["Source"] == "Original"
        if feature_type:
            mask = mask & (self.fm_["Type"] == feature_type)
        return list(self.fm_[mask]["Feature"].values)
    def get_active_features(self, feature_type=None):
        """Returns active features, optionally restricted to one type."""
        mask = self.fm_["Active"] == True
        if feature_type:
            mask = mask & (self.fm_["Type"] == feature_type)
        return list(self.fm_[mask]["Feature"].values)
    def _set_active(self, feature, active):
        # Stamp the entry with the name of the function that requested the
        # change: two frames up skips the include/exclude wrapper itself,
        # matching the original sys._getframe(1) behavior seen by callers.
        caller = sys._getframe(2).f_code.co_name
        self.fm_.loc[self.fm_["Feature"] == feature, "Active"] = active
        self.fm_.loc[self.fm_["Feature"] == feature, "Signature"] = caller
    def exclude_feature(self, feature):
        """Deactivate ``feature`` and record the caller's name."""
        self._set_active(feature, False)
    def include_feature(self, feature):
        """Activate ``feature`` and record the caller's name."""
        self._set_active(feature, True)
    def exclude_features(self, features):
        """Deactivate every feature in ``features``."""
        for feature in features:
            self.exclude_feature(feature)
    def include_features(self, features):
        """Activate every feature in ``features``."""
        for feature in features:
            self.include_feature(feature)
    def add_feature(self, feature, feature_type, active=True):
        """Register a derived feature unless it is already present."""
        if self.fm_[self.fm_["Feature"] == feature].shape[0] == 0:
            d = {"Feature": feature, "Type": feature_type, "Source": "Derived",
                 "Active": active, "Signature": sys._getframe(1).f_code.co_name}
            df = pd.DataFrame(data=d, index=[0])
            self.fm_ = pd.concat((self.fm_, df), axis=0)
    def print(self, feature=None, feature_type=None):
        """Print the registry, optionally filtered by type or by feature."""
        if feature_type:
            print(self.fm_[self.fm_["Type"] == feature_type])
        elif feature:
            print(self.fm_[self.fm_["Feature"] == feature])
        else:
            print(self.fm_)
# =========================================================================== #
# 1. PREPROCESSOR #
# =========================================================================== #
class Preprocessor:
    """Cleans, engineers, transforms, and encodes the Ames housing data.

    With a target (training path) ``transform`` also reports outliers and
    filters collinear features; without a target (test path) it reuses the
    leave-one-out encoder fitted on the preceding training pass.

    NOTE(review): relies on ``zscore``, ``itertools`` and ``pearsonr``
    being imported earlier in this module -- confirm.
    """
    def __init__(self, feature_metadata=FeatureMetadata(),
                 looe=LeaveOneOutEncoder(drop_invariant=False, return_df=True)):
        # NOTE(review): both defaults are mutable objects created once at
        # class-definition time and shared across all Preprocessor
        # instances -- confirm this sharing is intended.
        self._feature_metadata = feature_metadata
        self._looe = looe
    def fit(self, X, y=None):
        # Only stores references; all real work happens in transform().
        self.X_ = X
        self.y_ = y
        return self
    def transform(self, X, y=None):
        """Run the preprocessing chain; returns (X, y) with y possibly None."""
        self.fit(X,y)
        if y is not None:
            self.clean().detect_outliers().engineer().transformer().filter()
        else:
            self.clean().engineer().transformer()
        self._get_active_features()
        self.X_ = self.X_[self.active_features_]
        return self.X_, self.y_
    def fit_transform(self, X, y=None):
        # NOTE(review): transform() calls fit() again, so fit runs twice here.
        return self.fit(X,y).transform(X,y)
    def _check_data(self):
        """Sanity check: warn on remaining nulls and assert X/y row alignment."""
        # Check for nulls and na
        if self.X_.isnull().sum().sum() != 0:
            n_nulls = self.X_.isnull().sum().sum()
            print(f"\nWarning, {n_nulls} nulls found by {sys._getframe(1).f_code.co_name}")
            print(self.X_[self.X_.isnull().any(axis=1)])
        # Confirm lengths of X and y (if y is not None)
        if self.y_ is not None:
            assert(self.X_.shape[0] == self.y_.shape[0]), \
                f"X has length {self.X_.shape[0]} and y has length {self.y_.shape[0]}. coming from {sys._getframe(1).f_code.co_name}."
    # ====================================================================== #
    #                            FEATURE NAMES                               #
    # ====================================================================== #
    def _get_feature(self, feature):
        # Delegate a single-feature lookup to the metadata registry.
        return self._feature_metadata.get_feature(feature)
    def _get_features(self):
        # Cache all feature names (active or not), by type.
        self.continuous_ = self._feature_metadata.get_features("Continuous")
        self.discrete_ = self._feature_metadata.get_features("Discrete")
        self.nominal_ = self._feature_metadata.get_features("Nominal")
        self.ordinal_ = self._feature_metadata.get_features("Ordinal")
        self.features_ = self._feature_metadata.get_features()
    def _get_original_features(self):
        # Cache names of features present in the raw data set, by type.
        self.continuous_ = self._feature_metadata.get_original_features("Continuous")
        self.discrete_ = self._feature_metadata.get_original_features("Discrete")
        self.nominal_ = self._feature_metadata.get_original_features("Nominal")
        self.ordinal_ = self._feature_metadata.get_original_features("Ordinal")
        self.original_features_ = self._feature_metadata.get_original_features()
    def _get_active_features(self):
        # Cache names of currently active features, by type.
        self.continuous_ = self._feature_metadata.get_active_features("Continuous")
        self.discrete_ = self._feature_metadata.get_active_features("Discrete")
        self.nominal_ = self._feature_metadata.get_active_features("Nominal")
        self.ordinal_ = self._feature_metadata.get_active_features("Ordinal")
        self.active_features_ = self._feature_metadata.get_active_features()
    def clean(self):
        """Imputes missing values and corrects known data-entry errors."""
        # Transform the target (log scale stabilizes variance).
        self.y_ = None if self.y_ is None else np.log(self.y_)
        # Initiate imputers
        # NOTE(review): mean_imputer and median_imputer are created but
        # never used below -- confirm whether they were meant to be.
        mean_imputer = SimpleImputer(strategy="mean")
        median_imputer = SimpleImputer(strategy="median")
        frequent_imputer = SimpleImputer(strategy="most_frequent")
        # Get Feature Names
        self._get_original_features()
        # ------------------------------------------------------------------- #
        #                        Continuous Variables                         #
        # ------------------------------------------------------------------- #
        # Remove Nulls
        # NOTE(review): fillna with inplace=True on a column-sliced COPY
        # (self.X_[self.continuous_]) does not modify self.X_ -- likely a
        # silent no-op; verify (the Garage_Yr_Blt line below works because
        # a single-column Series shares the underlying data).
        self.X_[self.continuous_].fillna(self.X_[self.continuous_].mean(), inplace = True)
        self.X_["Garage_Yr_Blt"].fillna(self.X_["Garage_Yr_Blt"].mean(), inplace = True)
        self._check_data()
        # Correct data entry errors (a garage cannot be built in year 2207)
        self.X_["Garage_Yr_Blt"].replace(to_replace=2207, value=2007, inplace=True)
        # ------------------------------------------------------------------- #
        #                         Discrete Variables                          #
        # ------------------------------------------------------------------- #
        frequent_imputer.fit(self.X_[self.discrete_])
        self.X_[self.discrete_] = frequent_imputer.transform(self.X_[self.discrete_])
        # ------------------------------------------------------------------- #
        #                          Ordinal Variables                          #
        # ------------------------------------------------------------------- #
        frequent_imputer.fit(self.X_[self.ordinal_])
        self.X_[self.ordinal_] = frequent_imputer.transform(self.X_[self.ordinal_])
        # ------------------------------------------------------------------- #
        #                          Nominal Variables                          #
        # ------------------------------------------------------------------- #
        frequent_imputer.fit(self.X_[self.nominal_])
        self.X_[self.nominal_] = frequent_imputer.transform(self.X_[self.nominal_])
        #Check for again nulls
        self._check_data()
        return self
    def detect_outliers(self):
        """Reports (but does not remove) z-score outliers among numeric features."""
        features = self.continuous_ + self.discrete_
        X_numeric = self.X_[features]
        # NOTE(review): .all(axis=1) flags rows where EVERY numeric feature
        # exceeds |z| > 3, which almost never fires; .any(axis=1) is the
        # usual intent -- verify.
        X_outliers = X_numeric[(np.abs(zscore(X_numeric)) > 3).all(axis=1)]
        if (X_outliers.shape[0]>0):
            print("\n")
            print("="*40)
            print("          Outlier Detection")
            print("-"*40)
            print(f"        Observations: {X_numeric.shape[0]}")
            print(f"            Features: {X_numeric.shape[1]}")
            print(f" # Outliers Detected: {X_outliers.shape[0]}")
            print("-"*40)
        if (X_outliers.shape[0]> 0):
            print(X_outliers)
        return self
    def engineer(self):
        """Create new features that increase predictive capability."""
        # ------------------------------------------------------------------- #
        #                            Age Related                              #
        # ------------------------------------------------------------------- #
        # Identifiers and coordinates carry no predictive signal here.
        # NOTE(review): Latitude/Longitude are not in the registered feature
        # lists, so these exclude calls are likely no-ops in the registry.
        self.X_.drop(columns=["PID","Latitude", "Longitude"], inplace=True)
        self._feature_metadata.exclude_feature("PID")
        self._feature_metadata.exclude_feature("Latitude")
        self._feature_metadata.exclude_feature("Longitude")
        # Age of the house at sale time replaces the raw construction year.
        self.X_["Age"] = self.X_["Year_Sold"] - self.X_["Year_Built"]
        self.X_["Age"].fillna(self.X_["Age"].median(),inplace=True)
        self.X_.drop(columns="Year_Built", inplace=True)
        self._feature_metadata.include_feature("Age")
        self._feature_metadata.exclude_feature("Year_Built")
        self._check_data()
        # Garage Age
        self.X_["Garage_Age"] = self.X_["Year_Sold"] - self.X_["Garage_Yr_Blt"]
        self.X_["Garage_Age"].fillna(self.X_["Garage_Age"].mean(),inplace=True)
        self.X_.drop(columns="Garage_Yr_Blt", inplace=True)
        self._feature_metadata.include_feature("Garage_Age")
        self._feature_metadata.exclude_feature("Garage_Yr_Blt")
        self._check_data()
        # Age since remodeled
        self.X_["Age_Remod"] = self.X_["Year_Sold"] - self.X_["Year_Remod_Add"]
        self.X_["Age_Remod"].fillna(self.X_["Age_Remod"].median(),inplace=True)
        self.X_.drop(columns="Year_Remod_Add", inplace=True)
        self._feature_metadata.include_feature("Age_Remod")
        self._feature_metadata.exclude_feature("Year_Remod_Add")
        self._check_data()
        # ------------------------------------------------------------------- #
        #                          Amenity Features                           #
        # ------------------------------------------------------------------- #
        # NOTE(review): `"No_Garage" not in ...values` yields a SINGLE bool
        # for the whole data set (broadcast to every row), not a per-row
        # indicator -- per-row comparison was probably intended; verify.
        self.X_["Has_Garage"] = "No_Garage" not in self.X_["Garage_Type"].values
        self.X_["Has_Pool"] = "No_Pool" not in self.X_["Pool_QC"].values
        self.X_["Has_Basement"] = "No_Basement" not in self.X_["Bsmt_Qual"].values
        self.X_["Has_Fireplace"] = "No_Fireplace" not in self.X_["Fireplace_Qu"].values
        # NOTE(review): `== 0` makes Has_Porch True when there is NO porch
        # area -- the flag appears inverted relative to its name; verify.
        self.X_["Has_Porch"] = self.X_["Open_Porch_SF"].values + \
            self.X_["Enclosed_Porch"].values + \
            self.X_["Three_season_porch"].values + \
            self.X_["Screen_Porch"].values == 0
        self.X_["Has_Garage"].replace(to_replace=[True, False], value=["Y","N"], inplace=True)
        self.X_["Has_Pool"].replace(to_replace=[True, False], value=["Y","N"], inplace=True)
        self.X_["Has_Basement"].replace(to_replace=[True, False], value=["Y","N"], inplace=True)
        self.X_["Has_Fireplace"].replace(to_replace=[True, False], value=["Y","N"], inplace=True)
        self.X_["Has_Porch"].replace(to_replace=[True, False], value=["Y","N"], inplace=True)
        self._feature_metadata.add_feature(feature="Has_Garage",feature_type="Nominal",active=True)
        self._feature_metadata.add_feature(feature="Has_Pool",feature_type="Nominal",active=True)
        self._feature_metadata.add_feature(feature="Has_Basement",feature_type="Nominal",active=True)
        self._feature_metadata.add_feature(feature="Has_Fireplace",feature_type="Nominal",active=True)
        self._feature_metadata.add_feature(feature="Has_Porch",feature_type="Nominal",active=True)
        self._check_data()
        # ------------------------------------------------------------------- #
        #                School and Zip Code Information                      #
        # ------------------------------------------------------------------- #
        # filename = "../data/external/schools.csv"
        # schools = pd.read_csv(filename)
        # self.X_ = pd.merge(self.X_, schools, on="Neighborhood", how="inner")
        # Add variables to metadata
        # self._feature_metadata.add_feature(feature="Zip",feature_type="Nominal",active=True)
        # self._feature_metadata.add_feature(feature="School_Title_1",feature_type="Nominal",active=True)
        # self._feature_metadata.add_feature(feature="School_Students",feature_type="Continuous",active=True)
        # self._feature_metadata.add_feature(feature="School_Teachers",feature_type="Continuous",active=True)
        # self._feature_metadata.add_feature(feature="School_Student_Teacher_Ratio",feature_type="Continuous",active=True)
        # self._feature_metadata.add_feature(feature="Free_or_Reduced_Lunch",feature_type="Continuous",active=True)
        # self._check_data()
        # ------------------------------------------------------------------- #
        #                    2008 Financial Crisis Sale                       #
        # ------------------------------------------------------------------- #
        # Map month of sale to calendar quarter.
        self.X_.loc[self.X_["Mo_Sold"].isin([1,2,3]), "Qtr_Sold"] = str(1)
        self.X_.loc[self.X_["Mo_Sold"].isin([4,5,6]), "Qtr_Sold"] = str(2)
        self.X_.loc[self.X_["Mo_Sold"].isin([7,8,9]), "Qtr_Sold"] = str(3)
        self.X_.loc[self.X_["Mo_Sold"].isin([10,11,12]), "Qtr_Sold"] = str(4)
        # Format Qtr-Sold Feature as "YYYY-Q"
        self.X_["Year_Sold"] = self.X_["Year_Sold"].astype(int)
        self.X_["Qtr_Sold"] = self.X_["Year_Sold"].astype(str) + "-" + self.X_["Qtr_Sold"].astype(str)
        # Housing Price Index for 2006-2010. Merge with data
        # NOTE(review): an inner merge can reorder / drop X_ rows while y_
        # is left untouched -- verify row alignment is preserved.
        d = {'Qtr_Sold':
                ['2006-1', '2006-2', '2006-3', '2006-4', '2007-1', '2007-2',
                 '2007-3', '2007-4', '2008-1', '2008-2', '2008-3', '2008-4',
                 '2009-1', '2009-2', '2009-3', '2009-4', '2010-1', '2010-2',
                 '2010-3', '2010-4'],
             'HPI': [368.63, 372.4 , 375.47, 379.31, 380.72, 380.48, 376.22, 375.02,
                     372.29, 362.88, 351.46, 348.23, 350.79, 341.56, 332.61, 330.09,
                     326.14, 323.2 , 326.31, 323.96]}
        hpi = pd.DataFrame(data=d)
        self.X_ = pd.merge(self.X_, hpi, on="Qtr_Sold", how="inner")
        # Add new fields to feature metadata
        self._feature_metadata.add_feature(feature="Qtr_Sold",feature_type="Nominal",active=True)
        self._feature_metadata.add_feature(feature="HPI",feature_type="Continuous",active=True)
        self._check_data()
        assert(self.X_["HPI"].mean()>300), "HPI merge problem in engineer."
        return self
    def transformer(self,sigma=0.3):
        """Power transform continuous and leave-one-out target encode categorical."""
        # NOTE(review): the ``sigma`` parameter is unused -- confirm whether
        # it was meant to be passed to the LeaveOneOutEncoder.
        # Get current feature names just in case
        self._get_features()
        # ------------------------------------------------------------------- #
        #                            Continuous                               #
        # ------------------------------------------------------------------- #
        # Power transformation to make feature distributions closer to Guassian
        power = PowerTransformer(method="yeo-johnson", standardize=False)
        self.X_[self.continuous_] = power.fit_transform(self.X_[self.continuous_])
        self._check_data()
        # ------------------------------------------------------------------- #
        #                            Categorical                              #
        # ------------------------------------------------------------------- #
        categorical = self.nominal_ + self.ordinal_
        # Fit the encoder only on the training pass (y given); the test pass
        # reuses the encoder fitted previously.
        if self.y_ is not None:
            self._looe.fit(self.X_[categorical], self.y_)
        self.X_[categorical] = self._looe.transform(self.X_[categorical])
        self._check_data()
        # ------------------------------------------------------------------- #
        #                            Standardize                              #
        # ------------------------------------------------------------------- #
        # Obtain active features for standardization and processing.
        self._get_active_features()
        self.X_ = self.X_[self.active_features_]
        # NOTE(review): the scaler is refit on every pass, including the
        # test set -- the usual practice is to reuse the training-fit scaler.
        standard = StandardScaler()
        standard.fit(self.X_)
        X = standard.transform(self.X_)
        self.X_ = pd.DataFrame(data=X, columns=self.active_features_)
        self._check_data()
        return self
    def _select_redundant_feature(self, a,b):
        # Of a correlated pair, drop the one with the smaller |coefficient|
        # in a two-feature linear fit against the target.
        features = [a,b]
        model = LinearRegression()
        model.fit(self.X_[features], self.y_)
        return features[np.argmin(abs(model.coef_))]
    def filter(self, max_collinearity=0.7):
        """Deactivate one feature of every pair with |r| above the threshold."""
        features = self._feature_metadata.get_active_features()
        self.feature_correlations_ = pd.DataFrame()
        # Perform pairwise correlation coefficient calculations
        for col_a, col_b in itertools.combinations(features,2):
            r, p = pearsonr(self.X_[col_a], self.X_[col_b])
            cols = col_a + "__" + col_b
            d = {"Columns": cols, "A": col_a, "B": col_b,"Correlation": abs(r), "p-value": p}
            df = pd.DataFrame(data=d, index=[0])
            self.feature_correlations_ = pd.concat((self.feature_correlations_, df), axis=0)
        # Select correlations above threshold
        redundancies = self.feature_correlations_[self.feature_correlations_["Correlation"]>max_collinearity]
        if redundancies.shape[0] > 0:
            features_to_remove = []
            print("\nFiltering Redundant Features")
            print(f"{redundancies.shape[0]} pairs of redundant features found.")
            print(redundancies)
            for idx, row in redundancies.iterrows():
                features_to_remove.append(self._select_redundant_feature(row["A"], row["B"]))
            self._feature_metadata.exclude_features(features_to_remove)
            print("\nThe following features are excluded.")
            print_list(features_to_remove,2)
        return self
# =========================================================================== #
# 2.0 FEATURE SELECTORS: FULL DATA SET #
# =========================================================================== #
class FullSelector(BaseEstimator, TransformerMixin):
    """Pass-through selector that keeps every feature in the data set."""

    def __init__(self, estimator):
        # The estimator is accepted only for interface parity with the
        # other selectors; it plays no role in selection here.
        self._estimator = estimator

    def fit(self, X, y=None):
        """No-op fit; present for scikit-learn compatibility."""
        return self

    def transform(self, X):
        """Return ``X`` unchanged."""
        return X

    def fit_transform(self, X, y=None):
        """Equivalent to ``transform``; nothing is learned."""
        return self.transform(X)
# =========================================================================== #
# 2.1 FEATURE SELECTORS: IMPORTANCE SELECTOR #
# =========================================================================== #
class ImportanceSelector(BaseEstimator, TransformerMixin):
    """Returns a dataset restricted to the top-N most important features.

    Importance is |coef_| for linear regressors and
    ``feature_importances_`` for tree-based estimators.
    """
    def __init__(self, estimator, top_n=10):
        self._estimator = estimator
        self._top_n = top_n

    def _fit_regression_feature_importance(self, X, y=None):
        """Importance = absolute coefficient of the fitted linear model."""
        self._estimator.fit(X, y)
        # Bug fix: the original referenced an undefined name ``model``.
        importances = {"Feature": X.columns.tolist(),
                       "Importance": abs(self._estimator.coef_)}
        self.importances_ = pd.DataFrame(data=importances)

    def _fit_tree_based_feature_importance(self, X, y=None):
        """Importance from the fitted tree-based ensemble."""
        self._estimator.fit(X, y)
        # Bug fix: the original referenced an undefined name ``model``.
        importances = {"Feature": X.columns.tolist(),
                       "Importance": self._estimator.feature_importances_}
        self.importances_ = pd.DataFrame(data=importances)

    def fit(self, X, y=None):
        """Fit the estimator and compute per-feature importances."""
        regressors = ["LinearRegression", "LassoCV", "RidgeCV", "ElasticNetCV"]
        estimator = self._estimator.__class__.__name__
        if estimator in regressors:
            self._fit_regression_feature_importance(X, y)
        elif "GridSearchCV" == estimator:
            # Unwrap the tuned estimator and dispatch again.
            self._estimator = self._estimator.best_estimator_
            self.fit(X, y)
        else:
            self._fit_tree_based_feature_importance(X, y)
        return self

    def transform(self, X):
        """Return ``X`` restricted to the top-N features by importance."""
        importances = self.importances_.sort_values(by="Importance", ascending=False)
        top_importances = importances.head(self._top_n)
        top_features = top_importances["Feature"].values
        return X[top_features]

    def fit_transform(self, X, y=None):
        # Bug fix: the original signature omitted ``self``.
        return self.fit(X, y).transform(X)
# =========================================================================== #
# 2.2 FEATURE SELECTORS: RFECV SELECTOR #
# =========================================================================== #
class RFECVSelector(BaseEstimator, TransformerMixin):
    """Selects features via recursive feature elimination with CV (RFECV)."""
    def __init__(self, estimator, step=1, min_features_to_select=5, cv=5,
                 scoring=rmse, n_jobs=4):
        self._estimator = estimator
        self._step = step
        self._min_features_to_select = min_features_to_select
        self._cv = cv
        self._scoring = scoring
        self._n_jobs = n_jobs

    def fit(self, X, y=None):
        """Run RFECV and record the names of the retained features."""
        if self._estimator.__class__.__name__ == "GridSearchCV":
            # Unwrap the tuned estimator and fit once on it.
            # Bug fix: the original fell through after the recursive call
            # and ran the (expensive) RFECV a second time.
            self._estimator = self._estimator.best_estimator_
            return self.fit(X, y)
        selector = RFECV(estimator=self._estimator, step=self._step,
                         min_features_to_select=self._min_features_to_select,
                         cv=self._cv, scoring=self._scoring, n_jobs=self._n_jobs)
        selector.fit(X, y)
        self.selected_features_ = list(itertools.compress(X.columns, selector.support_))
        return self

    def transform(self, X):
        """Return ``X`` restricted to the RFECV-selected features."""
        return X[self.selected_features_]

    def fit_transform(self, X, y=None):
        # Bug fix: the original signature omitted ``self``.
        return self.fit(X, y).transform(X)
# Selector registry used by Pipeline (note: classes, not instances).
selectors = {"Full": FullSelector, "Importance": ImportanceSelector, "RFECV": RFECVSelector}
# =========================================================================== #
# 3. MODEL #
# =========================================================================== #
class Model:
    """Couples an estimator with a feature selector; times training/scoring.

    NOTE(review): ``select_features`` is never called by ``fit``, so the
    selector currently plays no role in training -- confirm.
    Relies on ``uuid``, ``time`` and ``datetime`` imported earlier.
    """
    def __init__(self, estimator, selector, **kwargs):
        self._estimator = estimator
        self._selector = selector
        # Short random id used as this model's key in the Evaluator registry.
        self.model_id_ = uuid.uuid4().time_low
    def _start_training(self):
        # Capture wall-clock start time and print a training banner.
        now = datetime.datetime.now()
        self.start_training_ = time.time()
        print("\n")
        print("="*40)
        print(f"          Estimator: {self._estimator.__class__.__name__}")
        print("-"*40)
        print("    ", end="")
        print(now.strftime("%Y-%m-%d %H:%M:%S"))
        print("-"*40)
    def _get_params(self):
        """Obtains parameter for LassoCV, RidgeCV, and ElasticNetCV."""
        if self._estimator.__class__.__name__ in ["LassoCV", "RidgeCV"]:
            return self._estimator.alpha_
        elif self._estimator.__class__.__name__ == "ElasticNetCV":
            return {"alpha_": self._estimator.alpha_,
                    "l1_ratio":self._estimator.l1_ratio_}
        else:
            # Non-CV estimators: no tuned parameters to report.
            return None
    def _end_training(self, X, y):
        """Record elapsed time, unwrap GridSearchCV results, print summary."""
        self.end_training_ = time.time()
        self.training_time_ = round(self.end_training_ - self.start_training_,3)
        if self._estimator.__class__.__name__ == "GridSearchCV":
            self.name_ = self._estimator.best_estimator_.__class__.__name__
            self.best_estimator_ = self._estimator.best_estimator_
            self.best_params_ = self._estimator.best_params_
            self.train_score_ = self._estimator.best_score_
        else:
            self.name_ = self._estimator.__class__.__name__
            self.best_estimator_ = self._estimator
            self.best_params_ = self._get_params()
            # NOTE(review): _score() also assigns self.train_score_ itself.
            self.train_score_ = self._score(X, y)
        self.n_features_ = X.shape[1]
        print(f"         # Features: {self.n_features_}")
        print("-"*40)
        print(f"        Train Score: {self.train_score_}")
    def select_features(self, X, y):
        """Fit the selector and keep only the selected columns.

        NOTE(review): fits on X.values (ndarray) but transforms X
        (DataFrame); selectors that read X.columns during fit (e.g.
        RFECVSelector) would fail on the ndarray -- verify.
        """
        X = self._selector.fit(X.values, y).transform(X)
        self.select_features_ = X.columns.tolist()
        self.n_features_ = X.shape[1]
        print(f"  ")
        print(f"         # Features: {self.n_features_}")
    def fit(self, X, y):
        """Train the estimator on X (as an ndarray) and y, with timing."""
        self._start_training()
        self._estimator.fit(X.values,y)
        self._end_training(X, y)
        return self
    def predict(self, X):
        """Predict and record prediction latency."""
        start = time.time()
        y_pred = self._estimator.predict(X)
        end = time.time()
        self.prediction_time_ = round(end - start,3)
        return y_pred
    def _score(self, X, y):
        # Training-set RMSE (log-scale target); cached on the instance.
        y_pred = self.predict(X.values)
        self.train_score_ = RMSE(y, y_pred)
        return self.train_score_
    def score(self, X, y):
        """Test-set RMSE; ``y`` is a frame holding PID and Sale_Price."""
        PID = y["PID"]
        # Log-transform the target to match the training-time transform.
        y = np.log(y["Sale_Price"])
        y_pred = self.predict(X)
        self.test_score_ = RMSE(y, y_pred)
        print(f"         Test Score: {self.test_score_}")
        print(f"      Training Time: {self.training_time_}")
        print(f"       Predict Time: {self.prediction_time_}")
        print("="*40)
        print("\n")
        return self.test_score_
# =========================================================================== #
# 4. EVALUATOR #
# =========================================================================== #
class Evaluator:
    """Collects fitted models, tabulates their results, and (optionally)
    persists each model and the cumulative results to disk."""

    def __init__(self, persist=False):
        self.models_ = {}
        self.results_ = pd.DataFrame()
        self._persist = persist

    def add_model(self, model, cv):
        """Record a fitted, scored model for cross-validation set ``cv``."""
        self.models_[model.model_id_] = model
        row = pd.DataFrame(
            data={"CV": cv,
                  "Id": model.model_id_,
                  "Estimator": model.name_,
                  "# Features": model.n_features_,
                  "Train Score": model.train_score_,
                  "Test Score": model.test_score_,
                  "Train Time": model.training_time_,
                  "Predict Time": model.prediction_time_},
            index=[0])
        self.results_ = pd.concat((self.results_, row), axis=0)
        if self._persist:
            self.save(model, cv)

    def detail(self):
        """Print and return test scores pivoted by estimator and CV set."""
        scores = self.results_.pivot(index="Estimator", columns="CV",
                                     values="Test Score")
        print("\n")
        print("=" * 40)
        print("    Scores by Cross-Validation Set")
        print("-" * 40)
        print(scores)
        print("-" * 40)
        return scores

    def summary(self):
        """Print and return mean results per estimator, best score first."""
        results = self.results_.groupby(by=["Estimator"]).mean()
        results.sort_values(by="Test Score", inplace=True)
        print("\n")
        print("=" * 40)
        print("    Performance Results by Algorithm")
        print("-" * 40)
        print(results)
        print("-" * 40)
        return results

    def save(self, model, cv):
        """Persist the model via joblib and dump cumulative results to CSV."""
        directory = "../models/"
        filename = (f"{directory}{cv}_{model.name_}_{model.model_id_}"
                    f"_score_{model.test_score_}.joblib")
        dump(model, filename)
        # Results file is keyed by the current date, e.g. "January-05-2021".
        cdate = datetime.datetime.now()
        date = f"{cdate.strftime('%B')}-{cdate.strftime('%d')}-{cdate.strftime('%Y')}"
        filename = f"{directory}performance_results_{date}.csv"
        self.results_.to_csv(filename, index=False)
# =========================================================================== #
# 5. PIPELINE #
# =========================================================================== #
class Pipeline:
    """Selects best algorithm for regression via cross-validation."""
    def __init__(self, model_groups=model_groups, selectors=selectors,
                 data_loader=AmesData(), preprocessor=Preprocessor(),
                 evaluator=Evaluator(persist=True)):
        # NOTE(review): default arguments are stateful objects created once
        # at class-definition time and shared across Pipeline instances.
        self._model_groups = model_groups
        self._selectors = selectors
        self._data_loader = data_loader
        self._preprocessor = preprocessor
        self._evaluator = evaluator
    def fit(self):
        """Run every configured model over the 10 cross-validation sets."""
        # NOTE(review): these four accumulators are never populated or read.
        max_alphas_lasso = []
        min_alphas_lasso = []
        max_alphas_enet = []
        min_alphas_enet = []
        for i in range(1,11):
            train, test = self._data_loader.get_cv(i)
            X_train = train["X"]
            y_train = train["y"]
            X_test = test["X"]
            y_test = test["y"]
            # Clean, screen, transform and encode the data
            # The test set is transformed without y, so it reuses the
            # encoder fitted on the training fold above.
            X_train, y_train = self._preprocessor.fit_transform(X_train, y_train)
            X_test, _ = self._preprocessor.transform(X_test)
            for groupname, model_group in self._model_groups.items():
                print(groupname)
                for setname, model_set in model_group.items():
                    print(setname)
                    for name, estimator in model_set.items():
                        for selector in self._selectors.values():
                            # NOTE(review): ``estimator`` here is the config
                            # dict {"Estimator", "Parameters"} (not a fitted
                            # estimator) and ``selector`` is a class, not an
                            # instance; GridSearchCV is imported but never
                            # constructed. Model.fit on a dict will fail --
                            # verify this loop's wiring.
                            model = Model(estimator=estimator, selector=selector)
                            model.fit(X_train,y_train)
                            model.score(X_test, y_test)
                            self._evaluator.add_model(model,i)
        return self
# =========================================================================== #
# 15. SUBMISSION #
# =========================================================================== #
class Submission:
    """Skeleton for producing a competition submission file.

    NOTE(review): fit/predict/score are unimplemented and ``submission_``
    is never assigned, so ``submit`` raises AttributeError -- this class
    appears to be work in progress; confirm.
    """
    def __init__(self, estimator, cv):
        self._estimator = estimator
        self._cv = cv
    def fit(self, X, y):
        """Retrain best model on entire training set."""
        pass
    def predict(self, X):
        # TODO: produce predictions from the retrained estimator.
        pass
    def score(self, X, y):
        # TODO: evaluate the retrained estimator.
        pass
    def submit(self):
        """Saves submission"""
        filename = "mysubmission" + str(self._cv) + ".txt"
        self.submission_.to_csv(filename)
def main():
    """Entry point: run the full cross-validation model-selection pipeline."""
    pipe = Pipeline().fit()
if __name__ == "__main__":
    main()
#%%
| john-james-ai/Ames | src/pipeline_v3.py | pipeline_v3.py | py | 41,699 | python | en | code | 0 | github-code | 13 |
20259393504 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Populate documentation for a release.
This will introduce one-off changes in
* ``README.rst``
* ``docs/index.rst``
* ``docs/python/binary-extension.rst``
* ``DEVELOPMENT.rst``
that are not intended to be checked into ``main`` (except maybe
to be reverted after a release).
These changes will cause ``nox --session lint`` to fail because it will make
those documents look incorrect to the ``check_doc_templates.py``
script.
"""
import importlib.machinery
import os
# Directory containing this script, and the repository root derived from it.
_SCRIPTS_DIR = os.path.dirname(__file__)
_ROOT_DIR = os.path.dirname(_SCRIPTS_DIR)
# Documents to populate, paired with their ``.release.template`` sources.
README_FILE = os.path.join(_ROOT_DIR, "README.rst")
RELEASE_README_FILE = os.path.join(_ROOT_DIR, "README.rst.release.template")
INDEX_FILE = os.path.join(_ROOT_DIR, "docs", "index.rst")
RELEASE_INDEX_FILE = os.path.join(
    _ROOT_DIR, "docs", "index.rst.release.template"
)
DEVELOPMENT_TEMPLATE = os.path.join(_ROOT_DIR, "DEVELOPMENT.rst.template")
DEVELOPMENT_FILE = os.path.join(_ROOT_DIR, "DEVELOPMENT.rst")
def get_version():
    """Get the current version from ``setup.py``.

    Assumes that importing ``setup.py`` will have no side-effects (i.e.
    assumes the behavior is guarded by ``if __name__ == "__main__"``).

    Returns:
        str: The current version in ``setup.py``.
    """
    filename = os.path.join(_ROOT_DIR, "setup.py")
    # ``Loader.load_module()`` is deprecated (removed in Python 3.12); use the
    # spec/exec_module API instead.
    spec = importlib.util.spec_from_file_location("setup", filename)
    setup_mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(setup_mod)
    return setup_mod.VERSION
def populate_readme(
    version,
    linux_run,
    windows_run,
    coveralls_build,
    macos_run,
):
    """Populates ``README.rst`` with release-specific data.

    This is because ``README.rst`` is used on PyPI.

    Args:
        version (str): The current version.
        linux_run (int): The GitHub Actions run ID (for Linux) corresponding to
            the release.
        windows_run (int): The GitHub Actions run ID (for Windows)
            corresponding to the release.
        coveralls_build (Union[str, int]): The Coveralls.io build ID
            corresponding to the release.
        macos_run (int): The GitHub Actions run ID (for macOS) corresponding to
            the release.
    """
    # Collect all template placeholders in one place.
    replacements = {
        "version": version,
        "linux_run": linux_run,
        "windows_run": windows_run,
        "coveralls_build": coveralls_build,
        "macos_run": macos_run,
    }
    with open(RELEASE_README_FILE, "r") as handle:
        template = handle.read()
    rendered = template.format(**replacements)
    with open(README_FILE, "w") as handle:
        handle.write(rendered)
def populate_index(
    version,
    linux_run,
    windows_run,
    coveralls_build,
    macos_run,
):
    """Populates ``docs/index.rst`` with release-specific data.

    Args:
        version (str): The current version.
        linux_run (int): The GitHub Actions run ID (for Linux) corresponding to
            the release.
        windows_run (int): The GitHub Actions run ID (for Windows)
            corresponding to the release.
        coveralls_build (Union[str, int]): The Coveralls.io build ID
            corresponding to the release.
        macos_run (int): The GitHub Actions run ID (for macOS) corresponding to
            the release.
    """
    # Collect all template placeholders in one place.
    replacements = {
        "version": version,
        "linux_run": linux_run,
        "windows_run": windows_run,
        "coveralls_build": coveralls_build,
        "macos_run": macos_run,
    }
    with open(RELEASE_INDEX_FILE, "r") as handle:
        template = handle.read()
    rendered = template.format(**replacements)
    with open(INDEX_FILE, "w") as handle:
        handle.write(rendered)
def populate_development(version):
    """Populates ``DEVELOPMENT.rst`` with release-specific data.

    This is because ``DEVELOPMENT.rst`` is used in the Sphinx documentation.

    Args:
        version (str): The current version.
    """
    with open(DEVELOPMENT_TEMPLATE, "r") as handle:
        template = handle.read()
    # Both the git revision and the Read-the-Docs version pin to this release.
    rendered = template.format(revision=version, rtd_version=version)
    with open(DEVELOPMENT_FILE, "w") as handle:
        handle.write(rendered)
def main():
    """Populate the templates with release-specific fields.

    Requires user input for the GitHub Actions (Linux, macOS and Windows)
    and Coveralls.io build IDs.
    """
    version = get_version()
    # Prompt order is part of the established workflow; keep it stable.
    linux_run = input("Linux GitHub Actions Run ID: ")
    windows_run = input("Windows GitHub Actions Run ID: ")
    coveralls_build = input("Coveralls Build ID: ")
    macos_run = input("macOS GitHub Actions Run ID: ")
    release_args = (version, linux_run, windows_run, coveralls_build, macos_run)
    populate_readme(*release_args)
    populate_index(*release_args)
    populate_development(version)


if __name__ == "__main__":
    main()
| dhermes/bezier | scripts/doc_template_release.py | doc_template_release.py | py | 5,336 | python | en | code | 230 | github-code | 13 |
42386709326 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
import time
# ----------------------------- PREPARE OPTIONS ----------------------------- #
URL = "https://orteil.dashnet.org/experiments/cookie"
# First upgrade-buying pass happens 5 seconds after start.
TIMEOUT = time.time() + 5
FIVE_MIN = time.time() + 60*5 #5 minutes
options = webdriver.ChromeOptions()
# Keep the browser window open after the script ends.
options.add_experimental_option("detach", True)
# ----------------------------- GETTING THE URL ----------------------------- #
s = Service(ChromeDriverManager().install())
driver = webdriver.Chrome(service=s, options=options)
driver.maximize_window()
driver.get(URL)
# ----------------------------- Game Automation ----------------------------- #
cookie = driver.find_element(By.ID, "cookie")
items = driver.find_elements(By.CSS_SELECTOR, "#store div")
# Element ids of the store upgrades, in display order.
items_ids = [item.get_attribute("id") for item in items]
print(items_ids)
while True:
    cookie.click()
    # Every 5 seconds, pause clicking and buy the best affordable upgrade.
    if time.time() > TIMEOUT:
        prices = driver.find_elements(By.CSS_SELECTOR, "#store b")
        items_prices = []
        for price in prices:
            text = price.text
            if text != "":
                # Label looks like "Cursor - 15,000"; take the numeric part.
                cost = int(text.split("-")[1].strip().replace(",", ""))
                items_prices.append(cost)
        shop_dictionary = dict(zip(items_ids, items_prices))
        money = int(driver.find_element(By.ID, "money").text.replace(",", ""))
        # Upgrades we can currently afford.
        possible_upgrades = {
            item_id: cost
            for item_id, cost in shop_dictionary.items()
            if cost < money
        }
        # BUGFIX: max() on an empty dict raises ValueError when nothing is
        # affordable yet (e.g. early game); skip buying instead of crashing.
        if possible_upgrades:
            maximum_upgrade = max(possible_upgrades, key=possible_upgrades.get)
            driver.find_element(By.ID, maximum_upgrade).click()
        TIMEOUT = time.time() + 5
    # After 5 minutes stop the bot and check cookies per s
    if time.time() > FIVE_MIN:
        cookie_per_s = driver.find_element(By.ID, "cps").text
        print(cookie_per_s)
        break
| Carmui/100-days-Python | Day 48 PROJECT/Cookie_Game_Bot.py | Cookie_Game_Bot.py | py | 2,072 | python | en | code | 0 | github-code | 13 |
11420427417 | import boto3
from sqs_client.contracts import SqsConnection as SqsConnectionBase
class SqsConnection(SqsConnectionBase):
    """Owns the boto3 SQS resource and low-level client.

    Credentials, region and endpoint are optional; when omitted, boto3
    falls back to its default credential/configuration chain.
    """

    def __init__(
        self,
        region_name: str = None,
        access_key: str = None,
        secret_key: str = None,
        endpoint_url: str = None,
    ):
        self._access_key = access_key
        self._secret_key = secret_key
        self._endpoint_url = endpoint_url
        self._region_name = region_name
        self._queue_url = None
        self._load_resource()
        self._load_client()

    def set_queue(self, queue_url: str):
        """Remember *queue_url* as the default queue for later calls."""
        self._queue_url = queue_url

    def get_queue_resource(self, queue_url: str = None):
        """Return a boto3 Queue resource for *queue_url* (or the stored one)."""
        self._set_queue(queue_url)
        return self.resource.Queue(self._queue_url)

    def _set_queue(self, queue_url: str = None):
        # Keep the previously stored queue when no new URL is given.
        self._queue_url = queue_url if queue_url else self._queue_url
        if not self._queue_url:
            # ValueError is more precise than the bare Exception raised before;
            # as a subclass of Exception, existing handlers still match.
            raise ValueError("Queue is not defined.")

    def _load_resource(self):
        """Create the boto3 SQS service resource."""
        session = boto3.Session(
            aws_access_key_id=self._access_key,
            aws_secret_access_key=self._secret_key,
        )
        self.resource = session.resource(
            "sqs", endpoint_url=self._endpoint_url, region_name=self._region_name
        )

    def _load_client(self):
        """Create the low-level boto3 SQS client."""
        self.client = boto3.client(
            "sqs",
            aws_access_key_id=self._access_key,
            aws_secret_access_key=self._secret_key,
            endpoint_url=self._endpoint_url,
            region_name=self._region_name,
        )
| jptavarez/python-sqs-client | sqs_client/connection.py | connection.py | py | 1,559 | python | en | code | 7 | github-code | 13 |
1732534886 | """
DECANUM - Robot Inventor MicroPython Software -
Project : Guidage vehicule 2 pairMotor avec joystick ( motor)
: et mémorisation du circuit.
Application : carguidage.py
Auth : remybeaudenon@yahoo.com
Date : 06/2023
"""
version = "v1p1"  # installer version; used to purge files from older releases
import gc, os, umachine, ubinascii
gc.collect()
from mindstorms import MSHub
hub = MSHub()
lm = hub.light_matrix
lm.set_orientation('right')
lm.show_image('SQUARE')
sp = hub.speaker
sp.beep(72)  # audible cue that the installer has started
# Remove any files from older versions
for fn in os.listdir("/"):
    if len(fn) > 10:
        ver = None
        # Versioned python modules, e.g. "mcrisolver_v1p0.py".
        if fn[-3:] == ".py" and (
            fn[:-7] == "mcricolors_" or
            fn[:-7] == "mcrimaps_" or
            fn[:-7] == "mcrisolver_" or
            fn[:-7] == "mindcuberri_"
        ):
            ver = fn[-7:-3]
        # Versioned binary tables, e.g. "mcrimtab1_v1p0.bin".
        elif fn[-4:] == ".bin" and (
            fn[:-8] == "mcrimtab1_" or
            fn[:-8] == "mcrimtab4_"
        ):
            ver = fn[-8:-4]
        # Plain string comparison works for version strings like "v1p0" < "v1p1".
        if ver != None and ver < version:
            print("DELETING: "+fn)
            os.unlink(fn)
def file_exists(fn):
    """Return True when *fn* exists (MicroPython lacks os.path.exists)."""
    try:
        # os.stat raises OSError for a missing path on both MicroPython and
        # CPython; narrowed from a bare except that could hide real errors.
        return os.stat(fn) is not None
    except OSError:
        return False
found = 0  # number of embedded files extracted so far

def open_bin(fn):
    """Start writing extraction output file *fn* (state tracked via globals)."""
    global sp, ofn, of, on, found
    sp.beep(67)  # audible progress cue
    ofn = fn  # current output file name
    of = open(ofn, 'wb')
    on = 0  # chunks written so far; drives the clock animation in wbin()
    found += 1
def wbin(data):
    """Append one chunk to the current output file, animating the display."""
    global of, on
    of.write(data)
    on += 1
    # Advance the 12-frame clock animation every 50 chunks.
    if on % 50 == 0:
        lm.show_image('CLOCK'+str(1+(int(on/50)%12)))
def close_of():
    """Close the current output file and log its final size."""
    global of, ofn
    of.close()
    of = None
    # os.stat(...)[6] is the file size in bytes.
    print("SAVED: "+ofn+" "+str(os.stat(ofn)[6])+"B")
    gc.collect()
print("Installing...")
open_bin("mcrimtab1_"+version+".bin")
wbin(b"\x0c\x33\xf5\x0e\xff\xff\x03\x53\xff\x03\x55\xff\x09\xf9\xff\x03\x54\xff\x11\x35\xf3\x06\x88\xff\x0c\xff\xff\x0a\xf9\xff\x06\x86\xff\x00\xfa\xff\x0b\xf9\xff\x0d\xff\xff\x02\x98\xff\x02\xfa\xff\x03\x5b\xff\x05\x33\xff\x04\x4b\xff\x01\xfa\xff\x00\x98\xff\x08\x6e\xf5\x06\x96\xf3\x08\xee\xf5\x03\x6c\xff\x08\x5e\xff\x09\x96\xf3\x08\x8e\xf5\x08\x9e\x59\x0b\xe8\xf5\x06\x39\xff\x08\x7e\xf5\x09\xe8\xf5\x0a\xe8\xf5\x0b\x96\xf3\x0c\x96\xf3\x0e\xc3\xf6\x03\x3c\xf3\x06\x39\xfb\x05\xb4\xf8\x04\xb2\xf3\x02\xc0\x33\x03\x5c")
wbin(b"\xff\x0f\xc5\xe8\xe6\xf3\xff\xff\x08\xd1\xe1\xe1\xe1\xf6\xff\x03\x99\x80\x0b\xe2\xc2\xff\x05\xd4\xc6\xc8\xc6\x36\xff\x07\xbc\x0e\x2e\x9d\x7e\xff\x11\x81\xb9\xab\xc1\xff\xff\x02\xa2\xd9\x87\x3a\x2d\xff\x0f\x77\xb2\x70\xe7\x46\xf5\x0f\xe5\x02\x8b\x33\x09\xff\x09\x5d\x3e\x5d\x3e\xbc\xff\x05\x3c\x5e\x3c\x0d\x2c\xff\x05\x35\x5e\x8e\x6d\x35\xf4\x00\x2e\xfc\xff\xff\xff\xff\x11\xc0\xac\xbd\x9c\xac\x2b\x0f\xd1\x28\x08\x66\x2d\x2e\x08\x1d\x2c\x2e\x6d\x03\x22\x11\xbb\x25\x90\xc9\xf0\xff\x0f\x60\x36\x5e\x85\xf2\xff\x03")
wbin(b"\xab\xc4\xe6\x4a\x29\xf2\x05\x85\xe2\x60\x36\xfc\xff\x0f\x96\x00\xb2\xe8\xf5\xff\x03\x99\x0c\xbe\x5b\xf2\xff\x05\xb2\xe8\xc3\x96\xf0\xff\x03\xb0\x4a\x8c\xae\x94\xf5")
close_of()
# Install MindCuber-RI v1p1 files
prj = "/projects/"
# NOTE(review): eval() on the .slots file is only safe because the file is
# produced locally by the hub OS; never use this on untrusted input.
with open(prj+".slots","r") as f:
    slots = eval(f.read())
for s in slots:
    base = prj+str(slots[s]['id'])
    # Filename used by latest hub OS
    fn = base+"/__init__.py"
    if not file_exists(fn):
        # Try filename used by older versions of hub OS
        fn = base+".py"
    if file_exists(fn):
        with open(fn) as f:
            # The marker must appear within the first three lines.
            for i in range(3):
                l = f.readline()
            if l == "#MINDCUBERRI_FILES_V1P1#\n":
                print("SLOT: "+str(s)+" "+fn+" "+str(os.stat(fn)[6])+"B")
                print("Installing...")
                of = None
                b64 = False
                n = 0
                for l in f:
                    if l[:5] == "#FILE":
                        lfn = l[5:-1]
                        b64 = lfn[-4:] == ".bin"
                        open_bin(lfn)
                    elif l[:8] == "#ENDFILE":
                        close_of()
                    elif of is not None:
                        if b64:
                            # "#====" separator lines carry no payload.
                            if l[0:5] != "#====":
                                of.write(ubinascii.a2b_base64(l[1:-1]))
                        else:
                            of.write(l)
                        n += 1
                        if n % 50 == 0:
                            lm.show_image('CLOCK'+str(1+(int(n/50)%12)))
                if of is not None:
                    # Missing end of file
                    of.close()
                    print("ERROR: end file marker expected")
                    print("DELETING: "+ofn)
                    # BUGFIX: ofn is a string; str has no unlink() method, so
                    # the old "ofn.unlink()" crashed in this error path.
                    os.unlink(ofn)
os.sync()
# Report the overall result on the hub's display and serial console.
if found > 0:
    sp.beep(72)
    msg = "MindCuber-RI v1p1 "+str(found)+" files installed"
    print("FINISHED "+msg)
    lm.write(msg)  # scroll the message across the 5x5 light matrix
    lm.show_image('YES')
else:
    msg = "ERROR: no files found to install"
    print(msg)
    lm.write(msg)
    lm.show_image('NO')
# END
| remybeaudenon/lego_hub_pyfw | MCR/MCRIInstall-v1p1-4.py | MCRIInstall-v1p1-4.py | py | 4,927 | python | en | code | 0 | github-code | 13 |
8626824657 | import json
import argparse
from tqdm import tqdm
from pyserini.search.lucene import LuceneSearcher
from utils import read_json, write_json, read_config
def retrieve(queries, num_candidates, searcher, pid2title):
    """Run BM25 retrieval for each query and attach candidate passages.

    Args:
        queries: list of KILT-style query dicts (id/question/answers/labels).
        num_candidates: number of passages (k) to retrieve per query.
        searcher: pyserini LuceneSearcher over the passage index.
        pid2title: mapping from passage id to Wikipedia page title.

    Returns:
        List of dicts carrying the query fields plus a ranked ``ctxs`` list.
    """
    results = []
    for query in tqdm(queries):
        query_id = query['id']
        question = query['question']
        answers = query['answers']
        labels = query['labels']
        # Gold Wikipedia page ids collected from the provenance annotations.
        wiki_ids = set()
        for label in labels:
            if len(label):
                for prov_item in label['provenance']:
                    wiki_ids.add(str(prov_item['wikipedia_id']))
        hits = searcher.search(question, k=num_candidates)
        candidates = []
        for hit in hits:
            # Parse the stored document once instead of five times per hit.
            raw = json.loads(hit.raw)
            title = pid2title[hit.docid]
            candidates.append({
                'id': hit.docid,
                'wikipedia_id': raw['wikipedia_id'],
                'paragraph_id': raw['passage_id'],
                'start_span': raw['start_span'],
                'end_span': raw['end_span'],
                'title': title,
                # The stored contents begin with the title; strip it off.
                'text': raw['contents'][len(title):].strip(),
                'score': hit.score,
                'has_answer': str(raw['wikipedia_id']) in wiki_ids
            })
        results.append({
            'id': query_id,
            'question': question,
            'answers': answers,
            'ctxs': candidates
        })
    return results
def read_queries(query_file, trunc=None, verbose=False):
    """Load queries from JSON, unwrapping an optional {'content': [...]} shell.

    Args:
        query_file: path to the JSON query file.
        trunc: if set, keep only the first ``trunc`` queries (for test runs).
        verbose: print a note when truncating.

    Returns:
        List of query dicts.
    """
    queries = read_json(query_file)
    # Some dumps wrap the query list in a {'content': [...]} object.
    if isinstance(queries, dict) and 'content' in queries:
        queries = queries['content']
    if trunc:
        if verbose:
            print(f"Test run, truncating to {trunc} queries.")
        queries = queries[:trunc]
    return queries
def answer_queries(args):
    """End-to-end driver: load config, queries and index, retrieve, save."""
    cfg = read_config(args.config_path)
    queries = read_queries(args.query_file, trunc=args.trunc, verbose=args.verbose)
    # Maps passage id -> Wikipedia title, used to build candidate records.
    pid2title = read_json(cfg['title_path'])
    searcher = LuceneSearcher(str(cfg['index_dir']))
    results = retrieve(queries,
                       args.num_cands,
                       searcher,
                       pid2title)
    write_json(args.out_file, results)
def main():
    """Parse command-line arguments and run retrieval."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--trunc', type=int,
                            default=-1, help='For testing. Only check first `trunc` queries.')
    arg_parser.add_argument('--num_cands', type=int,
                            required=True, help='Value of k (num. of candidates to retrieve).')
    arg_parser.add_argument('--config_path', type=str,
                            required=True, help='Path to config file')
    arg_parser.add_argument('--query_file', type=str,
                            required=True, help='Path to query file')
    arg_parser.add_argument('--out_file', type=str, required=True,
                            help='Path of output file.')
    arg_parser.add_argument('--verbose', action='store_true',
                            help='More detailed print statements.')
    args = arg_parser.parse_args()
    # Non-positive values mean "no truncation".
    if args.trunc <= 0:
        args.trunc = None
    answer_queries(args)
| VedangW/upr-kilt | bm25/search.py | search.py | py | 3,359 | python | en | code | 0 | github-code | 13 |
37923025538 | ## @file AthenaPoolCnvSvc_jobOptions.py
## @brief AthenaPoolCnvSvc job options file to illustrate available AthenaPoolCnvSvc properties.
## @author Peter van Gemmeren <gemmeren@bnl.gov>
## $Id: AthenaPoolCnvSvc_jobOptions.py,v 1.13 2008-12-04 20:54:31 gemmeren Exp $
###############################################################
#
# AthenaPoolCnvSvc job options file
# Principally for documentation, to illustrate available AthenaPoolCnvSvc properties
#
#==============================================================
# The next lines tell Athena to load the AthenaPoolCnvSvc, and also where to find it.
## get a handle to the Service Manager
from AthenaCommon.AppMgr import ServiceMgr
from AthenaPoolCnvSvc.AthenaPoolCnvSvcConf import AthenaPoolCnvSvc
svcMgr += AthenaPoolCnvSvc()
# Display detailed size and timing statistics for writing and reading AthenaPool objects.
svcMgr.AthenaPoolCnvSvc.UseDetailChronoStat = False
# Prefix for top level POOL container.
svcMgr.AthenaPoolCnvSvc.PoolContainerPrefix = "CollectionTree"
# Naming hint policy for top level POOL container. E.G.:
# - "", no hint will cause all objects to be written to the same tree (requires branches).
# - "<type>", use the data object type as container name (tree per type).
# - "<type>/<key>", use the data object type and key as container name (tree per type/key).
svcMgr.AthenaPoolCnvSvc.TopLevelContainerName = ""
# Naming hint policy for top level POOL branching. E.G.:
# - "", no hint will cause all objects of a tree to be written to the same branch.
# - "<type>", use the data object type as branch name (required if type is not used for tree name).
# - "<type>/<key>", use the data object type and key as branch name.
svcMgr.AthenaPoolCnvSvc.SubLevelBranchName = "<type>/<key>"
# MaxFileSizes, vector with maximum file sizes for Athena POOL output files.
# Set domain MaxSize default for all Databases.
# Files larger than 5GB are not supported by some DDM tools and are disallowed by ATLAS policy.
# They should only be produced for private use or in special cases.
svcMgr.AthenaPoolCnvSvc.MaxFileSizes = [ "10000000000" ]
# Set MaxSize for a Database only
svcMgr.AthenaPoolCnvSvc.MaxFileSizes += [ "<DatabaseName> = <value>" ]
# NOTE: the entries above are illustrative placeholders; reset to empty here.
svcMgr.AthenaPoolCnvSvc.MaxFileSizes = [ ]
# PoolAttributes, vector with names and values of technology specific attributes for POOL.
# Set Domain Attribute
svcMgr.AthenaPoolCnvSvc.PoolAttributes += [ "<attributeName> = '<value>'" ]
# Set Database Attribute
svcMgr.AthenaPoolCnvSvc.PoolAttributes += [ "DatabaseName = '<fileName>'; <attributeName> = '<value>'" ]
# Set Container Attribute
svcMgr.AthenaPoolCnvSvc.PoolAttributes += [ "DatabaseName = '<fileName>'; ContainerName = '<type>/<key>'; <attributeName> = '<value>'" ]
# Set Input Database Attribute - set attribute for each incoming file/db
svcMgr.AthenaPoolCnvSvc.InputPoolAttributes += [ "<attributeName> = '<value>'" ]
# Set Input Database Attribute - set attribute for each incoming file/db for a particular container/tree
svcMgr.AthenaPoolCnvSvc.InputPoolAttributes += [ "ContainerName = '<treeName>'; <attributeName> = '<value>'" ]
# To get an Attribute value printed to the log while commit, use the same syntax as for
# setting attributes, but replace the <value> with its <dataType> (int, DbLonglong, double).
# E.G.: get Container Attribute
svcMgr.AthenaPoolCnvSvc.PoolAttributes += [ "DatabaseName = '<fileName>'; ContainerName = '<type>/<key>'; <attributeName> = '<dataType>'" ]
# NOTE: again illustrative only; the attribute list is reset to empty here.
svcMgr.AthenaPoolCnvSvc.PoolAttributes = [ ]
## backward compat
AthenaPoolCnvSvc = svcMgr.AthenaPoolCnvSvc
| rushioda/PIXELVALID_athena | athena/Database/AthenaPOOL/AthenaPoolCnvSvc/share/AthenaPoolCnvSvc_jobOptions.py | AthenaPoolCnvSvc_jobOptions.py | py | 3,584 | python | en | code | 1 | github-code | 13 |
16360911007 | # https://leetcode.com/problems/kth-largest-element-in-a-stream/
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# https://leetcode.com/problems/reverse-linked-list
class Solution:
    """Iterative in-place reversal of a singly linked list (LeetCode 206)."""

    # Annotations are quoted: ListNode is only defined in a comment on
    # LeetCode's side, so unquoted annotations raise NameError on import.
    def reverseList(self, head: "Optional[ListNode]") -> "Optional[ListNode]":
        """Return the head of the reversed list; O(n) time, O(1) space."""
        prev = None
        while head:
            cur = head
            head = head.next
            # Re-point the detached node at the already-reversed prefix.
            cur.next = prev
            prev = cur
        return prev
15599619490 | from collections import deque
def hot_potato(players, n):
    """Simulate the hot-potato game: every n-th toss removes a player.

    Args:
        players: iterable of player names in starting order.
        n: the count at which the current holder is eliminated.

    Returns:
        Tuple ``(removed, winner)``: names in removal order, and the survivor.
    """
    queue = deque(players)
    counter = 1
    removed = []
    while len(queue) > 1:
        player = queue.popleft()
        if counter == n:
            # n-th toss: this player is out and does not rejoin the queue.
            removed.append(player)
            counter = 1
        else:
            counter += 1
            queue.append(player)
    return removed, queue.popleft()


def main():
    """Read the players and n from stdin, print removals and the winner."""
    players = input().split(" ")
    n = int(input())
    removed, winner = hot_potato(players, n)
    for player in removed:
        print(f"Removed {player}")
    print(f"Last is {winner}")


# Guard added so importing this module does not block on input().
if __name__ == "__main__":
    main()
| PowerCell12/Programming_Advanced_Python | Lists as Stacks and Queues/Lab/05. Hot Potato.py | 05. Hot Potato.py | py | 346 | python | en | code | 0 | github-code | 13 |
23247994356 | # encoding: utf-8
"""
Created by misaka-10032 (longqic@andrew.cmu.edu).
All rights reserved.
DFS. A bit faster than BFS, because str (immutable) operation is time consuming.
"""
__author__ = 'misaka-10032'
class Solution(object):
    """Phone-keypad letter combinations (LeetCode 17) via recursive DFS."""

    # Keypad mapping: index is the digit, value holds its letters.
    letters = [
        ' ',     # 0
        '',      # 1
        'abc',   # 2
        'def',   # 3
        'ghi',   # 4
        'jkl',   # 5
        'mno',   # 6
        'pqrs',  # 7
        'tuv',   # 8
        'wxyz',  # 9
    ]

    def dfs(self, digits, curr, sol):
        """Depth-first expansion of *curr* over the remaining digits.

        :param curr: list[char] — letters chosen so far
        :param sol: output list collecting completed combinations
        """
        depth = len(curr)
        if depth >= len(digits):
            word = ''.join(curr)
            # The empty prefix (no digits at all) yields no combination.
            if word:
                sol.append(word)
            return
        for ch in self.letters[ord(digits[depth]) - ord('0')]:
            curr.append(ch)
            self.dfs(digits, curr, sol)
            curr.pop()

    def letterCombinations(self, digits):
        """
        :type digits: str
        :rtype: List[str]
        """
        sol = []
        # '1' maps to no letters; drop it before expanding.
        self.dfs(digits.replace('1', ''), [], sol)
        return sol
| misaka-10032/leetcode | coding/00017-letter-comb-of-phone-number/solution.py | solution.py | py | 1,180 | python | en | code | 1 | github-code | 13 |
15400312097 | """Coordinator for E3DC integration."""
from datetime import timedelta, datetime
import logging
from time import time
from typing import Any
import pytz
from e3dc import E3DC # Missing Exports:; SendError,
from e3dc._rscpLib import rscpFindTag
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.util.dt import as_timestamp, start_of_local_day
from homeassistant.exceptions import ConfigEntryAuthFailed, HomeAssistantError
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import ( # CoordinatorEntity,; UpdateFailed,
DataUpdateCoordinator,
)
from .const import CONF_RSCPKEY, DOMAIN
_LOGGER = logging.getLogger(__name__)
_STAT_REFRESH_INTERVAL = 60
class E3DCCoordinator(DataUpdateCoordinator[dict[str, Any]]):
"""E3DC Coordinator, fetches all relevant data and provides proxies for all service calls."""
e3dc: E3DC = None
_mydata: dict[str, Any] = {}
_sw_version: str = ""
_update_guard_powersettings: bool = False
_timezone_offset: int = 0
_next_stat_update: float = 0
    def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
        """Initialize E3DC Coordinator and connect."""
        # Connection parameters come from the config entry; any of them may be
        # absent, hence the Optional typing.
        self.host: str | None = config_entry.data.get(CONF_HOST)
        self.username: str | None = config_entry.data.get(CONF_USERNAME)
        self.password: str | None = config_entry.data.get(CONF_PASSWORD)
        self.rscpkey: str | None = config_entry.data.get(CONF_RSCPKEY)
        # The unique id doubles as the device identifier and must be a string.
        assert isinstance(config_entry.unique_id, str)
        self.uid: str = config_entry.unique_id
        # Poll the unit every 10 seconds.
        super().__init__(
            hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=10)
        )
    async def async_connect(self):
        """Establish connection to E3DC.

        Creates the blocking E3DC client in an executor job, then caches the
        static device properties in the coordinator data dict.

        Raises:
            ConfigEntryAuthFailed: if the client cannot be created/logged in.
        """
        try:
            self.e3dc: E3DC = await self.hass.async_add_executor_job(
                create_e3dcinstance,
                self.username,
                self.password,
                self.host,
                self.rscpkey,
            )
        except Exception as ex:
            raise ConfigEntryAuthFailed from ex
        # Static values: read once here, never refreshed by the poll loop.
        self._mydata["system-derate-percent"] = self.e3dc.deratePercent
        self._mydata["system-derate-power"] = self.e3dc.deratePower
        self._mydata["system-additional-source-available"] = (
            self.e3dc.externalSourceAvailable != 0
        )
        self._mydata[
            "system-battery-installed-capacity"
        ] = self.e3dc.installedBatteryCapacity
        self._mydata["system-battery-installed-peak"] = self.e3dc.installedPeakPower
        self._mydata["system-ac-maxpower"] = self.e3dc.maxAcPower
        self._mydata["system-battery-charge-max"] = self.e3dc.maxBatChargePower
        self._mydata["system-battery-discharge-max"] = self.e3dc.maxBatDischargePower
        self._mydata["system-mac"] = self.e3dc.macAddress
        self._mydata["model"] = self.e3dc.model
        self._mydata[
            "system-battery-discharge-minimum-default"
        ] = self.e3dc.startDischargeDefault
        # Idea: Maybe Port this to e3dc lib, it can query this in one go during startup.
        self._sw_version = await self._async_e3dc_request_single_tag(
            "INFO_REQ_SW_RELEASE"
        )
        await self._load_timezone_settings()
async def _async_e3dc_request_single_tag(self, tag: str) -> Any:
"""Send a single tag request to E3DC, wraps lib call for async usage, supplies defaults."""
# Signature for reference: Tag, Retries, Keepalive
result = await self.hass.async_add_executor_job(
self.e3dc.sendRequestTag, tag, 3, True
)
return result
    async def _async_update_data(self) -> dict[str, Any]:
        """Update all data required by our entities in one go.

        Polls live status, power settings (unless an entity write is in
        flight), manual-charge state and — rate limited — daily DB metrics.
        """
        # Now we've to update all dynamic values in self._mydata,
        # connect did already retrieve all static values.
        _LOGGER.debug("Polling general status information")
        poll_data: dict[str, Any] = await self.hass.async_add_executor_job(
            self.e3dc.poll, True
        )
        self._process_poll(poll_data)
        # Skip power settings while a write is pending so the optimistic
        # value set by the entity is not overwritten with stale data.
        if self._update_guard_powersettings is False:
            _LOGGER.debug("Poll power settings")
            power_settings: dict[
                str, Any | None
            ] = await self.hass.async_add_executor_job(
                self.e3dc.get_power_settings, True
            )
            self._process_power_settings(power_settings)
        else:
            _LOGGER.debug("Not polling powersettings, they are updating right now")
        _LOGGER.debug("Polling manual charge information")
        request_data = await self.hass.async_add_executor_job(
            self.e3dc.sendRequest, ("EMS_REQ_GET_MANUAL_CHARGE", "None", None), 3, True
        )
        self._process_manual_charge(request_data)
        # Only poll power statstics once per minute. E3DC updates it only once per 15
        # minutes anyway, this should be a good compromise to get the metrics shortly
        # before the end of the day.
        if self._next_stat_update < time():
            _LOGGER.debug("Polling today's power metrics")
            db_data_today: dict[str, Any] = await self.hass.async_add_executor_job(
                self.e3dc.get_db_data_timestamp,
                self._get_db_data_day_timestamp(),
                86400,
                True,
            )
            self._process_db_data_today(db_data_today)
            self._next_stat_update = time() + _STAT_REFRESH_INTERVAL
            # TODO: Reduce interval further, but take start_ts into account to get an
            # end of day reading of the metric.
        else:
            _LOGGER.debug("Skipping power metrics poll.")
        return self._mydata
def _process_power_settings(self, power_settings: dict[str, Any | None]):
"""Process retrieved power settings."""
self._mydata["pset-limit-charge"] = power_settings["maxChargePower"]
self._mydata["pset-limit-discharge"] = power_settings["maxDischargePower"]
self._mydata["pset-limit-discharge-minimum"] = power_settings[
"dischargeStartPower"
]
self._mydata["pset-limit-enabled"] = power_settings["powerLimitsUsed"]
self._mydata["pset-powersaving-enabled"] = power_settings["powerSaveEnabled"]
self._mydata["pset-weatherregulationenabled"] = power_settings[
"weatherRegulatedChargeEnabled"
]
def _process_poll(self, poll_data: dict[str, Any]):
self._mydata["additional-production"] = poll_data["production"]["add"]
self._mydata["autarky"] = poll_data["autarky"]
self._mydata["battery-charge"] = max(0, poll_data["consumption"]["battery"])
self._mydata["battery-discharge"] = (
min(0, poll_data["consumption"]["battery"]) * -1
)
self._mydata["battery-netchange"] = poll_data["consumption"]["battery"]
self._mydata["grid-consumption"] = max(0, poll_data["production"]["grid"])
self._mydata["grid-netchange"] = poll_data["production"]["grid"]
self._mydata["grid-production"] = min(0, poll_data["production"]["grid"]) * -1
self._mydata["house-consumption"] = poll_data["consumption"]["house"]
self._mydata["selfconsumption"] = poll_data["selfConsumption"]
self._mydata["soc"] = poll_data["stateOfCharge"]
self._mydata["solar-production"] = poll_data["production"]["solar"]
self._mydata["wallbox-consumption"] = poll_data["consumption"]["wallbox"]
def _process_db_data_today(self, db_data: dict[str, Any | None]) -> None:
"""Process retrieved db data settings."""
self._mydata["db-day-autarky"] = db_data["autarky"]
self._mydata["db-day-battery-charge"] = db_data["bat_power_in"]
self._mydata["db-day-battery-discharge"] = db_data["bat_power_out"]
self._mydata["db-day-grid-consumption"] = db_data["grid_power_out"]
self._mydata["db-day-grid-production"] = db_data["grid_power_in"]
self._mydata["db-day-house-consumption"] = db_data["consumption"]
self._mydata["db-day-selfconsumption"] = db_data["consumed_production"]
self._mydata["db-day-solar-production"] = db_data["solarProduction"]
self._mydata["db-day-startts"] = db_data["startTimestamp"]
    def _process_manual_charge(self, request_data) -> None:
        """Parse manual charge status.

        The rscpFindTag results are tuples; index [2] carries the payload.
        """
        self._mydata["manual-charge-active"] = rscpFindTag(
            request_data, "EMS_MANUAL_CHARGE_ACTIVE"
        )[2]
        # these seem to be kAh per individual cell, so this is considered very strange.
        # To get this working for a start, we assume 3,65 V per cell, taking my own unit
        # as a base, but this obviously will need some real work to base this on
        # current voltages.
        self._mydata["manual-charge-energy"] = (
            3.65 * rscpFindTag(request_data, "EMS_MANUAL_CHARGE_ENERGY_COUNTER")[2]
        )
        # The timestamp seem to correctly show the UTC Date when manual charging started
        # Not yet enabled, just for reference.
        # self._mydata["manual-charge-start"] = rscpFindTag(
        #     request_data, "EMS_MANUAL_CHARGE_LASTSTART"
        # )[2]
async def _load_timezone_settings(self):
"""Load the current timezone offset from the E3DC, using its local timezone data.
Required to correctly retrieve power statistics for today.
"""
try:
tz_name: str = await self._async_e3dc_request_single_tag(
"INFO_REQ_TIME_ZONE"
)
except:
_LOGGER.exception("Failed to loade timezone from E3DC")
# Once we have better exception handling available, we need to throw
# proper HomeAssistantErrors at this point.
raise
tz_offset: int | None = None
try:
tz_info: pytz.timezone = pytz.timezone(tz_name)
dt_tmp: datetime = datetime.now(tz_info)
tz_offset = dt_tmp.utcoffset().seconds
except pytz.UnknownTimeZoneError:
_LOGGER.exception("Failed to load timezone from E3DC")
if tz_offset is None:
try:
# Fallback to compute the offset using current times from E3DC:
ts_local: int = int(
await self._async_e3dc_request_single_tag("INFO_REQ_TIME")
)
ts_utc: int = int(
await self._async_e3dc_request_single_tag("INFO_REQ_UTC_TIME")
)
delta: int = ts_local - ts_utc
tz_offset = int(1800 * round(delta / 1800))
except:
_LOGGER.exception("Failed to load timestamps from E3DC")
# Once we have better exception handling available, we need to throw
# proper HomeAssistantErrors at this point.
raise
self._mydata["e3dc_timezone"] = tz_name
self._timezone_offset = tz_offset
    def _get_db_data_day_timestamp(self) -> int:
        """Get the local start-of-day timestamp for DB Query, needs some tweaking.

        The offset added below was derived in _load_timezone_settings().
        """
        today: datetime = start_of_local_day()
        today_ts: int = int(as_timestamp(today))
        _LOGGER.debug(
            "Midnight is %s, DB query timestamp is %s, applied offset: %s",
            today,
            today_ts,
            self._timezone_offset,
        )
        # tz_hass: pytz.timezone = pytz.timezone("Europe/Berlin")
        # today: datetime = datetime.now(tz_hass).replace(hour=0, minute=0, second=0, microsecond=0)
        # today_ts: int = today.timestamp()
        # Move to local time, the Timestamp needed by the E3DC DB queries are
        # not in UTC as they should be.
        today_ts += self._timezone_offset
        _LOGGER.debug(
            "Midnight DB query timestamp is %s, applied offset: %s",
            today_ts,
            self._timezone_offset,
        )
        return today_ts
    def device_info(self) -> DeviceInfo:
        """Return default device info structure.

        Note: this is a plain method (not a property); callers must invoke it.
        """
        return DeviceInfo(
            manufacturer="E3DC",
            model=self.e3dc.model,
            name=self.e3dc.model,
            connections={(dr.CONNECTION_NETWORK_MAC, self.e3dc.macAddress)},
            identifiers={(DOMAIN, self.uid)},
            sw_version=self._sw_version,
            configuration_url="https://s10.e3dc.com/",
        )
    async def async_set_weather_regulated_charge(self, enabled: bool) -> bool:
        """Enable or disable weather regulated charging.

        Returns True on success; raises HomeAssistantError if the write
        did not take effect.
        """
        _LOGGER.debug("Updating weather regulated charging to %s", enabled)
        # Guard keeps _async_update_data from overwriting the optimistic value
        # with stale data while the write is in flight.
        self._update_guard_powersettings = True
        self._mydata["pset-weatherregulationenabled"] = enabled
        try:
            new_value: bool = await self.hass.async_add_executor_job(
                self.e3dc.set_weather_regulated_charge, enabled, True
            )
        except:
            # NOTE(review): bare except also traps BaseException (e.g. task
            # cancellation); consider narrowing to `except Exception:`.
            _LOGGER.exception(
                "Failed to update weather regulated charging to %s", enabled
            )
            # Once we have better exception handling available, we need to throw
            # proper HomeAssistantErrors at this point.
            raise
        else:
            # Ignore newValue at this point, needs fixing e3dc lib.
            new_value = enabled
            self._mydata["pset-weatherregulationenabled"] = new_value
        finally:
            self._update_guard_powersettings = False
        if new_value != enabled:
            raise HomeAssistantError(
                f"Failed to update weather regulated charging to {enabled}"
            )
        _LOGGER.debug("Successfully updated weather regulated charging to %s", enabled)
        return True
    async def async_set_powersave(self, enabled: bool) -> bool:
        """Enable or disable SmartPower powersaving.

        Returns True on success; raises HomeAssistantError if the write
        did not take effect.
        """
        _LOGGER.debug("Updating powersaving to %s", enabled)
        # Guard keeps _async_update_data from overwriting the optimistic value
        # with stale data while the write is in flight.
        self._update_guard_powersettings = True
        self._mydata["pset-powersaving-enabled"] = enabled
        try:
            new_value: bool = await self.hass.async_add_executor_job(
                self.e3dc.set_powersave, enabled, True
            )
        except:
            # NOTE(review): bare except also traps BaseException (e.g. task
            # cancellation); consider narrowing to `except Exception:`.
            _LOGGER.exception("Failed to update powersaving to %s", enabled)
            # Once we have better exception handling available, we need to throw
            # proper HomeAssistantErrors at this point.
            raise
        else:
            # Ignore newValue at this point, needs fixing e3dc lib.
            new_value = enabled
            self._mydata["pset-powersaving-enabled"] = new_value
        finally:
            self._update_guard_powersettings = False
        if new_value != enabled:
            raise HomeAssistantError(f"Failed to update powersaving to {enabled}")
        _LOGGER.debug("Successfully updated powersaving to %s", enabled)
        return True
    async def async_clear_power_limits(self) -> None:
        """Clear any active power limit.

        Raises:
            HomeAssistantError: if the RSCP call fails or reports failure.
        """
        _LOGGER.debug("Clearing any active power limit.")
        try:
            # Call RSCP service.
            # no update guard necessary, as we're called from a service, not an entity
            result: int = await self.hass.async_add_executor_job(
                self.e3dc.set_power_limits, False, None, None, None, True
            )
        except Exception as ex:
            _LOGGER.exception("Failed to clear power limits")
            raise HomeAssistantError("Failed to clear power limits") from ex
        # Result handling mirrors async_set_power_limits: -1 is treated as
        # failure, 1 as accepted-but-suboptimal, anything else as success.
        if result == -1:
            raise HomeAssistantError("Failed to clear power limits")
        if result == 1:
            _LOGGER.warning("The given power limits are not optimal, continuing anyway")
        else:
            _LOGGER.debug("Successfully cleared the power limits")
async def async_set_power_limits(
self, max_charge: int | None, max_discharge: int | None
) -> None:
"""Set the given power limits and enable them."""
# Validate the arguments, at least one has to be set.
if max_charge is None and max_discharge is None:
raise ValueError(
"async_set_power_limits must be called with at least one of "
"max_charge or max_discharge."
)
if max_charge is not None and max_charge > self.e3dc.maxBatChargePower:
_LOGGER.warning("Limiting max_charge to %s", self.e3dc.maxBatChargePower)
max_charge = self.e3dc.maxBatChargePower
if max_discharge is not None and max_discharge > self.e3dc.maxBatDischargePower:
_LOGGER.warning(
"Limiting max_discharge to %s", self.e3dc.maxBatDischargePower
)
max_discharge = self.e3dc.maxBatDischargePower
_LOGGER.debug(
"Enabling power limits, max_charge: %s, max_discharge: %s",
max_charge,
max_discharge,
)
try:
# Call RSCP service.
# no update guard necessary, as we're called from a service, not an entity
result: int = await self.hass.async_add_executor_job(
self.e3dc.set_power_limits, True, max_charge, max_discharge, None, True
)
except Exception as ex:
_LOGGER.exception("Failed to set power limits")
raise HomeAssistantError("Failed to set power limits") from ex
if result == -1:
raise HomeAssistantError("Failed to set power limits")
if result == 1:
_LOGGER.warning("The given power limits are not optimal, continuing anyway")
else:
_LOGGER.debug("Successfully set the power limits")
async def async_manual_charge(self, charge_amount: int) -> None:
"""Start manual charging the given amount, zero will stop charging."""
# Validate the arguments
if charge_amount < 0:
raise ValueError("Charge amount must be positive or zero.")
_LOGGER.debug(
"Starting manual charge of: %s",
charge_amount,
)
try:
# Call RSCP service.
# no update guard necessary, as we're called from a service, not an entity
result_data = await self.hass.async_add_executor_job(
self.e3dc.sendRequest,
("EMS_REQ_START_MANUAL_CHARGE", "Uint32", charge_amount),
3,
True,
)
except Exception as ex:
_LOGGER.exception("Failed to initiate manual charging")
raise HomeAssistantError("Failed to initiate manual charging") from ex
result: bool = result_data[2]
if not result:
_LOGGER.warning("Manual charging could not be activated")
else:
_LOGGER.debug("Successfully started manual charging")
def create_e3dcinstance(username: str, password: str, host: str, rscpkey: str) -> E3DC:
    """Create the actual E3DC instance, this will try to connect and authenticate."""
    # The E3DC constructor connects eagerly, so a failure surfaces here.
    return E3DC(
        E3DC.CONNECT_LOCAL,
        username=username,
        password=password,
        ipAddress=host,
        key=rscpkey,
    )
| torbennehmer/hacs-e3dc | custom_components/e3dc_rscp/coordinator.py | coordinator.py | py | 19,363 | python | en | code | 22 | github-code | 13 |
35648439655 | import random
import numpy as np
from PIL import Image
from captcha.image import ImageCaptcha
NUMBER = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
LOW_CASE = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
            'v', 'w', 'x', 'y', 'z']
UP_CASE = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
           'V', 'W', 'X', 'Y', 'Z']

# Digits only are used for the captcha alphabet.
CAPTCHA_LIST = NUMBER
CAPTCHA_LEN = 4            # captcha length
CAPTCHA_HEIGHT = 60        # captcha image height
CAPTCHA_WIDTH = 160        # captcha image width


def random_captcha_text(char_set=CAPTCHA_LIST, captcha_size=CAPTCHA_LEN):
    """Return a random fixed-length string.

    :param char_set: list of candidate characters
    :param captcha_size: length of the generated string
    :return: the random captcha text
    """
    return ''.join(random.choice(char_set) for _ in range(captcha_size))
def gen_captcha_text_and_image(width=CAPTCHA_WIDTH, height=CAPTCHA_HEIGHT, save=None):
    """
    Generate a random captcha image.
    :param width: captcha image width
    :param height: captcha image height
    :param save: whether to save the image to disk (default None = no)
    :return: captcha text string, captcha image as a numpy array
    """
    image = ImageCaptcha(width=width, height=height)
    # Random captcha text (digits only, see CAPTCHA_LIST).
    captcha_text = random_captcha_text()
    captcha = image.generate(captcha_text)
    # Optionally save; assumes the ./img/ directory already exists -- TODO confirm.
    if save:
        image.write(captcha_text, './img/' + captcha_text + '.jpg')
    captcha_image = Image.open(captcha)
    # Convert the PIL image to a numpy array (H, W, C).
    captcha_image = np.array(captcha_image)
    return captcha_text, captcha_image
if __name__ == '__main__':
t, im = gen_captcha_text_and_image(save=True)
print(t, im.shape) # (60, 160, 3) | scusec/Data-Mining-for-Cybersecurity | Homework/2019/Task8/4/Code/captcha_gen.py | captcha_gen.py | py | 1,841 | python | en | code | 66 | github-code | 13 |
40614168980 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from config import *
sess = tf.InteractiveSession()
def weight_variable(shape):
    """Create a weight Variable initialised from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# Placeholders: x_ is a 9x10 board with 64 feature channels; y_ is the
# 1440-way move-probability label vector.
x_ = tf.placeholder("float", shape=[None, 9, 10, 64])
y_ = tf.placeholder("float", shape=[None, 1440])
# Five convolutional layers (5x5 then 3x3 kernels, 128 channels) plus one
# fully connected output layer.
W_conv1 = weight_variable([5, 5, 64, 128])
b_conv1 = bias_variable([128])
W_conv2 = weight_variable([3, 3, 128, 128])
b_conv2 = bias_variable([128])
W_conv3 = weight_variable([3, 3, 128, 128])
b_conv3 = bias_variable([128])
W_conv4 = weight_variable([3, 3, 128, 128])
b_conv4 = bias_variable([128])
W_conv5 = weight_variable([3, 3, 128, 128])
b_conv5 = bias_variable([128])
W_fc1 = weight_variable([2880 * 4, 1440])
b_fc1 = bias_variable([1440])
# x_image = tf.reshape(x_, [-1,9,10,64])
h_conv1 = tf.nn.relu(conv2d(x_, W_conv1) + b_conv1)
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3) + b_conv3)
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
# NOTE(review): a softmax on the last conv layer followed by another softmax
# on the FC output is unusual -- presumably intentional, but worth confirming.
h_conv5 = tf.nn.softmax(conv2d(h_conv4, W_conv5) + b_conv5)
# Flatten 9*10*128/4-style feature map to 2880*4 and project to 1440 moves.
h_flat = tf.reshape(h_conv5, [-1, 2880 * 4])
h_output = tf.nn.softmax(tf.matmul(h_flat, W_fc1) + b_fc1)
# Cross-entropy loss against the one-hot move labels.
loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(h_output), 1))
train_step = tf.train.AdadeltaOptimizer(0.01).minimize(loss)
# train_step = tf.train.GradientDescentOptimizer(100).minimize(loss)
correct_prediction = tf.equal(tf.argmax(h_output, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
# Restore a previous checkpoint if one exists (DATA_PATH comes from config);
# otherwise initialise all variables from scratch.
if os.path.exists(DATA_PATH + ".data-00000-of-00001"):
    saver.restore(sess, DATA_PATH)
    print('restore')
else:
    sess.run(tf.global_variables_initializer())
| VGxiaozhao/ChineseChess | CNN/model.py | model.py | py | 1,953 | python | en | code | 0 | github-code | 13 |
23965360860 | """
归并排序 n*log_2(n)
使用递归 先对二分的左右两个列表排序,然后对左右半部合并排序
"""
def mergeSort(alist):
    """Sort ``alist`` in place via recursive merge sort, O(n log n).

    Prints its progress ('Splitting' on entry, 'merging' on exit) exactly
    like the original teaching implementation.
    """
    print('Splitting', alist)
    if len(alist) > 1:
        mid = len(alist) // 2
        left = alist[:mid]
        right = alist[mid:]
        mergeSort(left)
        mergeSort(right)

        # Merge the two sorted halves back into alist.
        i = j = 0
        for k in range(len(alist)):
            take_left = j >= len(right) or (i < len(left) and left[i] < right[j])
            if take_left:
                alist[k] = left[i]
                i += 1
            else:
                alist[k] = right[j]
                j += 1
    print('merging', alist)
# Demo: sort a sample list in place and show the result.
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
mergeSort(alist)
print(alist)
| siyi-wind/cs-course-project | homework/7_6_1.py | 7_6_1.py | py | 1,012 | python | en | code | 3 | github-code | 13 |
25995737804 | #!/usr/bin/env python
# coding: utf-8
import keras
from keras.layers import Dropout, Activation, BatchNormalization, Dense, average, Lambda, Concatenate, Flatten
from keras.layers import Input, Conv2D, MaxPooling2D, concatenate, Dropout, AveragePooling2D, ConvLSTM2D, Conv3D, MaxPooling3D, GlobalAveragePooling3D, MaxPool3D, Convolution3D, ZeroPadding3D
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras import backend as K
from keras.utils import plot_model
# import pydot
import os
# os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
def conv2d_block(input_tensor, n_filters, kernel_size, strides=(2, 2)):
    """Conv2D -> BatchNorm -> ReLU building block (he_normal init, same padding)."""
    conv = Conv2D(n_filters, (kernel_size, kernel_size), strides=strides,
                  padding='same', kernel_initializer='he_normal')(input_tensor)
    normed = BatchNormalization()(conv)
    return Activation('relu')(normed)
def squeeze3d(layer):
    """Drop axis 1 of a tensor (expects that axis to have size 1); prints the shape for debugging."""
    print(layer.shape)
    return K.squeeze(layer,axis = 1)
def conv3d_block(input_tensor, n_filters = 16, kernel_size = 3, strides = (1,1,1)):
    """Conv3D -> BatchNorm -> ReLU building block (he_normal init, same padding)."""
    conv = Conv3D(n_filters, (kernel_size, kernel_size, kernel_size),
                  strides=strides, padding='same',
                  kernel_initializer='he_normal')(input_tensor)
    normed = BatchNormalization()(conv)
    return Activation('relu')(normed)
def convlstm2d_block(input_tensor, n_filters = 64, kernel_size = 3, strides = (1, 1)):
    """ConvLSTM2D layer (he_normal init, same padding) that returns full sequences."""
    layer = ConvLSTM2D(
        filters=n_filters,
        kernel_size=(kernel_size, kernel_size),
        strides=strides,
        padding='same',
        kernel_initializer='he_normal',
        recurrent_initializer='he_normal',
        return_sequences=True,
    )
    return layer(input_tensor)
def NetworkC3D(inp, num_classes = 101):
    """Build the C3D+ConvLSTM classification head on top of input tensor `inp`.

    :param inp: 5-D input tensor (frames, height, width, channels) -- assumed
        (16, 224, 224, 3); TODO confirm against C3D_f.
    :param num_classes: number of output classes (UCF-101 by default)
    :return: the logits tensor (no softmax applied here)
    """
    # Stage 1: one conv block, pooled over time and space.
    c1 = conv3d_block(inp, n_filters = 16)
    mp1 = MaxPooling3D(pool_size=(2,2,2), padding = 'same')(c1)
    # Stage 2: two conv blocks, spatial-only pooling.
    c2 = conv3d_block(mp1, n_filters = 32)
    c3 = conv3d_block(c2, n_filters = 32)
    mp2 = MaxPooling3D(pool_size=(1,2,2), padding = 'same')(c3)
    # Stage 3: three conv blocks, spatial-only pooling.
    c4 = conv3d_block(mp2, n_filters = 64)
    c5 = conv3d_block(c4, n_filters = 64)
    c6 = conv3d_block(c5, n_filters = 64)
    mp3 = MaxPooling3D(pool_size=(1,2,2), padding = 'same')(c6)
    # Stage 4: three conv blocks, spatial-only pooling.
    c7 = conv3d_block(mp3, n_filters = 64)
    c8 = conv3d_block(c7, n_filters = 64)
    c9 = conv3d_block(c8, n_filters = 64)
    mp4 = MaxPooling3D(pool_size=(1,2,2), padding = 'same')(c9)
    # Temporal modelling with a ConvLSTM, then global pooling + dropout.
    cl1 = convlstm2d_block(mp4)
    g1 = GlobalAveragePooling3D()(cl1)
    d1 = Dropout(rate = 0.5)(g1)
    res = Dense(num_classes, kernel_initializer='he_normal')(d1)
    # out = Lambda(squeeze3d)(res)
    out = res
    return out
def C3D_f():
    """Build the functional-API C3D+ConvLSTM model for 16-frame 224x224 RGB clips."""
    inp = Input(shape=(16, 224, 224, 3), name="input_1")
    return Model(inputs=[inp], outputs=[NetworkC3D(inp)])
def C3D():
    """Build a Sequential C3D+ConvLSTM model for UCF-101 (16x224x224x3 clips).

    Returns the compiled-ready Sequential model ending in a softmax over
    101 classes.
    """
    # Define model
    # NOTE(review): this rebinds the module-level `l2` (imported from
    # keras.regularizers) to the same object -- harmless but shadowing.
    l2=keras.regularizers.l2
    nb_classes = 101
    weight_decay = 0.00005
    patch_size, img_cols, img_rows = 16, 224, 224
    model = Sequential()
    # Stage 1: two 16-filter 3x3x3 convs, then temporal+spatial pooling.
    model.add(Conv3D(16,(3,3,3),
                     input_shape=(patch_size, img_cols, img_rows, 3),
                     activation='relu'))
    model.add(Conv3D(16,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2a_a', activation = 'relu'))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    # Stage 2: two 32-filter convs, spatial-only pooling.
    model.add(Conv3D(32,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2b_a', activation = 'relu'))
    model.add(Conv3D(32,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2b_b', activation = 'relu'))
    model.add(MaxPooling3D(pool_size=(1, 2,2)))
    # Stage 3: three 64-filter convs, spatial-only pooling.
    model.add(Conv3D(64,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2c_a', activation = 'relu'))
    model.add(Conv3D(64,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2c_b', activation = 'relu'))
    model.add(Conv3D(64,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2c_c', activation = 'relu'))
    model.add(MaxPooling3D(pool_size=(1, 2,2)))
    # Stage 4: three 128-filter convs, spatial-only pooling.
    model.add(Conv3D(128,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2d_a', activation = 'relu'))
    model.add(Conv3D(128,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2d_b', activation = 'relu'))
    model.add(Conv3D(128,(3,3,3), strides=(1,1,1),padding='same',
                     dilation_rate=(1,1,1), kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay), use_bias=False,
                     name='Conv3D_2d_c', activation = 'relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    # Temporal modelling: three stacked regularised ConvLSTM2D layers.
    model.add(ConvLSTM2D(filters=64, kernel_size=(3,3),
                       strides=(1,1),padding='same',
                       kernel_initializer='he_normal', recurrent_initializer='he_normal',
                       kernel_regularizer=l2(weight_decay), recurrent_regularizer=l2(weight_decay),
                       return_sequences=True, name='gatedclstm2d_2'))
    model.add(ConvLSTM2D(filters=64, kernel_size=(3,3),
                       strides=(1,1),padding='same',
                       kernel_initializer='he_normal', recurrent_initializer='he_normal',
                       kernel_regularizer=l2(weight_decay), recurrent_regularizer=l2(weight_decay),
                       return_sequences=True, name='gatedclstm2d_3'))
    model.add(ConvLSTM2D(filters=64, kernel_size=(3,3),
                       strides=(1,1),padding='same',
                       kernel_initializer='he_normal', recurrent_initializer='he_normal',
                       kernel_regularizer=l2(weight_decay), recurrent_regularizer=l2(weight_decay),
                       return_sequences=True, name='gatedclstm2d_4'))
    #model.add(MaxPooling3D(pool_size=(nb_pool[0], nb_pool[0], nb_pool[0])))
    #model.add(Flatten())
    # Classification head: global pooling, dropout, 101-way softmax.
    model.add(GlobalAveragePooling3D())
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes,kernel_initializer='normal'))
    model.add(Activation('softmax'))
    return model
def C3D_V2(summary=False, backend='tf'):
    """Build the classic Sports-1M C3D network (487-way output).

    Uses the legacy Keras 1.x layer API (positional kernel dims,
    ``border_mode``/``subsample``) -- requires an old Keras to run.

    :param summary: if True, print the model summary
    :param backend: 'tf' for channels-last (16,112,112,3), else channels-first
    :return: the Sequential model
    """
    model = Sequential()
    if backend == 'tf':
        input_shape=(16, 112, 112, 3) # l, h, w, c
    else:
        input_shape=(3, 16, 112, 112)
    # 1st layer group
    model.add(Convolution3D(64, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv1',
                            subsample=(1, 1, 1),
                            input_shape=input_shape))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                           border_mode='valid', name='pool1'))
    # 2nd layer group
    model.add(Convolution3D(128, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv2',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool2'))
    # 3rd layer group
    model.add(Convolution3D(256, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv3a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(256, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv3b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool3'))
    # 4th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv4a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv4b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool4'))
    # 5th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv5a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu',
                            border_mode='same', name='conv5b',
                            subsample=(1, 1, 1)))
    model.add(ZeroPadding3D(padding=(0, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           border_mode='valid', name='pool5'))
    model.add(Flatten())
    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5))
    # NOTE(review): 'relu' on the final 487-way layer is unusual; the
    # canonical C3D uses softmax here -- confirm before training.
    model.add(Dense(487, activation='relu', name='fc8'))
    if summary:
        print(model.summary())
    return model
def finalC3D():
    """Return (backbone, classifier): a mostly-frozen C3D_V2 backbone with a 101-way softmax head."""
    base_model = C3D_V2()
    # Freeze everything except the last three layers of the backbone.
    for layer in base_model.layers[:-3]:
        layer.trainable = False
    head = Dropout(.5)(base_model.output)
    head = Dense(101, activation='softmax')(head)
    return base_model, Model(inputs=[base_model.input], outputs=[head])
def c3d_model():
    """Build a compact C3D variant for UCF-101 on 16x112x112x3 clips.

    All convolutions are L2-regularised (weight_decay=0.005); the head is
    two 2048-unit FC layers with dropout and a 101-way softmax.
    """
    input_shape = (16,112,112,3)
    weight_decay = 0.005
    nb_classes = 101

    inputs = Input(input_shape)
    # Five conv+pool stages; the first pool keeps the temporal axis intact.
    x = Conv3D(64,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(inputs)
    x = MaxPool3D((2,2,1),strides=(2,2,1),padding='same')(x)

    x = Conv3D(128,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2,2,2),strides=(2,2,2),padding='same')(x)

    x = Conv3D(128,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2,2,2),strides=(2,2,2),padding='same')(x)

    x = Conv3D(256,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2,2,2),strides=(2,2,2),padding='same')(x)

    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Classification head.
    x = Flatten()(x)
    x = Dense(2048,activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048,activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes,kernel_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    model = Model(inputs, x)
    return model
| rvsingh31/VidRecognizer | archive/Code/gcp/TSN/models.py | models.py | py | 11,835 | python | en | code | 1 | github-code | 13 |
20215312473 | import pytest
from Pages.MainPage import MainPage
from Pages.Checkboxes import Checkboxes
@pytest.mark.usefixtures("init_driver")
class TestCheckboxes():
def test_checkboxes(self):
self.mainPage=MainPage(self.driver)
self.mainPage.click_on_checkboxes_page()
self.checkbox=Checkboxes(self.driver)
before_click_stat=self.checkbox.get_status_of_checkboxes()
print(before_click_stat)
after_click_stat=self.checkbox.do_click_on_checkbox()
print(after_click_stat)
final_result=True
for data in range(0,len(before_click_stat)):
if (before_click_stat[data] == after_click_stat[data]):
final_result=False
break
assert True == final_result | kakamband/HerokuPracticeSelenium | Tests/TestCheckboxes.py | TestCheckboxes.py | py | 784 | python | en | code | 1 | github-code | 13 |
18750222891 | import youtube_dl
from time import sleep as s
from tkinter import *
from PIL import ImageTk
import sys
import os
# Create the main window and center it on the screen.
root = Tk()

w = 1000 # width for the Tk root
h = 600 # height for the Tk root

# get screen width and height
ws = root.winfo_screenwidth() # width of the screen
hs = root.winfo_screenheight() # height of the screen

# calculate x and y coordinates so the window is centered
x = (ws / 2) - (w / 2)
y = (hs / 2) - (h / 2)

# set the dimensions of the window and where it is placed
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
def download(options, link):
    """Download *link* with youtube_dl using *options* and show a transient
    status label in the GUI (auto-destroyed after 2 seconds)."""
    with youtube_dl.YoutubeDL(options) as ydl:
        try:
            info_dict = ydl.extract_info(link)
            video_title = info_dict.get('title', None)
            download_label = Label(upper_frame,
                                   text=f'{video_title} is downloaded')
            download_label.place(relx=0.5, rely=0.5, relheight=0.1, anchor='n')
            download_label.after(2000, lambda: download_label.destroy())
        except Exception as e:
            # FIX: the original passed text={e} -- a SET literal -- so tkinter
            # rendered the error message wrapped in braces; pass the string.
            error_label = Label(upper_frame, text=str(e))
            error_label.place(relx=0.5, rely=0.5, relheight=0.1, anchor='n')
            error_label.after(2000, lambda: error_label.destroy())
def resource_path(relative_path):
    """Resolve *relative_path* against the PyInstaller bundle dir (sys._MEIPASS)
    when frozen, otherwise against the local ./data directory."""
    base = getattr(sys, '_MEIPASS', None)
    if base is None:
        base = os.path.abspath("./data")
    return os.path.join(base, relative_path)
def clear_text():
    """Remove all text from the URL entry box."""
    entry.delete(0, 'end')
def get_m4a():
    """Download the URL in the entry box as best-quality m4a audio."""
    options = {
        'noplaylist': True,
        'quiet': True,
        'format': 'bestaudio[ext=m4a]',
    }
    download(options, entry.get())
def get_mp4():
    """Download the URL in the entry box at the best combined video+audio quality."""
    options = {'noplaylist': True, 'format': 'best', 'quiet': True}
    download(options, entry.get())
def get_mp3():
    """Download the URL in the entry box as mp3 audio (if the site offers it)."""
    options = {'noplaylist': True, 'format': 'bestaudio[ext=mp3]', 'quiet': True}
    download(options, entry.get())
def get_wav():
    """Download the URL in the entry box as wav audio (if the site offers it)."""
    options = {'noplaylist': True, 'format': 'bestaudio[ext=wav]', 'quiet': True}
    download(options, entry.get())
def open_folder():
    """Open the current working directory in the file manager (os.startfile is Windows-only)."""
    os.startfile(os.getcwd())
def buttons():
    """Create and place all action buttons inside upper_frame."""
    # Format buttons: each downloads the current entry URL, then clears it.
    m4a = Button(upper_frame,
                 text='m4a',
                 command=lambda: [get_m4a(), clear_text()])
    m4a.place(relx=0.8, rely=0.6, relheight=0.1, relwidth=0.1, anchor='n')

    mp4 = Button(upper_frame,
                 text='best video+audio',
                 command=lambda: [get_mp4(), clear_text()])
    mp4.place(relx=0.2, rely=0.4, relheight=0.1, anchor='n')

    mp3 = Button(upper_frame,
                 text='mp3',
                 command=lambda: [get_mp3(), clear_text()])
    mp3.place(relx=0.2, rely=0.6, relheight=0.1, relwidth=0.1, anchor='n')

    wav = Button(upper_frame,
                 text='wav',
                 command=lambda: [get_wav(), clear_text()])
    wav.place(relx=0.8, rely=0.4, relheight=0.1, relwidth=0.1, anchor='n')

    # Utility buttons.
    open_folder_location = Button(upper_frame,
                                  text='Open download folder location',
                                  command=open_folder)
    open_folder_location.place(relx=0.5, rely=0.7, relheight=0.1, anchor='n')

    quit = Button(upper_frame,
                  text='Quit Program',
                  command=lambda: root.destroy())
    quit.place(relx=0.5, relheight=0.1, rely=0.8, anchor='n')
# Application background image filling the root window.
background_image = ImageTk.PhotoImage(file=resource_path("background1.png"))
background_label = Label(root, image=background_image, bd=0)
background_label.place(x=0, y=0)

# Upper frame holding the title, entry box and buttons.
upper_frame = Frame(root)
upper_frame.place(relx=0.5,
                  rely=0.1,
                  relwidth=0.75,
                  relheight=0.75,
                  anchor='n')
upper_frame_image = ImageTk.PhotoImage(file=resource_path('background2.jpg'))
upper_frame_image_label = Label(upper_frame, image=upper_frame_image)
upper_frame_image_label.place(relwidth=1, relheight=1)

# Application title.
title = Label(upper_frame, text='Universal Downloader', font=50)
title.place(relx=0.5, rely=0.1, relwidth=0.4, relheight=0.15, anchor='n')

# URL entry box read by the get_* download callbacks.
entry = Entry(upper_frame)
entry.place(relx=0.5, rely=0.3, relwidth=0.4, relheight=0.1, anchor='n')

buttons()

root.mainloop()
23574708212 | from datetime import datetime, timedelta
from dateutil import parser
def get_video_ids(response, days, keyword):
    """Return IDs of playlist videos whose title contains *keyword*.

    Only videos published within the last *days* days are kept; pass a
    negative *days* to disable the age filter.
    """
    cutoff = None if days <= -1 else (datetime.now() - timedelta(days=days)).date()
    ids = []
    for item in response["items"]:
        snippet = item["snippet"]
        published = parser.parse(snippet["publishedAt"]).date()
        if cutoff is not None and published < cutoff:
            continue
        if keyword in snippet["title"]:
            ids.append(snippet["resourceId"]["videoId"])
    return ids
def get_playlist_ids(response):
    """Map each subscription's channel ID (UC...) to its uploads-playlist ID (UU...)."""
    return [
        item["snippet"]["resourceId"]["channelId"].replace("UC", "UU", 1)
        for item in response["items"]
    ]
| ivan-svetlich/youtube_autoplaylist | youtube_autoplaylist/get_ids.py | get_ids.py | py | 783 | python | en | code | 0 | github-code | 13 |
12046148315 | from nonebot import export, on_command
from nonebot.rule import to_me
from nonebot.typing import T_State
from nonebot.permission import SUPERUSER
from nonebot.adapters.cqhttp import Bot, Event
from .data_source import get_pixiv
# Plugin metadata registered with NoneBot's export mechanism.
export = export()
export.description = 'Pixiv图片'
export.usage = 'Usage:\n pixiv {日榜/周榜/月榜/id/关键词}'
export.help = export.description + '\n' + export.usage

# Register the `pixiv` command handler (priority 25).
pixiv = on_command('pixiv', priority=25)
@pixiv.handle()
async def _(bot: Bot, event: Event, state: T_State):
    """Handle the `pixiv` command: fetch ranking/keyword artwork and reply."""
    keyword = str(event.get_message()).strip()
    # No argument given: show usage and stop.
    if not keyword:
        await pixiv.finish(export.usage)
    msg = await get_pixiv(keyword)
    # Empty result means the fetch failed; reply with an error message.
    if not str(msg):
        await pixiv.finish('出错了,请稍后再试')
    await pixiv.send(message=msg)
    await pixiv.finish()
| yintian710/nb2_test | awesome/plugins/pixiv/__init__.py | __init__.py | py | 802 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.