text
stringlengths
38
1.54M
import asyncio
import aiohttp
from bs4 import BeautifulSoup
from sanic.exceptions import Forbidden, ServerError, Unauthorized
from yarl import URL

from spider.ua import ua


def auth_server_dump_cookies(session: aiohttp.ClientSession) -> dict:
    """Export the AuthServer cookies of *session* as a plain dict."""
    return {
        morsel.key: {
            'key': morsel.key,
            'value': morsel.value,
            'path': morsel['path'],
            'domain': morsel['domain'],
        }
        for morsel in session.cookie_jar
    }


def auth_server_load_cookies(session: aiohttp.ClientSession, cookies: dict):
    """Import previously dumped AuthServer cookies back into *session*."""
    for name, morsel in cookies.items():
        # Only restore cookies that belong to the AuthServer host.
        if not morsel['domain'].startswith('authserver.sdut.edu.cn'):
            continue
        target = URL(f'http://authserver.sdut.edu.cn{morsel["path"]}')
        session.cookie_jar.update_cookies({name: morsel['value']}, target)


async def auth_server(session: aiohttp.ClientSession, username: str, password: str):
    """Log in to the SDUT AuthServer (the session stays valid for seven days)."""
    # Fetch the login page to collect the hidden form fields and cookies.
    async with session.get('http://authserver.sdut.edu.cn/authserver/login') as resp:
        text = await resp.text()
        cookies = resp.cookies

    soup = BeautifulSoup(text, 'html.parser')
    form_data = {
        'username': username,
        'password': password,
        'rememberMe': 'on',  # "remember me" keeps the session for seven days
    }
    for field in soup.form.find_all('input'):
        if field.get('value'):
            form_data[field.get('name')] = field.get('value')

    JSESSIONID_auth = cookies.get('JSESSIONID_auth').value

    # The AuthServer emits one malformed Set-Cookie header that Python's
    # cookie parsing does not tolerate, so the first redirect is handled
    # manually here instead of letting aiohttp follow it.
    login_url = f'http://authserver.sdut.edu.cn/authserver/login;{JSESSIONID_auth}'
    async with session.post(login_url, data=form_data, allow_redirects=False) as resp:
        headers = resp.headers
        next_url = headers.get('Location')
        for key in headers:
            if key.lower() == 'set-cookie' and headers[key].startswith('CASTGC'):
                # Strip "CASTGC=" (7 chars) and any trailing attributes.
                castgc = headers[key].split(';')[0][7:]
                session.cookie_jar.update_cookies(
                    {'CASTGC': castgc},
                    URL('http://authserver.sdut.edu.cn/authserver'))
                break
        else:
            raise Unauthorized(
                '获取 Cookie 失败,请检查用户名与密码。如果问题持续出现,请联系作者。')

    # Follow the remaining redirects manually.
    async with session.get(next_url) as resp:
        text = await resp.text()
        url = str(resp.url)

    if url == 'http://authserver.sdut.edu.cn/authserver/index.do':
        # Landed on the index page: login succeeded.
        return True
    elif url == 'http://authserver.sdut.edu.cn/authserver/login':
        # Bounced back to the login page: bad credentials.
        raise Unauthorized('用户名或密码错误')
    elif url == 'http://authserver.sdut.edu.cn/authserver/pcImproveInfo.do':
        raise Forbidden('需要修改初始密码后使用')
    else:
        print(url)
        raise ServerError('发生意料之外的错误,如果问题持续出现,请联系作者。')
from framework import entities, numbers, movement, timers, shapes, flags, pathfinding import ai_squad_director import ai_factions import ai_debugger import mapgen import zones import life import ai import time def get_nearest_entity_in_list(entity, entity_list): _nearest_entity = {'entity': None, 'distance': 0} for entity_id in entity_list: _entity = entities.get_entity(entity_id) _distance = numbers.distance(movement.get_position(entity), movement.get_position(_entity)) if not _nearest_entity['entity'] or _distance < _nearest_entity['distance']: _nearest_entity['entity'] = _entity _nearest_entity['distance'] = _distance return _nearest_entity['entity'] ######### #Actions# ######### def _get_item(entity, item_id, hold=False, weight=None): _item = entities.get_entity(item_id) _x, _y = movement.get_position(_item) _distance = numbers.distance(movement.get_position(entity), (_x, _y)) if weight: ai.set_meta_weight(entity, weight, 10*numbers.clip(_distance/30.0, 0, 1)) if _distance: movement.walk_to_position(entity, _x, _y, zones.get_active_astar_map(), zones.get_active_weight_map(), smp=True) else: if hold: life.get_and_hold_item(entity, item_id) else: life.get_and_store_item(entity, item_id) #TODO: Combine these def get_weapon(entity): _nearest_weapon = get_nearest_entity_in_list(entity, entity['ai']['visible_items']['weapon']) if not _nearest_weapon: return _get_item(entity, _nearest_weapon['_id'], hold=True, weight='find_weapon') def get_ammo(entity): _nearest_weapon = get_nearest_entity_in_list(entity, entity['ai']['visible_items']['ammo']) if not _nearest_weapon: return _get_item(entity, _nearest_weapon['_id'], weight='find_ammo') def get_container(entity): _nearest_weapon = get_nearest_entity_in_list(entity, entity['ai']['visible_items']['container']) if not _nearest_weapon: return _get_item(entity, _nearest_weapon['_id'], weight='find_container', hold=True) def find_cover(entity): _squad = 
entities.get_entity(ai_factions.FACTIONS[entity['ai']['faction']]['squads'][entity['ai']['squad']]) _cover_position = ai_squad_director.get_cover_position(_squad, entity['_id']) if not _cover_position or not numbers.distance(movement.get_position(entity), _cover_position): entities.trigger_event(entity, 'finish_turn') return movement.walk_to_position(entity, _cover_position[0], _cover_position[1], zones.get_active_astar_map(), zones.get_active_weight_map()) def find_firing_position(entity): _squad = entities.get_entity(ai_factions.FACTIONS[entity['ai']['faction']]['squads'][entity['ai']['squad']]) _x, _y = movement.get_position(entity) _fire_position = ai_squad_director.get_vantage_point(_squad, entity['_id']) if not _fire_position: return #if not numbers.distance((_x, _y), _fire_position) and not entity['ai']['visible_targets']: # entity['ai']['meta']['has_lost_target'] = True movement.walk_to_position(entity, _fire_position[0], _fire_position[1], zones.get_active_astar_map(), zones.get_active_weight_map()) def find_push_position(entity): _squad = entities.get_entity(ai_factions.FACTIONS[entity['ai']['faction']]['squads'][entity['ai']['squad']]) _x, _y = movement.get_position(entity) _push_position = ai_squad_director.get_push_position(_squad, entity['_id']) if not _push_position: return #if not numbers.distance((_x, _y), _push_position) and not entity['ai']['visible_targets']: # entity['ai']['meta']['has_lost_target'] = True movement.walk_to_position(entity, _push_position[0], _push_position[1], zones.get_active_astar_map(), zones.get_active_weight_map()) def _search_for_target(entity, target_id): _nodes = flags.get_flag(entity, 'search_nodes') if not _nodes: flags.delete_flag(entity, 'search_nodes') entities.trigger_event(entity, 'target_search_failed', target_id=target_id) return _node_list = _nodes.keys() _node_list.sort() _node_x, _node_y = _nodes[_node_list[0]][0] _distance = numbers.distance(movement.get_position(entity), (_node_x, _node_y)) if _distance <= 
15 and life.can_see_position(entity, (_node_x, _node_y)): _nodes[_node_list[0]].remove((_node_x, _node_y)) if not _nodes[_node_list[0]]: del _nodes[_node_list[0]] else: movement.walk_to_position(entity, _node_x, _node_y, zones.get_active_astar_map(), zones.get_active_weight_map()) def search_for_target(entity): _lost_targets = entity['ai']['targets_to_search'] _inside = zones.get_active_inside_positions() if not _lost_targets: print 'Trying to search with no lost targets' return _closest_target = {'distance': 0, 'target_id': None} for target_id in _lost_targets: _memory = entity['ai']['life_memory'][target_id] _distance = numbers.distance(movement.get_position(entity), _memory['last_seen_at']) if not _closest_target['target_id'] or _distance < _closest_target['distance']: _closest_target['target_id'] = target_id _closest_target['distance'] = _distance _target = entities.get_entity(_closest_target['target_id']) _solids = zones.get_active_solids(entity) if flags.has_flag(entity, 'search_nodes'): _search_for_target(entity, _target['_id']) return _x, _y = movement.get_position(entity) _tx, _ty = entity['ai']['life_memory'][_target['_id']]['last_seen_at'] _nodes_to_search = {} if entity['ai']['life_memory'][_target['_id']]['last_seen_velocity']: _vx, _vy = entity['ai']['life_memory'][_target['_id']]['last_seen_velocity'] _tx + _vx*6 _ty + _vy*6 entities.trigger_event(entity, 'set_flag', flag='search_nodes', value=_nodes_to_search) for node_x, node_y in zones.get_active_node_grid(): _distance = numbers.distance((_tx, _ty), (node_x, node_y)) if _distance >= 30: continue if not (node_x, node_y) in _inside: continue _continue = False for pos in shapes.line((_tx, _ty), (node_x, node_y)): if pos in _solids: _continue = True break if _continue: continue if _distance in _nodes_to_search: if not (node_x, node_y) in _nodes_to_search[_distance]: _nodes_to_search[_distance].append((node_x, node_y)) else: _nodes_to_search[_distance] = [(node_x, node_y)] def 
find_melee_position(entity): _target = entity['ai']['nearest_target'] _x, _y = entity['ai']['life_memory'][_target]['last_seen_at'] _closest_pos = {'pos': None, 'distance': 0} _solids = zones.get_active_solids(entity, ignore_entities=[_target]) for x, y in [(_x-1, _y), (_x+1, _y), (_x, _y-1), (_x, _y+1), (_x-1, _y-1), (_x+1, _y-1), (_x-1, _y+1), (_x+1, _y+1)]: if (x, y) in _solids: continue _distance = numbers.distance(movement.get_position(entity), (x, y)) if not _closest_pos['pos'] or _distance < _closest_pos['distance']: _closest_pos['distance'] = _distance _closest_pos['pos'] = (x, y) movement.walk_to_position(entity, _closest_pos['pos'][0], _closest_pos['pos'][1], zones.get_active_astar_map(), zones.get_active_weight_map()) def reload_weapon(entity): life.reload_weapon(entity) def shoot_weapon(entity): _target = entity['ai']['nearest_target'] entities.trigger_event(entity, 'shoot', target_id=_target) def melee(entity): _target = entity['ai']['nearest_target']
#!/usr/bin/python3.7 import sys def longest(dice): if len(dice) <= 4: return len(dice) # pick them all # if more than 4.. i can pick at least 4. # I can pick a 5th if there's at least one >= 5 # I can pick a 6th if i can pick dice = sorted(dice) ans = 1 for d in dice: if d >= ans: ans += 1 ans -= 1 return ans # Input lines = iter(sys.stdin.readlines()) cases = int(next(lines)) for case in range(cases): n = int(next(lines)) dice = list(map(int, next(lines).split())) print(f"Case #{str(case+1)}: {longest(dice)}")
from services.tradehub import TradeHub
from control.sanctioned_customer import SanctionedCustomer


class Runner(object):
    """Fetches the pending trades once and screens each against sanctions."""

    def __init__(self):
        self.trade_service = TradeHub()
        self.queue = self.trade_service.get_trades()

    def execute_queue(self):
        """Run the sanctions check over every queued trade."""
        # A fresh screener instance per trade, as in the original behaviour.
        for trade in self.queue:
            SanctionedCustomer().is_covered_by_sanctions(trade)


if __name__ == '__main__':
    runner = Runner()
    runner.execute_queue()
# 62. Unique Paths
# https://leetcode.com/problems/unique-paths/
# See also: https://leetcode.com/problems/unique-paths/discuss/23234/Accpeted-simple-Python-DP-solution.


class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count the monotone paths (moves: right/down only) from the
        top-left to the bottom-right corner of an m x n grid."""
        # Rolling one-row DP: row[j] holds the number of paths reaching the
        # current row's cell j.  The first row/column are all ones.
        row = [1] * n
        for _ in range(1, m):
            for j in range(1, n):
                row[j] += row[j - 1]
        return row[-1]
# Testing
import csv
import random
from os import path

# Items the customer has picked; each entry holds "No.", "Item", "Price",
# "Quantity" and "File" (id 1-5 of the source menu CSV).
user_orderlist = []

absolute_path = path.abspath(path.curdir)

# Menu id -> (CSV filename, CSV header row).  Shared by every function that
# reads or rewrites a menu file (previously duplicated five times).
_MENU_FILES = {
    1: ("menu_starters.csv", ["No.", "Item", "Price", "Inventory"]),
    2: ("menu_steaks.csv", ["No.", "Item", "Price", "Inventory"]),
    3: ("menu_burgers.csv", ["No.", "Item", "Price", "Inventory"]),
    4: ("menu_pizza.csv", ["No.", "Item", "Small Size Price",
                           "Medium Size Price", "Large Size Price", "Inventory"]),
    5: ("menu_drinks.csv", ["No.", "Item", "Price", "Inventory"]),
}


def table_reservation():
    """Simulate a reservation check and return the number of empty tables.

    Bug fixes: `random.randint(0, 21)` could report 21 reserved tables out of
    20 (negative availability, no message printed at all), and the
    all-reserved path returned None, which made `table_ftn > 0` in main()
    raise a TypeError.  Now 0 <= reserved <= 20 and an int is always returned.
    """
    total_tables = 20
    reserved_tables = random.randint(0, total_tables)
    empty_tables = total_tables - reserved_tables
    if empty_tables == 0:
        print("Sorry, All the tables are reserved at the moment.\n")
    else:
        print("The number of empty tables are {} out of {} at the Restaurant.\n".format(empty_tables, total_tables))
    return empty_tables


def user_order_ftn():
    """Prompt for an item number to order, or M/m to go back to the menu."""
    add = input(
        "\nWhich item do u want to add to your Order List, \"OR\" go back to menu(M/m)? ")
    return add


def menu_starters():
    """Print the starters menu from menu_starters.csv."""
    with open(path.join(absolute_path, "menu_starters.csv"), "r") as rf:
        csv_reader = csv.DictReader(rf, delimiter=",")
        print()
        print("Starters Menu".center(25))
        print(F"No. \t Item \t\t Price")
        for line in csv_reader:
            print(line["No."]+")", line["Item"], "\t", line["Price"])


def menu_steaks():
    """Print the steak menu from menu_steaks.csv."""
    with open(path.join(absolute_path, "menu_steaks.csv"), "r") as rf:
        csv_reader = csv.DictReader(rf, delimiter=",")
        print()
        print("Steak Menu".center(25))
        print("No. \t Item \t\t\t Price")
        for line in csv_reader:
            # Item 1's name is longer, so it gets one tab instead of two.
            if line["No."] == "1":
                print(line["No."]+")", line["Item"], "\t", line["Price"])
            else:
                print(line["No."]+")", line["Item"], "\t\t", line["Price"])


def menu_burgers():
    """Print the burgers menu from menu_burgers.csv."""
    with open(path.join(absolute_path, "menu_burgers.csv"), "r") as rf:
        csv_reader = csv.DictReader(rf, delimiter=",")
        print()
        print("Burgers Menu".center(25))
        print("No. \tItem\t\t\tPrice")
        for line in csv_reader:
            if line["No."] == "1":
                print(line["No."]+")", line["Item"], "\t", line["Price"])
            else:
                print(line["No."]+")", line["Item"], "\t\t", line["Price"])


def menu_pizza():
    """Print the pizza menu (three price columns) from menu_pizza.csv."""
    with open(path.join(absolute_path, "menu_pizza.csv"), "r") as rf:
        csv_reader = csv.DictReader(rf)
        print()
        print("Pizza Menu".center(25))
        print("No. \t Item\t S M L")
        for line in csv_reader:
            print(line["No."]+")", line["Item"],
                  line["Small Size Price"], line["Medium Size Price"], line["Large Size Price"])


def menu_drinks():
    """Print the drinks menu from menu_drinks.csv."""
    with open(path.join(absolute_path, "menu_drinks.csv"), "r") as rf:
        csv_reader = csv.DictReader(rf, delimiter=",")
        print()
        print("Drinks Menu".center(25))
        print("No. \tItem \t\tPrice")
        for line in csv_reader:
            # Tab counts vary with the item-name lengths on rows 4 and 5.
            if line["No."] == "4":
                print(line["No."]+")", line["Item"], "\t", line["Price"])
            elif line["No."] == "5":
                print(line["No."]+")", line["Item"], "", line["Price"])
            else:
                print(line["No."]+")", line["Item"], "\t\t", line["Price"])


valid_quantity = 0  # quantity accepted by the last inventory() call (0 = none)


def _ask_quantity(item_name):
    """Keep prompting until the user enters a positive integer quantity."""
    while True:
        try:
            item_quantity = int(input("Quantity of the \"{}\" item: ".format(item_name)))
        except ValueError:
            print("Value Error, Try Again.\n")
        else:
            if item_quantity > 0:
                return item_quantity
            print("Invalid Item Quantity, Try Again.")


def inventory(file_to_open, item_selected):
    """Ask a quantity for *item_selected* and deduct it from the menu CSV.

    Sets the module-level `valid_quantity` to the accepted amount, or leaves
    it at 0 when stock is insufficient.  Table-driven replacement for five
    nearly identical copy-pasted branches (the pizza branch's slightly
    different "Value Error" message was normalized in the process).
    """
    global valid_quantity
    valid_quantity = 0

    filename, headers = _MENU_FILES[file_to_open]
    file_path = path.join(absolute_path, filename)

    with open(file_path, "r", newline="") as rf:
        rows = list(csv.DictReader(rf, delimiter=","))

    with open(file_path, "w", newline="") as wf:
        csv_writer = csv.DictWriter(wf, fieldnames=headers, delimiter=",")
        csv_writer.writeheader()
        for row in rows:
            if row["No."] == item_selected:
                item_quantity = _ask_quantity(row["Item"])
                stock = int(row["Inventory"])
                if item_quantity <= stock:
                    valid_quantity = item_quantity
                    row["Inventory"] = str(stock - item_quantity)
                else:
                    print("Sorry, the \"{}\" item remaining quantity is: {}.\n".format(row["Item"], row["Inventory"]))
            csv_writer.writerow(row)


# Menu id -> (display function, selectable item numbers on that menu).
_MENU_PAGES = {
    1: (menu_starters, ("1", "2", "3", "4")),
    2: (menu_steaks, ("1", "2", "3", "4")),
    3: (menu_burgers, ("1", "2", "3", "4")),
    4: (menu_pizza, ("1", "2", "3")),
    5: (menu_drinks, ("1", "2", "3", "4", "5", "6")),
}


def _order_item(open_menu, control):
    """Deduct stock for item *control* and append it to the order list."""
    filename, _headers = _MENU_FILES[open_menu]
    inventory(open_menu, control)
    with open(path.join(absolute_path, filename), "r") as rf:
        for line in csv.DictReader(rf, delimiter=","):
            if valid_quantity > 0 and line["No."] == control:
                user_orderlist.append(
                    {"Item": line["Item"], "Price": int(line["Price"]),
                     "Quantity": valid_quantity, "File": open_menu})
                # Message normalized across menus ("Item" was capitalised
                # inconsistently between the five original branches).
                print("The Item has been added to your order list.\n")


def _order_pizza(control):
    """Pizza needs a size choice, which selects one of three price columns."""
    while True:
        size = input("Select the size of pizza? (S/M/L): ")
        if size in ("S", "M", "L"):
            break
        print("Invalid Size, Try Again.")
    inventory(4, control)
    price_key = {"S": "Small Size Price", "M": "Medium Size Price", "L": "Large Size Price"}[size]
    with open(path.join(absolute_path, "menu_pizza.csv"), "r", newline="") as rf:
        for line in csv.DictReader(rf, delimiter=","):
            if valid_quantity > 0 and line["No."] == control:
                user_orderlist.append(
                    {"Item": line["Item"], "Price": int(line[price_key]),
                     "Quantity": valid_quantity, "File": 4})
                print("The Item has been added to your order list.\n")


def menu():
    """Interactive menu loop: show a sub-menu, order items, repeat or exit."""
    while True:
        print("Menu".center(15))
        print("1) Starters")
        print("2) Steaks")
        print("3) Burgers")
        print("4) Pizza")
        print("5) Drinks")
        while True:
            try:
                open_menu = int(input("\nInput the number of which menu you want to open: "))
            except ValueError:
                print("Value Error, Please Try Again.")
            else:
                if open_menu in range(1, 6):
                    break
                print("You can only select between 1 to 5.")

        show_menu, item_numbers = _MENU_PAGES[open_menu]
        show_menu()
        control = user_order_ftn()
        if control in item_numbers:
            if open_menu == 4:
                _order_pizza(control)
            else:
                _order_item(open_menu, control)
        elif control == "M" or control == "m":
            continue
        else:
            print("Invalid Item Selection, Please Try Again.\n")
            continue

        while True:
            check = input("Do u want to add an item (1), OR exit the menu (2): ")
            if check == "1" or check == "2":
                break
            print("Wrong Input, Please Try Again.\n")
        if check == "2":
            break

    # Renumber the order entries 1..N.
    for each_dict in range(len(user_orderlist)):
        user_orderlist[each_dict]["No."] = str(each_dict + 1)


def add_to_inventory(item):
    """Return a removed *item*'s quantity to stock in its source menu CSV.

    Table-driven replacement for five copy-pasted per-file branches.
    """
    filename, headers = _MENU_FILES[item["File"]]
    file_path = path.join(absolute_path, filename)

    with open(file_path, "r", newline="") as rf:
        rows = list(csv.DictReader(rf, delimiter=","))

    with open(file_path, "w", newline="") as wf:
        writer = csv.DictWriter(wf, fieldnames=headers, delimiter=",")
        writer.writeheader()
        for row in rows:
            if row["Item"] == item["Item"]:
                row["Inventory"] = str(item["Quantity"] + int(row["Inventory"]))
            writer.writerow(row)


def view_order():
    """Print the current order list with a total and return the total bill."""
    total_bill = 0
    for line in user_orderlist:
        total_bill += line["Price"] * line["Quantity"]
    print("\n", "\"Order List\"".center(40))
    print("No.".ljust(5), "Item".ljust(30), "Price".ljust(5), "Quantity".ljust(5))
    for line in user_orderlist:
        print(str(line["No."]).ljust(5), line["Item"].ljust(30),
              str(line["Price"]).ljust(5), str(line["Quantity"]).center(5))
    print("Total Bill:", str(total_bill) + " Rs")
    return total_bill


def remove_item():
    """Review the order: remove items (restocking them), add more, or confirm."""
    while True:
        if not user_orderlist:
            print("The order list is empty.")
            break
        view_order()
        try:
            item = input("If u want to remove any item just write its number \"OR\" to add item(Add/add) \"OR\" to confirm order(Exit/exit): ")
            if item == "":
                raise ValueError
        except ValueError:
            print("Wrong input, you cannot enter an empty string.\n")
            continue
        item = item.strip()
        for line in user_orderlist:
            if item == line["No."]:
                popped_item = user_orderlist.pop(user_orderlist.index(line))
                add_to_inventory(popped_item)
                print("The \"{}\" item has been removed from your order list.\n".format(popped_item["Item"]))
                # Renumber the remaining entries.
                for each_dict in range(len(user_orderlist)):
                    user_orderlist[each_dict]["No."] = str(each_dict + 1)
                break
        else:
            if item == "Exit" or item == "exit":
                break
            elif item == "Add" or item == "add":
                menu()
            else:
                print("Such item number was not found in your order list.\n")


def LogIn():
    """Prompt for credentials until they match a row in users.csv.

    NOTE(review): credentials are stored and compared in plain text -- fine
    for a classroom exercise, not for real use.
    """
    while True:
        print("\n", " \"Log In\" ".center(20))
        gmail = input("Gmail: ")
        password = input("Password: ")
        username = ""
        with open(path.join(absolute_path, "users.csv"), "r") as rf:
            reader = csv.DictReader(rf, delimiter=",")
            for line in reader:
                if (line["Gmail"] == gmail) and (line["Password"] == password):
                    username = line["Username"]
                    print("\nDear {}, Login Successful!\n".format(username))
                    return username
        print("Login Failed, Incorrect gmail or password.")
        # Offer to sign up instead; afterwards the login prompt repeats.
        while True:
            try:
                choice = input("Do u want to Sign Up (Yes/No): ")
                if choice == "":
                    raise IndexError
            except IndexError:
                print("Wrong Input.\n")
                continue
            if (choice == "Yes") or (choice == "yes"):
                SignUp()
                break
            elif (choice == "No") or (choice == "no"):
                break
            else:
                print("Wrong Input.\n")


def SignUp():
    """Create a new account after validating username, password and gmail."""
    print("\n", " \"Sign Up\" ".center(25))

    # --- username: non-empty, no leading space, not already taken ---
    while True:
        try:
            username = input("Input Username: ")
            if username == "":
                raise IndexError
        except IndexError:
            print("Invalid Input, Try Again.\n")
            continue
        if username[0] == " ":
            print("Username cannot have a space as its first character, Try Again.")
            continue
        username = username.strip()
        # Fix: this open() previously used a bare relative "users.csv" while
        # every other file access goes through absolute_path.
        with open(path.join(absolute_path, "users.csv"), "r") as rf:
            taken = any(line["Username"] == username
                        for line in csv.DictReader(rf, delimiter=","))
        if taken:
            print("\nThe username {}, is already taken.\n".format(username))
            continue
        break

    # --- password: 8-20 chars, no spaces ---
    while True:
        password = input("Input Password: ")
        if len(password) < 8:
            print("The lenght of the password should be atleast 8.\n")
        elif len(password) > 20:
            print("The lenght of the password cannot exceed 20.\n")
        elif " " in password:
            print("You cannot put spaces in your password.\n")
        else:
            break

    # --- gmail: non-empty, no spaces or '@', not already registered ---
    while True:
        try:
            gmail = input("Input the preceding text of @gmail.com : ")
            if gmail == "":
                raise IndexError
        except IndexError:
            print("Wrong Input, You cannot input an empty string.\n")
            continue
        for char in gmail:
            if char == " ":
                print("You cannot input spaces in your gmail.\n")
                break
            elif char == "@":
                print("You cannot use \"@\" this symbol.\n")
                break
        else:
            new_gmail = gmail + "@gmail.com"
            with open(path.join(absolute_path, "users.csv"), "r") as rf:
                gmail_taken = any(line["Gmail"] == new_gmail
                                  for line in csv.DictReader(rf, delimiter=","))
            if gmail_taken:
                print("Sorry, this gmail is already taken please try again.\n")
                continue
            break

    with open(path.join(absolute_path, "users.csv"), "a", newline="") as af:
        headers = ["Username", "Password", "Gmail"]
        appender = csv.DictWriter(af, fieldnames=headers, delimiter=",")
        appender.writerow({"Username": username, "Password": password, "Gmail": new_gmail})
    print("Your account has been succesfuly created.\n")


menu_check = False  # True once the user actually reached the menu


def main():
    """Entry point: authenticate, choose the order type, then run the menu."""
    global menu_check
    menu_check = False
    while True:
        print("\"Food On Fire Restaurant App\"".center(50))
        user_state = input("\nDo u want to Login or create a new account? Log In(L/l) or Sign Up(S/s) : ").strip()
        if user_state == "L" or user_state == "l":
            login_ftn = LogIn()
            break
        elif user_state == "S" or user_state == "s":
            SignUp()
        else:
            print("Invalid Input, Try Again.\n")

    while True:
        print(F"Dear {login_ftn}, What type of order do u want to do?")
        order_type = input("Table Reservation OR Home Delivery (Table/Home): ").strip()
        if order_type == "Table" or order_type == "table":
            if table_reservation() > 0:
                menu_check = True
                menu()
            else:
                print("Sorry all the tables are reserved, You should try again after some time.")
            break
        elif order_type == "Home" or order_type == "home":
            menu_check = True
            menu()
            break
        else:
            print("Wrong Input, Please Try Again.\n")


if __name__ == "__main__":
    # Guarded so the module can be imported (e.g. for testing) without
    # immediately launching the interactive app.
    main()
    if menu_check:
        remove_item()
        if len(user_orderlist) > 0:
            total = view_order()
            print("\"Your order has been placed.\"")
from flask import Flask, jsonify, send_from_directory, render_template
import pandas as pd
import os

app = Flask(__name__)

# Socrata JSON endpoint for the NYC street tree census.
# NOTE(review): despite the route comments in the original, this dataset is
# tree data (columns `boroname`, `health`), not a housing price index.
CSV_URL = 'https://data.cityofnewyork.us/resource/uvpi-gqnh.json'
CSV_PATH = 'data.csv'  # local on-disk cache of the downloaded data


def get_df():
    """Return the dataset as a DataFrame, downloading and caching on first use."""
    # If the data is already saved to disk, read the cache.
    if os.path.isfile(CSV_PATH):
        return pd.read_csv(CSV_PATH)
    # Otherwise download once and cache. index=False keeps the reloaded frame
    # identical to the downloaded one (the original wrote the index column,
    # so every cache reload gained a spurious "Unnamed: 0" column).
    df = pd.read_json(CSV_URL)
    df.to_csv(CSV_PATH, index=False)
    return df


@app.route('/tpi_html/<string:boroname>/<string:health>')
def to_html(boroname, health):
    """Rows matching the given borough and tree health, as an HTML table."""
    df = get_df()
    df = df[(df['boroname'] == boroname) & (df['health'] == health)]
    return df.to_html()


@app.route('/tpi_json/<string:boroname>/<string:health>')
def to_json(boroname, health):
    """Rows matching the given borough and tree health, as JSON."""
    df = get_df()
    df = df[(df['boroname'] == boroname) & (df['health'] == health)]
    return df.to_json()


if __name__ == '__main__':
    app.run(debug=True)
#!/usr/bin/env python3
"""Advent of Code 2017 day 14: knot-hash disk defragmentation.

Builds a 128x128 bit grid from 128 knot hashes, then counts used squares
and connected regions (4-neighbourhood flood fill).
"""


def bits(hexc):
    """Return the 4-bit binary string for a single hex digit."""
    return bin(int(hexc, 16))[2:].zfill(4)


def run(lengths):
    """Run 64 rounds of the knot-hash pinch/twist over a 256-element list.

    position/skip carry over between rounds, per the day 10 part 2 spec.
    """
    position = 0
    skip = 0
    sequence = list(range(256))
    for _ in range(64):
        for l in lengths:
            # Reverse the circular sub-span [position, position + l).
            for i in range(l // 2):
                now = (position + i) % len(sequence)
                later = (position + l - 1 - i) % len(sequence)
                sequence[now], sequence[later] = sequence[later], sequence[now]
            position += l + skip
            skip += 1
    return sequence


def hash(string):  # noqa: A001 -- shadows builtins.hash; kept for compatibility
    """Return the 32-char hex knot hash of *string* (dense hash of XOR blocks)."""
    lengths = [ord(x) for x in string] + [17, 31, 73, 47, 23]  # standard suffix
    seq = run(lengths)
    hashstr = ""
    for i in range(len(seq) // 16):
        num = 0
        for j in range(16):
            num ^= seq[i * 16 + j]
        hashstr += hex(num)[2:].zfill(2)
    return hashstr


def main():
    key = "hwlqcszp"
    grid = []
    for i in range(128):
        has = hash(key + "-" + str(i))
        grid.append([int(x) for x in "".join(bits(c) for c in has)])

    # Sets instead of lists: the original used `in list` membership tests,
    # which made the flood fill quadratic.
    filled = {(x, y) for x in range(128) for y in range(128) if grid[y][x]}
    print(len(filled))

    regions = 0
    seen = set()
    for f in filled:
        if f in seen:
            continue
        # Depth-first flood fill over the 4-neighbourhood.
        q = [f]
        while q:
            sq = q.pop()
            if sq in seen or sq not in filled:
                continue
            seen.add(sq)
            x, y = sq
            q += [(x, y - 1), (x - 1, y), (x, y + 1), (x + 1, y)]
        regions += 1
    print(regions)


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 11:18:30 2020

@author: Martin Jessen

Helpers for loading, normalizing and down-scaling grayscale images.
"""
import matplotlib.pyplot as plt
import os
import numpy as np
from skimage import transform
from PIL import Image


def load_images_from_folder(folder):
    """Load every file in *folder* as a numpy array."""
    images = []
    for filename in os.listdir(folder):
        img = Image.open(os.path.join(folder, filename))
        images.append(np.asarray(img))
    return images


def load_images_from_all_folders(folder, limit=100):
    """Load the first file of each sub-folder under *folder*.

    Only images with exactly 262144 elements (512x512 — TODO confirm) are
    kept; collection stops once more than *limit* images are gathered.
    """
    images = []
    for root, dirs, files in os.walk(folder, topdown=True):
        # one from each folder; folders with no/unreadable first file are skipped
        try:
            img = Image.open(os.path.join(root, files[0]))
            if np.size(np.asarray(img)) == 262144:
                images.append(np.asarray(img))
            img.close()
        except Exception:  # narrowed from bare except: still best-effort skip
            print('Unable to open file')
        if len(images) > limit:
            break
    print('there are {} images'.format(len(images)))
    return images


def normalize(images):
    """Normalize in place with fixed calibration constants.

    NOTE(review): 29744/33000 look like sensor-specific min/max — confirm.
    """
    minval = 29744
    maxval = 33000
    for i in range(len(images)):
        images[i] = (images[i] - minval) / maxval
    return images


def normalize_0(images):
    """Return a new list with each image min-max scaled to [0, 1]."""
    normalized = []
    for img in images:
        # np.float was removed in NumPy 1.24; plain float is equivalent here.
        lmin = float(np.min(img))
        lmax = float(np.max(img))
        normalized.append((img - lmin) / (lmax - lmin))
    return normalized


def normalize_32768(images):
    """Shift each image down by 32768 (e.g. uint16 offset removal), in place.

    Bug fix: the original rebound the loop variable (`img = img - 32768`),
    which left the input list completely unchanged.
    """
    for i in range(len(images)):
        images[i] = images[i] - 32768
    return images


def compress_images(images):
    """Return each image down-scaled by a factor of 4 per axis."""
    img = []
    for n in range(len(images)):
        img.append(transform.resize(
            images[n],
            (images[n].shape[0] / 4, images[n].shape[1] / 4),
            anti_aliasing=True))
    return img


def show_img(image):
    """Display a single image with a grayscale colormap."""
    plt.imshow(image, cmap='gist_gray')
    plt.show()
import numpy as np
from pprint import pprint
import os


class Stats():
    """Quick corpus statistics: file counts by extension, venue frequencies."""

    def top_level_stats(self, path):
        """Walk *path* and print the total file count, the set of file
        extensions with per-extension counts, then each directory with its
        sub-folders."""
        count = 0
        file_exts = []
        for dirname, _, filenames in os.walk(path):
            for filename in filenames:
                count += 1
                # NOTE: names without a dot contribute the whole name here.
                file_exts.append(filename.split(".")[-1])
        file_ext_set = set(file_exts)
        print(f"Files: {count}")
        print(f"Files extensions: {file_ext_set}\n\n=====================\nFiles extension count:\n=====================")
        # Single pass instead of the original list.count() per extension
        # (which was O(files * extensions)).
        ext_counts = {}
        for fe in file_exts:
            ext_counts[fe] = ext_counts.get(fe, 0) + 1
        for fe, fe_count in ext_counts.items():
            print(f"{fe}: {fe_count}")
        # Directory overview (kept from the original behaviour).
        for root, folders, filenames in os.walk(path):
            print(root, folders)

    def get_venues(self, all_files):
        """Count normalised venue names over the bib entries of *all_files*.

        Each file is expected to carry a ``bib_entries`` dict whose values
        have a ``venue`` field. Venue names are normalised by replacing
        spaces with underscores, stripping dots and lower-casing. Prints and
        returns the ``{venue: count}`` mapping.
        """
        venues = dict()
        for file in all_files:
            for bib in file['bib_entries'].values():
                venue = str(bib['venue']).replace(' ', '_').replace('.', '').lower()
                # dict.get avoids the original `in venues.keys()` double lookup
                venues[venue] = venues.get(venue, 0) + 1
        for key, n in venues.items():
            print(f"{key}: {n}")
        return venues

    # TODO: get_publication_year(all_files) was sketched here but never
    # implemented; add it when publication-year stats are needed.
import pandas
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

# Classic end-to-end walkthrough on the UCI iris dataset: inspect, plot,
# cross-validate several classifiers, then evaluate two on a held-out split.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
fields = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=fields)

# Describes the dimensions of the dataset
print(dataset.shape)
# Get first 20 rows from the dataset
print(dataset.head(20))
# Get basic statistics about the dataset
print(dataset.describe())
# Class distribution
print(dataset.groupby('class').size())

# box and whisker plots
# Outliers are points that are at least 3/2 times below min or above max.
dataset.plot(kind='box', subplots=True, layout=(2, 2), sharex=False, sharey=False)
plt.show()

# Histograms: sepal-length and sepal-width have a Gaussian-looking distribution
dataset.hist()
plt.show()  # fixed: the original called the private plt._show(), bypassing pyplot

# Multi-variate plot: scatter plot matrix
scatter_matrix(dataset)
plt.show()

# Split data set with 80% training and 20% validation
array = dataset.values
X = array[:, 0:4]
y = array[:, 4]  # the last column is the class label
validation_size = 0.2
seed = 7
X_train, X_validation, y_train, y_validation = model_selection.train_test_split(
    X, y, test_size=validation_size, random_state=seed)
scoring = 'accuracy'

models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))

results = []
names = []
for name, model in models:
    # shuffle=True is required for random_state to apply; modern scikit-learn
    # raises ValueError when random_state is set on an unshuffled KFold.
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = model_selection.cross_val_score(
        model, X_train, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    print("%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))

# Make predictions on validation dataset
print('KNeighbors')
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(y_validation, predictions))
print(confusion_matrix(y_validation, predictions))
print(classification_report(y_validation, predictions))

print('Logistic Regression')
lr = LogisticRegression()
lr.fit(X_train, y_train)
predictions = lr.predict(X_validation)
print(accuracy_score(y_validation, predictions))
print(confusion_matrix(y_validation, predictions))
print(classification_report(y_validation, predictions))
from bs4 import BeautifulSoup
import networkx as nx
import json
from urllib.request import urlopen
from datetime import datetime
import time
import pandas as pd
import numpy as np


class TweetJob:
    """Scrape a Twitter account's followers/following from mobile.twitter.com,
    persist them as JSON + edge/vertex CSVs, and build a GEXF graph."""

    def __init__(self):
        self.twitter_handle = None
        self.follower_limit = 1      # number of follower pages (~20 names each)
        self.following_limit = 1     # number of following pages
        self.job_name = None
        self.json_name = None
        self.edges_name = None
        self.vertices_name = None
        self.followers = None
        self.profile = None
        self.dt = datetime.now().strftime('%H%M%S')  # timestamp suffix for file names
        self.followers_list = []
        self.following_list = []
        self.profile_html = None
        self.followers_html = None
        self.following_html = None
        self.following_bs = None
        self.profile_bs = None
        self.follower_bs = None
        self.following_start_page = None
        self.follower_start_page = None
        self.followers_to_crawl = 5      # how many level-1 accounts to also crawl
        self.followers_crawl_limit = 20  # page limit for each level-1 crawl
        self.sleep = 1                   # polite delay between page fetches [s]
        self.graph_name = None

    def initial_information_input(self):
        """Prompt for the handle and crawl sizes; derive URLs and file names."""
        self.twitter_handle = input('Enter twitter handle and press enter ')
        followers_to_scrape = int(input('How many followers to collect? '))
        following_to_scrape = int(input('How many accounts following to collect? '))
        self.followers_to_crawl = int(input('How many of account 0 followers do you want to also scrape? '))
        self.followers_crawl_limit = int(input('And how many of their followers to record?'))
        # Each mobile page lists ~20 accounts, hence the /20 conversion.
        self.follower_limit = followers_to_scrape / 20
        self.following_limit = following_to_scrape / 20
        self.profile = 'https://mobile.twitter.com/'+str(self.twitter_handle)
        self.following_start_page = str(self.profile)+'/following'
        self.follower_start_page = str(self.profile)+'/followers'
        self.job_name = self.twitter_handle+"_"+self.dt
        self.json_name = self.job_name + '.json'
        self.edges_name = self.job_name + '_edges.csv'
        self.vertices_name = self.job_name + '_vertices.csv'
        self.graph_name = self.job_name + '.gexf'

    def load_initial_pages(self):
        """Fetch and parse the profile, followers and following start pages."""
        self.profile_html = urlopen(self.profile).read()
        # Bug fix: the original fetched following_start_page here too, so the
        # "followers" soup was actually the following page.
        self.followers_html = urlopen(self.follower_start_page).read()
        self.following_html = urlopen(self.following_start_page).read()
        self.following_bs = BeautifulSoup(self.following_html, 'html.parser')
        self.profile_bs = BeautifulSoup(self.profile_html, 'html.parser')
        self.follower_bs = BeautifulSoup(self.followers_html, 'html.parser')

    def get_user_info(self, bs):
        """Extract profile fields from a parsed mobile profile page."""
        full_name = bs.find('div', {'class': 'fullname'}).get_text().strip()
        username = bs.find('span', {'class': 'screen-name'}).get_text().strip()
        username = '@'+username
        location = bs.find('div', {'class': 'location'}).get_text().strip()
        bio = bs.find('div', {'class': 'dir-ltr'}).get_text().strip()
        url = bs.find('div', {'class': 'url'}).get_text().strip()
        # statnum order on the mobile page: tweets, following, followers
        tweets = bs.findAll('div', {'class': 'statnum'})[0].get_text().replace(',', '')
        following = bs.findAll('div', {'class': 'statnum'})[1].get_text().replace(',', '')
        followers = bs.findAll('div', {'class': 'statnum'})[2].get_text().replace(',', '')
        date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        return([full_name, username, location, bio, url, tweets, following,
                followers, date_time])

    def get_following(self, bs_object):
        """Return the @usernames listed on one following/followers page.

        The first username on the page is the profile owner, hence the [1:].
        """
        following = []
        namelist = bs_object.findAll('span', {'class': 'username'})
        for name in namelist:
            following.append(name.get_text())
        following = following[1:]
        return(following)

    def get_followers(self, bs_object):
        """Like get_following, but stores the result on self.followers."""
        self.followers = []
        nameList = bs_object.findAll('span', {'class': 'username'})
        for name in nameList:
            self.followers.append(name.get_text())
        self.followers = self.followers[1:]

    def get_next_page(self, bs):
        """Return the relative URL of the next page, or "END" when there is none."""
        next_div = bs.find('div', attrs={'class': 'w-button-more'})
        try:
            next_page = next_div.a['href']
            return(next_page)
        except AttributeError:  # no "more" button -> last page
            return("END")

    def following_scraping_process(self, page, limit):
        """Paginate through up to *limit* following pages starting at *page*."""
        whole_following_list = []
        while limit > 0:
            time.sleep(self.sleep)
            print(str(limit)+" pages remaining")
            following_html = urlopen(page).read()
            following_bs = BeautifulSoup(following_html, 'html.parser')
            page_following = self.get_following(following_bs)
            whole_following_list += page_following
            page = self.get_next_page(following_bs)
            print('page crawled, moving to next page...')
            print('next page is: '+str(page))
            if page == 'END':
                limit = 0
            else:
                page = 'https://mobile.twitter.com'+str(page)
            limit -= 1
        self.following_list = whole_following_list

    def followers_scraping_process(self, page, limit):
        """Paginate through up to *limit* follower pages starting at *page*.

        NOTE: reuses get_following — both page types share the same markup.
        """
        whole_followers_list = []
        while limit > 0:
            time.sleep(self.sleep)
            print(str(limit)+" pages remaining")
            followers_html = urlopen(page).read()
            followers_bs = BeautifulSoup(followers_html, 'html.parser')
            page_followers = self.get_following(followers_bs)
            whole_followers_list += page_followers
            page = self.get_next_page(followers_bs)
            print('page crawled, moving to next page...')
            print('next page is: '+str(page))
            if page == 'END':
                limit = 0
            else:
                page = 'https://mobile.twitter.com'+str(page)
            limit -= 1
        self.followers_list = whole_followers_list

    def compile_data(self, profile_data):
        """Assemble the scraped profile fields and edge lists into a dict."""
        follower_list = self.followers_list
        following_list = self.following_list
        self.user_info = {
            'full_name': profile_data[0],
            'username': profile_data[1],
            'twitter_user_url': self.profile,
            'location': profile_data[2],
            'bio': profile_data[3],
            'bio_url': profile_data[4],
            'tweet_count': profile_data[5],
            'following_count': profile_data[6],
            'follower_count': profile_data[7],
            'following': following_list,
            'followers': follower_list,
            'date_updated': profile_data[8],
        }

    def write_job(self, write_mode):
        """Persist user_info as JSON and append/write edge + vertex CSVs.

        write_mode 'w' starts fresh files with headers; 'a' appends headerless
        rows for secondary-crawl accounts.
        """
        head = None
        if write_mode == 'a':
            head = False
            user = self.twitter_handle
        if write_mode == 'w':
            head = True
            user = '@'+self.twitter_handle
        with open(self.json_name, write_mode) as outfile:
            json.dump(self.user_info, outfile)
        print('Job written to '+str(self.json_name))
        v = self.followers_list+self.following_list
        v.append(user)
        v = pd.DataFrame(v, columns=['vertices'])
        v = v.replace({'@@': '@'})  # guard against doubled @ prefixes
        e = pd.DataFrame(self.following_list, columns=['following'])
        e['user'] = user
        e = e[['user', 'following']]
        e2 = pd.DataFrame(self.followers_list, columns=['user'])
        e2['following'] = user
        # DataFrame.append was removed in pandas 2.0; concat is the supported API.
        edges = pd.concat([e, e2])
        edges = edges.replace({'@@': '@'})
        edges.to_csv(self.edges_name, mode=write_mode, header=head)
        v.to_csv(self.vertices_name, mode=write_mode, header=head)

    def opening_sequence(self):
        """Interactive setup loop: gather inputs until the user confirms them."""
        flag = True
        while flag == True:
            self.initial_information_input()
            print(self.job_name)
            print(self.json_name)
            print(self.edges_name)
            print(self.vertices_name)
            if input(("Is this correct? y/n ")) == "y":
                flag = False
            else:
                print("ok lets try again...")
                flag = True
        print("ok, starting")

    def primary_crawl(self):
        """Scrape the root account's profile, following and followers; write files."""
        self.load_initial_pages()
        print("initalized, scraping profile 0...")
        profile_data = self.get_user_info(self.profile_bs)
        print('profile 0 scraped, getting followers...')
        self.following_scraping_process(self.following_start_page, self.following_limit)
        print('following scraped, moving to followers...')
        self.followers_scraping_process(self.follower_start_page, self.follower_limit)
        self.compile_data(profile_data)
        print('followers scraped, writing...')
        self.write_job('w')

    def get_new_list(self):
        """Return the first followers_to_crawl accounts the root account follows."""
        with open(self.json_name) as f:
            data = json.load(f)
        new_crawl_list = data['following'][:self.followers_to_crawl]
        return(new_crawl_list)

    def secondary_crawl(self, new_list):
        """Repeat the crawl for each level-1 account and append to the files."""
        for profile in new_list:
            # reset per-account state before re-pointing the URLs
            self.followers_list = []
            self.following_list = []
            self.twitter_handle = profile
            self.profile = 'https://mobile.twitter.com/'+str(self.twitter_handle)
            self.following_start_page = str(self.profile)+'/following'
            self.follower_start_page = str(self.profile)+'/followers'
            self.following_limit = self.followers_crawl_limit
            # Bug fix: the original assigned self.followers_limit (a name never
            # read anywhere), so the follower crawl kept the stale limit.
            self.follower_limit = self.followers_crawl_limit
            self.load_initial_pages()
            print("initalized, scraping profile 0...")
            profile_data = self.get_user_info(self.profile_bs)
            print('profile 0 scraped, getting followers...')
            self.following_scraping_process(self.following_start_page, self.following_limit)
            print('following scraped, moving to followers...')
            self.followers_scraping_process(self.follower_start_page, self.follower_limit)
            self.compile_data(profile_data)
            print('followers scraped, writing...')
            self.write_job('a')

    def make_graph(self):
        """Build an undirected graph from the edges CSV and save it as GEXF."""
        df = pd.read_csv(self.edges_name)
        G = nx.MultiGraph()
        G = nx.from_pandas_edgelist(df, 'user', 'following')
        nx.write_gexf(G, self.graph_name)

    def main(self):
        """Full pipeline: primary crawl, secondary crawl, graph export."""
        self.load_initial_pages()
        print('loaded, moving on to primary crawl')
        self.primary_crawl()
        print('Primary complete, moving to secondary')
        n_list = self.get_new_list()
        print(n_list)
        self.secondary_crawl(n_list)
        print('secondary crawl complete')
        self.make_graph()
        print('graph complete')


if __name__ == '__main__':
    tj = TweetJob()
    tj.opening_sequence()
    tj.main()
# 程序文件Pex17_2.py import numpy as np from numpy.random import randint, rand, shuffle from matplotlib.pyplot import plot, show, rc a = np.loadtxt("../data/Pdata17_2.txt") xy, d = a[:, :2], a[:, 2:]; N = len(xy) w = 50; g = 10 # w为种群的个数,g为进化的代数 J = []; for i in np.arange(w): c = np.arange(1, N - 1); shuffle(c) c1 = np.r_[0, c, 101]; flag = 1 while flag > 0: flag = 0 for m in np.arange(1, N - 3): for n in np.arange(m + 1, N - 2): if d[c1[m], c1[n]] + d[c1[m + 1], c1[n + 1]] < \ d[c1[m], c1[m + 1]] + d[c1[n], c1[n + 1]]: c1[m + 1:n + 1] = c1[n:m:-1]; flag = 1 c1[c1] = np.arange(N); J.append(c1) J = np.array(J) / (N - 1) for k in np.arange(g): A = J.copy() c1 = np.arange(w) shuffle(c1) # 交叉操作的染色体配对组 c2 = randint(2, 100, w) # 交叉点的数据 for i in np.arange(0, w, 2): temp = A[c1[i], c2[i]:N - 1] # 保存中间变量 A[c1[i], c2[i]:N - 1] = A[c1[i + 1], c2[i]:N - 1] A[c1[i + 1], c2[i]:N - 1] = temp B = A.copy() by = [] # 初始化变异染色体的序号 while len(by) < 1: by = np.where(rand(w) < 0.1) by = by[0]; B = B[by, :] G = np.r_[J, A, B] ind = np.argsort(G, axis=1) # 把染色体翻译成0,1,…,101 NN = G.shape[0]; L = np.zeros(NN) for j in np.arange(NN): for i in np.arange(101): L[j] = L[j] + d[ind[j, i], ind[j, i + 1]] ind2 = np.argsort(L) J = G[ind2, :] path = ind[ind2[0], :]; zL = L[ind2[0]] xx = xy[path, 0]; yy = xy[path, 1]; rc('font', size=16) plot(xx, yy, '-*'); show() # 画巡航路径 print("所求的巡航路径长度为:", zL)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.db.models.deletion


class Migration(migrations.Migration):
    # Initial migration for the supports app: creates Support (support
    # contracts attached to asset BaseObjects and a Region) and SupportType,
    # then wires Support.support_type as a nullable, PROTECTed foreign key.
    #
    # NOTE(review): 'created' uses auto_now=True and 'modified' uses
    # auto_now_add=True — these look swapped (the usual convention is the
    # reverse). Confirm against the model definition before "fixing": this
    # migration may already be applied in production.

    dependencies = [
        ('assets', '0001_initial'),
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Support',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(verbose_name='name', max_length=75)),
                ('created', models.DateTimeField(verbose_name='date created', auto_now=True)),
                ('modified', models.DateTimeField(verbose_name='last modified', auto_now_add=True)),
                ('contract_id', models.CharField(max_length=50)),
                ('description', models.CharField(blank=True, max_length=100)),
                ('price', models.DecimalField(null=True, decimal_places=2, blank=True, default=0, max_digits=10)),
                ('date_from', models.DateField(null=True, blank=True)),
                ('date_to', models.DateField()),
                ('escalation_path', models.CharField(blank=True, max_length=200)),
                ('contract_terms', models.CharField(blank=True, max_length=200)),
                ('remarks', models.TextField(blank=True)),
                ('sla_type', models.CharField(blank=True, max_length=200)),
                ('status', models.PositiveSmallIntegerField(choices=[(1, 'new')], verbose_name='status', default=1)),
                ('producer', models.CharField(blank=True, max_length=100)),
                ('supplier', models.CharField(blank=True, max_length=100)),
                ('serial_no', models.CharField(blank=True, max_length=100)),
                ('invoice_no', models.CharField(blank=True, db_index=True, max_length=100)),
                ('invoice_date', models.DateField(null=True, verbose_name='Invoice date', blank=True)),
                ('period_in_months', models.IntegerField(null=True, blank=True)),
                ('base_objects', models.ManyToManyField(to='assets.BaseObject', related_name='supports')),
                ('region', models.ForeignKey(to='accounts.Region')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='SupportType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(verbose_name='name', max_length=255, unique=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='support',
            name='support_type',
            field=models.ForeignKey(null=True, to='supports.SupportType', default=None, on_delete=django.db.models.deletion.PROTECT, blank=True),
        ),
    ]
import sys
from pprint import pprint

sys.stdin = open('input.txt', 'r')  # contest-style: feed input() from a local file

# 4-neighbourhood offsets: up, down, left, right.
d = [(-1, 0), (1, 0), (0, -1), (0, 1)]


def isField(i, j):
    # True when (i, j) lies inside the n x n grid (n is a module-level global).
    return 0 <= i < n and 0 <= j < n


def bfs(i, j):
    """Level-order walk from (i, j), stepping only to strictly lower cells.

    The number of levels reached equals the longest strictly-descending
    path length from this start; updates the global max_road with it.
    """
    global max_road
    # NOTE(review): `visited` is appended to but never read — dead accumulation.
    visited = [(i, j)]
    queue = [(i, j)]
    road = 1
    while queue:
        # Process the current level while collecting the next one.
        temp = queue[:]
        queue = []
        while temp:
            si, sj = temp.pop(0)
            for di, dj in d:
                # Descend only to strictly lower neighbours; the `not in queue`
                # check de-duplicates within the next level only.
                if isField(si+di, sj+dj) and g[si+di][sj+dj] < g[si][sj] and (si+di, sj+dj) not in queue:
                    queue.append((si+di, sj+dj))
                    visited.append((si+di, sj+dj))
        if not queue:
            break
        road += 1
    if road > max_road:
        max_road = road


def set_start(g, peek):
    # Launch a BFS from every cell that holds the peak value.
    for i in range(n):
        for j in range(n):
            if g[i][j] == peek:
                bfs(i, j)


for test in range(int(input())):
    n, k = map(int, input().split())
    g = [list(map(int, input().split())) for _ in range(n)]
    max_road = 0
    # Find the highest cell value ("peak") in the grid.
    peek = 0
    for i in range(n):
        for j in range(n):
            if g[i][j] > peek:
                peek = g[i][j]
    # Try lowering each single cell by kk (for every kk in 0..k), search,
    # then restore the cell — brute force over all one-cell excavations.
    for kk in range(k+1):
        for i in range(n):
            for j in range(n):
                g[i][j] -= kk
                set_start(g, peek)
                g[i][j] += kk
    print('#{} {}'.format(test+1, max_road))
# encoding: utf-8
"""MNIST data loaders with images resized to 224x224 (ImageNet-style input)."""

BATCH_SIZE = 50


def readMNIST(batch_size=BATCH_SIZE):
    """Return (train_loader, test_loader) for MNIST.

    Images are resized to 224x224 and converted to tensors. The dataset is
    downloaded to ./MNIST/ on first use. The train loader batches
    *batch_size* images (defaults to BATCH_SIZE for backward compatibility);
    the test loader yields single images.
    """
    import torchvision
    from torchvision import transforms
    import torch.utils.data as data

    # Shared preprocessing pipeline (the original duplicated this literal
    # for the train and test sets).
    preprocess = transforms.Compose([transforms.Resize(224),
                                     transforms.ToTensor()])

    train_data = torchvision.datasets.MNIST(
        root='./MNIST/',
        train=True,  # training split
        transform=preprocess,
        download=True,
    )
    test_data = torchvision.datasets.MNIST(
        root='./MNIST/',
        train=False,  # test split
        transform=preprocess,
        download=True,
    )
    train_loader = data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
    test_loader = data.DataLoader(dataset=test_data, batch_size=1, shuffle=True)
    return train_loader, test_loader
from diagnnose.attribute.decomposer import ShapleyDecomposer
from diagnnose.attribute.explainer import Explainer
from diagnnose.config import create_config_dict
from diagnnose.models import LanguageModel
from diagnnose.models.import_model import import_model
from diagnnose.tokenizer import create_tokenizer
from diagnnose.utils.misc import profile

# Demo: Shapley-value attribution of a masked-LM's preference between two
# candidate tokens at the {mask} position of a single sentence.
if __name__ == "__main__":
    # Model and tokenizer are built from the diagnnose config machinery
    # (CLI args / config files resolved inside create_config_dict).
    config_dict = create_config_dict()

    model: LanguageModel = import_model(config_dict)
    tokenizer = create_tokenizer(**config_dict["tokenizer"])

    # 10 samples per attribution — presumably a Monte-Carlo estimate of the
    # Shapley values (speed/variance trade-off); confirm in diagnnose docs.
    decomposer = ShapleyDecomposer(model, num_samples=10)
    explainer = Explainer(decomposer, tokenizer)

    # Which candidate does the model prefer at the mask, and what does each
    # input token contribute to that preference?
    sens = [f"The author talked to Sara about {tokenizer.mask_token} book."]
    tokens = ["the", "their"]

    with profile():  # NOTE(review): profiling context from diagnnose.utils.misc — presumably times the call
        full_probs, contribution_probs = explainer.explain(sens, tokens)

    explainer.print_attributions(full_probs, contribution_probs, sens, tokens)
# pylint: disable=no-self-use
class TestOverview:
    """Smoke tests for the /overview endpoint."""

    def test_get_overview(self, client):
        """The overview page serves HTML and embeds at least one image tag."""
        response = client.get("/overview")
        body = response.get_data(as_text=True)
        assert response.status_code == 200
        assert response.mimetype == 'text/html'
        assert '<img src="' in body
# -*- coding:utf-8 -*-
"""CLI that creates/updates Jenkins jobs from task definitions.

For every configured environment and deploy target it renders a job-config
XML (via ``generator``) and pushes it to Jenkins through ``JenkinsApi``.
"""
from __future__ import absolute_import, division, print_function, unicode_literals, \
    with_statement

import logging
import argparse

from jenkinstask import taskload
from jenkinstask.api import JenkinsApi
from jenkinstask.confFile import generator, loadConfig

logger = logging.getLogger(__name__)


def main(args):
    """Dispatch on parsed CLI args: dump one job's config, or create/update
    a single named task or all tasks."""
    logger.debug('arguments %s', args)
    is_upgrade = args.upgrade
    workpath = args.workpath
    env = args.env
    if args.config:
        # --config: just print the existing job XML and exit.
        __get_job_config(args.config, env, workpath)
        return
    if args.name:
        module = taskload.load(args.name, work_path = workpath)
        if not module:
            logger.error('not module "%s" found or execute with error', args.name)
            return
        __create_job([module], is_upgrade, env, workpath)
    elif args.all:
        all_module = taskload.load_all(work_path = workpath)
        __create_job(all_module, is_upgrade, env, workpath)


def __create_job(all_module, is_upgrade, env_name, workpath):
    """Create the given task modules in every environment, or only in
    *env_name* when it is set."""
    logger.info('create job')
    all_env = loadConfig(workpath)
    if env_name:
        logger.info('specific env "%s"', env_name)
        all_env = {env_name: all_env[env_name]}
    logger.debug('all envs %s', all_env)
    for currentEnv_name in all_env:
        __create_job_in_env(all_env[currentEnv_name], all_module, is_upgrade, workpath)


def __create_job_in_env(currentEnv, all_module, is_upgrade, workpath):
    """Create (or, with --upgrade, update) one Jenkins job per
    deploy-target x module pair within a single environment."""
    logger.info('create job in env "%s"', currentEnv)
    deploySetting = currentEnv['deploy']
    for deploy in deploySetting:
        api = JenkinsApi(currentEnv['jenkins_url'], currentEnv.get('username'), currentEnv.get('token'))
        for module in all_module:
            task_name = __create_task_name(deploy, module.name)
            if not api.has_job(task_name):
                xml_content = generator(deploy, module, work_path = workpath)
                api.create_job(task_name, xml_content)
            elif is_upgrade:
                # Job already exists: only touch it when --upgrade was given.
                xml_content = generator(deploy, module, work_path = workpath)
                api.update_config(task_name, xml_content)


def __create_task_name(deploy, module_name):
    """Return the Jenkins job name for *module_name*, applying the deploy's
    optional ``task_name_format`` template when present."""
    logger.debug('task name is"%s"', module_name)
    task_name_format = deploy.get('task_name_format')
    if task_name_format:
        module_name = task_name_format.format(module_name)
        logger.debug('task name changed to "%s"', module_name)
    return module_name


def __get_job_config(name, env_name, workpath):
    """Fetch and print the raw config XML of job *name* in *env_name*."""
    if not env_name:
        raise Exception('env name must be set')
    logger.info('get config in env "%s" for task "%s"', env_name, name)
    all_env = loadConfig(workpath)
    currentEnv = all_env[env_name]
    api = JenkinsApi(currentEnv['jenkins_url'], currentEnv.get('username'), currentEnv.get('token'))
    config = api.get_config(name)
    print('Result ========>>>>>')
    print(config)
    print('=========<<<<<<< END')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--all', action='store_true', help='run all tasks define in task folder')
    parser.add_argument('-n', '--name', help='run specific task')
    parser.add_argument('-u', '--upgrade', action='store_true', help='upgrade task')
    parser.add_argument('-e', '--env', help='only create specific env')
    parser.add_argument('-c', '--config', help='get config of specific task')
    parser.add_argument('-w', '--workpath', help='set work path')
    parser.add_argument('--debug', action='store_true', help='open debug')
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG, format='%(levelname)-s: %(message)s')
        logger.debug('open debug')
    else:
        logging.basicConfig(level=logging.INFO, format='%(levelname)-s: %(message)s')
    # With no action flags at all, show usage instead of doing nothing.
    if not args.all and \
            not args.upgrade and \
            not args.name and \
            not args.config:
        parser.print_help()
    else:
        main(args)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2020 University of Liège
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# SPH dam-break setup: a cube of water released above a small square
# obstacle plate inside a closed box; results are converted to VTK for
# ParaView at the end.

from sph.helpers import *

if __name__=="__main__":

    boxL = 2.       # box edge length [m]
    Lfloor = 0.7    # obstacle plate edge length [m]
    Lwater = 0.5    # water cube edge length [m]
    sep = 0.05/2    # particle spacing [m]

    kernel = Kernel('cubic', False)  # 'cubic', 'quadratic' or 'quintic'
    law = EqState('liquid')          # 'gas' or 'liquid'

    # parameters
    model = Model()
    model.kernel = kernel
    model.law = law
    model.h_0 = 0.06/2       # initial smoothing length [m]
    model.c_0 = 35.0         # initial speed of sound [m/s]
    model.rho_0 = 1000.0     # initial density [kg/m^3]
    model.dom_dim = boxL     # domain size (cube)
    model.alpha = 0.5        # artificial viscosity factor 1
    model.beta = 0.0         # artificial viscosity factor 2
    model.maxTime = 3.0      # simulation time
    model.saveInt = 0.01/2   # save interval

    # mobile particles: water cube centred in x-y, 0.5 m above mid-height
    cube = Cube(o=(((boxL-Lwater)/2), ((boxL-Lwater)/2), ((boxL)/2)+0.5),
                L=(Lwater, Lwater, Lwater),
                rho=model.rho_0, s=sep)
    model.addMobile(cube.generate())

    # fixed particles
    # obstacle: thin plate at mid-height, centred under the water cube
    plane = Cube(o=(((boxL-Lfloor)/2), ((boxL-Lfloor)/2), (boxL/2)),
                 L=(Lfloor, Lfloor, sep),
                 rho=model.rho_0, s=sep)
    model.addFixed(plane.generate())
    # floor (z=0)
    plane = Cube(o=(0, 0, 0),
                 L=(boxL, boxL, sep),
                 rho=model.rho_0, s=sep)
    model.addFixed(plane.generate())
    # wall x=0 (offsets/insets avoid double particles along the edges)
    plane = Cube(o=(0, 0, 2*sep),
                 L=(sep, boxL, boxL-2*sep),
                 rho=model.rho_0, s=sep)
    model.addFixed(plane.generate())
    # wall y=0
    plane = Cube(o=(2*sep, 0, 2*sep),
                 L=(boxL-4*sep, sep, boxL-2*sep),
                 rho=model.rho_0, s=sep)
    model.addFixed(plane.generate())
    # wall x=L
    plane = Cube(o=(boxL-sep, 0, 2*sep),
                 L=(sep, boxL, boxL-2*sep),
                 rho=model.rho_0, s=sep)
    model.addFixed(plane.generate())
    # wall y=L
    plane = Cube(o=(2*sep, boxL-sep, 2*sep),
                 L=(boxL-4*sep, sep, boxL-2*sep),
                 rho=model.rho_0, s=sep)
    model.addFixed(plane.generate())

    # run SPH model
    print(model)
    model.run()

    # convert results to VTK for ParaView
    import sph.gui as gui
    gui.ToParaview(verb=False).convertall()
"""Interactive coursework grade calculator.

Repeatedly reads the coursework component marks and prints, for each
overall grade boundary, how many more marks are required to reach it.
"""

# (grade label, overall percentage threshold) pairs, from best to worst.
GRADE_THRESHOLDS = (
    ("A", 95),
    ("A-", 90),
    ("B+", 86),
    ("B", 82),
    ("B-", 78),
    ("C+", 74),
    ("C", 70),
    ("C-", 64),
)


def compute_total(ess1, ess2, ess3, pres, diss, ascr, refl):
    """Return the combined coursework total.

    Each essay is marked out of 22 and rescaled to a /15 weighting; the
    presentation, discussion, ASCR and reflection marks count at face value.
    """
    return (ess1 * 15 / 22 + ess2 * 15 / 22 + ess3 * 15 / 22
            + pres + diss + ascr + refl)


def marks_needed(threshold, total):
    """Marks still required to lift *total* up to the given overall
    percentage *threshold* (same `(gap) * 2.5 * 22 / 100` scaling the
    original script applied to every boundary)."""
    return (threshold - total) * 2.5 * 22 / 100


def main():
    """Prompt for marks in an endless loop and report every grade boundary."""
    while True:  # runs until interrupted, exactly like the original script
        ess1 = int(input("Essay 1(/22) :"))
        ess2 = int(input("Essay 2(/22) :"))
        ess3 = int(input("Essay 3(/22) :"))
        pres = int(input("Oral Presentation(/8) :"))
        diss = int(input("Student Led Discussion(/7) :"))
        ascr = int(input("ASCR(/10) :"))
        refl = int(input("Self-progress Reflection Task(/5) :"))
        total = compute_total(ess1, ess2, ess3, pres, diss, ascr, refl)
        for label, threshold in GRADE_THRESHOLDS:
            print(str(marks_needed(threshold, total)) + " for " + label)


if __name__ == "__main__":
    main()
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests metrics correctness using Keras model."""

from absl.testing import parameterized
import numpy as np

from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.platform import test
from tensorflow.python.util import nest


def get_multi_io_model():
  """Build a two-input / two-output model with frozen all-ones kernels.

  With every Dense layer frozen and initialised to ones, the prediction for
  a scalar input `v` is deterministically `3 * v` on both outputs, which is
  what the hand-computed expectations in the test classes below rely on.
  """
  inp_1 = layers.Input(shape=(1,), name='input_1')
  inp_2 = layers.Input(shape=(1,), name='input_2')
  x = layers.Dense(3, kernel_initializer='ones', trainable=False)
  out_1 = layers.Dense(
      1, kernel_initializer='ones', name='output_1', trainable=False)
  out_2 = layers.Dense(
      1, kernel_initializer='ones', name='output_2', trainable=False)
  branch_a = [inp_1, x, out_1]
  branch_b = [inp_2, x, out_2]
  return testing_utils.get_multi_io_model(branch_a, branch_b)


def custom_generator_multi_io(sample_weights=None):
  """Endless generator yielding `(x, y, sample_weight)` batches of size 2.

  The 5 samples produce batches of sizes 2, 2 and 1 before wrapping around,
  matching the 3-step epochs used by the generator-based tests.
  """
  batch_size = 2
  num_samples = 5
  inputs = np.asarray([[1.], [2.], [3.], [4.], [5.]])
  targets_1 = np.asarray([[2.], [4.], [6.], [8.], [10.]])
  targets_2 = np.asarray([[1.], [2.], [3.], [4.], [5.]])
  start = 0
  while True:
    if start > num_samples:
      start = 0
    end = start + batch_size
    x = [inputs[start:end], inputs[start:end]]
    y = [targets_1[start:end], targets_2[start:end]]
    if sample_weights:
      # Slice each weight structure (list or dict) the same way as the data.
      sw = nest.map_structure(lambda w: w[start:end], sample_weights)
    else:
      sw = None
    start = end
    yield x, y, sw


@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):
  """Checks metric/loss values on a multi-input multi-output model."""

  def _get_compiled_multi_io_model(self):
    # MSE loss plus an unweighted and a weighted copy of the MSE metric.
    model = get_multi_io_model()
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
        weighted_metrics=[
            metrics.MeanSquaredError(name='mean_squared_error_2')
        ],
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  def setUp(self):
    super(TestMetricsCorrectnessMultiIO, self).setUp()
    self.x = np.asarray([[1.], [2.], [3.], [4.], [5.]])
    self.y1 = np.asarray([[2.], [4.], [6.], [8.], [10.]])
    self.y2 = np.asarray([[1.], [2.], [3.], [4.], [5.]])
    self.sample_weight_1 = np.asarray([2., 3., 4., 5., 6.])
    self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5, 3.])

    # y_true_1 = [[2.], [4.], [6.], [8.], [10.]]
    # y_pred_1 = [[3.], [6.], [9.], [12.], [15.]]
    # y_true_2 = [[1.], [2.], [3.], [4.], [5.]]
    # y_pred_2 = [[3.], [6.], [9.], [12.], [15.]]

    # Weighted metric `output_1`:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4  + (12 - 8)^2 * 5) +
    #           ((15 - 10)^2 *  6)
    #         = 280
    #   Count = (2 + 3) + (4 + 5) + 6 = 20
    #   Result = 14

    # Weighted metric `output_2`:
    #   Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +
    #           ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5) +
    #           (15 - 5)^2 * 3.0
    #         = 440
    #   Count = (3.5 + 2.5) + (1.5 + 0.5) + 3.0 = 11.0
    #   Result = 40

    # Loss `output_1` with weights:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4  + (12 - 8)^2 * 5) +
    #           ((15 - 10)^2 * 6)
    #         = 280
    #   Count = 2 + 2 + 1
    #   Result = 56

    # Loss `output_1` without weights/Metric `output_1`:
    #   Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) +
    #           (15 - 10)^2 = 55
    #   Count = 2 + 2 + 1
    #   Result = 11

    # Loss `output_2` with weights:
    #   Total = ((3 - 1)^2 * 3.5 + (6 - 2)^2 * 2.5) +
    #           ((9 - 3)^2 * 1.5 + (12 - 4)^2 * 0.5) +
    #           (15 - 5)^2 * 3.0
    #         = 440
    #   Count = 2 + 2 + 1
    #   Result = 88

    # Loss `output_2` without weights/Metric `output_2`:
    #   Total = ((3 - 1)^2 + (6 - 2)^2) + ((9 - 3)^2 + (12 - 4)^2) +
    #           (15 - 5)^2 = 220
    #   Count = 2 + 2 + 1
    #   Result = 44

    # Total loss with weights = 56 + 88 = 144
    # Total loss without weights = 11 + 44 = 55

    self.wmse = 'mean_squared_error_2'

    self.expected_fit_result_with_weights = {
        'output_1_mean_squared_error': [11, 11],
        'output_2_mean_squared_error': [44, 44],
        'output_1_' + self.wmse: [14, 14],
        'output_2_' + self.wmse: [40, 40],
        'loss': [144, 144],
        'output_1_loss': [56, 56],
        'output_2_loss': [88, 88],
    }

    self.expected_fit_result_with_weights_output_2 = {
        'output_1_mean_squared_error': [11, 11],
        'output_2_mean_squared_error': [44, 44],
        'output_1_' + self.wmse: [11, 11],
        'output_2_' + self.wmse: [40, 40],
        'loss': [99, 99],
        'output_1_loss': [11, 11],
        'output_2_loss': [88, 88],
    }

    self.expected_fit_result = {
        'output_1_mean_squared_error': [11, 11],
        'output_2_mean_squared_error': [44, 44],
        'output_1_' + self.wmse: [11, 11],
        'output_2_' + self.wmse: [44, 44],
        'loss': [55, 55],
        'output_1_loss': [11, 11],
        'output_2_loss': [44, 44],
    }

    # In the order: 'loss', 'output_1_loss', 'output_2_loss',
    # 'output_1_mean_squared_error', 'output_1_mean_squared_error_2',
    # 'output_2_mean_squared_error', 'output_2_mean_squared_error_2'
    self.expected_batch_result_with_weights = [144, 56, 88, 11, 14, 44, 40]
    self.expected_batch_result_with_weights_output_2 = [
        99, 11, 88, 11, 11, 44, 40
    ]
    self.expected_batch_result = [55, 11, 44, 11, 11, 44, 44]

  def test_fit(self):
    model = self._get_compiled_multi_io_model()
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_fit_with_sample_weight(self):
    model = self._get_compiled_multi_io_model()
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        sample_weight={
                            'output_1': self.sample_weight_1,
                            'output_2': self.sample_weight_2,
                        },
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)

    # Set weights for one output (use batch size).
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        sample_weight={'output_2': self.sample_weight_2},
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result_with_weights_output_2.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_eval(self):
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)

  def test_eval_with_sample_weight(self):
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2,
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)

    # Set weights for one output.
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2,
                                 sample_weight={
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(eval_result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)

    # Verify that metric value is same with arbitrary weights and batch size.
    x = np.random.random((50, 1))
    y = np.random.random((50, 1))
    w = np.random.random((50,))
    mse1 = model.evaluate([x, x], [y, y], sample_weight=[w, w],
                          batch_size=5)[3]
    mse2 = model.evaluate([x, x], [y, y], sample_weight=[w, w],
                          batch_size=10)[3]
    self.assertAllClose(mse1, mse2, 1e-3)

  def test_train_on_batch(self):
    model = self._get_compiled_multi_io_model()
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2])
    self.assertAllClose(result, self.expected_batch_result, 1e-3)

  def test_train_on_batch_with_sample_weight(self):
    model = self._get_compiled_multi_io_model()
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  sample_weight={
                                      'output_1': self.sample_weight_1,
                                      'output_2': self.sample_weight_2,
                                  })
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)

    # Set weights for one output.
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  sample_weight={
                                      'output_2': self.sample_weight_2,
                                  })
    self.assertAllClose(result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)

  def test_test_on_batch(self):
    model = self._get_compiled_multi_io_model()
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2])
    self.assertAllClose(result, self.expected_batch_result, 1e-3)

  def test_test_on_batch_with_sample_weight(self):
    model = self._get_compiled_multi_io_model()
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)

    # Set weights for one output.
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
                                 sample_weight={
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)

  def test_fit_generator(self):
    model = self._get_compiled_multi_io_model()
    history = model.fit_generator(
        custom_generator_multi_io(), steps_per_epoch=3, epochs=2)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_fit_generator_with_sample_weight(self):
    model = self._get_compiled_multi_io_model()
    history = model.fit_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps_per_epoch=3,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)

    # Set weights for one output.
    history = model.fit_generator(
        custom_generator_multi_io(
            sample_weights={'output_2': self.sample_weight_2}),
        steps_per_epoch=3,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights_output_2.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_eval_generator(self):
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate_generator(custom_generator_multi_io(),
                                           steps=3)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)

  def test_eval_generator_with_sample_weight(self):
    model = self._get_compiled_multi_io_model()
    eval_result = model.evaluate_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps=3)
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)

    # Set weights for one output.
    eval_result = model.evaluate_generator(
        custom_generator_multi_io(
            sample_weights={'output_2': self.sample_weight_2}),
        steps=3)
    self.assertAllClose(eval_result,
                        self.expected_batch_result_with_weights_output_2, 1e-3)


@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):
  """Checks metric/loss values on a single-input single-output model."""

  def _get_model(self):
    # Same frozen all-ones architecture as the multi-IO case (pred = 3 * x).
    x = layers.Dense(3, kernel_initializer='ones', trainable=False)
    out = layers.Dense(
        1, kernel_initializer='ones', name='output', trainable=False)
    model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
        weighted_metrics=[
            metrics.MeanSquaredError(name='mean_squared_error_2')
        ],
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  def _custom_generator(self, sample_weight=None):
    """Endless generator over 4 fixed samples in batches of 2."""
    batch_size = 2
    num_samples = 4
    x = np.asarray([[1.], [2.], [3.], [4.]])
    y = np.asarray([[2.], [4.], [6.], [8.]])
    w = sample_weight
    i = 0
    while True:
      batch_index = i * batch_size % num_samples
      i += 1
      start = batch_index
      end = start + batch_size
      yield x[start:end], y[start:end], None if w is None else w[start:end]

  def setUp(self):
    super(TestMetricsCorrectnessSingleIO, self).setUp()
    self.x = np.asarray([[1.], [2.], [3.], [4.]])
    self.y = np.asarray([[2.], [4.], [6.], [8.]])
    self.sample_weight = np.asarray([2., 3., 4., 5.])
    # Class weights chosen so each target class gets the same weight as the
    # corresponding entry of `sample_weight` above.
    self.class_weight = {i: 1 for i in range(10)}
    self.class_weight.update({2: 2, 4: 3, 6: 4, 8: 5})

    # y_true = [[2.], [4.], [6.], [8.]], y_pred = [[3.], [6.], [9.], [12.]]

    # Metric:
    #   Total = ((3 - 2)^2 + (6 - 4)^2) + ((9 - 6)^2 + (12 - 8)^2) = 30,
    #   Count = 2 + 2
    #   Result = 7.5

    # Weighted metric:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
    #         = 130
    #   Count = (2 + 3) + (4 + 5)
    #   Result = 9.2857141

    # Total loss with weights:
    #   Total = ((3 - 2)^2 * 2  + (6 - 4)^2 * 3) +
    #           ((9 - 6)^2 * 4 + (12 - 8)^2 * 5)
    #         = 130,
    #   Count = 2 + 2
    #   Result = 32.5

    # Total loss without weights:
    #   Total = ((3 - 2)^2 + (6 - 4)^2) +
    #           ((9 - 6)^2 + (12 - 8)^2)
    #         = 30,
    #   Count = 2 + 2
    #   Result = 7.5

    wmse = 'mean_squared_error_2'

    self.expected_fit_result_with_weights = {
        'mean_squared_error': [7.5, 7.5],
        wmse: [9.286, 9.286],
        'loss': [32.5, 32.5]
    }

    self.expected_fit_result = {
        'mean_squared_error': [7.5, 7.5],
        wmse: [7.5, 7.5],
        'loss': [7.5, 7.5]
    }

    # In the order: 'loss', 'mean_squared_error', 'mean_squared_error_2'
    self.expected_batch_result_with_weights = [32.5, 7.5, 9.286]
    self.expected_batch_result = [7.5, 7.5, 7.5]

  def test_fit(self):
    model = self._get_model()
    history = model.fit(
        self.x, self.y, batch_size=2, epochs=2, shuffle=False)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_fit_with_sample_weight(self):
    model = self._get_model()
    history = model.fit(
        self.x,
        self.y,
        sample_weight=self.sample_weight,
        batch_size=2,
        epochs=2,
        shuffle=False)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_fit_with_class_weight(self):
    model = self._get_model()
    history = model.fit(
        self.x,
        self.y,
        class_weight=self.class_weight,
        batch_size=2,
        epochs=2,
        shuffle=False)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_eval(self):
    model = self._get_model()
    eval_result = model.evaluate(self.x, self.y, batch_size=2)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)

  def test_eval_with_sample_weight(self):
    model = self._get_model()
    eval_result = model.evaluate(
        self.x, self.y, batch_size=2, sample_weight=self.sample_weight)
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)

    # Verify that metric value is same with arbitrary weights and batch size.
    x = np.random.random((50, 1))
    y = np.random.random((50, 1))
    w = np.random.random((50,))
    mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[1]
    mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[1]
    self.assertAllClose(mse1, mse2, 1e-3)

  def test_train_on_batch(self):
    model = self._get_model()
    result = model.train_on_batch(self.x, self.y)
    self.assertAllClose(result, self.expected_batch_result, 1e-3)

  def test_train_on_batch_with_sample_weight(self):
    model = self._get_model()
    result = model.train_on_batch(
        self.x, self.y, sample_weight=self.sample_weight)
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)

  def test_train_on_batch_with_class_weight(self):
    model = self._get_model()
    result = model.train_on_batch(
        self.x, self.y, class_weight=self.class_weight)
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)

  def test_test_on_batch(self):
    model = self._get_model()
    result = model.test_on_batch(self.x, self.y)
    self.assertAllClose(result, self.expected_batch_result, 1e-3)

  def test_test_on_batch_with_sample_weight(self):
    model = self._get_model()
    result = model.test_on_batch(
        self.x, self.y, sample_weight=self.sample_weight)
    self.assertAllClose(result, self.expected_batch_result_with_weights, 1e-3)

  def test_fit_generator(self):
    model = self._get_model()
    history = model.fit_generator(
        self._custom_generator(), steps_per_epoch=2, epochs=2)
    for key, value in self.expected_fit_result.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_fit_generator_with_sample_weight(self):
    model = self._get_model()
    history = model.fit_generator(
        self._custom_generator(sample_weight=self.sample_weight),
        steps_per_epoch=2,
        epochs=2)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_fit_generator_with_class_weight(self):
    model = self._get_model()
    history = model.fit_generator(
        self._custom_generator(),
        steps_per_epoch=2,
        epochs=2,
        class_weight=self.class_weight)
    for key, value in self.expected_fit_result_with_weights.items():
      self.assertAllClose(history.history[key], value, 1e-3)

  def test_eval_generator(self):
    model = self._get_model()
    eval_result = model.evaluate_generator(self._custom_generator(), steps=2)
    self.assertAllClose(eval_result, self.expected_batch_result, 1e-3)

  def test_eval_generator_with_sample_weight(self):
    model = self._get_model()
    eval_result = model.evaluate_generator(
        self._custom_generator(sample_weight=self.sample_weight), steps=2)
    self.assertAllClose(eval_result, self.expected_batch_result_with_weights,
                        1e-3)


@keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@parameterized.parameters([
    losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
    losses_utils.ReductionV2.AUTO, losses_utils.ReductionV2.SUM
])
class TestOutputLossMetrics(keras_parameterized.TestCase):
  """Checks reported per-output loss values for each loss reduction mode."""

  def _get_compiled_multi_io_model(self, loss):
    model = get_multi_io_model()
    model.compile(
        optimizer='rmsprop',
        loss=loss,
        run_eagerly=testing_utils.should_run_eagerly())
    return model

  def setUp(self):
    super(TestOutputLossMetrics, self).setUp()
    self.x = np.asarray([[1.], [2.], [3.], [4.], [5.]])
    self.y1 = np.asarray([[2.], [4.], [6.], [8.], [10.]])
    self.y2 = np.asarray([[1.], [2.], [3.], [4.], [5.]])
    self.sample_weight_1 = np.asarray([2., 3., 4., 5., 6.])
    self.sample_weight_2 = np.asarray([3.5, 2.5, 1.5, 0.5, 3.])

    # y_true_1 = [[2.], [4.], [6.], [8.], [10.]]
    # y_pred_1 = [[3.], [6.], [9.], [12.], [15.]]
    # y_true_2 = [[1.], [2.], [3.], [4.], [5.]]
    # y_pred_2 = [[3.], [6.], [9.], [12.], [15.]]

    # Loss `output_1`:
    #   Per-sample weighted losses
    #   Batch 1 = [(3 - 2)^2 * 2, (6 - 4)^2 * 3)] = [2, 12]
    #   Batch 2 = [((9 - 6)^2 * 4, (12 - 8)^2 * 5)] = [36, 80]
    #   Batch 3 = [(15 - 10)^2 * 6] = [150]

    #   Result (reduction=SUM) = ((2 + 12)*2 + (36 + 80)*2 + 150) / 5 = 82
    #   Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 280 / 5 = 56

    # Loss `output_2`:
    #   Per-sample weighted losses
    #   Batch 1 = [(3 - 1)^2 * 3.5, (6 - 2)^2 * 2.5)] = [14, 40]
    #   Batch 2 = [(9 - 3)^2 * 1.5, (12 - 4)^2 * 0.5)] = [54, 32]
    #   Batch 3 = [(15 - 5)^2 * 3] = [300]

    #   Result (reduction=SUM) = ((14 + 40)*2 + (54 + 32)*2 + 300) / 5 = 116
    #   Result (reduction=SUM_OVER_BATCH_SIZE/AUTO/NONE) = 440 / 5 = 88

    # When reduction is 'NONE' loss value that is passed to the optimizer will
    # be vector loss but what is reported is a scalar, which is an average of
    # all the values in all the batch vectors.

    # Total loss = Output_loss_1 + Output_loss_2

    sum_over_batch_size_fit_result = {
        'loss': [144, 144],
        'output_1_loss': [56, 56],
        'output_2_loss': [88, 88],
    }

    self.expected_fit_result = {
        losses_utils.ReductionV2.NONE:
            sum_over_batch_size_fit_result,
        losses_utils.ReductionV2.SUM: {
            'loss': [198, 198],
            'output_1_loss': [82, 82],
            'output_2_loss': [116, 116],
        },
        losses_utils.ReductionV2.AUTO:
            sum_over_batch_size_fit_result,
        losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
            sum_over_batch_size_fit_result,
    }

    # In the order: 'loss', 'output_1_loss', 'output_2_loss',
    self.expected_batch_result = {
        losses_utils.ReductionV2.NONE: [144, 56, 88],
        losses_utils.ReductionV2.SUM: [198, 82, 116],
        losses_utils.ReductionV2.AUTO: [144, 56, 88],
        losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE: [144, 56, 88],
    }

    # 2 + 12 + 36 + 80 + 150 = 280
    # 14 + 40 + 54 + 32 + 300 = 440
    self.expected_single_batch_result = [720, 280, 440]

  def test_fit(self, reduction):
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    history = model.fit([self.x, self.x], [self.y1, self.y2],
                        sample_weight={
                            'output_1': self.sample_weight_1,
                            'output_2': self.sample_weight_2,
                        },
                        batch_size=2,
                        epochs=2,
                        shuffle=False)
    for key, value in self.expected_fit_result[reduction].items():
      self.assertAllClose(history.history[key], value)

  def test_eval(self, reduction):
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    eval_result = model.evaluate([self.x, self.x], [self.y1, self.y2],
                                 batch_size=2,
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    self.assertAllClose(eval_result, self.expected_batch_result[reduction])

  def test_train_on_batch(self, reduction):
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    result = model.train_on_batch([self.x, self.x], [self.y1, self.y2],
                                  sample_weight={
                                      'output_1': self.sample_weight_1,
                                      'output_2': self.sample_weight_2,
                                  })

    expected_values = self.expected_batch_result[reduction]
    if reduction == losses_utils.ReductionV2.SUM:
      # A single batch covers all 5 samples, so SUM is not divided per-batch.
      expected_values = self.expected_single_batch_result
    self.assertAllClose(result, expected_values)

  def test_test_on_batch(self, reduction):
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    result = model.test_on_batch([self.x, self.x], [self.y1, self.y2],
                                 sample_weight={
                                     'output_1': self.sample_weight_1,
                                     'output_2': self.sample_weight_2,
                                 })
    expected_values = self.expected_batch_result[reduction]
    if reduction == losses_utils.ReductionV2.SUM:
      expected_values = self.expected_single_batch_result
    self.assertAllClose(result, expected_values)

  def test_fit_generator(self, reduction):
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    history = model.fit_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps_per_epoch=3,
        epochs=2)
    for key, value in self.expected_fit_result[reduction].items():
      self.assertAllClose(history.history[key], value)

  def test_eval_generator(self, reduction):
    model = self._get_compiled_multi_io_model(
        loss=losses.MeanSquaredError(reduction=reduction))
    eval_result = model.evaluate_generator(
        custom_generator_multi_io(
            sample_weights=[self.sample_weight_1, self.sample_weight_2]),
        steps=3)
    self.assertAllClose(eval_result, self.expected_batch_result[reduction])


if __name__ == '__main__':
  test.main()
import unittest class MyTestCase (unittest.TestCase): def test_something ( self ): self.assertEqual (True, False) if __name__ == '__main__': unittest.main ()
""" Postfix relay domains extension forms. """ from django import forms from django.core.urlresolvers import reverse from django.http import QueryDict from django.utils.translation import ugettext as _, ugettext_lazy from modoboa.lib import events from modoboa.lib.form_utils import DynamicForm, DomainNameField, TabForms from modoboa.lib.web_utils import render_to_json_response from modoboa_admin.models import Domain, DomainAlias from .models import RelayDomain, RelayDomainAlias class RelayDomainFormGeneral(forms.ModelForm, DynamicForm): aliases = DomainNameField( label=ugettext_lazy("Alias(es)"), required=False, help_text=ugettext_lazy( "Alias(es) of this relay domain. Indicate only one name per input" ", press ENTER to add a new input." ) ) class Meta: model = RelayDomain exclude = ['dates'] widgets = { "service": forms.Select(attrs={"class": "form-control"}) } def __init__(self, *args, **kwargs): self.oldname = None if "instance" in kwargs: self.oldname = kwargs["instance"].name super(RelayDomainFormGeneral, self).__init__(*args, **kwargs) self.field_widths = { "service": 3 } if args and isinstance(args[0], QueryDict): self._load_from_qdict(args[0], "aliases", DomainNameField) elif 'instance' in kwargs: rd = kwargs['instance'] for pos, rdalias in enumerate(rd.relaydomainalias_set.all()): name = "aliases_%d" % (pos + 1) self._create_field(forms.CharField, name, rdalias.name, 3) def clean(self): """Custom fields validaton. We want to prevent duplicate names between domains, relay domains, domain aliases and relay domain aliases. The validation way is not very smart... 
""" super(RelayDomainFormGeneral, self).clean() cleaned_data = self.cleaned_data for dtype, label in [(Domain, _('domain')), (DomainAlias, _('domain alias')), (RelayDomainAlias, _('relay domain alias'))]: try: dtype.objects.get(name=cleaned_data['name']) except dtype.DoesNotExist: pass else: self.add_error( "name", _("A %s with this name already exists") % label ) break for k in cleaned_data.keys(): if not k.startswith("aliases"): continue if not cleaned_data[k]: del cleaned_data[k] continue for dtype, name in [(RelayDomain, _('relay domain')), (DomainAlias, _('domain alias')), (Domain, _('domain'))]: try: dtype.objects.get(name=cleaned_data[k]) except dtype.DoesNotExist: pass else: self.add_error( k, _("A %s with this name already exists") % name) break return cleaned_data def save(self, user, commit=True, rdomalias_post_create=False): """Custom save method. As relay domain aliases are defined using the same form as relay domains, we need to save them manually. :param ``User`` user: connected user """ rd = super(RelayDomainFormGeneral, self).save(commit=False) if commit: rd.oldname = self.oldname rd.save() aliases = [] for k, v in self.cleaned_data.iteritems(): if not k.startswith("aliases"): continue if v in ["", None]: continue aliases.append(v) for rdalias in rd.relaydomainalias_set.all(): if rdalias.name not in aliases: rdalias.delete() else: aliases.remove(rdalias.name) if aliases: events.raiseEvent( "CanCreate", user, "relay_domain_aliases", len(aliases) ) for alias in aliases: try: rd.relaydomainalias_set.get(name=alias) except RelayDomainAlias.DoesNotExist: pass else: continue al = RelayDomainAlias( name=alias, target=rd, enabled=rd.enabled ) al.save(creator=user) \ if rdomalias_post_create else al.save() return rd class RelayDomainForm(TabForms): """Specific edition form for relay domains. We use a *tabs* compatible form because extensions can add their own tab. 
(ex: amavis) """ def __init__(self, request, *args, **kwargs): self.user = request.user self.forms = [] if self.user.has_perm("modoboa_admin_relaydomains.change_relaydomain"): self.forms.append({ 'id': 'general', 'title': _("General"), 'formtpl': 'modoboa_admin_relaydomains/relaydomain_form.html', 'cls': RelayDomainFormGeneral, 'mandatory': True }) cbargs = [self.user] if "instances" in kwargs: cbargs += [kwargs["instances"]["general"]] self.forms += events.raiseQueryEvent("ExtraRelayDomainForm", *cbargs) super(RelayDomainForm, self).__init__(request, *args, **kwargs) def extra_context(self, context): """Additional content. """ rdom = self.instances["general"] context.update({ 'action': reverse( "modoboa_admin_relaydomains:relaydomain_change", args=[rdom.id]), 'formid': 'rdomform', 'title': rdom.name }) def save(self): """Custom save method As forms interact with each other, it is easier to make custom code to save them. """ self.forms[0]['instance'].save( self.request.user, rdomalias_post_create=True ) for f in self.forms[1:]: f["instance"].save(self.request.user) def done(self): events.raiseEvent('RelayDomainModified', self.instances["general"]) return render_to_json_response(_('Relay domain modified'))
# -*- coding: utf-8 -*- # author : anthony # version : python 3.6 import os import json import time from core import db_handler from conf import settings from core import logger def login_required(func): """ 验证用户是否登陆 :param func: :return: """ def wrapper(*args, **kwargs): if args[0].get('is_authenticated'): return func(*args, **kwargs) else: exit("User is not authenticated.") return wrapper def acc_auth2(account, password): """ 优化版认证接口 :param account: :param password: :return: """ db_api = db_handler.db_handler() data = db_api("select * from accounts where account=%s" % account) if data['password'] == password: exp_time_stamp = time.mktime(time.strptime(data['expire_date'], "%Y-%m-%d")) if time.time() > exp_time_stamp: print("\033[31;1m 账户已过期 \033[0m") else: # 通过认证 return data else: print("\033[31;1m 账户id不正确\033[0m") def acc_login(user_data, log_obj): """ 帐户登录功能 :param user_data: 用户信息数据,仅保存在内存中 :param log_obj: 写日志 :return: """ retry_count = 0 # 重试次数 while user_data['is_authenticated'] is not True and retry_count < 3: account = input("\033[32;1m account: \033[0m").strip() password = input("\033[32;1m password: \033[0m").strip() auth = acc_auth2(account, password) if auth: # None = False auth != None 没有None 意思通过认证 user_data['is_authenticated'] = True user_data['account_id'] = account return auth retry_count += 1 else: log_obj.error("account [%s] too many login attempts" % account) exit()
import re
import numpy as np
import matplotlib.pyplot as plt
import h5py


def _parse_numbers(line):
    """Split a ';'/',' separated line into a list of ints and floats."""
    values = []
    for token in re.split(';|,', line):
        values.append(float(token) if '.' in token else int(token))
    return values


# Use a context manager so the input file is always closed
# (the original left the handle open for the life of the process).
with open("/Users/mac/Desktop/ca1_step1_input_data.txt") as text:
    # --- header: first line holds parameter names, second the values ---
    line1 = text.readline()
    line2 = text.readline()
    for junk in ("#", " ", "\n", "(s)"):
        line1 = line1.replace(junk, "")
    for junk in ("\n", "\t", " "):
        line2 = line2.replace(junk, "")
    var_names = re.split(';|,', line1)
    var_numbers = re.split(';|,', line2)
    print(var_names)
    print(var_numbers)

    # convert the header values to numbers
    var_new_num = _parse_numbers(line2)

    # Bind each header name to its value at module scope
    # (e.g. time_steps, time_step, radius, v_variance, N_particles).
    for name, value in zip(var_names, var_new_num):
        exec(f"{name} = {value}")
    print(time_steps, time_step, radius, v_variance, N_particles)

    # --- body: one block of particle rows per "# time" section ---
    data = []
    linelist = []
    line_new = text.readline()
    while line_new != '':
        if '# time' in line_new:
            # a new time step starts: flush the rows collected so far
            data.append(linelist)
            linelist = []
            line_new = text.readline()
            if '# x' in line_new:
                # skip the per-step column-header line
                line_new = text.readline()
        if line_new == "\n":
            line_new = text.readline()
        else:
            for junk in ("\n", "\t", " "):
                line_new = line_new.replace(junk, "")
            linelist.append(_parse_numbers(line_new))
            line_new = text.readline()
    data.append(linelist)

# drop the empty list flushed before the very first "# time" marker
data = list(filter(None, data))
print(len(data), len(data[0]), len(data[0][0]), type(data[0][0][0]))

# ndarray of shape (steps, particles, 4)
data = np.array(data)
print(type(data))
print(data.dtype)
print(data.shape)

# columns 0-1 are positions, 2-3 velocities; move components to axis 1
R = data[0:, 0:, 0:2]
V = data[0:, 0:, 2:4]
R = np.swapaxes(R, 1, 2)
V = np.swapaxes(V, 1, 2)
print(R.shape)
print(V.shape)
print(type(R))

plt.figure(figsize=(8, 8))
plt.scatter(R[0][0][:], R[0][1][:])
plt.axis([-1, 1, -1, 1])
plt.show()

with h5py.File("ca1_step1_input_data.hdf5", "w") as f:
    f.create_dataset("R", data=R)
    f.create_dataset("V", data=V)
    f.attrs['time_steps'] = time_steps
    f.attrs['time_step'] = time_step
    f.attrs['radius'] = radius
    f.attrs['v_variance'] = v_variance
    f.attrs['N_particles'] = N_particles
# -*- coding: utf-8 -*-
import subprocess
import win32api
import win32con
from pykeyboard import PyKeyboard
import time

__author__ = 'Kan'

keyboard = PyKeyboard()

# Launch the QQ client and give it time to draw the login window.
qq_exe = r"C:\Program Files (x86)\Tencent\QQ\Bin\QQ.exe"
subprocess.Popen([qq_exe])
time.sleep(5)

# Move the cursor onto the account input box of the login dialog.
'''windll.user32.SetCursorPos(969, 583)'''
win32api.SetCursorPos((973, 581))
time.sleep(1)

# Double-click to focus the input box (two down/up pairs).
for _ in range(2):
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
time.sleep(2)

# Type the QQ account number.
keyboard.type_string('XXXXXX')  # XXXXXX = QQ account
time.sleep(2)

# Press TAB (virtual-key 9) to jump to the password field.
win32api.keybd_event(9, 0, 0, 0)
win32api.keybd_event(9, 0, win32con.KEYEVENTF_KEYUP, 0)
time.sleep(2)

# Type the password.
keyboard.type_string('YYYYYY')  # YYYYYY = password
time.sleep(1)

# Press ENTER (virtual-key 13) to submit the login form.
win32api.keybd_event(13, 0, 0, 0)
win32api.keybd_event(13, 0, win32con.KEYEVENTF_KEYUP, 0)
time.sleep(5)
# encoding:UTF-8


########################################################################
class AndRule:
    """Composite rule that matches only when every child rule matches."""

    #----------------------------------------------------------------------
    def __init__(self, *args):
        """Store the child rules (each must expose ``matches(exchange)``)."""
        self.rules = args

    #----------------------------------------------------------------------
    def matches(self, exchange):
        """Return True if all child rules match *exchange*.

        Uses a generator expression so evaluation short-circuits at the
        first non-matching rule (the original built a full list first,
        always evaluating every rule).
        """
        return all(rule.matches(exchange) for rule in self.rules)
# -*- coding: utf-8 -*-
import requests_mock

from lib.UpcomingProductsHandler import UpcomingProductsHandler
from tests.fixtures import Fixtures


class TestUpcomingProductsHandler:
    """Command matching and product-table rendering for !upcoming."""

    def test_can_handle(self):
        handler = UpcomingProductsHandler()
        # The exact command is accepted...
        assert handler.can_handle(Fixtures.event('!upcoming')) == (True, None)
        # ...while whitespace/prefix variants and other commands are not.
        for bad in ('!upcoming ', ' !upcoming', 'upcoming', '! upcoming',
                    '/upcoming', '\\upcoming', '!products'):
            assert handler.can_handle(Fixtures.event(bad)) == (False, None)

    def test_get_products(self):
        handler = UpcomingProductsHandler()
        mock_html = Fixtures.upcoming_products_html()
        with requests_mock.Mocker() as m:
            m.get(handler.upcoming_url, text=mock_html)
            table = handler.get_products()
        expected_table = (
            '```\n'
            '+Upcoming Products---+------------------+-------------------------------+-------+\n'
            '| Product            | Status           | Type                          | MSRP  |\n'
            '+--------------------+------------------+-------------------------------+-------+\n'
            '| Kampala Ascendent  | Shipping Now     | Kitara Cycle - Data Packs     | 14.95 |\n'
            '| Revised Core Set   | Shipping Now     | Android: Netrunner Core Set   | 39.95 |\n'
            '| Reign and Reverie  | At the Printer   | Deluxe Expansions             | 29.95 |\n'
            '| Order and Chaos    | Awaiting Reprint | Deluxe Expansions             | 29.95 |\n'
            '| Chrome City        | Awaiting Reprint | The SanSan Cycle – Data Packs | 14.95 |\n'
            '| Old Hollywood      | Awaiting Reprint | The SanSan Cycle – Data Packs | 14.95 |\n'
            '| Honor and Profit   | Awaiting Reprint | Deluxe Expansions             | 29.95 |\n'
            '| The Underway       | Awaiting Reprint | The SanSan Cycle – Data Packs | 14.95 |\n'
            '+--------------------+------------------+-------------------------------+-------+\n'
            '```'
        )
        assert table == expected_table
# Generated by Django 2.0 on 2018-03-15 18:39

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial schema: Evento, Imagem_evento, Imagens_noticia and Noticia."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Event with its main image; extra images live in Imagem_evento.
        migrations.CreateModel(
            name='Evento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=100)),
                ('descricao', models.CharField(max_length=2000)),
                ('data_evento', models.DateField()),
                ('local', models.CharField(max_length=100)),
                ('valor', models.CharField(max_length=20)),
                ('img_principal', models.ImageField(upload_to='eventos/main/%Y/%m/%d')),
            ],
        ),
        migrations.CreateModel(
            name='Imagem_evento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('imagem', models.ImageField(upload_to='eventos/%Y/%m/%d')),
                ('evento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='evento', to='core.Evento')),
            ],
        ),
        migrations.CreateModel(
            name='Imagens_noticia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('imagem', models.ImageField(upload_to='noticias/%Y/%m/%d')),
                ('legenda', models.CharField(max_length=30)),
                ('data_upload', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Noticia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=100)),
                ('data_criacao', models.DateTimeField(auto_now_add=True)),
                ('primeiro_paragrafo', models.CharField(max_length=1000)),
                ('segundo_paragrafo', models.CharField(max_length=1000, null=True)),
                ('consideração_final', models.CharField(max_length=100, null=True)),
                ('autor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='noticia', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after Noticia exists so the FK target is defined.
        migrations.AddField(
            model_name='imagens_noticia',
            name='noticia',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='noticia', to='core.Noticia'),
        ),
    ]
import requests
import datetime
import time
import json
from bs4 import BeautifulSoup

# Template record for one headline; copied with dict() before being stored.
Terkini = {
    "time": "",
    "kategori": "",
    "judul": "",
    "waktu": "",
}

data = []

# Fetch and parse the Republika front page.
page = requests.get("https://www.republika.co.id/")
obj = BeautifulSoup(page.text, 'html.parser')

for headline in obj.find_all('div', class_='conten1'):
    # Category / title / publish time as rendered on the page.
    Terkini["kategori"] = headline.find('h1').text
    Terkini["judul"] = headline.find('h2').text
    Terkini["waktu"] = headline.find('div', class_='date').text
    # Timestamp of when this scrape ran.
    Terkini["time"] = time.asctime(time.localtime(time.time()))
    data.append(dict(Terkini))
    print(Terkini)

# Persist everything collected in one JSON document.
with open("Terkini.json", "w") as write_file:
    json.dump(data, write_file, indent=4)
# Generated by Django 3.2.4 on 2021-07-06 00:13

from django.db import migrations, models


class Migration(migrations.Migration):
    """Make categoria.id a plain AutoField and refresh the
    post.fecha_creacion default (a dd/mm/YYYY string)."""

    dependencies = [
        ('blog', '0003_alter_post_fecha_creacion'),
    ]

    operations = [
        migrations.AlterField(
            model_name='categoria',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='post',
            name='fecha_creacion',
            # NOTE: the generation date is baked in as a static default.
            field=models.CharField(default='06/07/2021', max_length=50),
        ),
    ]
#Start Import Area
from os import system, name
from time import sleep
import time, pyautogui
from bronzium_opener import text
import bronzium_opener
#End Import Area

print(text.INTRO)
input('\nPress ENTER to continue...')


def clear():
    """Clear the console on Windows ('cls') and Unix/MacOS ('clear')."""
    # Windows
    if name == 'nt':
        _ = system('cls')
    # Unix Distros (Including MacOS)
    else:
        _ = system('clear')


def _announce_start(minutes):
    """Print the start banner and give the user 5 s to switch windows."""
    print(text.STARTING)
    print('\n\nThis pack opener will be running for ' + str(minutes) + ' minutes...' )
    time.sleep(5)


def _press_loop(seconds):
    """Press '1' every 1.5 s until roughly *seconds* seconds have elapsed."""
    remaining = seconds
    while remaining > 0:
        pyautogui.press('1')
        time.sleep(1.5)
        remaining -= 1.5


def opener():
    """Main menu: cycle-based (1) or time-based (2) pack opening."""
    clear()
    print(text.SELECTION)
    selection = int(input('Selection: '))
    clear()
    if selection == 1:
        # --- cycle-based: one cycle is 210 seconds of key presses ---
        cycles = 0
        while cycles == 0:
            print(text.MENU_CYCLES_TOP)
            cycles = int(input('Specify Cycles: '))
            # BUG FIX: recomputed from the 210 s base each pass, so a
            # "go back" (confirm == 2) no longer compounds the multiplier
            # (the original kept doing cycle *= cycles on the same value).
            timer = 210 * cycles
            timer_mins = timer / 60
            timer_hrs = timer_mins / 60
            clear()
            print(text.MENU_CYCLES_RUNTIME)
            print('\nCycles: ' + str(cycles) + '\nMinutes: ' + str(timer_mins) + '\nHours: ' + str(round(timer_hrs, 2)))
            print(text.MENU_GEN_BREAK)
            confirm = int(input(text.CONFIRM))
            if confirm == 1:
                _announce_start(timer_mins)
                _press_loop(timer)
            elif confirm == 2:
                # go back and ask for a new cycle count
                clear()
                cycles = 0
            else:
                input(text.ERROR_GENERAL_INVALID_SELECTION)
                clear()
                opener()
    elif selection == 2:
        # --- time-based: the user supplies minutes or hours directly ---
        timer_mins = 0
        while timer_mins == 0:
            print(text.MENU_TIME_TOP)
            timer = False
            while timer == False:
                timer = int(input('\nWould you like to specify hours or minutes?\n\n1. Minutes\n2. Hours\n\nSelection: '))
                if timer == 1:
                    timer_mins = int(input(text.TIMER_MINS))
                    timer_secs = timer_mins * 60
                    timer_hrs = timer_mins / 60
                    clear()
                    print(text.MENU_TIME_RUNTIME)
                    print('Minutes: ' + str(timer_mins) + '\nHours: ' + str(round(timer_hrs, 2)))
                    print(text.MENU_GEN_BREAK)
                    confirm = int(input(text.CONFIRM))
                    if confirm == 1:
                        _announce_start(timer_mins)
                        _press_loop(timer_secs)
                    elif confirm == 2:
                        clear()
                        timer_mins = 0
                    else:
                        input(text.ERROR_GENERAL_INVALID_SELECTION)
                        clear()
                        opener()
                if timer == 2:
                    timer_hrs = int(input(text.TIMER_HOURS))
                    timer_secs = timer_hrs * 3600
                    timer_mins = timer_hrs * 60
                    clear()
                    print(text.MENU_TIME_RUNTIME)
                    print('Minutes: ' + str(timer_mins) + '\nHours: ' + str(round(timer_hrs, 2)))
                    print(text.MENU_GEN_BREAK)
                    confirm = int(input(text.CONFIRM))
                    if confirm == 1:
                        _announce_start(timer_mins)
                        _press_loop(timer_secs)
                    elif confirm == 2:
                        clear()
                        timer_mins = 0
                    else:
                        input(text.ERROR_GENERAL_INVALID_SELECTION)
                        clear()
                        opener()
    elif selection == 3:
        print('PLACEHOLDER')
        clear()
        opener()
    else:
        input(text.ERROR_GENERAL_INVALID_SELECTION)
        clear()
        opener()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pylab
from mpl_toolkits.mplot3d import Axes3D
import random

# Load the marketing data set.
stock1 = pd.read_csv('C:/Users/ECEM YAMAN/Desktop/MATLAB/FİNANSAL ANALİZ/marketing_data.csv')
# Drop the first column in place.  NOTE: with inplace=True drop() returns
# None, so the original's "stock3 = ..." assignment was always None and
# has been removed.
stock1.drop(stock1.columns[[0]], axis=1, inplace=True)

from sklearn.cluster import KMeans

# Elbow method: fit k = 1..14 and record the within-cluster sum of squares.
wcss = []
for k in range(1, 15):
    kmeans = KMeans(n_clusters=k)
    kmeans.fit(stock1)
    wcss.append(kmeans.inertia_)

plt.plot(range(1, 15), wcss)
plt.xlabel("STOK1 K (CLUSTER) VALUE")
plt.ylabel("wcss")
plt.show()

###############

# Final model with k = 2.
kmeans2 = KMeans(n_clusters=2)
# BUG FIX: the original called fit_predict on `kmeans` (the k=14 model
# left over from the elbow loop) instead of the freshly built kmeans2.
clusters = kmeans2.fit_predict(stock1)
stock1["label"] = clusters

x = stock1.iloc[:, 0:8].values
y = stock1.iloc[:, 0:8].values

# NOTE(review): these scatters assume the frame has literal 'x' and 'y'
# columns — confirm against the CSV schema; the x/y arrays above are unused.
plt.scatter(stock1.x[stock1.label == 0], stock1.y[stock1.label == 0], color="red")
plt.scatter(stock1.x[stock1.label == 1], stock1.y[stock1.label == 1], color="green")
plt.scatter(stock1.x[stock1.label == 2], stock1.y[stock1.label == 2], color="blue")
plt.scatter(stock1.x[stock1.label == 3], stock1.y[stock1.label == 3], color="purple")
plt.scatter(stock1.x[stock1.label == 4], stock1.y[stock1.label == 4], color="yellow")
plt.scatter(stock1.x[stock1.label == 5], stock1.y[stock1.label == 5], color="black")
plt.scatter(stock1.x[stock1.label == 6], stock1.y[stock1.label == 6], color="orange")
plt.scatter(stock1.x[stock1.label == 7], stock1.y[stock1.label == 7], color="pink")
plt.scatter(stock1.x[stock1.label == 8], stock1.y[stock1.label == 8], color="cyan")
plt.scatter(stock1.x[stock1.label == 9], stock1.y[stock1.label == 9], color="brown")
# BUG FIX: 'dark blue' is not a valid matplotlib colour name.
plt.scatter(kmeans2.cluster_centers_[:, 0], kmeans2.cluster_centers_[:, 1], color="darkblue")
plt.show()
#
# (C) 2013 coolo@suse.de, openSUSE.org
# Distribute under GPLv2 or GPLv3
#
# Copy this script to ~/.osc-plugins/ or /var/lib/osc-plugins .
# Then try to run 'osc checker --help' to see the usage.


def _group_find_request(self, package, opts):
    """Return the highest (newest) id among the new/review/declined
    requests touching *package* in openSUSE:Factory, or 0 if none."""
    url = makeurl(opts.apiurl, ['request'],
                  "states=new,review,declined&project=openSUSE:Factory&view=collection&package=%s" % package)
    f = http_GET(url)
    root = ET.parse(f).getroot()
    maxid = 0
    for rq in root.findall('request'):
        # renamed from 'id' to avoid shadowing the builtin
        rq_id = int(rq.attrib['id'])
        if rq_id > maxid:
            maxid = rq_id
    return maxid


def _group_find_group(self, request, opts):
    """Return the highest id of a group request containing *request*, or 0."""
    url = makeurl(opts.apiurl,
                  ['search', "request", "id?match=action/grouped/@id=%s" % request])
    f = http_GET(url)
    root = ET.parse(f).getroot()
    maxid = 0
    for rq in root.findall('request'):
        rq_id = int(rq.attrib['id'])
        if rq_id > maxid:
            maxid = rq_id
    return maxid


def do_group(self, subcmd, opts, *args):
    """${cmd_name}: group packages

    Usage:
       osc group [OPT] [list] [FILTER|PACKAGE_SRC]
           Shows pending review requests and their current state.

    ${cmd_option_list}
    """
    opts.apiurl = self.get_api_url()

    requests = []
    grouptoadd = 0
    for p in args[:]:
        request = self._group_find_request(p, opts)
        if not request:
            print("Can't find a request for", p)
            exit(1)
        group = self._group_find_group(request, opts)
        if group > 0:
            # every package must agree on a single existing group
            if grouptoadd > 0 and grouptoadd != group:
                print("there are two groups:", grouptoadd, group)
                exit(1)
            else:
                grouptoadd = group
        else:
            requests.append(request)

    if grouptoadd > 0:
        # extend the existing group request with each ungrouped request
        for r in requests:
            query = {'cmd': 'addrequest'}
            query['newid'] = str(r)
            u = makeurl(opts.apiurl, ['request', str(grouptoadd)], query=query)
            f = http_POST(u)
            root = ET.parse(f).getroot()
            print("added", r, "to group", grouptoadd)
    else:
        # no group yet: create a fresh one containing all requests
        xml = '<request><action type="group">'
        for r in requests:
            xml += "<grouped id='" + str(r) + "'/>"
        xml += '</action><description>'
        xml += ' '.join(args[:])
        xml += '</description></request>'
        query = {'cmd': 'create'}
        u = makeurl(opts.apiurl, ['request'], query=query)
        f = http_POST(u, data=xml)
        root = ET.parse(f).getroot()
        ET.dump(root)

#Local Variables:
#mode: python
#py-indent-offset: 4
#tab-width: 8
#End:
#!/usr/bin/env python
#
# Author: andrew.galloway@nexenta.com
# Created On: 2013-09-26
# Last Updated: 2016-11-10
# Description: ingestor python script template
#

import sys
import os

from functions import *  # include generic functions file

#
# name of this script
# could be filename, or something unique and recognizable
#
script_name = 'A0-example.py'


#
# put your actual code within this function, be
# sure to exit 0 if successful and exit 1 if not
#
def main(bundle_dir):
    """Validate *bundle_dir* and run the ingestor work.

    Exits with status 1 when the directory is not valid.
    """
    # Print calls use a single concatenated argument so this template
    # now runs unchanged on both Python 2 and Python 3.
    if verify_bundle_directory(script_name, bundle_dir):
        # put code here
        print(script_name + ": directory (" + bundle_dir + ") seems valid.")
    else:
        print(script_name + ": directory (" + bundle_dir + ") not valid.")
        sys.exit(1)


#
# no reason to touch
#
if __name__ == '__main__':
    try:
        os.environ['NXTA_INGESTOR']
    except KeyError:
        print("NXTA_INGESTOR var MUST be set !")
        sys.exit(1)

    if len(sys.argv) <= 1:
        print(script_name + ": no directory specified.")
    else:
        main(sys.argv[1])
# This class defines the Monte-Carlo agent
class MC_agent(object):

    # [Action required]
    # WARNING: make sure this function can be called by the auto-marking script
    def get_action(self, state_policy, epsilon):
        """Epsilon-greedy action selection.

        With probability (1 - epsilon) return the greedy action
        (argmax of *state_policy*); otherwise pick uniformly among the
        NON-greedy actions only.
        """
        max_val = np.argmax(state_policy)
        rand_val = np.random.uniform(0, 1)
        if rand_val < (1 - epsilon):
            return max_val
        # explore: any action except the current greedy one
        options = [idx for idx in range(len(state_policy)) if idx != max_val]
        return np.random.choice(options)

    def solve(self, env):
        """
        Solve a given Maze environment using Monte Carlo learning
        input: env {Maze object} -- Maze to solve
        output:
          - policy {np.array} -- Optimal policy found to solve the given Maze environment
          - values {list of np.array} -- List of successive value functions for each episode
          - total_rewards {list of float} -- Corresponding list of successive total non-discounted sum of reward for each episode
        """
        # Initialisation (can be edited)
        epsilon = 0.2
        gamma = env.get_gamma()
        Q = np.random.rand(env.get_state_size(), env.get_action_size())
        # C accumulates the importance-sampling weights per (state, action)
        C = np.zeros((env.get_state_size(), env.get_action_size()))
        V = np.zeros(env.get_state_size())
        total_rewards = [[[] for i in range(env.get_action_size())]
                         for j in range(env.get_state_size())]
        policy = np.argmax(Q, axis=1)

        num_runs = 50
        run = 0
        while run < num_runs:
            run += 1
            time, env_state, reward, Terminate = env.reset()
            # behaviour policy: uniform over the four actions
            b = np.zeros((env.get_state_size(), env.get_action_size())) + 0.25
            S_arr = np.array([], dtype=int)
            S_arr = np.append(S_arr, env_state)
            A_arr = np.array([], dtype=int)
            R_arr = np.array([], dtype=int)
            # generate one episode following the behaviour policy
            while not Terminate:
                action = self.get_action(b[env_state], epsilon)
                A_arr = np.append(A_arr, action)
                time, env_state, reward, Terminate = env.step(action)
                S_arr = np.append(S_arr, env_state)
                R_arr = np.append(R_arr, reward)

            # Backward pass: off-policy MC with weighted importance sampling
            G = 0
            W = 1
            T = time - 1
            for t in range(T - 1, -1, -1):
                St = S_arr[t]; At = A_arr[t]
                G = (G * gamma) + R_arr[t + 1]
                total_rewards[St][At].append(G)
                # BUG FIX: accumulate the importance weight BEFORE the
                # incremental update; the original never incremented C,
                # so W / C[St][At] divided by zero on the first visit.
                C[St][At] += W
                Q[St][At] = Q[St][At] + ((W / C[St][At]) * (G - Q[St][At]))
                policy[St] = np.argmax(Q[St])
                # NOTE(review): the textbook algorithm breaks out of the
                # episode here; this implementation deliberately skips only
                # the weight update — preserved as written.
                if At != policy[St]:
                    continue
                W = W * (1 / b[St][At])

            # greedy state values from the current Q
            for j in range(len(Q)):
                V[j] = np.max(Q[j])

        values = [V]

        ####
        # Add your code here
        # WARNING: this agent only has access to env.reset() and env.step()
        # You should not use env.get_T(), env.get_R() or env.get_absorbing() to compute any value
        ####

        #def step()

        return policy, values, total_rewards
import bot_header


def main(cmd):
    """Print a summary of the bot's long-poll and API counters."""
    print("main")
    stats = (
        bot_header.LP_REQUESTS_DONE,
        bot_header.LP_MESSAGES_RECEIVED,
        bot_header.LP_MESSAGES_SENT,
        bot_header.API_REQUESTS,
        bot_header.FAILED_API_REQUESTS,
    )
    print(":\n %d LP requests\n %d messages received\n %d messages sent."
          "\n %s API requests done"
          "\n %s API requests failed"
          "" % stats)
import json

from django.shortcuts import render
from django.views import generic
from django.contrib.auth import mixins


class PredictionHub(mixins.LoginRequiredMixin, generic.TemplateView):
    """Landing page for predictions; visible to authenticated users only."""

    template_name = 'prediction_hub.html'
import json from django.http import HttpResponse from curling.lib import HttpClientError import mock from nose.tools import eq_ from test_utils import RequestFactory, TestCase from ..views import ProxyView, NoReference class FakeView(ProxyView): def get(self, request, *args, **kwargs): raise NoReference class TestZippyView(TestCase): def test_no_reference(self): req = RequestFactory().get('/') eq_(FakeView().dispatch(req, reference_name='bob', resource_name='sellers').status_code, 404) class TestAPIView(TestCase): def test_no_reference(self): req = RequestFactory().get('/') with self.settings(ZIPPY_MOCK=False): eq_(ProxyView().dispatch(req, reference_name='bob', resource_name='sellers').status_code, 404) class TestAPIasProxy(TestCase): def setUp(self): super(TestAPIasProxy, self).setUp() p = mock.patch('lib.provider.client.get_client') get_client = p.start() self.addCleanup(p.stop) self.api = mock.Mock() domain = mock.Mock(api=self.api) get_client.return_value = domain self.fake_data = {'foo': 'bar'} def request(self, method, url, resource_name, data=''): api = getattr(RequestFactory(), method) req = api(url, data and json.dumps(data) or '', content_type='application/json') res = ProxyView().dispatch(req, reference_name='reference', resource_name=resource_name) return res def test_proxy_get_params(self): self.api.products.get.return_value = '{}' self.request('get', '/reference/products?foo=bar', 'products') assert self.api.products.get.called eq_(self.api.products.get.call_args[1], self.fake_data) def test_proxy_error_responses(self): # Create a scenario where the proxied API raises an HTTP error. 
data = json.dumps({'error': 'something not found'}) proxy_res = HttpResponse(data, content_type='application/json', status=404) proxy_res.json = data proxy_res.request = RequestFactory().get('http://api/some/endpoint') exc = HttpClientError(proxy_res.content, response=proxy_res) self.api.products.get.side_effect = exc res = self.request('get', '/reference/products?foo=bar', 'products') content = res.render() eq_(content.status_code, 404) def test_proxy_routing(self): self.api.products.get.return_value = '{}' self.request('get', '/reference/products/fake-uuid', 'products') assert self.api.products.get.called def test_proxy_post(self): self.api.products.post.return_value = '{}' self.request('post', '/reference/products/fake-uuid', 'products', self.fake_data) assert self.api.products.post.called eq_(self.api.products.post.call_args[0][0], self.fake_data) def test_proxy_put(self): self.api.products.put.return_value = '{}' self.request('put', '/reference/products/fake-uuid', 'products', self.fake_data) assert self.api.products.put.called eq_(self.api.products.put.call_args[0][0], self.fake_data) def test_proxy_delete(self): self.api.products.delete.return_value = '{}' self.request('delete', '/reference/products/fake-uuid', 'products') assert self.api.products.delete.called
'''
>>> base_convert(2555,2)
'100111111011'
>>> base_convert(42,2)
'101010'
>>> base_convert(0,2)
'0'
>>> base_convert(2555,10)
'2555'
>>> base_convert(0,10)
'0'
>>> base_convert(4711,10)
'4711'
>>> base_convert(2555,16)
'9fb'
>>> base_convert(0,16)
'0'
>>> base_convert(10,16)
'a'
>>> base_convert(2555,36)
'1yz'
>>> base_convert(0,36)
'0'
'''


def base_convert(n, b):
    """Convert a non-negative integer *n* to a string in base *b*.

    Only bases 2, 10, 16 and 36 are accepted (enforced by the assert).
    Digits beyond 9 use lowercase letters.  Returns an error-message
    string when *n* is not an integer.
    """
    assert b in [2, 10, 16, 36]
    try:
        alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
        # One generic repeated-divmod loop replaces the per-base branches
        # of the original (bin()/hex() slicing plus a hand-rolled base-36
        # loop) while producing the exact same strings.
        if n == 0:
            return '0'
        result = ''
        while n != 0:
            n, i = divmod(n, b)
            result = alphabet[i] + result
        return result
    except TypeError:
        return "Please put integer only!"


if __name__ == "__main__":
    import doctest
    doctest.testmod()

    print(base_convert(0, 2))
    print(base_convert(1, 2))
    print(base_convert(299, 2))
    print(base_convert(0, 10))
    print(base_convert(1, 10))
    print(base_convert(299, 10))
    print(base_convert(0, 16))
    print(base_convert(1, 16))
    print(base_convert(299, 16))
    print(base_convert(0, 36))
    print(base_convert(1, 36))
    print(base_convert(299, 36))
    # NOTE: base 3 is not supported — this last call raises AssertionError.
    print(base_convert(299, 3))
from urllib.parse import quote_plus as urlencode
from urllib.request import urlopen
import json


def words(context):
    """ Convert word array to string. """
    return " ".join(context['word'])


def make_url(corpus, req_type, param):
    """ Build a BLS URL from corpus, request type and query parameters
    (None-valued parameters are skipped). """
    paramparts = [urlencode(k) + "=" + urlencode(str(v))
                  for (k, v) in param.items() if v is not None]
    urlpar = "&".join(paramparts)
    return "http://opensonar.ato.inl.nl/blacklab-server" + \
        "/" + corpus + "/" + req_type + "?" + urlpar


def request(corpus, req_type, param):
    """ Make a request to BLS and return the JSON structure. """
    if 'outputformat' not in param:
        param['outputformat'] = 'json'
    url = make_url(corpus, req_type, param)
    #print(url)
    # Context manager closes the connection deterministically
    # (the original leaked the handle).
    with urlopen(url) as f:
        return json.loads(f.read().decode('utf-8'))


def concordances(cql_query):
    """ Show concordances (KWICs) for CQL query. """
    response = request("zeebrieven", "hits", {
        'patt': cql_query,
        'wordsaroundhit': '3'
    })
    hits = response['hits']
    docs = response['docInfos']
    print('Concordances for ' + cql_query + ':')
    for hit in hits:
        # Show the document title and hit information
        doc = docs[hit['docPid']]
        print ('%25s %-15s %-25s (%s)' % \
            (words(hit['left']), words(hit['match']), words(hit['right']),\
            doc['title']))
    print()


def group(title, cql_query, group_by, param=None):
    """ Show frequency lists for alternative matches.

    BUG FIX: *param* previously defaulted to a shared mutable dict, so
    keys set on one call (patt/group/number/outputformat) leaked into
    every later call; a defensive copy also keeps caller dicts pristine.
    """
    param = {} if param is None else dict(param)
    # If a CQL query was specified, this is a hits request;
    # otherwise, it's a docs request.
    is_hits = cql_query is not None
    req_type = "hits" if is_hits else "docs"
    result_el = "hitGroups" if is_hits else "docGroups"
    if is_hits:
        param['patt'] = cql_query
    param['group'] = group_by
    if 'number' not in param:
        param['number'] = 10  # limit to 10 groups unless specified
    response = request("zeebrieven", req_type, param)
    groups = response[result_el]
    if is_hits:
        print('%s - %s' % (title, cql_query))
    else:
        print(title)
    # renamed loop variable: the original shadowed this function's name
    for grp in groups:
        print (' %-20s %4d' % (grp['identityDisplay'], grp['size']))
    print()


# Find concordances (KWICs) for a CQL query
concordances('[pos="PRN"] "schip"')
concordances('[lemma="groot"] [pos="NOU"]')

""" Show frequency lists for alternative matches. """
group('Spelling variations', '[lemma="water"]', "hit:word:i")
group('Starts with...', '[word="sch(ee?|i)p.*"]', "hit:word:i")
group('Adjectives for a word', '[pos="ADJ"] [lemma="schip"]', "hit:lemma:i")
group('Who\'s the sweetest?', '[lemma="lief"]', "wordright:lemma:i")
group('Documents by year', None, "field:witnessYear_from",
      {'number': 100, 'sort': 'identity'})
from reconocer_rostros_cam import *
from reconocer_rostros_imagen import *
from codificar_rostros import *
import os


def _elegir_modelo(valor_cnn, valor_hog):
    """Interactive detection-model chooser shared by options 2, 3 and 4.

    Returns (opm, modeloDeteccion, valor) where *valor* is the cnn/hog
    value supplied by the caller (altura or resize factor); opm == "-1"
    means "go back".  Mirrors the original inline loops, including their
    quirk of selecting the model before validating the input.
    """
    while True:
        print("\nModelo de detección\n1. cnn (Mayor precisión)\n2. hog (Mayor rapidez)")
        opm = input("\nElija una opción del modelo (1 o 2): ")
        if opm == "1":
            modeloDeteccion = "cnn"
            valor = valor_cnn
        else:
            modeloDeteccion = "hog"
            valor = valor_hog
        if not os.path.exists('codificados' + modeloDeteccion + '.pickle'):
            print("\nNo se ha entrenado con ese modelo")
            continue
        if opm in ['1', '2', '-1']:
            return opm, modeloDeteccion, valor
        print("\nOpción incorrecta")


while True:
    print("""\n1. Entrenar modelo
2. Reconocer una imagen
3. Reconocer imágenes
4. Reconocer video
5. Salir\n""")
    op = input("Elija una opción: ")
    if op not in ['1', '2', '3', '4', '5']:
        print("\nOpción incorrecta")
        continue

    # OPCION 1
    if op == "1":
        print("\nENTRENAR MODELO")
        print("(Ingresar -1 en cualquier entrada para regresar)")
        print("""\n---El directorio que funcionará como dataset debe tener la siguiente estructura:---
\n---Debe contener carpetas con los nombres de las personas, separados por un subguión.---
---Dentro de la carpeta deben haber imágenes en las que aparezca solo dicha persona.---
---(No hay problema si aparece más de una vez en la imagen)---
Estructura de ejemplo:\n
Juan_Perez
|___1.jpg
|___a.jpg
Maria_Ramirez
|___1.jpg
|___2.jpg""")
        while True:
            rutaDataset = input("\nRuta del dataset: ")
            if os.path.isdir(rutaDataset) or rutaDataset == "-1":
                break
            else:
                print("\nDirectorio erróneo")
        if rutaDataset == "-1":
            continue
        # Training has its own chooser: different prompt, altura 250 for
        # cnn and no pickle-existence check (the model is created here).
        while True:
            print("\nModelo de detección (Recomendado Hog)\n1. cnn (Mayor precisión)\n2. hog (Mayor rapidez)")
            opm = input("\nElija una opción del modelo (1 o 2): ")
            if opm in ['1', '2', '-1']:
                break
            else:
                print("\nOpción incorrecta")
        if opm == "-1":
            continue
        if opm == "1":
            modeloDeteccion = "cnn"
            altura = 250
        else:
            modeloDeteccion = "hog"
            altura = 2500
        entrenar(rutaDataset, 'codificados' + modeloDeteccion + '.pickle', modeloDeteccion, altura)
        print("\nModelo entrenado")

    # OPCION 2
    if op == "2":
        print("\nRECONOCER IMAGEN")
        print("(Ingresar -1 en cualquier entrada para regresar)")
        while True:
            rutaImagen = input("\nRuta de la imagen: ")
            if os.path.exists(rutaImagen) or rutaImagen == "-1":
                break
            else:
                print("\nRuta errónea")
        if rutaImagen == "-1":
            continue
        opm, modeloDeteccion, altura = _elegir_modelo(350, 2500)
        if opm == "-1":
            continue
        reconocerRostroImg('codificados' + modeloDeteccion + '.pickle', modeloDeteccion, rutaImagen, altura)
        print("\nRostros identificados")

    # OPCION 3
    if op == "3":
        print("\nRECONOCER IMÁGENES")
        print("(Ingresar -1 en cualquier entrada para regresar)")
        while True:
            rutaDirectorio = input("\nRuta del directorio: ")
            if os.path.isdir(rutaDirectorio) or rutaDirectorio == "-1":
                break
            else:
                print("\nDirectorio erróneo")
        if rutaDirectorio == "-1":
            continue
        opm, modeloDeteccion, altura = _elegir_modelo(350, 2500)
        if opm == "-1":
            continue
        rutaImagenes = list(paths.list_images(rutaDirectorio))
        for (i, rutaImagen) in enumerate(rutaImagenes):
            reconocerRostroImg('codificados' + modeloDeteccion + '.pickle', modeloDeteccion, rutaImagen, altura)
            # BUG FIX: the original reused `op` here, so typing e.g. "4"
            # or "5" at this prompt fell through into the later menu
            # options after the loop finished.
            seguir = input("\nIngrese q para detener la detección o cualquiera para continuar: ")
            if seguir == "q":
                break
            else:
                continue
        print("\nRostros identificados")

    # OPCION 4
    if op == "4":
        print("\nRECONOCER VIDEO")
        print("(Ingresar -1 en cualquier entrada para regresar)")
        while True:
            rutaVideo = input("\nRuta del video (Ingresar 0 para webcam): ")
            if os.path.exists(rutaVideo) or rutaVideo in ['0', '-1']:
                break
            else:
                print("\nRuta errónea")
        if rutaVideo == "-1":
            continue
        if rutaVideo == "0":
            rutaVideo = int(rutaVideo)
        opm, modeloDeteccion, factor = _elegir_modelo(0.25, 0.30)
        if opm == "-1":
            continue
        reconocerRostroVid('codificados' + modeloDeteccion + '.pickle', modeloDeteccion, rutaVideo, factor)
        print("\nRostros identificados")

    # OPCION 5
    if op == "5":
        print("\nFinalizado")
        break
from django.test import TestCase
from django.core.urlresolvers import resolve

from partials.controller.user_management import user_collection_partial, add_user_partial


class UserManagementPartialTestCase(TestCase):
    """URL routing and template tests for the user-management partials."""

    def test_user_collection_partial_resolve_correctly(self):
        """'/partials/user-collection/' routes to user_collection_partial."""
        match = resolve('/partials/user-collection/')
        self.assertEqual(match.func, user_collection_partial)

    def test_add_user_partial_resolve_correctly(self):
        """'/partials/add-user/' routes to add_user_partial."""
        match = resolve('/partials/add-user/')
        self.assertEqual(match.func, add_user_partial)

    def test_user_collection_partial_returns_right_template(self):
        """GET on the user-collection partial renders its template."""
        response = self.client.get('/partials/user-collection/')
        self.assertTemplateUsed(response, 'partials/user_collection.html')

    def test_add_user_partial_returns_right_template(self):
        """GET on the add-user partial renders its template."""
        response = self.client.get('/partials/add-user/')
        self.assertTemplateUsed(response, 'partials/add_user.html')
# -----------------------------------------------------------------------
"""
Sort List

Sort a linked list in O(n log n) time using constant space complexity.

Example :
Input : 1 -> 5 -> 4 -> 3
Returned list : 1 -> 3 -> 4 -> 5
"""


# Definition for singly-linked list.
class ListNode:
    """A singly-linked list node holding `val` and a `next` pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    # @param A : head node of linked list
    # @return the head node in the linked list
    def sortList(self, A):
        """Return the head of list *A* sorted ascending (merge sort)."""
        return merge_sort(A)


# time O(n*log(n))
# space O(log(n)) — recursion stack only; the merge relinks nodes in place.
# (The previous "space O(n)" note was wrong: no new nodes are allocated.)
def merge_sort(head):
    """Split the list at its midpoint, sort each half, and merge them."""
    if not head or not head.next:
        return head

    # `slow` ends on the last node of the first half; `fast` moves twice as
    # fast.  Starting `fast` at head.next keeps the split stable for n == 2.
    slow, fast = head, head.next
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    # (removed: a dead `if not slow` guard — `slow` starts at `head`,
    # which is non-None here, and only ever advances.)

    head2 = slow.next
    slow.next = None  # detach the two halves

    return merge(merge_sort(head), merge_sort(head2))


def merge(head1, head2):
    """Splice two sorted lists into one sorted list; no nodes are created
    except a temporary sentinel that is discarded before returning."""
    res = ListNode('DUMMY')  # sentinel avoids special-casing the new head
    curr = res
    p1, p2 = head1, head2
    while p1 and p2:
        if p1.val < p2.val:
            curr.next = p1
            p1 = p1.next
        else:
            curr.next = p2
            p2 = p2.next
        curr = curr.next
    # At most one of the lists still has nodes; append it wholesale.
    curr.next = p1 or p2
    return res.next


# -----------------------------------------------------------------------
"""
Insertion Sort List

Sort a linked list using insertion sort.

We have explained Insertion Sort at Slide 7 of Arrays
Insertion Sort Wiki has some details on Insertion Sort as well.

Example :
Input : 1 -> 3 -> 2
Return 1 -> 2 -> 3
"""


class Solution:
    # @param A : head node of linked list
    # @return the head node in the linked list
    def insertionSortList(self, A):
        """Sort list *A* ascending by insertion; node order is preserved,
        only the stored values move, so the original head is returned."""
        if not A or not A.next:
            return A
        curr = A.next
        while curr:
            A = insert(A, curr)
            curr = curr.next
        return A


# time O(n^2)
# space O(1)
def insert(head, curr):
    """Bubble curr's value into the already-sorted prefix [head, curr)
    by repeated value swaps; the node chain itself is untouched."""
    left = head
    while left != curr:
        if left.val > curr.val:
            left.val, curr.val = curr.val, left.val
        left = left.next
    return head


# -----------------------------------------------------------------------
----------------------------------------------------------------------- # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- # -----------------------------------------------------------------------
from mod_user import User


class Privileges:
    """A named collection of administrator privileges."""

    # Default grants used when no explicit list is supplied.
    DEFAULT_PRIVILEGES = ['can add post', 'can delete post', 'can ban user']

    def __init__(self, privileges=None):
        """Store *privileges*; defaults to a copy of DEFAULT_PRIVILEGES.

        The default is copied so instances never share one mutable list.
        (Generalized: the list used to be hard-coded with no way to vary it.)
        """
        if privileges is None:
            privileges = list(self.DEFAULT_PRIVILEGES)
        self.privileges = privileges

    def show_privileges(self):
        """Print each privilege on its own line, after a header."""
        print("Privileges are as follows:")
        for privilege in self.privileges:
            print(privilege)


class Admin(User):
    """A User that additionally carries a Privileges object."""

    def __init__(self, f_name, l_name, year_of_birth, city):
        super().__init__(f_name, l_name, year_of_birth, city)
        self.privileges = Privileges()

    def show_privileges(self):
        """Delegate to the attached Privileges object."""
        self.privileges.show_privileges()
def classify(value):
    """Return the parity/sign label for *value*.

    Order of checks matters: negatives first, then zero, then positives.
    Note Python's `%` yields a non-negative result for a positive modulus,
    so -3 % 2 == 1 and negative odds are caught by the second test.
    """
    if value % 2 == 0 and value < 0:
        return "EVEN NEGATIVE"
    if value % 2 == 1 and value < 0:
        return "ODD NEGATIVE"
    if value == 0:
        return "NULL"
    if value % 2 == 1:
        return "ODD POSITIVE"
    return "EVEN POSITIVE"


def main():
    """Read a count N, then N integers; print one label per integer."""
    n = int(input())
    # Renamed from `list`, which shadowed the builtin.
    values = [int(input()) for _ in range(n)]
    for value in values:
        print(classify(value))


if __name__ == "__main__":
    main()
J, K, L = raw_input().split() J = int(J) K = int(K) L = int(L) count = 0 for i in range(J, K+1): if i % L == 0: count = count + 1 else: continue print count
# Generated by Django 2.0.7 on 2018-09-23 21:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ficha', '0014_auto_20180923_1823'), ] operations = [ migrations.AlterField( model_name='ficha', name='exfisico', field=models.TextField(blank=True, default='Geral: B.E.G., L.O.T.E., eupnéico, normocorado, hidratado \n ACV: Bulhas rítmicas, normofonéticas, em dois tempos, sem sopros ', max_length=500), ), ]
#! /usr/bin/env python import sys import numpy as np sys.path.append('./') # my module from xyzStruct import xyzStruct from xyzTraj import xyzTraj from bMolShape import bMolShape def process_traj(): """ process traj of xyz file """ line = raw_input("enter the filename (xyz format): \n > ") fname = line.strip() line = raw_input("enter the index range (from zero) <i.e. 1-5,9,11>: \n >") frg_ndx1 = line.strip() line = raw_input("enter the index range (from zero) <i.e. 20-37, 39, 41, 42-48>: \n >") frg_ndx2 = line.strip() # traj. info traj = xyzTraj() traj.read_it(filename = fname) # det. shape shape = bMolShape() while True: frg1 = traj.fragment(frg_ndx1) frg2 = traj.fragment(frg_ndx2) t = shape.plane_angle(frg1, frg2) print t if traj.next_model() == 0: break return t def process_struct(): """ process on structure of xyz file """ line = raw_input("enter the filename (xyz format): \n > ") fname = line.strip() line = raw_input("enter the index range (from zero) <i.e. 1-5,9,11>: \n >") frg_ndx1 = line.strip() line = raw_input("enter the index range (from zero) <i.e. 20-37, 39, 41, 42-48>: \n >") frg_ndx2 = line.strip() # mol. info. mol = xyzStruct() mol.read_it(filename = fname) frg1 = mol.fragment(frg_ndx1) frg2 = mol.fragment(frg_ndx2) # det. shape shape = bMolShape() t = shape.plane_angle(frg1, frg2) return t if __name__ == "__main__": # t = process_struct() t = process_traj() print t[1]
#!/usr/bin/python3
"""Minimal TCP client: connect, print the server's greeting, send our hostname."""
import socket
import sys

# Remote server address and port.
host = '120.79.47.182'
port = 8081

# `with` guarantees the socket is closed even if recv/sendall raises
# (the original leaked the socket on any exception before s.close()).
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # Connect to the server at (host, port).
    s.connect((host, port))

    # Read at most 1024 bytes of the server's greeting and display it.
    msg = s.recv(1024)
    print(msg.decode('utf-8'))

    # Reply with this machine's hostname.
    sendmsg = bytes(socket.gethostname(), encoding="utf8")
    s.sendall(sendmsg)
from __future__ import print_function from pyimagesearch.cbir import HSVDescriptor from imutils import paths import progressbar import argparse import cv2 ap = argparse.ArgumentParser() ap.add_argument("-d", "--dataset", required=True, help = "Path to the directory that contains the images to be indexed") ap.add_argument("-i", "--index", required=True, help = "Path to where the features index will be stored") args = vars(ap.parse_args()) desc=HSVDescriptor((4,6,3)) op=open(args["index"],"w") imagepaths=list(paths.list_images(args["dataset"])) widgets=["Indexing: ",progressbar.Percentage()," ",progressbar.Bar(),"",progressbar.ETA()] pbar=progressbar.ProgressBar(maxval=len(imagepaths),widgets=widgets) pbar.start() for (i,imagepath) in enumerate(imagepaths): filename=imagepath[imagepath.rfind("/")+1:] image=cv2.imread(imagepath) features=desc.describe(image) features=[str(x) for x in features] op.write("{},{}\n".format(filename,",".join(features))) pbar.update(i) pbar.finish() print("[info] indexed {} images".format(len(imagepaths)))
#!/usr/bin/env python
# Python 2 stream processor: reads pipe-delimited test records from stdin
# (pre-sorted by student and date) and, per "vsim"/"best" attempt, writes a
# JSON row of running counts and statistics of the student's 'ml' and
# 'overall_ml' test scores up to that attempt's date.
import sys
import json
from datetime import datetime
from collections import OrderedDict
import copy
import operator
from itertools import chain


def split_data(line):
    # Parse one "student|class|date|test_type|score" record from stdin.
    rec = line.strip().split("|")
    current_student = rec[0]
    class_name = rec[1]
    current_date = datetime.strptime(rec[2],"%Y-%m-%d %H:%M:%S")
    test_type = rec[3]
    test_score = float(rec[4])
    return current_student, class_name, current_date, test_type, test_score


def define_globals():
    # Sentinel student id, run timestamp, per-student accumulator keyed by
    # test type, and the output file name.
    prev_student = -1
    #fileName = "data_v2-sorted_dates.csv"
    fileName = "data_v3-sorted_dates.csv"
    now = datetime.now()
    d_list = {'ml':[], 'best':[], 'overall_ml':[]}
    return prev_student, now, d_list, fileName


def evaluate_vector(test_score,count,lst):
    # keeps the vectors length at v_len
    # starts counting the number of test taken at or beyond 20.
    # Once the 20th slot is reached, the final slot turns into a counter of
    # how many additional tests occurred rather than holding a score.
    v_len=20
    if count > v_len:
        lst[v_len - 1] = lst[v_len - 1]+1
    elif count == v_len:
        lst.append(1)
    else:
        lst.append(test_score)
    return lst


def increment_counts(data,d_list,student):
    """For each dated vsim attempt in *data*, count the 'ml' and
    'overall_ml' tests taken on or before that date and build the
    corresponding capped-at-20 score vectors."""
    singl_d = OrderedDict()
    outer_counter = 0
    for vsim in data.iteritems():
        outer_counter+=1
        rank = vsim[0]
        vsim_date_obj = vsim[1][0]
        vsim_date_str = vsim[1][1]
        # check if the vsim can be converted to int
        if vsim[1][2] != None:
            vsim_test_score = float(vsim[1][2])
        else:
            vsim_test_score = vsim[1][2]
        # increment test counts and appends values to the test_vectors
        ml_count_on_date = vsim[1][3]
        oml_count_on_date = vsim[1][4]
        # Row layout: [date str, vsim score, ml count, ml vector,
        #              overall_ml count, overall_ml vector]
        singl_d[rank] = [vsim_date_str,vsim_test_score,ml_count_on_date,[],oml_count_on_date,[]]
        inner_ml_counter = 0
        for ml in d_list['ml']:
            inner_ml_counter+=1
            ml_date_obj = ml[0]
            # compare dates
            if ml_date_obj <= vsim_date_obj:
                # incredment ml count
                singl_d[rank][2] += 1
                singl_d[rank][3]=evaluate_vector(ml[2],singl_d[rank][2],singl_d[rank][3])
        inner_oml_counter = 0
        for oml in d_list['overall_ml']:
            inner_oml_counter += 1
            oml_date_obj = oml[0]
            # compare dates
            if oml_date_obj <= vsim_date_obj:
                # incredment overall_ml count
                singl_d[rank][4] += 1
                # append test score
                singl_d[rank][5]=evaluate_vector(oml[2],singl_d[rank][4],singl_d[rank][5])
                #singl_d[rank][5].append(oml[2])
    # add null values for those students that have not yet taken 20+ exams
    for rank in singl_d:
        length1 = len(singl_d[rank][3])
        length2 = len(singl_d[rank][5])
        if length1 < 20:
            singl_d[rank][3].extend([None]*(20-length1))
        if length2 < 20:
            singl_d[rank][5].extend([None]*(20-length2))
    printable={"scores":singl_d,"student":student}
    return printable


def median(lst):
    # Median of lst ignoring None values.
    # NOTE(review): relies on Python 2 integer division in the index math.
    lst = [ x for x in lst if x is not None]
    lst = sorted(lst)
    n = len(lst)
    if n < 1:
        return None
    if n %2 == 1:
        return lst[((n+1)/2)-1]
    if len(lst) %2 == 0:
        return float(sum(lst[(n/2)-1:(n/2)+1]))/2.0


def mean(lst):
    # Arithmetic mean ignoring None values; None for an empty list.
    lst = [ x for x in lst if x is not None]
    n = float(len(lst))
    if n < 1:
        return None
    else:
        return round(sum(lst)/n,4)


def _ss(data):
    """Return sum of square deviations of sequence data."""
    data = [ x for x in data if x is not None]
    c = mean(data)
    ss = sum((x-c)**2 for x in data)
    return ss


def pstdev(data):
    """Calculates the population standard deviation."""
    data = [ x for x in data if x is not None]
    n = len(data)
    if n < 2:
        return None
    ss = _ss(data)
    pvar = ss/n # the population variance
    return round(pvar**0.5,4)


def print_results(results,f):
    # Emit one JSON list per vsim attempt to file handle *f*; field order
    # matches the header written to stderr in __main__.
    for score in results["scores"].iteritems():
        student = results["student"]
        vsim_count = score[0]
        vsim_date = score[1][0]
        vsim_score = score[1][1]
        prepU_exam_count = score[1][2]
        prepU_vector = score[1][3]
        prepU_median = median(prepU_vector)
        prepU_mean = mean(prepU_vector)
        prepU_stdev = pstdev(prepU_vector)
        overall_ml_count = score[1][4]
        overall_ml_vector = score[1][5]
        overall_ml_median = median(overall_ml_vector)
        overall_ml_mean = mean(overall_ml_vector)
        overall_ml_stdev = pstdev(overall_ml_vector)
        f.write(json.dumps(
            [results["student"],
             vsim_count,
             vsim_date,
             vsim_score,
             prepU_exam_count,
             prepU_median,
             prepU_mean,
             prepU_stdev,
             prepU_vector,
             overall_ml_count,
             overall_ml_median,
             overall_ml_mean,
             overall_ml_stdev,
             overall_ml_vector])+'\n')


def process_data(d_list,now,prev_student):
    """Finish one student: sort their attempts, compute counts/vectors,
    write the JSON rows, and return a fresh empty accumulator."""
    # add date student has not taken vsim
    if len(d_list['best']) == 0:
        d_list['best'] = [(now,datetime.strftime(now,"%Y-%m-%d %H:%M:%S"),None,0,0)]
    # order dates
    d_list['best'] = sorted(d_list['best'],key = operator.itemgetter(0))
    # create an ordered dict of vsim counts by date
    data = OrderedDict(zip(range(0,len(d_list['best'])),d_list['best']))
    # increment ml counts per dated vsim attempt
    results = increment_counts(data,d_list,prev_student)
    # print results
    # NOTE(review): `f` here is the module-global output file opened in
    # __main__ — this function only works inside that `with` block.
    print_results(results,f)
    # reset counts for each new student
    return {'ml':[], 'best':[], 'overall_ml':[]}


if __name__ == '__main__':
    #define globals
    prev_student, now, d_list, fileName = define_globals()
    sys.stderr.write("Header:\nstudent_name, vsim_counts, vsim date, vsim score, prepU_counts, prepU_median, prepU_mean, prepU_stdev, prepU_vector (20 values), overall_ml_counts, overall_ml_median, overall_ml_mean, overall_ml_stdev, overall_ml vector (20 values)\n\n")
    # NOTE(review): "wb" with str writes is Python 2 behavior; in Python 3
    # this would need mode "w".  Also "Reults" below is a typo in the
    # user-facing message (left as-is: runtime string).
    with open(fileName,"wb") as f:
        for line in sys.stdin:
            # parse line
            current_student, current_class, current_date, test_type, test_score = split_data(line)
            # check new student
            if current_student != prev_student and prev_student != -1:
                d_list=process_data(d_list,now,prev_student)
            # append [current_date, date_string, test_score,count] based on test_type, ml_count, overall_ml_count
            d_list[test_type].append([current_date,str(current_date),test_score,0,0])
            #print d_list
            # set prev student
            prev_student = copy.deepcopy(current_student)
        # add the last student
        process_data(d_list,now,prev_student)
    sys.stderr.write("Reults saved: {}\n\n".format(fileName))
#!/usr/bin/python3
## HumanShell created by Rohit Kumar . This is Cross platform it will work on both Linux and Windows ##
"""HumanShell: turn natural-language "thoughts" into shell commands.

Launches browsers/editors/media tools, shows network info, and sends
SMS / WhatsApp messages (via Twilio) and e-mail (via Gmail SMTP).
"""
import os
import platform
import smtplib, ssl

try:
    # FIX: `Client` is used by the SMS/WhatsApp sections below but was
    # never imported in the original — the twilio features always crashed.
    from twilio.rest import Client
    import pyttsx3
except ImportError:
    print("install required libraries, list in requirements.txt")


def clear_screen():
    """Clear the terminal on Windows, Linux or macOS."""
    if platform.system() == "Windows":
        os.system("CLS")
    else:  # Linux and Darwin both use `clear`
        os.system("clear")


def speak(text):
    """Voice feedback; never crash when pyttsx3 failed to import above."""
    try:
        pyttsx3.speak(text)
    except Exception:
        pass


def print_banner():
    """The HUMANSHELL ASCII banner (startup / help / credits).

    NOTE(review): the original art's exact spacing was lost in transit;
    this is a faithful reconstruction of the figlet-style text.
    """
    print(" ####################################################################################")
    print(" #  _   _ _   _ __  __    _    _   _   ____  _   _ _____ _     _                    #")
    print(" # | | | | | | |  \\/  |  / \\  | \\ | | / ___|| | | | ____| |   | |                   #")
    print(" # | |_| | | | | |\\/| | / _ \\ |  \\| | \\___ \\| |_| |  _| | |   | |                   #")
    print(" # |  _  | |_| | |  | |/ ___ \\| |\\  |  ___) |  _  | |___| |___| |___                #")
    print(" # |_| |_|\\___/|_|  |_/_/   \\_\\_| \\_| |____/|_| |_|_____|_____|_____|               #")
    print(" #                                                                                  #")
    print(" #                      Turn Your Thoughts Into Command                             #")
    print(" #             --------------------------------------------------                  #")
    print(" #                Bash/Cmd commands/WhatsApp / SMS / E-Mail                         #")
    print(" ####################################################################################")


def print_menu():
    """The feature menu shown after the welcome line."""
    print(" Text editor          network/communication            others          internal ")
    print(" -------------        --------------------------       ----------      ------------ ")
    print(" Notepad (W)          chrome (C)                       explorer (W)    credits (C) ")
    print(" Vim (L)              firefox (C)                      wmplayer (W)    help (C) ")
    print(" nano (L)             ipaddress (C)                    whoami (C)      exit (C) ")
    print(" gedit (L)            email (C)                        cmd (W)         clear (C) ")
    print(" vscode (C)           sms (C)                          bash (L) ")
    print("                      whatsapp (C)                     calculator (C) ")


def show_help():
    """Usage examples and feature notes."""
    clear_screen()
    print_banner()
    speak("this is help section.")
    print()
    print(" I am ROHIT KUMAR & I am new to Python.This is Human Shell my first project in python. This is Cross platform it will work on both ")
    print(" Linux and Windows. ")
    print()
    print("You can directly use any common command prompt or bash shell commands in human shell ")
    print("OR you can use human type sentences/query to make your work easy (only for mentioned application on main screen)")
    print("For Example :")
    print()
    print("        >> chrome ")
    print("        >> Enter URL : https://github.com   # then press Enter")
    print()
    print("        >> i want to surf web")
    print("        >> I want to securely surf the web   # it will open duckduckgo search engine")
    print("        >> notepad ")
    print("        >> Enter file name :   # then press enter ")
    print()
    print("        >> i want to write something ")
    print("        >> i want to take notes via vim")
    print("        >> i want to take notes in texteditor")
    print("        >> explorer   # this command is for file explorer in windows only")
    print()
    print("        >> plz open browser for me ")
    print("        >> i want to write a note")
    print("        >> plz open vscode for me")
    print()
    print("Default texteditor in 'windows' is Notepad and in 'LINUX' is nano editor")
    print("Default web Browser in 'Windows' is chrome (must have installed in your pc) and in 'linux' is mozilla Firefox")
    print(" This is Still Incomplete and lacking Many thing. I will try to solve it.")
    print("        >> who the hell i am   |   >> who the hell am i")


def show_credits():
    """Author / contact information."""
    clear_screen()
    print_banner()
    print(" My Name is Rohit Kumar. I am a Second Year college Student.")
    print()
    print(" My Email : 29rkwhitelist@gmail.com / 29rohit.code@gmail.com")
    print()
    print(" My github : https://github.com/29rohitkr/")
    print()
    print(" My Linkedin profile : https://www.linkedin.com/in/29rohitkr/")
    print()
    print()


def send_sms():
    """Send an SMS through the Twilio REST API (interactive credentials)."""
    # DANGER! Typing credentials interactively is insecure. See http://twil.io/secure
    print("You must create an account on 'www.twilio.com' to get your own 'account_sid' , 'account_token' and Sandbox number ")
    account_sid = input("Enter Account Sid : ")
    auth_token = input("Enter Auth Token : ")
    client = Client(account_sid, auth_token)
    message = client.messages \
        .create(
            body=input("Enter Your Message : "),
            from_=input("Enter your Number(twilio with Country code) : "),
            to=input("Enter receipent Number (verified Number): ")
        )
    print(message.sid)
    print("for Reference and help in this section goto https://twilio.com/docs")


def send_email():
    """Send an e-mail via Gmail SMTP with STARTTLS."""
    print(" This section will work only if Senders Gmail Account's particular option is active :")
    print(" i.e, 'Allow Less Secure Apps' must be on ")
    print()
    port = 587  # For starttls
    smtp_server = "smtp.gmail.com"
    sender_email = input("Enter your Email : ")
    password = input("Type your password and press enter:")
    receiver_email = input("enter Recepiant Email : ")
    subject = """\
Subject: Hi there

"""
    message = subject + input("Enter your Message : ")
    context = ssl.create_default_context()
    # `with` closes the connection even if login/sendmail raises.
    with smtplib.SMTP(smtp_server, port) as server:
        server.ehlo()  # Can be omitted
        server.starttls(context=context)
        server.ehlo()  # Can be omitted
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, message)


def send_whatsapp():
    """Send a WhatsApp message through the Twilio sandbox."""
    print("You must create an account on 'www.twilio.com' to get your own 'account_sid' , 'account_token' and 'Sandbox number' ")
    account_sid = input("Enter Account_Sid : ")
    auth_token = input("Enter Auth_Token : ")
    client = Client(account_sid, auth_token)
    # this is the Twilio sandbox testing number
    from_whatsapp_number = 'whatsapp:' + input("Enter twilio sandbox number : ")
    # replace this number with your own WhatsApp Messaging number
    to_whatsapp_number = 'whatsapp:' + input("Enter receiver number : ")
    what_message = input(" Enter your message : ")
    client.messages.create(body=what_message, from_=from_whatsapp_number, to=to_whatsapp_number)


# ---------------------------------------------------------------- startup
clear_screen()
print_banner()
speak("Welcome to human shell")
speak("Turn your thoughts into command")

# Host name and architecture — platform.uname() indexing works on every OS
# (the original left `name` undefined on macOS).
uname = platform.uname()
name = str(uname[1])
mac = uname[4]

print("Welcome,", name, ".", " You are using ", "'", str(platform.system()), mac, "'")
print()
print()
print_menu()
print()
print("** notepad is default in Windows / nano is default in linux  //  W = Windows / L = Linux / C = cross-platform ")
print()
print(' Recommended : Type "help", "credits" for more information.')
speak("first read help section to know more about humanshell")
print()

# ---------------------------------------------------------------- main loop
while True:
    print()
    print(" Enter Your Thought and Human Shell will try to process it ")
    print(" ----------------------------------------------------------------------------------------------------------- ")
    print("Example : type help to see help, Now go try it below.")
    print()
    query = input(" Your thought goes here $ ")

    # clear screen
    if any(word in query for word in ("clear", "cls", "CLS", "CLEAR", "Clear")):
        clear_screen()

    # exit
    if ("quit" in query) or ("exit" in query) or ("get out" in query) or ("out of here" in query):
        exit()

    # help section
    if ("help" in query) or ("Help" in query) or ("how to use" in query):
        show_help()

    # web browser (chrome on Windows, firefox on Linux)
    if platform.system() == "Windows":
        if ("browser" in query) or ("web" in query) or (("run" in query) and ("chrome" in query)) or (("open" in query) and ("chrome" in query)):
            if ("secure" in query) or ("protect" in query) or ("privacy" in query) or ("securely" in query):
                speak("Launching chrome in secure mode")
                os.system("chrome www.duckduckgo.com/")
            else:
                speak("Launching chrome")
                os.system("chrome")
    elif platform.system() == "Linux":
        if ("browser" in query) or ("web" in query) or (("run" in query) and ("firefox" in query)) or (("open" in query) and ("firefox" in query)):
            speak("Launching firefox")
            os.system("firefox")

    if query == "chrome":
        os.system("chrome " + input("Enter URL : "))
    if query == "firefox":
        os.system("firefox " + input("Enter URL : "))

    # who am i
    if ("whoami" in query) or ("i am" in query) or ("am i" in query):
        os.system("whoami")

    # text / code editors
    if platform.system() == "Windows":
        if ("texteditor" in query) or ("write" in query) or ("notes" in query) or (("run" in query) and ("notepad" in query)) or (("open" in query) and ("notepad" in query)):
            speak("Launching notepad")
            os.system("notepad")
        if query == "notepad":
            speak("Launching notepad")
            os.system("notepad " + input("Enter File Name : "))
        if ("codeeditor" in query) or ("program" in query) or (("run" in query) and ("vscode" in query)) or (("execute" in query) and ("code" in query)) or (("open" in query) and ("vscode" in query)):
            speak("Launching VSCode")
            os.system("code")
    if platform.system() == "Linux":
        if ("texteditor" in query) or ("write" in query) or ("notes" in query) or (("run" in query) and ("nano" in query)) or (("open" in query) and ("nano" in query)):
            speak("Launching nano")
            os.system("nano")
        if ("codeeditor" in query) or ("code" in query) or ("program" in query) or (("run" in query) and ("vscode" in query)) or (("open" in query) and ("vscode" in query)):
            speak("Launching VSCode")
            os.system("code")
        if ("vimeditor" in query) or (("run" in query) and ("vim" in query)) or (("execute" in query) and ("vim" in query)) or (("open" in query) and ("vim" in query)):
            speak("Launching vim")
            os.system("vim")
        if query == "vim":
            speak("Launching vim")
            os.system("vim " + input("Enter File Name (with extension) : "))
        if ("geditor" in query) or (("run" in query) and ("gedit" in query)) or (("execute" in query) and ("gedit" in query)) or (("open" in query) and ("gedit" in query)):
            speak("Launching gedit")
            os.system("gedit")
        if query == "gedit":
            speak("Launching gedit")
            os.system("gedit " + input("Enter File Name (with extension) : "))
    if query == "vscode":
        speak("Launching VSCode")
        os.system("code " + input("Enter File Name (with extension) : "))

    # calculator
    if ("calculator" in query) or ("calc" in query) or ("calculate" in query) or ("add" in query) or ("sum" in query) or ("subtract" in query) or ("product" in query):
        if platform.system() == "Windows":
            speak("Launching calculator")
            os.system("calc")
        elif platform.system() == "Linux":
            speak("launching bc calculator")
            print("Type 'quit' to exit")
            # FIX: the original line was corrupted ("os.systeproduct") and
            # never launched anything.
            os.system("bc")

    # network information
    if platform.system() == "Windows":
        if ("ipconfig" in query) or ("ipaddress" in query) or ("netstatus" in query) or ("ip" in query) or ("mac address" in query) or ("ip address" in query):
            os.system("ipconfig /all")
    if platform.system() == "Linux":
        if ("ipconfig" in query) or ("ipaddress" in query) or ("netstatus" in query) or ("ifconfig" in query):
            os.system("ifconfig")

    # media player (Windows only)
    if platform.system() == "Windows":
        if ("media" in query) or ("music" in query) or ("wmplayer" in query) or (("run" in query) and ("wmplayer" in query)) or (("open" in query) and ("wmplayer" in query)):
            os.system("wmplayer")

    # file explorer (Windows only)
    if platform.system() == "Windows":
        if ("file explorer" in query) or ("explorer" in query):
            speak("Launching windows file explorer")
            os.system("explorer")

    # SMS via Twilio
    if ("sms" in query) or ("message" in query) or (("send" in query) and ("message" in query)) or (("send" in query) and ("sms" in query)):
        send_sms()

    # e-mail via Gmail SMTP
    if ("mail" in query) or ("email" in query) or (("send" in query) and ("email" in query)) or (("send" in query) and ("mail" in query)):
        send_email()

    # WhatsApp via Twilio sandbox
    if ("whatsapp" in query) or ("chat" in query) or (("instant" in query) and ("chat" in query)):
        send_whatsapp()

    # raw command prompt / shell
    if platform.system() == "Windows":
        if ("cmd" in query) or ("command" in query) or ("dos" in query) or ("prompt" in query) or (("run" in query) and ("cmd" in query)):
            print(" to 'exit' type exit and press enter")
            os.system("cmd")
    if platform.system() == "Linux":
        if ("bash" in query) or ("command" in query) or ("shell" in query) or ("sh" in query) or (("run" in query) and ("bash" in query)):
            print(" to 'exit' type exit and press enter")
            os.system("bash")

    # credits
    if ("credits" in query) or ("programmer" in query) or ("developer" in query) or ("owner" in query):
        show_credits()
class ConsensusStage:
    """Placeholder for a single stage of the consensus protocol.

    Planned stage types:
        * preVote
        * preCommit
        * newRound
        * nil (fraud or no response)
    """

    def __init__(self):
        # TODO: stage state (type, votes, round number) not implemented yet.
        pass
# -*- coding: utf-8 -*-
# flake8: noqa
"""Then step definitions.

Each wrapper below is registered with behave at decoration time, so the
wrapper function's *name* never affects matching.  Several wrappers used to
share one name, silently shadowing each other at module level (F811); they
have been given unique names so tracebacks and greps stay unambiguous.
Registration behaviour is unchanged.
"""
from behave import then
from behave.runner import Context

from tests.functional.steps.then_impl import (
    fab_company_should_be_verified,
    fab_should_be_asked_about_verification_form,
    fab_should_not_see_collaborator,
    fab_should_see_case_study_error_message,
    fas_check_profiles,
    fas_find_supplier_using_case_study_details,
    fas_no_links_to_online_profiles_are_visible,
    fas_should_be_on_profile_page,
    fas_should_be_told_that_message_has_been_sent,
    fas_should_find_all_sought_companies,
    fas_should_find_with_company_details,
    fas_should_not_find_with_company_details,
    fas_should_see_all_case_studies,
    fas_should_see_company_once_in_search_results,
    fas_should_see_different_png_logo_thumbnail,
    fas_should_see_filtered_search_results,
    fas_should_see_highlighted_search_term,
    fas_should_see_png_logo_thumbnail,
    fas_should_see_unfiltered_search_results,
    fas_supplier_cannot_be_found_using_case_study_details,
    fas_supplier_should_receive_message_from_buyer,
    generic_content_of_viewed_pages_should_in_selected_language,
    generic_language_switcher_should_be_set_to,
    generic_page_language_should_be_set_to,
    generic_should_get_email_notifications,
    international_should_see_links_to_industry_pages,
    isd_should_be_told_about_empty_search_results,
    isd_should_see_unfiltered_search_results,
    prof_should_be_told_about_missing_description,
    profile_all_unsupported_files_should_be_rejected,
    profile_business_profile_should_be_ready_for_publishing,
    profile_no_links_to_online_profiles_are_visible,
    profile_profile_is_published,
    profile_should_be_told_about_invalid_links,
    profile_should_get_request_for_becoming_owner,
    profile_should_not_see_options_to_manage_users,
    profile_should_see_all_case_studies,
    profile_should_see_company_details,
    profile_should_see_expected_error_messages,
    profile_should_see_logo_picture,
    profile_should_see_online_profiles,
    profile_supplier_should_be_on_landing_page,
    reg_should_get_verification_email,
    reg_supplier_has_to_verify_email_first,
    should_be_at,
    should_be_taken_to_selected_page,
    should_not_be_able_to_access_page,
    should_not_see_message,
    should_see_message,
    should_see_selected_pages,
    sso_should_be_signed_in_to_sso_account,
    sso_should_be_told_about_password_reset,
    sso_should_get_password_reset_email,
    sso_should_get_request_for_collaboration_email,
    sso_should_see_invalid_password_reset_link_error,
    sud_should_not_see_options_to_manage_users,
    sud_should_see_options_to_manage_users,
)
from tests.functional.steps.when_impl import (
    fas_feedback_request_should_be_submitted,
    fas_should_be_told_about_empty_search_results,
    fas_should_be_told_to_enter_search_term_or_use_filters,
)


@then('"{alias}" should receive an email verification msg entitled "{subject}"')
def then_supplier_should_receive_verification_email(context, alias, subject):
    reg_should_get_verification_email(context, alias, subject=subject)


@then('"{supplier_alias}" should be told that her company has no description')
def then_supplier_should_be_told_about_missing_description(context, supplier_alias):
    response = context.response
    prof_should_be_told_about_missing_description(response, supplier_alias)


@then('"{supplier_alias}" should be told that her company is published')
def then_supplier_should_be_told_that_profile_is_published(context, supplier_alias):
    profile_profile_is_published(context, supplier_alias)


@then('"{supplier_alias}" should be on "{company_alias}"\'s FAS Business Profile page')
def then_supplier_should_be_on_company_fas_page(context, supplier_alias, company_alias):
    fas_should_be_on_profile_page(context, supplier_alias, company_alias)


@then(
    '"{supplier_alias}" should be told that she needs to verify her email address first'
)
def then_supplier_has_to_verify_email_first(context, supplier_alias):
    reg_supplier_has_to_verify_email_first(context, supplier_alias)


@then('"{supplier_alias}" should be on Welcome to your great.gov.uk profile page')
def then_supplier_should_be_on_profile_landing_page(context, supplier_alias):
    profile_supplier_should_be_on_landing_page(context, supplier_alias)


@then('"{supplier_alias}" should be signed in to SSO/great.gov.uk account')
def then_supplier_should_be_signed_in_to_sso_account(context, supplier_alias):
    sso_should_be_signed_in_to_sso_account(context, supplier_alias)


@then('"{supplier_alias}" should see new details on "{page_name}" page')
def then_supplier_should_see_new_details(context, supplier_alias, page_name):
    profile_should_see_company_details(context, supplier_alias, page_name)


@then(
    '"{supplier_alias}" should see links to all online profiles on Edit Business Profile page'
)
def then_supplier_should_see_online_profiles_on_fab(context, supplier_alias):
    profile_should_see_online_profiles(context, supplier_alias)


@then(
    '"{supplier_alias}" should see links to all online profiles on FAS Business Profile page'
)
def then_supplier_should_see_online_profiles_on_fas(context, supplier_alias):
    fas_check_profiles(context, supplier_alias)


@then('"{supplier_alias}" should be told to provide valid links to all online profiles')
def then_supplier_should_be_told_to_use_valid_links(context, supplier_alias):
    profile_should_be_told_about_invalid_links(context, supplier_alias)


@then(
    '"{supplier_alias}" should not see any links to online profiles on edit Business Profile page'
)
def then_no_online_profiles_are_visible_on_fab(context, supplier_alias):
    profile_no_links_to_online_profiles_are_visible(context, supplier_alias)


@then(
    '"{supplier_alias}" should not see any links to online profiles on FAS Business Profile page'
)
def then_no_online_profiles_are_visible_on_fas(context, supplier_alias):
    fas_no_links_to_online_profiles_are_visible(context, supplier_alias)


@then(
    '"{supplier_alias}" should see all case studies on the edit Business Profile page'
)
def then_supplier_should_see_all_case_studies_fab(context, supplier_alias):
    profile_should_see_all_case_studies(context, supplier_alias)


@then('"{supplier_alias}" should see all case studies on the FAS Business Profile page')
def then_supplier_should_see_all_case_studies_fas(context, supplier_alias):
    fas_should_see_all_case_studies(context, supplier_alias)


@then(
    '"{supplier_alias}" should see that logo on FAB Company\'s Directory Profile page'
)
def then_supplier_should_see_logo_picture_on_fab(context, supplier_alias):
    profile_should_see_logo_picture(context, supplier_alias)


@then(
    '"{supplier_alias}" should see that logo on FAS Company\'s Directory Profile page'
)
def then_supplier_should_see_logo_picture_on_fas(context, supplier_alias):
    fas_should_see_png_logo_thumbnail(context, supplier_alias)


@then(
    'for every uploaded unsupported file "{supplier_alias}" should be told that only certain image types can be used as company\'s logo'
)
def then_every_invalid_logo_should_be_rejected(context, supplier_alias):
    profile_all_unsupported_files_should_be_rejected(context, supplier_alias)


@then(
    '"{buyer_alias}" should be able to find company "{company_alias}" on FAS using words from case study "{case_alias}"'
)
def then_buyer_should_find_supplier_using_part_of_case_study(
    context, buyer_alias, company_alias, case_alias
):
    fas_find_supplier_using_case_study_details(
        context, buyer_alias, company_alias, case_alias, properties=context.table
    )


@then(
    '"{buyer_alias}" should NOT be able to find company "{company_alias}" on FAS by using any part of case study "{case_alias}"'
)
def then_buyer_should_not_be_able_to_find_company(
    context, buyer_alias, company_alias, case_alias
):
    fas_supplier_cannot_be_found_using_case_study_details(
        context, buyer_alias, company_alias, case_alias
    )


@then(
    '"{buyer_alias}" should be able to find company "{company_alias}" on FAS using any part of case study "{case_alias}"'
)
def then_buyer_should_find_supplier_using_any_part_of_case_study(
    context, buyer_alias, company_alias, case_alias
):
    fas_find_supplier_using_case_study_details(
        context, buyer_alias, company_alias, case_alias
    )


# Renamed (was a duplicate of the positive variant below).
@then(
    '"{buyer_alias}" should NOT be able to find company "{company_alias}" on FAS using selected company\'s details'
)
def then_buyer_should_not_find_supplier_using_company_details(
    context, buyer_alias, company_alias
):
    fas_should_not_find_with_company_details(context, buyer_alias, company_alias)


@then(
    '"{buyer_alias}" should be able to find company "{company_alias}" on FAS using selected company\'s details'
)
def then_buyer_should_find_supplier_using_company_details(
    context, buyer_alias, company_alias
):
    fas_should_find_with_company_details(context, buyer_alias, company_alias)


@then(
    'the "{page_part}" part of the viewed FAS page should be presented in "{language}" language with probability greater than "{probability}"'
)
@then(
    'the "{page_part}" part of the viewed pages should be presented in "{language}" language with probability greater than "{probability}"'
)
def then_page_should_be_in(context, page_part, language, probability):
    generic_content_of_viewed_pages_should_in_selected_language(
        context, language=language, page_part=page_part, probability=float(probability)
    )


@then(
    '"{buyer_alias}" should be told that the search did not match any UK trade profiles'
)
def then_should_be_told_about_empty_search_results(context, buyer_alias):
    fas_should_be_told_about_empty_search_results(context, buyer_alias)


# Renamed (previously shadowed the FAS empty-search wrapper above).
@then('"{buyer_alias}" should be told to enter a search term or use the filters')
def then_should_be_told_to_enter_search_term_or_use_filters(context, buyer_alias):
    fas_should_be_told_to_enter_search_term_or_use_filters(context, buyer_alias)


# Renamed (previously shadowed the FAS empty-search wrapper above).
@then('"{buyer_alias}" should be told that the search did not match any ISD companies')
def then_should_be_told_about_empty_isd_search_results(context, buyer_alias):
    isd_should_be_told_about_empty_search_results(context, buyer_alias)


@then('"{buyer_alias}" should be told that the feedback request has been submitted')
def then_buyer_should_be_told_about_feedback_request_confirmation(context, buyer_alias):
    fas_feedback_request_should_be_submitted(context, buyer_alias)


@then('"{buyer_alias}" should be able to find all sought companies')
def then_buyer_should_find_all_sought_companies(context, buyer_alias):
    fas_should_find_all_sought_companies(context, buyer_alias)


@then(
    '"{buyer_alias}" should be told that the message has been sent to company "{company_alias}"'
)
def then_buyer_should_be_told_that_message_has_been_sent(
    context, buyer_alias, company_alias
):
    fas_should_be_told_that_message_has_been_sent(context, buyer_alias, company_alias)


@then('"{supplier_alias}" should receive an email message from "{buyer_alias}"')
def then_supplier_should_receive_message_from_buyer(
    context, supplier_alias, buyer_alias
):
    fas_supplier_should_receive_message_from_buyer(context, supplier_alias, buyer_alias)


@then(
    '"{actor_alias}" should see a PNG logo thumbnail on FAS Company\'s Directory Profile page'
)
def then_buyer_should_see_logo_on_fas_profile_page(context, actor_alias):
    fas_should_see_png_logo_thumbnail(context, actor_alias)


@then(
    '"{actor_alias}" should see different updated thumbnail of the logo on FAS Company\'s Directory Profile page'
)
def then_actor_should_see_different_logo_on_fas(context, actor_alias):
    fas_should_see_different_png_logo_thumbnail(context, actor_alias)


@then('"{supplier_alias}" should see expected error messages')
def then_supplier_should_see_expected_error_messages(context, supplier_alias):
    profile_should_see_expected_error_messages(context, supplier_alias)


@then(
    '"{actor_alias}" should see links to all industry pages available in "{language}" language'
)
def then_actor_should_see_links_to_industry_pages(
    context: Context, actor_alias: str, language: str
):
    international_should_see_links_to_industry_pages(context, actor_alias, language)


@then('"{actor_alias}" should see search results filtered by appropriate sector')
@then('"{actor_alias}" should see search results filtered by appropriate sectors')
def then_actor_should_see_filtered_search_results(context, actor_alias):
    fas_should_see_filtered_search_results(context, actor_alias)


@then(
    '"{actor_alias}" should see that FAS search results are not filtered by any sector'
)
def then_actor_should_see_unfiltered_search_results(context, actor_alias):
    fas_should_see_unfiltered_search_results(context, actor_alias)


# Renamed (previously shadowed the FAS unfiltered-results wrapper above).
@then(
    '"{actor_alias}" should see that ISD search results are not filtered by any sector'
)
def then_actor_should_see_unfiltered_isd_search_results(context, actor_alias):
    isd_should_see_unfiltered_search_results(context, actor_alias)


@then(
    '"{actor_alias}" should see company "{company_alias}" only once on browsed search result pages'
)
def then_actor_should_see_company_once_in_search_results(
    context, actor_alias, company_alias
):
    fas_should_see_company_once_in_search_results(context, actor_alias, company_alias)


@then(
    '"{actor_alias}" should see that some of the results have the "{search_term}" search terms highlighted'
)
def then_should_see_highlighted_search_term(context, actor_alias, search_term):
    fas_should_see_highlighted_search_term(context, actor_alias, search_term)


# Renamed (previously shadowed by the "has been verified" wrapper below).
@then(
    '"{supplier_alias}" should be told that business profile is ready to be published'
)
def then_profile_should_be_ready_for_publishing(context, supplier_alias):
    profile_business_profile_should_be_ready_for_publishing(context, supplier_alias)


@then('"{supplier_alias}" should be told that company has been verified')
def then_company_should_be_verified(context, supplier_alias):
    fab_company_should_be_verified(context, supplier_alias)


@then('"{supplier_alias}" should see expected case study error message')
def then_supplier_should_see_expected_case_study_error_message(context, supplier_alias):
    fab_should_see_case_study_error_message(context, supplier_alias)


@then('"{supplier_alias}" should be told that password was reset')
def then_should_be_told_that_password_was_reset(context, supplier_alias):
    sso_should_be_told_about_password_reset(context, supplier_alias)


@then('"{supplier_alias}" should receive a password reset email')
def then_supplier_should_receive_password_reset_email(context, supplier_alias):
    sso_should_get_password_reset_email(context, supplier_alias)


@then('"{supplier_alias}" should be told that password reset link is invalid')
def then_should_see_invalid_password_reset_link_error(context, supplier_alias):
    sso_should_see_invalid_password_reset_link_error(context, supplier_alias)


@then('"{supplier_alias}" should see "{page_name}" page')
@then('"{supplier_alias}" should be on "{page_name}" page')
def then_supplier_should_see_specific_page(context, supplier_alias, page_name):
    should_be_at(context, supplier_alias, page_name)


@then('"{actor_alias}" should be able to see all selected pages')
def then_actor_should_see_selected_pages(context, actor_alias):
    should_see_selected_pages(context, actor_alias)


# Renamed (previously shadowed the "see all selected pages" wrapper above).
@then('"{actor_alias}" should be taken to "{page_name}" for all requests')
def then_actor_should_be_taken_to_selected_page(context, actor_alias, page_name):
    should_be_taken_to_selected_page(context, actor_alias, page_name)


@then('"{supplier_alias}" should be asked to decide how to verify her identity')
def then_supplier_should_be_asked_about_verification(context, supplier_alias):
    fab_should_be_asked_about_verification_form(context, supplier_alias)


@then('"{actor_alias}" should see "{message}" on the page')
@then('"{actor_alias}" should see "{message}" message')
def then_actor_should_see_a_message(context, actor_alias, message):
    should_see_message(context, actor_alias, message)


@then('"{actor_alias}" should not see "{message}" on the page')
@then('"{actor_alias}" should not see "{message}" message')
def then_actor_should_not_see_a_message(context, actor_alias, message):
    should_not_see_message(context, actor_alias, message)


@then(
    '"{actor_aliases}" should receive an email with a request to confirm that he\'s been added to company "{company_alias}" Find a Buyer profile'
)
@then(
    '"{actor_aliases}" should receive an email with a request to confirm that she\'s been added to company "{company_alias}" Find a Buyer profile'
)
@then(
    '"{actor_aliases}" should receive an email with a request to confirm that they\'ve been added to company "{company_alias}" Find a Buyer profile'
)
def then_actor_should_receive_email_with_request_for_collaboration(
    context, actor_aliases, company_alias
):
    sso_should_get_request_for_collaboration_email(
        context, actor_aliases, company_alias
    )


@then(
    '"{actor_alias}" should see options to manage Find a Buyer profile users on SSO Profile'
)
def then_actor_should_see_options_to_manage_account_users(
    context: Context, actor_alias: str
):
    sud_should_see_options_to_manage_users(context, actor_alias)


@then(
    '"{actor_alias}" should not see options to manage Find a Buyer profile users on SSO Profile'
)
def then_actor_should_not_see_options_to_manage_account_users(
    context: Context, actor_alias: str
):
    sud_should_not_see_options_to_manage_users(context, actor_alias)


@then(
    '"{new_owner_alias}" should receive an email with a request for becoming the owner of the company "{company_alias}" profile'
)
def then_actor_should_receive_email_with_transfer_account_ownership_request(
    context, new_owner_alias, company_alias
):
    profile_should_get_request_for_becoming_owner(
        context, new_owner_alias, company_alias
    )


@then(
    '"{supplier_alias}" should not see "{collaborators_aliases}" among the users associated with company\'s profile'
)
def then_supplier_should_not_see_collaborator(
    context, supplier_alias, collaborators_aliases
):
    fab_should_not_see_collaborator(context, supplier_alias, collaborators_aliases)


@then('"{collaborator_alias}" should not be able to access "{page_name}" page')
def then_collaborator_should_not_be_able_to_access_page(
    context, collaborator_alias, page_name
):
    should_not_be_able_to_access_page(context, collaborator_alias, page_name)


@then(
    'the HTML document language for viewed pages should be set to "{language}" language'
)
def then_page_language_should_be_set_to(context: Context, language: str):
    generic_page_language_should_be_set_to(context, language)


@then(
    'the language switcher on viewed pages should show "{language}" as selected language'
)
def then_language_switcher_should_be_set_to(context: Context, language: str):
    generic_language_switcher_should_be_set_to(context, language)


# Renamed (previously shadowed the SSO-profile variant above).
@then('"{actor_alias}" should not see options to manage profile')
def then_actor_should_not_see_options_to_manage_profile(
    context: Context, actor_alias: str
):
    profile_should_not_see_options_to_manage_users(context, actor_alias)


@then('"{actor_alias}" should receive an email notification with subject "{subject}"')
def then_actor_should_get_email(context: Context, actor_alias: str, subject: str):
    generic_should_get_email_notifications(context, actor_alias, subject)
#!/usr/bin/env python3
# Parse a build log ("Putting child ..." / "Reaping ...") into per-target
# start/end times, lay overlapping targets out on parallel tracks, and render
# the timeline as an SVG Gantt-style chart.
#
# Usage: <file> <threshold> <output>
#   file      -- log file to parse
#   threshold -- only targets longer than this many seconds are drawn
#   output    -- SVG file to write
import sys
import os
import svgwrite
import math

class Target:
    # One build target: name, PID, wall-clock start/end (seconds),
    # and the track ("booking" slot) it is drawn on.
    def __init__(self, name, pid):
        self.name = name
        self.pid = pid
        self.start = None           # filled in when the "Reaping" line is seen
        self.end = None
        self.booking_index = None   # track index assigned during the sweep

    def duration(self):
        assert self.end >= self.start
        return self.end - self.start

if len(sys.argv) != 4:
    print('Usage: <file> <threshold> <output>')
    exit(1)

lines = [x.strip() for x in open(sys.argv[1]).readlines()]

# First pass: pair each "Putting child" (spawn) line with its "Reaping"
# (exit) line via the PID; `cache` holds targets that have started but not
# yet finished.
targets = []
cache = {}
for i, l in enumerate(lines):   # NOTE: `i` is unused; kept for debugging
    if l.startswith('Putting child'):
        parts = l.split(' ')
        target = parts[3][1:-1]         # strip surrounding quotes/brackets
        assert parts[4] == 'PID'
        pid = int(parts[5])
        assert not pid in cache
        cache[pid] = Target(target, pid)
    elif l.startswith('Reaping '):
        parts = l.split(' ')
        target = parts[4]
        assert parts[5] == 'PID'
        pid = int(parts[6])
        if pid in cache:
            t = cache[pid]
            # Timestamps come as "<sec> <microsec>" pairs.
            start = float(parts[9]) + float(parts[10]) / 1000000
            end = float(parts[12]) + float(parts[13]) / 1000000
            t.start = start
            t.end = end
            targets.append(t)
            del cache[pid]

# Normalise all times so the earliest start is t=0.
sorted_items = sorted([t for t in targets], key = lambda x: x.start)
min_start = sorted_items[0].start
for f in sorted_items:
    f.start -= min_start
    f.end -= min_start

#for f in sorted_items:
#    print('%-35s:%.2f -> %.2f' % (f.name, f.start, f.end))

# Drop targets shorter than the threshold (argv[2], seconds).
filtered = [t for t in sorted_items if t.duration() > float(sys.argv[2])]

# Sweep-line track assignment: emit a (time, kind, target) event for each
# start (kind 0) and end (kind 1), process in time order, and give each
# active target the lowest free track.
events = []
for f in filtered:
    events.append((f.start, 0, f))
    events.append((f.end, 1, f))
sorted_events = sorted(events, key = lambda x: x[0])

# Fixed pool of 200 tracks.  NOTE(review): more than 200 concurrently
# running targets would leave booking_index unset -- confirm this bound.
booking = []
for i in range(200):
    booking.append(None)

for event in sorted_events:
    if event[1] == 0:
        # start: claim the first free track
        for i, b in enumerate(booking):
            if booking[i] == None:
                event[2].booking_index = i
                booking[i] = event[2]
                # print('At: %f adding to booking %d: %d' % (event[0], i, event[2].pid))
                break
    elif event[1] == 1:
        # end: release the track this target was drawn on
        i = event[2].booking_index
        assert booking[i] == event[2]
        booking[i] = None
        # print('At: %f removing from booking %d: %d' % (event[0], i, event[2].pid))

# write it SVG file -- 100 px per second horizontally, one `height`-px row
# per track vertically.
maximum_booking_id = max([x.booking_index for x in filtered])

margin = 100
height = 60
dwg = svgwrite.Drawing(sys.argv[3], size = (100 * filtered[-1].end + 2 * margin, height * (maximum_booking_id + 4)), profile = 'tiny')
dwg.add(dwg.rect(insert=(0, 0), size = ('100%', '100%'), fill = 'white'))

# draw the ruler (one tick per second).
# NOTE(review): tick x-positions use `100 * i` without adding `margin`,
# while the ruler line itself starts at `margin` -- looks like an offset
# inconsistency; confirm intended alignment before changing.
Y = 50
dwg.add(dwg.line(start = (margin, Y), end = (100 * filtered[-1].end + margin, Y), stroke = 'black'))
for i in range(math.ceil(filtered[-1].end)):
    dwg.add(dwg.line(start = (100 * i, Y - 10), end = (100 * i, Y + 10), stroke = 'black'))
    if i != 0:
        dwg.add(dwg.text(str(i), insert = (100 * (i + 1), Y - 30), font_size = 22))

# One labelled bar per surviving target, on its assigned track.
for t in filtered:
    start_x = margin + 100.0 * t.start
    end_x = margin + 100.0 * t.end
    start_y = height * t.booking_index
    dwg.add(dwg.rect(insert=(start_x, margin + height * t.booking_index), size = (end_x - start_x, 0.8 * height), fill = 'rgb(216, 172, 51)', stroke = 'black'))
    dwg.add(dwg.text('%s: %.1fs' % (t.name, t.duration()), insert=(start_x, margin + start_y + height / 2), font_size = 22))

dwg.save()
#!/usr/bin/env python
"""Minimal `re` demo contrasting match (anchored at position 0) with search.

Fix: the original used Python-2-only ``print`` statements; the script now
uses ``print()`` calls, which run under both Python 2 and 3.
"""
import re

pattern = re.compile(r'world')

# match() only succeeds at the very start of the string -> None here.
match = pattern.match('hello world!')
# search() scans the whole string -> finds 'world' at offset 6.
search = pattern.search('hello world!')

if match:
    print(match.group())
else:
    print('match failed')

if search:
    print(search.group())
else:
    print('search failed')
#!/usr/bin/env python
# USAGE: temp.py
# Michael Chambers, 2017
#
from sys import stdin
import sys
import itertools


def func(vals):
    """Evaluate v0 + v1*v2^2 + v3^3 - v4 for the first five entries of vals."""
    quadratic_term = vals[1] * vals[2] ** 2
    cubic_term = vals[3] ** 3
    return vals[0] + quadratic_term + cubic_term - vals[4]


def main():
    """Print every ordering of the digits that evaluates to 399."""
    digits = [2, 3, 5, 7, 9]
    for ordering in itertools.permutations(digits):
        if func(ordering) == 399:
            print(ordering)


if __name__ == "__main__":
    main()
# Smoke tests for the popular-artifact generator.
import unittest  # NOTE(review): unused -- these are pytest-style tests

from popular_artifacts import popular_artifact_generator


def test_get_maven_artifacts():
    """Fetching Maven artifacts should yield a non-empty collection."""
    results = popular_artifact_generator.get_maven_artifacts()
    assert len(results) > 0


def test_get_file_stats_for_artifacts():
    """Fetching file stats should yield a non-empty download list."""
    download_list = popular_artifact_generator.get_file_stats_for_artifacts()
    assert len(download_list) > 0
# Print the first element of each sub-list.
# Fix: replaced the manual counter + hard-coded range(4) with direct
# iteration, so the loop no longer breaks if the list length changes.
liste = [['chat'], ['chien'], [['.']], ['voiture']]
for sous_liste in liste:
    print(sous_liste[0])
from django.urls import path
from question import views

# URL routes for the question (quiz) app: question flow, category selection,
# answer grading, session handling, samples and static pages.
urlpatterns = [
    # Books  (original comment: 書籍)
    # Show question `question_no` within `category_id`.
    path('question/<int:category_id>/<int:question_no>', views.QuestionView.as_view(), name='question'),
    # Category selection screen.
    path('category', views.CategoryView.as_view(), name='category'),
    # Grade the user's choice for a question and show the result.
    path('answer_result/<int:category_id>/<int:question_id>/<int:question_no>/<str:question_type>/<int:choice_no>', views.AnswerResultView.as_view(), name='answer_result'),
    # Shown when the quiz session has expired.
    path('session_expire', views.SessionExpireView.as_view(), name='session_expire'),
    # Landing page.
    path('top', views.TopPageView.as_view(), name='top'),
    # Abort the current quiz run.
    path('question/stop', views.QuestionStopView.as_view(), name='stop'),
    # Pre-quiz preparation screen.
    path('question/preparation', views.PreparationView.as_view(), name='preparation'),
    # Sample answer for a given question type/id.
    path('sample/answer/<str:question_type>/<int:question_id>', views.SampleAnswerView.as_view(), name='sample_answer'),
    # Help page.
    path('help', views.HelpPageView.as_view(), name='help'),
]
from clikit.api.io import Input
from clikit.api.io import Output
from clikit.formatter import AnsiFormatter
from clikit.formatter import PlainFormatter
from clikit.io import ConsoleIO
from clikit.io.input_stream import StandardInputStream
from clikit.io.output_stream import ErrorOutputStream
from clikit.io.output_stream import StandardOutputStream

from cleo.io.io_mixin import IOMixin


class GotIO(ConsoleIO, IOMixin):
    """Console IO for code paths that are not inside cleo commands.

    Wires stdin/stdout/stderr to clikit Input/Output objects, picking an
    ANSI or plain formatter per stream depending on whether that stream
    supports ANSI escape codes.
    """

    def __init__(self, style_set):
        input_stream = StandardInputStream()
        input = Input(input_stream)

        output_stream = StandardOutputStream()
        if output_stream.supports_ansi():
            formatter = AnsiFormatter(style_set)
        else:
            formatter = PlainFormatter(style_set)
        output = Output(output_stream, formatter)

        error_stream = ErrorOutputStream()
        # Fix: the error-stream formatters were constructed without the
        # style set, so styled tags written to stderr silently lost their
        # styles; use the same style set as stdout.
        if error_stream.supports_ansi():
            formatter = AnsiFormatter(style_set)
        else:
            formatter = PlainFormatter(style_set)
        error_output = Output(error_stream, formatter)

        super(GotIO, self).__init__(input, output, error_output)
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QSDM
        Species distribution modelling support for the QGIS Processing toolbox
        -------------------
        begin                : 2014-03-31
        copyright            : (C) 2014 by Martin Jung
        email                : martinjung-at-zoho.com
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
 This script initializes the plugin, making it known to QGIS.
"""


def name():
    """Plugin name shown by QGIS."""
    return "QSDM"


def description():
    """One-line plugin description."""
    return "Species distribution modelling support for the QGIS Processing toolbox"


def version():
    """Plugin version string."""
    return "Version 0.2"


def icon():
    """Relative path to the plugin icon."""
    return "icons/default.png"


def qgisMinimumVersion():
    """Lowest QGIS version the plugin supports."""
    return "2.0"


def classFactory(iface):
    """Instantiate the plugin; called by QGIS with the app interface."""
    from qsdm_main import qsdm
    return qsdm(iface)
""" Python RPC Client for Discord ----------------------------- By: qwertyquerty and LewdNeko """ from .client import Client from .presence import Presence from .exceptions import * __title__ = 'pypresence' __author__ = 'qwertyquerty' __copyright__ = 'Copyright 2018 qwertyquerty' __license__ = 'MIT' __version__ = '1.0.9'
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np

# Load 'a.png' from the current working directory and hand it to matplotlib.
# NOTE(review): there is no plt.show(), so nothing is displayed when this
# runs as a plain script -- presumably meant for an interactive/notebook
# session; confirm before adding show().
I = Image.open('a.png')
a = np.asarray(I)   # image as a numpy array
plt.imshow(a)
from django.contrib import messages
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render

from .models import Collection, Person, Face


def index(request):
    """Redirect to the first collection, or render the empty landing page."""
    collection = Collection.objects.first()
    if not collection:
        return render(request, 'face/index.html')
    # Fix: the original ran a second, identical ``first()`` query just to
    # read the slug -- reuse the row we already fetched.
    return redirect('collection_detail', collection.slug)


def collection_detail(request, slug):
    """Show a collection; on POST, run face search on the uploaded photo."""
    collection = get_object_or_404(Collection, slug=slug)
    if request.method == 'POST':
        photo_file = request.FILES.get('photo')
        if photo_file:
            identified = collection.search_faces(photo_file, max_faces=3)
        else:
            # No file uploaded -> nothing to identify.
            identified = []
        return render(request, 'face/identified.html', {
            'identified': identified,
        })
    # Each person annotated with their face count for the template.
    person_list = collection.person_set.all().annotate(Count('face'))
    # ``count()`` on the manager avoids building an intermediate queryset.
    face_count = Face.objects.count()
    return render(request, 'face/collection_detail.html', {
        'collection': collection,
        'person_list': person_list,
        'face_count': face_count,
    })


def person_detail(request, collection_slug, pk):
    """Detail page for one person within a collection."""
    person = get_object_or_404(Person, collection__slug=collection_slug, pk=pk)
    return render(request, 'face/person_detail.html', {
        'person': person,
    })


def person_photo(request, pk):
    """Redirect to the person's externally hosted photo."""
    person = get_object_or_404(Person, pk=pk)
    return redirect(person.photo_url)
# Generated by Django 3.2.4 on 2021-06-22 13:17 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('myapi', '0002_auto_20210615_1628'), ] operations = [ migrations.CreateModel( name='Room_detail', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('user_id', models.CharField(max_length=45)), ('nickname', models.CharField(max_length=50)), ('cotent', models.TextField(blank=True)), ('room_title', models.CharField(max_length=45)), ('room_id', models.CharField(max_length=50)), ('date', models.DateTimeField(auto_now_add=True)), ], ), migrations.DeleteModel( name='Post', ), ]
#coding:utf-8 from __future__ import division, print_function, unicode_literals import os import sys if sys.version_info.major == 2: reload(sys) sys.setdefaultencoding('utf-8') sys.path.append(os.path.dirname(__file__)) import pyexts # pyexts = os.path.dirname(__file__) + "/pyexts" # for root, dirs, files in os.walk(pyexts): # for f in files: # if f.endswith(".py"): # exec(open(root + "/" + f, 'rb').read(), globals())
def grade(m):
    """Return the grade string for mark ``m``.

    Bands: >= 80 -> "A Grade", 60-79 -> "B grade", 40-59 -> " C Grade",
    otherwise "Fail".

    Fix: the original A-band condition was ``m >= 80 and m <= 90``, so any
    mark above 90 fell through every branch and printed "Fail"; an A now
    covers all marks >= 80.  (Output strings kept byte-identical, including
    the leading space in " C Grade".)
    """
    if m >= 80:
        return "A Grade"
    elif m >= 60:
        return "B grade"
    elif m >= 40:
        return " C Grade"
    else:
        return "Fail"


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on stdin.
    m = int(input())
    print(grade(m))
""" Created on Tue Feb 13 10:04:09 2018 @author: davidhull """ ### Exercise 1 ### class Person: def __init__ (self, name, email, phone, friends =[], count = 0): self.name = name self.email = email self.phone = phone self.friends = [] self.count = count def greet (self, other_person): self.count += 1 print('Hello {}, I am {}!'.format(other_person.name, self.name)) def print_contact_info(self): print("{}'s email: {},".format(self.name,self.email), " {}'s phone number: {}".format(self.name, self.phone)) def add_friend(self, friend): return self.friends.append(friend) def num_friend(self): print(len(self.friends)) def greeting_count(self): print("{}".format(self.count)) def __str__(self): return 'Person: {} {} {}'.format(self.name, self.email, self.phone) #def __str__(self): #return "{}".format(self.greeting_count) sonny = Person(name = 'Sonny', email = 'sonny@hotmail.com', phone = '483-485-4948') jordan = Person(name = 'Jordan', email = 'jordan@aol.com', phone = '495-586-3456') sonny.greet(jordan) jordan.greet(sonny) jordan.greeting_count() jordan.greet(sonny) print(sonny.email," ", sonny.phone) print(jordan.email, ' ', jordan.phone) ### Vehicle Exercise ### class Vehicle: def __init__ (self, make, model, year): self.make = make self.model = model self.year = year def print_info(self): print(self.year, self.make, self.model) car = Vehicle('Nissan', 'Leaf', 2015) car.print_info()
from .go_out import Option
from . import _test


def test_from_menu():
    """Option.from_menu should detect both go-out options on the menu shot.

    The two detections are ordered by vertical position (y): the
    support-card option appears above the main-character option.
    """
    with _test.screenshot("go_out_menu.png") as img:
        res = Option.from_menu(img)
        support_card, character = sorted(res, key=lambda x: x.position[1])
        assert support_card.type == Option.TYPE_SUPPORT, support_card.type
        assert support_card.current_event_count == 0, support_card.current_event_count
        assert character.type == Option.TYPE_MAIN, character.type
import sys
import requests
import threading


class LocalAgent:
    """Simulated IoT temperature sensor that POSTs readings to a local server.

    The temperature sweeps between 200 and 299 in steps of ``increment``
    (the constructor argument), reversing direction at the band edges; one
    reading is sent every ``time_interval`` seconds.
    """

    def __init__(self, machine_no, time_interval, increment):
        self.machine_no = machine_no
        self.time_interval = time_interval
        self.current_temp = 200
        self.increment = 1                    # direction flag: 1 = rising, 0 = falling
        self.incremental_value = increment    # step size per tick
        # Start the periodic sender immediately.
        self.set_interval(self.send_temp, time_interval)

    def get_temperature(self):
        """Advance the simulated temperature one step and return it."""
        if self.increment == 1:
            self.current_temp += self.incremental_value
        else:
            self.current_temp -= self.incremental_value
        # Reverse direction at the band edges.
        if self.current_temp >= 299:
            self.increment = 0
        if self.current_temp <= 200:
            self.increment = 1
        return self.current_temp

    def send_temp(self):
        """POST one reading to the logging endpoint."""
        print("Sending temperature log")
        # NOTE(review): timestamp is a hard-coded placeholder -- replace
        # with a real clock if the server actually uses it.
        r = requests.post("http://localhost:3030/log", json={
            "machine_no": self.machine_no,
            "iot_no": 1,
            "temperature": self.get_temperature(),
            "timestamp": 123123123
        })
        print("Log sent")

    def set_interval(self, func, sec):
        """Invoke ``func`` every ``sec`` seconds via self-rescheduling Timers."""
        def func_wrapper():
            self.set_interval(func, sec)
            func()
        t = threading.Timer(sec, func_wrapper)
        t.start()
        return t


if __name__ == "__main__":
    # Fix: argv parsing previously ran at import time, so importing this
    # module elsewhere crashed with IndexError and started the timer loop.
    machine_no = int(sys.argv[1])
    time_interval = int(sys.argv[2])
    increment = int(sys.argv[3])
    localAgent = LocalAgent(machine_no, time_interval, increment)
from django.urls import path, re_path, reverse_lazy, include
from django.contrib.auth import views as auth_views
from accounts.views import activate_view
from . import views
from django.conf import settings

# URL routes for the accounts app: authentication, registration,
# password change/reset flows, profile pages and e-mail activation.
urlpatterns = [
    path('login/', views.login_view, name='login'),
    path('logout/', views.logout_view, name='logout'),
    path('register/', views.register, name='register'),
    path('password-create/', views.create_password, name='password_create'),
    path(
        'password-change/',
        auth_views.PasswordChangeView.as_view(
            template_name='accounts/password_change_form.html',
        ),
        name='password_change'),
    path(
        'password-change/done/',
        auth_views.PasswordChangeDoneView.as_view(
            template_name='accounts/personal_information.html',
            extra_context={'extra_message': 'Password was successfully changed'}
        ),
        name='password_change_done'),
    path('password-reset/', auth_views.PasswordResetView.as_view(
        email_template_name='accounts/email/password_reset.html',
        subject_template_name='accounts/email/password_reset_subject.txt',
        template_name='accounts/password_reset.html',
        success_url=reverse_lazy('password_reset_done'),
        # The reset e-mail needs an absolute link, so pass the deployed domain.
        extra_email_context={'domain': settings.APP_DOMAIN},
        html_email_template_name='accounts/email/password_reset.html',
    ), name='password_reset'),
    path('password-reset/done/', auth_views.PasswordResetDoneView.as_view(
        template_name='accounts/password_reset_done.html',
    ), name='password_reset_done'),
    re_path(
        r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.PasswordResetConfirmView.as_view(
            template_name='accounts/password_reset_confirm.html',
            success_url=reverse_lazy('password_reset_complete'),
        ),
        name='password_reset_confirm'),
    re_path(
        r'^reset/done/$',
        # BUG FIX: this route is the *complete* step of the reset flow (it is
        # the success_url of PasswordResetConfirmView above) but was wired to
        # PasswordResetDoneView; the correct view is PasswordResetCompleteView.
        auth_views.PasswordResetCompleteView.as_view(
            template_name='accounts/password_reset_complete.html',
        ),
        name='password_reset_complete'),
    path('my-account/', views.personal_information, name='personal_information'),
    path('my-account/edit/', views.edit_personal_information,
         name='edit_personal_information'),
    path('activate/<uidb64>/<token>', activate_view, name='activate')
]
#-*- coding: utf-8 -*-
'''
Created on 15 de mai de 2017

@author: alisson
'''
import pandas
import math
import numpy
import keras
from keras.models import Sequential
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.layers.core import Dense, Dropout
from datetime import datetime
import matplotlib.pyplot as plt
from keras.layers.normalization import BatchNormalization
# NOTE(review): the old "from Tkinter import Label" import was removed — it is
# Python-2 only, unused, and its capitalized name made its way into the
# plt.plot(..., Label=...) bug fixed below.


class Deep():
    """Multi-layer sigmoid (MLP) regressor for one-column time-series CSVs.

    Loads a univariate series, scales it to [0, 1], builds sliding windows of
    `look_back` past values as features with the next value as target, trains
    a stack of sigmoid Dense layers, then reports RMSE and plots predictions.
    """

    def set_data(self, data, look_back):
        """Load the CSV at `data`, normalize it and build train/test windows.

        Returns (trainX, trainY, testX, testY, scaler, dataset).
        """
        # load the dataset (first column only; the 3 footer rows are skipped)
        dataframe = pandas.read_csv(data, usecols=[0], engine='python', skipfooter=3)
        dataset = dataframe.values
        dataset = dataset.astype('float32')
        print(dataset)
        # normalize the dataset; the scaler is kept to invert predictions later
        scaler = MinMaxScaler(feature_range=(0, 1))
        dataset = scaler.fit_transform(dataset)
        # chronological 67%/33% train/test split (no shuffling for time series)
        train_size = int(len(dataset) * 0.67)
        train = dataset[0:train_size, :]
        test = dataset[train_size:len(dataset), :]
        # reshape into X = values t..t+look_back-1 and Y = value at t+look_back
        trainX, trainY = self.create_dataset(train, look_back)
        testX, testY = self.create_dataset(test, look_back)
        # flatten windows to [samples, features] for the Dense layers
        trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1]))
        testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1]))
        return trainX, trainY, testX, testY, scaler, dataset

    def Deep_network(self, trainX, trainY, testX, testY, scaler, look_back,
                     b_size, epocas, dataset):
        """Train the network, print train/test RMSE and plot predictions.

        Returns (trainScore, testScore): RMSEs in the original data scale.
        """
        self.look_back = look_back
        self.trainX = trainX
        self.trainY = trainY
        self.testX = testX
        self.testY = testY
        self.b_size = b_size
        self.epocas = epocas
        self.dataset = dataset

        print("create and fit the LSTM network")
        model = Sequential()
        # For experiments with a single LSTM layer, swap these Dense layers
        # for an LSTM (the import is kept above for that purpose).
        model.add(Dense(look_back, input_dim=look_back, activation="sigmoid"))
        model.add(Dense(look_back, input_dim=look_back, activation="sigmoid"))
        model.add(Dense(look_back, input_dim=look_back, activation="sigmoid"))
        model.add(Dense(look_back, input_dim=look_back, activation="sigmoid"))
        model.add(Dense(1, input_dim=look_back, activation="sigmoid"))

        print("Usando o método Compile")
        model.compile(loss='mean_squared_error', optimizer='adam')

        print("Usando o método Fit")
        # 'verbose' controls training output: 2 = per-epoch lines, 0 = silent.
        # NOTE: nb_epoch is the Keras-1 keyword kept for the keras version this
        # project pins (renamed to `epochs` in Keras 2).
        model.fit(self.trainX, self.trainY, nb_epoch=self.epocas,
                  batch_size=self.b_size, verbose=0)

        print("Fazendo as predições")
        trainPredict = model.predict(self.trainX, batch_size=self.b_size)
        testPredict = model.predict(self.testX, batch_size=self.b_size)
        # invert predictions back to the original scale
        trainPredict = scaler.inverse_transform(trainPredict)
        trainY = scaler.inverse_transform([trainY])
        testPredict = scaler.inverse_transform(testPredict)
        testY = scaler.inverse_transform([testY])
        # calculate root mean squared error
        trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
        print('Train Score: %.2f RMSE' % (trainScore))
        testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
        print('Test Score: %.2f RMSE' % (testScore))
        print("\n")

        # shift train predictions for plotting
        trainPredictPlot = numpy.empty_like(self.dataset)
        trainPredictPlot[:, :] = numpy.nan
        trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
        # shift test predictions for plotting
        testPredictPlot = numpy.empty_like(self.dataset)
        testPredictPlot[:, :] = numpy.nan
        testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(self.dataset) - 1, :] = testPredict

        # plot baseline and predictions
        print(self.dataset)
        print(scaler.inverse_transform(self.dataset))
        print(trainPredict)
        # BUG FIX: the matplotlib keyword is lowercase `label` — the previous
        # `Label=` was not a valid Line2D property, so the legend had no
        # entries — and the legend location string was misspelled ('upper rigth').
        plt.plot(scaler.inverse_transform(self.dataset), label="Serie Real")
        plt.plot(trainPredictPlot, label="Treinamento")
        plt.plot(testPredictPlot, label="Teste")
        plt.legend(loc='upper right')
        plt.tight_layout()
        plt.show()
        return trainScore, testScore

    def create_dataset(self, dataset, look_back):
        """Slide a `look_back`-wide window over `dataset` (shape [n, 1]).

        Returns (X, Y) with X[i] = dataset[i:i+look_back, 0] and
        Y[i] = dataset[i+look_back, 0].
        """
        dataX, dataY = [], []
        for i in range(len(dataset) - look_back - 1):
            a = dataset[i:(i + look_back), 0]
            dataX.append(a)
            dataY.append(dataset[i + look_back, 0])
        return numpy.array(dataX), numpy.array(dataY)


data = 'teste.csv'
t = Deep()
look_back = 12
[trainX, trainY, testX, testY, scaler, dataset] = t.set_data(data, look_back)
t.Deep_network(trainX, trainY, testX, testY, scaler, look_back, 5, 1000, dataset)
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
import time
import os

# Maximum number of seconds to keep retrying a flaky browser assertion
# before letting the underlying error propagate.
MAX_WAIT = 10


class NewVisitorTest(StaticLiveServerTestCase):
    """Browser-driven functional tests for the playlist pages."""

    def setUp(self):
        # One fresh Chrome session per test.
        self.browser = webdriver.Chrome()
        # If STAGING_SERVER is set, run the tests against that deployment
        # instead of the local live-server instance.
        staging_server = os.environ.get('STAGING_SERVER')
        if staging_server:
            self.live_server_url = 'http://' + staging_server

    def tearDown(self):
        self.browser.quit()

    def wait_for_row_in_list_table(self, row_text):
        """Poll until `row_text` appears as a row of #id_list_table.

        Retries both assertion failures and WebDriver errors (e.g. the table
        not being rendered yet) for up to MAX_WAIT seconds, then re-raises.
        """
        start_time = time.time()
        while True:
            try:
                table = self.browser.find_element_by_id('id_list_table')
                rows = table.find_elements_by_tag_name('tr')
                self.assertIn(row_text, [row.text for row in rows])
                return
            except (AssertionError, WebDriverException) as e:
                if time.time() - start_time > MAX_WAIT:
                    raise e
                time.sleep(0.5)

    def test_can_start_a_list_for_one_user(self):
        """A visitor can create a playlist and add two items to it."""
        self.browser.get(self.live_server_url)
        self.assertIn('DJ NoBangers', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('Start a new playlist', header_text)

        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(inputbox.get_attribute('placeholder'),
                         'Enter a youtube link')

        inputbox.send_keys('Eminem')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Eminem')

        # The input box is re-rendered after submission, so find it again.
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('SWOG')
        inputbox.send_keys(Keys.ENTER)

        self.wait_for_row_in_list_table('1: Eminem')
        self.wait_for_row_in_list_table('2: SWOG')

    def test_multiple_users_can_start_lists_at_different_urls(self):
        """Two visitors get distinct list URLs and cannot see each other's items."""
        # original user
        self.browser.get(self.live_server_url)
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('SWOG')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: SWOG')

        drew_list_url = self.browser.current_url
        self.assertRegex(drew_list_url, '/youtubeplayer/.+')

        # new user - opens up new page (quit + restart drops session cookies)
        self.browser.quit()
        self.browser = webdriver.Chrome()

        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('SWOG', page_text)
        self.assertNotIn('Rihanna', page_text)

        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Kendrick Lamar')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Kendrick Lamar')

        user_list_url = self.browser.current_url
        self.assertRegex(user_list_url, '/youtubeplayer/.+')
        self.assertNotEqual(user_list_url, drew_list_url)

        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('SWOG', page_text)
        self.assertIn('Kendrick Lamar', page_text)

    def test_layout_and_styling(self):
        """Smoke-test the CSS: the input box is roughly centered at 1024px wide."""
        self.browser.get(self.live_server_url)
        self.browser.set_window_size(1024, 768)

        inputbox = self.browser.find_element_by_id('id_new_item')
        # Center of the box should sit at half the window width, +/- 10px.
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta = 10
        )

        inputbox.send_keys('testing')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: testing')
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta = 10
        )
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------
Counts the number of values entered from a barcode reader.

All rights to this application belong to Yuichi Kageyama
(Tatsuno City, Hyogo Prefecture) and Alice Electronics.
The application and source code are published under the
BSD 3-Clause License.
For inquiries, please contact Email: contact@alicle.jp
---------------------------------------------------------
"""

# ---------------------------------------------------------
# Libraries
# ---------------------------------------------------------
# Sound playback
from pygame import mixer
# Application exit
import sys
# Directory operations
import os


# ---------------------------------------------------------
# Variable definitions
# ---------------------------------------------------------
# List storing the barcode values that have been read
BarcodeValueArray = []
# Expected number of digits in a barcode (default 5)
BarcodeLength: int = 5
# Number of reads that make up one completed batch (default 5)
NumberOfCount: int = 5


# ---------------------------------------------------------
# Accept barcode input and store the values in the list
# ---------------------------------------------------------
def BarcodeReading():
    """Interactive read loop: prompt for settings, then count barcode scans.

    Reads values until 'q' is entered; every NumberOfCount valid scans an
    alarm sound is played and the counter resets. On quit, all collected
    values are appended to Data/data.txt.
    """
    # Status flag controlling the main loop (0 = keep running)
    Status: int = 0
    # Number of barcodes read in the current batch
    ReadBarcodeQty = 0
    # Configure the barcode digit count.
    # NOTE(review): these assignments create *local* BarcodeLength /
    # NumberOfCount variables that shadow the module-level defaults above —
    # the globals are never updated. Confirm whether that is intended.
    print('\033[2K', 'How many digits in the barcode?')
    BarcodeLength = input('> ')
    # Convert the entered value to an integer
    BarcodeLength = int(BarcodeLength)
    # Configure the batch count
    print('\033[2A', 'How many to count?')
    NumberOfCount = input('> ')
    # Convert the entered value to an integer
    NumberOfCount = int(NumberOfCount)
    # Erase the setup messages (ANSI escape sequences)
    print('\033[2K', '\033[A', '\033[2K', '\033[A', '\033[2K', '\033[A')
    # Setup-complete message
    print('\033[2K', 'Setting Done. \n Please read a bar-code.')
    # Initialize pygame's mixer so sound effects can be played
    mixer.init()
    while Status == 0:
        # Clear the console line
        sys.stdout.write('\033[2K')
        # Read one barcode (the reader acts as keyboard input)
        BarcodeValue: str = input(' Bar-code >')
        # Count the number of characters in the value that was read
        ReadValueLength = len(BarcodeValue)
        # Accept the value only if it matches the configured digit count
        if ReadValueLength == BarcodeLength:
            # Append the value to the list
            BarcodeValueArray.append(BarcodeValue)
            # print(BarcodeValueArray)
            # Bump the number of barcodes read in this batch
            ReadBarcodeQty = ReadBarcodeQty + 1
            # When the batch is full, alert the user and reset the counter
            if ReadBarcodeQty == NumberOfCount:
                # Play the batch-complete sound
                mixer.Sound('lib/alarm.wav').play()
                print('\033[2K', ReadBarcodeQty, 'sheets Done !', '\033[2A')
                ReadBarcodeQty = 0
                continue
            # Play the read-confirmation sound
            mixer.music.load('lib/beep.mp3')
            mixer.music.play(1)
            # Show the current count, then move the cursor back up a line
            print('\033[2K', 'Now: ', ReadBarcodeQty, '\033[2A',)
            continue
        # Quit the application when 'q' is typed.
        # NOTE(review): if BarcodeLength == 1, a scanned 'q' is treated as a
        # barcode by the branch above and can never reach this one.
        elif BarcodeValue == 'q':
            # Join the collected values with newlines and append to the data file
            d = '\n'.join(BarcodeValueArray)
            with open('Data/data.txt', 'a') as f:
                f.write(d)
                # Terminate the data file with a newline
                f.write('\n')
            # NOTE(review): typos in the user-facing strings below
            # ('Appication' -> 'Application', 'agein' -> 'again') are left
            # untouched here; fixing them changes runtime output.
            print('\033[2K', 'Appication has quit.')
            print('\033[2K', 'Bye!')
            sys.exit()
        # Value length does not match the configured digit count
        elif ReadValueLength != BarcodeLength:
            print('\033[2K', 'Read error ! Read agein.', '\033[2A')
            continue
    return()


# ---------------------------------------------------------
# Main routine
# ---------------------------------------------------------
def main():
    """Clear the screen, prepare the data directory and start the read loop."""
    # Clear the terminal display
    print('\033[2J', '\033[1;1H')
    # Erase pygame's startup banner
    print('\033[2A')
    print('\033[K', '\033[A', '\033[K')
    print('\033[2A')
    # Create the data directory if it does not exist yet
    if not os.path.isdir('Data'):
        os.makedirs('Data')
    # Show the application banner
    print('\033[A', '[Bar-code Counter v0.1.0]')
    BarcodeReading()


# Prevent the app from running automatically when imported
if __name__ == "__main__":
    main()
# List basics: indexing, deletion, insertion and iteration; tuples at the end.
a = [10, 20, 30, 40, 60]
print(a[0])

b = ['a', 'b', 'c']
print(b[1])

# Delete the first value in the list.
del a[0]
# Insert 87 at position 5 (past the end, so it is effectively appended).
a.insert(5, 87)

# Print b once for every remaining element of a.
for _ in a:
    print(b)
print(len(a))

# These are tuples — immutable, so elements cannot be added to them.
c = ("a", "b", "def")
print(c)
print(a)
$NetBSD$

Fix paths: force Python to load the patched cherrypy bundled with this
package ahead of any other installed copy, and hard-code DIR_PROG to the
installed share directory so SABnzbd finds its interface and language
files under @PREFIX@.

--- SABnzbd.py.orig	2020-02-12 20:18:12.258920844 +0000
+++ SABnzbd.py
@@ -42,6 +42,10 @@
 import ssl
 import time
 import re
+# Force python to load the patched version of cherrypy included with the port,
+# instead of any version that may be installed otherwise.
+sys.path.insert(0,'@PREFIX@/share/sabnzbd')
+
 try:
     import Cheetah
     if Cheetah.Version[0] < '2':
@@ -919,7 +923,8 @@ def main():
     sabnzbd.MY_FULLNAME = os.path.normpath(os.path.abspath(sabnzbd.MY_FULLNAME))
     sabnzbd.MY_NAME = os.path.basename(sabnzbd.MY_FULLNAME)
-    sabnzbd.DIR_PROG = os.path.dirname(sabnzbd.MY_FULLNAME)
+    # sabnzbd.DIR_PROG = os.path.dirname(sabnzbd.MY_FULLNAME)
+    sabnzbd.DIR_PROG = "@PREFIX@/share/sabnzbd"
     sabnzbd.DIR_INTERFACES = real_path(sabnzbd.DIR_PROG, DEF_INTERFACES)
     sabnzbd.DIR_LANGUAGE = real_path(sabnzbd.DIR_PROG, DEF_LANGUAGE)
     org_dir = os.getcwd()
import argparse
from collections import namedtuple
from pathlib import Path, PurePosixPath
import tempfile
from subprocess import check_call
from urllib.parse import urlparse
import os.path
import sys

# (url, branch) pair describing one git repository to build.
Repo = namedtuple('Repo', 'url branch')


def sr(repo):
    """Expand a short repository name to its full Student Robotics git URL."""
    return 'git://studentrobotics.org/{}.git'.format(repo)


# All repositories whose Python distributions get built by this script.
REPOSITORIES = [
    Repo(sr('comp/ranker'), 'master'),
    Repo(sr('comp/srcomp'), 'master'),
    Repo(sr('comp/srcomp-http'), 'master'),
    Repo(sr('comp/srcomp-scorer'), 'master'),
    Repo(sr('comp/srcomp-cli'), 'master'),
    Repo(sr('tools'), 'new-tools'),
    Repo(sr('brain/herdsman'), 'master'),
    Repo(sr('brain/sr-robot'), 'master'),
]

parser = argparse.ArgumentParser(description='SR Python distribution builder')
# Exactly one output target must be chosen: a local directory or an rsync remote.
targets = parser.add_mutually_exclusive_group(required=True)
targets.add_argument('-o', '--output', help='directory to output built distributions',
                     type=Path)
targets.add_argument('-r', '--rsync', help='remote directory to output build distributions',
                     type=str)
parser.add_argument('-v', '--virtualenv', help='path to virtualenv',
                    default='/usr/bin/virtualenv')
parser.add_argument('-2', '--python2', help='path to python 2',
                    default='/usr/bin/python2.7')
parser.add_argument('-3', '--python3', help='path to python 3',
                    default=sys.executable)
args = parser.parse_args()

# Packages installed into every build virtualenv before running setup.py.
root_deps = ['wheel', 'nose', 'Sphinx==1.3b2']


def create_virtualenv(directory, python=sys.executable):
    """Create a virtualenv at `directory` using the given python interpreter."""
    check_call((args.virtualenv, '-p', python, directory))


# Which distribution formats to build with each interpreter
# (sdist only once, under python 3).
versions = {
    args.python2: ['bdist_egg', 'bdist_wheel'],
    args.python3: ['sdist', 'bdist_egg', 'bdist_wheel']
}

# interpreter path -> Path of the virtualenv created for it
virtualenvs = {
}

with tempfile.TemporaryDirectory() as tmpdir:
    work = Path(tmpdir).resolve()

    if args.output is not None:
        DST = args.output.resolve()
    else:
        # No local output directory given: build into the temp dir, then rsync.
        DST = work / 'dist'
        DST.mkdir()

    # One virtualenv per interpreter, each pre-seeded with the root deps.
    for n, version in enumerate(versions.keys()):
        virtenv = work / 'venv{}'.format(n)
        print('Creating virtualenv...')
        create_virtualenv(str(virtenv), python=version)
        virtualenvs[version] = virtenv
        for root_dep in root_deps:
            check_call([str(virtenv / 'bin/pip'), 'install', root_dep])

    # Clone every repository once into the work directory.
    for repo in REPOSITORIES:
        # Repository name = last path component without the .git suffix.
        name = PurePosixPath(urlparse(repo.url).path).stem
        check_call(['git', 'clone', '-b', repo.branch, repo.url, name],
                   cwd=str(work))

    # Build each requested distribution of each repo with each interpreter.
    for version, distributions in versions.items():
        virtenv = virtualenvs[version]
        for repo in REPOSITORIES:
            name = PurePosixPath(urlparse(repo.url).path).stem
            for distribution in distributions:
                check_call([str(virtenv / 'bin/python'), 'setup.py',
                            distribution, '-d', str(DST)],
                           cwd=str(work/name))

    if args.rsync is not None:
        print('Copying to target...')
        # Trailing '/' copies the directory *contents* rather than the dir itself.
        check_call(['rsync', '--recursive', str(DST) + '/', args.rsync])
import base64

from django.conf import settings
from itsdangerous import URLSafeTimedSerializer


class Token:
    """Issue and verify signed, time-limited e-mail activation tokens.

    BUG FIX: the key was previously imported from
    ``django.conf.global_settings`` — Django's built-in *defaults*, where
    SECRET_KEY is the empty string — so every token was signed with a blank
    secret. Use the lazy ``django.conf.settings`` object so the project's
    configured SECRET_KEY is used instead.
    """

    def __init__(self):
        self.security_key = settings.SECRET_KEY
        # Deterministic salt derived from the key so tokens issued by one
        # process can be verified by another.
        self.salt = base64.encodebytes(self.security_key.encode(encoding='utf-8'))

    def generate_email_active_token(self, username):
        """Return a URL-safe token embedding `username` and a signing timestamp."""
        serializer = URLSafeTimedSerializer(self.security_key)
        return serializer.dumps(username, self.salt)

    def confirm_email_active_token(self, token, expiration=3600):
        """Decode `token` and return the embedded username.

        Raises itsdangerous.BadSignature / SignatureExpired if the token was
        tampered with or is older than `expiration` seconds.
        """
        serializer = URLSafeTimedSerializer(self.security_key)
        return serializer.loads(token, salt=self.salt, max_age=expiration)
class Solution(object):
    def maxRotateFunction(self, A):
        """Return max over k of F(k) = sum(i * Bk[i]), Bk = A rotated by k.

        Uses the O(n) recurrence F(k) = F(k-1) + sum(A) - n * A[n-k]
        instead of recomputing every rotation from scratch.
        """
        sum1, n = sum(A), len(A)
        if n == 0:  # empty input: define the maximum as 0
            return 0
        # F(0), which seeds both the running value and the best seen so far.
        max_index = temp = sum(A[i] * i for i in range(n))
        for i in range(n - 1, 0, -1):
            # One more rotation adds sum(A) and removes n * (wrapped element).
            temp = temp + sum1 - n * A[i]
            max_index = max(max_index, temp)
        return max_index


s = Solution()
A = [4, 3, 2, 6]
print(s.maxRotateFunction(A))

# O(n) method explained:
# https://leetcode.com/problems/rotate-function/discuss/87853/Java-O(n)-solution-with-explanation
#
# BUG FIX: the link above and the alternative brute-force solutions below used
# to sit as bare text *before* the opening triple quote, which made this whole
# file a SyntaxError. They are kept for reference inside the string literal.
"""
class Solution(object):
    def maxRotateFunction(self, A):
        nums, n = [], len(A)
        number = range(n)
        nums.append(A.copy())
        for i in range(n - 1):
            A.insert(0, A.pop())
            nums.append(A.copy())
        res = -float("inf")
        for num in nums:
            res = max(res, sum(num[j] * number[j] for j in range(n)))
        return res


class Solution(object):
    def maxRotateFunction(self, A):
        max_index, n = -float("inf"), len(A)
        if n == 0:
            return 0
        start = 0
        while start < n:
            num = list(range(start, n, 1)) + list(range(start))
            temp = sum(num[i] * A[i] for i in range(n))
            max_index = max(max_index, temp)
            start += 1
        return max_index
"""
# Print the odd numbers between 1 and 10.
number_count = 0
while True:
    # Stop after nine increments (the counter has visited 1..9).
    if number_count >= 9:
        break
    should_run = True
    number_count = number_count + 1
    # Odd numbers leave a remainder of 1 when divided by 2.
    if number_count % 2 == 1:
        print(number_count)
# Clinical parameters + gene expression -> predict time to metastasis
import pandas as pd
from pandas import read_csv
import pandas as pd
from sklearn import datasets
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC, SVR
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression, LinearRegression,ElasticNetCV
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA

# Merge the two CSV files column-wise into a single CSV
import pandas as pd
import os
import csv
import numpy as np
csv_1=pd.read_csv(r'gse62254_linchuangcanshu2.csv')
csv_2=pd.read_csv(r'DFS_gene.csv')
# Drop the column duplicated between the two files
csv_2.drop('GEO',axis = 1,inplace=True)
# Concatenate (column-wise)
data=pd.concat([csv_1,csv_2],axis=1)
data.to_csv(r'combine_liver.csv',index=False)
data.drop('id',axis = 1,inplace=True)
# Shorten the (very long) original column names
colNameDict=({'geo_accession':'GEO',
'MLH1.IHC':'MLH',
'WHO.1.w.d.adeno.2.m.d.adeno.3.p.d.adeno.4.signet.ring.5..mucinous.6.papillary.adeno.7.adenosquamous.8.undifferentiated.ca.9.hepatoid.adenoca.10.tubular.adenoca.11.others..text.':'WHO',
'Perineural.Invasion':'PI',
'VENOUS.INVASION.':'VI',
'lymphatic..lymphovascular.inv.':'LI',
'documented.recurrence.':'recurr',
'peritoneal.seeding':'fm',
'intraabdominal_LN':'lbj',
'distant.lymph.node':'lbj2',
'FU.status0.无复发存活1.复发但存活..2.未复发死亡.3.复发死亡..4.死因未明的死亡..5..删失':'FU',
'H..pylori.0.No.1.Yes.blank...H.pylori.not.checked':'ym',
'DFS..months.':'DFS',
'OS..months.':'OS',
'Mol..Subtype..0.MSS.TP53...1.MSS.TP53...2...MSI..3..EMT':'Mol'})
data.rename(columns=colNameDict,inplace=True)
# Drop columns where more than 40% of the values are NA
data.drop('VI',axis = 1,inplace=True)
data.drop('ym',axis = 1,inplace=True)
# Drop columns unrelated to the prediction target
data.drop('description',axis = 1,inplace=True)
'''
data['MLH'] = data['MLH'].map({'negative':1,'positive':0,'Negative':1})
data['sex'] = data['sex'].map({'M':1,'F':0})
data['Lauren'] = data['Lauren'].map({'intestinal':0,'diffuse':1,'mixed':2,'indeterminate':3})
'''
# Drop samples whose recurrence outcome is unknown (coded 2)
data = data[~data['recurr'].isin([2])]
# Drop columns that would not be available at prediction time
data.drop('OS',axis = 1,inplace=True)
# Fill the NAs of EBV, PI and LI with each column's mode
most_common1=data['EBV'].value_counts().index[0]
data['EBV'].fillna(most_common1,inplace=True)
most_common2=data['PI'].value_counts().index[0]
data['PI'].fillna(most_common2,inplace=True)
most_common3=data['LI'].value_counts().index[0]
data['LI'].fillna(most_common3,inplace=True)

# Feature engineering
# Discretize the continuous age by quantiles, so every bin is roughly the same size
data['age']=pd.qcut(data['age'],q=4,labels=[1,2,3,4])
#data['DFS'] = pd.qcut(data['OS'], q=4, labels=[1, 2, 3, 4])
#data['OS'] = pd.qcut(data['OS'], q=4, labels=[1, 2, 3, 4])
# Normalize inconsistent spellings in the data
data['MLH']= data['MLH'].map({'positive': 1, 'Positive': 1,'positive; MSH2 mutation (+)': 1, 'negative': 0, 'Negative': 0,'partial loss':0})
# Clean up irregular / free-text codes
data['WHO'] = data['WHO'].map({'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
'10': 10, '11': 11, '①2/ ②1/ ③1': 3, '①2/ ②1': 3, '2,3': 3, '2,6(20%)': 3,
'3, 11(neuroendocrine differentiation)': 3, '6,2': 3,
'11(lymphoepithelioma-like carcinoma)': 11,
'11 Adenocarcinoma with neuroendocrine differentiation': 11,
'11(composite adenoca and neuroendocrine ca)': 11})
data['T'] = data['T'].map({'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
'10': 10, '11': 11, '①2/ ②1/ ③1': 2, '①2/ ②1': 2, '2,3': 3, '2,6(20%)': 3,
'3, 11(neuroendocrine differentiation)': 3, '6,2': 3,
'11(lymphoepithelioma-like carcinoma)': 11,
'11 Adenocarcinoma with neuroendocrine differentiation': 11,
'11(composite adenoca and neuroendocrine ca)': 11})
# Cast the label and feature columns to strings
data['EBV'] = data['EBV'].astype(str)
data['WHO'] = data['WHO'].astype(str)
data['PI'] = data['PI'].astype(str)
data['LI'] = data['LI'].astype(str)
data['T'] = data['T'].astype(str)
data['N'] = data['N'].astype(str)
data['M'] = data['M'].astype(str)
data['MLH'] = data['MLH'].astype(str)
data['liver'] = data['liver'].astype(str)
data['fm'] = data['fm'].astype(str)
data['ascites'] = data['ascites'].astype(str)
data['lbj'] = data['lbj'].astype(str)
data['lbj2'] = data['lbj2'].astype(str)
data['bone'] = data['bone'].astype(str)
data['other'] = data['other'].astype(str)
# LabelEncoder converts textual / special-character categorical labels
# to distinct integer codes
data['sex'] = data['sex'].astype(str)
data['sex'] =LabelEncoder().fit_transform(data['sex'])
data['pStage'] = data['pStage'].astype(str)
data['pStage'] =LabelEncoder().fit_transform(data['pStage'])
data['Lauren'] = data['Lauren'].astype(str)
data['Lauren'] =LabelEncoder().fit_transform(data['Lauren'])
data['Code_site'] = data['Code_site'].astype(str)
data['Code_site'] =LabelEncoder().fit_transform(data['Code_site'])
'''
#特征降维:相关度矩阵分析
data.drop('GEO', axis=1, inplace=True) # 删列 axis=0删行
res = data.corr().abs()
res.to_csv("gse62254_linchuangcanshu_relation_abs.csv")

#特征降维
data.drop('FU',axis = 1,inplace=True)
data.drop('DFS',axis = 1,inplace=True)
'''
'''
#特征降维:PCA降维
pca = PCA(n_components=22)
df = pd.DataFrame(pca.fit_transform(data))
df.to_csv(r'gse62254_linchuangcanshu_PCA.csv',index=False)
'''
data.to_csv(r'combine_time.csv',index=False)
# Count how many samples exist for each metastasis site
print(Counter(data['liver']))
print(Counter(data['fm']))
print(Counter(data['ascites']))
print(Counter(data['lbj']))
print(Counter(data['lbj2']))
print(Counter(data['bone']))
print(Counter(data['other']))
print(data.apply(lambda col:sum(col.isnull())/col.size))
data.info()

# Separate the data into train and test sets
array = data.values
X1 = array[:, 1:22]  # column 0 is the sample id — uncorrelated with Y, filter it out
X2 = array[:, 24:41]
# Z-score standardization (disabled): zero mean and unit variance per feature
# ss = StandardScaler()
# X2 = ss.fit_transform(X2)
X= np.hstack((X1,X2))
#X=array[:, 25:41]
Y = array[:, 23]
testsize = 0.3
seed = 7
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=testsize, random_state=seed)
# Normalization scalers (currently unused — see commented lines below)
ss_X=StandardScaler()
ss_y=StandardScaler()
# X_train=ss_X.fit_transform(X_train)
# X_test=ss_X.fit_transform(X_test)
# y_train=ss_y.fit_transform(Y_train.reshape(-1, 1))
# y_test=ss_y.transform(Y_test.reshape(-1, 1))

# Fit several regressors and print each one's R^2 score on the test set
l_svr = SVR(kernel='linear')
l_svr.fit(X_train,Y_train)
print(l_svr.score(X_test,Y_test))

n_svr = SVR(kernel="poly")
n_svr.fit(X_train,Y_train)
print(n_svr.score(X_test,Y_test))

r_svr = SVR(kernel="rbf")
r_svr.fit(X_train,Y_train)
print(r_svr.score(X_test,Y_test))

from sklearn.neighbors import KNeighborsRegressor
knn = KNeighborsRegressor(weights="uniform")
knn.fit(X_train,Y_train)
print(knn.score(X_test,Y_test))

from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor()
rfr.fit(X_train,Y_train)
print(rfr.score(X_test,Y_test))

lr = LinearRegression()
lr.fit(X_train,Y_train)
print(lr.score(X_test,Y_test))

'''
#载入模型
models = {}
models['LR'] = LogisticRegression()
models['LDA'] = LinearDiscriminantAnalysis()
models['KNN'] = KNeighborsClassifier()
models['CART'] = DecisionTreeClassifier()
models['NB'] = GaussianNB()
models['SVM'] = SVC()

#进行k折交叉验证
num_folds = 9
seed = 7
kfold = KFold(n_splits=num_folds, random_state=seed,shuffle=True)

# 评估算法
results = []
for name in models:
    result = cross_val_score(models[name], X_train, Y_train, cv=kfold, scoring='accuracy')
    results.append(result)
    msg = '%s: %.3f (%.3f)' % (name, result.mean(), result.std())
    print(msg)

#输出结果的线箱图,进行均值和方差评估
fig = pyplot.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(models.keys())
pyplot.show()
'''
#!/usr/bin/env python3

"""
melt - a visual cat-like file viewer with syntax highlighting supporting 2-file views

The syntax highlighting is done with the bat tool that you need to
install first (see https://github.com/sharkdp/bat).

Author: Laszlo Szathmary, alias Jabba Laci, 2019
E-mail: jabba.laci@gmail.com
GitHub: https://github.com/jabbalaci/melt
"""

import atexit
import shutil
import sys
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Set, Tuple

import melt.config as cfg
from melt import docs, helper

OFFSET = 5    # space for line numbers


#####################
## class PlainFile ##
#####################

class PlainFile:
    """
    Plain input file without any syntax highlighting.
    """
    def __init__(self, fname: str, tmp_dir_path: str, panel: int) -> None:
        self.fname = fname    # can be a relative or an absolute path too
        self.fname_only = Path(fname).name    # just the name, no path to it
        self.tmp_dir_path = tmp_dir_path
        # indexes of lines that were truncated to fit the panel width;
        # ColoredFile.pad_lines_on_right marks these with red dots later
        self.idx_of_too_long_lines: Set[int] = set()
        # this is the maximal length of a line to fit in a frame (OFFSET is for the line numbers):
        self.line_length_limit = helper.get_terminal_width(half=True, panel=panel) - OFFSET
        #
        # self.tmp_file_path is created later

    def read(self) -> None:
        """Read the file into self.lines, truncating lines too long for the panel."""
        with open(self.fname) as f:
            self.lines = f.readlines()
        # self.lines = self.clean_lines(self.lines)
        #
        for idx, line in enumerate(self.lines):
            if len(line) > self.line_length_limit:
                line = line[:self.line_length_limit-3]    # truncate and leave space for the 3 dots
                self.lines[idx] = line
                self.idx_of_too_long_lines.add(idx)

    def clean_lines(self, lines: List[str]) -> List[str]:
        """Strip trailing whitespace and drop trailing blank lines (currently unused)."""
        lines = [line.rstrip() for line in lines]
        while (len(lines) > 0) and (lines[-1] == ""):
            lines.pop()
        #
        return lines

    def max_width(self, offset: int = 0) -> int:
        """Return the length of the longest line, plus `offset`."""
        return max(len(line) for line in self.lines) + offset

    def get_number_of_lines(self) -> int:
        """Return how many lines the file currently holds."""
        return len(self.lines)

    def add_extra_lines(self, goal: int) -> None:
        """Pad self.lines with blank lines until it has `goal` lines."""
        for _ in range(goal - self.get_number_of_lines()):
            self.lines.append("")

    def save_as_tmp(self) -> None:
        """Write the (possibly truncated/padded) lines to a unique temp file.

        The temp file name embeds the original file name so bat can detect
        the language from the extension; its path is kept in self.tmp_file_path.
        """
        tmp = tempfile.NamedTemporaryFile(dir=self.tmp_dir_path).name
        fname = f"{tmp}_{self.fname_only}"
        self.tmp_file_path = fname
        # print(self.tmp_file_path)
        with open(fname, "w") as f:
            for line in self.lines:
                print(line, file=f)


#######################
## class ColoredFile ##
#######################

class ColoredFile:
    """
    Syntax highlighted version of the input file.
    """
    def __init__(self, plain_file: PlainFile, options: Dict[str, Any]) -> None:
        self.pf = plain_file
        self.light = bool(options.get("light"))    # True or False
        # self.fname is set later

    def save_file(self) -> None:
        """Run bat on the plain temp file and store its colored output lines.

        The ANSI-colored output is kept in self.lines and also written to a
        temp file whose path ends up in self.fname.
        """
        tmp = tempfile.NamedTemporaryFile(dir=self.pf.tmp_dir_path).name
        fname = f"{tmp}_{self.pf.fname_only}"
        self.fname = fname
        #
        cmd = "{bat} --color=always --style numbers {light} {fn}".format(
            bat=cfg.BAT,
            light=f"--theme={cfg.LIGHT_THEME}" if self.light else "",
            fn=self.pf.tmp_file_path
        )
        # print("#", cmd)
        self.lines = helper.get_simple_cmd_output(cmd).splitlines()
        with open(self.fname, "w") as f:
            for line in self.lines:
                print(line, file=f)

    def pad_lines_on_right(self) -> None:
        """Right-pad each colored line to the panel width.

        Truncated lines get the red-dots marker instead of padding; the
        padding is computed from the *plain* line length, since the colored
        line contains invisible ANSI escape sequences.
        """
        assert len(self.pf.lines) == len(self.lines)
        #
        # width = helper.get_terminal_width(half=True)
        width = self.pf.line_length_limit + OFFSET    # width of the panel
        for idx in range(len(self.pf.lines)):
            if idx in self.pf.idx_of_too_long_lines:
                self.lines[idx] += helper.THREE_RED_DOTS
            else:
                plain_line = self.pf.lines[idx]
                length = len(plain_line) + OFFSET
                spaces = " " * (width - length)
                self.lines[idx] += spaces

#############################################################################

def merge_colored_files(cf1: ColoredFile, cf2: ColoredFile) -> None:
    """
    Merge the colored versions of the files and place them side by side.
    """
    assert len(cf1.lines) == len(cf2.lines)
    #
    # Top horizontal rule with a T-junction where the middle divider starts.
    width = helper.get_terminal_width()
    line = [helper.CHAR_HORIZONTAL_LINE] * width
    idx = helper.get_terminal_width(half=True)
    line[idx] = helper.CHAR_T_DOWN
    hr = "{c}{line}{nc}".format(c=helper.FRAME_COLOR, line="".join(line), nc=helper.NO_COLOR)
    # NOTE(review): this write has no trailing newline before the first content
    # row — presumably the full-width rule wraps the terminal; confirm.
    sys.stdout.write(hr)
    #
    # Left line, colored vertical divider, right line (print adds the newline).
    for l1, l2 in zip(cf1.lines, cf2.lines):
        sys.stdout.write(l1)
        sys.stdout.write("{c}{char}{nc}".format(c=helper.FRAME_COLOR,
                                                char=helper.CHAR_VERTICAL_LINE,
                                                nc=helper.NO_COLOR))
        print(l2)
    #
    # Bottom rule reuses the same line buffer with an upward T-junction.
    line[idx] = helper.CHAR_T_UP
    hr = "{c}{line}{nc}".format(c=helper.FRAME_COLOR, line="".join(line), nc=helper.NO_COLOR)
    print(hr)


def make_equal_long(pf1: PlainFile, pf2: PlainFile) -> None:
    """
    The two files must have the same number of lines.
    If one of them is shorter, then fill it up with blank lines.
    """
    maxi = max([pf1.get_number_of_lines(), pf2.get_number_of_lines()])
    pf1.add_extra_lines(maxi)
    pf2.add_extra_lines(maxi)


def check_command_line_args(argv: List[str]) -> Tuple[str, str, Dict[str, Any]]:
    """
    Treat the command-line arguments.
    """
    args = argv[1:]
    options: Dict[str, Any] = {}
    # Consume the known flags first, then expect exactly two file names.
    if "--light" in args:
        options['light'] = True
        args.remove("--light")
    if (len(args) == 0) or ("-h" in args) or ("--help" in args):
        docs.show_help()
        sys.exit(0)
    if len(args) != 2:
        print("Error: provide two input files / use valid options", file=sys.stderr)
        sys.exit(1)
    # else
    fname1, fname2 = args
    return fname1, fname2, options

##############################################################################

def check_if_bat_exists() -> None:
    """
    The program requires bat for the syntax highlighting.
    """
    if not helper.which(cfg.BAT):
        print("Error: bat not found.", file=sys.stderr)
        print("Tip: visit https://github.com/sharkdp/bat and install it.", file=sys.stderr)
        sys.exit(1)


def main(argv: List[str]) -> None:
    """
    Controller.
    """
    check_if_bat_exists()
    # if bat is available:
    fname1, fname2, options = check_command_line_args(argv)
    # A shared temp dir, removed automatically on exit.
    tmp_dir_path = tempfile.mkdtemp()
    atexit.register(shutil.rmtree, tmp_dir_path)

    # Read both files and pad the shorter one so the panels line up.
    pf1 = PlainFile(fname1, tmp_dir_path, cfg.LEFT_SIDE)
    pf1.read()
    pf2 = PlainFile(fname2, tmp_dir_path, cfg.RIGHT_SIDE)
    pf2.read()
    make_equal_long(pf1, pf2)
    pf1.save_as_tmp()
    pf2.save_as_tmp()

    # Colorize each side with bat, pad to panel width, then print side by side.
    cf1 = ColoredFile(pf1, options)
    cf1.save_file()
    cf2 = ColoredFile(pf2, options)
    cf2.save_file()

    cf1.pad_lines_on_right()
    cf2.pad_lines_on_right()

    merge_colored_files(cf1, cf2)

##############################################################################

if __name__ == "__main__":
    main(sys.argv)
# coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A REINFORCE Agent. Implements the REINFORCE algorithm from (Williams, 1992): https://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from typing import Callable, Optional, Text import gin import numpy as np import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import from tf_agents.agents import data_converter from tf_agents.agents import tf_agent from tf_agents.networks import network from tf_agents.policies import actor_policy from tf_agents.policies import greedy_policy from tf_agents.trajectories import time_step as ts from tf_agents.trajectories import trajectory as traj from tf_agents.typing import types from tf_agents.utils import common from tf_agents.utils import eager_utils from tf_agents.utils import nest_utils from tf_agents.utils import value_ops # A function `advantage(returns, value_predictions) -> advantages. AdvantageFnType = Callable[[types.Tensor, types.Tensor], types.Tensor] def _standard_normalize(values, axes=(0,)): """Standard normalizes values `values`. Args: values: Tensor with values to be standardized. axes: Axes used to compute mean and variances. Returns: Standardized values (values - mean(values[axes])) / std(values[axes]). 
""" values_mean, values_var = tf.nn.moments(x=values, axes=axes, keepdims=True) epsilon = np.finfo(values.dtype.as_numpy_dtype).eps normalized_values = (values - values_mean) / (tf.sqrt(values_var) + epsilon) return normalized_values def _entropy_loss(distributions, spec, weights=None): """Computes entropy loss. Args: distributions: A possibly batched tuple of distributions. spec: A nested tuple representing the action spec. weights: Optional scalar or element-wise (per-batch-entry) importance weights. Includes a mask for invalid timesteps. Returns: A Tensor representing the entropy loss. """ with tf.name_scope('entropy_regularization'): entropy = -tf.cast(common.entropy(distributions, spec), tf.float32) if weights is not None: entropy *= weights return tf.reduce_mean(input_tensor=entropy) def _get_initial_policy_state(policy, time_steps): """Gets the initial state of a policy.""" batch_size = ( tf.compat.dimension_at_index(time_steps.discount.shape, 0) or tf.shape(time_steps.discount)[0] ) return policy.get_initial_state(batch_size=batch_size) class ReinforceAgentLossInfo( collections.namedtuple( 'ReinforceAgentLossInfo', ( 'policy_gradient_loss', 'policy_network_regularization_loss', 'entropy_regularization_loss', 'value_estimation_loss', 'value_network_regularization_loss', ), ) ): """ReinforceAgentLossInfo is stored in the `extras` field of the LossInfo. All losses, except for `policy_network_regularization_loss` have a validity mask applied to ensure no loss or error is calculated for episode boundaries. policy_gradient_loss: The weighted policy_gradient loss. policy_network_regularization_loss: The regularization loss terms from the policy network used to generate the `policy_gradient_loss`. entropy_regularization_loss: The entropy regularization loss. value_estimation_loss: If value estimation network is being used, the loss associated with that network. """ pass @gin.configurable class ReinforceAgent(tf_agent.TFAgent): """A REINFORCE Agent. 
Implements: REINFORCE algorithm from "Simple statistical gradient-following algorithms for connectionist reinforcement learning" Williams, R.J., 1992. https://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf REINFORCE with state-value baseline, where state-values are estimated with function approximation, from "Reinforcement learning: An introduction" (Sec. 13.4) Sutton, R.S. and Barto, A.G., 2018. http://incompleteideas.net/book/the-book-2nd.html The REINFORCE agent can be optionally provided with: - value_network: A `tf_agents.network.Network` which parameterizes state-value estimation as a neural network. The network will be called with call(observation, step_type) and returns a floating point state-values tensor. - value_estimation_loss_coef: Weight on the value prediction loss. If value_network and value_estimation_loss_coef are provided, advantages are computed as `advantages = (discounted accumulated rewards) - (estimated state-values)` and the overall learning objective becomes: `(total loss) = (policy gradient loss) + value_estimation_loss_coef * (squared error of estimated state-values)` """ def __init__( self, time_step_spec: ts.TimeStep, action_spec: types.TensorSpec, actor_network: network.Network, optimizer: types.Optimizer, value_network: Optional[network.Network] = None, value_estimation_loss_coef: types.Float = 0.2, advantage_fn: Optional[AdvantageFnType] = None, use_advantage_loss: bool = True, gamma: types.Float = 1.0, normalize_returns: bool = True, gradient_clipping: Optional[types.Float] = None, debug_summaries: bool = False, summarize_grads_and_vars: bool = False, entropy_regularization: Optional[types.Float] = None, train_step_counter: Optional[tf.Variable] = None, name: Optional[Text] = None, ): """Creates a REINFORCE Agent. Args: time_step_spec: A `TimeStep` spec of the expected time_steps. action_spec: A nest of BoundedTensorSpec representing the actions. actor_network: A tf_agents.network.Network to be used by the agent. 
The network will be called with call(observation, step_type). optimizer: Optimizer for the actor network. value_network: (Optional) A `tf_agents.network.Network` to be used by the agent. The network will be called with call(observation, step_type) and returns a floating point value tensor. value_estimation_loss_coef: (Optional) Multiplier for value prediction loss to balance with policy gradient loss. advantage_fn: A function `A(returns, value_preds)` that takes returns and value function predictions as input and returns advantages. The default is `A(returns, value_preds) = returns - value_preds` if a value network is specified and `use_advantage_loss=True`, otherwise `A(returns, value_preds) = returns`. use_advantage_loss: Whether to use value function predictions for computing returns. `use_advantage_loss=False` is equivalent to setting `advantage_fn=lambda returns, value_preds: returns`. gamma: A discount factor for future rewards. normalize_returns: Whether to normalize returns across episodes when computing the loss. gradient_clipping: Norm length to clip gradients. debug_summaries: A bool to gather debug summaries. summarize_grads_and_vars: If True, gradient and network variable summaries will be written during training. entropy_regularization: Coefficient for entropy regularization loss term. train_step_counter: An optional counter to increment every time the train op is run. Defaults to the global_step. name: The name of this agent. All variables in this module will fall under that name. Defaults to the class name. 
""" tf.Module.__init__(self, name=name) actor_network.create_variables() self._actor_network = actor_network if value_network: value_network.create_variables() self._value_network = value_network collect_policy = actor_policy.ActorPolicy( time_step_spec=time_step_spec, action_spec=action_spec, actor_network=self._actor_network, clip=True, ) policy = greedy_policy.GreedyPolicy(collect_policy) self._optimizer = optimizer self._gamma = gamma self._normalize_returns = normalize_returns self._gradient_clipping = gradient_clipping self._entropy_regularization = entropy_regularization self._value_estimation_loss_coef = value_estimation_loss_coef self._baseline = self._value_network is not None self._advantage_fn = advantage_fn if self._advantage_fn is None: if use_advantage_loss and self._baseline: self._advantage_fn = lambda returns, value_preds: returns - value_preds else: self._advantage_fn = lambda returns, _: returns super(ReinforceAgent, self).__init__( time_step_spec, action_spec, policy, collect_policy, train_sequence_length=None, debug_summaries=debug_summaries, summarize_grads_and_vars=summarize_grads_and_vars, train_step_counter=train_step_counter, ) self._as_trajectory = data_converter.AsTrajectory(self.data_context) def _initialize(self): pass def _train(self, experience, weights=None): experience = self._as_trajectory(experience) # Add a mask to ensure we reset the return calculation at episode # boundaries. This is needed in cases where episodes are truncated before # reaching a terminal state. Note experience is a batch of trajectories # where reward=next_step.reward so the mask may look shifted at first. 
non_last_mask = tf.cast( tf.math.not_equal(experience.next_step_type, ts.StepType.LAST), tf.float32, ) discounts = non_last_mask * experience.discount * self._gamma returns = value_ops.discounted_return( experience.reward, discounts, time_major=False ) if self._debug_summaries: tf.compat.v2.summary.histogram( name='rewards', data=experience.reward, step=self.train_step_counter ) tf.compat.v2.summary.histogram( name='discounts', data=experience.discount, step=self.train_step_counter, ) tf.compat.v2.summary.histogram( name='returns', data=returns, step=self.train_step_counter ) with tf.GradientTape() as tape: loss_info = self.total_loss( experience, tf.stop_gradient(returns), weights=weights, training=True ) tf.debugging.check_numerics(loss_info.loss, 'Loss is inf or nan') variables_to_train = self._actor_network.trainable_weights if self._baseline: variables_to_train += self._value_network.trainable_weights grads = tape.gradient(loss_info.loss, variables_to_train) grads_and_vars = list(zip(grads, variables_to_train)) if self._gradient_clipping: grads_and_vars = eager_utils.clip_gradient_norms( grads_and_vars, self._gradient_clipping ) if self._summarize_grads_and_vars: eager_utils.add_variables_summaries( grads_and_vars, self.train_step_counter ) eager_utils.add_gradients_summaries( grads_and_vars, self.train_step_counter ) self._optimizer.apply_gradients(grads_and_vars) self.train_step_counter.assign_add(1) return tf.nest.map_structure(tf.identity, loss_info) def total_loss( self, experience: traj.Trajectory, returns: types.Tensor, weights: types.Tensor, training: bool = False, ) -> tf_agent.LossInfo: # Ensure we see at least one full episode. time_steps = ts.TimeStep( experience.step_type, tf.zeros_like(experience.reward), tf.zeros_like(experience.discount), experience.observation, ) is_last = experience.is_last() num_episodes = tf.reduce_sum(tf.cast(is_last, tf.float32)) tf.debugging.assert_greater( num_episodes, 0.0, message=( 'No complete episode found. 
REINFORCE requires full episodes ' 'to compute losses.' ), ) # Mask out partial episodes at the end of each batch of time_steps. # NOTE: We use is_last rather than is_boundary because the last transition # is the transition with the last valid reward. In other words, the # reward on the boundary transitions do not have valid rewards. Since # REINFORCE is calculating a loss w.r.t. the returns (and not bootstrapping) # keeping the boundary transitions is irrelevant. valid_mask = tf.cast(experience.is_last(), dtype=tf.float32) valid_mask = tf.math.cumsum(valid_mask, axis=1, reverse=True) valid_mask = tf.cast(valid_mask > 0, dtype=tf.float32) if weights is not None: weights *= valid_mask else: weights = valid_mask advantages = returns value_preds = None if self._baseline: value_preds, _ = self._value_network( time_steps.observation, time_steps.step_type, training=True ) if self._debug_summaries: tf.compat.v2.summary.histogram( name='value_preds', data=value_preds, step=self.train_step_counter ) advantages = self._advantage_fn(returns, value_preds) if self._debug_summaries: tf.compat.v2.summary.histogram( name='advantages', data=advantages, step=self.train_step_counter ) # TODO(b/126592060): replace with tensor normalizer. 
if self._normalize_returns: advantages = _standard_normalize(advantages, axes=(0, 1)) if self._debug_summaries: tf.compat.v2.summary.histogram( name='normalized_%s' % ('advantages' if self._baseline else 'returns'), data=advantages, step=self.train_step_counter, ) nest_utils.assert_same_structure(time_steps, self.time_step_spec) policy_state = _get_initial_policy_state(self.collect_policy, time_steps) actions_distribution = self.collect_policy.distribution( time_steps, policy_state=policy_state ).action policy_gradient_loss = self.policy_gradient_loss( actions_distribution, experience.action, experience.is_boundary(), advantages, num_episodes, weights, ) entropy_regularization_loss = self.entropy_regularization_loss( actions_distribution, weights ) network_regularization_loss = tf.nn.scale_regularization_loss( self._actor_network.losses ) total_loss = ( policy_gradient_loss + network_regularization_loss + entropy_regularization_loss ) losses_dict = { 'policy_gradient_loss': policy_gradient_loss, 'policy_network_regularization_loss': network_regularization_loss, 'entropy_regularization_loss': entropy_regularization_loss, 'value_estimation_loss': 0.0, 'value_network_regularization_loss': 0.0, } value_estimation_loss = None if self._baseline: value_estimation_loss = self.value_estimation_loss( value_preds, returns, num_episodes, weights ) value_network_regularization_loss = tf.nn.scale_regularization_loss( self._value_network.losses ) total_loss += value_estimation_loss + value_network_regularization_loss losses_dict['value_estimation_loss'] = value_estimation_loss losses_dict['value_network_regularization_loss'] = ( value_network_regularization_loss ) loss_info_extra = ReinforceAgentLossInfo(**losses_dict) losses_dict['total_loss'] = total_loss # Total loss not in loss_info_extra. 
common.summarize_scalar_dict( losses_dict, self.train_step_counter, name_scope='Losses/' ) return tf_agent.LossInfo(total_loss, loss_info_extra) def policy_gradient_loss( self, actions_distribution: types.NestedDistribution, actions: types.NestedTensor, is_boundary: types.Tensor, returns: types.Tensor, num_episodes: types.Int, weights: Optional[types.Tensor] = None, ) -> types.Tensor: """Computes the policy gradient loss. Args: actions_distribution: A possibly batched tuple of action distributions. actions: Tensor with a batch of actions. is_boundary: Tensor of booleans that indicate if the corresponding action was in a boundary trajectory and should be ignored. returns: Tensor with a return from each timestep, aligned on index. Works better when returns are normalized. num_episodes: Number of episodes contained in the training data. weights: Optional scalar or element-wise (per-batch-entry) importance weights. May include a mask for invalid timesteps. Returns: policy_gradient_loss: A tensor that will contain policy gradient loss for the on-policy experience. """ # TODO(b/126594799): Add class IndependentNested(tfd.Distribution) to handle # nests of independent distributions like this. action_log_prob = common.log_probability( actions_distribution, actions, self.action_spec ) # Filter out transitions between end state of previous episode and start # state of next episode. 
valid_mask = tf.cast(~is_boundary, tf.float32) action_log_prob *= valid_mask action_log_prob_times_return = action_log_prob * returns if weights is not None: action_log_prob_times_return *= weights if self._debug_summaries: tf.compat.v2.summary.histogram( name='action_log_prob', data=action_log_prob, step=self.train_step_counter, ) tf.compat.v2.summary.histogram( name='action_log_prob_times_return', data=action_log_prob_times_return, step=self.train_step_counter, ) # Policy gradient loss is defined as the sum, over timesteps, of action # log-probability times the cumulative return from that timestep onward. # For more information, see (Williams, 1992). policy_gradient_loss = -tf.reduce_sum( input_tensor=action_log_prob_times_return ) # We take the mean over episodes by dividing by num_episodes. policy_gradient_loss = policy_gradient_loss / num_episodes return policy_gradient_loss def entropy_regularization_loss( self, actions_distribution: types.NestedDistribution, weights: Optional[types.Tensor] = None, ) -> types.Tensor: """Computes the optional entropy regularization loss. Extending REINFORCE by entropy regularization was originally proposed in "Function optimization using connectionist reinforcement learning algorithms." (Williams and Peng, 1991). Args: actions_distribution: A possibly batched tuple of action distributions. weights: Optional scalar or element-wise (per-batch-entry) importance weights. May include a mask for invalid timesteps. Returns: entropy_regularization_loss: A tensor with the entropy regularization loss. """ if self._entropy_regularization: loss = _entropy_loss(actions_distribution, self.action_spec, weights) loss *= self._entropy_regularization else: loss = tf.constant(0.0, dtype=tf.float32) return loss def value_estimation_loss( self, value_preds: types.Tensor, returns: types.Tensor, num_episodes: types.Int, weights: Optional[types.Tensor] = None, ) -> types.Tensor: """Computes the value estimation loss. 
Args: value_preds: Per-timestep estimated values. returns: Per-timestep returns for value function to predict. num_episodes: Number of episodes contained in the training data. weights: Optional scalar or element-wise (per-batch-entry) importance weights. May include a mask for invalid timesteps. Returns: value_estimation_loss: A scalar value_estimation_loss loss. """ value_estimation_error = tf.math.squared_difference(returns, value_preds) if weights is not None: value_estimation_error *= weights value_estimation_loss = ( tf.reduce_sum(input_tensor=value_estimation_error) * self._value_estimation_loss_coef ) # We take the mean over episodes by dividing by num_episodes. value_estimation_loss = value_estimation_loss / num_episodes return value_estimation_loss
import unittest

from programy.storage.stores.nosql.mongo.dao.oob import OOB


class OOBTests(unittest.TestCase):
    """Tests for the Mongo OOB DAO: construction, (de)serialisation and repr.

    Fixes two defects in the original module:
      * the repr tests were defined with the SAME names as the earlier
        from_document tests (`test_from_document_no_id` /
        `test_from_document_with_id`), so the first pair was shadowed and
        never executed -- they are renamed `test_repr_*` below;
      * `assertEquals` (deprecated alias) replaced with `assertEqual`.
    """

    def test_init_no_id(self):
        # A freshly constructed OOB has no Mongo id and serialises without '_id'.
        oob = OOB(name="test", oob_class="test.oobclass")
        self.assertIsNotNone(oob)
        self.assertIsNone(oob.id)
        self.assertEqual("test", oob.name)
        self.assertEqual("test.oobclass", oob.oob_class)
        self.assertEqual({'name': 'test', 'oob_class': 'test.oobclass'}, oob.to_document())

    def test_init_with_id(self):
        # Once an id is assigned, to_document() includes it under '_id'.
        oob = OOB(name="test", oob_class="test.oobclass")
        oob.id = '666'
        self.assertIsNotNone(oob)
        self.assertIsNotNone(oob.id)
        self.assertEqual('666', oob.id)
        self.assertEqual("test", oob.name)
        self.assertEqual("test.oobclass", oob.oob_class)
        self.assertEqual({'_id': '666', 'name': 'test', 'oob_class': 'test.oobclass'}, oob.to_document())

    def test_from_document_no_id(self):
        # Deserialising a document without '_id' leaves id as None.
        oob1 = OOB.from_document({'name': 'test', 'oob_class': 'test.oobclass'})
        self.assertIsNotNone(oob1)
        self.assertIsNone(oob1.id)
        self.assertEqual("test", oob1.name)
        self.assertEqual("test.oobclass", oob1.oob_class)

    def test_from_document_with_id(self):
        # Deserialising a document with '_id' restores the id.
        oob2 = OOB.from_document({'_id': '666', 'name': 'test', 'oob_class': 'test.oobclass'})
        self.assertIsNotNone(oob2)
        self.assertIsNotNone(oob2.id)
        self.assertEqual('666', oob2.id)
        self.assertEqual("test", oob2.name)
        self.assertEqual("test.oobclass", oob2.oob_class)

    def test_repr_no_id(self):
        # str() renders 'n/a' for a missing id.
        oob1 = OOB.from_document({'name': 'test', 'oob_class': 'test.oobclass'})
        self.assertEqual("<OOB(id='n/a', name='test', oob_class='test.oobclass')>", str(oob1))

    def test_repr_with_id(self):
        # str() renders the stored id when present.
        oob2 = OOB.from_document({'_id': '666', 'name': 'test', 'oob_class': 'test.oobclass'})
        self.assertEqual("<OOB(id='666', name='test', oob_class='test.oobclass')>", str(oob2))
from django.contrib.auth.hashers import check_password
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from django.conf import settings
import smtplib


def mailer(reciever, token):
    """Send a password-reset e-mail containing a tokenised reset link.

    Args:
        reciever: destination e-mail address (parameter name kept for
            backward compatibility with existing callers).
        token: reset token; the link is settings.WEB_URL + "reset/" + token.

    Raises:
        smtplib.SMTPException / OSError on connection, TLS or auth failure.

    Fixes over the original: the SMTP connection is now closed in a
    ``finally`` block (previously ``quit()`` was skipped if any step raised,
    leaking the socket), and two user-facing typos are corrected
    ("Managment" -> "Management", "doesnt works" -> "doesn't work").
    """
    mail = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        mail.starttls()
        mail.ehlo()
        # Credentials come from Django settings; never hard-code them here.
        mail.login(settings.MAIL_ID, settings.MAIL_PASS)

        message = MIMEMultipart('alternative')
        sender = settings.MAIL_ID
        message['From'] = "Hostel Management System"
        message['To'] = reciever
        message['Subject'] = "Password Reset Request"

        # Build the reset URL once; it is used twice in the body.
        reset_url = str(settings.WEB_URL) + "reset/" + str(token)
        mess = (
            "Hello User, \n Looks like you have had some trouble in logging with us."
            " No worries, this email has been generated to help you just with that."
            " \n Click on the link below to reset your password. \n "
            + reset_url
            + "\n\n If that doesn't work, try copying and pasting this in your browser"
            " while being connected to college WiFi:- \n "
            + reset_url
        )
        message.attach(MIMEText(mess, 'plain'))
        mail.sendmail(sender, reciever, message.as_string())
    finally:
        # Always release the connection, even when sending fails.
        mail.quit()
from datetime import datetime, timezone

from django.contrib.auth import user_logged_in
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.db.models.signals import post_save
from django.dispatch import receiver


class Player(models.Model):
    """Elo-style player profile linked 1:1 to a Django auth User."""

    user = models.OneToOneField(User, on_delete=models.DO_NOTHING, null=True)
    # Group whose ladder this player is currently viewing/playing in.
    active_group = models.ForeignKey(Group, on_delete=models.DO_NOTHING, null=True)

    def current_score(self, group):
        """Return the player's latest score in `group`, or the 100 default."""
        try:
            return ScoreTransaction.objects.filter(player=self).filter(match__group=group).latest('match__played_at').score
        # NOTE(review): bare except also hides DB errors, not only
        # DoesNotExist -- consider narrowing to ScoreTransaction.DoesNotExist.
        except:
            return 100

    def wins_from(self, player, match):
        """Transfer 25% of the loser's current score from `player` to self."""
        s = player.current_score(group=match.group) * 0.25
        self.add_score(s, match)
        player.add_score(-s, match)

    def add_score(self, score, match):
        """Append a new ScoreTransaction: previous score plus `score` delta."""
        ScoreTransaction.objects.create(score=self.current_score(group=match.group) + score, player=self, match=match)

    # NOTE(review): the signal receivers below sit inside the class body in the
    # source layout -- registration still happens at import time, but confirm
    # whether module level was intended.
    @receiver(post_save, sender=User)
    def create_player(sender, instance, created, **kwargs):
        # Auto-create the Player profile when a new User is saved.
        if created:
            Player.objects.create(user=instance)

    @receiver(post_save, sender=User)
    def save_player(sender, instance, **kwargs):
        # Keep the related Player row in sync on every User save.
        instance.player.save()

    @receiver(user_logged_in, sender=User)
    def set_active_group(sender, user, **kwargs):
        # NOTE(review): `&` is a bitwise op (no short-circuit) -- `and` was
        # probably meant; also the assignment is never saved, so the change
        # is lost -- confirm a `user.player.save()` is missing.
        if (not user.player.active_group) & user.groups.exists():
            user.player.active_group = user.groups.last()

    def __str__(self):
        return self.user.username


class Match(models.Model):
    """A challenge between two players within a group."""

    challenger = models.ForeignKey(Player, on_delete=models.DO_NOTHING, related_name='challenger')
    opponent = models.ForeignKey(Player, on_delete=models.DO_NOTHING, related_name='opponent')
    # None until a result is submitted.
    challenger_wins = models.BooleanField(null=True, blank=True)
    date = models.DateTimeField()
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
    played_at = models.DateTimeField(null=True, blank=True)
    cancelled_at = models.DateTimeField(null=True, blank=True)

    def submit_result(self, challenger_wins):
        """Record the match result once and move score between the players.

        No-op if a result was already submitted (played_at is set).
        """
        if self.played_at:
            return
        self.challenger_wins = challenger_wins
        self.played_at = datetime.now(tz=timezone.utc)
        if challenger_wins:
            self.challenger.wins_from(self.opponent, self)
        else:
            self.opponent.wins_from(self.challenger, self)
        self.save()

    def __str__(self):
        return "{challenger} VS {opponent} ({date})".format(challenger=self.challenger.user.username, opponent=self.opponent.user.username, date=self.date)


class ScoreTransaction(models.Model):
    # Last score will be its actual score. Earlier scores are stored for efficient statistics
    player = models.ForeignKey(Player, on_delete=models.DO_NOTHING)
    score = models.IntegerField(default=100)
    match = models.ForeignKey(Match, on_delete=models.CASCADE, null=True)
    # group = models.ForeignKey(Group, on_delete=models.CASCADE)

    def __str__(self):
        return f"{self.player.user.username}: {self.score} ({self.match.group})"


class JoinCode(models.Model):
    """Invitation code allowing a user to join a group, with an expiry."""

    group = models.ForeignKey(Group, on_delete=models.CASCADE)
    key = models.TextField()
    expires_at = models.DateTimeField()
from collections import namedtuple

# Field names of a squeak profile record, one per line for easy diffing.
_SQUEAK_PROFILE_FIELDS = [
    "profile_id",
    "profile_name",
    "private_key",
    "address",
    "sharing",
    "following",
    "whitelisted",
]

# Lightweight immutable record describing a squeak profile.
SqueakProfile = namedtuple("SqueakProfile", _SQUEAK_PROFILE_FIELDS)
import queue
import threading
import urllib.request


def getUrl(q, url):
    """Fetch `url` and put the raw response body (bytes) onto queue `q`.

    Intended to run inside a worker thread; urlopen blocks on the network.
    """
    print('getUrl(' + url + ') called from a thead.')
    q.put(urllib.request.urlopen(url).read())


# URLs raced against each other; only the fastest response is consumed below.
theurls = ["http://google.com", "http://google.de", "http://google.ca"]

threadQueue = queue.Queue()

# One daemon thread per URL: daemon=True lets the process exit even if some
# fetches never complete.
for u in theurls:
    t = threading.Thread(target=getUrl, args=(threadQueue, u))
    t.daemon = True
    t.start()

# Block until the FIRST fetch finishes; the other responses are discarded.
output = threadQueue.get()
# print(output)
# NOTE(review): Python 2 code -- `urllib2` and csv's binary 'wb' mode do not
# exist / behave differently on Python 3.
import urllib2
import csv
from bs4 import BeautifulSoup


def get_seasons(number_of_seasons):
    """Scrape IMDb episode pages for seasons 1..number_of_seasons and write a CSV."""
    i = 1
    all_seasons = []
    while i < number_of_seasons + 1:
        all_seasons.append(scrape_page(i))
        i += 1
    write_seasons(all_seasons)


def scrape_page(season_num):
    """Return a list of [season, episode, title, airdate, description, image] rows.

    Scrapes the episode list of The Simpsons (IMDb title tt0096697) for one
    season. A User-Agent header is required or IMDb rejects the request.
    """
    imdb_page = 'https://www.imdb.com/title/tt0096697/episodes?season=%s' % season_num
    request = urllib2.Request(imdb_page, headers={'User-Agent': 'your user-agent'})
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    episode_list = soup.find('div', attrs={"class": "eplist"})
    # recursive=False: only direct children, so nested list_item divs are skipped.
    episodes = episode_list.find_all('div', attrs={"class": "list_item"}, recursive=False)
    season_episodes = []
    episode_num = 1
    for episode in episodes:
        title = encode_text(episode.find('a', attrs={'itemprop': 'name'}))
        airdate = encode_text(episode.find('div', attrs={'class': 'airdate'}))
        description = encode_text(episode.find('div', attrs={'class': "item_description"}))
        image_url = format_image_url(episode.find('img')['src'])
        season_episodes.append([season_num, episode_num, title, airdate, description, image_url])
        episode_num += 1
    return season_episodes


def write_seasons(seasons):
    """Flatten the per-season row lists into simpsons_data.csv with a header."""
    file = open('simpsons_data.csv', 'wb')
    writer = csv.writer(file)
    writer.writerow(['Season', 'Episode', 'Title', 'Airdate', 'Description', 'Image'])
    for episodes in seasons:
        for episode in episodes:
            writer.writerow(episode)
    file.close()


def encode_text(txt):
    """Strip a tag's text and escape characters that would break the CSV.

    Commas become the literal token '\\comma' and double quotes are removed --
    presumably undone by a downstream consumer; TODO confirm.
    """
    text = txt.text.strip()
    if "," in text:
        text = '\comma'.join(text.split(","))
    if "\"" in text:
        text = ''.join(text.split('\"'))
    return text.encode('utf-8')


def format_image_url(url):
    """Reduce an IMDb thumbnail URL to the full-size '_V1_.jpg' variant."""
    return (url.split("_V1_")[0] + "_V1_.jpg").encode('utf-8')


get_seasons(30)
def main():
    """Print sum(n**n for n in 1..1000) and its last ten digits (Project Euler 48).

    Fixes two defects in the original:
      * it collected ELEVEN trailing digits (the loop ran i = -1 down to -11)
        in reverse (least-significant-first) order while claiming to print the
        last ten;
      * it shadowed the builtin `sum` with a local accumulator.
    """
    min_val = 1
    max_val = 1000
    # Builtin sum over a generator replaces the manual accumulation loop.
    total = sum(n ** n for n in range(min_val, max_val + 1))
    print('The sum is:', total)
    # Exactly the last ten digits, most-significant first.
    last_ten = [int(d) for d in str(total)[-10:]]
    print('The last 10 digits are:', last_ten)
    print('actually 10 digits?', len(last_ten))


main()
from PythonFiles import constants as cs, initialization as init

########################################################################################
# FUNCTIONS USED DURING GAME LOOP UPDATES
########################################################################################


def gradual_status_bar_fluctuation(status_bar_name):
    """ Apply value change factors of current bar for each of their time intervals """
    status_bars = init.game_state.status_bars
    if status_bars[status_bar_name].fluctuation is not None:
        # fluctuation looks like [factor, interval, last_counter] from the
        # indexing below -- TODO confirm against the StatusBar class.
        item = status_bars[status_bar_name].fluctuation
        # Fluctuation supposed to happen based on passed game time
        fluctuation_counter = init.game_state.game_time // item[1]
        if abs(fluctuation_counter - item[2]) > 0:
            if status_bar_name == "Body Heat":
                pass  # NOTE(review): dead branch -- likely a leftover debug hook.
            # Apply all missed fluctuations at once, clamped at max_value, but
            # only while the projected value stays above -max/10.
            if not int(status_bars[status_bar_name].current_value - abs(fluctuation_counter - item[2]) * item[0]) < (-status_bars[status_bar_name].max_value / 10):
                status_bars[status_bar_name].current_value = min(status_bars[status_bar_name].current_value - abs(fluctuation_counter - item[2]) * item[0], status_bars[status_bar_name].max_value)
            #status_bars[status_bar_name].current_value = status_bars[status_bar_name].current_value - abs(fluctuation_counter - item[2]) * item[0]
            # Dropping into the (-max/7, -max/15) band ends the game, except for Calories.
            if (-status_bars[status_bar_name].max_value / 7) < status_bars[status_bar_name].current_value < (-status_bars[status_bar_name].max_value / 15):
                if status_bar_name != "Calories":
                    init.game_state.game_over = "Lost"
            # Last fluctuation that happened based on passed game time
            item[2] = fluctuation_counter


def immediate_status_bar_decay(status_bar_name, damage):
    """ Take immediate decay based on damage of a certain action"""
    status_bars = init.game_state.status_bars
    status_bars[status_bar_name].current_value -= damage
    # NOTE(review): `damage` is subtracted again in this comparison even though
    # current_value was already reduced above -- confirm whether intended.
    if status_bars[status_bar_name].current_value - damage < (-status_bars[status_bar_name].max_value / 20):
        if status_bar_name != "Calories":
            init.game_state.game_over = "Lost"


def immediate_status_bar_increase(status_bar_name, increase):
    """ Immediate increase in value based on certain action """
    status_bars = init.game_state.status_bars
    if min(status_bars[status_bar_name].max_value, status_bars[status_bar_name].current_value + increase) == status_bars[status_bar_name].max_value:
        # Increase would reach or exceed the cap: clamp to max.
        status_bars[status_bar_name].current_value = status_bars[status_bar_name].max_value
    elif status_bars[status_bar_name].current_value <= 0:
        # From a non-positive value, restart at the increase itself.
        status_bars[status_bar_name].current_value = int(increase)
    else:
        status_bars[status_bar_name].current_value = int(increase + status_bars[status_bar_name].current_value)


def get_heat_fluctuation_code():
    """ Get fluctuation code """
    # Get the heat factor data to obtain the fluctuation code
    fluctuation_code = ""
    # Check the factor booleans
    for name in cs.heat_factor_names:
        # Get the complete boolean variable names
        complete_name = "init.game_state." + name
        # Add an "n" in front of the current factor character if False
        # HACK: eval() on a constructed attribute path; tolerable only because
        # cs.heat_factor_names is a trusted constant -- getattr(init.game_state,
        # name) would be the safer equivalent.
        if not eval(complete_name):
            fluctuation_code += "n"
        # Get the current factor character
        fluctuation_code += name.lower()[0]
    return fluctuation_code


########################################################################################
# GAME LOOP UPDATING FUNCTIONS
########################################################################################


def update_status_bars():
    """ Update status bars value each frame """
    for status_bar_name in init.game_state.status_bars.keys():
        gradual_status_bar_fluctuation(status_bar_name)


def update_heat_fluctuation_factor(fluctuation_code):
    """ Update heat fluctuation factor based on location, day time, fire, weather """
    # Modify the Body Heat fluctuation factor if the environmental factors change
    if fluctuation_code != init.game_state.current_heat_factor_code:
        # Save the current environmental factor code
        init.game_state.current_heat_factor_code = fluctuation_code
        # If the status bar should stay stagnant, delete fluctuation factor
        if cs.heat_factor_fluctuation[fluctuation_code] == 0.0:
            init.game_state.status_bars["Body Heat"].remove_fluctuation_factor()
        else:
            # Time interval between two concurrent fluctuations with the factor value
            value = (cs.heat_factor_fluctuation[fluctuation_code] / 60) * (-1.0)
            # Modify the fluctuation factor
            init.game_state.status_bars["Body Heat"].add_fluctuation_factor(value, 1, init.game_state.game_time)
import tkinter as tk
import socket
import time
import re

HOST = '192.168.71.128'
PORT = 29999
command = [
    'robotmode',
    'get serial number',
    'get loaded program',
    'programState'
]


# Connect to port 29999 (dashboard)
def dashboard_connection():
    """Open a TCP connection to the robot dashboard server.

    Returns the connected socket, or None when the robot is unreachable
    (the error text is shown in the ``prog`` label defined below).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(2.0)
    try:
        s.connect((HOST, PORT))
        return s
    except socket.error as e:
        s.close()
        prog.config(text=f'[ERROR] - {e}')
        return None


# Send command via dashboard and receive data
def send_string(s, cmd):
    """Send one dashboard command and return the whitespace-split reply."""
    s.sendall((cmd + '\n').encode())
    time.sleep(0.08)  # give the controller time to answer
    return s.recv(2048).decode().split()


# Send list of commands and receive a list of data
def receive_data_list():
    """Run every dashboard command; one-item error list when offline.

    FIX: the socket is now closed after use (it used to leak one socket
    per poll cycle), and the command list drives the loop instead of a
    hard-coded range(4).
    """
    data = []
    s = dashboard_connection()
    if s is not None:
        try:
            for cmd in command:
                data.append(send_string(s, cmd))
        finally:
            s.close()
    else:
        data.append('Robot is powered off')
    return data


# Parse received data into readable view
def parse_received_data_list():
    """Reduce the raw replies to four display strings (or an error string)."""
    data = receive_data_list()
    if len(data) == 1:
        data = 'Robot is powered off'
    else:
        data[0] = data[0][-1]            # robot mode keyword
        data[1] = data[1][0]             # serial number
        if data[2][0] == 'No':
            data[2] = ' '.join(data[2])  # "No program loaded"
        else:
            # Extract the loaded .urp program file name.
            data[2] = re.findall(r'<?\w+>?.urp', data[2][-1])[0]
        data[3] = data[3][0]             # program state
    return data


# Update info in the main tk window
def tk_show_robot_data():
    """Refresh all labels, then reschedule (2 s offline / 0.5 s online)."""
    data = parse_received_data_list()
    if type(data) is str:
        sn.config(text=data)
        timeout = 2000
    else:
        mode.config(text=data[0])
        sn.config(text=data[1])
        prog.config(text=data[2])
        state.config(text=data[3])
        timeout = 500
    window.after(timeout, tk_show_robot_data)
    center_the_window(window)


# Layout main window in the center of the screen
def center_the_window(root):
    """Move ``root`` so it is centered on the screen."""
    root.update_idletasks()
    sizes = root.geometry().split('+')
    window_w = int(sizes[0].split('x')[0])
    window_h = int(sizes[0].split('x')[1])
    w = root.winfo_screenwidth() // 2 - window_w // 2
    h = root.winfo_screenheight() // 2 - window_h // 2
    root.geometry(f'+{w}+{h}')


if __name__ == "__main__":
    # Create main window
    window = tk.Tk()
    window.title('Robot monitoring')
    window.resizable(False, False)

    # Create new window with log data
    def log_window():
        log = tk.Toplevel()
        log.title('Log')
        log.geometry('650x400')
        log.resizable(False, False)
        header_lbl = tk.Label(log, text='Robot log')
        text_box = tk.Text(log,
                           height=log.winfo_screenmmheight() - 265,
                           width=log.winfo_screenmmwidth())
        exit_btn = tk.Button(
            log,
            text='Exit',
            command=log.destroy,
            relief='groove',
            activebackground='lightgray'
        )

        # Insert new log data into window
        def read_data():
            with open('tmp.log', 'r') as log_file:
                data = log_file.readlines()
            text_box.delete(1.0, tk.END)
            text_box.insert(tk.END, ''.join(data))
            log.after(1000, read_data)

        header_lbl.pack()
        text_box.pack()
        exit_btn.pack(side='bottom')
        log.after(1000, read_data)
        center_the_window(log)
        log.mainloop()

    mode = tk.Label(text='')
    sn = tk.Label(text='Loading...')
    prog = tk.Label(text='')
    state = tk.Label(text='')
    log_btn = tk.Button(
        window,
        text='Log',
        command=log_window,
        relief='groove',
        activebackground='lightgray'
    )
    quit_btn = tk.Button(
        window,
        text='Exit',
        command=window.destroy,
        relief='groove'
    )

    mode.pack()
    sn.pack()
    prog.pack()
    state.pack()
    log_btn.pack(side='left', expand=1, fill='x')
    quit_btn.pack(side='right', expand=1, fill='x')

    window.after(500, tk_show_robot_data)
    center_the_window(window)
    window.mainloop()
import time from selenium import webdriver # webdriver from Chrome PATH = "C:\Program Files (x86)\chromedriver.exe" chrome = webdriver.Chrome(PATH) # Link to the page PAGE = "https://www.bestbuy.com/site/madden-nfl-21-playstation-4-playstation-5/6407594.p?skuId=6407594" chrome.get(PAGE) purchaseButton = False while not purchaseButton: try: addToCart = addButton = chrome.find_element_by_class_name("btn-prim") print("Button is not ready at this time.") time.sleep(1) chrome.refresh() except: addToCart = addButton = chrome.find_element_by_class_name("fulfillment-add-to-cart-button") print("Button was clicked.") addToCart.click() purchaseButton = True goToCart = addToCartButton = chrome.find_element_by_class_name("btn btn-secondary btn-sm btn-block ") goToCart.click() print("Go to Cart was clicked")
''' Created on Jan 29, 2015 @author: Jerry ''' def repeat(word, number): """ return the word with the first part of it repeated if it is a valid part to repeat. The part to be repeated must be repeated "number" times. If the first letter is not a vowel, and the third letter is a vowel, then the part to repeat is the first three letters. If the first and third letters are not vowels, but the second letter is, then the part to repeat is the first two letters. Otherwise there is nothing to repeat. Only the first letter of the returned word may be a capital letter, if the original word started with a capital letter. """ if len(word)== 1: return word if isVowel(word[0]): return word if len(word)== 2: if isVowel (word[1]): return word +word.lower() * (number-1) else: return word if len(word)== 3: if isVowel(word[2]): return word +word.lower() * (number-1) if isVowel(word[3]): return word + word.lower() * (number - 1) if len(word) > 3: if isVowel(word[2]): return word[:3] + word[:3].lower() * (number - 1) + word[3:] if isVowel(word[1]): return word[:2] + word[:2].lower() * (number - 1) + word[2:] return word def isVowel(ch): ch = ch.lower() if ch == "a": return True if ch == "e": return True if ch == "i": return True if ch == "o": return True if ch == "u": return True return False
# Generated by Django 2.2.4 on 2019-11-27 01:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blogs', '0024_user_otp_secret'), ] operations = [ migrations.AlterField( model_name='post', name='content', field=models.CharField(blank=True, max_length=300), ), ]
class Solution: def numIslands(self, grid: List[List[str]]) -> int: if not grid: return 0 land = set() res = 0 def dfs(r, c): if (r, c) in land: land.remove((r, c)) dfs(r-1, c) dfs(r+1, c) dfs(r, c+1) dfs(r, c-1) for i in range(len(grid)): for j in range(len(grid[0])): if grid[i][j] == "1": land.add((i, j)) while land: l = land.pop() land.add(l) dfs(l[0], l[1]) res += 1 return res
import heapq from queue import Queue def get_int(): return int(input()) def get_line(): return input().strip() def get_ints(): return [ int(i) for i in input().split() ] class NumberStream: def __init__(self, num_list): self.num_list = num_list self.index = 0 def is_end(self): return self.index == len(self.num_list) def top(self): return self.num_list[self.index] def merge_it(l, r): result = [0] * (len(l)+len(r)-1) for i, x in enumerate(l): for j, y in enumerate(r): result[i+j] = max(result[i+j], x+y) return result def accumul(nums): result = [0] for num in nums: result.append(result[-1]+num) return result def do_one_step(): N, K, P = get_ints() q = Queue() for i in range(N): q.put(accumul(get_ints())) while q.qsize() > 1: q.put(merge_it(q.get(), q.get())[:P+1]) result = q.get() return result[P] def main(): n = get_int() for i in range(1, n+1): print("Case #%s: %s" % (i, do_one_step())) if __name__ == "__main__": main()