text stringlengths 38 1.54M |
|---|
from django.apps import AppConfig
class SupplementaryContentConfig(AppConfig):
    """Django app configuration for regcore's supplementary content app."""

    name = "regcore.supplementary_content"
    verbose_name = "Supplementary content for regulations"
|
# First stdin line: element count (read only to consume the line).
_count = int(input())
# Second stdin line: the sequence itself.
values = list(map(int, input().split()))
# Report 1-based positions of the first minimum and the first maximum.
print(values.index(min(values)) + 1, values.index(max(values)) + 1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def night_at_the_museum(s):
    """Return the total rotations needed to type *s* on a circular a-z wheel.

    The pointer starts at 'a'; each step costs the shorter circular distance
    (clockwise vs counter-clockwise over 26 letters) between consecutive
    characters.
    """
    turns = 0
    pointer = 'a'
    for ch in s:
        gap = abs(ord(pointer) - ord(ch))
        # the wheel can be spun either way; take the cheaper direction
        turns += min(gap, 26 - gap)
        pointer = ch
    return turns
# Read the target string from stdin and print the minimal rotation count.
word = input()
print(night_at_the_museum(word))
|
import os
import argparse
import sys
import json
import sqlite3
# TODO configure
def configure_query(delimiter=",",type="str", **kwargs) -> str:
    """Placeholder for a future query builder; currently does nothing.

    NOTE(review): despite the ``-> str`` annotation the stub returns None,
    and the ``type`` parameter shadows the builtin — revisit both when
    implementing (renaming would change the keyword interface).
    """
    pass
def get_config() -> dict:
    """Load settings.json from this module's folder.

    :return: ``{"result": <parsed settings>, "path": <file path>}`` on
        success, or ``{}`` when the file is missing or contains invalid JSON.
    :raises PermissionError: when the file exists but cannot be read.
    """
    print("Loading settings.json file")
    path = f"{get_folder()}/settings.json"
    if not os.path.isfile(path):
        print("File not exists! Use setup")
        return {}
    try:
        # context manager guarantees the handle is closed even if read() fails
        # (the original leaked it on unexpected errors)
        with open(path, "r") as file:
            raw = file.read()
    except PermissionError:
        raise PermissionError("Change permissions to the folder")
    try:
        result = json.loads(raw)
        return {"result": result, "path": path}
    except json.JSONDecodeError as e:
        print(e)
        return {}
def get_folder() -> str:
    """Return the absolute path of the directory containing this module."""
    module_path = os.path.abspath(__file__)
    return os.path.dirname(module_path)
# TODO refactor set_proxy and set_token func
def set_proxy(proxy: str) -> bool:
    """Persist *proxy* under the "proxy" key of settings.json.

    :param proxy: proxy link to store
    :return: True on success, False when the settings file is unavailable
    """
    config = get_config()
    if not config:
        return False
    settings, settings_path = config["result"], config["path"]
    settings["proxy"] = proxy
    with open(settings_path, "w") as handle:
        json.dump(settings, handle, sort_keys=True, indent="")
    return True
def set_token(token):
    """Persist *token* under the "token" key of settings.json.

    :param token: bot token to store
    :return: True on success, False when the settings file is unavailable
    """
    config = get_config()
    if not config:
        return False
    settings, settings_path = config["result"], config["path"]
    settings["token"] = token
    with open(settings_path, "w") as handle:
        json.dump(settings, handle, sort_keys=True, indent="")
    return True
def add_user_to_db(username, password, status) -> bool:
    """Insert a new row into the users table of the configured database.

    :param username: user login
    :param password: stored in the ``key`` column
    :param status: 0 or 1 | user or admin
    :return: True (the original never reported failure; behaviour kept)
    """
    dbname = get_config()["result"]["database"]
    connection = connect_db(dbname)
    try:
        cursor = connection.cursor()
        # parameterized query — safe against SQL injection
        cursor.execute("INSERT INTO users(username,key,status) VALUES(?,?,?)",
                       (username, password, status))
        connection.commit()
        # debug dump of the whole table, kept from the original
        cursor.execute("SELECT * FROM users")
        print(cursor.fetchall())
    finally:
        # the original never closed the connection; release it deterministically
        connection.close()
    return True
def remove_user_from_bd(username=None, id=None) -> bool:
    """Removes user from db

    :param username: -- name of the user
    :param id: -- id of the user
    :return: True/False
    """
    # Build the WHERE clause with placeholders instead of f-string
    # interpolation (the original was vulnerable to SQL injection).
    conditions = []
    params = []
    if username:
        conditions.append("username=?")
        params.append(username)
    if id:
        conditions.append("id=?")
        params.append(id)
    if not conditions:
        print("Need args")
        return False
    query = "DELETE FROM users WHERE " + " AND ".join(conditions)
    print(query)  # debug, kept from the original
    dbname = get_config()["result"]["database"]
    connection = connect_db(dbname)
    try:
        cursor = connection.cursor()
        cursor.execute(query, params)
        connection.commit()
        # debug dump of the remaining rows, kept from the original
        cursor.execute("SELECT * FROM users")
        print(cursor.fetchall())
    finally:
        # the original leaked the connection
        connection.close()
    return True
def change_user_settings(**keys) -> bool:
    """Build (and print) an UPDATE query for the users table.

    :param username: - user login
    :param key: -- user password
    :param status: -- 0 or 1 | user or admin
    :return: True when the query was built, False on an invalid status value

    Bug fix: the original checked ``int(x) != 1 or int(x) != 0`` which is
    always True, and skipped validation entirely when status was the first
    keyword — status is now always validated as 0 or 1.
    NOTE(review): values are still interpolated into the SQL string; switch
    to placeholders before re-enabling the commented-out DB execution.
    """
    assignments = []
    for field, value in keys.items():
        if value is None:
            # untouched fields are simply skipped
            continue
        if field == "status":
            try:
                status_value = int(value)
            except (TypeError, ValueError):
                return False
            if status_value not in (0, 1):
                return False
            assignments.append(f"{field}={status_value}")
        else:
            assignments.append(f"{field}='{value}'")
    query = "UPDATE users SET " + ",".join(assignments)
    print(query)
    # DB execution remains disabled, as in the original:
    # dbname = get_config()["result"]["database"]
    # connection = connect_db(dbname)
    # cursor = connection.cursor()
    # cursor.execute(query)
    # connection.commit()
    # cursor.execute("SELECT * FROM users")
    # print(cursor.fetchall())
    return True
def connect_db(dbname) -> sqlite3.Connection:
    """Connection to Database function

    :param dbname: -- database name (file ``<dbname>.db`` next to this module)
    :return: an open sqlite3 connection, or False when the connect fails
        (NOTE(review): mixing Connection/False return types — callers must
        truth-test the result)
    """
    path = f"{get_folder()}/{dbname}.db"
    try:
        connect = sqlite3.connect(path)
        return connect
    except sqlite3.Error:
        return False
def drop_db(dbname: str) -> bool:
    """Delete the on-disk sqlite database file.

    :param dbname: name of the database (without the ``.db`` suffix)
    :return: True when the file was removed, False when it did not exist
    """
    db_path = f"{get_folder()}/{dbname}.db"
    if not os.path.isfile(db_path):
        print("No db file")
        return False
    os.remove(db_path)
    return True
def create_db(dbname) -> bool:
    """Creates DataBase with table users:
    id | username | key | status

    :param dbname: -- database name (without the ``.db`` suffix)
    :return: True on success, False on any sqlite error (e.g. table exists)
    """
    path = f"{get_folder()}/{dbname}.db"
    connection = None
    try:
        connection = sqlite3.connect(path)
        print("Database created")
        cursor = connection.cursor()
        cursor.execute("""CREATE TABLE users(id integer PRIMARY KEY, username text, key text, status integer)""")
        connection.commit()
        return True
    except sqlite3.Error:
        return False
    finally:
        # the original leaked the connection when CREATE TABLE raised
        if connection is not None:
            connection.close()
def setup_handler() -> bool:
    """Interactively create a new settings.json next to this module.

    Prompts for token, proxy and database name; creates the sqlite database
    when a name is given, then writes the collected settings.

    :return: True on success, False when no database name was entered.
    :raises PermissionError: when settings.json cannot be written.

    Fix vs. original: the file is now opened *after* validation with a
    context manager, so a failed setup no longer leaves an empty, unclosed
    settings.json behind.
    """
    print("Creating settings.json file")
    path = f"{get_folder()}/settings.json"
    token = input("Input your token: ")
    proxy = input("Input proxy(or press Enter): ")
    database = input("Input database name: ")
    result = {}
    if len(token) > 10:
        result["token"] = token
    if proxy.strip():
        result["proxy"] = proxy.strip()
    if not database.strip():
        # a database name is mandatory; abort without touching the filesystem
        return False
    result["database"] = database.strip()
    create_db(result["database"])
    try:
        with open(path, "w+") as file:
            json.dump(result, file)
    except PermissionError:
        raise PermissionError("Change permissions to the folder")
    return True
def create_parser() -> None:
    """Build the CLI argument parser, parse sys.argv and dispatch the command.

    NOTE(review): several subcommands abuse argparse's ``type=`` callback
    (``set_proxy``, ``set_token``, ``drop_db``, ``create_db``) so the action
    runs as a side effect *during parsing*; the TODO below acknowledges this.
    Dispatch for the remaining commands relies on ``set_defaults`` attributes
    being present on the parsed namespace.
    """
    parser = argparse.ArgumentParser(description="Manage bot settings", prog="Bot manage")
    subparsers = parser.add_subparsers(help="Manage commands")
    """
    Create settings
    """
    create_set = subparsers.add_parser("create-settings")
    # marker attribute only; the actual call happens in the dispatch below
    create_set.set_defaults(createsettings=setup_handler)
    # create_set.add_argument("--force", action="store_const", const=setup_handler)
    """
    Proxy commands
    """
    proxy = dict()
    # Set command — set_proxy runs as the argparse type converter (side effect)
    proxy["set"] = subparsers.add_parser("set_proxy")
    proxy["set"].add_argument("proxy", action="store", help="Set proxy to bot", type=set_proxy)
    # Remove command — NOTE(review): no handler is wired up for this subcommand
    proxy["remove"] = subparsers.add_parser("remove_proxy")
    proxy["remove"].add_argument("proxy", action="store", help="Removes proxy value from settings")
    """
    Token commands
    """
    token = dict()
    # Set command — set_token runs as the argparse type converter (side effect)
    token["set"] = subparsers.add_parser("set_token")
    token["set"].add_argument("token", action="store", help="Set token of the bot", type=set_token)
    """
    Database commands
    """
    database = dict()
    # Remove DB — drop_db runs as the type converter (side effect)
    database["drop"] = subparsers.add_parser("dropdb")
    database["drop"].add_argument("dbname", action="store", help="DB name", type=drop_db)
    # Create DB — create_db runs as the type converter (side effect)
    database["create"] = subparsers.add_parser("createdb")
    database["create"].add_argument("dbname", action="store", help="DB name", type=create_db)
    """
    Users
    """
    users = dict()
    # Add user: three positional arguments, dispatched below
    users["add"] = subparsers.add_parser("add_user")
    users["add"].add_argument("username")
    users["add"].add_argument("password")
    users["add"].add_argument("status")
    users["add"].set_defaults(add_user=add_user_to_db)
    # Remove user: optional filters, dispatched below
    users["remove"] = subparsers.add_parser("remove_user")
    users["remove"].add_argument("--username", dest="removeuname")
    users["remove"].add_argument("--id", dest="removeid")
    users["remove"].set_defaults(remove_user=True)
    # Change user info: optional fields, dispatched below
    users["change"] = subparsers.add_parser("change_user")
    users["change"].add_argument("--username", dest="change_name")
    users["change"].add_argument("--password", dest="change_pass")
    users["change"].add_argument("--status", dest="change_status")
    users["change"].set_defaults(change_user=True)
    args = parser.parse_args()
    # TODO don't pass function to the args or call it from args
    # `x in args` tests for the attribute set via set_defaults on the chosen
    # subparser — i.e. which subcommand was selected.
    if "createsettings" in args:
        setup_handler()
    elif "add_user" in args:
        add_user_to_db(args.username, args.password, args.status)
    elif "remove_user" in args:
        print(args)
        remove_user_from_bd(username=args.removeuname, id=args.removeid)
    elif "change_user" in args:
        change_user_settings(username=args.change_name, password=args.change_pass, status=args.change_status)
if __name__ == "__main__":
    create_parser()
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import glob
from collections import defaultdict
from analysis import *
def load_metrics(path):
    """Load per-seed train and generalize metric pickles under *path*.

    :param path: root folder; each child folder is named after a seed and
        contains ``*.pkl`` metric files
    :return: ``(train_result, generalize_result)`` — dicts keyed by seed name.
        A file counts as a train metric when its name lacks 'generalize' and
        contains '10000' (the final checkpoint); files with 'generalize' in
        the name are generalize metrics; anything else is skipped.
    """
    seed_folders = glob.glob(f"{path}/*")
    generalize_result = {}
    train_result = {}
    for seed_folder in seed_folders:
        # folder name doubles as the seed identifier
        seed = seed_folder.split("/")[-1]
        # make sure to ignore the rsa analysis for now
        if seed == 'rsa_analysis.pkl':
            continue
        for file in glob.glob(seed_folder + "/*.pkl"):
            # context manager closes the handle (the original leaked it)
            with open(file, "rb") as handle:
                metrics = pickle.load(handle)
            if file.find('generalize') == -1:
                # only the final (10000-iteration) checkpoint is kept
                if file.find('10000') != -1:
                    train_result[seed] = metrics
            else:
                generalize_result[seed] = metrics
    return train_result, generalize_result
def show_messages(path, metrics, show_results=True):
    """
    Show the messages for 10 random samples across seeds of one experiment.

    :param path: experiment folder containing one subfolder per seed
    :param metrics: checkpoint iterations whose message files are loaded
    :param show_results: boolean to decide whether or not to print results
    :return: None

    NOTE(review): sample indices are drawn with np.random without seeding,
    so output differs between calls. The pretty-printer below reads
    ``result['1']`` — it assumes a seed folder literally named '1' exists,
    and that each target accumulated exactly ``len(metrics)`` messages;
    confirm against the experiment layout.
    """
    # get the files to be analyzed
    metric_files = glob.glob(f"{path}/*/*.pkl")
    seed_folders = glob.glob(f"{path}/*")
    # load one file to find the amount of samples in the data
    m1 = pickle.load(open(metric_files[0], "rb"))
    nindex = len(m1['messages'])
    # generate 10 samples to be checked
    indices = np.random.choice(nindex, 10, replace=False)
    result = {}
    # run through all seed
    for s in seed_folders:
        # get seed index
        seed = s.split("/")[-1]
        # skip the rsa analysis artefact that lives next to the seed folders
        if seed == 'rsa_analysis.pkl':
            continue
        result[seed] = defaultdict(list)
        # run through selected metric iterations
        for metric in metrics:
            # combine file path
            file_path = s + "/metrics_at_{}.pkl".format(metric)
            # load files
            m1 = pickle.load(open(file_path, "rb"))
            # extract selected messages (fancy indexing — assumes numpy arrays;
            # TODO confirm)
            messages = m1['messages'][indices]
            targets = m1['targets'][indices]
            # use targets as key to save messagess
            for i in range(len(messages)):
                result[seed][str(targets[i])].append(messages[i])
    # show the results if requested
    if show_results:
        # pretty print the results: one table per target, one column per seed,
        # one row per checkpoint iteration
        for tar, mess in result['1'].items():
            print('Messages for target:')
            print(tar)
            print()
            for i in range(len(mess)):
                if i == 0:
                    # header row with the seed names
                    print('iteration | ', end='')
                    for seed in result.keys():
                        print("{:<13}".format(' seed ' + seed + ''), end="")
                        print(' | ', end='')
                    print()
                print("{:<9}".format(str(metrics[i])), end='')
                print(' | ', end="")
                for seed in result.keys():
                    print(result[seed][tar][i], end='')
                    print(' | ', end='')
                print()
            print()
    return
def unique_messages(path, metrics, show_results=True):
    """
    Count the number of unique messages per seed for a certain experiment.

    :param path: the path to the files that need to be checked
    :param metrics: iterations at which to show results
    :param show_results: boolean to decide whether or not to print results
    :return: dict mapping seed -> {iteration: nr of unique messages}
    """
    seed_folders = glob.glob(f"{path}/*")
    result = {}
    for s in seed_folders:
        # folder name doubles as the seed identifier
        seed = s.split("/")[-1]
        # skip the rsa analysis artefact that lives next to the seed folders
        if seed == 'rsa_analysis.pkl':
            continue
        result[seed] = {}
        for metric in metrics:
            file_path = s + "/metrics_at_{}.pkl".format(metric)
            # context manager closes the handle (the original leaked it)
            with open(file_path, "rb") as handle:
                m1 = pickle.load(handle)
            # messages may be lists/arrays; tuples make them hashable
            unique = {tuple(message) for message in m1['messages']}
            result[seed][metric] = len(unique)
    if show_results:
        # show the amount of unique message
        for seed, metric in result.items():
            print('Showing results for seed ' + str(seed))
            print('Iteration: \t | \t Unique messages:')
            for i, count in metric.items():
                # crude column alignment based on the iteration number's width
                if i < 1000:
                    print('\t' + str(i) + '\t\t\t\t\t' + str(count))
                else:
                    print('\t' + str(i) + '\t\t\t\t' + str(count))
            print()
    return result
def unique_tokens(path, iterations, show_results=True):
    """
    Count the number of unique tokens per seed for a certain experiment.

    :param path: the path to the files that need to be checked
    :param iterations: iterations at which to show results
    :param show_results: boolean to decide whether or not to print results
    :return: dict mapping seed -> {iteration: nr of unique tokens}

    Note: the original also accumulated an across-iteration token set
    (``result_tokens``) that was never returned or used; that dead code has
    been removed.
    """
    seed_folders = glob.glob(f"{path}/*")
    result = {}
    for s in seed_folders:
        # folder name doubles as the seed identifier
        seed = s.split("/")[-1]
        # skip the rsa analysis artefact that lives next to the seed folders
        if seed == 'rsa_analysis.pkl':
            continue
        result[seed] = {}
        for it in iterations:
            file_path = s + "/metrics_at_{}.pkl".format(it)
            # context manager closes the handle (the original leaked it)
            with open(file_path, "rb") as handle:
                m1 = pickle.load(handle)
            # flatten all messages of this checkpoint into one token set
            tokens = {token for message in m1['messages'] for token in message}
            result[seed][it] = len(tokens)
    if show_results:
        # show the amount of unique tokens
        for seed, metric in result.items():
            print('Showing results for seed ' + str(seed))
            print('Iteration: \t | \t Unique tokens:')
            for i, count in metric.items():
                # crude column alignment based on the iteration number's width
                if i < 1000:
                    print('\t' + str(i) + '\t\t\t\t\t' + str(count))
                else:
                    print('\t' + str(i) + '\t\t\t\t' + str(count))
            print()
    return result
|
import unittest
from configchecker import ConfigChecker
import os
import logging
import sys
from io import StringIO
# Silence all library logging while the tests run.
logging.disable(logging.CRITICAL)
# A syntactically valid INI fixture covering int/bool/float/string values
# across three sections.
good_config = \
"[FirstSection]\n\
key_integer = 45 \n\
key_boolean = yes \n\
key_float = 23.2\n\
key_string = I am a string\n\
\n\
[SecondSection] \n\
User = hg \n\
\n\
[ThirdSection]\n\
Port = 50022 \n\
ForwardX11 = no\n"
# A deliberately malformed INI fragment used to exercise parse failures.
bad_config = "[asdff} asdfas[sd"
# Exact stdout expected from ConfigChecker.print_expectations() for a single
# int expectation with default 123 (compared verbatim in the print test).
expectedOutput ="Configuration Values\n\
\n\
Section: FirstSection\n\
Key key_integer\n\
Data Type: <class 'int'>\n\
Value: None\n\
Default Value: 123\n\
"
class ExpectionTests(unittest.TestCase):
    """Tests for ConfigChecker expectation management: adding, de-duplicating,
    removing, validating and printing expectations.

    NOTE(review): the class name looks like a typo for "ExpectationTests";
    left unchanged because unittest discovery references it by name.
    """
    def setUp(self):
        # each test starts from an empty checker
        self.checker = ConfigChecker()
    def test_adding_item_increases_length(self):
        self.checker.set_expectation("TestSection","TestKey",bool,False);
        added = self.checker.set_expectation("TestSection_second","TestKey",bool,True);
        self.assertIs(len(self.checker.get_expectations()),2,"Length of expectation list didn't increase when item was added.")
        self.assertIs(added,True)
    def test_adding_duplicate_expectation_dont_duplicate(self):
        # same section/key differing only in case counts as a duplicate
        self.checker.set_expectation("TestSection","testKey",str,"TestDefault");
        added = self.checker.set_expectation("TestSection","testkey",str,"TestDefault");
        self.assertIs(len(self.checker.get_expectations()),1,"Length of expectation list didn't increase when item was added.")
        self.assertIs(added,False)
    def test_adding_duplicate_key_different_section_is_ok(self):
        self.checker.set_expectation("TestSection","testKey",int,123);
        added = self.checker.set_expectation("TestSection_test","testkey",int,1234);
        self.assertIs(len(self.checker.get_expectations()),2,"Length of expectation list didn't increase when item was added.")
        self.assertIs(added,True)
    def test_expectation_added_to_end_of_list_correctly_no_message(self):
        # expectations are stored as dicts; inspect the last entry's fields
        self.checker.set_expectation("TestSection","testkey",int,34);
        addedSection = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['section']
        addedKey = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['key']
        addedType = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['data_type']
        addedDefault = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['default']
        addedMessage = self.checker.get_expectations()[len(self.checker.get_expectations() ) - 1]['message']
        self.assertEqual(addedSection,'TestSection',"Added section doesn't match last section in list")
        self.assertEqual(addedKey,'testkey',"Added key doesn't match last key in list")
        self.assertEqual(addedType,int,"Added type doesn't match last type in list")
        self.assertEqual(addedDefault,34,"Added default doesn't match last default in list")
        self.assertEqual(addedMessage,None,"Added message doesn't match last message in list")
    def test_expectation_added_to_end_of_list_correctly_with_message(self):
        self.checker.set_expectation("TestSection","testkey",str,"TestDefault","TestMessage");
        addedSection = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['section']
        addedKey = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['key']
        addedType = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['data_type']
        addedDefault = self.checker.get_expectations()[len(self.checker.get_expectations()) - 1]['default']
        addedMessage = self.checker.get_expectations()[len(self.checker.get_expectations() ) - 1]['message']
        self.assertEqual(addedSection,'TestSection',"Added section doesn't match last section in list")
        self.assertEqual(addedKey,'testkey',"Added key doesn't match last key in list")
        self.assertEqual(addedType,str,"Added type doesn't match last type in list")
        self.assertEqual(addedDefault,'TestDefault',"Added default doesn't match last default in list")
        self.assertEqual(addedMessage,'TestMessage',"Added message doesn't match last message in list")
    def test_removeing_expectation_which_matches_section_and_key_reduces_list_number(self):
        self.checker.set_expectation("TestSection","testkey_2",bool,False,"TestMessage")
        self.checker.set_expectation("TestSection","testkey",int,23498,"TestMessage")
        removed = self.checker.remove_expectation("TestSection",'testkey')
        entryExists,position = self.checker.expectation_exists_at_index("TestSection","testkey_2")
        self.assertIs(len(self.checker.get_expectations()),1,"Matching expectation wasn't removed from list.")
        self.assertIs(position,0)
        self.assertIs(entryExists,True)
        self.assertIs(removed,True)
    def test_removeing_expectation_which_doesnt_match_section_and_key_returns_false(self):
        self.checker.set_expectation("TestSection","testkey_2",bool,False,"TestMessage")
        self.checker.set_expectation("TestSection","testkey",int,23498,"TestMessage")
        removed = self.checker.remove_expectation("badSecion",'testkey')
        entryExists,position = self.checker.expectation_exists_at_index("TestSection","testkey_2")
        self.assertIs(entryExists,True)
        entryExists,position = self.checker.expectation_exists_at_index("TestSection","testkey")
        self.assertIs(entryExists,True)
        self.assertIs(len(self.checker.get_expectations()),2)
        self.assertIs(removed,False)
    # --- default value / data type validation ---
    def test_adding_expectation_with_wrong_default_type_returns_false_integer(self):
        added = self.checker.set_expectation("FirstSection","key_integer",int,'asdfasd',"TestMessage")
        self.assertIs(added,False);
        self.assertIs(len(self.checker.get_expectations()),0)
    def test_adding_expectation_with_wrong_default_type_returns_false_float(self):
        added = self.checker.set_expectation("FirstSection","key_float",float,'asdfasd',"TestMessage")
        self.assertIs(added,False);
        self.assertIs(len(self.checker.get_expectations()),0)
    def test_adding_expectation_with_wrong_default_type_returns_false_boolean(self):
        added = self.checker.set_expectation("FirstSection","key_float",bool,'asdfasd',"TestMessage")
        self.assertIs(added,False);
        self.assertIs(len(self.checker.get_expectations()),0)
    def test_adding_expectation_with_not_allowed_data_type_returns_false(self):
        added = self.checker.set_expectation("FirstSection","key_float",'asdf','asdfasd',"TestMessage")
        self.assertIs(added,False);
        self.assertIs(len(self.checker.get_expectations()),0)
    # --- section/key names must be strings ---
    def test_section_names_cant_be_integer(self):
        added = self.checker.set_expectation(123,"key_float",float,123.3,"TestMessage")
        self.assertIs(added,False);
    def test_section_names_cant_be_floats(self):
        added = self.checker.set_expectation(1232.12,"key_float",float,123.3,"TestMessage")
        self.assertIs(added,False);
    def test_section_names_cant_be_boolean(self):
        added = self.checker.set_expectation(True,"key_float",float,123.3,"TestMessage")
        self.assertIs(added,False);
    def test_key_names_cant_be_integer(self):
        added = self.checker.set_expectation('Section',123,float,123.3,"TestMessage")
        self.assertIs(added,False);
    def test_key_names_cant_be_floats(self):
        added = self.checker.set_expectation('Section',123.123,float,123.3,"TestMessage")
        self.assertIs(added,False);
    def test_key_names_cant_be_boolean(self):
        added = self.checker.set_expectation('Section',True,float,123.3,"TestMessage")
        self.assertIs(added,False);
    def test_printing_expectation_output(self):
        # capture stdout and compare it verbatim against the module-level fixture
        added = self.checker.set_expectation("FirstSection","key_integer",int,123)
        old_stdout = sys.stdout
        sys.stdout = printOutput = StringIO()
        self.checker.print_expectations()
        sys.stdout = old_stdout
        self.assertEqual(printOutput.getvalue(),expectedOutput)
class FileOperationTests(unittest.TestCase):
def setUp(self):
self.checker = ConfigChecker()
self.makeGoodConfigFile()
self.makeBadConfigFile()
def tearDown(self):
self.removeGoodConfigFile()
self.removeBadConfigFile()
try:
os.remove('test_write.ini')
except:
pass
def test_opening_bad_config_file_return_false(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
opened = self.checker.set_configuration_file('test_bad_config.ini')
self.assertIs(opened,False)
def test_opening_file_with_no_dxpectations_returns_false(self):
opened = self.checker.set_configuration_file('test_good_config.ini')
self.assertIs(opened,False)
def test_file_which_exists_opens_correctly(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
opened = self.checker.set_configuration_file('test_good_config.ini')
self.assertIs(len(self.checker.get_config_parser_object().sections()),3, "A well fomatted config file contained no sections when opened")
self.assertIs(opened,True,"Opened was reported for a well formatted config file")
def test_file_which_doesnt_exist_returns_false(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
opened = self.checker.set_configuration_file('bad_file_name.ini')
self.assertIs(len(self.checker.get_config_parser_object().sections()),0, "A file which didn't exist resulted in config sections being made.")
self.assertIs(opened,False,"A file which didn't exist resulted in a true open status (should be false)")
def test_only_expectation_are_stored_as_config_values_after_file_is_read(self):
opened = self.checker.set_configuration_file('test_good_config.ini')
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,False,"TestMessage")
self.assertIs(len(self.checker.get_expectations()),2)
def test_expectations_which_match_have_their_value_updated(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,False,"TestMessage")
self.checker.set_expectation("FirstSection","key_float",float,12123.1,"TestMessage")
self.checker.set_expectation("FirstSection","key_string",str,'A string',"TestMessage")
opened = self.checker.set_configuration_file('test_good_config.ini')
self.assertIs(self.checker.get_value("FirstSection","key_integer"),45)
self.assertIs(self.checker.get_value("FirstSection","key_boolean"),True)
self.assertEqual(self.checker.get_value("FirstSection","key_float"),23.2)
self.assertEqual(self.checker.get_value("FirstSection","key_string"),"I am a string")
def test_bad_data_types_cause_their_default_to_be_loaded_bool(self):
self.checker.set_expectation("FirstSection","key_integer",bool,True,"TestMessage")
opened = self.checker.set_configuration_file('test_good_config.ini')
self.assertIs(self.checker.get_value("FirstSection","key_integer"),True)
def test_bad_data_types_cause_their_default_to_be_loaded_float(self):
self.checker.set_expectation("FirstSection","key_string",float,123.1,"TestMessage")
opened = self.checker.set_configuration_file('test_good_config.ini')
self.assertIs(self.checker.get_value("FirstSection","key_string"),123.1)
def test_bad_data_types_cause_their_default_to_be_loaded_int(self):
self.checker.set_expectation("FirstSection","key_string",int,10,"TestMessage")
opened = self.checker.set_configuration_file('test_good_config.ini')
self.assertIs(self.checker.get_value("FirstSection","key_string"),10)
def test_reading_value_not_updated_by_config_file_returns_default(self):
self.checker.set_expectation("FirstSection","unspec_key",int,23,"TestMessage")
opened = self.checker.set_configuration_file('test_good_config.ini')
self.assertIs(self.checker.get_value("FirstSection","unspec_key"),23)
def test_values_are_set_to_default_if_file_cant_be_opened(self):
self.checker.set_expectation("FirstSection","unspec_key",int,23,"TestMessage")
opened = self.checker.set_configuration_file('random_file_name.ini')
self.assertIs(self.checker.get_value("FirstSection","unspec_key"),23)
def test_writing_file_produces_produces_expected_result(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,False,"TestMessage")
self.checker.set_expectation("FirstSection","key_float",float,12123.1,"TestMessage")
self.checker.set_expectation("FirstSection","key_string",str,'A string',"TestMessage")
self.checker.set_expectation("SecondSection","key_string",str,'default',"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
self.assertIs(opened,False)
written = self.checker.write_configuration_file('test_write.ini')
self.assertIs(written,True)
# Make a new object and set get_expectations() to different values
self.checker = ConfigChecker()
self.checker.set_expectation("FirstSection","key_integer",int,1,"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,True,"TestMessage")
self.checker.set_expectation("FirstSection","key_float",float,1.1,"TestMessage")
self.checker.set_expectation("FirstSection","key_string",str,'sfs',"TestMessage")
self.checker.set_expectation("SecondSection","key_string",str,'asfs',"TestMessage")
# If the values match the original dump then all everythin was successful
opened = self.checker.set_configuration_file('test_write.ini')
self.assertIs(opened,True)
self.assertIs(self.checker.get_value("FirstSection","key_integer"),23)
self.assertIs(self.checker.get_value("FirstSection","key_boolean"),False)
self.assertEqual(self.checker.get_value("FirstSection","key_float"),12123.1)
self.assertEqual(self.checker.get_value("FirstSection","key_string"),"A string")
self.assertEqual(self.checker.get_value("SecondSection","key_string"),"default")
os.remove('test_write.ini')
def test_writing_file_with_none_uses_existing_config_file_name(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,False,"TestMessage")
self.checker.set_expectation("FirstSection","key_float",float,12123.1,"TestMessage")
self.checker.set_expectation("FirstSection","key_string",str,'A string',"TestMessage")
self.checker.set_expectation("SecondSection","key_string",str,'default',"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
self.assertIs(opened,False)
written = self.checker.write_configuration_file(None)
self.assertIs(written,True)
self.assertIs(os.path.exists('test_write.ini'),True)
os.remove('test_write.ini')
def test_writing_file_permission_error_returs_false(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,False,"TestMessage")
self.checker.set_expectation("FirstSection","key_float",float,12123.1,"TestMessage")
self.checker.set_expectation("FirstSection","key_string",str,'A string',"TestMessage")
self.checker.set_expectation("SecondSection","key_string",str,'default',"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
self.assertIs(opened,False)
written = self.checker.write_configuration_file('/root/test_config.ini')
self.assertIs(written,False,"These tests and module shouldn't be run with root acccess.")
def test_writing_os_error_returs_false(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,False,"TestMessage")
self.checker.set_expectation("FirstSection","key_float",float,12123.1,"TestMessage")
self.checker.set_expectation("FirstSection","key_string",str,'A string',"TestMessage")
self.checker.set_expectation("SecondSection","key_string",str,'default',"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
self.assertIs(opened,False)
written = self.checker.write_configuration_file('sdf/asdf')
self.assertIs(written,False)
def test_writing_file_with_no_expectations_returns_false(self):
written = self.checker.write_configuration_file('test_write.ini')
self.assertIs(written,False)
def test_can_change_the_value_of_an_expectation_int(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
written = self.checker.set_value("FirstSection","key_integer",45);
self.assertIs(written,True)
self.assertIs(self.checker.get_value("FirstSection","key_integer"),45)
def test_can_change_the_value_of_an_expectation_float(self):
self.checker.set_expectation("FirstSection","key_float",float,23.3,"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
written = self.checker.set_value("FirstSection","key_float",45.3);
self.assertIs(written,True)
self.assertIs(self.checker.get_value("FirstSection","key_float"),45.3)
def test_can_change_the_value_of_an_expectation_bool(self):
self.checker.set_expectation("FirstSection","key_boolean",bool,True,"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
written = self.checker.set_value("FirstSection","key_boolean",False);
self.assertIs(written,True)
self.assertIs(self.checker.get_value("FirstSection","key_boolean"),False)
def test_can_change_the_value_of_an_expectation_str(self):
self.checker.set_expectation("FirstSection","key_string",str,'asdf',"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
written = self.checker.set_value("FirstSection","key_string",'qwer');
self.assertIs(written,True)
self.assertIs(self.checker.get_value("FirstSection","key_string"),'qwer')
def test_can_change_the_value_of_multiple_without_changing_others(self):
self.checker.set_expectation("FirstSection","key_string",str,'asdf',"TestMessage")
self.checker.set_expectation("FirstSection","key_boolean",bool,True,"TestMessage")
self.checker.set_expectation("FirstSection","key_float",float,23.3,"TestMessage")
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
written = self.checker.set_value("FirstSection","key_string",'qwer');
self.assertIs(written,True)
written = self.checker.set_value("FirstSection","key_float",45.3);
self.assertIs(written,True)
written = self.checker.set_value("FirstSection","key_integer",45);
self.assertIs(written,True)
written = self.checker.set_value("FirstSection","key_boolean",False);
self.assertIs(written,True)
self.assertIs(self.checker.get_value("FirstSection","key_string"),'qwer')
self.assertIs(self.checker.get_value("FirstSection","key_integer"),45)
written = self.checker.set_value("FirstSection","key_float",45.3);
self.assertIs(self.checker.get_value("FirstSection","key_boolean"),False)
def test_changing_value_of_expectation_which_doesnt_exist_returns_false(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
written = self.checker.set_value("WrongSection","key_integer",45);
self.assertIs(written,False)
def test_cannot_set_values_if_file_target_is_not_set(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
written = self.checker.set_value("FirstSection","key_integer",45);
self.assertIs(written,False)
def test_writing_incorrect_type_returns_false_integer(self):
self.checker.set_expectation("FirstSection","key_integer",int,23,"TestMessage")
opened = self.checker.set_configuration_file('test_write.ini')
written = self.checker.set_value("FirstSection","key_integer",'sdfs');
self.assertIs(written,False)
def test_writing_incorrect_type_returns_false_float(self):
    """Writing a string where a float is expected must fail."""
    self.checker.set_expectation("FirstSection", "key_float", float, 23.3, "TestMessage")
    self.checker.set_configuration_file('test_write.ini')
    result = self.checker.set_value("FirstSection", "key_float", 'sdfs')
    self.assertIs(result, False)
def test_writing_incorrect_type_returns_false_bool(self):
    """Writing a string where a bool is expected must fail."""
    self.checker.set_expectation("FirstSection", "key_boolean", bool, False, "TestMessage")
    self.checker.set_configuration_file('test_write.ini')
    result = self.checker.set_value("FirstSection", "key_boolean", 'sdf')
    self.assertIs(result, False)
def makeGoodConfigFile(self):
    """Write the known-good fixture config to test_good_config.ini.

    Narrowed the bare `except:` to OSError: only filesystem failures are
    expected here, and a bare except would also swallow e.g. NameError.
    """
    try:
        with open('test_good_config.ini', 'w') as f:
            f.write(good_config)
    except OSError:
        print("Warning: Cannot write test configuration files to filesystem, tests failed")
def makeBadConfigFile(self):
    """Write the malformed fixture config to test_bad_config.ini.

    Narrowed the bare `except:` to OSError (only I/O failures are expected).
    """
    try:
        with open('test_bad_config.ini', 'w') as f:
            f.write(bad_config)
    except OSError:
        print("Warning: Cannot write test configuration files to filesystem, tests failed")
def removeGoodConfigFile(self):
    """Best-effort removal of the good-config fixture file."""
    try:
        os.remove('test_good_config.ini')
    except OSError:
        # The file may never have been created; narrowed from a bare except.
        pass
def removeBadConfigFile(self):
    """Best-effort removal of the bad-config fixture file."""
    try:
        os.remove('test_bad_config.ini')
    except OSError:
        # The file may never have been created; narrowed from a bare except.
        pass
class ReadingValuesTests(unittest.TestCase):
    """get_value lookups against manually injected expectation values."""

    def setUp(self):
        self.checker = ConfigChecker()

    def test_reading_good_section_and_key_returns_expected_results(self):
        """A matching section/key pair returns the stored value."""
        self.checker.set_expectation("FirstSection", "key_integer", int, 23, "TestMessage")
        self.checker.get_expectations()[0]['value'] = 12
        self.assertIs(self.checker.get_value("FirstSection", "key_integer"), 12)

    def test_reading_bad_section_or_bad_key_returns_None(self):
        """An unknown section or key yields None."""
        self.checker.set_expectation("FirstSection", "key_integer", int, 23, "TestMessage")
        self.checker.get_expectations()[0]['value'] = 12
        self.assertIs(self.checker.get_value("BadSection", "key_integer"), None)
        self.assertIs(self.checker.get_value("FirstSection", "badKey"), None)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""
Quicksort: One of the fastest sorting algorithm. Divide and conquer.
Select a pivot and place it in it's place in the array, then
recursively do the same for left and right partitions of the remaining array.
Time Complexity: O(n*log(n)) [O(n*n) - Worst case]
Space Complexity: O(log(n))
Inplace: Yes
Stable: No
3-Way: Maintain 3 subarrays. 1. Of lesser value, 2. Of equal, 3. Of greater
"""
def partition(arr: list, start: int, end: int) -> int:
    """Lomuto partition using arr[end] as the pivot.

    Rearranges arr[start:end+1] so everything smaller than the pivot comes
    first, places the pivot after that prefix, and returns its final index.
    """
    boundary = start  # next slot for an element smaller than the pivot
    for idx in range(start, end):
        if arr[idx] < arr[end]:
            arr[boundary], arr[idx] = arr[idx], arr[boundary]
            boundary += 1
    arr[boundary], arr[end] = arr[end], arr[boundary]
    return boundary
def quicksort_recursive(arr: list, start: int, end: int) -> None:
    """Sort arr[start:end+1] in place (recursive quicksort, last-element pivot).

    The Lomuto partition step is inlined here so the function is self-contained.
    """
    if start >= end:
        return
    pivot = arr[end]
    boundary = start
    for idx in range(start, end):
        if arr[idx] < pivot:
            arr[boundary], arr[idx] = arr[idx], arr[boundary]
            boundary += 1
    arr[boundary], arr[end] = arr[end], arr[boundary]
    quicksort_recursive(arr, start, boundary - 1)
    quicksort_recursive(arr, boundary + 1, end)
def quicksort_iterative(arr: list, start: int, end: int) -> None:
    """Sort arr[start:end+1] in place using an explicit work queue.

    Equivalent to the recursive version, but pending (start, end) ranges are
    kept in a deque instead of on the call stack; the Lomuto partition step
    is inlined.
    """
    from collections import deque
    pending: deque = deque()
    pending.append((start, end))
    while pending:
        start, end = pending.popleft()
        if start < end:
            pivot = arr[end]
            mid = start
            for idx in range(start, end):
                if arr[idx] < pivot:
                    arr[mid], arr[idx] = arr[idx], arr[mid]
                    mid += 1
            arr[mid], arr[end] = arr[end], arr[mid]
            if mid - 1 > start:
                pending.append((start, mid - 1))
            if mid + 1 < end:
                pending.append((mid + 1, end))
if __name__ == "__main__":
arr = [80, 90, 100, 70, 60, 50, 40, 10, 20, 30]
print("before sorting:", *arr)
quicksort_recursive(arr, 0, len(arr) - 1)
print("after recursive sorting:", *arr)
arr = [80, 90, 100, 70, 60, 50, 40, 10, 20, 30]
quicksort_iterative(arr, 0, len(arr) - 1)
print("after iterative sorting:", *arr)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 04:34:01 2020
@author: chetanya
"""
# Template describing an (empty) transaction record; fields are filled in
# elsewhere. NOTE(review): the original dict literal was a bare expression
# statement whose value was immediately discarded — binding it to a module
# name makes it actually usable by importers.
TRANSACTION_TEMPLATE = {
    "sender_address": "",
    "reciever_address": "",  # [sic] key spelling kept for compatibility
    "units": "",
    "Ether": ""
}
#!/usr/bin/python
#
# Magicor
# Copyright 2006 Peter Gebauer. Licensed as Public Domain.
# (see LICENSE for more info)
import sys, os
def change_to_correct_path():  # taken from pygame wiki cookbook
    """Chdir to the executable's directory and add it to sys.path.

    Useful when the game is launched from another working directory.
    Removed the redundant function-local `import os, sys` — both modules are
    already imported at module level.
    """
    exe_base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    os.chdir(exe_base_dir)
    sys.path.append(exe_base_dir)
if sys.platform == 'win32':
    change_to_correct_path()  # useful when running from another dir, desktop or appbar
from optparse import OptionParser
sys.path.append(".")
from magicor import GameEngine, getConfig, parse_printkeys
from magicor.states.intro import CopyrightNoticeState

# Build the command-line interface. Fixes applied: `option != None` replaced
# with the idiomatic `is not None`, and the Windows config path uses a raw
# string ("\m" is an invalid escape sequence and warns on modern Pythons).
parser = OptionParser(usage="%prog [options]")
if sys.platform == 'win32':
    parser.add_option("-c", "--config", dest="configPath",
                      default=".",
                      help="use this config path, default is magicor directory.")
    baseConf = r".\magicor.conf"
else:
    parser.add_option("-c", "--config", dest="configPath",
                      default="###CONFIG_PATH###",
                      help="use this default config, default ###CONFIG_PATH###")
    baseConf = "~/.magicor/magicor.conf"
parser.add_option("-j", "--joystick",
                  action="store", type="int", dest="joystick",
                  default=None,
                  help="enable/disable joystick")
parser.add_option("-m", "--music",
                  action="store", type="int", dest="music",
                  default=None,
                  help="enable/disable music")
parser.add_option("-s", "--sound",
                  action="store", type="int", dest="sound",
                  default=None,
                  help="enable/disable sound")
parser.add_option("-f", "--fullscreen",
                  action="store", type="int", dest="fullscreen",
                  default=None,
                  help="enable/disable fullscreen")
parser.add_option("-d", "--dev", type="int", dest="devmode",
                  default=None, help="enable dev keys")
parser.add_option("-k", "--keysprintdbg", type="string", dest="printkeys", default="",
                  help="keys to enable selective printing of debug info. Separator is ':'")
(options, args) = parser.parse_args()

# Load configuration, then let explicit command-line flags override it.
paths = [options.configPath, baseConf]
conf = getConfig(paths)
if sys.platform == 'win32':  # not clean but...
    conf["user_path"] = '.'
    conf["data_path"] = 'data'
if options.joystick is not None:
    conf["joystick"] = bool(options.joystick)
if options.music is not None:
    conf["music"] = options.music
if options.sound is not None:
    conf["sound"] = options.sound
if options.fullscreen is not None:
    conf["fullscreen"] = bool(options.fullscreen)
if options.devmode is not None:
    conf["devmode"] = bool(options.devmode)
parse_printkeys(options.printkeys)

gameEngine = GameEngine(conf)
gameEngine.start(CopyrightNoticeState(conf, None, gameEngine.screen))
|
# SMPAIR: for each test case, print the smallest possible sum of a pair,
# i.e. the sum of the two smallest elements.
for _ in range(int(input())):
    input()  # n — length of the list; not needed explicitly
    values = sorted(map(int, input().split()))
    print(values[0] + values[1])
|
# NOTE: Python 2 syntax (print statements) — this module will not run under Python 3.
class classname:
    # Toy class demonstrating instance state via `self`.
    def createname(self,name):
        # Ad-hoc initialiser: store the given name on the instance.
        self.name=name
    def displayname(self):
        # Return the stored name.
        return self.name
    def saying(self):
        # Print a greeting using the stored name.
        print "hello %s" %self.name
# Demonstration of the class above (Python 2 print statements).
print classname
first=classname()
second=classname()
first.createname("chandan")
second.createname("kuiry")
print first.displayname()
print second.displayname()
first.saying()
second.saying()
# Here we learn that `self` is a placeholder for the instance a method is
# called on — "first" and "second" are the instance names.
|
# @Author : Vector
# @Email : vectorztt@163.com
# @Time : 2019/5/30 17:09
# -----------------------------------------
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from conf.base_page import BasePage
from conf.decorator import teststep
class MedalPage(BasePage):
    """Page object for the "my medals" screen.

    Fixes: the bare `except:` clauses in the wait_check_* helpers were
    narrowed to `except Exception:` so that e.g. KeyboardInterrupt is no
    longer swallowed; Chinese docstrings translated to English.
    """

    @teststep
    def medal_icon(self):
        """Return the medal icon element."""
        ele = self.driver.find_element_by_id(self.id_type() + 'medal')
        return ele

    @teststep
    def wait_check_medal_page(self):
        """Checkpoint: the medal page title ("my medals") is visible."""
        locator = (By.XPATH, '//android.widget.TextView[contains(@text,"我的勋章")]')
        try:
            WebDriverWait(self.driver, 10, 0.5).until(lambda x: x.find_element(*locator))
            return True
        except Exception:
            return False

    @teststep
    def wait_check_medal_img_page(self):
        """Checkpoint: a medal image element is present."""
        locator = (By.ID, self.id_type() + 'img')
        try:
            WebDriverWait(self.driver, 10, 0.5).until(lambda x: x.find_element(*locator))
            return True
        except Exception:
            return False

    @teststep
    def medals(self):
        """Return all medal description elements."""
        ele = self.driver.find_elements_by_id(self.id_type() + 'des')
        return ele

    @teststep
    def medal_content(self):
        """Return the description text of a greyed-out (not yet earned) medal."""
        ele = self.driver.find_element_by_id(self.id_type() + 'text')
        return ele.text
|
import os
import random
import string
from transliterate import detect_language
from transliterate import slugify as slugify_translit
from django.utils.text import slugify
def get_filename_ext(filepath):
    """Split a path into (base name without extension, extension incl. dot)."""
    basename = os.path.basename(filepath)
    return os.path.splitext(basename)
def upload_image_path(instance, filename):
    """Build the storage path for a product image: products/<id>/<id><ext>.

    `instance` is required by Django's upload_to signature but is unused.
    Fixed: the original bound the base name to an unused local `name`.
    """
    new_filename = random.randint(666666, 399999999)
    _, ext = get_filename_ext(filename)  # only the extension is kept
    final_filename = f'{new_filename}{ext}'
    return f'products/{new_filename}/{final_filename}'
def random_slug_generator(
        size=10,
        chars=string.ascii_lowercase \
            + string.digits):
    """Return a random slug of `size` characters drawn from `chars`."""
    picks = [random.choice(chars) for _ in range(size)]
    return "".join(picks)
def unique_slug_generator(instance, new_slug=None):
    """Return a slug for `instance.title` that is unique for its model.

    Transliterates non-Latin titles when the language can be detected;
    on a collision, retries recursively with a random suffix appended.
    """
    if new_slug is not None:
        slug = new_slug
    else:
        language = detect_language(instance.title)
        if language:
            slug = slugify_translit(instance.title, language)
        else:
            slug = slugify(instance.title, allow_unicode=True)
    model = instance.__class__
    if model.objects.filter(slug=slug).exists():
        candidate = f'{slug}-{random_slug_generator(size=15)}'
        return unique_slug_generator(instance, new_slug=candidate)
    return slug
#!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyright © Manoel Vilela
#
#
from simulation import AntSimulation

# Run the ant simulation and keep the resulting grid.
matrix = AntSimulation.run()
print("Goodbye!")
"""
Summary:
Libary of functions to create an the report.
"""
from . import meistertask_requests as meistertask
from datetime import datetime
from datetime import timedelta
from . import data_helper
import json
def calctime(starttime, endtime):
    """
    Summary:
        Calculates the difference between start time and end time in seconds.
    Args:
        starttime (datetime): start time in the format %Y-%m-%dT%H:%M:%S.%fZ
        endtime (datetime): end time in the format %Y-%m-%dT%H:%M:%S.%fZ
    Returns:
        integer: time difference in seconds
    """
    # Bug fix: the original returned timedelta.seconds, which only holds the
    # sub-day remainder — any interval of one day or more was silently
    # truncated. total_seconds() accounts for the .days component as well.
    delta = endtime - starttime
    return int(delta.total_seconds())
def export_report_json(data):
    """
    Summary:
        Exports the report into a timestamped JSON file under data/json/.
    Args:
        data (dictionary): the report data
    Returns:
        string: bare name of the JSON file (without the data/json/ prefix)
    """
    stamp = datetime.now().strftime("%d%m%Y%H%M%S")
    filename = 'report' + stamp + '.json'
    data_helper.save_json('data/json/' + filename, data)
    return filename
def append_members_json(members,key,firstname,lastname,hours,hsalary,report,memberfee):
    """
    Summary:
        Appends a new member (with the hours spent on this project) to the
        per-project `members` dict, and adds those hours to the person's
        cross-project summary in `report['persons']`.
    Args:
        members (dictionary): dict of the members in the project (mutated)
        key (integer): identifier (key) of the person
        firstname (string): firstname of the person
        lastname (string): lastname of the person
        hours (floating): hours the person has worked on the project
        hsalary (floating): salary per hour
        report (dictionary): the report (mutated)
        memberfee (floating): memberfee of the association, deducted once per update
    """
    # Adds a new member to the project overview.
    members[key] = []
    members[key].append({
        'firstname' : firstname,
        'lastname' : lastname,
        'hours' : round(hours,2),
        'salary' : round(hours*hsalary,2)
    } )
    # Adds or updates the person in the summary of the report.
    # The salary is calculated as: (hours * hourly salary) - memberfee
    salary = round(hours*hsalary,2) - float(memberfee)
    if key in report['persons']:
        # Person already has an entry: accumulate the hours and recompute the
        # salary over the new total (the memberfee is subtracted once here).
        hours = hours + float(report['persons'][key]['hours'])
        salary = round(hours*hsalary,2) - float(memberfee)
        report['persons'][key]['hours'] = round(hours,2)
        if salary > 0:
            report['persons'][key]['salary'] = str(salary) + ' CHF'
        else:
            report['persons'][key]['salary'] = '0 CHF'
    else:
        # NOTE(review): this branch uses `salary < 0` while the update branch
        # above uses `salary > 0`, so a salary of exactly 0 is formatted
        # differently ('0.0 CHF' here vs '0 CHF' above) — confirm intended.
        if salary < 0:
            report['persons'][key] = {
                "firstname": firstname,
                "lastname" : lastname,
                "hours": round(hours,2),
                "salary" : str(0) + ' CHF'
            }
        else:
            report['persons'][key] = {
                "firstname": firstname,
                "lastname" : lastname,
                "hours": round(hours,2),
                "salary" : str(round(salary,2)) + ' CHF'
            }
def append_project_json(projects, name, members, tasks, projecttime, hsalary):
    """
    Summary:
        Appends a new project to the report.
    Args:
        projects (dictionary): including all project data (mutated)
        name (string): name of the project
        members (dictionary): dict of the members in the project
        tasks (dictionary): dict of the tasks in the project
        projecttime (floating): hours which have been spent on the project
        hsalary (floating): salary per hour
    """
    # Documentation fix: the original docstring omitted the `tasks` parameter.
    projects[name] = []
    projects[name].append({
        'members': members,
        'tasks': tasks,
        'time': round(projecttime, 2),
        'costs': round(projecttime * hsalary, 2)
    })
def append_task_json(tasks, key, name, time, hsalary):
    """
    Summary:
        Appends a new task to the report.
    Args:
        tasks (dictionary): including all tasks data (mutated)
        key (integer): identifier of the task
        name (string): name of the task
        time (floating): hours which have been spent on this task
        hsalary (floating): salary per hour
    """
    entry = {
        'name': name,
        'time': round(time, 2),
        'costs': round(time * hsalary, 2),
    }
    tasks[key] = [entry]
def sec_to_hours(seconds):
    """
    Summary:
        Converts seconds into hours, rounded to two decimals.
    Args:
        seconds (floating): seconds
    Returns:
        floating: the calculated hours
    """
    return round(seconds / 3600, 2)
def add_time_to_worktime(starttime, endtime, worktime):
    """
    Summary:
        Adds the timespan [starttime, endtime], converted to hours, to worktime.
    Args:
        starttime (datetime): start time in the format %Y-%m-%dT%H:%M:%S.%fZ
        endtime (datetime): end time in the format %Y-%m-%dT%H:%M:%S.%fZ
        worktime (floating): accumulated work time in hours
    Returns:
        floating: worktime after adding the new timespan
    """
    span_hours = sec_to_hours(float(calctime(starttime, endtime)))
    return worktime + span_hours
def report(selected_projects,hsalary,apikey,memberfee):
    """
    Summary:
        Gets information from the Meistertask API and creates a report from
        the selected projects, exported as a JSON file.
    Args:
        selected_projects (list): project ids of the projects to include
        hsalary (floating): salary per hour
        apikey (string): Meistertask API key
        memberfee (floating): membership fee deducted from each person's salary
    Returns:
        string: name of the generated JSON report file
    """
    report = {}
    projects = {}
    report['persons'] = {}
    report['memberfee'] = memberfee
    for selected_project in selected_projects:
        # Resolve the project's display name.
        name = meistertask.get_projects(selected_project,apikey)
        name = name['name']
        projecttime = 0.0
        times = meistertask.get_workintervals_project(selected_project,apikey)
        persons = meistertask.get_persons_project(selected_project,apikey)
        project_tasks = meistertask.get_tasks(selected_project,apikey)
        persons = persons['persons']
        members = {}
        tasks={}
        for person in persons:
            # Sum this person's tracked work intervals for the project.
            worktime = 0.0
            for time in times:
                if time['person_id'] == person['id']:
                    worktime = add_time_to_worktime(datetime.strptime(time['started_at'],'%Y-%m-%dT%H:%M:%S.%fZ'),datetime.strptime(time['finished_at'],'%Y-%m-%dT%H:%M:%S.%fZ'),worktime)
            append_members_json(members,person['id'],person['firstname'],person['lastname'],worktime,hsalary,report,memberfee)
        for task in project_tasks:
            # NOTE: the project total is based on each task's tracked_time,
            # not on the per-person intervals summed above.
            worktime = 0.0
            worktime= sec_to_hours(task['tracked_time'])
            projecttime = projecttime + worktime
            append_task_json(tasks,task['id'],task['name'],worktime,hsalary)
        append_project_json(projects,name,members,tasks,projecttime,hsalary)
    report['projects'] = projects
    path = export_report_json(report)
    return path
|
# Generated by Django 3.0.5 on 2020-11-09 11:11
from django.db import migrations, models
# Auto-generated Django migration: adds an optional image to Attendance.
class Migration(migrations.Migration):

    dependencies = [
        ('core', '0051_auto_20201109_0622'),
    ]

    operations = [
        # blank=True / null=True so existing attendance rows need no default.
        migrations.AddField(
            model_name='attendance',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='images/attendance/'),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the OLE Compound File summary and document summary plugins."""
import unittest
from plaso.parsers.olecf_plugins import summary
from tests.parsers.olecf_plugins import test_lib
class TestSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
    """Tests for the OLECF summary information plugin."""

    def testProcess(self):
        """Tests the Process function on a Summary Information stream."""
        plugin = summary.SummaryInformationOLECFPlugin()
        storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)

        self.assertEqual(storage_writer.number_of_warnings, 0)
        self.assertEqual(storage_writer.number_of_events, 3)

        events = list(storage_writer.GetSortedEvents())

        # TODO: add support for: 'Total edit time (secs): 0'
        # Expected values come from the Document.doc test fixture.
        expected_event_values = {
            'application': 'Microsoft Office Word',
            'author': 'DAVID NIDES',
            'data_type': 'olecf:summary_info',
            'last_saved_by': 'Nides',
            'name': 'Summary Information',
            'number_of_characters': 18,
            'number_of_pages': 1,
            'number_of_words': 3,
            'revision_number': '4',
            'security': 0,
            'template': 'Normal.dotm',
            'timestamp': '2012-12-10 18:38:00.000000',
            'timestamp_desc': 'Document Creation Time',
            'title': 'Table of Context'}

        self.CheckEventValues(storage_writer, events[0], expected_event_values)
class TestDocumentSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
    """Tests for the OLECF document summary information plugin."""

    def testProcess(self):
        """Tests the Process function on a Document Summary Information stream."""
        plugin = summary.DocumentSummaryInformationOLECFPlugin()
        storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)

        self.assertEqual(storage_writer.number_of_warnings, 0)
        self.assertEqual(storage_writer.number_of_events, 1)

        events = list(storage_writer.GetSortedEvents())

        # Expected values come from the Document.doc test fixture.
        expected_event_values = {
            'application_version': '14.0',
            'company': 'KPMG',
            'data_type': 'olecf:document_summary_info',
            'name': 'Document Summary Information',
            'number_of_lines': 1,
            'number_of_paragraphs': 1,
            'shared_document': False}

        self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
def validation():
    """Ask for the user's name and print a greeting."""
    user_name = input("What is your name?")
    print("Hello", user_name, ",you are a beautiful fucking bastard.")


validation()
|
# XPath locators for the Django admin "delete group" confirmation page.
locators = {
    'breadcrumbs': '//*[@id="container"]/div[2]',
    'home': '//*[@id="container"]/div[2]/a[1]',
    'authentication_and_authorization': '//*[@id="container"]/div[2]/a[2]',
    'groups': '//*[@id="container"]/div[2]/a[3]',
    'are_you_sure_headline': '//*[@id="content"]/h1',
    'are_you_sure_question': '//*[@id="content"]/p',
    'summary': '//*[@id="content"]/h2[1]',
    'objects': '//*[@id="content"]/h2[2]',
    # %s is filled in with the group name by the caller.
    'generic_group_element': '//*[@id="content"]/ul[2]/li/a[contains(.,"%s")]',
    'confirm_deletion_button': '//*[@id="content"]/form/div/input[4]',
    'cancel_deletion_button': '//*[@id="content"]/form/div/a',
}
from typing import List
from bisect import bisect_right
class Solution:
    """LCP 18: count (staple, drink) pairs whose combined price is <= x."""

    def breakfastNumber_(self, staple: List[int], drinks: List[int], x: int) -> int:
        """Brute force O(n*m) reference implementation.

        Consistency fix: apply the same modulus as breakfastNumber so both
        methods agree on large inputs.
        """
        count = 0
        for s in staple:
            for d in drinks:
                if s + d <= x:
                    count += 1
        return count % 1000000007

    def breakfastNumber(self, staple: List[int], drinks: List[int], x: int) -> int:
        """O((n+m) log m): sort drinks, binary-search the budget left by each staple."""
        staple.sort(reverse=True)
        drinks.sort()
        count = 0
        for s in staple:
            if s > x:
                # This staple alone already exceeds the budget.
                continue
            # Number of drinks costing at most the remaining budget.
            count += bisect_right(drinks, x - s)
        return count % 1000000007
if __name__ == '__main__':
    # Example from the problem statement; expected output: 6
    print(Solution().breakfastNumber([10, 20, 5], [5, 5, 2], 15))
|
import numpy as np
from numpy import mat
#将各节点的有向图转换为概率转移矩阵
def probability_graph(w_ori_mat):
    """Normalise each column of the adjacency matrix so it sums to 1.

    Turns the directed-graph matrix into a column-stochastic transition
    matrix. NOTE: mutates its argument in place and also returns it.
    """
    n = w_ori_mat.shape[0]
    for col in range(n):
        col_sum = np.sum(w_ori_mat[:, col])
        w_ori_mat[:, col] = w_ori_mat[:, col] / col_sum
    return w_ori_mat
def pangerank(w_ori_mat, iter_num, c_damping = 0.15):
    """Iterative PageRank with damping factor c_damping.

    Runs iter_num power-iteration steps starting from a uniform rank vector.
    The column normalisation (previously in probability_graph) is inlined;
    like the original, this mutates w_ori_mat in place.
    """
    n = w_ori_mat.shape[0]
    # Make the adjacency matrix column-stochastic.
    for col in range(n):
        w_ori_mat[:, col] = w_ori_mat[:, col] / np.sum(w_ori_mat[:, col])
    teleport = np.ones((n, 1)) / n
    rank = teleport
    for _ in range(iter_num):
        rank = (1 - c_damping) * w_ori_mat * rank + c_damping * teleport
    return rank
# Quick smoke test.
w_ori_mat = mat([[0,1,0],[1,0,1],[1,0,0]], dtype=float) # dtype=float is required here, otherwise the matrix is stored as int and the normalisation truncates
print(pangerank(w_ori_mat, 6))
|
import random
def hangman(correct_word):
    """
    Play one round: the player guesses letters until the word is complete
    or 8 wrong guesses have been used.
    """
    uniq = set()   # correct letters guessed so far
    wrong = set()  # wrong letters guessed so far
    hint = "-" * len(correct_word)  # hint =["-"] * len(correct_word)
    chances = 8
    while chances > 0:
        print()
        print(hint)  # print("".join(hint))
        guess_letter = input("Input a letter:")
        # Input validation: repeated wrong letter, length, then character set.
        if guess_letter in wrong:
            print("You already typed this letter")
            continue
        if len(guess_letter) != 1:
            print("You should input a single letter")
            continue
        if guess_letter not in ('abcdefghijklmnopqrstuvwxyz'):
            print("It is not an ASCII lowercase letter")
            continue
        check = correct_word.find(guess_letter)
        if check == -1:
            # Wrong guess: burn a chance and remember the letter.
            chances -= 1
            wrong.update(guess_letter)
            print("No such letter in the word")
            continue
        if guess_letter in uniq:
            print("You already typed this letter")
            continue
        else:
            uniq.update(guess_letter)
        # Reveal every occurrence of the guessed letter in the hint.
        for i in range(len(correct_word)):
            if guess_letter == correct_word[i]:
                hint = hint[:i] + guess_letter + hint[i + 1:]  # hint[i] = guess_letter
        if hint == correct_word:  # "".join(hint) == correct_word:
            break
    if hint == correct_word:  # "".join(hint) == correct_word:
        print("You guessed the word " + correct_word + "!")
        print("You survived!")
    else:
        print("You are hanged!")
print('H A N G M A N')
names_list = ['python', 'java', 'kotlin', 'javascript']  # word pool
while True:
    play_ornot = input('''Type "play" to play the game, "exit" to quit: ''')
    if play_ornot == "play":
        # Bug fix: pick a fresh word per round. The original chose the word
        # once before the loop, so every replay reused the same word.
        hangman(random.choice(names_list))
    elif play_ornot == "exit":
        break
    else:
        print('''Choose either "play" or "exit"''')
|
from ctypes import addressof, create_string_buffer
from warnings import warn
import numpy as np
from pyvst import VstPlugin
from pyvst.vstwrap import VstTimeInfoFlags, VstTimeInfo, AudioMasterOpcodes
from pyvst.midi import midi_note_event, wrap_vst_events
# Class inspired from MrsWatson's audioClock
class Transport:
    """
    Tracks the current position in the "song".

    Knows the sample rate and tempo, so the position (stored in frames) can
    be reported in frames, seconds, or beats. It also records play/stop
    transitions via `has_changed`, which VSTs may query.
    """

    def __init__(self, sample_rate, tempo=120.):
        self._sample_rate = sample_rate
        self.tempo = tempo
        self._position = 0.  # position in frames
        self.has_changed = False
        self.is_playing = False

    def step(self, num_frames):
        """Advance by num_frames; flags the stop->play transition."""
        self._position += num_frames
        self.has_changed = not self.is_playing
        self.is_playing = True

    def stop(self):
        """Stop playback; flags the play->stop transition."""
        self.has_changed = self.is_playing
        self.is_playing = False

    def reset(self):
        """Stop and rewind to the beginning."""
        self.stop()
        self._position = 0.

    def get_position(self, unit='frame'):
        """Return the current position in 'frame', 'beat' or 'second' units."""
        if unit == 'frame':
            return self._position
        if unit == 'beat':  # same as a quarter note
            return self.tempo * self._position / 60. / self._sample_rate
        if unit == 'second':
            return self._position / self._sample_rate
        raise ValueError('Unknown unit "{}"'.format(unit))
class SimpleHost:
    """Simple host that holds a single (synth) vst."""

    # Kept at class level so the buffer is never garbage collected while a
    # VST still holds the pointer returned by audioMasterGetProductString.
    _product_string = create_string_buffer(b'pyvst SimpleHost')

    def __init__(self, vst_filename=None, sample_rate=44100., tempo=120., block_size=512):
        self.sample_rate = sample_rate
        self.transport = Transport(sample_rate, tempo)
        self.block_size = block_size

        # Indirection so the C callback always dispatches to the bound method.
        def callback(*args):
            return self._audio_master_callback(*args)

        self._callback = callback
        self._vst = None
        # NOTE(review): _vst_path is never read again (load_vst stores
        # _path_to_so_file instead) — looks vestigial; confirm before removing.
        self._vst_path = None
        if vst_filename is not None:
            self.load_vst(vst_filename)

    @property
    def vst(self):
        """The loaded VstPlugin; raises if load_vst was never called."""
        if self._vst is None:
            raise RuntimeError('You must first load a vst using `self.load_vst`.')
        return self._vst

    def reload_vst(self):
        """Tear down and reload the plugin, restoring all parameter values."""
        params = [self.vst.get_param_value(i) for i in range(self.vst.num_params)]

        self.vst.suspend()
        self.vst.close()
        del self._vst

        self.load_vst(self._path_to_so_file)
        for i, p in enumerate(params):
            self.vst.set_param_value(i, p)

    def load_vst(self, path_to_so_file):
        """
        Loads a vst. If there was already a vst loaded, we will release it.

        :param path_to_so_file: Path to the .so file to use as a plugin.
        """
        self._vst = VstPlugin(path_to_so_file, self._callback)

        # Not sure I need this but I've seen it in other hosts
        self.vst.open()

        if not self._vst.is_synth:
            raise RuntimeError('Your VST must be a synth!')

        self.vst.set_sample_rate(self.sample_rate)
        self.vst.set_block_size(self.block_size)
        self.vst.resume()

        # Warm up the VST by playing a quick note. It has fixed some issues for TyrellN6 where
        # otherwise the first note is funny.
        self._path_to_so_file = path_to_so_file
        self.play_note(note=64, min_duration=.1, max_duration=.1, note_duration=.1, velocity=127,
                       reload=False)

    def play_note(self, note=64, note_duration=.5, velocity=100, max_duration=5.,
                  min_duration=0.01, volume_threshold=0.000002, reload=False):
        """
        Render a single note through the loaded synth and return the audio.

        :param note_duration: Duration between the note on and note off midi events, in seconds.
            The audio will then last between `min_duration` and `max_duration`, stopping when
            sqrt(mean(signal ** 2)) falls under `volume_threshold` for a single buffer. For those
            arguments, `None` means they are ignored.
        :param reload: Will delete and reload the vst after having playing the note. It's an
            extreme way of making sure the internal state of the VST is reset. When False, we
            simply suspend() and resume() the VST (which should be enough in most cases).
        """
        if max_duration is not None and max_duration < note_duration:
            raise ValueError('max_duration ({}) is smaller than the midi note_duration ({})'
                             .format(max_duration, note_duration))

        if min_duration is not None and max_duration is not None and max_duration < min_duration:
            raise ValueError('max_duration ({}) is smaller than min_duration ({})'
                             .format(max_duration, min_duration))

        # Call this here to fail fast in case the VST has not been loaded
        self.vst

        # Convert the durations from seconds to frames
        min_duration = round(min_duration * self.sample_rate)
        max_duration = round(max_duration * self.sample_rate)

        note_on = midi_note_event(note, velocity)
        # nb of frames before the note_off events
        noteoff_is_in = round(note_duration * self.sample_rate)

        outputs = []
        self.transport.reset()

        # note_on is at time 0 anyway so we can do it before the loop
        self.vst.process_events(wrap_vst_events([note_on]))

        while True:
            if max_duration is not None and self.transport.get_position() > max_duration:
                break

            # If it's time for the note off
            if 0 <= noteoff_is_in < self.block_size:
                note_off = midi_note_event(note, 0, kind='note_off', delta_frames=noteoff_is_in)
                self.vst.process_events(wrap_vst_events([note_off]))

            output = self.vst.process(input=None, sample_frames=self.block_size)
            outputs.append(output)

            # If we are past the min_position, and if we have a volume_threshold, then we see if
            # we have enough volume to continue.
            if self.transport.get_position() > min_duration and volume_threshold:
                rms = np.sqrt((output ** 2).mean())
                if rms < volume_threshold:
                    break

            # We move transport in the future
            self.transport.step(self.block_size)
            # Which means the "noteoff is in" one block_size sooner
            noteoff_is_in -= self.block_size

        # Concatenate all the output buffers
        outputs = np.hstack(outputs)

        # Cut the extra of the last buffer if need be, to respect the `max_duration`.
        if max_duration is not None:
            outputs = outputs[:, :max_duration]

        # Reset the plugin to clear its state
        if reload:
            self.reload_vst()
        else:
            self.vst.suspend()
            self.vst.resume()

        return outputs

    def _audio_master_callback(self, effect, opcode, index, value, ptr, opt):
        # Note that there are a lot of missing opcodes here, I basically add them as I see VST
        # asking for them...
        if opcode == AudioMasterOpcodes.audioMasterVersion:
            return 2400
        # Deprecated but some VSTs still ask for it
        elif opcode == AudioMasterOpcodes.audioMasterWantMidi:
            return 1
        elif opcode == AudioMasterOpcodes.audioMasterGetTime:
            # Very much inspired from MrsWatson
            sample_pos = self.transport.get_position()
            sample_rate = self.sample_rate
            flags = 0
            # Always return those
            if self.transport.has_changed:
                flags |= VstTimeInfoFlags.kVstTransportChanged
            if self.transport.is_playing:
                flags |= VstTimeInfoFlags.kVstTransportPlaying

            if value & VstTimeInfoFlags.kVstNanosValid:
                warn('Asked for VstTimeInfoFlags.kVstNanosValid but not supported yet')

            # Depending on the passed mask, we'll returned what was asked
            mask = value
            # NOTE(review): if the mask contains neither kVstPpqPosValid nor
            # kVstTempoValid, `ppq_pos` / `tempo` below are never assigned and
            # the VstTimeInfo(...) call raises UnboundLocalError — confirm
            # whether real plugins always request both.
            if mask & VstTimeInfoFlags.kVstPpqPosValid:
                ppq_pos = self.transport.get_position(unit='beat')
                flags |= VstTimeInfoFlags.kVstPpqPosValid

            if mask & VstTimeInfoFlags.kVstTempoValid:
                tempo = self.transport.tempo
                flags |= VstTimeInfoFlags.kVstTempoValid

            # TODO: Should we warn that we don't support the other ones?

            # Make sure it doesn't get garbage collected
            self._last_time_info = VstTimeInfo(
                sample_pos=sample_pos,
                sample_rate=sample_rate,
                ppq_pos=ppq_pos,
                tempo=tempo,
                flags=flags,
            )
            return addressof(self._last_time_info)
        elif opcode == AudioMasterOpcodes.audioMasterGetProductString:
            return addressof(self._product_string)
        elif opcode == AudioMasterOpcodes.audioMasterIOChanged:
            return 0
        elif opcode == AudioMasterOpcodes.audioMasterGetCurrentProcessLevel:
            # This should mean "not supported by Host"
            return 0
        else:
            warn('Audio master call back opcode "{}" not supported yet'.format(opcode))
        return 0
|
import os
import pickle
from matplotlib.transforms import Bbox
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from tqdm import tqdm
from utils import get_spectrogram, plot_spectrogram
from scipy.ndimage import gaussian_filter
from skimage.filters import apply_hysteresis_threshold
from scipy.ndimage import label
from skimage.color import label2rgb
from skimage.measure import regionprops
def merge_intervals(intervals, max_gap):
    """
    Given intervals (N, 3) with columns (start, end, label) and a scalar
    max_gap, return a list of lists, where each inner list holds the labels
    of intervals that should be merged (overlapping, or separated by at most
    max_gap).
    """
    ordered = intervals[np.argsort(intervals[:, 0])]
    groups = [[ordered[0, -1]]]
    span = ordered[0, :2]  # (start, end) of the window being grown
    for row in ordered[1:]:
        if span[0] <= row[0] <= (span[1] + max_gap):
            # Interval overlaps (or nearly touches) the current window.
            span[1] = max(row[1], span[1])
            groups[-1].append(row[-1])
        else:
            span = row[:2]
            groups.append([row[-1]])
    return groups
def detect_sound_events(m, t, spectrogram):
    """
    Segment a spectrogram into labelled sound events.

    Bright regions (hysteresis threshold at 1.5x / 3x the per-frequency-row
    mean power) are labelled; regions shorter than 0.2 s are treated as
    clicks and removed, and remaining events closer than 50 ms in time are
    merged under a single label.

    Returns (labels, regionprops(labels, spectrogram)).
    """
    dt = t[1] - t[0]
    # The merging logic below expresses gaps as bins * dt, so the time axis
    # must be uniform.
    assert np.all(np.isclose(np.diff(t), dt))
    avg_power = spectrogram.mean(axis=1, keepdims=True)
    bright = apply_hysteresis_threshold(spectrogram, avg_power * 1.5, avg_power * 3)
    labels, _ = label(bright)
    regions = regionprops(labels)
    # NOTE(review): click_labels is filled in but never returned — confirm
    # whether it was meant to be part of the result.
    click_labels = np.zeros_like(labels)
    intervals = []
    for region in regions:
        # bbox is (min_row, min_col, max_row, max_col); columns are time bins.
        duration = (region.bbox[3] - region.bbox[1]) * dt
        if duration < 0.2:
            # Too short to be an event: drop from labels, keep as a click.
            labels[region.slice][region.image] = 0
            click_labels[region.slice][region.image] = region.label
        else:
            intervals.append([region.bbox[1] * dt, region.bbox[3] * dt, region.label])
    intervals = np.array(intervals)
    if len(intervals) > 0:
        label_groups = merge_intervals(intervals, max_gap=0.05)
        for label_group in label_groups:
            # Collapse each merged group onto its first label.
            labels[np.isin(labels, label_group)] = label_group[0]
    return labels, regionprops(labels, spectrogram)
def read_and_get_spectrogram(fp):
    """Read a WAV file and return its spectrogram via utils.get_spectrogram;
    returns (None, None, None) for empty audio."""
    samplerate, data = wavfile.read(fp)
    if not len(data):
        return None, None, None
    return get_spectrogram(1024, data, samplerate)
def inspect(fp):
    """Visual debugging helper: plot the spectrogram of `fp`, then the
    detected sound events overlaid in colour."""
    m, t, spectrogram = read_and_get_spectrogram(fp)
    plot_spectrogram(m, t, spectrogram, log=False)
    plt.title(fp)
    plt.show()
    labels, regions = detect_sound_events(m, t, spectrogram)
    # Grayscale background (3 identical channels) with label colours on top.
    gray = np.stack([spectrogram / spectrogram.max() for _ in range(3)], axis=-1)
    colour = label2rgb(labels, gray, alpha=0.2)
    _, axes= plt.subplots(2, 1)
    axes[0].pcolormesh(t, m, spectrogram)
    axes[0].set_xlabel("Time / s")
    axes[0].set_ylabel("Frequency / mel")
    # Flip vertically so low frequencies appear at the bottom, as in axes[0].
    axes[1].imshow(colour[::-1])
    axes[1].axis("off")
    plt.show()
def get_bbox(region, m, t):
    """Map a regionprops bbox (row/col indices) onto the frequency/time axes:
    returns (freq_min, t_min, freq_max, t_max, mean_intensity)."""
    row_min, col_min, row_max, col_max = region.bbox
    return (m[row_min], t[col_min], m[row_max - 1], t[col_max - 1], region.mean_intensity)
if __name__ == "__main__":
debug = True
if debug:
path = "data/station5_converted/EOS_220405_133808.wav"
inspect(path)
assert False
all_events = []
for dir_name in tqdm(os.listdir("data")):
if not dir_name.endswith("converted"):
continue
dir = os.path.join("data", dir_name)
for filename in tqdm(os.listdir(dir)):
file = os.path.join(dir, filename)
m, t, spectrogram = read_and_get_spectrogram(file)
if m is None:
continue
# inspect(file)
labels, regions = detect_sound_events(m, t, spectrogram)
all_events.extend([get_bbox(region, m, t) + (file,) for region in regions])
with open("sound_events.pkl", "wb") as f:
pickle.dump(all_events, f)
|
from datetime import date, datetime
from dateutil.rrule import MONTHLY, YEARLY
import pytest
from dashboard.libs.date_tools import (
get_workdays, get_workdays_list, get_bank_holidays, get_overlap,
parse_date, to_datetime, slice_time_window, dates_between,
financial_year_tuple, get_weekly_repeat_time_windows,
get_weekday)
@pytest.mark.parametrize("start_date, end_date, expected", [
    ('2016-04-28', '2016-04-28', 1),  # same working day
    ('2016-04-30', '2016-04-30', 0),  # same weekend day
    ('2016-04-27', '2016-04-29', 3),  # just work days
    ('2016-04-27', '2016-04-30', 3),  # one weekend day
    ('2016-04-27', '2016-05-01', 3),  # two weekend days
    ('2016-04-27', '2016-05-02', 3),  # two weekend days plus a bank holiday
    ('2016-01-01', '2016-01-31', 20),  # Jan 2016
    ('2016-02-01', '2016-02-28', 20),  # Feb 2016
])
def test_get_workdays(start_date, end_date, expected):
    """Workday counts are inclusive of both endpoints and skip weekends and
    UK bank holidays."""
    workdays = get_workdays(parse_date(start_date), parse_date(end_date))
    assert workdays == expected


@pytest.mark.parametrize("start_date, end_date, expected", [
    ('2016-04-28', '2016-04-28', ['2016-04-28']),  # same working day
    ('2016-04-30', '2016-04-30', []),  # same weekend day
    ('2016-04-27', '2016-04-29',
     ['2016-04-27', '2016-04-28', '2016-04-29']),  # just work days
    ('2016-04-27', '2016-04-30',
     ['2016-04-27', '2016-04-28', '2016-04-29']),  # one weekend day
    ('2016-04-27', '2016-05-01',
     ['2016-04-27', '2016-04-28', '2016-04-29']),  # two weekend days
    ('2016-04-27', '2016-05-02',
     ['2016-04-27', '2016-04-28', '2016-04-29']),  # plus a bank holiday
])
def test_get_workdays_list(start_date, end_date, expected):
    """get_workdays_list returns the actual workday dates, in order."""
    workdays = get_workdays_list(parse_date(start_date), parse_date(end_date))
    assert workdays == [parse_date(day) for day in expected]
@pytest.mark.parametrize("day", [
    '2015-12-28',  # boxing day (substitute day)
    '2016-05-02',  # may day
    '2016-05-30',  # spring bank holiday
    '2016-08-29',  # summer bank holiday (England)
    '2016-12-27',  # christmas day (substitute day)
])
def test_get_bank_holidays_good_days(day):
    """Known England & Wales bank holidays are present in the set."""
    assert parse_date(day) in get_bank_holidays(), \
        '{} is a bank holiday!'.format(day)


@pytest.mark.parametrize("day", [
    '2016-04-01',  # april fools day
    '2016-08-01',  # summer bank holiday (Scotland)
])
def test_get_bank_holidays_bad_days(day):
    """Ordinary days (and Scotland-only holidays) are not in the set."""
    assert parse_date(day) not in get_bank_holidays()
@pytest.mark.parametrize(
    "start_date0, end_date0, start_date1, end_date1, expected", [
        # no overlap
        ('2016-01-01', '2016-01-31', '2015-12-01', '2015-12-31', None),
        ('2015-12-01', '2015-12-31', '2016-01-01', '2016-01-31', None),
        # one time window is part of the other
        ('2015-12-01', '2016-01-31', '2015-12-31', '2016-01-01',
         ('2015-12-31', '2016-01-01')),
        # intersection is part of both time windows
        ('2015-12-01', '2016-01-01', '2015-12-31', '2016-01-31',
         ('2015-12-31', '2016-01-01')),
    ])
def test_get_overlap(start_date0, end_date0, start_date1, end_date1, expected):
    """get_overlap returns the intersection of two windows, or None when
    they are disjoint."""
    overlap = get_overlap((parse_date(start_date0), parse_date(end_date0)),
                          (parse_date(start_date1), parse_date(end_date1)))
    if expected:
        expected = parse_date(expected[0]), parse_date(expected[1])
    assert expected == overlap


@pytest.mark.parametrize(
    "start_date0, end_date0, start_date1, end_date1", [
        # time window0 start date > end date
        ('2016-01-31', '2016-01-01', '2015-12-01', '2015-12-31'),
        # time window1 start date > end date
        ('2015-12-01', '2015-12-31', '2016-01-31', '2016-01-01'),
    ])
def test_get_overlap_value_error(start_date0, end_date0,
                                 start_date1, end_date1):
    """An inverted window (start after end) raises ValueError."""
    with pytest.raises(ValueError):
        get_overlap((parse_date(start_date0), parse_date(end_date0)),
                    (parse_date(start_date1), parse_date(end_date1)))
@pytest.mark.parametrize("start_date, end_date, extend, first, last, length", [
    ('2015-01-10', '2015-01-25', True,
     ('2015-01-01', '2015-01-31'), ('2015-01-01', '2015-01-31'), 1),
    ('2015-01-10', '2015-01-25', False,
     ('2015-01-10', '2015-01-25'), ('2015-01-10', '2015-01-25'), 1),
    ('2015-01-10', '2015-12-25', True,
     ('2015-01-01', '2015-01-31'), ('2015-12-01', '2015-12-31'), 12),
    ('2015-01-10', '2015-12-25', False,
     ('2015-01-10', '2015-01-31'), ('2015-12-01', '2015-12-25'), 12),
    ('2015-01-01', '2015-12-31',
     True, ('2015-01-01', '2015-01-31'), ('2015-12-01', '2015-12-31'), 12),
    ('2015-01-01', '2015-12-31',
     False, ('2015-01-01', '2015-01-31'), ('2015-12-01', '2015-12-31'), 12),
    ('2015-01-01', '2015-01-01',
     True, ('2015-01-01', '2015-01-31'), ('2015-01-01', '2015-01-31'), 1),
    ('2015-01-01', '2015-01-01',
     False, ('2015-01-01', '2015-01-01'), ('2015-01-01', '2015-01-01'), 1),
    ('2015-01-31', '2015-01-31',
     True, ('2015-01-01', '2015-01-31'), ('2015-01-01', '2015-01-31'), 1),
    ('2015-01-31', '2015-01-31',
     False, ('2015-01-31', '2015-01-31'), ('2015-01-31', '2015-01-31'), 1),
])
def test_slice_time_window(start_date, end_date, extend, first, last, length):
    """Slicing by month-start ('MS'); extend=True snaps the outer windows to
    full calendar months, extend=False clips them to the requested range."""
    sliced = slice_time_window(
        parse_date(start_date), parse_date(end_date),
        'MS', extend=extend)
    assert sliced[0] == tuple([parse_date(d) for d in first])
    assert sliced[-1] == tuple([parse_date(d) for d in last])
    assert len(sliced) == length


def test_to_datetime():
    """A date converts to midnight of the same day."""
    assert to_datetime(date(2015, 12, 1)) == datetime(2015, 12, 1, 0, 0, 0)
@pytest.mark.parametrize("start_date, end_date, freq, bymonthday, expected", [
    (date(2015, 1, 2), date(2015, 3, 3), MONTHLY, 2,
     [date(2015, 1, 2), date(2015, 2, 2), date(2015, 3, 2)]),
    # monthday past the end of a short month falls back to the last day
    (date(2015, 1, 31), date(2015, 3, 31), MONTHLY, 31,
     [date(2015, 1, 31), date(2015, 2, 28), date(2015, 3, 31)]),
    (date(2015, 1, 29), date(2015, 3, 31), MONTHLY, 29,
     [date(2015, 1, 29), date(2015, 2, 28), date(2015, 3, 29)]),
    (date(2015, 1, 2), date(2017, 3, 3), YEARLY, None,
     [date(2015, 1, 2), date(2016, 1, 2), date(2017, 1, 2)]),
    (date(2015, 1, 30), date(2017, 3, 3), YEARLY, None,
     [date(2015, 1, 30), date(2016, 1, 30), date(2017, 1, 30)]),
])
def test_slice_on_date(start_date, end_date, freq, bymonthday, expected):
    """dates_between generates recurrence dates at the given frequency."""
    dates = dates_between(start_date, end_date, freq, bymonthday=bymonthday)
    assert dates == expected


@pytest.mark.parametrize("year, expected", [
    [2014, (date(2014, 4, 6), date(2015, 4, 5))],
    [2999, (date(2999, 4, 6), date(3000, 4, 5))],
])
def test_financial_year_tuple(year, expected):
    """UK financial year runs 6 April to 5 April of the following year."""
    assert financial_year_tuple(year) == expected
@pytest.mark.parametrize("start_date, end_date, repeat_end, expected", [
    ('2017-04-24', '2017-04-24', '2017-04-24', [
        ('2017-04-24', '2017-04-24')
    ]),
    ('2017-04-24', '2017-04-28', '2017-05-05', [
        ('2017-04-24', '2017-04-28'),
        ('2017-05-01', '2017-05-05'),
    ]),
    # NOTE(review): repeat_end '2017-05-01' still yields the full second
    # window ending '2017-05-05' — confirm a repeat started on or before
    # repeat_end is meant to run to completion.
    ('2017-04-24', '2017-04-28', '2017-05-01', [
        ('2017-04-24', '2017-04-28'),
        ('2017-05-01', '2017-05-05'),
    ]),
    ('2017-04-27', '2017-05-01', '2017-05-05', [
        ('2017-04-27', '2017-05-01'),
        ('2017-05-04', '2017-05-08'),
    ]),
])
def test_get_weekly_repeat_time_windows(start_date, end_date, repeat_end, expected):
    """Weekly repetition of a window until repeat_end."""
    assert get_weekly_repeat_time_windows(
        parse_date(start_date),
        parse_date(end_date),
        parse_date(repeat_end)
    ) == [
        (parse_date(sd), parse_date(ed))
        for sd, ed in expected
    ]


@pytest.mark.parametrize("day, expected", [
    ('2017-01-02', (0, 1)),
    ('2017-01-09', (0, 2)),
    ('2017-01-16', (0, 3)),
    ('2017-01-23', (0, 4)),
    ('2017-01-30', (0, 5)),
    ('2017-05-01', (0, 1)),
    ('2017-05-08', (0, 2)),
    ('2017-05-15', (0, 3)),
    ('2017-05-22', (0, 4)),
    ('2017-05-29', (0, 5)),
    ('2017-05-04', (3, 1)),
    ('2017-05-11', (3, 2)),
    ('2017-05-18', (3, 3)),
    ('2017-05-25', (3, 4)),
])
def test_get_weekday(day, expected):
    """Expected appears to be (weekday index, nth occurrence of that weekday
    in the month) — e.g. 2017-01-30 is the 5th Monday of January."""
    assert get_weekday(parse_date(day)) == expected
|
def main():
    """Prompt for a numeric test score and print the letter grade.

    Bands: F < 60 <= D < 70 <= C < 80 <= B < 90 <= A.
    """
    score = float(input('Enter the test score: '))
    if score < 60:
        print('Your grade is F.')
    elif score < 70:
        print('Your grade is D.')
    elif score < 80:
        print('Your grade is C.')
    elif score < 90:
        print('Your grade is B.')
    else:
        # Fixed: was `elif score >= 90`, a redundant guard that produced no
        # output at all for NaN input; a plain else covers every remaining case.
        print('Your grade is an A.')


# call main to start
main()
|
import code
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("run_mlm_features")
import torch
import spacy
nlp = spacy.load('en_core_web_sm')
# Canonical lemma -> the surface forms that should collapse onto it.
lemma_overrides = {
    'it': ['it'],
    "they/them": ['they', 'them'],
    "oneself": ['himself', 'herself', 'itself', 'themselves', 'ourselves', 'itself', 'oneself', 'myself', 'yourself']
}
# Inverted index: lower-cased surface token -> canonical lemma.
lemma_override_map = {}
for _lemma, _tokens in lemma_overrides.items():
    for _token in _tokens:
        lemma_override_map[_token] = _lemma


def lemmatize(token):
    """Return the lemma for *token*: a hand-written override when one exists
    (case-insensitive), otherwise spaCy's lemma for the single-token input."""
    override = lemma_override_map.get(token.lower())
    if override is not None:
        return override
    return nlp(token)[0].lemma_
# utility methods
import gzip
import codecs
import json
from tqdm import tqdm
import os
def read_lines(file_path):
    """Yield the lines of *file_path*.

    ``.gz`` files are opened via gzip (binary mode, so lines are bytes);
    anything else is opened as UTF-8 text and yields str lines.
    """
    if file_path.endswith('.gz'):
        handle = gzip.open(file_path, 'r')
    else:
        handle = codecs.open(file_path, 'r', encoding='utf8')
    with handle:
        yield from handle
# def indexOfFirstSatisfying(xs, p):
# for i, x in enumerate(xs):
# if p(x):
# return i
# return -1
def indexOfFirst(xs, y):
    """Index of the first element of *xs* equal to *y*, or -1 if absent
    (works on any iterable, unlike list.index)."""
    return next((i for i, x in enumerate(xs) if x == y), -1)
def print_vector(vec, get_tokens, num_tokens = 15):
    """Pretty-print the top-``num_tokens`` entries of a probability vector:
    one row of right-aligned token strings, one row of their probabilities."""
    probs, indices = torch.topk(vec, num_tokens)
    tokens = get_tokens(indices)
    print(" ".join("%15s" % tok for tok in tokens))
    print(" ".join("%15.10f" % p for p in probs))
class MLMFeatureBuilder:
    """Base class for masked-language-model feature builders; concrete
    subclasses provide a tokenizer and a run() method."""
    def __init__(self):
        pass
from transformers import BertTokenizer, BertForMaskedLM
from transformers import RobertaTokenizer, RobertaForMaskedLM
class BertMLMFeatureBuilder(MLMFeatureBuilder):
    """Includes:
    - tokenizer (for vocab indexing conversions), and
    - run(List[token], int) -> torch.Tensor(vocab size) that does MLM.

    When should_lemmatize is True the MLM output distribution is projected
    from wordpiece space into lemma space via a sparse 0/1 matrix.
    """
    def __init__(self, should_lemmatize = True):
        # Fixed: was `super(MLMFeatureBuilder, self).__init__()`, which starts
        # the MRO search *above* MLMFeatureBuilder and therefore silently
        # skips its __init__; the zero-argument form resolves to the parent.
        super().__init__()
        #self.tokenizer = BertTokenizer.from_pretrained('google/bert_uncased_L-2_H-128_A-2', local_files_only = True)
        #self.model = BertForMaskedLM.from_pretrained('google/bert_uncased_L-2_H-128_A-2', local_files_only = True)
        # self.tokenizer = RobertaTokenizer.from_pretrained('roberta-large', local_files_only = True)
        # self.model = RobertaForMaskedLM.from_pretrained('roberta-large', local_files_only = True)
        self.tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking', local_files_only = True)
        self.model = BertForMaskedLM.from_pretrained('bert-large-uncased-whole-word-masking', local_files_only = True)
        self.model.eval()  # inference only: freeze dropout / batchnorm behaviour
        self.model.to('cuda')
        self.should_lemmatize = should_lemmatize
        if not should_lemmatize:
            self.vocab_size = len(self.tokenizer.get_vocab())
        else:
            logger.info("Constructing lemmatizer map")
            vocab = self.tokenizer.get_vocab()
            # Lemma for every whole-word vocab entry; ##wordpieces and
            # [SPECIAL] tokens are excluded.
            lemmatizer_map = {
                token: lemmatize(token)
                for token, index in tqdm(vocab.items())
                if not token.startswith("##") and not (token.startswith('[') and token.endswith(']'))
            }
            self.lemmas = list({
                lemma for token, lemma in lemmatizer_map.items()
            })
            self.vocab_size = len(self.lemmas)
            lemma_index = { lem: i for i, lem in enumerate(self.lemmas) }
            num_all_tokens = len(vocab)
            num_valid_tokens = len(lemmatizer_map)
            logger.info("Num total tokens: %d" % len(vocab))
            logger.info("Num valid tokens: %d" % num_valid_tokens)
            # print(list(lemmatizer_map))
            logger.info("Num lemmas: %d" % self.vocab_size)
            # print(self.lemmas)
            # Sparse (num_lemmas x num_tokens) 0/1 matrix: left-multiplying a
            # wordpiece probability vector sums probabilities per lemma.
            lemma_indices = []
            token_indices = []
            for token, lemma in lemmatizer_map.items():
                lemma_indices.append(lemma_index[lemma])
                token_indices.append(vocab[token])
            transform_values = torch.ones(num_valid_tokens, dtype = torch.float32, device = torch.device('cuda'))
            transform_indices = torch.LongTensor([lemma_indices, token_indices]).to('cuda')
            self.lemma_transform = torch.sparse.FloatTensor(
                transform_indices, transform_values, torch.Size([self.vocab_size, num_all_tokens])
            )

    def get_tokens(self, indices):
        """Map a tensor of feature indices back to display strings (lemmas or
        wordpiece tokens, depending on should_lemmatize)."""
        if self.should_lemmatize:
            return [self.lemmas[i] for i in indices.tolist()]
        else:
            return self.tokenizer.convert_ids_to_tokens(indices)

    def run(self, tokens, start_index, end_index, keep_indices = None, repeat = False):
        """start_index inclusive, end_index exclusive. runs with span masked.

        Returns the softmax distribution predicted at the mask position,
        optionally lemmatised and restricted to keep_indices.
        """
        # copy tokens, then replace target span with a mask.
        # Special handling for "n't" contractions: make sure to remove the whole thing
        # assert end_index > start_index
        toks = list(tokens)
        # expand span to cover n't contractions
        if toks[start_index] == "n't":
            start_index = start_index - 1
        if end_index < len(tokens) and toks[end_index] == "n't":
            end_index = end_index + 1
        # remove all but 1 token of the target span
        while end_index > start_index + 1:
            toks.pop(start_index)
            end_index = end_index - 1
        # replace the last target token with [MASK]
        toks[start_index] = self.tokenizer.mask_token
        if not repeat:
            # produce input string for BERT, collapsing contractions so the tokenizer can do its thing
            text = " ".join(["[CLS]"] + toks + ["[SEP]"])
            text = text.replace(" n't", "n't")  # combine "n't" contractions together
            # Retokenize input for the encoder
            tokenized_text = self.tokenizer.tokenize(text)
            masked_index = indexOfFirst(tokenized_text, self.tokenizer.mask_token)
            assert masked_index > -1
            # Convert token to vocabulary indices
            indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
            # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
            segment_ids = [0] * len(tokenized_text)
        else:
            # "repeat" mode: present the original sentence first, then the
            # masked copy, so the model can attend to the unmasked span.
            first_text = " ".join(["[CLS]"] + list(tokens))
            first_text = first_text.replace(" n't", "n't")
            tokenized_first_text = self.tokenizer.tokenize(first_text)
            second_text = " ".join(toks + ["[SEP]"])
            second_text = second_text.replace(" n't", "n't")
            tokenized_second_text = self.tokenizer.tokenize(second_text)
            tokenized_text = tokenized_first_text + tokenized_second_text
            masked_index = indexOfFirst(tokenized_text, self.tokenizer.mask_token)
            assert masked_index > -1
            indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
            # NOTE(review): both halves get segment id 0 here — confirm the
            # second sentence was not meant to use segment id 1.
            segment_ids = ([0] * len(tokenized_first_text)) + ([0] * len(tokenized_second_text))
        # Convert inputs to PyTorch tensors
        tokens_tensor = torch.tensor([indexed_tokens]).to('cuda')
        segment_tensors = torch.tensor([segment_ids]).to('cuda')
        # Predict all tokens
        with torch.no_grad():
            outputs = self.model(tokens_tensor, segment_tensors)
            predictions = outputs[0]
        # print(text)
        mask_predictions = torch.nn.functional.softmax(predictions[0, masked_index], dim = 0)
        # print_vector(mask_predictions, self.tokenizer.convert_ids_to_tokens)
        if self.should_lemmatize:
            # Project wordpiece probabilities into lemma space.
            mask_predictions = torch.matmul(self.lemma_transform, mask_predictions.unsqueeze(1)).squeeze(1)
            # renormalize after lemmatizing to fix cases with high-probability ##partial ##wordpieces
            mask_predictions = mask_predictions / mask_predictions.sum()
            # print_vector(mask_predictions, self.get_tokens)
        if keep_indices is not None:
            # don't necessarily renormalize here; can always do that later.
            mask_predictions = torch.index_select(mask_predictions, 0, keep_indices)
            # print_vector(mask_predictions, lambda indices: self.get_tokens([keep_indices[i] for i in indices]))
            # mask_predictions = mask_predictions / mask_predictions.sum()
        # top_probs, top_indices = torch.topk(mask_predictions, 10)
        # if keep_indices is not None:
        #     top_indices = torch.index_select(keep_indices, 0, top_indices)
        # top_tokens = self.get_tokens(top_indices)
        # print(text)
        # print(" ".join(["%15s" % x for x in top_tokens]))
        # print(" ".join(["%15.5f" % x for x in top_probs]))
        return mask_predictions
# TODO perhaps add some other features in..
# - animacy?
# - number?
# - reflexivity?
# I _could_ just do those directly on the original text...but maybe BERT gives stronger signal?
# Idk.
# from transformers import RobertaTokenizer, RobertaForMaskedLM
# tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
# model = RobertaForMaskedLM.from_pretrained('roberta-large')
# Shared feature builder used by every mode below (lemmatised BERT-large WWM).
model = BertMLMFeatureBuilder(should_lemmatize = True)
# Input shards with pre-counted line totals (used to size the tqdm bars).
data_sources = {
    "dev": {
        "path": "experiments/conll08/out/mlm-inputs/dev.jsonl.gz",
        "size": 1228
    },
    "train": {
        "path": "experiments/conll08/out/mlm-inputs/train.jsonl.gz",
        "size": 35566
    },
    "test": {
        "path": "experiments/conll08/out/mlm-inputs/test.jsonl.gz",
        "size": 2139
    }
}
# Size of each per-verb output vocabulary; one slot is reserved for "<none>".
final_vocab_size = 1024
output_dir = "experiments/conll08/input/mlm"
# Instance-id prefixes used by the (currently disabled) debug filter in
# should_include.
targets = {
    "devel:359:plunge:18",
    "devel:359:press",
    "devel:1025:close",
    "devel:637:purchase",
    "devel:487:trigger",
    "devel:1009:trade",
    "devel:1053:believe:3",
}
# just for debugging
def should_include(sentence_id, verb_lemma, index):
    """Instance-selection hook; currently admits everything.

    A debugging variant (since removed) restricted processing to instances
    whose "sentence:verb:index" id matched an entry of the ``targets`` set.
    """
    return True
import itertools
def get_lines(split):
    # Iterate the raw jsonl lines of a split, wrapped in a progress bar sized
    # by the precomputed line count from data_sources.
    source = data_sources[split]
    return tqdm(read_lines(source["path"]), total = source["size"])
    # return itertools.islice(read_lines(data_sources[split]), 5)
def run_repeated(tokens, start_index, end_index, keep_indices):
    # Mode wrapper: feed the unmasked sentence followed by the masked copy
    # (model.run with repeat=True).
    return model.run(tokens, start_index, end_index, keep_indices, repeat=True)
def run_symm_left(tokens, start_index, end_index, keep_indices):
    # Coordination probe: insert "[MASK] and" immediately before the target
    # span and predict the mask.
    new_tokens = list(tokens)
    new_tokens.insert(start_index, "and")
    new_tokens.insert(start_index, model.tokenizer.mask_token)
    return model.run(new_tokens, start_index, start_index + 1, keep_indices)
def run_symm_right(tokens, start_index, end_index, keep_indices):
    # Coordination probe: insert "and [MASK]" immediately after the target
    # span and predict the mask.
    new_tokens = list(tokens)
    # if end_index == len(tokens) and tokens[end_index] == ".":
    #     end_index = end_index - 1
    new_tokens.insert(end_index, "and")
    new_tokens.insert(end_index + 1, model.tokenizer.mask_token)
    return model.run(new_tokens, end_index + 1, end_index + 2, keep_indices)
def run_symm_both(tokens, start_index, end_index, keep_indices):
    # Combine the left and right coordination probes: renormalised product of
    # the two distributions (softmax of summed logs).
    left = run_symm_left(tokens, start_index, end_index, keep_indices)
    right = run_symm_right(tokens, start_index, end_index, keep_indices)
    product = torch.nn.functional.softmax(left.log() + right.log(), dim = 0)
    return product
# One entry per masking mode: the run() variant to use plus accumulators
# ("verbs" and "args", keyed by verb lemma) filled in the main loop below.
all_verb_data = {
    "masked": {
        "run": model.run,
        "verbs": {},
        "args": {}
    },
    "repeated": {
        "run": run_repeated,
        "verbs": {},
        "args": {}
    },
    "symm_left": {
        "run": run_symm_left,
        "verbs": {},
        "args": {}
    },
    "symm_right": {
        "run": run_symm_right,
        "verbs": {},
        "args": {}
    },
    "symm_both": {
        "run": run_symm_both,
        "verbs": {},
        "args": {}
    },
}
# Template GPU vector; all per-verb accumulators are zeros_like this.
base_vec = torch.zeros(model.vocab_size, dtype = torch.float32, device = torch.device('cuda'))


def make_dist_data():
    # Fresh accumulator: instance count, summed probabilities ("pseudo
    # counts") and element-wise max probability observed so far.
    return {
        "total": 0,
        "pcounts": torch.zeros_like(base_vec),
        "maxes": torch.zeros_like(base_vec),
    }
def get_dist_data(mode_verb_data, verb):
    """Fetch the per-verb accumulator, creating it on first access."""
    try:
        return mode_verb_data[verb]
    except KeyError:
        data = make_dist_data()
        mode_verb_data[verb] = data
        return data
def print_prior(verb, verb_data):
    # Debug helper: print the 30 most probable feature tokens of a verb's
    # averaged (prior) distribution.
    probs = verb_data["pcounts"] / verb_data["total"]
    top_probs, top_indices = torch.topk(probs, 30)
    top_tokens = model.get_tokens(top_indices)
    print(verb)
    print("Instances: %s" % verb_data["total"])
    print(" ".join(["%15s" % x for x in top_tokens]))
    print(" ".join(["%15.5f" % x for x in top_probs]))
# === MAIN ===
# give it a read through and construct verb-wise priors
# Pass 1: for every mode, accumulate per-verb totals / pcounts / maxes over
# both predicate positions ("verbIndices") and argument spans ("argSpans").
logger.info("Constructing priors")
for split in data_sources:
    logger.info("Reading %s" % split)
    for line in get_lines(split):
        input_json = json.loads(line)
        tokens = input_json["sentenceTokens"]
        for verb, mlm_info in input_json["verbs"].items():
            for mode, mode_data in all_verb_data.items():
                verb_data = get_dist_data(mode_data["verbs"], verb)
                for verb_index in mlm_info["verbIndices"]:
                    if should_include(input_json["sentenceId"], verb, verb_index):
                        verb_data["total"] += 1
                        probs = mode_data["run"](tokens, verb_index, verb_index + 1, None)
                        verb_data["pcounts"].add_(probs)
                        verb_data["maxes"] = torch.max(verb_data["maxes"], probs)
                args_data = get_dist_data(mode_data["args"], verb)
                for arg_span in mlm_info["argSpans"]:
                    if should_include(input_json["sentenceId"], verb, arg_span[0]):
                        args_data["total"] += 1
                        probs = mode_data["run"](tokens, arg_span[0], arg_span[1], None)
                        args_data["pcounts"].add_(probs)
                        args_data["maxes"] = torch.max(args_data["maxes"], probs)
# (stray commented-out debug lines preserved from an earlier revision)
# if keep_indices is not None:
#     top_indices = torch.index_select(keep_indices, 0, top_indices)
# top_tokens = self.get_tokens(top_indices)
# print(text)
# debug printing
# print("==== PRIORS ====")
# sorted_verb_data = sorted(list(all_verb_data["masked"]["verbs"].items()), key = lambda t: t[1]["total"])
# for verb, verb_data in sorted_verb_data:
#     print_prior(verb, verb_data)
# record final vocabularies
# Pass 2: keep, per verb and per mode, the indices of the features with the
# highest observed max probability (vocab_size - 1 of them; "<none>" fills
# the remaining slot when the vocab files are written below).
logger.info("Computing vocabularies")
for mode, mode_data in all_verb_data.items():
    for verb, verb_data in mode_data["verbs"].items():
        top_pcounts, top_indices = torch.topk(verb_data["maxes"], final_vocab_size - 1)
        verb_data["keep_indices"] = top_indices
    for verb, arg_data in mode_data["args"].items():
        top_pcounts, top_indices = torch.topk(arg_data["maxes"], final_vocab_size - 1)
        arg_data["keep_indices"] = top_indices
# Pass 3: per mode, write the per-verb vocab files, then one jsonl.gz id file
# and one raw float32 binary per split for verbs and for argument spans.
for mode, mode_data in all_verb_data.items():
    logger.info("Writing features for mode: %s" % mode)
    mode_dir = os.path.join(output_dir, mode)
    os.makedirs(mode_dir, exist_ok = True)
    logger.info("Writing vocabularies")
    verb_vocabs = {
        verb: ["<none>"] + model.get_tokens(verb_data["keep_indices"])
        for verb, verb_data in mode_data["verbs"].items()
    }
    verb_vocab_file = os.path.join(mode_dir, "verb_vocabs.json")
    with open(verb_vocab_file, 'wt') as f:
        f.write(json.dumps(verb_vocabs))
    arg_vocabs = {
        verb: ["<none>"] + model.get_tokens(arg_data["keep_indices"])
        for verb, arg_data in mode_data["args"].items()
    }
    arg_vocab_file = os.path.join(mode_dir, "arg_vocabs.json")
    with open(arg_vocab_file, 'wt') as f:
        f.write(json.dumps(arg_vocabs))
    logger.info("Writing data")
    # write verb IDs and vectors
    for split in data_sources:
        logger.info("Processing %s" % split)
        verb_ids_file = os.path.join(mode_dir, "%s_verb_ids.jsonl.gz" % split)
        arg_ids_file = os.path.join(mode_dir, "%s_arg_ids.jsonl.gz" % split)
        verb_vecs_file = os.path.join(mode_dir, "%s_verb_vecs.bin" % split)
        arg_vecs_file = os.path.join(mode_dir, "%s_arg_vecs.bin" % split)
        with gzip.open(verb_ids_file, "wt") as f_verb_ids:
            with open(verb_vecs_file, "wb") as f_verb_vecs:
                with gzip.open(arg_ids_file, "wt") as f_arg_ids:
                    with open(arg_vecs_file, "wb") as f_arg_vecs:
                        for line in get_lines(split):
                            input_json = json.loads(line)
                            tokens = input_json["sentenceTokens"]
                            for verb, mlm_info in input_json["verbs"].items():
                                verb_data = mode_data["verbs"][verb]
                                for index in mlm_info["verbIndices"]:
                                    instance_id = {
                                        "sentenceId": input_json["sentenceId"],
                                        "verbLemma": verb,
                                        "span": [index, index + 1]
                                    }
                                    instance_vec = mode_data["run"](tokens, index, index + 1, verb_data["keep_indices"])
                                    # Prepend a tiny "<none>" probability so the
                                    # vector lines up with the vocab files above.
                                    instance_vec = torch.cat([torch.FloatTensor([1e-12]).to(instance_vec.device), instance_vec])
                                    f_verb_ids.write(json.dumps(instance_id) + "\n")
                                    f_verb_vecs.write(instance_vec.cpu().numpy().tobytes())
                                arg_data = mode_data["args"][verb]
                                for arg_span in mlm_info["argSpans"]:
                                    instance_id = {
                                        "sentenceId": input_json["sentenceId"],
                                        "verbLemma": verb,
                                        "span": arg_span
                                    }
                                    instance_vec = mode_data["run"](tokens, arg_span[0], arg_span[1], arg_data["keep_indices"])
                                    instance_vec = torch.cat([torch.FloatTensor([1e-12]).to(instance_vec.device), instance_vec])
                                    f_arg_ids.write(json.dumps(instance_id) + "\n")
                                    f_arg_vecs.write(instance_vec.cpu().numpy().tobytes())
|
import pygame
class Cell:
    """One tile of a 4x4 board, drawn as a grey square with its number."""

    def __init__(self, i, j, cell_size):
        self.i = i  # horizontal grid index
        self.j = j  # vertical grid index
        self.size = cell_size  # (width, height) of one tile in pixels
        self.number = 0  # value rendered on the tile

    def draw(self, surface):
        """Render the tile background and its number onto *surface*."""
        print("Rysuję")  # debug trace left in place ("drawing" in Polish)
        # print(self.number, end="")
        w, h = self.size
        i, j = self.i, self.j
        # NOTE(review): assumes number stays within 0..255; a larger value
        # would be an invalid colour component — confirm against game logic.
        color = self.number, self.number, self.number
        # Fixed: pygame.draw.rect takes (left, top, WIDTH, HEIGHT); the
        # original passed ((i+1)*w, (j+1)*h) as the size, so tiles further
        # from the origin were drawn progressively larger.
        pygame.draw.rect(surface, color, (i * w, j * h, w, h))
        # TODO: creating the font on every draw call is wasteful; cache it.
        myfont = pygame.font.SysFont("arial", 60)
        label = myfont.render(str(self.number), 1, (255, 255, 0))
        surface.blit(label, (i * w + 30, j * h + 30))

    def __repr__(self):
        return f"Cell ({self.i}, {self.j}, number: {self.number})"
class Grid:
    # 4x4 board of Cell objects sized to fill the given surface.
    def __init__(self, size):
        # size: (width, height) of the whole drawing surface in pixels.
        cell_size = size[0] // 4, size[1] // 4
        # NOTE(review): the comprehension stores Cell(i, j) at cells[j][i]
        # while draw() below reads cells[i][j]; this only lines up because
        # the grid is square — confirm the intended indexing convention.
        self.cells = [[Cell(i, j, cell_size) for i in range(4)] for j in range(4)]

    def draw(self, surface):
        # Render every tile.
        # print("=" * 30)
        for i in range(4):
            for j in range(4):
                self.cells[i][j].draw(surface)
                # print('\t', end="")
            # print()
        # print("=" * 30)
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm
# read data from csv
def read_data():
    """Load the train/test feature matrices and the training labels
    (label file has no header row)."""
    features_train = pd.read_csv('../input/train_features')
    features_test = pd.read_csv('../input/test_features')
    labels = pd.read_csv('../input/train_label', header=None)
    return features_train, features_test, labels
# LinearSVR hyper-parameter notes (library defaults are used here):
# C: penalty parameter
# loss: loss function
# epsilon: Epsilon parameter in the epsilon-insensitive loss function
# dual: determine either solve the dual or primal optimization problem; prefer dual=False when n_samples > n_features
# tol: Tolerance for stopping criteria
# Shared estimator used by both model_test() and model_use() below.
clf = svm.LinearSVR()
#from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
def model_test():
    # Evaluate the estimator with 5-fold cross-validation (negative MSE)
    # instead of a single train/test split, to guard against overfitting.
    X_train,X_test,y = read_data()
    # cross-validation: aviod overfitting
    # step1: split trainning data
    #cro_train, cro_test, cro_y_train, cor_y_test = train_test_split(X_train, y, test_size=0.4, random_state=0)
    #clf.fit(cro_train,np.array(cro_y_train))
    #clf.score(cro_test,np.array(cro_y_test))
    print(cross_val_score(clf, X_train, np.array(y),cv=5,scoring='neg_mean_squared_error'))
def model_use():
    # Fit on the full training set and write submission-style predictions.
    X_train,X_test,y = read_data()
    clf.fit(X_train,y)
    # expm1 inverts a log1p transform — assumes the stored labels are
    # log1p(SalePrice); TODO confirm against the feature-building script.
    pre_val = np.expm1(pd.DataFrame(clf.predict(X_test)))
    test_label = pd.read_csv('../input/test_id',header=None)
    result = pd.DataFrame()
    result['Id'] = test_label[0]
    result['SalePrice'] = pre_val[0]
    result.to_csv('../output/result_svr',index=None)


model_use()
#model_test()
|
#!/usr/bin/env python
import unittest
from dominion import Card, Game, Piles, Player
###############################################################################
class Card_Den_of_Sin(Card.Card):
    """Den of Sin (Nocturne): Night-Duration card costing 2.

    Next turn: +2 Cards. When gained, it goes to the hand instead of the
    discard pile.
    """
    def __init__(self):
        Card.Card.__init__(self)
        self.cardtype = [Card.CardType.NIGHT, Card.CardType.DURATION]
        self.base = Card.CardExpansion.NOCTURNE
        self.name = "Den of Sin"
        self.cost = 2

    def desc(self, player):
        # Show the gain-to-hand reminder only during the buy phase.
        if player.phase == Player.Phase.BUY:
            return "At the start of your next turn, +2 Cards; This is gained to your hand (instead of your discard pile)."
        return "At the start of your next turn, +2 Cards"

    def duration(self, game, player):
        # Start-of-next-turn effect: draw two cards.
        for _ in range(2):
            player.pickup_card()

    def hook_gain_this_card(self, game, player):
        # Route the freshly gained card to the hand, not the discard pile.
        return {"destination": Piles.HAND}
###############################################################################
class Test_Den_of_Sin(unittest.TestCase):
    """Unit tests for the gain-to-hand hook and the next-turn draw effect."""

    def setUp(self):
        self.g = Game.TestGame(numplayers=1, initcards=["Den of Sin"])
        self.g.start_game()
        self.plr = self.g.player_list(0)
        self.card = self.g["Den of Sin"].remove()

    def test_gain(self):
        # Gaining the card should place it directly into the hand.
        self.plr.gain_card("Den of Sin")
        self.assertIn("Den of Sin", self.plr.piles[Piles.HAND])

    def test_duration(self):
        # Playing it should draw 2 extra cards at the start of the next turn
        # (5 from the normal redraw + 2 from the duration effect).
        self.plr.add_card(self.card, Piles.HAND)
        self.plr.play_card(self.card)
        self.plr.end_turn()
        self.plr.start_turn()
        self.assertEqual(self.plr.piles[Piles.HAND].size(), 5 + 2)
###############################################################################
if __name__ == "__main__": # pragma: no cover
unittest.main()
# EOF
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
from itertools import count
import time
import matplotlib.pyplot as plt
# Per-episode clipped returns for three epsilon-decay schedules.
R = np.loadtxt('return_per_episode_baseline.txt')
L1 = np.loadtxt('return_per_episode_decay_rate_30000.txt')
L2 = np.loadtxt('return_per_episode_decay_rate_100000.txt')
# NOTE(review): the baseline file is plotted under the label "Decay Steps =
# 1M" — confirm the baseline run really used a 1M-step schedule.
# Baseline truncated to the first 1999 episodes to match the other runs.
plt.plot(R[0:1999], label='Decay Steps = 1M')
plt.plot(L1, label='Decay Steps = 30K')
plt.plot(L2, label='Decay Steps = 100K')
plt.ylabel('Clipped Return')
plt.xlabel('Number of Episodes')
plt.title('Learning Curve for Boxing-ram-v0')
plt.legend()
plt.show()
|
import logging
import threading
import time
from ast import literal_eval
from random import randint
from threading import Thread
import requests
from flask import Flask, request, jsonify, render_template
from tools.maths import rnd
from client.motors.v03 import Motor
from datetime import datetime
from client import host, tehran, admin_key, fastness
# debug, info, warning, error, critical
class MyFormatter(logging.Formatter):
    """Log formatter whose %(asctime)s supports full strftime codes.

    The stock Formatter renders times with time.strftime, which cannot emit
    microseconds; converting record.created through datetime.fromtimestamp
    makes directives such as %f usable in datefmt.
    """

    converter = datetime.fromtimestamp

    def formatTime(self, record, datefmt=None):
        created = self.converter(record.created)
        if not datefmt:
            # Mirror logging's default "YYYY-MM-DD HH:MM:SS,mmm" layout.
            base = created.strftime("%Y-%m-%d %H:%M:%S")
            return "%s,%03d" % (base, record.msecs)
        return created.strftime(datefmt)
# Module logger: DEBUG+ records written to simulation.log with microsecond
# timestamps via MyFormatter.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# console = logging.StreamHandler()
# logger.addHandler(console)
console = logging.FileHandler('simulation.log')
console.setLevel(logging.DEBUG)
logger.addHandler(console)
formatter = MyFormatter(fmt='%(asctime)s %(message)s', datefmt='%Y-%m-%d,%H:%M:%S.%f')
console.setFormatter(formatter)
# Session banner so separate runs are easy to find in the log file.
logger.debug('/*******************************\\')
logger.debug(' {}'.format(datetime.now()))
logger.debug('\\*******************************/')
# 2011-06-09,07:12:36.553554 Jackdaws love my big sphinx of quartz.
# Flask app serving the simulation page and per-motor delivery callbacks.
app = Flask(__name__, template_folder='../client')
motors = {}  # motor name -> Motor instance
order_rate = 1.37  # divisor scaling the random order-insertion rate
paths = []  # ids of paths whose location is being polled by beg()
@app.route('/<motor>', methods=['POST'])
def msg(motor):
    # Delivery callback: the server POSTs an assigned path to a motor; we
    # track its id, acknowledge it, and queue it on the motor's message box.
    # NOTE(review): literal_eval on request data — confirm the payload source
    # is trusted.
    path = literal_eval(request.values['path'])
    paths.append(path['_id'])
    requests.post(host + '/food/~{}@ack'.format(path['_id']), data={'key': motors[motor].key})
    motors[motor].msg_box.append(path)
    # NOTE(review): logged at CRITICAL though this is routine traffic —
    # confirm the intended level.
    logger.critical("porter: {}, path: {}".format(motor, request.values['path']))
    return jsonify({
        'SUCCESS': True
    })
@app.route('/')
def _map():
    # Serve the simulation map page. b_key is assigned later during module
    # start-up, before the first request can arrive.
    return render_template('simulation.html', key=b_key, host=host, hang='food')
# Run the Flask app in a background thread so the simulation continues below.
client_server = threading.Thread(target=app.run, kwargs={'port': 5002})
client_server.start()
# Reset server-side state left over from any previous simulation run.
requests.post(host + '/!flush', data={'key': admin_key})
requests.delete(host + '/food/!!/shifts', data={'key': admin_key, 'q': '{}'})
requests.delete(host + '/food/!!/paths', data={'key': admin_key, 'q': '{}'})
requests.delete(host + '/food/!!/credits', data={'key': admin_key, 'q': '{}'})
# get a B key
requests.put(host + '/food/mehrad', data={'key': admin_key})
response = requests.post(host + '/food/mehrad@privilege=b', data={'key': admin_key})
requests.post(host + '/food/mehrad@credit=1000000', data={'key': admin_key})
b_key = response.text
# some operators -> start inserting randomly
def insertion_loop(operator, key):
    # Worker-thread body: forever post an order with random pickup/dropoff
    # points inside tehran, then pause for a random interval scaled by the
    # simulation fastness and the global order_rate.
    while True:
        # insert orders randomly
        requests.post(host + '/food/~{},{};{},{}'.format(*rnd(tehran), *rnd(tehran)), data={
            'key': key,
            'volume': 2,
            'priority': 1,
            'delay': 400
        })
        time.sleep(randint(1000, 3000) / fastness / order_rate)
for o in [[name, None] for name in [
'nosrat',
'effat',
'shokat',
'sakineh',
'halimeh',
'naemeh',
'maryam',
'zahra',
'narjes',
'zeynab',
'maede',
'sajede',
'ahlam',
'shaqlam',
'shiva',
'nirvana',
'sahar',
'baran',
'marziyeh',
'tayebeh',
'nosrat_2',
'effat_2',
'shokat_2',
'sakineh_2',
'halimeh_2',
'naemeh_2',
'maryam_2',
'zahra_2',
'narjes_2',
'zeynab_2',
'maede_2',
'sajede_2',
'ahlam_2',
'shaqlam_2',
'shiva_2',
'nirvana_2',
'sahar_2',
'baran_2',
'marziyeh_2',
'tayebeh_2',
]]:
requests.put(host + '/food/{}'.format(o[0]), data={'key': b_key})
response = requests.post(host + '/food/{}@privilege=o'.format(o[0]), data={'key': b_key})
o[1] = response.text
thread = Thread(target=insertion_loop, args=tuple(o))
thread.start()
clock = datetime.now()
clock = clock.hour * 3600 + clock.minute * 60 + clock.second
for name in [
'shahin',
'alireza',
'naqi',
'taqi',
'javad',
'kazem',
'bil',
'jack',
'muses',
'nick',
'liam',
'william',
'james',
'benjamin',
'mason',
'logan',
'iris',
'zeus',
'hera',
'poseidon',
'demeter',
'ares',
'athena',
'apollo',
'hermes',
'artemis',
'thor',
'odin',
'ironman',
'vision',
'hulk',
'thanos',
'superman',
'captain',
'spiderman',
'flash',
'_ali',
'_hassan',
'_hossein',
'_sajad',
'_baqer',
'_sadeq',
'_javad',
'_hadi',
'_naqi',
'_taqi',
'arav',
'vivaan',
'aditya',
'vihaan',
'arjun',
'raju',
'sai',
'reyansh',
'salman',
'amirkhan',
'chao',
'chaun',
'chen',
'cheng',
'jetli',
'borosly',
'ching',
'chinchan',
'luchin',
'michin',
]:
motors[name] = Motor(name, requests.put(host + '/food/{}'.format(name), data={'key': b_key}).text,
*rnd(tehran), hang='food', fastness=fastness)
motors[name].shift(clock + 6, 24 * 3600 - 1)
# motors[name].shift(clock + 20, clock + 40)
motors[name].thread.start()
# fastness = 24 () -> periods in ego -> hour = 1
def rebook():
    # Periodic dispatcher: refresh active porters, list the free ones, and
    # ask the server to (re)solve the assignment every ~420s (scaled).
    while True:
        # active porters to porters
        requests.patch(host + '/food', data={'key': b_key})
        response = requests.post(host + '/food/@frees', data={'key': b_key})
        if response.json():
            logger.info(response.text)
        # # solve
        requests.post(host + '/food/!!!/hng', data={'key': b_key})
        time.sleep(420 / fastness)


rebook_thread = Thread(target=rebook)
rebook_thread.start()
# 1. each 10sec / fastness each operator will ask location of all porters and orders.
def beg():
    # Poll the server for every tracked path's location; forget paths the
    # server no longer reports coordinates for.
    while True:
        # Iterate over a snapshot so paths.remove() cannot break iteration.
        for path in set(paths):
            location = requests.get(host + '/food/~{}@'.format(path), params={'key': b_key}).json()
            if location['date'] and location['lat'] and location['lng']:
                logger.info("path: {} -> {}".format(path, str(location)))
            else:
                logger.info("-- path removed --")
                paths.remove(path)
        # NOTE(review): the original indentation was ambiguous — this sleep is
        # assumed to run once per sweep, not once per path; confirm upstream.
        time.sleep(5)
# Background location-polling loop.
begging_thread = Thread(target=beg)
begging_thread.start()
def report():
    """Print aggregate fleet progress (total meters, finished paths) each minute."""
    while True:
        finished = sum(motor.n for motor in motors.values())
        meters = sum(motor.s for motor in motors.values())
        print('-- {}m have gone for {} finished paths --'.format(meters, finished))
        time.sleep(60)
# Periodic console progress report.
report_thread = Thread(target=report)
report_thread.start()
# 2. simulation.html
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 11:05:38 2018
@author: CUHKSZ
"""
import time
#from collections import deque
from ina219 import INA219
from ina219 import DeviceRangeError
def sensor():
    """Continuously sample an INA219 current sensor and print readings.

    Applies a linear calibration (a*I + b) to the raw current, samples twice
    per second, and on shutdown (any escaping exception, including Ctrl+C)
    dumps the last reading to a timestamp-named text file.
    """
    Shunt_OHMS = 0.1  # For this sensor it is 0.1 ohm
    # Last-known readings, pre-initialised so the shutdown dump below cannot
    # raise NameError if we stop before the first successful sample.
    currentvalue = 0
    voltagevalue = 0.0
    powervalue = 0
    timevalue = 0.0
    try:
        print('Starting Current Sensor')
        print('Collecting Sensor Values...')
        start = time.time()  # Start Time, for elapsed-seconds bookkeeping
        a = 0.9664  # Regression Fitting Parameter (gain)
        b = 0.0285  # Regression Fitting Parameter (offset)
        ina = INA219(Shunt_OHMS)  # Auto Gain
        # Retry until the device answers; it may not be ready immediately.
        # Narrowed from a bare except so Ctrl+C can still interrupt this loop
        # (it is caught by the outer handler and triggers the final dump).
        while True:
            try:
                ina.configure()
                break
            except Exception:
                print('Cannot configure ina219!')
        print('Current Sensor Configured Successfully')
        while True:
            try:
                currentvalue = round((a * ina.current()) + b)  # Rounding off values to nearest integer
                voltagevalue = float('{0:.1f}'.format(ina.voltage()))  # Floating point up to one decimal point
                powervalue = round(currentvalue * voltagevalue)
                timevalue = float('{0:.1f}'.format(time.time() - start))  # Elapsed time in seconds, 1 decimal
                print('Current value is: ', currentvalue)
            except DeviceRangeError:
                print('Device Range Error')
            time.sleep(0.5)  # Reading value after 0.5 second
    except BaseException:
        # Deliberately broad: Ctrl+C is the normal way to stop the sensor,
        # and we still want to persist the last reading on the way out.
        print('Exception Occurred, Current Sensor Stopped \n')
        with open(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '.txt', 'w') as f:
            f.write(str(currentvalue) + ' ' + str(voltagevalue) + ' ' + str(powervalue) + ' ' + str(timevalue) + '\n')
        print('Sensor Stopped!\n')
#------------------------------------------------
sensor()
|
"""list of common VLANs."""
def int_in_list(a_list):
    """Convert a_list elements to int type, in place.

    Mutates the given list and returns None (list.sort-style convention).
    Raises ValueError if an element is not a valid integer string.
    """
    # enumerate replaces the original hand-maintained counter variable.
    for index, elem in enumerate(a_list):
        a_list[index] = int(elem)
COMMAND1 = "switchport trunk allowed vlan 1,3,10,20,30,100"
COMMAND2 = "switchport trunk allowed vlan 1,3,100,200,300"
COMMAND_LIST = [COMMAND1, COMMAND2]
# One list of integer VLAN IDs per trunk command.
COMMON_VLAN = []
for command in COMMAND_LIST:
    vlan_ids = command.split()[-1].split(",")
    int_in_list(vlan_ids)
    COMMON_VLAN.append(vlan_ids)
# VLANs allowed on both trunks.
print(set(COMMON_VLAN[0]) & set(COMMON_VLAN[1]))
|
# Return the sum of the numbers in the array, except ignore sections of numbers
# starting with a 6 and extending to the next 7 (every 6 will be followed by at
# least one 7). Return 0 for no numbers.
def sum67(nums):
    """Sum the numbers in nums, skipping sections from a 6 up to and
    including the next 7 (every 6 is followed by at least one 7).

    Returns 0 for an empty list. A 7 outside a skipped section counts
    normally.
    """
    # Rewritten as a single pass with one state flag; the original used
    # nested while loops and shadowed the builtin `sum`.
    total = 0
    skipping = False
    for n in nums:
        if skipping:
            if n == 7:
                skipping = False  # the closing 7 is also excluded
        elif n == 6:
            skipping = True
        else:
            total += n
    return total
# Smoke checks against the documented examples (expected output in comments).
print(sum67([1, 2, 2]))  # 5
print(sum67([1, 2, 2, 6, 99, 99, 7]))  # 5
print(sum67([1, 1, 6, 7, 2]))  # 4
|
from lxml import html
import json
import requests
from time import sleep
def AdebooksParser(url, isbn, keyword):
    """Scrape the title and sale price for *isbn* from an AbeBooks results page.

    Retries every 3 seconds until a page is fetched and parsed successfully
    (e.g. when the site serves a captcha or error page). Returns a dict with
    ISBN10, KEYOWRD, TITLE and SALE_PRICE keys; TITLE/SALE_PRICE are None
    when the page has no match.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36'}
    while True:
        sleep(3)
        try:
            # BUG FIX: re-request on every attempt. The original fetched the
            # page once before the loop, so a captcha/non-200 response was
            # re-parsed forever and the retry could never succeed.
            page = requests.get(url, headers=headers)
            # Check the status before bothering to parse.
            if page.status_code != 200:
                raise ValueError('captha')
            doc = html.fromstring(page.content)
            XPATH_NAME = '//h2/a/span/text()'
            XPATH_SALE_PRICE = '//div[@class="item-price"]/span[@class="price"]/text()'
            RAW_NAME = doc.xpath(XPATH_NAME)
            RAW_SALE_PRICE = doc.xpath(XPATH_SALE_PRICE)
            NAME = ' '.join(''.join(RAW_NAME).split()) if RAW_NAME else None
            SALE_PRICE = ' '.join(''.join(RAW_SALE_PRICE).split()).strip() if RAW_SALE_PRICE else None
            # NOTE: 'KEYOWRD' is misspelled but matches the key used in the
            # input JSON file; do not "fix" one side without the other.
            data = {
                'ISBN10': isbn,
                'KEYOWRD': keyword,
                'TITLE': NAME,
                'SALE_PRICE': SALE_PRICE,
            }
            return data
        except Exception as e:
            print(e)
def ReadIsbn():
    """Read keywords_ISBN10.json, scrape each ISBN via AdebooksParser, and
    dump the collected records to adebookDB.json."""
    with open('keywords_ISBN10.json') as f:
        IsbnList = json.load(f)
    extracted_data = []
    for i in IsbnList:
        url = "https://www.abebooks.com/servlet/SearchResults?ds=1&isbn=" + i["ISBN10"]
        print("Processing: ", url)
        extracted_data.append(AdebooksParser(url, i["ISBN10"], i["KEYOWRD"]))
        sleep(2)  # be polite between requests
    # 'with' guarantees the dump is flushed and closed; the original opened
    # the output file without ever closing it.
    with open('adebookDB.json', 'w') as out:
        json.dump(extracted_data, out, indent=4)
if __name__ == "__main__":
ReadIsbn()
|
from enum import Enum
from string import ascii_uppercase, ascii_lowercase
from typing import NamedTuple, Tuple, Generator
class CharacterType(Enum):
    """Coarse classification of a single character, as used by
    CharacterContainer._map_casing."""
    LOWERCASE = 'LOWERCASE'
    UPPERCASE = 'UPPERCASE'
    UNDERSCORE = 'UNDERSCORE'
    DASH = 'DASH'
    DOT = 'DOT'
    SLASH = 'SLASH'
    COMMA = 'COMMA'
    OTHER = 'OTHER'
class Character(NamedTuple):
    """A single character paired with its CharacterType classification."""
    character: str
    case: CharacterType
class CharacterContainer:
    """A string whose characters are each tagged with a CharacterType.

    Equality and hashing are by the rendered string, so comparison against a
    plain str works. Note: iterating the container itself shares one internal
    iterator (set by __iter__); for independent/nested iteration, iterate
    ``.characters`` directly.
    """

    def __init__(self, string: str = ''):
        self._string = string
        self._characters = tuple(self._map_casing())
        self._iter = None  # current shared iterator, set by __iter__

    def __iter__(self) -> 'CharacterContainer':
        self._iter = iter(self.characters)
        return self

    def __next__(self) -> Character:
        return next(self._iter)

    def __str__(self):
        return ''.join([c.character for c in self.characters])

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        # Compare by rendered string so CharacterContainer == str also works.
        return str(other) == str(self)

    def __hash__(self):
        # BUG FIX: defining __eq__ alone implicitly sets __hash__ to None,
        # making instances unhashable. Hash by the same string used by
        # __eq__ to keep the eq/hash contract.
        return hash(str(self))

    @property
    def characters(self) -> Tuple[Character, ...]:
        """The classified characters as an immutable tuple."""
        return self._characters

    def append_str(self, string: str):
        """Append *string* to the container and re-classify all characters."""
        self._string += string
        self._characters = tuple(self._map_casing())

    def _map_casing(self) -> Generator[Character, None, None]:
        """Yield a classified Character for each char of the backing string."""
        for character in self._string:
            if character in ascii_uppercase:
                yield Character(character, CharacterType.UPPERCASE)
            elif character in ascii_lowercase:
                yield Character(character, CharacterType.LOWERCASE)
            elif character == '_':
                yield Character(character, CharacterType.UNDERSCORE)
            elif character == '-':
                yield Character(character, CharacterType.DASH)
            elif character == '.':
                yield Character(character, CharacterType.DOT)
            elif character == '/':
                yield Character(character, CharacterType.SLASH)
            elif character == ',':
                yield Character(character, CharacterType.COMMA)
            else:
                yield Character(character, CharacterType.OTHER)
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import pendulum
import pytest
import responses
from airbyte_cdk.models import SyncMode
from pytest import fixture
from source_orb.source import CreditsLedgerEntries, Customers, IncrementalOrbStream, OrbStream, Plans, Subscriptions, SubscriptionUsage
@fixture
def patch_incremental_base_class(mocker):
    """Make the abstract IncrementalOrbStream instantiable for unit tests."""
    # Mock abstract methods to enable instantiating abstract class
    mocker.patch.object(IncrementalOrbStream, "path", "v0/example_endpoint")
    mocker.patch.object(IncrementalOrbStream, "primary_key", "id")
    mocker.patch.object(IncrementalOrbStream, "__abstractmethods__", set())
def test_cursor_field(patch_incremental_base_class):
    """The incremental cursor field is the record's 'created_at' timestamp."""
    stream = IncrementalOrbStream()
    expected_cursor_field = "created_at"
    assert stream.cursor_field == expected_cursor_field
@pytest.mark.parametrize(
    ("current_stream_state", "latest_record", "expected_state"),
    [
        # A newer record advances the state
        (
            dict(created_at="2022-01-25T12:00:00+00:00"),
            dict(created_at="2022-01-26T12:00:00+00:00"),
            dict(created_at="2022-01-26T12:00:00+00:00"),
        ),
        # An older record does not move the state backwards
        (
            dict(created_at="2022-01-26T12:00:00+00:00"),
            dict(created_at="2022-01-25T12:00:00+00:00"),
            dict(created_at="2022-01-26T12:00:00+00:00"),
        ),
        # An empty state is seeded from the first record seen
        ({}, dict(created_at="2022-01-25T12:00:00+00:00"), dict(created_at="2022-01-25T12:00:00+00:00")),
    ],
)
def test_get_updated_state(patch_incremental_base_class, mocker, current_stream_state, latest_record, expected_state):
    """Updated state is the max created_at across state and latest record."""
    stream = IncrementalOrbStream()
    inputs = {"current_stream_state": current_stream_state, "latest_record": latest_record}
    assert stream.get_updated_state(**inputs) == expected_state
@pytest.mark.parametrize(
    ("config", "current_stream_state", "next_page_token", "expected_params"),
    [
        (
            {},
            dict(created_at="2022-01-25T12:00:00+00:00"),
            {"cursor": "f96594d0-8220-11ec-a8a3-0242ac120002"},
            {"created_at[gte]": "2022-01-25T12:00:00+00:00", "cursor": "f96594d0-8220-11ec-a8a3-0242ac120002"},
        ),
        ({}, dict(created_at="2022-01-25T12:00:00+00:00"), None, {"created_at[gte]": "2022-01-25T12:00:00+00:00"}),
        # Honors lookback_window_days
        (
            dict(lookback_window_days=3),
            dict(created_at="2022-01-25T12:00:00+00:00"),
            None,
            {"created_at[gte]": "2022-01-22T12:00:00+00:00"},
        ),
        ({}, {}, None, None),
        (dict(start_date=pendulum.parse("2022-01-25T12:00:00+00:00")), {}, None, {"created_at[gte]": "2022-01-25T12:00:00+00:00"}),
        (
            dict(start_date=pendulum.parse("2022-01-25T12:00:00+00:00")),
            dict(created_at="2022-01-26T12:00:00+00:00"),
            None,
            {"created_at[gte]": "2022-01-26T12:00:00+00:00"},
        ),
        # Honors lookback_window_days
        (
            dict(start_date=pendulum.parse("2022-01-25T12:00:00+00:00"), lookback_window_days=2),
            dict(created_at="2022-01-26T12:00:00+00:00"),
            None,
            {"created_at[gte]": "2022-01-24T12:00:00+00:00"},
        ),
    ],
)
def test_request_params(patch_incremental_base_class, mocker, config, current_stream_state, next_page_token, expected_params):
    """created_at[gte] is derived from state/start_date minus any lookback;
    the cursor is forwarded and the page-size limit is always present."""
    stream = IncrementalOrbStream(**config)
    inputs = {"stream_state": current_stream_state, "next_page_token": next_page_token}
    expected_params = expected_params or {}
    expected_params["limit"] = OrbStream.page_size
    assert stream.request_params(**inputs) == expected_params
# We have specific unit tests for CreditsLedgerEntries incremental stream
# because that employs slicing logic
@pytest.mark.parametrize(
    ("current_stream_state", "latest_record", "expected_state"),
    [
        # Updates for matching customer already in state
        (
            dict(customer_id_foo=dict(created_at="2022-01-25T12:00:00+00:00")),
            dict(created_at="2022-01-26T12:00:00+00:00", customer_id="customer_id_foo"),
            dict(customer_id_foo=dict(created_at="2022-01-26T12:00:00+00:00")),
        ),
        # No state for customer
        (
            {},
            dict(created_at="2022-01-26T12:00:00+00:00", customer_id="customer_id_foo"),
            dict(customer_id_foo=dict(created_at="2022-01-26T12:00:00+00:00")),
        ),
        # State has different customer than latest record
        (
            dict(customer_id_foo=dict(created_at="2022-01-25T12:00:00+00:00")),
            dict(created_at="2022-01-26T12:00:00+00:00", customer_id="customer_id_bar"),
            dict(
                customer_id_foo=dict(created_at="2022-01-25T12:00:00+00:00"),
                customer_id_bar=dict(created_at="2022-01-26T12:00:00+00:00"),
            ),
        ),
    ],
)
def test_credits_ledger_entries_get_updated_state(mocker, current_stream_state, latest_record, expected_state):
    """State is kept per customer_id; other customers' entries are untouched."""
    stream = CreditsLedgerEntries()
    inputs = {"current_stream_state": current_stream_state, "latest_record": latest_record}
    assert stream.get_updated_state(**inputs) == expected_state
def test_credits_ledger_entries_stream_slices(mocker):
    """One stream slice is produced per customer id."""
    mocker.patch.object(
        Customers, "read_records", return_value=iter([{"id": "1", "name": "Customer Foo"}, {"id": "18", "name": "Customer Bar"}])
    )
    stream = CreditsLedgerEntries()
    inputs = {"sync_mode": SyncMode.incremental, "cursor_field": [], "stream_state": {}}
    expected_stream_slice = [{"customer_id": "1"}, {"customer_id": "18"}]
    assert list(stream.stream_slices(**inputs)) == expected_stream_slice
@pytest.mark.parametrize(
    ("current_stream_state", "current_stream_slice", "next_page_token"),
    [
        # Slice matches customer in state, paginated request
        (
            dict(customer_id_foo=dict(created_at="2022-01-25T12:00:00+00:00")),
            dict(customer_id="customer_id_foo"),
            {"cursor": "f96594d0-8220-11ec-a8a3-0242ac120002"},
        ),
        # Slice matches customer in state, non-paginated request
        (dict(customer_id_foo=dict(created_at="2022-01-25T12:00:00+00:00")), dict(customer_id="customer_id_foo"), None),
        # Slice does not match customer in state, paginated request
        (
            dict(customer_id_foo=dict(created_at="2022-01-25T12:00:00+00:00")),
            dict(customer_id="customer_id_bar"),
            {"cursor": "f96594d0-8220-11ec-a8a3-0242ac120002"},
        ),
        # Slice does not match customer in state, non-paginated request
        (dict(customer_id_foo=dict(created_at="2022-01-25T12:00:00+00:00")), dict(customer_id="customer_id_bar"), None),
        ({}, dict(customer_id="customer_id_baz"), None),
    ],
)
def test_credits_ledger_entries_request_params(mocker, current_stream_state, current_stream_slice, next_page_token):
    """Params include limit and entry_status always, created_at[gte] only when
    the sliced customer has state, and the cursor only when paginating."""
    stream = CreditsLedgerEntries()
    inputs = {"stream_state": current_stream_state, "stream_slice": current_stream_slice, "next_page_token": next_page_token}
    expected_params = dict(limit=CreditsLedgerEntries.page_size, entry_status="committed")
    current_slice_state = current_stream_state.get(current_stream_slice["customer_id"], {})
    if current_slice_state.get("created_at"):
        expected_params["created_at[gte]"] = current_slice_state["created_at"]
    if next_page_token is not None:
        expected_params["cursor"] = next_page_token["cursor"]
    assert stream.request_params(**inputs) == expected_params
def test_credits_ledger_entries_transform_record(mocker):
    """transform_record flattens nested customer/credit_block sub-objects."""
    stream = CreditsLedgerEntries()
    ledger_entry_record = {
        "event_id": "foo-event-id",
        "entry_type": "decrement",
        "customer": {
            "id": "foo-customer-id",
        },
        "credit_block": {"expiry_date": "2023-01-25T12:00:00+00:00", "id": "2k6hj0s8dfhj0d7h", "per_unit_cost_basis": "2.50"},
    }
    # Validate that calling transform record unwraps nested customer and credit block fields.
    assert stream.transform_record(ledger_entry_record) == {
        "event_id": "foo-event-id",
        "entry_type": "decrement",
        "customer_id": "foo-customer-id",
        "block_expiry_date": "2023-01-25T12:00:00+00:00",
        "credit_block_id": "2k6hj0s8dfhj0d7h",
        "credit_block_per_unit_cost_basis": "2.50",
    }
@responses.activate
def test_credits_ledger_entries_no_matching_events(mocker):
    """When the events API returns no matching ids, entries are still
    reshaped (event_id nested under 'event') but not enriched."""
    stream = CreditsLedgerEntries(string_event_properties_keys=["ping"])
    ledger_entries = [{"event_id": "foo-event-id", "entry_type": "decrement"}, {"event_id": "bar-event-id", "entry_type": "decrement"}]
    mock_response = {
        "data": [
            {
                "customer_id": "foo-customer-id",
                "event_name": "foo-name",
                # Does not match either event_id that we'd expect
                "id": "foo-event-id-2",
                "properties": {"ping": "pong"},
                "timestamp": "2022-02-21T07:00:00+00:00",
            }
        ],
        "pagination_metadata": {"has_more": False, "next_cursor": None},
    }
    responses.add(responses.POST, f"{stream.url_base}events", json=mock_response, status=200)
    enriched_entries = stream.enrich_ledger_entries_with_event_data(ledger_entries)
    # We failed to enrich either event, but still check that the schema was
    # transformed as expected
    assert enriched_entries == [
        {"event": {"id": "foo-event-id"}, "entry_type": "decrement"},
        {"event": {"id": "bar-event-id"}, "entry_type": "decrement"},
    ]
@pytest.mark.parametrize(
    ("event_properties", "selected_string_property_keys", "selected_numeric_property_keys", "resulting_properties"),
    [
        ({}, ["event-property-foo"], [], {}),
        ({"ping": "pong"}, ["ping"], [], {"ping": "pong"}),
        ({"ping": "pong", "unnamed_property": "foo"}, ["ping"], [], {"ping": "pong"}),
        ({"unnamed_property": "foo"}, ["ping"], [], {}),
        ({"numeric_property": 1}, ["ping"], ["numeric_property"], {"numeric_property": 1}),
        ({"ping": "pong", "numeric_property": 1}, ["ping"], ["numeric_property"], {"ping": "pong", "numeric_property": 1}),
    ],
)
@responses.activate
def test_credits_ledger_entries_enriches_selected_property_keys(
    mocker, event_properties, selected_string_property_keys, selected_numeric_property_keys, resulting_properties
):
    """Only the configured string/numeric property keys survive enrichment;
    unselected properties are dropped."""
    stream = CreditsLedgerEntries(
        string_event_properties_keys=selected_string_property_keys, numeric_event_properties_keys=selected_numeric_property_keys
    )
    original_entry_1 = {"entry_type": "increment"}
    ledger_entries = [{"event_id": "foo-event-id", "entry_type": "decrement"}, original_entry_1]
    mock_response = {
        "data": [
            {
                "customer_id": "foo-customer-id",
                "event_name": "foo-name",
                "id": "foo-event-id",
                "properties": event_properties,
                "timestamp": "2022-02-21T07:00:00+00:00",
            }
        ],
        "pagination_metadata": {"has_more": False, "next_cursor": None},
    }
    responses.add(responses.POST, f"{stream.url_base}events", json=mock_response, status=200)
    enriched_entries = stream.enrich_ledger_entries_with_event_data(ledger_entries)
    assert enriched_entries[0] == {"entry_type": "decrement", "event": {"id": "foo-event-id", "properties": resulting_properties}}
    # Does not enrich, but still passes back, irrelevant (for enrichment purposes) ledger entry
    assert enriched_entries[1] == original_entry_1
@responses.activate
def test_credits_ledger_entries_enriches_with_multiple_entries_per_event(mocker):
    """A single event can enrich multiple ledger entries referencing it."""
    stream = CreditsLedgerEntries(string_event_properties_keys=["ping"])
    ledger_entries = [{"event_id": "foo-event-id", "entry_type": "decrement"}, {"event_id": "foo-event-id", "entry_type": "decrement"}]
    mock_response = {
        "data": [
            {
                "customer_id": "foo-customer-id",
                "event_name": "foo-name",
                "id": "foo-event-id",
                "properties": {"ping": "pong"},
                "timestamp": "2022-02-21T07:00:00+00:00",
            }
        ],
        "pagination_metadata": {"has_more": False, "next_cursor": None},
    }
    responses.add(responses.POST, f"{stream.url_base}events", json=mock_response, status=200)
    enriched_entries = stream.enrich_ledger_entries_with_event_data(ledger_entries)
    # We expect both events are enriched correctly
    assert enriched_entries == [
        {"event": {"id": "foo-event-id", "properties": {"ping": "pong"}}, "entry_type": "decrement"},
        {"event": {"id": "foo-event-id", "properties": {"ping": "pong"}}, "entry_type": "decrement"},
    ]
# We have specific unit tests for SubscriptionUsage incremental stream
# because its logic differs from other IncrementalOrbStreams
@pytest.mark.parametrize(
    ("current_stream_state", "latest_record", "expected_state"),
    [
        # Updates for matching subscription already in state
        (
            dict(subscription_id_foo=dict(timeframe_start="2022-01-25T12:00:00+00:00")),
            dict(timeframe_start="2022-01-26T12:00:00+00:00", subscription_id="subscription_id_foo"),
            dict(subscription_id_foo=dict(timeframe_start="2022-01-26T12:00:00+00:00")),
        ),
        # No state for subscription
        (
            {},
            dict(timeframe_start="2022-01-26T12:00:00+00:00", subscription_id="subscription_id_foo"),
            dict(subscription_id_foo=dict(timeframe_start="2022-01-26T12:00:00+00:00")),
        ),
        # State has different subscription than latest record
        (
            dict(subscription_id_foo=dict(timeframe_start="2022-01-25T12:00:00+00:00")),
            dict(timeframe_start="2022-01-26T12:00:00+00:00", subscription_id="subscription_id_bar"),
            dict(
                subscription_id_foo=dict(timeframe_start="2022-01-25T12:00:00+00:00"),
                subscription_id_bar=dict(timeframe_start="2022-01-26T12:00:00+00:00"),
            ),
        ),
    ],
)
def test_subscription_usage_get_updated_state(mocker, current_stream_state, latest_record, expected_state):
    """State is kept per subscription_id, cursored on timeframe_start."""
    stream = SubscriptionUsage(start_date="2022-01-25T12:00:00+00:00", end_date="2022-01-26T12:00:00+00:00")
    inputs = {"current_stream_state": current_stream_state, "latest_record": latest_record}
    assert stream.get_updated_state(**inputs) == expected_state
def test_subscription_usage_stream_slices(mocker):
    """One slice per subscription matching the configured plan_id, carrying
    the stream's timeframe bounds."""
    mocker.patch.object(
        Subscriptions, "read_records", return_value=iter([
            {"id": "1", "plan_id": "2"},
            {"id": "11", "plan_id": "2"},
            {"id": "111", "plan_id": "3"}  # should be ignored because plan_id set to 2
        ])
    )
    stream = SubscriptionUsage(plan_id="2", start_date="2022-01-25T12:00:00+00:00", end_date="2022-01-26T12:00:00+00:00")
    inputs = {"sync_mode": SyncMode.incremental, "cursor_field": [], "stream_state": {}}
    expected_stream_slice = [
        {"subscription_id": "1", "timeframe_start": "2022-01-25T12:00:00+00:00", "timeframe_end": "2022-01-26T12:00:00+00:00"},
        {"subscription_id": "11", "timeframe_start": "2022-01-25T12:00:00+00:00", "timeframe_end": "2022-01-26T12:00:00+00:00"}
    ]
    assert list(stream.stream_slices(**inputs)) == expected_stream_slice
def test_subscription_usage_stream_slices_with_grouping_key(mocker):
    """With a grouping key, slices expand to one per billable metric per
    matching subscription."""
    mocker.patch.object(
        Subscriptions, "read_records", return_value=iter([
            {"id": "1", "plan_id": "2"},
            {"id": "11", "plan_id": "2"},
            {"id": "111", "plan_id": "3"}  # should be ignored because plan_id set to 2
        ])
    )
    mocker.patch.object(
        Plans, "read_records", return_value=iter([
            {"id": "2", "prices": [
                {"billable_metric": {"id": "billableMetricIdA"}},
                {"billable_metric": {"id": "billableMetricIdB"}}
            ]},
            {"id": "3", "prices": [  # should be ignored because plan_id is set to 2
                {"billable_metric": {"id": "billableMetricIdC"}}
            ]}
        ])
    )
    # when a grouping_key is present, one slice per billable_metric is created because the Orb API
    # requires one API call per billable metric if the group_by param is in use.
    stream = SubscriptionUsage(plan_id="2", subscription_usage_grouping_key="groupKey", start_date="2022-01-25T12:00:00+00:00", end_date="2022-01-26T12:00:00+00:00")
    inputs = {"sync_mode": SyncMode.incremental, "cursor_field": [], "stream_state": {}}
    # one slice per billable metric per subscription that matches the input plan
    expected_stream_slice = [
        {"subscription_id": "1", "billable_metric_id": "billableMetricIdA", "timeframe_start": "2022-01-25T12:00:00+00:00", "timeframe_end": "2022-01-26T12:00:00+00:00"},
        {"subscription_id": "1", "billable_metric_id": "billableMetricIdB", "timeframe_start": "2022-01-25T12:00:00+00:00", "timeframe_end": "2022-01-26T12:00:00+00:00"},
        {"subscription_id": "11", "billable_metric_id": "billableMetricIdA", "timeframe_start": "2022-01-25T12:00:00+00:00", "timeframe_end": "2022-01-26T12:00:00+00:00"},
        {"subscription_id": "11", "billable_metric_id": "billableMetricIdB", "timeframe_start": "2022-01-25T12:00:00+00:00", "timeframe_end": "2022-01-26T12:00:00+00:00"},
    ]
    assert list(stream.stream_slices(**inputs)) == expected_stream_slice
@pytest.mark.parametrize(
    ("current_stream_state", "current_stream_slice", "grouping_key"),
    [
        # Slice matches subscription in state, no grouping
        (
            dict(subscription_id_foo=dict(timeframe_start="2022-01-25T12:00:00+00:00")),
            dict(subscription_id="subscription_id_foo", timeframe_start="2022-01-25T12:00:00+00:00", timeframe_end="2022-01-26T12:00:00+00:00"),
            None
        ),
        # Slice does not match subscription in state, no grouping
        (
            dict(subscription_id_foo=dict(timeframe_start="2022-01-25T12:00:00+00:00")),
            dict(subscription_id="subscription_id_bar", timeframe_start="2022-01-25T12:00:00+00:00", timeframe_end="2022-01-26T12:00:00+00:00"),
            None
        ),
        # No existing state, no grouping
        (
            {},
            dict(subscription_id="subscription_id_baz", timeframe_start="2022-01-25T12:00:00+00:00", timeframe_end="2022-01-26T12:00:00+00:00"),
            None
        ),
        # Slice matches subscription in state, with grouping
        (
            dict(subscription_id_foo=dict(timeframe_start="2022-01-25T12:00:00+00:00")),
            dict(subscription_id="subscription_id_foo", billable_metric_id="billableMetricA", timeframe_start="2022-01-25T12:00:00+00:00", timeframe_end="2022-01-26T12:00:00+00:00"),
            "group_key_foo"
        ),
        # Slice does not match subscription in state, with grouping
        (
            dict(subscription_id_foo=dict(timeframe_start="2022-01-25T12:00:00+00:00")),
            dict(subscription_id="subscription_id_bar", billable_metric_id="billableMetricA", timeframe_start="2022-01-25T12:00:00+00:00", timeframe_end="2022-01-26T12:00:00+00:00"),
            "group_key_foo"
        ),
        # No existing state, with grouping
        (
            {},
            dict(subscription_id="subscription_id_baz", billable_metric_id="billableMetricA", timeframe_start="2022-01-25T12:00:00+00:00", timeframe_end="2022-01-26T12:00:00+00:00"),
            "group_key_foo"
        ),
    ],
)
def test_subscription_usage_request_params(mocker, current_stream_state, current_stream_slice, grouping_key):
    """Timeframe bounds always come from the slice; group_by and
    billable_metric_id are added only when a grouping key is configured."""
    if grouping_key:
        stream = SubscriptionUsage(start_date="2022-01-25T12:00:00+00:00", end_date="2022-01-26T12:00:00+00:00", subscription_usage_grouping_key=grouping_key)
    else:
        stream = SubscriptionUsage(start_date="2022-01-25T12:00:00+00:00", end_date="2022-01-26T12:00:00+00:00")
    inputs = {"stream_state": current_stream_state, "stream_slice": current_stream_slice}
    expected_params = dict(granularity="day")
    # always pull the timeframe_start and timeframe_end from the stream slice
    expected_params["timeframe_start"] = current_stream_slice["timeframe_start"]
    expected_params["timeframe_end"] = current_stream_slice["timeframe_end"]
    # if a grouping_key is present, add the group_by and billable_metric_id to params
    if grouping_key:
        expected_params["group_by"] = grouping_key
        expected_params["billable_metric_id"] = current_stream_slice["billable_metric_id"]
    assert stream.request_params(**inputs) == expected_params
def test_subscription_usage_yield_transformed_subrecords(mocker):
    """Each non-zero usage entry becomes one flattened record tagged with
    the billable metric, other top-level fields, and the subscription id."""
    stream = SubscriptionUsage(start_date="2022-01-25T12:00:00+00:00", end_date="2022-01-26T12:00:00+00:00")
    subscription_usage_response = {
        "billable_metric": {
            "name": "Metric A",
            "id": "billableMetricA"
        },
        "usage": [
            {
                "quantity": 0,
                "timeframe_start": "2022-01-25T12:00:00+00:00",
                "timeframe_end": "2022-01-26T12:00:00+00:00"
            },
            {
                "quantity": 1,
                "timeframe_start": "2022-01-25T12:00:00+00:00",
                "timeframe_end": "2022-01-26T12:00:00+00:00"
            },
            {
                "quantity": 2,
                "timeframe_start": "2022-01-26T12:00:00+00:00",
                "timeframe_end": "2022-01-27T12:00:00+00:00"
            }
        ],
        "otherTopLevelField": {
            "shouldBeIncluded": "true"
        }
    }
    subscription_id = "subscriptionIdA"
    # Validate that one record is yielded per non-zero usage subrecord
    expected = [
        {
            "quantity": 1,
            "timeframe_start": "2022-01-25T12:00:00+00:00",
            "timeframe_end": "2022-01-26T12:00:00+00:00",
            "billable_metric_name": "Metric A",
            "billable_metric_id": "billableMetricA",
            "otherTopLevelField": {
                "shouldBeIncluded": "true"
            },
            "subscription_id": subscription_id
        },
        {
            "quantity": 2,
            "timeframe_start": "2022-01-26T12:00:00+00:00",
            "timeframe_end": "2022-01-27T12:00:00+00:00",
            "billable_metric_name": "Metric A",
            "billable_metric_id": "billableMetricA",
            "otherTopLevelField": {
                "shouldBeIncluded": "true"
            },
            "subscription_id": subscription_id
        }
    ]
    actual_output = list(stream.yield_transformed_subrecords(subscription_usage_response, subscription_id))
    assert actual_output == expected
def test_subscription_usage_yield_transformed_subrecords_with_grouping(mocker):
    """With a grouping key configured, the metric_group's property_value is
    copied onto every yielded record under the grouping key's name."""
    stream = SubscriptionUsage(start_date="2022-01-25T12:00:00+00:00", end_date="2022-01-26T12:00:00+00:00", subscription_usage_grouping_key="grouping_key")
    subscription_usage_response = {
        "billable_metric": {
            "name": "Metric A",
            "id": "billableMetricA"
        },
        "metric_group": {
            "property_key": "grouping_key",
            "property_value": "grouping_value"
        },
        "usage": [
            {
                "quantity": 0,
                "timeframe_start": "2022-01-25T12:00:00+00:00",
                "timeframe_end": "2022-01-26T12:00:00+00:00"
            },
            {
                "quantity": 1,
                "timeframe_start": "2022-01-25T12:00:00+00:00",
                "timeframe_end": "2022-01-26T12:00:00+00:00"
            },
            {
                "quantity": 2,
                "timeframe_start": "2022-01-26T12:00:00+00:00",
                "timeframe_end": "2022-01-27T12:00:00+00:00"
            }
        ],
        "otherTopLevelField": {
            "shouldBeIncluded": "true"
        }
    }
    subscription_id = "subscriptionIdA"
    # Validate that one record is yielded per non-zero usage subrecord
    expected = [
        {
            "quantity": 1,
            "timeframe_start": "2022-01-25T12:00:00+00:00",
            "timeframe_end": "2022-01-26T12:00:00+00:00",
            "billable_metric_name": "Metric A",
            "billable_metric_id": "billableMetricA",
            "otherTopLevelField": {
                "shouldBeIncluded": "true"
            },
            "subscription_id": subscription_id,
            "grouping_key": "grouping_value"
        },
        {
            "quantity": 2,
            "timeframe_start": "2022-01-26T12:00:00+00:00",
            "timeframe_end": "2022-01-27T12:00:00+00:00",
            "billable_metric_name": "Metric A",
            "billable_metric_id": "billableMetricA",
            "otherTopLevelField": {
                "shouldBeIncluded": "true"
            },
            "subscription_id": subscription_id,
            "grouping_key": "grouping_value"
        }
    ]
    actual_output = list(stream.yield_transformed_subrecords(subscription_usage_response, subscription_id))
    assert actual_output == expected
def test_supports_incremental(patch_incremental_base_class, mocker):
    """A stream with a cursor field reports incremental support."""
    mocker.patch.object(IncrementalOrbStream, "cursor_field", "dummy_field")
    stream = IncrementalOrbStream()
    assert stream.supports_incremental


def test_source_defined_cursor(patch_incremental_base_class):
    """The cursor is defined by the source, not chosen by the user."""
    stream = IncrementalOrbStream()
    assert stream.source_defined_cursor


def test_stream_checkpoint_interval(patch_incremental_base_class):
    """No fixed checkpoint interval is enforced."""
    stream = IncrementalOrbStream()
    assert stream.state_checkpoint_interval is None
|
#!/usr/bin/env python
from ListNode import ListNode, ListTestCase
# Build a 10-node demo list (0..9) and print it before reversing.
head = ListTestCase(range(10)).head
ListTestCase.prettyPrint(head)
def reverse(head):
    """Reverse a singly linked list in place and return its dummy head.

    *head* is a dummy node whose .next points at the first real node;
    after the call head.next points at the former tail. An empty list
    (head.next is None) is a no-op.
    """
    prev = None
    node = head.next
    while node is not None:  # identity test instead of '!= None' (PEP 8)
        nxt = node.next      # save the rest before re-pointing this node
        node.next = prev
        prev = node
        node = nxt
    head.next = prev
    return head
# Print the list again after the in-place reversal.
ListTestCase.prettyPrint(reverse(head))
|
# train_model.py
import numpy as np
from alexnet import alexnet
# Hyperparameters and model identification.
WIDTH = 205  # 80
HEIGHT = 155  # 60
LR = 1e-3
EPOCHS = 8
MODEL_NAME = 'pyET2-car-{}-{}-{}-epochs.model'.format(LR, 'alexnetv2', EPOCHS)

model = alexnet(WIDTH, HEIGHT, LR)

print('Ready to load data, loading now: ')
train_data = np.load('shuffled_final.npy')
print('Done loading')

# Hold out the last 500 samples for validation.
train = train_data[:-500]
test = train_data[-500:]

X = np.array([i[0] for i in train]).reshape(-1, WIDTH, HEIGHT, 1)
Y = [i[1] for i in train]
# BUG FIX: the validation arrays were previously built from `train`, so the
# model was "validated" on its own training data; use the held-out `test`.
test_x = np.array([i[0] for i in test]).reshape(-1, WIDTH, HEIGHT, 1)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y}, n_epoch=EPOCHS,
          validation_set=({'input': test_x}, {'targets': test_y}),
          snapshot_step=500, show_metric=True, run_id=MODEL_NAME)

# tensorboard --logdir=foo:C:\PythonET2\snoh_ET_v3_training_alexnet\log
model.save(MODEL_NAME)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
# Module-level state shared by the helpers below.
data_dict = {}      # {-1: negative samples, 1: positive samples}
sample_data = None  # full design matrix (bias column of ones prepended)
labels = None       # +/-1 class labels
NUM_EPOCHS = 100000
X_train = []
X_test = []
Y_train = []
Y_test = []
def create_data():
    """Generate a 2-class blob dataset and populate the module globals
    (sample_data, labels, data_dict, max_fval)."""
    global data_dict, sample_data, labels, min_fval, max_fval
    X0, labels = make_blobs(n_samples=100, n_features=2, centers=2,
                            cluster_std=1.05, random_state=10)
    # Prepend a bias column of ones.
    X1 = np.c_[np.ones((X0.shape[0])), X0]
    # Change labels of negative points to -1
    labels[labels == 0] = -1
    sample_data = X1
    negative_x = [X1[i] for i, label in enumerate(labels) if label == -1]
    positive_x = [X1[i] for i, label in enumerate(labels) if label != -1]
    data_dict = {-1: np.array(negative_x), 1: np.array(positive_x)}
    # Maximum feature value across the entire dataset (used to scale eta).
    max_fval = max(np.amax(data_dict[y_i]) for y_i in data_dict)
def split_train_test_data():
    """Split the module-level dataset into 80% train / 20% test globals."""
    global X_train, Y_train, X_test, Y_test
    cut = int(len(sample_data) * 0.8)
    X_train, X_test = sample_data[:cut], sample_data[cut:]
    Y_train, Y_test = labels[:cut], labels[cut:]
def train(data_dict):
    """Train a linear SVM with stochastic subgradient descent.

    Reads the module globals X_train, Y_train, NUM_EPOCHS and max_fval.
    Returns the weight vector averaged over all epochs; w[0] is the
    (unregularized) bias term.
    """
    global X_train, Y_train
    # Initialise weights with dimension as number of features
    w = np.zeros(X_train.shape[1])
    eta = 0.1 * max_fval
    _lambda = 0.5
    weights = []
    # Setting seed so that np.random.uniform picks predictable z values
    np.random.seed(1)
    for epoch in range(1, NUM_EPOCHS):
        # Pick an instance from dataset at random with uniform prob.
        z = int(np.random.uniform(0, X_train.shape[0]))
        if (Y_train[z] * (np.dot(X_train[z][1:], w[1:]) + w[0])) >= 1:
            v = _lambda * w
        else:
            v = _lambda * w - Y_train[z] * X_train[z]
            # NOTE(review): original indentation was ambiguous; bias update is
            # assumed to apply only on margin violations — confirm upstream.
            w[0] = w[0] + Y_train[z]
        # Not updating bias with other weights so that bias is not regularized
        w[1:] = w[1:] - eta * v[1:]
        # BUG FIX: append a snapshot, not the array object itself. Appending
        # `w` stored NUM_EPOCHS references to the same mutating array, so the
        # "average" below was effectively just the final weight vector.
        weights.append(w.copy())
        if eta > 1e-4:
            eta = eta * 0.1
    # Return average weights over epochs
    result_weight = 1 / NUM_EPOCHS * (np.sum(weights, axis=0))
    return result_weight
def test(weights):
    """Evaluate *weights* on the module-level test split.

    Returns (num_test_points, num_misclassified)."""
    misclassified = sum(
        1
        for i in range(X_test.shape[0])
        if np.sign(np.dot(X_test[i], weights)) != Y_test[i]
    )
    return X_test.shape[0], misclassified
def draw(weights):
    # Visualise the data plus the learned separator and its +/-1 margins.
    # weights layout: [bias, w1, w2] (bias column was prepended in create_data).
    fig, ax = plt.subplots(figsize=(7, 5))
    fig.patch.set_facecolor('white')
    cdict = {-1: 'red', 1: 'blue'}
    for key, data in data_dict.items():
        ax.scatter(data_dict[key][:, 1], data_dict[key][:, 2], c=cdict[key])
    # Find 2 points with minimum and max value in feature dimension 1. Find corresponding y values below to plot line
    min_x = np.min(sample_data[:, 1])
    max_x = np.max(sample_data[:, 1])
    # <w.x> + b = -1  (negative-class margin)
    neg_y_xmin = (-weights[1] * min_x - weights[0] - 1) / weights[2]
    neg_y_xmax = (-weights[1] * max_x - weights[0] - 1) / weights[2]
    ax.plot([min_x, max_x], [neg_y_xmin, neg_y_xmax], 'k')
    # <w.x> + b = 1  (positive-class margin)
    pos_y_xmin = (-weights[1] * min_x - weights[0] + 1) / weights[2]
    pos_y_xmax = (-weights[1] * max_x - weights[0] + 1) / weights[2]
    ax.plot([min_x, max_x], [pos_y_xmin, pos_y_xmax], 'k')
    # <w.x> + b = 0  (decision boundary)
    min_y = (-weights[1] * min_x - weights[0]) / weights[2]
    max_y = (-weights[1] * max_x - weights[0]) / weights[2]
    print((-weights[1] * 0 - weights[0]) / weights[2])
    ax.plot([min_x, max_x], [min_y, max_y], 'r--')
    plt.show()
def main():
    """Run the whole experiment: build data, train, evaluate, plot, report."""
    create_data()
    split_train_test_data()
    weights = train(data_dict)
    num_points, num_misclassified = test(weights)
    draw(weights)
    # Mode 'w' truncates any previous content, so the original's separate
    # open('ReadMe.txt', 'w').close() truncation step was redundant.
    with open('ReadMe.txt', 'w') as outputfile:
        print("the total number of data points on which your test method was run, : " + str(num_points), file=outputfile)
        print("the total number of data points on which your test method was misclassified, : " + str(num_misclassified), file=outputfile)
# Script entry point.
if __name__ == "__main__":
    main()
|
from .utils import *
from .base_actuator import Actuator, HighLevelActuator
from .wheel_actuator import WheelActuator
from .communication_transmitter import CommunicationTransmitter
from .RF_transmitter import RF_Transmitter
from .led_actuator import LedActuator
from .joint_actuator import JointPositionActuator, JointVelocityActuator
from .grasp_actuator import GraspActuator |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Backfill ``station_number`` from digits found in the USGS structure name.

    Battalion numbers (e.g. "Battalion 3") are stripped first so they are not
    mistaken for the station number. Only rows with a NULL station_number and
    a digit somewhere in the name are touched.
    """

    dependencies = [
        ('firestation', '0018_assign_station_number'),
    ]

    # Fix: raw string. The SQL contains regex escapes such as \d and \s, which
    # are invalid escape sequences in a normal Python string literal (they emit
    # a DeprecationWarning/SyntaxWarning on modern Pythons). The r-prefix
    # leaves the string's bytes unchanged.
    sql = r"""
    BEGIN TRANSACTION;

    -- If postgres supported negative lookbehind regexes, this'd be a little simpler
    SELECT src.id, array_to_string(regexp_matches(src.newname, '(\d+)'), ',') as num, src.name
    INTO TEMP tmp_fs
    FROM (
        SELECT sd.id, regexp_replace(sd.name, 'Battalion\s+\d+', '') as newname, sd.name
        FROM firestation_firestation fs
        INNER JOIN firestation_usgsstructuredata sd on sd.id = fs.usgsstructuredata_ptr_id
        WHERE sd.name ~ '.*\d+.*'
        AND fs.station_number is null
    ) AS src;

    UPDATE firestation_firestation
    SET station_number = cast(tmp_fs.num as int)
    FROM tmp_fs
    WHERE tmp_fs.id = usgsstructuredata_ptr_id;

    DROP TABLE tmp_fs;

    COMMIT;
    """

    operations = [
        migrations.RunSQL(sql),
    ]
|
#
# Copyright 2016 iXsystems, Inc.
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################
import logging
import socket
import threading
import urllib.parse
from msock.client import Connection
class Server(object):
    """Listens on a TCP or UNIX socket and hands each accepted client to
    ``on_connection`` wrapped in a :class:`Connection`."""

    def __init__(self):
        # Callback invoked with each accepted Connection; callers override it.
        self.on_connection = lambda conn: None
        self._logger = logging.getLogger(self.__class__.__name__)
        self._uri = None
        self._socket = None
        self._connections = []
        self._lock = threading.RLock()

    def open(self, uri):
        """Bind the listening socket described by *uri*.

        Supported forms: ``tcp://host:port`` and ``unix://path``.
        Raises RuntimeError for any other scheme.
        """
        parsed = urllib.parse.urlparse(uri, 'tcp')
        if parsed.scheme == 'tcp':
            af = socket.AF_INET
            address = (parsed.hostname, parsed.port)
        elif parsed.scheme == 'unix':
            af = socket.AF_UNIX
            address = parsed.netloc
        else:
            raise RuntimeError('Unsupported scheme {0}'.format(parsed.scheme))

        # Bug fix: remember the URI so run() logs the real endpoint; the
        # original never assigned it and always logged None.
        self._uri = uri
        self._socket = socket.socket(af, socket.SOCK_STREAM)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(address)

    def run(self):
        """Accept clients forever, invoking self.on_connection for each one."""
        self._logger.debug('Listening for client connections on {0}'.format(self._uri))
        self._socket.listen()

        while True:
            sock, addr = self._socket.accept()
            self._logger.debug('Accepted client from {0}'.format(addr))
            conn = Connection()
            conn._socket = sock
            conn._address = addr
            conn.open()
            self.on_connection(conn)
|
# vim: ai ts=4 sts=4 et sw=4
from datetime import datetime
from django.db import models
from mwana.apps.locations.models import Location
from rapidsms.models import Contact
class SMSAlertLocation(models.Model):
    """
    Manages facilities that can receive SMS alerts
    """
    # District this setting applies to; choices limited to locations of type "districts".
    district = models.ForeignKey(Location, limit_choices_to={"type__slug":"districts"})
    # Whether SMS alerts are turned on for the district.
    enabled = models.BooleanField(default=False)

    def __str__(self):
        return "%s: %s" % (self.district.name, "Enabled" if self.enabled else "Not Enabled")
class Hub(models.Model):
    """A named hub within a district, with an optional contact phone number."""
    name = models.CharField(max_length=50)
    # unique=True: at most one hub per district.
    district = models.ForeignKey(Location, unique=True)
    phone = models.CharField(max_length=15, null=True, blank=True)

    def __str__(self):
        return "%s in %s district. Contact: %s" % (self.name,
                                                   self.district.name,
                                                   self.phone)
class Lab(models.Model):
    """A lab identified by an external source key, with optional name/phone."""
    # Identifier used by the external data source for this lab.
    source_key = models.CharField(max_length=50)
    name = models.CharField(max_length=50, null=True, blank=True)
    phone = models.CharField(max_length=15, null=True, blank=True)

    def __str__(self):
        return "%s. %s . Contact: %s" % (self.source_key, self.name, self.phone)
class DhoSMSAlertNotification(models.Model):
    """
    Records alerts sent to DHO staff
    """
    # Reporting cadence the alert belongs to.
    REPORT_TYPES = (
        ('M', 'Monthly Alert'),
        ('W', 'Weekly Alert'),
    )
    # Kind of condition that triggered the alert.
    ALERT_TYPES = (
        ("1", "DISTRICT_NOT_SENDING_DBS"),
        ("2", "LONG_PENDING_RESULTS"),
        ("3", "CLINIC_NOT_USING_SYSTEM"),
        ("4", "LAB_NOT_PROCESSING_DBS"),
        # NOTE(review): "PAYLOD" is a typo for "PAYLOAD", but the value may
        # already exist in stored rows, so it is left unchanged here.
        ("5", "LAB_NOT_SENDING_PAYLOD"),
        ("6", "CLINIC_NOT_USING_TRACE"),
    )
    # Recipient of the alert (optional — may be unknown).
    contact = models.ForeignKey(Contact, blank=True, null=True)
    district = models.ForeignKey(Location)
    report_type = models.CharField(choices=REPORT_TYPES, max_length=1, blank=True)
    alert_type = models.CharField(choices=ALERT_TYPES, max_length=1, blank=True)
    # Timestamp recorded when the row is created.
    date_sent = models.DateTimeField(default=datetime.now)
__author__ = 'christopherrivera'
########################################################################################################################
# This contains functions and classes for scraping the web or parsing websites
########################################################################################################################
import requests
from bs4 import BeautifulSoup
from os.path import join
from multiprocessing.pool import ThreadPool as Pool
from validators.url import url as validate_url
import string
import pycurl
import StringIO
from requests.exceptions import Timeout, ReadTimeout, TooManyRedirects, ConnectionError
remove_strings = ' '.join([string.punctuation,'1234567890'])
def parse_url(url):
    """Parses the string of a url and returns its components as a list."""
    normalized = url.replace('.', '/').replace('//', '/').replace(':', '')
    return normalized.split('/')
def get_host(url):
    """Return the scheme+host portion of *url* (everything before the first path slash)."""
    protect = '*****'
    # Shield the scheme's '//' so the find() below matches the path slash.
    shielded = url.replace('//', protect)
    cut = shielded.find('/')
    return shielded[:cut].replace(protect, '//')
def rename_url(url, suffix=''):
    """Strip '/' from *url*, replace '.' with '_', and append *suffix*.

    Parameters:
        url (str): the url
        suffix (str): a decorator appended to the sanitized name.

    Returns:
        str: the processed url
    """
    sanitized = url.replace('/', '').replace('.', '_')
    return sanitized + suffix
def download_html(url, savepath='/Users/christopherrivera/Desktop', suffix='', ext='txt'):
    """Fetch *url* and write the raw HTML under *savepath*.

    Silently returns on connection errors (best-effort download).
    """
    try:
        content = requests.get(url).content
    except requests.ConnectionError:
        return

    # File name is the sanitized URL plus the extension, rooted at savepath.
    target = join(savepath, '.'.join([rename_url(url, suffix), ext]))
    with open(target, 'w') as f:
        f.write(content)
def download_multiple_htmls(urls, threads=8):
    """Download several URLs concurrently with a thread pool.

    Fix: the original never closed the pool, leaking its worker threads;
    close() + join() releases them once the map completes.
    """
    pool = Pool(threads)
    try:
        pool.map(download_html, urls)
    finally:
        pool.close()
        pool.join()
def valid_download_html(url, savepath='/Volumes/Mac/GoGuardianHTMLS', ext='txt', validate=False):
    """Optionally validate *url*, then download it with a 10s timeout and save the HTML.

    Parameters:
        url (str): the url name.
    Return:
        tuple: (original url, True if downloaded and saved else False)
    """
    # Keep the unmodified URL for naming the output file and for the return value.
    old_url = url

    # If requested, validate the url and prepend a protocol when it fails.
    if validate and not validate_url(url):
        url = ''.join(['http://', url])

    # Allow the request to time out after only 10 seconds (connect and read).
    try:
        content = requests.get(url, timeout=(10, 10)).content
    except (Timeout, ReadTimeout, TooManyRedirects, ConnectionError):
        return old_url, False

    # Build the output file name and save to disk.
    target = join(savepath, '.'.join([rename_url(old_url, suffix=''), ext]))
    with open(target, 'w') as f:
        f.write(content)
    return old_url, True
def download_multiple_html_with_pycurl(urls, savepath='/Volumes/Mac/Insight/GoGuardianHTMLS-9-17-2015-b', ext='txt'):
    """Download a list of urls with pycurl and write each to *savepath*.

    Parameters:
        urls (list): list of url strings
        savepath (str): path to save location
        ext (str): extension appended to each output file
    Returns None
    """
    # One curl handle reused for every URL; follow up to 5 redirects.
    curl = pycurl.Curl()
    curl.setopt(pycurl.FOLLOWLOCATION, 1)
    curl.setopt(pycurl.MAXREDIRS, 5)

    for url in urls:
        # Keep the original url for naming the output file.
        old_url = url

        # Validate the url and prepend a protocol if needed.
        valid = validate_url(url)
        if not valid:
            url = ''.join(['http://', url])
        curl.setopt(pycurl.URL, url)

        try:
            # Stream the response into an in-memory buffer.
            b = StringIO.StringIO()
            curl.setopt(pycurl.WRITEFUNCTION, b.write)
            curl.perform()
            response = b.getvalue()
            b.close()

            # Build the file name and save.
            filename = rename_url(old_url, suffix='')
            filename = '.'.join([filename, ext])
            filename = join(savepath, filename)
            with open(filename, 'w') as f:
                f.write(response)
        except Exception:
            # Fix: best-effort skip of failed URLs, but ``except Exception``
            # no longer swallows KeyboardInterrupt/SystemExit the way the
            # original bare ``except:`` did.
            pass
    curl.close()
def make_url_valid(url):
    """Validates a url, and adds an http protocol if not valid.

    Parameters:
        url (str): a url string.
    Return:
        new url (str)
    """
    if validate_url(url):
        return url
    return ''.join(['http://', url])
########################################################################################################################
# Below are functions for using Beatiful soup.
########################################################################################################################
def get_soup(url):
    """Fetch *url* and return its BeautifulSoup DOM with script/style removed."""
    soup = BeautifulSoup(requests.get(url).content)
    # Drop javascript and CSS nodes — their text is noise for scraping.
    for node in soup(["script", "style"]):
        node.extract()
    return soup
def get_soup_from_text(text):
    """
    Build soup from an HTML file on disk, removing script and style tags.

    Parameters:
        text (str): location of the website.
    :return: soup
    """
    with open(text, 'r') as f:
        soup = BeautifulSoup(f)
    # Strip out the javascript and CSS nodes.
    for node in soup(['script', 'style']):
        node.extract()
    return soup
def get_tag_text(soup, tag='a', count_tags=True):
    """
    Collect the text of every *tag* element in *soup*.

    Parameters:
        soup (BeautifulSoup): the soup.
        tag (str): the tag to extract.
        count_tags (bool): also return the number of matching tags.
    Return:
        the joined text, or (text, tag count) when count_tags is True.
    """
    snippets = [node.get_text().strip() for node in soup.find_all(tag)]
    text = ' '.join(snippets)

    # Encode/decode round trip drops unusual characters (Python 2 idiom).
    text = text.encode('utf8').decode('unicode_escape').encode('ascii', 'ignore')

    # Blank out punctuation and whitespace-control characters.
    for ch in string.punctuation:
        text = text.replace(ch, ' ')
    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')

    if count_tags:
        return text, len(snippets)
    return text
def get_paragraphs(soup, count_tags=True):
    """Text (and optionally count) of all <p> tags."""
    return get_tag_text(soup, 'p', count_tags)


def get_title(soup, count_tags=True):
    """Text (and optionally count) of all <title> tags."""
    return get_tag_text(soup, 'title', count_tags)


def get_links(soup, count_tags=True):
    """Text (and optionally count) of all <a> tags."""
    return get_tag_text(soup, 'a', count_tags)


def get_images(soup, count_tags=True):
    """Text (and optionally count) of all <img> tags."""
    return get_tag_text(soup, 'img', count_tags)


def get_meta(soup, count_tags=True):
    """Text (and optionally count) of all <meta> tags."""
    return get_tag_text(soup, 'meta', count_tags)


def get_header(soup, count_tags=True):
    """Text (and optionally count) of all <header> tags."""
    return get_tag_text(soup, 'header', count_tags)
def scrape_all_text(text):
    """Scrape all text of interest from an html text document.

    Collects the text of the p, title, a, img, meta and header tags.

    Parameters:
        text (str): Path to the text file (html)
    Returns:
        (str): a string with all the text for parsing later by sklearn.
    """
    soup = get_soup_from_text(text)

    def tag_text(tag):
        # Join the text of every element carrying this tag.
        return ' '.join([node.get_text() for node in soup.find_all(tag)])

    combined = ' '.join([tag_text(t) for t in ['p', 'title', 'a', 'img', 'meta', 'header']])

    # Encode/decode round trip drops unusual characters (Python 2 idiom).
    combined = combined.encode('utf8').decode('unicode_escape').encode('ascii', 'ignore')

    # Blank out punctuation/digits and whitespace-control characters.
    for ch in remove_strings:
        combined = combined.replace(ch, ' ')
    combined = combined.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    return combined
def scrape_text_and_count_tags(text):
    """Scrape the full text of a saved html document and count selected tags.

    Parameters:
        text (str): Path to the text file (html)
    Returns:
        (text, counts for p/title/a/img/meta/header tags, word count of text)
    """
    soup = get_soup_from_text(text)
    full_text = soup.get_text()

    # Count the number of elements for each tag of interest.
    tags = ['p', 'title', 'a', 'img', 'meta', 'header']
    counts = [len(soup.find_all(t)) for t in tags]

    # Encode/decode round trip drops unusual characters (Python 2 idiom).
    full_text = full_text.encode('utf8').decode('unicode_escape').encode('ascii', 'ignore')

    # Blank out punctuation/digits and whitespace-control characters.
    for ch in remove_strings:
        full_text = full_text.replace(ch, ' ')
    full_text = full_text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')

    return full_text, counts, len(full_text.split())
|
import pygame
from pygame.locals import *
from EventManager import *
from Charactor import CharactorViewEntity
from Sprite import *
class PygameView:
    """Pygame-based view: renders the map sectors and charactor sprites and
    reacts to events dispatched by the EventManager."""

    def __init__(self, evManager):
        self.evManager = evManager
        #List every event for which this object listens
        self.evManager.registerListener(self,[TickEvent,MapBuiltEvent,CreateCharactorViewEntityEvent,CharactorMoveEvent])

        pygame.init()
        self.window = pygame.display.set_mode((424,440))
        pygame.display.set_caption('Example Game')
        self.background = pygame.Surface(self.window.get_size(), flags=pygame.SRCALPHA)
        self.background.fill((0,0,0))
        # Splash text shown until the first map is drawn.
        font = pygame.font.Font(None, 30)
        text = """STARTING"""
        textImg = font.render( text, 1, (255,0,0))
        self.background.blit( textImg, (0,0) )
        self.window.blit( self.background, (0,0) )
        pygame.display.flip()

        # backSprites: map sectors; frontSprites: charactors drawn on top.
        self.backSprites = pygame.sprite.RenderUpdates()
        self.frontSprites = pygame.sprite.RenderUpdates()

    #----------------------------------------------------------------------
    #This is where we paint the screen.
    def showMap(self, gameMap):
        """Lay out one SectorSprite per map sector in a 3-column grid."""
        # clear the screen first
        self.background.fill((0,0,0))
        self.window.blit(self.background,(0,0))
        pygame.display.flip()

        # use this squareRect as a cursor and go through the
        # columns and rows and assign the rect
        # positions of the SectorSprites
        # (starts off-screen at x=-128 so the first move lands at x=10).
        squareRect = pygame.Rect( (-128,10, 128,128 ) )

        column = 0
        for sector in gameMap.sectors:
            if column < 3:
                squareRect = squareRect.move( 138,0 )
            else:
                # Wrap to the start of the next row.
                column = 0
                squareRect = squareRect.move( -(138*2), 138 )
            column += 1
            newSprite = SectorSprite( sector, self.backSprites )
            newSprite.rect = squareRect
            newSprite = None

    #----------------------------------------------------------------------
    def createCharactorSprite(self, entity, playerNumber):
        """Create a view sprite for *entity*, centered on its sector's sprite."""
        sector = entity.sector
        charactorViewEntity = CharactorViewEntity(self.evManager, entity, playerNumber, self.frontSprites)
        sectorSprite = self.getSectorSprite( sector )
        charactorViewEntity.rect.center = sectorSprite.rect.center

    #----------------------------------------------------------------------
    def moveCharactor(self, charactor):
        """Ask the charactor's sprite to glide to its new sector's center."""
        charactorViewEntity = self.getCharactorViewEntity(charactor)

        sector = charactor.sector
        sectorSprite = self.getSectorSprite(sector)

        charactorViewEntity.moveTo = sectorSprite.rect.center

    #----------------------------------------------------------------------
    def getCharactorViewEntity(self, charactor):
        """Return the (single) charactor sprite, or None if none exists."""
        #there will be only one
        for s in self.frontSprites:
            return s
        return None

    #----------------------------------------------------------------------
    def getSectorSprite(self, sector):
        """Return the sprite displaying *sector* (None if not found)."""
        for s in self.backSprites:
            if hasattr(s, "sector") and s.sector == sector:
                return s

    #----------------------------------------------------------------------
    def notify(self, event):
        """EventManager callback: redraw on ticks, react to game events."""
        if event.is_a(TickEvent):
            #Draw Everything
            self.backSprites.clear(self.window, self.background)
            self.frontSprites.clear(self.window, self.background)

            self.backSprites.update()
            self.frontSprites.update()

            dirtyRects1 = self.backSprites.draw( self.window )
            dirtyRects2 = self.frontSprites.draw( self.window )

            # Only repaint the regions that actually changed.
            dirtyRects = dirtyRects1 + dirtyRects2
            pygame.display.update(dirtyRects)

        elif event.is_a(MapBuiltEvent):
            levelMap = event.levelMap
            self.showMap(levelMap)

        elif event.is_a(CreateCharactorViewEntityEvent):
            self.createCharactorSprite(event.entity, event.playerNumber)

        elif event.is_a(CharactorMoveEvent):
            self.moveCharactor(event.entity)
|
#coding=utf-8
__author__ = 'JinyouHU'
'''
Demonstrates setting a canvas item's tag via the ``tags`` option,
reading tags back with Canvas.gettags, and replacing them with itemconfig.
(Python 2 / Tkinter.)
'''
from Tkinter import *

root = Tk()
# Create a Canvas. NOTE(review): the original (Chinese) comment said the
# background is white, but the code sets it to green.
cv = Canvas(root, bg='green')
# Assign a tag via the tags option.
# NOTE(review): the original comment claimed the tag would be 'r1', but the
# code passes the integer 1 (Tk stores it as the string '1') — confirm intent.
rt = cv.create_rectangle(10, 10, 110, 110,
                         tags=1
                         )
cv.pack()
print cv.gettags(rt)
# Replace the item's tags wholesale with three new ones.
cv.itemconfig(rt, tags=('r2', 'r3', 'r4'))
print cv.gettags(rt)
root.mainloop()
import psycopg2
""" Convenience class that wraps a Postgres Database Connection. """
class Connection:
    """Convenience wrapper around a Postgres (psycopg2) database connection."""

    def __init__(self, host, dbname, user, password):
        self.host = host
        self.dbname = dbname
        self.user = user
        self.password = password
        # Robustness fix: define the attribute up front so calling cursor()/
        # commit()/rollback() before connect() fails with a clear
        # 'NoneType has no attribute' error instead of an AttributeError on
        # the Connection object itself.
        self.conn = None

    def connect(self):
        """Open the underlying psycopg2 connection."""
        self.conn = psycopg2.connect("host='{}' dbname='{}' user='{}' password='{}'".format(
            self.host, self.dbname, self.user, self.password))

    def cursor(self, name=None):
        """Return a cursor; pass *name* for a server-side (named) cursor."""
        return self.conn.cursor(name=name)

    def commit(self):
        return self.conn.commit()

    def rollback(self):
        return self.conn.rollback()
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Store, Base, InventoryItem, User, Category
engine = create_engine('sqlite:///store.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine

DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()

# Create dummy user
User1 = User(name="Charlene Wroblewski", email="cwroblew+catalog@gmail.com",
             picture="https://www.facebook.com/photo.php?fbid=10204105998147240")
session.add(User1)
session.commit()

# Catalog for Charlene's Store
store = Store(id=1, name="Charlene's Store")
session.add(store)
session.commit()

# The original repeated this category/item pair eight times verbatim; the
# loop performs the identical inserts (same values, same commit cadence)
# without the copy-paste duplication.
for _ in range(8):
    category1 = Category(name="Category 1", description="Category 1's description")
    session.add(category1)
    session.commit()

    invItem1 = InventoryItem(user_id=1, name="Inv 1", description="Inventory Item 1",
                             price="$100.00", category_id=1, store_id=1)
    session.add(invItem1)
    session.commit()
|
## Script for Exporting Briefcase contents to a text file
## Copies the Briefcase as a tar file
## Extracts The File "toc.xml" from the tar file (toc = table of contents)
## Parses "toc.xml" and writes each line to a text file
## Counts each parsed line and reopens the text file
## Writes the number of lines as the final line
import tarfile
import time
import os
import shutil
import xml.etree.ElementTree as ET
if __name__ == '__main__':
    # For every LiveCode briefcase (.lfb) in the current directory, export its
    # table of contents to <name>.txt inside a directory named after it.
    files = os.listdir('.')
    for file in files:
        if file.endswith('.lfb'):
            briefcaseDirectory = file.split('.lfb')[0]
            os.makedirs(briefcaseDirectory)
            tarFileName = briefcaseDirectory + '.tar.gz'
            txtFileName = briefcaseDirectory + '.txt'
            # Fix: shutil.copy is portable and synchronous, replacing the
            # Windows-only os.popen('copy ...') and the 5-second sleep that
            # waited for the asynchronous shell copy to finish.
            shutil.copy(file, tarFileName)
            shutil.move(tarFileName, briefcaseDirectory)
            os.chdir(briefcaseDirectory)
            # Pull only the table of contents out of the archive, and close
            # the tar file even if extraction fails.
            with tarfile.open(tarFileName) as tar:
                tar.extract('toc.xml')
            root = ET.parse('toc.xml').getroot()
            # Fix: open the output file once (append mode preserved) instead
            # of reopening it for every entry; write one name per line and
            # the total count as the final line.
            totalCount = 0
            with open(txtFileName, 'a') as txtFile:
                for child in root:
                    txtFile.write(child.attrib['name'] + '\n')
                    totalCount = totalCount + 1
                txtFile.write('Total Number Of Files: ' + str(totalCount))
            os.chdir('..')
|
__author__ = 'sriram'
def get_prime_numbers(p1, p2):
    """Return all primes p with p1 <= p < p2, in ascending order."""
    primes = []
    for num in range(p1, p2):
        if num > 1:
            # Trial division only up to sqrt(num): a composite always has a
            # factor in that range, so this is equivalent to the original's
            # check against every value below num, just much faster.
            for i in range(2, int(num ** 0.5) + 1):
                if (num % i) == 0:
                    break
            else:
                primes.append(num)
    return primes
def find_next_prime(a, b):
    """Return the first prime in [a, b), or None when the range contains no prime.

    Fix: the original's empty-inner-range for-else returned 0 and 1 as
    "primes" when a < 2; the num > 1 guard excludes them. Trial division now
    stops at sqrt(p) (equivalent, faster).
    """
    for p in range(a, b):
        if p > 1:
            for i in range(2, int(p ** 0.5) + 1):
                if p % i == 0:
                    break
            else:
                return p
    return None
def find_prime_connection(primes):
    """Intended to relate consecutive prime pairs — currently a no-op stub.

    NOTE(review): the loop body ``primes[i-1], primes[i]`` builds a tuple and
    immediately discards it, and ``sum`` is initialised but never updated, so
    this function has no observable effect and returns None. The intended
    computation needs to be confirmed before it can be implemented.
    """
    sum = 0
    for i in range(1, len(primes)):
        primes[i-1], primes[i]
def prime_pair_connection(p1, p2):
    """Collect primes in [p1, p2), append the next prime beyond them, then analyze pairs."""
    primes = get_prime_numbers(p1, p2)
    last = primes[-1]
    # Search for one more prime past the end of the range; the upper bound
    # 2*(last+1) mirrors the original search window.
    primes.append(find_next_prime(last + 1, 2 * (last + 1)))
    find_prime_connection(primes)
# Script entry point (Python 2: raw_input).
if __name__ == '__main__':
    n = int(raw_input().strip())  # read but unused — presumably a case count from the problem input; confirm
    (p1, p2) = map(int, raw_input().strip().split())
    prime_pair_connection(p1, p2)
from PIL import Image

__author__ = 'sss'

# Target dimensions for the resized captcha image.
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160

if __name__ == '__main__':
    # Fix: all backslashes doubled. The original mixed "\\" with single
    # "\U"/"\s"/"\D", which are invalid escape sequences (DeprecationWarning
    # on modern Pythons); the resulting path bytes are unchanged.
    image = Image.open("C:\\Users\\sss\\Desktop\\2907.jpg")
    # NOTE(review): no resample filter is passed, so despite the original
    # comment this is NOT high-quality resampling; pass Image.LANCZOS if that
    # was the intent — confirm.
    image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
    image.save("C:\\Users\\sss\\Desktop\\bbb.png", 'png')
import numpy as np
import math
from domains.environment import Environment
import copy
class SimpleEnv(Environment):
    """Toy search domain: a uniform tree with fixed branching factor and a
    single goal node reached by following *solution_path* from the root."""

    def __init__(self, branch, solution_path, printp=False):
        self._branch = branch  # branching factor
        self._solution_path = solution_path  # path to unique solution
        self._path = []  # actions taken so far from the root
        self._printp = printp  # when True, trace each applied action

    def copy(self):
        return copy.deepcopy(self)

    def reset(self):
        """Return to the root (empty path)."""
        self._path = []

    def __hash__(self):
        # States are identified solely by the path taken to reach them.
        return hash(str(self._path))

    def __eq__(self, other):
        return self._branch == other._branch and self._solution_path == other._solution_path

    def successors(self):
        # Every node offers the same actions: 0 .. branch-1.
        return list(range(self._branch))

    def successors_parent_pruning(self, op):
        # No parent pruning in this domain — same actions everywhere.
        return self.successors()

    def apply_action(self, action):
        # Bug fix: the original referenced the bare name ``printp``, which is
        # a NameError at runtime; the flag is stored on the instance.
        if self._printp:
            print("path = {} action = {}".format(self._path, action))
        self._path.append(action)

    def is_solution(self):
        return self._path == self._solution_path

    def get_image_representation(self):
        # Placeholder: this toy domain has no meaningful visual representation.
        return np.zeros((1, 1, 1))

    def heuristic_value(self):
        # Uninformed heuristic (always 0).
        return 0

    def print(self):
        print(self._path)
|
# Character classifier (Python 2): read a character; '0' exits, otherwise
# report whether it is an alphabet letter.
#
# Fixes vs. the original:
#  * raw_input() instead of input() — Python 2's input() eval()s the typed
#    text, so entering a bare letter raised a NameError.
#  * the dead first read (n = int(raw_input())) is dropped: its value was
#    immediately overwritten and it crashed on non-integer input.
#  * compare against the string '0' (raw_input returns a string, never int 0).
#  * the uppercase range ends at 'Z', not 'z' — the original also accepted
#    '[', '\\', ']', '^', '_' and '`' as "Alphabet".
print("enter '0' for exist")
n = raw_input("enter any character")
if n == '0':
    exit()
else:
    if (n >= 'a' and n <= 'z') or (n >= 'A' and n <= 'Z'):
        print("Alphabet")
    else:
        print("Not")
|
class PlayerDto:
def __init__(self, server_id: int, player_id: int, inventory: list[str]=[],
coal_count: int=0, score: int=0):
self.server_id = server_id
self.player_id = player_id
self.inventory = inventory #list of items
self.coal_count = coal_count
self.score = len(inventory) |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
a=np.array([1,2,3])
# In[2]:
a
# In[3]:
a[0]
# In[4]:
a[2]
# In[10]:
import sys
# In[11]:
x=range(500)
# In[12]:
array=np.arange(500)
# In[13]:
print(sys.getsizeof(5)*len(x))
print(array.size*array.itemsize)
# In[14]:
a1=np.array([1,2,3])
a2=np.array([4,5,6])
# In[15]:
a1+a2
# In[16]:
a1-a2
# In[17]:
a1*a2
# In[18]:
a1/a2
# In[ ]:
|
import requests

# POST a file to httpbin's echo endpoint and print the response body.
url = 'http://httpbin.org/post'
# Fix: open the upload inside a ``with`` block so the file handle is closed;
# the original left the open() result dangling in the files dict.
with open('post_file.txt', 'rb') as upload:
    r = requests.post(url, files={'file': upload})
print(r.text)
|
# elevator.py
import os
import logging
import argparse
import threading
import time
import random
import json
import paho.mqtt.client as mqtt
from typing import List
from cps_common.data import Passenger, PassengerEncoder, ElevatorData
class Floor:
    """MQTT client representing one building floor in the elevator simulation.

    Tracks passengers waiting on / arriving at this floor, mirrors the state
    of all six elevators from their MQTT topics, loads waiting passengers into
    elevators whose doors open here, and presses the up/down call buttons.
    """

    def __init__(self, id: int):
        self.floor: int = id
        self.client = mqtt.Client(f"floor{self.floor}")
        self.waiting_list: List[Passenger] = []
        self.arrived_list: List[Passenger] = []
        # Local mirror of every elevator's state, updated from MQTT messages.
        # NOTE(review): the elevator count (6) is hard-coded here — confirm it
        # matches the rest of the simulation's configuration.
        self.elevators: List[ElevatorData] = [ElevatorData(id) for id in range(0, 6)]
        # Background threads: periodic waiting-count publishing and periodic
        # call-button re-pressing.
        self.waiting_count_thread = threading.Thread(target=self.update_waiting_count)
        self.push_call_button_thread = threading.Thread(
            target=self.push_call_button_wrapper
        )

    def run(self, host: str = "localhost", port: int = 1883):
        """Connect to the broker, start the background threads, and block in the MQTT loop."""
        # setup MQTT
        self.client.on_connect = self.on_connect
        self.client.on_disconnect = self.on_disconnect
        self.client.connect(host, port)

        self.waiting_count_thread.start()
        self.push_call_button_thread.start()

        self.client.loop_forever()

    def update_waiting_count(self):
        """Publish the current waiting-passenger count once per second.

        Runs until the thread's ``do_run`` attribute is set to False.
        NOTE(review): threading.currentThread() is deprecated in favour of
        threading.current_thread().
        """
        t = threading.currentThread()
        while getattr(t, "do_run", True):
            time.sleep(1)
            self.client.publish(
                f"floor/{self.floor}/waiting_count", json.dumps(len(self.waiting_list))
            )

    def on_connect(self, client, userdata, flags, rc):
        """MQTT connect callback: subscribe to all relevant topics and wire callbacks."""
        logging.info("connected to broker!")

        subscriptions = [
            (
                f"simulation/floor/{self.floor}/passenger_waiting",
                self.on_passenger_waiting,
            ),
            (
                f"simulation/floor/{self.floor}/passenger_arrived",
                self.on_passenger_arrived,
            ),
            (f"elevator/+/door", self.on_elevator_door),
            (f"elevator/+/capacity", self.on_elevator_capacity),
            (f"elevator/+/status", self.on_elevator_status),
            (f"elevator/+/actual_floor", self.on_elevator_actual_floor),
        ]
        # subscribe to multiple topics in a single SUBSCRIBE command
        # QOS=1
        self.client.subscribe([(s[0], 1) for s in subscriptions])
        # add callback for each subscription
        for s in subscriptions:
            self.client.message_callback_add(s[0], s[1])

    def on_disconnect(self, client, userdata, rc):
        logging.info("disconnected from broker")

    def on_elevator_actual_floor(self, client, userdata, msg):
        """Track which floor each elevator is currently on."""
        elevator_id = int(msg.topic.split("/")[1])
        self.elevators[elevator_id].floor = int(msg.payload)

    def on_elevator_capacity(self, client, userdata, msg):
        """Track each elevator's max and current passenger capacity."""
        # logging.info(f"New message from {msg.topic}")
        elevator_id = int(msg.topic.split("/")[1])
        # logging.debug(f"payload: {str(msg.payload)}")
        capacity = json.loads(msg.payload)
        # logging.debug(f"capacity: {capacity}")
        # logging.debug(f"id {elevator_id}: capacity: {capacity}")
        self.elevators[elevator_id].max_capacity = capacity["max"]
        self.elevators[elevator_id].actual_capacity = capacity["actual"]

    def on_elevator_status(self, client, userdata, msg):
        """Track each elevator's movement status string."""
        # logging.info(f"New message from {msg.topic}")
        elevator_id = int(msg.topic.split("/")[1])
        status = msg.payload.decode("utf-8")
        # logging.debug(f"id {elevator_id}: status: {status}")
        self.elevators[elevator_id].status = status

    def on_elevator_door(self, client, userdata, msg):
        """When an elevator opens its doors on this floor, board waiting passengers.

        Loads as many passengers as the elevator's free capacity allows, then
        republishes the waiting count and re-presses the call buttons for
        anyone left behind.
        """
        # logging.info(f"New message from {msg.topic}")
        status = msg.payload.decode("utf-8")
        elevator_id = int(msg.topic.split("/")[1])
        # logging.debug(
        #     f"status: {status}; elevator floor: {self.elevators[elevator_id].floor}"
        # )
        if (self.elevators[elevator_id].floor == self.floor) and (
            status == "open" and len(self.waiting_list) > 0
        ):
            enter_list: List[Passenger] = []
            free = (
                self.elevators[elevator_id].max_capacity
                - self.elevators[elevator_id].actual_capacity
            )
            while len(enter_list) < free and len(self.waiting_list) > 0:
                enter_list.append(self.waiting_list.pop())
            payload = json.dumps(enter_list, cls=PassengerEncoder)
            # QoS 2: passengers must be delivered exactly once.
            self.client.publish(
                f"simulation/elevator/{elevator_id}/passenger", payload, qos=2
            )
            # NOTE(review): here the count is published raw, while
            # update_waiting_count() publishes it json.dumps()-encoded —
            # confirm subscribers accept both encodings.
            self.client.publish(
                f"floor/{self.floor}/waiting_count", len(self.waiting_list), qos=1
            )
            # re-push or disable call button if there is still passenger waiting
            self.push_call_button()

    def on_passenger_waiting(self, client, userdata, msg):
        """Add newly spawned passengers to the waiting list and press the call buttons."""
        # logging.info(f"New message from {msg.topic}")
        # TODO: validate schema
        # convert the payload to JSON
        try:
            waiting_list = json.loads(msg.payload)
        except json.JSONDecodeError:
            logging.error("Wrong/faulty JSON message format")
            # skip wrong message format
            return

        # this is the first time we received the passenger object so create it first
        # convert the JSON to Passenger objects
        self.waiting_list += [
            Passenger(id=p["id"], start_floor=p["start"], end_floor=p["destination"])
            for p in waiting_list
        ]
        # Shuffle so boarding order is not biased by arrival order.
        random.shuffle(self.waiting_list)
        logging.debug(f"waiting list count: {len(self.waiting_list)}")
        self.client.publish(
            f"floor/{self.floor}/waiting_count", len(self.waiting_list), qos=1
        )
        self.push_call_button()

    def on_passenger_arrived(self, client, userdata, msg):
        """Record passengers delivered to this floor and publish arrival stats."""
        logging.info(f"New message from {msg.topic}")
        # convert the payload to JSON
        arrived_list = json.loads(msg.payload, object_hook=Passenger.from_json_dict)
        # log end time
        logged_passenger: List[Passenger] = []
        for p in arrived_list:
            p.log_end()
            logged_passenger.append(p)
        self.arrived_list += logged_passenger
        logging.debug(f"arrived list: {self.arrived_list}")
        self.client.publish(
            f"simulation/floor/{self.floor}/arrived_count",
            len(self.arrived_list),
            qos=1,
        )
        # publish logged passenger to record
        self.client.publish(
            f"record/floor/{self.floor}/passenger_arrived",
            json.dumps(logged_passenger, cls=PassengerEncoder),
            qos=2,
        )

    def push_call_button_wrapper(self):
        """Background loop: re-press the call buttons once per second."""
        t = threading.currentThread()
        while getattr(t, "do_run", True):
            time.sleep(1)
            self.push_call_button()

    def push_call_button(self):
        """Press up/down call buttons based on where waiting passengers want to go."""
        # logging.info("pushing call button")
        up: bool = False
        down: bool = False
        for p in self.waiting_list:
            up = up or (p.end_floor > self.floor)
            down = down or (p.end_floor < self.floor)
        # logging.debug(f"button pushed: up: {up}; down: {down}")
        if up:
            self.client.publish(f"floor/{self.floor}/button_pressed/up", up, qos=1)
        if down:
            self.client.publish(f"floor/{self.floor}/button_pressed/down", down, qos=1)
if __name__ == "__main__":
    # CLI options; matching environment variables (below) take precedence,
    # which keeps the script container-friendly.
    argp = argparse.ArgumentParser(description="Floor")
    argp.add_argument(
        "-mqtthost",
        action="store",
        dest="host",
        default="localhost",
        help="default: localhost",
    )
    argp.add_argument(
        "-mqttport", action="store", dest="port", default=1883, help="default: 1883"
    )
    argp.add_argument(
        "-log",
        action="store",
        dest="log",
        default="DEBUG",
        help="default: ERROR\nAvailable: INFO DEBUG WARNING ERROR CRITICAL",
    )
    argp.add_argument(
        "-id", action="store", default=5, dest="floor_id", help="Floor ID",
    )
    args = argp.parse_args()
    host = os.getenv("mqtt_host", args.host)
    port = os.getenv("mqtt_port", args.port)
    loglevel = os.getenv("log_level", args.log)
    floor_id = os.getenv("floor_id", args.floor_id)  # renamed: `id` shadowed the builtin
    logging.basicConfig(level=getattr(logging, loglevel.upper()))
    logging.info(f"Starting floor {floor_id}")
    controller = Floor(id=int(floor_id))
    controller.run(host=host, port=int(port))
    # Bug fix: this module models a floor, not an elevator, so log it as such.
    logging.info(f"Exited floor {floor_id}")
|
from lib import common
def login():
    """Log a user in; records the event through the shared project logger."""
    print("登录功能")
    # NOTE(review): the logged user name is hard-coded — presumably placeholder text.
    common.logger('egon刚刚登陆了')
def register():
    """Register a new account (placeholder: prints the feature name only)."""
    print("注册功能")
def witdraw():
    """Withdraw funds (placeholder). Name keeps the original 'witdraw' typo
    because the menu table below references it."""
    print("提现功能")
def transfer():
    """Transfer funds (placeholder: prints the feature name only)."""
    print("转账功能")
# Menu registry: option number -> [label, callable].
# Bug fix: the original stored `login()`, `register()`, ... — calling every
# feature once at import time and leaving None in the table instead of the
# function object, so any later dispatch through the table would fail.
func_dict={
    '0':['登录',login],
    '1':['注册',register],
    '2':['提现',witdraw],
    '3':['转账',transfer],
    '5':['退出',exit]
}
def run():
    """Entry point: loop forever, printing the numbered menu and reading a choice."""
    while True:
        for k in func_dict:
            print(k,func_dict[k][0])
        choice=input("请输入编号:").strip()
        # NOTE(review): `choice` is read but never dispatched — the file looks
        # truncated; a func_dict lookup/call presumably follows in the full source.
|
from typing import List
import random
class Layer:
    """One fully-connected layer: activations, biases, and the weight rows
    leading to the next layer (the weight matrix is filled in by the network)."""

    def __init__(self, size: int):
        self.size = size
        self.neurons = [0.0 for _ in range(size)]
        self.biases = [0.0 for _ in range(size)]
        self.weights = []
class NeuralNetwork:
    """Minimal multilayer perceptron trained with plain SGD backprop.

    :param learningRate: step size applied to every gradient update
    :param activation: activation function applied neuron-wise
    :param derivative: derivative of the activation (takes the activated value)
    :param sizes: neuron count per layer, input layer first, output layer last
    """

    def __init__(self, learningRate: float, activation, derivative, sizes: List[int]):
        self.learningRate = learningRate
        self.activation = activation
        self.derivative = derivative
        self.layers = []
        for i in range(len(sizes)):
            # weights[j] connects neuron j of this layer to every neuron of the
            # next layer, so the last layer gets empty weight rows (nextSize == 0).
            nextSize = sizes[i + 1] if i < len(sizes) - 1 else 0
            self.layers.append(Layer(sizes[i]))
            for j in range(sizes[i]):
                self.layers[i].biases[j] = random.random() * 2.0 - 1.0
                self.layers[i].weights.append(
                    [random.random() * 2.0 - 1.0 for _ in range(nextSize)]
                )

    def feedForward(self, inputs: List[float]) -> List[float]:
        """Propagate `inputs` through the network; returns the output layer's activations."""
        self.layers[0].neurons = inputs
        for i in range(1, len(self.layers)):
            prev = self.layers[i - 1]
            cur = self.layers[i]
            for j in range(cur.size):
                total = cur.biases[j]
                for k in range(prev.size):
                    total += prev.neurons[k] * prev.weights[k][j]
                cur.neurons[j] = self.activation(total)
        return self.layers[-1].neurons

    def backpropagation(self, targets: List[float]):
        """Apply one SGD step pulling the last feedForward output toward `targets`."""
        errors = [targets[i] - self.layers[-1].neurons[i]
                  for i in range(self.layers[-1].size)]
        for k in range(len(self.layers) - 2, -1, -1):
            l = self.layers[k]
            l1 = self.layers[k + 1]
            # Learning-rate-scaled gradient for each downstream neuron.
            gradients = [errors[i] * self.derivative(l1.neurons[i]) * self.learningRate
                         for i in range(l1.size)]
            # deltas[i][j]: weight change for the connection j -> i.
            deltas = [[gradients[i] * l.neurons[j] for j in range(l.size)]
                      for i in range(l1.size)]
            # Backpropagate the error through the *old* weights before updating.
            # (The original seeded this list with a stray extra element; a
            # plain zero list of length l.size is what the math needs.)
            errorsNext = [0.0] * l.size
            for i in range(l.size):
                for j in range(l1.size):
                    errorsNext[i] += l.weights[i][j] * errors[j]
            errors = errorsNext
            # Bug fix: the original ran this identical (idempotent) weight-update
            # loop twice in a row; once is sufficient.
            l.weights = [
                [l.weights[i][j] + deltas[j][i] for j in range(l1.size)]
                for i in range(l.size)
            ]
            for i in range(l1.size):
                l1.biases[i] += gradients[i]
|
import logging
import pymysql
logger = logging.getLogger()
class PyMysqlBase(object):
    """Thin wrapper around a single pymysql database connection.

    Write helpers commit immediately; read helpers return the fetched rows.
    """
    def __init__(self,
                 host='localhost',
                 port=3306,
                 user='user',
                 password='pwd',
                 db='db',
                 charset='utf8mb4',
                 cursorclass=pymysql.cursors.DictCursor,
                 ):
        # One connection per instance; DictCursor returns rows as dicts.
        self.connection = pymysql.connect(
            host=host,
            port=port,
            user=user,
            password=password,
            db=db,
            charset=charset,
            cursorclass=cursorclass
        )
    def _exec_sql(self, sql, param=None):
        """Execute one write statement, commit, and return the affected row count."""
        with self.connection.cursor() as cursor:
            count = cursor.execute(sql, param)
            self.connection.commit()
        return count
    def insert(self, sql, params=None):
        """Insert one record; commits and returns the affected row count."""
        with self.connection.cursor() as cursor:
            ret = cursor.execute(sql, params)
            self.connection.commit()
        return ret
    def select_all(self, sql, params=None):
        """Run a query and return every row."""
        with self.connection.cursor() as cursor:
            cursor.execute(sql, params)
            results = cursor.fetchall()
        return results
    def select_many(self, sql, params=None, size=1):
        """Run a query and return at most `size` rows."""
        with self.connection.cursor() as cursor:
            cursor.execute(sql, params)
            results = cursor.fetchmany(size)
        return results
    def select_one(self, sql, params=None):
        """Run a query and return the first row (or None)."""
        with self.connection.cursor() as cursor:
            cursor.execute(sql, params)
            result = cursor.fetchone()
        return result
    def insert_many(self, sql, values):
        """Bulk-insert with executemany; commits and returns the affected row count.

        Bug fix: the original never committed here, so batched inserts were
        silently discarded when the connection closed — inconsistent with
        every other write helper in this class.
        """
        with self.connection.cursor() as cursor:
            count = cursor.executemany(sql, values)
            self.connection.commit()
        return count
    def update(self, sql, param=None):
        """Update records.

        :param sql: SQL with %s placeholders
        :param param: tuple/list of values to bind
        :return: affected row count
        """
        return self._exec_sql(sql, param)
    def delete(self, sql, param=None):
        """Delete records.

        :param sql: SQL with %s placeholders
        :param param: tuple/list of condition values to bind
        :return: affected row count
        """
        return self._exec_sql(sql, param)
|
from django.conf.urls import url
from .views import (ReportFormView, ProjectDetailView, ProjectListView, home, inner, temp)
# Route table for this app. NOTE(review): regex-based `django.conf.urls.url`
# was removed in Django 4.0 — this module only works on older Django versions.
urlpatterns=[
    url(r'^$', home, name='home'),
    url(r'^inner/$',inner,name='inner'),
    url(r'^temp/$',temp,name='temp'),
    url(r'^report-form/$', ReportFormView.as_view(), name='report-form'),
    url(r'^detail/project/(?P<pk>\d+)$', ProjectDetailView.as_view(), name='detail-project'),
    url(r'^list/project/$', ProjectListView.as_view(), name='list-project'),
]
|
# Count connected groups of '1' cells in an N x N grid (4-neighbourhood)
# and print each group's size in ascending order.
N = int(input())
visit = [[0]*N for _ in range(N)]  # 0 = unvisited, otherwise the group id
a = [list(map(int,list(input()))) for _ in range(N)]  # one digit-row per input line
# print(a)
# print(visit)
cnt = 0  # running group id
dx = [1,-1,0,0]  # row offsets of the 4 neighbours
dy = [0,0,1,-1]  # column offsets of the 4 neighbours
def bfs(x, y, cnt):
    """Flood-fill from (x, y), labelling every reachable '1' cell in `visit`
    with the group id `cnt`. Reads the module-level grid `a`, size `N`,
    and offset tables dx/dy."""
    # deque gives O(1) popleft; the original list.pop(0) was O(n) per dequeue.
    from collections import deque
    queue = deque([[x, y]])
    visit[x][y] = cnt
    while queue:
        cx, cy = queue.popleft()
        for k in range(4):
            nx = cx + dx[k]
            ny = cy + dy[k]
            # stay on the board
            if (nx >= 0 and nx < N) and (ny >= 0 and ny < N):
                # not yet visited and connected (cell holds a 1)
                if visit[nx][ny] == 0 and a[nx][ny] == 1:
                    visit[nx][ny] = cnt
                    queue.append([nx, ny])
# Label every still-unvisited '1' cell with a fresh group id.
for i in range(N):
    for j in range(N):
        if a[i][j] == 1 and visit[i][j] == 0:
            cnt += 1
            bfs(i, j, cnt)
# Flatten `visit` into a 1-D list.
answer = []
for i in visit:
    answer += i
# Total number of groups (largest label used).
print(max(answer))
arr = []
for i in range(1,max(answer)+1):
    # Count cells labelled i (group size).
    # NOTE(review): `sum` shadows the builtin sum() from here on.
    sum = 0
    for j in answer:
        if i == j:
            sum += 1
    arr.append(sum)
arr.sort()
for i in arr:
    print(i)
|
from sys import argv
# read WYSS section for how to run this
# Unpack the command line: the script name plus exactly three user arguments.
# Running with fewer or more than three arguments raises ValueError here.
script, first, second, third = argv
print("The script is called:", script)
print("The first variable is:", first)
print("The second variable is:", second)
print("The third variable is:", third)
# What you should see
# WARNING! Pay attention! You have been running python scripts without command line
# arguments. If you type only python3.6 ex13.py you are doing it wrong! Pay close
# attention to how I run it. This applies any time you see argv being used.
|
# For each of n people read "name x y" and build a single-entry dict mapping
# x**2 + y**2 (squared distance from the origin) to the name.
n = int(input())
ll = []
for i in range(n):
    peo={}
    temp = input().split()
    # NOTE(review): reusing `n` overwrites the loop bound; harmless only
    # because range(n) was already evaluated above.
    n = int(temp[1])**2+int(temp[2])**2
    peo[n] = temp[0]
    ll.append(peo)
print(ll)
|
##### Part 1 #####
# NOTE(review): Python 2 script (print statements). `sklearn.cross_validation`
# was removed in scikit-learn 0.20 (replaced by sklearn.model_selection).
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import statsmodels.formula.api as smf
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
# 1. read in the yelp dataset
import pandas as pd
yelp = pd.read_csv('../hw/optional/yelp.csv', index_col = 'review_id')
yelp.head()
# 2. Perform a linear regression using
# "stars" as your response and
# "cool", "useful", and "funny" as predictors
'''visualizing the data'''
sns.pairplot(yelp, x_vars=['cool','useful','funny'], y_vars='stars', size=4.5, aspect=0.7)
sns.pairplot(yelp, x_vars=['cool','useful','funny'], y_vars='stars', size=4.5, aspect=0.7, kind='reg')
sns.pairplot(yelp)
yelp.corr()
sns.heatmap(yelp.corr())
'''linear regression'''
feature_cols = ['cool', 'useful', 'funny']
X = yelp[feature_cols]
y = yelp.stars
linreg = LinearRegression()
linreg.fit(X, y)
print linreg.intercept_
print linreg.coef_
zip(feature_cols, linreg.coef_)
# 3. Show your MAE, R_Squared and RMSE
'''R_squared'''
lm = smf.ols(formula='stars ~ cool + useful + funny', data=yelp).fit()
lm.rsquared
'''MAE'''
y_pred = linreg.predict(X)
print metrics.mean_absolute_error(y, y_pred)
'''RMSE'''
print np.sqrt(metrics.mean_squared_error(y, y_pred))
# 4. Use statsmodels to show your pvalues
# for each of the three predictors
# Using a .05 confidence level,
# Should we eliminate any of the three?
lm = smf.ols(formula='stars ~ cool + useful + funny', data=yelp).fit()
lm.pvalues
lm.conf_int()
'''We believe all features have a relationship with stars rated.'''
# 5. Create a new column called "good_rating"
# this could column should be True iff stars is 4 or 5
# and False iff stars is below 4
yelp['good_rating'] = yelp['stars'] >= 4
yelp.head()
# 6. Perform a Logistic Regression using
# "good_rating" as your response and the same
# three predictors
from sklearn.linear_model import LogisticRegression
Z = yelp.good_rating
logreg = LogisticRegression()
logreg.fit(X, Z)
print logreg.intercept_
print logreg.coef_
zip(feature_cols, logreg.coef_)
# 7. Show your Accuracy, Sensitivity, Specificity
# and Confusion Matrix
from sklearn import metrics
preds = logreg.predict(X)
print metrics.confusion_matrix(Z, preds)
# Metrics computed by hand from the confusion matrix printed above.
Accuracy = (227.0 + 6733) / 10000
Sensitivity = 6733 / (130.0 + 6733)
Specificity = 227 / (227 + 2910.0)
# 8. Perform one NEW operation of your
# choosing to try to boost your metrics!
# NOTE(review): train_test_rmse is not defined anywhere in this file —
# calling it raises NameError at runtime.
train_test_rmse(X, y)
##### Part 2 ######
# 1. Read in the titanic data set.
titanic = pd.read_csv('titanic.csv', index_col = 'PassengerId')
titanic.head()
# 4. Create a new column called "wife" that is True
# if the name of the person contains Mrs.
# AND their SibSp is at least 1
# NOTE(review): `[...] and series` evaluates to the SibSp series whenever the
# list is non-empty, so the 'Mrs.' condition is effectively ignored here.
titanic['wife'] = [row for row in titanic['Name'] if 'Mrs.' in row] and titanic['SibSp'] >= 1
# 5. What is the average age of a male and
# the average age of a female on board?
male_avg_age = titanic.Age[titanic.Sex == 'male'].mean()
female_avg_age = titanic.Age[titanic.Sex == 'female'].mean()
# 5. Fill in missing MALE age values with the
# average age of the remaining MALE ages
titanic.Age[titanic.Sex == 'male'].isnull().sum()
#titanic.Age[titanic.Sex == 'male'].fillna(value=male_avg_age,inplace=True)
titanic.Age = titanic.groupby("Sex").transform(lambda x: x.fillna(x.mean()))['Age']
'''code provided by Patrick fills in both missing male and female cells'''
# 6. Fill in missing FEMALE age values with the
# average age of the remaining FEMALE ages
titanic.Age[titanic.Sex == 'female'].isnull().sum()
#titanic.Age[titanic.Sex == 'female'].fillna(female_avg_age, inplace=True)
# 7. Perform a Logistic Regression using
# Survived as your response and age, wife
# as predictors
logreg = LogisticRegression()
titanicfeature_cols = ['Age' , 'wife']
A = titanic[titanicfeature_cols]
b = titanic.Survived
logreg.fit(A, b)
assorted_pred_class = logreg.predict(A)
# 8. Show Accuracy, Sensitivity, Specificity and
# Confusion matrix
prds = logreg.predict(A)
print metrics.confusion_matrix(b, prds)
Accuracy = (523+26.0)/ 891
Sensitivity = (26.0) / (316+26)
Specificity = (523.0) / (523+26)
# 9. now use ANY of your variables as predictors
# Still using survived as a response to boost metrics!
cfeature_cols = ['Age','Pclass','Fare']
C = titanic[cfeature_cols]
logreg.fit(C, b)
assorted_pred_class = logreg.predict(C)
# 10. Show Accuracy, Sensitivity, Specificity
preds1 = logreg.predict(C)
print metrics.confusion_matrix(b, preds1)
Accuracy = (477+148.0)/ 891
Sensitivity = (148.0) / (194+148)
Specificity = (477.0) / (477+72)
# NOTE(review): undefined helper again — see the note above.
train_test_rmse(C, b)
# REMEMBER TO USE
# TRAIN TEST SPLIT AND CROSS VALIDATION
# FOR ALL METRIC EVALUATION!!!!
|
user_input=input("ENTER STRING ")
print("OUTPUT")
# title(): first letter of each word is capitalised
print(user_input.title())
# capitalize(): only the first letter of the whole string is capitalised
print(user_input.capitalize())
# upper(): every letter is capitalised
print(user_input.upper())
# Sample run (input/output transcript kept as a reference string):
"""
ENTER STRING my name is mohd mazher uddin
OUTPUT
My Name Is Mohd Mazher Uddin
My name is mohd mazher uddin
MY NAME IS MOHD MAZHER UDDIN
"""
import pickle
from config import *
from Messages.Ack import Ack
from Messages.message_types import *
from utils.utils import *
class MessageHandler:
    """Route one decoded server message to the matching handler.

    Dispatch happens eagerly in __init__, so merely constructing an
    instance performs the handling side effects.
    """
    def __init__(self, msg, server_socket):
        self.msg = msg  # decoded message (dict-like; read with .get below)
        self.server_socket = server_socket  # socket back to the server
        self.handle_message_scenario()
    # here we send the message to its path (switch case)
    def handle_message_scenario(self):
        """Inspect message_type and run the matching branch."""
        message_type = self.msg.get("message_type")
        print(message_type)
        if message_type == SERVER_CONFIG_MESSAGE:
            # Server asked for an acknowledgement of its config message.
            if self.msg.get("ack"):
                ack_message = Ack()
                self.send_msg_to_server(ack_message.json())
            print(self.msg.get("config_data"))
        elif message_type == SESSION_TICKET:
            print(self.msg.get('ticket_id'))
        elif message_type == SESSION_CLOSE:
            print('close session')
        elif message_type == "PDF":
            self.write_pdf_file()
    def write_pdf_file(self):
        """Persist the message's binary payload as pdf_files/Waybill.pdf
        under the project root (overwrites any previous file)."""
        pdf_content = self.msg.get('data')
        with open(f'{get_project_root()}/pdf_files/Waybill.pdf', "wb") as f:
            f.write(pdf_content)
    # send message to client
    def send_msg_to_server(self, msg_obj=None):
        """
        Serialize msg_obj with pickle and send it to the server, prefixed
        with a header containing the payload size (see general_message_header).
        """
        serialized_msg = pickle.dumps(msg_obj)
        msg_header = general_message_header(len(serialized_msg),BUFFER_LENGTH)
        full_msg = bytes(msg_header.encode("utf-8")) + serialized_msg
        # send msg to client
        self.server_socket.send(full_msg)
|
import matplotlib.pyplot as plt
import numpy as np
def A7(title, blue, green, brown, other):
    """Draw a bar chart of passenger counts for four destinations.

    :param title: chart title
    :param blue/green/brown/other: the four bar heights.
        NOTE(review): the parameter names do not match the destination labels
        set below (USA/UK/SOUTH AFRICA/SPAIN) — presumably copied from a
        different exercise; confirm intent.
    """
    y = [blue, green, brown, other]
    x = [2, 4, 6, 8]  # fixed bar positions
    y_ticks = list(range(max(y)+2))
    # Format y_ticks
    def format_y_ticks():
        # Even ticks for small ranges; every 5th tick otherwise.
        if max(y) < 30:
            y_even = []
            for val in y_ticks:
                if val % 2 == 0:
                    y_even.append(val)
        else:
            y_even = list(y_ticks[0:-1:5])
        return(y_even)
    y_vals = format_y_ticks()
    width = 1/1.5
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # NOTE(review): width-1 is negative (~ -0.33), so matplotlib draws the bars
    # extending to the left of each x position; `rext1` is also unused.
    rext1 = ax.bar(x, y, width-1, color="#FF00FF", label='All people')
    plt.title(title, color='blue', fontsize=14)
    plt.minorticks_on()
    plt.grid(color="#800080", which='major',linewidth=0.7, ls='-')
    #plt.grid(color='#800080', which='minor', lw=1.0, ls='-')
    #plt.legend(loc="upper left", frameon=True )
    ax.set_ylabel('Number of passengers', color='blue', fontsize=12)
    ax.set_xlabel('Destination',color='blue', fontsize=12)
    ax.set_xticks(x)
    ax.set_yticks(y_vals)
    ax.set_xticklabels(('USA', 'UK', 'SOUTH AFRICA', 'SPAIN', ), color='green')
    plt.show()
# Demo invocation with hard-coded counts.
A7('Passenger-Destination Chart', 14, 10, 11, 5)
from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from rrdapi import views
# NOTE(review): `patterns()` was removed in Django 1.10 — this URL conf only
# works on old Django versions.
urlpatterns = patterns('',
    # url(r'^rrddata/$', 'rrddata_detail'),
    url(r'^rrddata/$', views.RRDDataDetail.as_view()),
)
# Allow explicit format suffixes (e.g. .json) on the API routes.
urlpatterns = format_suffix_patterns(urlpatterns)
|
import argparse
from src.data import CelebaData
from src.train import Trainer, Evaluation
from src.utils import load_checkpoint
from src.config import config
from src.net import VggNetwork
import torch
# Command-line interface.
# Bug fix: the original help strings for --batch and --workers were
# copy-pasted from other options ("Number of epochs..." / "Checkpoint name").
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", help="Number of epochs to train", type=int, default=1)
parser.add_argument("--batch", help="Batch size", type=int, default=1)
parser.add_argument("--workers", help="Number of data-loader workers", type=int, default=6)
parser.add_argument("--checkpoint", help="Checkpoint name", type=str, default=None)
args = parser.parse_args()
print("Arguments: {}".format(args))
print("Config: {}".format(config))
net = VggNetwork(out=2)
if args.checkpoint is not None:
    # Resume weights (and any extra state returned) from the checkpoint.
    extra = load_checkpoint(net, args.checkpoint)
train_data = CelebaData("data/train.csv")
val_data = CelebaData("data/val.csv")
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch, num_workers=args.workers, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch, num_workers=args.workers, shuffle=True)
trainer = Trainer(net, config)
# Renamed from `eval` to avoid shadowing the builtin eval().
evaluator = Evaluation(net, config)
# Train one epoch at a time so validation runs after every epoch.
for i in range(args.epochs):
    trainer.run(train_loader, epochs=1)
    result = evaluator.run(val_loader)
    print(f"Validation result: {result['loss']} / {result['accuracy']}")
|
import serial
import pyqtgraph as pg
import random
from PyQt5 import QtGui, QtWidgets,QtCore
from PyQt5.QtWidgets import QWidget, QApplication
from pyqtgraph.Qt import QtGui, QtWidgets,QtCore
from threading import Thread
'''
app = QtGui.QApplication([])
win = pg.GraphicsLayoutWidget(show=True, title="Basic plotting examples")
win.resize(800, 600)
win.setWindowTitle('comPortReader')
p = win.addPlot(title="Updating plot")
curve = p.plot(pen='y')
inpFromComPort = 0
def update():
global curve, data, inpFromPort
data.pop(0)
data.append(inpFromPort)
inpFromPort = random.randint(0, 100)
curve.setData(data)
p.enableAutoRange('xy', True)
'''
class TestWidget(QWidget):
    """Bare widget that prints every key press to stdout (manual input test)."""
    def __init__(self, parent = None):
        QWidget.__init__(self, parent)
    def keyPressEvent(self, event):
        # Qt hook: fires for each key press while the widget has focus.
        print("pressed key " + str(event.key()))
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    tst = TestWidget()
    tst.show()
    # exec_() blocks in the Qt event loop; its return code becomes the exit status.
    sys.exit(app.exec_())
# coding=utf-8
import math
import operator
import os
import datetime
#from numpy import *
class KnnUtil:
    """Brute-force K-nearest-neighbour classification helper."""

    def __init__(self):
        pass

    def get_us(self,time_pointa,time_pointb):
        """Difference (a - b) in microseconds, computed from the second and
        microsecond fields only (minutes and above are ignored)."""
        a_us = time_pointa.second * 1000000 + time_pointa.microsecond
        b_us = time_pointb.second * 1000000 + time_pointb.microsecond
        return a_us - b_us

    def distance_calculator(self, point_a, point_b, dims=2):
        """
        Squared Euclidean distance between two points (no sqrt — the
        monotone ordering is all KNN needs).
        point_a: tuple with `dims` components
        point_b: tuple with `dims` components
        dims: how many dimensions to compare
        eg: dims=3
            point_a=(1,2,3)
            point_b=(4,5,6)
        """
        return sum(((point_a[d] - point_b[d]) ** 2 for d in range(dims)), 0.0)

    def find_NearestTag_KthNearst(self, point_a, point_list, point_tag_list, dims=2, K=10):
        """Return the majority tag among the K labelled points closest to point_a.

        point_list and point_tag_list must be index-aligned (tags must be
        hashable and comparable).
        """
        if len(point_list) != len(point_tag_list):
            print("ERROR 000: label count does not match sample point count")
            os.system("pause")
            raise RuntimeError
        # (point, tag, squared distance) for every labelled sample.
        scored = [
            (pt, tag, self.distance_calculator(point_a, pt, dims=dims))
            for pt, tag in zip(point_list, point_tag_list)
        ]
        nearest = sorted(scored, key=operator.itemgetter(2))
        # Tally the tags of the K closest samples.
        votes = {}
        for j in range(K):
            tag = nearest[j][1]
            votes[tag] = votes.get(tag, 0) + 1
        ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
        # Majority tag among the K nearest samples.
        return ranked[0][0]
#self test
if __name__ == "__main__":
    # Smoke test: the K=3 neighbours of (1, 2) carry tags A, B, A -> prints "A".
    point0 = (1, 2)
    points = [(1, 2), (3, 4), (4, 5), (100, 300)]
    point_tag = ["A", "B", "A", "B"]
    tool = KnnUtil()
    type_final = tool.find_NearestTag_KthNearst(point0, points, point_tag, K=3)
    print(type_final)
|
from node_position import NodePosition
def get_neighbor_coordinates(node_pos):
    """
    Calculates node's neighbor's cartesian coordinates
    :param node_pos: tuple containing node's coordinates
    :return: neighbor coordinates as a dictionary
    """
    row, col = node_pos
    # Row index grows downwards, so TOP is row - 1 and BOTTOM is row + 1.
    return {
        NodePosition.TOP: (row - 1, col),
        NodePosition.BOTTOM: (row + 1, col),
        NodePosition.LEFT: (row, col - 1),
        NodePosition.RIGHT: (row, col + 1),
    }
|
import json
import logging
import time

from flask import Flask, request, session, g, redirect, url_for, \
     abort, render_template, flash, Response, send_from_directory
from bluepy import btle

import server_settings
app = Flask(__name__)
app.config.from_object(__name__)
# WARNING: For some reason, setting Flask DEBUG=True causes btle to fail
app.config["PROPOGATE_EXCEPTIONS"] = True
# Log to stdout
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
# Shared state between the BTLE notification delegate and the pollers below.
notification_data = False  # Buffer to store data from BTLE. False if nothing yet read or data already read
temperature = None  # None if not yet loaded
visor_status = None
debug = True
# Static HTML fragments for hand-built responses.
response_prefix = """
<html>
<head>
<title>CAT COMMANDER</title>
</head>
<body>
"""
response_suffix = """
</body>
"""
def cors(response):
    """Attach permissive CORS headers so browser clients on other origins can call the API."""
    # Allow AJAX interactions from other domains
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    return response
@app.route('/')
def main():
    # Serve the single-page UI.
    return render_template('index.html')
@app.route('/api/v1/laser/')
def v1_laser():
    """Turn a laser on or off: /api/v1/laser/?id=<n>&status=<0|1>."""
    laser_id = request.args.get('id', 0, type=int)
    laser_status = request.args.get('status', 0, type=int)
    # Communicate the status to BTLE
    btle_write('laser %d %d\n'%(laser_id, laser_status))
    # Bug fix: the original passed a dict straight to Response(), which is not
    # a valid response body; serialize it so the declared mimetype is honoured.
    return cors(Response(json.dumps({"success": True}), status=200, mimetype='application/json'))
@app.route('/api/v1/laser_position/')
def v1_laser_position():
    """Aim a laser: /api/v1/laser_position/?id=<n>&x=<int>&y=<int>."""
    laser_id = request.args.get('id', 0, type=int)
    laser_x = request.args.get('x', 0, type=int)
    laser_y = request.args.get('y', 0, type=int)
    # Communicate the new position to BTLE
    btle_write('laser_position %d %d %d\n'%(laser_id, laser_x, laser_y))
    # Bug fix: serialize the body — Response() does not accept a dict.
    return cors(Response(json.dumps({"success": True}), status=200, mimetype='application/json'))
class MyDelegate(btle.DefaultDelegate):
    """bluepy delegate: stashes each incoming BTLE notification in the
    module-level `notification_data` buffer for the pollers below.
    (Python 2 code — print statements.)"""
    def __init__(self, params):
        btle.DefaultDelegate.__init__(self)
        if debug:
            print "Starting BTLE notification delegate"
    def handleNotification(self, cHandle, data):
        # We received some data! Buffer it for whoever is waiting on it.
        global notification_data
        notification_data = data
        if debug:
            print data
            print notification_data
def load_temperature():
    """Request the temperature over BTLE and cache it in the `temperature` global.

    Sets `temperature` to False when no notification arrives within 1 s.
    """
    # Tell the BTLE we want to receive the temperature
    btle_write('get temperature\n');
    if p and p.waitForNotifications(1.0):
        global notification_data
        global temperature
        temperature = float(notification_data)
        notification_data = False  # mark the buffer as consumed
        print temperature
        return
    # There was some problem loading the data
    temperature = False
def load_visor_status():
    """Request the visor state over BTLE and cache it in the `visor_status` global.

    Sets `visor_status` to False when no notification arrives within 1 s.
    """
    # Tell the BTLE we want to receive the status of the visor
    btle_write('get visor_status\n');
    if p and p.waitForNotifications(1.0):
        global notification_data
        global visor_status
        # NOTE(review): bool() of any non-empty payload string is True — this
        # likely should parse the payload; confirm the device's reply format.
        visor_status = bool(notification_data)
        notification_data = False
        print visor_status
        return
    # There was some problem loading the data
    visor_status = False
def btle_write(data, retry=0):
    """Write `data` to the BTLE TX characteristic in 20-byte chunks.

    On failure, re-establishes the connection and retries up to 3 times.
    """
    try:
        # write data in 20 character chunks (typical BTLE payload limit;
        # the original comment wrongly said 10)
        length = 20
        payloads = [data[0+i:length+i] for i in range(0, len(data), length)]
        for payload in payloads:
            tx.write(payload)
    except Exception:
        # Narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        # Something went wrong, try to re-establish the connection
        btle_connect()
        # Retry the write over the fresh connection
        if retry < 3:
            btle_write(data, retry=retry+1)
def btle_connect():
    """(Re)connect to the BTLE peripheral and wire up the TX/RX characteristics.

    Populates the module-level p/tx_uuid/rx_uuid/tx/rx handles; returns
    False on connection failure, True on success.
    """
    global p
    global tx_uuid
    global rx_uuid
    global tx
    global rx
    try:
        p = btle.Peripheral(server_settings.btle_address,"random")
    except btle.BTLEException as exc:
        print "Error connecting to BTLE device: " + exc.message
        return False
    # Nordic UART Service characteristic UUIDs (TX = write, RX = notify).
    tx_uuid = btle.UUID("6e400002-b5a3-f393-e0a9-e50e24dcca9e")
    rx_uuid = btle.UUID("6e400003-b5a3-f393-e0a9-e50e24dcca9e")
    tx = p.getCharacteristics(uuid=tx_uuid)[0]
    rx = p.getCharacteristics(uuid=rx_uuid)[0]
    p.setDelegate( MyDelegate({}) )
    # Tell BTLE to accept notifications by sending 0x0100 to the CCCD
    # NOTE(review): handle 0x0023 is hard-coded and device-specific — confirm.
    p.writeCharacteristic(0x0023, '\x01\x00', False)
    return True
# Establish the BTLE link at import time so the routes can use it immediately.
btle_connect()
# Run server as a standalone app
if __name__ == '__main__':
    print "Beginning server"
    app.run(host='0.0.0.0', port=80)
|
#!/usr/bin/env python
# coding: utf-8
# In[141]:
import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import requests
import json
from pprint import pprint
from stats import median
# NOTE(review): csvpath is assigned three times and never used afterwards —
# the reads below use hard-coded relative paths instead.
csvpath = os.path.join("Data", "city_codes.csv")
csvpath = os.path.join("Data", "majors_dates.csv")
csvpath = os.path.join("Data", "rift_weath_sol_equ.csv")
# In[142]:
city_codes_csv = "Data/city_codes.csv"
# In[143]:
city_codes_df = pd.read_csv(city_codes_csv)
# In[108]:
major_dates_csv = "Data/majors_dates.csv"
# In[109]:
major_dates_df = pd.read_csv(major_dates_csv)
# In[110]:
major_dates_df
# In[111]:
url = "https://api.worldweatheronline.com/premium/v1/past-weather.ashx"
# NOTE(review): API key committed in source — move it to an env var or config.
base_params = {
    "key": "bc1fb38cb0ec446b83221156192908",
    "tp": 24,
    "format": "json",
}
def _feels_like_for(date_column, out_column):
    """Fetch the 'FeelsLikeF' reading for every race city in major_dates_df
    on the dates in `date_column`; returns a DataFrame [city, out_column].

    Also pprints every raw API response, matching the original cells.
    """
    cities = []
    feels = []
    params = dict(base_params)
    for index, row in major_dates_df.iterrows():
        params['q'] = row['major']
        params['date'] = row[date_column]
        response = requests.get(url, params=params).json()
        cities.append(row['major'])
        feels.append(response['data']['weather'][0]['hourly'][0]['FeelsLikeF'])
        pprint(response)
    return pd.DataFrame({"city": cities, out_column: feels})
# The original repeated the request loop above six times (2013-2018) verbatim;
# collapsed into a single helper. The per-year frame names are preserved
# because later cells reference them.
majors_13_weather_df = _feels_like_for('13 majors', 'feels_like_13')
majors_13_weather_df.head()
# In[112]:
print(list(majors_13_weather_df['feels_like_13']))
# In[114]:
majors_14_weather_df = _feels_like_for('14 majors', 'feels_like_14')
majors_14_weather_df.head()
# In[116]:
majors_15_weather_df = _feels_like_for('15 majors', 'feels_like_15')
majors_15_weather_df.head()
# In[120]:
majors_16_weather_df = _feels_like_for('16 majors', 'feels_like_16')
majors_16_weather_df.head()
# In[122]:
majors_17_weather_df = _feels_like_for('17 majors', 'feels_like_17')
majors_17_weather_df.head()
# In[124]:
majors_18_weather_df = _feels_like_for('18 majors', 'feels_like_18')
majors_18_weather_df.head()
# In[174]:
# Join the six yearly frames on city into one wide table.
merge_weather_1 = pd.merge(majors_13_weather_df, majors_14_weather_df, on="city")
merge_weather_1
# In[175]:
merge_weather_2 = pd.merge(merge_weather_1, majors_15_weather_df, on="city")
merge_weather_2
# In[176]:
merge_weather_3 = pd.merge(merge_weather_2, majors_16_weather_df, on="city")
merge_weather_3
# In[177]:
merge_weather_4 = pd.merge(merge_weather_3, majors_17_weather_df, on="city")
merge_weather_4
# In[178]:
merge_weather_5 = pd.merge(merge_weather_4, majors_18_weather_df, on="city")
merge_weather_5
# Rename the feels_like_* columns to plain year labels.
merge_weather_5 = merge_weather_5.rename(columns={"feels_like_13":"2013", "feels_like_14":"2014", "feels_like_15":"2015","feels_like_16":"2016","feels_like_17":"2017","feels_like_18":"2018",})
merge_weather_5
# In[185]:
merge_weather_df = pd.merge(merge_weather_5, city_codes_df, on="city")
merge_weather_df
# In[186]:
merge_weather_df = merge_weather_df.set_index("city_code")
merge_weather_df
# In[189]:
# The API returns temperatures as strings; cast before averaging.
merge_weather_df=merge_weather_df.astype(float)
# In[188]:
average_heat_index = merge_weather_df.mean()
average_heat_index
print(average_heat_index)
# In[150]:
# merge_weather_df.loc['TYO', "2013":"2018"].plot(label="Tokyo")
# plt.legend()
# plt.show()
# In[151]:
average_heat_index = merge_weather_df.mean()
average_heat_index
merge_weather_df.mean()
# In[27]:
# country_one, = plt.plot(years, merge_weather_df.loc['JPN',["2013","2014","2015","2016","2017","2018"]], color="green",label="Tokyo Marathon"
# plt.legend()
# plt.show
# In[ ]:
# Select two countries' worth of data.
# NOTE(review): the index was set to "city_code" above, so integer labels
# 1/2/3 are unlikely to exist — these .loc lookups probably raise KeyError.
heat_index_13 = merge_weather_df.loc[1]
heat_index_14 = merge_weather_df.loc[2]
heat_index_15 = merge_weather_df.loc[3]
# Plot with differently-colored markers.
years = [2013, 2014, 2015, 2016, 2017, 2018]
# NOTE(review): x=/y=/kind= are pandas DataFrame.plot keywords, not valid
# arguments to plt.plot — this call does not draw the intended bar chart.
plt.plot(x= ["years"], y=["heat_index_13", "heat_index_14", "heat_index_15"], kind="bar")
# plt.plot(['city'], heat_index_13, 'b-', label='Majors 2013')
# plt.plot(['city'], heat_index_14, 'g-', label='Majors 2014')
# Create legend.
plt.legend()
plt.xlabel('Year')
# In[ ]:
merge_weather_df=merge_weather_df.astype(float)
# In[ ]:
merge_weather_df.plot.bar()
plt.legend(loc="best")
plt.title("Historical Heat Index At The Abbot Majors")
# NOTE(review): the y data is a heat index, not medal counts — label mismatch.
plt.xlabel("Country")
plt.ylabel("Number of Medals")
# In[ ]:
merge_weather_df.columns
# In[ ]:
merge_weather_df.set_index('city')
# In[ ]:
merge_weather_df.plot.bar()
plt.legend(loc="best")
plt.title("Historical Heat Index At The Abbot Majors")
plt.xlabel("Country")
plt.ylabel("Number of Medals")
# In[160]:
rift_dates_csv = "Data/rift_weath_sol_equ.csv"
# In[161]:
rift_dates_df = pd.read_csv(rift_dates_csv)
rift_dates_df
# In[162]:
# Same request pattern as the majors cells, but driven by the rift dates file.
city_rift_list = []
feel_rift_list = []
url = "https://api.worldweatheronline.com/premium/v1/past-weather.ashx"
params = {
    "key": "bc1fb38cb0ec446b83221156192908",
    "tp": 24,
    "format": "json",
}
for index, row in rift_dates_df.iterrows():
    params['q'] = row['city']
    params['date'] = row['sumsol_13']
    response = requests.get(url, params=params).json()
    city_rift_list.append(row['city'])
    feel_rift_list.append((response['data']['weather'][0]['hourly'][0]['FeelsLikeF']))
    pprint(response)
# In[ ]:
# Bug fix: the original printed `temp_rift_list`, a name that is never
# defined anywhere in this file (NameError). The list built above is
# `feel_rift_list`.
print(feel_rift_list)
# In[ ]:
major_dates_df
# In[ ]:
# Stacked bar chart demo (hard-coded sample data, unrelated to the weather
# frames above).
N = 5
menMeans = (20, 35, 30, 35, 27)
womenMeans = (25, 32, 34, 20, 25)
menStd = (2, 3, 4, 1, 2)
womenStd = (3, 5, 2, 3, 3)
ind = np.arange(N)    # the x locations for the groups
width = 0.35       # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, menMeans, width, yerr=menStd)
p2 = plt.bar(ind, womenMeans, width,
             bottom=menMeans, yerr=womenStd)
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.xticks(ind, ('G1', 'G2', 'G3', 'G4', 'G5'))
plt.yticks(np.arange(0, 81, 10))
plt.legend((p1[0], p2[0]), ('Men', 'Women'))
plt.show()
|
from typing import Protocol


class SessionManagerAdapter(Protocol):
    """Structural (duck-typed) interface for an async database-session manager.

    Any object providing these coroutine methods satisfies the protocol;
    no explicit inheritance is required.
    """

    async def get(self):
        """Presumably returns the managed session — confirm against implementers."""
        ...

    async def commit(self):
        """Commit the current transaction."""
        ...

    async def rollback(self):
        """Roll back the current transaction."""
        ...

    async def close(self):
        """Release the session/connection."""
        ...

    async def execute(self, *args, **kwargs):
        """Execute a statement on the underlying session and return its result."""
        ...
|
from GUIControllerAttack import *
from GUImethods import *
def sshAttack(raiz):
    """
    Open the "SSH Attack" configuration window.

    :param raiz: root Tkinter widget (the main GUI window) used as parent.
    :return: nothing; on confirmation it calls the 'controller' function
             defined in 'GUIControllerAttack.py' and closes this window.
    """
    attackWindow = Toplevel(raiz)
    attackWindow.title("SSH Attack")
    attackWindow.wm_resizable(0, 0)
    attackWindow.geometry("400x500")
    icono = PhotoImage(file='attackIcon.png')
    attackWindow.iconphoto(False, icono)
    op = IntVar()
    # Text shown above the basic options
    Label(attackWindow, text="Por favor seleccione una opción: ").pack(pady=10)
    # Basic options: mutually exclusive radio buttons (attack from all hosts,
    # or from one specific host — the latter enables the per-host checkboxes)
    scan_all = Radiobutton(attackWindow, text="Atacar desde todos los hosts atacantes a los objetivos seleccionados", variable=op, value=1,
                           command=lambda: disabled(checkBtns))
    scan_single = Radiobutton(attackWindow, text="Atacar desde un host concreto a los objetivos seleccionados", variable=op, value=2,
                              command=lambda: enabled(checkBtns))
    scan_all.pack()
    scan_single.pack()
    # Action name corresponding to the main-menu choice, passed to the controller
    accion = "SSHAttack"
    # Entry widget for the network to run the SSH attack against
    Redes = []
    Lred = Label(attackWindow, text="Introduzca la red a atacar")
    Red = Entry(attackWindow)
    # Register comprobarIP as a Tcl validation callback, run on focusout;
    # "%s" passes the entry's prior value to the callback
    reg = Red.register(comprobarIP)
    Red.config(validate="focusout", validatecommand=(reg, "%s"))
    print("reg = " + reg)
    print("red.get() = ", Red.get())
    Redes.append(Red)
    # Optional network 1
    Lred1 = Label(attackWindow, text="Introduzca una segunda red alternativa")
    Red1 = Entry(attackWindow)
    Red1.insert(0, "")
    reg1 = Red1.register(comprobarIP)
    Red1.config(validate="focusout", validatecommand=(reg1, "%s"))
    Redes.append(Red1)
    # Optional network 2
    Lred2 = Label(attackWindow, text="Introduzca una tercera red alternativa")
    Red2 = Entry(attackWindow)
    reg2 = Red2.register(comprobarIP)
    Red2.config(validate="focusout", validatecommand=(reg2, "%s"))
    Redes.append(Red2)
    # Get the JSON (dict) with all the information about our attacker devices
    diccionario_atacantes = getAttackHosts()
    # Frame holding a Text list and a vertical scrollbar for the attacker hosts
    lista_atacantes = Frame(attackWindow)
    scrollbar_atacantes = Scrollbar(lista_atacantes)
    scrollbar_atacantes.pack(side=RIGHT, fill=Y, pady=20)
    checklistAtacantes = Text(lista_atacantes, height=20, width=15)
    # Check buttons are generated according to the number of devices in the dict
    variables_atacantes = []
    v = 0
    i = 1
    c = 0
    checkBtns = []
    ids_atacantes = []
    # Loop over every registered device, creating one checkbutton per device
    for atacante_check in diccionario_atacantes.get("hosts"):
        print("Clientes_check: ")
        print(atacante_check)
        variables_atacantes.append(atacante_check.get("id"))
        variables_atacantes[v] = IntVar()
        v += 1
        ids_atacantes.append(atacante_check.get("id"))
        checkBtns.append(
            Checkbutton(checklistAtacantes, text=atacante_check.get("name"), variable=variables_atacantes[c], onvalue=i,
                        state=DISABLED))
        # Embed the checkbutton inside the Text widget so it scrolls with it
        checklistAtacantes.window_create("end", window=checkBtns[c])
        checklistAtacantes.insert("end", "\n")
        # print(variables_clientes[c].get())
        i += 1
        c += 1
    checklistAtacantes.pack(pady=20)
    checklistAtacantes.config(yscrollcommand=scrollbar_atacantes.set)
    scrollbar_atacantes.config(command=checklistAtacantes.yview)
    # Disable the widget so users cannot type free text into it
    checklistAtacantes.configure(state="disabled")
    # Confirmation button: passes the action (SSHAttack), the chosen operation
    # and the checked device IDs to the controller, then destroys the window
    btnConfirmation = Button(attackWindow, text="Confirmar acción", command=lambda: [
        controller(accion, getOpValues(op.get()),
                   hosts_origen=getCheckValuesDevices(variables_atacantes, ids_atacantes, op.get()),
                   hosts_destino=getIP(Redes),
                   GUI=raiz), print(Redes[0]), attackWindow.destroy()])
    # Pack the entry widgets and the buttons
    Lred.pack(pady=15)
    Red.pack(pady=4)
    Lred1.pack(pady=8)
    Red1.pack(pady=4)
    Lred2.pack(pady=8)
    Red2.pack(pady=4)
    btnConfirmation.pack()
    btnConfirmation.pack(pady=20)
    lista_atacantes.pack()
# 1423. Maximum Points You Can Obtain from Cards (Medium)
def maxScore(self, cardPoints: List[int], k: int) -> int:
    """Return the maximum sum of k cards taken from either end of the row.

    Equivalent formulation: total sum minus the minimum sum of the window of
    n - k consecutive cards that remains in the middle.
    """
    size = len(cardPoints)
    total = sum(cardPoints)
    if size == k:
        return total
    window = size - k
    # Seed the window with the first `window` cards, then slide it right.
    window_sum = sum(cardPoints[:window])
    best = total - window_sum
    for right in range(window, size):
        window_sum += cardPoints[right] - cardPoints[right - window]
        best = max(best, total - window_sum)
    return best
# A shorter solution
def maxScore(self, cardPoints: List[int], k: int) -> int:
    """Sliding-window variant: minimize the sum of the n-k cards left behind."""
    window = len(cardPoints) - k
    current = sum(cardPoints[:window])
    smallest = current
    for step in range(k):
        # Shift the window one position to the right.
        current += cardPoints[window + step] - cardPoints[step]
        if current < smallest:
            smallest = current
    return sum(cardPoints) - smallest
# 1052. Grumpy Bookstore Owner (Medium)
def maxSatisfied(self, customers: List[int], grumpy: List[int], X: int) -> int:
    """Return the max number of satisfied customers.

    `base` counts customers served during non-grumpy minutes; a sliding window
    of X minutes finds the best block of grumpy-minute customers to recover.
    """
    base = 0
    best_extra = 0
    window_extra = 0
    for idx in range(len(grumpy)):
        if grumpy[idx] == 0:
            base += customers[idx]
        if idx < X:
            # Still filling the first window.
            best_extra += customers[idx] * grumpy[idx]
        else:
            if idx == X:
                window_extra = best_extra
            window_extra += customers[idx] * grumpy[idx] - customers[idx - X] * grumpy[idx - X]
            best_extra = max(window_extra, best_extra)
    return base + best_extra
# 1004. Max Consecutive Ones III (Medium)
def longestOnes(self, A: List[int], K: int) -> int:
    """Return the length of the longest subarray with at most K zeros flipped to ones.

    Non-shrinking sliding window: `start` only advances when the window holds
    more than K zeros, so the final window size is the best seen.

    FIX: the original raised NameError on empty input (`i` was referenced at
    the return without ever being bound by the loop); now returns 0.
    """
    if not A:
        return 0
    start = 0
    for i in range(len(A)):
        K -= 1 - A[i]           # consume a flip when A[i] == 0
        if K < 0:
            K += 1 - A[start]   # give the flip back if start pointed at a 0
            start += 1
    return i - start + 1
# 485. Max Consecutive Ones (Easy)
def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
    """Return the length of the longest run of consecutive 1s in nums."""
    longest = 0
    run = 0
    for value in nums:
        if value == 1:
            run += 1
        else:
            longest = max(longest, run)
            run = 0
    # A trailing run of 1s never hits the else branch; account for it here.
    return max(longest, run)
import subprocess
def get_firmware_url():
    """Return the download URL of the pinned esp8266 firmware image (v0.3.0)."""
    release_url = ('https://github.com/makestack/esp8266-firmware'
                   '/releases/download/v0.3.0/firmware.bin')
    return release_url
def install(serial, firmware_path):
    """Flash the firmware image at `firmware_path` onto the device on `serial`.

    Uses esptool's legacy CLI flags: -cd ck = reset method, -cb = baud rate,
    -cp = serial port, -ca = flash start address, -cf = firmware file.
    NOTE(review): the return code is not checked, so a failed flash passes
    silently — consider subprocess.run(..., check=True).
    """
    subprocess.run(['esptool', '-v', '-cd', 'ck', '-cb', '115200',
                    '-cp', serial, '-ca', '0x00000', '-cf', firmware_path])
|
"""%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
PARAMETERSPACE_AGNfitter.py
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script contains all functions used by the MCMC machinery to explore the parameter space
of AGNfitter.
It contains:
* Initializing a point on the parameter space
* Calculating the likelihood
* Making the next step
* Deciding when the burn-in is finished and start MCMC sampling
"""
from __future__ import division
import pylab as pl
import numpy as np
from math import pi
import time
import pickle
import MODEL_AGNfitter2 as model
def Pdict(data):
    """
    Construct the parameter-space dictionary P for one source.

    Each key holds a tuple whose length is the number of model parameters:
        names : parameter names (LaTeX labels where applicable)
        min   : minimum value allowed for each parameter
        max   : maximum value allowed for each parameter

    :param data: source data object; only data.z (redshift) is used here, to
                 cap the galaxy age at the age of the universe at that z.
    :return: adict P with parameter names and bounds.

    FIX: removed the unused local Npar (computed but never used/returned).
    """
    P = adict()
    # ----------------------------|  tau  |  age  |  N_H  | irlum |  SB |  BB |  GA |  TO | E(B-V)_bbb | E(B-V)_gal |
    P.names = r'$\tau$', 'age', r'N$_{\rm H}$', 'irlum', 'SB', 'BB', 'GA', 'TO', r'E(B-V)$_{bbb}$', r'E(B-V)$_{gal}$'
    P.min = 0, 6, 21, 7, 0, 0, 0, 0, 0, 0.
    # The age upper bound depends on the source redshift (age of the universe).
    P.max = 3.5, np.log10(model.maximal_age(data.z)), 25, 15, 10, 1, 10, 10, 0.1, 1.5
    return P
def ymodel(data_nus, z, dictkey_arrays, dict_modelfluxes, *par):
    """
    Construct the total model SED amplitude from the parameter values.

    :param data_nus: data frequencies (kept for interface compatibility)
    :param z: source redshift (kept for interface compatibility)
    :param dictkey_arrays: (gal_do, irlum_dict, nh_dict, BBebv_dict) lookup helpers
    :param dict_modelfluxes: model flux dictionaries (first four entries used)
    :param par: MCMC parameter vector; first 10 entries are consumed
    :return: total model amplitude as a 1-D numpy array
    """
    STARBURSTFdict, BBBFdict, GALAXYFdict, TORUSFdict, _, _, _, _ = dict_modelfluxes
    gal_do, irlum_dict, nh_dict, BBebv_dict = dictkey_arrays
    # MCMC-parameter values
    tau, agelog, nh, irlum, SB, BB, GA, TO, BBebv, GAebv = par[0:10]
    age = 10**agelog
    # Pick dictionary key-values nearest to the MCMC parameter values
    irlum_dct = model.pick_STARBURST_template(irlum, irlum_dict)
    nh_dct = model.pick_TORUS_template(nh, nh_dict)
    ebvbbb_dct = model.pick_BBB_template(BBebv, BBebv_dict)
    gal_do.nearest_par2dict(tau, age, GAebv)
    tau_dct, age_dct, ebvg_dct = gal_do.t, gal_do.a, gal_do.e
    # Fetch the component fluxes from the dictionaries using the key values
    try:
        bands, gal_Fnu = GALAXYFdict[tau_dct, age_dct, ebvg_dct]
        bands, sb_Fnu = STARBURSTFdict[irlum_dct]
        bands, bbb_Fnu = BBBFdict[ebvbbb_dct]
        bands, tor_Fnu = TORUSFdict[nh_dct]
    except ValueError:
        # FIX: use the print() function form (the original Python-2-only print
        # statement breaks on Python 3) and re-raise instead of falling through
        # to a confusing NameError on the undefined flux variables below.
        print('Error: Dictionary does not contain some values')
        raise
    # Renormalize to comparable amplitudes. Keep these constants fixed!
    sb_Fnu_norm = sb_Fnu.squeeze() / 1e20
    bbb_Fnu_norm = bbb_Fnu.squeeze() / 1e60
    gal_Fnu_norm = gal_Fnu.squeeze() / 1e18
    tor_Fnu_norm = tor_Fnu.squeeze() / 1e-40
    # Total SED: amplitude-weighted sum of the four components
    lum = 10**(SB) * sb_Fnu_norm + 10**(BB) * bbb_Fnu_norm + 10**(GA) * gal_Fnu_norm + (10**TO) * tor_Fnu_norm
    lum = lum.reshape((np.size(lum),))
    return lum
def ln_prior(dict_modelsfiles, dict_modelfluxes, z, P, pars):
    """
    Natural log of the prior probability of the parameter vector `pars`.

    Returns 0. (flat prior) when every parameter lies strictly inside the
    box P.min/P.max AND the implied galaxy B-band magnitude is not
    implausibly bright versus the luminosity-function expectation;
    returns -inf otherwise.
    """
    # Hard box prior on each parameter
    for i, p in enumerate(pars):
        if not (P.min[i] < p < P.max[i]):
            return -np.inf
    # B-band expectations
    B_band_expected, B_band_thispoint = galaxy_Lumfct_prior(dict_modelsfiles, dict_modelfluxes, z, *pars)
    # If the B-band magnitude at this trial is more than 5 mag brighter than
    # expected from the luminosity function, reject the point
    # (magnitudes: smaller value = brighter).
    if B_band_thispoint < (B_band_expected - 5):  # 2.5):
        return -np.inf
    return 0.
def ln_likelihood(pars, x, y, ysigma, z, dictkey_arrays, dict_modelfluxes):
    """
    Natural log of the likelihood: -0.5 * chi^2 between data and model.

    Only data points with real measurements (y > -99, i.e. no missing-value
    sentinels) and frequencies free of IGM absorption (below the Ly-z limit,
    10**15.38 Hz restframe) enter the chi^2 sum.
    """
    y_model = ymodel(x, z, dictkey_arrays, dict_modelfluxes, *pars)
    indices = np.arange(len(x))
    usable = indices[(x < np.log10(10**(15.38) / (1 + z))) & (y > -99.)]
    residuals = [(y[j] - y_model[j]) / ysigma[j] for j in usable]
    return -0.5 * np.dot(residuals, residuals)
def ln_probab(pars, x, y, ysigma, z, dictkey_arrays, dict_modelfluxes, P):
    """
    Natural log of the posterior: ln prior + ln likelihood.

    Returns -inf whenever the prior already excludes the point, skipping the
    (expensive) likelihood evaluation in that case.
    """
    prior = ln_prior(dictkey_arrays, dict_modelfluxes, z, P, pars)
    if not np.isfinite(prior):
        return -np.inf
    return prior + ln_likelihood(pars, x, y, ysigma, z, dictkey_arrays, dict_modelfluxes)
"""--------------------------------------
Functions to obtain initial positions
--------------------------------------"""
def get_initial_positions(nwalkers, P):
    """
    Draw initial walker positions.

    Each parameter is drawn uniformly within +/-1 of the midpoint of its
    allowed range [P.min, P.max].

    :param nwalkers: number of emcee walkers
    :param P: parameter dictionary with .names, .min and .max tuples
    :return: array of shape (nwalkers, n_parameters)
    """
    n_params = len(P.names)
    draws = np.random.uniform(size=(nwalkers, n_params))
    for j in range(n_params):
        midpoint = 0.5 * (P.max[j] + P.min[j])
        # Map U(0,1) draws onto midpoint +/- 1 (fixed unit spread).
        draws[:, j] = midpoint + (2 * draws[:, j] - 1) * (1)
    return draws
def get_best_position(filename, nwalkers, P):
    """
    Restart positions from a saved (pickled) sampler state.

    Loads the chain from `filename`, finds the maximum-likelihood sample,
    stores it in P.ml (side effect), and returns `nwalkers` positions drawn
    from a tight Gaussian ball (sigma = 1e-5) around that best point.
    """
    Npar = len(P.names)
    # all saved vectors
    f = open(filename, 'rb')
    samples = pickle.load(f)
    f.close()
    # index of the largest likelihood
    i = samples['lnprob'].ravel().argmax()
    # the parameter values at this index; stored on P as a side effect
    P.ml = samples['chain'].reshape(-1, Npar)[i]
    p1 = np.random.normal(size=(nwalkers, Npar))
    for i in range(Npar):
        p = P.ml[i]
        # tiny perturbation around the best point for each walker
        p1[:, i] = p + 0.00001 * p1[:, i]
    return p1
def get_best_position_4mcmc(filename, nwalkers, P):
    """
    Starting positions for the MCMC stage from a saved burn-in state.

    FIX: the body was a line-for-line duplicate of get_best_position; it now
    delegates to it (same behavior: sets P.ml and returns a Gaussian ball of
    `nwalkers` positions around the maximum-likelihood sample). Kept as a
    separate entry point for interface compatibility.
    """
    return get_best_position(filename, nwalkers, P)
def galaxy_Lumfct_prior(dict_modelsfiles, dict_modelfluxes, z, *par):
    """
    Compare the model galaxy's B-band absolute magnitude against the value
    expected from the galaxy luminosity function at redshift z.

    :return: (expected, thispoint) — the expected B-band absolute magnitude
             and the magnitude implied by the current parameter vector.

    FIX: removed dead code (mag1/distmod/abs_mag1/thispoint1 were computed
    but never used in the return value).
    """
    h_70 = 1.  # dimensionless Hubble parameter (H0 / 70)
    distance = model.z2Dlum(z)
    lumfactor = (4. * pi * distance**2.)
    # Galaxy-only flux at this point of parameter space
    bands, gal_flux = galaxy_flux(dict_modelsfiles, dict_modelfluxes, *par)
    bands = np.array(bands)
    # B band: log10(nu) between 14.790 and 14.870
    flux_B = gal_flux[(14.790 < bands) & (bands < 14.870)]
    lum_B = lumfactor * flux_B
    abs_mag = 51.6 - 2.5 * np.log10(lum_B)
    thispoint = abs_mag
    # Expected B-band magnitude from the luminosity function
    expected = -20.3 - (5 * np.log10(h_70)) - (1.1 * z)
    return expected, thispoint
def galaxy_flux(dictkey_arrays, dict_modelfluxes, *par):
    """
    Compute the amplitude-scaled galaxy-only model flux.

    :param dictkey_arrays: (gal_do, irlum_dict, nh_dict, BBebv_dict); only
                           gal_do (galaxy template lookup helper) is used.
    :param dict_modelfluxes: model flux dictionaries; only GALAXYFdict is used.
    :param par: full MCMC parameter vector (first 10 entries consumed).
    :return: (bands, gal_flux)
    """
    # call dictionary helpers
    gal_do, _, _, _ = dictkey_arrays
    STARBURSTFdict, BBBFdict, GALAXYFdict, TORUSFdict, _, _, _, _ = dict_modelfluxes
    # parameters coming from emcee
    tau, agelog, nh, irlum, SB, BB, GA, TO, BBebv, GAebv = par[0:10]
    age = 10**agelog
    # snap (tau, age, E(B-V)) to the nearest dictionary key values
    gal_do.nearest_par2dict(tau, age, GAebv)
    tau_dct, age_dct, ebvg_dct = gal_do.t, gal_do.a, gal_do.e
    # look up the template flux; apply the same fixed renormalization (1e18)
    # used in ymodel, then scale by the galaxy amplitude 10**GA
    bands, gal_Fnu = GALAXYFdict[tau_dct, age_dct, ebvg_dct]
    gal_Fnu_norm = gal_Fnu / 1e18
    gal_flux = 10**(GA) * gal_Fnu_norm
    return bands, gal_flux
class adict(dict):
    """A dictionary with attribute-style access. It maps attribute
    access to the real dictionary.
    This class has been obtained from the Barak package by Neil Chrighton)
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
    # the following two methods allow pickling
    def __getstate__(self):
        """Prepare a state of pickling."""
        # NOTE(review): on Python 3 .items() is a view object; list(...) would
        # be safer to pickle — confirm which interpreter version is targeted.
        return self.__dict__.items()
    def __setstate__(self, items):
        """ Unpickle. """
        for key, val in items:
            self.__dict__[key] = val
    # Plain pass-throughs to dict; kept for symmetry with the attribute hooks.
    def __setitem__(self, key, value):
        return super(adict, self).__setitem__(key, value)
    def __getitem__(self, name):
        return super(adict, self).__getitem__(name)
    def __delitem__(self, name):
        return super(adict, self).__delitem__(name)
    def __setattr__(self, key, value):
        # Attribute writes go into the dict itself, EXCEPT when the name
        # already exists as a real attribute/method — this prevents keys
        # from clobbering dict methods.
        if hasattr(self, key):
            # make sure existing methods are not overwritten by new
            # keys.
            return super(adict, self).__setattr__(key, value)
        else:
            return super(adict, self).__setitem__(key, value)
    # Attribute reads fall back to item lookup (a missing key raises KeyError,
    # not AttributeError).
    __getattr__ = __getitem__
    def copy(self):
        """ Return a copy of the attribute dictionary.
        Does not perform a deep copy
        """
        return adict(self)
|
# (0). Choose a word bank.
# This program uses the requests module.
# First download the resource with requests, then parse it with res.json().
# Let the user pick the word bank to test by typing its number, which selects
# the bank's code.
# Hint: remember to wrap input() in int() to convert the type.
import requests
link = requests.get('https://www.shanbay.com/api/v1/vocabtest/category/')
# Download the resource first.
js_link = link.json()
# Parse the downloaded content (JSON -> Python objects).
bianhao = int(input('''请输入你选择的词库编号,按Enter确认
1,GMAT 2,考研 3,高考 4,四级 5,六级
6,英专 7,托福 8,GRE 9,雅思 10,任意
>'''))
# Let the user choose the desired word bank by number; int() converts the input.
import os
import pygame
import pygame.mixer
import random
import sys
#required
# required: initialize all pygame modules before any display/font/sound use
pygame.init();
# colors (RGB)
white = (255, 255, 255)
# window size in pixels
X_MAX = 950
Y_MAX = 472
# position vars (pixels)
background_x = 0
background_y = 0
mich_x = 75   # player start position
mich_y = 100
block_init_y_top = 0       # y of obstacles hanging from the top edge
block_init_y_bottom = 375  # y of obstacles rising from the bottom edge
ref_init_y_bottom = 300    # y of the bottom referee obstacles
win_screen_x = 53          # x offset of the win end-screen image
lose_screen_x = 130        # x offset of the game-over end-screen image
# sprite groups: everything that is updated/drawn, and the subset of
# obstacles that damages the player on contact
everything = pygame.sprite.Group()
enemies = pygame.sprite.Group()
# main player, automatically falls and progresses across screen
class Michigan(pygame.sprite.Sprite):
    """Player sprite: falls each frame and creeps rightward across the field."""

    def __init__(self, x, y, image):
        super(Michigan, self).__init__()
        self.image = image
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.health = 100  # damage pool; the game ends when it reaches 0
        self.add(everything)

    # FIX: the original also defined a rect() method here, but the instance
    # attribute self.rect assigned in __init__ shadowed it, making it
    # unreachable dead code — it has been removed.

    def update(self):
        """Per-frame motion: gravity (+5 y) plus steady forward progress (+1 x)."""
        self.rect.y += 5
        self.rect.x += 1

    def reduce_health(self):
        """Apply collision damage."""
        self.health -= 5
# obstacles, automatically progress toward player
class Block(pygame.sprite.Sprite):
    """Obstacle sprite: scrolls left toward the player and respawns when it
    leaves the screen."""

    def __init__(self, x, y, image):
        super(Block, self).__init__()
        self.image = image
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        # Registered both for drawing/updates and for collision damage.
        self.add(everything)
        self.add(enemies)

    # FIX: the original also defined a rect() method here, shadowed by the
    # self.rect attribute set in __init__ — unreachable dead code, removed.

    def update(self):
        """Scroll left 5 px/frame; respawn at a random x once off-screen."""
        self.rect.x -= 5
        if self.rect.x < 0:
            self.rect.x = random.randint(300, 950)
# obstacle inherited from Block, moves faster
class Referee(Block):
    """Faster obstacle: identical to Block but scrolls at double speed."""

    def __init__(self, x, y, image):
        # FIX: call the base initializer via super() instead of the unbound
        # Block.__init__ call, consistent with the other sprite classes.
        super(Referee, self).__init__(x, y, image)

    def update(self):
        """Scroll left 10 px/frame; respawn at a random x once off-screen."""
        self.rect.x -= 10
        if self.rect.x < 0:
            self.rect.x = random.randint(300, 950)
# load images needed for the game
def load_images():
    """Load and alpha-convert every image asset from the ./images directory.

    Returns a dict keyed by asset name. Must run after the display is created:
    convert_alpha() requires an initialized display surface.
    """
    def load_image(image_name):
        img = pygame.image.load(os.path.join('images', image_name))
        img = img.convert_alpha()
        return img
    return {'Michigan_Wolverines_Field': load_image('Michigan_Wolverines_Field.bmp'),
            'Harbaugh': load_image('Harbaugh.png'), 'block': load_image('block.bmp'),
            'Game_Over': load_image('Game_Over.bmp'), 'ref_top': load_image('ref_top.png'),
            'ref_bottom': load_image('ref_bottom.png'), 'Win_Screen': load_image('Win_Screen.png')}
# main play game function
def main():
    """Run one full game session: window setup, sprite creation, the event
    loop, collision/health tracking, and the win/lose end screens (which
    call exit() and never return)."""
    # create the window and caption
    gameDisplay = pygame.display.set_mode((X_MAX, Y_MAX))
    pygame.display.set_caption('B1G Flappy Bird')
    gameDisplay.fill(white)
    pygame.display.update()
    # create the dictionary of photos
    images = load_images()
    # create the player and the blocks, randomizing start x for blocks
    player = Michigan(mich_x, mich_y, images['Harbaugh'])
    for i in range(3):
        pos_init = random.randint(300, 950)
        Block(pos_init, block_init_y_top, images['block'])
        Block(pos_init, block_init_y_bottom, images['block'])
    for i in range(2):
        pos_init_1 = random.randint(400, 950)
        pos_init_2 = random.randint(400, 950)
        Referee(pos_init_1, block_init_y_top, images['ref_top'])
        Referee(pos_init_2, ref_init_y_bottom, images['ref_bottom'])
    # display health score on screen
    health_font = pygame.font.SysFont(None, 32, bold=True)
    health_surface = health_font.render('Health: ' + str(player.health), True, (0, 0, 0))
    gameDisplay.blit(health_surface, (100, 450))
    # boolean vars for game loop, losing, and winning
    gameExit = False
    Lose = False
    Win = False
    # load sounds and play background music
    background_sound = pygame.mixer.Sound('stadium_sound.wav')
    background_sound.play()
    collision_sound = pygame.mixer.Sound('referee.wav')
    # game play loop
    while not gameExit:
        # check for quit and up arrow action from user
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    # "flap": instant upward jump of 75 px
                    player.rect.y -= 75
        # collision detection, play sound and reduce health
        for enemy in enemies:
            if pygame.sprite.collide_rect(player, enemy):
                player.reduce_health()
                collision_sound.play()
                health_surface = health_font.render('Health: ' + str(player.health), True, (0, 0, 0))
                gameDisplay.blit(health_surface, (100, 450))
        # if player's health runs out or goes off screen, lose game
        if player.health <= 0:
            Lose = True
        # NOTE(review): exact-equality boundary checks; y moves in steps of 5
        # and -75, x in steps of 1 — confirm 475/0/760 are always reachable.
        if player.rect.y == 475 or player.rect.y == 0:
            Lose = True
        # if player makes it to the endzone
        if player.rect.x == 760:
            Win = True
        # if player loses the game
        if Lose == True:
            # kill objects on screen
            for sprite in everything.sprites():
                sprite.kill()
            # load background image and text to display
            gameDisplay.fill(white)
            gameDisplay.blit(images['Game_Over'], (lose_screen_x, background_y))
            font = pygame.font.SysFont('times new roman', 32)
            score_font = pygame.font.SysFont('times new roman', 40, True)
            over_surface = font.render('\"I\'m bitterly disappointed', True, white)
            gameDisplay.blit(over_surface, (490, 100))
            over_surface = font.render('with the officiating', True, white)
            gameDisplay.blit(over_surface, (515, 140))
            over_surface = font.render('of this game.\"', True, white)
            gameDisplay.blit(over_surface, (530, 180))
            over_surface = score_font.render('Try again!', True, white)
            gameDisplay.blit(over_surface, (590, 220))
            pygame.display.update()
            exit()
        # if player wins the game
        if Win == True:
            # kill objects on screen
            for sprite in everything.sprites():
                sprite.kill()
            # load background image and text to display
            gameDisplay.fill(white)
            gameDisplay.blit(images['Win_Screen'], (win_screen_x, background_y))
            font = pygame.font.SysFont('times new roman', 32)
            score_font = pygame.font.SysFont('times new roman', 40, True)
            over_surface = font.render('You attacked this game', True, white)
            gameDisplay.blit(over_surface, (80, 100))
            over_surface = font.render('with an enthusiasm', True, white)
            gameDisplay.blit(over_surface, (80, 150))
            over_surface = font.render('unknown to mankind!', True, white)
            gameDisplay.blit(over_surface, (80, 200))
            over_surface = score_font.render('Score: ' + str(player.health), True, white)
            gameDisplay.blit(over_surface, (115, 250))
            pygame.display.update()
            exit()
        # update the screen and images
        everything.update()
        everything.draw(gameDisplay)
        pygame.display.flip()
        # NOTE(review): the background is blitted AFTER flip(), so it shows one
        # frame late — confirm the intended draw order.
        gameDisplay.blit(images['Michigan_Wolverines_Field'], (background_x, background_y))
        gameDisplay.blit(images['Harbaugh'], player.rect)
        gameDisplay.blit(health_surface, (100, 450))
# run the game (main() only returns via the window-close path; the win/lose
# branches call exit() directly)
main()
# required: shut pygame down cleanly, then leave the interpreter
pygame.quit()
quit()
|
import mysql.connector
# Connection settings.
# NOTE(review): database and user are empty placeholders and the password is
# hard-coded in source — load real credentials from configuration/environment.
conexion = mysql.connector.connect(
    host='cloud.eant.tech',
    database='',
    user='',
    password='eantpass')
cursor = conexion.cursor()
# Fetch first/last names of students whose surname starts with 'r'.
sql = "SELECT nombre, apellido FROM alumnos where apellido like 'r%'"
cursor.execute(sql)
# After execute() the cursor is iterable; each row is a tuple.
for alumno in cursor:
    print(alumno)
# Release the cursor and the connection.
cursor.close()
conexion.close()
|
from urllib import request, parse

# POST a form-encoded payload to httpbin and echo the response.
url = 'http://httpbin.org/post'
# FIX: the header name was 'User-Agent ' (trailing space), which produces a
# malformed "User-agent :" header line on the wire.
# NOTE(review): the UA value 'MSIE S. S' looks like a typo for 'MSIE 5.5' —
# left unchanged pending confirmation.
headers = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE S. S; Windows NT)',
    'Host': 'httpbin.org'}
# FIX: renamed from `dict` to avoid shadowing the builtin.
payload = {'name': 'Germey'}
data = bytes(parse.urlencode(payload), encoding='utf-8')
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
# FIX: normalized the codec name (was ' utf-8' with a stray leading space).
print(response.read().decode('utf-8'))
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 22:25:01 2020
@author: zeh0814
"""
def main(x, target):
    """
    Build an arithmetic expression string using only the number x and the
    operators + - * / (no parentheses) that evaluates to `target`, preferring
    the candidate with the fewest operators.
    """
    import functools
    import math

    # Compare the total number of '+', '-', '*', '/' characters used in each
    # string and return the one with fewer operators.
    def CompareLen(str1, str2):
        sum1, sum2 = 0, 0
        for i in {'+', '-', '*', '/'}:
            sum1 = sum1 + str1.count(i)
            sum2 = sum2 + str2.count(i)
        if sum1 < sum2:
            return str1
        else:
            return str2

    @functools.lru_cache(None)
    # recursion + memoized dynamic programming
    def dp(t):
        if t == 0:
            return ''
        if t < x:  # target value is smaller than the divisor x
            # Either t copies of x/x summed, or x minus (x-t) copies of x/x.
            str1 = (str(x) + '/' + str(x) + '+') * t
            str1 = str1[:-1]
            str2 = str(x) + ('-' + str(x) + '/' + str(x)) * (x - t)
            return CompareLen(str1, str2)
        lo = int(math.log(t, x))
        po = pow(x, lo)
        str3 = (str(x) + '*') * lo
        str3 = str3[:-1]
        if t == po:  # t is exactly a power of x: emit the product directly
            return str3
        # t is not a power of x: cover the gap (t - po), or overshoot by (po*x - t)
        res = str3 + '+' + dp(t - po)
        left = po * x - t
        if left < t:  # only worth considering when left < t; otherwise keep res
            if '-' not in dp(left):
                str4 = str3 + '*' + str(x) + '-' + dp(left)
            else:
                # Subtracting a subtraction: flip every inner '-' to '+'.
                s = dp(left)
                s = s.replace('-', '+')
                str4 = str3 + '*' + str(x) + '-' + s
            res = CompareLen(res, str4)  # compare total operator counts
        # Strip a dangling trailing operator, if any.
        if res[-1] not in {'+', '-', '*', '/'}:
            return res
        else:
            return res[:-1]

    return dp(target)


# Read "x target" from stdin and print the resulting expression.
x, target = map(int, input().split())
print(main(x, target))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 17:31:41 2021
@author: alysonweidmann
"""
|
# -*- coding: utf8 -*-
# org.onap.vnfrqts/requirements
# ============LICENSE_START====================================================
# Copyright © 2018 AT&T Intellectual Property. All rights reserved.
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the "License");
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the "License");
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
"""
This script should be run before every commit to ensure proper
standards are being followed within the project. The script
will also automatically fix certain issues when they are encountered, and
warn about other issues it cannot automatically resolve.
Warnings:
- Requirement missing required attributes
- Invalid values for attributes
- Invalid section header usage in any file
- :keyword: and requirement mismatch
Auto Updates:
- Assigning :id: on new requirements where an ID missing
- Adding :introduced: attribute on new requirements
- Adding/correcting :updated: attribute on changed requirements
"""
import os
import random
import re
import sys
from abc import ABC, abstractmethod
from collections import OrderedDict, deque
from pathlib import Path
from typing import Deque, List, Mapping, Callable, Set
import requests
# Repository-relative paths used to locate the Sphinx configuration.
THIS_DIR = Path(__file__).parent
CONF_PATH = THIS_DIR / "docs/conf.py"
# Published machine-readable requirements export (sphinx-needs) on ONAP Nexus.
NEEDS_JSON_URL = (
    "https://nexus.onap.org/service/local/repositories/raw/content"
    "/org.onap.vnfrqts.requirements/master/needs.json"
)
# RST section underline characters, in nesting order.
HEADING_LEVELS = ("-", "^", "~", "+", "*", '"')
SPACES = re.compile(r"\s+")
# Matches the opening line of a ".. req::" directive.
REQ_DIRECTIVE_PATTERN = re.compile(r"\.\.\s+req::.*")
# Matches an indented ":attr: value" option line inside a directive.
ATTRIBUTE_PATTERN = re.compile(r"^\s+(:\w+:)\s+(.*)$")
# Extracts the version = '...' assignment from docs/conf.py.
VERSION_PATTERN = re.compile(r"version\s+=\s+'(.*?)'")
# RFC-2119-style keywords permitted in :keyword:.
VALID_KEYWORDS = ("MUST", "MUST NOT", "SHOULD", "SHOULD NOT", "MAY", "MAY NOT")
# Known ONAP release names; conf.py's version must be one of these.
VALID_VERSIONS = (
    "amsterdam",
    "beijing",
    "casablanca",
    "dublin",
    "el alto",
    "frankfurt",
    "guilin",
    "honolulu"
)
# Attributes every requirement directive must carry.
REQUIRED_ATTRIBUTES = (":keyword:", ":target:", ":id:")
# Permitted :target: values.
VALID_TARGETS = (
    "VNF",
    "PNF",
    "VNF or PNF",
    "VNF DOCUMENTATION PACKAGE",
    "PNF DOCUMENTATION PACKAGE",
    "VNF or PNF DOCUMENTATION PACKAGE",
    "VNF PROVIDER",
    "PNF PROVIDER",
    "VNF or PNF PROVIDER",
    "VNF CSAR PACKAGE",
    "PNF CSAR PACKAGE",
    "VNF or PNF CSAR PACKAGE",
    "VNF HEAT PACKAGE",
)
# Permitted :validation_mode: values.
VALID_VALIDATION_MODES = ("static", "none", "in_service")
def check(predicate: bool, msg: str):
    """Raise RuntimeError with the given msg unless `predicate` is truthy."""
    if predicate:
        return
    raise RuntimeError(msg)
def get_version() -> str:
    """
    Return the `version` value parsed from docs/conf.py.

    Exits the whole script with status 1 when the version is not listed in
    VALID_VERSIONS (the script must be updated for each new release).
    """
    with open(CONF_PATH) as f:
        for line in f:
            m = VERSION_PATTERN.match(line)
            if m:
                version = m.groups()[0]
                if version not in VALID_VERSIONS:
                    print(
                        f"ERROR: {version} in conf.py is not defined in "
                        f"VALID_VERSIONS. Update the script to continue"
                    )
                    sys.exit(1)
                return version
    # NOTE(review): implicitly returns None when no version line is found —
    # callers assume a str; confirm conf.py always defines `version`.
VERSION = get_version()
def normalize(text: str):
    """
    Lowercase the text, strip inline-formatting characters (backticks,
    asterisks, quotes) and collapse line breaks/repeated spaces, so that
    minor formatting edits do not register as meaningful changes.
    """
    flattened = text.lower().replace("\n", " ")
    stripped = re.sub(r'[`*\'"]', "", flattened)
    return re.sub(r"\s+", " ", stripped)
def warn(path: str, msg: str, req: "RequirementDirective" = None):
    """
    Print a formatted warning line for the given file path; the requirement
    ID is included when a directive (with a non-empty ID) is supplied.
    """
    if req:
        req_id = req.requirement_id or "UNKNOWN"
    else:
        req_id = "UNKNOWN"
    print(f"WARNING: {path} | {req_id} | {msg}")
class RequirementRepository:
    """
    Loads needs.json (fetched from Nexus unless `data` is injected) and
    provides queries over the published requirement set.
    """

    def __init__(self, data=None):
        # When no data is injected, fetch the published needs.json.
        self.data = data or requests.get(NEEDS_JSON_URL).json()
        # Every requirement ID ever used, across all published versions.
        self.all_ids = {
            r["id"]
            for version in self.data["versions"].values()
            for r in version["needs"].values()
        }

    @property
    def current_requirements(self) -> Mapping:
        """
        Returns the requirements specified by current_version in needs.json.
        """
        version = self.data["current_version"]
        return self.data["versions"][version]["needs"]

    @property
    def unique_targets(self) -> Set[str]:
        """All distinct :target: values among the current requirements."""
        return {r["target"] for r in self.current_requirements.values()}

    @property
    def unique_validation_modes(self) -> Set[str]:
        """All distinct :validation_mode: values among the current requirements."""
        return {r["validation_mode"] for r in self.current_requirements.values()}

    def create_id(self) -> str:
        """
        Generates a requirements ID that has not been used in any version
        of the requirements; the new ID is reserved in all_ids.
        """
        while True:
            new_id = "R-{:0>5d}".format(random.randint(0, 99999))
            if new_id in self.all_ids:
                continue  # collision: generate another one
            self.all_ids.add(new_id)
            return new_id

    def is_new_requirement(self, req: "RequirementDirective") -> bool:
        """True when the directive's ID is absent from the current requirements."""
        return req.requirement_id not in self.current_requirements

    def has_changed(self, req: "RequirementDirective") -> bool:
        """
        Returns True if the requirement already exists and the contents has
        meaningfully changed. Small changes in spaces or formatting are not
        considered.
        """
        current_req = self.current_requirements.get(req.requirement_id)
        if not current_req:
            return False
        # FIX: was `==`, which returned True for UNCHANGED requirements and
        # False for changed ones — the exact inverse of the documented
        # contract (and of what callers adding :updated: need).
        return normalize(current_req["description"]) != normalize("".join(req.content))
class RequirementDirective:
"""
Data structure to hold a .. req:: directive
"""
ATTRIBUTE_ORDER = (
":id:",
":target:",
":keyword:",
":introduced:",
":updated:",
":validation_mode:",
":impacts:",
)
def __init__(self, path: str):
self.path = path
self.attributes = OrderedDict()
self.content = []
self.indent = None
@property
def requirement_id(self) -> str:
return self.attributes.get(":id:", "")
@requirement_id.setter
def requirement_id(self, r_id: str):
self._update(":id:", r_id)
@property
def keyword(self) -> str:
return self.attributes.get(":keyword:", "")
@keyword.setter
def keyword(self, k: str):
self._update(":keyword:", k)
@property
def target(self) -> str:
return self.attributes.get(":target:", "")
@target.setter
def target(self, value: str):
self._update(":target", value)
@property
def introduced(self) -> str:
return self.attributes.get(":introduced:", "")
@introduced.setter
def introduced(self, version: str):
self._update(":introduced:", version)
@property
def updated(self) -> str:
return self.attributes.get(":updated:", "")
@updated.setter
def updated(self, version: str):
self._update(":updated:", version)
@property
def validation_mode(self) -> str:
return self.attributes.get(":validation_mode:", "")
@validation_mode.setter
def validation_mode(self, value: str):
self._update(":validation_mode:", value)
def parse(self, lines: Deque[str]):
"""
Parses a ..req:: directive and populates the data structre
"""
parsing_attrs = True
while lines:
line = lines.popleft()
match = ATTRIBUTE_PATTERN.match(line) if parsing_attrs else None
if match:
self.indent = self.indent or self.calc_indent(line)
attr, value = match.groups()
self.attributes[attr] = value
else:
parsing_attrs = False # passed attributes, capture content
if line.strip() and self.calc_indent(line) < self.indent:
# past end of the directive so we'll put this line back
lines.appendleft(line)
break
else:
self.content.append(line)
def format_attributes(self) -> List[str]:
"""
Converts a dict back to properly indented lines using ATTRIBUTE_ORDER
"""
spaces = " " * self.indent
attr_lines = []
for key in self.ATTRIBUTE_ORDER:
value = self.attributes.get(key)
if value:
attr_lines.append(f"{spaces}{key} {value}\n")
return attr_lines
@staticmethod
def calc_indent(line: str) -> int:
"""
Number of leading spaces of the line
"""
return len(line) - len(line.lstrip())
def __str__(self):
    # Reconstructed directive text: normalised attribute lines followed by
    # the (untouched) body lines.
    return "".join(self.format_attributes() + self.content)
def _notify(self, field, value):
    # Audit line for every mutation so the check-up report shows what changed.
    req_id = self.requirement_id or "UNKNOWN"
    print(f"UPDATE: {self.path} | {req_id} | Setting {field} to {value}")
def _update(self, attr, value):
    # Single choke-point for attribute writes: mutate, then log via _notify().
    self.attributes[attr] = value
    self._notify(attr, value)
class RequirementVisitor:
    """
    Walks a directory for reStructuredText files and passes contents to
    visitors when the content is encountered.

    Types of visitors supported:
    - Requirement: Take the path and a RequirementDirective which may be modified
      If modified, the file will be updated using the modified directive
    - Post Processor: Take the path and all lines for processing; returning a
      potentially changed set of lines
    """
    def __init__(
        self,
        req_visitors: List[Callable[[str, RequirementDirective], None]],
        post_processors: List[Callable[[str, List[str]], List[str]]],
    ):
        # `or []` keeps iteration safe when a caller passes None.
        self.req_visitors = req_visitors or []
        self.post_processors = post_processors or []
    def process(self, root_dir: Path):
        """
        Walks the `root_dir` looking for rst files to parse
        """
        for dir_path, sub_dirs, filenames in os.walk(root_dir.as_posix()):
            for filename in filenames:
                if filename.lower().endswith(".rst"):
                    self.handle_rst_file(os.path.join(dir_path, filename))
    @staticmethod
    def read(path):
        """Read file at `path` and return lines as list"""
        with open(path, "r") as f:
            return list(f)
    @staticmethod
    def write(path, content):
        """Write a content to the given path"""
        with open(path, "w") as f:
            for line in content:
                f.write(line)
    def handle_rst_file(self, path):
        """
        Parse the RST file notifying the registered visitors.

        Note: every file is re-written at the end, even when nothing changed
        (unchanged files get identical content written back).
        """
        lines = deque(self.read(path))
        new_contents = []
        while lines:
            line = lines.popleft()
            if self.is_req_directive(line):
                req = RequirementDirective(path)
                req.parse(lines)
                for func in self.req_visitors:
                    func(path, req)
                # Put the lines back for processing by the line visitor
                lines.extendleft(reversed(req.format_attributes() + req.content))
            new_contents.append(line)
        # Post processors may return a new list or mutate in place and
        # return None — hence the `or new_contents` fallback.
        for processor in self.post_processors:
            new_contents = processor(path, new_contents) or new_contents
        self.write(path, new_contents)
    @staticmethod
    def is_req_directive(line):
        """Returns True if the line denotes the start of a needs directive"""
        return bool(REQ_DIRECTIVE_PATTERN.match(line))
class AbstractRequirementVisitor(ABC):
    """Interface for requirement visitors: callables taking (path, directive)."""
    @abstractmethod
    def __call__(self, path: str, req: RequirementDirective):
        raise NotImplementedError()
class MetadataFixer(AbstractRequirementVisitor):
    """
    Updates metadata based on the status of the requirement and contents of
    the metadata: assigns a missing id, and stamps :introduced:/:updated:
    with the current VERSION where appropriate.
    """
    def __init__(self, repos: RequirementRepository):
        self.repos = repos
    def __call__(self, path: str, req: RequirementDirective):
        if not req.requirement_id:
            req.requirement_id = self.repos.create_id()
        is_new = self.repos.is_new_requirement(req)
        if is_new and req.introduced != VERSION:
            req.introduced = VERSION
        has_changed = self.repos.has_changed(req)
        if has_changed and req.updated != VERSION:
            req.updated = VERSION
class MetadataValidator(AbstractRequirementVisitor):
    """
    Emits warnings for missing or invalid requirement metadata.

    Pure validation — unlike MetadataFixer it never mutates the directive.
    """
    def __init__(self, repos: RequirementRepository):
        self.repos = repos
    def __call__(self, path: str, req: RequirementDirective):
        for attr in REQUIRED_ATTRIBUTES:
            if attr not in req.attributes:
                warn(path, f"Missing required attribute {attr}", req)
        if req.keyword and req.keyword not in VALID_KEYWORDS:
            warn(path, f"Invalid :keyword: value ({req.keyword})", req)
        # BUG FIX: previously read the module-level global `repository`
        # (created in the __main__ block), silently coupling this class to
        # script execution; use the injected repository instead.
        if self.repos.is_new_requirement(req) and req.introduced != VERSION:
            warn(path, f":introduced: is not {VERSION} on new requirement", req)
        if req.introduced and req.introduced not in VALID_VERSIONS:
            warn(path, f"Invalid :introduced: value ({req.introduced})", req)
        if req.updated and req.updated not in VALID_VERSIONS:
            warn(path, f"Invalid :updated: value ({req.updated})", req)
        if req.target and req.target not in VALID_TARGETS:
            warn(path, f"Invalid :target: value ({req.target})", req)
        if req.validation_mode and req.validation_mode not in VALID_VALIDATION_MODES:
            warn(path, f"Invalid :validation_mode: value ({req.validation_mode})", req)
def check_section_headers(path: str, lines: List[str]) -> List[str]:
    """
    Ensure hierarchy of section headers follows the expected progression as defined
    by `HEADING_LEVELS`, and that section heading marks match the length of the
    section title.
    """
    current_heading_level = 0
    for i, line in enumerate(lines):
        # A run of 3+ identical heading characters marks a section underline.
        if any(line.startswith(char * 3) for char in HEADING_LEVELS):
            # heading level should go down, stay the same, or be next level
            expected = HEADING_LEVELS[0 : current_heading_level + 2]
            if line[0] not in expected:
                warn(
                    path,
                    f"Unexpected heading char ({line[0]}) on line {i+1}. "
                    f"Expected one of {' '.join(expected)}",
                )
            # Resize the underline in place to match the preceding title line.
            # NOTE(review): at i == 0 this reads lines[-1] (wrap-around);
            # harmless for well-formed docs but worth confirming.
            if len(line.strip()) != len(lines[i - 1].strip()):
                lines[i] = (line[0] * len(lines[i - 1].strip())) + "\n"
                print(
                    f"UPDATE: {path} | Matching section mark to title length "
                    f"on line {i+1}"
                )
            current_heading_level = HEADING_LEVELS.index(line[0])
    return lines
def check_keyword_text_alignment(path: str, req: RequirementDirective):
    """Warn when a requirement's body never bolds its own :keyword: value."""
    if not req.keyword:
        return req
    marker = f"**{req.keyword}**"
    for body_line in req.content:
        if marker in body_line:
            return
    warn(path, f"Keyword is {req.keyword}, but {marker} not in requirement", req)
if __name__ == "__main__":
    # Print the reference tables of accepted metadata values, then walk the
    # docs tree fixing/validating every requirement directive.
    for title, values in (
        ("Valid Versions", VALID_VERSIONS),
        ("Valid Keywords", VALID_KEYWORDS),
        ("Valid Targets", VALID_TARGETS),
        ("Valid Validation Modes", VALID_VALIDATION_MODES),
    ):
        print(title)
        print("-----------------------")
        print("\n".join(values))
        print()
    print("Check-up Report")
    print("-" * 100)
    repository = RequirementRepository()
    visitor = RequirementVisitor(
        req_visitors=[
            MetadataFixer(repository),
            MetadataValidator(repository),
            check_keyword_text_alignment,
        ],
        post_processors=[check_section_headers],
    )
    visitor.process(THIS_DIR / "docs")
|
# assignment_010: Building your own Data set from Google Images
# The task is to create an image dataset through Google Images, visualise the train and
# test sets.
# Your codes here..
|
from django.contrib import admin
from .models import Web_test
from django.contrib import messages
from web_test.views import WebTest
from product.models import Product
from django.urls import reverse
from django.utils.html import format_html
#<a href="/admin/product/product/2/change/">2</a>
@admin.register(Web_test)
class Web_testAdmin(admin.ModelAdmin):
    """Admin for Web_test: lists cases, links the owning product, and offers a
    'run' action that executes one selected case and stores its result."""
    list_display = ["web_test_name","get_product","web_test_user","web_test_bug","create_time"]
    search_fields = ['web_test_name']
    def get_product(self,obj):
        # Render the owning product as a link to its admin change page.
        html = format_html("<a href='/admin/product/product/{url}/change/'>{name}</a>", url=obj.product.id,name=obj.product.product_name)
        return html
    get_product.short_description = "所属项目"
    actions = ['running']
    def running(self, request, queryset):
        # Only a single selected test case can be run at a time.
        if len(queryset) == 1:
            web_test_queryset = queryset[0]
            # Re-fetch by pk so the saved instance is fresh from the DB.
            a = Web_test.objects.get(pk=web_test_queryset.id)
            result = WebTest(request,web_test_queryset)
            a.web_test_result_finaly = result
            a.save()
            messages.add_message(request, messages.SUCCESS,result)
        else:
            messages.add_message(request, messages.ERROR, '现阶段只能支持运行一个用例')
    # Button label shown in the admin (same convention as django admin)
    running.short_description = ' 运行'
    # Icon — see element-ui icons and https://fontawesome.com
    running.icon = 'fas fa-play'
    # element-ui button type, see https://element.eleme.cn/#/zh-CN/component/button
    running.type = 'success'
    # Optionally give the button a custom colour
    #running.style = 'color:black;'
# Register your models here.
|
import argparse
import gym
import torch
from models.dqn_learning import DQN_net
from tensorboardX import SummaryWriter
def train(config, device, writer):
    """Train the DQN on the configured gym env for 300 episodes, logging to
    TensorBoard and checkpointing whenever an episode beats the best length."""
    step_list = []
    i_episode_list = []
    env = gym.make(config.env)
    env = env.unwrapped
    # Sizes come from the env, overriding the argparse placeholders.
    config.action_space = env.action_space.n
    config.state_size = env.observation_space.shape[0]
    dqn = DQN_net(config,device)
    step_max = 0
    for i_episode in range(300):
        s = env.reset()
        step = 0
        ep_r = 0
        while True:
            # env.render()
            a = dqn.choose_action(s)
            # take action
            s_,r,done,info = env.step(a)
            # Reward shaping: favour cart near centre (r1, weight 0.7) and
            # pole upright (r2, weight 0.3) — assumes CartPole observations.
            x, x_dot, theta, theta_dot = s_
            r1 = (env.x_threshold - abs(x)) / env.x_threshold *0.7
            r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians *0.3
            r = r1 + r2
            step = step + 1
            if step >100000:
                print("step>100000")
                done = True
            ep_r += r
            dqn.store_transition(s,a,r,s_)
            # Start learning once the replay buffer has warmed up.
            if dqn.memory_counter > 1000:
                dqn.learn_dqn()
            if done:
                print('episode: ', i_episode,
                    ' ep_r: ', round(ep_r, 2),
                    ' step: ', round(step, 2),
                    ' epsilon: ', round(dqn.epsilon, 2),
                    )
                step_list.append(step)
                i_episode_list.append(i_episode)
                writer.add_scalar("ep_reward", ep_r, i_episode)
                writer.add_scalar("step", step, i_episode)
                writer.add_scalar("epsilon", dqn.epsilon, i_episode)
                # Checkpoint only on a new best episode length.
                if step_max < step:
                    step_max = step
                    dqn.save()
                    print('episode: ', i_episode, 'model update and save:', "max_step:",step_max)
                break
            s = s_
    # plt.plot(i_episode_list,step_list)
    # plt.show()
def estimation(config, device):
    """Evaluate the saved DQN greedily for 200 episodes (no learning/updates)."""
    env = gym.make(config.env)
    env = env.unwrapped
    # Sizes come from the env, overriding the argparse placeholders.
    config.action_space = env.action_space.n
    config.state_size = env.observation_space.shape[0]
    print('\n test')
    dqn1 = DQN_net(config, device)
    dqn1.load()
    for i_episode in range(200):
        s = env.reset()
        total = 0
        step = 0
        while True:
            #env.render()
            a = dqn1.choose_action_test(s)
            # take action
            s_,r,done,info = env.step(a)
            total = total + r
            step = step + 1
            if step >100000:
                # BUG FIX: message previously said "step>10000" while the
                # threshold is 100000; now consistent with train().
                print("step>100000")
                done = True
            if done:
                print("total,i_episode",total,i_episode)
                break
            s = s_
if __name__ == '__main__':
    # torch.manual_seed(1) # reproducible
    parser = argparse.ArgumentParser()
    parser.add_argument("--env",
                        default='CartPole-v0',
                        help="Name of environment")
    # Placeholders — both are overwritten from the env's spaces inside
    # train() / estimation().
    parser.add_argument("--action_space", default=1, type=int)
    parser.add_argument("--state_size", default=1, type=int)
    parser.add_argument("--memory_size", default=10000, type=int)
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--gamma", default=0.95, type=float)
    parser.add_argument("--target_replaece_net", default=30, type=float)
    parser.add_argument("--batch_size", default= 300, type=int)
    parser.add_argument("--epsilon", default=0.0, type=float)
    parser.add_argument("--epsilon_max", default=0.98, type=float)
    parser.add_argument("--epsilon_increment", default=0.0001, type=float)
    # BUG FIX: --action_space and --state_size were registered a second time
    # here; argparse raises ArgumentError on conflicting option strings, so
    # the script crashed before training could start. Duplicates removed.
    config = parser.parse_args()
    # use the cuda
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if device == 'cuda':
        print('using the GPU...')
        torch.cuda.manual_seed(3000)      # seed the current GPU
        torch.cuda.manual_seed_all(3000)  # seed all GPUs
    else:
        print('using the CPU...')
        torch.manual_seed(3000)
    writer = SummaryWriter(comment="-" + config.env)
    train(config,device,writer)
    estimation(config,device)
|
#!/usr/bin/env python
# Copyright 2017, Tianwei Shen, HKUST.
# compute mAP score for overlap benchmark
import sys, os
sys.path.append('..')
from tools.common import read_list
def compute_overlap_ap(gt_list, res_list, d):
    """
    Average precision of a ranked retrieval list against the ground truth.

    gt_list  -- ground-truth relevant items
    res_list -- retrieved items, in ranked order
    d        -- unused; kept for call-site compatibility
    """
    gt_size = len(gt_list)
    old_recall = 0.0
    old_precision = 1.0
    ap = 0.0
    intersect_size = 0
    # BUG FIX: `range` instead of Python-2-only `xrange` — the file already
    # uses print() calls, and `range` behaves identically on both versions.
    for i in range(len(res_list)):
        if res_list[i] in gt_list:
            intersect_size = intersect_size + 1
            # Trapezoidal interpolation of the precision/recall curve.
            recall = float(intersect_size) / gt_size
            precision = float(intersect_size) / (i + 1.0)
            ap += (recall - old_recall) * (old_precision + precision) / 2.0
            old_precision = precision
            old_recall = recall
    return ap
def compute_GL3D_map(image_list, overlap_result, ground_truth_file):
    """
    Mean average precision of overlap retrieval on the GL3D benchmark.

    image_list        -- file listing all image names (defines the index space)
    overlap_result    -- file of retrieved "index1 index2" pairs, ranked
    ground_truth_file -- file of ground-truth "index1 index2" pairs
    """
    image_filenames = read_list(image_list)
    overlap_pair_lines = read_list(overlap_result)
    gt_lines = read_list(ground_truth_file)
    # Ground-truth neighbours per image index.
    gt_vec = [[] for i in image_filenames]
    for line in gt_lines:
        split_items = line.split()
        index1 = int(split_items[0])
        index2 = int(split_items[1])
        if index1 == index2:  # don't count (i, i) pair
            continue
        gt_vec[index1].append(index2)
    # Retrieved neighbours per image index, in ranked order.
    res_vec = [[] for i in image_filenames]
    for line in overlap_pair_lines:
        split_items = line.split()
        index1 = int(split_items[0])
        index2 = int(split_items[1])
        if index1 == index2:  # don't count (i, i) pair
            continue
        res_vec[index1].append(index2)
    num_test = 0
    mAP = 0
    # BUG FIX: `range` instead of Python-2-only `xrange` so the script also
    # runs under Python 3 (the file already uses print() calls).
    for i in range(len(image_filenames)):
        if len(gt_vec[i]) != 0:
            num_test = num_test + 1
            ap = compute_overlap_ap(gt_vec[i], res_vec[i], i)
            mAP = mAP + ap
    # NOTE(review): raises ZeroDivisionError when no image has ground truth;
    # preserved as-is since a silent 0.0 could hide a bad input file.
    mAP = mAP / num_test
    print(mAP, num_test)
    return mAP
if __name__ == '__main__':
    # Usage guard: require exactly the three input file arguments.
    if len(sys.argv) < 4:
        print(sys.argv[0], '<test_image_list> <overlap_result> <ground_truth_pairs>')
        exit()
    image_list, overlap_result, ground_truth_file = sys.argv[1:4]
    compute_GL3D_map(image_list, overlap_result, ground_truth_file)
from input_algorithms.errors import BadSpecValue
from input_algorithms.dictobj import dictobj
from input_algorithms import spec_base as sb
from input_algorithms.meta import Meta
from delfick_error import DelfickError, ProgrammerError
from collections import defaultdict
from operator import itemgetter
from layerz import Layers
import pkg_resources
import logging
import six
log = logging.getLogger("option_merge.addons")
class option_merge_addon_hook(object):
    """
    Decorator marking a function as an option_merge addon entry point.

    ``extras`` declares (namespace, [names]) dependencies to also load;
    ``post_register`` marks hooks run after registration. The two options
    are mutually exclusive.
    """
    def __init__(self, extras=sb.NotSpecified, post_register=False):
        conflicting = post_register and extras not in (None, {}, sb.NotSpecified)
        if conflicting:
            raise ProgrammerError(
                "Sorry, can't specify ``extras`` and ``post_register`` at the same time"
            )
        self.post_register = post_register
        pair_spec = sb.tuple_spec(sb.string_spec(), sb.listof(sb.string_spec()))
        self.extras = sb.listof(pair_spec).normalise(Meta({}, []), extras)
    def __call__(self, func):
        # Stamp the markers AddonGetter.get_hooks_and_extras() looks for.
        markers = {
            "extras": self.extras,
            "_option_merge_addon_entry": True,
            "_option_merge_addon_entry_post_register": self.post_register,
        }
        for marker_name, marker_value in markers.items():
            setattr(func, marker_name, marker_value)
        return func
class spec_key_spec(sb.Spec):
    """
    Turns value into (int, (str1, str2, ..., strn))
    If value is a single string: (0, (val, ))
    if value is a tuple of strings: (0, (val1, val2, ..., valn))
    if value is a list of strings: (0, (val1, val2, ..., valn))
    if value is already correct, then return as is
    """
    def normalise_filled(self, meta, val):
        if isinstance(val, six.string_types):
            return (0, (val, ))
        else:
            # BUG FIX: the old condition `isinstance(val, list) or
            # isinstance(val, tuple) and len(val) > 0` parsed as
            # `a or (b and c)`, so an empty *list* entered the branch and
            # `val[0]` raised IndexError instead of letting the spec below
            # report a proper BadSpecValue.
            if isinstance(val, (list, tuple)) and len(val) > 0:
                is_int = type(val[0]) is int
                is_digit = getattr(val[0], "isdigit", lambda: False)()
                if not is_int and not is_digit:
                    # No leading priority given — prepend the default of 0.
                    val = (0, val)
            spec = sb.tuple_spec(sb.integer_spec(), sb.tupleof(sb.string_spec()))
            return spec.normalise(meta, val)
class no_such_key_spec(sb.Spec):
    """Spec that always rejects a supplied value with the configured reason —
    used to give a helpful error for deprecated/misspelt keys."""
    def setup(self, reason):
        # Message raised verbatim whenever a value is supplied for this key.
        self.reason = reason
    def normalise_filled(self, meta, val):
        raise BadSpecValue(self.reason, meta=meta)
class Result(dictobj.Spec):
    """Normalised shape of the dict an addon hook may return when resolved."""
    # specs: mapping of spec key -> object with a `normalise` method
    specs = dictobj.Field(sb.dictof(spec_key_spec(), sb.has("normalise")))
    # extra: deliberately invalid, to catch the common `extra`/`extras` typo
    extra = dictobj.Field(no_such_key_spec("Use extras instead (notice the s!)"))
    # extras: list of (namespace, (name, ...)) dependencies to also load
    extras = dictobj.Field(sb.listof(sb.tuple_spec(sb.string_spec(), sb.tupleof(sb.string_spec()))))
class Addon(dictobj.Spec):
    """An imported addon. Resolving it executes its hooks; the result is
    cached on `_resolved` so the hooks run at most once."""
    name = dictobj.Field(sb.string_spec)
    extras = dictobj.Field(sb.listof(sb.tuple_spec(sb.string_spec(), sb.string_spec())))
    resolver = dictobj.Field(sb.any_spec)
    namespace = dictobj.Field(sb.string_spec)
    class BadHook(DelfickError):
        desc = "Bad Hook"
    @property
    def resolved(self):
        # Execute the resolver once and cache; any failure is wrapped in
        # BadHook carrying the addon's name and namespace.
        errors = []
        if getattr(self, "_resolved", None) is None:
            try:
                self._resolved = list(self.resolver())
            except Exception as error:
                errors.append(self.BadHook("Failed to resolve a hook", name=self.name, namespace=self.namespace, error=str(error)))
        if errors:
            raise self.BadHook(_errors=errors)
        return self._resolved
    def process(self, collector):
        # Register any converter specs the hooks returned onto the collector.
        for result in self.resolved:
            if collector is not None:
                collector.register_converters(
                    result.get("specs", {})
                    , Meta, collector.configuration, sb.NotSpecified
                )
    def post_register(self, **kwargs):
        # Run only the post_register hooks; results are ignored.
        list(self.resolver(post_register=True, **kwargs))
    def unresolved_dependencies(self):
        # Dependencies declared on the hook itself — no execution required.
        for namespace, name in self.extras:
            yield (namespace, name)
    def resolved_dependencies(self):
        # Dependencies declared by the results of executing the hooks.
        for result in self.resolved:
            for namespace, names in result.get("extras", []):
                if not isinstance(names, (tuple, list)):
                    names = (names, )
                for name in names:
                    yield (namespace, name)
    def dependencies(self, all_deps):
        # All known dependencies; resolved ones only if already resolved
        # (checking _resolved avoids triggering execution here).
        for dep in self.unresolved_dependencies():
            yield dep
        if hasattr(self, "_resolved"):
            for dep in self.resolved_dependencies():
                yield dep
class AddonGetter(object):
    """Finds and imports addons registered as pkg_resources entry points,
    grouped by namespace."""
    class NoSuchAddon(DelfickError):
        desc = "No such addon"
    class BadImport(DelfickError):
        desc = "Bad import"
    class BadAddon(DelfickError):
        desc = "Bad addon"
    def __init__(self):
        self.namespaces = {}
        self.entry_points = {}
        # The base namespace is always available.
        self.add_namespace("option_merge.addons")
    def add_namespace(self, namespace, result_spec=None, addon_spec=None):
        # Record the specs used to normalise hook results/addons, and index
        # every entry point declared under this namespace by name.
        self.namespaces[namespace] = (result_spec or Result.FieldSpec(), addon_spec or Addon.FieldSpec())
        self.entry_points[namespace] = defaultdict(list)
        for e in pkg_resources.iter_entry_points(namespace):
            self.entry_points[namespace][e.name].append(e)
    def all_for(self, namespace):
        # Yield (namespace, name) for every known entry point in namespace;
        # unknown namespaces only log a warning.
        if namespace not in self.entry_points:
            log.warning("Unknown plugin namespace\tnamespace=%s", namespace)
            return
        for name in self.entry_points[namespace]:
            yield (namespace, name)
    def __call__(self, namespace, entry_point_name, collector, known=None):
        # Import (but do not execute) the named addon. Returns a normalised
        # Addon, or None for an unknown namespace.
        if namespace not in self.namespaces:
            log.warning("Unknown plugin namespace\tnamespace=%s\tentry_point=%s\tavailable=%s"
                , namespace, entry_point_name, sorted(self.namespaces.keys())
                )
            return
        entry_point_full_name = "{0}.{1}".format(namespace, entry_point_name)
        entry_points = self.find_entry_points(
            namespace, entry_point_name, entry_point_full_name
        )
        def result_maker(**data):
            # Normalise a hook's returned dict through this namespace's spec.
            return self.namespaces[namespace][0].normalise(Meta(data, []), data)
        resolver, extras = self.resolve_entry_points(
            namespace, entry_point_name, collector
            , result_maker, entry_points, entry_point_full_name
            , known
            )
        return self.namespaces[namespace][1].normalise(Meta({}, [])
            , { "namespace": namespace
              , "name": entry_point_name
              , "resolver": resolver
              , "extras": extras
              }
            )
    def find_entry_points(self, namespace, entry_point_name, entry_point_full_name):
        # Look up the registered entry points; missing is fatal, duplicates
        # are tolerated with a warning.
        it = self.entry_points[namespace][entry_point_name]
        entry_points = list(it)
        if len(entry_points) > 1:
            log.warning("Found multiple entry_points for {0}".format(
                entry_point_full_name
            ))
        elif len(entry_points) == 0:
            raise self.NoSuchAddon(addon=entry_point_full_name)
        else:
            log.info("Found {0} addon".format(entry_point_full_name))
        return entry_points
    def resolve_entry_points(self
        , namespace, entry_point_name, collector
        , result_maker, entry_points, entry_point_full_name
        , known
        ):
        # Import all modules backing the entry points, collecting every
        # import failure before raising so the user sees them all at once.
        errors = []
        modules = []
        for entry_point in entry_points:
            try:
                modules.append(entry_point.resolve())
            except ImportError as error:
                err = self.BadImport("Error whilst resolving entry_point"
                    , importing=entry_point_full_name
                    , module=entry_point.module_name
                    , error=str(error)
                    )
                errors.append(err)
        if errors:
            raise self.BadImport("Failed to import some entry points"
                , _errors=errors
                )
        hooks, extras = self.get_hooks_and_extras(modules, known)
        resolver = self.get_resolver(collector, result_maker, hooks)
        return resolver, extras
    def get_hooks_and_extras(self, modules, known):
        # Scan the modules for functions stamped by option_merge_addon_hook
        # and accumulate their (namespace, name) extras, expanding "__all__"
        # to every not-yet-known entry point in that namespace.
        found = []
        extras = []
        for module in modules:
            for attr in dir(module):
                hook = getattr(module, attr)
                if getattr(hook, "_option_merge_addon_entry", False):
                    found.append(hook)
                    for namespace, names in hook.extras:
                        for name in names:
                            pairs = [(namespace, name)]
                            if name == "__all__":
                                pairs = sorted([pair for pair in self.all_for(namespace) if pair not in known])
                            for pair in pairs:
                                if pair not in extras:
                                    extras.append(pair)
        return found, extras
    def get_resolver(self, collector, result_maker, hooks):
        # Build the callable Addon.resolved/post_register will invoke; it
        # runs either the normal hooks or (with post_register=True) only
        # the post-register hooks.
        def resolve(post_register=False, **kwargs):
            for hook in hooks:
                is_post_register = getattr(hook, "_option_merge_addon_entry_post_register", False)
                if (post_register and not is_post_register) or (is_post_register and not post_register):
                    continue
                if post_register:
                    hook(collector, **kwargs)
                else:
                    r = hook(collector, result_maker)
                    if r is not None:
                        yield r
        return resolve
class Register(object):
    """
    Responsible for finding and registering addons.
    Addons can register unresolved dependencies and resolved dependencies.
    The difference is that an unresolved dependency does not involve executing
    the addon, whereas a resolved dependency does.
    Order is such that:
    * import known pairs
    * import extra pairs from known pairs
    * resolve known and extra pairs in layers
    * import and resolve extra pairs from those layers until no more are known
    * call post_register on all pairs in layers
    Usage:
    .. code-block:: python
        register = Register(AddonGetter, collector)
        # Add pairs as many times as you want
        register.add_pairs(("namespace1", "name1"), ("namespace2", "name2"), ..., )
        register.add_pairs(("namespace1", "name1"), ("namespace2", "name2"), ..., )
        # Now we import but not resolve the addons to get the unresolved extras
        register.recursive_import_known()
        # We now have a record of all the unresolved extras to be imported
        # Let's actually call our addons
        # And in the process, import and resolve any resolved extras
        register.recursive_resolve_imported()
        # Finally, everything has been imported and resolved, let's call post_register
        register.post_register({namespace1: {arg1=val1, arg2=val2}, ...})
    Alternatively if you don't want that much control:
    .. code-block:: python
        register = Register(AddonGetter, collector)
        register.register((namespace1, name1), (namespace2, name2), ...
            , namespace1={arg1:val1}, namespace2 = {arg1=val1}
            )
        # This will ensure the same resolution path as the manual approach
    """
    def __init__(self, addon_getter, collector):
        self.known = []        # (namespace, name) pairs requested so far
        self.imported = {}     # pair -> Addon (imported, maybe unresolved)
        self.resolved = {}     # pair -> list of hook results
        self.collector = collector
        self.addon_getter = addon_getter
    ########################
    ### AUTO USAGE
    ########################
    def register(self, *pairs, **extra_args):
        # One-shot convenience: import, resolve and post_register everything.
        self.add_pairs(*pairs)
        self.recursive_import_known()
        self.recursive_resolve_imported()
        self.post_register(extra_args)
    ########################
    ### MANUAL USAGE
    ########################
    def add_pairs(self, *pairs):
        # Queue (namespace, name) pairs; a name of "__all__" expands to every
        # entry point in that namespace. Returns only the newly added pairs.
        import_all = set()
        found = []
        for pair in pairs:
            if pair[1] == "__all__":
                import_all.add(pair[0])
            elif pair not in self.known:
                found.append(pair)
                self.known.append(pair)
        for namespace in import_all:
            for pair in self.addon_getter.all_for(namespace):
                if pair not in self.known:
                    found.append(pair)
                    self.known.append(pair)
        return found
    def recursive_import_known(self):
        # Keep importing until a pass adds nothing new (imports can declare
        # further extras).
        added = False
        while True:
            nxt = self._import_known()
            if not nxt:
                break
            added = nxt or added
        return added
    def recursive_resolve_imported(self):
        # Resolve in dependency layers until no new imports appear.
        while True:
            if not self._resolve_imported():
                break
    def post_register(self, extra_args=None):
        # Call post_register hooks layer by layer, passing any per-namespace
        # keyword arguments supplied by the caller.
        for layer in self.layered:
            for pair, imported in layer:
                args = (extra_args or {}).get(pair[0], {})
                imported.post_register(**args)
    ########################
    ### LAYERED
    ########################
    @property
    def layered(self):
        # Imported addons grouped into dependency layers (dependencies first).
        layers = Layers(self.imported)
        for key in sorted(self.imported):
            layers.add_to_layers(key)
        for layer in layers.layered:
            yield layer
    ########################
    ### HELPERS
    ########################
    def _import_known(self):
        # Import any known-but-unimported pairs; pairs the getter rejects
        # (returns None for) are dropped from `known`. Returns True when
        # anything new was imported.
        added = False
        for pair in list(self.known):
            namespace, name = pair
            if pair not in self.imported:
                imported = self.addon_getter(namespace, name, self.collector, known=list(self.known))
                if imported is None:
                    self.known.pop(self.known.index(pair))
                else:
                    self.imported[pair] = imported
                    self.add_pairs_from_extras(imported.extras)
                    added = True
        return added
    def _resolve_imported(self):
        # Execute every unresolved addon in layer order, register its
        # converters, queue the extras its results declared (expanding any
        # "__all__" into the concrete names found), then import whatever new
        # pairs appeared.
        for layer in self.layered:
            for pair, imported in layer:
                namespace, name = pair
                if pair not in self.resolved:
                    self.resolved[pair] = list(imported.resolved)
                    imported.process(self.collector)
                    for result in imported.resolved:
                        found = self.add_pairs_from_extras(result.extras)
                        if any("__all__" in names for _, names in result.extras):
                            want = defaultdict(list)
                            for namespace, names in result.extras:
                                for name in names:
                                    if name != "__all__":
                                        want[namespace].append(name)
                            for namespace, name in found:
                                want[namespace].append(name)
                            result.extras = sorted([
                                (namespace, tuple(sorted(set(names))))
                                for namespace, names in sorted(want.items())
                            ])
        return self.recursive_import_known()
    def add_pairs_from_extras(self, extras):
        # Normalise extras (a single name or a tuple/list of names per
        # namespace) into pairs and queue them; returns the new pairs sorted.
        found = []
        for pair in extras:
            namespace, names = pair
            if not isinstance(names, (tuple, list)):
                names = (names, )
            for name in names:
                found.extend(self.add_pairs((namespace, name)))
        return sorted(found)
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: wangye.hope@gmail.com
@software: PyCharm
@file: Largest Magic Square
@time: 2021/06/13 00:22
"""
from typing import *
class Solution:
    def largestMagicSquare(self, grid: List[List[int]]) -> int:
        """
        Return the side length of the largest square sub-grid whose rows,
        columns and both diagonals all share the same sum (LeetCode 1895).

        Uses row/column prefix sums so each candidate square is checked in
        O(size); sizes are tried largest-first so the first hit wins.
        """
        # BUG FIX: `accumulate` was used but never imported — the file only
        # has `from typing import *`, so the first call raised NameError.
        from itertools import accumulate
        # rs[i][j] = sum of grid[i][0:j]; cs[c][i] = sum of column c rows 0:i.
        rs = [[0] + list(accumulate(row)) for row in grid]
        cs = [[0] + list(accumulate(row[c] for row in grid)) for c in range(len(grid[0]))]
        for size in range(min(len(grid), len(grid[0])), 1, -1):
            for j in range(len(grid[0]) - size + 1):
                for i in range(len(grid) - size + 1):
                    s = rs[i][j + size] - rs[i][j]
                    # Row i sets the target; remaining rows, remaining columns
                    # (the first column is implied once rows and the other
                    # columns match) and both diagonals must equal it.
                    if all(rs[r][j + size] - rs[r][j] == s for r in range(i + 1, i + size)) and \
                            all(cs[c][i + size] - cs[c][i] == s for c in range(j + 1, j + size)) and \
                            sum(grid[x][y] for x, y in zip(range(i, i + size), range(j, j + size))) == s and \
                            sum(grid[x][y] for x, y in zip(range(i + size - 1, i - 1, -1), range(j, j + size))) == s:
                        return size
        return 1
|
import unittest
import json
from tests.v2.basecases import TestBaseCase
class QuestionTestCase(TestBaseCase):
    """
    test class for the question endpoint
    """
    def test_user_posting_a_question_with_str(self):
        """Posting with a non-integer meetup id in the URL returns 404"""
        auth_token = self.user_login()
        # '<meetup_id>' is sent literally to exercise the integer check.
        response = self.client.post('/api/v2/meetups/<meetup_id>/questions',
                                    data=json.dumps(self.question_one),
                                    headers=dict(
                                        Authorization="Bearer " + auth_token),
                                    content_type='application/json')
        res = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            res["error"],
            "Resource Identifier need an integer")
    def test_user_getting_a_none_question(self):
        """Posting a question to a meetup that does not exist returns 404"""
        auth_token = self.user_login()
        response = self.client.post('/api/v2/meetups/1/questions',
                                    data=json.dumps(self.question_one),
                                    headers=dict(
                                        Authorization="Bearer " + auth_token),
                                    content_type='application/json')
        res = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            res["error"],
            "Meetup of id 1 not found")
    def test_user_patch_a_none_question(self):
        """Upvoting a question that does not exist returns 404"""
        auth_token = self.user_login()
        response = self.client.patch('/api/v2/questions/1/upvote',
                                     data=json.dumps(self.question_one),
                                     headers=dict(
                                         Authorization="Bearer " + auth_token),
                                     content_type='application/json')
        res = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            res["error"],
            "No question found")
    def test_downvote_patch_a_none_question(self):
        """Downvoting a question that does not exist returns 404"""
        auth_token = self.user_login()
        response = self.client.patch('/api/v2/questions/1/downvote',
                                     data=json.dumps(self.question_one),
                                     headers=dict(
                                         Authorization="Bearer " + auth_token),
                                     content_type='application/json')
        res = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            res["error"],
            "No question found")
|
from __future__ import annotations
from unittest.mock import patch
import pytest
from kombu import Connection
class test_get_manager:
    """Tests for Connection.get_manager(), which wraps the pyrabbit client."""
    @pytest.mark.masked_modules('pyrabbit')
    def test_without_pyrabbit(self, mask_modules):
        # get_manager() must fail loudly when pyrabbit is not installed.
        with pytest.raises(ImportError):
            Connection('amqp://').get_manager()
    @pytest.mark.ensured_modules('pyrabbit')
    def test_with_pyrabbit(self, module_exists):
        # Default connection: manager uses localhost management port and
        # guest/guest credentials.
        with patch('pyrabbit.Client', create=True) as Client:
            manager = Connection('amqp://').get_manager()
            assert manager is not None
            Client.assert_called_with(
                'localhost:15672', 'guest', 'guest',
            )
    @pytest.mark.ensured_modules('pyrabbit')
    def test_transport_options(self, module_exists):
        # manager_* transport options override host, port and credentials.
        with patch('pyrabbit.Client', create=True) as Client:
            manager = Connection('amqp://', transport_options={
                'manager_hostname': 'admin.mq.vandelay.com',
                'manager_port': 808,
                'manager_userid': 'george',
                'manager_password': 'bosco',
            }).get_manager()
            assert manager is not None
            Client.assert_called_with(
                'admin.mq.vandelay.com:808', 'george', 'bosco',
            )
|
###JunctionArray
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import math
import reorder_arrays
from build_scripts import ExonArray
from build_scripts import EnsemblImport
from build_scripts import ExonArrayEnsemblRules
try: from build_scripts import JunctionArrayEnsemblRules
except Exception: ### Path error issue which remains partially unresolved
import JunctionArrayEnsemblRules
import export
import RNASeq
def filepath(filename):
    """Resolve *filename* through the project's `unique` path helper."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """List the contents of *sub_dir* via the project's `unique` helper."""
    return unique.read_directory(sub_dir)
def verifyFile(filename,server_folder):
    # Ensure *filename* exists and is readable locally; otherwise try to
    # download it, exiting with a message if it cannot be obtained.
    # (Python 2 code: print statement, xreadlines, reload.)
    fn=filepath(filename)
    try:
        # Cheap existence/readability probe: read at most one line.
        for line in open(fn,'rU').xreadlines():break
    except Exception:
        import update; reload(update)
        if server_folder == None: server_folder = 'AltMouse'
        continue_analysis = update.downloadCurrentVersion(filename,server_folder,'')
        if continue_analysis == 'no':
            print 'The file:\n',filename, '\nis missing and cannot be found online. Please save to the designated directory or contact AltAnalyze support.';sys.exit()
########### Recent code for dealing with comprehensive Affymetrix Junction Arrays
########### Begin Analyses ###########
class ExonAnnotationData:
    # Accessor base class for Affymetrix junction/exon probeset annotations.
    # Python 2 era code: uses the `string` module functions and py2 integer
    # division; attributes are set by subclasses (see PSRAnnotation).
    def Probeset(self): return self._probeset
    def ProbesetName(self): return self._psr
    def ExonClusterID(self): return self._exon_cluster_id
    def setGeneID(self, geneID): self.geneid = geneID
    def GeneID(self): return self.geneid
    def setTransSplicing(self): self.trans_splicing = 'yes'
    def setSecondaryGeneID(self,secondary_geneid): self.secondary_geneid = secondary_geneid
    def SecondaryGeneID(self): return self.secondary_geneid
    # NOTE(review): always reports 'left' regardless of exon_pos — looks
    # like a stub; confirm before relying on it.
    def checkExonPosition(self,exon_pos): return 'left'
    def TransSplicing(self): return self.trans_splicing
    def EnsemblGeneID(self):
        # Single Ensembl gene id: the whole field when it is one ENS id,
        # the last unique ENS id when comma-separated, '' when none present.
        geneid = self._geneid
        if 'ENS' in self._geneid:
            if ',' in self._geneid:
                ens=[]
                ids = string.split(self._geneid,',')
                for id in ids:
                    if 'ENS' in id: ens.append(id)
                geneid = unique.unique(ens)[-1]
        else: geneid=''
        return geneid
    def EnsemblGeneIDs(self):
        # All Ensembl gene ids in the field as a list ([] when none).
        geneid = self._geneid
        if 'ENS' in self._geneid:
            if ',' in self._geneid:
                ens=[]
                ids = string.split(self._geneid,',')
                for id in ids:
                    if 'ENS' in id: ens.append(id)
                geneids = unique.unique(ens)
            else: geneids = [self._geneid]
        else: geneids=[]
        return geneids
    def Symbol(self):
        # Comma-separated symbol string -> list; non-string values pass through.
        try: symbols = string.split(self._symbols,',')
        except Exception: symbols = self._symbols
        return symbols
    def setTranscriptClusterID(self,transcript_cluster): self._transcript_cluster = transcript_cluster
    def TranscriptCluster(self):
        # Strip a trailing '.1' version suffix if present (mutates the field).
        if self._transcript_cluster[-2:] == '.1':
            self._transcript_cluster = self._transcript_cluster[:-2]
        return self._transcript_cluster
    def setTranscripts(self, transcripts): self.transcripts = transcripts
    def EnsemblTranscripts(self): return self.transcripts
    def ProbesetType(self):
        ###e.g. Exon, junction, constitutive(gene)
        return self._probeset_type
    def setStart(self, start): self.start = start
    def setEnd(self, end): self.end = end
    def Start(self): return self.start
    def End(self): return self.end
    def setChromosome(self,chr):
        self._chromosome_info = chr
    def Chromosome(self):
        # Parse the chromosome out of a UCSC link fragment ('...=chrN:...');
        # falls back to the raw stored value when parsing fails.
        if len(self._chromosome_info)>0:
            try:
                null,chr = string.split(self._chromosome_info,'=chr')
                chromosome,null=string.split(chr,':')
            except Exception: chromosome = self._chromosome_info
            if chromosome == 'chrM': chromosome = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
            if chromosome == 'M': chromosome = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
        else: chromosome = 'not-assinged' ### (sic) typo preserved — downstream code may compare this literal
        return chromosome
    def Strand(self):
        # Normalise to Ensembl-style '-1'/'1' (mutates the stored value).
        if self._strand == '-': self._strand = '-1'
        else: self._strand = '1'
        return self._strand
    def ProbesetClass(self):
        ###e.g. core, extendended, full
        #return self._probest_class
        return 'core'
    def ExternalExonClusterIDs(self): return self._exon_clusters
    def ExternalExonClusterIDList(self):
        # '|'-delimited cluster id string -> list.
        external_exonid_list = string.split(self.ExternalExonClusterIDs(),'|')
        return external_exonid_list
    def Constitutive(self): return self._constitutive_status
    def Sequence(self): return string.lower(self._seq)
    def JunctionSequence(self): return string.replace(self.Sequence(),'|','')
    def JunctionSequences(self):
        # The two exon-side sequences; when no '|' delimiter exists the
        # sequence is split in half (py2 integer division on the indices).
        try: seq1, seq2 = string.split(self.Sequence(),'|')
        except Exception:
            seq1 = self.Sequence()[:len(self.Sequence())/2]
            seq2 = self.Sequence()[-1*len(self.Sequence())/2:]
        return seq1, seq2
    def Report(self):
        output = self.Probeset()
        return output
    def __repr__(self): return self.Report()
class PSRAnnotation(ExonAnnotationData):
    """Concrete annotation record for one probeset row (PSR, junction, exon or
    transcript-cluster line); simply captures the parsed fields - all accessor
    logic lives in ExonAnnotationData."""
    def __init__(self,psr,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_clusters,constitutive,seq,probeset_type):
        ### One assignment per field, in parameter order
        self._psr = psr
        self._probeset = probeset
        self._chromosome_info = ucsclink
        self._transcript_cluster = transcript_cluster
        self._strand = strand
        self._geneid = geneids
        self._symbols = symbols
        self._exon_clusters = exon_clusters
        self._constitutive_status = constitutive
        self._seq = seq
        self._probeset_type = probeset_type
class EnsemblInformation:
    """Minimal Ensembl gene record: chromosome, strand, gene ID, symbol and
    description, with mitochondrial chromosome names normalized on access."""
    def __init__(self, chr, strand, gene, symbol, description):
        self._chr = chr
        self._strand = strand
        self._gene = gene
        self._symbol = symbol
        self._description = description
    def GeneID(self):
        return self._gene
    def Chromosome(self):
        ### Rename the Affymetrix/UCSC mitochondrial chromosome (M) to the Ensembl convention (MT)
        if self._chr == 'chrM': self._chr = 'chrMT'
        if self._chr == 'M': self._chr = 'MT'
        return self._chr
    def Strand(self):
        return self._strand
    def Description(self):
        return self._description
    def Symbol(self):
        return self._symbol
    def __repr__(self):
        return self.GeneID()
def importEnsemblLiftOverData(filename):
fn=filepath(filename); ens_translation_db={}
print 'importing:',filename
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
tc, new_ens, new_coord = string.split(data,'\t')
ens_translation_db[tc]=new_ens
print len(ens_translation_db), 'Old versus new Ensembl IDs imported (from coordinate liftover and realignment)'
return ens_translation_db
def importJunctionArrayAnnotations(species,array_type,specific_array_type):
    """Associate Affymetrix junction-array transcript clusters with Ensembl genes.

    Combines three evidence sources - direct Ensembl gene/transcript annotations,
    old-to-new Ensembl liftover translations, and symbol plus chromosome/strand
    matching - then keeps the best-supported gene per transcript cluster.
    Exports the gene annotation table and the array-ID/Ensembl associations and
    returns ensembl_associations ({transcript_cluster_id: [Ensembl gene IDs]})."""
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_LiftOverEnsembl.txt'
    try: verifyFile(filename,array_type+'/'+specific_array_type) ### Downloads server file if not local
    except Exception: null=[]
    try: ens_translation_db = importEnsemblLiftOverData(filename)
    except Exception: ens_translation_db={}; print "No coordinate LiftOver file present (not supplied for HJAY or MJAY)!!!!"
    from build_scripts import EnsemblImport
    ens_gene_chr_db = EnsemblImport.importEnsGeneData(species) ### retrieves chromosome and strand info for each gene
    ensembl_annotations = 'AltDatabase/ensembl/'+ species + '/'+species+ '_Ensembl-annotations_simple.txt'
    ensembl_annotation_db = importGeneric(ensembl_annotations)
    extraction_type = 'Ensembl'
    tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
    if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
        ens_trans_gene_db = importGenericReverse('AltDatabase/ensembl/Hs/Hs_Ensembl_transcript-annotations.txt')
    ### Index Ensembl annotations by symbol and by gene ID for the evidence matching below
    ensembl_symbol_db={}; ensembl_gene_db={}
    for ens_geneid in ensembl_annotation_db:
        description, symbol = ensembl_annotation_db[ens_geneid]
        if ens_geneid in ens_gene_chr_db:
            chr,strand = ens_gene_chr_db[ens_geneid]
            ei = EnsemblInformation(chr,strand,ens_geneid,symbol,description)
            if len(symbol)>0:
                try: ensembl_symbol_db[symbol].append(ei)
                except KeyError: ensembl_symbol_db[symbol] =[ei]
            ensembl_gene_db[ens_geneid] = ei
    primary_gene_annotation_export = 'AltDatabase/'+species +'/'+ array_type +'/'+ array_type+ '_gene_annotations.txt'
    ens_match=0; sym_match=0; ensembl_associations={}; gene_annotation_db={}; missing_count=0
    ### We want to maximize accurate gene-transcript associations (given the poor state of annotations provided by Affymetrix in these files)
    for transcript_cluster_id in tc_ensembl_annotations:
        ti = tc_ensembl_annotations[transcript_cluster_id]
        try: ens_transcripts = ti.EnsemblTranscripts()
        except Exception: ens_transcripts = []
        ### Count supporting evidence per candidate gene, then rank
        ens_geneids={}; ens_geneid_ls=[]
        for gene in ti.EnsemblGeneIDs():
            if gene in ens_translation_db and gene not in ensembl_gene_db: ### This is the old lift over method where an old Ens in the annotation file is translated to a more recent ID
                gene = ens_translation_db[gene] ### translate the old to new Ensembl
            if gene in ensembl_gene_db:
                try: ens_geneids[gene]+=1
                except Exception: ens_geneids[gene]=1
                ens_match+=1
        if len(ti.EnsemblGeneIDs())>0:
            for transcript in ens_transcripts:
                try:
                    gene = ens_trans_gene_db[transcript]
                    try: ens_geneids[gene]+=1
                    except Exception: ens_geneids[gene]=1
                    ens_match+=1
                except Exception: pass
            #if transcript_cluster_id == 'TC01000626.hg.1':
            #print ti.EnsemblGeneIDs(), ti.EnsemblTranscripts(); sys.exit()
        if transcript_cluster_id in ens_translation_db:
            gene = ens_translation_db[transcript_cluster_id] ### translate the TC to new Ensembl
            if gene in ensembl_gene_db:
                try: ens_geneids[gene]+=1
                except Exception: ens_geneids[gene]=1
                ens_match+=1
        for symbol in ti.Symbol():
            if symbol in ensembl_symbol_db:
                for ei in ensembl_symbol_db[symbol]:
                    #print [symbol, ei.GeneID(),ti.Chromosome()]; sys.exit()
                    #print [ei.Chromosome(),ti.Chromosome(),ei.Strand(),ti.Strand()];kill
                    if ti.Chromosome() != 'not-assinged': ### Valid for HJAY and MJAY arrays
                        if ei.Chromosome() == ti.Chromosome() and ei.Strand() == ti.Strand():
                            try: ens_geneids[ei.GeneID()]+=1
                            except Exception: ens_geneids[ei.GeneID()]=1
                            sym_match+=1
                    else: ### Valid for GLU arrays (since Affymetrix decided to change the file formats and content!!!)
                        try: ens_geneids[ei.GeneID()]+=1
                        except Exception: ens_geneids[ei.GeneID()]=1
                        sym_match+=1
        for gene in ens_geneids: ens_geneid_ls.append([ens_geneids[gene],gene]) ### Rank these to get Ensembls that have symbol and ID evidence where possible
        ens_geneid_ls.sort(); ens_geneid_ls.reverse()
        if len(ens_geneid_ls)>0:
            ens_geneid = ens_geneid_ls[0][1] ### Best evidence gene association
            try: ensembl_associations[transcript_cluster_id].append(ens_geneid)
            except KeyError: ensembl_associations[transcript_cluster_id] = [ens_geneid]
            ei = ensembl_gene_db[ens_geneid]
            gene_annotation_db[transcript_cluster_id]=[ei.Description(),ens_geneid,ei.Symbol(),'']
        else:
            missing_count+=1
            #if missing_count<20: print transcript_cluster_id,ti.EnsemblGeneIDs(),ti.Symbol()
    if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
        ### Add TCs based on genomic overlap positions with Ensembl genes
        coordinates_to_annotate={}; added_genes=0
        for transcript_cluster_id in tc_ensembl_annotations:
            ti = tc_ensembl_annotations[transcript_cluster_id]
            if ti.Strand() == '-1': strand = '-'
            else: strand = '+'
            try: coordinates_to_annotate[ti.Chromosome(),strand].append([(ti.Start(),ti.End()),ti])
            except Exception: coordinates_to_annotate[ti.Chromosome(),strand] = [[(ti.Start(),ti.End()),ti]]
        import RNASeq
        limit = 0
        RNASeq.alignCoordinatesToGeneExternal(species,coordinates_to_annotate)
        for transcript_cluster_id in tc_ensembl_annotations:
            ti = tc_ensembl_annotations[transcript_cluster_id]
            if transcript_cluster_id not in gene_annotation_db:
                try:
                    if 'ENSG' in ti.GeneID() or 'ENSMUSG' in ti.GeneID():
                        gene_annotation_db[transcript_cluster_id]=['',ti.GeneID(),ti.Symbol()[0],'']
                        try: ensembl_associations[transcript_cluster_id].append(ti.GeneID())
                        except KeyError: ensembl_associations[transcript_cluster_id] = [ti.GeneID()]
                        added_genes+=1
                except Exception:
                    if limit < 0:# set to 20 - missing are typically retired Ensembl IDs
                        print transcript_cluster_id
                    limit+=1
            else:
                try:
                    if 'ENSG' in ti.GeneID() or 'ENSMUSG' in ti.GeneID(): added_genes+=1
                except Exception: pass
        print added_genes
    exportDB(primary_gene_annotation_export,gene_annotation_db)
    ensembl_associations = eliminate_redundant_dict_values(ensembl_associations)
    print ens_match, 'direct Ensembl-Ensembl gene mapping and', sym_match, 'indirect Symbol-chromosome mapping'
    print len(tc_ensembl_annotations)-len(ensembl_associations),'unmapped transcript clusters'
    print len(gene_annotation_db), 'transcripts with associated valid Ensembl gene IDs'#; sys.exit()
    """
    u=0 ### print transcript clusters without gene IDs
    for i in tc_ensembl_annotations:
        if i not in ensembl_associations:
            if u<15:
                print i, tc_ensembl_annotations[i].EnsemblGeneID(); u+=1
    """
    exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type) ###Use these For LinkEST program
    return ensembl_associations
def pickShortestExonIDDiff(exon_to_exon):
    """Given one or more 'X_to_Y' exon-ID comparisons ('|' or '///' delimited),
    return the [five_exon,three_exon] pair whose numeric IDs are closest together."""
    if '|' in exon_to_exon: delim = '|'
    else: delim = '///'
    if delim not in exon_to_exon:
        ### Single comparison - just split it apart
        try: five_exon,three_exon = string.split(exon_to_exon,'_to_')
        except Exception: print [exon_to_exon];sys.exit()
        return five_exon,three_exon
    ranked=[]
    for pair in string.split(exon_to_exon,delim):
        five_exon,three_exon = string.split(pair,'_to_')
        ### Numeric portion starts after the 5-character prefix; HTA IDs carry a suffix instead
        try: separation = abs(int(five_exon[5:])-int(three_exon[5:]))
        except Exception: separation = abs(int(five_exon[4:-3])-int(three_exon[4:-3])) #hta
        ranked.append((separation,[five_exon,three_exon]))
    ranked.sort()
    return ranked[0][1]
def importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type):
    """Parse the Affymetrix junction-array annotation file (HJAY/MJAY tab format,
    hGlue CSV, or HTA/MTA transcript/probeset CSVs).

    extraction_type controls what is collected and returned:
      - 'Ensembl': only transcript-cluster records are stored; returns probeset_db
      - 'sequence': exports critical junction/exon sequences keyed by Ensembl gene
        (requires the module-level ensembl_associations/ExonSeqData) and returns probeset_db
      - 'comparisons': builds left/right exon-cluster lookups and derives reciprocal
        junction comparisons, exported via exportUpdatedJunctionComps (returns None)
    Side effects: (re)binds the globals probeset_db, junction_comp_db and
    junction_alinging_probesets; in 'comparisons' mode also junction_inclusion_db,
    ensembl_exon_db, exon_gene_db, passed_db and failed_db."""
    print 'Importing junction array sequence mapping'
    export_dir = 'AltDatabase/'+species+'/'+array_type+'/'
    filename = export_dir+string.lower(species[0])+'jay.r2.annotation_map'
    if 'lue' in specific_array_type: ### Grab an hGlue specific annotation file
        filename = export_dir+string.lower(species[0])+'Glue_3_0_v1.annotation_map_dt.v3.hg18.csv'
    elif 'HTA' in specific_array_type:
        try: psr_probeset_db = importGenericReverse(export_dir+'probeset-psr.txt')
        except Exception:
            psr_probeset_db = importGenericReverse(export_dir+species+'_probeset-psr.txt')
        if extraction_type == 'Ensembl':
            filename = export_dir+'HTA-2_0.na33.hg19.transcript.csv'
            type = 'TranscriptCluster'
        else:
            filename = export_dir+'HTA-2_0.na33.hg19.probeset.csv'
            #filename = export_dir+'test.csv'
    elif 'MTA' in specific_array_type:
        try: psr_probeset_db = importGenericReverse(export_dir+'probeset-psr.txt')
        except Exception:
            psr_probeset_db = importGenericReverse(export_dir+species+'_probeset-psr.txt')
        if extraction_type == 'Ensembl':
            filename = export_dir+'MTA-1_0.na35.mm10.transcript.csv'
            type = 'TranscriptCluster'
        else:
            filename = export_dir+'MTA-1_0.na35.mm10.probeset.csv'
            #filename = export_dir+'test.csv'
    verifyFile(filename,array_type) ### Check's to see if it is installed and if not, downloads or throws an error
    fn=filepath(filename)
    if extraction_type == 'sequence':
        probeset_junctionseq_export = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-junction-seq.txt'
        fn2=filepath(probeset_junctionseq_export); dw = open(fn2,'w'); print "Exporting",probeset_junctionseq_export
    probeset_translation_db={}; x=0; tc=0; j=0; p=0; k=0; tc_db=(); transcript_cluster_count={}; transcript_cluster_count2={}
    global probeset_db; global junction_comp_db; junction_comp_db={}; global junction_alinging_probesets
    ps_db={}; jc_db={}; left_ec={}; right_ec={}; psr_ec={}; probeset_db={}; junction_alinging_probesets={}; nonconstitutive_junctions={}
    header_row = True; ct=0; probeset_types = {}
    for line in open(fn,'r').xreadlines():
        #if 'PSR170003198' in line:
        if '.csv' in filename:
            ### CSV parsing: protect commas inside quoted fields by converting them to '|'
            data = altCleanUpLine(line)
            if '"' in data :
                t = string.split(data,'"')
                new_string = t[0]
                for i in t[1:-1]:
                    if len(i)>1:
                        if ',' in i[1:-1]: ### can have legitimate commas on the outsides
                            i = string.replace(i,",",'|')
                    new_string+=i
                new_string+=t[-1]
                t = string.split(new_string[:-1],',')
            else: t = string.split(data,',')
        else:
            data = cleanUpLine(line)
            t = string.split(data,'\t')
        if x<5 or '#' == data[0]: x+=1
        elif x>2:
            if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
                if extraction_type != 'Ensembl': type = 'PSR'
                ### This is the probeset file which has a different structure and up-to-date genomic coordinates (as of hg19)
                if header_row:
                    ### Locate column positions by header name rather than fixed index
                    psr_index = t.index('probeset_id'); si = t.index('strand'); sqi = t.index('seqname')
                    starti = t.index('start'); endi = t.index('stop')
                    if type == 'TranscriptCluster':
                        ai = t.index('mrna_assignment'); gi = t.index('gene_assignment')
                    else:
                        pti = t.index('probeset_type'); jse = t.index('junction_start_edge'); jee = t.index('junction_stop_edge')
                        jsi = t.index('junction_sequence'); tci = t.index('transcript_cluster_id'); xi = t.index('exon_id')
                        csi = t.index('constituitive')
                    header_row = False
                else:
                    #probeset_type = t[pti]
                    #try: probeset_types[probeset_type]+=1
                    #except Exception: probeset_types[probeset_type]=1
                    #if probeset_type == 'main':
                    psr = t[psr_index]
                    try: probeset = psr_probeset_db[psr]
                    except Exception: probeset = psr
                    if type == 'TranscriptCluster':
                        ### Extract symbol, Ensembl gene and Ensembl transcript IDs from free-text assignments
                        transcript_annotation = t[ai]; gene_annotation = t[gi]
                        chr = t[sqi]
                        strand = t[si]
                        symbols=[]; ens_transcripts = []; geneids=[]
                        gene_annotation = string.split(gene_annotation,' /// ')
                        for ga in gene_annotation:
                            try: ga = string.split(ga,' // '); symbols = ga[1]
                            except Exception: pass
                        if 'ENSG' in transcript_annotation or 'ENSMUSG' in transcript_annotation:
                            if 'ENSG' in transcript_annotation: delim = 'ENSG'
                            if 'ENSMUSG' in transcript_annotation: delim = 'ENSMUSG'
                            try:
                                ta = string.split(transcript_annotation,delim)[1]
                                try: ta = string.split(ta,' ')[0]
                                except Exception: pass
                                geneids=delim+ta
                            except Exception: pass
                        if 'ENST' in transcript_annotation or 'ENSMUST' in transcript_annotation:
                            if 'ENST' in transcript_annotation: delim = 'ENST'
                            if 'ENSMUST' in transcript_annotation: delim = 'ENSMUST'
                            try:
                                gene_annotation = string.split(transcript_annotation,delim)[1]
                                try: gene_annotation = string.split(gene_annotation,' ')[0]
                                except Exception: pass
                                ens_transcripts = [delim+gene_annotation]
                            except Exception: pass
                        #if probeset == 'TC04000084.hg.1':
                        #print transcript_annotation;sys.exit()
                        #print probeset, strand, geneids, ens_transcripts, symbols
                        probeset = probeset[:-2] # remove the .1 or .0 at the end - doesn't match to the probeset annotations
                        psri = PSRAnnotation(psr,probeset,'',probeset,strand,geneids,symbols,'','','',type)
                        psri.setChromosome(chr)
                        try: psri.setStart(int(t[starti]))
                        except Exception: continue
                        psri.setEnd(int(t[endi]))
                        psri.setTranscripts(ens_transcripts)
                    elif 'JUC' in psr:
                        type = 'Junction'
                        exon_cluster = string.split(string.split(t[xi],'///')[0],'_to_') ### grab the first exonIDs
                        constitutive = t[csi]
                        transcript_cluster = string.split(t[tci],'///')[0]
                        chr = t[sqi]; strand = t[si]
                        if constitutive == 'Non-Constituitive': nonconstitutive_junctions[probeset]=[]
                        try: five_exon,three_exon = pickShortestExonIDDiff(t[xi])
                        except Exception:
                            five_exon,three_exon = exon_cluster
                        five_EC,three_EC = five_exon,three_exon ### NOT SURE THIS IS CORRECT
                        junction_alinging_probesets[probeset] = [five_exon,five_exon], [three_exon,three_exon]
                        seq = t[jsi]
                        seq = string.lower(string.replace(seq,'|',''))
                        psri = PSRAnnotation(psr,probeset,'',transcript_cluster,strand,'','',exon_cluster,constitutive,seq,type)
                        try: junction_start = int(t[jse]); junction_end = int(t[jee])
                        except Exception: print t;sys.exit()
                        ### Derive 16nt flanking exon coordinates on either side of the junction edge
                        if '-' in strand: junction_start, junction_end = junction_end,junction_start
                        exon1s = junction_start-16; exon1e = junction_start
                        exon2s = junction_end; exon2e = junction_end+16
                        if '-' in strand:
                            junction_start, junction_end = junction_end,junction_start
                            exon1s = junction_start+16; exon1e = junction_start
                            exon2s = junction_end; exon2e = junction_end-16
                        psri.setTranscriptClusterID(transcript_cluster)
                        psri.setChromosome(chr)
                        #print chr, transcript_cluster, exon1s, exon2s, seq, five_EC, three_EC;sys.exit()
                    elif 'PSR' in psr:
                        type = 'Exon'
                        exon_cluster = string.split(t[xi],'///')[0] ### grab the first exonIDs
                        constitutive = t[csi]
                        transcript_cluster = string.split(t[tci],'///')[0]
                        chr = t[sqi]; strand = t[si]
                        if constitutive == 'Non-Constituitive': nonconstitutive_junctions[probeset]=[]
                        five_EC,three_EC = five_exon,three_exon ### NOT SURE THIS IS CORRECT
                        psri = PSRAnnotation(psr,probeset,'',transcript_cluster,strand,'','',exon_cluster,constitutive,'',type)
                        exon_start = int(t[starti]); exon_end = int(t[endi])
                        if '-' in strand: exon_start, exon_end = exon_end,exon_start
                        psri.setTranscriptClusterID(transcript_cluster)
                        psri.setChromosome(chr)
            elif len(t)==15: ###Transcript Cluster ID Lines
                probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count = t
                type = 'TranscriptCluster'; seq=''; exon_cluster=''; constitutive=''
                if '|' in geneids: geneids = string.replace(geneids,'|',',')
                if '|' in symbols: symbols = string.replace(symbols,'|',',')
                psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
            elif 'TC' in t[0]: ###Transcript ID Lines - Glue array
                probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count = t[:15]
                type = 'TranscriptCluster'; seq=''; exon_cluster=''; constitutive=''; ucsclink = ''
                if '|' in geneids: geneids = string.replace(geneids,'|',',')
                if '|' in symbols: symbols = string.replace(symbols,'|',',')
                psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
            elif len(t)==28:###Junction ID Lines
                probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, junction_number, original_seq, exon_to_exon, observed_speculative, strand, five_PSR, three_PSR, five_EC, three_EC, Rel_5EC, Rel_3EC, constitutive, blat_junction = t
                type = 'Junction'; exon_cluster = [five_EC,three_EC]
                if constitutive == 'alternative': nonconstitutive_junctions[probeset]=[]
                five_exon,three_exon = pickShortestExonIDDiff(exon_to_exon)
                junction_alinging_probesets[probeset] = [five_PSR,five_exon], [three_PSR,three_exon]; seq = blat_junction
                psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
            elif len(t)==31 and len(t[29])>0: ###Junction ID Lines - Glue array
                probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, original_seq, genomic_position, exon_to_exon, observed_speculative, exon_cluster, constitutive, five_PSR, tr_hits, three_PSR, percent_tr_hits, five_EC, loc_5_3, three_EC, Rel_5EC, Rel_3EC, blat_junction = t
                if '|' in geneids: geneids = string.replace(geneids,'|',',')
                if '|' in symbols: symbols = string.replace(symbols,'|',',')
                type = 'Junction'; exon_cluster = [five_EC,three_EC]; ucsclink = ''
                if constitutive == 'alternative': nonconstitutive_junctions[probeset]=[]
                five_exon,three_exon = pickShortestExonIDDiff(exon_to_exon)
                junction_alinging_probesets[probeset] = [five_PSR,five_exon], [three_PSR,three_exon]; seq = blat_junction
                psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
            elif len(t)==24: ###Probeset ID Lines
                probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, PSR_region, genome_pos2, strand, exon_cluster, constitutive, TR_hits, percent_TR_hits, location_5to3_percent,seq = t
                type = 'Exon'
                psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
            elif len(t)==31 and len(t[29])== 0:##Probeset ID Lines - Glue array
                probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, original_seq, genomic_position, exon_to_exon, observed_speculative, exon_cluster, constitutive, five_PSR, tr_hits, three_PSR, percent_tr_hits, five_EC, loc_5_3, three_EC, Rel_5EC, Rel_3EC, seq = t
                if '|' in geneids: geneids = string.replace(geneids,'|',',')
                if '|' in symbols: symbols = string.replace(symbols,'|',',')
                type = 'Exon'; ucsclink = ''
                psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
            else:
                #if k<40 and len(t)>5: print len(t),t; k+=1
                type = 'null'
                #print len(t),data;sys.exit()
            ### Exon clusters are equivalent to exon blocks in this schema and can be matched between junctions and exons
            #if x < 20: print len(t),t[0],type
            store = 'yes'
            if extraction_type == 'Ensembl':
                if type != 'TranscriptCluster': store = 'no'
            elif extraction_type == 'sequence':
                store = 'no'
                if type == 'Exon' or type == 'Junction':
                    transcript_cluster_count[psri.TranscriptCluster()]=[]
                    if psri.TranscriptCluster() in ensembl_associations:
                        ens_geneid = ensembl_associations[psri.TranscriptCluster()][0]
                        critical_junctions=''
                        if type == 'Junction':
                            ### Export both half-junction sequences (5' and 3') as separate records
                            dw.write(probeset+'\t'+psri.JunctionSequence()+'\t\t\n')
                            seq = psri.JunctionSequences()[0]; exon_id = probeset+'|5'
                            seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
                            try: probeset_db[ens_geneid].append(seq_data)
                            except Exception: probeset_db[ens_geneid] = [seq_data]
                            try: seq_data.setExonStart(exon1s); seq_data.setExonStop(exon1e) ### HTA
                            except Exception: pass
                            seq = psri.JunctionSequences()[1]; exon_id = probeset+'|3'
                            seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
                            try: seq_data.setExonStart(exon2s); seq_data.setExonStop(exon2e) ### HTA
                            except Exception: pass
                            try: probeset_db[ens_geneid].append(seq_data)
                            except Exception: probeset_db[ens_geneid] = [seq_data]
                            transcript_cluster_count2[psri.TranscriptCluster()]=[]
                        elif type == 'Exon':
                            dw.write(probeset+'\t'+psri.Sequence()+'\t\t\n')
                            seq = psri.Sequence(); exon_id = probeset
                            seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
                            try: seq_data.setExonStart(exon_start); seq_data.setExonStop(exon_end) ### HTA
                            except Exception: pass
                            try: probeset_db[ens_geneid].append(seq_data)
                            except Exception: probeset_db[ens_geneid] = [seq_data]
                            transcript_cluster_count2[psri.TranscriptCluster()]=[]
            if store == 'yes':
                #if probeset in probeset_db: print probeset; sys.exit()
                try: probeset_db[probeset] = psri
                except Exception: null=[]
            if type == 'TranscriptCluster':
                tc+=1
            if type == 'Junction':
                #print 'here';sys.exit()
                j+=1
                if extraction_type == 'comparisons':
                    ### Store the left exon-cluster and right exon-cluster for each junction
                    try: left_ec[five_EC].append(probeset)
                    except KeyError: left_ec[five_EC]=[probeset]
                    try: right_ec[three_EC].append(probeset)
                    except KeyError: right_ec[three_EC]=[probeset]
            if type == 'Exon':
                p+=1
                if extraction_type == 'comparisons':
                    try: psr_ec[exon_cluster].append(probeset)
                    except KeyError: psr_ec[exon_cluster]=[probeset]
            """
            print 'psid',psid; print 'probeset',probeset; print 'ucsclink',ucsclink
            print 'transcript_cluster',transcript_cluster; print 'transcripts',transcripts
            print 'geneids',geneids; print 'symbols',symbols; print 'seq',seq; kill"""
            x+=1
    print 'TCs:',tc, 'Junctions:',j, 'Exons:',p, 'Total:',x; #sys.exit()
    #print 'JUC0900017373',probeset_db['JUC0900017373'].Sequence()
    #print 'JUC0900017385',probeset_db['JUC0900017385'].Sequence();kill
    if extraction_type == 'sequence':
        dw.close()
        print len(probeset_db),'Entries exported from Junction annotation file'
        return probeset_db
    if extraction_type == 'Ensembl':
        print len(probeset_db),'Entries exported from Junction annotation file'
        return probeset_db
    if extraction_type == 'comparisons':
        global junction_inclusion_db; global ensembl_exon_db; global exon_gene_db
        junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,'original')
        ensembl_exon_db,exon_gene_db = JunctionArrayEnsemblRules.importAndReformatEnsemblJunctionAnnotations(species,array_type,nonconstitutive_junctions)
        global failed_db; failed_db={}
        global passed_db; passed_db={}
        print len(junction_inclusion_db)
        identifyCompetitiveJunctions(right_ec,"3'")
        identifyCompetitiveJunctions(left_ec,"5'")
        print 'len(passed_db)',len(passed_db),'len(failed_db)',len(failed_db)
        print 'len(junction_inclusion_db)',len(junction_inclusion_db)
        exportUpdatedJunctionComps(species,array_type)
def exportUpdatedJunctionComps(species,array_type,searchChr=None):
    """Write the (global) junction_inclusion_db reciprocal-junction comparisons to
    the species/array '_junction_comps_updated' file. A per-chromosome file is
    written when searchChr is supplied; for RNASeq the file is appended to (the
    import is chromosome by chromosome) rather than rewritten."""
    db_version = unique.getCurrentGeneDatabaseVersion() ### Only need this since we are exporting to root_dir for RNASeq
    if array_type == 'RNASeq': species,root_dir=species
    else: root_dir = ''
    lines_exported=0
    if searchChr !=None:
        probeset_junction_export = root_dir+'AltDatabase/'+db_version+'/'+ species + '/'+array_type+'/comps/'+ species + '_junction_comps_updated.'+searchChr+'.txt'
    else:
        probeset_junction_export = root_dir+'AltDatabase/'+db_version+'/'+ species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
    if array_type == 'RNASeq':
        data,status = RNASeq.AppendOrWrite(probeset_junction_export) ### Creates a new file or appends if already existing (import is chromosome by chromosome)
    else:
        data = export.ExportFile(probeset_junction_export); status = 'not found'
    if array_type != 'RNASeq': print "Exporting",probeset_junction_export
    if status == 'not found':
        ### New file - write the column header first
        title = 'gene'+'\t'+'critical_exon'+'\t'+'exclusion_junction_region'+'\t'+'inclusion_junction_region'+'\t'+'exclusion_probeset'+'\t'+'inclusion_probeset'+'\t'+'data_source'+'\n'
        data.write(title)
    for i in junction_inclusion_db:
        critical_exons=[]
        for ji in junction_inclusion_db[i]:
            #value = string.join([ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()],'\t')+'\n'
            ### Combine all critical exons for a probeset pair
            critical_exons.append(ji.CriticalExon())
        ### One output line per probeset pair, using the last JunctionInformation object
        critical_exons = unique.unique(critical_exons); critical_exons = string.join(critical_exons,'|'); ji.setCriticalExons(critical_exons); lines_exported+=1
        data.write(ji.OutputLine())
    data.close()
    if array_type != 'RNASeq':
        print lines_exported,'for',probeset_junction_export
def identifyCompetitiveJunctions(exon_cluster_db,junction_type):
"""To identify critical exons (e.g., the alternatively spliced exon sequence for two alternative exon-junctions), this script:
1) Finds pairs of junctions that contain the same 5' or 3' exon-cluster (genomic overlapping transcript exons)
2) Determines which junction has exons that are closes in genomic space, between the pair of junctions (based on exon-cluster ID number or exon ID)
3) Selects the non-common exon and stores the junction sequence for that exon
4) Selects any exon probeset ID that is annotated as overlapping with the critical exon
The greatest assumption with this method is that the critical exon is choosen based on the numerical ID in the exon-cluster or exon ID (when the exon-clusters
between the two junctions are the same). For example looked at, this appears to be true (e.g., two exons that make up a junction have a difference of 1 in their ID),
but this may not always be the case. Ideally, this method is more extensively tested by evaluating junction and exon sequences mapped to genomic coordinates
and AltAnalyze exon block and region coordinates to verify the critical exon selection."""
passed=0; failed=0; already_added=0
if junction_type == "5'": index = 1
else: index = 0
for ec in exon_cluster_db:
if len(exon_cluster_db[ec])>1:
junction_comps={} ### Calculate all possible pairwise-junction comparisons
for junction1 in exon_cluster_db[ec]:
for junction2 in exon_cluster_db[ec]:
if junction1 != junction2: temp = [junction1,junction2]; temp.sort(); junction_comps[tuple(temp)]=[]
for (junction1,junction2) in junction_comps:
store_data = 'no'
if (junction1,junction2) in junction_inclusion_db or (junction2,junction1) in junction_inclusion_db:
already_added+=1
elif junction1 in ensembl_exon_db and junction2 in ensembl_exon_db: ### Thus, these are mapped to the genome
ed1 = ensembl_exon_db[junction1]; ed2 = ensembl_exon_db[junction2]
ensembl_gene_id = ed1.GeneID()
try: diff1 = ed1.JunctionDistance(); diff2 = ed2.JunctionDistance()
except Exception:
print junction1,junction2
psri1 = probeset_db[junction1]
psri2 = probeset_db[junction2]
print psri1.Probeset(), psri2.Probeset()
kill
### Using the ranked exon-cluster IDs
psri1 = probeset_db[junction1]; exon1a = psri1.ExternalExonClusterIDs()[0]; exon1b = psri1.ExternalExonClusterIDs()[-1]
psri2 = probeset_db[junction2]; exon2a = psri2.ExternalExonClusterIDs()[0]; exon2b = psri2.ExternalExonClusterIDs()[-1]
try: diffX1 = abs(int(exon1a[5:])-int(exon1b[5:])); diffX2 = abs(int(exon2a[5:])-int(exon2b[5:]))
except Exception:
diffX1 = abs(int(exon1a[4:-4])-int(exon1b[4:-4])); diffX2 = abs(int(exon2a[4:-4])-int(exon2b[4:-4]))
junction1_exon_id = ed1.ExonID(); junction2_exon_id = ed2.ExonID()
if diffX1==0 or diffX2==0: null=[] ### splicing occurs within a single exon-cluster
elif diff1<diff2: ### Thus the first junction contains the critical exon
#critical_exon_seq = psri1.JunctionSequences()[index] ### if left most exon in junction is common, then choose the most proximal right exon as critical
incl_junction_probeset = junction1; excl_junction_probeset = junction2
incl_junction_id = junction1_exon_id; excl_junction_id = junction2_exon_id
incl_exon_probeset,incl_exon_id = junction_alinging_probesets[junction1][index]
store_data = 'yes'
elif diff2<diff1:
incl_junction_probeset = junction2; excl_junction_probeset = junction1
incl_junction_id = junction2_exon_id; excl_junction_id = junction1_exon_id
incl_exon_probeset,incl_exon_id = junction_alinging_probesets[junction2][index]
store_data = 'yes'
if store_data == 'yes':
critical_exon_id = string.split(incl_junction_id,'-')[index]; critical_exon_id = string.replace(critical_exon_id,'.','-')
if incl_exon_probeset in ensembl_exon_db:
if (excl_junction_probeset,incl_exon_probeset) in junction_inclusion_db or (incl_exon_probeset,excl_junction_probeset) in junction_inclusion_db:
already_added+=1
else:
critical_exon_id = ensembl_exon_db[incl_exon_probeset]
ji=JunctionArrayEnsemblRules.JunctionInformation(ensembl_gene_id,critical_exon_id,excl_junction_id,critical_exon_id,excl_junction_probeset,incl_exon_probeset,'Affymetrix')
try: junction_inclusion_db[excl_junction_probeset,incl_exon_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_exon_probeset] = [ji]
#value = string.join([ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()],'\t')+'\n'
#print ji.OutputLine();kill
#print [[critical_exon_id,junction2,ed2.ExonID(), ed1.JunctionCoordinates(), ed2.JunctionCoordinates(), diff1,diff2]]
passed+=1
passed_db[junction1,junction2]=[]
ji=JunctionArrayEnsemblRules.JunctionInformation(ensembl_gene_id,critical_exon_id,excl_junction_id,incl_junction_id,excl_junction_probeset,incl_junction_probeset,'Affymetrix')
#print ensembl_gene_id,critical_exon_id,excl_junction_id,incl_junction_id,excl_junction_probeset,incl_junction_probeset;kill
#print [critical_exon_id,junction1,junction2,ed1.ExonID(),ed2.ExonID(), ed1.JunctionCoordinates(), ed2.JunctionCoordinates(), diff1,diff2]
try: junction_inclusion_db[exclusion_junction,inclusion_junction].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
print 'already_added:',already_added,'passed:',passed,'failed:',failed
def identifyJunctionComps(species,array_type,specific_array_type):
    """Associate reciprocal junctions/critical exons (Ensembl/UCSC based) with
    junction-array probesets and export the junction comparison file, then
    repeat for reciprocal junctions inferred from Affymetrix's own annotations."""
    ### Probeset-to-exon-region associations already exist at this point for
    ### exon and junction probesets (with critical exons/reciprocal junctions)
    JunctionArrayEnsemblRules.getJunctionComparisonsFromExport(species,array_type)
    ### Now derive comparisons directly from the vendor annotations
    importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,'comparisons')
    inferJunctionComps(species,array_type)
def filterForCriticalExons(species,array_type):
    """Filter a fixed set of probeset annotation files down to entries that map
    to possible critical exons (delegates to importForFiltering).

    Refactors seven near-identical filename/export_type call pairs into a
    single data-driven loop; the files and modes processed are unchanged."""
    base = 'AltDatabase/'+species+'/'+array_type+'/'
    ### (relative filename, export_type passed through to importForFiltering)
    filter_jobs = [
        (species+'_Ensembl_'+array_type+'_probesets.txt','exclude_junction_psrs'),
        (species+'_probeset_microRNAs_any.txt','include_critical_exon_ids'),
        (species+'_probeset_microRNAs_multiple.txt','include_critical_exon_ids'),
        (species+'_Ensembl_domain_aligning_probesets.txt','include_critical_exon_ids'),
        (species+'_Ensembl_indirect_domain_aligning_probesets.txt','include_critical_exon_ids'),
        ('exon/probeset-protein-annotations-exoncomp.txt','exclude_critical_exon_ids'),
        ('exon/probeset-domain-annotations-exoncomp.txt','exclude_critical_exon_ids')]
    for relative_path,export_type in filter_jobs:
        importForFiltering(species,array_type,base+relative_path,export_type)
def importForFiltering(species,array_type,filename,export_type):
    """Index a tab-delimited annotation file by its first-column key and hand
    the resulting dict (plus 'title'/'filename' metadata entries) to
    filterExistingFiles for critical-exon filtering.

    Keys of the form 'gene:probeset' are shortened to 'probeset' (and the line
    text is rewritten to match); '|5'/'|3' suffixes are stripped from the line."""
    fn=filepath(filename); dbase={}; x = 0
    print 'Filtering:',filename
    dbase['filename'] = filename
    ###Import expression data (non-log space)
    for line in open(fn,'r').xreadlines():
        data = cleanUpLine(line); splitup = 'no'
        # NOTE(review): the next test is not an elif, so after the title is
        # captured (x becomes 2) the header line also falls through into the
        # data branch below and is stored under its first field — confirm
        # downstream consumers expect/tolerate this.
        if x == 0: x=1; dbase['title'] = line; x+=1 ###Grab expression values
        if x !=0:
            key = string.split(data,'\t')[0]
            if ':' in key:
                # drop the gene prefix and normalize the raw line accordingly
                old_key = key
                key = string.split(key,':')[1]
                line = string.replace(line,old_key,key)
            if '|' in key: ### Get rid of |5 or |3
                line = string.replace(line,key,key[:-2])
                if export_type == 'exclude_critical_exon_ids': splitup = 'yes'
            if splitup == 'no':
                try: dbase[key].append(line)
                except Exception: dbase[key] = [line]
    #print len(dbase)
    filterExistingFiles(species,array_type,dbase,export_type)
def importGenericAppend(filename,key_db):
    """Load a tab-delimited file into key_db as first-column -> remaining
    columns (later duplicate keys overwrite earlier ones); returns key_db."""
    for line in open(filepath(filename),'rU'):
        fields = cleanUpLine(line).split('\t')
        key_db[fields[0]] = fields[1:]
    return key_db
def importGenericReverse(filename):
    """Map the LAST tab-delimited column of each line to the FIRST column."""
    reverse_db = {}
    for line in open(filepath(filename),'rU'):
        fields = cleanUpLine(line).split('\t')
        reverse_db[fields[-1]] = fields[0]
    return reverse_db
def importGenericAppendDBList(filename,key_db):
    """Append each line's second column to the list stored under its first
    column in key_db; returns key_db."""
    for line in open(filepath(filename),'rU'):
        fields = cleanUpLine(line).split('\t')
        key_db.setdefault(fields[0],[]).append(fields[1])
    return key_db
def combineExonJunctionAnnotations(species,array_type):
    """Collapse exon- and junction-level annotation files into single files.

    Currently used for RNASeq databases to minimize the number of files
    supplied to the user."""
    collapseSequenceFiles(species,array_type)
    overRideJunctionEntriesWithExons(species,array_type)
    for domain_file in (species+'_Ensembl_domain_aligning_probesets.txt',
                        species+'_Ensembl_indirect_domain_aligning_probesets.txt'):
        collapseDomainAlignmentFiles(species,array_type,domain_file)
def collapseDomainAlignmentFiles(species,array_type,filename):
    """Merge the root-level and junction-level domain alignment files and
    re-write the combined associations back to the root-level file."""
    root_path = 'AltDatabase/'+species+'/'+array_type+'/'+filename
    junction_path = 'AltDatabase/'+species+'/'+array_type+'/junction/'+filename
    domain_db = importGenericAppendDBList(root_path,{})
    domain_db = importGenericAppendDBList(junction_path,domain_db)
    del domain_db['Probeset'] ### remove the header-row entry before export
    exportGenericList(domain_db,root_path,'Probeset\tInterPro-Description\n')
def exportGenericList(db,filename,header):
data_export = export.ExportFile(filename)
if len(header)>0: data_export.write(header)
print 'Re-writing',filename
for key in db:
for i in db[key]: data_export.write(string.join([key]+[i],'\t')+'\n')
data_export.close()
def collapseSequenceFiles(species,array_type):
original_filename = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_exoncomp.txt'
seq_db = importGenericAppend(original_filename,{})
filename = 'AltDatabase/'+species+'/'+array_type+'/exon/SEQUENCE-protein-dbase_exoncomp.txt'
try: seq_db = importGenericAppend(filename,seq_db)
except Exception: print 'SEQUENCE-protein-dbase_exoncomp.txt - exon version not found'
filename = 'AltDatabase/'+species+'/'+array_type+'/junction/SEQUENCE-protein-dbase_exoncomp.txt'
try: seq_db = importGenericAppend(filename,seq_db)
except Exception: print 'SEQUENCE-protein-dbase_exoncomp.txt - junction version not found'
exportGeneric(seq_db,original_filename,[])
def exportGeneric(db,filename,header):
data_export = export.ExportFile(filename)
if len(header)>0: data_export.write(header)
print 'Re-writing',filename
for key in db:
data_export.write(string.join([key]+db[key],'\t')+'\n')
data_export.close()
def overRideJunctionEntriesWithExons(species,array_type):
    """Override junction-level protein/domain annotation entries with the
    corresponding exon-level entries."""
    base = 'AltDatabase/'+species+'/'+array_type+'/'
    for annotation_file in ('probeset-protein-annotations-exoncomp.txt',
                            'probeset-domain-annotations-exoncomp.txt'):
        overRideExistingEntries(base+'exon/'+annotation_file,base+'junction/'+annotation_file)
def overRideExonEntriesWithJunctions(species,array_type):
    """Override filtered exon/domain annotation entries with the corresponding
    junction-level entries (the reverse of overRideJunctionEntriesWithExons)."""
    base = 'AltDatabase/'+species+'/'+array_type+'/'
    ### (include file, exclude file) pairs fed to overRideExistingEntries
    file_pairs = [
        (base+'junction/'+species+'_Ensembl_domain_aligning_probesets.txt',
         base+species+'_Ensembl_domain_aligning_probesets-filtered.txt'),
        (base+'junction/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt',
         base+species+'_Ensembl_indirect_domain_aligning_probesets-filtered.txt'),
        (base+'junction/probeset-protein-annotations-exoncomp.txt',
         base+'exon/probeset-protein-annotations-exoncomp-filtered.txt'),
        (base+'junction/probeset-domain-annotations-exoncomp.txt',
         base+'exon/probeset-domain-annotations-exoncomp-filtered.txt')]
    for file_include,file_exclude in file_pairs:
        overRideExistingEntries(file_include,file_exclude)
def overRideExistingEntries(file_include,file_exclude):
    """Re-write file_exclude so that any key (first tab field) also present in
    file_include carries the file_include lines instead of its own."""
    ### Imports two files and over-rides entries in one with another
    ### These are the filtered entries to replace
    fn=filepath(file_include); dbase_include={}; x = 0
    for line in open(fn,'r').xreadlines():
        data = cleanUpLine(line)
        key = string.split(data,'\t')[0]
        try: dbase_include[key].append(line)
        except Exception: dbase_include[key] = [line]
        x+=1
    print x;title=''
    fn=filepath(file_exclude); dbase_exclude={}; x = 0
    for line in open(fn,'r').xreadlines():
        data = cleanUpLine(line)
        # NOTE(review): not an elif — after the title is captured x becomes 2,
        # so the header line also falls through and is stored as a data row.
        if x == 0: x=1; title = line; x+=1
        if x != 0:
            key = string.split(data,'\t')[0]
            try: dbase_exclude[key].append(line)
            except Exception: dbase_exclude[key] = [line]
            x+=1
    print x
    count=0
    for key in dbase_exclude: count+=1
    print file_exclude, count
    count=0
    for key in dbase_include:
        # replace (not append) the exclude entries for shared keys
        dbase_exclude[key] = dbase_include[key]
        count+=1
    print file_exclude, count
    dbase_exclude = eliminate_redundant_dict_values(dbase_exclude)
    data_export = export.ExportFile(file_exclude)
    count=0
    print 'Re-writing',file_exclude,'with junction aligned entries.'
    try: data_export.write(title)
    except Exception: null=[] ### Occurs when no alternative isoforms present for this genome
    for key in dbase_exclude:
        for line in dbase_exclude[key]:
            data_export.write(line); count+=1
    data_export.close()
    print count
def clearObjectsFromMemory(db_to_clear):
    """Best-effort removal of every entry from db_to_clear to free memory.

    Keys are snapshotted first so the container is not mutated while iterating.
    Works when db_to_clear is a dict; for list inputs the 'del' fallbacks fire."""
    db_keys={}
    for key in db_to_clear: db_keys[key]=[]
    for key in db_keys:
        try: del db_to_clear[key]
        except Exception:
            try:
                # NOTE(review): 'del i' only unbinds the loop variable — it does
                # not remove anything from db_to_clear; effectively a no-op.
                # Confirm whether real deletion was intended here.
                for i in key: del i ### For lists of tuples
            except Exception: del key ### For plain lists
class JunctionInformationSimple:
    """Lightweight record pairing an exclusion junction/probeset with its
    inclusion counterpart and the critical exon(s) that distinguish them.

    Used by getPutativeSpliceEvents as a memory-light alternative to the
    JunctionArrayEnsemblRules.JunctionInformation objects."""
    def __init__(self,critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset):
        self._critical_exon = critical_exon; self.excl_junction = excl_junction; self.incl_junction = incl_junction
        self.excl_probeset = excl_probeset; self.incl_probeset = incl_probeset
        #self.critical_exon_sets = string.split(critical_exon,'|')
        self.critical_exon_sets = [critical_exon]
    def CriticalExon(self):
        # dashes are normalized to periods in the returned exon identifier
        ce = str(self._critical_exon)
        if '-' in ce: ce = string.replace(ce,'-','.')
        return ce
    def CriticalExonList(self):
        # '|'-delimited critical exon string -> list of individual exon IDs
        critical_exon_str = self.CriticalExon()
        critical_exons = string.split(critical_exon_str,'|')
        return critical_exons
    def setCriticalExons(self,critical_exons): self._critical_exon = critical_exons
    def setCriticalExonSets(self,critical_exon_sets): self.critical_exon_sets = critical_exon_sets
    def setInclusionProbeset(self,incl_probeset): self.incl_probeset = incl_probeset
    def setInclusionJunction(self,incl_junction): self.incl_junction = incl_junction
    def CriticalExonSets(self): return self.critical_exon_sets ### list of critical exons (can select any or all for functional analysis)
    def InclusionJunction(self): return self.incl_junction
    def ExclusionJunction(self): return self.excl_junction
    def InclusionProbeset(self): return self.incl_probeset
    def ExclusionProbeset(self): return self.excl_probeset
    def setNovelEvent(self,novel_event): self._novel_event = novel_event
    def NovelEvent(self): return self._novel_event
    def setInclusionLookup(self,incl_junction_probeset): self.incl_junction_probeset = incl_junction_probeset
    def InclusionLookup(self): return self.incl_junction_probeset
    # NOTE(review): GeneID() is not defined on this class, so __repr__ would
    # raise AttributeError if invoked — confirm instances are never repr'd.
    def __repr__(self): return self.GeneID()
def getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir):
    """Import reciprocal junction comparisons from the species junction-comps
    file and organize them per Ensembl gene.

    When agglomerate_inclusion_probesets == 'yes', all inclusion probesets that
    share an exclusion probeset are merged into a single '|'-joined entry and
    exon_db gains the agglomerated probeset record.

    Returns (alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db,
    exon_db). NOTE(review): in the agglomeration branch critical_exon_db is
    reset to {} before return — confirm callers expect that."""
    alt_junction_db={}; critical_exon_db={}; critical_agglomerated={}; exon_inclusion_agglom={}; incl_junctions_agglom={}; exon_dbase={}
    exon_inclusion_db={}; comparisons=0
    ### Previously, JunctionArrayEnsemblRules.reimportJunctionComps (see above) used for import---> too memory intensive
    if array_type == 'junction': root_dir=''
    filename = root_dir+'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
    fn=filepath(filename); junction_inclusion_db={}; x=0
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line); junction_info=[]
        gene,critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset,source = string.split(data,'\t')
        if source == 'AltAnalyze': novel_exon = 'known'
        else: novel_exon = 'novel'
        """
        if gene == 'ENSG00000140464':
            a=0; b=0
            if excl_probeset in exon_db: a = 1
            if incl_probeset in exon_db: b = 1
            #print incl_probeset, a, b, excl_probeset, critical_exon
        """
        try:
            null=exon_db[excl_probeset] ### Exclusion needs to be present
            if incl_probeset in exon_db:
                ji = JunctionInformationSimple(critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset)
                junction_info.append(ji)
                ji.setNovelEvent(novel_exon) ### Indicates known or novel splicing event
                #print [ji.InclusionProbeset(),ji.ExclusionProbeset()]
                if array_type == 'RNASeq':
                    ### Additionally register one comparison per critical exon probeset
                    critical_exons = string.split(critical_exon,'|')
                    for ce in critical_exons:
                        critical_exon_probeset = gene+':'+ce
                        ji=JunctionInformationSimple(ce,excl_junction,ce,excl_probeset,critical_exon_probeset)
                        junction_info.append(ji); ji.setInclusionLookup(incl_probeset) ### Use this ID to get protein and domain annotations
                        ji.setNovelEvent(novel_exon) ### Indicates known or novel splicing event
                        """
                        if gene == 'ENSG00000140464' and ce == 'E5.2':
                            a=0; b=0
                            if ji.ExclusionProbeset() in exon_db: a = 1
                            if ji.InclusionProbeset() in exon_db: b = 1
                            print [ji.InclusionProbeset()],a,b;kill
                        """
                        #print [ji.InclusionProbeset(),ji.ExclusionProbeset()];kill
            for ji in junction_info:
                try:
                    geneid=exon_db[ji.InclusionProbeset()].GeneID() ### This inclusion needs to be present
                    if agglomerate_inclusion_probesets == 'yes':
                        exclProbeset = ji.ExclusionProbeset(); inclProbeset = ji.InclusionProbeset()
                        exon_inclusion_agglom[exclProbeset] = ji ### Just need one example
                        try: critical_exon_db[exclProbeset].append(ji.CriticalExon())
                        except Exception: critical_exon_db[exclProbeset]=[ji.CriticalExon()]
                        try: critical_agglomerated[exclProbeset]+=ji.CriticalExonList()
                        except Exception: critical_agglomerated[exclProbeset]=ji.CriticalExonList()
                        try: incl_junctions_agglom[exclProbeset].append(ji.InclusionJunction())
                        except Exception: incl_junctions_agglom[exclProbeset]=[ji.InclusionJunction()]
                        try: exon_inclusion_db[exclProbeset].append(inclProbeset)
                        except Exception: exon_inclusion_db[exclProbeset]=[inclProbeset]
                    else:
                        try: alt_junction_db[geneid].append(ji)
                        except Exception: alt_junction_db[geneid] = [ji]
                        comparisons+=1
                except KeyError: null=[]
        except KeyError: null=[]
    #print comparisons, "Junction comparisons in database"
    if agglomerate_inclusion_probesets == 'yes':
        alt_junction_agglom={}
        for excl in exon_inclusion_db:
            ji = exon_inclusion_agglom[excl]
            ed = exon_db[ji.InclusionProbeset()]; ed1 = ed
            geneid = ed.GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
            critical_exon_sets = unique.unique(critical_exon_db[excl])
            incl_probesets = unique.unique(exon_inclusion_db[excl])
            exon_inclusion_db[excl] = incl_probesets
            critical_exons = unique.unique(critical_agglomerated[excl]); critical_exons.sort()
            incl_junctions = unique.unique(incl_junctions_agglom[excl]); incl_junctions.sort()
            ji.setCriticalExons(string.join(critical_exons,'|'))
            ji.setInclusionJunction(string.join(incl_junctions,'|'))
            ji.setInclusionProbeset(string.join(incl_probesets,'|'))
            ji.setCriticalExonSets(critical_exon_sets)
            ed1.setProbeset(string.replace(incl_probesets[0],'@',':')) ### Actually needs to be the first entry to match of re-import of a filtered list for exon_db (full not abbreviated)
            #if '|' in ji.InclusionProbeset(): print ji.InclusionProbeset(), string.replace(incl_probesets[0],'@',':');sys.exit()
            #print string.join(incl_probesets,'|'),ji.InclusionProbeset();kill
            ### Create new agglomerated inclusion probeset entry
            #ed1.setProbeset(ji.InclusionProbeset()) ### Agglomerated probesets
            ed1.setDisplayExonID(string.join(incl_junctions,'|'))
            exon_db[ji.InclusionProbeset()] = ed1 ### Agglomerated probesets
            #if 'ENSMUSG00000032497:E23.1-E24.1' in ji.InclusionProbeset():
            #print ji.InclusionProbeset();sys.exit()
            #if '198878' in ji.InclusionProbeset(): print ji.InclusionProbeset(),excl
            try: alt_junction_db[geneid].append(ji)
            except Exception: alt_junction_db[geneid] = [ji]
        del exon_inclusion_agglom
        critical_exon_db={}
        if array_type == 'RNASeq':
            ### Need to remove the @ from the IDs
            for e in exon_inclusion_db:
                incl_probesets=[]
                for i in exon_inclusion_db[e]:
                    incl_probesets.append(string.replace(i,'@',':'))
                exon_inclusion_db[e] = incl_probesets
        #clearObjectsFromMemory(junction_inclusion_db); junction_inclusion_db=[]
        critical_agglomerated=[];exon_inclusion_agglom={}; incl_junctions_agglom={}
    """ Not used for junction or RNASeq platforms
    if array_type == 'AltMouse':
        for probeset in array_id_db:
            try:
                geneid = exon_db[probeset].GeneID()
                exons = exon_db[probeset].ExonID()
                exon_dbase[geneid,exons] = probeset
            except Exception: null=[]
    """
    #print '--------------------------------------------'
    ### Eliminate redundant entries
    objects_to_delete=[]
    for geneid in alt_junction_db:
        junction_temp_db={}; junction_temp_ls=[]
        for ji in alt_junction_db[geneid]: ### Redundant entries can be present
            id = ji.ExclusionProbeset(),ji.InclusionProbeset()
            if id in junction_temp_db: objects_to_delete.append(ji)
            else: junction_temp_db[id]=ji
        for i in junction_temp_db:
            ji = junction_temp_db[i]; junction_temp_ls.append(ji)
        alt_junction_db[geneid]=junction_temp_ls
    """
    for ji in alt_junction_db['ENSG00000140464']:
        print ji.ExclusionProbeset(), ji.InclusionProbeset(), ji.CriticalExon(), ji.ExclusionJunction(), ji.InclusionJunction()
    kill
    """
    clearObjectsFromMemory(objects_to_delete); objects_to_delete=[]
    return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
def getPutativeSpliceEventsOriginal(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir):
    """Original (more memory-intensive) variant of getPutativeSpliceEvents that
    re-imports junction comparisons via JunctionArrayEnsemblRules rather than
    streaming the comps file directly. Same return signature."""
    junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps((species,root_dir),array_type,'updated')
    alt_junction_db={}; critical_exon_db={}; critical_agglomerated={}; exon_inclusion_agglom={}; incl_junctions_agglom={}; exon_dbase={}
    exon_inclusion_db={}; comparisons=0
    for i in junction_inclusion_db:
        critical_exons=[]
        for ji in junction_inclusion_db[i]:
            #ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()
            if agglomerate_inclusion_probesets == 'yes':
                if ji.InclusionProbeset() in exon_db and ji.ExclusionProbeset() in exon_db:
                    if array_type == 'RNASeq':
                        exclProbeset = ji.ExclusionProbeset(); inclProbeset=JunctionArrayEnsemblRules.formatID(ji.InclusionProbeset())
                    else: exclProbeset = ji.ExclusionProbeset(); inclProbeset = ji.InclusionProbeset()
                    exon_inclusion_agglom[exclProbeset] = ji ### Just need one example
                    try: critical_exon_db[exclProbeset].append(ji.CriticalExon())
                    except Exception: critical_exon_db[exclProbeset]=[ji.CriticalExon()]
                    try: critical_agglomerated[exclProbeset]+=ji.CriticalExonList()
                    except Exception: critical_agglomerated[exclProbeset]=ji.CriticalExonList()
                    try: incl_junctions_agglom[exclProbeset].append(ji.InclusionJunction())
                    except Exception: incl_junctions_agglom[exclProbeset]=[ji.InclusionJunction()]
                    try: exon_inclusion_db[exclProbeset].append(inclProbeset)
                    except Exception: exon_inclusion_db[exclProbeset]=[inclProbeset]
            else:
                try:
                    geneid = exon_db[ji.InclusionProbeset()].GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
                    try: alt_junction_db[geneid].append(ji)
                    except Exception: alt_junction_db[geneid] = [ji]
                    comparisons+=1
                except Exception: geneid = ji.GeneID() ### If not in the local user datasets (don't think these genes need to be added)
    #print comparisons, "Junction comparisons in database"
    if agglomerate_inclusion_probesets == 'yes':
        alt_junction_agglom={}
        for excl in exon_inclusion_db:
            ji = exon_inclusion_agglom[excl]
            ed = exon_db[ji.InclusionProbeset()]; ed1 = ed
            geneid = ed.GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
            critical_exon_sets = unique.unique(critical_exon_db[excl])
            incl_probesets = unique.unique(exon_inclusion_db[excl])
            exon_inclusion_db[excl] = incl_probesets
            critical_exons = unique.unique(critical_agglomerated[excl]); critical_exons.sort()
            incl_junctions = unique.unique(incl_junctions_agglom[excl]); incl_junctions.sort()
            ji.setCriticalExons(string.join(critical_exons,'|'))
            ji.setInclusionJunction(string.join(incl_junctions,'|'))
            ji.setInclusionProbeset(string.join(incl_probesets,'|'))
            ji.setCriticalExonSets(critical_exon_sets)
            ed1.setProbeset(string.replace(incl_probesets[0],'@',':')) ### Actually needs to be the first entry to match of re-import of a filtered list for exon_db (full not abbreviated)
            #if '|' in ji.InclusionProbeset(): print ji.InclusionProbeset(), string.replace(incl_probesets[0],'@',':');sys.exit()
            #print string.join(incl_probesets,'|'),ji.InclusionProbeset();kill
            ### Create new agglomerated inclusion probeset entry
            #ed1.setProbeset(ji.InclusionProbeset()) ### Agglomerated probesets
            ed1.setDisplayExonID(string.join(incl_junctions,'|'))
            exon_db[ji.InclusionProbeset()] = ed1 ### Agglomerated probesets
            #if 'ENSMUSG00000032497:E23.1-E24.1' in ji.InclusionProbeset():
            #print ji.InclusionProbeset();sys.exit()
            #if '198878' in ji.InclusionProbeset(): print ji.InclusionProbeset(),excl
            try: alt_junction_db[geneid].append(ji)
            except Exception: alt_junction_db[geneid] = [ji]
        del exon_inclusion_agglom
        critical_exon_db={}
        if array_type == 'RNASeq':
            ### Need to remove the @ from the IDs
            for e in exon_inclusion_db:
                incl_probesets=[]
                for i in exon_inclusion_db[e]:
                    incl_probesets.append(string.replace(i,'@',':'))
                exon_inclusion_db[e] = incl_probesets
    ### Eliminate redundant entries
    objects_to_delete=[]
    for geneid in alt_junction_db:
        junction_temp_db={}; junction_temp_ls=[]
        for ji in alt_junction_db[geneid]: ### Redundant entries can be present
            id = ji.ExclusionProbeset(),ji.InclusionProbeset()
            if id in junction_temp_db: objects_to_delete.append(ji)
            else: junction_temp_db[id]=ji
        for i in junction_temp_db:
            ji = junction_temp_db[i]; junction_temp_ls.append(ji)
        alt_junction_db[geneid]=junction_temp_ls
    clearObjectsFromMemory(objects_to_delete); objects_to_delete=[]
    return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
def filterExistingFiles(species,array_type,db,export_type):
    """Remove probesets entries (including 5' and 3' junction exons) from the database that don't indicate possible critical exons"""
    export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
    ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'probesets',{})
    critical_junction_db = {}; critical_probeset_db={}; crit1={}
    junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,'updated')
    ### Index each (exclusion, inclusion) probeset pair by its critical exon IDs
    for ids in junction_inclusion_db:
        for jd in junction_inclusion_db[ids]:
            critical_exon_id = jd.ParentCriticalExon()
            critical_id = jd.GeneID()+':'+jd.CriticalExon()
            critical_exon_ids = string.split(critical_exon_id,'|')
            critical_junction_db[jd.ExclusionProbeset(),jd.InclusionProbeset()]=critical_exon_ids,critical_id
            crit1[critical_id]=[]
    """
    for id in crit1:
        if 'ENSMUSG00000066842' in id: print id
    stop
    """
    #print len(crit1);
    crit2={}
    for (pX,probeset) in critical_junction_db:
        ###Keep only junction probesets that contain possible critical exons
        p1 = probeset+'|5'; p2 = probeset+'|3'
        c1s,critical_id = critical_junction_db[(pX,probeset)]; proceed = 'no'
        #print p1, p2, c1s, critical_id
        #for probeset in db: print [probeset];kill
        if probeset in ensembl_probeset_db and probeset in db:
            critical_probeset_db[probeset,critical_id]=db[probeset]
            crit2[probeset]=[]
        else:
            # fall back to the 5'/3'-suffixed forms and require a critical exon match
            if p1 in ensembl_probeset_db and p1 in db:
                c2s = ensembl_probeset_db[p1]; p = p1
                c2s = string.split(c2s,'|')
                for c1 in c1s:
                    if c1 in c2s:
                        critical_probeset_db[p,critical_id]=db[p]
                        crit2[probeset]=[]
            if p2 in ensembl_probeset_db and p2 in db:
                c2s = ensembl_probeset_db[p2]; p = p2
                c2s = string.split(c2s,'|')
                for c1 in c1s:
                    if c1 in c2s:
                        critical_probeset_db[p,critical_id]=db[p]
                        crit2[probeset]=[]
    for probeset in ensembl_probeset_db: ### For non-junction probesets
        if '|' not in probeset:
            if probeset in db: critical_probeset_db[probeset,probeset]=db[probeset]; crit2[probeset]=[]
    critical_probeset_db = eliminate_redundant_dict_values(critical_probeset_db)
    print len(crit2),'len(crit2)'
    x=0
    """
    for probeset in db:
        if probeset not in crit2:
            x+=1
            if x<20: print probeset """
    print len(critical_probeset_db),': length of filtered db', len(db), ': length of db'
    """
    for probeset in ensembl_probeset_db:
        ###Keep only probesets that contain possible critical exons
        if '|' in probeset:
            if probeset[:-2] in critical_junction_db and probeset in db:
                critical_probeset_db[probeset[:-2]]=db[probeset]
            elif probeset in db: critical_probeset_db[probeset]=db[probeset] """
    """
    for id in critical_probeset_db:
        if 'ENSMUSG00000066842' in id[1]: print id
    stop
    """
    if export_type == 'exclude_junction_psrs':
        critical_probeset_db['title'] = db['title']
        critical_probeset_db['filename'] = db['filename']
        exportFiltered(critical_probeset_db)
    else:
        for p in db:
            if '|' not in p: probeset = p
            else: probeset = p[:-2]
            if probeset not in crit2:
                ### Add back any junction probesets that do not have a critical exon component
                critical_probeset_db[probeset,probeset]=db[p]
        if export_type == 'exclude_critical_exon_ids':
            ### Collapse |5/|3 and gene-scoped IDs onto the bare probeset ID
            critical_probeset_db2={}
            for (p,cid) in critical_probeset_db:
                if ':' in cid or '|' in p:
                    critical_probeset_db2[p[:-2],p[:-2]] = critical_probeset_db[(p,cid)]
                else: critical_probeset_db2[p,p] = critical_probeset_db[(p,cid)]
            critical_probeset_db = critical_probeset_db2
        critical_probeset_db['title'] = db['title']
        critical_probeset_db['filename'] = db['filename']
        exportFiltered(critical_probeset_db)
########### Code originally designed for AltMouseA array database builds (adapted for use with Mouse and Human Junction Arrays)
def filterExpressionData(filename1,filename2,pre_filtered_db,constitutive_db):
    """Filter an AltMouse expression matrix (filename1) to probesets present in
    pre_filtered_db and write it to R_expression_raw_data as '-filtered.txt'.

    filename2 maps numeric probeset IDs back to real IDs (only used when the
    module-level global analysis_method != 'rma'). Constitutive probesets must
    additionally be >99% present. Depends on globals analysis_method/species.
    NOTE(review): 'array_names' and 'z' are only bound inside the read loop —
    an empty/headerless input would raise NameError below; also 'data[0]'
    assumes no blank lines. Confirm inputs guarantee this."""
    fn2=filepath(filename2)
    probeset_translation_db={}
    ###Import probeset number/id relationships (note: forced to use numeric IDs for Plier/Exact analysis)
    if analysis_method != 'rma':
        for line in open(fn2,'r').xreadlines():
            data = cleanUpLine(line)
            probeset_number,probeset_id = string.split(data,'\t')
            probeset_translation_db[probeset_number]=probeset_id
    fn=filepath(filename1)
    exp_dbase={}; d = 0; x = 0
    ###Import expression data (non-log space)
    try:
        for line in open(fn,'r').xreadlines():
            data = cleanUpLine(line)
            if data[0] != '#' and x == 1: ###Grab expression values
                tab_delimited_data = string.split(data,'\t')
                z = len(tab_delimited_data)
                probeset = tab_delimited_data[0]
                if analysis_method == 'rma': exp_vals = tab_delimited_data[1:]
                else: exp_vals = convertToLog2(tab_delimited_data[1:])
                ###Filter results based on whether a sufficient number of samples where detected as Present
                if probeset in pre_filtered_db:
                    if probeset in probeset_translation_db: original_probeset_id = probeset_translation_db[probeset]
                    else: original_probeset_id = probeset ###When p-values are generated outside of Plier
                    if original_probeset_id in constitutive_db:
                        percent_present = pre_filtered_db[probeset]
                        if percent_present > 0.99: exp_dbase[original_probeset_id] = exp_vals
                        #else: print percent_present,original_probeset_id; kill
                    else: exp_dbase[original_probeset_id] = exp_vals
            elif data[0] != '#' and x == 0: ###Grab labels
                array_names = []
                tab_delimited_data = string.split(data,'\t')
                for entry in tab_delimited_data: array_names.append(entry)
                x += 1
    except IOError: exp_dbase = exp_dbase
    print len(exp_dbase),"probesets imported with expression values"
    ###If the arrayid column header is missing, account for this
    if len(array_names) == z:
        array_names = array_names[1:]
    null,filename = string.split(filename1,'\\')
    filtered_exp_export = 'R_expression_raw_data\\'+filename[:-4]+'-filtered.txt'
    fn=filepath(filtered_exp_export); data = open(fn,'w'); title = 'probeset_id'
    for array in array_names: title = title +'\t'+ array
    data.write(title+'\n')
    for probeset in exp_dbase:
        exp_vals = probeset
        for exp_val in exp_dbase[probeset]:
            exp_vals = exp_vals +'\t'+ str(exp_val)
        data.write(exp_vals+'\n')
    data.close()
    #return filtered_exp_export
def convertToLog2(data_list):
    """Return log2(x + 1) for each value in data_list (values may be numeric
    strings). The +1 offset keeps zero intensities finite, since log2(0) is
    undefined."""
    return [math.log(float(item)+1,2) for item in data_list]
def getAnnotations(filename,p,Species,Analysis_Method,constitutive_db):
    """Filter AltMouse expression data down to detected probesets.

    Sets the module globals 'species' and 'analysis_method' as a side effect,
    then runs ExonArray filtering followed by filterExpressionData."""
    global species
    global analysis_method
    species = Species
    analysis_method = Analysis_Method
    filtered_junctions_list = ExonArray.getFilteredExons(filename,p)
    translation_file = 'AltDatabase/'+species+'/AltMouse/AltMouse-probeset_translation.txt'
    return filterExpressionData(filename,translation_file,filtered_junctions_list,constitutive_db)
def altCleanUpLine(line):
    """Strip newline, carriage-return and stray '\c' artifacts from a line
    (unlike cleanUpLine, double-quote characters are preserved)."""
    for token in ('\n','\c','\r'):
        line = string.replace(line,token,'')
    return line
def cleanUpLine(line):
    """Strip newline, carriage-return, stray '\c' artifacts and double-quote
    characters from a line."""
    for token in ('\n','\c','\r','"'):
        line = string.replace(line,token,'')
    return line
def importGeneric(filename):
    """Import a tab-delimited file keyed by its first column.

    Two-column rows accumulate their second field into a list per key;
    wider rows are stored directly as the value list (duplicates overwrite)."""
    verifyFile(filename,None)
    key_db = {}
    for line in open(filepath(filename),'rU'):
        fields = cleanUpLine(line).split('\t')
        if len(fields) == 2:
            key_db.setdefault(fields[0],[]).append(fields[1])
        else: key_db[fields[0]] = fields[1:]
    return key_db
def eliminate_redundant_dict_values(database):
    """Return a new dict mapping each key of `database` to its de-duplicated,
    sorted value list (the input dict is left untouched).

    Fix: the local previously named 'list' shadowed the builtin."""
    db1={}
    for key in database:
        values = unique.unique(database[key]) ### project helper: de-duplicate
        values.sort()
        db1[key] = values
    return db1
def importAnnotateCriticalExonSequences(species,array_type):
    """Import critical-exon sequences annotated with array-gene -> Ensembl links."""
    ensembl_associations = importArrayAnnotations(species,array_type)
    seq_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-exon-seq.txt'
    return importCriticalExonSeq(seq_filename,array_type,ensembl_associations)
def importArrayAnnotations(species,array_type):
    """Build array-gene -> Ensembl gene associations.

    Combines the direct array-Ensembl file with additional links inferred via
    shared gene symbols from the Ensembl annotations, de-duplicates, exports
    the relationships file (for LinkEST) and returns the association dict."""
    base = 'AltDatabase/'+species+'/'+array_type+'/'
    primary_gene_annotation_file = base+array_type+'_gene_annotations.txt'
    ensembl_array_gene_annotation_file = base+array_type+'-Ensembl.txt'
    ensembl_annotations = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt'
    for annotation_file in (primary_gene_annotation_file,ensembl_array_gene_annotation_file,ensembl_annotations):
        verifyFile(annotation_file,array_type)
    array_gene_annotations = importGeneric(primary_gene_annotation_file)
    ensembl_associations = importGeneric(ensembl_array_gene_annotation_file)
    ensembl_annotation_db = importGeneric(ensembl_annotations)
    ### Index Ensembl gene IDs by gene symbol
    ensembl_symbol_db={}
    for ens_geneid,(description,symbol) in ensembl_annotation_db.items():
        if len(symbol)>0:
            ensembl_symbol_db.setdefault(symbol,[]).append(ens_geneid)
    ### Update array Ensembl annotations via shared symbols
    for array_geneid,annotation in array_gene_annotations.items():
        symbol = annotation[2]
        if symbol in ensembl_symbol_db:
            for ens_geneid in ensembl_symbol_db[symbol]:
                ensembl_associations.setdefault(array_geneid,[]).append(ens_geneid)
    ensembl_associations = eliminate_redundant_dict_values(ensembl_associations)
    exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type) ###Use these For LinkEST program
    return ensembl_associations
def exportDB(filename,db):
fn=filepath(filename); data = open(fn,'w')
for key in db:
try: values = string.join([key]+db[key],'\t')+'\n'; data.write(values)
except Exception: print key,db[key];sys.exit()
data.close()
def exportFiltered(db):
    """Write a filtered annotation database to '<filename>-filtered.txt'.

    db must carry 'filename' and 'title' entries (both are deleted here, so
    the caller's dict is mutated); the remaining keys are (old_id, new_id)
    tuples whose lines get the old probeset ID replaced by the new one."""
    filename = db['filename']; title = db['title']
    filename = string.replace(filename,'.txt','-filtered.txt')
    print 'Writing',filename
    del db['filename']; del db['title']
    fn=filepath(filename); data = open(fn,'w'); data.write(title)
    for (old,new) in db:
        for line in db[(old,new)]: ### Replace the old ID with the new one
            # NOTE(review): 'old' is rebound here and the shortened value then
            # persists for the remaining lines of this key — confirm intended.
            if old not in line and '|' in old:
                old = old[:-2]
            if ('miR-'+new) in line: ### Occurs when the probeset is a number found in the miRNA name
                line = string.replace(line,'miR-'+new,'miR-'+old)
            line = string.replace(line,old,new); data.write(line)
    data.close()
def exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type):
    """Export array gene ID -> Ensembl gene ID pairs (one row per pair) to
    the species' '<array_type>-Ensembl_relationships.txt' file."""
    annotation_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-Ensembl_relationships.txt'
    fn=filepath(annotation_db_filename); data = open(fn,'w')
    title = ['ArrayGeneID','Ensembl']; title = string.join(title,'\t')+'\n'
    data.write(title)
    for array_geneid in ensembl_associations:
        for ens_geneid in ensembl_associations[array_geneid]:
            values = [array_geneid,ens_geneid]; values = string.join(values,'\t')+'\n'; data.write(values)
    data.close()
def exportCriticalExonLocations(species,array_type,critical_exon_seq_db):
    """Write critical exon genomic locations (one row per ExonSeqData
    object) to '<array_type>_critical_exon_locations.txt'. Entries whose
    coordinate attributes were never set are silently skipped."""
    location_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical_exon_locations.txt'
    fn=filepath(location_db_filename); data = open(fn,'w')
    title = ['Affygene','ExonID','Ensembl','start','stop','gene-start','gene-stop','ExonSeq']; title = string.join(title,'\t')+'\n'
    data.write(title)
    for ens_geneid in critical_exon_seq_db:
        for cd in critical_exon_seq_db[ens_geneid]:
            try:
                values = [cd.ArrayGeneID(),cd.ExonID(),ens_geneid,cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()]
                values = string.join(values,'\t')+'\n'
                data.write(values)
            except AttributeError:
                # Coordinates missing for this entry (never located); skip it
                #print cd.ArrayGeneID(), cd.ExonID()
                #print cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()
                #sys.exit()
                pass
    data.close()
class ExonSeqData:
    """Container for a critical-exon sequence with its probeset identifiers
    and (optionally, set after import) exon and gene genomic coordinates."""
    def __init__(self,exon,array_geneid,probeset_id,critical_junctions,critical_exon_seq):
        self._exon = exon; self._array_geneid = array_geneid; self._critical_junctions = critical_junctions
        self._critical_exon_seq = critical_exon_seq; self._probeset_id = probeset_id
    # Simple accessors for the values captured at construction
    def ProbesetID(self): return self._probeset_id
    def ArrayGeneID(self): return self._array_geneid
    def ExonID(self): return self._exon
    def CriticalJunctions(self): return self._critical_junctions
    # Sequence is always reported upper-case
    def ExonSeq(self): return string.upper(self._critical_exon_seq)
    def setExonStart(self,exon_start):
        # First-write-wins: an AttributeError means the value was never set
        try: self._exon_start = self._exon_start ### If it already is set from the input file, keep it
        except Exception: self._exon_start = exon_start
    def setExonStop(self,exon_stop):
        try: self._exon_stop = self._exon_stop ### If it already is set from the input file, keep it
        except Exception: self._exon_stop = exon_stop
    def setGeneStart(self,gene_start): self._gene_start = gene_start
    def setGeneStop(self,gene_stop): self._gene_stop = gene_stop
    # Coordinate accessors return strings, ready for tab-delimited export
    def ExonStart(self): return str(self._exon_start)
    def ExonStop(self): return str(self._exon_stop)
    def GeneStart(self): return str(self._gene_start)
    def GeneStop(self): return str(self._gene_stop)
def importCriticalExonSeq(filename,array_type,ensembl_associations):
    """Parse a critical-exon sequence file and return a dictionary of
    Ensembl gene ID -> [ExonSeqData, ...].

    Only rows whose array gene has an Ensembl association and whose
    sequence is longer than 5 nt are kept; the first row is a header.
    """
    verifyFile(filename,array_type)
    fn=filepath(filename); key_db = {}; x = 0
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if x == 0: x = 1  # skip the header row
        else:
            arraygeneid_exon,critical_junctions,critical_exon_seq = string.split(data,'\t')
            if len(critical_exon_seq)>5:
                array_geneid, exon = string.split(arraygeneid_exon,':')
                if array_geneid in ensembl_associations:
                    ens_geneids = ensembl_associations[array_geneid]
                    for ens_geneid in ens_geneids:
                        seq_data = ExonSeqData(exon,array_geneid,arraygeneid_exon,critical_junctions,critical_exon_seq)
                        try: key_db[ens_geneid].append(seq_data)
                        except KeyError: key_db[ens_geneid] = [seq_data]
    return key_db
def updateCriticalExonSequences(array_type, filename,ensembl_probeset_db):
    """Rewrite the critical-exon sequence file with freshly extracted
    sequences, writing the result to '<filename minus .txt>_updated.txt'.

    For AltMouse arrays the original file is re-read and rows with an
    updated sequence are substituted in place; for junction arrays only
    the probeset/sequence pairs are emitted.
    """
    exon_seq_db_filename = filename[:-4]+'_updated.txt'
    fn=filepath(exon_seq_db_filename); data = open(fn,'w')
    critical_exon_seq_db={}
    # Index the updated sequences by probeset ID
    for ens_gene in ensembl_probeset_db:
        for probe_data in ensembl_probeset_db[ens_gene]:
            exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
            try: critical_exon_seq_db[probeset_id] = ed.ExonSeq()
            except AttributeError: null=[] ### Occurs when no sequence data is associated with exon (probesets without exon associations)
    ensembl_probeset_db=[]; key_db = {}; x = 0
    if array_type == 'AltMouse':
        fn1=filepath(filename)
        verifyFile(filename,array_type)
        for line in open(fn1,'rU').xreadlines():
            line_data = cleanUpLine(line)
            if x == 0: x = 1; data.write(line)  # copy the header through
            else:
                arraygeneid_exon,critical_junctions,critical_exon_seq = string.split(line_data,'\t')
                if arraygeneid_exon in critical_exon_seq_db:
                    critical_exon_seq = critical_exon_seq_db[arraygeneid_exon]
                    values = [arraygeneid_exon,critical_junctions,critical_exon_seq]
                    values = string.join(values,'\t')+'\n'
                    data.write(values)
                else: data.write(line)
    elif array_type == 'junction':
        ### We don't need any of the additional information used for AltMouse arrays
        for probeset in critical_exon_seq_db:
            critical_exon_seq = critical_exon_seq_db[probeset]
            if ':' in probeset:
                probeset = string.split(probeset,':')[1]
            values = [probeset,'',critical_exon_seq]
            values = string.join(values,'\t')+'\n'
            data.write(values)
    data.close()
    print exon_seq_db_filename, 'exported....'
def inferJunctionComps(species,array_type,searchChr=None):
    """Infer reciprocal (exclusion/inclusion) junction comparisons from the
    junction probeset region IDs for a species/array and export them.

    array_type may be a 3-tuple (array_type, ensembl_probeset_db, root_dir)
    when the probeset database has already been loaded by the caller.
    For RNASeq data the derived junction and critical-exon annotations are
    returned instead of printed progress.
    """
    if len(array_type) == 3:
        ### This indicates that the ensembl_probeset_db is already included
        array_type,ensembl_probeset_db,root_dir = array_type
        comps_type = ''
    else:
        export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
        ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'junction-regions',{})
        comps_type = 'updated'; root_dir = ''
    if array_type != 'RNASeq':
        print "Import junction probeset region IDs for",species
        print "Preparing region IDs for analysis of possible reciprocal junctions"
    putative_as_junction_db={}; probeset_juntion_db={}; common_exon_blocks_exon={}; common_exon_blocks_intron={}; count=0
    for gene in ensembl_probeset_db:
        for (probeset,regionid) in ensembl_probeset_db[gene]:
            regionids = string.split(regionid,'|')
            for regionid in regionids:
                if '-' in regionid:
                    # Parse 'E1.2-E3.1'-style junction IDs into numeric
                    # (block, region) tuples; '_' marks a novel splice site
                    novel_5p=False; novel_3p=False
                    if 'I' in regionid: exons_type = 'exon-intron'
                    else: exons_type = 'exons'
                    exon_5prime_original, exon_3prime_original = string.split(regionid,'-')
                    exon_5prime = string.split(exon_5prime_original,'.')
                    if '_' in exon_5prime[1]:
                        exon_5prime[1] = float(string.replace(exon_5prime[1],'_','.'))
                        novel_5p=True
                    else: exon_5prime[1] = int(exon_5prime[1])
                    e1a3 = (int(exon_5prime[0][1:]),int(exon_5prime[1])) ### The first is an int for the region - since it hybs early
                    e1a5 = (int(exon_5prime[0][1:]),exon_5prime[1])
                    e1 = e1a3, e1a5
                    exon_3prime = string.split(exon_3prime_original,'.')
                    if '_' in exon_3prime[1]:
                        exon_3prime[1] = float(string.replace(exon_3prime[1],'_','.'))
                        novel_3p=True
                    else:
                        try: exon_3prime[1] = int(exon_3prime[1])
                        except Exception: print exon_3prime;kill
                    e2a3 = (int(exon_3prime[0][1:]),exon_3prime[1])
                    e2a5 = (int(exon_3prime[0][1:]),int(exon_3prime[1])) ### The second is an int for the region - since it hybs late
                    e2 = e2a3, e2a5
                    if exons_type == 'exons':
                        if novel_5p and novel_3p:
                            None ### Ignore junctions where both the 5' and 3' splice sites are novel -> like false positives
                            ### If you include these with novel junction discovery in TopHat, you can get a huge memory issue in compareJunctions
                        else:
                            count+=1
                            try: putative_as_junction_db[gene].append((e1,e2))
                            except Exception: putative_as_junction_db[gene] = [(e1,e2)]
                            ### This matches the recorded junction ID from EnsemblImport.compareJunctions()
                            try: probeset_juntion_db[gene,(e1a5,e2a3)].append(probeset)
                            except Exception: probeset_juntion_db[gene,(e1a5,e2a3)] = [probeset]
                            ### Defines exon-intron and exon-exon reciprical junctions based on shared exon blocks
                            block = e1a3[0]; side = 'left'
                            try: common_exon_blocks_exon[side,gene,block].append([regionid,probeset])
                            except KeyError: common_exon_blocks_exon[side,gene,block] = [[regionid,probeset]]
                            block = e2a3[0]; side = 'right'
                            try: common_exon_blocks_exon[side,gene,block].append([regionid,probeset])
                            except KeyError: common_exon_blocks_exon[side,gene,block] = [[regionid,probeset]]
                    else:
                        ### Defines exon-intron and exon-exon reciprical junctions based on shared exon blocks
                        ### In 2.0.8 we expanded the search criterion here so that each side and exon-block are searched for matching junctions (needed for confirmatory novel exons)
                        if 'I' in exon_5prime or 'I' in exon_5prime[0]: ### Can be a list with the first object being the exon annotation
                            block = e2a3[0]; side = 'right'; critical_intron = exon_5prime_original
                            alt_block = e1a3[0]; alt_side = 'left'
                        else:
                            block = e1a3[0]; side = 'left'; critical_intron = exon_3prime_original
                            alt_block = e2a3[0]; alt_side = 'right'
                        #if gene == 'ENSG00000112695':
                        #print critical_intron,regionid,probeset, exon_5prime_original, exon_3prime_original, exon_5prime
                        try: common_exon_blocks_intron[side,gene,block].append([regionid,probeset,critical_intron])
                        except KeyError: common_exon_blocks_intron[side,gene,block] = [[regionid,probeset,critical_intron]]
                        ### Below added in 2.0.8 to accomidate for a broader comparison of reciprocol splice junctions
                        try: common_exon_blocks_intron[alt_side,gene,alt_block].append([regionid,probeset,critical_intron])
                        except KeyError: common_exon_blocks_intron[alt_side,gene,alt_block] = [[regionid,probeset,critical_intron]]
    if array_type != 'RNASeq':
        print count, 'probed junctions being compared to identify putative reciprocal junction comparisons'
    critical_exon_db, critical_gene_junction_db = EnsemblImport.compareJunctions(species,putative_as_junction_db,{},rootdir=root_dir, searchChr=searchChr)
    if array_type != 'RNASeq':
        print len(critical_exon_db),'genes with alternative reciprocal junctions pairs found'
    global junction_inclusion_db; count=0; redundant=0; junction_annotations={}; critical_exon_annotations={}
    junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,(comps_type,ensembl_probeset_db))
    # Record each non-redundant exclusion/inclusion pair as a
    # JunctionInformation object and collect per-probeset annotations
    for gene in critical_exon_db:
        for sd in critical_exon_db[gene]:
            junction_pairs = getJunctionPairs(sd.Junctions())
            """
            if len(junction_pairs)>1 and len(sd.CriticalExonRegion())>1:
                print
                .Junctions()
                print sd.CriticalExonRegion();kill"""
            for (junction1,junction2) in junction_pairs:
                critical_exon = sd.CriticalExonRegion()
                excl_junction,incl_junction = determineExclIncl(junction1,junction2,critical_exon)
                incl_junction_probeset = probeset_juntion_db[gene,incl_junction][0]
                excl_junction_probeset = probeset_juntion_db[gene,excl_junction][0]
                source = 'Inferred'
                incl_junction=formatJunctions(incl_junction)
                excl_junction=formatJunctions(excl_junction)
                critical_exon=string.replace(formatJunctions(critical_exon),'-','|'); count+=1
                ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_exon,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
                #if gene == 'ENSG00000112695':# and 'I' in critical_exon:
                #print critical_exon,'\t', incl_junction,'\t',excl_junction_probeset,'\t',incl_junction_probeset
                if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db:
                    try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
                    except KeyError: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
                    junction_str = string.join([excl_junction,incl_junction],'|')
                    #splice_event_str = string.join(sd.SpliceType(),'|')
                    try: junction_annotations[ji.InclusionProbeset()].append((junction_str,sd.SpliceType()))
                    except KeyError: junction_annotations[ji.InclusionProbeset()] = [(junction_str,sd.SpliceType())]
                    try: junction_annotations[ji.ExclusionProbeset()].append((junction_str,sd.SpliceType()))
                    except KeyError: junction_annotations[ji.ExclusionProbeset()] = [(junction_str,sd.SpliceType())]
                    critical_exons = string.split(critical_exon,'|')
                    for critical_exon in critical_exons:
                        try: critical_exon_annotations[gene+':'+critical_exon].append((junction_str,sd.SpliceType()))
                        except KeyError: critical_exon_annotations[gene+':'+critical_exon] = [(junction_str,sd.SpliceType())]
                else: redundant+=1
    if array_type != 'RNASeq':
        print count, 'Inferred junctions identified with',redundant, 'redundant.'
    ### Compare exon and intron blocks for intron alinging junctions
    junction_inclusion_db = annotateNovelIntronSplicingEvents(common_exon_blocks_intron,common_exon_blocks_exon,junction_inclusion_db)
    if len(root_dir)>0: exportUpdatedJunctionComps((species,root_dir),array_type,searchChr=searchChr)
    else: exportUpdatedJunctionComps(species,array_type)
    clearObjectsFromMemory(junction_inclusion_db); junction_inclusion_db=[]
    if array_type == 'RNASeq':
        ### return these annotations for RNASeq analyses
        return junction_annotations,critical_exon_annotations
def annotateNovelIntronSplicingEvents(common_exon_blocks_intron,common_exon_blocks_exon,junction_inclusion_db):
    """Pair exon-intron junctions with exon-exon junctions that share the
    same (side, gene, exon-block) key and add each new pairing to
    junction_inclusion_db as an 'Inferred-Intron' JunctionInformation."""
    ### Add exon-intron, exon-exon reciprical junctions determined based on common block exon (same side of the junction)
    new_intron_events=0
    for key in common_exon_blocks_intron:
        (side,gene,block) = key; source='Inferred-Intron'
        if key in common_exon_blocks_exon:
            for (excl_junction,excl_junction_probeset) in common_exon_blocks_exon[key]:
                for (incl_junction,incl_junction_probeset,critical_intron) in common_exon_blocks_intron[key]:
                    #if gene == 'ENSG00000112695':# and 'E2.9-E3.1' in excl_junction_probeset:
                    #print critical_intron,'\t', incl_junction,'\t',excl_junction_probeset,'\t',incl_junction_probeset,'\t',side,'\t',gene,block
                    ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_intron,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
                    if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db:
                        try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
                        except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
                        new_intron_events+=1
    #print new_intron_events, 'novel intron-splicing events added to database'
    """
    ### While the below code seemed like a good idea, the current state of RNA-seq alignment tools produced a rediculous amount of intron-intron junctions (usually in the same intron)
    ### Without supporting data (e.g., other junctions bridging these intron junction to a validated exon), we must assume these juncitons are not associated with the alinging gene
    new_intron_events=0 ### Compare Intron blocks to each other
    for key in common_exon_blocks_intron:
        (side,gene,block) = key; source='Inferred-Intron'
        for (excl_junction,excl_junction_probeset,critical_intron1) in common_exon_blocks_intron[key]:
            for (incl_junction,incl_junction_probeset,critical_intron2) in common_exon_blocks_intron[key]:
                if (excl_junction,excl_junction_probeset) != (incl_junction,incl_junction_probeset): ### If comparing entries in the same list, don't compare an single entry to itself
                    ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_intron1+'|'+critical_intron2,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
                    if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db and (incl_junction_probeset,excl_junction_probeset) not in junction_inclusion_db:
                        try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
                        except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
                        new_intron_events+=1
    """
    #print new_intron_events, 'novel intron-splicing events added to database'
    return junction_inclusion_db
def determineExclIncl(junction1,junction2,critical_exons):
    """Classify a reciprocal junction pair: the junction containing one of
    the critical exon regions is the inclusion junction, the other the
    exclusion junction. Exits the program with a warning if neither
    junction matches any critical exon."""
    #((3, 2), (6, 1))
    for critical_exon in critical_exons:
        if critical_exon in junction1: incl_junction = junction1; excl_junction = junction2
        if critical_exon in junction2: incl_junction = junction2; excl_junction = junction1
    # A NameError here means no critical exon matched either junction
    try: return excl_junction,incl_junction
    except Exception:
        print critical_exons
        print junction1
        print junction2
        print 'Warning... Unknown error. Contact AltAnalyze support for assistance.'
        sys.exit()
def formatJunctions(junction):
    """Convert ((block, region), ...) tuples into an 'E<block>.<region>'
    string joined with '-', e.g. ((3, 2), (6, 1)) -> 'E3.2-E6.1'. A '.'
    inside a region number (novel splice site) is re-encoded as '_'."""
    #((3, 2), (6, 1))
    exons_to_join=[]
    for i in junction:
        exons_to_join.append('E'+str(i[0])+'.'+string.replace(str(i[1]),'.','_'))
    junction_str = string.join(exons_to_join,'-')
    return junction_str
def getJunctionPairs(junctions):
    """Group a flat junction sequence into consecutive pairs.

    The input alternates exclusion and inclusion junctions, e.g.
    (((3, 2), (6, 1)), ((4, 2), (6, 1)), ...). Each consecutive pair
    becomes a dictionary key mapped to an empty list; a trailing
    unpaired junction is ignored.
    """
    paired_db = {}
    pending = []
    for entry in junctions:
        pending.append(entry)
        if len(pending) == 2:
            paired_db[tuple(pending)] = []
            pending = []
    return paired_db
def getJunctionExonLocations(species,array_type,specific_array_type):
    """Import array annotations, extract exon sequences and write their
    genomic locations. Sets the module-level ensembl_associations global
    used by downstream functions."""
    global ensembl_associations
    ensembl_associations = importJunctionArrayAnnotations(species,array_type,specific_array_type)
    extraction_type = 'sequence'
    exon_seq_db=importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
    # HTA/MTA arrays ship probeset coordinates; export those before liftover
    if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
        exportImportedProbesetLocations(species,array_type,exon_seq_db,ensembl_associations)
    getLocations(species,array_type,exon_seq_db)
def exportImportedProbesetLocations(species,array_type,critical_exon_seq_db,ensembl_associations):
    """Write the originally imported (pre-liftover) critical exon locations
    to '<array_type>_critical_exon_locations-original.txt'. Entries
    lacking coordinate attributes are skipped."""
    location_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical_exon_locations-original.txt'
    fn=filepath(location_db_filename); data = open(fn,'w')
    title = ['Affygene','ExonID','Ensembl','start','stop','gene-start','gene-stop','ExonSeq']; title = string.join(title,'\t')+'\n'
    data.write(title)
    for ens_geneid in critical_exon_seq_db:
        for cd in critical_exon_seq_db[ens_geneid]:
            try:
                values = [cd.ArrayGeneID(),cd.ExonID(),ens_geneid,cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()]
                values = string.join(values,'\t')+'\n'
                data.write(values)
            except AttributeError: null = []  # coordinates never set; skip
    data.close()
def identifyCriticalExonLocations(species,array_type):
    """Annotate the array's critical-exon sequences and resolve their
    genomic locations via getLocations."""
    annotated_seq_db = importAnnotateCriticalExonSequences(species,array_type)
    getLocations(species,array_type,annotated_seq_db)
def getLocations(species,array_type,critical_exon_seq_db):
    """Map critical exon sequences onto genomic coordinates using the
    species' flanked gene-sequence FASTA, then export the locations."""
    seq_dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species
    gene_seq_filename = seq_dir+'_gene-seq-2000_flank.fa'
    located_db = EnsemblImport.import_sequence_data(gene_seq_filename,critical_exon_seq_db,species,'get_locations')
    exportCriticalExonLocations(species,array_type,located_db)
def reAnnotateCriticalExonSequences(species,array_type):
    """Re-extract critical exon sequences from the genome and rewrite the
    array's critical-exon sequence file with the updated sequences."""
    probeset_path = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
    probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(probeset_path,'null',{})
    #analysis_type = 'get_sequence'
    analysis_type = ('region_only','get_sequence') ### Added after EnsMart65
    seq_dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species
    gene_seq_filename = seq_dir+'_gene-seq-2000_flank.fa'
    probeset_db = EnsemblImport.import_sequence_data(gene_seq_filename,probeset_db,species,analysis_type)
    critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
    # AltMouse input files must be fetched/verified before rewriting
    if array_type == 'AltMouse': verifyFile(critical_exon_file,array_type)
    updateCriticalExonSequences(array_type, critical_exon_file, probeset_db)
if __name__ == '__main__':
    """Module has methods for annotating Junction associated critical exon sequences with up-to-date genome coordinates and analysis options for
    junciton arrays from AnalyzeExpressionDatasets"""
    # Ad-hoc developer driver: calls are short-circuited by sys.exit(), so
    # only the first active call ever runs.
    m = 'Mm'; h = 'Hs'; species = h; array_type = 'junction' ###In theory, could be another type of junction or combination array
    specific_array_type = 'hGlue'
    extraction_type = 'comparisons'
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_LiftOverEnsembl.txt'
    verifyFile(filename,array_type+'/'+specific_array_type);sys.exit()
    # NOTE(review): everything below is unreachable because of the
    # sys.exit() above; kept as a menu of previously used entry points.
    tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type); sys.exit()
    combineExonJunctionAnnotations(species,array_type);sys.exit()
    filterForCriticalExons(species,array_type)
    overRideExonEntriesWithJunctions(species,array_type);sys.exit()
    #inferJunctionComps(species,array_type); sys.exit()
    identifyJunctionComps(species,array_type,specific_array_type);sys.exit()
    filterForCriticalExons(species,array_type);sys.exit()
    reAnnotateCriticalExonSequences(species,array_type)
    #getJunctionExonLocations(species,array_type,specific_array_type)
    sys.exit()
    import_dir = '/AltDatabase/exon/'+species; expr_file_dir = 'R_expression_raw_data\exp.altmouse_es-eb.dabg.rma.txt'
    dagb_p = 1; Analysis_Method = 'rma'
    #identifyCriticalExonLocations(species,array_type)
    #JunctionArrayEnsemblRules.getAnnotations(species,array_type)
    ### Only needs to be run once, to update the original
    #reAnnotateCriticalExonSequences(species,array_type); sys.exit()
    #getAnnotations(expr_file_dir,dagb_p,Species,Analysis_Method)
|
# NOTE(review): 'fr' is opened but never read and never closed — it leaks
# a file handle and crashes if text.txt is absent; confirm whether it is
# still needed before removing.
fr = open('text.txt')
import json
# fw = open('text2.txt', 'w')
# fw.write('HELLO PIDOR!\n')
def getJSON(filePathAndName):
    """Open the file at *filePathAndName* and return its parsed JSON
    content as Python objects."""
    with open(filePathAndName, 'r') as source_file:
        parsed = json.load(source_file)
    return parsed
class Book:
    """A single library record: id, author, title, publication date and
    the shelf number where the physical copy lives."""

    def __init__(self, i, author, title, date, shellNumber):
        self.id = i
        self.author = author
        self.title = title
        self.date = date
        self.shellNumber = shellNumber
class Library:
    """In-memory collection of Book records with simple search helpers."""

    def __init__(self):
        # Internal list of Book objects.
        self._books = []

    def addToList(self, item):
        """Append *item* (a Book) to the collection."""
        self._books.append(item)

    def removeFromList(self, id):
        # NOTE(review): stub — only echoes the id, nothing is removed.
        print(id)

    def get(self):
        """Return the underlying list of books."""
        return self._books

    def checkExistance(self, title):
        """Return True if a book titled *title* is in the collection.

        Titles are compared as whitespace-split word lists so stray
        spacing (or a trailing newline from input()) does not break the
        match.
        """
        for el in self._books:
            if (title.split() == el.title.split()):
                return True
        return False

    def getBooksCountByAuthor(self, author):
        """Return how many books in the collection are by *author*
        (compared word-by-word, like checkExistance)."""
        k = 0
        for el in self._books:
            if (author.split() == el.author.split()):
                k = k + 1
        return k

    def showBeautifulData(self):
        """Pretty-print every book to stdout."""
        print("\n######################\n")
        for el in self._books:
            print("id:", el.id)
            print("title:", el.title)
            print("author:", el.author)
            print("date:", el.date)
            print("shellNumber:", el.shellNumber)
        print("\n######################\n")

    def getLibraryDataFromFile(self):
        """Load books from 'books.json' into this library.

        Bug fix: the original appended to the module-level ``lib``
        instance instead of ``self``, so the method broke (or filled the
        wrong library) for any instance not bound to that global name.
        """
        testJson = getJSON('books.json')
        for el in testJson:
            book = Book(el.get("id"), el.get("author"),
                        el.get("title"), el.get("date"),
                        el.get("shellNumber"))
            self.addToList(book)

    def deleteById(self, id):
        # NOTE(review): despite the name, *id* is a positional list index,
        # not a Book.id — callers pass 0-based positions.
        del self._books[id]
def test ():
    """Smoke-test helper: print the constant 1.

    Bug fix: the original called ``println``, which does not exist in
    Python, so every invocation raised NameError.
    """
    print(1)
# Build the library from books.json and run a one-shot title lookup.
lib = Library()
lib.getLibraryDataFromFile()
# lib.showBeautifulData()
print("Введи, сука, данные!")
line = input()
# Bug fix: Library has no method 'checkEx' — the original raised
# AttributeError on every run; the intended method is checkExistance.
flag = lib.checkExistance(line)
if (flag):
    print("Книжка присутствует!")
else: print("Книжка отстутствует!!!!!!")
# Interactive console session (currently disabled):
# lib.deleteById(0)
# print('deleting')
# lib.showBeautifulData()
# ////////////////////////////////////////////////////////////////////////////////
# Interactive menu loop.
# Bug fix: the loop condition was ``while command == "0"`` with command
# initialised to 1, so the body never executed; the menu is meant to
# repeat until the user enters "0" (options 1-3 are not yet implemented).
command = 1
while command != "0":
    print("Введите '0', чтобы выйти\n" +
          "Введите '1', чтобы добавить новую книжку\n" +
          "Введите '2', чтобы удалить книжку\n" +
          "Введите '3', чтобы показать все книжки\n" +
          "Введите '4', чтобы проверить наличие книги\n" +
          "Введите '5', чтобы узнать, сколько книг у данного автора\n")
    command = input()
    if (command == "4"):
        print("Введите название книги: ")
        name = input()
        if (lib.checkExistance(name)):
            print("Книжка присутствует!\n")
        else:
            print("Книжка отстутствует!\n")
    if (command == "5"):
        print("Введите автора: ")
        author = input()
        number = lib.getBooksCountByAuthor(author)
        if (number == 0):
            print("У " + author + " книжек нет!\n")
        else:
            print(lib.getBooksCountByAuthor(author), "\n")
    print("\nЗавершить работу программы? (y/no)")
    if (input() == "y"):
        break
# End of interactive console session.
|
"""
-------------------------------------------------------
Lab/Assignment Testing
-------------------------------------------------------
Author: Zehao Liu
ID: 193074000
Email: liux4000@mylaurier.ca
(Add a second set of Author/ID/Email if working in pairs)
__updated__ = '2020-05-18'
-------------------------------------------------------
"""
import pygame
from RGB_code import *
from pygame.locals import *
pygame.init()
# Window icon and caption (caption is Chinese for "Squash: mouse practice")
pygame.display.set_caption("壁球:鼠标演练")
icon = pygame.image.load('source//image//box.png')
pygame.display.set_icon(icon)
# any image be loaded become a surface object
ball = pygame.image.load('source//image//ball.gif')
def main():
    """Run the bouncing-ball demo.

    A ball ricochets inside a resizable window; arrow keys / WASD adjust
    its speed, dragging with the left mouse button repositions it, ESC/Q
    quits, R restarts, and the background colour tracks the ball position
    and speed ratio.
    """
    WIDTH = 600
    HEIGHT = 400
    # Way to change into a full screen games
    # Vinfo=pygame.display.Info()
    # WIDTH=Vinfo.current_w
    # HEIGHT=Vinfo.current_h
    # screen is also a surface object
    screen = pygame.display.set_mode((WIDTH, HEIGHT), RESIZABLE)
    # /or/ screen=pygame.display.set_mode((WIDTH,HEIGHT),NOFRAME)    borderless mode
    # /or/ screen=pygame.display.set_mode((WIDTH,HEIGHT),FULLSCREEN) fullscreen mode
    # print(pygame.display.Info())
    speed = [10, 10]        # pixels moved per frame along x and y
    is_moving = True        # False while the ball is being dragged
    # After get_rect the ballrect become a rectangular object
    ballrect = ball.get_rect()
    fps = 100
    fclock = pygame.time.Clock()
    running = True
    bgcolor = pygame.Color('black')

    def RGBChannel(a):
        # Clamp an arbitrary number into a legal 0-255 colour channel.
        return 0 if a < 0 else (255 if a > 255 else int(a))

    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            # the basic controll of player
            elif event.type == VIDEORESIZE:
                # size[0] is width and size[1] is height
                WIDTH, HEIGHT = event.size[0], event.size[1]
                pygame.display.set_mode((WIDTH, HEIGHT), RESIZABLE)
            elif event.type == KEYDOWN:
                if event.key == K_RIGHT or event.key == K_d:
                    speed[0] += 5
                elif event.key == K_LEFT or event.key == K_a:
                    speed[0] -= 5
                elif event.key == K_DOWN or event.key == K_s:
                    speed[1] += 5
                elif event.key == K_UP or event.key == K_w:
                    speed[1] -= 5
                elif event.key == K_ESCAPE or event.key == K_q:
                    running = False
                elif event.key == K_r:
                    # NOTE(review): restarting via recursion grows the call
                    # stack on every restart; kept for behaviour parity.
                    main()
            elif event.type == MOUSEMOTION:
                # buttons[0] is the LEFT mouse button: hold it to drag
                # the ball (the original variable was misnamed right_click)
                left_button_held = event.buttons[0]
                if left_button_held == True:
                    is_moving = False
                    ballrect.move_ip(event.rel)
            elif event.type == MOUSEBUTTONUP:
                is_moving = True
        # initial ball moving speed
        if is_moving and pygame.display.get_active():
            ballrect = ballrect.move(speed[0], speed[1])
            # rebound of the ball
            if ballrect.left < 0 or ballrect.right > WIDTH:
                speed[0] = -speed[0]
            # note: the followed two if codes are necessary
            if ballrect.left < 0 and speed[0] < 0:
                speed[0] = -speed[0]
            if ballrect.right > WIDTH and speed[0] > 0:
                speed[0] = -speed[0]
            if ballrect.top < 0 or ballrect.bottom > HEIGHT:
                speed[1] = -speed[1]
            # note: the followed two if codes are necessary
            if ballrect.top < 0 and speed[1] < 0:
                speed[1] = -speed[1]
            if ballrect.bottom > HEIGHT and speed[1] > 0:
                speed[1] = -speed[1]
        bgcolor.r = RGBChannel(ballrect.left * 255 / WIDTH)
        bgcolor.g = RGBChannel(ballrect.top * 255 / HEIGHT)
        # Bug fix: guard against ZeroDivisionError — both speeds can reach 0
        # via the speed keys, making max(speed[0], speed[1]) zero.
        max_speed = max(speed[0], speed[1])
        if max_speed != 0:
            bgcolor.b = RGBChannel(min(speed[0], speed[1]) / max_speed)
        else:
            bgcolor.b = 0
        # Erase the previous frame (fills over the ball's old position)
        screen.fill(bgcolor)
        # blit draws the ball Surface at the position given by ballrect
        screen.blit(ball, ballrect)
        # tick caps the refresh rate at fps frames per second
        fclock.tick(fps)
        # untilities
        pygame.display.update()
main()
# Release pygame resources once the game loop returns
pygame.quit()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.