blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82917c8f8772702575f5d4d5c0d4e81003e9e9fb | 51d7e8c09793b50d45731bd5ab9b531b525cf6db | /src/garage/misc/rllab/instrument.py | 464a4973eba2de51104413e0190304eec5a17bba | [
"MIT"
] | permissive | fangqyi/garage | 454247849a6a3f547557b3fac3787ba9eeb0391f | ddafba385ef005f46f913ab352f9638760e5b412 | refs/heads/master | 2023-02-25T00:43:18.903328 | 2021-01-26T01:52:15 | 2021-01-26T01:52:15 | 267,667,220 | 0 | 0 | MIT | 2020-05-28T18:35:08 | 2020-05-28T18:35:07 | null | UTF-8 | Python | false | false | 54,560 | py | import base64
import collections
import datetime
import errno
import hashlib
import inspect
import json
import os
import os.path as osp
import pickle as pickle
import re
import subprocess
import sys
import time
from io import StringIO
import dateutil.tz
import numpy as np
from garage.misc.rllab import config
from garage.misc.rllab.console import mkdir_p
from garage.misc.rllab.ext import AttrDict, flatten
from garage.misc.rllab.serializable import Serializable
class StubBase(object):
    """Base class for lazy "stub" proxies.

    Instead of executing operations immediately, stubs record them as
    ``StubMethodCall``/``StubAttr`` objects so the whole call graph can be
    serialized and replayed later (e.g. inside a remote experiment runner).
    """

    def __getitem__(self, item):
        # Indexing is recorded, not performed.
        return StubMethodCall(self, "__getitem__", args=[item], kwargs=dict())

    def __getattr__(self, item):
        try:
            # NOTE(review): uses self.__class__ rather than StubBase; works for
            # the shallow hierarchy in this file, but would misbehave with
            # deeper subclassing -- confirm before extending.
            return super(self.__class__, self).__getattribute__(item)
        except AttributeError:
            if item.startswith("__") and item.endswith("__"):
                # Never proxy dunder lookups; Python machinery relies on them.
                raise
            return StubAttr(self, item)

    def __pow__(self, power, modulo=None):
        return StubMethodCall(self, "__pow__", [power, modulo], dict())

    def __call__(self, *args, **kwargs):
        # Only meaningful on StubAttr-like stubs that expose obj/attr_name.
        return StubMethodCall(self.obj, self.attr_name, args, kwargs)

    def __add__(self, other):
        return StubMethodCall(self, "__add__", [other], dict())

    def __rmul__(self, other):
        return StubMethodCall(self, "__rmul__", [other], dict())

    def __div__(self, other):
        return StubMethodCall(self, "__div__", [other], dict())

    def __rdiv__(self, other):
        # Reversed division is routed through BinaryOp so the recorded call is
        # a plain method call (rdiv) instead of a reflected dunder.
        return StubMethodCall(BinaryOp(), "rdiv", [self, other], dict())  # self, "__rdiv__", [other], dict())

    def __rpow__(self, power, modulo=None):
        return StubMethodCall(self, "__rpow__", [power, modulo], dict())
class BinaryOp(Serializable):
    """Serializable holder for binary operations that stubs defer to run time."""

    def __init__(self):
        Serializable.quick_init(self, locals())

    def rdiv(self, a, b):
        """Reversed division: evaluate ``b / a``."""
        result = b / a
        return result
class StubAttr(StubBase):
    """Stub standing in for an attribute access on another stub."""

    def __init__(self, obj, attr_name):
        # Write straight into __dict__ so StubBase.__getattr__ is not invoked.
        self.__dict__.update(_obj=obj, _attr_name=attr_name)

    @property
    def obj(self):
        """The stub whose attribute was accessed."""
        return self.__dict__["_obj"]

    @property
    def attr_name(self):
        """Name of the accessed attribute."""
        return self.__dict__["_attr_name"]

    def __str__(self):
        return "StubAttr(%s, %s)" % (str(self.obj), str(self.attr_name))
class StubMethodCall(StubBase, Serializable):
    """Stub recording a deferred method invocation (target, name, arguments)."""

    def __init__(self, obj, method_name, args, kwargs):
        self._serializable_initialized = False
        Serializable.quick_init(self, locals())
        self.obj = obj
        self.method_name = method_name
        self.args = args
        self.kwargs = kwargs

    def __str__(self):
        fields = (self.obj, self.method_name, self.args, self.kwargs)
        return "StubMethodCall(%s, %s, %s, %s)" % tuple(str(f) for f in fields)
class StubClass(StubBase):
    """Stub wrapper around a class; calling it yields a ``StubObject``."""

    def __init__(self, proxy_class):
        self.proxy_class = proxy_class

    def __call__(self, *args, **kwargs):
        if len(args) > 0:
            # Convert positional arguments to keyword arguments so the
            # recorded construction is independent of argument order.
            # NOTE: getfullargspec replaces inspect.getargspec, which was
            # removed in Python 3.11.
            spec = inspect.getfullargspec(self.proxy_class.__init__)
            kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
            args = tuple()
        return StubObject(self.proxy_class, *args, **kwargs)

    def __getstate__(self):
        return dict(proxy_class=self.proxy_class)

    def __setstate__(self, state):
        # Renamed parameter: the original shadowed the builtin ``dict``.
        self.proxy_class = state["proxy_class"]

    def __getattr__(self, item):
        # Only proxy attributes that actually exist on the wrapped class.
        if hasattr(self.proxy_class, item):
            return StubAttr(self, item)
        raise AttributeError

    def __str__(self):
        return "StubClass(%s)" % self.proxy_class
class StubObject(StubBase):
    """Stub recording a deferred object construction of ``__proxy_class``."""

    def __init__(self, __proxy_class, *args, **kwargs):
        if len(args) > 0:
            # Normalize positional args to keyword args.
            # NOTE: getfullargspec replaces inspect.getargspec, which was
            # removed in Python 3.11.
            spec = inspect.getfullargspec(__proxy_class.__init__)
            kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
            args = tuple()
        self.proxy_class = __proxy_class
        self.args = args
        self.kwargs = kwargs

    def __getstate__(self):
        return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)

    def __setstate__(self, state):
        # Renamed parameter: the original shadowed the builtin ``dict``.
        self.args = state["args"]
        self.kwargs = state["kwargs"]
        self.proxy_class = state["proxy_class"]

    def __getattr__(self, item):
        # Checks bypassed to allow for accessing instance fields.
        if hasattr(self.proxy_class, item):
            return StubAttr(self, item)
        raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))

    def __str__(self):
        return "StubObject(%s, *%s, **%s)" % (str(self.proxy_class), str(self.args), str(self.kwargs))
class VariantDict(AttrDict):
    """AttrDict variant that omits hidden keys when dumped."""

    def __init__(self, d, hidden_keys):
        super(VariantDict, self).__init__(d)
        self._hidden_keys = hidden_keys

    def dump(self):
        """Return a plain dict of the variant without the hidden keys."""
        return {key: value
                for key, value in self.items()
                if key not in self._hidden_keys}
class VariantGenerator(object):
    """
    Generate all combinations of registered parameter values.

    Usage:

        vg = VariantGenerator()
        vg.add("param1", [1, 2, 3])
        vg.add("param2", ['x', 'y'])
        vg.variants() => # all combinations of [1,2,3] x ['x','y']

    Supports noncyclic dependency among parameters:

        vg = VariantGenerator()
        vg.add("param1", [1, 2, 3])
        vg.add("param2", lambda param1: [param1+1, param1+2])
        vg.variants() => # ..
    """

    def __init__(self):
        self._variants = []
        self._populate_variants()
        self._hidden_keys = []
        # Keys registered with hide=True are excluded from name suffixes
        # and from VariantDict.dump().
        for k, vs, cfg in self._variants:
            if cfg.get("hide", False):
                self._hidden_keys.append(k)

    def add(self, key, vals, **kwargs):
        """Register a parameter; *vals* is a list, or a callable whose
        argument names refer to previously registered keys."""
        self._variants.append((key, vals, kwargs))

    def _populate_variants(self):
        # Pick up methods decorated with @variant on subclasses.
        methods = inspect.getmembers(
            self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
        methods = [x[1].__get__(self, self.__class__)
                   for x in methods if getattr(x[1], '__is_variant', False)]
        for m in methods:
            self.add(m.__name__, m, **getattr(m, "__variant_config", dict()))

    def variants(self, randomized=False):
        """Return all variants as VariantDicts, optionally shuffled in place."""
        ret = list(self.ivariants())
        if randomized:
            np.random.shuffle(ret)
        return list(map(self.variant_dict, ret))

    def variant_dict(self, variant):
        return VariantDict(variant, self._hidden_keys)

    def to_name_suffix(self, variant):
        """Build a log-name suffix "k1_v1_k2_v2..." from non-hidden keys."""
        suffix = []
        for k, vs, cfg in self._variants:
            if not cfg.get("hide", False):
                suffix.append(k + "_" + str(variant[k]))
        return "_".join(suffix)

    def ivariants(self):
        """Return a lazy iterator over variants, resolving dependencies first.

        Raises ValueError when the dependency graph has a cycle.
        """
        dependencies = list()
        for key, vals, _ in self._variants:
            if hasattr(vals, "__call__"):
                # NOTE: getfullargspec replaces inspect.getargspec, which was
                # removed in Python 3.11.
                args = inspect.getfullargspec(vals).args
                if hasattr(vals, "__self__"):
                    # Remove the first 'self' parameter of bound methods.
                    args = args[1:]
                dependencies.append((key, set(args)))
            else:
                dependencies.append((key, set()))
        sorted_keys = []
        # Topologically sort all nodes.
        while len(sorted_keys) < len(self._variants):
            # Get all nodes with zero in-degree.
            free_nodes = [k for k, v in dependencies if len(v) == 0]
            if len(free_nodes) == 0:
                error_msg = "Invalid parameter dependency: \n"
                for k, v in dependencies:
                    if len(v) > 0:
                        error_msg += k + " depends on " + " & ".join(v) + "\n"
                raise ValueError(error_msg)
            dependencies = [(k, v)
                            for k, v in dependencies if k not in free_nodes]
            # Remove the free nodes from the remaining dependencies.
            for _, v in dependencies:
                v.difference_update(free_nodes)
            sorted_keys += free_nodes
        return self._ivariants_sorted(sorted_keys)

    def _ivariants_sorted(self, sorted_keys):
        # Recursively expand the cross product in topological order.
        if len(sorted_keys) == 0:
            yield dict()
        else:
            first_keys = sorted_keys[:-1]
            first_variants = self._ivariants_sorted(first_keys)
            last_key = sorted_keys[-1]
            last_vals = [v for k, v, _ in self._variants if k == last_key][0]
            if hasattr(last_vals, "__call__"):
                last_val_keys = inspect.getfullargspec(last_vals).args
                if hasattr(last_vals, '__self__'):
                    last_val_keys = last_val_keys[1:]
            else:
                last_val_keys = None
            for variant in first_variants:
                if hasattr(last_vals, "__call__"):
                    # Dependent parameter: compute its choices from the
                    # already-fixed part of the variant.
                    last_variants = last_vals(
                        **{k: variant[k] for k in last_val_keys})
                    for last_choice in last_variants:
                        yield AttrDict(variant, **{last_key: last_choice})
                else:
                    for last_choice in last_vals:
                        yield AttrDict(variant, **{last_key: last_choice})
def variant(*args, **kwargs):
    """Decorator marking a VariantGenerator method as a variant supplier.

    Supports both bare usage (``@variant``) and configured usage
    (``@variant(hide=True)``); the config kwargs are stored on the
    function for VariantGenerator._populate_variants to pick up.
    """
    def _variant(fn):
        fn.__is_variant = True
        fn.__variant_config = kwargs
        return fn
    # Bare usage: @variant applied directly to the function.
    # NOTE: the ``collections.Callable`` alias was removed in Python 3.10;
    # the builtin callable() is the portable check.
    if len(args) == 1 and callable(args[0]):
        return _variant(args[0])
    return _variant
def stub(glbs):
    """Replace every class in the given globals dict with a StubClass wrapper.

    Hacky by design: after this, "constructing" any of those classes records
    a stubbed call instead of building a real instance.
    """
    # Snapshot items() since we mutate glbs while iterating.
    for name, value in list(glbs.items()):
        if isinstance(value, type) and value != StubClass:
            glbs[name] = StubClass(value)
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer as a bool.

    "question" is the string presented to the user. "default" is the
    presumed answer if the user just hits <Enter>: "yes" (the default),
    "no", or None (an explicit answer is then required). Returns True
    for "yes" and False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
# Module-level launcher state shared across run_experiment_lite calls.
exp_count = 0  # number of experiments launched so far (used for unique names)
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')  # fixed at import time
remote_confirmed = False  # set True once the user confirms a remote launch
def run_experiment_lite(
        stub_method_call=None,
        batch_tasks=None,
        exp_prefix="experiment",
        exp_name=None,
        log_dir=None,
        script="scripts/run_experiment_lite.py",
        python_command="python",
        mode="local",
        dry=False,
        docker_image=None,
        aws_config=None,
        env=None,
        variant=None,
        use_gpu=False,
        sync_s3_pkl=False,
        sync_s3_png=False,
        sync_s3_log=False,
        sync_log_on_termination=True,
        confirm_remote=True,
        terminate_machine=True,
        periodic_sync=True,
        periodic_sync_interval=15,
        sync_all_data_node_to_s3=True,
        use_cloudpickle=None,
        pre_commands=None,
        added_project_directories=[],
        **kwargs):
    """
    Serialize the stubbed method call and run the experiment using the specified mode.
    :param stub_method_call: A stubbed method call.
    :param batch_tasks: Optional list of pre-built task dicts; when omitted, a
        single task is built from the other arguments.
    :param script: The name of the entrance point python script
    :param python_command: Python interpreter used to run the script.
    :param mode: Where & how to run the experiment. Should be one of "local", "local_docker", "ec2",
    and "lab_kube" (or a callable implementing a custom launcher).
    :param dry: Whether to do a dry-run, which only prints the commands without executing them.
    :param exp_prefix: Name prefix for the experiments
    :param exp_name: Full experiment name; auto-generated from prefix/timestamp/count when None.
    :param log_dir: Local log directory; derived from config.LOG_DIR when None.
    :param docker_image: name of the docker image. Ignored if using local mode.
    :param aws_config: configuration for AWS. Only used under EC2 mode
    :param env: extra environment variables
    :param kwargs: All other parameters will be passed directly to the entrance python script.
    :param variant: If provided, should be a dictionary of parameters
    :param use_gpu: Whether the launched task is running on GPU. This triggers a few configuration changes including
    certain environment flags
    :param sync_s3_pkl: Whether to sync pkl files during execution of the experiment (they will always be synced at
    the end of the experiment)
    :param sync_s3_png: Whether to sync png files during execution of the experiment (they will always be synced at
    the end of the experiment)
    :param sync_s3_log: Whether to sync log files during execution of the experiment (they will always be synced at
    the end of the experiment)
    :param sync_log_on_termination: Whether to sync logs when a spot instance is marked for termination.
    :param confirm_remote: Whether to confirm before launching experiments remotely
    :param terminate_machine: Whether to terminate machine after experiment finishes. Only used when using
    mode="ec2". This is useful when one wants to debug after an experiment finishes abnormally.
    :param periodic_sync: Whether to synchronize certain experiment files periodically during execution.
    :param periodic_sync_interval: Time interval between each periodic sync, in seconds.
    :param use_cloudpickle: Force cloudpickle (True) or plain pickle (False) for
        serializing the call; auto-detected from the call type when None.
    :param pre_commands: Shell commands to run before the experiment command.
    :param added_project_directories: Extra directories bundled into the synced code archive.
    """
    assert stub_method_call is not None or batch_tasks is not None, "Must provide at least either stub_method_call or batch_tasks"
    if use_cloudpickle is None:
        for maybe_stub in (batch_tasks or [stub_method_call]):
            # decide mode: stubs go through plain pickle, real callables
            # through cloudpickle
            if isinstance(maybe_stub, StubBase):
                use_cloudpickle = False
            else:
                assert hasattr(maybe_stub, '__call__')
                use_cloudpickle = True
    # ensure variant exists
    if variant is None:
        variant = dict()
    if batch_tasks is None:
        # Wrap the single experiment in a one-element batch.
        batch_tasks = [
            dict(
                kwargs,
                pre_commands=pre_commands,
                stub_method_call=stub_method_call,
                exp_name=exp_name,
                log_dir=log_dir,
                env=env,
                variant=variant,
                use_cloudpickle=use_cloudpickle
            )
        ]
    global exp_count
    global remote_confirmed
    config.USE_GPU = use_gpu
    # params_list = []
    # First pass: serialize each task's call and fill in derived fields
    # (exp_name, log dirs, env).
    for task in batch_tasks:
        call = task.pop("stub_method_call")
        if use_cloudpickle:
            import cloudpickle
            data = base64.b64encode(cloudpickle.dumps(call)).decode("utf-8")
        else:
            data = base64.b64encode(pickle.dumps(call)).decode("utf-8")
        task["args_data"] = data
        exp_count += 1
        # NOTE(review): `params` is only consumed later by the lab_kube
        # branch, which therefore sees the kwargs of the *last* loop
        # iteration -- looks intentional for single-task use; verify for
        # multi-task batches.
        params = dict(kwargs)
        if task.get("exp_name", None) is None:
            task["exp_name"] = "%s_%s_%04d" % (
                exp_prefix, timestamp, exp_count)
        if task.get("log_dir", None) is None:
            task["log_dir"] = config.LOG_DIR + "/local/" + \
                              exp_prefix.replace("_", "-") + "/" + task["exp_name"]
        if task.get("variant", None) is not None:
            variant = task.pop("variant")
            if "exp_name" not in variant:
                variant["exp_name"] = task["exp_name"]
            task["variant_data"] = base64.b64encode(pickle.dumps(variant)).decode("utf-8")
        elif "variant" in task:
            del task["variant"]
        task["remote_log_dir"] = osp.join(
            config.AWS_S3_PATH, exp_prefix.replace("_", "-"), task["exp_name"])
        task["env"] = task.get("env", dict()) or dict()
        task["env"]["RLLAB_USE_GPU"] = str(use_gpu)
    # Ask once (module-global) before any non-local, non-dry launch.
    if mode not in ["local", "local_docker"] and not remote_confirmed and not dry and confirm_remote:
        remote_confirmed = query_yes_no(
            "Running in (non-dry) mode %s. Confirm?" % mode)
        if not remote_confirmed:
            sys.exit(1)
    if hasattr(mode, "__call__"):
        # Custom launcher: delegate everything to the callable.
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        mode(
            task,
            docker_image=docker_image,
            use_gpu=use_gpu,
            exp_prefix=exp_prefix,
            script=script,
            python_command=python_command,
            sync_s3_pkl=sync_s3_pkl,
            sync_log_on_termination=sync_log_on_termination,
            periodic_sync=periodic_sync,
            periodic_sync_interval=periodic_sync_interval,
            sync_all_data_node_to_s3=sync_all_data_node_to_s3,
        )
    elif mode == "local":
        # Run each task as a subprocess on this machine.
        for task in batch_tasks:
            del task["remote_log_dir"]
            env = task.pop("env", None)
            command = to_local_command(
                task,
                python_command=python_command,
                script=osp.join(config.PROJECT_PATH, script),
                use_gpu=use_gpu
            )
            print(command)
            if dry:
                return
            try:
                if env is None:
                    env = dict()
                subprocess.call(
                    command, shell=True, env=dict(os.environ, **env))
            except Exception as e:
                print(e)
                if isinstance(e, KeyboardInterrupt):
                    raise
    elif mode == "local_docker":
        # Run each task inside a local docker container.
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        for task in batch_tasks:
            del task["remote_log_dir"]
            env = task.pop("env", None)
            command = to_docker_command(
                task,  # these are the params. Pre and Post command can be here
                docker_image=docker_image,
                script=script,
                env=env,
                use_gpu=use_gpu,
                use_tty=True,
                python_command=python_command,
            )
            print(command)
            if dry:
                return
            p = subprocess.Popen(command, shell=True)
            try:
                p.wait()
            except KeyboardInterrupt:
                try:
                    print("terminating")
                    p.terminate()
                except OSError:
                    print("os error!")
                    pass
                p.wait()
    elif mode == "ec2":
        # Upload the code once, then launch all tasks on EC2.
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        s3_code_path = s3_sync_code(config, dry=dry, added_project_directories=added_project_directories)
        launch_ec2(batch_tasks,
                   exp_prefix=exp_prefix,
                   docker_image=docker_image,
                   python_command=python_command,
                   script=script,
                   aws_config=aws_config,
                   dry=dry,
                   terminate_machine=terminate_machine,
                   use_gpu=use_gpu,
                   code_full_path=s3_code_path,
                   sync_s3_pkl=sync_s3_pkl,
                   sync_s3_png=sync_s3_png,
                   sync_s3_log=sync_s3_log,
                   sync_log_on_termination=sync_log_on_termination,
                   periodic_sync=periodic_sync,
                   periodic_sync_interval=periodic_sync_interval)
    elif mode == "lab_kube":
        # assert env is None
        # first send code folder to s3
        s3_code_path = s3_sync_code(config, dry=dry)
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        for task in batch_tasks:
            # if 'env' in task:
            #     assert task.pop('env') is None
            # TODO: dangerous when there are multiple tasks?
            task["resources"] = params.pop(
                "resources", config.KUBE_DEFAULT_RESOURCES)
            task["node_selector"] = params.pop(
                "node_selector", config.KUBE_DEFAULT_NODE_SELECTOR)
            task["exp_prefix"] = exp_prefix
            pod_dict = to_lab_kube_pod(
                task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu,
                python_command=python_command,
                sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync,
                periodic_sync_interval=periodic_sync_interval,
                sync_all_data_node_to_s3=sync_all_data_node_to_s3,
                terminate_machine=terminate_machine,
            )
            pod_str = json.dumps(pod_dict, indent=1)
            if dry:
                print(pod_str)
            # Write the pod spec to disk, then submit it with kubectl.
            dir = "{pod_dir}/{exp_prefix}".format(
                pod_dir=config.POD_DIR, exp_prefix=exp_prefix)
            ensure_dir(dir)
            fname = "{dir}/{exp_name}.json".format(
                dir=dir,
                exp_name=task["exp_name"]
            )
            with open(fname, "w") as fh:
                fh.write(pod_str)
            kubecmd = "kubectl create -f %s" % fname
            print(kubecmd)
            if dry:
                return
            # Retry submission up to 5 times with a fixed backoff.
            retry_count = 0
            wait_interval = 1
            while retry_count <= 5:
                try:
                    return_code = subprocess.call(kubecmd, shell=True)
                    if return_code == 0:
                        break
                    retry_count += 1
                    print("trying again...")
                    time.sleep(wait_interval)
                except Exception as e:
                    if isinstance(e, KeyboardInterrupt):
                        raise
                    print(e)
    else:
        raise NotImplementedError
# Pattern used by _shellquote to decide whether a string needs quoting.
# NOTE(review): despite the name, this character class matches shell-*safe*
# characters, not unsafe ones -- see _shellquote for how its result is used.
_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search
def ensure_dir(dirname):
    """
    Ensure that a named directory exists; if it does not, attempt to create it.
    """
    try:
        os.makedirs(dirname)
    except OSError as err:
        # An already-existing path is fine; re-raise anything else.
        if err.errno != errno.EEXIST:
            raise
def _shellquote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
    """Render a parameter value as a shell-quoted command-line fragment.

    None becomes the empty string; lists become space-separated quoted
    items; everything else is stringified and quoted.
    """
    if v is None:
        return ""
    if isinstance(v, list):
        return " ".join(_shellquote(str(item)) for item in v)
    return _shellquote(str(v))
def to_local_command(params, python_command="python", script=osp.join(config.PROJECT_PATH,
                                                                      'scripts/run_experiment.py'),
                     use_gpu=False):
    """Build the shell command line that runs *script* locally with *params*.

    Dict-valued params are flattened into ``--key_subkey value`` flags
    (with the special subkey ``_name`` mapping to ``--key value``).
    Mutates *params* by popping pre/post command entries.
    """
    command = python_command + " " + script
    if use_gpu and not config.USE_TF:
        # Theano reads its GPU configuration from the environment.
        command = "THEANO_FLAGS='device=gpu,dnn.enabled=auto,floatX=float32' " + command
    for var, value in config.ENV.items():
        command = ("%s=%s " % (var, value)) + command
    pre_commands = params.pop("pre_commands", None)
    post_commands = params.pop("post_commands", None)
    if pre_commands is not None or post_commands is not None:
        # Local mode has no wrapper shell to run these in.
        print("Not executing the pre_commands: ", pre_commands, ", nor post_commands: ", post_commands)
    for key, value in params.items():
        if not isinstance(value, dict):
            command += " --%s %s" % (key, _to_param_val(value))
            continue
        for sub_key, sub_val in value.items():
            if str(sub_key) == "_name":
                command += " --%s %s" % (key, _to_param_val(sub_val))
            else:
                command += " --%s_%s %s" % (key, sub_key, _to_param_val(sub_val))
    return command
def to_docker_command(params, docker_image, python_command="python", script='scripts/run_experiment_lite.py',
                      pre_commands=None, use_tty=False,
                      mujoco_path=None,
                      post_commands=None, dry=False, use_gpu=False, env=None, local_code_dir=None):
    """
    Build the full ``docker run`` command line for one experiment task.

    :param params: The parameters for the experiment. If logging directory parameters are provided, we will create
    docker volume mapping to make sure that the logging files are created at the correct locations
    :param docker_image: docker image to run the command on
    :param script: script command for running experiment
    :param python_command: interpreter used inside the container
    :param pre_commands: shell commands run inside the container before the experiment
    :param post_commands: shell commands run after; defaults to a 2-minute sleep
    :param use_tty: pass -ti instead of -i to docker
    :param mujoco_path: host directory with MuJoCo keys; defaults to config.MUJOCO_KEY_PATH
    :param env: extra environment variables injected via -e flags
    :param local_code_dir: host code directory mounted into the container
    :return: the complete shell command string
    """
    log_dir = params.get("log_dir")
    # "docker_args" is passed through verbatim to docker run.
    docker_args = params.pop("docker_args", "")
    if pre_commands is None:
        pre_commands = params.pop("pre_commands", None)
    if post_commands is None:
        post_commands = params.pop("post_commands", None)
    if mujoco_path is None:
        mujoco_path = config.MUJOCO_KEY_PATH
    # script = 'rllab/' + script
    # if not dry:
    # create volume for logging directory
    if use_gpu:
        command_prefix = "nvidia-docker run"
    else:
        command_prefix = "docker run"
    docker_log_dir = config.DOCKER_LOG_DIR
    if env is None:
        env = dict()
    # AWS credentials are always forwarded so in-container syncs work.
    env = dict(
        env,
        AWS_ACCESS_KEY_ID=config.AWS_ACCESS_KEY,
        AWS_SECRET_ACCESS_KEY=config.AWS_ACCESS_SECRET,
    )
    if env is not None:
        for k, v in env.items():
            command_prefix += " -e \"{k}={v}\"".format(k=k, v=v)
    # Mount MuJoCo keys, the log directory, and the code directory.
    command_prefix += " -v {local_mujoco_key_dir}:{docker_mujoco_key_dir}".format(
        local_mujoco_key_dir=mujoco_path, docker_mujoco_key_dir='/root/.mujoco')
    command_prefix += " -v {local_log_dir}:{docker_log_dir}".format(
        local_log_dir=log_dir,
        docker_log_dir=docker_log_dir
    )
    # NOTE(review): docker_args is appended without a leading space --
    # callers presumably include their own; confirm before changing.
    command_prefix += docker_args
    if local_code_dir is None:
        local_code_dir = config.PROJECT_PATH
    command_prefix += " -v {local_code_dir}:{docker_code_dir}".format(
        local_code_dir=local_code_dir,
        docker_code_dir=config.DOCKER_CODE_DIR
    )
    # Inside the container, logs go to the container-side mount point.
    params = dict(params, log_dir=docker_log_dir)
    if use_tty:
        command_prefix += " -ti " + docker_image + " /bin/bash -c "
    else:
        command_prefix += " -i " + docker_image + " /bin/bash -c "
    command_list = list()
    if pre_commands is not None:
        command_list.extend(pre_commands)
    command_list.append("echo \"Running in docker\"")
    command_list.append(to_local_command(
        params, python_command=python_command, script=osp.join(
            config.DOCKER_CODE_DIR, script), use_gpu=use_gpu))
    # We sleep for 2 min after termination to allow for last syncs.
    if post_commands is None:
        post_commands = ['sleep 120']
    command_list.extend(post_commands)
    return command_prefix + "'" + "; ".join(command_list) + "'"
def dedent(s):
    """Strip leading and trailing whitespace from every line of *s*.

    Note: unlike textwrap.dedent, this removes ALL per-line indentation,
    which is what the generated shell scripts here rely on.
    """
    return '\n'.join(part.strip() for part in s.split('\n'))
def launch_ec2(params_list, exp_prefix, docker_image, code_full_path,
               python_command="python",
               script='scripts/run_experiment.py',
               aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False,
               sync_s3_png=False,
               sync_s3_log=False,
               sync_log_on_termination=True,
               periodic_sync=True, periodic_sync_interval=15):
    """Launch the given experiment tasks on a single EC2 instance.

    Builds a bash user-data boot script that tags the instance, pulls the
    docker image, downloads the synced code from S3, runs each task in
    docker with periodic S3 log syncing, uploads final logs, and optionally
    terminates the machine. The instance is then requested via boto3
    (spot or on-demand depending on aws_config).

    :param params_list: list of per-task parameter dicts (mutated: pops
        remote_log_dir and env)
    :param code_full_path: S3 path of the code archive produced by s3_sync_code
    :param aws_config: overrides merged over the config.AWS_* defaults
    :param dry: print/request nothing for real
    """
    if len(params_list) == 0:
        return
    # Defaults come from the project config; caller overrides win.
    default_config = dict(
        image_id=config.AWS_IMAGE_ID,
        instance_type=config.AWS_INSTANCE_TYPE,
        key_name=config.AWS_KEY_NAME,
        spot=config.AWS_SPOT,
        spot_price=config.AWS_SPOT_PRICE,
        iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME,
        security_groups=config.AWS_SECURITY_GROUPS,
        security_group_ids=config.AWS_SECURITY_GROUP_IDS,
        network_interfaces=config.AWS_NETWORK_INTERFACES,
    )
    if aws_config is None:
        aws_config = dict()
    aws_config = dict(default_config, **aws_config)
    # Assemble the user-data boot script in a string buffer; everything is
    # wrapped in { ... } so stdout/stderr land in user_data.log at the end.
    sio = StringIO()
    sio.write("#!/bin/bash\n")
    sio.write("{\n")
    sio.write("""
        die() { status=$1; shift; echo "FATAL: $*"; exit $status; }
    """)
    sio.write("""
        EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`"
    """)
    # Tag the instance with the experiment name, owner, and prefix.
    sio.write("""
        aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
    """.format(exp_name=params_list[0].get("exp_name"), aws_region=config.AWS_REGION_NAME))
    if config.LABEL:
        sio.write("""
        aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}
    """.format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))
    sio.write("""
        aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}
    """.format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))
    sio.write("""
        service docker start
    """)
    sio.write("""
        docker --config /home/ubuntu/.docker pull {docker_image}
    """.format(docker_image=docker_image))
    sio.write("""
        export AWS_DEFAULT_REGION={aws_region}
    """.format(aws_region=config.AWS_REGION_NAME))
    # Download the project code from S3: either a single tarball
    # (FAST_CODE_SYNC) or a recursive directory copy.
    if config.FAST_CODE_SYNC:
        # sio.write("""
        #     aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}
        # """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
        #            aws_region=config.AWS_REGION_NAME))
        sio.write("""
        aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz
    """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
        sio.write("""
        mkdir -p {local_code_path}
    """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
               aws_region=config.AWS_REGION_NAME))
        sio.write("""
        tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}
    """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
               aws_region=config.AWS_REGION_NAME))
    else:
        # sio.write("""
        #     aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}
        # """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
        #            aws_region=config.AWS_REGION_NAME))
        sio.write("""
        aws s3 cp --recursive {code_full_path} {local_code_path}
    """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
    # MuJoCo license keys are synced separately from the code archive.
    s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
    # sio.write("""
    #     aws s3 cp --recursive {} {} --region {}
    # """.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))
    sio.write("""
        aws s3 cp --recursive {} {}
    """.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH))
    sio.write("""
        cd {local_code_path}
    """.format(local_code_path=config.DOCKER_CODE_DIR))
    # Per-task section: tag, set up log dirs, background sync loops, then
    # run the dockerized experiment and upload final logs.
    for params in params_list:
        log_dir = params.get("log_dir")
        remote_log_dir = params.pop("remote_log_dir")
        env = params.pop("env", None)
        sio.write("""
        aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
    """.format(exp_name=params.get("exp_name"), aws_region=config.AWS_REGION_NAME))
        sio.write("""
        mkdir -p {log_dir}
    """.format(log_dir=log_dir))
        if periodic_sync:
            # Background loop syncing selected file types to S3.
            include_png = " --include '*.png' " if sync_s3_png else " "
            include_pkl = " --include '*.pkl' " if sync_s3_pkl else " "
            include_log = " --include '*.log' " if sync_s3_log else " "
            # sio.write("""
            #     while /bin/true; do
            #         aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}
            #         sleep {periodic_sync_interval}
            #     done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
            #                                          log_dir=log_dir, remote_log_dir=remote_log_dir,
            #                                          aws_region=config.AWS_REGION_NAME,
            #                                          periodic_sync_interval=periodic_sync_interval))
            sio.write("""
        while /bin/true; do
            aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir}
            sleep {periodic_sync_interval}
        done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
                                             log_dir=log_dir, remote_log_dir=remote_log_dir,
                                             periodic_sync_interval=periodic_sync_interval))
            if sync_log_on_termination:
                # Background loop polling the spot-termination metadata
                # endpoint; uploads logs when termination is imminent.
                # sio.write("""
                #     while /bin/true; do
                #         if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
                #         then
                #             logger "Running shutdown hook."
                #             aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
                #             aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
                #             break
                #         else
                #             # Spot instance not yet marked for termination.
                #             sleep 5
                #         fi
                #     done & echo log sync initiated
                # """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
                sio.write("""
        while /bin/true; do
            if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
            then
                logger "Running shutdown hook."
                aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
                aws s3 cp --recursive {log_dir} {remote_log_dir}
                break
            else
                # Spot instance not yet marked for termination.
                sleep 5
            fi
        done & echo log sync initiated
    """.format(log_dir=log_dir, remote_log_dir=remote_log_dir))
        if use_gpu:
            # Retry nvidia-modprobe until the GPU driver is ready.
            sio.write("""
        for i in {1..800}; do su -c "nvidia-modprobe -u -c=0" ubuntu && break || sleep 3; done
        systemctl start nvidia-docker
    """)
        sio.write("""
        {command}
    """.format(command=to_docker_command(params, docker_image, python_command=python_command, script=script,
                                         use_gpu=use_gpu, env=env,
                                         local_code_dir=config.DOCKER_CODE_DIR)))
        # Final log upload after the experiment finishes.
        # sio.write("""
        #     aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
        # """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
        sio.write("""
        aws s3 cp --recursive {log_dir} {remote_log_dir}
    """.format(log_dir=log_dir, remote_log_dir=remote_log_dir))
        # sio.write("""
        #     aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
        # """.format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
        sio.write("""
        aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
    """.format(remote_log_dir=remote_log_dir))
    if terminate_machine:
        sio.write("""
        EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die \"wget instance-id has failed: $?\"`"
        aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}
    """.format(aws_region=config.AWS_REGION_NAME))
    sio.write("} >> /home/ubuntu/user_data.log 2>&1\n")
    full_script = dedent(sio.getvalue())
    import boto3
    import botocore
    # Spot requests go through the low-level client; on-demand uses the
    # resource API's create_instances.
    if aws_config["spot"]:
        ec2 = boto3.client(
            "ec2",
            region_name=config.AWS_REGION_NAME,
            aws_access_key_id=config.AWS_ACCESS_KEY,
            aws_secret_access_key=config.AWS_ACCESS_SECRET,
        )
    else:
        ec2 = boto3.resource(
            "ec2",
            region_name=config.AWS_REGION_NAME,
            aws_access_key_id=config.AWS_ACCESS_KEY,
            aws_secret_access_key=config.AWS_ACCESS_SECRET,
        )
    if len(full_script) > 10000 or len(base64.b64encode(full_script.encode()).decode("utf-8")) > 10000:
        # Script too long; need to upload script to s3 first.
        # We're being conservative here since the actual limit is 16384 bytes
        s3_path = upload_file_to_s3(full_script)
        sio = StringIO()
        sio.write("#!/bin/bash\n")
        sio.write("""
        aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\
        chmod +x /home/ubuntu/remote_script.sh && \\
        bash /home/ubuntu/remote_script.sh
    """.format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
        user_data = dedent(sio.getvalue())
    else:
        user_data = full_script
    print(full_script)
    # Keep a local copy of the generated script for debugging.
    with open("/tmp/full_script", "w") as f:
        f.write(full_script)
    instance_args = dict(
        ImageId=aws_config["image_id"],
        KeyName=aws_config["key_name"],
        UserData=user_data,
        InstanceType=aws_config["instance_type"],
        EbsOptimized=config.EBS_OPTIMIZED,
        SecurityGroups=aws_config["security_groups"],
        SecurityGroupIds=aws_config["security_group_ids"],
        NetworkInterfaces=aws_config["network_interfaces"],
        IamInstanceProfile=dict(
            Name=aws_config["iam_instance_profile_name"],
        ),
        **config.AWS_EXTRA_CONFIGS,
    )
    if len(instance_args["NetworkInterfaces"]) > 0:
        # EC2 rejects requests with both network interfaces and security
        # groups; the interfaces win here.
        # disable_security_group = query_yes_no(
        #     "Cannot provide both network interfaces and security groups info. Do you want to disable security group settings?",
        #     default="yes",
        # )
        disable_security_group = True
        if disable_security_group:
            instance_args.pop("SecurityGroups")
            instance_args.pop("SecurityGroupIds")
    if aws_config.get("placement", None) is not None:
        instance_args["Placement"] = aws_config["placement"]
    if not aws_config["spot"]:
        instance_args["MinCount"] = 1
        instance_args["MaxCount"] = 1
    print("************************************************************")
    print(instance_args["UserData"])
    print("************************************************************")
    if aws_config["spot"]:
        # Spot launch specifications require base64-encoded user data.
        instance_args["UserData"] = base64.b64encode(instance_args["UserData"].encode()).decode("utf-8")
        spot_args = dict(
            DryRun=dry,
            InstanceCount=1,
            LaunchSpecification=instance_args,
            SpotPrice=aws_config["spot_price"],
            # ClientToken=params_list[0]["exp_name"],
        )
        import pprint
        pprint.pprint(spot_args)
        if not dry:
            response = ec2.request_spot_instances(**spot_args)
            print(response)
            spot_request_id = response['SpotInstanceRequests'][
                0]['SpotInstanceRequestId']
            # Tagging can race the request becoming visible; retry a few times.
            for _ in range(10):
                try:
                    ec2.create_tags(
                        Resources=[spot_request_id],
                        Tags=[
                            {'Key': 'Name', 'Value': params_list[0]["exp_name"]}
                        ],
                    )
                    break
                except botocore.exceptions.ClientError:
                    continue
    else:
        import pprint
        pprint.pprint(instance_args)
        ec2.create_instances(
            DryRun=dry,
            **instance_args
        )
S3_CODE_PATH = None
def s3_sync_code(config, dry=False, added_project_directories=None):
    """Package up the project code and upload it to S3, returning the remote path.

    The remote path is cached in the module-level ``S3_CODE_PATH`` so that
    repeated calls within a single launch only upload once.

    :param config: configuration module/object (PROJECT_PATH,
        AWS_CODE_SYNC_S3_PATH, FAST_CODE_SYNC*, CODE_SYNC_IGNORES, ...)
    :param dry: when True, only print the commands instead of executing them
    :param added_project_directories: optional extra directories to bundle
        into the code tarball (FAST_CODE_SYNC mode only)
    :return: str, the S3 path that holds (or will hold) the uploaded code
    """
    # Fix: the default used to be a mutable list ([]), which is shared between
    # calls; use the None sentinel instead.
    if added_project_directories is None:
        added_project_directories = []
    global S3_CODE_PATH
    if S3_CODE_PATH is not None:
        # Already synced during this run - reuse the cached remote path.
        return S3_CODE_PATH
    base = config.AWS_CODE_SYNC_S3_PATH
    has_git = True
    if config.FAST_CODE_SYNC:
        try:
            current_commit = subprocess.check_output(
                ["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
        except subprocess.CalledProcessError as _:
            print("Warning: failed to execute git commands")
            current_commit = None
        # Unique tarball name: timestamp + hash(cwd, commit, timestamp).
        # NOTE: `timestamp` is a module-level value defined elsewhere in this file.
        file_name = str(timestamp) + "_" + hashlib.sha224(
            subprocess.check_output(["pwd"]) + str(current_commit).encode() + str(timestamp).encode()
        ).hexdigest() + ".tar.gz"
        file_path = "/tmp/" + file_name
        tar_cmd = ["tar", "-zcvf", file_path, "-C", config.PROJECT_PATH]
        for pattern in config.FAST_CODE_SYNC_IGNORES:
            tar_cmd += ["--exclude", pattern]
        tar_cmd += ["-h", "."]
        # Each extra directory needs its own -C <dir> . pair for tar.
        for path in added_project_directories:
            tar_cmd.append("-C")
            tar_cmd.append(path)
            tar_cmd += ["."]
        remote_path = "%s/%s" % (base, file_name)
        upload_cmd = ["aws", "s3", "cp", file_path, remote_path]
        mujoco_key_cmd = [
            "aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
        print(" ".join(tar_cmd))
        print(" ".join(upload_cmd))
        print(" ".join(mujoco_key_cmd))
        if not dry:
            subprocess.check_call(tar_cmd)
            subprocess.check_call(upload_cmd)
            try:
                subprocess.check_call(mujoco_key_cmd)
            except Exception as e:
                # Mujoco keys are optional - don't fail the sync if absent.
                print(e)
        S3_CODE_PATH = remote_path
        return remote_path
    else:
        try:
            current_commit = subprocess.check_output(
                ["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
            clean_state = len(
                subprocess.check_output(["git", "status", "--porcelain"])) == 0
        except subprocess.CalledProcessError as _:
            print("Warning: failed to execute git commands")
            has_git = False
        dir_hash = base64.b64encode(subprocess.check_output(["pwd"])).decode("utf-8")
        code_path = "%s_%s" % (
            dir_hash,
            (current_commit if clean_state else "%s_dirty_%s" % (current_commit, timestamp)) if
            has_git else timestamp
        )
        full_path = "%s/%s" % (base, code_path)
        cache_path = "%s/%s" % (base, dir_hash)
        # Seed the destination from the per-directory cache, copy the working
        # tree over it, then refresh the cache from the result.
        cache_cmds = ["aws", "s3", "cp", "--recursive"] + \
                     flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
                     [cache_path, full_path]
        cmds = ["aws", "s3", "cp", "--recursive"] + \
               flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
               [".", full_path]
        caching_cmds = ["aws", "s3", "cp", "--recursive"] + \
                       flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
                       [full_path, cache_path]
        mujoco_key_cmd = [
            "aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
        print(cache_cmds, cmds, caching_cmds, mujoco_key_cmd)
        if not dry:
            subprocess.check_call(cache_cmds)
            subprocess.check_call(cmds)
            subprocess.check_call(caching_cmds)
            try:
                subprocess.check_call(mujoco_key_cmd)
            except Exception:
                print('Unable to sync mujoco keys!')
        S3_CODE_PATH = full_path
        return full_path
def upload_file_to_s3(script_content):
    """Upload *script_content* to a uniquely-named S3 object and return its path.

    Used for user-data scripts that exceed the EC2 user-data size limit.

    :param script_content: str, the script text to upload
    :return: str, the S3 path of the uploaded object
    """
    import tempfile
    import uuid
    # Write the script to a named temp file so the aws CLI can read it.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(script_content.encode())
        local_path = f.name
    remote_path = os.path.join(
        config.AWS_CODE_SYNC_S3_PATH, "oversize_bash_scripts", str(uuid.uuid4()))
    try:
        subprocess.check_call(["aws", "s3", "cp", local_path, remote_path])
    finally:
        # Fix: the temp file used to leak when the upload failed.
        os.unlink(local_path)
    return remote_path
def to_lab_kube_pod(
        params, docker_image, code_full_path,
        python_command="python",
        script='scripts/run_experiment.py',
        is_gpu=False,
        sync_s3_pkl=False,
        periodic_sync=True,
        periodic_sync_interval=15,
        sync_all_data_node_to_s3=False,
        terminate_machine=True
):
    """Build a Kubernetes pod manifest (as a dict) that runs the experiment.

    :param params: the parameters for the experiment; logging-directory
        entries are used to set up the log path and the S3 sync commands
    :param docker_image: docker image to run the command in
    :param code_full_path: S3 location of the synced project code
    :param python_command: python interpreter to invoke
    :param script: script command for running experiment
    :param is_gpu: when True, mount the host NVIDIA driver and run privileged
    :param sync_s3_pkl: also sync *.pkl files in the periodic S3 sync
    :param periodic_sync: run a background loop syncing logs to S3
    :param periodic_sync_interval: seconds between periodic syncs
    :param sync_all_data_node_to_s3: sync the whole log dir (no file filters)
    :param terminate_machine: when False, sleep forever after the run so the
        pod stays alive for inspection
    :return: dict, the pod manifest ready to be sent to the kube API
    """
    log_dir = params.get("log_dir")
    remote_log_dir = params.pop("remote_log_dir")
    resources = params.pop("resources")
    node_selector = params.pop("node_selector")
    exp_prefix = params.pop("exp_prefix")
    kube_env = [
        {"name": k, "value": v}
        for k, v in (params.pop("env", None) or dict()).items()
    ]
    mkdir_p(log_dir)
    pre_commands = list()
    pre_commands.append('mkdir -p ~/.aws')
    pre_commands.append('mkdir ~/.mujoco')
    # fetch credentials from the kubernetes secret file
    pre_commands.append('echo "[default]" >> ~/.aws/credentials')
    pre_commands.append(
        "echo \"aws_access_key_id = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_KEY)
    pre_commands.append(
        "echo \"aws_secret_access_key = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_SECRET)
    s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
    pre_commands.append(
        'aws s3 cp --recursive {} {}'.format(s3_mujoco_key_path, '~/.mujoco'))
    if config.FAST_CODE_SYNC:
        pre_commands.append('aws s3 cp %s /tmp/rllab_code.tar.gz' % code_full_path)
        pre_commands.append('mkdir -p %s' % config.DOCKER_CODE_DIR)
        pre_commands.append('tar -zxvf /tmp/rllab_code.tar.gz -C %s' % config.DOCKER_CODE_DIR)
    else:
        pre_commands.append('aws s3 cp --recursive %s %s' %
                            (code_full_path, config.DOCKER_CODE_DIR))
    pre_commands.append('cd %s' % config.DOCKER_CODE_DIR)
    pre_commands.append('mkdir -p %s' %
                        (log_dir))
    if sync_all_data_node_to_s3:
        print('Syncing all data from node to s3.')
    if periodic_sync:
        # Fix: the original contained four copy-pasted variants of this loop,
        # two of which were byte-identical.  Only the file filter differs.
        if sync_all_data_node_to_s3:
            sync_filter = ""
        elif sync_s3_pkl:
            sync_filter = "--exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' "
        else:
            sync_filter = "--exclude '*' --include '*.csv' --include '*.json' "
        pre_commands.append("""
            while /bin/true; do
                aws s3 sync {sync_filter}{log_dir} {remote_log_dir} --region {aws_region} --quiet
                sleep {periodic_sync_interval}
            done & echo sync initiated""".format(
            sync_filter=sync_filter,
            log_dir=log_dir, remote_log_dir=remote_log_dir,
            aws_region=config.AWS_REGION_NAME,
            periodic_sync_interval=periodic_sync_interval))
    # copy the file to s3 after execution
    post_commands = list()
    post_commands.append('aws s3 cp --recursive %s %s' %
                         (log_dir,
                          remote_log_dir))
    if not terminate_machine:
        post_commands.append('sleep infinity')
    command_list = list()
    if pre_commands is not None:
        command_list.extend(pre_commands)
    command_list.append("echo \"Running in docker\"")
    command_list.append(
        "%s 2>&1 | tee -a %s" % (
            to_local_command(params, python_command=python_command, script=script),
            "%s/stdouterr.log" % log_dir
        )
    )
    if post_commands is not None:
        command_list.extend(post_commands)
    command = "; ".join(command_list)
    pod_name = config.KUBE_PREFIX + params["exp_name"]
    # underscore is not allowed in pod names
    pod_name = pod_name.replace("_", "-")
    print("Is gpu: ", is_gpu)
    # Build the manifest once; GPU pods get extra mounts/permissions below.
    container = {
        "name": "foo",
        "image": docker_image,
        "command": [
            "/bin/bash",
            "-c",
            "-li",  # to load conda env file
            command,
        ],
        "resources": resources,
        "imagePullPolicy": "Always",
    }
    spec = {
        "containers": [container],
        "restartPolicy": "Never",
        "nodeSelector": node_selector,
        "dnsPolicy": "Default",
    }
    if is_gpu:
        # NOTE(review): kube_env is attached only for GPU pods; this preserves
        # the original behaviour, though it looks like an oversight - confirm.
        container["env"] = kube_env
        # gpu specific: expose the host NVIDIA driver inside the container
        container["volumeMounts"] = [
            {
                "name": "nvidia",
                "mountPath": "/usr/local/nvidia",
                "readOnly": True,
            }
        ]
        container["securityContext"] = {
            "privileged": True,
        }
        spec["volumes"] = [
            {
                "name": "nvidia",
                "hostPath": {
                    "path": "/var/lib/docker/volumes/nvidia_driver_352.63/_data",
                }
            }
        ]
    return {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": pod_name,
            "labels": {
                "owner": config.LABEL,
                "expt": pod_name,
                "exp_time": timestamp,
                "exp_prefix": exp_prefix,
            },
        },
        "spec": spec,
    }
def concretize(maybe_stub):
    """Recursively turn stub objects into their concrete counterparts.

    Stub method calls are invoked, stub attributes resolved, stub objects
    instantiated (with the instance cached on the stub), and containers are
    converted element-wise.  Non-stub values are returned unchanged.
    """
    if isinstance(maybe_stub, StubMethodCall):
        obj = concretize(maybe_stub.obj)
        method = getattr(obj, maybe_stub.method_name)
        args = concretize(maybe_stub.args)
        kwargs = concretize(maybe_stub.kwargs)
        return method(*args, **kwargs)
    elif isinstance(maybe_stub, StubClass):
        return maybe_stub.proxy_class
    elif isinstance(maybe_stub, StubAttr):
        obj = concretize(maybe_stub.obj)
        attr_name = maybe_stub.attr_name
        attr_val = getattr(obj, attr_name)
        return concretize(attr_val)
    elif isinstance(maybe_stub, StubObject):
        if not hasattr(maybe_stub, "__stub_cache"):
            args = concretize(maybe_stub.args)
            kwargs = concretize(maybe_stub.kwargs)
            try:
                maybe_stub.__stub_cache = maybe_stub.proxy_class(
                    *args, **kwargs)
            except Exception:
                print(("Error while instantiating %s" % maybe_stub.proxy_class))
                import traceback
                traceback.print_exc()
                # Fix: the exception used to be swallowed here, after which the
                # __stub_cache access below failed with a confusing
                # AttributeError.  Re-raise the real cause instead.
                raise
        ret = maybe_stub.__stub_cache
        return ret
    elif isinstance(maybe_stub, dict):
        # make sure that there's no hidden caveat
        ret = dict()
        for k, v in maybe_stub.items():
            ret[concretize(k)] = concretize(v)
        return ret
    elif isinstance(maybe_stub, (list, tuple)):
        return maybe_stub.__class__(list(map(concretize, maybe_stub)))
    else:
        return maybe_stub
| [
"QF30@duke.edu"
] | QF30@duke.edu |
1ebf50f2fe945bd4d55d54c13e76a24165a05cf2 | a0f0efaaaf69d6ccdc2a91596db29f04025f122c | /build/botcmd_msgs/devel/lib/python2.7/dist-packages/botcmd_msgs/srv/_bot_getenabledi_cmd.py | 56881c75882d7bfcd72f305eeff5b2ca7dffd6bc | [] | no_license | chiuhandsome/ros_ws_test-git | 75da2723154c0dadbcec8d7b3b1f3f8b49aa5cd6 | 619909130c23927ccc902faa3ff6d04ae0f0fba9 | refs/heads/master | 2022-12-24T05:45:43.845717 | 2020-09-22T10:12:54 | 2020-09-22T10:12:54 | 297,582,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,723 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from botcmd_msgs/bot_getenabledi_cmdRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class bot_getenabledi_cmdRequest(genpy.Message):
  # Autogenerated ROS service-request message (genpy).  Wire format: a single
  # little-endian int8 field, packed via the cached Struct from _get_struct_b().
  _md5sum = "481ac5a494c3140a2539020bd74c82c7"
  _type = "botcmd_msgs/bot_getenabledi_cmdRequest"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """int8 command
"""
  __slots__ = ['command']
  _slot_types = ['int8']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       command
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(bot_getenabledi_cmdRequest, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.command is None:
        self.command = 0
    else:
      self.command = 0
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.command
      buff.write(_get_struct_b().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      # a single int8 occupies exactly 1 byte on the wire
      end += 1
      (self.command,) = _get_struct_b().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.command
      buff.write(_get_struct_b().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 1
      (self.command,) = _get_struct_b().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
    # Accessor for genpy's shared uint32 Struct (used for array lengths).
    global _struct_I
    return _struct_I
_struct_b = None
def _get_struct_b():
    # Lazily build and cache the Struct for a single little-endian int8.
    global _struct_b
    if _struct_b is None:
        _struct_b = struct.Struct("<b")
    return _struct_b
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from botcmd_msgs/bot_getenabledi_cmdResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class bot_getenabledi_cmdResponse(genpy.Message):
  # Autogenerated ROS service-response message (genpy).  Wire format: one
  # little-endian uint8 (the bool result) followed by an int32 status ("<Bi").
  _md5sum = "01a64608314d5f77b6df20caba78d455"
  _type = "botcmd_msgs/bot_getenabledi_cmdResponse"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """bool result
int32 status
"""
  __slots__ = ['result','status']
  _slot_types = ['bool','int32']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       result,status
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(bot_getenabledi_cmdResponse, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.result is None:
        self.result = False
      if self.status is None:
        self.status = 0
    else:
      self.result = False
      self.status = 0
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_Bi().pack(_x.result, _x.status))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      _x = self
      start = end
      # 5 bytes on the wire: 1 (uint8 result) + 4 (int32 status)
      end += 5
      (_x.result, _x.status,) = _get_struct_Bi().unpack(str[start:end])
      # canonicalize the wire uint8 into a Python bool
      self.result = bool(self.result)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_Bi().pack(_x.result, _x.status))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      _x = self
      start = end
      end += 5
      (_x.result, _x.status,) = _get_struct_Bi().unpack(str[start:end])
      self.result = bool(self.result)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
    # Accessor for genpy's shared uint32 Struct (used for array lengths).
    global _struct_I
    return _struct_I
_struct_Bi = None
def _get_struct_Bi():
    # Lazily build and cache the Struct for (uint8, int32), little-endian.
    global _struct_Bi
    if _struct_Bi is None:
        _struct_Bi = struct.Struct("<Bi")
    return _struct_Bi
class bot_getenabledi_cmd(object):
  # Service container type pairing the request and response classes above.
  _type          = 'botcmd_msgs/bot_getenabledi_cmd'
  _md5sum = 'c310784b062f6ef0f7752130ef306c28'
  _request_class  = bot_getenabledi_cmdRequest
  _response_class = bot_getenabledi_cmdResponse
| [
"chiuhandsome1966@gmail.com"
] | chiuhandsome1966@gmail.com |
3115741b6188e19b6b15dbc83eb6f77b555b45bb | e127db67e1135906bf3ff523b9d79fa901f2cb17 | /feifan/feifan_activity.py | c2540fda11565801930d56d75a83a16999c93ec5 | [] | no_license | trrying/PythonNote | c8012e9254e8833e6afd1f5c8c3350f1ceb6c756 | 856a08b47ac6255fe3aee1acbc872a11ee677741 | refs/heads/master | 2021-01-21T12:47:16.001704 | 2017-09-22T10:24:42 | 2017-09-22T10:24:42 | 102,099,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,886 | py | import urllib.request
import json
import pymysql
import sys
import util.time_utils
import threading
import math
import ffan_db_config
def get_data(thread_name, data_list):
    """Crawl the activity feed of every plaza in *data_list* and upsert the
    results into the ffan_news table.

    :param thread_name: label used in the progress log lines
    :param data_list: rows of (fp_p_id, fp_p_name, fp_city_id, fp_city)
    """
    # Open the database connection.
    db, cursor = ffan_db_config.get_db_config()
    print("plaza_list len : %d " % (len(data_list)))
    # Alternative endpoint (coupons) kept for reference:
    # base_coupons_list_url = "https://api.ffan.com/ffan/v1/city/coupons?size=%d&offset=%d&plazaId=%s&cityId=%s"
    base_coupons_list_url = "https://api.ffan.com/ffan/v1/city/activities?size=%s&offset=%s&plazaId=%s&cityId=%s"
    size = 50
    # NOTE(review): the SQL below is built via string interpolation and only
    # single quotes are escaped manually - prefer pymysql's parameterized
    # queries (cursor.execute(sql, params)) to rule out SQL injection.
    base_count_sql = "select count(*) from ffan_news where fp_p_id = '%(fp_p_id)s' and fn_aid = '%(fn_aid)s'"
    base_update_sql = """
    update ffan_news set
    fn_title='%(fn_title)s',
    fn_description='%(fn_description)s',
    fn_subtitle='%(fn_subtitle)s',
    fn_logo='%(fn_logo)s',
    fn_start_time='%(fn_start_time)s',
    fn_end_time='%(fn_end_time)s',
    fn_update_time='%(fn_update_time)s'
    where fp_p_id = '%(fp_p_id)s' and fn_aid = '%(fn_aid)s'
    """
    base_insert_sql = """
    insert into ffan_news (
    fn_title,
    fn_description,
    fn_subtitle,
    fn_logo,
    fn_start_time,
    fn_end_time,
    fp_p_id,
    fn_aid,
    fn_create_time)
    VALUES (
    '%(fn_title)s',
    '%(fn_description)s',
    '%(fn_subtitle)s',
    '%(fn_logo)s',
    '%(fn_start_time)s',
    '%(fn_end_time)s',
    '%(fp_p_id)s',
    '%(fn_aid)s',
    '%(fn_create_time)s')
    """
    start_time = util.time_utils.get_current_time()
    try:
        for index, plaza in enumerate(data_list):
            offset = 0
            retry_count = 0
            while True:
                try:
                    # Retry each page at most 2 times.
                    if retry_count > 2:
                        break
                    # Unpack the values from the DB query row.
                    plaza_id = plaza[0]
                    plaza_name = plaza[1]
                    plaza_city_id = plaza[2]
                    plaza_city_name = plaza[3]
                    # Build the url and fetch one page of activities.
                    url = base_coupons_list_url % (size, offset, plaza_id, plaza_city_id)
                    result_json_str = ffan_db_config.request(url)
                    # Parse the json payload.
                    response_result = json.loads(result_json_str)
                    result_data = response_result['data']
                    result_data_list = result_data['list']
                    insert_count = 0
                    update_count = 0
                    for data_bean in result_data_list:
                        data_bean['plazaId'] = plaza_id
                        data_bean['startDate'] = util.time_utils.get_time(data_bean['startDate'])
                        data_bean['endDate'] = util.time_utils.get_time(data_bean['endDate'])
                        # Escape single quotes for the hand-built SQL below.
                        data_bean['title'] = data_bean['title'].replace("'", "''")
                        data_bean['description'] = data_bean['description'].replace("'", "''")
                        data_bean['subtitle'] = data_bean['subtitle'].replace("'", "''")
                        count_sql = base_count_sql % {'fp_p_id': data_bean['plazaId'], 'fn_aid': data_bean['id']}
                        cursor.execute(count_sql)
                        count_sql_result = cursor.fetchone()
                        try:
                            if count_sql_result[0] > 0:
                                # Activity already stored for this plaza: update it.
                                update_sql = base_update_sql % {
                                    'fn_title': data_bean['title'],
                                    'fn_description': data_bean['description'],
                                    'fn_subtitle': data_bean['subtitle'],
                                    'fn_logo': data_bean['pic'],
                                    'fn_start_time': data_bean['startDate'],
                                    'fn_end_time': data_bean['endDate'],
                                    'fn_update_time': util.time_utils.get_current_time(),
                                    'fp_p_id': data_bean['plazaId'],
                                    'fn_aid': data_bean['id']
                                }
                                update_count += cursor.execute(update_sql)
                            else:
                                # New activity: insert it.
                                insert_sql = base_insert_sql % {
                                    'fn_title': data_bean['title'],
                                    'fn_description': data_bean['description'],
                                    'fn_subtitle': data_bean['subtitle'],
                                    'fn_logo': data_bean['pic'],
                                    'fn_start_time': data_bean['startDate'],
                                    'fn_end_time': data_bean['endDate'],
                                    'fp_p_id': data_bean['plazaId'],
                                    'fn_aid': data_bean['id'],
                                    'fn_create_time': util.time_utils.get_current_time()
                                }
                                insert_count += cursor.execute(insert_sql)
                            db.commit()
                        except Exception as e:
                            print(e)
                            db.rollback()
                            print("execute sql fail " + plaza_city_name + " " + plaza_name, sys.exc_info())
                    print("ffan_news operate db threadName : %s data.len : %d progress : %s result_data len : %s insertCount : %d updateCount : %d offset : %d retry_count : %d"
                          % (thread_name, len(data_list), str(int((index+1) / (len(data_list))*100))+"%", len(result_data_list), insert_count, update_count, offset, retry_count))
                    # Page forward while the API reports more data.
                    if int(result_data['info']['more']) > 0:
                        offset += size
                    else:
                        break
                except Exception as e:
                    print(e)
                    print(sys.exc_info())
                    retry_count += 1
    finally:
        # Fix: close the connection even when an unexpected error escapes the loop.
        db.close()
    print("activity list operate db threadName : %s 耗时:%s 秒" % (thread_name, (util.time_utils.get_current_time() - start_time)))
class OperateThread(threading.Thread):
    """Worker thread that crawls one slice of the plaza list via get_data()."""

    def __init__(self, thread_id, data_list):
        super(OperateThread, self).__init__()
        self.threadId = thread_id
        self.dataList = data_list

    def run(self):
        # Delegate the actual crawling/upserting to the module-level worker.
        get_data(self.threadId, self.dataList)
def start_get_data(thread_count=0):
    """Load all plazas from the DB and crawl their activities.

    :param thread_count: when > 0, split the plaza list evenly across that
        many OperateThreads; otherwise run everything in the calling thread.
    """
    # Open the database connection just to read the plaza list.
    db, cursor = ffan_db_config.get_db_config()
    try:
        select_sql = "select fp_p_id,fp_p_name,fp_city_id,fp_city from ffan_poi"
        cursor.execute(select_sql)
        sql_result = cursor.fetchall()
    finally:
        # Fix: this connection was previously never closed.
        db.close()
    print("activity plaza size : %s" % (len(sql_result)))
    if thread_count > 0:
        # Hand each thread a contiguous slice of the plaza list.
        thread_data_size = math.ceil(len(sql_result) / thread_count)
        for i in range(thread_count):
            begin = i * thread_data_size
            end = (i + 1) * thread_data_size
            OperateThread(i + 1, sql_result[begin:end]).start()
    else:
        get_data('caller thread', sql_result)
if __name__ == "__main__":
start_get_data(5)
| [
"ouweiming.yeah.net"
] | ouweiming.yeah.net |
2eca253d8b757e64ba22aa72a25f4bbcce87b4da | 9e91296e3f67cda95306e4091dcd1e72046d6f74 | /src/utils.py | 5bfe0887d950b23f1658afe34f76508b68f9bbfc | [
"MIT"
] | permissive | dayuanyuan1989/SaveProfits | c6be8a44935b07fb74797149c7967a7ef97931d2 | fcf86ab160eb7f9f064dfd25e9594dde2cc19ede | refs/heads/master | 2021-06-27T13:23:26.703728 | 2017-09-14T11:31:46 | 2017-09-14T11:31:46 | 103,524,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,556 | py | # -*- coding: utf-8 -*-
import sys
from aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest
from aliyunsdkdysmsapi.request.v20170525 import QuerySendDetailsRequest
from aliyunsdkcore.client import AcsClient
import uuid
"""
短信业务调用接口示例,版本号:v20170525
Created on 2017-06-12
"""
reload(sys)
sys.setdefaultencoding('utf8')
REGION = "cn-hangzhou"
# ACCESS_KEY_ID/ACCESS_KEY_SECRET 根据实际申请的账号信息进行替换
ACCESS_KEY_ID = "LTAIN5LqyknGdcli"
ACCESS_KEY_SECRET = "mSCwWISXmPLiZB4TcEscdNNYNAz3Au"
acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)
def send_sms(business_id, phone_numbers, sign_name, template_code, template_param=None):
    """Send an SMS through the Aliyun Dysms API and return the raw JSON reply.

    :param business_id: caller-supplied request id (traceable water-mark), required
    :param phone_numbers: recipient number list, required
    :param sign_name: SMS signature registered with Aliyun
    :param template_code: code of the approved SMS template, required
    :param template_param: optional JSON string with the template variables
    """
    request = SendSmsRequest.SendSmsRequest()
    # Business request water-mark, required.
    request.set_OutId(business_id)
    # Recipient numbers, required.
    request.set_PhoneNumbers(phone_numbers)
    # SMS signature.
    request.set_SignName(sign_name)
    # Approved SMS template code, required.
    request.set_TemplateCode(template_code)
    # Template variable parameters, optional.
    if template_param is not None:
        request.set_TemplateParam(template_param)
    # The client returns the service's JSON reply (or raises on failure).
    return acs_client.do_action_with_exception(request)
def query_send_detail(biz_id, phone_number, page_size, current_page, send_date):
    """Query SMS delivery records (last 30 days only) and return the raw JSON.

    :param biz_id: optional send-request water-mark to filter on
    :param phone_number: recipient number to query
    :param page_size: page size of the result listing, required
    :param current_page: 1-based page number, required
    :param send_date: send date to query, formatted yyyyMMdd, required
    """
    request = QuerySendDetailsRequest.QuerySendDetailsRequest()
    # Optional - water-mark of the original send request.
    request.set_BizId(biz_id)
    # Number whose delivery records are being queried.
    request.set_PhoneNumber(phone_number)
    # Required - send date; records are kept for 30 days.
    request.set_SendDate(send_date)
    # Required - paging parameters.
    request.set_PageSize(page_size)
    request.set_CurrentPage(current_page)
    # The client returns the service's JSON reply (or raises on failure).
    return acs_client.do_action_with_exception(request)
#__name__ = 'send'
# Manual smoke tests: these branches only run when the line above is
# uncommented to rebind __name__, so by default neither executes.
if __name__ == 'send':
    __business_id = uuid.uuid1()
    print __business_id
    params = "{\"code\":\"12345\",\"product\":\"云通信\"}"
    # The second assignment overrides the first example payload.
    params = "{\"nm\":\"东方既白\",\"number\":\"1234567\",\"in\":\"4.15\",\"cur\":\"8.15\",\"prate\":\"98%\"}"
    print send_sms(__business_id, "13564511106", "XK咨询", "SMS_94650115", params)
if __name__ == 'query':
    print query_send_detail("1234567^8901234", "13000000000", 10, 1, "20170612")
| [
"cuiyuanyuan@dmtec.cn"
] | cuiyuanyuan@dmtec.cn |
af733af30147e5b924a0499c3228d7c2c90c1183 | 2ad6564f703f65cf3f21c515793430aea249bcb8 | /source/FnAssetAPI/Entity.py | 88ce02d380800e3c5419c79a47a1dd65c480b0af | [
"MIT"
] | permissive | IngenuityEngine/ftrack-connect-foundry | 43590821dd5c7aced1b4ee8c586f7df4c4a2750f | a0d5ba788e3dc5c1536ebe9740bcf4393e3f5e1d | refs/heads/master | 2021-08-30T23:22:23.251553 | 2017-12-19T20:21:27 | 2017-12-19T20:21:27 | 114,193,460 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,943 | py | from .core.decorators import debugApiCall
from .audit import auditApiCall
from .import constants
__all__ = ['Entity']
class Entity(object):
  """
  The Entity is a @ref Host facing convenience class that holds an @ref
  entity_reference and a @ref Manager instance. It wraps the @ref
  ManagerInterfaceBase to make integration of the asset API into a host much
  more straightforward.

  Once created, the Entity becomes a self-contained representative for the
  asset in the @ref asset_management_system.

  @note In all cases, either the @ref Entity or @ref Manager
  should be used in implementation code. The @ref ManagerInterfaceBase should
  never be used directly.

  Most functions simply wrap the ManagerInterfaceBase, so see the docs there
  for more on their behaviour.

  @see python.implementation.ManagerInterfaceBase
  """
def __init__(self, reference, manager):
if isinstance(manager, str):
## If we have a current session, use this to try and get the required
## manager @todo This needs cleaning up I think.
from SessionManager import SessionManager
session = SessionManager.currentSession()
if session:
manager = session._factory.instantiate(manager)
else:
manager = None
if not manager:
raise RuntimeError("Entity constructed with no Manager, or invalid "+
"identifier")
self.__reference = reference
self.__manager = manager
self.__interface = manager._getInterface()
# This can be set to false, to disable API debugging at the per-class level
self._debugCalls = True
def __eq__(self, other):
if self.reference != other.reference: return False
if self.manager != other.manager: return False
return True
  @auditApiCall("Entity methods")
  def __str__(self):
    # The string form of an Entity is simply its entity reference.
    return self.reference
  def __repr__(self):
    # Tolerate a missing interface so repr is always safe to call while debugging.
    managerId = self.__interface.getIdentifier() if self.__interface else None
    return "Entity(%r, %r)" % (self.reference, managerId)
## @name Properties
## These are read-only for entities
## @{
  def __getReference(self):
    """
    @return str, the ASCII @ref entity_reference that locates the Entity.
    """
    return self.__reference
  # Read-only: the entity reference never changes after construction.
  reference = property(__getReference)
  def __getManager(self):
    """
    @return object, The @ref Manager that maintains the Entity.
    """
    return self.__manager
  # Read-only: the owning Manager never changes after construction.
  manager = property(__getManager)
## @}
##
# @name Entity Resolution
#
# The concept of resolution is turning an @ref Entity into a 'finalized' or
# 'primary' string. This, ultimately, is anything meaningful to the
# situation. It could be a colour space, a directory, a script or image
# sequence. A rule of thumb is that a resolved @ref Entity should be the
# string the Host would have had anyway, in a unmanaged environment. For some
# kind of Entity - such as a 'Shot', for example, there may not be a
# meaningful string, though often some sensible return can be made. In these
# cases its generally unlikely that you would be resolving the Entity in the
# first place.
#
# @{
@debugApiCall
@auditApiCall("Entity methods")
def resolve(self, context):
"""
Returns the primary string held by the Entity. In general, any
substitutions tokens - such as frame numbers, views, etc... remain intact
and need handling as if the Asset API was never there..
The API defines that all file paths passed though the API that represent
file sequences should use the 'format' syntax, compatible with sprintf,
etc... (eg. %04d").
@return str, The UTF-8 ASCII compatible string that that is represented by
the Entity.
@exception python.exceptions.InvalidEntityReference If the @ref Entity is
not known by the associated \ref Manager.
@exception python.exceptions.EntityResolutionError If the @ref Entity does
not have a meaningful string representation, or if it is a valid Entity but
it does not logically exist in a way required to resolve.
@exception python.exceptions.InvalidEntityReference if the \ref Entity
should not be resolved for that context, for example, if the context access
is kWrite and the entity is an existing version - the exception means that
it is not a valid action to perform on the entity.
"""
return self.__manager.resolveEntityReference(self.__reference, context)
## @}
##
# @name Entity information
#
# There are several common requests for basic, generic information about
# an Entity that is assumed to be valid for all entity types.
#
# @see @ref metadata
#
# @{
@debugApiCall
@auditApiCall("Entity methods")
def exists(self, context):
"""
Can be called to determine if the Entity exists in the @ref
asset_management_system, and that it can be resolved into a meaningful
string. Managers may return perfectly valid \ref entity_references or
Entities that don't exist *yet* (maybe a new version, for example). By
'Exist' we mean 'is ready to be read'.
In the future, this may need to be extended to cover a more complex
definition of 'existence' (for example, known to the system, but not yet
finalized). For now however, it should be assumed to simply mean, 'ready to
be consumed', and if only a placeholder or un-finalized asset is available,
False should be returned.
It's important to properly configure the supplied context as the access
pattern and locale may well disambiguating this subtle definition of
'exists' in some cases too, as it better explains the intent.
@return bool, True if it points to an existing entity, False if the Entity
is not known or ready yet.
@exception python.exceptions.InvalidEntityReference If the Entity does not
hold a valid entity reference.
"""
return self.__interface.entityExists(self.__reference, context)
@debugApiCall
@auditApiCall("Entity methods")
def getName(self, context=None):
    """
    Returns the bare name of this Entity, with no hierarchy or
    classification included.

    For example:
      @li `"1"` - for a version of an asset
      @li `"seq003"` - for a sequence in a hierarchy

    @return str, a UTF-8 ASCII string holding the Entity's name.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.

    @see getDisplayName
    """
    name = self.__interface.getEntityName(self.__reference, context)
    return name
@debugApiCall
@auditApiCall("Entity methods")
def getDisplayName(self, context=None):
    """
    Returns an unambiguous, humanised display name that uniquely
    identifies this Entity in the given context.

    @note Configure the Context properly - some Managers produce far more
    meaningful and readable strings when they know the locale the result
    will be used in.

    For example:
      @li `"dive / build / cuttlefish / model / v1"` - an asset version
      in an 'open recent' menu.
      @li `"Sequence 003 [ Dive / Episode 1 ]"` - a sequence in a
      hierarchy as a window title.

    @return str, a UTF-8 ASCII string.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.

    @see getName
    """
    displayName = self.__interface.getEntityDisplayName(
        self.__reference, context)
    return displayName
@debugApiCall
@auditApiCall("Entity methods")
def getMetadata(self, context):
    """
    Retrieves the @ref metadata dictionary for this Entity.

    @warning See @ref setMetadata for important notes on metadata and its
    role in the system.

    @return dict, the entity's metadata. Values are P.O.D. types, keys
    are UTF-8 ASCII strings.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.

    @see getMetadataEntry
    @see setMetadata
    @see setMetadataEntry
    """
    metadata = self.__interface.getEntityMetadata(self.__reference, context)
    return metadata
@debugApiCall
@auditApiCall("Entity methods")
def getMetadataEntry(self, key, context, throw=False, defaultValue=None):
    """
    Returns the value stored under a single metadata key for this Entity.

    @param key str, the key to look up.
    @param throw bool [False], when True a KeyError is raised if the
    requested key is not set; otherwise defaultValue is returned.
    @param defaultValue p.o.d, returned when the key is not set for the
    entity and throw is False.

    @return p.o.d, the value for the specified key.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.
    @exception KeyError If throw is True and the Entity has no metadata
    for the specified key.

    @see setMetadataEntry
    @see getMetadata
    @see setMetadata
    """
    try:
        return self.__interface.getEntityMetadataEntry(
            self.__reference, key, context)
    # Fix: 'except KeyError, e:' is Python-2-only syntax; 'as' works on
    # Python 2.6+ and Python 3.
    except KeyError:
        if throw:
            # Bare raise re-raises the original exception with its
            # traceback intact (unlike 'raise e').
            raise
        return defaultValue
@debugApiCall
@auditApiCall("Entity methods")
def setMetadata(self, data, context, merge=True):
    """
    Sets this Entity's metadata.

    @param data dict, a dictionary of metadata - string keys, p.o.d.
    values.
    @param merge bool, when True the Entity's existing metadata is merged
    with the new data (new data taking precedence); when False the
    existing metadata is entirely replaced by the new data.

    @note Managers guarantee to faithfully round-trip any data stored in
    an Entity's metadata. They may internally bridge it into other
    first-class concepts within their domain, but must present the same
    dictionary back when queried (unless it has been meaningfully
    modified in the mean time).

    A value of 'None' instructs that the corresponding key should be
    un-set on the Entity.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.
    @exception ValueError If any value is of an un-storable type.
    Presently only str, float, int and bool are required to be stored.
    @exception KeyError If any of the metadata keys are non-strings.

    @see getMetadata
    @see getMetadataEntry
    @see setMetadataEntry
    """
    return self.__interface.setEntityMetadata(
        self.__reference, data, context, merge)
@debugApiCall
@auditApiCall("Entity methods")
def setMetadataEntry(self, key, value, context):
    """
    Stores a single metadata value under the supplied key.

    @param value p.o.d, must be a bool, float, int or str.

    @see getMetadataEntry
    @see getMetadata
    @see setMetadata
    """
    return self.__interface.setEntityMetadataEntry(
        self.__reference, key, value, context)
## @}
##
# @name Versioning
#
# Most Managers allow multiple revisions of certain entities to be tracked
# simultaneously. This API exposes this as a generalised concept, in order to
# avoid Exceptions, you should take care to only query versioning where it's
# meaningful to the type of Entity.
#
# @{
@debugApiCall
@auditApiCall("Entity methods")
def getVersionName(self, context=None):
    """
    Retrieves the name of the version this Entity points to.

    @return str, a UTF-8 ASCII string holding the version name, or an
    empty string if the entity is not versioned.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.

    @see getVersions()
    @see getFinalizedVersion()
    """
    versionName = self.__interface.getEntityVersionName(
        self.__reference, context)
    return versionName
@debugApiCall
@auditApiCall("Entity methods")
def getVersions(self, context, includeMetaVersions=False, maxResults=-1,
        asRefs=False, asList=False):
    """
    Retrieves all available versions of this Entity (including this
    Entity itself, if it points to a specific version).

    @param includeMetaVersions bool, when True @ref meta_versions such as
    'latest' are included; otherwise only concrete versions are returned.
    @param maxResults int, limits the number of results collected; when
    more results are available than the limit, the newest versions are
    returned. -1 returns all results.
    @param asRefs bool [False], when True the return contains
    \ref entity_reference "Entity References" rather than \ref Entity
    instances.
    @param asList bool [False], when True the return is a list rather
    than the standard dictionary.

    @return dict, keys are ASCII version name strings, values are
    Entities (or references). Additionally
    python.constants.kVersionDict_OrderKey may be set to a list of the
    version names (ie: dict keys) in natural ascending order, for use by
    UI elements, etc...

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.

    @see getVersionName()
    @see getFinalizedVersion()
    """
    versions = self.__interface.getEntityVersions(
        self.__reference, context, includeMetaVersions, maxResults)
    orderKey = constants.kVersionDict_OrderKey
    if not asRefs:
        # Fix: the order-hint entry (kVersionDict_OrderKey) holds a list
        # of version names, not an entity reference - it must not be
        # wrapped in an Entity, otherwise the asList/hint path below
        # (and any caller reading the hint) receives a bogus Entity.
        versions = dict(
            (name, ref if name == orderKey else Entity(ref, self.__manager))
            for (name, ref) in versions.items())
    if not asList:
        return versions
    hint = versions.get(orderKey, None)
    if hint:
        return [versions[name] for name in hint]
    # Fix: keep the order-hint key itself out of the flattened list.
    return [versions[name] for name in sorted(versions.keys())
            if name != orderKey]
@debugApiCall
@auditApiCall("Entity methods")
def getFinalizedVersion(self, context, overrideVersionName=None, asRef=False):
    """
    Retrieves the concrete version for a @ref meta-version @ref
    entity_reference.

    If this Entity's reference is not versioned, or already points at a
    concrete version, the input reference is passed through. The same
    applies when versioning is unsupported for the reference.

    @param overrideVersionName str, when supplied the returned Entity is
    the version of the referenced asset matching this name, ignoring any
    version inferred by this Entity.
    @param asRef bool [False], when True the return is a \ref
    entity_reference instead of a \ref Entity.

    @return python.Entity.Entity or None

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.
    @exception python.exceptions.EntityResolutionError If the Entity is
    ambiguously versioned (for example the version is missing as it
    points to the parent 'asset' and that behaviour is undefined in the
    Manager's model - some Managers may sensibly fall back on 'latest').
    @exception python.exception.EntityResolutionError If the supplied
    overrideVersionName does not exist for the Entity.

    @see getVersionName()
    @see getVersions()
    """
    finalRef = self.__interface.getFinalizedEntityVersion(
        self.__reference, context, overrideVersionName)
    if asRef:
        return finalRef
    if not finalRef:
        return None
    return Entity(finalRef, self.__manager)
## @}
##
# @name Related Entities
#
# A 'related' Entity could take many forms. For example:
#
# @li In 3D CGI, Multiple @ref aovs may be related to a 'beauty' render.
# @li In Compositing, an image sequence may be related to the script
# that created it.
# @li An asset may be related to a task that specifies work to be done.
# @li Parent/child relationships are also (semantically) covered by
# these relationships.
#
# In this API, these relationships are represented by a generic
# Specification, this may just be a 'type', but can additionally have
# arbitrary attributes to further define the relationship. For example in
# the case of @ref aovs, the type might be 'alternate output' and the
# attributes may be that the 'channel' is 'diffuse'.
#
# Related references form a vital part in the abstraction of the internal
# structure of the asset management system from the Host application in its
# attempts to provide the user with meaningful functionality. A good example
# of this is in an editorial example, where it may need to query whether a
# 'shot' exists in a certain part of the asset system. One approach would be
# to use a 'getChildren' call, on this part of the system. This has the
# drawback that is assumes that shots are always something that can be
# described as 'immediate children' of the location in question. This lay not
# always be the case (say, for example there is some kind of 'task' structure
# in place too). Instead we use a request that asks for any 'shots' that
# relate to the chosen location. It is then up to the implementation of the
# ManagerInterfaceBase to determine how that maps to its own data model.
# Hopefully this allows Hosts of this API to work with a broader range of
# asset managements, without providing any requirements of their structure or
# data model.
#
# @{
@debugApiCall
@auditApiCall("Entity methods")
def getRelatedEntities(self, relationshipSpecOrSpecs, context, asRefs=False,
        resultSpec=None):
    """
    Returns related Entities, based on a relationship specification.

    This is an essential function in this API - it is widely used to
    query organisational hierarchy and other interesting relationships.

    Two calling conventions are supported, allowing batch optimisation in
    the implementation and avoiding excessive query times with
    high-latency services:

      a) A single specification.
      b) A list of specifications.

    In both cases the return value is a list of lists, for example:

      a) getRelatedEntites( spec )
         > [ [ matches, ... ] ]
      b) getRelatedEntites( [ s1, s2, s3 ] )
         > [ [ s1-matches, ... ], [ s2-matches, ... ], [ s3-matches, ... ] ]

    @note The order of entities within each inner list of matches is not
    meaningful, but the outer list matches the input order.

    If a specification is not understood by the Manager, an empty list is
    returned for that specification and no errors are raised.

    @param relationshipSpecOrSpecs python.specification.Specification,
    either a standard EntitySpecification meaning 'find me Entities that
    match this spec in relation to me', or a RelationshipSpecification
    for more complex queries.
    @param asRefs bool [False], when True the returned list of lists
    contains \ref entity_reference instead of Entity instances.
    @param resultSpec python.specifications.EntitySpecification or None,
    a hint as to what kind of entity is expected to be returned.

    @return list of Entity lists - always a list of lists, regardless of
    how many specs were passed in.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.

    @see python.specifications
    @see setRelatedReferences()
    """
    return self.__manager.getRelatedEntities(
        [self.__reference, ], relationshipSpecOrSpecs, context,
        resultSpec=resultSpec, asRefs=asRefs)
@debugApiCall
@auditApiCall("Entity methods")
def setRelatedEntities(self, relationshipSpec, entities, context, append=True):
    """
    Creates a new relationship between this Entity and the other supplied
    Entities.

    @param append bool, when True (default) new relationships are added
    to any existing ones. When False, any existing relationships matching
    the supplied specification are first removed.

    Though a Manager is required to support getRelatedEntities, there is
    some asymmetry here, as it is not required to support setting related
    references directly. For example in the 'shot' case (as illustrated
    in the docs for getRelatedEntities), new shots would be created by
    registering a new @ref python.specifications.ShotSpecification under
    the parent, rather than via this call. Think of this call as reserved
    for defining relationships between existing assets (such as
    connecting multiple image sequences published under the same shot as
    being part of the same render), and 'register' as defining the
    relationship between a new asset and an existing one.

    In systems that don't support post-creation adjustment of
    relationships, this may simply be a no-op.

    @return None

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.

    @see @ref getRelatedEntities()
    @see @ref register()
    """
    refs = []
    for entity in entities:
        refs.append(entity.reference)
    return self.__interface.setRelatedReferences(
        self.__reference, relationshipSpec, refs, context, append)
## @}
##
# @name Publishing
#
#
# Certain Managers may have high latencies due to cloud hosting, or some such
# other fun and games. In order to attempt to improve performance in these
# situations, the API provides 'batch' alternatives to some of the well-used
# calls. These are suffixed 'multiple'. One point to consider here is that
# because Contexts can't be copied there is a slightly reduced scoped for
# informing the Manager of the locale/etc... as a single context must be used
# for all grouped actions.
#
# @{
@debugApiCall
@auditApiCall("Entity methods")
def preflight(self, spec, context):
    """
    Signals your intent as a Host application to do some work to create a
    file in relation to this Entity.

    @note Only applicable when the Manager sets the @ref
    python.constants.kWillManagePath bit in response to a @ref
    python.Manager.Manager.managementPolicy for the Specification of
    Entity you intend to publish.

    This Entity does not need to exist yet (see @ref entity_reference),
    or it may be a parent Entity you are about to create a child of, or
    some other similar relationship - it doesn't really matter, as this
    Entity will ultimately have been determined by interaction with the
    Manager, which will have returned something meaningful.

    Call this before register() when you are about to create media or
    write to files. If the file or data already exists, preflight is not
    needed. It returns a working Entity that can be resolved/etc... in
    order to determine a working path the files should be written to.

    This call allows sanity checking, placeholder creation or any other
    sundry preparatory actions to be carried out by the Manager. For
    file-based Entities, the Manager may even use this opportunity to
    switch to some temporary working path or similar.

    \note It is vital that the \ref Context is well configured here, in
    particular 'ref python.Context.Context.retention "Context.retention".
    See @ref examples_save - using the working Entity rather than the
    initial Entity is essential for proper behaviour.

    @return python.Entity.Entity or None, a working @ref Entity that you
    should resolve to determine the path to write media to. This may or
    may not be the same as the input reference; resolve it to get a
    working file path before writing any files.

    @exception python.exceptions.InvalidEntityReference If the Entity is
    not recognised by the Manager.
    @exception python.exceptions.PreflightError If some fatal exception
    happens during preflight, indicating the process should be aborted.
    @exception python.exceptions.RetryableError If any non-fatal error
    occurs that means the call can be re-tried.

    @see preflightMultiple
    @see register
    @see registerMultiple
    """
    workingRef = self.__manager.preflight(self.__reference, spec, context)
    if not workingRef:
        return None
    return Entity(workingRef, self.__manager)
@debugApiCall
@auditApiCall("Entity methods")
def preflightMultiple(self, specs, context):
    """
    A batch version of preflight, taking an array of specs instead of a
    single Specification, and returning an array of Entities.

    @note Only advisable to call when the Manager has set the
    kSupportsBatchOperations bit in the managementPolicy bitfield for the
    applicable EntitySpecification.
    """
    # Preflight every spec against this same target reference.
    targets = [self.__reference] * len(specs)
    workingRefs = self.__manager.preflightMultiple(targets, specs, context)
    return [Entity(ref, self.__manager) if ref else None
            for ref in workingRefs]
@debugApiCall
@auditApiCall("Entity methods")
def register(self, stringData, spec, context, metadata=None):
    """
    Registers a new Entity, either when originating new data within the
    application process, or when referencing some existing file, media or
    information.

    @note Registration applies to all kinds of Manager, as long as the
    @ref python.constants.kIgnored bit is not set in response to a @ref
    python.Manager.Manager.managementPolicy for the Specification of
    Entity being published. When that bit is set, the Manager is saying
    it doesn't handle that Specification of Entity, and it should not be
    registered.

    As this Entity ultimately came from the Manager (in response to
    delegation of UI/etc... or as a return from another call), the
    Manager can be assumed to understand what it means to call 'register'
    on it with the supplied Specification. The conceptual meaning is:

    "I have this Entity (self in this case), and I would like to register
    a new Entity to it with this Specification, to hold the supplied
    stringData. I trust that this is ok, and you will give me back the
    Entity that represents the result of this."

    It is up to the Manager to understand the correct result for the
    particular Specification in relation to this Entity. For example, if
    this Entity was received in response to browsing for a target to
    'kWriteMultiple' ShotSpecifications, the Manager should have returned
    an Entity that register() can be called on multiple times with a
    ShotSpecification without error, each result referencing the newly
    created Shot.

    @warning When registering files, never assume the resulting Entity
    will resolve to the same path. Managers may freely relocate, copy,
    move or rename files as part of registration.

    @param stringData str, the @ref primary_string for this Entity - the
    string the resulting Entity will resolve to. For file-based Entities
    this is the file path, and it may be further modified by Managers
    that take care of relocating or managing file storage. For paths
    representing file sequences, frame tokens should be left
    un-substituted in a sprintf compatible format, eg. "%04d", rather
    than, say, the #### based method. If your application uses hashes or
    some other scheme, convert to/from the sprintf format as part of your
    integration.
    @param spec python.specifications.Specfication, the
    EntitySpecification for the new registration.

    @see python.specifications
    @see registerMultiple
    @see preflight
    @see preflightMultiple
    """
    newRef = self.__manager.register(
        stringData, self.__reference, spec, context, metadata=metadata)
    if not newRef:
        return None
    return Entity(newRef, self.__manager)
@debugApiCall
@auditApiCall("Entity methods")
def registerMultiple(self, strings, specs, context):
    """
    A batch version of register, taking equal-length arrays of strings
    and specs and returning a list of Entities.

    @note Only advisable to call when the Manager has set the
    kSupportsBatchOperations bit in the managementPolicy bitfield for the
    applicable EntitySpecification.
    """
    # Register every string/spec pair against this same target reference.
    targets = [self.__reference] * len(strings)
    newRefs = self.__manager.registerMultiple(strings, targets, specs, context)
    results = []
    for ref in newRefs:
        results.append(Entity(ref, self.__manager) if ref else None)
    return results
@debugApiCall
@auditApiCall("Entity methods")
def preflightItem(self, item, context):
    """
    An alternate form of preflight that takes an Item derived class,
    using its toSpecification() method to make the standard preflight
    call.

    Return type and exceptions as per \ref preflight.

    @see preflight
    @see registerItem
    """
    spec = item.toSpecification()
    workingRef = self.__manager.preflight(self.__reference, spec, context)
    if not workingRef:
        return None
    return Entity(workingRef, self.__manager)
@debugApiCall
@auditApiCall("Entity methods")
def registerItem(self, item, context):
    """
    An alternate form of register that takes an Item derived class, using
    its getString() and toSpecification() methods to make the standard
    register call.

    Return type and exceptions as per \ref register.

    @see register
    @see preflightItem
    """
    # The Manager implements the metadata extended signature that takes
    # care of calling setMetadata.
    newRef = self.__manager.register(
        item.getString(), self.__reference, item.toSpecification(),
        context, item.toMetadata())
    if not newRef:
        return None
    return Entity(newRef, self.__manager)
## @}
| [
"dev@thesociable.net"
] | dev@thesociable.net |
1119b7cb0f9c85f7fa3a4421a3c00101eb810077 | 1595b644191c9c18a5503379703347a853b63348 | /investpy/stocks.py | bf4c06d8a6d8d0ebe499d46de5747c6612ac4bfb | [
"MIT"
] | permissive | ben-haim/investpy | d26c05c3e957d3ba623f408076746edbf5a8107b | 7ace4ac7693f505c199074de3333f56e6b89cfef | refs/heads/master | 2022-05-30T00:43:00.473082 | 2019-11-20T15:45:37 | 2019-11-20T15:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,164 | py | #!/usr/bin/python3
# Copyright 2018-2019 Alvaro Bartolome @ alvarob96 in GitHub
# See LICENSE for details.
from datetime import datetime, date
import json
from random import randint
import pandas as pd
import pkg_resources
import requests
import unidecode
from lxml.html import fromstring
from investpy.utils.user_agent import get_random
from investpy.utils.data import Data
from investpy.data.stocks_data import stocks_as_df, stocks_as_list, stocks_as_dict
from investpy.data.stocks_data import stock_countries_as_list
def get_stocks(country=None):
    """
    Returns all the stock data stored in the `stocks.csv` file, which was
    previously retrieved from Investing.com, as a :obj:`pandas.DataFrame`
    structured in rows and columns, where the columns are the stock data
    attribute names. An optional country filter can be applied so that
    only the stocks from the introduced country are returned instead of
    all the stored ones.

    Args:
        country (:obj:`str`, optional): name of the country to retrieve all its available stocks from.

    Returns:
        :obj:`pandas.DataFrame` - stocks_df:
            All the stock data from the introduced country if specified,
            or from every country otherwise, as indexed in Investing.com
            from the information previously retrieved by investpy and
            stored on a csv file, with the following layout::

                country | name | full name | isin | currency | symbol
                --------|------|-----------|------|----------|--------
                xxxxxxx | xxxx | xxxxxxxxx | xxxx | xxxxxxxx | xxxxxx

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid.
        FileNotFoundError: raised if stocks file was not found.
        IOError: raised when stocks file is missing or empty.
    """
    return stocks_as_df(country=country)
def get_stocks_list(country=None):
    """
    Returns all the stock symbols stored in the `stocks.csv` file, which
    holds the data previously retrieved from Investing.com. These symbols
    are one of the input parameters of the stock data retrieval functions
    from investpy, so this listing is useful for discovering valid
    values. A country filter can optionally be applied, which matters
    since data retrieval functions require both symbol and country and
    they must match.

    Args:
        country (:obj:`str`, optional): name of the country to retrieve all its available stocks from.

    Returns:
        :obj:`list` - stocks_list:
            All the stock symbols from the introduced country if
            specified, or from every country otherwise, as indexed in
            Investing.com from the information previously retrieved by
            investpy and stored on a csv file. On success it looks like::

                stocks_list = ['TS', 'APBR', 'GGAL', 'TXAR', 'PAMP', ...]

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid.
        FileNotFoundError: raised if stocks file was not found.
        IOError: raised when stocks file is missing or empty.
    """
    return stocks_as_list(country=country)
def get_stocks_dict(country=None, columns=None, as_json=False):
    """
    Returns all the stock information stored in the `stocks.csv` file
    formatted as a Python dictionary: every row of the file becomes a
    :obj:`dict` and all of them are contained in a :obj:`list` (the same
    structure a JSON object would have). Optional parameters allow
    filtering by country, selecting just some column names to avoid
    loading unnecessary information, and choosing whether the result is
    returned as a JSON object or as a dictionary.

    Args:
        country (:obj:`str`, optional): name of the country to retrieve all its available stocks from.
        columns (:obj:`list`, optional): column names of the stock data to retrieve, can be: <country, name, full_name, isin, currency, symbol>
        as_json (:obj:`bool`, optional): if True the returned data will be a :obj:`json` object, if False, a :obj:`list` of :obj:`dict`.

    Returns:
        :obj:`list` of :obj:`dict` OR :obj:`json` - stocks_dict:
            The retrieved data from every stock as indexed in
            Investing.com from the information previously retrieved by
            investpy and stored on a csv file. On success every entry
            looks like::

                stocks_dict = {
                    'country': country,
                    'name': name,
                    'full_name': full_name,
                    'tag': tag,
                    'isin': isin,
                    'id': id,
                    'currency': currency,
                    'symbol': symbol,
                }

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid.
        FileNotFoundError: raised if stocks file was not found.
        IOError: raised when stocks file is missing or empty.
    """
    stocks = stocks_as_dict(country=country, columns=columns, as_json=as_json)
    return stocks
def get_stock_countries():
    """
    Returns a listing of all the available countries from where stocks
    can be retrieved. Since the country parameter is mandatory in every
    stock retrieval function, and Investing.com follows its own country
    naming standard, countries must be specified exactly as they appear
    in this listing.

    Returns:
        :obj:`list` - countries:
            All the available countries with stocks as indexed in
            Investing.com.

    Raises:
        FileNotFoundError: raised if stock countries file was not found.
        IOError: raised when stock countries file is missing or empty.
    """
    countries = stock_countries_as_list()
    return countries
def get_stock_recent_data(stock, country, as_json=False, order='ascending', interval='Daily'):
    """
    Retrieve recent historical data of the introduced stock from Investing.com.

    The recent data of the introduced stock from the specified country is
    scraped and returned as a :obj:`pandas.DataFrame` (or :obj:`json`) with
    open, high, low, close and volume values on market days, plus the currency
    in which those values are quoted.

    Args:
        stock (:obj:`str`): symbol of the stock to retrieve recent historical data from.
        country (:obj:`str`): name of the country from where the stock is.
        as_json (:obj:`bool`, optional):
            to determine the format of the output data, either a :obj:`pandas.DataFrame` if False and a :obj:`json` if True.
        order (:obj:`str`, optional): order of the retrieved data by date, either ascending (asc) or descending (desc).
        interval (:obj:`str`, optional): candle interval, either `Daily`, `Weekly` or `Monthly`.

    Returns:
        :obj:`pandas.DataFrame` or :obj:`json`:
            Recent OHLC + volume data of the stock, indexed by date when a
            :obj:`pandas.DataFrame` is returned; when ``as_json=True`` the
            object has the shape ``{'name': ..., 'recent': [...]}``.

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid or errored.
        FileNotFoundError: raised if the stocks file was not found.
        IOError: raised if stocks object/file was not found or unable to retrieve.
        RuntimeError: raised if the introduced stock/country was not found or did not match any of the existing ones.
        ConnectionError: raised if connection to Investing.com could not be established.
        IndexError: raised if stock recent data was unavailable or not found in Investing.com.
    """
    if not stock:
        raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock name.")
    if not isinstance(stock, str):
        raise ValueError("ERR#0027: stock argument needs to be a str.")
    if country is None:
        raise ValueError("ERR#0039: country can not be None, it should be a str.")
    if country is not None and not isinstance(country, str):
        raise ValueError("ERR#0025: specified country value not valid.")
    if not isinstance(as_json, bool):
        raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
    if order not in ['ascending', 'asc', 'descending', 'desc']:
        raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
    # The three original interval checks all raised the same error message, so
    # they collapse into one condition.
    if not interval or not isinstance(interval, str) or interval not in ['Daily', 'Weekly', 'Monthly']:
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
    if stocks is None:
        raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
    if unidecode.unidecode(country.lower()) not in get_stock_countries():
        raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
    stocks = stocks[stocks['country'] == unidecode.unidecode(country.lower())]
    stock = stock.strip()
    stock = stock.lower()
    if unidecode.unidecode(stock) not in [unidecode.unidecode(value.lower()) for value in stocks['symbol'].tolist()]:
        raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
    # Resolve the matching row index once instead of recomputing the boolean
    # mask and idxmax() for every field.
    match_index = (stocks['symbol'].str.lower() == stock).idxmax()
    symbol = stocks.loc[match_index, 'symbol']
    id_ = stocks.loc[match_index, 'id']
    name = stocks.loc[match_index, 'name']
    stock_currency = stocks.loc[match_index, 'currency']
    header = symbol + ' Historical Data'
    params = {
        "curr_id": id_,
        "smlID": str(randint(1000000, 99999999)),
        "header": header,
        "interval_sec": interval,
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data"
    }
    head = {
        "User-Agent": get_random(),
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "text/html",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
    }
    url = "https://www.investing.com/instruments/HistoricalDataAjax"
    req = requests.post(url, headers=head, data=params)
    if req.status_code != 200:
        raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
    root_ = fromstring(req.text)
    path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
    result = list()
    if path_:
        for elements_ in path_:
            if elements_.xpath(".//td")[0].text_content() == 'No results found':
                raise IndexError("ERR#0007: stock information unavailable or not found.")
            # Each <td> carries the raw value in its data-real-value attribute:
            # [timestamp, close, open, high, low, volume].
            info = [nested_.get('data-real-value') for nested_ in elements_.xpath(".//td")]
            stock_date = datetime.fromtimestamp(int(info[0]))
            stock_date = date(stock_date.year, stock_date.month, stock_date.day)
            stock_close = float(info[1].replace(',', ''))
            stock_open = float(info[2].replace(',', ''))
            stock_high = float(info[3].replace(',', ''))
            stock_low = float(info[4].replace(',', ''))
            # Volume is reported with a K/M/B magnitude suffix.
            stock_volume = 0
            for suffix, multiplier in (('K', 1e3), ('M', 1e6), ('B', 1e9)):
                if suffix in info[5]:
                    stock_volume = int(float(info[5].replace(suffix, '').replace(',', '')) * multiplier)
                    break
            result.append(Data(stock_date, stock_open, stock_high, stock_low,
                               stock_close, stock_volume, stock_currency))
        # The server returns data newest-first (sort_ord=DESC); reverse for
        # ascending order. Descending needs no work (the old no-op branch is gone).
        if order in ['ascending', 'asc']:
            result = result[::-1]
        if as_json is True:
            json_ = {
                'name': name,
                'recent':
                    [value.stock_as_json() for value in result]
            }
            return json.dumps(json_, sort_keys=False)
        df = pd.DataFrame.from_records([value.stock_to_dict() for value in result])
        df.set_index('Date', inplace=True)
        return df
    else:
        raise RuntimeError("ERR#0004: data retrieval error while scraping.")
def get_stock_historical_data(stock, country, from_date, to_date, as_json=False, order='ascending', interval='Daily'):
    """
    Retrieve historical data of the introduced stock from Investing.com in the
    specified date range.

    The data is returned as a :obj:`pandas.DataFrame` (or :obj:`json`) with
    open, high, low, close and volume values on market days, plus the currency
    in which those values are quoted. Ranges longer than 20 years are split
    into 20-year windows because Investing.com limits each request.

    Args:
        stock (:obj:`str`): symbol of the stock to retrieve historical data from.
        country (:obj:`str`): name of the country from where the stock is.
        from_date (:obj:`str`): date formatted as `dd/mm/yyyy`, since when data is going to be retrieved.
        to_date (:obj:`str`): date formatted as `dd/mm/yyyy`, until when data is going to be retrieved.
        as_json (:obj:`bool`, optional):
            to determine the format of the output data, either a :obj:`pandas.DataFrame` if False and a :obj:`json` if True.
        order (:obj:`str`, optional): order of the retrieved data by date, either ascending (asc) or descending (desc).
        interval (:obj:`str`, optional): candle interval, either `Daily`, `Weekly` or `Monthly`.

    Returns:
        :obj:`pandas.DataFrame` or :obj:`json`:
            Historical OHLC + volume data of the stock, indexed by date when a
            :obj:`pandas.DataFrame` is returned; when ``as_json=True`` the
            object has the shape ``{'name': ..., 'historical': [...]}``.

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid or errored.
        FileNotFoundError: raised if the stocks file was not found.
        IOError: raised if stocks object/file was not found or unable to retrieve.
        RuntimeError: raised if the introduced stock/country was not found or did not match any of the existing ones.
        ConnectionError: raised if connection to Investing.com could not be established.
        IndexError: raised if stock historical data was unavailable or not found in Investing.com.
    """
    if not stock:
        raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock name.")
    if not isinstance(stock, str):
        raise ValueError("ERR#0027: stock argument needs to be a str.")
    if country is None:
        raise ValueError("ERR#0039: country can not be None, it should be a str.")
    if country is not None and not isinstance(country, str):
        raise ValueError("ERR#0025: specified country value not valid.")
    if not isinstance(as_json, bool):
        raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
    if order not in ['ascending', 'asc', 'descending', 'desc']:
        raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
    # The three original interval checks all raised the same error message, so
    # they collapse into one condition.
    if not interval or not isinstance(interval, str) or interval not in ['Daily', 'Weekly', 'Monthly']:
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
    try:
        datetime.strptime(from_date, '%d/%m/%Y')
    except ValueError:
        raise ValueError("ERR#0011: incorrect from_date date format, it should be 'dd/mm/yyyy'.")
    try:
        datetime.strptime(to_date, '%d/%m/%Y')
    except ValueError:
        raise ValueError("ERR#0012: incorrect to_date format, it should be 'dd/mm/yyyy'.")
    start_date = datetime.strptime(from_date, '%d/%m/%Y')
    end_date = datetime.strptime(to_date, '%d/%m/%Y')
    if start_date >= end_date:
        raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.")
    # Split the requested range into windows of at most 20 years, the maximum
    # span Investing.com serves per request. Dates are sent in US format.
    date_interval = {
        'intervals': [],
    }
    while True:
        if end_date.year - start_date.year > 20:
            window_end = start_date.replace(year=start_date.year + 20)
            date_interval['intervals'].append({
                'start': start_date.strftime('%m/%d/%Y'),
                'end': window_end.strftime('%m/%d/%Y'),
            })
            start_date = window_end
        else:
            date_interval['intervals'].append({
                'start': start_date.strftime('%m/%d/%Y'),
                'end': end_date.strftime('%m/%d/%Y'),
            })
            break
    interval_limit = len(date_interval['intervals'])
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
    if stocks is None:
        raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
    if unidecode.unidecode(country.lower()) not in get_stock_countries():
        raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
    stocks = stocks[stocks['country'] == unidecode.unidecode(country.lower())]
    stock = stock.strip()
    stock = stock.lower()
    if unidecode.unidecode(stock) not in [unidecode.unidecode(value.lower()) for value in stocks['symbol'].tolist()]:
        raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
    # Resolve the matching row index once instead of recomputing the boolean
    # mask and idxmax() for every field.
    match_index = (stocks['symbol'].str.lower() == stock).idxmax()
    symbol = stocks.loc[match_index, 'symbol']
    id_ = stocks.loc[match_index, 'id']
    name = stocks.loc[match_index, 'name']
    stock_currency = stocks.loc[match_index, 'currency']
    final = list()
    header = symbol + ' Historical Data'
    url = "https://www.investing.com/instruments/HistoricalDataAjax"
    for index, window in enumerate(date_interval['intervals']):
        params = {
            "curr_id": id_,
            "smlID": str(randint(1000000, 99999999)),
            "header": header,
            "st_date": window['start'],
            "end_date": window['end'],
            "interval_sec": interval,
            "sort_col": "date",
            "sort_ord": "DESC",
            "action": "historical_data"
        }
        head = {
            "User-Agent": get_random(),
            "X-Requested-With": "XMLHttpRequest",
            "Accept": "text/html",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
        }
        req = requests.post(url, headers=head, data=params)
        if req.status_code != 200:
            raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
        if not req.text:
            continue
        root_ = fromstring(req.text)
        path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
        result = list()
        data_flag = False
        if path_:
            for elements_ in path_:
                if elements_.xpath(".//td")[0].text_content() == 'No results found':
                    # An empty window is only an error when it is the last one;
                    # earlier windows may legitimately predate the listing.
                    if index + 1 == interval_limit:
                        raise IndexError("ERR#0007: stock information unavailable or not found.")
                    data_flag = False
                else:
                    data_flag = True
                if data_flag is True:
                    # Each <td> carries the raw value in its data-real-value
                    # attribute: [timestamp, close, open, high, low, volume].
                    info = [nested_.get('data-real-value') for nested_ in elements_.xpath(".//td")]
                    stock_date = datetime.fromtimestamp(int(info[0]))
                    stock_date = date(stock_date.year, stock_date.month, stock_date.day)
                    stock_close = float(info[1].replace(',', ''))
                    stock_open = float(info[2].replace(',', ''))
                    stock_high = float(info[3].replace(',', ''))
                    stock_low = float(info[4].replace(',', ''))
                    # Volume is reported with a K/M/B magnitude suffix.
                    stock_volume = 0
                    for suffix, multiplier in (('K', 1e3), ('M', 1e6), ('B', 1e9)):
                        if suffix in info[5]:
                            stock_volume = int(float(info[5].replace(suffix, '').replace(',', '')) * multiplier)
                            break
                    result.append(Data(stock_date, stock_open, stock_high, stock_low,
                                       stock_close, stock_volume, stock_currency))
            if data_flag is True:
                if order in ['ascending', 'asc']:
                    result = result[::-1]
                if as_json is True:
                    json_ = {
                        'name': name,
                        'historical':
                            [value.stock_as_json() for value in result]
                    }
                    final.append(json_)
                else:
                    df = pd.DataFrame.from_records([value.stock_to_dict() for value in result])
                    df.set_index('Date', inplace=True)
                    final.append(df)
        else:
            raise RuntimeError("ERR#0004: data retrieval error while scraping.")
    if as_json is True:
        # Bug fix: merge every window so that ranges longer than 20 years are
        # not silently truncated to the first window (the previous behaviour
        # returned final[0] only, dropping the later windows).
        json_ = final[0]
        for partial in final[1:]:
            json_['historical'].extend(partial['historical'])
        return json.dumps(json_, sort_keys=False)
    return pd.concat(final)
def get_stock_company_profile(stock, country='spain', language='english'):
    """
    Retrieve the company profile of a stock company in the specified language.

    Useful when NLP techniques are applied to stocks, since the company
    profile is a short, company-written description of what the company does.
    The profile can be retrieved either in english (from Investing.com) or in
    spanish (from Bolsa de Madrid); only the source changes, the resulting
    object is the same. Note that this functionality is currently supported
    for spanish stocks only.

    Args:
        stock (:obj:`str`): symbol of the stock to retrieve its company profile from.
        country (:obj:`str`): name of the country from where the stock is.
        language (:obj:`str`, optional): language of the profile, either english (en) or spanish (es).

    Returns:
        :obj:`dict` - company_profile:
            A :obj:`dict` with the source `url` and the retrieved `desc`
            (description); `desc` stays None when no profile was found.

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid or errored.
        FileNotFoundError: raised if the `stocks.csv` file was not found.
        IOError: raised if stocks object/file was not found or unable to retrieve.
        RuntimeError: raised if the introduced stock/country was not found or did not match any of the existing ones.
        ConnectionError: raised if connection to Investing.com could not be established.
    """
    available_sources = {
        'english': 'Investing',
        'en': 'Investing',
        'spanish': 'Bolsa de Madrid',
        'es': 'Bolsa de Madrid',
    }
    if not stock:
        raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock name.")
    if not isinstance(stock, str):
        raise ValueError("ERR#0027: stock argument needs to be a str.")
    if country is None:
        raise ValueError("ERR#0039: country can not be None, it should be a str.")
    if country is not None and not isinstance(country, str):
        raise ValueError("ERR#0025: specified country value not valid.")
    if language.lower() not in available_sources.keys():
        raise ValueError(
            "ERR#0014: the specified language is not valid, it can just be either spanish (es) or english (en).")
    if unidecode.unidecode(country.lower()) not in ['spain']:
        raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
    selected_source = available_sources[language.lower()]
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
    if stocks is None:
        raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
    stocks = stocks[stocks['country'] == unidecode.unidecode(country.lower())]
    stock = stock.strip()
    # Bug fix: normalize to lower case like every other lookup function does.
    # Without this, `stocks['symbol'].str.lower() == stock` never matches
    # upper-case input and idxmax() silently picks the first row, returning
    # the profile of the wrong company.
    stock = stock.lower()
    if unidecode.unidecode(stock) not in [unidecode.unidecode(value.lower()) for value in
                                          stocks['symbol'].tolist()]:
        raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
    company_profile = {
        'url': None,
        'desc': None
    }
    head = {
        "User-Agent": get_random(),
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "text/html",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
    }
    # Resolve the matching row index once.
    match_index = (stocks['symbol'].str.lower() == stock).idxmax()
    if selected_source == 'Bolsa de Madrid':
        isin = stocks.loc[match_index, 'isin']
        url = "http://www.bolsamadrid.es/esp/aspx/Empresas/FichaValor.aspx?ISIN=" + isin
        company_profile['url'] = url
        req = requests.get(url, headers=head)
        if req.status_code != 200:
            raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
        root_ = fromstring(req.text)
        path_ = root_.xpath(".//td[contains(@class, 'Perfil')]/p")
        if path_:
            # Join the profile paragraphs, skipping paragraphs that are links,
            # then collapse all whitespace runs into single spaces.
            text = ''.join(element_.text_content() for element_ in path_
                           if not element_.xpath(".//a"))
            company_profile['desc'] = ' '.join(text.replace('\n', ' ').replace('\xa0', ' ').split())
        return company_profile
    else:
        tag = stocks.loc[match_index, 'tag']
        url = "https://www.investing.com/equities/" + tag + "-company-profile"
        company_profile['url'] = url
        req = requests.get(url, headers=head)
        if req.status_code != 200:
            raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
        root_ = fromstring(req.text)
        path_ = root_.xpath(".//*[@id=\"profile-fullStory-showhide\"]")
        if path_:
            company_profile['desc'] = str(path_[0].text_content())
        return company_profile
def _parse_dividend_row(row_, type_values):
    """
    Parse one <tr> row of the Investing.com dividends table into a record.

    Args:
        row_: lxml element of the table row to parse.
        type_values (:obj:`dict`): mapping from Investing.com `data-value` codes to dividend type names.

    Returns:
        :obj:`dict` with keys Date, Dividend, Type, Payment Date and Yield;
        fields stay None when not present in the row.
    """
    dividend_date = dividend_value = dividend_type = dividend_payment_date = dividend_yield = None
    for element_ in row_.xpath(".//td"):
        if element_.get('class') and 'first' in element_.get('class'):
            # Dates arrive as 'dd.mm.yyyy'; values use European number format
            # ('.' thousands separator, ',' decimal mark).
            dividend_date = datetime.strptime(element_.text_content().strip().replace('.', '-'), '%d-%m-%Y')
            dividend_value = float(element_.getnext().text_content().replace('.', '').replace(',', '.'))
        if element_.get('data-value') in type_values:
            dividend_type = type_values[element_.get('data-value')]
            dividend_payment_date = datetime.strptime(element_.getnext().text_content().strip().replace('.', '-'), '%d-%m-%Y')
            next_element_ = element_.getnext()
            dividend_yield = next_element_.getnext().text_content()
    return {
        'Date': dividend_date,
        'Dividend': dividend_value,
        'Type': dividend_type,
        'Payment Date': dividend_payment_date,
        'Yield': dividend_yield,
    }


def get_stock_dividends(stock, country):
    """
    Retrieve the dividends of the introduced stock: rewards paid to the
    shareholders for their investment in a company's stock/equity.

    Dividends data include the date of the dividend, its value, type, payment
    date and yield; useful when it comes to creating portfolios.

    Args:
        stock (:obj:`str`): symbol of the stock to retrieve its dividends from.
        country (:obj:`str`): name of the country from where the stock is from.

    Returns:
        :obj:`pandas.DataFrame` - stock_dividends:
            One row per dividend with columns Date, Dividend, Type,
            Payment Date and Yield.

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid or errored.
        FileNotFoundError: raised if the stocks file was not found.
        IOError: raised if stocks object/file was not found or unable to retrieve.
        RuntimeError: raised if the introduced stock/country was not found, or the stock has no dividends data.
        ConnectionError: raised if connection to Investing.com could not be established.
    """
    if not stock:
        raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock symbol.")
    if not isinstance(stock, str):
        raise ValueError("ERR#0027: stock argument needs to be a str.")
    if country is None:
        raise ValueError("ERR#0039: country can not be None, it should be a str.")
    if country is not None and not isinstance(country, str):
        raise ValueError("ERR#0025: specified country value not valid.")
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
    if stocks is None:
        raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
    if unidecode.unidecode(country.lower()) not in get_stock_countries():
        raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
    stocks = stocks[stocks['country'].str.lower() == unidecode.unidecode(country.lower())]
    stock = stock.strip()
    stock = stock.lower()
    if unidecode.unidecode(stock) not in [unidecode.unidecode(value.lower()) for value in stocks['symbol'].tolist()]:
        raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
    tag_ = stocks.loc[(stocks['symbol'].str.lower() == stock).idxmax(), 'tag']
    headers = {
        "User-Agent": get_random(),
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "text/html",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
    }
    url = 'https://es.investing.com/equities/' + str(tag_) + '-dividends'
    req = requests.get(url=url, headers=headers)
    if req.status_code != 200:
        raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
    root_ = fromstring(req.text)
    path_ = root_.xpath(".//table[contains(@id, 'dividendsHistoryData')]")
    if not path_:
        raise RuntimeError("ERR#0061: introduced stock has no dividend's data to display.")
    more_results_id = path_[0].get('id').replace('dividendsHistoryData', '')
    path_ = root_.xpath(".//table[@id='dividendsHistoryData" + str(more_results_id) + "']/tbody/tr")
    objs = list()
    type_values = {
        '1': 'monthly',
        '2': 'quarterly',
        '3': 'semi_annual',
        '4': 'annual',
        '5': 'trailing_twelve_months',
    }
    if path_:
        # Keep the timestamp of the last visible row; it is the pagination
        # cursor for the MoreDividendsHistory endpoint below.
        last_timestamp = path_[-1].get('event_timestamp')
        for elements_ in path_:
            objs.append(_parse_dividend_row(elements_, type_values))
    flag = True
    while flag is True:
        headers = {
            "User-Agent": get_random(),
            "X-Requested-With": "XMLHttpRequest",
            "Accept": "text/html",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
        }
        params = {
            'pairID': int(more_results_id),
            'last_timestamp': int(last_timestamp)
        }
        url = 'https://es.investing.com/equities/MoreDividendsHistory'
        req = requests.post(url=url, headers=headers, params=params)
        if req.status_code != 200:
            raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
        res = req.json()
        if res['hasMoreHistory'] is False:
            flag = False
        root_ = fromstring(res['historyRows'])
        path_ = root_.xpath(".//tr")
        if path_:
            last_timestamp = path_[-1].get('event_timestamp')
            for elements_ in path_:
                objs.append(_parse_dividend_row(elements_, type_values))
    df = pd.DataFrame(objs)
    return df
def search_stocks(by, value):
    """
    Search stocks by the introduced value for the specified field.

    This function checks whether any value in the given `stocks.csv` column
    matches the introduced one. Available fields to search stocks are 'name',
    'full_name' and 'isin'; matching is case-insensitive and substring-based.

    Args:
        by (:obj:`str`): name of the field (column) to search in: 'name', 'full_name' or 'isin'.
        value (:obj:`str`): value to search for in the specified field.

    Returns:
        :obj:`pandas.DataFrame` - search_result:
            All the available stocks that match the introduced query, with the
            internal 'tag' and 'id' columns removed and the index reset.

    Raises:
        ValueError: raised if any of the introduced parameters is not valid or errored.
        FileNotFoundError: raised if the stocks file was not found.
        IOError: raised if data could not be retrieved due to file error.
        RuntimeError: raised if no results were found for the introduced value in the introduced field.
    """
    available_search_fields = ['name', 'full_name', 'isin']
    if not by:
        raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
    if not isinstance(by, str):
        raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
    if isinstance(by, str) and by not in available_search_fields:
        raise ValueError('ERR#0026: the introduced field to search can either just be '
                         + ' or '.join(available_search_fields))
    if not value:
        raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
    if not isinstance(value, str):
        raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'stocks', 'stocks.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        stocks = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0056: stocks file not found or errored.")
    if stocks is None:
        raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
    # na=False keeps rows with missing values out of the result, matching the
    # previous `mask == True` filtering while using a proper boolean mask, and
    # without writing a temporary 'matches' column into the frame.
    matches = stocks[by].str.contains(value, case=False, na=False)
    search_result = stocks.loc[matches].copy()
    if search_result.empty:
        raise RuntimeError('ERR#0043: no results were found for the introduced ' + str(by) + '.')
    search_result.drop(columns=['tag', 'id'], inplace=True)
    search_result.reset_index(drop=True, inplace=True)
    return search_result
| [
"alvarob96@usal.es"
] | alvarob96@usal.es |
7046dc932f90d4f7c0c870c901cae93bedbf4f24 | 7788602ce68c35ce0b862cdb897b1ecae6616689 | /ConwaySequence/conway_sequence.py | ae9422249612736e4e516480217b1efd065b84f6 | [] | no_license | JulienLefevreMars/Vrac | 56510360b1f19894b76e3db78f493ae3a29fd9c1 | 3eade1e890338bb7b4741bc5e80bef82151fc568 | refs/heads/main | 2023-07-10T09:56:01.636965 | 2021-08-09T11:19:07 | 2021-08-09T11:19:07 | 312,793,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # Conway's sequence
import sys
def successor(seq):
    """Return the look-and-say successor of *seq*.

    Each maximal run of equal values contributes (run length, value).
    Example: successor([1, 1, 2, 1]) == [2, 1, 1, 2, 1, 1].

    Fixes: the parameter no longer shadows the builtin ``list``, and the
    run scan no longer relies on a mid-condition ``break``.
    """
    result = []
    i = 0
    n = len(seq)
    while i < n:
        # Advance j to the end of the run of values equal to seq[i].
        j = i
        while j < n and seq[j] == seq[i]:
            j += 1
        result.append(j - i)   # run length
        result.append(seq[i])  # run value
        i = j
    return result

def compute_all_terms(N, seq=None):
    """Return the first N+1 terms of the sequence starting from *seq*.

    *seq* defaults to the conventional seed [1]; the original used a
    mutable default argument (``list=[1]``), replaced here with None.
    """
    terms = [[1] if seq is None else seq]
    for _ in range(N):
        terms.append(successor(terms[-1]))
    return terms
def print_one_term(list):
n = len(list)
for i in range(n):
print(list[i],end="")
print()
def print_all_terms(all_terms):
    """Print each term of *all_terms* on its own line."""
    for term in all_terms:
        print_one_term(term)
# Script entry point: the first CLI argument is the number of successor
# steps to generate, starting from the seed term [1].
if __name__ == "__main__":
    N = int(sys.argv[1])
    all_terms = compute_all_terms(N)
    print_all_terms(all_terms)
| [
"julien.lefevre@univ-amu.fr"
] | julien.lefevre@univ-amu.fr |
d4506065b6ed67e5576c14c6c659dffa3046577b | 5b6540708f15b2bf4dea0def02616e8e2ffa3959 | /1_higherLower/higherLower.py | 3a260fde7135e1e548c3fe3cc2cfea052110f766 | [] | no_license | raufkarakas/PythonExercises | dbb6f26c73cde76affdd5a0e12108b9764f2c3b4 | df435c9f3b7d21cb7d89be9093636b156f2217b8 | refs/heads/master | 2016-09-11T13:00:04.116415 | 2015-07-14T20:48:25 | 2015-07-14T20:48:25 | 38,538,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | __author__ = 'rkarakas'
import random
def higherLower():
    """Interactive game: keep prompting until the hidden 0-100 number is guessed."""
    print("So you want to guess what is the chosen number between 0 and 100?")
    secret = random.randint(0, 100)
    attempts = 0
    guess = None
    while guess != secret:
        guess = int(input("What is your guess? -> "))
        attempts += 1
        if guess < secret:
            print("Higher than you think.")
        elif guess > secret:
            print("Lower than you think.")
    print("You found it after %i try! Yaayyy!" % (attempts))
higherLower() | [
"raufkarakas@gmail.com"
] | raufkarakas@gmail.com |
ea7d83273057d07fc9c15b302494fb28ffd4ffeb | b97868808ed9f4c6b52ce86448d022a3b19fcc58 | /src/orthography.py | 890e3d0c869e6bca84cb89a80ca479707dd52548 | [] | no_license | magurevitch/OT-Machine | 2d22b9147c1e6c4b4f1156abf0f08bb24b374e02 | cbc1283ed9adb0305876ce79e6903e90274644f1 | refs/heads/master | 2021-01-23T05:10:06.999685 | 2018-07-29T21:09:16 | 2018-07-29T21:09:16 | 86,283,108 | 1 | 0 | null | 2018-07-29T21:09:17 | 2017-03-27T02:34:10 | Python | UTF-8 | Python | false | false | 609 | py | from src.aho_corasick_node import AhoCorasickNode
class Orthography:
    """Bidirectional text transcoder built from a spelling *map*.

    Two automata are built from the map (the node type is named after
    Aho-Corasick): one applying the mapping (decode) and one applying
    its inverse (encode).
    """

    def __init__(self, map):
        self.map = map
        self.decodeRoot = AhoCorasickNode()
        self.encodeRoot = AhoCorasickNode()
        for (source, target) in map.items():
            self.decodeRoot.addSequence(source, target)
            self.encodeRoot.addSequence(target, source)
        self.decodeRoot.setFailureNodes()
        self.encodeRoot.setFailureNodes()

    def decode(self, text):
        """Apply the forward mapping to *text*."""
        return self.decodeRoot.transform(text)

    def encode(self, text):
        """Apply the inverse mapping to *text*."""
        return self.encodeRoot.transform(text)
"matthewgurevitch@gmail.com"
] | matthewgurevitch@gmail.com |
3e8821ac7ea49e894c7d3f4427b5ea5e156312a6 | 24b8e0405ef923e928e831e1cd127d5defb43b83 | /top_10_common_words_tuple.py | 67932ab4991e55d59def65e469ff397636181637 | [] | no_license | Justintime8/Python_Coding | e223d1698a6897916f4106761582d7470a238437 | 3aae153e5baecd6983c0ed1da3dd5da2bdf2546f | refs/heads/master | 2023-02-15T23:34:21.467362 | 2021-01-14T21:13:53 | 2021-01-14T21:13:53 | 314,700,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | file = input('file name')
# Count word frequencies in the user-named text file and print the ten
# most common words (ties broken by the word itself, descending).
fhand = open(file)
counts = dict()
for line in fhand:
    words = line.split()
    for word in words:
        # get(word, 0) starts an unseen word at zero before incrementing
        counts[word] = counts.get(word, 0) + 1
# Flip each (word, count) pair to (count, word) so sorting orders by count
lst = list()
for key, val in counts.items():
    newtup = (val, key)
    lst.append(newtup)
lst = sorted(lst, reverse=True)
for val, key in lst[:10]:
    print(key, val)
| [
"noreply@github.com"
] | noreply@github.com |
47c023614d7d2ba1c4b4f921d42350aec154cb40 | ace30d0a4b1452171123c46eb0f917e106a70225 | /filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/openstackclient/common/clientmanager.py | 3e1a50e3e6423cbe6c7010004e2266d04e7627b8 | [
"Python-2.0"
] | permissive | juancarlosdiaztorres/Ansible-OpenStack | e98aa8c1c59b0c0040c05df292964520dd796f71 | c01951b33e278de9e769c2d0609c0be61d2cb26b | refs/heads/master | 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 | Python | UTF-8 | Python | false | false | 5,435 | py | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Manage access to the clients, including authenticating when needed."""
import logging
import pkg_resources
import sys
from osc_lib import clientmanager
from osc_lib import shell
LOG = logging.getLogger(__name__)
PLUGIN_MODULES = []
USER_AGENT = 'python-openstackclient'
class ClientManager(clientmanager.ClientManager):
"""Manages access to API clients, including authentication
Wrap osc_lib's ClientManager to maintain compatibility for the existing
plugin V2 interface. Some currently private attributes become public
in osc-lib so we need to maintain a transition period.
"""
# A simple incrementing version for the plugin to know what is available
PLUGIN_INTERFACE_VERSION = "2"
def __init__(
self,
cli_options=None,
api_version=None,
):
super(ClientManager, self).__init__(
cli_options=cli_options,
api_version=api_version,
# TODO(dtroyer): Remove this when osc-lib 1.2 is released
pw_func=shell.prompt_for_password,
)
# TODO(dtroyer): For compatibility; mark this for removal when plugin
# interface v2 is removed
self._region_name = self.region_name
self._interface = self.interface
self._cacert = self.cacert
self._insecure = not self.verify
# store original auth_type
self._original_auth_type = cli_options.auth_type
def setup_auth(self):
"""Set up authentication"""
if self._auth_setup_completed:
return
# NOTE(dtroyer): Validate the auth args; this is protected with 'if'
# because openstack_config is an optional argument to
# CloudConfig.__init__() and we'll die if it was not
# passed.
if self._cli_options._openstack_config is not None:
self._cli_options._openstack_config._pw_callback = \
shell.prompt_for_password
try:
self._cli_options._auth = \
self._cli_options._openstack_config.load_auth_plugin(
self._cli_options.config,
)
except TypeError as e:
self._fallback_load_auth_plugin(e)
return super(ClientManager, self).setup_auth()
def _fallback_load_auth_plugin(self, e):
# NOTES(RuiChen): Hack to avoid auth plugins choking on data they don't
# expect, delete fake token and endpoint, then try to
# load auth plugin again with user specified options.
# We know it looks ugly, but it's necessary.
if self._cli_options.config['auth']['token'] == 'x':
# restore original auth_type
self._cli_options.config['auth_type'] = \
self._original_auth_type
del self._cli_options.config['auth']['token']
del self._cli_options.config['auth']['endpoint']
self._cli_options._auth = \
self._cli_options._openstack_config.load_auth_plugin(
self._cli_options.config,
)
else:
raise e
def is_network_endpoint_enabled(self):
"""Check if the network endpoint is enabled"""
# NOTE(dtroyer): is_service_available() can also return None if
# there is no Service Catalog, callers here are
# not expecting that so fold None into True to
# use Network API by default
return self.is_service_available('network') is not False
# Plugin Support
def get_plugin_modules(group):
    """Import every entry point registered under *group* and return the modules.

    Side effect: each plugin's ``make_client`` factory is attached to the
    ClientManager class (under the plugin's API_NAME) as a ClientCache, so
    clients are constructed lazily on first attribute access.
    """
    mod_list = []
    for ep in pkg_resources.iter_entry_points(group):
        LOG.debug('Found plugin %r', ep.name)
        __import__(ep.module_name)
        module = sys.modules[ep.module_name]
        mod_list.append(module)
        init_func = getattr(module, 'Initialize', None)
        if init_func:
            # Optional plugin hook; the 'x' argument's meaning is not
            # visible here — presumably only truthiness matters. TODO confirm.
            init_func('x')
        # Add the plugin to the ClientManager
        setattr(
            clientmanager.ClientManager,
            module.API_NAME,
            clientmanager.ClientCache(
                getattr(sys.modules[ep.module_name], 'make_client', None)
            ),
        )
    return mod_list
def build_plugin_option_parser(parser):
    """Give every discovered plugin module a chance to extend *parser*."""
    for plugin_module in PLUGIN_MODULES:
        parser = plugin_module.build_option_parser(parser)
    return parser
# Get list of base plugin modules
PLUGIN_MODULES = get_plugin_modules(
'openstack.cli.base',
)
# Append list of external plugin modules
PLUGIN_MODULES.extend(get_plugin_modules(
'openstack.cli.extension',
))
| [
"jcdiaztorres96@gmail.com"
] | jcdiaztorres96@gmail.com |
297ea6ae137218d35d76fd54c88137155633f8c5 | 6ebac72a9a90bf2c1db6eab2e8006028e19bced7 | /autogooge.py | 3ced1eae58f3d100a16ed27689757591675c616b | [
"MIT"
] | permissive | FJen180835/Boothcamp2021 | af64f056a35cc2e6eca4cae17fa03f2363f9466c | 1cbf7a050aa002c7158450494e5e842ae03d15be | refs/heads/main | 2023-02-26T12:16:18.058959 | 2021-02-08T14:18:36 | 2021-02-08T14:18:36 | 330,072,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # autogoogle.py
# Automates a Google search: opens the browser, pastes a keyword,
# submits it, and screenshots the result page.
import webbrowser
import pyautogui as pg
import time
import pyperclip

# If you want to launch a browser binary directly instead:
# from subprocess import Popen
# path = r'C:\ProgramData\Microsoft\Windows\Start Menu\Programs'  (raw string keeps the backslashes literal)

# 1 - open web browser (google)
# NOTE(review): the scheme is misspelled 'httP'; browsers treat schemes
# case-insensitively so it still works, but 'http' would be cleaner.
url = 'httP://www.google.com'
webbrowser.open(url)
time.sleep(3)  # pause so the page is ready before we start typing

# 2 - type the keyword
# pg.write('thailand',interval = 0.25)
keyword = 'ประเทศไทย'
pyperclip.copy(keyword)  # copy the (non-ASCII) keyword onto the clipboard
time.sleep(0.5)
pg.hotkey('ctrl', 'v')  # paste; do not use the '+' combination syntax here
time.sleep(1)

# 3 - press enter to submit the search
pg.press('enter')
time.sleep(10)

# 4 - screenshot the results page
pg.screenshot('thailand.png')
"noreply@github.com"
] | noreply@github.com |
a40063809e4716cdd46bbe177d1c8ff041e98d3f | f8bdab45c1bd9908becd9be6229e941556a6fed2 | /Slinding window/Find All Anagrams in a String.py | 553cfc185848ff3319f0b7da9e8aa9427825964d | [
"MIT"
] | permissive | pkulkar/Algorithms | fefb55eaee5260a77119421d6a181ffa4e94cf9a | 5928c2a488af3cbde8624c13ee15d0afaacde778 | refs/heads/main | 2023-03-07T23:27:53.908910 | 2021-02-27T23:53:36 | 2021-02-27T23:53:36 | 336,630,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | """
Leetcode
438. Find All Anagrams in a String
Given a string s and a non-empty string p, find all the start indices of p's anagrams in s.
Strings consists of lowercase English letters only and the length of both strings s and p will not be larger than 20,100.
The order of output does not matter.
Example 1:
Input:
s: "cbaebabacd" p: "abc"
Output:
[0, 6]
Explanation:
The substring with start index = 0 is "cba", which is an anagram of "abc".
The substring with start index = 6 is "bac", which is an anagram of "abc".
Example 2:
Input:
s: "abab" p: "ab"
Output:
[0, 1, 2]
Explanation:
The substring with start index = 0 is "ab", which is an anagram of "ab".
The substring with start index = 1 is "ba", which is an anagram of "ab".
The substring with start index = 2 is "ab", which is an anagram of "ab".
"""
class Solution:
    def findAnagrams(self, s: str, p: str) -> List[int]:
        """Return every start index in *s* of a substring that is an anagram of *p*.

        Sliding window over *s*: a frequency map of *p* is decremented as
        the window grows and restored as it shrinks; *matched* counts how
        many distinct characters currently have their required frequency.
        """
        need = {}
        for ch in p:
            need[ch] = need.get(ch, 0) + 1
        result = []
        matched = 0
        left = 0
        for right, ch in enumerate(s):
            if ch in need:
                need[ch] -= 1
                if need[ch] == 0:
                    matched += 1
            # Window [left, right] holds an anagram when every needed
            # character is fully matched.
            if matched == len(need):
                result.append(left)
            # Once the window reaches len(p), slide its left edge forward.
            if right >= len(p) - 1:
                out = s[left]
                left += 1
                if out in need:
                    if need[out] == 0:
                        matched -= 1
                    need[out] += 1
        return result
| [
"pkulkar2@binghamton.edu"
] | pkulkar2@binghamton.edu |
3799cedcf7c89a8907d3163df66fbe167a703c2e | b91cd06d8fe68fbca615ac82d107ae0b3ef171cf | /subscribe/migrations/0013_auto_20160420_1849.py | 767c8885751d5ba918172b17d1a082e56f0e8959 | [] | no_license | osp/osp.work.medor.www | e27054ca5f2eb1e12f5215351d5f4c47ce81bc99 | be852b80444b548108b29b5d6acab47dac943439 | refs/heads/master | 2021-01-01T20:17:53.132439 | 2018-02-06T10:20:22 | 2018-02-06T10:20:22 | 26,456,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: redefines Subscription.from_issue as
    an optional small-integer choice field ('à partir du numéro'), with
    issue-range choices running from 'du 1 au 4' through 'du 6 au 9'."""

    dependencies = [
        ('subscribe', '0012_auto_20151229_1628'),
    ]

    operations = [
        migrations.AlterField(
            model_name='subscription',
            name='from_issue',
            field=models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='\xe0 partir du num\xe9ro', choices=[(None, '---'), (1, 'du 1 au 4'), (2, 'du 2 au 5'), (3, 'du 3 au 6'), (4, 'du 4 au 7'), (5, 'du 5 au 8'), (6, 'du 6 au 9')]),
            preserve_default=True,
        ),
    ]
| [
"alexandre@stdin.fr"
] | alexandre@stdin.fr |
fd5bf542b25ff6c479e4536956f11bc9bdf76b70 | 2b75162d530b5b24c73c2641b56af311ca940e94 | /disk/mount-diskimage | 5ee328094ee01066a2a9aad2297fd875d8b0c890 | [
"MIT"
] | permissive | randomstuff/randomstuff | 98f5efdd856f999b1469f1169c63c5d8201e9ddc | ac36b37659574da2543509dbe380a4d2a660283c | refs/heads/main | 2022-10-07T06:10:52.165998 | 2022-10-03T22:06:19 | 2022-10-03T22:06:19 | 24,476,962 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | #!/usr/bin/python
# Mount an image (ISO) using udisks2.
# Usage: open-image foobar.iso
# Dependencies: python, python-dbus, udisks2
import dbus
import os
import sys
# Open the image read-only and hand the raw fd to udisks2 over D-Bus.
fd = os.open(sys.argv[1], os.O_RDONLY)
bus = dbus.SystemBus()
manager = bus.get_object("org.freedesktop.UDisks2",
                         "/org/freedesktop/UDisks2/Manager")
# Create a loop device backed by the image file.
loop = dbus.Interface(manager, "org.freedesktop.UDisks2.Manager").LoopSetup(fd, {})
loop = bus.get_object("org.freedesktop.UDisks2", loop)
# Mount the loop device's filesystem; Mount returns the mount point path.
path = dbus.Interface(loop, "org.freedesktop.UDisks2.Filesystem").Mount({})
# Autoclear: the loop device is torn down automatically after unmount.
dbus.Interface(loop, "org.freedesktop.UDisks2.Loop").SetAutoclear(True, {})
# Emit the mount point (no trailing newline) for callers to consume.
sys.stdout.write(path)
sys.exit(0)
| [
"gabriel.corona@enst-bretagne.fr"
] | gabriel.corona@enst-bretagne.fr | |
5460b53e46cb799a3112fde7b6d5b6969d892c4f | 270145b9c08b7e3d55bc3812e97ee256ef90c06e | /env/bin/easy_install | 3fe32cb3543fc62d2e767d513e049612e7dfe44a | [] | no_license | joiellantero/flack-chatroom | 9ee5a11d88fe8aac840b14dfa05d95ef648f9403 | 33f068cd77ad158d1360332e4070daaded14e07c | refs/heads/master | 2023-03-22T19:17:55.182280 | 2021-03-20T14:50:30 | 2021-03-20T14:50:30 | 263,027,582 | 0 | 0 | null | 2021-03-20T14:50:31 | 2020-05-11T11:50:29 | HTML | UTF-8 | Python | false | false | 274 | #!/Users/joiellantero/Documents/Developer/Flack/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# setuptools console-script shim: normalise argv[0] (strip the Windows
# '-script.pyw' / '.exe' launcher suffix) and hand control to easy_install.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"joie.llantero@eee.upd.edu.ph"
] | joie.llantero@eee.upd.edu.ph | |
0a970e7d7cd7a42f87907077ae728b8c4b17e5c4 | e72e2dc9d3cec63c668b803bba35d4aa9fc3d89c | /webnews/webnews/views/webnews.py | 16289cc8062e00349fa8fbe0cda61e2ac6892f16 | [] | no_license | robertwinchell/InvenioWork | 1cd93a0b4abfe2561c8eb25d21d8cad20a875f8d | cfa8812b911556d56280f2c775133c32924c0ce9 | refs/heads/master | 2021-01-10T18:31:50.982641 | 2014-09-15T13:52:56 | 2014-09-15T13:52:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,283 | py | # -*- coding: utf-8 -*-
"""nwsToolTip Flask Blueprint"""
from flask import Flask
from flask.ext.login import login_required
from flask import request, url_for, g, Markup, redirect, flash,Blueprint,render_template,jsonify, session
from flask.ext import menu
from ..models import NwsToolTip,NwsSTORY,NwsTAG
from flask.ext.menu import register_menu
from sqlalchemy.exc import IntegrityError
from werkzeug.debug import DebuggedApplication
from .. import config
from ..encoder import Encode,Decode
blueprint = Blueprint('webnews', __name__, template_folder='../templates',static_folder='../static' )
#@register_menu(blueprint, 'main.webnews',config.CFG_WEBNEWS_ADMIN_MAIN_NAV)
#@register_menu(blueprint, 'webnews',config.CFG_WEBNEWS_ADMIN_MAIN_NAV)
@blueprint.route(config.CFG_WEBNEWS_MENU_INDEX)
@register_menu(blueprint, 'webnews.menu.search', [config.CFG_WEBNEWS_SEARCH_NAV_NAME, 'glyphicon glyphicon-search', 'general'])
def index():
    """Search landing page: render the first five stories marked SHOW."""
    result = NwsSTORY.query.filter_by(document_status='SHOW').limit(5).all()
    return render_template('search.html', searchResult=result, EncodeStr=Encode)
@blueprint.route(config.CFG_WEBNEWS_SEARCH, methods=['GET', 'POST'])
def search():
    # Full-text story search (note: Python 2 `except X, e` syntax).
    # POST: match the submitted keywords against story titles and bodies.
    # GET: reads obfuscated 'keywords'/'id' query args; keywords == '1'
    #      appears to mean "show story <id> details" — TODO confirm against
    #      the templates — otherwise the keywords are matched against tags.
    if request.method == 'POST':
        try:
            result = NwsSTORY.query.filter(NwsSTORY.title.contains(request.form['keywords']) | NwsSTORY.body.contains(request.form['keywords'])).filter_by(document_status='SHOW').all()
            return render_template('search.html', searchResult=result, resultshow='block', EncodeStr=Encode)
        except IntegrityError, e:
            flash('Error')
            # NOTE(review): 'alert' is assigned but never used, and this
            # branch falls through without returning a response.
            alert = config.CFG_WEBNEWS_ERROR_ALERT
    try:
        keywords = Decode(request.args.get('keywords', Encode(None)))
        id = int(Decode(request.args.get('id', Encode(0))))
        if keywords == '1':
            result1 = NwsSTORY.query.get(id)
            return render_template('details.html', searchResult=result1)
        # Tag search: find matching tags, then the SHOW stories they reference.
        result1 = NwsTAG.query.filter(NwsTAG.tag.contains(keywords)).all()
        result = NwsSTORY.query.filter(NwsSTORY.id.in_(appendToListy(result1))).filter_by(document_status='SHOW').all()
        return render_template('search.html', searchResult=result, resultshow='block', EncodeStr=Encode)
    except IntegrityError, e:
        flash('Error')
        # NOTE(review): same unused 'alert' / missing response as above.
        alert = config.CFG_WEBNEWS_ERROR_ALERT
@blueprint.route('/show_tooltips')
def show_tooltips():
    """Return as JSON the tooltips for *targetpage* (rows whose target_page
    is the page or the '*' wildcard), minus any the session has excluded."""
    targetpage = request.args.get('targetpage', 0, type=str)
    try:
        if session['exclude_ids']:
            result1 = NwsToolTip.query.filter(((NwsToolTip.target_page == targetpage) | (NwsToolTip.target_page == '*')) & (NwsToolTip.target_element.notin_(excludeList(targetpage)))).all()
        else:
            result1 = NwsToolTip.query.filter((NwsToolTip.target_page == targetpage) | (NwsToolTip.target_page == '*')).all()
    except:
        # First visit: the session has no exclusion list yet, so create it
        # and serve the unfiltered tooltip set.
        session['exclude_ids'] = []
        result1 = NwsToolTip.query.filter((NwsToolTip.target_page == targetpage) | (NwsToolTip.target_page == '*')).all()
    return jsonify(tooltip=[i.serialize for i in result1])
def appendToListy(object):
    """Collect the ``id_story`` attribute of every element of *object*."""
    return [entry.id_story for entry in object]
@blueprint.route('/tooltips_exclude')
def exclude_tooltip():
    """Record in the session that *tooltipElement* on *targetpage* should no
    longer be shown; UniqueInsert prevents duplicate entries."""
    targetpage = request.args.get('targetpage', 0, type=str)
    tooltipElement = request.args.get('tooltipElement', 0, type=str)
    SessionList = []
    if session['exclude_ids']:
        SessionList = session['exclude_ids']
        if UniqueInsert(SessionList, tooltipElement, targetpage):
            SessionList.append({'page': targetpage, 'Element': tooltipElement})
            # Re-assign so Flask notices the session changed.
            session['exclude_ids'] = SessionList
    else:
        SessionList = [{'page': targetpage, 'Element': tooltipElement}]
        session['exclude_ids'] = SessionList
    return jsonify(result='added')
def UniqueInsert(Obj, element, page):
    """Return True when no entry in *Obj* already records *element* on *page*."""
    return not any(
        entry['Element'] == element and entry['page'] == page for entry in Obj
    )
def excludeList(page):
    """List the tooltip element names the session has excluded for *page*."""
    excluded = session['exclude_ids']
    if not excluded:
        return []
    return [entry['Element'] for entry in excluded if entry['page'] == page]
"robertzwinchell@gmail.com"
] | robertzwinchell@gmail.com |
869f782ff0372ac7913d63d4ad2dbabc46d43d63 | 1257d37ce73ff6753162db3ccd1c4530306e0ed4 | /libr/final_fine_eval.py | 1b5370947897205392cbf4f81290250fc01ee4e8 | [] | no_license | tanmoydeb07/library | a3685aadd8b5da214a61810b3b3b09a3971f914c | 5a5ca6c63a1d8fc42952f6a90d670b6f25547909 | refs/heads/master | 2021-01-18T07:00:42.034282 | 2014-01-08T06:03:19 | 2014-01-08T06:03:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py | import os
import time
import datetime
import librarian
import sys
import welcome
def main(x):
    """Final fine-evaluation workflow (Python 2 script).

    Builds list_fin1.txt (USN -> pending books) from the student database,
    asks the librarian for an evaluation date, prints the defaulter list,
    then returns to the librarian menu (main1 when x == 1, else main2).

    Fix: the invalid-date retry called ``main()`` without the required
    argument, raising TypeError instead of re-prompting; both retry paths
    now ``return main(x)`` so execution does not fall through after the
    recursive call returns.
    """
    os.system('cls')
    welcome.func()
    # Collect, per student record, the book slots still marked as borrowed
    # (slots whose first entry does not start with '#').
    f = open("/Python27/libr/stud_db1.txt")
    g = open("/Python27/libr/list_fin1.txt", "w")
    g.write("STUDENT USN BOOKS TO BE RETURNED\n")
    newlist = []
    flag = 0
    while True:
        pending = ''
        lis = []
        w = f.readline()
        if len(w) == 0:
            break
        # NOTE(review): eval() of file contents — each line is assumed to be
        # a dict literal written by this application.
        dic = eval(w)
        for k in dic:
            for i in [0, 1, 2]:
                if dic[k][i][0] != '#':
                    lis.append(dic[k][i][0])
        if str(lis) != '':
            pending = str(lis)
        if pending != '':
            g.write(k + " " + pending + "\n")
    f.close()
    g.close()
    da1 = raw_input('Enter the final fine evaluation date(dd/mm/yyyy):')
    try:
        date2 = time.strptime(da1, '%d/%m/%Y')  # validate the date format
    except ValueError:
        print('Invalid date!\nEnter again')
        # FIX: was main() — main requires x; also return so we do not fall
        # through and try to parse the invalid date below.
        return main(x)
    d = da1.split('/')  # extract day, month and year from the date string
    d1 = ''
    for i in d:
        d1 = d1 + i
    s2 = datetime.datetime.strptime(d1, '%d%m%Y').date()
    s1 = datetime.date.today()
    if s2 < s1:
        print("\nYou have typed a date which is already over...Type a valid date\n")
        # FIX: return after the retry instead of continuing below with the
        # stale past date once the recursive session finishes.
        return main(x)
    os.system('cls')
    f = open("/Python27/libr/list_fin1.txt")
    print("\nThe following students are here by informed to return the books mentioned along with their USN on or before "+da1+"\n\nStudents who fail to return any books before mentioend date will have to pay the fine of Rs1000 in order to get the hall ticket\n")
    while True:
        w = f.readline()
        if (len(w) == 0):
            break
        print ("\n"+w)
    f.close()
    print("\nOpen the file 'list_fin1.txt' to get a hard copy of students who are supposed to return books\n")
    print("\n\nInstruction for the librarians:\nPlease calculate the fine after date: "+da1+" so that students can get time to return their due books")
    q = raw_input("1 to go back or any other key to exit")
    if (q == '1'):
        if (x == 1):
            os.system('cls')
            librarian.main1()
        else:
            os.system('cls')
            librarian.main2()
    else:
        sys.exit()
"goyal.aashish6@gmail.com"
] | goyal.aashish6@gmail.com |
714945bba8881e4f76f17f4b3167e6ee38f12f96 | 70c1b886d233491c25721b3fc3d6ced06cb73cdb | /uri - 1287.py | 8fd4c630755ce9670c8b60258e29cd7548a7d798 | [] | no_license | souzajunior/URI | eda3bcb50ad9c93987381443528f656dc2f495c8 | 9f9f5e2c410b70703246fb06d1abfb253b362853 | refs/heads/master | 2022-10-14T00:15:59.417000 | 2020-10-26T18:53:57 | 2020-10-26T18:53:57 | 120,948,144 | 0 | 7 | null | 2022-10-07T17:42:17 | 2018-02-09T19:43:49 | Python | UTF-8 | Python | false | false | 1,126 | py | while True:
try:
entrada = input()
nova_entrada = ''
entrada = entrada.replace(',', '')
entrada = entrada.replace(' ', '')
if (entrada.isnumeric()):
if (int(entrada) > 2147483647):
entrada = ''
for i in entrada:
if ((i == 'O') or (i == 'o')):
nova_entrada += '0'
continue
elif (i == 'l'):
nova_entrada += '1'
continue
elif (i.isnumeric()):
nova_entrada += i
elif (i.isalpha()):
nova_entrada = ''
break
else:
nova_entrada = ''
break
if (nova_entrada == ''):
print('error')
else:
if (nova_entrada.isnumeric()):
if (int(nova_entrada) > 2147483647):
print('error')
else:
print(str(int(nova_entrada)))
else:
print(nova_entrada)
except EOFError:
break | [
"noreply@github.com"
] | noreply@github.com |
24a54a7565b8d38155fddd08742ae1389e50ac05 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5695413893988352_0/Python/algomaus/b.py | 2d3892eda43ade0e73e3d0638dc64a9dc402c531 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | #! /usr/bin/env python
def parse(lines):
    """Return the first int(lines[0]) entries that follow the count line."""
    n = int(lines[0])
    return lines[1:n + 1]
def asInt(string, lastPosition):
    """Interpret string[0..lastPosition] as a decimal integer.

    Returns 0 when *lastPosition* is -1 (empty prefix).  Spaces, brackets
    and commas are stripped defensively before conversion, matching the
    original behaviour.
    """
    if lastPosition == -1:
        return 0
    text = ''.join(string[:lastPosition + 1])
    for junk in (' ', '[', ']', ','):
        text = text.replace(junk, '')
    return int(text)
def solve(word):
splitted = word.split(' ')
coder = []
jammer = []
for i in splitted[0]:
coder.append(i)
for i in splitted[1]:
jammer.append(i)
coder_add = []
jammer_add = []
for i in range(len(coder)):
if coder[i] == '?' and jammer[i] == '?':
if i == 0 or (asInt(coder, i-1) == asInt(jammer, i-1)):
if i+1 < len(coder) and coder[i+1] != '?' and jammer[i+1] != '?':
if coder[i+1] > jammer[i+1]:
coder[i] = '0'
coder_add.append('0')
jammer[i] = '1'
jammer_add.append('1')
elif coder[i+1] < jammer[i+1]:
coder[i] = '1'
coder_add.append('1')
jammer[i] = '0'
jammer_add.append('0')
else:
coder[i] = '0'
coder_add.append(0)
jammer[i] = '0'
jammer_add.append(0)
else:
coder[i] = '0'
coder_add.append(0)
jammer[i] = '0'
jammer_add.append(0)
elif asInt(coder, i-1) > asInt(jammer, i-1):
coder[i] = '0'
coder_add.append(0)
jammer[i] = '9'
jammer_add.append(9)
else:
coder[i] = '9'
coder_add.append(9)
jammer[i] = '0'
jammer_add.append(0)
elif coder[i] == '?':
if asInt(coder, i-1) == asInt(jammer, i-1):
coder[i] = jammer[i]
coder_add.append(jammer[i])
#if int(jammer[i]) <= 5:
#coder[i] = '0'
#coder_add.append(0)
#else:
#coder[i] = '9'
#coder_add.append(9)
elif asInt(coder, i-1) > asInt(jammer, i-1):
coder[i] = '0'
coder_add.append(0)
else:
coder[i] = '9'
coder_add.append(9)
elif jammer[i] == '?':
if asInt(coder, i-1) == asInt(jammer, i-1):
jammer[i] = coder[i]
jammer_add.append(coder[i])
#if int(coder[i]) <= 5:
# jammer[i] = '0'
# jammer_add.append(0)
#else:
# jammer[i] = '9'
# jammer_add.append(9)
elif asInt(coder, i-1) < asInt(jammer, i-1):
jammer[i] = '0'
jammer_add.append(0)
else:
jammer[i] = '9'
jammer_add.append(9)
coder_add_str = str(coder).replace(' ','').replace('[','').replace(']','').replace(',','').replace('\'','')
jammer_add_str = str(jammer).replace(' ','').replace('[','').replace(']','').replace(',','').replace('\'','')
return coder_add_str + " " + jammer_add_str
#with open('A-large.in', 'r') as f:
with open('B-small-attempt1.in', 'r') as f:
words = parse(f.read().splitlines())
for i in range(len(words)):
wordSorted = solve(words[i])
print "Case #" + str(i+1) + ": " + wordSorted
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
09ca051cce57a8694818ca67a0d989dd1149b330 | 7dea08ae1ee8217793fb01b2964b60937ab28587 | /jiuzhang/find-minimum-in-rotated-sorted-array/find-minimum-in-rotated-sorted-array.py | a35502ebe21b3f65b960c76ea398582811238a59 | [] | no_license | chunyang-wen/leetcode | 4ade9e7bb49851f73b369eb118d6cd803923d990 | 109e9b957c06989c163b110c32b0f392d182517c | refs/heads/master | 2021-03-13T00:06:50.952274 | 2016-11-02T04:14:32 | 2016-11-02T04:14:32 | 25,857,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | """
* 本代码由九章算法编辑提供。没有版权欢迎转发。
* - 九章算法致力于帮助更多中国人找到好的工作,教师团队均来自硅谷和国内的一线大公司在职工程师。
* - 现有的面试培训课程包括:九章算法班,系统设计班,九章强化班,Java入门与基础算法班
* - 更多详情请见官方网站:http://www.jiuzhang.com/
"""
class Solution:
    # @param nums: a rotated sorted array
    # @return: the minimum number in the array
    def findMin(self, nums):
        """Binary-search the pivot (minimum) of a rotated sorted array.

        Compares the midpoint against the last element: values <= nums[-1]
        lie in the rotated right half, so the minimum is at or left of mid.
        Returns 0 for an empty array (original convention).

        Fix: uses floor division ``//`` — the original ``(start + end) / 2``
        yields a float under Python 3 and crashes when used as an index.
        """
        if len(nums) == 0:
            return 0
        start, end = 0, len(nums) - 1
        target = nums[-1]
        while start + 1 < end:
            mid = (start + end) // 2
            if nums[mid] <= target:
                end = mid
            else:
                start = mid
        return min(nums[start], nums[end])
| [
"wenchunyang@baidu.com"
] | wenchunyang@baidu.com |
c6b7cea13b93be28f9b651e0c403ac37d8d18e38 | 94118720c1c628cd3dc1b895904206fac6625e76 | /training/train_stella.py | ab66aa640d7c1052b717ef93e64ba6f990abc998 | [] | no_license | ShifatSarwar/STELLA-initial | b8f5186dc0fec67ea2269378021815ecf5b7c859 | 24d2c07946e79aafab3e1b1cbadce6e94850aae5 | refs/heads/main | 2023-08-22T04:40:05.612126 | 2021-10-11T05:13:18 | 2021-10-11T05:13:18 | 407,929,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,369 | py | import numpy as np
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltkutilities import bag_of_words, tokenize, stem
from model import NeuralNet
with open('intents.json', 'r') as f:
intents = json.load(f)
all_words = []
tags = []
xy = []
# loop through each sentence in our intents patterns
for intent in intents['intents']:
tag = intent['tag']
# add to tag list
tags.append(tag)
for pattern in intent['patterns']:
# tokenize each word in the sentence
w = tokenize(pattern)
# add to our words list
all_words.extend(w)
# add to xy pair
xy.append((w, tag))
# stem and lower each word
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
# remove duplicates and sort
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
# create training data
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
# X: bag of words for each pattern_sentence
bag = bag_of_words(pattern_sentence, all_words)
X_train.append(bag)
# y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
label = tags.index(tag)
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
# support indexing such that dataset[i] can be used to get i-th sample
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
# we can call len(dataset) to return the size
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(dtype=torch.long).to(device)
# Forward pass
outputs = model(words)
# if y would be one-hot, we must apply
# labels = torch.max(labels, 1)[1]
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 100 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
| [
"noreply@github.com"
] | noreply@github.com |
5274933a631089d372135868c729ed4cf73b93c3 | 517391c5e39917509c6e4415111080bd5a5a4603 | /code/liaoxuefeng/函数式编程.py | 23156e8297f76b5fb0a37c85679db371658ef4b2 | [
"MIT"
] | permissive | bighuang624/Python-Learning | e82f221d60a55f7b8a8849d88f3c1b52aff4e69b | 7901b24e2f0c28e68514d0799b59c312c8086ad1 | refs/heads/master | 2018-12-15T09:57:06.228172 | 2018-09-14T03:00:36 | 2018-09-14T03:00:36 | 115,792,208 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
函数式编程 : 抽象程度很高的编程范式
特点 : 允许把函数本身作为参数传入另一个函数,还允许返回一个函数
纯粹的函数式编程语言编写的函数没有变量,因此,任意一个函数,只要输入是确定的,输出就是确定的
由于 Python 允许使用变量,因此,Python 不是纯函数式编程语言
'''
'''
高阶函数 : 一个函数可以接收另一个函数作为参数
'''
def add(x, y, f):
    """Apply *f* to both operands and sum the results (higher-order demo)."""
    return f(x) + f(y)
add(-5, 6, abs)
'''
map(func, Iterable) : 将传入的函数依次作用到序列的每个元素
return Iterator
'''
list(map(lambda x: x * x, [1, 2, 3, 4, 5])) # [1, 4, 9, 16, 25]
'''
reduce(func, Iterable) : 把结果继续和序列的下一个元素做累积计算
注意需要 from functools import reduce 来导入 reduce
'''
from functools import reduce
reduce(lambda x, y: 10 * x + y, [1, 3, 5, 7, 9]) # 13579
'''
filter(func, Iterable) : 过滤序列
return Iterator
'''
def not_empty(s):
    """Filter predicate: falsy for None/'' input, otherwise the stripped string
    (so whitespace-only strings also come out falsy)."""
    if not s:
        return s
    return s.strip()
list(filter(not_empty, ['A', '', 'B', None, 'C', ' '])) # ['A', 'B', 'C']
'''
sorted(Iterable, key=func, reverse=False) : 根据 key 函数实现自定义排序,reverse 为 True 时反向排序
'''
sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower, reverse=True) # ['Zoo', 'Credit', 'bob', 'about']
'''
返回函数
'''
# 调用作为返回值的函数时才真正计算
def lazy_sum(*args):
    """Return a closure that computes the sum of *args only when called.

    Demonstrates lazy evaluation: nothing is added until the returned
    function is invoked.
    """
    def compute():
        # builtin sum() starts from 0, exactly like the manual accumulator
        return sum(args)
    return compute
f = lazy_sum(1, 3, 5, 7, 9)
f # <function lazy_sum.<locals>.sum at 0x101c6ed90>
f() # 25
# 在函数 a 中又定义了函数 b,内部函数可以引用外部函数的参数和局部变量
# 函数作为返回值时,相关参数和变量都保存在返回的函数。这种程序结构称为“闭包”
# 因为返回的函数并非立即执行,因此返回闭包时,返回函数不要引用任何循环变量,或者后续会发生变化的变量!
def count():
    # Classic closure pitfall (kept on purpose for the tutorial): each f()
    # closes over the *variable* i, not its value at definition time, so by
    # the time any f() runs the loop has finished and i == 3 — all three
    # returned functions yield 3*3 == 9.
    fs = []
    for i in range(1, 4):
        def f():
            return i*i
        fs.append(f)
    return fs
f1, f2, f3 = count() # 所引用的变量 i 都变成了 3
f1() # 9
f2() # 9
f3() # 9
# 练习 : 利用闭包返回一个计数器函数,每次调用它返回递增整数
def createCounter():
    """Return a counter closure yielding 1, 2, 3, ... on successive calls.

    Each createCounter() call gets its own independent state.
    """
    state = [0]  # one-element list: mutable cell shared with the closure
    def counter():
        state[0] += 1
        return state[0]
    return counter
counterA = createCounter()
print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5
counterB = createCounter()
if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:
print('测试通过!')
else:
print('测试失败!')
'''
匿名函数 lambda
限制 : 只能有一个表达式
'''
# 把匿名函数赋值给一个变量,再利用变量来调用该函数
f = lambda x : x * x
f # <function <lambda> at 0x101c6ef28>
f(5) # 25
# 把匿名函数作为返回值返回
def build(x, y):
    """Return a zero-argument function that lazily computes x*x + y*y."""
    def sum_of_squares():
        return x * x + y * y
    return sum_of_squares
'''
装饰器 decorator : 在代码运行期间动态增加功能
'''
# 定义一个能打印日志的 decorator
def log(func):
    """Decorator: print 'call <name>():' before delegating to func."""
    def wrapper(*args, **kw):
        print('call %s():' % func.__name__)
        return func(*args, **kw)
    return wrapper
# 把 @log 放到 now() 函数的定义处,相当于执行语句 now = log(now)
@log
def now():
print('2018-8-8')
# 调用 now() 将执行 log() 函数中返回的 wrapper() 函数
now()
# call now():
# 2018-8-8
# 如果 decorator 本身需要传入参数,那就需要编写一个返回 decorator 的高阶函数
def log(text):
    """Parameterized decorator factory: log(text) returns a decorator whose
    wrapper prints `text` and the function name before each call."""
    def decorator(func):
        def wrapper(*args, **kw):
            print('%s %s()' % (text, func.__name__))
            return func(*args, **kw)
        return wrapper
    return decorator
# 相当于 now = log('execute')(now)
@log('execute')
def now():
print('2018-8-8')
now()
# execute now():
# 2018-8-8
# 按照上述写法有
now.__name__ # 'wrapper'
# 有些依赖函数签名的代码执行会出错
# functools.wraps : 将原始函数的 __name__ 等属性复制到 wrapper() 中
import functools
def log(text):
    """Like the previous log(), but functools.wraps copies __name__, __doc__
    etc. from func onto wrapper, so now.__name__ stays 'now'."""
    def decorator(func):
        @functools.wraps(func) # place just above the wrapper definition
        def wrapper(*args, **kw):
            print('%s %s():' % (text, func.__name__))
            return func(*args, **kw)
        return wrapper
    return decorator
# 练习 : 设计一个 decorator,它可作用于任何函数上,并打印该函数的执行时间
import time, functools
def metric(fn):
    """Decorator: print fn's wall-clock execution time, return fn's result.

    Bug fixes vs. the original:
    - the wrapper now returns the wrapped call's *result* (it used to return
      the function object itself, so ``fast(11, 22)`` never equalled 33 and
      the self-checks below always reported failure);
    - the elapsed time is converted to milliseconds to match the 'ms' label
      in the printed message.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kw):
        start = time.time()
        result = fn(*args, **kw)
        elapsed_ms = (time.time() - start) * 1000
        print('%s executed in %s ms' % (fn.__name__, elapsed_ms))
        return result
    return wrapper
@metric
def fast(x, y):
time.sleep(0.0012)
return x + y
@metric
def slow(x, y, z):
time.sleep(0.1234)
return x * y * z
f = fast(11, 22)
s = slow(11, 22, 33)
if f != 33:
print('测试失败!')
elif s != 7986:
print('测试失败!')
'''
偏函数 : 通过设定参数的默认值,降低函数调用的难度
'''
# functools.partial : 为一个函数的某些参数设置默认值,返回一个新的函数
import functools
int2 = functools.partial(int, base=2)
int('1000000') # 64
# 创建偏函数时,实际可以接收函数对象、*args 和 **kw 三个参数
int2 = functools.partial(int, base=2)
int2('10010')
# 相当于
kw = { 'base': 2 }
int('10010', **kw)
max2 = functools.partial(max, 10)
max2(5, 6, 7)
# 会将 10 作为 *args 的一部分自动加到左边,相当于
args = (10, 5, 6, 7)
max(*args)
| [
"kyonhuang@qq.com"
] | kyonhuang@qq.com |
7dc840ce96b633d37a61473ca5f4a0f82739f3c7 | 3fdf3e75c3107e6c28af3cd52e100af92db9db69 | /aboutus/views.py | 7e81de04509bb324742fa59ff0b5e1be33400c97 | [] | no_license | likhithae/MooMa | 0642709fdbd22f9a9d1843aaa36690c2d3862aaf | 9447616ba8b45e3aca296a1e6b01feec81c155db | refs/heads/master | 2022-09-23T07:43:17.033856 | 2020-06-03T11:14:45 | 2020-06-03T11:14:45 | 261,427,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from django.shortcuts import render,get_object_or_404
from django.http import HttpResponse
# Create your views here.
def aboutus(request):
    """Render the static "About us" page."""
    template = 'aboutus/aboutus.html'
    return render(request, template)
| [
"likhitha.e17@iiits.in"
] | likhitha.e17@iiits.in |
378a8eda73598088bfd7a93a8ba47a0b880763c0 | 2a03d2f0916e9de5df39b802346743c2703986ba | /config.py | 27451b91dc9633c4cf0783b906661ac52f451b40 | [] | no_license | josereyesjrz/aecc-web | 93aafc26db96a6bbb0ebb0ac9a4192d404f917c8 | 641800bcfd76367c575876f1565b014f884f89dc | refs/heads/master | 2021-09-08T03:34:17.123966 | 2018-02-22T23:26:15 | 2018-02-22T23:26:15 | 115,763,780 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | import os
#class BaseConfig(object):
"""Base configuration."""
# main config
SECRET_KEY = 'secret123'
SECURITY_PASSWORD_SALT = 'AECC_Salt'
DEBUG = True
BCRYPT_LOG_ROUNDS = 13
WTF_CSRF_ENABLED = True
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
#Upload Settings
UPLOAD_FOLDER = 'static/uploads'
MAX_CONTENT_LENGTH = 10 * 1024 * 1024 # 10MB
# mail settings
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USE_SSL = False
# gmail authentication
#MAIL_USERNAME = os.environ['APP_MAIL_USERNAME']
#MAIL_PASSWORD = os.environ['APP_MAIL_PASSWORD']
MAIL_USERNAME = 'websiteaecc@gmail.com'
MAIL_PASSWORD = 'aeccwebsite2018'
# mail accounts
MAIL_DEFAULT_SENDER = 'noreply@aecc.com' | [
"vmoralesmestres@gmail.com"
] | vmoralesmestres@gmail.com |
49d1bcefccf302d46aca267726e09e5c0e8faaf1 | b6bf1d94a4d51579285edf013e9cd074a3ec3aeb | /Módulo 2/desenho.py | 828e1dcd520ee1386abfef99bd98353f294e76c8 | [] | no_license | Clalloures/MentoriaVisionMaterial | fd9861e7f39b756dda680d53e5c0f12f6fbe63c0 | 28fce46f6fad1e57f074dcac7c7051f4731d23d9 | refs/heads/master | 2022-12-11T14:48:54.102972 | 2020-09-06T10:49:14 | 2020-09-06T10:49:14 | 293,255,655 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | import numpy as np
import cv2
# Create a matrix where every position is zero (a black canvas)
# 500 , 500 = size in pixels
# 3 = channels (RGB)
# uint8 = 8-bit values
quadro = np.zeros((500,500,3), dtype = 'uint8')
# Choose the shapes to draw; BGR color picker reference:
# https://www.rapidtables.com/web/color/RGB_Color.html#:~:text=RGB%20color%20space%20or%20RGB,*256%3D16777216%20possible%20colors.
# alternative rectangle kept for experimentation:
#cv2.rectangle(quadro, (350,120), (450,150), (153,51,255),3, lineType=8, shift=0)
cv2.rectangle(quadro, (200,120), (300,150), (255,51,153),3, lineType=8, shift=0)
cv2.line(quadro, (200,350), (300,350), (255,255,0))
cv2.circle(quadro, (250,250), 50, (0, 255,255))
# Show the canvas and wait for any key before closing the window
cv2.imshow('Quadro', quadro)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
fd222058d00f3f31767a8ffb41efbf42449f9f76 | 24099a74c086da62d03727e7b2ce07fc6b223169 | /src/config/window.py | c92ec5bf3ecf83045a70e9ced2b7831143ad8ca6 | [
"MIT"
] | permissive | yurychu/brave-tiled-battle-client | dbdc87aaafcb9e0fc5f1df8c70810e53a6374fe6 | 4c6e76e9a2ea4cda62c8fad276bb3a18d8ae067b | refs/heads/master | 2020-09-26T07:43:01.762477 | 2016-09-21T16:31:37 | 2016-09-21T16:31:37 | 67,948,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | WINDOW_WIDTH = 1024
WINDOW_HEIGHT = 768
RESOLUTION = (WINDOW_WIDTH, WINDOW_HEIGHT)
FPS = 30
| [
"tesewar@mail.ru"
] | tesewar@mail.ru |
ab5c3e859decfe06c2fea3305b36211dc4866406 | 50ee7c8ac6c6ddd0fb1d020ca9864429fd4db8cd | /LabAssignment8/2/2.py | c7226e63140a08832dfdea6b4e8935e1116cbd9b | [] | no_license | 2018008613/ComputerGraphics | b6d3b3e474e7dbd75fcbb52f80a388916f5dc011 | 537e84d241e493a52ad95feb73eebe81d6a8e1df | refs/heads/master | 2022-11-28T17:14:37.825236 | 2020-07-17T03:38:42 | 2020-07-17T03:38:42 | 280,318,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,852 | py | ###################################################
# [Practice] OpenGL Lighting
import glfw
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy as np
from OpenGL.arrays import vbo
import ctypes
gCamAng = 0.
gCamHeight = 1.
def drawCube_glVertex():
    """Draw a 2x2x2 cube (immediate mode) as 12 triangles, 2 per face.

    Uses one face normal per glNormal3f call; the vertex comments refer to
    the cube corners v0..v7 (same labels as createVertexAndIndexArrayIndexed).
    """
    glBegin(GL_TRIANGLES)
    glNormal3f(0,0,1) # v0, v2, v1, v0, v3, v2 normal
    glVertex3f( -1 , 1 , 1 ) # v0 position
    glVertex3f( 1 , -1 , 1 ) # v2 position
    glVertex3f( 1 , 1 , 1 ) # v1 position
    glVertex3f( -1 , 1 , 1 ) # v0 position
    glVertex3f( -1 , -1 , 1 ) # v3 position
    glVertex3f( 1 , -1 , 1 ) # v2 position
    glNormal3f(0,0,-1)
    glVertex3f( -1 , 1 , -1 ) # v4
    glVertex3f( 1 , 1 , -1 ) # v5
    glVertex3f( 1 , -1 , -1 ) # v6
    glVertex3f( -1 , 1 , -1 ) # v4
    glVertex3f( 1 , -1 , -1 ) # v6
    glVertex3f( -1 , -1 , -1 ) # v7
    glNormal3f(0,1,0)
    glVertex3f( -1 , 1 , 1 ) # v0
    glVertex3f( 1 , 1 , 1 ) # v1
    glVertex3f( 1 , 1 , -1 ) # v5
    glVertex3f( -1 , 1 , 1 ) # v0
    glVertex3f( 1 , 1 , -1 ) # v5
    glVertex3f( -1 , 1 , -1 ) # v4
    glNormal3f(0,-1,0)
    glVertex3f( -1 , -1 , 1 ) # v3
    glVertex3f( 1 , -1 , -1 ) # v6
    glVertex3f( 1 , -1 , 1 ) # v2
    glVertex3f( -1 , -1 , 1 ) # v3
    glVertex3f( -1 , -1 , -1 ) # v7
    glVertex3f( 1 , -1 , -1 ) # v6
    glNormal3f(1,0,0)
    glVertex3f( 1 , 1 , 1 ) # v1
    glVertex3f( 1 , -1 , 1 ) # v2
    glVertex3f( 1 , -1 , -1 ) # v6
    glVertex3f( 1 , 1 , 1 ) # v1
    glVertex3f( 1 , -1 , -1 ) # v6
    glVertex3f( 1 , 1 , -1 ) # v5
    glNormal3f(-1,0,0)
    glVertex3f( -1 , 1 , 1 ) # v0
    glVertex3f( -1 , -1 , -1 ) # v7
    glVertex3f( -1 , -1 , 1 ) # v3
    glVertex3f( -1 , 1 , 1 ) # v0
    glVertex3f( -1 , 1 , -1 ) # v4
    glVertex3f( -1 , -1 , -1 ) # v7
    glEnd()
def createVertexAndIndexArrayIndexed():
    """Build the cube as an indexed mesh: 8 shared vertices + 12 triangles.

    varr interleaves per-vertex (normal, position) rows, so the effective
    stride is 6 floats per vertex. The normals are vertex normals
    (normalized averages of adjacent face normals), which smooths the
    lighting across corners. Returns (varr float32, iarr index array of
    12 triangles referencing vertices v0..v7).
    """
    varr = np.array([
        ( -0.5773502691896258 , 0.5773502691896258 , 0.5773502691896258 ), # v0 normal
        ( -1 , 1 , 1 ), # v0 position
        ( 0.8164965809277261 , 0.4082482904638631 , 0.4082482904638631 ), # v1 normal
        ( 1 , 1 , 1 ), # v1 position
        ( 0.4082482904638631 , -0.4082482904638631 , 0.8164965809277261 ), # v2 normal
        ( 1 , -1 , 1 ), # v2 position
        ( -0.4082482904638631 , -0.8164965809277261 , 0.4082482904638631 ), # v3 normal
        ( -1 , -1 , 1 ), # v3 position
        ( -0.4082482904638631 , 0.4082482904638631 , -0.8164965809277261 ), # v4 normal
        ( -1 , 1 , -1 ), # v4 position
        ( 0.4082482904638631 , 0.8164965809277261 , -0.4082482904638631 ), # v5 normal
        ( 1 , 1 , -1 ), # v5 position
        ( 0.5773502691896258 , -0.5773502691896258 , -0.5773502691896258 ), # v6 normal
        ( 1 , -1 , -1 ), # v6 position
        ( -0.8164965809277261 , -0.4082482904638631 , -0.4082482904638631 ), # v7 normal
        ( -1 , -1 , -1 ), # v7 position
        ], 'float32')
    # two triangles per cube face, counter-clockwise winding
    iarr = np.array([
        (0,2,1),
        (0,3,2),
        (4,5,6),
        (4,6,7),
        (0,1,5),
        (0,5,4),
        (3,6,2),
        (3,7,6),
        (1,2,6),
        (1,6,5),
        (0,7,3),
        (0,4,7),
        ])
    return varr, iarr
def drawCube_glDrawElements():
    """Draw the indexed cube via vertex arrays + glDrawElements.

    Reads the globals filled in by main(): gVertexArrayIndexed holds
    interleaved (normal, position) rows, gIndexArray the triangle indices.
    """
    global gVertexArrayIndexed, gIndexArray
    varr = gVertexArrayIndexed
    iarr = gIndexArray
    glEnableClientState(GL_VERTEX_ARRAY)
    glEnableClientState(GL_NORMAL_ARRAY)
    # stride is 6 floats (normal xyz + position xyz); normals start at offset 0
    glNormalPointer(GL_FLOAT, 6*varr.itemsize, varr)
    # positions start 3 floats into each row, hence the byte-offset pointer
    glVertexPointer(3, GL_FLOAT, 6*varr.itemsize, ctypes.c_void_p(varr.ctypes.data + 3*varr.itemsize))
    glDrawElements(GL_TRIANGLES, iarr.size, GL_UNSIGNED_INT, iarr)
def render():
    """Render one frame: camera setup, world axes, then the lit cube."""
    global gCamAng, gCamHeight
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glEnable(GL_DEPTH_TEST)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45, 1, 1,10)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    # orbit camera: angle gCamAng on a radius-5 circle, height gCamHeight
    gluLookAt(5*np.sin(gCamAng),gCamHeight,5*np.cos(gCamAng), 0,0,0, 0,1,0)
    drawFrame()
    glEnable(GL_LIGHTING) # try commenting this out: no lighting
    glEnable(GL_LIGHT0)
    glEnable(GL_NORMALIZE) # try commenting this out: lighting is wrong if the object is scaled
    # glEnable(GL_RESCALE_NORMAL)
    # light position
    glPushMatrix()
    t = glfw.get_time()
    # glRotatef(t*(180/np.pi),0,1,0) # try uncommenting: rotate the light
    lightPos = (3.,4.,5.,1.) # 4th element: 1. = positional light, 0. = directional
    glLightfv(GL_LIGHT0, GL_POSITION, lightPos)
    glPopMatrix()
    # light intensity for each color channel
    lightColor = (1.,1.,1.,1.)
    ambientLightColor = (.1,.1,.1,1.)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, lightColor)
    glLightfv(GL_LIGHT0, GL_SPECULAR, lightColor)
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLightColor)
    # material reflectance for each color channel
    objectColor = (1.,0.,0.,1.)
    specularObjectColor = (1.,1.,1.,1.)
    glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, objectColor)
    glMaterialfv(GL_FRONT, GL_SHININESS, 10)
    glMaterialfv(GL_FRONT, GL_SPECULAR, specularObjectColor)
    glPushMatrix()
    # glRotatef(t*(180/np.pi),0,1,0) # try uncommenting: rotate the object
    # glScalef(1.,.2,1.) # try uncommenting: scale the object
    glColor3ub(0, 0, 255) # glColor*() is ignored if lighting is enabled
    # drawCube_glVertex()
    drawCube_glDrawElements()
    glPopMatrix()
    glDisable(GL_LIGHTING)
def drawFrame():
    """Draw the world-space axes from the origin: x red, y green, z blue."""
    origin = np.array([0., 0., 0.])
    axes = (
        ((255, 0, 0), np.array([1., 0., 0.])),
        ((0, 255, 0), np.array([0., 1., 0.])),
        ((0, 0, 255), np.array([0., 0., 1.])),
    )
    glBegin(GL_LINES)
    for color, end_point in axes:
        glColor3ub(*color)
        glVertex3fv(origin)
        glVertex3fv(end_point)
    glEnd()
def key_callback(window, key, scancode, action, mods):
    """GLFW key handler: 1/3 orbit the camera by +/-10 degrees, 2 raises it,
    W lowers it. Fires on both initial press and key repeat."""
    global gCamAng, gCamHeight
    if action==glfw.PRESS or action==glfw.REPEAT:
        if key==glfw.KEY_1:
            gCamAng += np.radians(-10)
        elif key==glfw.KEY_3:
            gCamAng += np.radians(10)
        elif key==glfw.KEY_2:
            gCamHeight += .1
        elif key==glfw.KEY_W:
            gCamHeight += -.1
gVertexArrayIndexed = None
gIndexArray = None
def main():
    """Create the GLFW window, build the cube mesh, and run the render loop."""
    global gVertexArrayIndexed, gIndexArray
    if not glfw.init():
        return
    window = glfw.create_window(480,480,'2018008613', None,None)
    if not window:
        glfw.terminate()
        return
    glfw.make_context_current(window)
    glfw.set_key_callback(window, key_callback)
    glfw.swap_interval(1)  # vsync: one buffer swap per display refresh
    # fill the globals consumed by drawCube_glDrawElements()
    gVertexArrayIndexed, gIndexArray = createVertexAndIndexArrayIndexed()
    while not glfw.window_should_close(window):
        glfw.poll_events()
        render()
        glfw.swap_buffers(window)
    glfw.terminate()
if __name__ == "__main__":
main()
| [
"picosw@hanyang.ac.kr"
] | picosw@hanyang.ac.kr |
397da806a95f70217bf79901c8e1ad9ffe4fcefe | e0ed932fc2e4edb953cc4e423362dabc19083008 | /python/002_note/learn_with/002_有异常的例子.py | 8704a23fa87700b015cb24d95bd2053e1d7f4bde | [] | no_license | glfAdd/note | 90baee45003ac3998d898dcfbc618caa28f33b74 | 19a9aff61450be25904bff0fe672f660d49d90ff | refs/heads/main | 2023-05-27T13:28:36.092352 | 2023-05-24T03:35:58 | 2023-05-24T03:35:58 | 240,066,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | class Test:
    def __enter__(self):
        # called on entering the `with` block; the `as` target gets this instance
        print('__enter__() is call!')
        return self
    @staticmethod
    def start():
        # deliberately raises ZeroDivisionError to demonstrate __exit__ handling
        print('------------------------------ test')
        return 1 / 0
    def __exit__(self, exc_type, exc_value, traceback):
        """Log any exception raised in the `with` body and suppress it.

        @param exc_type: exception class, or None if the body raised nothing
        @param exc_value: exception instance, or None
        @param traceback: traceback object, or None
        @return:
            True: suppress the exception (it is NOT re-raised)
            False: propagate the exception to the caller
        """
        print('__exit__() is call!')
        print(f'exc_type:{exc_type}')
        print(f'exc_value:{exc_value}')
        print(f'traceback:{traceback}')
        print('__exit()__ is call!')
        return True
        # return False  # returning False instead would re-raise the exception
# Demo: t.start() raises ZeroDivisionError, so everything after it in the
# body (including the `raise TypeError`) is skipped; __exit__ returns True,
# so the exception is swallowed and execution continues past the `with`.
with Test() as t:
    print('------------ 1')
    t.start()
    print('------------ 2')
    raise TypeError
    print('------------ 3')
| [
"2239660080@qq.com"
] | 2239660080@qq.com |
e878483efb96ff6a75498766da8723c34864fa39 | 694d3929b23a8434cab14ddab623030a0fe4ac38 | /apps/reports/views.py | f3a408b99c91eae03444b5863ff332d455c98ab2 | [] | no_license | gehongming/django_api | 03fec87a25c2ad3cb603aad2f1b5d9b680debf12 | fb8e0623e9171deb8706ed258cc5d5bd0d9fe6aa | refs/heads/main | 2023-09-01T06:57:22.677374 | 2021-10-12T05:39:07 | 2021-10-12T05:39:07 | 415,173,097 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | import json
import re
import os
from datetime import datetime
from django.http import StreamingHttpResponse
from django.utils.encoding import escape_uri_path
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework import permissions, status
from rest_framework.decorators import action
from rest_framework.settings import settings
from .models import Reports
from .serializer import ReportsSerializer
from .utils import format_output, get_file_contents
class ReportsViewSet(ModelViewSet):
    """
    list:
        Return the list of (non-deleted) test reports.
    create:
        Create a test report.
    update:
        Update a test report.
    partial_update:
        Partially update a test report.
    destroy:
        Soft-delete a test report (sets is_delete, keeps the row).
    retrieve:
        Return one report's details with `summary` decoded from JSON.
    """
    # only rows that have not been soft-deleted
    queryset = Reports.objects.filter(is_delete=0)
    serializer_class = ReportsSerializer
    ordering_fields = ['name']
    # reports are only visible to authenticated users
    permission_classes = [permissions.IsAuthenticated]

    def list(self, request, *args, **kwargs):
        # delegate to DRF's list(), then post-process the serialized rows
        response = super().list(request, *args, **kwargs)
        response.data['results'] = format_output(response.data['results'])
        return response

    def perform_destroy(self, instance):
        # soft delete: flag the row instead of physically removing it
        # (the stock destroy() would delete it from the database)
        instance.is_delete = 1
        instance.save()

    @action(detail=True)
    def download(self, request, pk=None):
        """Write the report's stored HTML to disk and stream it back as an
        attachment download."""
        instance = self.get_object()
        html = instance.html
        name = instance.name
        # names like "smoke_20200101" get their numeric suffix replaced by a
        # fresh timestamp; anything else just gets ".html" appended
        mtch = re.match(r'(.*_)\d+', name)
        if mtch:
            report_filename = mtch.group(1) + datetime.strftime(datetime.now(), '%Y%m%d%H%M%S' + '.html')
        else:
            report_filename = name + '.html'
        # settings.REPORTS_DIR is the configured report output directory
        report_path = os.path.join(settings.REPORTS_DIR, report_filename)
        with open(report_path, 'w', encoding='utf-8') as one_file:
            one_file.write(html)
        # streaming response suitable for file downloads; RFC 5987 filename
        response = StreamingHttpResponse(get_file_contents(report_path))
        report_path_final = escape_uri_path(report_filename)
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Disposition'] = f"attachment; filename*=UTF-8''{report_path_final}"
        return response

    def retrieve(self, request, *args, **kwargs):
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        data = serializer.data
        try:
            # fix: json.loads() no longer accepts an `encoding` kwarg — it was
            # deprecated in 3.1 and removed in Python 3.9 (raised TypeError)
            data['summary'] = json.loads(data['summary'])
            return Response(data)
        except Exception:
            return Response({
                'err': '测试报告summary格式有误'
            }, status=status.HTTP_400_BAD_REQUEST)
| [
"1010562639@qq.com"
] | 1010562639@qq.com |
060a96ce55d378586411ad42a722042503385328 | 0bbeb64933cb5a4df366ad1b39484c3922fcc5a5 | /bin/django-admin | 93f16e781c08910499b4b071b66dc6691445aedb | [] | no_license | alex-h-k/Build-a-Blog-and-Learn-Python-Django | 484303385eaedd6353d04b76544a8a510b10930f | e8c8922db04f5dd2def47438ace914d092f7f16d | refs/heads/master | 2020-05-02T00:30:29.356333 | 2019-03-26T23:44:27 | 2019-03-26T23:44:27 | 177,673,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | #!/Users/alex/Desktop/trydjango19/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # normalize argv[0]: strip setuptools' "-script.py"/".exe" wrapper suffix
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # dispatch to Django's management command runner; exit with its status
    sys.exit(execute_from_command_line())
| [
"kuan8@hotmail.com"
] | kuan8@hotmail.com | |
28b45973b0949e982a7d58714b7cbadc29fc03c1 | 054b9a9493c70c114f174f0815b89cf587f36cb3 | /NSUR/news/migrations/0006_auto_20180314_1547.py | cb258485e728253c3a4560d9b6c1500f78a57af7 | [] | no_license | ahmeditaev/NSUR | 0e751d6375a1f83c1c81d028f574a543e3a5318b | 278721d48134160dd559a7e02e343b1b86f153c7 | refs/heads/master | 2021-04-12T03:16:06.825324 | 2018-03-29T11:21:29 | 2018-03-29T11:21:29 | 125,815,512 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | # Generated by Django 2.0.3 on 2018-03-14 09:47
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.3; creates the EmailUs contact model and
    # alters news.published.

    dependencies = [
        ('news', '0005_auto_20180314_0801'),
    ]

    operations = [
        migrations.CreateModel(
            name='EmailUs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=240)),
                ('tel', models.CharField(max_length=240)),
                ('email', models.EmailField(max_length=300)),
                ('text', models.TextField()),
            ],
        ),
        migrations.AlterField(
            model_name='news',
            name='published',
            # NOTE: this default is frozen at makemigrations time (a common
            # artifact of `default=timezone.now()` instead of `timezone.now`)
            field=models.DateTimeField(blank=True, default=datetime.datetime(2018, 3, 14, 9, 47, 39, 434445, tzinfo=utc), null=True),
        ),
    ]
| [
"ahmeditaev@gmail.com"
] | ahmeditaev@gmail.com |
2b2b0ee597e8128abc773f167c2b38bc1bb6fc9a | e7cbb7e86a9bda34ba6e160eaf2dc0ebcf63c318 | /udacity/udacity/items.py | 387ed55565772a14536e3a3e50d40c7020b4bb8f | [] | no_license | jacksino/spider | f1ac85314e0530870aad920d18f54ba31e69269a | f553c1cddbbe8cfdd47fb5e1480694f790226cb8 | refs/heads/master | 2022-05-16T09:02:20.264799 | 2022-04-18T11:07:21 | 2022-04-18T11:07:21 | 233,003,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class UdacityItem(scrapy.Item):
    """Scrapy item: one scraped Udacity course listing."""
    title = scrapy.Field()             # course title
    instructor = scrapy.Field()        # instructor name(s)
    lessons = scrapy.Field()           # lesson count/listing as scraped
    suitable = scrapy.Field()          # intended audience
    level = scrapy.Field()             # difficulty level
    selling_points= scrapy.Field()     # marketing highlights
    price = scrapy.Field()             # course price as scraped
    learning_duration = scrapy.Field() # estimated time to complete
| [
"noreply@github.com"
] | noreply@github.com |
dd351a69b519de626f377dcd61ee199d47f1462a | 2d1cddc3ab885e374bf0bc85be39b7c7c13f0be5 | /mytweets/mytweets/urls.py | d9da1b1b9fe9136a2988fc65a28ff5b6e2a9bbba | [] | no_license | fernvnat14/MyTweetLab02 | e3cf976cb3830b88b9bcfae432fbddbe715cea1e | 596e21a0d52ae034ab7cce9d6185329965acbfaa | refs/heads/master | 2020-03-26T16:42:04.808357 | 2018-08-21T03:40:56 | 2018-08-21T03:40:56 | 145,117,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | """mytweets URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from tweets.views import Index , Profile
# discover admin modules in installed apps
admin.autodiscover()
# routes: index page, per-user profile (captures the username), Django admin
urlpatterns = [
    url(r'^$', Index.as_view()),
    url(r'^user/(\w+)/$', Profile.as_view()),
    url(r'^admin/', admin.site.urls)]
| [
"noreply@github.com"
] | noreply@github.com |
c17a7dad686c72b10d108e8f1cfac4c9894e2578 | 25cb7be764c97bce19f2afbc0f67703c2832d503 | /Week 1/ex2.py | 53fb25b3ba8453bada242c104aeadb339b083e5a | [
"MIT"
] | permissive | wahsandaruwan/python_ess_exercises | 3ead6da2dc32976e2066924716427551f6e57349 | a43cffa1077d1d9af7ee2e81427416894b29b0b6 | refs/heads/master | 2022-12-16T20:28:05.368186 | 2020-09-19T20:30:30 | 2020-09-19T20:30:30 | 272,532,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | # =======Simple Expressions========
# ---- Unary + and - ----
print("Unary Operators")
print(+3)
print(-5)
print(+7.86)
print(-3348.63)
print("")
# ---- Simple arithmetic ----
print("Addition and Substraction")
print(1 + 2)
print(48 - 89)
print(3.45 + 2.7)
print(87.3384 - 12.35)
print(3 + 6.7)
print(9.8 - 4)
print("")
print("Multiplication")
print(3 * 4)
print(7.8 * 27.54)
print(7 * 8.3)
print("")
print("Division") # true division (/) always gives back a float
print(8 / 2)
print(3 / 2)
print(7.538 / 14.3)
print(8 // 2) # // is floor division: int // int yields an int
print(3 // 2)
print(7.538 // 14.3) # ...but with float operands it yields a (floored) float
print("Exponentiation")
print(3 ** 2) # 3 to the power of 2
print(5 ** 2)
print(32.6 ** 7)
print(9 ** 0.5) # fractional exponent: square root
| [
"wahsandaruwan6@gmail.com"
] | wahsandaruwan6@gmail.com |
9289f3888bd368ebf590e4c9f31cee03c9e2b5e1 | 5705ca0e1d3733add90a1952e54ad28785bc69f4 | /simpledu/simpledu/forms.py | 48c061ac0764ee488f373ae0246be5105527f4ba | [] | no_license | AbbieChen233/flask_login | fa7f330961be682972264a3fb019b4ebe9a29c2c | 275bbc4981007bfa8c9b5d7563e8b80b903fdf39 | refs/heads/master | 2020-03-17T08:31:14.163807 | 2018-05-15T01:41:02 | 2018-05-15T01:41:02 | 133,441,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField
from wtforms.validators import Length,Email,EqualTo,Required
from simpledu.models import db,User
from wtforms import ValidationError
from flask import flash
import re
class RegisterForm(FlaskForm):
    """Registration form: username/email/password with inline validators."""
    username = StringField('Username',validators=[Required(),Length(3,24)])
    email = StringField('email',validators=[Required(),Email()])
    password = PasswordField('password',validators=[Required(),Length(6,24)])
    repeat_password = PasswordField('password',validators=[Required(),Length(6,24),EqualTo('password')])
    submit = SubmitField('提交')
    # Create a User from the submitted form data and persist it
    def create_user(self):
        user = User()
        user.username = self.username.data
        user.email = self.email.data
        user.password = self.password.data
        db.session.add(user)
        db.session.commit()
        return user
    # WTForms inline validator for `username`: reject duplicates, and allow
    # only letters/digits (r'[\W]+' matches any non-word character)
    def validate_username(self,field):
        user = User.query.filter_by(username=field.data).first()
        if user:
            raise ValidationError('用户名已经存在')
        pattern = re.compile(r'[\W]+')
        m = re.search(pattern,field.data)
        if not user and m:
            flash('用户名只能由字母和数字组成')
            raise ValidationError('用户名只能由字母和数字组成')
    # inline validator for `email`: reject addresses that already exist
    def validate_email(self,field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('邮箱已经存在')
class LoginForm(FlaskForm):
    """Login form (email + password + remember-me).

    Bug fixes vs. the original:
    - the Email validator now sits on the ``email`` field and the length
      check on ``password`` (the two validator lists were swapped);
    - the inline validator is named ``validate_email`` so WTForms actually
      invokes it (``validate_username`` was dead code: this form has no
      ``username`` field, so the hook never ran);
    - ``validate_password`` now verifies the *submitted* password
      (``field.data``) instead of checking the stored credential against
      itself, which always passed.
    """
    email = StringField('Username', validators=[Required(), Email()])
    password = PasswordField('password', validators=[Required(), Length(6, 24)])
    remember_me = BooleanField('记住我')
    submit = SubmitField('submit')

    def validate_email(self, field):
        # reject addresses that are not registered
        if field.data and not User.query.filter_by(email=field.data).first():
            raise ValidationError('用户名未注册')

    def validate_password(self, field):
        # compare the submitted password against the stored credentials
        user = User.query.filter_by(email=self.email.data).first()
        if user and not user.check_password(field.data):
            raise ValidationError('密码错误')
| [
"noreply@github.com"
] | noreply@github.com |
4570702ee558fd5356cbb6e61347d548044dc91f | 98efe1aee73bd9fbec640132e6fb2e54ff444904 | /loldib/getratings/models/NA/na_velkoz/na_velkoz_jng.py | 03ed1057207505472af839e30740f1e89491e018 | [
"Apache-2.0"
] | permissive | koliupy/loldib | be4a1702c26546d6ae1b4a14943a416f73171718 | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | refs/heads/master | 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,545 | py | from getratings.models.ratings import Ratings
class NA_Velkoz_Jng_Aatrox(Ratings):
pass
class NA_Velkoz_Jng_Ahri(Ratings):
pass
class NA_Velkoz_Jng_Akali(Ratings):
pass
class NA_Velkoz_Jng_Alistar(Ratings):
pass
class NA_Velkoz_Jng_Amumu(Ratings):
pass
class NA_Velkoz_Jng_Anivia(Ratings):
pass
class NA_Velkoz_Jng_Annie(Ratings):
pass
class NA_Velkoz_Jng_Ashe(Ratings):
pass
class NA_Velkoz_Jng_AurelionSol(Ratings):
pass
class NA_Velkoz_Jng_Azir(Ratings):
pass
class NA_Velkoz_Jng_Bard(Ratings):
pass
class NA_Velkoz_Jng_Blitzcrank(Ratings):
pass
class NA_Velkoz_Jng_Brand(Ratings):
pass
class NA_Velkoz_Jng_Braum(Ratings):
pass
class NA_Velkoz_Jng_Caitlyn(Ratings):
pass
class NA_Velkoz_Jng_Camille(Ratings):
pass
class NA_Velkoz_Jng_Cassiopeia(Ratings):
pass
class NA_Velkoz_Jng_Chogath(Ratings):
pass
class NA_Velkoz_Jng_Corki(Ratings):
pass
class NA_Velkoz_Jng_Darius(Ratings):
pass
class NA_Velkoz_Jng_Diana(Ratings):
pass
class NA_Velkoz_Jng_Draven(Ratings):
pass
class NA_Velkoz_Jng_DrMundo(Ratings):
pass
class NA_Velkoz_Jng_Ekko(Ratings):
pass
class NA_Velkoz_Jng_Elise(Ratings):
pass
class NA_Velkoz_Jng_Evelynn(Ratings):
pass
class NA_Velkoz_Jng_Ezreal(Ratings):
pass
class NA_Velkoz_Jng_Fiddlesticks(Ratings):
pass
class NA_Velkoz_Jng_Fiora(Ratings):
pass
class NA_Velkoz_Jng_Fizz(Ratings):
pass
class NA_Velkoz_Jng_Galio(Ratings):
pass
class NA_Velkoz_Jng_Gangplank(Ratings):
pass
class NA_Velkoz_Jng_Garen(Ratings):
pass
class NA_Velkoz_Jng_Gnar(Ratings):
pass
class NA_Velkoz_Jng_Gragas(Ratings):
pass
class NA_Velkoz_Jng_Graves(Ratings):
pass
class NA_Velkoz_Jng_Hecarim(Ratings):
pass
class NA_Velkoz_Jng_Heimerdinger(Ratings):
pass
class NA_Velkoz_Jng_Illaoi(Ratings):
pass
class NA_Velkoz_Jng_Irelia(Ratings):
pass
class NA_Velkoz_Jng_Ivern(Ratings):
pass
class NA_Velkoz_Jng_Janna(Ratings):
pass
class NA_Velkoz_Jng_JarvanIV(Ratings):
pass
class NA_Velkoz_Jng_Jax(Ratings):
pass
class NA_Velkoz_Jng_Jayce(Ratings):
pass
class NA_Velkoz_Jng_Jhin(Ratings):
pass
class NA_Velkoz_Jng_Jinx(Ratings):
pass
class NA_Velkoz_Jng_Kalista(Ratings):
pass
class NA_Velkoz_Jng_Karma(Ratings):
pass
class NA_Velkoz_Jng_Karthus(Ratings):
pass
class NA_Velkoz_Jng_Kassadin(Ratings):
pass
class NA_Velkoz_Jng_Katarina(Ratings):
pass
class NA_Velkoz_Jng_Kayle(Ratings):
pass
class NA_Velkoz_Jng_Kayn(Ratings):
pass
class NA_Velkoz_Jng_Kennen(Ratings):
pass
class NA_Velkoz_Jng_Khazix(Ratings):
pass
class NA_Velkoz_Jng_Kindred(Ratings):
pass
class NA_Velkoz_Jng_Kled(Ratings):
pass
class NA_Velkoz_Jng_KogMaw(Ratings):
pass
class NA_Velkoz_Jng_Leblanc(Ratings):
pass
class NA_Velkoz_Jng_LeeSin(Ratings):
pass
class NA_Velkoz_Jng_Leona(Ratings):
pass
class NA_Velkoz_Jng_Lissandra(Ratings):
pass
class NA_Velkoz_Jng_Lucian(Ratings):
pass
class NA_Velkoz_Jng_Lulu(Ratings):
pass
class NA_Velkoz_Jng_Lux(Ratings):
pass
class NA_Velkoz_Jng_Malphite(Ratings):
pass
class NA_Velkoz_Jng_Malzahar(Ratings):
pass
class NA_Velkoz_Jng_Maokai(Ratings):
pass
class NA_Velkoz_Jng_MasterYi(Ratings):
pass
class NA_Velkoz_Jng_MissFortune(Ratings):
pass
class NA_Velkoz_Jng_MonkeyKing(Ratings):
pass
class NA_Velkoz_Jng_Mordekaiser(Ratings):
pass
class NA_Velkoz_Jng_Morgana(Ratings):
pass
class NA_Velkoz_Jng_Nami(Ratings):
pass
class NA_Velkoz_Jng_Nasus(Ratings):
pass
class NA_Velkoz_Jng_Nautilus(Ratings):
pass
class NA_Velkoz_Jng_Nidalee(Ratings):
pass
class NA_Velkoz_Jng_Nocturne(Ratings):
pass
class NA_Velkoz_Jng_Nunu(Ratings):
pass
class NA_Velkoz_Jng_Olaf(Ratings):
pass
class NA_Velkoz_Jng_Orianna(Ratings):
pass
class NA_Velkoz_Jng_Ornn(Ratings):
pass
class NA_Velkoz_Jng_Pantheon(Ratings):
pass
class NA_Velkoz_Jng_Poppy(Ratings):
pass
class NA_Velkoz_Jng_Quinn(Ratings):
pass
class NA_Velkoz_Jng_Rakan(Ratings):
pass
class NA_Velkoz_Jng_Rammus(Ratings):
pass
class NA_Velkoz_Jng_RekSai(Ratings):
pass
# Auto-generated placeholder classes: one empty subclass of `Ratings`
# (presumably defined earlier in this module -- not visible here) per
# NA-region Vel'Koz-jungle vs. opposing-champion matchup.  The class name
# encodes region_champion_role_opponent; all behaviour lives in `Ratings`.
class NA_Velkoz_Jng_Renekton(Ratings):
    pass
class NA_Velkoz_Jng_Rengar(Ratings):
    pass
class NA_Velkoz_Jng_Riven(Ratings):
    pass
class NA_Velkoz_Jng_Rumble(Ratings):
    pass
class NA_Velkoz_Jng_Ryze(Ratings):
    pass
class NA_Velkoz_Jng_Sejuani(Ratings):
    pass
class NA_Velkoz_Jng_Shaco(Ratings):
    pass
class NA_Velkoz_Jng_Shen(Ratings):
    pass
class NA_Velkoz_Jng_Shyvana(Ratings):
    pass
class NA_Velkoz_Jng_Singed(Ratings):
    pass
class NA_Velkoz_Jng_Sion(Ratings):
    pass
class NA_Velkoz_Jng_Sivir(Ratings):
    pass
class NA_Velkoz_Jng_Skarner(Ratings):
    pass
class NA_Velkoz_Jng_Sona(Ratings):
    pass
class NA_Velkoz_Jng_Soraka(Ratings):
    pass
class NA_Velkoz_Jng_Swain(Ratings):
    pass
class NA_Velkoz_Jng_Syndra(Ratings):
    pass
class NA_Velkoz_Jng_TahmKench(Ratings):
    pass
class NA_Velkoz_Jng_Taliyah(Ratings):
    pass
class NA_Velkoz_Jng_Talon(Ratings):
    pass
class NA_Velkoz_Jng_Taric(Ratings):
    pass
class NA_Velkoz_Jng_Teemo(Ratings):
    pass
class NA_Velkoz_Jng_Thresh(Ratings):
    pass
class NA_Velkoz_Jng_Tristana(Ratings):
    pass
class NA_Velkoz_Jng_Trundle(Ratings):
    pass
class NA_Velkoz_Jng_Tryndamere(Ratings):
    pass
class NA_Velkoz_Jng_TwistedFate(Ratings):
    pass
class NA_Velkoz_Jng_Twitch(Ratings):
    pass
class NA_Velkoz_Jng_Udyr(Ratings):
    pass
class NA_Velkoz_Jng_Urgot(Ratings):
    pass
class NA_Velkoz_Jng_Varus(Ratings):
    pass
class NA_Velkoz_Jng_Vayne(Ratings):
    pass
class NA_Velkoz_Jng_Veigar(Ratings):
    pass
class NA_Velkoz_Jng_Velkoz(Ratings):
    pass
class NA_Velkoz_Jng_Vi(Ratings):
    pass
class NA_Velkoz_Jng_Viktor(Ratings):
    pass
class NA_Velkoz_Jng_Vladimir(Ratings):
    pass
class NA_Velkoz_Jng_Volibear(Ratings):
    pass
class NA_Velkoz_Jng_Warwick(Ratings):
    pass
class NA_Velkoz_Jng_Xayah(Ratings):
    pass
class NA_Velkoz_Jng_Xerath(Ratings):
    pass
class NA_Velkoz_Jng_XinZhao(Ratings):
    pass
class NA_Velkoz_Jng_Yasuo(Ratings):
    pass
class NA_Velkoz_Jng_Yorick(Ratings):
    pass
class NA_Velkoz_Jng_Zac(Ratings):
    pass
class NA_Velkoz_Jng_Zed(Ratings):
    pass
class NA_Velkoz_Jng_Ziggs(Ratings):
    pass
class NA_Velkoz_Jng_Zilean(Ratings):
    pass
class NA_Velkoz_Jng_Zyra(Ratings):
    pass
| [
"noreply@github.com"
] | noreply@github.com |
32f760663ed02344cc1d763c8b38a5d9f1d5f937 | 5db8d388ded4065307d246a72308459427a65ffa | /URI1003.py | a5d38fb660aacda1c836c08feab77467bd8f8022 | [] | no_license | dressaco/Python-URI | e1a079d6ea4ed107e99d25303371427ee62d82da | 2cfd7d8c9111668c85da61c1ec61ce3882bc296c | refs/heads/master | 2023-05-03T05:59:15.430547 | 2021-05-31T13:31:11 | 2021-05-31T13:31:11 | 362,318,692 | 0 | 1 | null | 2021-05-10T17:57:05 | 2021-04-28T02:53:43 | Python | UTF-8 | Python | false | false | 124 | py | A = int(input())
# URI Online Judge problem 1003: read two integers (one per line) and
# print their sum in the exact format "SOMA = <n>".
# NOTE: the first operand `A = int(input())` is read on the preceding line.
B = int(input())  # second operand from stdin
soma = A + B  # "soma" is Portuguese for "sum"
print('SOMA =',soma)
#print('SOMA = ' + str(soma))
#print(f'SOMA = {soma}')
"dressaco@outlook.com"
] | dressaco@outlook.com |
85f7df264972714c0c28d71df68d69842bd1a2f5 | 317b191130d8a32364b5e36e55b6c99fc58e7b62 | /odoo_addons/pos_proxy_service/models/account_journal.py | 5fe0a431470e34454ff6099090d78ff3dd1ba300 | [] | no_license | nahe-consulting-group/pos_proxy_services | 0d5d6db8e280dbdfe2ae9b02c31d14f3e1708ae5 | 64509dab6bd00b79f8a70bdd46e30ef345cb73ca | refs/heads/master | 2022-12-17T12:34:24.474956 | 2020-09-15T10:59:17 | 2020-09-15T10:59:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | from odoo import api, fields, tools, models, _
from odoo.exceptions import UserError
class PosConfig(models.Model):
    """Extend the point-of-sale configuration with an AFIP payment method.

    NOTE(review): despite living in a file named after account journals,
    this model inherits ``pos.config``.
    """
    _inherit = 'pos.config'

    # Payment-method code used when reporting to AFIP (the Argentine tax
    # authority); the selection values follow the official AFIP code table.
    # Defaults to '99' ("other payment means").
    payment_afip = fields.Selection([
        ('1','CARTA_DE_CREDITO_DOCUMENTARIO'),
        ('2','CARTAS_DE_CREDITO_SIMPLE'),
        ('3','CHEQUE'),
        ('4','CHEQUES_CANCELATORIOS'),
        ('5','CREDITO_DOCUMENTARIO'),
        ('6','CUENTA_CORRIENTE'),
        ('7','DEPOSITO'),
        ('8','EFECTIVO'),
        ('9','ENDOSO_DE_CHEQUE'),
        ('10','FACTURA_DE_CREDITO'),
        ('11','GARANTIAS_BANCARIAS'),
        ('12','GIROS'),
        ('13','LETRAS_DE_CAMBIO'),
        ('14','MEDIOS_DE_PAGO_DE_COMERCIO_EXTERIOR'),
        ('15','ORDEN_DE_PAGO_DOCUMENTARIA'),
        ('16','ORDEN_DE_PAGO_SIMPLE'),
        ('17','PAGO_CONTRA_REEMBOLSO'),
        ('18','REMESA_DOCUMENTARIA'),
        ('19','REMESA_SIMPLE'),
        ('20','TARJETA_DE_CREDITO'),
        ('21','TARJETA_DE_DEBITO'),
        ('22','TICKET'),
        ('23','TRANSFERENCIA_BANCARIA'),
        ('24','TRANSFERENCIA_NO_BANCARIA'),
        ('99','OTROS_MEDIOS_DE_PAGO')
    ], 'Forma de Pago AFIP',
    default='99')
"pronexo@gmail.com"
] | pronexo@gmail.com |
256003aaafe8fa10d3dddfaa1c9e624d2290b8e1 | 45f38bb8754d5144fbbdc72dbe0cc70b43cde8a7 | /app/services/reply.py | 37ff79e84703e4ee5b00f22f4d501f6eee318a32 | [] | no_license | koyoru12/ShizuokaTrashNavi | aa2bec1e2b92fd0a6669840c373d7c715bc727e4 | ed443c701f1250f9867e2d2c19f7ae9280ba663f | refs/heads/master | 2020-04-27T07:46:29.644108 | 2019-03-21T02:16:57 | 2019-03-21T02:16:57 | 174,147,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,631 | py | import os
import sqlite3
import json
import re
from abc import ABCMeta, abstractclassmethod
import tornado
from tornado import gen, httpclient
import util
from app.models import MessageFactory
from app.repositories import (
FixedReplyRDBRepository, DynamicReplyRDBRepository,
UserRDBRepository, CityRDBRepository
)
from app.services.token import TokenProvider
class TextMessageReplyService():
    """Build the list of reply messages for an incoming text request.

    Reply strategies are tried in order by :meth:`reply`:

    1. :meth:`try_action_reply`  -- explicit client actions (help, handshake...)
    2. :meth:`try_fixed_reply`   -- canned replies matched from the database
    3. :meth:`try_dynamic_reply` -- trash-information search per municipality
    """

    def __init__(self, request):
        self._request = request
        self._messages = []
        # Bug fix: add_handler() appended to self._handlers, but the
        # attribute was never initialised, raising AttributeError on use.
        self._handlers = []

    def add_handler(self, handler):
        """Register an additional reply handler."""
        self._handlers.append(handler)

    def reply(self):
        """Return the reply messages produced by the first matching strategy."""
        if self.try_action_reply():
            return self._messages
        if self.try_fixed_reply():
            return self._messages
        if self.try_dynamic_reply():
            return self._messages

    def try_action_reply(self):
        """Handle explicit client actions; return True when a reply was made."""
        act = self._request.action
        if act.type == '':
            return False
        if act.type == 'help_search_trash':
            # (Help) how to put out the trash
            message = MessageFactory.create_message('help_search_trash', self._request)
            self._messages.append(message)
            return True
        if act.type == 'help_change_usercity':
            # (Help) change the registered municipality
            message = MessageFactory.create_message('help_change_usercity', self._request)
            self._messages.append(message)
            return True
        elif act.type == 'search_trash':
            # Search trash information for an explicitly given municipality:
            # rewrite the request/config and fall through to the dynamic search.
            self._request.request_message = act.trash
            self._request.config.search_cityid = act.city
            return False
        elif act.type == 'handshake':
            # Handshake for the web client
            message = MessageFactory.create_message('handshake', self._request)
            self._messages.append(message)
            return True
        return False

    def try_fixed_reply(self):
        """Answer with a canned reply when the message matches one."""
        repo = FixedReplyRDBRepository()
        data = repo.find_reply_by_message(self._request.request_message)
        if data:
            # FIX:
            # Only the first match is considered.
            message = MessageFactory.create_message(data[0]['message_type'], self._request)
            if message is not None:
                self._messages.append(message)
                return True
        return False

    def try_dynamic_reply(self):
        """Search the trash database and build result messages. Always True."""
        user_repo = UserRDBRepository()
        city_repo = CityRDBRepository()

        def check_city_assignment():
            # Check whether the query names a municipality, e.g. "静岡 ペットボトル".
            m = re.match(r'(.+)[\s| ]+(.+)', self._request.request_message)
            if m:
                city_name = m.group(1)
                trash_name = m.group(2)
                city_data = city_repo.find_city_by_name(city_name, search_like=True)
                if city_data is not None:
                    # A municipality was given: rewrite the request so only
                    # the trash term is searched, scoped to that city.
                    self._request.request_message = trash_name
                    city = city_repo.find_city_by_name(city_data['city_name'])
                    if city is not None:
                        self._request.config.search_cityid = city['id']

        check_city_assignment()
        q_message = self._request.request_message
        q_city_id = ''
        user_id = self._request.user_id
        user = user_repo.find_user_by_id(user_id)
        if self._request.config.search_cityid != '':
            # An explicitly specified city id takes precedence.
            q_city_id = self._request.config.search_cityid
        else:
            if user is None:
                # No registered user: default to searching Shizuoka City.
                city = city_repo.find_city_by_name('静岡市')
                q_city_id = city['id']
            else:
                # Registered user: search the municipality on record.
                q_city_id = user['city_id']
        reply_repo = DynamicReplyRDBRepository(q_message, q_city_id)
        trash_list = reply_repo.find_reply_by_message()
        if len(trash_list) == 0:
            # Nothing found.
            if q_city_id == '*':
                # Already searched every municipality.
                message = MessageFactory.create_message('trash_not_found', self._request)
                self._messages.append(message)
            else:
                # Searched one municipality: offer a "search other cities" button.
                message = MessageFactory.create_message('trash_not_found', self._request, searchbutton=True)
                self._messages.append(message)
        elif 0 < len(trash_list) <= 3:
            # Up to three results: send each as its own message.
            for trash in trash_list:
                message = MessageFactory.create_message('trash_info', self._request, trash=trash)
                self._messages.append(message)
        else:
            # Four or more results: present a selection list, capped at 10.
            trash_list = trash_list[0:10]
            message = MessageFactory.create_message('trash_select', self._request,
                                                    trash_list=trash_list, show_city=True)
            self._messages.append(message)
        if user is None and self._request.client == 'line':
            # On LINE, prompt unregistered users to register an address.
            # Temporarily disabled.
            # self._messages.append(MessageFactory.create_message('require_address', self._request))
            pass
        return True
class AddressMessageReplyService():
    """Handle a geolocation message: resolve it to a city and register it."""

    def __init__(self, request):
        self._request = request
        self._messages = []

    async def try_register_address(self):
        """Resolve the request's coordinates and register the user's city.

        Returns the list of reply messages (success or rejection).
        """
        city_repo = CityRDBRepository()
        city_name = await self._find_address_by_geolocation()
        if city_name == None:
            # No municipality found for the coordinates: guide the user to
            # the web site instead (tokenised link).
            token = TokenProvider.issue(self._request.user_id)
            message = MessageFactory.create_message('response_address_reject', self._request, token=token)
            self._messages.append(message)
            return self._messages
        city = city_repo.find_city_by_name(city_name)
        if city == None:
            # The municipality is not supported: guide the user to the web site.
            token = TokenProvider.issue(self._request.user_id)
            message = MessageFactory.create_message('response_address_reject', self._request, token=token)
            self._messages.append(message)
            return self._messages
        # Register the municipality for this user.
        CityService.register_user_city(self._request.user_id, city['id'])
        message = MessageFactory.create_message('response_address_success', self._request, city_name=city_name)
        self._messages.append(message)
        return self._messages

    async def _find_address_by_geolocation(self):
        """Reverse-geocode the request's lon/lat to a city name (or None)."""
        def strip_ward_from_city_name(city_name):
            # "X市Y区" (city + ward) -> "X市": drop the ward suffix.
            m = re.match('(.+市).+区', city_name)
            return None if m == None else m.group(1)
        url = 'http://geoapi.heartrails.com/api/json?method=searchByGeoLocation&x={}&y={}'.format(
            self._request.longitude,
            self._request.latitude
        )
        # FIX:
        # Error handling for HTTP failures is still missing.
        http_client = httpclient.AsyncHTTPClient()
        raw_response = await http_client.fetch(url)
        raw_body = raw_response.body.decode('utf-8')
        response = json.loads(raw_body)
        if 'location' in response['response']:
            city_name = response['response']['location'][0]['city']
            stripped = strip_ward_from_city_name(city_name)
            return city_name if stripped == None else stripped
        else:
            return None
class CityService:
    """Thin service layer over the city / user repositories."""

    @classmethod
    def get_all_city(cls):
        """Return every city known to the repository."""
        repo = CityRDBRepository()
        return repo.get_all_city()

    @classmethod
    def register_user_city(cls, user_id, city_id):
        """Create, or update, the user's registered municipality."""
        user_repo = UserRDBRepository()
        # Idiom fix: compare to None with `is`, not `==`.
        if user_repo.find_user_by_id(user_id) is None:
            # No record yet: register the user together with the city.
            user_repo.register_user(user_id, city_id)
        else:
            # Existing record: only the city needs updating.
            user_repo.update_user(user_id, city_id)
| [
"koyoru12@yahoo.co.jp"
] | koyoru12@yahoo.co.jp |
9565cb65bec5d2a65feac6fb865cef79d1de56a2 | cbfd154a1f6d97b08ee5051e84482426d4756cbd | /forms.py | 0aa3a59f33d0c93d875cec4ba1973f63a7ac919e | [] | no_license | Shajidur-Rahman/PyQt5 | c82bb8444d13cf41cb452cf6b38bef93ee546fcc | 0d474e2f6d4ac6c5458e815ab3cff73f81aea27f | refs/heads/main | 2023-06-07T19:23:13.024203 | 2021-06-11T11:01:05 | 2021-06-11T11:01:05 | 374,356,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | import PyQt5.QtWidgets as pq
import PyQt5.QtGui as gui
class Main(pq.QWidget):
    """Demo window showing a QFormLayout with a label, two line edits
    and a push button. The window shows itself on construction."""

    def __init__(self):
        super(Main, self).__init__()
        self.setWindowTitle('Forms !!')
        #self.setLayout(pq.QVBoxLayout())
        # Form layout: rows of (label, field) pairs.
        form_layout = pq.QFormLayout()
        self.setLayout(form_layout)
        # add stuff
        my_lable = pq.QLabel('It is a lable ')
        my_lable.setFont(gui.QFont('Helvetica', 12))
        first_name = pq.QLineEdit()
        last_name = pq.QLineEdit()
        # add rows to the app
        form_layout.addRow(my_lable)
        # form_layout.addRow('first name ', first_name)
        # Two widgets in one row: the line edits sit side by side.
        form_layout.addRow(first_name, last_name)
        form_layout.addRow(pq.QPushButton('Push Button'))
        self.show()
self.show()
# Script entry point: create the Qt application, build the window
# (which shows itself), and run the event loop until it is closed.
app = pq.QApplication([])
main = Main()
app.exec_()
"shajidurrahmansaad10@gmail.com"
] | shajidurrahmansaad10@gmail.com |
7dd6b9b1a4bf5461d0b8bd571ed30fc871762159 | f50d20e98199a1422748a1e5586b99cbfb32080d | /Proyecto_POOUber/Python/carBasic.py | b94884e009eb86fe4cbcf69f6f63ad54da82276a | [] | no_license | DrCrisalod/curso_poo_platzi | 25bea414771ea072cf45b41121a343d32edaf561 | fbe91002069a4ad9fe14b13a9db96c1d26f87ac1 | refs/heads/master | 2023-06-17T15:00:50.768847 | 2021-07-16T15:57:22 | 2021-07-16T15:57:22 | 384,584,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | from car import Car
class CarBasic(Car):
    """A basic car: a ``Car`` that also records its brand and model.

    Bug fix: the original assigned the *type* ``str`` as class attributes
    (``brand = str``); these were clearly intended as type annotations,
    which is what they are now.  Instances always set both in __init__.
    """
    brand: str
    model: str

    def __init__(self, license, driver, brand, model):
        super(CarBasic, self).__init__(license, driver)
        self.brand = brand
        self.model = model
"crisalod@hotmail.com"
] | crisalod@hotmail.com |
6217a2afd6175a6fbc4201550caffadfef8492ad | 7f928a8f3704a7b9d58c500ea78528efb88bf169 | /task6/email_scraper.py | 690daefbb82c4535a57487d6e034d50032d05407 | [
"MIT"
] | permissive | degenerat3/auditlab3 | 4e034594d4c79b11a3e80dd551fa44fa2c276cc2 | ab814c2f6f582a2de3bb239e8406a7557ad65c46 | refs/heads/master | 2020-08-05T02:09:32.922820 | 2019-10-07T14:45:06 | 2019-10-07T14:45:06 | 212,358,478 | 1 | 1 | MIT | 2019-10-07T14:26:02 | 2019-10-02T14:11:32 | PowerShell | UTF-8 | Python | false | false | 2,957 | py | import argparse
import ipaddress
import urllib.request
import re
import gc
import concurrent.futures
def format_url(child, parent):
    """Resolve *child* against *parent* and return an absolute URL string.

    A child that already carries both a scheme and a network location is
    returned as-is; otherwise the missing scheme/netloc come from the
    parent, the path is joined onto the parent's path, and the parent's
    query string is inherited when the child has none.
    """
    parts = urllib.parse.urlsplit(child)
    if parts.scheme and parts.netloc:
        # Already absolute -- nothing to merge.
        return urllib.parse.urlunsplit(parts)
    base = urllib.parse.urlsplit(parent)
    merged = parts._replace(
        scheme=base.scheme,
        netloc=base.netloc,
        path=urllib.parse.urljoin(base.path, parts.path),
    )
    if not merged.query and base.query:
        merged = merged._replace(query=base.query)
    return urllib.parse.urlunsplit(merged)
def extract(body):
    """Pull e-mail addresses and anchor hrefs out of raw HTML bytes.

    Returns a ``(emails, links)`` pair of sets of ``bytes`` objects.
    """
    email_pattern = re.compile(rb'[a-zA-Z0-9.\-_]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,24}')
    link_pattern = re.compile(rb'<a\s+(?:[^>]*?\s+)?href="([^"]*)"')
    emails = {address for address in email_pattern.findall(body)}
    links = {target for target in link_pattern.findall(body)}
    return emails, links
def get_page(url):
    """
    Fetch *url* and return the raw response body as bytes.

    NOTE(review): no timeout is set, so a stalled server can block a
    worker thread indefinitely.
    """
    with urllib.request.urlopen(url) as conn:
        return conn.read()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('url', help='url to begin crawl')
    # Bug fix: without type=int, args.depth was a string and `depth + 1`
    # raised TypeError whenever -d was supplied.
    parser.add_argument('-d', '--depth', type=int, help='depth to crawl (take care)')
    parser.add_argument('-s', '--scope', help='limit scope to certain domain')
    args = parser.parse_args()

    depth = args.depth if args.depth is not None else 2
    # Bug fix: `scope` was referenced in the crawl loop but never bound,
    # raising NameError as soon as any link was found.
    scope = args.scope

    visit = {args.url}       # frontier for the current depth level
    visited = set()          # everything fetched so far
    # Bug fix: `emails` was re-created inside the depth loop, so only the
    # last level's addresses were printed; accumulate across all levels.
    emails = set()

    for _ in range(depth + 1):
        content = dict()
        # Fetch every page in the frontier concurrently (best effort).
        with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
            future_thread = {executor.submit(get_page, v): v for v in visit}
            for future in concurrent.futures.as_completed(future_thread):
                page_url = future_thread[future]
                try:
                    content[page_url] = future.result()
                except Exception:
                    pass  # skip pages that fail to download
        visited.update(visit)
        visit = set()
        # Extract e-mails and follow-up links from the fetched bodies.
        with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
            future_thread = {executor.submit(extract, body): page_url
                             for page_url, body in content.items()}
            for future in concurrent.futures.as_completed(future_thread):
                page_url = future_thread[future]
                try:
                    found_emails, found_links = future.result()
                    emails.update(found_emails)
                    # Bug fix: extract() yields bytes; decode before resolving
                    # so str parents/scope can be mixed in without TypeError.
                    resolved = {format_url(link.decode('utf-8', 'ignore'), page_url)
                                for link in found_links}
                    for link in resolved:
                        if not link or link in visited:
                            continue
                        # Bug fix: the original did visit.add({url}), adding an
                        # unhashable set and raising TypeError.
                        if scope is None or scope in link:
                            visit.add(link)
                except Exception:
                    pass  # skip bodies that fail to parse
    for e in emails:
        print(e.decode())
    del emails
    gc.collect()
| [
"dszafran116@gmail.com"
] | dszafran116@gmail.com |
e299c60be9d53012b8b77da119af0d359f1e54d0 | c4ffab6cc6b5470a212d1b6a0d241de9427266ee | /test/functional/rpc_bind.py | ee454df4bb87a910eb3f5749321d750a3a4c467f | [
"MIT"
] | permissive | Upsidedoge/upsidedoge | 1b8d49787eedb84cb7c5aff77549d7d1239ab807 | 32dd022d43b8b90ae1aa1ad7d81c0dfeb89611a2 | refs/heads/main | 2023-04-26T16:56:17.024158 | 2021-05-21T21:12:57 | 2021-05-21T21:12:57 | 369,643,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,428 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running upsidedoged with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
    """Functional test for -rpcbind / -rpcallowip: starts one node with
    various bind settings and verifies both the actually-bound sockets
    (read from the OS, hence Linux-only) and the allow-list behaviour."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.bind_to_localhost_only = False
        self.num_nodes = 1

    def setup_network(self):
        # Create the node object but do not start it; each sub-test starts
        # it with its own argument set.
        self.add_nodes(self.num_nodes, None)

    def add_options(self, parser):
        parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
        parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
        parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)

    def run_bind_test(self, allow_ips, connect_to, addresses, expected):
        '''
        Start a node with requested rpcallowip and rpcbind parameters,
        then try to connect, and check if the set of bound addresses
        matches the expected set.
        '''
        self.log.info("Bind test for %s" % str(addresses))
        expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
        base_args = ['-disablewallet', '-nolisten']
        if allow_ips:
            base_args += ['-rpcallowip=' + x for x in allow_ips]
        binds = ['-rpcbind='+addr for addr in addresses]
        self.nodes[0].rpchost = connect_to
        self.start_node(0, base_args + binds)
        # Compare the sockets the OS reports for this pid to what we expect.
        pid = self.nodes[0].process.pid
        assert_equal(set(get_bind_addrs(pid)), set(expected))
        self.stop_nodes()

    def run_allowip_test(self, allow_ips, rpchost, rpcport):
        '''
        Start a node with rpcallow IP, and request getnetworkinfo
        at a non-localhost IP.
        '''
        self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
        node_args = \
            ['-disablewallet', '-nolisten'] + \
            ['-rpcallowip='+x for x in allow_ips] + \
            ['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
        self.nodes[0].rpchost = None
        self.start_nodes([node_args])
        # connect to node through non-loopback interface
        node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
        node.getnetworkinfo()
        self.stop_nodes()

    def run_test(self):
        # due to OS-specific network stats queries, this test works only on Linux
        if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
            raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
        self.log.info("Check for linux")
        if not sys.platform.startswith('linux'):
            raise SkipTest("This test can only be run on linux.")
        self.log.info("Check for ipv6")
        have_ipv6 = test_ipv6_local()
        if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
            raise SkipTest("This test requires ipv6 support.")
        self.log.info("Check for non-loopback interface")
        # Pick the first interface address that is not IPv4 loopback.
        self.non_loopback_ip = None
        for name,ip in all_interfaces():
            if ip != '127.0.0.1':
                self.non_loopback_ip = ip
                break
        if self.non_loopback_ip is None and self.options.run_nonloopback:
            raise SkipTest("This test requires a non-loopback ip address.")
        self.defaultport = rpc_port(0)
        if not self.options.run_nonloopback:
            self._run_loopback_tests()
        if not self.options.run_ipv4 and not self.options.run_ipv6:
            self._run_nonloopback_tests()

    def _run_loopback_tests(self):
        if self.options.run_ipv4:
            # check only IPv4 localhost (explicit)
            self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
                [('127.0.0.1', self.defaultport)])
            # check only IPv4 localhost (explicit) with alternative port
            self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
                [('127.0.0.1', 32171)])
            # check only IPv4 localhost (explicit) with multiple alternative ports on same host
            self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
                [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
        else:
            # check default without rpcallowip (IPv4 and IPv6 localhost)
            self.run_bind_test(None, '127.0.0.1', [],
                [('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
            # check default with rpcallowip (IPv4 and IPv6 localhost)
            self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
                [('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
            # check only IPv6 localhost (explicit)
            self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
                [('::1', self.defaultport)])
            # check both IPv4 and IPv6 localhost (explicit)
            self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
                [('127.0.0.1', self.defaultport), ('::1', self.defaultport)])

    def _run_nonloopback_tests(self):
        self.log.info("Using interface %s for testing" % self.non_loopback_ip)
        # check only non-loopback interface
        self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
            [(self.non_loopback_ip, self.defaultport)])
        # Check that with invalid rpcallowip, we are denied
        self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
        assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
# Standard functional-test entry point.
if __name__ == '__main__':
    RPCBindTest().main()
| [
"36169687+blockinator@users.noreply.github.com"
] | 36169687+blockinator@users.noreply.github.com |
8dc69223b415e0f7687dea851cae24b384041290 | 1f6142ae595cc2dd5ed372e23a7a986a5a1d08d7 | /app/behaviours/reporter.py | e35ba37483c162ce97b10eb43fbd41445a078e76 | [
"MIT"
] | permissive | jchristov/crypto-signal | 5f34ee4bb25cc431d767b4d962b641f4f0c859f5 | 7d3c6b76667e985cacccf0840084ea88f179d770 | refs/heads/master | 2021-05-10T08:33:52.958976 | 2018-01-25T04:16:47 | 2018-01-25T04:16:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | """Run reports from the database information.
"""
import structlog
from tabulate import tabulate
class ReporterBehaviour():
"""Run reports from the database information.
"""
def __init__(self, behaviour_config, exchange_interface,
notifier, db_handler):
"""Initialize ReporterBehaviour class.
Args:
behaviour_config (dict): A dictionary of configuration for this behaviour.
exchange_interface (ExchangeInterface): Instance of the ExchangeInterface class for
making exchange queries.
notifier (Notifier): Instance of the notifier class for informing a user when a
threshold has been crossed.
db_handler (DatbaseHandler): Instance of the DatabaseHandler class for reading and
storing transaction data.
"""
self.logger = structlog.get_logger()
self.behaviour_config = behaviour_config
self.exchange_interface = exchange_interface
self.notifier = notifier
self.db_handler = db_handler
def run(self, market_pairs):
"""The behaviour entrypoint
Args:
market_pairs (list): No function yet.
"""
header = "====== REPORT FOR {} ======".format(self.behaviour_config['name'])
transaction_count = self.db_handler.read_transactions().count()
transactions = "I have made {} transactions since I began.".format(transaction_count)
total_btc_value = 0
holdings_query = self.db_handler.read_holdings()
holdings = []
for row in holdings_query:
if row.volume_total > 0:
if row.symbol == "BTC":
btc_value = row.volume_total
else:
btc_value = self.exchange_interface.get_btc_value(
row.exchange,
row.symbol,
row.volume_total
)
total_btc_value += btc_value
holdings.append([
row.exchange,
row.symbol,
format(row.volume_total, '.8f'),
format(btc_value, '.8f')
])
holdings_table = tabulate(holdings, headers=["exchange", "symbol", "volume", "btc value"])
total_value = "I am currently holding {} in btc".format(format(total_btc_value, '.8f'))
message = header + "\n" + transactions + "\n\n" + holdings_table + "\n\n" + total_value
self.logger.info(message)
self.notifier.notify_all(message)
| [
"shadow_reaver2@hotmail.com"
] | shadow_reaver2@hotmail.com |
2e56820469786281eea6a55179cfaa0fae7337b3 | 5635a3b02f7695a50471c8c08970520858d2277c | /venv/bin/pyrsa-sign | 12ff831eef1cc1f5b697b68f04379992425ffe5c | [] | no_license | BethMwangi/Flask-social | 358325ea09b143c2aaa059594607d0a872fcabd1 | 4d0d902ee959054a95f0d7ab0dbfee3692521f91 | refs/heads/master | 2020-04-02T06:13:40.307975 | 2016-06-13T17:16:11 | 2016-06-13T17:16:11 | 60,806,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | #!/home/beth/Documents/Github/Flask-social/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import sign
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(sign())
| [
"wanjikumwangib@gmail.com"
] | wanjikumwangib@gmail.com | |
e60acfc6dfaaa850aa14c36de95d0f2dd9dbd345 | baefee5fbbc015cdc0b71ffc8956fad2d7d93683 | /openstack_dashboard/dashboards/admin/routers/ports/forms.py | 6010f5c792917a435eb64386f99e60d176fda8e1 | [
"Apache-2.0"
] | permissive | dsullivanwr/stx-horizon | 8312fa01bf28a6bfad175e66f4172add6cabf60c | ee6c9b17e34d1dc310790b9d5e0252361c86b8fb | refs/heads/master | 2020-03-29T06:51:49.902050 | 2018-10-11T19:37:40 | 2018-10-11T19:37:40 | 149,643,878 | 0 | 0 | Apache-2.0 | 2018-10-10T16:02:36 | 2018-09-20T17:11:28 | Python | UTF-8 | Python | false | false | 2,970 | py | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers.ports \
import forms as project_forms
LOG = logging.getLogger(__name__)
class SetGatewayForm(project_forms.SetGatewayForm):
network_id = forms.ChoiceField(label=_("External Network"))
ip_address = forms.IPField(
label=_("IP Address (optional)"),
required=False,
initial="",
help_text=_("IP address of gateway interface (e.g. 192.168.0.254). "
"Specify an explicit address to use when creating the "
"gateway interface. If one is not specified an address "
"will be allocated from the external subnet."),
version=forms.IPv4 | forms.IPv6,
mask=False)
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
enable_snat = forms.BooleanField(label=_("Enable SNAT"),
initial=True, required=False)
failure_url = 'horizon:admin:routers:index'
def handle(self, request, data):
try:
ip_address = data.get('ip_address') or None
enable_snat = data.get('enable_snat', True)
api.neutron.router_add_gateway(request,
data['router_id'],
data['network_id'],
ip_address=ip_address,
enable_snat=enable_snat)
msg = _('Gateway interface is added')
LOG.debug(msg)
messages.success(request, msg)
return True
except Exception as e:
msg = _('Failed to set gateway %s') % e
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
| [
"dtroyer@gmail.com"
] | dtroyer@gmail.com |
9fe793d1adfbfc1416174d2413534e74715e3bdc | d1ff371139640cfbead8afffbb35b33687d3770f | /uspto/cleanup.py | b6eafdbbbab7ac4e2159893fd92e90e10e9d0cca | [] | no_license | jameskli/uspto_scrapy | dabeba2a4e9a1ceb3146350b1ed06abfcea04b6f | b14647de4595d5eaa50a158f3e2706db8caa4394 | refs/heads/master | 2023-02-06T22:34:56.879684 | 2020-12-28T00:23:33 | 2020-12-28T00:23:33 | 324,856,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,682 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import collections
import operator
""" This is a collection of scripts used to clean up malformed patent data. USPTO data is extremely messy and therefore needs several customized cleanup functions
Note that this is more of a data exploration file and therefore has a lot of hard-coded filenames. For demo only.
"""
def fix_malformed_locations():
'''malformed_1: Location is CA, malformed_2: Location is ), malformed_3: Assignee malformed '''
malformed_count_1=0
malformed_count_2=0
malformed_count_3=0
full_count=0
clean_count=0
row_header = ["Number","Title","Date","FiledDate","Assignee","Location","Class","Abstract"]
with open('patent_data.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
full_count+=1
row_dict = dict(zip(row_header, row))
if row_dict['Location'] == "CA":
malformed_count_1+=1
with open('malformed_1.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow(row)
elif row_dict['Location'] == ")":
malformed_count_2+=1
with open('malformed_2.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow(row)
elif row_dict['Assignee'] == ")":
malformed_count_3+=1
with open('malformed_3.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow(row)
else:
clean_count+=1
with open('clean.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow(row)
print malformed_count_1, malformed_count_2, malformed_count_3, clean_count, full_count
def fix_malformed_date():
'''malformed_B1: rows where Date has an asterisk, malformed B2: rows where location is unknown'''
malformed_count_B1 = 0
malformed_count_B2 = 0
full_count=0
clean_count=0
row_header = ["Number","Title","Date","FiledDate","Assignee","Location","Class","Abstract"]
with open('patent_data_w_header_3.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
full_count+=1
row_dict = dict(zip(row_header, row))
if row_dict['Date'].startswith("*"):
malformed_count_B1 += 1
with open('malformed_B1.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow(row)
elif row_dict['Location'] == "":
malformed_count_B2 += 1
with open('malformed_B2.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow(row)
else:
clean_count+=1
with open('clean_B.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow(row)
print malformed_count_B1, malformed_count_B2, clean_count, full_count
def print_patents_with_unknown_locations():
'''prints out assignee column of the patents with unknown locations'''
assignee_set = set ()
with open('malformed_B2a.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
assignee_set.add(row[4])
for item in assignee_set:
print item
def guess_unknown_location():
'''Checks the ~2500 assignees with unknown locations against the assignees with known locations and returns the most frequent one'''
with open('unknown_assignees', 'rU') as read_file:
unknown_assignees = read_file.read().splitlines()
assignee_dict = collections.defaultdict(dict)
for assignee in unknown_assignees:
assignee_dict[assignee] = {}
with open('clean_B01.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
if row[4] in unknown_assignees:
if row[5] not in assignee_dict[row[4]]:
assignee_dict[row[4]][row[5]] = 1
else:
current_count = assignee_dict[row[4]][row[5]]
assignee_dict[row[4]][row[5]] = 1 + current_count
would_be_fixed = 0
with open('malformed_B2a.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
if row[4] in assignee_dict:
if len(assignee_dict[row[4]]):
would_be_fixed += 1
print "Would be fixed ", would_be_fixed
fixed_counts = 0
still_broken_count = 0
no_choice_count = 0
for key in assignee_dict:
if assignee_dict[key]:
if len(assignee_dict[key]) == 1:
no_choice_count += 1
print no_choice_count,key, assignee_dict[key]
with open('no_choice_assignees.csv', 'a+') as write_file:
csv_writer = csv.writer(write_file, quoting=csv.QUOTE_ALL)
csv_writer.writerow([key,next(iter(assignee_dict[key]))])
else:
fixed_counts += 1
else:
still_broken_count += 1
print "No choice: ",no_choice_count, "Fixed: ",fixed_counts," StillBroken: ",still_broken_count, " len: ",len(unknown_assignees)
def generate_location_frequency_dict():
'''Checks the entire list assignees with known locations and compiles a frequency dictionary'''
with open('known_assignees', 'rU') as read_file:
known_assignees = read_file.read().splitlines()
assignee_dict = collections.defaultdict(dict)
for assignee in known_assignees:
assignee_dict[assignee] = {}
with open('clean_B01.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
if row[4] in known_assignees:
if row[5] not in assignee_dict[row[4]]:
assignee_dict[row[4]][row[5]] = 1
else:
current_count = assignee_dict[row[4]][row[5]]
assignee_dict[row[4]][row[5]] = 1 + current_count
fixed_counts = 0
still_broken_count = 0
no_choice_count = 0
for key in assignee_dict:
if assignee_dict[key]:
if len(assignee_dict[key]) == 1:
no_choice_count += 1
else:
fixed_counts += 1
print key, assignee_dict[key]
else:
still_broken_count += 1
print "No choice: ",no_choice_count, "Fixed: ",fixed_counts," StillBroken: ",still_broken_count, " len: ",len(known_assignees)
def create_all_assignees():
'''Checks the entire list of assignees and compiles a frequency dictionary, excluding empties'''
assignee_set = set ()
with open('patent_C.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
assignee_set.add(row[4])
for item in assignee_set:
print item
def create_dictionary_of_assignee_locations():
with open('all_assignees', 'rU') as read_file:
all_assignees = read_file.read().splitlines()
assignee_dict = collections.defaultdict(dict)
for assignee in all_assignees:
assignee_dict[assignee] = {}
with open('patent_C.csv', 'rU') as read_file:
csv_reader = csv.reader(read_file, delimiter=',', quotechar='"')
for row in csv_reader:
if row[4] in all_assignees:
if row[5]:
if row[5] not in assignee_dict[row[4]]:
assignee_dict[row[4]][row[5]] = 1
else:
current_count = assignee_dict[row[4]][row[5]]
assignee_dict[row[4]][row[5]] = 1 + current_count
for key in assignee_dict:
print key, assignee_dict[key]
def main():
create_dictionary_of_assignee_locations()
main()
| [
"jameskli@gmail.com"
] | jameskli@gmail.com |
4bc48c2468c645af9bfc4312c03978f04d3ee7d2 | b269ef49ea09a1c73c465c726567bcaa4a689e07 | /setup.py | ed64eab63474777c980423c2662622d98cd675eb | [] | no_license | gian88/django-admin-bootstrap | 4d470ea8ff5dd02db131239d0425c180ca4569e8 | 7b05e45b6045de4a9f569a655eb229fd434a4c49 | refs/heads/master | 2021-01-16T22:00:29.089990 | 2016-07-22T17:40:48 | 2016-07-22T17:40:48 | 9,821,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
f = open(os.path.join(os.path.dirname(__file__), 'README.markdown'))
readme = f.read()
f.close()
setup(
name='bootstrap_admin',
version='0.1.9',
description='Twitter Bootstrap Responsive Skin for Django Admin.',
long_description=readme,
author='Douglas Miranda',
author_email='douglasmirandasilva@gmail.com',
url='https://github.com/douglasmiranda/django-admin-bootstrap',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
| [
"douglasmirandasilva@gmail.com"
] | douglasmirandasilva@gmail.com |
2c48dbf554941074c8f1a097ef283680bdb41899 | a1cc4d924019b4afc952ca70686b1950d3ce9890 | /ts_ws/devel/.private/hark_msgs/lib/python2.7/dist-packages/hark_msgs/msg/_HarkSourceVal.py | 95f1fba53b5347a8e390204b788c32c6f1f64df7 | [] | no_license | dsadhvi/Robot-Says-Hello | 95f06d6a905805b64f4aaca68f5abc3156bba9f4 | 3c3884ed521d19951190b0e9b000ea7e7bf15950 | refs/heads/master | 2020-03-31T01:06:35.628243 | 2018-10-05T20:49:23 | 2018-10-05T20:49:23 | 151,767,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,408 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from hark_msgs/HarkSourceVal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class HarkSourceVal(genpy.Message):
_md5sum = "ee0b7cc36255925b0a96b74055ee462f"
_type = "hark_msgs/HarkSourceVal"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 id
float32 power
float32 x
float32 y
float32 z
float32 azimuth
float32 elevation
"""
__slots__ = ['id','power','x','y','z','azimuth','elevation']
_slot_types = ['int32','float32','float32','float32','float32','float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
id,power,x,y,z,azimuth,elevation
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(HarkSourceVal, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.id is None:
self.id = 0
if self.power is None:
self.power = 0.
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.z is None:
self.z = 0.
if self.azimuth is None:
self.azimuth = 0.
if self.elevation is None:
self.elevation = 0.
else:
self.id = 0
self.power = 0.
self.x = 0.
self.y = 0.
self.z = 0.
self.azimuth = 0.
self.elevation = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_i6f().pack(_x.id, _x.power, _x.x, _x.y, _x.z, _x.azimuth, _x.elevation))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 28
(_x.id, _x.power, _x.x, _x.y, _x.z, _x.azimuth, _x.elevation,) = _get_struct_i6f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_i6f().pack(_x.id, _x.power, _x.x, _x.y, _x.z, _x.azimuth, _x.elevation))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 28
(_x.id, _x.power, _x.x, _x.y, _x.z, _x.azimuth, _x.elevation,) = _get_struct_i6f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_i6f = None
def _get_struct_i6f():
global _struct_i6f
if _struct_i6f is None:
_struct_i6f = struct.Struct("<i6f")
return _struct_i6f
| [
"tracyzhang98@gmail.com"
] | tracyzhang98@gmail.com |
2535c72550c0e3f19fab1535c0a7647f241f6641 | cae3f52bcf0a46bdd4c2be5976afc30ad6e3b89a | /external_tools/src/main/python/omero56/scripts/get_image_details_from_dcc_using_media_api.py | 51094dd478023d82a54a11c50dbcef984126f3d7 | [
"Apache-2.0"
] | permissive | mpi2/PhenotypeData | 648eb35460f47e5d6dc3c51ddad91f545fa38c04 | df87b1b9452b5186fa85dbfc74dcef1dc596b825 | refs/heads/master | 2023-07-25T02:56:47.364058 | 2023-07-10T13:58:23 | 2023-07-10T13:58:23 | 37,316,577 | 2 | 6 | Apache-2.0 | 2023-09-11T09:25:13 | 2015-06-12T10:40:45 | Java | UTF-8 | Python | false | false | 3,264 | py | #!/usr/bin/python
"""Create a CSV file using the DCC media API
"""
import sys
import os
import requests
import json
import argparse
uniqueUris=set()
parser = argparse.ArgumentParser(
description='Create CSV file of images ready to download using DCC media API'
)
parser.add_argument('-o', '--output-path', default="./media_api_files.csv",
dest='output_path',
help='Path to save the csv file'
)
parser.add_argument('-s', '--start', help='record to start from')
parser.add_argument('-r', '--result-size',
help='Number of records to get per site. ' + \
'Has no effect if "start" not supplied')
parser.add_argument('-v', '--verbose', action='store_true', help='print verbose messages')
args = parser.parse_args()
# We get the files we are interested in for each site using the
# media API
sites = [
('bcm', 'BCM',),
('gmc','HMGU',),
('h', 'MRC Harwell'),
('ics', 'ICS',),
('j', 'JAX',),
('tcp', 'TCP'),
('ning', 'NING',),
('rbrc', 'RBRC',),
('ucd', 'UC Davis',),
('wtsi', 'WTSI',),
('kmpc', 'KMPC',),
('ccpcz', 'CCP-IMG',),
]
header = "checksum,download_file_path,phenotyping_center," + \
"pipeline_stable_id,procedure_stable_id,datasource_name," + \
"parameter_stable_id\n"
datasource_name = "IMPC"
im_details = [header,]
numFound = 0
for site, phenotyping_center in sites:
query_string = f"https://api.mousephenotype.org/media/dccUrl/{site}?status=done"
if args.start is not None:
query_string += f"&start={args.start}"
if args.result_size is not None:
query_string += f"&resultsize={args.result_size}"
if args.verbose:
print(query_string)
v = json.loads(requests.get(query_string).text)
try:
docs = v['mediaFiles']
except KeyError as key_error:
print("WARNING - no media files returned for site: " + site)
continue
numFound += len(docs)
for doc in docs:
download_file_path=doc['dccUrl']
download_file_path=download_file_path.lower()
if download_file_path.find('mousephenotype.org') < 0 or \
download_file_path.endswith('.mov') or \
download_file_path.endswith('.bz2'):
continue
# On 13/11/2019 got a KeyError for phenotyping centre. This
# should not happen, but code modified appropriately
try:
pipeline_stable_id=doc['pipelineKey']
procedure_stable_id=doc['procedureKey']
parameter_stable_id=doc['parameterKey']
im_details.append(",".join([
doc['checksum'],
doc['dccUrl'],
phenotyping_center,
doc['pipelineKey'],
doc['procedureKey'],
datasource_name,
doc['parameterKey'],
]) + "\n"
)
except KeyError as e:
print("Key " + str(e)+ " not returned by media API - not including " + download_file_path)
continue
with open(args.output_path, "wt") as fid:
fid.writelines(im_details)
print(f"Found {numFound} urls. Written output to {args.output_path}")
| [
"kola@ebi.ac.uk"
] | kola@ebi.ac.uk |
e496f6a4b65e3fb3ed5cffda376a44cc1e6829cb | 7357d367b0af4650ccc5b783b7a59090fdde47bb | /py-appscript/tags/py-appscript-0.18.0/Lib/aem/types/objectspecifiers/testclause.py | cd023de05f848f30e64af15fc725899e0d6eb54c | [
"MIT"
] | permissive | BarracudaPff/code-golf-data-python | fb0cfc74d1777c4246d56a5db8525432bf37ab1a | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | refs/heads/main | 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,297 | py | """testclause -- Used to construct test expressions for use in by-filter references.
(C) 2005 HAS
"""
from CarbonX import kAE
import base
class Test(base.BASE):
"""Base class for all comparison and logic test classes (Equals, NotEquals, AND, OR, etc.)."""
def AND(self, operand2, *operands):
"""AND(test,...) --> logical AND test"""
return AND((self, operand2) + operands)
def OR(self, operand2, *operands):
"""OR(test,...) --> logical OR test"""
return OR((self, operand2) + operands)
NOT = property(lambda self: NOT((self,)), doc="NOT --> logical NOT test")
class _ComparisonTest(Test):
"""Subclassed by comparison test classes."""
def __init__(self, operand1, operand2):
self._operand1 = operand1
self._operand2 = operand2
def __repr__(self):
return "%r.%s(%r)" % (self._operand1, self._name, self._operand2)
def AEM_resolve(self, obj):
return getattr(self._operand1.AEM_resolve(obj), self._name)(self._operand2)
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeCompDescriptor, [(kAE.keyAEObject1, codecs.pack(self._operand1)), (kAE.keyAECompOperator, self._operator), (kAE.keyAEObject2, codecs.pack(self._operand2))])
class GreaterThan(_ComparisonTest):
_name = "gt"
_operator = base.packEnum(kAE.kAEGreaterThan)
class GreaterOrEquals(_ComparisonTest):
_name = "ge"
_operator = base.packEnum(kAE.kAEGreaterThanEquals)
class Equals(_ComparisonTest):
_name = "eq"
_operator = base.packEnum(kAE.kAEEquals)
class NotEquals(Equals):
_name = "ne"
_operatorNOT = base.packEnum(kAE.kAENOT)
def AEM_packSelf(self, codecs):
return self._operand1.eq(self._operand2).NOT.AEM_packSelf(codecs)
class LessThan(_ComparisonTest):
_name = "lt"
_operator = base.packEnum(kAE.kAELessThan)
class LessOrEquals(_ComparisonTest):
_name = "le"
_operator = base.packEnum(kAE.kAELessThanEquals)
class BeginsWith(_ComparisonTest):
_name = "beginswith"
_operator = base.packEnum(kAE.kAEBeginsWith)
class EndsWith(_ComparisonTest):
_name = "endswith"
_operator = base.packEnum(kAE.kAEEndsWith)
class Contains(_ComparisonTest):
_name = "contains"
_operator = base.packEnum(kAE.kAEContains)
class IsIn(Contains):
_name = "isin"
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeCompDescriptor, [(kAE.keyAEObject1, codecs.pack(self._operand2)), (kAE.keyAECompOperator, self._operator), (kAE.keyAEObject2, codecs.pack(self._operand1))])
class _LogicalTest(Test):
"""Subclassed by logical test classes."""
def __init__(self, operands):
self._operands = operands
def __repr__(self):
return "%r.%s(%s)" % (self._operands[0], self._name, repr(list(self._operands[1:]))[1:-1])
def AEM_resolve(self, obj):
return getattr(self._operands[0].AEM_resolve(obj), self._name)(*self._operands[1:])
def AEM_packSelf(self, codecs):
return base.packListAs(kAE.typeLogicalDescriptor, [(kAE.keyAELogicalOperator, self._operator), (kAE.keyAELogicalTerms, codecs.pack(self._operands))])
class AND(_LogicalTest):
_operator = base.packEnum(kAE.kAEAND)
_name = "AND"
class OR(_LogicalTest):
_operator = base.packEnum(kAE.kAEOR)
_name = "OR"
class NOT(_LogicalTest):
_operator = base.packEnum(kAE.kAENOT)
_name = "NOT"
def __repr__(self):
return "%r.NOT" % self._operands[0]
def AEM_resolve(self, obj):
return self._operands[0].AEM_resolve(obj).NOT | [
"sokolov.yas@gmail.com"
] | sokolov.yas@gmail.com |
33426451207cff40bb66155aa128bb8d64225249 | c224275ff2ff634abcd072c3aa94b68bb5801944 | /abcli/__init__.py | 281b2bb040823d5ed05354eed9b4ed54853060af | [
"MIT"
] | permissive | john5f35/abcli | 5dc6d07db5a898151848ac3defc2dbe3eb049203 | fa696cf6bcc2f26fbd754e01952553ce09e5e006 | refs/heads/master | 2021-06-28T05:04:20.806246 | 2020-02-03T07:00:23 | 2020-02-03T07:00:23 | 216,186,047 | 3 | 1 | MIT | 2021-04-20T18:47:12 | 2019-10-19T10:08:29 | Python | UTF-8 | Python | false | false | 691 | py | # CLI
#
# Commands:
# - transactions import <json>
# - transaction show (?)
# - account show [name] [date-from] [date-to] [aggregation:week|fortnight|*month*|quarter|year]
# Shows balance, average in aggregation method, between two dates
# - account graph [name] [date-from] [date-to] [aggregation:...]
# - budget import <json>
# - budget show [name] [account]
# Shows progress & summary of a named budget
# - budget project [name] [unit] [aggregation:...]
import logging
# logging.basicConfig(format="[%(levelname)s] %(message)s")
import coloredlogs
# TODO: maybe load format from a config file?
coloredlogs.install(fmt="%(message)s", logger=logging.getLogger())
| [
"john.u5f35@gmail.com"
] | john.u5f35@gmail.com |
b80614ba72616b099fef5e0b5cef78ed8d996b89 | 58ad9c5fbdce8ebd29c20c9e736fd482c32c79c6 | /app.py | d45ac746417073a79199c054b3724f3a1e2a60c7 | [] | no_license | laurachan2020/STA9760_helloworld | 2193fc6cfc93b07ebf0bca41f69dad2bc8fd1bd8 | 05029797d7cca405874fdbad5503250a356c0940 | refs/heads/master | 2021-02-07T20:34:55.481754 | 2020-03-01T02:27:42 | 2020-03-01T02:27:42 | 244,073,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | if __name__ == '__main__':
print()[D''[DHello, World!')
| [
"laura.chan.2010@gmail.com"
] | laura.chan.2010@gmail.com |
45d3df7ae25348ecc17cff66e2a05ae15d1297e7 | daf1937059003f1d1ded416e903fe4418629b24d | /ukoly/1.1 Intro to Programming/string_operations.py | 221e90e7096ce14c48f40202099dd0a97e3917e5 | [] | no_license | simona-engeto/unit_converter | 7367c49b65148c019adb5e7e57b7d47457cb1f95 | 37080daa404a5aca2503fb794065c773dc0daff7 | refs/heads/master | 2021-01-07T13:54:45.420943 | 2020-03-12T14:20:02 | 2020-03-12T14:20:02 | 241,715,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # Save name
name = input('Enter name: ')
# Print name
print('Saving', "'", name, " '", ' into name...')
# Save surname
surname = input('Enter surname: ')
# Print surname
print('Saving', "'", surname, " '", ' into surname...')
# Create and print variable full_name
full_name = name + surname
print('Full name: ', full_name)
# Create and print variable name_length
name_length = len(full_name)
print('Length of full name: ', name_length)
# Print bounded variable full_name
print('=' * name_length)
print(full_name)
print('=' * name_length) | [
"noreply@github.com"
] | noreply@github.com |
0a61d3455c62c56d19a40625fbc67c86684cf673 | de64b143a346585f51590bd674e8d13bbc672386 | /algorithm/Intermediate_Class/뉴스 클러스터링/Wooseong.py | a3809d890839206d63713c42df2c288ccf43d48e | [] | no_license | ai-kmu/etc | 304ec20f59e4026025abdcbcae21863c80630dcb | 9c29941e19b7dd2a2037b110dd6e16690e9a0cc2 | refs/heads/master | 2023-08-21T16:30:31.149956 | 2023-08-21T16:26:19 | 2023-08-21T16:26:19 | 199,843,899 | 3 | 24 | null | 2023-05-31T09:56:59 | 2019-07-31T11:36:16 | Jupyter Notebook | UTF-8 | Python | false | false | 1,976 | py | import copy
def solution(str1, str2):
# 대소문자 무시 - 모두 소문자로
str1 = str1.lower()
str2 = str2.lower()
# 각 str을 다중 집합으로 만들기 - 알파벳쌍만 가능
# -> .isalpha()는 str가 모두 알파벳일 때만 True
set1 = []
for i in range(len(str1) - 1):
temp = str1[i:i+2]
if temp.isalpha():
set1.append(temp)
set2 = []
for i in range(len(str2) - 1):
temp = str2[i:i+2]
if temp.isalpha():
set2.append(temp)
# 두 집합이 모두 공집합일 경우는 1로 정의
if (not set1) and (not set2):
return 65536
# 교집합과 합집합
# 겹친 게 나올 경우
# 교집합에는 적은 개수만큼, 합집합에는 많은 개수만큼 넣음
# 안 겹치면 합집합에만 넣음
set1_copy = copy.deepcopy(set1)
set2_copy = copy.deepcopy(set2)
inter = []
union = []
# 둘 중 하나 다 떨어짐 = 겹칠 수 없음
while set1_copy and set2_copy:
elem = set1_copy.pop()
if elem in set2_copy:
# set1은 이미 하나 pop 해서 +1로 보정
in_set1 = set1_copy.count(elem) + 1
in_set2 = set2_copy.count(elem)
# 교집합엔 적은 개수만큼
inter += [elem] * min(in_set1, in_set2)
# 합집합엔 많은 개수만큼
union += [elem] * max(in_set1, in_set2)
# 넣은 거 빼기
set1_copy = [i for i in set1_copy if i != elem]
set2_copy = [i for i in set2_copy if i != elem]
# 안 겹치는 건 union에만 넣음
else:
union.append(elem)
# 합집합에 남은 거 더 해주기 (둘 중 하나는 빈 리스트)
union += set1_copy + set2_copy
# print("교", inter)
# print("합", union)
return ((len(inter) / len(union)) * 655360) // 10
| [
"noreply@github.com"
] | noreply@github.com |
311db5d22a903d069536d929651f95544e988e79 | 274122af4207364dcb5167f6bddf34171d3a043e | /info_by_run.py | d7c7f5386d24980991e90ab415515c4b05c24d87 | [] | no_license | ceslat/python-chilean-info | 92d0b387d5ebabb2546a4ce5ec3a20f88759ddbe | d98aeb9041680f87cb376258ed735c323d316a60 | refs/heads/main | 2023-03-02T09:17:46.381091 | 2021-02-10T01:23:37 | 2021-02-10T01:23:37 | 337,581,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | def get_data_by_run(run):
"""
Función que retorna un diccionario con la información personal de un ciudadano Chileno según el RUN que recibe. El RUN debe estar formateado 11.111.111-1
Requiere: requests, bs4
"""
import requests
from bs4 import BeautifulSoup
r = requests.post(
'https://www.nombrerutyfirma.com/rut',
data={
'term': run
}
)
contenido_web = BeautifulSoup(r.text, 'lxml')
tabla = contenido_web.find('table').find('tbody')
tds = tabla.findAll('td')
nombre = tds[0].get_text()
rut = tds[1].get_text()
sexo = tds[2].get_text()
direccion = tds[3].get_text()
comuna = tds[4].get_text()
response = {
'nombre': nombre,
'rut': rut,
'sexo': sexo,
'direccion': direccion,
'comuna': comuna
}
return response
print(get_data_by_run('11.111.111-1'))
| [
"noreply@github.com"
] | noreply@github.com |
cbb5b5e0a29153cfef89be24a515e1b90dbd5ce0 | 2a1e2c298773148983805f1e0fba62bc2bf79267 | /lib/network/vgg_base.py | 7057d7ce930283300e3f9abeacd0c7ce46869275 | [] | no_license | copperdong/CTPN | 42fde81010ba5c0bff193b4132d4c397c251dedd | 3d559406c7ad2a02ac54b07ff1cc3603b3c5b6c9 | refs/heads/master | 2020-11-25T10:51:23.753733 | 2019-07-22T12:29:15 | 2019-07-22T12:29:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | import tensorflow.contrib.slim as slim
from lib.utils.config import cfg
def vgg_base(inputs, scope=None):
featuremap_scale = 1
net = slim.conv2d(inputs, 64, [3, 3], scope='conv1_1')
net = slim.conv2d(net, 64, [3, 3], scope='conv1_2')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool1')
featuremap_scale *= 2
net = slim.conv2d(net, 128, [3, 3], scope='conv2_1')
net = slim.conv2d(net, 128, [3, 3], scope='conv2_2')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool2')
featuremap_scale *= 2
net = slim.conv2d(net, 256, [3, 3], scope='conv3_1')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_3')
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool3')
featuremap_scale *= 2
net = slim.conv2d(net, 512, [3, 3], scope='conv4_1')
net = slim.conv2d(net, 512, [3, 3], scope='conv4_2')
net = slim.conv2d(net, 512, [3, 3], scope='conv4_3')
if featuremap_scale != cfg["ANCHOR_WIDTH"]:
net = slim.max_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool4')
featuremap_scale *= 2
net = slim.conv2d(net, 512, [3, 3], scope='conv5_1')
net = slim.conv2d(net, 512, [3, 3], scope='conv5_2')
net = slim.conv2d(net, 512, [3, 3], scope='conv5_3')
return net, featuremap_scale
| [
"chizhanyuefeng@gmail.com"
] | chizhanyuefeng@gmail.com |
39f9f6cb12e59735ebe32a3c579294e54cc3f58e | 9039f309649d0b7c6dd974706fc507938ed0e47a | /03. Logistics.py | 51054e067ae37313a5cfc1e9833e3de6735c07c5 | [] | no_license | antondelchev/For-Loop---More-Exercises | 2b5dadb31c273611c15e6523b536f994a0353a52 | 891266ff8b931e19d179b22dd33647887814555e | refs/heads/main | 2023-03-03T11:59:16.990004 | 2021-02-16T15:01:02 | 2021-02-16T15:01:02 | 335,062,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | number_of_loads = int(input())
tonnes_total = 0
bus_tonnes_total = 0
truck_tonnes_total = 0
train_tonnes_total = 0
bus_price_total = 0
truck_price_total = 0
train_price_total = 0
for i in range(1, number_of_loads + 1):
tonnes = int(input())
tonnes_total += tonnes
if tonnes <= 3:
bus_tonnes_total += tonnes
bus_price_total += tonnes * 200
elif 4 <= tonnes <= 11:
truck_tonnes_total += tonnes
truck_price_total += tonnes * 175
elif tonnes >= 12:
train_tonnes_total += tonnes
train_price_total += tonnes * 120
average_ton_price = (bus_price_total + truck_price_total + train_price_total) / tonnes_total
percent_tonnes_bus = bus_tonnes_total / tonnes_total * 100
percent_tonnes_truck = truck_tonnes_total / tonnes_total * 100
percent_tonnes_train = train_tonnes_total / tonnes_total * 100
print(f"{average_ton_price:.2f}")
print(f"{percent_tonnes_bus:.2f}%")
print(f"{percent_tonnes_truck:.2f}%")
print(f"{percent_tonnes_train:.2f}%")
| [
"noreply@github.com"
] | noreply@github.com |
85ac76387d3daebff05f1af62c4f80014636375e | 65f16a0567f1a1a7111e84deb730afdf7f757acf | /software/py/pws_menu.py | 63af33d83218a642c58b1bf71a02b940457aacb6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | shibayan1122/PWS | 4c3c3511ac51dc2952c800624222943f6f63cffe | 206188f3031cc09cb6b020bbebb85f61244e3590 | refs/heads/master | 2021-01-20T22:34:31.731269 | 2016-08-05T06:21:36 | 2016-08-05T06:21:36 | 64,993,434 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,284 | py | #!/usr/bin/python
#coding:utf-8
from __future__ import print_function, unicode_literals
from functools import wraps
import os
import urlparse
import requests
import traceback
#from OpenSSL import SSL
from flask import (Flask,
make_response,
redirect,
render_template,
request,
session,
url_for
)
import submodule.dbox_tool
#
from logging import getLogger, StreamHandler, FileHandler, DEBUG, INFO, WARN, ERROR
logger = getLogger(__name__)
sh = StreamHandler()
sh.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(sh)
#
app = Flask(__name__)
app.config['DEBUG'] = os.environ.get('DEBUG') == 'True'
app.secret_key = os.urandom(24)
#
CONF_FILENAME = ".dropbox_settings.conf"
APP_KEY_NAME = "APP_KEY"
APP_SECRET_NAME = "APP_SECRET"
ACCESS_TOKEN_STATE_NAME = "ACCESS_TOKEN_STATE"
ACCESS_TOKEN_STATE_OK = "ok"
ACCESS_TOKEN_STATE_NG = "ng"
#
def get_conf_pathname():
return os.path.join(os.path.dirname(__file__), CONF_FILENAME)
#
def get_url(url):
host = urlparse.urlparse(request.url).hostname
return url_for(url,
_external=True,
_scheme='http' if host in ('127.0.0.1', 'localhost') else 'https')
#
@app.route("/")
def index():
return render_template('menu.html')
#
@app.route("/setup_dropbox")
def setup_dropbox():
app_key = ""
access_token = ""
params = {APP_KEY_NAME: "",
ACCESS_TOKEN_STATE_NAME: ""
}
if request.args.has_key(APP_KEY_NAME) and request.args[APP_KEY_NAME]:
params[APP_KEY_NAME] = request.args[APP_KEY_NAME]
if request.args.has_key(ACCESS_TOKEN_STATE_NAME):
params[ACCESS_TOKEN_STATE_NAME] = request.args[ACCESS_TOKEN_STATE_NAME]
else:
conf = submodule.dbox_tool.DropboxConfig(get_conf_pathname())
params[APP_KEY_NAME] = conf.getAppKey()
return render_template('dbox_setup.html', **params)
#
@app.route("/dropbox_auth_start")
def dropbox_auth_start():
session[APP_KEY_NAME] = request.args[APP_KEY_NAME]
session[APP_SECRET_NAME] = request.args[APP_SECRET_NAME]
flow = submodule.dbox_tool.DropboxAuthFlow(session[APP_KEY_NAME], session[APP_SECRET_NAME], get_url("dropbox_auth_finish"), session)
return redirect(flow.start())
#
@app.route("/dropbox_auth_finish")
def dropbox_auth_finish():
flow = submodule.dbox_tool.DropboxAuthFlow(session[APP_KEY_NAME], session[APP_SECRET_NAME], get_url("dropbox_auth_finish"), session)
try:
access_token, user_id, url_state = flow.finish(request.args)
conf = submodule.dbox_tool.DropboxConfig()
conf.setAppKey(session[APP_KEY_NAME])
conf.setAppSecret(session[APP_SECRET_NAME])
conf.setAccessToken(access_token)
connector = submodule.dbox_tool.DropboxConnector(conf)
result = ACCESS_TOKEN_STATE_NG if connector.dbx is None else ACCESS_TOKEN_STATE_OK
except:
#traceback.print_exc()
result = ACCESS_TOKEN_STATE_NG
if result == ACCESS_TOKEN_STATE_OK:
conf.save(get_conf_pathname())
return redirect(url_for("setup_dropbox", **{APP_KEY_NAME: session[APP_KEY_NAME], ACCESS_TOKEN_STATE_NAME: result}))
#
@app.route("/cleanup_dropbox")
def cleanup_dropbox():
return render_template('dbox_cleanup.html')
#
@app.route("/conf_cleanup")
def conf_cleanup():
session.clear()
try:
os.remove(get_conf_pathname())
except:
#traceback.print_exc()
pass
return redirect(url_for("index"))
#
@app.route("/show_log")
def show_log():
manager_log = ""
uploader_log = ""
try:
with open("/pws/log/pws_manager.log", "r") as f:
manager_log = f.read().decode("utf-8")
manager_log.replace("<", "<")
f.close()
except:
#traceback.print_exc()
pass
try:
with open("/pws/log/uploader.log", "r") as f:
uploader_log = f.read().decode("utf-8")
uploader_log.replace("<", "<")
f.close()
except:
#traceback.print_exc()
pass
return render_template('log.html', MANAGER_LOG=manager_log, UPLOADER_LOG=uploader_log)
#
if __name__ == "__main__":
ssl = (os.path.join(os.path.dirname(__file__), "static/pws.crt"), os.path.join(os.path.dirname(__file__), "static/pws.key"))
app.run(host="0.0.0.0", port=443, ssl_context=ssl)
#app.run(debug=True, ssl_context=ssl)
| [
"takuya.shibata@music.yamaha.com"
] | takuya.shibata@music.yamaha.com |
91cbe1b399b74f4f04531d4409bc44e6efacd4f4 | 9d030bb9ea59fbb831db98aad5fbdcb7df038f33 | /Prac_08/car.py | 34e7081ac26630d5f22ed244a4a5265e893e641f | [] | no_license | EdyZ01/Practicals | b01f974008576c7e45d13016ccc76b667996b0d1 | 6f5e1f91c3e07b9769135b77901f346c8901ca1c | refs/heads/master | 2020-04-09T11:35:46.374406 | 2019-01-16T17:00:58 | 2019-01-16T17:00:58 | 160,311,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | class Car:
"""Represent a Car object."""
def __init__(self, name="", fuel=0):
"""Initialise a Car instance.
fuel: float, one unit of fuel drives one kilometre
"""
self.fuel = fuel
self.odometer = 0
self.name = name
def add_fuel(self, amount):
"""Add amount to the car's fuel."""
self.fuel += amount
def drive(self, distance):
"""Drive the car a given distance.
Drive given distance if car has enough fuel
or drive until fuel runs out return the distance actually driven.
"""
if distance > self.fuel:
distance = self.fuel
self.fuel = 0
else:
self.fuel -= distance
self.odometer += distance
return distance
def __str__(self):
return("Car {}, {}".format(self.fuel, self.odometer)) | [
"noreply@github.com"
] | noreply@github.com |
6d472be5d883fe959547fb748f6ca1cf36e34f03 | a3feb43bbac86c0998ac5d46e180779d483a56bf | /data_loader_script.py | b8edc8e1df1fdfdc26fbf8dcb902b5696b5bdac7 | [] | no_license | hridyansh68/handwritten_digit_prediction_1-_hour_implementation | ed1899594767829fa95d0b41d8cd33de81c3a7e6 | 781e40d2b651a9ba240ddf0a9708948f908d059e | refs/heads/master | 2020-04-12T22:45:01.890996 | 2019-01-06T06:33:25 | 2019-01-06T06:33:25 | 162,797,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py |
import cPickle
import gzip
import numpy as np
def load_data():
f = gzip.open('../data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = cPickle.load(f)
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = zip(training_inputs, training_results)
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = zip(validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
return (training_data, validation_data, test_data)
def vectorized_result(j):
e = np.zeros((10, 1))
e[j] = 1.0
return e
| [
"hridyansh68@gmail.com"
] | hridyansh68@gmail.com |
947d8dca92c8781df1ca0936dfa233df43c1dedf | f6cf62319aceb51464118d13baa424317824d3ce | /Contas/conta_poupanca.py | 9ae670bbc844d97507345e6e2083b41c9d3df652 | [] | no_license | gcastorino/bank | 3ed7ad4dfa6ac9e6653b3943308935644ab7f571 | 597854eba2ed170d7efd25ecde007661d9083480 | refs/heads/master | 2020-08-04T14:36:01.327461 | 2019-10-03T19:32:13 | 2019-10-03T19:32:13 | 212,169,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | import random
from Contas import Conta
class ContaPoupanca(Conta):
def __init__(self, extract, type_account, number=None):
super().__init__(extract, type_account, 0, number)
def criar_numero(self):
return f'100{random.randrange(1000, 10000)}-{random.randrange(0, 10)}'
| [
"gcastorino@hotmail.com"
] | gcastorino@hotmail.com |
ac39b66cf4724966114b3c3b3d4168da74fa9796 | d7aeae8d4065bab6e3644433212c848abbdc04e1 | /time_score.py | f035a832ce2f4144ba09b1ab25e55c4d10275a80 | [] | no_license | mohitsoni87/BlockPass | 084089aa8b853fd6b23ae9e246088637c391e0b3 | 43742dd9272a47a6b23a546fb48541ee0eea385d | refs/heads/master | 2020-03-23T16:31:56.182806 | 2018-07-21T13:20:54 | 2018-07-21T13:20:54 | 141,815,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,578 | py | import pygame as pg
import time, random
from pygame.locals import *
pg.init()
car = pg.image.load('download.png')
car_d = car.get_rect().size
car_width = car_d[0]; car_height = car_d[1]
white = (255, 255, 255)
black = (0, 0, 0)
blue = (0, 0, 255)
display_width = 800; display_height = 600;
pg.display.set_caption('First Game')
clock = pg.time.Clock()
start_time = time.time()
gamedisp = pg.display.set_mode((display_width, display_height))
scores = []
speed = 10
check = 0
count = 0
def text_objects(text, style):
    """Render `text` in black with the given font; return (surface, rect)."""
    surface = style.render(text, True, black)
    return surface, surface.get_rect()
def GameOver(Text, TextRect):
    """Show the game-over banner for 2 seconds, then restart via start()."""
    gamedisp.blit(Text, TextRect)
    pg.time.wait(2000)
    start()
def object(x, y, w, h, color):
    """Draw a w×h obstacle rectangle at (x, y) in the given color.

    NOTE: the name shadows the built-in ``object``; kept unchanged because
    gameloop() calls it by this name.
    """
    # Bug fix: the `color` argument was ignored and the rectangle was always
    # drawn in `blue`; honour the caller-supplied color instead.
    pg.draw.rect(gamedisp, color, [x, y, w, h])
def start():
    """Reset all shared game state and begin a new session."""
    global speed, count, check
    check = 0
    count = 0
    speed = 10
    # Initial car position (roughly bottom-centre of the 800x600 window).
    x = 361.5
    y = 495
    gamedisp.fill(white)
    gamedisp.blit(car, (x, y))
    gameloop(x, y)
def Text_Formatting(text, font_size):
    """Render `text` at `font_size` points; return (surface, rect)."""
    style = pg.font.Font('freesansbold.ttf', font_size)
    Text, TextRect = text_objects(text, style)
    return Text, TextRect
def crash():
    """Reset the run timer and show the 'Game over' banner (which restarts)."""
    global start_time
    # NOTE(review): this local `score` shadows the score() function and is
    # never used afterwards — presumably leftover; confirm before removing.
    score = format((time.time() - start_time), '0.2f')
    start_time = time.time()
    text = 'Game over'
    Text, TextRect = Text_Formatting(text, 115)
    TextRect.center = ((400), (300))
    GameOver(Text, TextRect)
def score(start):
    """Draw the current score (seconds survived) and bump the difficulty.

    The `start` parameter is unused — elapsed time is computed from the
    module-level `start_time` instead.
    """
    score = format((time.time() - start_time), '0.2f')
    score = float(score)
    global speed, check
    # Speed up the falling obstacle at the 5s/10s/15s marks; `check` makes
    # each threshold fire only once.
    if(score > 5 and check == 0):
        speed += 2
        check = 1
    elif(score > 10 and check == 1):
        speed += 4
        check = 2
    elif(score > 15 and check == 2):
        speed += 4
        check = 3
    # "Score:" label and the numeric value, top-right of the window.
    TEXT = 'Score: '
    style = pg.font.Font('freesansbold.ttf', 30)
    Text, TextRect = text_objects(TEXT, style)
    TextRect.center = ((650), (50))
    gamedisp.blit(Text, TextRect)
    TEXT = str(score)
    Text, TextRect = text_objects(TEXT, style)
    TextRect.center = ((730), (50))
    gamedisp.blit(Text, TextRect)
    HighScore(score)
def HighScore(score):
    """Record the latest score and draw the maximum seen so far (top-left).

    NOTE(review): called every frame, so `scores` grows without bound over a
    long session.
    """
    scores.append(float(score))
    highest_score = 'High Score: '
    Text, TextRect = Text_Formatting(highest_score, 30)
    TextRect.center = ((150), (50))
    gamedisp.blit(Text, TextRect)
    highest_score = str(max(scores))
    Text, TextRect = Text_Formatting(highest_score, 30)
    TextRect.center = ((265), (50))
    gamedisp.blit(Text, TextRect)
def change(x, y):
    """Clear the frame and redraw the car at (x, y)."""
    gamedisp.fill(white)
    gamedisp.blit(car, (x , y))
def gameloop(x, y):
    """Main frame loop: handle input, move the obstacle, detect collisions.

    NOTE(review): when the obstacle leaves the screen this calls itself
    recursively instead of resetting `rx`/`ry`, so the call stack grows with
    every dodged obstacle — verify and consider flattening.
    """
    global count
    crashed = False
    # Obstacle spawns at a random x, 300 px above the visible area.
    rx = random.randrange(0, 800)
    ry = -300
    w = 100
    h = 100
    while not crashed:
        for event in pg.event.get():
            if(event.type == QUIT):
                crashed = True
                pg.quit()
                quit()
            if(event.type == KEYDOWN):
                # Left/right wrap around the screen edges; moving off the
                # top or bottom counts as a crash.
                if (event.key == K_RIGHT):
                    x += 50
                    if (x > display_width - car_width):
                        x = 0
                elif(event.key == K_LEFT):
                    x -= 50
                    if (x < 0):
                        x = display_width - car_width
                elif(event.key == K_DOWN):
                    y += 50
                    if (y > display_height - car_height):
                        crash()
                        continue
                elif(event.key == K_UP):
                    y -= 50
                    if (y < 0):
                        crash()
                        continue
        change(x, y)
        object(rx, ry, w, h, black)
        score(start_time)
        ry += speed
        # Axis-aligned overlap test between the 100x100 obstacle and the car.
        if( ((rx + 100 >= x and rx <= x) or (rx >= x and rx - 100 <= x) ) and (ry + 100 >= y) ):
            crash()
        if(ry > display_height):
            # Obstacle dodged: count it and spawn a fresh one (recursively).
            count += 1
            print(count)
            gameloop(x, y)
        pg.display.update()
        clock.tick(60)
# Launch the first session; game over restarts via GameOver() -> start().
start()
| [
"mohit_soni87@ymail.com"
] | mohit_soni87@ymail.com |
964b812d02375eb43441088299f997192ca9d36b | 894b290b4f4f47b5eb523c23efd7bd6110d91b2f | /116_fang_shop/fang_shop/fang_shop/spiders/fang_shop_spider.py | c3f9547cad1f9b1ec5db7c8618dd0e8ddbf53a24 | [] | no_license | wliustc/SpiderS | 6650c00616d11239de8c045828bafdc5a299b1ce | 441f309c50d28c1a3917bed19321cd5cbe7c2861 | refs/heads/master | 2020-03-27T06:15:39.495785 | 2018-06-14T07:55:44 | 2018-06-14T07:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,684 | py | # -*- coding: utf-8 -*-
import scrapy
import re
from fang_shop.items import FangShopItem
import web
import urlparse
import hashlib
import json
import sys
# Python 2 only: force the default string encoding to UTF-8 for the mixed
# GBK/UTF-8 page content handled below.
reload(sys)
sys.setdefaultencoding("utf-8")
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/59.0.3071.115 Safari/537.36'
}
# SECURITY NOTE(review): database credentials are hard-coded here; they should
# live in configuration/environment, not in source control.
dbo2o = web.database(dbn='mysql', db='o2o', user='writer', pw='hh$writer', port=3306, host='10.15.1.24')
db = web.database(dbn='mysql', db='hillinsight', user='writer', pw='hh$writer', port=3306, host='10.15.1.24')
class Fang_Shop_Spider(scrapy.Spider):
    """Scrape storefront listings from shop.fang.com (Python 2 codebase).

    Seed URLs come from the ``t_hh_fang_city_list`` MySQL table; for each
    city the spider walks the paginated listing pages and parses every
    property's "xiangqing" (details) page into a FangShopItem.
    """
    name = 'fang_shop_spider'

    def start_requests(self):
        """Yield one listing request per city from the city table.

        Beijing uses the bare shop.fang.com domain; every other city embeds
        its city code, extracted from the stored city_link.
        """
        sql = '''select city,city_link,province from t_hh_fang_city_list'''
        results = db.query(sql)
        for result in results:
            if result['city'] == '北京':
                url = 'http://shop.fang.com/loupan/house/'
                yield scrapy.Request(url, headers=headers, callback=self.list_parse, meta={
                    'city': result['city'], 'province': result['province']
                }, dont_filter=True)
            else:
                pattern = re.search('(.*?)\.fang', result['city_link'])
                city_code = pattern.group(1)
                url = 'http://shop.%s.fang.com/loupan/house/' % city_code
                yield scrapy.Request(url, headers=headers, callback=self.list_parse, meta={
                    'city': result['city'], 'province': result['province']
                }, dont_filter=True)

    def list_parse(self, response):
        """Parse a listing page: queue every detail page plus the next page."""
        # Pages are served as GB18030; re-encode to UTF-8 before matching.
        content = str(response.body).decode('gb18030').encode('utf-8')
        pattern = re.compile('class="title"><a target="_blank" href="(.*?)"')
        city = response.meta['city']
        province = response.meta['province']
        url_list = re.findall(pattern, content)
        for url in url_list:
            # Normalize second-hand ("esf") links to the detail-page form.
            url = re.sub('/esf/', '/', url)
            url_new = url + 'xiangqing/'
            yield scrapy.Request(url_new, headers=headers, callback=self.detail_parse, meta={
                'city': city, 'province': province
            }, dont_filter=True)
        # Follow pagination until no "next" link is present.
        pattern_next = re.search('id="PageControl1_hlk_next" href="(.*?)"', content)
        url_domain = urlparse.urlparse(response.url).netloc
        if pattern_next:
            url_next = 'http://' + url_domain + pattern_next.group(1)
            yield scrapy.Request(url_next, headers=headers, callback=self.list_parse, meta={
                'city': city, 'province': province
            }, dont_filter=True)

    def detail_parse(self, response):
        """Extract base/traffic/surroundings info from a property detail page.

        The three info groups are serialized to JSON strings before being
        stored on the item.  NOTE(review): the pattern*.group(1) calls assume
        every field is present — a missing field raises AttributeError.
        """
        content = str(response.body).decode('gb18030').encode('utf-8')
        city = response.meta['city']
        province = response.meta['province']
        items = FangShopItem()
        # --- base information table -------------------------------------
        base_info = {}
        pattern1 = re.search('所属区域:([\s\S]*?)<', content)
        base_info['所属区域'] = pattern1.group(1)
        pattern2 = re.search('楼盘地址:<span title="([\s\S]*?)"', content)
        base_info['楼盘地址'] = pattern2.group(1)
        pattern3 = re.search('环线位置:([\s\S]*?)<', content)
        base_info['环线位置'] = pattern3.group(1)
        pattern4 = re.search('物业类别:([\s\S]*?)<', content)
        base_info['物业类别'] = pattern4.group(1)
        pattern5 = re.search('建筑类别:([\s\S]*?)<', content)
        base_info['建筑类别'] = pattern5.group(1)
        pattern6 = re.search('总 层 数:([\s\S]*?)<', content)
        base_info['总层数'] = pattern6.group(1)
        pattern7 = re.search('开 发 商:([\s\S]*?)<', content)
        base_info['开发商'] = pattern7.group(1)
        pattern8 = re.search('竣工时间:([\s\S]*?)<', content)
        base_info['竣工时间'] = pattern8.group(1)
        pattern9 = re.search('物 业 费:([\s\S]*?)<', content)
        base_info['物业费'] = pattern9.group(1)
        pattern10 = re.search('物业公司:([\s\S]*?)<', content)
        base_info['物业公司'] = pattern10.group(1)
        pattern11 = re.search('占地面积:([\s\S]*?)<', content)
        base_info['占地面积'] = pattern11.group(1)
        pattern12 = re.search('建筑面积:([\s\S]*?)<', content)
        base_info['建筑面积'] = pattern12.group(1)
        pattern13 = re.search('开间面积:([\s\S]*?)<', content)
        base_info['开间面积'] = pattern13.group(1)
        pattern14 = re.search('是否可分割:([\s\S]*?)<', content)
        base_info['是否可分割'] = pattern14.group(1)
        pattern15 = re.search('电梯数量:([\s\S]*?)<', content)
        base_info['电梯数量'] = pattern15.group(1)
        pattern16 = re.search('空 调:([\s\S]*?)<', content)
        base_info['空调'] = pattern16.group(1)
        pattern17 = re.search('装修状况:([\s\S]*?)<', content)
        base_info['装修状况'] = pattern17.group(1)
        pattern18 = re.search('停 车 位:([\s\S]*?)<', content)
        base_info['停车位'] = pattern18.group(1)
        base_info = json.dumps(base_info, ensure_ascii=False, encoding='utf-8')
        items['base_info'] = base_info
        # --- traffic section (bus/metro), may be "no data" ---------------
        pattern19 = re.search('交通状况</dt>[\s\S]*?<dl class="xiangqing">([\s\S]*?)</div>', content)
        traffic_con = pattern19.group(1)
        if '暂无资料' in traffic_con:
            items['traffic_info'] = '暂无资料'
            # print traffic_con
            # raw_input('enter')
        else:
            traffic_info = {}
            pattern19_1 = re.search('公交:([\s\S]*?)<', traffic_con)
            if pattern19_1:
                traffic_info['公交'] = pattern19_1.group(1)
            pattern19_2 = re.search('地铁:([\s\S]*?)<', traffic_con)
            if pattern19_2:
                traffic_info['地铁'] = pattern19_2.group(1)
            traffic_info = json.dumps(traffic_info, ensure_ascii=False, encoding='utf-8')
            items['traffic_info'] = traffic_info
        # --- surroundings section (mall/hospital/...), may be "no data" ---
        pattern20 = re.search('周边信息</dt>[\s\S]*?<dl class="xiangqing">([\s\S]*?)</div>', content)
        around_con = pattern20.group(1)
        if '暂无资料' in around_con:
            items['around_info'] = '暂无资料'
        else:
            around_info = {}
            pattern20_1 = re.search('商场:([\s\S]*?)<', around_con)
            if pattern20_1:
                around_info['商场'] = pattern20_1.group(1)
            pattern20_2 = re.search('医院:([\s\S]*?)<', around_con)
            if pattern20_2:
                around_info['医院'] = pattern20_2.group(1)
            pattern20_3 = re.search('邮局:([\s\S]*?)<', around_con)
            if pattern20_3:
                around_info['邮局'] = pattern20_3.group(1)
            pattern20_4 = re.search('银行:([\s\S]*?)<', around_con)
            if pattern20_4:
                around_info['银行'] = pattern20_4.group(1)
            pattern20_5 = re.search('餐饮:([\s\S]*?)<', around_con)
            if pattern20_5:
                around_info['餐饮'] = pattern20_5.group(1)
            around_info = json.dumps(around_info, ensure_ascii=False, encoding='utf-8')
            items['around_info'] = around_info
        # Shop name; the "newcode" id is used as the source uid when present,
        # otherwise fall back to an MD5 of the page URL.
        pattern21 = re.search('class="biaoti">([\s\S]*?)<', content)
        pattern22 = re.search('newcode=(\d+)"', content)
        items['shop_name'] = pattern21.group(1)
        if pattern22:
            items['src_uid'] = pattern22.group(1)
        else:
            md5 = hashlib.md5()
            md5.update(response.url)
            items['src_uid'] = md5.hexdigest()
        items['city'] = city
        items['province'] = province
        items['url'] = response.url
        yield items
| [
"luoshao23@gmail.com"
] | luoshao23@gmail.com |
77ea35da65f61abce7c44b9a46ee137770cc95ec | fc5becca3e2e48a444b512e059df1cd21601829b | /Aulas/Aula23A.py | 3baac0f53fa2d741ffa7e4838bd99fbeb5af6205 | [
"MIT"
] | permissive | Felix-xilef/Curso-de-Python | c44bf8c22b393aefaed3a2bb3127ef7999e27fb8 | cdff7c7f3850e6326e274c8c1987b9e1a18ce910 | refs/heads/master | 2021-05-19T11:09:22.644638 | 2020-04-01T22:09:02 | 2020-04-01T22:09:02 | 251,665,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | try:
    # Read numerator and denominator; any handler below may fire.
    a = int(input('\n\tNumerador: '))
    b = int(input('\tDenominador: '))
    r = a / b
# except: - just redirects on any error (GENERIC)
#     print('\n\tProblema encontrao')  # erro - shows the error | erro.__class__ - shows the error's class
except (ValueError, TypeError):  # except SomeClass: - redirects only for errors of the named class(es); wrap several in parentheses, comma-separated
    print('\n\tTivemos um problema com os tipos de dados que você digitou')
except ZeroDivisionError:
    print('\n\tNão é possível dividir um número por zero!')
except KeyboardInterrupt:
    print('\n\tO usuário preferiu não informar os dados!')
except Exception as erro:  # stores the exception object in the variable `erro` (GENERIC)
    print(f'\n\tProblema encontrao:\n\t{erro.__class__}')  # erro - shows the error | erro.__class__ - shows the error's class
else:  # optional (what runs when no error occurred)
    print(f'\n\t{a}/{b} = {r}')
finally:  # optional (always runs, whether or not an error occurred)
    print('\n\tVolte sempre! Muito obrigado!')
input('\n\nPressione <enter> para continuar')
| [
"felixpb@yahoo.com.br"
] | felixpb@yahoo.com.br |
e7c4cc9aafe5f314aab6257410c9f0e26c117e7c | 484ec0b529d96fbce4c23362d1d758a1e1bdfc91 | /deprecated/fragment_csv_converter/directories.py | ea0c8936a4f78dd6bc7fb97aaa534014f2965201 | [] | no_license | willjwon/file-type-identification | a7db5b5ccbaa2625ede64f22af1984c065de7c2c | 8d760bf70764328d7b47dff904b9ad25273b2516 | refs/heads/master | 2021-09-28T01:53:50.241671 | 2018-11-13T07:24:49 | 2018-11-13T07:24:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | import random
from files import Files
class Directories:
    """Keeps an indexed collection of `Files` objects, one per directory."""

    def __init__(self, directories):
        # `directories` maps an index (string or int) to a directory path.
        self.files = [(int(idx), Files(directory=path))
                      for idx, path in directories.items()]

    def random_directory(self):
        """Return a uniformly random (index, Files) pair, or (None, None)."""
        if not self.files:
            return None, None
        return random.choice(self.files)

    def sequential_directory(self):
        """Return the first (index, Files) pair, or (None, None) if empty."""
        if not self.files:
            return None, None
        return self.files[0]

    def remove_directory(self, index):
        """Remove the first entry whose index matches; no-op if absent."""
        for pos, (idx, _) in enumerate(self.files):
            if idx == index:
                del self.files[pos]
                break
| [
"baryberri@icloud.com"
] | baryberri@icloud.com |
a8ee0c14187fe73d4b8e96cd004a298e7fc626a1 | b5c88180e4821583b2e4174b352186c0277ac1b6 | /watcher/decision_engine/model/collector/cinder.py | 1e5c1c967b8746521781086dab98cdf06def2a6c | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | YumengBao/watcher | ecb7232c69668f9d189673ccd997a7d4a8087124 | 051b4fcd066960a3563d04b149307b388d3b66b2 | refs/heads/master | 2021-05-07T06:32:51.851512 | 2017-11-22T12:15:57 | 2017-11-22T12:15:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,347 | py | # -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from oslo_log import log
from watcher.common import cinder_helper
from watcher.common import exception
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model.notification import cinder
LOG = log.getLogger(__name__)
class CinderClusterDataModelCollector(base.BaseClusterDataModelCollector):
    """Cinder cluster data model collector

    The Cinder cluster data model collector creates an in-memory
    representation of the resources exposed by the storage service.
    """

    def __init__(self, config, osc=None):
        super(CinderClusterDataModelCollector, self).__init__(config, osc)

    @property
    def audit_scope_handler(self):
        # This collector does not restrict the audit scope.
        return None

    @property
    def notification_endpoints(self):
        """Associated notification endpoints

        :return: Associated notification endpoints
        :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances
        """
        # Endpoints that keep the in-memory model in sync with Cinder
        # capacity and volume lifecycle notifications.
        return [
            cinder.CapacityNotificationEndpoint(self),
            cinder.VolumeCreateEnd(self),
            cinder.VolumeDeleteEnd(self),
            cinder.VolumeUpdateEnd(self),
            cinder.VolumeAttachEnd(self),
            cinder.VolumeDetachEnd(self),
            cinder.VolumeResizeEnd(self)
        ]

    def execute(self):
        """Build the storage cluster data model"""
        LOG.debug("Building latest Cinder cluster data model")

        builder = ModelBuilder(self.osc)
        return builder.execute()
class ModelBuilder(object):
    """Build the graph-based model

    This model builder adds the following data:

    - Storage-related knowledge (Cinder)

    """
    def __init__(self, osc):
        self.osc = osc
        self.model = model_root.StorageModelRoot()
        self.cinder = osc.cinder()
        self.cinder_helper = cinder_helper.CinderHelper(osc=self.osc)

    def _add_physical_layer(self):
        """Add the physical layer of the graph.

        This includes components which represent actual infrastructure
        hardware.
        """
        for snode in self.cinder_helper.get_storage_node_list():
            self.add_storage_node(snode)
        for pool in self.cinder_helper.get_storage_pool_list():
            pool = self._build_storage_pool(pool)
            self.model.add_pool(pool)
            storage_name = getattr(pool, 'name')
            try:
                storage_node = self.model.get_node_by_name(
                    storage_name)
                # Connect the pool to its storage node
                self.model.map_pool(pool, storage_node)
            except exception.StorageNodeNotFound:
                # A pool without a known node is kept unmapped.
                continue

    def add_storage_node(self, node):
        # Build and add base node.
        storage_node = self.build_storage_node(node)
        self.model.add_node(storage_node)

    def add_storage_pool(self, pool):
        # Build and add a pool without mapping it to a node.
        storage_pool = self._build_storage_pool(pool)
        self.model.add_pool(storage_pool)

    def build_storage_node(self, node):
        """Build a storage node from a Cinder storage node

        :param node: A storage node
        :type node: :py:class:`~cinderclient.v2.services.Service`
        """
        # node.host is formatted as host@backendname since ocata,
        # or may be only host as of ocata
        backend = ""
        try:
            backend = node.host.split('@')[1]
        except IndexError:
            # No backend suffix: leave `backend` empty.
            pass

        volume_type = self.cinder_helper.get_volume_type_by_backendname(
            backend)

        # build up the storage node.
        node_attributes = {
            "host": node.host,
            "zone": node.zone,
            "state": node.state,
            "status": node.status,
            "volume_type": volume_type}

        storage_node = element.StorageNode(**node_attributes)
        return storage_node

    def _build_storage_pool(self, pool):
        """Build a storage pool from a Cinder storage pool

        :param pool: A storage pool
        :type pool: :py:class:`~cinderclient.v2.capabilities.Capabilities`
        """
        # build up the storage pool.
        node_attributes = {
            "name": pool.name,
            "total_volumes": pool.total_volumes,
            "total_capacity_gb": pool.total_capacity_gb,
            "free_capacity_gb": pool.free_capacity_gb,
            "provisioned_capacity_gb": pool.provisioned_capacity_gb,
            "allocated_capacity_gb": pool.allocated_capacity_gb}

        storage_pool = element.Pool(**node_attributes)
        return storage_pool

    def _add_virtual_layer(self):
        """Add the virtual layer to the graph.

        This layer is the virtual components of the infrastructure.
        """
        self._add_virtual_storage()

    def _add_virtual_storage(self):
        # Add every volume and map it onto its hosting pool when known.
        volumes = self.cinder_helper.get_volume_list()

        for vol in volumes:
            volume = self._build_volume_node(vol)
            self.model.add_volume(volume)
            pool_name = getattr(vol, 'os-vol-host-attr:host')
            if pool_name is None:
                # The volume is not attached to any pool
                continue
            try:
                pool = self.model.get_pool_by_pool_name(
                    pool_name)
                self.model.map_volume(volume, pool)
            except exception.PoolNotFound:
                # Pool unknown to the model: keep the volume unmapped.
                continue

    def _build_volume_node(self, volume):
        """Build a volume node

        Create a volume node for the graph using cinder and the
        `volume` cinder object.
        :param volume: Cinder Volume object.
        :return: A volume node for the graph.
        """
        # Keep only the attachment fields the model cares about.
        attachments = [{k: v for k, v in six.iteritems(d) if k in (
            'server_id', 'attachment_id')} for d in volume.attachments]

        volume_attributes = {
            "uuid": volume.id,
            "size": volume.size,
            "status": volume.status,
            "attachments": attachments,
            "name": volume.name or "",
            "multiattach": volume.multiattach,
            "snapshot_id": volume.snapshot_id or "",
            "project_id": getattr(volume, 'os-vol-tenant-attr:tenant_id'),
            "metadata": volume.metadata,
            "bootable": volume.bootable}

        return element.Volume(**volume_attributes)

    def execute(self):
        """Instantiates the graph with the openstack cluster data.

        The graph is populated along 2 layers: virtual and physical. As each
        new layer is built connections are made back to previous layers.
        """
        self._add_physical_layer()
        self._add_virtual_layer()
        return self.model
| [
"hid-nakamura@vf.jp.nec.com"
] | hid-nakamura@vf.jp.nec.com |
37f9ffe43f45931ee39051d3b509924093639327 | 33af6185b48bd76f97f0a74390a3a812ee216c78 | /angr/angr/procedures/libc/fseek.py | 12804e949829a38007056038d366ae0bb5839ae7 | [
"BSD-2-Clause"
] | permissive | Ruide/angr-dev | dab0cabd907fce47ac698f890c3f3a8b80ab7e2a | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | refs/heads/master | 2022-11-10T11:27:13.355024 | 2017-10-07T14:29:09 | 2017-10-07T14:29:09 | 104,417,044 | 0 | 1 | BSD-2-Clause | 2022-10-16T04:48:10 | 2017-09-22T01:35:12 | C | UTF-8 | Python | false | false | 1,201 | py | import angr
from . import io_file_data_for_arch
######################################
# fseek
######################################
class fseek(angr.SimProcedure):
    """SimProcedure modelling libc fseek(FILE *stream, long offset, int whence)."""
    #pylint:disable=arguments-differ

    def run(self, file_ptr, offset, whence):
        # TODO: Support symbolic file_ptr, offset, and whence

        # Make sure whence can only be one of the three values: SEEK_SET(0), SEEK_CUR(1), and SEEK_END(2)
        if self.state.se.symbolic(whence) and len(self.state.se.eval_upto(whence, 2)) > 1:
            raise angr.SimProcedureError('multi-valued "whence" is not supported in fseek.')
        else:
            # Get all possible values
            all_whence = self.state.se.eval_upto(whence, 2)
            if not all_whence:
                raise angr.SimProcedureError('"whence" has no satisfiable value.')
            # There is only one value left
            whence_int = all_whence[0]
            if whence_int not in (0, 1, 2):
                return 22 # EINVAL

        # Locate the file descriptor inside the FILE struct for this arch,
        # then delegate the actual seek to the simulated POSIX layer.
        fd_offset = io_file_data_for_arch(self.state.arch)['fd']
        fd = self.state.mem[file_ptr + fd_offset : ].int.resolved
        r = self.state.posix.seek(fd, offset, whence_int)
        return r
| [
"rd.cheung.bupt.sms@gmail.com"
] | rd.cheung.bupt.sms@gmail.com |
2158bee91e8ddc05e94adf1ea116c57e35458e73 | ced18d4273f07a1472027f533edd77d198144314 | /multivariate regression.py | 262a5c1531ae8b09db48a0e60d04e6551a699d62 | [] | no_license | ordikhan/supervised-learning | ff2ba7430e0d4d60055407bad3fc180f6013c39f | a77444120f1ddd8ff23b19c89f82da641b4628b6 | refs/heads/main | 2023-05-01T09:42:41.813881 | 2021-05-06T09:40:54 | 2021-05-06T09:40:54 | 364,806,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import pandas as pd
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.preprocessing import MinMaxScaler
# Fit an OLS price model on three car features and report test-set error.
df = pd.read_excel('C:/Users/e.almaee/Desktop/Dataset/cars.xls')
#print(df.head())
X = df[['Mileage', 'Liter', 'Doors']]
Y = df['Price']
# Scale each feature to [0, 1] before splitting/fitting.
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size = 0.3)
# NOTE(review): no intercept column is added (sm.add_constant), so the model
# is forced through the origin — confirm this is intentional.
model = sm.OLS(Ytrain, Xtrain).fit()
pred = model.predict(Xtest)
mse = mean_squared_error(Ytest, pred)
rmse = sqrt(mse)
print(mse, rmse)
| [
"noreply@github.com"
] | noreply@github.com |
ed0c8392bc8222a364e2625501ee4016605b8c74 | 776d97324adae9d929f90b701e6bf003df041bec | /0x04-python-more_data_structures/6-print_sorted_dictionary.py | 97bd5c62571c24df86e2c663fcb13d056547652f | [] | no_license | jblanco75/holbertonschool-higher_level_programming | 6be3284ecd3a8168425cb82d0bee1f9321825a56 | 94b1db9f120e234ec61a6db044113e7f4618d3ef | refs/heads/main | 2023-08-04T04:16:15.821683 | 2021-09-23T00:32:24 | 2021-09-23T00:32:24 | 361,894,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | #!/usr/bin/python3
def print_sorted_dictionary(a_dictionary):
    """Print every key/value pair of a_dictionary, ordered by key.

    Each pair goes on its own line formatted as ``key: value``.
    """
    for key in sorted(a_dictionary.keys()):
        print(f"{key:s}: {a_dictionary[key]}")
| [
"jblanco75@gmail.com"
] | jblanco75@gmail.com |
184aadc4ffd624e108706977dfa7e4758bfcbbb8 | 2857548f8cb5300d60062e70d75a45a92b1a9a4a | /my_office_admin/my_office_admin/wsgi.py | 424ba63e3f388464b59b5390be71f1161e8a9e99 | [] | no_license | vkrishnan/my-office-admin-test | 8b3ddd83b7be90ccf3483b84a044e454192e1633 | 22e73e4134123e7fbe5c5d5993de0b90b1cb7fe1 | refs/heads/master | 2021-01-10T01:29:57.572253 | 2016-01-12T03:10:37 | 2016-01-12T03:10:37 | 48,683,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for my_office_admin project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment already
# specifies a settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_office_admin.settings")

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"vinay.krishnan@hp.com"
] | vinay.krishnan@hp.com |
5e73ab252bfa094fd111ee5055e111d073daad4c | 99e1a9d7aa57bea19ae121e0ace98e054669cb20 | /resources/store.py | cf78d99a386a80b32218048b8d918aee4e8447e4 | [] | no_license | avciali/storeapitest | 0dd0cb9c7a3a3ad20d6f344b37b0289b76fa5cf2 | 4beabd27f5e6d7029c042a15421386b904bd1cb2 | refs/heads/master | 2021-08-30T10:02:17.050428 | 2017-12-17T11:25:47 | 2017-12-17T11:25:47 | 114,293,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | from flask_restful import Resource
from models.store import StoreModel
class Store(Resource):
    """REST resource for a single store, addressed by name."""

    def get(self, name):
        """Return the store as JSON, or 404 if it does not exist."""
        store = StoreModel.find_by_name(name)
        if store:
            return store.json()
        return {"message": "Store not found"}, 404

    def post(self, name):
        """Create a new store; 400 if it already exists, 500 on DB failure.

        NOTE(review): the message typos ("exits", "creation store") are kept
        unchanged in case clients match on the exact text.
        """
        if StoreModel.find_by_name(name):
            return {"message": "A store with name {} already exits".format(name)}, 400
        store = StoreModel(name)
        try:
            store.save_to_db()
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.
            return {"message": "An error occurred while creation store"}, 500
        return store.json(), 201

    def delete(self, name):
        """Delete the store if present; the reply is the same either way."""
        store = StoreModel.find_by_name(name)
        if store:
            store.delete_from_db()
        return {"message": "store deleted"}
class StoreList(Resource):
    """Collection endpoint: list every store."""

    def get(self):
        all_stores = [store.json() for store in StoreModel.query.all()]
        return {"stores": all_stores}
| [
"aliavci82@gmail.com"
] | aliavci82@gmail.com |
92ac50897a3cb76170df9978daf88a94a96b930c | 6e9e23664a6a270d3821a00fd1aa740065f9f2c7 | /libs/DecisionTree.py | ad0ec85b4ead27d853f98f6c479035b476623ea0 | [
"MIT"
] | permissive | frenzymadness/BiologicalAge | d417a0252e91821cd25897b763ba0fe30910087b | 7ac1fb81474296422b4975688ce3fc559dbe3191 | refs/heads/master | 2021-10-20T12:51:54.344085 | 2019-02-27T20:43:52 | 2019-02-27T20:43:52 | 108,725,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,718 | py | from os.path import join
import json
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import numpy as np
class DecisionTree():
    """Interactive classifier mapping questionnaire answers to an age group.

    Loads ``data/<type>/data.csv`` (features in columns 1..n, class label in
    the last column) and ``data/questions.json``; serves questions one at a
    time, re-training a decision tree on the columns answered so far.
    """

    def __init__(self, type='b'):
        # NOTE: `type` shadows the built-in; kept for API compatibility.
        self.data = pd.read_csv(join('data', type, 'data.csv'))
        Y = self.data.values[:, -1].astype('float64')
        self.groups = self.calculate_groups(Y)
        with open(join('data', 'questions.json')) as fh:
            self.questions = json.load(fh)
        self.current_question = 0
        self.answers = []

    def train(self):
        """Train decision tree classifier with data subset"""
        # Use only the feature columns that have been answered so far.
        X = self.data.values[:, 1:self.current_question+1].astype('float64')
        Y = self.data.values[:, -1].astype('float64')
        clf = DecisionTreeClassifier(min_samples_split=5)
        self.dt = clf.fit(X, Y)

    def calculate_groups(self, Y):
        """Return per-class sample counts ordered by ascending class label.

        Bug fix: ``predict`` indexes this list with ``label - 1``, so the
        counts must follow sorted label order; the previous implementation
        iterated ``set(Y)``, whose order is arbitrary.
        """
        list_of_groups = list(Y)
        return [list_of_groups.count(label)
                for label in sorted(set(list_of_groups))]

    def get_question(self):
        """Get next question"""
        self.current_question += 1
        return self.questions[self.current_question-1]

    def predict(self):
        """Train and predict class and return limits for reference images"""
        self.train()
        prediction = self.dt.predict(np.array(self.answers).reshape(1, -1))
        # Class labels are assumed to be 1..k; convert to a 0-based index.
        prediction = int(prediction[0]) - 1
        lower = sum(self.groups[:prediction])
        upper = lower + self.groups[prediction]
        return lower, upper

    def save_answer(self, answer):
        """Save answer and make prediction"""
        self.answers.append(answer)
        return self.predict()
| [
"lbalhar@redhat.com"
] | lbalhar@redhat.com |
efa80ad0eec53a666170d3e49066f2fa97684f4b | 970a455c6c043c33e70ea60fd2b8eeec799da432 | /bin/rehex.py | 049b4352bd5f1d47dd5715ba3fd91cb0a1063964 | [
"MIT"
] | permissive | innovacoin/sentinel | 798d117f8822941dca9cd54703db47fb47cbcd00 | eeedacaca0be68423dcb57a7eef1ddb7cb1a06b8 | refs/heads/master | 2021-09-19T09:20:29.841449 | 2018-07-26T10:00:41 | 2018-07-26T10:00:41 | 112,226,133 | 4 | 7 | MIT | 2020-01-01T07:28:13 | 2017-11-27T17:22:49 | Python | UTF-8 | Python | false | false | 454 | py | import simplejson
import binascii
import sys
import pdb
from pprint import pprint
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import innovalib
# ============================================================================

usage = "%s <hex>" % sys.argv[0]

obj = None

if len(sys.argv) < 2:
    print(usage)
    sys.exit(1)
else:
    # Decode the serialised governance-object hex passed on the command line.
    obj = innovalib.deserialise(sys.argv[1])

# Show the deserialised structure.  Bug fix: replaces a leftover
# `pdb.set_trace()` debugging breakpoint and a stray `1` expression statement.
pprint(obj)
| [
"djjamol@gmail.com"
] | djjamol@gmail.com |
7b3fa99aefcbe29c9f00c5b8f63ea045bfea12c3 | 97ef985dc843c4975c9507e0da9e93069d3ec084 | /Thesis_LuoHao/Bell/Bell_fitting.py | 7668ce49518d0e50aaf4805b760096f5f91caa3a | [] | no_license | FelixHaoL/thesis_FERC | 5e39b232a4bbc9eaba7f27be8fb9a864d278c033 | 89a4f8c3e8416565da0e3c477f19a58e6c3c0cc4 | refs/heads/master | 2022-11-30T08:05:01.187739 | 2020-08-12T03:23:11 | 2020-08-12T03:23:11 | 271,706,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | from scipy.optimize import curve_fit, minimize
def bell(f, k0, x):
    """Bell model: force-accelerated bond rupture rate.

    k(f) = exp(k0) * exp(f * x / kBT), with kBT = 4.06 (pN*nm at room temp).
    """
    kbt = 4.06
    zero_force_rate = np.exp(k0)
    return zero_force_rate * np.exp(f * x / kbt)
def bell_r(f, k0, x):
    """Reverse Bell model: rate suppressed by force.

    k(f) = exp(k0) * exp(-f * x / kBT), with kBT = 4.06 (pN*nm at room temp).
    """
    kbt = 4.06
    zero_force_rate = np.exp(k0)
    return zero_force_rate * np.exp(-f * x / kbt)
def bell_(p, f, kf):
    """Least-squares objective (in log space) for fitting the Bell model.

    p = (k0, x); returns sum over data points of (ln k_model - ln kf)^2.
    """
    kbt = 4.06
    model_rate = np.exp(p[0]) * np.exp(f * p[1] / kbt)
    residuals = np.log(model_rate) - np.log(kf)
    return np.sum(residuals ** 2)
def bell_r_(p, f, kf):
    """Least-squares objective (in log space) for the reverse Bell model.

    p = (k0, x); the model rate decays with force: exp(p0) * exp(-f*p1/kBT).
    """
    kbt = 4.06
    model_rate = np.exp(p[0]) * np.exp(-f * p[1] / kbt)
    residuals = np.log(model_rate) - np.log(kf)
    return np.sum(residuals ** 2)
if __name__ == "__main__":
    # NOTE(review): `force_fit` and `rate_fit` are not defined anywhere in
    # this module — this example fit presumably relies on data prepared in an
    # interactive session; verify before running standalone.
    popt= minimize(bell_r_,(12,0.21),args=(np.array(force_fit), np.array(rate_fit)), bounds=((None,None), (0, None)),method='L-BFGS-B')
"haoool@outlook.com"
] | haoool@outlook.com |
675330baffa315263e450a4134a692cc638ef989 | f5382125f4c5a04544095cd8dfce58fabad3fe29 | /IfElseElif2.py | 43c2e60de986b547941d60c98328bab7ef76a9d3 | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | ishanbhavsar3/python_ | 7005b9e36899602d84a76e4c36367e396fdd2d72 | e80d68e263ecb7c41bdd58c325f9e2efb50999f7 | refs/heads/master | 2023-06-05T06:20:08.976762 | 2021-06-20T08:17:02 | 2021-06-20T08:17:02 | 296,503,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,503 | py |
Num_List = 1, 2, 3, 4
print(Num_List)
Day_of_week_num = input("Input the day of the week in numbers and i will tell which day it is.")
int(Day_of_week_num)
if int(Day_of_week_num) == 1:
print("The day is a Sunday.")
elif int(Day_of_week_num) == 2:
print("The day is a Monday.")
elif int(Day_of_week_num) == 3:
print("The day is a Tuesday.")
elif int(Day_of_week_num) == 4:
print("The day is a Wednesday.")
elif int(Day_of_week_num) == 5:
print("The day is a Thursday.")
elif int(Day_of_week_num) == 6:
print("The day is a Friday")
elif int(Day_of_week_num) == 7:
print("The day is Saturday.")
else:
print("The number you inputted is not a day of the week or within 7.")
num_input = input("Input a number to test if it is odd or even.")
if int(num_input) % 2 == 0:
print(f"The number {num_input} is even.")
print(f"The number {num_input} gives a 0 as a remainder when divided with 2.")
else:
print(f"The number {num_input} is odd ")
print(f"The number {num_input} does not give remainder as 0 when divided with 2.")
# Spacing between lines of code.
# Count from 1 to 5.
Guess_Number = 1
while Guess_Number <= 5:
    print(Guess_Number)
    Guess_Number = Guess_Number + 1
# Spacing between lines of code.
# Prints a single '*' (the loop condition allows only one iteration).
Guess_Number = 1
while Guess_Number <= 1:
    print('*' * Guess_Number)
    Guess_Number = Guess_Number + 1
# Spacing between lines of code.
# Number-guessing game: three attempts at a fixed secret number.
secret_Number = 9
Guess_Number = 0
while Guess_Number < 3:
    Guess_Input = int(input("Guess a number from 1 to 10 and input it here."))
    Guess_Number += 1
    if Guess_Input == secret_Number:
        print("You won the prize.Well Guessed!")
        break
# NOTE(review): original indentation was lost in this copy; `else` is read as
# the while-loop's else (runs only when all three guesses miss) — confirm.
else:
    print("Sorry,you failed!")
# Spacing between lines of code.
# While Loop Up and IfElseElif Down.
# Down-payment demo driven by two hand-set boolean flags.
price1 = 1000
print(f"The price of the property is ${price1}")
# NOTE(review): result of this bare int(...) call is discarded.
int(price1)
if_cred_good = False
down_p = 0.1*int(price1)
if_cred_bad = True
down_p1 = 0.2*int(price1)
if if_cred_good:
    print(f"Your down payment is ${down_p}")
elif if_cred_bad:
    print(f"Your down payment is ${down_p1}")
else:
    print("You are not buying the house.")
# IfElseElif Practice 1.
# Spacing between two files.
name = "This is a girl"
if_name = False
messages = "Ishan is a boy."
if_messages = True
if if_messages:
    print(messages)
elif if_name:
    print(name)
else:
    print("404 Not Found")
# IfElseElif Practice 2.
# Spacing between two files.
is_hot = False
is_cold = True
if is_hot:
    print("The day is hot.")
    print("Wear cotton clothes.")
    print("Drink plenty of water.")
elif is_cold:
    print("The day is cold.")
    print("Wear warm clothes.")
else:
    print("It is a lovely day.")
# IfElseElif Practice 3.
# Spacing between two files.
is_new = True
if is_new:
    print("The thing is new")
    print("I hope you are happy.")
else:
    print("The thing is old")
    print("It's common to be unhappy.")
# IfElseElif Practice 4.
# Spacing between two files.
price = 1000000
print(f"Price of the property is ${price}")
is_credit_good = True
if is_credit_good:
    down_payment = 0.1 * price
else:
    down_payment = 0.2 * price
print(f"Your Down Payment will be: ${down_payment}")
# IfElseElif Practice 5.
# Spacing between two files.
price1 = 2000000
print(f"Price of property is ${price1}")
is_credit_bad = True
# NOTE(review): both branches use the literal 20000000 rather than `price1`
# (2000000) — likely a typo in the exercise; verify.
if is_credit_bad:
    down_payment1 = 0.2 * 20000000
else:
    down_payment1 = 0.1 * 20000000
print(f"Your down payment is: ${down_payment1}")
is_nice = True
if is_nice:
    print("The file is nice")
else:
    print("The file is not nice")
print('''The file is good is an if/else option which occurs due to presence of boolean values who are true or false
The boolean value in this case is true therefore the file is nice gets printed.
Its printing is dependant upon the boolean value is_nice.''')
print("The person making the file is Ishan Bhavsar.")
# IfElseElif Practice 6.
# Spacing between two files.
Ishan_Is_A_Boy = True
if Ishan_Is_A_Boy:
    print("Ishan is a boy")
else:
    print('Ishan is a girl.')
print("Differentiating files start.")
# Spacing between lines of code
# Made on 16th March 2020.
# Day : Monday.
# Event Updation at 8:17 P.M.
# Interactive practice script: unit conversions plus yes/no style quizzes.
# Fixes applied (all prompt/output strings unchanged):
#  * bare `int(x)` / `bool(x)` / `str(x)` statements discarded their results
#    and have been removed;
#  * True/False answers were tested via raw string truthiness (any non-empty
#    input, including "False", counted as True) — answers are now compared
#    against the literal "True" the prompts ask for;
#  * the second-tallest-mountain branch compared upper-cased input against
#    mixed-case "Godwin Austin Peak" and could never match.
print("Welcome to differentiating files.")
print(
    '''WELCOME!!!!'''
)
weight_in_kg = input("Please input your weight to get it converted to pounds: ")
weight_in_pounds = int(weight_in_kg) * 0.45
print(f"{weight_in_pounds} is your weight in pounds.")
# Spacing between lines of code.
distance_in_km = input("Input distance of journey in kms to know it in miles: ")
distance_in_miles = int(distance_in_km) * 0.621371
print(f"{distance_in_miles} is the distance of your journey in miles.")
# Spacing between lines of code.
temperature = input("Input today's temperature(!!!in numbers only!!!): ")
if int(temperature) >= 30:
    print("The day is hot.")
else:
    print("The day is not hot.")
# Spacing between lines of code.
has_high_income = input("Do you have high income? Reply in 'True' or 'False' only. ") == "True"
has_criminal_record = input("Do you have any criminal record? Reply in 'True' or 'False' only. ") == "True"
has_high_credit = input("Do you have high credit? Reply in 'True' or 'False' only. ") == "True"
one = input('Your loan value is: ')
if has_high_credit and not has_criminal_record:
    print(f"You are eligible for the loan of $ {one}.")
else:
    print(f"You are not eligible for the loan of $ {one}")
# Spacing between lines of code.
if has_high_credit and has_high_income:
    print(f"You are eligible for the loan of $ {one}")
# Spacing between lines of code.
name_of_man = input("What is your name?")
if len(name_of_man) < 3:
    print("Your name is too short.")
elif len(name_of_man) > 50:
    print("Your name is too long.")
else:
    print("Your name is of good size.")
# Spacing between lines of code.
is_your_car_new = input("Is your car new(True/False)") == "True"
is_your_car_middle_aged = input("Is your car middle aged?(True/False)") == "True"
if is_your_car_new:
    print("You will get a good price for your car.")
elif is_your_car_middle_aged:
    print("You will get a fair price for your car.")
else:
    print("Your car is old.So you will get a low price.")
name_of_car = input("What is the name of your car?")
Review = input("Is this file good?(Yes/No)").upper()
Yes = "YES"
if Review == Yes:
    print("Thank you for your good review.")
elif Review == "NO":
    print("I will try to improve my software.Thank you.")
else:
    print("you have inputted something different than what i asked you to.")
    print("Input yes or no only.")
Tallest_Mountain = input("What is the name of the tallest mountain in the world?").upper()
if Tallest_Mountain == "MT.EVEREST":
    print(f"Your guess of {Tallest_Mountain} is indeed correct!")
elif Tallest_Mountain == "GODWIN AUSTIN PEAK":
    print(f"Your input {Tallest_Mountain} is the second tallest mountain in the world.")
else:
    print("You have inputted some wrong information here.Please quit if you don't wish to continue the game.")
| [
"noreply@github.com"
] | noreply@github.com |
74c89dbe14108ca58089f84b1849211bbef03127 | ab2d5b62b3ddbb7c7fe033807cd8884b8c69e89d | /Sound_Assessment/app/processing/process.py | bad02a40786b6b8e8cfcebab842cea57939dbc37 | [] | no_license | vanditthakkar/sound-assessment-toolset | 75bd8f51ef9fd03e6d7f78f7bc7fe23fd5d6cad4 | b91f024f39a8a3bef9dcb8539961c908bd8a379a | refs/heads/master | 2021-01-20T18:16:00.172285 | 2016-07-02T17:37:52 | 2016-07-02T17:37:52 | 62,408,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,034 | py | from algos_librosa import *
import os
import pprint
import midiMap
import time
class Process(object):
    """Extracts per-segment audio features (key, octave, loudness, pitch,
    timing) from a sound file using the helpers in algos_librosa.

    NOTE: this module is Python 2 (see the bare ``print`` statement below).
    """
    def __init__(self,file_name):
        # Feature dict is populated by extractFeatures(); empty until then.
        self.attr = {}
        self.file_name = file_name
    def extractFeatures(self):
        """Load the file, segment it, and fill self.attr with feature lists."""
        y,sr = load(self.file_name)
        fs = 2*sr
        # Keep only the harmonic component for feature extraction.
        y_harmonic , y_precussive = effects.hpss(y)
        audio = y_harmonic
        onset_times = np.array([0])  # placeholder; overwritten on the next line
        onset_times, onset_rate = getOnsets(audio,fs)
        segments = getSegments(audio,fs)
        # NOTE(review): 'scale', 'duration' and 'peak' buckets are declared
        # but never appended to below — they stay empty.
        self.attr = {'key':[],
                     'octave':[],
                     'scale':[],
                     'loudness':[],
                     'pitch':[],
                     'times':[],
                     'peak':[],
                     'duration':[],
                     'effective_duration':[],
                     }
        k = 0  # index into onset_times; advances only for kept segments
        for segment in segments:
            #key,scale = getKeyScale(segment)
            loudness = getLoudness(segment)
            pitch = getPitch(segment)
            key_dict = getPerfectKey(pitch[0])
            key = key_dict['val']
            octave = key_dict['octave']
            # NOTE(review): k does not advance for skipped segments, so a
            # skipped segment makes the next kept one reuse its onset time —
            # confirm this is intended.
            times = onset_times[k]
            duration = getDuration(segment)  # computed but never stored
            effective_duration = getEffectiveDuration(segment)
            peak = getPeak(segment)  # computed but never stored
            # Drop near-empty / near-silent segments.
            if effective_duration<0.01:
                continue
            if loudness<1:
                continue
            k += 1
            self.attr['key'].append(key)
            self.attr['octave'].append(octave)
            self.attr['loudness'].append(loudness)
            self.attr['pitch'].append(pitch)
            self.attr['effective_duration'].append(effective_duration)
            self.attr['times'].append(times)
        # NOTE(review): no midi file is written in this method despite the
        # message — presumably done elsewhere; confirm.
        print "Written to midi file"
    # Simple accessors over the extracted feature lists.
    def getLoud(self):
        return self.attr['loudness']
    def getKeys(self):
        return self.attr['key']
    def getPitch(self):
        return self.attr['pitch']
    def getTime(self):
        return self.attr['times']
    def getEffectiveDuration(self):
        return self.attr['effective_duration']
| [
"prashant.kiit2018@gmail.com"
] | prashant.kiit2018@gmail.com |
def print_message(message):
    """Write the given message to standard output."""
    print(message)


print_message("Hello, World!")

numbers = [1, 2, 3, 4, 5]
for value in numbers:
    print(value)
"burnsad@tcd.ie"
] | burnsad@tcd.ie |
1d1c16186e28592a9a31b87d92916a54f713d622 | 06fc484318582a00028ea0aaae5e12bbd6a7fbb8 | /test_dhe.py | d9111e393f400f82316d350c4cf4ad7cbcaefa98 | [] | no_license | sogol-golafshan/Image_Contrast | 786aea03741204f0aeb14edab284bef4060f22b0 | 3a9ac4d99468cc0e2ab051d052864a1c6fd1f6d7 | refs/heads/main | 2023-03-08T23:04:02.970980 | 2021-03-01T04:27:54 | 2021-03-01T04:27:54 | 338,703,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | import unittest
from dhe_method import dhe
import numpy as np
class TestContrast(unittest.TestCase):
    """Unit test for the dhe() dynamic-histogram-equalization routine."""

    def test_dhe(self):
        # Renamed from Test_dhe: unittest only discovers methods whose names
        # start with "test", so the original assertion never ran.
        self.img = np.array([[[122, 184, 168],
                              [133, 198, 182],
                              [209, 230, 245],
                              [132, 198, 176],
                              [112, 187, 159]],
                             [[103, 159, 178],
                              [100, 176, 149],
                              [69, 111, 130],
                              [94, 182, 152],
                              [101, 173, 143]]])
        actual_result = dhe(self.img, alpha=0.5)
        expected_result = np.array([[[86, 130, 18],
                                     [143, 213, 196],
                                     [217, 239, 255],
                                     [142, 213, 189],
                                     [84, 141, 119]],
                                    [[52, 80, 89],
                                     [44, 79, 66],
                                     [21, 34, 40],
                                     [60, 117, 98],
                                     [29, 51, 42]]])
        self.assertTrue(np.array_equal(expected_result, actual_result))


if __name__ == '__main__':
    # Restored: the runner had been commented out and replaced by print('hi'),
    # so executing the file ran no tests at all.
    unittest.main()
| [
"so.golafshan@gmail.com"
] | so.golafshan@gmail.com |
829f4d1d836ccd7c5ac15928a62f6cda12306d58 | 172d8fdde43fbbbee244bcc9835d6b5cffb08032 | /label/browser.py | a2e70cca1dcca690eb91afab11acec7afccb1dfc | [] | no_license | Rye-Catcher-ZCH/2020-zju-summer-intern | c618121f9e6bda962618909da2bf860e55793aa5 | 01dbbf5944824c9f180945a028a6609ed254b9ba | refs/heads/master | 2022-12-02T04:40:43.322066 | 2020-08-17T13:00:09 | 2020-08-17T13:01:33 | 288,179,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | # coding=utf-8
import cv2
import h5py
import numpy as np
# 打印某一帧及其后续一系列帧
def show_frame(f_no, f_count, dataset_path):
    """Display `f_count` consecutive frames starting at frame `f_no`.

    :param f_no: index of the first frame to display
    :param f_count: number of consecutive frames to show, starting at f_no
    :param dataset_path: path of the HDF5 file whose 'data' set holds frames
    """
    # `with` guarantees the HDF5 file is closed even if display fails
    # (the original handle was never closed); the dead `i_count = 0`
    # initialiser and trailing bare `return` are removed.
    with h5py.File(dataset_path, 'r') as f:
        for offset in range(f_count):
            image = f['data'][f_no + offset]
            cv2.imshow('img', image)
            cv2.waitKey(500)
# f = h5py.File('path/filename.h5','r') # open the h5 file (template)
f = h5py.File('datasets/data3_hog.h5', 'r')  # ad-hoc inspection handle — never closed; TODO confirm intended
# list the top-level keys
# print([key for key in f.keys()])
# inspect the image data
# print(f['data'][:])
# print(f['data'][:].shape)
# print(f['label'][:].shape)
# show_frame(204, 15, 'data/data3.h5')
# show_frame(204, 15, 'data/data3_anno.h5')
| [
"1325904781@qq.com"
] | 1325904781@qq.com |
0825d979fc8b3ae412bea14e56ae9ff2c14ca40d | 292f1401eb15258dff841737851c0cfbcc2542f1 | /contacts/views.py | 6f008700bea0aa17db5428258c13af953c5f451e | [] | no_license | disizmayur/Real-Estate-Management-System | 1913a0e1721d780fe8a9061f23376d034c95e18e | 80abba866f4f487c261946126b0c214a324df76b | refs/heads/master | 2023-01-11T22:11:20.476387 | 2020-11-06T03:08:30 | 2020-11-06T03:08:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | from django.shortcuts import render,redirect
from .models import contacts
# Create your views here.
def contacts(request):
    """Persist a contact-form POST and redirect back to the listing page.

    Fix: this view function shadows the `contacts` model imported at module
    level, so the original `contact=contacts(...)` line re-invoked the view
    itself with model kwargs (a TypeError at runtime). The model is imported
    here under a local alias instead.
    """
    from .models import contacts as ContactModel

    if request.method == "POST":
        listings = request.POST['listings']
        listings_id = request.POST['listings_id']
        name = request.POST['name']
        email = request.POST['email']
        phone = request.POST['phone']
        message = request.POST['message']
        user_id = request.POST['user_id']
        realtor_email = request.POST['realtor_email']  # read but currently unused
        contact = ContactModel(listings=listings, listings_id=listings_id,
                               name=name, email=email, phone=phone,
                               message=message, user_id=user_id)
        contact.save()
        return redirect('/listings/' + listings_id)
"piyushjoy39@gmail.com"
] | piyushjoy39@gmail.com |
08daf685e289fcd1c3bd571084ec7adee883620e | 7f6038f6348a484fb969a2ee53b4241d4efaa1a3 | /manage.py | 1d8f73dc7947e1be112aa5c2c1c3ff4adef53cc4 | [] | no_license | anilpendela/Trello | 2177d91d79c716cfa7798e3440067a266ae19e36 | f69ff4312ecb07fa6e5047c5f947904573f90956 | refs/heads/main | 2023-02-24T06:43:24.230006 | 2021-01-17T16:48:41 | 2021-01-17T16:48:41 | 330,438,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks (standard generated entry point)."""
    # Point Django at this project's settings before management imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Trello.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the sub-command given on the command line (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"anilpendela1106@gmail.com"
] | anilpendela1106@gmail.com |
def threeSum(nums):
    """Return all unique triplets [a, b, c] from nums with a + b + c == 0.

    :type nums: List[int]
    :rtype: List[List[int]]

    Classic sort + two-pointer solution: O(n^2) time, O(1) extra space.
    The original body was entirely commented out (the hash-of-pair-sums
    sketch could not deduplicate triplets) and the function returned None;
    this implements the intended behaviour. The dead `d = {1: 2}` and bare
    `enumerate` statements that followed are removed.
    """
    nums = sorted(nums)
    n = len(nums)
    triplets = []
    for i in range(n - 2):
        if i > 0 and nums[i] == nums[i - 1]:
            continue  # skip duplicate anchor values
        if nums[i] > 0:
            break  # smallest remaining value is positive: no zero sum possible
        lo, hi = i + 1, n - 1
        while lo < hi:
            s = nums[i] + nums[lo] + nums[hi]
            if s < 0:
                lo += 1
            elif s > 0:
                hi -= 1
            else:
                triplets.append([nums[i], nums[lo], nums[hi]])
                lo += 1
                hi -= 1
                # skip duplicates on both sides of the found pair
                while lo < hi and nums[lo] == nums[lo - 1]:
                    lo += 1
                while lo < hi and nums[hi] == nums[hi + 1]:
                    hi -= 1
    return triplets


print(threeSum([-1, 0, 1, 2, -1, -4]))
| [
"yhuichun47@gmail.com"
] | yhuichun47@gmail.com |
999cc1bfad70378ed8cc86c33ab3c8f2386baed2 | 595d58e00851a295d1b040c91d15f7d5c1c9b005 | /ResumeWebsite/urls.py | c55dc7cecfb752a0038d0281d03499f5fe0987af | [] | no_license | HelenaTeixeira/ResumeWebsite | 88113f26fd109d8f0aa762f0d0aaaa9f4717c55e | 54bc332be193671270983b1845f1af97a3e2d929 | refs/heads/master | 2022-12-04T05:15:20.116870 | 2020-08-20T15:01:18 | 2020-08-20T15:01:18 | 282,476,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """ResumeWebsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Top-level URL routing: the admin site, then everything else is delegated
# to the `base` app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('base.urls'))
]
| [
"helena.teixeira@tecnico.ulisboa.pt"
] | helena.teixeira@tecnico.ulisboa.pt |
3e2bad2f6238da2bf10a29f88685f5cc08343d91 | fa17a53b00e4b0551ac63a1bcacea60e838eb5eb | /236-lowest-common-ancestor-of-a-binary-tree/236-lowest-common-ancestor-of-a-binary-tree.py | 206e310f567c8951542920439ef59c069ed48fab | [] | no_license | SiddharthaHaldar/leetcodeProblems | 31504d79ae6f6f5e8be4bda48cc117ac022968f0 | c5de8d117942457cddec5b6d9c84cda07b14deed | refs/heads/master | 2022-09-20T10:32:28.653813 | 2022-08-28T03:44:10 | 2022-08-28T03:44:10 | 190,889,035 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Return the lowest common ancestor of nodes p and q under root.

        Post-order DFS: each call reports whether its subtree contains p or q
        (matched by value). The first node where two of {self, left, right}
        report a hit becomes the LCA, after which hits stop propagating.
        """
        lca = None
        def helper(root,p,q):
            nonlocal lca
            Self = False   # this node itself is p or q
            left= False    # p or q found in the left subtree
            right = False  # p or q found in the right subtree
            if(root.val == p.val or root.val == q.val):
                Self = True
            if(root.left != None):
                left = helper(root.left,p,q)
                # Once the LCA is fixed, short-circuit: skip the right subtree
                # and stop reporting hits upward.
                if(lca != None):
                    return False
            if(root.right != None):
                right = helper(root.right,p,q)
            # Two distinct hits meet here -> this node is the LCA.
            if((Self and (left or right)) or (left and right)):
                lca = root
                return False
            return Self or left or right
        helper(root,p,q)
        return lca
"sidhaldar98@gmail.com"
] | sidhaldar98@gmail.com |
dc9090d64099cc6c96a32c845434f08547b250f8 | 4bc72996af7eb043f8ed179fd4127c2caf8ca852 | /codalab/bundles/make_bundle.py | ab07b2397837109e6e3ba8ab7fa168b1fa1ef2f8 | [] | no_license | skishore/bundles | 472d5f1a77fd5cac19dd2ddca5dc824efd77a6f9 | 9bde3f2c8355ad087b5e04d6dcbb24e3274149c4 | refs/heads/master | 2020-04-05T22:48:35.038580 | 2014-01-25T19:53:32 | 2014-01-25T19:55:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,893 | py | '''
MakeBundle is a Bundle type that symlinks a number of targets in from other
bundles to produce a new, packaged bundle.
'''
import os
from codalab.bundles.named_bundle import NamedBundle
from codalab.common import (
precondition,
State,
UsageError,
)
from codalab.lib import spec_util
class MakeBundle(NamedBundle):
    """Bundle that packages symlinks to targets drawn from other bundles.

    NOTE: Python 2 module (`dict.iteritems` below).
    """
    BUNDLE_TYPE = 'make'
    NAME_LENGTH = 8  # uuid prefix length used for auto-generated bundle names

    @classmethod
    def construct(cls, targets, metadata):
        """Build the bundle dict for targets ({child_path: (parent, parent_path)})."""
        uuid = spec_util.generate_uuid()
        # Check that targets does not include both keyed and anonymous targets.
        if len(targets) > 1 and '' in targets:
            raise UsageError('Must specify keys when packaging multiple targets!')
        # Support anonymous make bundles with names based on their uuid.
        if not metadata['name']:
            metadata['name'] = 'make-%s' % (uuid[:cls.NAME_LENGTH],)
        # List the dependencies of this bundle on its targets.
        dependencies = []
        for (child_path, (parent, parent_path)) in targets.iteritems():
            dependencies.append({
                'child_uuid': uuid,
                'child_path': child_path,
                'parent_uuid': parent.uuid,
                'parent_path': parent_path,
            })
        return super(MakeBundle, cls).construct({
            'uuid': uuid,
            'bundle_type': cls.BUNDLE_TYPE,
            'command': None,
            'data_hash': None,
            'state': State.CREATED,
            'metadata': metadata,
            'dependencies': dependencies,
        })

    def get_hard_dependencies(self):
        # All dependencies must be materialised before this bundle can run.
        return self.dependencies

    def run(self, bundle_store, parent_dict, temp_dir):
        """Materialise the symlink tree under temp_dir and upload it."""
        if any(not dep.child_path for dep in self.dependencies):
            # NOTE(review): the trailing comma makes `message` a 1-tuple,
            # presumably unintended — confirm before relying on its text.
            message = '%s has keyed and anonymous targets!' % (self,),
            precondition(len(self.dependencies) == 1, message)
            temp_dir = os.path.join(temp_dir, 'anonymous_link')
        self.install_dependencies(bundle_store, parent_dict, temp_dir, rel=True)
        return bundle_store.upload(temp_dir, allow_symlinks=True)
| [
"skishore@mit.edu"
] | skishore@mit.edu |
b9f866ecbe5dbe3d6ca8a604c63acbefa5ccf299 | aa2e2560a8b3877cd5a87455d951715f31779770 | /rest/post/models.py | c8554492bc434948e51123f95e57b18c8589ba07 | [] | no_license | euntaekseon/api | f27b452850d6d0645d4e0e4c1ade094a8c7b7978 | 9e7363009677ed5d3f9a151d83cda4b1ce8d9f5f | refs/heads/master | 2020-08-30T17:40:18.590876 | 2019-10-30T05:01:36 | 2019-10-30T05:01:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.db import models
# Create your models here.
class Post(models.Model):
    """A minimal blog post: a short title plus free-form body text."""
    title = models.CharField(max_length=100)  # headline, capped at 100 chars
    body = models.TextField()  # full post content
"set1192@naver.com"
] | set1192@naver.com |
588be8d4d204381a005e163489d8519964ea0870 | 260c5e964220bfee484b73efbcb2de5274f118c6 | /Login.py | 99546e376b872b3bd3d852094ee89b8cae5dbbcb | [] | no_license | Ashank-Dsouza/Instagram-Image-Scrapper | b7e9e11db997389b67e622b08a94570a589d3e39 | 7c696e568f316595b47c87118454b0291e5269cb | refs/heads/main | 2023-04-11T16:16:18.347058 | 2021-04-30T15:40:04 | 2021-04-30T15:40:04 | 363,155,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | import instaloader
from Constants import Constants
class LoginInstagram:
    """Thin wrapper that logs in to Instagram via instaloader using the
    credentials from Constants."""

    @staticmethod
    def Login():
        """Log in and return the authenticated Instaloader session.

        Fixes: the original method was declared without `self` (and without
        @staticmethod), so it only worked as the class-level call
        LoginInstagram.Login(); marking it static keeps that call form and
        also allows instance calls. The no-op __init__ and the pointless
        `constants = Constants` alias are removed.
        """
        loader = instaloader.Instaloader()
        loader.login(Constants.USER, Constants.PASSWORD)
        return loader
"ashankdsouza054@yahoo.com"
] | ashankdsouza054@yahoo.com |
adb68c6127787b716ec3cb1ef22a39edaa1de884 | 6987647b6487097b90623b5a205e1752966fc91f | /webempresa/services/migrations/0002_rename_services_service.py | cbaacfb1b6aa089795584f9e824e3d902d73f8e7 | [] | no_license | Ramiroacev/web-empresa | fea20e6a8eda8dcdcb4f701adc5e3e50945df593 | e3c9db95a027bb3511ee2953862708c2071c57b2 | refs/heads/main | 2023-08-28T01:52:52.660839 | 2021-11-04T13:15:03 | 2021-11-04T13:15:03 | 424,248,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # Generated by Django 3.2.8 on 2021-10-19 13:38
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames the Services model to Service."""

    dependencies = [
        ('services', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Services',
            new_name='Service',
        ),
    ]
| [
"ramiroacev@gmail.com"
] | ramiroacev@gmail.com |
35f1fed4e1d4d0fee1a1f837c9ca8c352be6b3c1 | 33081717542b8211084aa0def32e0941ef8660e6 | /loginhandle/migrations/0004_mysale.py | 7e9e72279c609c4133897e26c80075145012ddc1 | [] | no_license | Chinmay1812/Restaurant-Management-System | 1df7793a85baad3b0c2280bb3481ad97a584fb37 | e7c600ea634543d044cc57376afb4fbf846f90f0 | refs/heads/main | 2023-07-03T19:58:51.876995 | 2021-08-13T10:38:49 | 2021-08-13T10:38:49 | 395,600,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Generated by Django 3.1.3 on 2020-11-23 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the mysale model (name + profit)."""

    dependencies = [
        ('loginhandle', '0003_mylist'),
    ]

    operations = [
        migrations.CreateModel(
            name='mysale',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('profit', models.IntegerField()),
            ],
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
6e4abc00113d6b561e5acc7b39de395f44ae02c3 | 388ff52dec8f4780a2d1cfd3f07f9228373a6b03 | /0x0A-python-inheritance/6-base_geometry.py | 1f8561cd5c07e6e9c0738b8ac8295dfb5d3a7038 | [] | no_license | dairof7/holbertonschool-higher_level_programming | 6bbbb0eb2f2c13553e63056e0cee0ade7e028afe | 6de0ea30c02a69f9721b4304eb0d48fca626e2df | refs/heads/master | 2023-01-14T09:58:13.327692 | 2020-11-10T16:51:50 | 2020-11-10T16:51:50 | 259,339,091 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/python3
""" this module createa class BaseGeometry"""
class BaseGeometry():
    """Base class for geometry objects."""

    def area(self):
        """Compute the area of the figure.

        Always raises, since concrete subclasses must override this method.
        """
        raise Exception("area() is not implemented")
| [
"dairof7@gmail.com"
] | dairof7@gmail.com |
40baafba1ef7c824adaf1d277f15fefe17073e26 | b04477308637c218e9331a14f23965174560c167 | /data_storage/json_indexer.py | b69765fe9845b551935abe092b40203ffc99d3b3 | [] | no_license | madboy/experiments | 2774d038dc3e6b95a3eea17f493cf73c20622611 | 89d50dd4dc50434b8563d1a19d80cadb09c76383 | refs/heads/master | 2021-01-10T21:37:51.041348 | 2015-06-03T12:25:13 | 2015-06-03T12:25:13 | 32,628,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #!/usr/bin/env python
import sys
from collections import defaultdict
import json
valid_indices = {'date': 0, 'name': 1, 'certainty': 2, 'message': 3}
idx = defaultdict(list)
def get_key(col_nbr, line):
    """Return column `col_nbr` of the tab-separated record `line`."""
    return line.split('\t')[col_nbr]
def indexer(source, index_key):
    """Build a JSON index of `source` keyed on column `index_key`.

    Each tab-separated line of `source` is appended to the module-level `idx`
    bucket for its value in the requested column, and the mapping is dumped
    to '<index_key>_jsonindex.idx'. Unknown keys print an error and write
    nothing.
    """
    if index_key in valid_indices:
        index_file = "%s_%s.idx" % (index_key, 'jsonindex')
        col_nbr = valid_indices[index_key]
        # 'w' (text mode), not 'wb': json.dump writes str, so a binary-mode
        # handle raises TypeError on Python 3.
        with open(index_file, 'w') as i:
            for line in source.readlines():
                line = line.strip()
                # Renamed from `index_key`: the original rebound the function
                # parameter inside the loop, shadowing it.
                row_key = get_key(col_nbr, line)
                idx[row_key].append(line)
            json.dump(dict(idx), i)
        print("Index has been created")
    else:
        print("Invalid index key given")
if __name__ == '__main__':
    source_file = sys.argv[1]
    index = sys.argv[2]
    # Text mode ('r', not 'rb'): indexer splits lines on '\t', which requires
    # str rather than bytes on Python 3.
    with open(source_file, 'r') as s:
        indexer(s, index)
| [
"k.lindberg@gmail.com"
] | k.lindberg@gmail.com |
32c69ea6b628898d783b4ce873b7a823903debef | 1795dd51c3b2b44bafea76c68f81296a05b98bdf | /shell.py | 7fc7423cdbba8edce48efc946c9396d998c33719 | [] | no_license | artempyanykh/data-zero-to-cloud | e76dec563dcad063df6c97c53aabd4bbcc3dd3d3 | 804baa5524e5b8099ead85acbc033e6058036dd7 | refs/heads/master | 2020-03-20T18:31:23.201549 | 2018-06-24T15:07:41 | 2018-06-28T19:54:02 | 137,591,995 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | #!/usr/bin/env python
import os
import IPython
from google.cloud import storage
import ConfigParser
def abspath_join(*args):
    """Join the given components under the filesystem root and normalise."""
    rooted = os.path.join(os.sep, *args)
    return os.path.abspath(rooted)
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.abspath(
os.path.join(os.path.sep, os.path.dirname(os.path.abspath(__file__)), 'config/credentials.json'))
config_file = abspath_join(os.path.abspath(__file__), '../config/config.ini')
config = ConfigParser.ConfigParser()
config.read(config_file)
bucket_name = config.get('CLOUD', 'target_dir')[5:]
client = storage.Client()
bucket = client.bucket(bucket_name)
def gls(path=''):
    # List blobs under `path` in the module-level GCS bucket (an "ls").
    return list(bucket.list_blobs(prefix=path))
def gcat(path):
    # Print a blob's contents line by line (a "cat").
    # NOTE: Python 2 module (bare `print` statement below).
    blob = bucket.get_blob(path)
    contents = blob.download_as_string()
    for x in contents.split('\n'):
        print x
    return
ns = {
'client': storage.Client(),
'bucket': bucket,
'gls': gls,
'gcat': gcat
}
IPython.start_ipython(user_ns=ns)
| [
"artem.pyanykh@gmail.com"
] | artem.pyanykh@gmail.com |
98eeeb68733b23d4c1da6ec09a9f857b2e7e3a56 | fb2446e3069ad08fff13ed025dd79afce9413e90 | /dottedline.py | a399853c86e7e5bb6564fc3e70f308c2ef4241e8 | [] | no_license | josenavarro-leadps/class-sample | 91f2ec2d9fdc81294dafe14f846c2020e3681e90 | 11dc46b819f0bef3b81fd4b504ffadf6bbf643c3 | refs/heads/master | 2021-01-21T04:47:17.365230 | 2016-06-13T06:21:49 | 2016-06-13T06:21:49 | 48,134,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import turtle
# Draws a dashed line (and circles) with turtle graphics.
# NOTE(review): the original indentation was lost in this dump; the loop body
# below (everything through the counter increment) is a best-effort
# reconstruction — confirm against the original file.
notshawn = turtle.Turtle()
count = 0
notshawn.speed(0)  # 0 = fastest drawing speed
while count < 20:
    notshawn.forward(5)  # pen down: draw a 5px dash
    notshawn.penup()
    notshawn.forward(5)  # pen up: 5px gap
    notshawn.pendown()
    notshawn.penup()
    notshawn.goto(-100, -55)
    notshawn.circle(10)
    notshawn.pendown()
    count = count + 1
turtle.exitonclick()  # keep the window open until the user clicks
| [
"rc.navarro.jose@leadps.org"
] | rc.navarro.jose@leadps.org |
dc79004f521b1598dc31d45f964bbd2e5b317f5b | 943776f55bf6d5fc52dc4ad7cce73a307d3ac07d | /blog/mysite/blog/lib/python3.7/_collections_abc.py | a20b39d471a9277c0d7657e38b127baed4e9763e | [] | no_license | Samuel227/Django-blog | 71d0659915b9a71c947dc4095a18a4a02c7ccd01 | 70cce55bd29fb7298e68e03d5d15ed89651396fc | refs/heads/master | 2022-09-11T13:31:48.252976 | 2020-06-04T04:43:36 | 2020-06-04T04:43:36 | 268,402,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | /Users/samuelopawale/anaconda3/lib/python3.7/_collections_abc.py | [
"samuelopawale@Samuels-MacBook-Air.local"
] | samuelopawale@Samuels-MacBook-Air.local |
c5e01bf596aac0ed1d208a0af718acd8de4776bc | 391fc54a98e7d59cd13f07faf288151222ccef70 | /music_generator/voter.py | 31d7f5758a9025e802f57e95048f6fbf2d569de7 | [] | no_license | BrydonLeonard/MusicalEnsemble | 84bc628f65f5ca04b70e35b565bed5e22344b9d0 | 6f1518588004c83215c59d46a6e4b52c44837627 | refs/heads/master | 2020-03-28T07:35:46.195555 | 2018-10-03T13:25:50 | 2018-10-03T13:25:50 | 147,911,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from keras.models import load_model
import numpy as np
from numpy import array
class Voter:
    """Wraps a trained Keras model that votes on the next note in a sequence."""
    def __init__(self, model_file):
        # Path kept for reference; the model itself is loaded eagerly.
        self.model_file = model_file
        self.model = load_model(model_file)
    def get_next_note_vote(self, previous_notes):
        """Return the index of the most probable next note given previous_notes."""
        previous_notes_array = array(previous_notes)
        # Reshaped to (batch=1, timesteps, features=1) — presumably a
        # sequence-model input layout; confirm against the trained model.
        previous_notes_array = previous_notes_array.reshape((1, len(previous_notes), 1))
        predicted_note = self.model.predict(previous_notes_array, verbose=0)
        chosen_note = np.argmax(predicted_note)
        return chosen_note
| [
"brydon.leonard@gmail.com"
] | brydon.leonard@gmail.com |
d0d6559f25c598874a8af86466a744693f607a08 | ee5f59f815bb68d1118941680fdbc1902638577d | /fibonacci.py | 3c3b57db736297ce8d67f07e5e2f879fe5763976 | [] | no_license | Ercion/learning_python | abe78e53acacb9c07372ce2f841adad213a7dfc4 | f85cd144e4bd883078a04ea229f3a459e842238d | refs/heads/master | 2021-07-07T21:34:02.504537 | 2020-07-09T11:29:00 | 2020-07-09T11:29:00 | 140,273,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:51:13 2020
@author: Ercan Karaçelik
"""
def fibonacci(n):
    '''
    Parameters
    ----------
    n : number of fibonacci numbers to produce

    Returns
    -------
    list
        The first n Fibonacci numbers, [1, 1, 2, 3, 5, ...].
    '''
    sequence = []
    previous, current = 0, 1
    while len(sequence) < n:
        sequence.append(current)
        previous, current = current, previous + current
    return sequence
print(fibonacci(15))
| [
"noreply@github.com"
] | noreply@github.com |
56514e3944ccbabdddd6b06f15a87ab47d7b8c4c | e30628292d284676300ec9a6c98dd50e5a8f8274 | /preprocessing/__init__.py | 1ee8c7f936b9fa26219c6a5f108b53715dc5b9e2 | [] | no_license | sasidhar-danturti/LoanPrediction-PP | 51d7d2ec232c9d4a626367ead8625364b2c69e1f | 492022e9fef00d4a63acae3c59e6450d31dc334a | refs/heads/master | 2023-07-10T16:49:58.865444 | 2023-06-22T04:45:34 | 2023-06-22T04:45:34 | 103,243,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | __author__ = 'synerzip'
| [
"sasidhar.danturti@synerzip.com"
] | sasidhar.danturti@synerzip.com |
7d3fde787ddd4ec5bd99d7b8c05cfb2be39f464f | 96f3b98226bad41fb15ca622a1057a88af85bf23 | /saleor/payment/gateways/dummy_credit_card/plugin.py | 99b9f9806c28d5033a24b663485ae9561908bd77 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | Hispar/saleor | f14b6530ab9afda28cd2cf5141c964d07b8bed6f | c27a0cc85c037820efccefe8a9d6e5460bf21df3 | refs/heads/master | 2022-11-28T09:16:58.542242 | 2020-07-22T20:33:34 | 2020-07-22T20:33:34 | 281,746,209 | 0 | 0 | NOASSERTION | 2020-07-22T17:51:41 | 2020-07-22T17:51:40 | null | UTF-8 | Python | false | false | 4,258 | py | from typing import TYPE_CHECKING
from django.conf import settings
from saleor.plugins.base_plugin import BasePlugin, ConfigurationTypeField
from ..utils import get_supported_currencies
from . import (
GatewayConfig,
authorize,
capture,
confirm,
get_client_token,
process_payment,
refund,
void,
)
GATEWAY_NAME = "Dummy Credit Card"
if TYPE_CHECKING:
from ...interface import GatewayResponse, PaymentData, TokenConfig
def require_active_plugin(fn):
    """Decorator: run `fn` only when the plugin instance is active.

    When the plugin is inactive, short-circuit and return the pipeline's
    `previous_value` (read from kwargs) untouched.
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(fn)  # fix: preserve fn's __name__/__doc__ for introspection/debugging
    def wrapped(self, *args, **kwargs):
        previous = kwargs.get("previous_value", None)
        if not self.active:
            return previous
        return fn(self, *args, **kwargs)

    return wrapped
class DummyCreditCardGatewayPlugin(BasePlugin):
    """Fake credit-card payment gateway for development and testing.

    Each gateway operation delegates to the module-level dummy functions;
    @require_active_plugin makes an inactive plugin pass the pipeline's
    previous value through untouched.
    """
    PLUGIN_ID = "mirumee.payments.dummy_credit_card"
    PLUGIN_NAME = GATEWAY_NAME
    DEFAULT_ACTIVE = True
    # Default admin-configurable options (name/value pairs).
    DEFAULT_CONFIGURATION = [
        {"name": "Store customers card", "value": False},
        {"name": "Automatic payment capture", "value": True},
        {"name": "Supported currencies", "value": settings.DEFAULT_CURRENCY},
    ]
    # Field types / labels / help text the dashboard uses to render the
    # options declared above.
    CONFIG_STRUCTURE = {
        "Store customers card": {
            "type": ConfigurationTypeField.BOOLEAN,
            "help_text": "Determines if Saleor should store cards.",
            "label": "Store customers card",
        },
        "Automatic payment capture": {
            "type": ConfigurationTypeField.BOOLEAN,
            # NOTE(review): "automaticaly" typo is user-visible runtime text;
            # deliberately left untouched (this edit changes comments only).
            "help_text": "Determines if Saleor should automaticaly capture payments.",
            "label": "Automatic payment capture",
        },
        "Supported currencies": {
            "type": ConfigurationTypeField.STRING,
            "help_text": "Determines currencies supported by gateway."
            " Please enter currency codes separated by a comma.",
            "label": "Supported currencies",
        },
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Flatten the [{"name": ..., "value": ...}] configuration list into a dict.
        configuration = {item["name"]: item["value"] for item in self.configuration}
        self.config = GatewayConfig(
            gateway_name=GATEWAY_NAME,
            auto_capture=configuration["Automatic payment capture"],
            supported_currencies=configuration["Supported currencies"],
            connection_params={},
            store_customer=configuration["Store customers card"],
        )
    def _get_gateway_config(self):
        return self.config
    @require_active_plugin
    def authorize_payment(
        self, payment_information: "PaymentData", previous_value
    ) -> "GatewayResponse":
        return authorize(payment_information, self._get_gateway_config())
    @require_active_plugin
    def capture_payment(
        self, payment_information: "PaymentData", previous_value
    ) -> "GatewayResponse":
        return capture(payment_information, self._get_gateway_config())
    @require_active_plugin
    def confirm_payment(
        self, payment_information: "PaymentData", previous_value
    ) -> "GatewayResponse":
        return confirm(payment_information, self._get_gateway_config())
    @require_active_plugin
    def refund_payment(
        self, payment_information: "PaymentData", previous_value
    ) -> "GatewayResponse":
        return refund(payment_information, self._get_gateway_config())
    @require_active_plugin
    def void_payment(
        self, payment_information: "PaymentData", previous_value
    ) -> "GatewayResponse":
        return void(payment_information, self._get_gateway_config())
    @require_active_plugin
    def process_payment(
        self, payment_information: "PaymentData", previous_value
    ) -> "GatewayResponse":
        return process_payment(payment_information, self._get_gateway_config())
    @require_active_plugin
    def get_client_token(self, token_config: "TokenConfig", previous_value):
        return get_client_token()
    @require_active_plugin
    def get_supported_currencies(self, previous_value):
        config = self._get_gateway_config()
        return get_supported_currencies(config, GATEWAY_NAME)
    @require_active_plugin
    def get_payment_config(self, previous_value):
        config = self._get_gateway_config()
        return [{"field": "store_customer_card", "value": config.store_customer}]
| [
"iga.karbowiak@mirumee.com"
] | iga.karbowiak@mirumee.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.