max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
python/streambar/__init__.py
|
mvdwerve/streambar
| 3
|
6629551
|
from _streambar import *
|
from _streambar import *
|
none
| 1
| 1.106714
| 1
|
|
perses/tests/test_relative_point_mutation_setup.py
|
schallerdavid/perses
| 99
|
6629552
|
<filename>perses/tests/test_relative_point_mutation_setup.py
def test_PointMutationExecutor():
    """Smoke-test PointMutationExecutor with flattened torsions/exceptions.

    Builds an ALA->ASP point mutation for the vacuum alanine dipeptide and
    skips endstate validation.
    """
    from pkg_resources import resource_filename
    from simtk import unit
    from perses.app.relative_point_mutation_setup import PointMutationExecutor

    input_pdb = resource_filename("perses", "data/ala_vacuum.pdb")
    executor_options = dict(
        ionic_strength=0.15 * unit.molar,
        flatten_torsions=True,
        flatten_exceptions=True,
        conduct_endstate_validation=False,
    )
    PointMutationExecutor(input_pdb, "1", "2", "ASP", **executor_options)
def test_PointMutationExecutor_endstate_validation():
    """Smoke-test PointMutationExecutor with endstate validation enabled.

    Same ALA->ASP vacuum setup as test_PointMutationExecutor, but without
    flattening torsions/exceptions so the endstates can be validated.
    """
    from pkg_resources import resource_filename
    from simtk import unit
    from perses.app.relative_point_mutation_setup import PointMutationExecutor

    input_pdb = resource_filename("perses", "data/ala_vacuum.pdb")
    executor_options = dict(
        ionic_strength=0.15 * unit.molar,
        flatten_torsions=False,
        flatten_exceptions=False,
        conduct_endstate_validation=True,
    )
    PointMutationExecutor(input_pdb, "1", "2", "ASP", **executor_options)
|
<filename>perses/tests/test_relative_point_mutation_setup.py
def test_PointMutationExecutor():
    """Smoke-test PointMutationExecutor with flattened torsions/exceptions.

    Builds an ALA->ASP point mutation for the vacuum alanine dipeptide and
    skips endstate validation.
    """
    from pkg_resources import resource_filename
    from simtk import unit
    from perses.app.relative_point_mutation_setup import PointMutationExecutor

    input_pdb = resource_filename("perses", "data/ala_vacuum.pdb")
    executor_options = dict(
        ionic_strength=0.15 * unit.molar,
        flatten_torsions=True,
        flatten_exceptions=True,
        conduct_endstate_validation=False,
    )
    PointMutationExecutor(input_pdb, "1", "2", "ASP", **executor_options)
def test_PointMutationExecutor_endstate_validation():
    """Smoke-test PointMutationExecutor with endstate validation enabled.

    Same ALA->ASP vacuum setup as test_PointMutationExecutor, but without
    flattening torsions/exceptions so the endstates can be validated.
    """
    from pkg_resources import resource_filename
    from simtk import unit
    from perses.app.relative_point_mutation_setup import PointMutationExecutor

    input_pdb = resource_filename("perses", "data/ala_vacuum.pdb")
    executor_options = dict(
        ionic_strength=0.15 * unit.molar,
        flatten_torsions=False,
        flatten_exceptions=False,
        conduct_endstate_validation=True,
    )
    PointMutationExecutor(input_pdb, "1", "2", "ASP", **executor_options)
|
none
| 1
| 2.062923
| 2
|
|
LeetCode/1299.replace-elements-with-greatest-element-on-right-side.py
|
tushar-1728/Coding
| 0
|
6629553
|
<gh_stars>0
#
# @lc app=leetcode id=1299 lang=python3
#
# [1299] Replace Elements with Greatest Element on Right Side
#
# @lc code=start
from typing import List
class Solution:
    """LeetCode 1299: Replace Elements with Greatest Element on Right Side."""

    def replaceElements(self, arr: List[int]) -> List[int]:
        """Replace each element with the greatest element to its right.

        The last element becomes -1. The list is modified in place and also
        returned. Single right-to-left sweep: O(n) time, O(1) extra space.
        """
        best_to_right = -1  # greatest value strictly to the right of index idx
        for idx in range(len(arr) - 1, -1, -1):
            # RHS evaluates before assignment, so arr[idx] is the old value.
            arr[idx], best_to_right = best_to_right, max(best_to_right, arr[idx])
        return arr
# @lc code=end
|
#
# @lc app=leetcode id=1299 lang=python3
#
# [1299] Replace Elements with Greatest Element on Right Side
#
# @lc code=start
from typing import List
class Solution:
    """LeetCode 1299: Replace Elements with Greatest Element on Right Side."""

    def replaceElements(self, arr: List[int]) -> List[int]:
        """Replace each element with the greatest element to its right.

        The last element becomes -1. The list is modified in place and also
        returned. Single right-to-left sweep: O(n) time, O(1) extra space.
        """
        best_to_right = -1  # greatest value strictly to the right of index idx
        for idx in range(len(arr) - 1, -1, -1):
            # RHS evaluates before assignment, so arr[idx] is the old value.
            arr[idx], best_to_right = best_to_right, max(best_to_right, arr[idx])
        return arr
# @lc code=end
|
en
| 0.439674
|
# # @lc app=leetcode id=1299 lang=python3 # # [1299] Replace Elements with Greatest Element on Right Side # # @lc code=start # @lc code=end
| 3.515955
| 4
|
elastalert/alerter/jira_alerter.py
|
JasperJuergensen/elastalert
| 2
|
6629554
|
import datetime
import logging
import sys
from elastalert.alerter import Alerter
from elastalert.alerter.match_string import JiraFormattedMatchString
from elastalert.exceptions import EAException
from elastalert.utils.time import pretty_ts, ts_now, ts_to_dt
from elastalert.utils.util import lookup_es_key
from jira.client import JIRA
from jira.exceptions import JIRAError
log = logging.getLogger(__name__)
class JiraAlerter(Alerter):
""" Creates a Jira ticket for each alert """
required_options = frozenset(
["jira_server", "jira_account_file", "jira_project", "jira_issuetype"]
)
# Maintain a static set of built-in fields that we explicitly know how to set
# For anything else, we will do best-effort and try to set a string value
known_field_list = [
"jira_account_file",
"jira_assignee",
"jira_bump_after_inactivity",
"jira_bump_in_statuses",
"jira_bump_not_in_statuses",
"jira_bump_only",
"jira_bump_tickets",
"jira_component",
"jira_components",
"jira_description",
"jira_ignore_in_title",
"jira_issuetype",
"jira_label",
"jira_labels",
"jira_max_age",
"jira_priority",
"jira_project",
"jira_server",
"jira_transition_to",
"jira_watchers",
]
# Some built-in jira types that can be used as custom fields require special handling
# Here is a sample of one of them:
# {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,
# "navigable":true,"searchable":true,
# "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
# "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
# There are likely others that will need to be updated on a case-by-case basis
custom_string_types_with_special_handling = [
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes",
"com.atlassian.jira.plugin.system.customfieldtypes:multiselect",
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons",
]
def __init__(self, rule):
    """Configure the alerter from *rule* and connect to the JIRA server.

    Args:
        rule: the rule configuration dict (keys ``jira_server``,
            ``jira_account_file``, ``jira_project`` and ``jira_issuetype``
            are required; see ``known_field_list`` for the optional ones).

    Raises:
        EAException: if the connection to JIRA fails.
    """
    super(JiraAlerter, self).__init__(rule)
    self.server = self.rule["jira_server"]
    self.get_account(self.rule["jira_account_file"])
    self.project = self.rule["jira_project"]
    self.issue_type = self.rule["jira_issuetype"]
    # Deferred settings refer to values that can only be resolved when a match
    # is found and as such loading them will be delayed until we find a match
    self.deferred_settings = []
    # We used to support only a single component. This allows us to maintain backwards compatibility
    # while also giving the user-facing API a more representative name
    self.components = self.rule.get(
        "jira_components", self.rule.get("jira_component")
    )
    # We used to support only a single label. This allows us to maintain backwards compatibility
    # while also giving the user-facing API a more representative name
    self.labels = self.rule.get("jira_labels", self.rule.get("jira_label"))
    self.description = self.rule.get("jira_description", "")
    self.assignee = self.rule.get("jira_assignee")
    self.max_age = self.rule.get("jira_max_age", 30)
    self.priority = self.rule.get("jira_priority")
    self.bump_tickets = self.rule.get("jira_bump_tickets", False)
    self.bump_not_in_statuses = self.rule.get("jira_bump_not_in_statuses")
    self.bump_in_statuses = self.rule.get("jira_bump_in_statuses")
    self.bump_after_inactivity = self.rule.get("jira_bump_after_inactivity", 0)
    self.bump_only = self.rule.get("jira_bump_only", False)
    self.transition = self.rule.get("jira_transition_to", False)
    self.watchers = self.rule.get("jira_watchers")
    self.client = None
    if self.bump_in_statuses and self.bump_not_in_statuses:
        msg = (
            "Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set."
            % (",".join(self.bump_in_statuses), ",".join(self.bump_not_in_statuses))
        )
        # BUG FIX: the original intersected bump_in_statuses with itself,
        # which made the "common statuses" warning fire for every rule that
        # set both options, even with disjoint status lists.
        intersection = list(
            set(self.bump_in_statuses) & set(self.bump_not_in_statuses)
        )
        if intersection:
            msg = (
                "%s Both have common statuses of (%s). As such, no tickets will ever be found."
                % (msg, ",".join(intersection))
            )
        msg += " This should be simplified to use only one or the other."
        log.warning(msg)
    self.reset_jira_args()
    try:
        self.client = JIRA(self.server, basic_auth=(self.user, self.password))
        self.get_priorities()
        self.jira_fields = self.client.fields()
        self.get_arbitrary_fields()
    except JIRAError as e:
        # JIRAError may contain HTML, pass along only first 1024 chars
        raise EAException(
            "Error connecting to JIRA: %s" % (str(e)[:1024])
        ).with_traceback(sys.exc_info()[2])
    self.set_priority()
def set_priority(self):
    """Copy the configured priority into ``jira_args`` as a JIRA priority id.

    No-op when no priority is configured or no client connection exists.
    """
    try:
        if self.priority is not None and self.client is not None:
            # priority_ids maps the configured index to the server-side id
            # (built by get_priorities).
            self.jira_args["priority"] = {"id": self.priority_ids[self.priority]}
    except KeyError:
        # Unknown priority index: log the valid choices and continue without
        # setting a priority rather than failing the alerter.
        log.error(
            "Priority %s not found. Valid priorities are %s"
            % (self.priority, list(self.priority_ids.keys()))
        )
def reset_jira_args(self):
    """Rebuild ``self.jira_args`` from the rule's static ticket settings."""
    self.jira_args = {
        "project": {"key": self.project},
        "issuetype": {"name": self.issue_type},
    }
    if self.components:
        # A bare value configures one component; a list configures several.
        if type(self.components) == list:
            self.jira_args["components"] = [
                {"name": name} for name in self.components
            ]
        else:
            self.jira_args["components"] = [{"name": self.components}]
    if self.labels:
        # Normalize a single label to a one-element list.
        if type(self.labels) != list:
            self.labels = [self.labels]
        self.jira_args["labels"] = self.labels
    # Normalize a single watcher to a one-element list as well.
    if self.watchers and type(self.watchers) != list:
        self.watchers = [self.watchers]
    if self.assignee:
        self.jira_args["assignee"] = {"name": self.assignee}
    self.set_priority()
def set_jira_arg(self, jira_field, value, fields):
    """Store *value* for *jira_field* in ``self.jira_args`` with the shape
    JIRA expects for that field's schema.

    Args:
        jira_field: rule option name, e.g. ``jira_my_custom_field``.
        value: raw value from the rule (or a value resolved from a match).
        fields: field metadata list as returned by ``JIRA.fields()``.

    Raises:
        Exception: if the field cannot be found in *fields*, or it carries
            no usable schema information.
    """
    # Remove the jira_ part. Convert underscores to spaces
    normalized_jira_field = jira_field[5:].replace("_", " ").lower()
    # All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
    for identifier in ["name", "id"]:
        field = next(
            (
                f
                for f in fields
                if normalized_jira_field == f[identifier].replace("_", " ").lower()
            ),
            None,
        )
        if field:
            break
    if not field:
        # Log a warning to ElastAlert saying that we couldn't find that type?
        # OR raise and fail to load the alert entirely? Probably the latter...
        raise Exception(
            "Could not find a definition for the jira field '{0}'".format(
                normalized_jira_field
            )
        )
    arg_name = field["id"]
    # Check the schema information to decide how to set the value correctly
    # If the schema information is not available, raise an exception since we don't know how to set it
    # Note this is only the case for two built-in types, id: issuekey and id: thumbnail
    # BUG FIX: the original guard was `not ("schema" in field or "type" in
    # field["schema"])`, which never raised when "schema" existed without a
    # "type" key, and crashed with KeyError (instead of the intended
    # Exception) when "schema" was missing entirely.
    if "schema" not in field or "type" not in field["schema"]:
        raise Exception(
            "Could not determine schema information for the jira field '{0}'".format(
                normalized_jira_field
            )
        )
    arg_type = field["schema"]["type"]
    # Handle arrays of simple types like strings or numbers
    if arg_type == "array":
        # As a convenience, support the scenario wherein the user only provides
        # a single value for a multi-value field e.g. jira_labels: Only_One_Label
        if type(value) != list:
            value = [value]
        array_items = field["schema"]["items"]
        # Simple string types
        if array_items in ["string", "date", "datetime"]:
            # Special case for multi-select custom types (the JIRA metadata says that these are strings, but
            # in reality, they are required to be provided as an object.
            if (
                "custom" in field["schema"]
                and field["schema"]["custom"]
                in self.custom_string_types_with_special_handling
            ):
                self.jira_args[arg_name] = [{"value": v} for v in value]
            else:
                self.jira_args[arg_name] = value
        elif array_items == "number":
            self.jira_args[arg_name] = [int(v) for v in value]
        # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
        elif array_items == "option":
            self.jira_args[arg_name] = [{"value": v} for v in value]
        else:
            # Try setting it as an object, using 'name' as the key
            # This may not work, as the key might actually be 'key', 'id', 'value', or something else
            # If it works, great! If not, it will manifest itself as an API error that will bubble up
            self.jira_args[arg_name] = [{"name": v} for v in value]
    # Handle non-array types
    else:
        # Simple string types
        if arg_type in ["string", "date", "datetime"]:
            # Special case for custom types (the JIRA metadata says that these are strings, but
            # in reality, they are required to be provided as an object.
            if (
                "custom" in field["schema"]
                and field["schema"]["custom"]
                in self.custom_string_types_with_special_handling
            ):
                self.jira_args[arg_name] = {"value": value}
            else:
                self.jira_args[arg_name] = value
        # Number type
        elif arg_type == "number":
            self.jira_args[arg_name] = int(value)
        elif arg_type == "option":
            self.jira_args[arg_name] = {"value": value}
        # Complex type
        else:
            self.jira_args[arg_name] = {"name": value}
def get_arbitrary_fields(self):
    """Translate every unrecognized ``jira_*`` rule option into a jira_args
    entry, deferring '#'-prefixed values until a match is available.
    """
    # Clear jira_args back to the rule's static settings first.
    self.reset_jira_args()
    for rule_key, rule_value in self.rule.items():
        # Options outside our known list are either built-in JIRA fields we
        # don't track explicitly or admin-defined custom fields.
        if not rule_key.startswith("jira_") or rule_key in self.known_field_list:
            continue
        if str(rule_value)[:1] == "#":
            # '#field.name' references a match field; resolve at alert time.
            # NOTE(review): entries are appended without clearing
            # deferred_settings — presumably repeat alerts re-append; confirm.
            self.deferred_settings.append(rule_key)
        else:
            self.set_jira_arg(rule_key, rule_value, self.jira_fields)
def get_priorities(self):
    """ Creates a mapping of priority index to id. """
    self.priority_ids = {
        index: priority.id
        for index, priority in enumerate(self.client.priorities())
    }
def set_assignee(self, assignee):
    """Set or clear the ticket assignee on the instance and in jira_args."""
    self.assignee = assignee
    if not assignee:
        # A falsy assignee removes any previously configured value.
        self.jira_args.pop("assignee", None)
    else:
        self.jira_args["assignee"] = {"name": assignee}
def find_existing_ticket(self, matches):
    """Search JIRA for a recent ticket whose summary matches these alerts.

    Builds a JQL query from the (search-normalized) alert title, the
    configured max age, and the bump status filters.

    Returns:
        The first matching issue, or None when nothing matches or the
        search fails.
    """
    # Default title, get stripped search version
    if "alert_subject" not in self.rule:
        title = self.create_default_title(matches, True)
    else:
        title = self.create_title(matches)
    if "jira_ignore_in_title" in self.rule:
        # Strip the per-match value of the configured field so the search
        # also matches tickets created for other values of that field.
        title = title.replace(
            matches[0].get(self.rule["jira_ignore_in_title"], ""), ""
        )
    # This is necessary for search to work. Other special characters and dashes
    # directly adjacent to words appear to be ok
    title = title.replace(" - ", " ")
    title = title.replace("\\", "\\\\")
    # Only consider tickets created within the last max_age days.
    date = (
        datetime.datetime.now() - datetime.timedelta(days=self.max_age)
    ).strftime("%Y-%m-%d")
    jql = 'project=%s AND summary~"%s" and created >= "%s"' % (
        self.project,
        title,
        date,
    )
    if self.bump_in_statuses:
        # Statuses containing spaces must be quoted in JQL.
        jql = "%s and status in (%s)" % (
            jql,
            ",".join(
                [
                    '"%s"' % status if " " in status else status
                    for status in self.bump_in_statuses
                ]
            ),
        )
    if self.bump_not_in_statuses:
        jql = "%s and status not in (%s)" % (
            jql,
            ",".join(
                [
                    '"%s"' % status if " " in status else status
                    for status in self.bump_not_in_statuses
                ]
            ),
        )
    try:
        issues = self.client.search_issues(jql)
    except JIRAError as e:
        # Log the failing JQL for debugging, but treat a search error as
        # "no existing ticket" rather than failing the alert.
        log.exception(
            "Error while searching for JIRA ticket using jql '%s': %s" % (jql, e)
        )
        return None
    if len(issues):
        return issues[0]
def comment_on_ticket(self, ticket, match):
    """Add a "triggered again" comment for *match* to an existing ticket."""
    formatted_match = str(JiraFormattedMatchString(self.rule, match))
    triggered_at = pretty_ts(lookup_es_key(match, self.rule["timestamp_field"]))
    self.client.add_comment(
        ticket,
        "This alert was triggered again at %s\n%s" % (triggered_at, formatted_match),
    )
def transition_ticket(self, ticket):
    """Apply the configured workflow transition to *ticket*, if offered."""
    for candidate in self.client.transitions(ticket):
        if candidate["name"] == self.transition:
            self.client.transition_issue(ticket, candidate["id"])
def alert(self, matches):
    """Create a JIRA ticket for *matches*, or bump an existing one.

    When jira_bump_tickets is enabled and a recent matching ticket exists,
    comment on / label / transition it instead of opening a new ticket.
    Results are published to ``self.pipeline`` for downstream alerters.

    Raises:
        EAException: if creating the JIRA ticket fails.
    """
    # Reset arbitrary fields to pick up changes
    self.get_arbitrary_fields()
    if len(self.deferred_settings) > 0:
        # Deferred settings hold '#'-prefixed references; resolve them from
        # the first match now that one is available.
        fields = self.client.fields()
        for jira_field in self.deferred_settings:
            value = lookup_es_key(matches[0], self.rule[jira_field][1:])
            self.set_jira_arg(jira_field, value, fields)
    title = self.create_title(matches)
    if self.bump_tickets:
        ticket = self.find_existing_ticket(matches)
        if ticket:
            # Only bump tickets that have been inactive long enough.
            inactivity_datetime = ts_now() - datetime.timedelta(
                days=self.bump_after_inactivity
            )
            if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:
                # Ticket was updated recently: record no ticket and stop.
                if self.pipeline is not None:
                    self.pipeline["jira_ticket"] = None
                    self.pipeline["jira_server"] = self.server
                return None
            log.info("Commenting on existing ticket %s" % (ticket.key))
            for match in matches:
                try:
                    self.comment_on_ticket(ticket, match)
                except JIRAError as e:
                    log.exception(
                        "Error while commenting on ticket %s: %s" % (ticket, e)
                    )
            if self.labels:
                for lbl in self.labels:
                    try:
                        ticket.fields.labels.append(lbl)
                    except JIRAError as e:
                        log.exception(
                            "Error while appending labels to ticket %s: %s"
                            % (ticket, e)
                        )
            if self.transition:
                log.info("Transitioning existing ticket %s" % (ticket.key))
                try:
                    self.transition_ticket(ticket)
                except JIRAError as e:
                    log.exception(
                        "Error while transitioning ticket %s: %s" % (ticket, e)
                    )
            if self.pipeline is not None:
                self.pipeline["jira_ticket"] = ticket
                self.pipeline["jira_server"] = self.server
            return None
    if self.bump_only:
        # Bump-only rules never open new tickets.
        return None
    self.jira_args["summary"] = title
    self.jira_args["description"] = self.create_alert_body(matches)
    try:
        self.issue = self.client.create_issue(**self.jira_args)
        # You can not add watchers on initial creation. Only as a follow-up action
        if self.watchers:
            for watcher in self.watchers:
                try:
                    self.client.add_watcher(self.issue.key, watcher)
                except Exception as ex:
                    # Re-raise the exception, preserve the stack-trace, and give some
                    # context as to which watcher failed to be added
                    raise Exception(
                        "Exception encountered when trying to add '{0}' as a watcher. "
                        "Does the user exist?\n{1}".format(watcher, ex)
                    ).with_traceback(sys.exc_info()[2])
    except JIRAError as e:
        raise EAException(
            "Error creating JIRA ticket using jira_args (%s): %s"
            % (self.jira_args, e)
        )
    log.info("Opened Jira ticket: %s" % (self.issue))
    if self.pipeline is not None:
        self.pipeline["jira_ticket"] = self.issue
        self.pipeline["jira_server"] = self.server
def create_alert_body(self, matches):
    """Compose the ticket description: rule description, aggregation
    summary, and (unless summary-only) each formatted match."""
    sections = [self.description, "\n", self.get_aggregation_summary_text(matches)]
    if self.rule.get("alert_text_type") != "aggregation_summary_only":
        for match in matches:
            sections.append(str(JiraFormattedMatchString(self.rule, match)))
            if len(matches) > 1:
                # Visually separate matches when there is more than one.
                sections.append("\n----------------------------------------\n")
    return "".join(sections)
def get_aggregation_summary_text(self, matches):
    """Wrap the parent's aggregation summary in Jira {noformat} markup
    when it is non-empty."""
    summary = super(JiraAlerter, self).get_aggregation_summary_text(matches)
    return "{{noformat}}{0}{{noformat}}".format(summary) if summary else summary
def create_default_title(self, matches, for_search=False):
    """Build the default ticket summary for *matches*.

    With *for_search* true, return the short form used for JQL lookups
    (no timestamp or spike count suffix).
    """
    # If there is a query_key, use its value from the first match in the title.
    query_key_value = None
    if "query_key" in self.rule:
        query_key_value = lookup_es_key(matches[0], self.rule["query_key"])
    if query_key_value:
        title = "ElastAlert: %s matched %s" % (query_key_value, self.rule["name"])
    else:
        title = "ElastAlert: %s" % (self.rule["name"])
    if for_search:
        return title
    title += " - %s" % (
        pretty_ts(
            matches[0][self.rule["timestamp_field"]],
            self.rule.get("use_local_time"),
        )
    )
    # Spike rules attach a count that is worth surfacing in the summary.
    spike_count = matches[0].get("spike_count")
    if spike_count:
        title += " - %s+ events" % (spike_count)
    return title
def get_info(self):
    """Describe this alerter type for ElastAlert status reporting."""
    return dict(type="jira")
|
import datetime
import logging
import sys
from elastalert.alerter import Alerter
from elastalert.alerter.match_string import JiraFormattedMatchString
from elastalert.exceptions import EAException
from elastalert.utils.time import pretty_ts, ts_now, ts_to_dt
from elastalert.utils.util import lookup_es_key
from jira.client import JIRA
from jira.exceptions import JIRAError
log = logging.getLogger(__name__)
class JiraAlerter(Alerter):
""" Creates a Jira ticket for each alert """
required_options = frozenset(
["jira_server", "jira_account_file", "jira_project", "jira_issuetype"]
)
# Maintain a static set of built-in fields that we explicitly know how to set
# For anything else, we will do best-effort and try to set a string value
known_field_list = [
"jira_account_file",
"jira_assignee",
"jira_bump_after_inactivity",
"jira_bump_in_statuses",
"jira_bump_not_in_statuses",
"jira_bump_only",
"jira_bump_tickets",
"jira_component",
"jira_components",
"jira_description",
"jira_ignore_in_title",
"jira_issuetype",
"jira_label",
"jira_labels",
"jira_max_age",
"jira_priority",
"jira_project",
"jira_server",
"jira_transition_to",
"jira_watchers",
]
# Some built-in jira types that can be used as custom fields require special handling
# Here is a sample of one of them:
# {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,
# "navigable":true,"searchable":true,
# "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
# "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
# There are likely others that will need to be updated on a case-by-case basis
custom_string_types_with_special_handling = [
"com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes",
"com.atlassian.jira.plugin.system.customfieldtypes:multiselect",
"com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons",
]
def __init__(self, rule):
    """Configure the alerter from *rule* and connect to the JIRA server.

    Args:
        rule: the rule configuration dict (keys ``jira_server``,
            ``jira_account_file``, ``jira_project`` and ``jira_issuetype``
            are required; see ``known_field_list`` for the optional ones).

    Raises:
        EAException: if the connection to JIRA fails.
    """
    super(JiraAlerter, self).__init__(rule)
    self.server = self.rule["jira_server"]
    self.get_account(self.rule["jira_account_file"])
    self.project = self.rule["jira_project"]
    self.issue_type = self.rule["jira_issuetype"]
    # Deferred settings refer to values that can only be resolved when a match
    # is found and as such loading them will be delayed until we find a match
    self.deferred_settings = []
    # We used to support only a single component. This allows us to maintain backwards compatibility
    # while also giving the user-facing API a more representative name
    self.components = self.rule.get(
        "jira_components", self.rule.get("jira_component")
    )
    # We used to support only a single label. This allows us to maintain backwards compatibility
    # while also giving the user-facing API a more representative name
    self.labels = self.rule.get("jira_labels", self.rule.get("jira_label"))
    self.description = self.rule.get("jira_description", "")
    self.assignee = self.rule.get("jira_assignee")
    self.max_age = self.rule.get("jira_max_age", 30)
    self.priority = self.rule.get("jira_priority")
    self.bump_tickets = self.rule.get("jira_bump_tickets", False)
    self.bump_not_in_statuses = self.rule.get("jira_bump_not_in_statuses")
    self.bump_in_statuses = self.rule.get("jira_bump_in_statuses")
    self.bump_after_inactivity = self.rule.get("jira_bump_after_inactivity", 0)
    self.bump_only = self.rule.get("jira_bump_only", False)
    self.transition = self.rule.get("jira_transition_to", False)
    self.watchers = self.rule.get("jira_watchers")
    self.client = None
    if self.bump_in_statuses and self.bump_not_in_statuses:
        msg = (
            "Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set."
            % (",".join(self.bump_in_statuses), ",".join(self.bump_not_in_statuses))
        )
        # BUG FIX: the original intersected bump_in_statuses with itself,
        # which made the "common statuses" warning fire for every rule that
        # set both options, even with disjoint status lists.
        intersection = list(
            set(self.bump_in_statuses) & set(self.bump_not_in_statuses)
        )
        if intersection:
            msg = (
                "%s Both have common statuses of (%s). As such, no tickets will ever be found."
                % (msg, ",".join(intersection))
            )
        msg += " This should be simplified to use only one or the other."
        log.warning(msg)
    self.reset_jira_args()
    try:
        self.client = JIRA(self.server, basic_auth=(self.user, self.password))
        self.get_priorities()
        self.jira_fields = self.client.fields()
        self.get_arbitrary_fields()
    except JIRAError as e:
        # JIRAError may contain HTML, pass along only first 1024 chars
        raise EAException(
            "Error connecting to JIRA: %s" % (str(e)[:1024])
        ).with_traceback(sys.exc_info()[2])
    self.set_priority()
def set_priority(self):
try:
if self.priority is not None and self.client is not None:
self.jira_args["priority"] = {"id": self.priority_ids[self.priority]}
except KeyError:
log.error(
"Priority %s not found. Valid priorities are %s"
% (self.priority, list(self.priority_ids.keys()))
)
def reset_jira_args(self):
self.jira_args = {
"project": {"key": self.project},
"issuetype": {"name": self.issue_type},
}
if self.components:
# Support single component or list
if type(self.components) != list:
self.jira_args["components"] = [{"name": self.components}]
else:
self.jira_args["components"] = [
{"name": component} for component in self.components
]
if self.labels:
# Support single label or list
if type(self.labels) != list:
self.labels = [self.labels]
self.jira_args["labels"] = self.labels
if self.watchers:
# Support single watcher or list
if type(self.watchers) != list:
self.watchers = [self.watchers]
if self.assignee:
self.jira_args["assignee"] = {"name": self.assignee}
self.set_priority()
def set_jira_arg(self, jira_field, value, fields):
    """Store *value* for *jira_field* in ``self.jira_args`` with the shape
    JIRA expects for that field's schema.

    Args:
        jira_field: rule option name, e.g. ``jira_my_custom_field``.
        value: raw value from the rule (or a value resolved from a match).
        fields: field metadata list as returned by ``JIRA.fields()``.

    Raises:
        Exception: if the field cannot be found in *fields*, or it carries
            no usable schema information.
    """
    # Remove the jira_ part. Convert underscores to spaces
    normalized_jira_field = jira_field[5:].replace("_", " ").lower()
    # All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
    for identifier in ["name", "id"]:
        field = next(
            (
                f
                for f in fields
                if normalized_jira_field == f[identifier].replace("_", " ").lower()
            ),
            None,
        )
        if field:
            break
    if not field:
        # Log a warning to ElastAlert saying that we couldn't find that type?
        # OR raise and fail to load the alert entirely? Probably the latter...
        raise Exception(
            "Could not find a definition for the jira field '{0}'".format(
                normalized_jira_field
            )
        )
    arg_name = field["id"]
    # Check the schema information to decide how to set the value correctly
    # If the schema information is not available, raise an exception since we don't know how to set it
    # Note this is only the case for two built-in types, id: issuekey and id: thumbnail
    # BUG FIX: the original guard was `not ("schema" in field or "type" in
    # field["schema"])`, which never raised when "schema" existed without a
    # "type" key, and crashed with KeyError (instead of the intended
    # Exception) when "schema" was missing entirely.
    if "schema" not in field or "type" not in field["schema"]:
        raise Exception(
            "Could not determine schema information for the jira field '{0}'".format(
                normalized_jira_field
            )
        )
    arg_type = field["schema"]["type"]
    # Handle arrays of simple types like strings or numbers
    if arg_type == "array":
        # As a convenience, support the scenario wherein the user only provides
        # a single value for a multi-value field e.g. jira_labels: Only_One_Label
        if type(value) != list:
            value = [value]
        array_items = field["schema"]["items"]
        # Simple string types
        if array_items in ["string", "date", "datetime"]:
            # Special case for multi-select custom types (the JIRA metadata says that these are strings, but
            # in reality, they are required to be provided as an object.
            if (
                "custom" in field["schema"]
                and field["schema"]["custom"]
                in self.custom_string_types_with_special_handling
            ):
                self.jira_args[arg_name] = [{"value": v} for v in value]
            else:
                self.jira_args[arg_name] = value
        elif array_items == "number":
            self.jira_args[arg_name] = [int(v) for v in value]
        # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
        elif array_items == "option":
            self.jira_args[arg_name] = [{"value": v} for v in value]
        else:
            # Try setting it as an object, using 'name' as the key
            # This may not work, as the key might actually be 'key', 'id', 'value', or something else
            # If it works, great! If not, it will manifest itself as an API error that will bubble up
            self.jira_args[arg_name] = [{"name": v} for v in value]
    # Handle non-array types
    else:
        # Simple string types
        if arg_type in ["string", "date", "datetime"]:
            # Special case for custom types (the JIRA metadata says that these are strings, but
            # in reality, they are required to be provided as an object.
            if (
                "custom" in field["schema"]
                and field["schema"]["custom"]
                in self.custom_string_types_with_special_handling
            ):
                self.jira_args[arg_name] = {"value": value}
            else:
                self.jira_args[arg_name] = value
        # Number type
        elif arg_type == "number":
            self.jira_args[arg_name] = int(value)
        elif arg_type == "option":
            self.jira_args[arg_name] = {"value": value}
        # Complex type
        else:
            self.jira_args[arg_name] = {"name": value}
def get_arbitrary_fields(self):
# Clear jira_args
self.reset_jira_args()
for jira_field, value in self.rule.items():
# If we find a field that is not covered by the set that we are aware of, it means it is either:
# 1. A built-in supported field in JIRA that we don't have on our radar
# 2. A custom field that a JIRA admin has configured
if (
jira_field.startswith("jira_")
and jira_field not in self.known_field_list
and str(value)[:1] != "#"
):
self.set_jira_arg(jira_field, value, self.jira_fields)
if (
jira_field.startswith("jira_")
and jira_field not in self.known_field_list
and str(value)[:1] == "#"
):
self.deferred_settings.append(jira_field)
def get_priorities(self):
""" Creates a mapping of priority index to id. """
priorities = self.client.priorities()
self.priority_ids = {}
for x in range(len(priorities)):
self.priority_ids[x] = priorities[x].id
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args["assignee"] = {"name": assignee}
elif "assignee" in self.jira_args:
self.jira_args.pop("assignee")
def find_existing_ticket(self, matches):
# Default title, get stripped search version
if "alert_subject" not in self.rule:
title = self.create_default_title(matches, True)
else:
title = self.create_title(matches)
if "jira_ignore_in_title" in self.rule:
title = title.replace(
matches[0].get(self.rule["jira_ignore_in_title"], ""), ""
)
# This is necessary for search to work. Other special characters and dashes
# directly adjacent to words appear to be ok
title = title.replace(" - ", " ")
title = title.replace("\\", "\\\\")
date = (
datetime.datetime.now() - datetime.timedelta(days=self.max_age)
).strftime("%Y-%m-%d")
jql = 'project=%s AND summary~"%s" and created >= "%s"' % (
self.project,
title,
date,
)
if self.bump_in_statuses:
jql = "%s and status in (%s)" % (
jql,
",".join(
[
'"%s"' % status if " " in status else status
for status in self.bump_in_statuses
]
),
)
if self.bump_not_in_statuses:
jql = "%s and status not in (%s)" % (
jql,
",".join(
[
'"%s"' % status if " " in status else status
for status in self.bump_not_in_statuses
]
),
)
try:
issues = self.client.search_issues(jql)
except JIRAError as e:
log.exception(
"Error while searching for JIRA ticket using jql '%s': %s" % (jql, e)
)
return None
if len(issues):
return issues[0]
def comment_on_ticket(self, ticket, match):
text = str(JiraFormattedMatchString(self.rule, match))
timestamp = pretty_ts(lookup_es_key(match, self.rule["timestamp_field"]))
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def transition_ticket(self, ticket):
transitions = self.client.transitions(ticket)
for t in transitions:
if t["name"] == self.transition:
self.client.transition_issue(ticket, t["id"])
def alert(self, matches):
# Reset arbitrary fields to pick up changes
self.get_arbitrary_fields()
if len(self.deferred_settings) > 0:
fields = self.client.fields()
for jira_field in self.deferred_settings:
value = lookup_es_key(matches[0], self.rule[jira_field][1:])
self.set_jira_arg(jira_field, value, fields)
title = self.create_title(matches)
if self.bump_tickets:
ticket = self.find_existing_ticket(matches)
if ticket:
inactivity_datetime = ts_now() - datetime.timedelta(
days=self.bump_after_inactivity
)
if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:
if self.pipeline is not None:
self.pipeline["jira_ticket"] = None
self.pipeline["jira_server"] = self.server
return None
log.info("Commenting on existing ticket %s" % (ticket.key))
for match in matches:
try:
self.comment_on_ticket(ticket, match)
except JIRAError as e:
log.exception(
"Error while commenting on ticket %s: %s" % (ticket, e)
)
if self.labels:
for lbl in self.labels:
try:
ticket.fields.labels.append(lbl)
except JIRAError as e:
log.exception(
"Error while appending labels to ticket %s: %s"
% (ticket, e)
)
if self.transition:
log.info("Transitioning existing ticket %s" % (ticket.key))
try:
self.transition_ticket(ticket)
except JIRAError as e:
log.exception(
"Error while transitioning ticket %s: %s" % (ticket, e)
)
if self.pipeline is not None:
self.pipeline["jira_ticket"] = ticket
self.pipeline["jira_server"] = self.server
return None
if self.bump_only:
return None
self.jira_args["summary"] = title
self.jira_args["description"] = self.create_alert_body(matches)
try:
self.issue = self.client.create_issue(**self.jira_args)
# You can not add watchers on initial creation. Only as a follow-up action
if self.watchers:
for watcher in self.watchers:
try:
self.client.add_watcher(self.issue.key, watcher)
except Exception as ex:
# Re-raise the exception, preserve the stack-trace, and give some
# context as to which watcher failed to be added
raise Exception(
"Exception encountered when trying to add '{0}' as a watcher. "
"Does the user exist?\n{1}".format(watcher, ex)
).with_traceback(sys.exc_info()[2])
except JIRAError as e:
raise EAException(
"Error creating JIRA ticket using jira_args (%s): %s"
% (self.jira_args, e)
)
log.info("Opened Jira ticket: %s" % (self.issue))
if self.pipeline is not None:
self.pipeline["jira_ticket"] = self.issue
self.pipeline["jira_server"] = self.server
def create_alert_body(self, matches):
    """Build the ticket description: the rule description, the aggregation
    summary, and (unless summary-only mode is set) one formatted section
    per match, separated by a dashed line when there are several matches."""
    parts = [self.description, "\n", self.get_aggregation_summary_text(matches)]
    if self.rule.get("alert_text_type") != "aggregation_summary_only":
        several = len(matches) > 1
        for match in matches:
            parts.append(str(JiraFormattedMatchString(self.rule, match)))
            if several:
                parts.append("\n----------------------------------------\n")
    return "".join(parts)
def get_aggregation_summary_text(self, matches):
    """Wrap the parent class's aggregation summary in Jira {noformat}
    markup so it renders verbatim; pass empty summaries through as-is."""
    summary = super(JiraAlerter, self).get_aggregation_summary_text(matches)
    if not summary:
        return summary
    return "{{noformat}}{0}{{noformat}}".format(summary)
def create_default_title(self, matches, for_search=False):
    """Compose the default ticket summary.

    The summary starts from the rule name (plus the first match's
    query-key value when the rule defines one); unless *for_search* is
    True, the first match's timestamp and optional spike count are
    appended. The short *for_search* form is what is used to look up
    existing tickets.
    """
    first = matches[0]
    # If there is a query_key, use its value in the title
    key_value = None
    if "query_key" in self.rule:
        key_value = lookup_es_key(first, self.rule["query_key"])
    if key_value:
        subject = "ElastAlert: %s matched %s" % (key_value, self.rule["name"])
    else:
        subject = "ElastAlert: %s" % (self.rule["name"])

    if for_search:
        return subject

    subject += " - %s" % (
        pretty_ts(
            first[self.rule["timestamp_field"]],
            self.rule.get("use_local_time"),
        )
    )

    # Spike rules carry an event count worth surfacing in the title
    spike_count = first.get("spike_count")
    if spike_count:
        subject += " - %s+ events" % (spike_count)
    return subject
def get_info(self):
    """Identify this alerter type in ElastAlert status output."""
    return dict(type="jira")
|
en
| 0.889007
|
Creates a Jira ticket for each alert # Maintain a static set of built-in fields that we explicitly know how to set # For anything else, we will do best-effort and try to set a string value # Some built-in jira types that can be used as custom fields require special handling # Here is a sample of one of them: # {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true, # "navigable":true,"searchable":true, # "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string", # "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}} # There are likely others that will need to be updated on a case-by-case basis # Deferred settings refer to values that can only be resolved when a match # is found and as such loading them will be delayed until we find a match # We used to support only a single component. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name # We used to support only a single label. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name # JIRAError may contain HTML, pass along only first 1024 chars # Support single component or list # Support single label or list # Support single watcher or list # Remove the jira_ part. Convert underscores to spaces # All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case # Log a warning to ElastAlert saying that we couldn't find that type? # OR raise and fail to load the alert entirely? Probably the latter... 
# Check the schema information to decide how to set the value correctly # If the schema information is not available, raise an exception since we don't know how to set it # Note this is only the case for two built-in types, id: issuekey and id: thumbnail # Handle arrays of simple types like strings or numbers # As a convenience, support the scenario wherein the user only provides # a single value for a multi-value field e.g. jira_labels: Only_One_Label # Simple string types # Special case for multi-select custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key' # Try setting it as an object, using 'name' as the key # This may not work, as the key might actually be 'key', 'id', 'value', or something else # If it works, great! If not, it will manifest itself as an API error that will bubble up # Handle non-array types # Simple string types # Special case for custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. # Number type # Complex type # Clear jira_args # If we find a field that is not covered by the set that we are aware of, it means it is either: # 1. A built-in supported field in JIRA that we don't have on our radar # 2. A custom field that a JIRA admin has configured Creates a mapping of priority index to id. # Default title, get stripped search version # This is necessary for search to work. Other special characters and dashes # directly adjacent to words appear to be ok # Reset arbitrary fields to pick up changes # You can not add watchers on initial creation. Only as a follow-up action # Re-raise the exception, preserve the stack-trace, and give some # context as to which watcher failed to be added # If there is a query_key, use that in the title # Add count for spikes
| 2.178936
| 2
|
examples/Assertions/Basic/test_plan_dict.py
|
raoyitao/testplan
| 96
|
6629555
|
#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of dict assertion namespaces.
"""
import re
import sys
from testplan import test_plan
from testplan.common.utils import comparison
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum
@testsuite
class DictSuite:
    """
    Demonstrates the `result.dict` assertion namespace, which applies
    advanced matching rules to (possibly nested) dictionaries.
    """

    @testcase
    def test_dict_namespace(self, env, result):
        # Flat dictionaries: `dict.match` reports per-key pass/fail.
        observed = {"foo": 1, "bar": 2}
        reference = {"foo": 1, "bar": 5, "extra-key": 10}
        result.dict.match(observed, reference, description="Simple dict match")

        # Nested structures are compared recursively.
        observed = {"foo": {"alpha": [1, 2, 3], "beta": {"color": "red"}}}
        reference = {"foo": {"alpha": [1, 2], "beta": {"color": "blue"}}}
        result.dict.match(observed, reference, description="Nested dict match")

        # Expected values may also be callables, membership checks or regexes.
        observed = {
            "foo": [1, 2, 3],
            "bar": {"color": "blue"},
            "baz": "hello world",
        }
        reference = {
            "foo": [1, 2, lambda n: isinstance(n, int)],
            "bar": {"color": comparison.In(["blue", "red", "yellow"])},
            "baz": re.compile(r"\w+ world"),
        }
        result.dict.match(
            observed, reference, description="Dict match: Custom comparators"
        )

        # A comparator f(x, y) -> bool can be applied to every value pair;
        # ready-made ones live in comparison.COMPARE_FUNCTIONS.
        observed = {"foo": 1, "bar": 2, "baz": 3}
        reference = {"foo": 1.0, "bar": 2.0, "baz": 3.0}
        result.dict.match(
            observed,
            reference,
            description="default assertion passes because the values are "
            "numerically equal",
        )
        result.dict.match(
            observed,
            reference,
            description="when we check types the assertion will fail",
            value_cmp_func=comparison.COMPARE_FUNCTIONS["check_types"],
        )

        observed = {"foo": 1.02, "bar": 2.28, "baz": 3.50}
        reference = {"foo": 0.98, "bar": 2.33, "baz": 3.46}
        result.dict.match(
            observed,
            reference,
            description="use a custom comparison function to check within a "
            "tolerance",
            value_cmp_func=lambda x, y: abs(x - y) < 0.1,
        )

        # report_mode controls how much comparison detail is stored in the
        # report; discarding passing comparisons keeps reports small when
        # matching very large dictionaries.
        observed = {"key{}".format(i): i for i in range(10)}
        reference = observed.copy()
        reference["bad_key"] = "expected"
        observed["bad_key"] = "actual"
        result.dict.match(
            observed,
            reference,
            description="only report the failing comparison",
            report_mode=comparison.ReportOptions.FAILS_ONLY,
        )

        # `dict.check` asserts key presence / absence.
        result.dict.check(
            dictionary={"foo": 1, "bar": 2, "baz": 3},
            has_keys=["foo", "alpha"],
            absent_keys=["bar", "beta"],
        )

        # `dict.log` dumps a dictionary into the report in readable form.
        result.dict.log(
            dictionary={
                "foo": [1, 2, 3],
                "bar": {"color": "blue"},
                "baz": "hello world",
            }
        )
@test_plan(
    name="Dict Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
)
def main(plan):
    """Attach the dict-assertion demo suite to the plan."""
    suite = DictSuite()
    demo_test = MultiTest(name="Dict Assertions Test", suites=[suite])
    plan.add(demo_test)


if __name__ == "__main__":
    sys.exit(not main())
|
#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows usage of dict assertion namespaces.
"""
import re
import sys
from testplan import test_plan
from testplan.common.utils import comparison
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.report.testing.styles import Style, StyleEnum
@testsuite
class DictSuite:
"""
`result.dict` namespace can be used for applying advanced
assertion rules to dictionaries, which can be nested.
"""
@testcase
def test_dict_namespace(self, env, result):
actual = {"foo": 1, "bar": 2}
expected = {"foo": 1, "bar": 5, "extra-key": 10}
# `dict.match` (recursively) matches elements of the dictionaries
result.dict.match(actual, expected, description="Simple dict match")
# `dict.match` supports nested data as well
actual = {"foo": {"alpha": [1, 2, 3], "beta": {"color": "red"}}}
expected = {"foo": {"alpha": [1, 2], "beta": {"color": "blue"}}}
result.dict.match(actual, expected, description="Nested dict match")
# It is possible to use custom comparators with `dict.match`
actual = {
"foo": [1, 2, 3],
"bar": {"color": "blue"},
"baz": "hello world",
}
expected = {
"foo": [1, 2, lambda v: isinstance(v, int)],
"bar": {"color": comparison.In(["blue", "red", "yellow"])},
"baz": re.compile(r"\w+ world"),
}
result.dict.match(
actual, expected, description="Dict match: Custom comparators"
)
# You can also specify a comparator function to apply to all values in
# your dict. Standard comparators are available under
# testplan.common.utils.comparison.COMPARE_FUNCTIONS but any function
# f(x: Any, y: Any) -> bool can be used.
actual = {"foo": 1, "bar": 2, "baz": 3}
expected = {"foo": 1.0, "bar": 2.0, "baz": 3.0}
result.dict.match(
actual,
expected,
description="default assertion passes because the values are "
"numerically equal",
)
result.dict.match(
actual,
expected,
description="when we check types the assertion will fail",
value_cmp_func=comparison.COMPARE_FUNCTIONS["check_types"],
)
actual = {"foo": 1.02, "bar": 2.28, "baz": 3.50}
expected = {"foo": 0.98, "bar": 2.33, "baz": 3.46}
result.dict.match(
actual,
expected,
description="use a custom comparison function to check within a "
"tolerance",
value_cmp_func=lambda x, y: abs(x - y) < 0.1,
)
# The report_mode can be specified to limit the comparison
# information stored. By default all comparisons are stored and added
# to the report, but you can choose to discard some comparisons to
# reduce the size of the report when comparing very large dicts.
actual = {"key{}".format(i): i for i in range(10)}
expected = actual.copy()
expected["bad_key"] = "expected"
actual["bad_key"] = "actual"
result.dict.match(
actual,
expected,
description="only report the failing comparison",
report_mode=comparison.ReportOptions.FAILS_ONLY,
)
# `dict.check` can be used for checking existence / absence
# of keys within a dictionary
result.dict.check(
dictionary={"foo": 1, "bar": 2, "baz": 3},
has_keys=["foo", "alpha"],
absent_keys=["bar", "beta"],
)
# `dict.log` can be used to log a dictionary in human readable format.
result.dict.log(
dictionary={
"foo": [1, 2, 3],
"bar": {"color": "blue"},
"baz": "hello world",
}
)
@test_plan(
name="Dict Assertions Example",
stdout_style=Style(
passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
),
)
def main(plan):
plan.add(
MultiTest(
name="Dict Assertions Test",
suites=[
DictSuite(),
],
)
)
if __name__ == "__main__":
sys.exit(not main())
|
en
| 0.777793
|
#!/usr/bin/env python # This plan contains tests that demonstrate failures as well. This example shows usage of dict assertion namespaces. `result.dict` namespace can be used for applying advanced assertion rules to dictionaries, which can be nested. # `dict.match` (recursively) matches elements of the dictionaries # `dict.match` supports nested data as well # It is possible to use custom comparators with `dict.match` # You can also specify a comparator function to apply to all values in # your dict. Standard comparators are available under # testplan.common.utils.comparison.COMPARE_FUNCTIONS but any function # f(x: Any, y: Any) -> bool can be used. # The report_mode can be specified to limit the comparison # information stored. By default all comparisons are stored and added # to the report, but you can choose to discard some comparisons to # reduce the size of the report when comparing very large dicts. # `dict.check` can be used for checking existence / absence # of keys within a dictionary # `dict.log` can be used to log a dictionary in human readable format.
| 3.28987
| 3
|
seekpath/hpkot/tools.py
|
qiaojunfeng/seekpath
| 0
|
6629556
|
<reponame>qiaojunfeng/seekpath<filename>seekpath/hpkot/tools.py
import numpy
import numpy.linalg
def eval_expr_simple(expr, kparam):
    """Evaluate a k-point coordinate expression that needs only *kparam*.

    :param expr: either one of a few hardcoded numeric fractions
        ("0", "1/2", "1", "-1/2", "1/4", "3/8", "3/4", "5/8", "1/3")
        or the name of a previously computed k-parameter.
    :param kparam: dictionary mapping k-parameter names to numeric values.
    :return: the numeric value of the expression.
    :raise ValueError: if *expr* is neither a known fraction nor a key
        of *kparam*.
    """
    # Hardcoded table of the simple fractions used in the band-path data
    # files (a lookup table instead of parsing, to avoid ``eval``).
    simple_fractions = {
        "0": 0.,
        "1/2": 1. / 2.,
        "1": 1.,
        "-1/2": -1. / 2.,
        "1/4": 1. / 4.,
        "3/8": 3. / 8.,
        "3/4": 3. / 4.,
        "5/8": 5. / 8.,
        "1/3": 1. / 3.,
    }
    if expr in simple_fractions:
        return simple_fractions[expr]
    try:
        return kparam[expr]
    except KeyError as e:
        # BUG FIX: Python 3 exceptions have no ``.message`` attribute, so the
        # old ``e.message`` raised AttributeError instead of this ValueError.
        raise ValueError(
            "Asking for evaluation of symbol '{}' in "
            "eval_expr_simple but this has not been defined or not "
            "yet computed".format(e.args[0]))
def extend_kparam(kparam):
    """Return a copy of *kparam* augmented with derived expressions.

    For every key ``x`` with value ``v`` the result additionally contains
    ``-x``, ``1-x``, ``-1+x``, ``1/2-x`` and ``1/2+x`` mapped to the
    corresponding numeric values.

    :param kparam: a dictionary where the key is the expression as a string
        and the value is the numerical value
    :return: a similar dictionary, extended with simple expressions
    """
    extended = {}
    for name, value in kparam.items():
        extended[name] = value
        extended["-" + name] = -value
        extended["1-" + name] = 1. - value
        extended["-1+" + name] = -1. + value
        extended["1/2-" + name] = 1. / 2. - value
        extended["1/2+" + name] = 1. / 2. + value
    return extended
def eval_expr(expr, a, b, c, cosalpha, cosbeta, cosgamma, kparam):
    r"""
    Evaluate a k-point coordinate expression for the given cell parameters.

    :param expr: the expression string, exactly as it appears in the HPKOT
        band-path data files.
    :param a: length of the first lattice vector
    :param b: length of the second lattice vector
    :param c: length of the third lattice vector
    :param cosalpha: cosine of the :math:`\alpha` angle (between lattice vectors 2 and 3)
    :param cosbeta: cosine of the :math:`\beta` angle (between lattice vectors 1 and 3)
    :param cosgamma: cosine of the :math:`\gamma` angle (between lattice vectors 1 and 2)
    :param kparam: a dictionary that associates the value to expressions as a function
        of the ``a, b, c, cosalpha, cosbeta, cosgamma`` parameters
    :return: the value of the expression for the given values of the cell parameters
    :raise ValueError: if the expression is unknown, or references a
        k-parameter missing from *kparam*.

    .. note:: To evaluate expressions, I hardcode a table of existing expressions in the
        DB rather than parsing the string (to avoid additional dependencies and
        avoid the use of ``eval``).
    """
    from math import sqrt
    # Only sin(beta) appears in the hardcoded expressions below
    # (sin(alpha) and sin(gamma) were previously computed but never used).
    sinbeta = sqrt(1. - cosbeta**2)
    try:
        if expr == "(a*a/b/b+(1+a/c*cosbeta)/sinbeta/sinbeta)/4":
            return (a * a / b / b +
                    (1. + a / c * cosbeta) / sinbeta / sinbeta) / 4.
        elif expr == "1-Z*b*b/a/a":
            Z = kparam['Z']
            return 1. - Z * b * b / a / a
        elif expr == "1/2-2*Z*c*cosbeta/a":
            Z = kparam['Z']
            return 1. / 2. - 2. * Z * c * cosbeta / a
        elif expr == "E/2+a*a/4/b/b+a*c*cosbeta/2/b/b":
            E = kparam['E']
            return E / 2. + a * a / 4. / b / b + a * c * cosbeta / 2. / b / b
        elif expr == "2*F-Z":
            F = kparam['F']
            Z = kparam['Z']
            return 2. * F - Z
        elif expr == "c/2/a/cosbeta*(1-4*U+a*a*sinbeta*sinbeta/b/b)":
            U = kparam['U']
            return c / 2. / a / cosbeta * (
                1. - 4. * U + a * a * sinbeta * sinbeta / b / b)
        elif expr == "-1/4+W/2-Z*c*cosbeta/a":
            W = kparam['W']
            Z = kparam['Z']
            return -1. / 4. + W / 2. - Z * c * cosbeta / a
        elif expr == "(2+a/c*cosbeta)/4/sinbeta/sinbeta":
            return (2. + a / c * cosbeta) / 4. / sinbeta / sinbeta
        elif expr == "3/4-b*b/4/a/a/sinbeta/sinbeta":
            return 3. / 4. - b * b / 4. / a / a / sinbeta / sinbeta
        elif expr == "S-(3/4-S)*a*cosbeta/c":
            S = kparam['S']
            return S - (3. / 4. - S) * a * cosbeta / c
        elif expr == "(1+a*a/b/b)/4":
            return (1. + a * a / b / b) / 4.
        elif expr == "-a*c*cosbeta/2/b/b":
            return -a * c * cosbeta / 2. / b / b
        elif expr == "1+Z-2*M":
            Z = kparam['Z']
            M = kparam['M']
            return 1. + Z - 2. * M
        elif expr == "X-2*D":
            X = kparam['X']
            D = kparam['D']
            return X - 2 * D
        elif expr == "(1+a/c*cosbeta)/2/sinbeta/sinbeta":
            return (1. + a / c * cosbeta) / 2. / sinbeta / sinbeta
        elif expr == "1/2+Y*c*cosbeta/a":
            Y = kparam['Y']
            return 1. / 2. + Y * c * cosbeta / a
        elif expr == "a*a/4/c/c":
            return a * a / 4. / c / c
        elif expr == "5/6-2*D":
            D = kparam['D']
            return 5. / 6. - 2. * D
        elif expr == "1/3+D":
            D = kparam['D']
            return 1. / 3. + D
        elif expr == "1/6-c*c/9/a/a":
            return 1. / 6. - c * c / 9. / a / a
        elif expr == "1/2-2*Z":
            Z = kparam['Z']
            return 1. / 2. - 2. * Z
        elif expr == "1/2+Z":
            Z = kparam['Z']
            return 1. / 2. + Z
        elif expr == "(1+b*b/c/c)/4":
            return (1. + b * b / c / c) / 4.
        elif expr == "(1+c*c/b/b)/4":
            return (1. + c * c / b / b) / 4.
        elif expr == "(1+b*b/a/a)/4":
            return (1. + b * b / a / a) / 4.
        elif expr == "(1+a*a/b/b-a*a/c/c)/4":
            return (1. + a * a / b / b - a * a / c / c) / 4.
        elif expr == "(1+a*a/b/b+a*a/c/c)/4":
            return (1. + a * a / b / b + a * a / c / c) / 4.
        elif expr == "(1+c*c/a/a-c*c/b/b)/4":
            return (1. + c * c / a / a - c * c / b / b) / 4.
        elif expr == "(1+c*c/a/a+c*c/b/b)/4":
            return (1. + c * c / a / a + c * c / b / b) / 4.
        elif expr == "(1+b*b/a/a-b*b/c/c)/4":
            return (1. + b * b / a / a - b * b / c / c) / 4.
        elif expr == "(1+c*c/b/b-c*c/a/a)/4":
            return (1. + c * c / b / b - c * c / a / a) / 4.
        elif expr == "(1+a*a/c/c)/4":
            return (1. + a * a / c / c) / 4.
        elif expr == "(b*b-a*a)/4/c/c":
            return (b * b - a * a) / 4. / c / c
        elif expr == "(a*a+b*b)/4/c/c":
            return (a * a + b * b) / 4. / c / c
        elif expr == "(1+c*c/a/a)/4":
            return (1. + c * c / a / a) / 4.
        elif expr == "(c*c-b*b)/4/a/a":
            return (c * c - b * b) / 4. / a / a
        elif expr == "(b*b+c*c)/4/a/a":
            return (b * b + c * c) / 4. / a / a
        elif expr == "(a*a-c*c)/4/b/b":
            return (a * a - c * c) / 4. / b / b
        elif expr == "(c*c+a*a)/4/b/b":
            return (c * c + a * a) / 4. / b / b
        elif expr == "a*a/2/c/c":
            return a * a / 2. / c / c
        else:
            raise ValueError('Unknown expression, define a new case:\n'
                             '    elif expr == "{0}":\n'
                             '        return {0}'.format(expr))
    except KeyError as e:
        # BUG FIX: Python 3 exceptions have no ``.message`` attribute, so the
        # old ``e.message`` raised AttributeError instead of this ValueError.
        raise ValueError("Asking for evaluation of symbol '{}' but this has "
                         "not been defined or not yet computed".format(
                             e.args[0]))
def check_spglib_version():
    """
    Import and return the spglib module, validating its version.

    :return: the imported ``spglib`` module.
    :raise ValueError: if spglib cannot be imported, its version string
        cannot be parsed, or the version is older than 1.9.4.

    Also emits a RuntimeWarning for versions older than 1.13, which had
    known bugs (e.g. wrong treatment of oI lattices).
    """
    try:
        import spglib
    except ImportError:
        raise ValueError("spglib >= 1.9.4 is required for the creation "
                         "of the k-paths, but it could not be imported")

    # BUG FIX: a missing module attribute raises AttributeError, not
    # NameError, so the old ``except NameError`` could never trigger.
    # Very old spglib releases did not define __version__ at all.
    version = getattr(spglib, '__version__', "1.8.0")

    try:
        version_pieces = [int(_) for _ in version.split('.')]
        if len(version_pieces) < 3:
            raise ValueError
    except ValueError:
        raise ValueError("Unable to parse version number")

    if tuple(version_pieces[:2]) < (1, 9):
        raise ValueError("Invalid spglib version, need >= 1.9.4")
    # BUG FIX: comparing a list slice to a tuple is always False in Python,
    # so the 1.9.x patch-level check silently never fired; compare tuples.
    if tuple(version_pieces[:2]) == (1, 9) and version_pieces[2] < 4:
        raise ValueError("Invalid spglib version, need >= 1.9.4")
    if tuple(version_pieces[:2]) < (1, 13):
        import warnings
        warnings.warn(
            'You have a version of SPGLIB older than 1.13, '
            'please consider upgrading to 1.13 or later since some bugs '
            'have been fixed', RuntimeWarning)
    return spglib
def get_cell_params(cell):
    r"""
    Return ``(a, b, c, cosalpha, cosbeta, cosgamma)`` given a
    :math:`3\times 3` cell.

    .. note:: Rows are vectors: ``v1 = cell[0]``, ``v2 = cell[1]``,
        ``v3 = cell[2]``  (the docstring previously said ``cell[3]``,
        which was a typo).

    :param cell: :math:`3\times 3` matrix whose rows are the three
        lattice vectors.
    :return: the three lattice-vector lengths followed by the cosines of
        the angles alpha (between v2 and v3), beta (v1, v3) and
        gamma (v1, v2).
    """
    # numpy is already imported at module level; the old redundant
    # function-local ``import numpy`` has been dropped.
    from math import sqrt

    v1, v2, v3 = numpy.array(cell)
    a = sqrt(sum(v1**2))
    b = sqrt(sum(v2**2))
    c = sqrt(sum(v3**2))
    cosalpha = numpy.dot(v2, v3) / b / c
    cosbeta = numpy.dot(v1, v3) / a / c
    cosgamma = numpy.dot(v1, v2) / a / b
    return (a, b, c, cosalpha, cosbeta, cosgamma)
def get_reciprocal_cell_rows(real_space_cell):
    r"""
    Compute the reciprocal-space cell of *real_space_cell*.

    Both cells store vectors as rows, so they satisfy
    ``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`,
    where :math:`I` is the :math:`3\times 3` identity matrix.

    :return: the :math:`3\times 3` list of reciprocal lattice vectors
        where each row is one vector.
    """
    inverse_cell = numpy.linalg.inv(real_space_cell)
    reciprocal_rows = (2. * numpy.pi) * inverse_cell.T
    return reciprocal_rows.tolist()
def get_real_cell_from_reciprocal_rows(reciprocal_space_rows):
    r"""
    Invert :py:func:`get_reciprocal_cell_rows`: given the reciprocal cell
    (:math:`3\times 3` matrix, G vectors as rows), return the real-space
    cell with the R vectors as rows, satisfying
    ``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`,
    where :math:`I` is the :math:`3\times 3` identity matrix.

    .. note:: The formula is actually the same as
        :py:func:`get_reciprocal_cell_rows`.

    :return: the :math:`3\times 3` list of real lattice vectors where each
        row is one vector.
    """
    inverse_rows = numpy.linalg.inv(reciprocal_space_rows)
    real_rows = (2. * numpy.pi) * inverse_rows.T
    return real_rows.tolist()
def get_path_data(ext_bravais):
    """
    Load the k-point definitions and suggested path for an extended
    Bravais symbol (first three characters only, like ``cF1``) as defined
    in the HPKOT paper.

    :param ext_bravais: a string among the allowed extended Bravais
        lattices defined in HPKOT.
    :return: a tuple ``(kparam_def, points_def, path)`` where the first
        element is the list with the definition of the k-point parameters,
        the second is the dictionary with the definition of the k-points,
        and the third is the list with the suggested paths.

    .. note:: ``kparam_def`` has to be a list and not a dictionary because
        the order matters (later k-parameters can be defined in terms of
        previous ones).
    """
    import os

    # Data files live in band_path_data/<ext_bravais>/ next to this module.
    this_folder = os.path.split(os.path.abspath(__file__))[0]
    folder = os.path.join(this_folder, "band_path_data", ext_bravais)
    path_file = os.path.join(folder, "path.txt")
    points_file = os.path.join(folder, "points.txt")
    kparam_file = os.path.join(folder, "k_vector_parameters.txt")

    # Blank lines are allowed (and skipped) only in the k-parameter file.
    with open(kparam_file) as handle:
        kparam_raw = [line.split() for line in handle if line.strip()]
    with open(points_file) as handle:
        points_raw = [line.split() for line in handle]
    with open(path_file) as handle:
        path_raw = [line.split() for line in handle]

    # Sanity-check the column counts of each file.
    for raw_rows, n_fields, fname in (
            (kparam_raw, 2, kparam_file),
            (path_raw, 2, path_file),
            (points_raw, 4, points_file)):
        if any(len(row) != n_fields for row in raw_rows):
            raise ValueError("Invalid line length in {}".format(fname))

    # Order must be preserved here (later parameters may use earlier ones).
    kparam_def = [(row[0], row[1].strip()) for row in kparam_raw]

    points_def = {}
    for label, kPx, kPy, kPz in points_raw:
        if label in points_def:
            raise ValueError("Internal error! Point {} defined multiple times "
                             "for Bravais lattice {}".format(
                                 label, ext_bravais))
        points_def[label] = (kPx, kPy, kPz)

    path = [(row[0], row[1]) for row in path_raw]
    # Every endpoint of every path segment must be a defined point.
    for start_label, end_label in path:
        for label in (start_label, end_label):
            if label not in points_def:
                raise ValueError(
                    "Point {} found in path (for {}) but undefined!".format(
                        label, ext_bravais))
    return (kparam_def, points_def, path)
|
import numpy
import numpy.linalg
def eval_expr_simple(expr, kparam):
"""
To evaluate expressions tha only require kparams and not a, b, c, ...
"""
if expr == "0":
return 0.
elif expr == "1/2":
return 1. / 2.
elif expr == "1":
return 1.
elif expr == "-1/2":
return -1. / 2.
elif expr == "1/4":
return 1. / 4.
elif expr == "3/8":
return 3. / 8.
elif expr == "3/4":
return 3. / 4.
elif expr == "5/8":
return 5. / 8.
elif expr == "1/3":
return 1. / 3.
else:
try:
return kparam[expr]
except KeyError as e:
raise ValueError(
"Asking for evaluation of symbol '{}' in "
"eval_expr_simple but this has not been defined or not "
"yet computed".format(e.message))
def extend_kparam(kparam):
"""
Extend the list of kparam with also expressions like :math:`1-x`, ...
:param kparam: a dictionary where the key is the expression as a string and
the value is the numerical value
:return: a similar dictionary, extended with simple expressions
"""
kparam_extended = {}
for k, v in kparam.items():
kparam_extended[k] = v
kparam_extended["-{}".format(k)] = -v
kparam_extended["1-{}".format(k)] = 1. - v
kparam_extended["-1+{}".format(k)] = -1. + v
kparam_extended["1/2-{}".format(k)] = 1. / 2. - v
kparam_extended["1/2+{}".format(k)] = 1. / 2. + v
return kparam_extended
def eval_expr(expr, a, b, c, cosalpha, cosbeta, cosgamma, kparam):
r"""
Given a string expression as a function of the parameters ``a``, ``b``, ``c`` (lengths of the
cell lattice vectors) and ``cosalpha``, ``cosbeta``, ``cosgamma`` (the cosines of the three
angles between lattice vectors) returns the numerical value of the expression.
:param a: length of the first lattice vector
:param b: length of the second lattice vector
:param c: length of the third lattice vector
:param cosalpha: cosine of the :math:`\alpha` angle (between lattice vectors 2 and 3)
:param cosbeta: cosine of the :math:`\beta` angle (between lattice vectors 1 and 3)
:param cosgamma: cosine of the :math:`\gamma` angle (between lattice vectors 1 and 2)
:param kparam: a dictionary that associates the value to expressions as a function
of the ``a, b, c, cosalpha, cosbeta, cosgamma`` parameters
:return: the value of the expression for the given values of the cell parameters
.. note:: To evaluate expressions, I hardcode a table of existing expressions in the
DB rather than parsing the string (to avoid additional dependencies and
avoid the use of ``eval``).
"""
from math import sqrt
sinalpha = sqrt(1. - cosalpha**2)
sinbeta = sqrt(1. - cosbeta**2)
singamma = sqrt(1. - cosgamma**2)
try:
if expr == "(a*a/b/b+(1+a/c*cosbeta)/sinbeta/sinbeta)/4":
return (a * a / b / b +
(1. + a / c * cosbeta) / sinbeta / sinbeta) / 4.
elif expr == "1-Z*b*b/a/a":
Z = kparam['Z']
return 1. - Z * b * b / a / a
elif expr == "1/2-2*Z*c*cosbeta/a":
Z = kparam['Z']
return 1. / 2. - 2. * Z * c * cosbeta / a
elif expr == "E/2+a*a/4/b/b+a*c*cosbeta/2/b/b":
E = kparam['E']
return E / 2. + a * a / 4. / b / b + a * c * cosbeta / 2. / b / b
elif expr == "2*F-Z":
F = kparam['F']
Z = kparam['Z']
return 2. * F - Z
elif expr == "c/2/a/cosbeta*(1-4*U+a*a*sinbeta*sinbeta/b/b)":
U = kparam['U']
return c / 2. / a / cosbeta * (
1. - 4. * U + a * a * sinbeta * sinbeta / b / b)
elif expr == "-1/4+W/2-Z*c*cosbeta/a":
W = kparam['W']
Z = kparam['Z']
return -1. / 4. + W / 2. - Z * c * cosbeta / a
elif expr == "(2+a/c*cosbeta)/4/sinbeta/sinbeta":
return (2. + a / c * cosbeta) / 4. / sinbeta / sinbeta
elif expr == "3/4-b*b/4/a/a/sinbeta/sinbeta":
return 3. / 4. - b * b / 4. / a / a / sinbeta / sinbeta
elif expr == "S-(3/4-S)*a*cosbeta/c":
S = kparam['S']
return S - (3. / 4. - S) * a * cosbeta / c
elif expr == "(1+a*a/b/b)/4":
return (1. + a * a / b / b) / 4.
elif expr == "-a*c*cosbeta/2/b/b":
return -a * c * cosbeta / 2. / b / b
elif expr == "1+Z-2*M":
Z = kparam['Z']
M = kparam['M']
return 1. + Z - 2. * M
elif expr == "X-2*D":
X = kparam['X']
D = kparam['D']
return X - 2 * D
elif expr == "(1+a/c*cosbeta)/2/sinbeta/sinbeta":
return (1. + a / c * cosbeta) / 2. / sinbeta / sinbeta
elif expr == "1/2+Y*c*cosbeta/a":
Y = kparam['Y']
return 1. / 2. + Y * c * cosbeta / a
elif expr == "a*a/4/c/c":
return a * a / 4. / c / c
elif expr == "5/6-2*D":
D = kparam['D']
return 5. / 6. - 2. * D
elif expr == "1/3+D":
D = kparam['D']
return 1. / 3. + D
elif expr == "1/6-c*c/9/a/a":
return 1. / 6. - c * c / 9. / a / a
elif expr == "1/2-2*Z":
Z = kparam['Z']
return 1. / 2. - 2. * Z
elif expr == "1/2+Z":
Z = kparam['Z']
return 1. / 2. + Z
elif expr == "(1+b*b/c/c)/4":
return (1. + b * b / c / c) / 4.
elif expr == "(1+c*c/b/b)/4":
return (1. + c * c / b / b) / 4.
elif expr == "(1+b*b/a/a)/4":
return (1. + b * b / a / a) / 4.
elif expr == "(1+a*a/b/b-a*a/c/c)/4":
return (1. + a * a / b / b - a * a / c / c) / 4.
elif expr == "(1+a*a/b/b+a*a/c/c)/4":
return (1. + a * a / b / b + a * a / c / c) / 4.
elif expr == "(1+c*c/a/a-c*c/b/b)/4":
return (1. + c * c / a / a - c * c / b / b) / 4.
elif expr == "(1+c*c/a/a+c*c/b/b)/4":
return (1. + c * c / a / a + c * c / b / b) / 4.
elif expr == "(1+b*b/a/a-b*b/c/c)/4":
return (1. + b * b / a / a - b * b / c / c) / 4.
elif expr == "(1+c*c/b/b-c*c/a/a)/4":
return (1. + c * c / b / b - c * c / a / a) / 4.
elif expr == "(1+a*a/c/c)/4":
return (1. + a * a / c / c) / 4.
elif expr == "(b*b-a*a)/4/c/c":
return (b * b - a * a) / 4. / c / c
elif expr == "(a*a+b*b)/4/c/c":
return (a * a + b * b) / 4. / c / c
elif expr == "(1+c*c/a/a)/4":
return (1. + c * c / a / a) / 4.
elif expr == "(c*c-b*b)/4/a/a":
return (c * c - b * b) / 4. / a / a
elif expr == "(b*b+c*c)/4/a/a":
return (b * b + c * c) / 4. / a / a
elif expr == "(a*a-c*c)/4/b/b":
return (a * a - c * c) / 4. / b / b
elif expr == "(c*c+a*a)/4/b/b":
return (c * c + a * a) / 4. / b / b
elif expr == "a*a/2/c/c":
return a * a / 2. / c / c
else:
raise ValueError('Unknown expression, define a new case:\n'
' elif expr == "{0}":\n'
' return {0}'.format(expr))
except KeyError as e:
raise ValueError("Asking for evaluation of symbol '{}' but this has "
"not been defined or not yet computed".format(
e.message))
def check_spglib_version():
"""
Check the SPGLIB version and raise a ValueError if the version is
older than 1.9.4.
Also raises an warning if the user has a version of SPGLIB that is
older than 1.13, because before then there were some bugs (e.g.
wrong treatment of oI, see e.g. issue )
Return the spglib module.
"""
try:
import spglib
except ImportError:
raise ValueError("spglib >= 1.9.4 is required for the creation "
"of the k-paths, but it could not be imported")
try:
version = spglib.__version__
except NameError:
version = "1.8.0" # or older, version was introduced only recently
try:
version_pieces = [int(_) for _ in version.split('.')]
if len(version_pieces) < 3:
raise ValueError
except ValueError:
raise ValueError("Unable to parse version number")
if tuple(version_pieces[:2]) < (1, 9):
raise ValueError("Invalid spglib version, need >= 1.9.4")
if version_pieces[:2] == (1, 9) and version_pieces[2] < 4:
raise ValueError("Invalid spglib version, need >= 1.9.4")
if tuple(version_pieces[:2]) < (1, 13):
import warnings
warnings.warn(
'You have a version of SPGLIB older than 1.13, '
'please consider upgrading to 1.13 or later since some bugs '
'have been fixed', RuntimeWarning)
return spglib
def get_cell_params(cell):
r"""
Return (a,b,c,cosalpha,cosbeta,cosgamma) given a :math:`3\times 3` cell
.. note:: Rows are vectors: ``v1 = cell[0]``, ``v2 = cell[1]``, ``v3 = cell[3]``
"""
import numpy
from math import sqrt
v1, v2, v3 = numpy.array(cell)
a = sqrt(sum(v1**2))
b = sqrt(sum(v2**2))
c = sqrt(sum(v3**2))
cosalpha = numpy.dot(v2, v3) / b / c
cosbeta = numpy.dot(v1, v3) / a / c
cosgamma = numpy.dot(v1, v2) / a / b
return (a, b, c, cosalpha, cosbeta, cosgamma)
def get_reciprocal_cell_rows(real_space_cell):
    r"""
    Given the cell in real space (3x3 matrix, vectors as rows),
    return the reciprocal-space cell where again the G vectors are
    rows, i.e. satisfying
    ``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`,
    where :math:`I` is the :math:`3\times 3` identity matrix.

    :return: the :math:`3\times 3` list of reciprocal lattice vectors where
        each row is one vector.
    """
    # The columns of 2*pi*inv(A) are the reciprocal vectors; transposing
    # turns them into rows to match the row-vector convention used here.
    inverse_cell = numpy.linalg.inv(real_space_cell)
    reciprocal_columns = 2. * numpy.pi * inverse_cell
    return reciprocal_columns.T.tolist()
def get_real_cell_from_reciprocal_rows(reciprocal_space_rows):
    r"""
    Given the cell in reciprocal space (3x3 matrix, G vectors as rows),
    return the real-space cell where again the R vectors are
    rows, i.e. satisfying
    ``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`,
    where :math:`I` is the :math:`3\times 3` identity matrix.

    .. note:: This is actually the same operation as
        :py:func:`get_reciprocal_cell_rows` (the transform is involutive).

    :return: the :math:`3\times 3` list of real lattice vectors where each
        row is one vector.
    """
    # Same 2*pi*inv(...) construction as the forward transform.
    inverse_reciprocal = numpy.linalg.inv(reciprocal_space_rows)
    real_columns = 2. * numpy.pi * inverse_reciprocal
    return real_columns.T.tolist()
def get_path_data(ext_bravais):
    """
    Given an extended Bravais symbol among those defined in the HPKOT paper
    (only first three characters, like cF1), return the points and the
    suggested path.

    :param ext_bravais: a string among the allowed extended Bravais lattices
        defined in HPKOT.
    :return: a tuple ``(kparam_def, points_def, path)`` where the
        first element is the list with the definition of the
        k-point parameters, the second is the dictionary with the
        definition of the k-points, and the third is the list
        with the suggested paths.

    .. note:: ``kparam_def`` has to be a list and not a dictionary
        because the order matters (later k-parameters can be defined
        in terms of previous ones)
    """
    import os

    # Data files live in band_path_data/<ext_bravais>/ next to this module
    this_folder = os.path.dirname(os.path.abspath(__file__))
    folder = os.path.join(this_folder, "band_path_data", ext_bravais)
    path_file = os.path.join(folder, "path.txt")
    points_file = os.path.join(folder, "points.txt")
    kparam_file = os.path.join(folder, "k_vector_parameters.txt")

    # Empty lines are tolerated (and skipped) only in the k-parameter file
    with open(kparam_file) as handle:
        kparam_raw = [line.split() for line in handle.readlines() if line.strip()]
    with open(points_file) as handle:
        points_raw = [line.split() for line in handle.readlines()]
    with open(path_file) as handle:
        path_raw = [line.split() for line in handle.readlines()]

    # Validate the number of whitespace-separated fields on every line
    if any(len(fields) != 2 for fields in kparam_raw):
        raise ValueError("Invalid line length in {}".format(kparam_file))
    if any(len(fields) != 2 for fields in path_raw):
        raise ValueError("Invalid line length in {}".format(path_file))
    if any(len(fields) != 4 for fields in points_raw):
        raise ValueError("Invalid line length in {}".format(points_file))

    # Keep insertion order: later parameters may refer to earlier ones
    kparam_def = [(name, expression.strip()) for name, expression in kparam_raw]

    points_def = {}
    for label, kPx, kPy, kPz in points_raw:
        if label in points_def:
            raise ValueError("Internal error! Point {} defined multiple times "
                             "for Bravais lattice {}".format(
                                 label, ext_bravais))
        points_def[label] = (kPx, kPy, kPz)

    path = [(start, end) for start, end in path_raw]

    # Every endpoint mentioned in the path must have coordinates defined
    for p1, p2 in path:
        if p1 not in points_def:
            raise ValueError(
                "Point {} found in path (for {}) but undefined!".format(
                    p1, ext_bravais))
        if p2 not in points_def:
            raise ValueError(
                "Point {} found in path (for {}) but undefined!".format(
                    p2, ext_bravais))

    return (kparam_def, points_def, path)
|
en
| 0.791329
|
To evaluate expressions tha only require kparams and not a, b, c, ... Extend the list of kparam with also expressions like :math:`1-x`, ... :param kparam: a dictionary where the key is the expression as a string and the value is the numerical value :return: a similar dictionary, extended with simple expressions Given a string expression as a function of the parameters ``a``, ``b``, ``c`` (lengths of the cell lattice vectors) and ``cosalpha``, ``cosbeta``, ``cosgamma`` (the cosines of the three angles between lattice vectors) returns the numerical value of the expression. :param a: length of the first lattice vector :param b: length of the second lattice vector :param c: length of the third lattice vector :param cosalpha: cosine of the :math:`\alpha` angle (between lattice vectors 2 and 3) :param cosbeta: cosine of the :math:`\beta` angle (between lattice vectors 1 and 3) :param cosgamma: cosine of the :math:`\gamma` angle (between lattice vectors 1 and 2) :param kparam: a dictionary that associates the value to expressions as a function of the ``a, b, c, cosalpha, cosbeta, cosgamma`` parameters :return: the value of the expression for the given values of the cell parameters .. note:: To evaluate expressions, I hardcode a table of existing expressions in the DB rather than parsing the string (to avoid additional dependencies and avoid the use of ``eval``). Check the SPGLIB version and raise a ValueError if the version is older than 1.9.4. Also raises an warning if the user has a version of SPGLIB that is older than 1.13, because before then there were some bugs (e.g. wrong treatment of oI, see e.g. issue ) Return the spglib module. # or older, version was introduced only recently Return (a,b,c,cosalpha,cosbeta,cosgamma) given a :math:`3\times 3` cell .. note:: Rows are vectors: ``v1 = cell[0]``, ``v2 = cell[1]``, ``v3 = cell[3]`` Given the cell in real space (3x3 matrix, vectors as rows, return the reciprocal-space cell where again the G vectors are rows, i.e. 
satisfying ``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`, where :math:`I` is the :math:`3\times 3` identity matrix. :return: the :math:`3\times 3` list of reciprocal lattice vectors where each row is one vector. Given the cell in reciprocal space (3x3 matrix, G vectors as rows, return the real-space cell where again the R vectors are rows, i.e. satisfying ``dot(real_space_cell, reciprocal_space_cell.T)`` = :math:`2 \pi I`, where :math:`I` is the :math:`3\times 3` identity matrix. .. note:: This is actually the same as :py:func:`get_reciprocal_cell_rows`. :return: the :math:`3\times 3` list of real lattice vectors where each row is one vector. Given an extended Bravais symbol among those defined in the HPKOT paper (only first three characters, like cF1), return the points and the suggested path. :param ext_bravais: a string among the allowed etended Bravais lattices defined in HPKOT. :return: a tuple ``(kparam_def, points_def, path)`` where the first element is the list with the definition of the k-point parameters, the second is the dictionary with the definition of the k-points, and the third is the list with the suggested paths. .. note:: ``kparam_def`` has to be a list and not a dictionary because the order matters (later k-parameters can be defined in terms of previous ones) # Get the data from the band_data folder # check # order must be preserved here # check path is valid
| 3.476637
| 3
|
data/external/repositories_2to3/109477/gatsby-hackathon-seizure-master/code/python/seizures/helper/data_structures.py
|
Keesiu/meta-kaggle
| 0
|
6629557
|
<filename>data/external/repositories_2to3/109477/gatsby-hackathon-seizure-master/code/python/seizures/helper/data_structures.py
import numpy as np
def stack_matrices(X_list):
    """Stack the matrices in ``X_list`` row-wise into a single 2-D array."""
    return np.vstack(X_list)
def stack_vectors(y_list):
    """Concatenate the 1-D arrays in ``y_list`` into one long vector."""
    return np.concatenate(y_list)
def test_stack_matrices(X, X_list):
    """Assert that ``X`` is the row-wise stack of the matrices in ``X_list``.

    Raises AssertionError on the first mismatching row block; prints a
    confirmation message when everything matches.
    """
    i_begin = 0
    for X_tmp in X_list:
        # rows [i_begin, i_end) of X must equal this list entry
        i_end = i_begin + X_tmp.shape[0]
        assert np.all(X[i_begin:i_end, :] == X_tmp)
        i_begin += X_tmp.shape[0]
    print('stack = list')
def test_stack_vectors(y, y_list):
    """Assert that ``y`` is the concatenation of the vectors in ``y_list``.

    Raises AssertionError on the first mismatching segment; prints a
    confirmation message when everything matches.
    """
    i_begin = 0
    for y_tmp in y_list:
        # elements [i_begin, i_end) of y must equal this list entry
        i_end = i_begin + y_tmp.shape[0]
        assert np.all(y[i_begin:i_end] == y_tmp)
        i_begin += y_tmp.shape[0]
    print('stack = list')
# old buggy version
#def stack_matrices(X_list):
# num_samples = np.sum([len(X) for X in X_list])
# dim = X_list[0].shape[1]
#
# X_stack = np.zeros((num_samples, dim))
# i = 0
# for i in range(len(X_list)):
# X = X_list[i]
# X_stack[i:(i + len(X))] = X
# i += len(X)
# return X_stack
#def stack_vectors(y_list):
# num_samples = np.sum([len(y) for y in y_list])
#
# y_stack = np.zeros(num_samples)
# i = 0
# for i in range(len(y_list)):
# y = y_list[i]
# y_stack[i:(i + len(y))] = y
# i += len(y)
#
# return y_stack
|
<filename>data/external/repositories_2to3/109477/gatsby-hackathon-seizure-master/code/python/seizures/helper/data_structures.py
import numpy as np
def stack_matrices(X_list):
X_stack = np.vstack(X_list)
return X_stack
def stack_vectors(y_list):
tmp = np.concatenate(y_list)
return tmp
def test_stack_matrices(X, X_list):
i_begin = 0
for X_tmp in X_list:
i_end = i_begin + X_tmp.shape[0]
assert np.all(X[i_begin:i_end, :] == X_tmp)
i_begin += X_tmp.shape[0]
print('stack = list')
def test_stack_vectors(y, y_list):
i_begin = 0
for y_tmp in y_list:
i_end = i_begin + y_tmp.shape[0]
assert np.all(y[i_begin:i_end] == y_tmp)
i_begin += y_tmp.shape[0]
print('stack = list')
# old buggy version
#def stack_matrices(X_list):
# num_samples = np.sum([len(X) for X in X_list])
# dim = X_list[0].shape[1]
#
# X_stack = np.zeros((num_samples, dim))
# i = 0
# for i in range(len(X_list)):
# X = X_list[i]
# X_stack[i:(i + len(X))] = X
# i += len(X)
# return X_stack
#def stack_vectors(y_list):
# num_samples = np.sum([len(y) for y in y_list])
#
# y_stack = np.zeros(num_samples)
# i = 0
# for i in range(len(y_list)):
# y = y_list[i]
# y_stack[i:(i + len(y))] = y
# i += len(y)
#
# return y_stack
|
en
| 0.383045
|
# old buggy version #def stack_matrices(X_list): # num_samples = np.sum([len(X) for X in X_list]) # dim = X_list[0].shape[1] # # X_stack = np.zeros((num_samples, dim)) # i = 0 # for i in range(len(X_list)): # X = X_list[i] # X_stack[i:(i + len(X))] = X # i += len(X) # return X_stack #def stack_vectors(y_list): # num_samples = np.sum([len(y) for y in y_list]) # # y_stack = np.zeros(num_samples) # i = 0 # for i in range(len(y_list)): # y = y_list[i] # y_stack[i:(i + len(y))] = y # i += len(y) # # return y_stack
| 2.598195
| 3
|
Agents/API/alarm.py
|
mbay-SAG/cumulocity-thinedge-example
| 2
|
6629558
|
import requests
import logging
import json
import API.authentication as auth
from datetime import datetime, date, time, timedelta
# Module-level logger; note that basicConfig() runs at import time and
# configures the root logger (DEBUG level) for any importing application.
logger = logging.getLogger('Alarm API')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.info('Logger for Alarm was initialised')
def getAlarmsFromManagedObject(internalID, pageSize=2500, days=0, currentPage=1):
    """
    Retrieve alarms of a single managed object from Cumulocity.

    :param internalID: c8y internal id of the managed object (alarm source)
    :param pageSize: maximum number of alarms per result page
    :param days: if non-zero, restrict the query to the last ``days`` days
    :param currentPage: result page to fetch
    :return: the decoded JSON response dict, or ``{}`` on a non-200 status
        or on an unexpected error
    """
    try:
        logger.info('Checking for alarms of managed object in c8y')
        if days != 0:
            # dateFrom must be the *older* bound of the interval; the
            # original assignment had the two swapped, which produced an
            # inverted (empty) date range.
            dateFrom = date.today() - timedelta(days)
            dateTo = date.today()
            # "&currentPage" restored: the parameter name had been corrupted
            # to the HTML-entity artifact "currency sign + tPage".
            url = "https://%s/alarm/alarms?pageSize=%s&source=%s&dateFrom=%s&dateTo=%s&currentPage=%s&withTotalPages=True" % (auth.get().tenant, pageSize, internalID, dateFrom, dateTo, currentPage)
        else:
            url = "https://%s/alarm/alarms?pageSize=%s&source=%s&currentPage=%s&withTotalPages=True" % (auth.get().tenant, pageSize, internalID, currentPage)
        logger.debug('Requesting the following url: ' + str(url))
        response = requests.request("GET", url, headers=auth.get().headers, data=auth.get().payload)
        logger.debug('Response from request: ' + str(response.text))
        logger.debug('Response from request with code : ' + str(response.status_code))
        if response.status_code == 200:
            logger.info('Received alarms')
            json_data = json.loads(response.text)
            return json_data
        logger.error('Receiving following status code %s' % (str(response.status_code)))
        return {}
    except Exception as e:
        logger.error('The following error occured: %s' % (str(e)))
        # explicit empty dict instead of implicit None, matching the
        # non-200 branch so callers always receive a dict
        return {}
def getAlarms(pageSize=2500, days=0, currentPage=1):
    """
    Retrieve alarms across all devices from Cumulocity.

    :param pageSize: maximum number of alarms per result page
    :param days: if non-zero, restrict the query to the last ``days`` days
    :param currentPage: result page to fetch
    :return: the decoded JSON response dict, or ``{}`` on a non-200 status
        or on an unexpected error
    """
    try:
        logger.info('Checking for alarms in c8y')
        if days != 0:
            # dateFrom must be the *older* bound of the interval; the
            # original assignment had the two swapped, which produced an
            # inverted (empty) date range.
            dateFrom = date.today() - timedelta(days)
            dateTo = date.today()
            # "&currentPage" restored: the parameter name had been corrupted
            # to the HTML-entity artifact "currency sign + tPage".
            url = "https://%s/alarm/alarms?pageSize=%s&dateFrom=%s&dateTo=%s&currentPage=%s&withTotalPages=True" % (auth.get().tenant, pageSize, dateFrom, dateTo, currentPage)
        else:
            url = "https://%s/alarm/alarms?pageSize=%s&currentPage=%s&withTotalPages=True" % (auth.get().tenant, pageSize, currentPage)
        logger.debug('Requesting the following url: ' + str(url))
        response = requests.request("GET", url, headers=auth.get().headers, data=auth.get().payload)
        logger.debug('Response from request: ' + str(response.text))
        logger.debug('Response from request with code : ' + str(response.status_code))
        if response.status_code == 200:
            logger.info('Received alarms')
            json_data = json.loads(response.text)
            logger.debug('Receiving the following response %s' % (str(response.text)))
            return json_data
        logger.error('Receiving following status code %s' % (str(response.status_code)))
        return {}
    except Exception as e:
        logger.error('The following error occured: %s' % (str(e)))
        # explicit empty dict instead of implicit None, matching the
        # non-200 branch so callers always receive a dict
        return {}
def createAlarms(type, text, source ):
    """Create an alarm in Cumulocity.

    NOTE(review): stub — only logs; the POST to /alarm/alarms is not
    implemented. The parameter name ``type`` shadows the builtin.
    """
    logger.info('Creating alarm in c8y')

# Importable module: no command-line behaviour is defined.
if __name__ == '__main__':
    pass
|
import requests
import logging
import json
import API.authentication as auth
from datetime import datetime, date, time, timedelta
logger = logging.getLogger('Alarm API')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.info('Logger for Alarm was initialised')
def getAlarmsFromManagedObject(internalID, pageSize=2500, days=0, currentPage=1):
try:
logger.info('Checking for alarms of managed object in c8y')
if days != 0:
dateFrom = date.today()
dateTo = date.today() - timedelta(days)
url = "https://%s/alarm/alarms?pageSize=%s&source=%s&dateFrom=%s&dateTo=%s¤tPage=%s&withTotalPages=True"%(auth.get().tenant, pageSize, internalID, dateFrom, dateTo, currentPage, )
else:
url = "https://%s/alarm/alarms?pageSize=%s&source=%s¤tPage=%s&withTotalPages=True"%(auth.get().tenant, pageSize, internalID, currentPage, )
logger.debug('Requesting the following url: ' + str(url))
response = requests.request("GET", url, headers=auth.get().headers, data = auth.get().payload)
logger.debug('Response from request: ' + str(response.text))
logger.debug('Response from request with code : ' + str(response.status_code))
if response.status_code == 200:
logger.info('Received alarms')
json_data = json.loads(response.text)
return json_data
else:
logger.error('Receiving following status code %s'%(str(response.status_code)))
return {}
except Exception as e:
logger.error('The following error occured: %s' % (str(e)))
def getAlarms(pageSize=2500, days=0, currentPage=1):
try:
logger.info('Checking for alarms in c8y')
if days != 0:
dateFrom = date.today()
dateTo = date.today() - timedelta(days)
url = "https://%s/alarm/alarms?pageSize=%s&dateFrom=%s&dateTo=%s¤tPage=%s&withTotalPages=True"%(auth.get().tenant, pageSize, dateFrom, dateTo, currentPage, )
else:
url = "https://%s/alarm/alarms?pageSize=%s¤tPage=%s&withTotalPages=True"%(auth.get().tenant, pageSize, currentPage, )
logger.debug('Requesting the following url: ' + str(url))
response = requests.request("GET", url, headers=auth.get().headers, data = auth.get().payload)
logger.debug('Response from request: ' + str(response.text))
logger.debug('Response from request with code : ' + str(response.status_code))
if response.status_code == 200:
logger.info('Received alarms')
json_data = json.loads(response.text)
logger.debug('Receiving the following response %s'%(str(response.text)))
return json_data
else:
logger.error('Receiving following status code %s'%(str(response.status_code)))
return {}
except Exception as e:
logger.error('The following error occured: %s' % (str(e)))
def createAlarms(type, text, source ):
logger.info('Creating alarm in c8y')
if __name__ == '__main__':
pass
|
none
| 1
| 2.617378
| 3
|
|
zdevelop/tests/test_client.py
|
illuscio-dev/spanclient-py
| 0
|
6629559
|
<reponame>illuscio-dev/spanclient-py
import pytest
import rapidjson as json
import uuid
import aiohttp
import yaml
import io
import csv
import copy
from aiostream.stream import enumerate as aio_enumeerate
from dataclasses import dataclass
from grahamcracker import DataSchema, schema_for
from bson import BSON
from bson.raw_bson import RawBSONDocument
from typing import AsyncGenerator, List, Optional, Callable, Dict, Any
from spantools import errors_api
from spanclient import (
handle_response_aio,
iter_paged_aio,
StatusMismatchError,
SpanClient,
ContentDecodeError,
handles,
ClientRequest,
ContentTypeUnknownError,
test_utils,
MimeType,
register_mimetype,
)
from spanclient.test_utils import MockResponse, MockConfig, RequestValidator
class MockSession:
    """
    Minimal stand-in for an aiohttp client session used by the paging
    tests: accessing an HTTP-verb attribute is checked against an expected
    method list, and each resulting call pops the next canned
    ``MockResponse``.
    """

    def __init__(
        self,
        method_list: List[str],
        response_list: List[MockResponse],
        call_checks: Optional[List[Callable]] = None,
    ):
        if call_checks is None:
            call_checks = list()
        # canned responses, consumed from the front (pop(0)) per request
        self._response_list: List[MockResponse] = response_list
        # currently stored but never invoked by this mock
        self._call_checks: List[Callable] = call_checks
        # expected HTTP verbs, consumed from the END (pop()) per access
        self._method_list: List[str] = method_list

    def __getattr__(self, item: str):
        # Any non-underscore attribute is treated as an HTTP verb lookup.
        # NOTE(review): method_list.pop() consumes from the end while
        # responses pop from the front — only safe while all expected
        # methods are identical (as in the current tests). Confirm before
        # reusing with mixed verbs.
        if not item.startswith("_"):
            assert item == self._method_list.pop()
            return self._route
        else:
            return super().__getattribute__(item)

    def _route(
        self,
        url: str,
        *,
        headers: Optional[dict] = None,
        json: Optional[dict] = None,
        data: Optional[bytes] = None,
        params: Optional[dict] = None,
    ):
        # Return the next canned response; all call arguments are ignored.
        return self._response_list.pop(0)
@dataclass
class Name:
    # simple payload fixture without an id
    first: str
    last: str


@schema_for(Name)
class NameSchema(DataSchema[Name]):
    """Generated (de)serialization schema for ``Name``."""
    pass


@dataclass
class NameID:
    # payload fixture carrying a uuid primary key
    id: uuid.UUID
    first: str
    last: str


@schema_for(NameID)
class NameIDSchema(DataSchema[NameID]):
    """Generated (de)serialization schema for ``NameID``."""
    pass
class TestMockResponse:
    """Behaviour of the ``MockResponse`` test double itself."""

    def test_default_status(self):
        """A fresh response defaults to HTTP 200."""
        r = MockResponse()
        assert r.status == 200

    def test_content_type_mimetyp(self):
        """A ``MimeType`` enum member is rendered as its header string."""
        r = MockResponse(status=200, _content_type=MimeType.JSON)
        assert r.content_type == "application/json"

    @pytest.mark.parametrize("arg", ["_text", "_json", "_yaml", "_bson"])
    def test_mimetype_override(self, arg):
        """An explicit ``_content_type`` wins over the body-kind default."""
        if arg == "_text":
            data = "some data"
        else:
            data = {"key": "value"}
        kwargs = {arg: data, "_content_type": "application/custom"}
        r = MockResponse(**kwargs)
        assert r.content_type == "application/custom"
        assert getattr(r, arg) is not None

    @pytest.mark.asyncio
    async def test_json(self):
        """A ``_json`` body is exposed as json, text and raw bytes."""
        r = MockResponse(status=200, _json={"key": "value"})
        assert await r.json() == {"key": "value"}
        assert await r.text() == json.dumps({"key": "value"})
        assert await r.read() == json.dumps({"key": "value"}).encode()
        assert r.content_type == "application/json"

    @pytest.mark.asyncio
    async def test_text(self):
        """A ``_text`` body is exposed as text and raw bytes."""
        r = MockResponse(status=200, _text="test")
        assert await r.text() == "test"
        assert await r.read() == "test".encode()
        assert r.content_type == "text/plain"
class TestStatusCodes:
    """Status-code validation by ``handle_response_aio``."""

    @pytest.mark.asyncio
    async def test_status_code_pass(self):
        """Default expectation is HTTP 200."""
        r = MockResponse(status=200)
        await handle_response_aio(r)

    @pytest.mark.asyncio
    async def test_status_code_pass_non_default(self):
        """A single non-default expected code is honoured."""
        r = MockResponse(status=201)
        await handle_response_aio(r, valid_status_codes=201)

    @pytest.mark.asyncio
    async def test_status_code_pass_tuple(self):
        """Any member of an expected-code tuple passes."""
        r = MockResponse(status=201)
        await handle_response_aio(r, valid_status_codes=(200, 201))

    @pytest.mark.asyncio
    async def test_status_code_fail(self):
        """An unexpected status raises, carrying the response object."""
        r = MockResponse(status=400)
        with pytest.raises(StatusMismatchError):
            try:
                await handle_response_aio(r)
            except StatusMismatchError as error:
                # the offending response must be attached to the error
                assert error.response is r
                raise error

    @pytest.mark.asyncio
    async def test_status_code_fail_non_default(self):
        """Mismatch against a single custom expected code raises."""
        r = MockResponse(status=400)
        with pytest.raises(StatusMismatchError):
            try:
                await handle_response_aio(r, valid_status_codes=201)
            except StatusMismatchError as error:
                assert error.response is r
                raise error

    @pytest.mark.asyncio
    async def test_status_code_fail_tuple(self):
        """Mismatch against every member of the tuple raises."""
        r = MockResponse(status=400)
        with pytest.raises(StatusMismatchError):
            try:
                await handle_response_aio(r, valid_status_codes=(200, 201))
            except StatusMismatchError as error:
                assert error.response is r
                raise error
class TestDataLoad:
    """Decoding and schema-loading of response bodies by
    ``handle_response_aio`` (``decoded`` = wire format parsed,
    ``loaded`` = schema-deserialized object)."""

    @pytest.mark.asyncio
    async def test_no_data_returned(self):
        """An empty body yields ``None`` for both decoded and loaded."""
        r = MockResponse(status=200)
        r_info = await handle_response_aio(r)
        assert r_info.loaded is None
        assert r_info.decoded is None

    @pytest.mark.asyncio
    async def test_text(self):
        """Plain text passes through unchanged."""
        r = MockResponse(status=200, _text="test text")
        r_info = await handle_response_aio(r)
        assert r_info.loaded == "test text"
        assert r_info.decoded == "test text"

    @pytest.mark.asyncio
    async def test_json(self):
        """JSON decodes to a dict and loads through the schema."""
        r = MockResponse(status=200, _json={"first": "Harry", "last": "Potter"})
        r.headers["Content-Type"] = "application/json"
        r_info = await handle_response_aio(r, data_schema=NameSchema())
        assert r_info.loaded == Name("Harry", "Potter")
        assert r_info.decoded == {"first": "Harry", "last": "Potter"}

    @pytest.mark.asyncio
    async def test_json_no_schema(self):
        """Without a schema the decoded dict is also the loaded value."""
        r = MockResponse(status=200, _json={"first": "Harry", "last": "Potter"})
        r.headers["Content-Type"] = "application/json"
        r_info = await handle_response_aio(r)
        assert r_info.loaded == {"first": "Harry", "last": "Potter"}
        assert r_info.decoded == {"first": "Harry", "last": "Potter"}

    @pytest.mark.asyncio
    async def test_bson(self):
        """BSON decodes to a ``RawBSONDocument`` and loads via the schema."""
        r = MockResponse(
            status=200, _content=BSON.encode({"first": "Harry", "last": "Potter"})
        )
        r.headers["Content-Type"] = "application/bson"
        r_info = await handle_response_aio(r, data_schema=NameSchema())
        assert r_info.loaded == Name("Harry", "Potter")
        assert isinstance(r_info.decoded, RawBSONDocument)
        assert dict(r_info.decoded) == {"first": "Harry", "last": "Potter"}

    @pytest.mark.asyncio
    async def test_bson_no_schema(self):
        """Without a schema the raw BSON document is the loaded value."""
        r = MockResponse(
            status=200, _content=BSON.encode({"first": "Harry", "last": "Potter"})
        )
        r.headers["Content-Type"] = "application/bson"
        r_info = await handle_response_aio(r)
        assert dict(r_info.loaded) == {"first": "Harry", "last": "Potter"}
        assert isinstance(r_info.decoded, RawBSONDocument)
        assert dict(r_info.decoded) == {"first": "Harry", "last": "Potter"}

    @pytest.mark.asyncio
    async def test_unknown_no_content_type_header(self):
        """A schema but no Content-Type header is a decode error."""
        r = MockResponse(status=200)
        with pytest.raises(ContentDecodeError):
            try:
                await handle_response_aio(r, data_schema=NameSchema())
            except ContentDecodeError as error:
                assert error.response is r
                raise error

    @pytest.mark.asyncio
    async def test_unknown_content(self):
        """An unregistered Content-Type raises ``ContentTypeUnknownError``."""
        r = MockResponse(status=200, _content=b"some content")
        r.headers["Content-Type"] = "application/unknown"
        with pytest.raises(ContentTypeUnknownError):
            try:
                await handle_response_aio(r, data_schema=NameSchema())
            except ContentTypeUnknownError as error:
                assert error.response is r
                raise error
class TestErrorHandling:
    """Translation of span error headers into ``errors_api`` exceptions."""

    @pytest.mark.parametrize(
        "error_type",
        [
            errors_api.APIError,
            errors_api.InvalidMethodError,
            errors_api.RequestValidationError,
            errors_api.ResponseValidationError,
            errors_api.NothingToReturnError,
        ],
    )
    @pytest.mark.asyncio
    async def test_api_error(self, error_type: errors_api.APIError):
        """Each error-header set raises the matching exception type with
        its id, data payload and message preserved."""
        error_id = uuid.uuid4()
        r = MockResponse(status=200)
        # populate the span error headers the handler inspects
        r.headers["error-name"] = error_type.__name__
        r.headers["error-code"] = str(error_type.api_code)
        r.headers["error-data"] = json.dumps({"key": "value"})
        r.headers["error-message"] = "some message"
        r.headers["error-id"] = str(error_id)
        try:
            await handle_response_aio(r)
        except BaseException as error:
            assert isinstance(error, error_type)
            assert error.id == error_id
            assert error.error_data == {"key": "value"}
            assert str(error) == "some message"
        else:
            raise AssertionError("error not raised")
class TestPaging:
    """``iter_paged_aio`` traversal across multiple result pages."""

    @pytest.mark.asyncio
    async def test_handle_normal(self):
        """Iteration stops when ``paging-next`` is ``None`` — 3 pages of
        2 records each yield 6 items."""
        methods = ["get", "get", "get"]
        harry_json = {"first": "Harry", "last": "Potter"}
        headers = {"paging-next": "/some/url", "Content-Type": "application/json"}
        mock_response_1 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_1.headers = headers
        mock_response_2 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_2.headers = headers
        mock_response_3 = MockResponse(status=200, _json=[harry_json, harry_json])
        # last page: no next-page link terminates the iteration
        mock_response_3.headers = {
            "paging-next": None,
            "Content-Type": "application/json",
        }
        responses = [mock_response_1, mock_response_2, mock_response_3]
        mock_session = MockSession(method_list=methods, response_list=responses)
        item_iter = aio_enumeerate(
            iter_paged_aio(
                session=mock_session,
                url_base="/test/base",
                limit=2,
                data_schema=NameSchema(many=True),
            ),
            start=1,
        )
        i = 0
        async for i, r_info in item_iter:
            print(r_info)
            assert r_info.loaded == Name("Harry", "Potter")
            assert r_info.decoded == {"first": "Harry", "last": "Potter"}
        # 3 pages x 2 records = 6 items total
        assert i == 6

    @pytest.mark.asyncio
    async def test_handle_nothing_to_return(self):
        """A page answering ``NothingToReturnError`` terminates the
        iteration cleanly after the items already yielded."""
        methods = ["get", "get", "get", "get", "get"]
        harry_json = {"first": "Harry", "last": "Potter"}
        headers = {"paging-next": "/some/url", "Content-Type": "application/json"}
        mock_response_1 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_1.headers = headers
        mock_response_2 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_2.headers = headers
        mock_response_3 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_3.headers = headers
        # fourth page signals "nothing to return" via the error headers
        mock_response_4 = MockResponse(status=400)
        mock_response_4.headers = {
            "paging-next": "/some/url",
            "Content-Type": "application/json",
            "error-code": errors_api.NothingToReturnError.api_code,
            "error-name": errors_api.NothingToReturnError.__name__,
            "error-message": "Some Message",
            "error-id": str(uuid.uuid4()),
        }
        responses = [
            mock_response_1,
            mock_response_2,
            mock_response_3,
            mock_response_4,
            None,
        ]
        mock_session = MockSession(method_list=methods, response_list=responses)
        item_iter = aio_enumeerate(
            iter_paged_aio(
                session=mock_session,
                url_base="/test/base",
                limit=2,
                data_schema=NameSchema(many=True),
            ),
            start=1,
        )
        i = 0
        async for i, r_info in item_iter:
            print(r_info)
            assert r_info.loaded == Name("Harry", "Potter")
            assert r_info.decoded == {"first": "Harry", "last": "Potter"}
        # only the three successful pages contribute items
        assert i == 6
def validate_name_post(validator: RequestValidator, response: MockResponse):
    """Custom request hook used by ``TestSpanClient``: records that it
    fired and checks the serialized request body."""
    # Flag set on the test class so the test can assert the hook ran.
    TestSpanClient.VALIDATOR_TRIGGERED = True
    assert validator.req_data_decoded == NameSchema().dump(
        {"first": "Harry", "last": "Potter"}
    )
class TestClientInit:
    """Host/port resolution at ``SpanClient`` construction time."""

    def test_default_host(self):
        """``DEFAULT_HOST_NAME`` is used when no host is passed."""
        class APIClient(SpanClient):
            DEFAULT_HOST_NAME = "SomeHost"

        client = APIClient()
        assert client.host_name == "SomeHost"

    def test_no_host_raises(self):
        """Neither default nor explicit host -> ``ValueError``."""
        class APIClient(SpanClient):
            pass

        with pytest.raises(ValueError):
            _ = APIClient()

    def test_default_port(self):
        """``DEFAULT_PORT`` is appended to the supplied host name."""
        class APIClient(SpanClient):
            DEFAULT_PORT = 8080

        client = APIClient(host_name="SomeHost")
        assert client.host_name == "SomeHost:8080"
class TestSpanClient:
UUID1 = uuid.uuid4()
@pytest.mark.asyncio
async def test_context_spawn_session(self):
class APIClient(SpanClient):
pass
client = APIClient(host_name="api-host")
assert client._session is None
async with client:
assert client._session is not None
@pytest.mark.asyncio
async def test_prop_spawn_session(self):
class APIClient(SpanClient):
pass
client = APIClient(host_name="api-host")
assert client._session is None
assert client.session is not None
assert client._session is not None
session = client.session
async with client:
assert client._session is session
assert client.session is session
@test_utils.mock_aiohttp(
method="GET",
resp=test_utils.MockResponse(
status=201, _json={"id": str(UUID1), "first": "Harry", "last": "Potter"}
),
req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
)
@pytest.mark.asyncio
async def test_basic_client(self):
class APIClient(SpanClient):
@handles.get("/names/{name_id}", resp_codes=201, resp_schema=NameIDSchema())
async def name_fetch(
self, name_id: uuid.UUID, *, req: ClientRequest
) -> NameID:
req.path_params["name_id"] = name_id
client = APIClient(host_name="api-host")
name = await client.name_fetch(TestSpanClient.UUID1)
assert name.id == TestSpanClient.UUID1
assert name.first == "Harry"
assert name.last == "Potter"
@test_utils.mock_aiohttp(
method="GET",
resp=test_utils.MockResponse(),
req_validator=test_utils.RequestValidator(url=f"http://api-host/changed"),
)
@pytest.mark.asyncio
async def test_basic_client_endpoint_setting_alter(self):
class APIClient(SpanClient):
@handles.get("/original")
async def name_fetch(self, *, req: ClientRequest):
print("test")
assert req.endpoint_settings.endpoint == "/original"
req.endpoint_settings.endpoint = "/changed"
client = APIClient(host_name="api-host")
await client.name_fetch()
await client.name_fetch()
@test_utils.mock_aiohttp(
method="GET",
resp=(
test_utils.MockResponse(status=200, _text="response 1"),
test_utils.MockResponse(status=201, _text="response 2"),
),
)
@pytest.mark.asyncio
async def test_basic_client_double_response(self):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=(200, 201))
async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
info = await req.execute()
return info.resp
client = APIClient(host_name="api-host")
resp1 = await client.name_fetch()
assert resp1.status == 200
assert await resp1.text() == "response 1"
resp2 = await client.name_fetch()
assert resp2.status == 201
assert await resp2.text() == "response 2"
@test_utils.mock_aiohttp(
method="GET",
resp=(
test_utils.MockResponse(status=200, _text="response 1"),
test_utils.MockResponse(status=201, _text="response 2"),
),
req_validator=(
test_utils.RequestValidator(url=f"http://api-host/names/1"),
test_utils.RequestValidator(url=f"http://api-host/names/2"),
),
)
@pytest.mark.asyncio
async def test_basic_client_double_validate(self):
class APIClient(SpanClient):
@handles.get("/names/{num}", resp_codes=(200, 201))
async def name_fetch(
self, num: int, *, req: ClientRequest
) -> aiohttp.ClientResponse:
req.path_params["num"] = num
info = await req.execute()
return info.resp
client = APIClient(host_name="api-host")
resp1 = await client.name_fetch(1)
assert resp1.status == 200
assert await resp1.text() == "response 1"
resp2 = await client.name_fetch(2)
assert resp2.status == 201
assert await resp2.text() == "response 2"
@test_utils.mock_aiohttp(
method="GET",
resp=(
test_utils.MockResponse(status=200, _text="response 1"),
test_utils.MockResponse(status=400, _text="response 2"),
),
)
@pytest.mark.asyncio
async def test_basic_client_double_response_bad_status(self):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=(200, 201))
async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
info = await req.execute()
return info.resp
client = APIClient(host_name="api-host")
resp1 = await client.name_fetch()
assert resp1.status == 200
assert await resp1.text() == "response 1"
with pytest.raises(test_utils.StatusMismatchError):
await client.name_fetch()
@test_utils.mock_aiohttp(
method="GET",
resp=(
test_utils.MockResponse(status=200, _text="response 1"),
test_utils.MockResponse(status=201, _text="response 2"),
),
req_validator=(
test_utils.RequestValidator(url=f"http://api-host/names/1"),
test_utils.RequestValidator(url=f"http://api-host/names/2"),
),
)
@pytest.mark.asyncio
async def test_basic_client_double_validate_fail_second(self):
class APIClient(SpanClient):
@handles.get("/names/{num}", resp_codes=(200, 201))
async def name_fetch(
self, num: int, *, req: ClientRequest
) -> aiohttp.ClientResponse:
req.path_params["num"] = num
info = await req.execute()
return info.resp
client = APIClient(host_name="api-host")
resp1 = await client.name_fetch(1)
assert resp1.status == 200
assert await resp1.text() == "response 1"
with pytest.raises(test_utils.URLMismatchError):
await client.name_fetch(3)
VALIDATOR_TRIGGERED = False
@test_utils.mock_aiohttp(
method="POST",
resp=test_utils.MockResponse(
status=201, _json={"id": str(UUID1), "first": "Harry", "last": "Potter"}
),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names/{UUID1}",
params={"limit": 10, "offset": 0},
headers={"Accept": "application/json", "Content-Type": "application/json"},
media={"first": "Harry", "last": "Potter"},
custom_hook=validate_name_post,
),
)
@pytest.mark.asyncio
async def test_req_validation_client(self):
class APIClient(SpanClient):
@handles.post(
"/names/{name_id}",
req_schema=NameIDSchema(exclude=("id",)),
query_params={"limit": 10, "offset": 0},
headers={"Accept": "application/json"},
resp_codes=201,
resp_schema=NameIDSchema(),
)
async def name_fetch(
self, name_id: uuid.UUID, *, req: ClientRequest
) -> NameID:
req.media = Name("Harry", "Potter")
req.path_params["name_id"] = name_id
client = APIClient(host_name="api-host")
assert TestSpanClient.VALIDATOR_TRIGGERED is False
name = await client.name_fetch(TestSpanClient.UUID1)
assert name.id == TestSpanClient.UUID1
assert name.first == "Harry"
assert name.last == "Potter"
assert TestSpanClient.VALIDATOR_TRIGGERED is True
@test_utils.mock_aiohttp(
method="POST",
resp=test_utils.MockResponse(status=201),
req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
)
@pytest.mark.asyncio
async def test_result_no_data(self):
class APIClient(SpanClient):
@handles.post("/names/{name_id}", resp_codes=201)
async def name_fetch(
self, name_id: uuid.UUID, *, req: ClientRequest
) -> aiohttp.ClientResponse:
req.path_params["name_id"] = name_id
client = APIClient(host_name="api-host")
resp = await client.name_fetch(TestSpanClient.UUID1)
assert resp.status == 201
@test_utils.mock_aiohttp(
method="POST",
resp=test_utils.MockResponse(status=201),
req_validator=test_utils.RequestValidator(url=f"http://api-host/names"),
)
@pytest.mark.asyncio
async def test_result_returned(self):
class APIClient(SpanClient):
@handles.post("/names", resp_codes=201)
async def name_fetch(
self, name_id: uuid.UUID, *, req: ClientRequest
) -> str:
await req.execute()
return "custom"
client = APIClient(host_name="api-host")
resp = await client.name_fetch(TestSpanClient.UUID1)
assert resp == "custom"
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(
            status=200, _json={"id": str(UUID1), "first": "Hermione", "last": "Granger"}
        ),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    @pytest.mark.asyncio
    async def test_update_object(self):
        """Setting ``req.update_obj`` loads the response into an existing
        object in place (schema built with ``load_dataclass=False``).
        """
        class APIClient(SpanClient):
            @handles.get(
                "/names/{name_id}",
                resp_codes=200,
                resp_schema=NameIDSchema(load_dataclass=False),
            )
            async def name_fetch(self, name: NameID, *, req: ClientRequest) -> NameID:
                req.path_params["name_id"] = name.id
                req.update_obj = name
        client = APIClient(host_name="api-host")
        name = NameID(TestSpanClient.UUID1, "Harry", "Potter")
        name_returned = await client.name_fetch(name)
        # Same instance comes back, mutated with the response payload.
        assert name is name_returned
        assert name.id == TestSpanClient.UUID1
        assert name.first == "Hermione"
        assert name.last == "Granger"
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(
            status=200, _json={"id": str(UUID1), "first": "Hermione", "last": "Granger"}
        ),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    @pytest.mark.asyncio
    async def test_update_object_custom(self):
        """A ``data_updater`` callable replaces the default in-place update logic."""
        def custom_updater(current: Name, new: dict):
            # Appends "-custom" so the test can tell this updater ran
            # instead of the default one.
            current.id = new["id"]
            current.first = new["first"] + "-custom"
            current.last = new["last"] + "-custom"
        class APIClient(SpanClient):
            @handles.get(
                "/names/{name_id}",
                resp_codes=200,
                resp_schema=NameIDSchema(load_dataclass=False),
                data_updater=custom_updater,
            )
            async def name_fetch(self, name: NameID, *, req: ClientRequest) -> NameID:
                req.path_params["name_id"] = name.id
                req.update_obj = name
        client = APIClient(host_name="api-host")
        name = NameID(TestSpanClient.UUID1, "Harry", "Potter")
        name_returned = await client.name_fetch(name)
        assert name is name_returned
        assert name.id == TestSpanClient.UUID1
        assert name.first == "Hermione-custom"
        assert name.last == "Granger-custom"
@test_utils.mock_aiohttp(
method="POST",
resp=test_utils.MockResponse(status=200),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names", media="Harry Potter"
),
)
@pytest.mark.asyncio
async def test_send_implicit_text(self):
class APIClient(SpanClient):
@handles.post("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
req.media = "<NAME>"
client = APIClient(host_name="api-host")
resp = await client.name_fetch()
assert resp.status == 200
    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", media=b"Some Bin Data"
        ),
    )
    @pytest.mark.asyncio
    async def test_send_unknown_mimetype(self):
        """Raw ``bytes`` media may be sent under an unregistered mimetype as-is."""
        class APIClient(SpanClient):
            @handles.post("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = b"Some Bin Data"
                req.mimetype_send = "application/unknown"
        client = APIClient(host_name="api-host")
        resp = await client.name_fetch()
        assert resp.status == 200
    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", media=b"Some Bin Data"
        ),
    )
    @pytest.mark.asyncio
    async def test_send_unknown_mimetype_error(self):
        """Non-bytes media under an unregistered mimetype cannot be encoded
        and raises ``ContentTypeUnknownError``.
        """
        class APIClient(SpanClient):
            @handles.post("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                # dict media has no encoder for "application/unknown".
                req.media = {"key": "value"}
                req.mimetype_send = "application/unknown"
        client = APIClient(host_name="api-host")
        with pytest.raises(ContentTypeUnknownError):
            await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="POST",
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names",
            media={"first": "Harry", "last": "Potter"},
            headers={"Content-Type": MimeType.YAML.value},
        ),
        resp=test_utils.MockResponse(
            status=200, _yaml={"first": "Ron", "last": "Weasley"}
        ),
    )
    @pytest.mark.asyncio
    async def test_yaml_round_trip_schema(self):
        """Schema-dumped media sent as YAML; YAML response loaded back through
        the schema into a ``Name``.
        """
        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(),
                resp_codes=200,
                resp_schema=NameSchema(),
            )
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = Name("Harry", "Potter")
                req.mimetype_send = MimeType.YAML
        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == Name("Ron", "Weasley")
    @test_utils.mock_aiohttp(
        method="POST",
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names",
            media={"first": "Harry", "last": "Potter"},
            headers={"Content-Type": MimeType.BSON.value},
        ),
        resp=test_utils.MockResponse(
            status=200, _bson={"first": "Ron", "last": "Weasley"}
        ),
    )
    @pytest.mark.asyncio
    async def test_bson_round_trip_schema(self):
        """Same round trip as the YAML test, but encoded as BSON."""
        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(),
                resp_codes=200,
                resp_schema=NameSchema(),
            )
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = Name("Harry", "Potter")
                req.mimetype_send = MimeType.BSON
        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == Name("Ron", "Weasley")
    @test_utils.mock_aiohttp(
        method="POST",
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names",
            media=[
                {"first": "Harry", "last": "Potter"},
                {"first": "Draco", "last": "Malfoy"},
            ],
            headers={"Content-Type": MimeType.BSON.value},
        ),
        resp=test_utils.MockResponse(
            status=200,
            _bson=[
                {"first": "Ron", "last": "Weasley"},
                {"first": "Hermione", "last": "Granger"},
            ],
        ),
    )
    @pytest.mark.asyncio
    async def test_bson_list_round_trip_schema(self):
        """BSON round trip of a list payload using ``many=True`` schemas."""
        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(many=True),
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = [Name("Harry", "Potter"), Name("Draco", "Malfoy")]
                req.mimetype_send = MimeType.BSON
        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == [Name("Ron", "Weasley"), Name("Hermione", "Granger")]
    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(
            status=200,
            # Raw YAML bytes with no Content-Type header set on the response.
            _content=yaml.safe_dump({"first": "Ron", "last": "Weasley"}).encode(),
        ),
    )
    @pytest.mark.asyncio
    async def test_sniff_return_content(self):
        """A response carrying no content type is still decoded and loaded
        through the schema (content sniffing — per the test name; verify
        against the client implementation).
        """
        class APIClient(SpanClient):
            @handles.post("/names", resp_codes=200, resp_schema=NameSchema())
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                pass
        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == Name("Ron", "Weasley")
    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=200),
        req_validator=test_utils.RequestValidator(
            headers={"Accept": MimeType.YAML.value}
        ),
    )
    @pytest.mark.asyncio
    async def test_accept_mimetype(self):
        """``mimetype_accept`` on the route decorator sets the Accept header."""
        class APIClient(SpanClient):
            @handles.post("/names", mimetype_accept=MimeType.YAML, resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                pass
        client = APIClient(host_name="api-host")
        await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1 -- "paging-next" header signals another page exists.
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2 -- no "paging-next" header, so iteration stops here.
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged(self):
        """``handles.paged`` turns the endpoint into an async generator that
        fetches pages of ``limit`` items and yields items across pages.
        """
        class APIClient(SpanClient):
            @handles.paged(limit=2)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                pass
        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch():
            print(name)
            names.append(name)
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.BSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            )
        ],
        resp=[
            # Response 1 -- no "paging-next" header, so only one page is fetched.
            test_utils.MockResponse(
                status=200, _bson=[{"first": "Harry", "last": "Potter"}]
            )
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_single_bson(self):
        """A single BSON page with fewer items than the limit ends iteration."""
        class APIClient(SpanClient):
            @handles.paged(limit=2)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.BSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                pass
        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch():
            print(name)
            names.append(name)
        assert names == [Name("Harry", "Potter")]
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1 -- expects limit 2, not the decorator default of 50.
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_limit_override(self):
        """Setting ``req.paging.limit`` inside the handler overrides the
        ``handles.paged(limit=...)`` default.
        """
        class APIClient(SpanClient):
            @handles.paged(limit=50)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, limit: int, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                req.paging.limit = limit
        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch(2):
            print(name)
            names.append(name)
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1 -- first request already starts at offset 2.
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 4, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_offset_override(self):
        """``req.paging.offset_start`` shifts the first page's offset."""
        class APIClient(SpanClient):
            @handles.paged(limit=50)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, skip: int, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                req.paging.offset_start = skip
                req.paging.limit = 2
        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch(2):
            print(name)
            names.append(name)
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
            # Validator 3
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 4, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
            # Response 3 -- third request errors out, ending manual iteration.
            test_utils.MockResponse(
                status=200,
                _exception=errors_api.NothingToReturnError(
                    message="No Items to return", error_id=uuid.uuid4()
                ),
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_manual(self):
        """The handler may execute the request itself and return loaded data;
        paging stops when the API signals ``NothingToReturnError``.
        """
        class APIClient(SpanClient):
            @handles.paged(limit=2)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                result = await req.execute()
                return result.loaded
        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch():
            names.append(name)
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]
@test_utils.mock_aiohttp(
method="GET",
req_validator=[
# Validator 1
test_utils.RequestValidator(
headers={"Accept": MimeType.JSON.value},
params={"paging-offset": 0, "paging-limit": 2},
),
# Validator 2
test_utils.RequestValidator(
headers={"Accept": MimeType.JSON.value},
params={"paging-offset": 2, "paging-limit": 2},
),
],
resp=[
# Response 1
test_utils.MockResponse(status=200, headers={"paging-next": "some_page"}),
# Response 2
test_utils.MockResponse(status=200),
],
)
@pytest.mark.asyncio
async def test_paged_empty_body(self):
class APIClient(SpanClient):
@handles.paged(limit=2)
@handles.get("/names", mimetype_accept=MimeType.JSON, resp_codes=200)
async def name_fetch(
self, *, req: ClientRequest
) -> AsyncGenerator[Name, None]:
pass
client = APIClient(host_name="api-host")
responses = list()
async for resp in client.name_fetch():
assert isinstance(resp, test_utils.MockResponse)
responses.append(responses)
assert len(responses) == 2
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(status=201),
        req_validator=test_utils.RequestValidator(url="http://api-host/names"),
    )
    @pytest.mark.asyncio
    async def test_custom_hook_manipulate_resp(self, get_config: MockConfig = None):
        """A validator ``custom_hook`` installed at test time (via the
        ``get_config`` object injected by the decorator) may mutate the mock
        response before it is handled.
        """
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=201, resp_schema=NameSchema())
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                pass
        def custom_hook(validator: RequestValidator, response: MockResponse):
            response.mock_json({"first": "Harry", "last": "Potter"})
        get_config.req_validator[0].custom_hook = custom_hook
        client = APIClient(host_name="api-host")
        name = await client.name_fetch()
        assert name.first == "Harry"
        assert name.last == "Potter"
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="POST",
        resp=MockResponse(201),
        req_validator=RequestValidator(
            content_type=MimeType.JSON, headers={"Content-Type": "application/json"}
        ),
    )
    async def test_mock_req_validate_data_type_w_schema(
        self, post_config: MockConfig = None
    ):
        """A hook can decode the outgoing request body with a schema and echo
        a schema-dumped payload back as the mock response.
        """
        test_name = Name("Harry", "Potter")
        def post_hook(validator: RequestValidator, resp: MockResponse):
            # Decoded request body must round-trip through the schema.
            data = NameSchema().load(validator.req_data_decoded)
            assert data == test_name
            mock_data = NameSchema().dump(test_name)
            resp.mock_json(mock_data)
        post_config.req_validator[0].custom_hook = post_hook
        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(),
                resp_codes=201,
                resp_schema=NameSchema(),
            )
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.media = Name("Harry", "Potter")
        client = APIClient(host_name="api-host")
        name = await client.name_fetch()
        print(name)
@pytest.mark.asyncio
@test_utils.mock_aiohttp(
method="GET",
resp=MockResponse(),
req_validator=RequestValidator(url="http://api-host:8080/name"),
)
async def test_custom_port_url(self):
class APIClient(SpanClient):
@handles.get("/name")
async def name_fetch(self, *, req: ClientRequest) -> None:
pass
client = APIClient(host_name="api-host", port=8080)
_ = await client.name_fetch()
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(),
        req_validator=RequestValidator(url="http://api-host:8080/name"),
    )
    async def test_custom_default_port_url(self):
        """A ``DEFAULT_PORT`` class attribute on the client subclass is used
        when no explicit port is passed.
        """
        class APIClient(SpanClient):
            DEFAULT_PORT = 8080
            @handles.get("/name")
            async def name_fetch(self, *, req: ClientRequest) -> None:
                pass
        client = APIClient(host_name="api-host")
        _ = await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/endpoint",
            # Projection entries are rendered as "project.<field>" query params.
            params={"project.field1": "1", "project.field2": "0"},
        ),
    )
    @pytest.mark.asyncio
    async def test_projection(self):
        """Entries set on ``req.projection`` become query parameters."""
        class APIClient(SpanClient):
            @handles.get("/endpoint")
            async def name_fetch(self, *, req: ClientRequest):
                req.projection["field1"] = 1
                req.projection["field2"] = 0
        client = APIClient(host_name="api-host")
        await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/endpoint",
            params={"project.field1": "0", "project.field2": "1"},
        ),
    )
    @pytest.mark.asyncio
    async def test_projection_from_user(self):
        """A caller-supplied dict may be assigned wholesale to ``req.projection``."""
        class APIClient(SpanClient):
            @handles.get("/endpoint")
            async def name_fetch(
                self, projection: Dict[str, int], *, req: ClientRequest
            ):
                req.projection = projection
        client = APIClient(host_name="api-host")
        user_projection = {"field1": 0, "field2": 1}
        await client.name_fetch(user_projection)
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/endpoint",
            # The validator expects both projection params ...
            params={"project.field1": "1", "project.field2": "0",},
        ),
    )
    @pytest.mark.asyncio
    async def test_projection_validation_error(self):
        """A projection missing an expected field fails params validation."""
        class APIClient(SpanClient):
            @handles.get("/endpoint")
            async def name_fetch(self, *, req: ClientRequest):
                # ... but the handler only sets one of them.
                req.projection["field1"] = 1
        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.ParamsMismatchError):
            await client.name_fetch()
class TestClientReqValidationErrors:
UUID1 = uuid.uuid4()
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    async def test_url_mismatch(self):
        """A path param that does not match the expected URL raises
        ``URLMismatchError``.
        """
        class APIClient(SpanClient):
            @handles.get("/names/{name_id}", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.path_params["name_id"] = "wrong_id"
        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.URLMismatchError):
            await client.name_fetch()
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", params={"offset": 0}
        ),
    )
    async def test_params_key_missing(self):
        """Omitting an expected query param raises ``ParamsMismatchError``."""
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                pass
        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.ParamsMismatchError):
            await client.name_fetch()
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", params={"offset": 0}
        ),
    )
    async def test_params_value_wrong(self):
        """A wrong query-param value raises ``ParamsMismatchError``."""
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.query_params["offset"] = 10
        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.ParamsMismatchError):
            await client.name_fetch()
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", headers={"Accept": "application/bson"}
        ),
    )
    async def test_headers_key_missing(self):
        """Omitting an expected header raises ``HeadersMismatchError``."""
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                pass
        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.HeadersMismatchError):
            await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", headers={"Accept": "application/bson"}
        ),
    )
    @pytest.mark.asyncio
    async def test_headers_value_wrong(self):
        """A wrong header value raises ``HeadersMismatchError``."""
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.headers["Accept"] = "application/json"
        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.HeadersMismatchError):
            await client.name_fetch()
@test_utils.mock_aiohttp(
method="GET",
resp=MockResponse(200),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names", headers={"Accept": "application/bson"}
),
)
@pytest.mark.asyncio
async def test_media_value_wrong(self):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> NameID:
req.headers["Accept"] = "application/json"
client = APIClient(host_name="api-host")
with pytest.raises(test_utils.HeadersMismatchError):
await client.name_fetch()
@test_utils.mock_aiohttp(
method="GET",
resp=MockResponse(200),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names", media={"first": "Harry", "last": "Potter"}
),
)
@pytest.mark.asyncio
async def test_media_value_wrong(self):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> NameID:
req.media = {"first": "Harry", "last": "Granger"}
client = APIClient(host_name="api-host")
with pytest.raises(test_utils.DataValidationError):
await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", custom_hook=validate_name_post
        ),
    )
    @pytest.mark.asyncio
    async def test_media_custom_validation_failure(self):
        """The validator's custom hook rejects an unexpected body (its
        assertion propagates as ``AssertionError``).
        """
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.media = {"first": "Harry", "last": "Granger"}
        client = APIClient(host_name="api-host")
        with pytest.raises(AssertionError):
            await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="POST",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            headers={"Content-Type": MimeType.YAML.value}
        ),
    )
    @pytest.mark.asyncio
    async def test_media_type_validation_error(self):
        """Sending JSON-encoded text where YAML content was declared raises
        ``DataTypeValidationError``.
        """
        class APIClient(SpanClient):
            @handles.post("/names", mimetype_send=MimeType.YAML, resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.media = json.dumps({"first": "Harry", "last": "Granger"})
        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.DataTypeValidationError):
            await client.name_fetch()
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(),
        req_validator=test_utils.RequestValidator(url="http://api-host/names"),
    )
    async def test_mock_config_pass(self, *, get_config: MockConfig = None):
        """The decorator injects a ``get_config`` object whose mock responses
        can be mutated inside the test body.
        """
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=201)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                pass
        client = APIClient(host_name="api-host")
        get_config.resp[0].status = 201
        r = await client.name_fetch()
        assert r.status == 201
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=[MockResponse(200), MockResponse(201)],
        req_validator=[
            test_utils.RequestValidator(content_type=MimeType.JSON),
            test_utils.RequestValidator(content_type=MimeType.JSON),
        ],
    )
    async def test_mock_config_pass_validator(self, *, get_config: MockConfig = None):
        """Per-call responses and validator expectations can be configured at
        test time through the injected ``get_config``.
        """
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=(200, 201))
            async def name_fetch(
                self, media, *, req: ClientRequest
            ) -> aiohttp.ClientResponse:
                req.media = media
        client = APIClient(host_name="api-host")
        data1 = {"key": "value1"}
        data2 = {"key": "value2"}
        get_config.resp[0].mock_json(data1)
        get_config.resp[1].mock_json(data2)
        get_config.req_validator[0].media = data1
        get_config.req_validator[1].media = data2
        data_return = await client.name_fetch(data1)
        assert data_return == data1
        data_return = await client.name_fetch(data2)
        assert data_return == data2
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(method="GET", resp=MockResponse(200))
    @test_utils.mock_aiohttp(
        method="POST",
        resp=MockResponse(201),
        req_validator=RequestValidator(media={"first": "harry", "last": "potter"}),
    )
    async def test_mock_config_multiple(
        self, *, get_config: MockConfig = None, post_config: MockConfig = None
    ):
        """Stacked ``mock_aiohttp`` decorators inject one config per HTTP
        method (``get_config`` and ``post_config``).
        """
        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                pass
            @handles.post("/names", resp_codes=201)
            async def name_create(
                self, media, *, req: ClientRequest
            ) -> aiohttp.ClientResponse:
                req.media = media
        client = APIClient(host_name="api-host")
        assert get_config.resp[0].status == 200
        assert post_config.resp[0].status == 201
        r = await client.name_fetch()
        print(r)
        assert r.status == 200
        r = await client.name_create({"first": "harry", "last": "potter"})
        print(r)
        assert r.status == 201
        await client.session.close()
    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="POST", resp=MockResponse(200), req_validator=RequestValidator()
    )
    async def test_custom_handler(self, post_config: MockConfig = None):
        """``register_mimetype`` plugs a custom encoder/decoder pair
        ("text/csv") into the client; both must be invoked on a round trip.
        """
        # Flags prove the custom codec actually ran (dicts so the nested
        # functions can mutate them without ``nonlocal``).
        invoked_encode = dict(set=False)
        invoked_decode = dict(set=False)
        def csv_encode(data: List[dict]) -> bytes:
            # Serialize a list of dicts to CSV bytes, headers from the first row.
            invoked_encode["set"] = True
            encoded = io.StringIO()
            headers = list(data[0].keys())
            writer = csv.DictWriter(encoded, fieldnames=headers)
            writer.writeheader()
            writer.writerows(data)
            return encoded.getvalue().encode()
        def csv_decode(data: bytes) -> List[Dict[str, Any]]:
            invoked_decode["set"] = True
            csv_file = io.StringIO(data.decode())
            reader = csv.DictReader(csv_file)
            return [row for row in reader]
        class APIClient(SpanClient):
            register_mimetype("text/csv", encoder=csv_encode, decoder=csv_decode)
            @handles.post("/csv", mimetype_send="text/csv")
            async def csv_roundtrip(
                self, csv_data: List[Dict[str, Any]], *, req: ClientRequest
            ) -> List[Dict[str, Any]]:
                req.media = csv_data
        data = [{"key": "value1"}, {"key": "value2"}]
        post_config.req_validator[0].req_data = copy.copy(data)
        post_config.resp[0].content_type = "text/csv"
        post_config.resp[0].mock_content(csv_encode(data))
        client = APIClient(host_name="api-host")
        async with client:
            resp_data = await client.csv_roundtrip(data)
        assert data == resp_data
        assert invoked_decode["set"] is True
        assert invoked_encode["set"] is True
|
import pytest
import rapidjson as json
import uuid
import aiohttp
import yaml
import io
import csv
import copy
from aiostream.stream import enumerate as aio_enumeerate
from dataclasses import dataclass
from grahamcracker import DataSchema, schema_for
from bson import BSON
from bson.raw_bson import RawBSONDocument
from typing import AsyncGenerator, List, Optional, Callable, Dict, Any
from spantools import errors_api
from spanclient import (
handle_response_aio,
iter_paged_aio,
StatusMismatchError,
SpanClient,
ContentDecodeError,
handles,
ClientRequest,
ContentTypeUnknownError,
test_utils,
MimeType,
register_mimetype,
)
from spanclient.test_utils import MockResponse, MockConfig, RequestValidator
class MockSession:
    """Minimal stand-in for an aiohttp client session.

    Accessing any public attribute (e.g. ``session.get``) pops the expected
    HTTP method name off ``method_list`` (LIFO) and asserts it matches, then
    returns a router that pops canned responses off ``response_list`` (FIFO).
    """
    def __init__(
        self,
        method_list: List[str],
        response_list: List[MockResponse],
        call_checks: Optional[List[Callable]] = None,
    ):
        self._response_list: List[MockResponse] = response_list
        self._call_checks: List[Callable] = (
            list() if call_checks is None else call_checks
        )
        self._method_list: List[str] = method_list
    def __getattr__(self, item: str):
        if item.startswith("_"):
            # Private names resolve normally.
            return super().__getattribute__(item)
        # Public names are treated as HTTP verbs; they must arrive in
        # reverse order of ``method_list``.
        assert item == self._method_list.pop()
        return self._route
    def _route(
        self,
        url: str,
        *,
        headers: Optional[dict] = None,
        json: Optional[dict] = None,
        data: Optional[bytes] = None,
        params: Optional[dict] = None,
    ):
        # Canned responses are consumed in insertion order.
        return self._response_list.pop(0)
@dataclass
class Name:
    """Simple first/last-name payload used as request/response media in tests."""
    first: str
    last: str
@schema_for(Name)
class NameSchema(DataSchema[Name]):
    """grahamcracker-generated schema for serializing ``Name``."""
    pass
@dataclass
class NameID:
    """``Name`` variant with a UUID primary key, for id-bearing endpoints."""
    id: uuid.UUID
    first: str
    last: str
@schema_for(NameID)
class NameIDSchema(DataSchema[NameID]):
    """grahamcracker-generated schema for serializing ``NameID``."""
    pass
class TestMockResponse:
    """Behavior of the ``test_utils.MockResponse`` stand-in itself."""
    def test_default_status(self):
        # Status defaults to 200 when not supplied.
        r = MockResponse()
        assert r.status == 200
    def test_content_type_mimetyp(self):
        # A MimeType enum value renders as its string form.
        r = MockResponse(status=200, _content_type=MimeType.JSON)
        assert r.content_type == "application/json"
    @pytest.mark.parametrize("arg", ["_text", "_json", "_yaml", "_bson"])
    def test_mimetype_override(self, arg):
        # An explicit _content_type wins over the type implied by the payload kwarg.
        if arg == "_text":
            data = "some data"
        else:
            data = {"key": "value"}
        kwargs = {arg: data, "_content_type": "application/custom"}
        r = MockResponse(**kwargs)
        assert r.content_type == "application/custom"
        assert getattr(r, arg) is not None
    @pytest.mark.asyncio
    async def test_json(self):
        # _json populates json(), text() and read() consistently.
        r = MockResponse(status=200, _json={"key": "value"})
        assert await r.json() == {"key": "value"}
        assert await r.text() == json.dumps({"key": "value"})
        assert await r.read() == json.dumps({"key": "value"}).encode()
        assert r.content_type == "application/json"
    @pytest.mark.asyncio
    async def test_text(self):
        # _text populates text() and read() with a text/plain content type.
        r = MockResponse(status=200, _text="test")
        assert await r.text() == "test"
        assert await r.read() == "test".encode()
        assert r.content_type == "text/plain"
class TestStatusCodes:
    """``handle_response_aio`` status-code checking: single codes, tuples,
    and the default; mismatches raise ``StatusMismatchError`` carrying the
    offending response.
    """
    @pytest.mark.asyncio
    async def test_status_code_pass(self):
        # 200 is accepted by default.
        r = MockResponse(status=200)
        await handle_response_aio(r)
    @pytest.mark.asyncio
    async def test_status_code_pass_non_default(self):
        r = MockResponse(status=201)
        await handle_response_aio(r, valid_status_codes=201)
    @pytest.mark.asyncio
    async def test_status_code_pass_tuple(self):
        r = MockResponse(status=201)
        await handle_response_aio(r, valid_status_codes=(200, 201))
    @pytest.mark.asyncio
    async def test_status_code_fail(self):
        r = MockResponse(status=400)
        with pytest.raises(StatusMismatchError):
            try:
                await handle_response_aio(r)
            except StatusMismatchError as error:
                # The error must expose the failing response for inspection.
                assert error.response is r
                raise error
    @pytest.mark.asyncio
    async def test_status_code_fail_non_default(self):
        r = MockResponse(status=400)
        with pytest.raises(StatusMismatchError):
            try:
                await handle_response_aio(r, valid_status_codes=201)
            except StatusMismatchError as error:
                assert error.response is r
                raise error
    @pytest.mark.asyncio
    async def test_status_code_fail_tuple(self):
        r = MockResponse(status=400)
        with pytest.raises(StatusMismatchError):
            try:
                await handle_response_aio(r, valid_status_codes=(200, 201))
            except StatusMismatchError as error:
                assert error.response is r
                raise error
class TestDataLoad:
    """``handle_response_aio`` payload decoding/loading: ``decoded`` is the
    raw-decoded body, ``loaded`` is the schema-loaded object (or the decoded
    body when no schema is given).
    """
    @pytest.mark.asyncio
    async def test_no_data_returned(self):
        # Empty body -> both fields are None.
        r = MockResponse(status=200)
        r_info = await handle_response_aio(r)
        assert r_info.loaded is None
        assert r_info.decoded is None
    @pytest.mark.asyncio
    async def test_text(self):
        # Plain text passes through unchanged.
        r = MockResponse(status=200, _text="test text")
        r_info = await handle_response_aio(r)
        assert r_info.loaded == "test text"
        assert r_info.decoded == "test text"
    @pytest.mark.asyncio
    async def test_json(self):
        # JSON body loads into a dataclass via the schema.
        r = MockResponse(status=200, _json={"first": "Harry", "last": "Potter"})
        r.headers["Content-Type"] = "application/json"
        r_info = await handle_response_aio(r, data_schema=NameSchema())
        assert r_info.loaded == Name("Harry", "Potter")
        assert r_info.decoded == {"first": "Harry", "last": "Potter"}
    @pytest.mark.asyncio
    async def test_json_no_schema(self):
        # Without a schema, loaded == decoded dict.
        r = MockResponse(status=200, _json={"first": "Harry", "last": "Potter"})
        r.headers["Content-Type"] = "application/json"
        r_info = await handle_response_aio(r)
        assert r_info.loaded == {"first": "Harry", "last": "Potter"}
        assert r_info.decoded == {"first": "Harry", "last": "Potter"}
@pytest.mark.asyncio
async def test_bson(self):
r = MockResponse(
status=200, _content=BSON.encode({"first": "Harry", "last": "Potter"})
)
r.headers["Content-Type"] = "application/bson"
r_info = await handle_response_aio(r, data_schema=NameSchema())
assert r_info.loaded == Name("Harry", "Potter")
assert isinstance(r_info.decoded, RawBSONDocument)
assert dict(r_info.decoded) == {"first": "Harry", "last": "Potter"}
@pytest.mark.asyncio
async def test_bson_no_schema(self):
r = MockResponse(
status=200, _content=BSON.encode({"first": "Harry", "last": "Potter"})
)
r.headers["Content-Type"] = "application/bson"
r_info = await handle_response_aio(r)
assert dict(r_info.loaded) == {"first": "Harry", "last": "Potter"}
assert isinstance(r_info.decoded, RawBSONDocument)
assert dict(r_info.decoded) == {"first": "Harry", "last": "Potter"}
@pytest.mark.asyncio
async def test_unknown_no_content_type_header(self):
r = MockResponse(status=200)
with pytest.raises(ContentDecodeError):
try:
await handle_response_aio(r, data_schema=NameSchema())
except ContentDecodeError as error:
assert error.response is r
raise error
@pytest.mark.asyncio
async def test_unknown_content(self):
r = MockResponse(status=200, _content=b"some content")
r.headers["Content-Type"] = "application/unknown"
with pytest.raises(ContentTypeUnknownError):
try:
await handle_response_aio(r, data_schema=NameSchema())
except ContentTypeUnknownError as error:
assert error.response is r
raise error
class TestErrorHandling:
    """Translation of error headers into raised ``errors_api`` types."""

    @pytest.mark.parametrize(
        "error_type",
        [
            errors_api.APIError,
            errors_api.InvalidMethodError,
            errors_api.RequestValidationError,
            errors_api.ResponseValidationError,
            errors_api.NothingToReturnError,
        ],
    )
    @pytest.mark.asyncio
    async def test_api_error(self, error_type: errors_api.APIError):
        """Error headers on a 200 response raise the advertised error type."""
        error_id = uuid.uuid4()
        response = MockResponse(status=200)
        response.headers["error-name"] = error_type.__name__
        response.headers["error-code"] = str(error_type.api_code)
        response.headers["error-data"] = json.dumps({"key": "value"})
        response.headers["error-message"] = "some message"
        response.headers["error-id"] = str(error_id)

        with pytest.raises(error_type) as exc_info:
            await handle_response_aio(response)

        raised = exc_info.value
        assert raised.id == error_id
        assert raised.error_data == {"key": "value"}
        assert str(raised) == "some message"
class TestPaging:
    """``iter_paged_aio``: flattening paged list responses into items."""

    @pytest.mark.asyncio
    async def test_handle_normal(self):
        """Three two-item pages are flattened into six sequential items."""
        methods = ["get", "get", "get"]
        harry_json = {"first": "Harry", "last": "Potter"}
        headers = {"paging-next": "/some/url", "Content-Type": "application/json"}
        mock_response_1 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_1.headers = headers
        mock_response_2 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_2.headers = headers
        mock_response_3 = MockResponse(status=200, _json=[harry_json, harry_json])
        # Final page: "paging-next" is None, so iteration stops after it.
        mock_response_3.headers = {
            "paging-next": None,
            "Content-Type": "application/json",
        }
        responses = [mock_response_1, mock_response_2, mock_response_3]
        mock_session = MockSession(method_list=methods, response_list=responses)
        # NOTE(review): ``aio_enumeerate`` is the helper's actual (typo'd)
        # imported name — do not "fix" the spelling at the call site alone.
        item_iter = aio_enumeerate(
            iter_paged_aio(
                session=mock_session,
                url_base="/test/base",
                limit=2,
                data_schema=NameSchema(many=True),
            ),
            start=1,
        )
        i = 0
        async for i, r_info in item_iter:
            print(r_info)
            assert r_info.loaded == Name("Harry", "Potter")
            assert r_info.decoded == {"first": "Harry", "last": "Potter"}
        # enumerate started at 1, so the final index equals the item count.
        assert i == 6

    @pytest.mark.asyncio
    async def test_handle_nothing_to_return(self):
        """A NothingToReturnError page terminates iteration after 6 items."""
        methods = ["get", "get", "get", "get", "get"]
        harry_json = {"first": "Harry", "last": "Potter"}
        headers = {"paging-next": "/some/url", "Content-Type": "application/json"}
        mock_response_1 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_1.headers = headers
        mock_response_2 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_2.headers = headers
        mock_response_3 = MockResponse(status=200, _json=[harry_json, harry_json])
        mock_response_3.headers = headers
        mock_response_4 = MockResponse(status=400)
        mock_response_4.headers = {
            "paging-next": "/some/url",
            "Content-Type": "application/json",
            # NOTE(review): unlike TestErrorHandling, api_code is not wrapped
            # in str() here — confirm the error decoder tolerates an int value.
            "error-code": errors_api.NothingToReturnError.api_code,
            "error-name": errors_api.NothingToReturnError.__name__,
            "error-message": "Some Message",
            "error-id": str(uuid.uuid4()),
        }
        responses = [
            mock_response_1,
            mock_response_2,
            mock_response_3,
            mock_response_4,
            None,
        ]
        mock_session = MockSession(method_list=methods, response_list=responses)
        item_iter = aio_enumeerate(
            iter_paged_aio(
                session=mock_session,
                url_base="/test/base",
                limit=2,
                data_schema=NameSchema(many=True),
            ),
            start=1,
        )
        i = 0
        async for i, r_info in item_iter:
            print(r_info)
            assert r_info.loaded == Name("Harry", "Potter")
            assert r_info.decoded == {"first": "Harry", "last": "Potter"}
        # The error page yields no items; exactly the 3 full pages count.
        assert i == 6
def validate_name_post(validator: RequestValidator, response: MockResponse):
    """Mock-request hook: flag invocation and verify the serialized body."""
    TestSpanClient.VALIDATOR_TRIGGERED = True
    expected_body = NameSchema().dump({"first": "Harry", "last": "Potter"})
    assert validator.req_data_decoded == expected_body
class TestClientInit:
    """Host/port resolution at client construction time."""

    def test_default_host(self):
        """DEFAULT_HOST_NAME is used when no host is passed."""

        class APIClient(SpanClient):
            DEFAULT_HOST_NAME = "SomeHost"

        assert APIClient().host_name == "SomeHost"

    def test_no_host_raises(self):
        """Constructing with no host configured anywhere raises ValueError."""

        class APIClient(SpanClient):
            pass

        with pytest.raises(ValueError):
            APIClient()

    def test_default_port(self):
        """DEFAULT_PORT is appended to the supplied host name."""

        class APIClient(SpanClient):
            DEFAULT_PORT = 8080

        assert APIClient(host_name="SomeHost").host_name == "SomeHost:8080"
class TestSpanClient:
UUID1 = uuid.uuid4()
    @pytest.mark.asyncio
    async def test_context_spawn_session(self):
        """Entering the client's async context creates the session."""

        class APIClient(SpanClient):
            pass

        client = APIClient(host_name="api-host")
        assert client._session is None
        async with client:
            assert client._session is not None

    @pytest.mark.asyncio
    async def test_prop_spawn_session(self):
        """The ``session`` property lazily spawns one session and reuses it."""

        class APIClient(SpanClient):
            pass

        client = APIClient(host_name="api-host")
        assert client._session is None
        assert client.session is not None
        assert client._session is not None
        session = client.session
        async with client:
            # The context manager must reuse the already-spawned session.
            assert client._session is session
            assert client.session is session
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(
            status=201, _json={"id": str(UUID1), "first": "Harry", "last": "Potter"}
        ),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    @pytest.mark.asyncio
    async def test_basic_client(self):
        """Path param + resp_schema: endpoint loads the mocked NameID."""

        class APIClient(SpanClient):
            @handles.get("/names/{name_id}", resp_codes=201, resp_schema=NameIDSchema())
            async def name_fetch(
                self, name_id: uuid.UUID, *, req: ClientRequest
            ) -> NameID:
                req.path_params["name_id"] = name_id

        client = APIClient(host_name="api-host")
        name = await client.name_fetch(TestSpanClient.UUID1)
        assert name.id == TestSpanClient.UUID1
        assert name.first == "Harry"
        assert name.last == "Potter"

    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/changed"),
    )
    @pytest.mark.asyncio
    async def test_basic_client_endpoint_setting_alter(self):
        """Endpoint settings may be rewritten inside the handler.

        The handler sees "/original" on *both* calls and the validator
        expects "/changed" — implying endpoint settings are per-request
        (the mutation does not leak between calls).
        """

        class APIClient(SpanClient):
            @handles.get("/original")
            async def name_fetch(self, *, req: ClientRequest):
                print("test")
                assert req.endpoint_settings.endpoint == "/original"
                req.endpoint_settings.endpoint = "/changed"

        client = APIClient(host_name="api-host")
        await client.name_fetch()
        await client.name_fetch()

    @test_utils.mock_aiohttp(
        method="GET",
        resp=(
            test_utils.MockResponse(status=200, _text="response 1"),
            test_utils.MockResponse(status=201, _text="response 2"),
        ),
    )
    @pytest.mark.asyncio
    async def test_basic_client_double_response(self):
        """Successive calls consume the queued mock responses in order."""

        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=(200, 201))
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                info = await req.execute()
                return info.resp

        client = APIClient(host_name="api-host")
        resp1 = await client.name_fetch()
        assert resp1.status == 200
        assert await resp1.text() == "response 1"
        resp2 = await client.name_fetch()
        assert resp2.status == 201
        assert await resp2.text() == "response 2"

    @test_utils.mock_aiohttp(
        method="GET",
        resp=(
            test_utils.MockResponse(status=200, _text="response 1"),
            test_utils.MockResponse(status=201, _text="response 2"),
        ),
        req_validator=(
            test_utils.RequestValidator(url=f"http://api-host/names/1"),
            test_utils.RequestValidator(url=f"http://api-host/names/2"),
        ),
    )
    @pytest.mark.asyncio
    async def test_basic_client_double_validate(self):
        """Each queued validator checks the matching sequential request."""

        class APIClient(SpanClient):
            @handles.get("/names/{num}", resp_codes=(200, 201))
            async def name_fetch(
                self, num: int, *, req: ClientRequest
            ) -> aiohttp.ClientResponse:
                req.path_params["num"] = num
                info = await req.execute()
                return info.resp

        client = APIClient(host_name="api-host")
        resp1 = await client.name_fetch(1)
        assert resp1.status == 200
        assert await resp1.text() == "response 1"
        resp2 = await client.name_fetch(2)
        assert resp2.status == 201
        assert await resp2.text() == "response 2"

    @test_utils.mock_aiohttp(
        method="GET",
        resp=(
            test_utils.MockResponse(status=200, _text="response 1"),
            test_utils.MockResponse(status=400, _text="response 2"),
        ),
    )
    @pytest.mark.asyncio
    async def test_basic_client_double_response_bad_status(self):
        """A bad status on the second response surfaces StatusMismatchError."""

        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=(200, 201))
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                info = await req.execute()
                return info.resp

        client = APIClient(host_name="api-host")
        resp1 = await client.name_fetch()
        assert resp1.status == 200
        assert await resp1.text() == "response 1"
        with pytest.raises(test_utils.StatusMismatchError):
            await client.name_fetch()

    @test_utils.mock_aiohttp(
        method="GET",
        resp=(
            test_utils.MockResponse(status=200, _text="response 1"),
            test_utils.MockResponse(status=201, _text="response 2"),
        ),
        req_validator=(
            test_utils.RequestValidator(url=f"http://api-host/names/1"),
            test_utils.RequestValidator(url=f"http://api-host/names/2"),
        ),
    )
    @pytest.mark.asyncio
    async def test_basic_client_double_validate_fail_second(self):
        """A wrong path param on the second call trips URLMismatchError."""

        class APIClient(SpanClient):
            @handles.get("/names/{num}", resp_codes=(200, 201))
            async def name_fetch(
                self, num: int, *, req: ClientRequest
            ) -> aiohttp.ClientResponse:
                req.path_params["num"] = num
                info = await req.execute()
                return info.resp

        client = APIClient(host_name="api-host")
        resp1 = await client.name_fetch(1)
        assert resp1.status == 200
        assert await resp1.text() == "response 1"
        with pytest.raises(test_utils.URLMismatchError):
            await client.name_fetch(3)
VALIDATOR_TRIGGERED = False
    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(
            status=201, _json={"id": str(UUID1), "first": "Harry", "last": "Potter"}
        ),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names/{UUID1}",
            params={"limit": 10, "offset": 0},
            headers={"Accept": "application/json", "Content-Type": "application/json"},
            media={"first": "Harry", "last": "Potter"},
            custom_hook=validate_name_post,
        ),
    )
    @pytest.mark.asyncio
    async def test_req_validation_client(self):
        """Full request validation: URL, params, headers, body and hook."""

        class APIClient(SpanClient):
            @handles.post(
                "/names/{name_id}",
                req_schema=NameIDSchema(exclude=("id",)),
                query_params={"limit": 10, "offset": 0},
                headers={"Accept": "application/json"},
                resp_codes=201,
                resp_schema=NameIDSchema(),
            )
            async def name_fetch(
                self, name_id: uuid.UUID, *, req: ClientRequest
            ) -> NameID:
                req.media = Name("Harry", "Potter")
                req.path_params["name_id"] = name_id

        client = APIClient(host_name="api-host")
        # The custom hook flips VALIDATOR_TRIGGERED, proving it really ran.
        assert TestSpanClient.VALIDATOR_TRIGGERED is False
        name = await client.name_fetch(TestSpanClient.UUID1)
        assert name.id == TestSpanClient.UUID1
        assert name.first == "Harry"
        assert name.last == "Potter"
        assert TestSpanClient.VALIDATOR_TRIGGERED is True

    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=201),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    @pytest.mark.asyncio
    async def test_result_no_data(self):
        """Without a resp_schema, the raw response object is returned."""

        class APIClient(SpanClient):
            @handles.post("/names/{name_id}", resp_codes=201)
            async def name_fetch(
                self, name_id: uuid.UUID, *, req: ClientRequest
            ) -> aiohttp.ClientResponse:
                req.path_params["name_id"] = name_id

        client = APIClient(host_name="api-host")
        resp = await client.name_fetch(TestSpanClient.UUID1)
        assert resp.status == 201

    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=201),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names"),
    )
    @pytest.mark.asyncio
    async def test_result_returned(self):
        """A handler's explicit return value takes precedence."""

        class APIClient(SpanClient):
            @handles.post("/names", resp_codes=201)
            async def name_fetch(
                self, name_id: uuid.UUID, *, req: ClientRequest
            ) -> str:
                await req.execute()
                return "custom"

        client = APIClient(host_name="api-host")
        resp = await client.name_fetch(TestSpanClient.UUID1)
        assert resp == "custom"

    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(
            status=200, _json={"id": str(UUID1), "first": "Hermione", "last": "Granger"}
        ),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    @pytest.mark.asyncio
    async def test_update_object(self):
        """``req.update_obj`` mutates the passed object in place from the response."""

        class APIClient(SpanClient):
            @handles.get(
                "/names/{name_id}",
                resp_codes=200,
                resp_schema=NameIDSchema(load_dataclass=False),
            )
            async def name_fetch(self, name: NameID, *, req: ClientRequest) -> NameID:
                req.path_params["name_id"] = name.id
                req.update_obj = name

        client = APIClient(host_name="api-host")
        name = NameID(TestSpanClient.UUID1, "Harry", "Potter")
        name_returned = await client.name_fetch(name)
        # The very same object is returned, with its fields updated.
        assert name is name_returned
        assert name.id == TestSpanClient.UUID1
        assert name.first == "Hermione"
        assert name.last == "Granger"

    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(
            status=200, _json={"id": str(UUID1), "first": "Hermione", "last": "Granger"}
        ),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    @pytest.mark.asyncio
    async def test_update_object_custom(self):
        """A ``data_updater`` callback customizes how update_obj is mutated."""

        def custom_updater(current: Name, new: dict):
            current.id = new["id"]
            current.first = new["first"] + "-custom"
            current.last = new["last"] + "-custom"

        class APIClient(SpanClient):
            @handles.get(
                "/names/{name_id}",
                resp_codes=200,
                resp_schema=NameIDSchema(load_dataclass=False),
                data_updater=custom_updater,
            )
            async def name_fetch(self, name: NameID, *, req: ClientRequest) -> NameID:
                req.path_params["name_id"] = name.id
                req.update_obj = name

        client = APIClient(host_name="api-host")
        name = NameID(TestSpanClient.UUID1, "Harry", "Potter")
        name_returned = await client.name_fetch(name)
        assert name is name_returned
        assert name.id == TestSpanClient.UUID1
        assert name.first == "Hermione-custom"
        assert name.last == "Granger-custom"
@test_utils.mock_aiohttp(
method="POST",
resp=test_utils.MockResponse(status=200),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names", media="Harry Potter"
),
)
@pytest.mark.asyncio
async def test_send_implicit_text(self):
class APIClient(SpanClient):
@handles.post("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
req.media = "<NAME>"
client = APIClient(host_name="api-host")
resp = await client.name_fetch()
assert resp.status == 200
    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", media=b"Some Bin Data"
        ),
    )
    @pytest.mark.asyncio
    async def test_send_unknown_mimetype(self):
        """Raw bytes can be sent unchanged under an unknown mimetype."""

        class APIClient(SpanClient):
            @handles.post("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = b"Some Bin Data"
                req.mimetype_send = "application/unknown"

        client = APIClient(host_name="api-host")
        resp = await client.name_fetch()
        assert resp.status == 200

    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", media=b"Some Bin Data"
        ),
    )
    @pytest.mark.asyncio
    async def test_send_unknown_mimetype_error(self):
        """Non-bytes media under an unknown mimetype cannot be encoded."""

        class APIClient(SpanClient):
            @handles.post("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = {"key": "value"}
                req.mimetype_send = "application/unknown"

        client = APIClient(host_name="api-host")
        with pytest.raises(ContentTypeUnknownError):
            await client.name_fetch()

    @test_utils.mock_aiohttp(
        method="POST",
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names",
            media={"first": "Harry", "last": "Potter"},
            headers={"Content-Type": MimeType.YAML.value},
        ),
        resp=test_utils.MockResponse(
            status=200, _yaml={"first": "Ron", "last": "Weasley"}
        ),
    )
    @pytest.mark.asyncio
    async def test_yaml_round_trip_schema(self):
        """Schema-dumped media and schema-loaded response over YAML."""

        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(),
                resp_codes=200,
                resp_schema=NameSchema(),
            )
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = Name("Harry", "Potter")
                req.mimetype_send = MimeType.YAML

        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == Name("Ron", "Weasley")

    @test_utils.mock_aiohttp(
        method="POST",
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names",
            media={"first": "Harry", "last": "Potter"},
            headers={"Content-Type": MimeType.BSON.value},
        ),
        resp=test_utils.MockResponse(
            status=200, _bson={"first": "Ron", "last": "Weasley"}
        ),
    )
    @pytest.mark.asyncio
    async def test_bson_round_trip_schema(self):
        """Schema-dumped media and schema-loaded response over BSON."""

        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(),
                resp_codes=200,
                resp_schema=NameSchema(),
            )
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = Name("Harry", "Potter")
                req.mimetype_send = MimeType.BSON

        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == Name("Ron", "Weasley")

    @test_utils.mock_aiohttp(
        method="POST",
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names",
            media=[
                {"first": "Harry", "last": "Potter"},
                {"first": "Draco", "last": "Malfoy"},
            ],
            headers={"Content-Type": MimeType.BSON.value},
        ),
        resp=test_utils.MockResponse(
            status=200,
            _bson=[
                {"first": "Ron", "last": "Weasley"},
                {"first": "Hermione", "last": "Granger"},
            ],
        ),
    )
    @pytest.mark.asyncio
    async def test_bson_list_round_trip_schema(self):
        """List payloads (``many=True`` schemas) round-trip over BSON."""

        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(many=True),
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                req.media = [Name("Harry", "Potter"), Name("Draco", "Malfoy")]
                req.mimetype_send = MimeType.BSON

        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == [Name("Ron", "Weasley"), Name("Hermione", "Granger")]

    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(
            status=200,
            _content=yaml.safe_dump({"first": "Ron", "last": "Weasley"}).encode(),
        ),
    )
    @pytest.mark.asyncio
    async def test_sniff_return_content(self):
        """Response content is sniffed/decoded even without a Content-Type."""

        class APIClient(SpanClient):
            @handles.post("/names", resp_codes=200, resp_schema=NameSchema())
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                pass

        client = APIClient(host_name="api-host")
        response = await client.name_fetch()
        assert response == Name("Ron", "Weasley")

    @test_utils.mock_aiohttp(
        method="POST",
        resp=test_utils.MockResponse(status=200),
        req_validator=test_utils.RequestValidator(
            headers={"Accept": MimeType.YAML.value}
        ),
    )
    @pytest.mark.asyncio
    async def test_accept_mimetype(self):
        """``mimetype_accept`` sets the outgoing Accept header."""

        class APIClient(SpanClient):
            @handles.post("/names", mimetype_accept=MimeType.YAML, resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
                pass

        client = APIClient(host_name="api-host")
        await client.name_fetch()
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged(self):
        """``handles.paged`` turns the endpoint into an async item generator."""

        class APIClient(SpanClient):
            @handles.paged(limit=2)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                pass

        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch():
            print(name)
            names.append(name)
        # Two pages of two items each, flattened in order.
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]

    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.BSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            )
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200, _bson=[{"first": "Harry", "last": "Potter"}]
            )
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_single_bson(self):
        """A single under-full BSON page ends iteration after one item."""

        class APIClient(SpanClient):
            @handles.paged(limit=2)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.BSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                pass

        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch():
            print(name)
            names.append(name)
        assert names == [Name("Harry", "Potter")]
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_limit_override(self):
        """``req.paging.limit`` set in the handler overrides the decorator's."""

        class APIClient(SpanClient):
            @handles.paged(limit=50)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, limit: int, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                # Validators above expect paging-limit=2, not the default 50.
                req.paging.limit = limit

        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch(2):
            print(name)
            names.append(name)
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]

    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 4, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_offset_override(self):
        """``req.paging.offset_start`` shifts the first requested page."""

        class APIClient(SpanClient):
            @handles.paged(limit=50)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, skip: int, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                # Validators above expect the first page at offset 2.
                req.paging.offset_start = skip
                req.paging.limit = 2

        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch(2):
            print(name)
            names.append(name)
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]
    @test_utils.mock_aiohttp(
        method="GET",
        req_validator=[
            # Validator 1
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 0, "paging-limit": 2},
            ),
            # Validator 2
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 2, "paging-limit": 2},
            ),
            # Validator 3
            test_utils.RequestValidator(
                headers={"Accept": MimeType.JSON.value},
                params={"paging-offset": 4, "paging-limit": 2},
            ),
        ],
        resp=[
            # Response 1
            test_utils.MockResponse(
                status=200,
                headers={"paging-next": "some_page"},
                _json=[
                    {"first": "Harry", "last": "Potter"},
                    {"first": "Ron", "last": "Weasley"},
                ],
            ),
            # Response 2
            test_utils.MockResponse(
                status=200,
                _json=[
                    {"first": "Hermione", "last": "Granger"},
                    {"first": "Draco", "last": "Malfoy"},
                ],
            ),
            # Response 3: ends iteration via a NothingToReturn error page.
            test_utils.MockResponse(
                status=200,
                _exception=errors_api.NothingToReturnError(
                    message="No Items to return", error_id=uuid.uuid4()
                ),
            ),
        ],
    )
    @pytest.mark.asyncio
    async def test_paged_manual(self):
        """A handler may execute the request itself and return the page items."""

        class APIClient(SpanClient):
            @handles.paged(limit=2)
            @handles.get(
                "/names",
                mimetype_accept=MimeType.JSON,
                resp_codes=200,
                resp_schema=NameSchema(many=True),
            )
            async def name_fetch(
                self, *, req: ClientRequest
            ) -> AsyncGenerator[Name, None]:
                result = await req.execute()
                return result.loaded

        client = APIClient(host_name="api-host")
        names = list()
        async for name in client.name_fetch():
            names.append(name)
        assert names == [
            Name("Harry", "Potter"),
            Name("Ron", "Weasley"),
            Name("Hermione", "Granger"),
            Name("Draco", "Malfoy"),
        ]
@test_utils.mock_aiohttp(
method="GET",
req_validator=[
# Validator 1
test_utils.RequestValidator(
headers={"Accept": MimeType.JSON.value},
params={"paging-offset": 0, "paging-limit": 2},
),
# Validator 2
test_utils.RequestValidator(
headers={"Accept": MimeType.JSON.value},
params={"paging-offset": 2, "paging-limit": 2},
),
],
resp=[
# Response 1
test_utils.MockResponse(status=200, headers={"paging-next": "some_page"}),
# Response 2
test_utils.MockResponse(status=200),
],
)
@pytest.mark.asyncio
async def test_paged_empty_body(self):
class APIClient(SpanClient):
@handles.paged(limit=2)
@handles.get("/names", mimetype_accept=MimeType.JSON, resp_codes=200)
async def name_fetch(
self, *, req: ClientRequest
) -> AsyncGenerator[Name, None]:
pass
client = APIClient(host_name="api-host")
responses = list()
async for resp in client.name_fetch():
assert isinstance(resp, test_utils.MockResponse)
responses.append(responses)
assert len(responses) == 2
    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(status=201),
        req_validator=test_utils.RequestValidator(url="http://api-host/names"),
    )
    @pytest.mark.asyncio
    async def test_custom_hook_manipulate_resp(self, get_config: MockConfig = None):
        """A validator hook installed at test time can rewrite the response."""

        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=201, resp_schema=NameSchema())
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                pass

        def custom_hook(validator: RequestValidator, response: MockResponse):
            response.mock_json({"first": "Harry", "last": "Potter"})

        # get_config is injected by the mock decorator; attach the hook late.
        get_config.req_validator[0].custom_hook = custom_hook
        client = APIClient(host_name="api-host")
        name = await client.name_fetch()
        assert name.first == "Harry"
        assert name.last == "Potter"

    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="POST",
        resp=MockResponse(201),
        req_validator=RequestValidator(
            content_type=MimeType.JSON, headers={"Content-Type": "application/json"}
        ),
    )
    async def test_mock_req_validate_data_type_w_schema(
        self, post_config: MockConfig = None
    ):
        """Hooks can round-trip the request body through the schema."""
        test_name = Name("Harry", "Potter")

        def post_hook(validator: RequestValidator, resp: MockResponse):
            data = NameSchema().load(validator.req_data_decoded)
            assert data == test_name
            mock_data = NameSchema().dump(test_name)
            resp.mock_json(mock_data)

        post_config.req_validator[0].custom_hook = post_hook

        class APIClient(SpanClient):
            @handles.post(
                "/names",
                req_schema=NameSchema(),
                resp_codes=201,
                resp_schema=NameSchema(),
            )
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.media = Name("Harry", "Potter")

        client = APIClient(host_name="api-host")
        name = await client.name_fetch()
        print(name)

    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(),
        req_validator=RequestValidator(url="http://api-host:8080/name"),
    )
    async def test_custom_port_url(self):
        """An explicit ``port`` argument is appended to the request URL."""

        class APIClient(SpanClient):
            @handles.get("/name")
            async def name_fetch(self, *, req: ClientRequest) -> None:
                pass

        client = APIClient(host_name="api-host", port=8080)
        _ = await client.name_fetch()

    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(),
        req_validator=RequestValidator(url="http://api-host:8080/name"),
    )
    async def test_custom_default_port_url(self):
        """A class-level DEFAULT_PORT is appended to the request URL."""

        class APIClient(SpanClient):
            DEFAULT_PORT = 8080

            @handles.get("/name")
            async def name_fetch(self, *, req: ClientRequest) -> None:
                pass

        client = APIClient(host_name="api-host")
        _ = await client.name_fetch()

    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/endpoint",
            params={"project.field1": "1", "project.field2": "0"},
        ),
    )
    @pytest.mark.asyncio
    async def test_projection(self):
        """``req.projection`` entries become ``project.*`` query params."""

        class APIClient(SpanClient):
            @handles.get("/endpoint")
            async def name_fetch(self, *, req: ClientRequest):
                req.projection["field1"] = 1
                req.projection["field2"] = 0

        client = APIClient(host_name="api-host")
        await client.name_fetch()

    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/endpoint",
            params={"project.field1": "0", "project.field2": "1"},
        ),
    )
    @pytest.mark.asyncio
    async def test_projection_from_user(self):
        """A caller-supplied dict can be assigned wholesale to req.projection."""

        class APIClient(SpanClient):
            @handles.get("/endpoint")
            async def name_fetch(
                self, projection: Dict[str, int], *, req: ClientRequest
            ):
                req.projection = projection

        client = APIClient(host_name="api-host")
        user_projection = {"field1": 0, "field2": 1}
        await client.name_fetch(user_projection)

    @test_utils.mock_aiohttp(
        method="GET",
        resp=test_utils.MockResponse(),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/endpoint",
            params={"project.field1": "1", "project.field2": "0",},
        ),
    )
    @pytest.mark.asyncio
    async def test_projection_validation_error(self):
        """A projection missing an expected field fails params validation."""

        class APIClient(SpanClient):
            @handles.get("/endpoint")
            async def name_fetch(self, *, req: ClientRequest):
                # field2 deliberately omitted; the validator expects it.
                req.projection["field1"] = 1

        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.ParamsMismatchError):
            await client.name_fetch()
class TestClientReqValidationErrors:
    # Stable fixture UUID shared by the validator URLs below.
    UUID1 = uuid.uuid4()

    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(url=f"http://api-host/names/{UUID1}"),
    )
    async def test_url_mismatch(self):
        """A wrong path param produces URLMismatchError."""

        class APIClient(SpanClient):
            @handles.get("/names/{name_id}", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.path_params["name_id"] = "wrong_id"

        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.URLMismatchError):
            await client.name_fetch()

    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", params={"offset": 0}
        ),
    )
    async def test_params_key_missing(self):
        """An expected query param that is never sent fails validation."""

        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                pass

        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.ParamsMismatchError):
            await client.name_fetch()

    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", params={"offset": 0}
        ),
    )
    async def test_params_value_wrong(self):
        """A query param sent with the wrong value fails validation."""

        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.query_params["offset"] = 10

        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.ParamsMismatchError):
            await client.name_fetch()

    @pytest.mark.asyncio
    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", headers={"Accept": "application/bson"}
        ),
    )
    async def test_headers_key_missing(self):
        """An expected header that is never sent fails validation."""

        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                pass

        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.HeadersMismatchError):
            await client.name_fetch()

    @test_utils.mock_aiohttp(
        method="GET",
        resp=MockResponse(200),
        req_validator=test_utils.RequestValidator(
            url=f"http://api-host/names", headers={"Accept": "application/bson"}
        ),
    )
    @pytest.mark.asyncio
    async def test_headers_value_wrong(self):
        """A header sent with the wrong value fails validation."""

        class APIClient(SpanClient):
            @handles.get("/names", resp_codes=200)
            async def name_fetch(self, *, req: ClientRequest) -> NameID:
                req.headers["Accept"] = "application/json"

        client = APIClient(host_name="api-host")
        with pytest.raises(test_utils.HeadersMismatchError):
            await client.name_fetch()
@test_utils.mock_aiohttp(
method="GET",
resp=MockResponse(200),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names", headers={"Accept": "application/bson"}
),
)
@pytest.mark.asyncio
async def test_media_value_wrong(self):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> NameID:
req.headers["Accept"] = "application/json"
client = APIClient(host_name="api-host")
with pytest.raises(test_utils.HeadersMismatchError):
await client.name_fetch()
@test_utils.mock_aiohttp(
method="GET",
resp=MockResponse(200),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names", media={"first": "Harry", "last": "Potter"}
),
)
@pytest.mark.asyncio
async def test_media_value_wrong(self):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> NameID:
req.media = {"first": "Harry", "last": "Granger"}
client = APIClient(host_name="api-host")
with pytest.raises(test_utils.DataValidationError):
await client.name_fetch()
@test_utils.mock_aiohttp(
method="GET",
resp=MockResponse(200),
req_validator=test_utils.RequestValidator(
url=f"http://api-host/names", custom_hook=validate_name_post
),
)
@pytest.mark.asyncio
async def test_media_custom_validation_failure(self):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> NameID:
req.media = {"first": "Harry", "last": "Granger"}
client = APIClient(host_name="api-host")
with pytest.raises(AssertionError):
await client.name_fetch()
@test_utils.mock_aiohttp(
method="POST",
resp=MockResponse(200),
req_validator=test_utils.RequestValidator(
headers={"Content-Type": MimeType.YAML.value}
),
)
@pytest.mark.asyncio
async def test_media_type_validation_error(self):
class APIClient(SpanClient):
@handles.post("/names", mimetype_send=MimeType.YAML, resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> NameID:
req.media = json.dumps({"first": "Harry", "last": "Granger"})
client = APIClient(host_name="api-host")
with pytest.raises(test_utils.DataTypeValidationError):
await client.name_fetch()
@pytest.mark.asyncio
@test_utils.mock_aiohttp(
method="GET",
resp=MockResponse(),
req_validator=test_utils.RequestValidator(url="http://api-host/names"),
)
async def test_mock_config_pass(self, *, get_config: MockConfig = None):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=201)
async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
pass
client = APIClient(host_name="api-host")
get_config.resp[0].status = 201
r = await client.name_fetch()
assert r.status == 201
@pytest.mark.asyncio
@test_utils.mock_aiohttp(
method="GET",
resp=[MockResponse(200), MockResponse(201)],
req_validator=[
test_utils.RequestValidator(content_type=MimeType.JSON),
test_utils.RequestValidator(content_type=MimeType.JSON),
],
)
async def test_mock_config_pass_validator(self, *, get_config: MockConfig = None):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=(200, 201))
async def name_fetch(
self, media, *, req: ClientRequest
) -> aiohttp.ClientResponse:
req.media = media
client = APIClient(host_name="api-host")
data1 = {"key": "value1"}
data2 = {"key": "value2"}
get_config.resp[0].mock_json(data1)
get_config.resp[1].mock_json(data2)
get_config.req_validator[0].media = data1
get_config.req_validator[1].media = data2
data_return = await client.name_fetch(data1)
assert data_return == data1
data_return = await client.name_fetch(data2)
assert data_return == data2
@pytest.mark.asyncio
@test_utils.mock_aiohttp(method="GET", resp=MockResponse(200))
@test_utils.mock_aiohttp(
method="POST",
resp=MockResponse(201),
req_validator=RequestValidator(media={"first": "harry", "last": "potter"}),
)
async def test_mock_config_multiple(
self, *, get_config: MockConfig = None, post_config: MockConfig = None
):
class APIClient(SpanClient):
@handles.get("/names", resp_codes=200)
async def name_fetch(self, *, req: ClientRequest) -> aiohttp.ClientResponse:
pass
@handles.post("/names", resp_codes=201)
async def name_create(
self, media, *, req: ClientRequest
) -> aiohttp.ClientResponse:
req.media = media
client = APIClient(host_name="api-host")
assert get_config.resp[0].status == 200
assert post_config.resp[0].status == 201
r = await client.name_fetch()
print(r)
assert r.status == 200
r = await client.name_create({"first": "harry", "last": "potter"})
print(r)
assert r.status == 201
await client.session.close()
@pytest.mark.asyncio
@test_utils.mock_aiohttp(
method="POST", resp=MockResponse(200), req_validator=RequestValidator()
)
async def test_custom_handler(self, post_config: MockConfig = None):
invoked_encode = dict(set=False)
invoked_decode = dict(set=False)
def csv_encode(data: List[dict]) -> bytes:
invoked_encode["set"] = True
encoded = io.StringIO()
headers = list(data[0].keys())
writer = csv.DictWriter(encoded, fieldnames=headers)
writer.writeheader()
writer.writerows(data)
return encoded.getvalue().encode()
def csv_decode(data: bytes) -> List[Dict[str, Any]]:
invoked_decode["set"] = True
csv_file = io.StringIO(data.decode())
reader = csv.DictReader(csv_file)
return [row for row in reader]
class APIClient(SpanClient):
register_mimetype("text/csv", encoder=csv_encode, decoder=csv_decode)
@handles.post("/csv", mimetype_send="text/csv")
async def csv_roundtrip(
self, csv_data: List[Dict[str, Any]], *, req: ClientRequest
) -> List[Dict[str, Any]]:
req.media = csv_data
data = [{"key": "value1"}, {"key": "value2"}]
post_config.req_validator[0].req_data = copy.copy(data)
post_config.resp[0].content_type = "text/csv"
post_config.resp[0].mock_content(csv_encode(data))
client = APIClient(host_name="api-host")
async with client:
resp_data = await client.csv_roundtrip(data)
assert data == resp_data
assert invoked_decode["set"] is True
assert invoked_encode["set"] is True
|
en
| 0.373491
|
# Validator 1 # Validator 2 # Response 1 # Response 2 # Validator 1 # Response 1 # Validator 1 # Validator 2 # Response 1 # Response 2 # Validator 1 # Validator 2 # Response 1 # Response 2 # Validator 1 # Validator 2 # Validator 3 # Response 1 # Response 2 # Response 3 # Validator 1 # Validator 2 # Response 1 # Response 2
| 2.000476
| 2
|
packages/news_classifier/news_classifier/models/pipeline.py
|
marco-cardoso/ML-News-article-classification-architecture
| 0
|
6629560
|
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from news_classifier.config import variables
from news_classifier.features import DropFeatures
# Text-classification pipeline exposed at module level so callers can import
# and fit it: TF-IDF over the primary text column, then a linear SVM.
category_classifier = Pipeline(
    steps=[
        ('column_transformer', ColumnTransformer(
            transformers=[
                # Vectorize only the first configured text feature; any other
                # input columns fall to ColumnTransformer's default remainder
                # handling (drop) — NOTE(review): confirm that is intended.
                ('tfid', TfidfVectorizer(), variables.TEXT_FEATURES[0])
            ]
        )),
        # Linear SVM: squared hinge loss, L2 penalty, mild regularization.
        ('clf', LinearSVC(C=0.5, dual=True, loss='squared_hinge', penalty='l2', tol=0.001))
    ]
)
|
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from news_classifier.config import variables
from news_classifier.features import DropFeatures
# Text-classification pipeline exposed at module level so callers can import
# and fit it: TF-IDF over the primary text column, then a linear SVM.
category_classifier = Pipeline(
    steps=[
        ('column_transformer', ColumnTransformer(
            transformers=[
                # Vectorize only the first configured text feature; any other
                # input columns fall to ColumnTransformer's default remainder
                # handling (drop) — NOTE(review): confirm that is intended.
                ('tfid', TfidfVectorizer(), variables.TEXT_FEATURES[0])
            ]
        )),
        # Linear SVM: squared hinge loss, L2 penalty, mild regularization.
        ('clf', LinearSVC(C=0.5, dual=True, loss='squared_hinge', penalty='l2', tol=0.001))
    ]
)
|
none
| 1
| 2.412891
| 2
|
|
xnote/app/views_calendar.py
|
sebastianczech/Xnote
| 0
|
6629561
|
<reponame>sebastianczech/Xnote<gh_stars>0
from datetime import datetime, timedelta
from django.shortcuts import render
from django.utils import dateparse
from googleapiclient.discovery import build
from . import api_google
def calendar(request):
    """Render a year view of upcoming events from all owned Google calendars.

    Fetches the user's calendar list via the Google Calendar API, collects the
    events of the next 365 days from every owned calendar, flattens them into
    a date-sorted list and renders ``app/calendar.html``.
    """
    # Get credential for Google API
    credential = api_google.api_google_credential()
    # Build a service object for the Calendar API
    service = build("calendar", "v3", credentials=credential)
    # Collection of calendars in the user's calendar list (owned only).
    # Renamed from ``calendar`` — the old local shadowed this view function.
    calendar_resource = service.calendarList()
    calendarList = calendar_resource.list(minAccessRole='owner').execute()
    # Accumulators: raw API events, plus simplified dicts for the template.
    eventsList = []
    calendarEvents = []
    # Time window: from now until one year ahead ('Z' indicates UTC time).
    now = datetime.utcnow().isoformat() + 'Z'
    year = (datetime.utcnow() + timedelta(days=365)).isoformat() + 'Z'
    # For each calendar, get events using the prepared window.
    for calendarItem in calendarList.get('items', []):
        result = service.events().list(
            calendarId=calendarItem.get('id'),
            maxResults=250,
            singleEvents=True,
            timeMin=now,
            timeMax=year,
            orderBy='startTime').execute()
        events = result.get('items', [])
        # Store events in the list with all events from all calendars.
        eventsList.extend(events)
        # Reduce each event to the attributes the template needs.
        for event in events:
            start = event.get("start")
            # All-day events carry "date"; timed events carry "dateTime".
            if start.get("date"):
                event_date = start.get("date")
                event_time = "- - -"
            else:
                parsed = dateparse.parse_datetime(start.get("dateTime"))
                event_date = parsed.date().isoformat()
                event_time = parsed.time().isoformat()
            calendarEvents.append({
                "calendar": calendarItem.get("summary"),
                "color": calendarItem.get("backgroundColor"),
                "date": event_date,
                "time": event_time,
                "event": event.get("summary"),
            })
    # Sort the flattened event list chronologically (ISO dates sort lexically).
    sortedCalendarEvents = sorted(calendarEvents, key=lambda k: k['date'])
    # Render view with the list of calendars and events.
    return render(request, 'app/calendar.html', {
        'calendarList': calendarList,
        'eventsList': eventsList,
        'calendarEvents': sortedCalendarEvents
    })
|
from datetime import datetime, timedelta
from django.shortcuts import render
from django.utils import dateparse
from googleapiclient.discovery import build
from . import api_google
def calendar(request):
    """Django view: render upcoming events (next 365 days) from every owned
    Google calendar, flattened into one date-sorted list."""
    # Get credential for Google API
    credential = api_google.api_google_credential()
    # Build a service object for the API that you want to call
    service = build("calendar", "v3", credentials=credential)
    # Make requests to the API service using the interface provided by the service object.
    # Get collection of calendars in the user's calendar list
    # NOTE(review): this local shadows the view function name ``calendar``.
    calendar = service.calendarList()
    # Use calendarList, which method list() returns entries on the user's calendar list
    calendarList = calendar.list(minAccessRole='owner').execute()
    # Prepare list for events
    eventsList = []
    calendarEvents = []
    # Prepare filter while getting events from calendars
    now = datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    year = (datetime.utcnow() + timedelta(days=365)).isoformat() + 'Z'  # 'Z' indicates UTC time
    # For each calendar get events using prepared filters
    for calendarItem in list(calendarList.get('items', [])):
        result = service.events().list(
            calendarId=calendarItem.get('id'),
            maxResults=250,
            singleEvents=True,
            timeMin=now,
            timeMax=year,
            orderBy='startTime').execute()
        events = result.get('items', [])
        # Store events in list with all events from all calendars
        eventsList.extend(events)
        # Prepare dedicated list with selected attributes from calendar and events
        for event in events:
            calendarEvent = {}
            calendarEvent["calendar"] = calendarItem.get("summary")
            calendarEvent["color"] = calendarItem.get("backgroundColor")
            # All-day events carry "date"; timed events carry "dateTime".
            calendarEvent["date"] = event.get("start").get("date") if event.get("start").get("date") else dateparse.parse_datetime(event.get("start").get("dateTime")).date().isoformat()
            calendarEvent["time"] = dateparse.parse_datetime(event.get("start").get("dateTime")).time().isoformat() if event.get("start").get("dateTime") else "- - -"
            calendarEvent["event"] = event.get("summary")
            calendarEvents.append(calendarEvent)
    # Sort dedicated list by date (ISO-8601 strings sort chronologically)
    sortedCalendarEvents = sorted(calendarEvents, key=lambda k: k['date'])
    # Render view with the list of calendars
    return render(request, 'app/calendar.html', {
        'calendarList': calendarList,
        'eventsList': eventsList,
        'calendarEvents': sortedCalendarEvents
    })
|
en
| 0.768695
|
# Get credential for Google API # Build a service object for the API that you want to call # Make requests to the API service using the interface provided by the service object. # Get collection of calendars in the user's calendar list # Use calendarList, which method list() returns entries on the user's calendar list # utils.console("List of calendars from Google API:") # utils.object2json(calendarList) # Prepare list for events # Prepare filter while getting events from calendars # 'Z' indicates UTC time # 'Z' indicates UTC time # For each calendar get events using prepared filters # Store events in list with all events from all calendars # Prepare dedicated list with selected attributes from calendar and events # Sort dedicated list by date # utils.console("List of events from Google API:") # utils.object2json(eventsList) # utils.console("List of calendar events created from calendar and events:") # utils.object2json(calendarEvents) # Render view with the list of calendars
| 2.876917
| 3
|
code/ch07/perceptron_pos.py
|
imjaden/Introduction-NLP
| 1,628
|
6629562
|
from pyhanlp import *
import zipfile
import os
from pyhanlp.static import download, remove_file, HANLP_DATA_PATH
def test_data_path():
    """Return the test-data directory ($HANLP_DATA_PATH/test), creating it
    on first use."""
    path = os.path.join(HANLP_DATA_PATH, 'test')
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
## Verify the corpus exists locally; download it automatically if missing.
def ensure_data(data_name, data_url):
    """Ensure *data_name* is present under the test-data directory.

    Downloads from *data_url* on first use; ``.zip`` archives are extracted
    into the test-data directory and the archive file removed.  Returns the
    local path of the data.
    """
    root = test_data_path()
    target = os.path.join(root, data_name)
    if os.path.exists(target):
        return target
    is_zip = data_url.endswith('.zip')
    archive_path = target + '.zip' if is_zip else target
    download(data_url, archive_path)
    if is_zip:
        with zipfile.ZipFile(archive_path, "r") as archive:
            archive.extractall(root)
        remove_file(archive_path)
    return target
## Locate the PKU corpus (downloaded on first use).
PKU98 = ensure_data("pku98", "http://file.hankcs.com/corpus/pku98.zip")
PKU199801 = os.path.join(PKU98, '199801.txt')
PKU199801_TRAIN = os.path.join(PKU98, '199801-train.txt')
PKU199801_TEST = os.path.join(PKU98, '199801-test.txt')
POS_MODEL = os.path.join(PKU98, 'pos.bin')  # path where the trained POS model is saved
NER_MODEL = os.path.join(PKU98, 'ner.bin')  # path for a trained NER model (unused here)
## ===============================================
## Perceptron part-of-speech tagging starts here.
AbstractLexicalAnalyzer = JClass('com.hankcs.hanlp.tokenizer.lexical.AbstractLexicalAnalyzer')
PerceptronSegmenter = JClass('com.hankcs.hanlp.model.perceptron.PerceptronSegmenter')
POSTrainer = JClass('com.hankcs.hanlp.model.perceptron.POSTrainer')
PerceptronPOSTagger = JClass('com.hankcs.hanlp.model.perceptron.PerceptronPOSTagger')
def train_perceptron_pos(corpus):
    """Train a perceptron POS model on *corpus*, demo it, and return the tagger."""
    trainer = POSTrainer()
    trainer.train(corpus, POS_MODEL)  # train the perceptron model
    tagger = PerceptronPOSTagger(POS_MODEL)  # load the trained model
    analyzer = AbstractLexicalAnalyzer(PerceptronSegmenter(), tagger)  # lexical analyzer combining segmentation and POS tagging
    print(analyzer.analyze("李狗蛋的希望是希望上学"))  # segmentation + POS tagging
    print(analyzer.analyze("李狗蛋的希望是希望上学").translateLabels())  # translate POS tags into readable labels
    return tagger
if __name__ == '__main__':
    # Script entry point: train on the PKU 1998-01 training split.
    train_perceptron_pos(PKU199801_TRAIN)
|
from pyhanlp import *
import zipfile
import os
from pyhanlp.static import download, remove_file, HANLP_DATA_PATH
def test_data_path():
    """
    Get the test-data path, located at $root/data/test; the root directory
    is set by the HanLP configuration file.
    :return: the test-data directory, created if it did not exist
    """
    data_path = os.path.join(HANLP_DATA_PATH, 'test')
    if not os.path.isdir(data_path):
        os.mkdir(data_path)
    return data_path
## Verify the corpus exists locally; download it automatically if missing.
def ensure_data(data_name, data_url):
    """Ensure *data_name* is present under the test-data directory.

    Downloads from *data_url* on first use; ``.zip`` archives are extracted
    and the archive file removed.  Returns the local path of the data.
    """
    root_path = test_data_path()
    dest_path = os.path.join(root_path, data_name)
    if os.path.exists(dest_path):
        return dest_path
    if data_url.endswith('.zip'):
        dest_path += '.zip'
    download(data_url, dest_path)
    if data_url.endswith('.zip'):
        with zipfile.ZipFile(dest_path, "r") as archive:
            archive.extractall(root_path)
        remove_file(dest_path)
        # Strip the ".zip" suffix to return the extracted directory path.
        dest_path = dest_path[:-len('.zip')]
    return dest_path
## Locate the PKU corpus (downloaded on first use).
PKU98 = ensure_data("pku98", "http://file.hankcs.com/corpus/pku98.zip")
PKU199801 = os.path.join(PKU98, '199801.txt')
PKU199801_TRAIN = os.path.join(PKU98, '199801-train.txt')
PKU199801_TEST = os.path.join(PKU98, '199801-test.txt')
POS_MODEL = os.path.join(PKU98, 'pos.bin')  # path where the trained POS model is saved
NER_MODEL = os.path.join(PKU98, 'ner.bin')  # path for a trained NER model (unused here)
## ===============================================
## Perceptron part-of-speech tagging starts here.
AbstractLexicalAnalyzer = JClass('com.hankcs.hanlp.tokenizer.lexical.AbstractLexicalAnalyzer')
PerceptronSegmenter = JClass('com.hankcs.hanlp.model.perceptron.PerceptronSegmenter')
POSTrainer = JClass('com.hankcs.hanlp.model.perceptron.POSTrainer')
PerceptronPOSTagger = JClass('com.hankcs.hanlp.model.perceptron.PerceptronPOSTagger')
def train_perceptron_pos(corpus):
    """Train a perceptron POS model on *corpus*, demo it, and return the tagger."""
    trainer = POSTrainer()
    trainer.train(corpus, POS_MODEL)  # train the perceptron model
    tagger = PerceptronPOSTagger(POS_MODEL)  # load the trained model
    analyzer = AbstractLexicalAnalyzer(PerceptronSegmenter(), tagger)  # lexical analyzer combining segmentation and POS tagging
    print(analyzer.analyze("李狗蛋的希望是希望上学"))  # segmentation + POS tagging
    print(analyzer.analyze("李狗蛋的希望是希望上学").translateLabels())  # translate POS tags into readable labels
    return tagger
if __name__ == '__main__':
    # Script entry point: train on the PKU 1998-01 training split.
    train_perceptron_pos(PKU199801_TRAIN)
|
zh
| 0.959983
|
获取测试数据路径,位于$root/data/test,根目录由配置文件指定。 :return: ## 验证是否存在 MSR语料库,如果没有自动下载 ## 指定 PKU 语料库 ## =============================================== ## 以下开始 感知机 词性标注 # 训练感知机模型 # 加载 # 构造词法分析器,与感知机分词器结合,能同时进行分词和词性标注。 # 分词+词性标注 # 对词性进行翻译
| 2.651678
| 3
|
regression/linear_regression.py
|
nickblum/regression
| 0
|
6629563
|
<reponame>nickblum/regression
def compute_cost(X, y, theta):
    """Mean-squared-error cost for linear regression.

    J(theta) = 1 / (2m) * sum((X @ theta - y) ** 2)

    Previously an unimplemented stub (returned None); implemented with numpy
    as the file's own docstring note recommends.

    X: (m, n) design matrix (anything np.asarray accepts).
    y: (m,) target vector.
    theta: (n,) parameter vector.
    Returns the scalar cost as a float.
    """
    import numpy as np  # local import: the module has no import section yet

    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    theta = np.asarray(theta, dtype=float)
    residuals = X @ theta - y
    return float(residuals @ residuals / (2 * len(y)))
def gradient_descent(X,y,theta,regularize):
    """Unimplemented stub: fit ``theta`` by (optionally regularized)
    gradient descent.  Currently a no-op returning None — the step size
    and iteration count are not yet part of the signature."""
    pass
def find_curve(X=0, y=0, alpha=100):
    """Placeholder entry point for fitting a regression curve.

    X: input features (currently unused).
    y: target values (currently unused).
    alpha: step size / regularization strength (currently unused).

    Side effects: prints a progress message.  The fitting logic is not
    implemented yet; per the original author's note, the implementation
    should use numpy for the array work.
    """
    print('Finding curve')
|
def compute_cost(X,y,theta):
    """Unimplemented stub: compute the regression cost for parameters
    ``theta`` on data ``(X, y)``.  Currently a no-op returning None."""
    pass
def gradient_descent(X,y,theta,regularize):
    """Unimplemented stub: fit ``theta`` by (optionally regularized)
    gradient descent.  Currently a no-op returning None."""
    pass
def find_curve(X=0,y=0,alpha=100):
    """
    Placeholder for the curve-fitting entry point (implementation pending;
    the original note-to-self suggests using numpy for the array work).

    X: input features (currently unused)
    y: target values (currently unused)
    alpha: step size / regularization strength (currently unused)
    """
    print('Finding curve')
|
en
| 0.849997
|
NOTE TO SELF: USE numpy -- it's written in C and a zillion times faster than python functions for arrays and such Need to include docstring here. A brief explanation of the function X: A brief explanation of this variable y: A brief explanation of this variable alpha: This one too
| 3.212288
| 3
|
setup.py
|
GreenelyAB/TranslationsClient
| 0
|
6629564
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()

# Packaging metadata for the TranslationsClient distribution.  The code lives
# under src/ (src-layout); the tests package is excluded from the wheel.
setup(
    name="TranslationsClient",
    version="0.1.0",
    description="A translations service Python client",
    long_description=long_description,
    url="https://github.com/GreenelyAB/TranslationsClient",
    author="GreenelyAB",
    author_email="<EMAIL>",
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Communications",
        "Topic :: Software Development :: Internationalization",
        "Topic :: Software Development :: Localization",
        "Topic :: Software Development :: Libraries",
        "Topic :: Text Processing",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.5",
    ],
    keywords="translation translations client internationalization",
    packages=find_packages("src", exclude=["tests"]),
    package_dir = {"": "src"},
    # ZeroMQ transport is the only runtime dependency.
    install_requires=["pyzmq>=15.2.0"],
    extras_require={
    },
)
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()

# Packaging metadata for the TranslationsClient distribution.  The code lives
# under src/ (src-layout); the tests package is excluded from the wheel.
setup(
    name="TranslationsClient",
    version="0.1.0",
    description="A translations service Python client",
    long_description=long_description,
    url="https://github.com/GreenelyAB/TranslationsClient",
    author="GreenelyAB",
    author_email="<EMAIL>",
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Communications",
        "Topic :: Software Development :: Internationalization",
        "Topic :: Software Development :: Localization",
        "Topic :: Software Development :: Libraries",
        "Topic :: Text Processing",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.5",
    ],
    keywords="translation translations client internationalization",
    packages=find_packages("src", exclude=["tests"]),
    package_dir = {"": "src"},
    # ZeroMQ transport is the only runtime dependency.
    install_requires=["pyzmq>=15.2.0"],
    extras_require={
    },
)
|
en
| 0.719398
|
# -*- coding: utf-8 -*- # Get the long description from the README file
| 1.441476
| 1
|
RIKEN/benchmarks/cosmoflow/implementations/implementation_fugaku_closed/mesh/mesh_tensorflow/layers.py
|
bgerofi/hpc_results_v0.7
| 2
|
6629565
|
# coding=utf-8
# Copyright 2020 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers implemented in Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gin
from mesh_tensorflow import ops_with_redefined_builtins as mtf
import tensorflow.compat.v1 as tf
@gin.configurable
def unit_scaling_convention(value=False):
  """Turn this on with gin to enable the unit-scaling convention.

  TODO(noam): turn this comment into a position paper and post to arxiv

  Under the unit-scaling convention, all weights are initialized with unit
  variance, and the outputs of most contractions (matmul/einsum operations) are
  divided by the square-root of the sizes of the contracting dimensions.

  This differs from the typical inverse-square-root weight-initalization
  convention often attributed to
  http://proceedings.mlr.press/v9/glorot10a.html
  in which weights are typically initialized according to a distribution with
  mean zero and standard-deviation equal to the inverse-square-root of the
  contracting dimension(s).

  Under both conventions, the purpose of the inverse-square-root scaling is so
  that activations in a layer should be scaled similarly to the activations in
  the previous layer. (Typically, models are initialized so that activations in
  all layers should have RMS=O(1)).

  The difference between the two conventions is whether this scaling happens in
  the parameters (their way), or as an explicit multiplier on the activations
  (our way).

  In our opinion, parameter-scaling (their way) has three main disadvantages:

  1. Optimizers need to be aware of differently-scaled parameters. This is
  because the learning-rates of adaptive optimizers represent target step-sizes
  for the parameters. The desired step size for a parameter logically depends
  on the scale of the parameter itself, and so one typically needs to lower the
  learning-rate when the layers get bigger and the parameters get consequently
  smaller. Under the unit-scaling convention, this is unnecessary, since all
  parameters are on the same unit scale.

  2. It is often unwieldy from an engineering standpoint to communicate to both
  the variable initializers and to the optimizer what the scale of the variable
  should be. Typically, the variable initializer guesses this by inferring from
  the dimension order which dimension of the variable might represent
  contracting dimensions. This is highly error-prone.

  3. Sometimes contractions happen without being associated with parameters, as
  in neural attention. It may be important here too to divide by the square
  root of the contracting dimensions, in order to maintain activation scale.
  See the discussion in section 3.2.1 of https://arxiv.org/abs/1706.03762
  Being in the habit of scaling the outputs of contractions in this way makes
  it more likely to remember to do the same thing in these circumstances.

  Note: When switching to the unit-scaling convention, it is probably necessary
  to raise the learning rate, since larger parameters need larger updates. An
  exception is when using Adafactor, which by default scales the updates
  relative to the scale of the current parameter values.

  Args:
    value: a boolean

  Returns:
    a boolean
  """
  # Gin substitutes the configured value for the default at call time; the
  # function body just reports the setting.
  return value
def us_einsum(xs, *args, **kwargs):
  """Einsum that optionally applies the unit-scaling convention.

  When unit_scaling_convention() is enabled, the einsum result is divided
  by the square root of the product of the sizes of the contracting
  dimensions (those present in some input but absent from the output).

  Args:
    xs: a list of mtf.Tensor
    *args: arguments to mtf.einsum
    **kwargs: keyword arguments to mtf.einsum

  Returns:
    a mtf.Tensor
  """
  out = mtf.einsum(xs, *args, **kwargs)
  if not unit_scaling_convention():
    return out
  input_dims = set()
  for x in xs:
    input_dims.update(x.shape.dims)
  contracting_dims = [d for d in input_dims if d not in out.shape.dims]
  return out * mtf.Shape(contracting_dims).size ** -0.5
def dense(x,
          new_dims,
          reduced_dims=None,
          expert_dims=None,
          use_bias=True,
          activation=None,
          master_dtype=tf.float32,
          slice_dtype=tf.float32,
          variable_dtype=None,
          kernel_initializer=None,
          kernel_weights=None,
          name=None):
  """Dense layer doing (kernel*x + bias) computation.

  Args:
    x: a mtf.Tensor of shape [..., reduced_dims].
    new_dims: a list of mtf.Dimension.
    reduced_dims: a list of mtf.Dimensions of x to be reduced.
      If omitted (deprecated interface), we reduce the last dimension.
    expert_dims: an optional list of mtf.Dimension which represent different
      experts. Different experts get different weights.
    use_bias: a boolean, whether to add bias.
    activation: an optional function from mtf.Tensor to mtf.Tensor
    master_dtype: a tf.dtype (deprecated - use variable_dtype)
    slice_dtype: a tf.dtype (deprecated - use variable_dtype)
    variable_dtype: a mtf.VariableDType
    kernel_initializer: an initializer for kernel variable.
    kernel_weights: mtf.Tensor weights matrix to use for dense computation
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor of shape [..., new_dims].
  """
  # Allow a single Dimension to be passed in place of a list.
  if not isinstance(new_dims, list):
    new_dims = [new_dims]
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
  if expert_dims is None:
    expert_dims = []
  if reduced_dims is None:
    tf.logging.warning(
        "Deprecation warning - it is recommended to pass reduced_dims "
        "explicitly to mtf.layers.dense() so as not to depend on dimension "
        "order. To silence this warning, explicitly pass "
        "reduced_dims=x.shape.dims[-1:] (in scope %s)"
        % tf.get_variable_scope().name)
    reduced_dims = x.shape.dims[-1:]
  # if any reduced dims have the same names as new dims, first change these
  # dimension names in the input so as to avoid name conflict in the weight
  # matrix.
  reduced_dims = reduced_dims[:]  # copy: entries may be rewritten below
  for i in range(len(reduced_dims)):
    if reduced_dims[i] in new_dims:
      original_name = reduced_dims[i].name
      tmp_name = "_" + original_name
      reduced_dims[i] = mtf.Dimension(tmp_name, reduced_dims[i].size)
      x = mtf.rename_dimension(x, original_name, tmp_name)
  # Output keeps all non-reduced input dims, followed by the new dims.
  output_shape = mtf.Shape([d for d in x.shape.dims if d not in reduced_dims] +
                           new_dims)
  # Create the kernel variable unless the caller supplied explicit weights.
  if not kernel_weights:
    kernel_weights = get_dense_kernel_weights(x, new_dims, reduced_dims,
                                              expert_dims, kernel_initializer,
                                              name, variable_dtype,
                                              master_dtype, slice_dtype)
  with tf.variable_scope(name, default_name="dense"):
    # us_einsum applies the unit-scaling output division when enabled.
    y = us_einsum([x, kernel_weights], output_shape)
    if use_bias:
      b = mtf.get_variable(
          x.mesh,
          "bias",
          mtf.Shape(expert_dims + new_dims),
          initializer=tf.zeros_initializer(),
          dtype=variable_dtype)
      y += b
    if activation is not None:
      y = activation(y)
    return y
def get_dense_kernel_weights(x,
                             new_dims,
                             reduced_dims,
                             expert_dims,
                             kernel_initializer,
                             name=None,
                             variable_dtype=None,
                             master_dtype=tf.float32,
                             slice_dtype=tf.float32):
  """Create w matrix variable.

  Args:
    x: a mtf.Tensor.
    new_dims: a list of mtf.Dimension.
    reduced_dims: a list of mtf.Dimensions of x to be reduced.
    expert_dims: an optional list of mtf.Dimension which represent different
      experts. Different experts get different weights.
    kernel_initializer: an initializer for kernel variable.
    name: a string used for tf.variable_scope.
    variable_dtype: a mtf.VariableDType
    master_dtype: a tf.dtype (deprecated - use variable_dtype)
    slice_dtype: a tf.dtype (deprecated - use variable_dtype)

  Returns:
    a mtf.Tensor.
  """
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
  # Kernel layout: expert dims first, then contracting dims, then output dims.
  w_shape = mtf.Shape(expert_dims + reduced_dims + new_dims)
  with tf.variable_scope(name, default_name="dense"):
    if kernel_initializer is None:
      kernel_initializer = VarianceScalingInitializer()
    # A DenseInitializer adapts its scale to the contracting/new dims;
    # resolve it to a plain tf initializer before creating the variable.
    if isinstance(kernel_initializer, DenseInitializer):
      kernel_initializer = kernel_initializer(reduced_dims, new_dims)
    w = mtf.get_variable(
        x.mesh,
        "kernel",
        w_shape,
        initializer=kernel_initializer,
        dtype=variable_dtype)
    # Cast to the activation dtype so the einsum with x type-checks.
    w = mtf.cast(w, x.dtype)
  return w
def dense_product(x,
                  reduced_dims,
                  new_dims,
                  activation_functions=None,
                  name="dense_product",
                  **kwargs):
  """Elementwise product of several parallel dense layers.

  For example, activation_functions=["linear", "sigmoid"] yields the Gated
  Linear Unit of https://arxiv.org/pdf/1612.08083.pdf

  Args:
    x: a Tensor
    reduced_dims: a list of Dimensions.
    new_dims: a list of Dimensions.
    activation_functions: a list of activation functions (or a singleton).
      Each entry can be:
      - a callable function from Tensor to Tensor
      - a string function name from namespace mtf
      - None or "linear", meaning no activation function
    name: an optional string
    **kwargs: additional kwargs for mtf.layers.dense()
  """
  if not isinstance(activation_functions, list):
    activation_functions = [activation_functions]

  def _resolve(fn):
    # Map "linear"/None to the identity; look up string names in mtf.
    if fn == "linear":
      return None
    if isinstance(fn, str):
      return getattr(mtf, fn)
    return fn

  multiple = len(activation_functions) > 1
  factors = [
      dense(x,
            reduced_dims=reduced_dims,
            new_dims=new_dims,
            activation=_resolve(fn),
            name="%s_%d" % (name, i) if multiple else name,
            **kwargs)
      for i, fn in enumerate(activation_functions)
  ]
  return functools.reduce(mtf.multiply, factors)
class DenseInitializer(object):
  """Abstract base class for initializers that can be passed to dense().

  Unlike a plain tf initializer, a DenseInitializer is a factory: its
  __call__ receives the reduced and new dimensions of the weight matrix and
  returns a tf initializer object sized appropriately for that matrix.
  """

  def __call__(self, reduced_dims, new_dims):
    # Subclasses must override; see VarianceScalingInitializer.
    raise NotImplementedError("not implemented")
@gin.configurable
class VarianceScalingInitializer(DenseInitializer):
  """Initializer capable of adapting its scale to the shape of weights.

  With `distribution="normal"`, samples are drawn from a truncated normal
  distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
    1.0 if unit_scaling_convention() is turned on
    otherwise:
      number of input units in the weight tensor, if mode = "fan_in"
      number of output units, if mode = "fan_out"
      average of the numbers of input and output units, if mode = "fan_avg"
  With `distribution="uniform"`,
  samples are drawn from a uniform distribution
  within [-limit, limit], with `limit = sqrt(3 * scale / n)`.

  # Arguments
      scale: Scaling factor (positive float).
      mode: One of "fan_in", "fan_out", "fan_avg".
      distribution: Random distribution to use. One of "normal", "uniform".
  """

  def __init__(self, scale=1.0,
               mode="fan_in",
               distribution="normal"):
    self.scale = scale
    # Normalize case once here so __call__ can compare directly.
    self.mode = mode.lower()
    self.distribution = distribution.lower()

  def __call__(self, reduced_dims, new_dims):
    """Return a tf initializer scaled for the given weight dimensions.

    Args:
      reduced_dims: a list of mtf.Dimension (the input/"fan-in" dims).
      new_dims: a list of mtf.Dimension (the output/"fan-out" dims).

    Returns:
      a tf initializer object.

    Raises:
      ValueError: if mode or distribution is invalid, or if the unit scaling
        convention is combined with a mode other than "fan_in".
    """
    fan_in = mtf.list_product(d.size for d in reduced_dims)
    fan_out = mtf.list_product(d.size for d in new_dims)
    scale = self.scale
    if self.mode == "fan_in":
      # Under the unit scaling convention the 1/fan_in factor is applied
      # in dense() at matmul time instead of baked into the weights.
      if not unit_scaling_convention():
        scale /= max(1., fan_in)
    elif self.mode == "fan_out":
      if unit_scaling_convention():
        raise ValueError("Unit scaling convention only works with \"fan_in\"")
      scale /= max(1., fan_out)
    elif self.mode == "fan_avg":
      if unit_scaling_convention():
        raise ValueError("Unit scaling convention only works with \"fan_in\"")
      scale /= max(1., float(fan_in + fan_out) / 2)
    else:
      # Fixed typo in the message: "expected on of" -> "expected one of".
      raise ValueError(
          "Invalid `mode` argument: "
          "expected one of {\"fan_in\", \"fan_out\", \"fan_avg\"} "
          "but got %s" % (self.mode,))
    stddev = scale ** 0.5
    if self.distribution == "normal":
      return tf.truncated_normal_initializer(stddev=stddev)
    elif self.distribution == "uniform":
      # Uniform on [-limit, limit] has stddev limit/sqrt(3), so scale up.
      limit = stddev * 3. ** 0.5
      return tf.random_uniform_initializer(minval=-limit, maxval=limit)
    else:
      raise ValueError("Invalid `distribution` argument: "
                       "expected one of {\"normal\", \"uniform\"} "
                       "but got %s" % (self.distribution,))
def conv1d(x, output_dim, filter_size=3, stride=1, **kw_args):
  """1D convolution implemented via conv2d with a dummy height axis.

  Args:
    x: a mtf.Tensor of format NWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a positive integer, the filter width.
    stride: a positive integer, the stride.
    **kw_args: optional keyword arguments to mtf.layers.conv2d.

  Returns:
    a mtf.Tensor of format NWO, where O is the output dimension.
  """
  # Insert a size-1 "fake_height" axis so the input is NHWC, run a 1 x K
  # conv2d, then squeeze the fake axis (and old channel axis) back out.
  fake_height = mtf.Dimension("fake_height", 1)
  nhwc = mtf.reshape(
      x, mtf.Shape(x.shape.dims[:-2] + [fake_height] + x.shape.dims[-2:]))
  conv_out = conv2d(
      nhwc,
      output_dim,
      filter_size=(1, filter_size),
      strides=(1, stride),
      **kw_args)
  kept_dims = [d for d in nhwc.shape.dims
               if d != fake_height and d != nhwc.shape.dims[-1]]
  return mtf.reshape(conv_out, mtf.Shape(kept_dims + [output_dim]))
def _depthwise_conv1d_hack(x,
                           depth_dim,
                           length_dim,
                           min_relative_pos=-1,
                           max_relative_pos=1,
                           name=None,
                           use_bias=True,
                           initializer_scale=1.0,
                           kernel_depth_weights=None):
  """Hacky version of a 1d depthwise convolution.

  Implemented as a sum over kernel taps: for each relative position the input
  is shifted along length_dim and multiplied by a per-depth weight via
  dense() with expert_dims=[depth_dim] (i.e. one scalar weight per channel
  per tap).

  Args:
    x: a mtf.Tensor
    depth_dim: mtf.Dimension,
    length_dim: mtf.Dimension,
    min_relative_pos: int, min relative position,
    max_relative_pos: int, max relative position,
    name: str, variable_scope name,
    use_bias: Bool, whether to use bias,
    initializer_scale: int, initializer scale,
    kernel_depth_weights: an optional list of kernel weight tensors. The list
      contains one element for each relative position in the kernel. Each
      element has a width equal to the depth over which the separable conv
      operation is being "separated"

  Returns:
    an mtf.Tensor
  """
  ret = 0
  kernel_size = max_relative_pos - min_relative_pos + 1
  with tf.variable_scope(name, default_name="depthwise_conv_hack"):
    for i in range(kernel_size):
      relative_pos = min_relative_pos + i
      # Each tap sees the input shifted by its relative position; wrap=False
      # zero-fills past the sequence boundary.
      shifted_input = mtf.shift(x, -relative_pos, length_dim, wrap=False)
      ret += dense(
          shifted_input,
          new_dims=[],
          reduced_dims=[],
          expert_dims=[depth_dim],
          kernel_weights=kernel_depth_weights[i]
          if kernel_depth_weights else None,
          name="depthwise_dense_%d" % i,
          # Only the first tap carries a bias; adding one per tap would just
          # sum to a single redundant bias anyway.
          use_bias=use_bias and (i == 0),
          # Divide the initializer scale by kernel_size so the summed output
          # has the intended overall variance.
          kernel_initializer=VarianceScalingInitializer(
              scale=initializer_scale / kernel_size))
  return ret
def separable_conv1d(x,
                     output_dim,
                     min_relative_pos=-1,
                     max_relative_pos=1,
                     depthwise_filter_initializer_scale=1.0,
                     pointwise_filter_initializer_scale=1.0,
                     name=None,
                     use_bias=True,
                     kernel_depth_weights=None):
  """1-D convolution with separable filters.

  The effective filter size is `max_relative_pos - min_relative_pos + 1`.
  First a depthwise (per-channel) convolution is applied along the length
  axis, then a pointwise dense layer mixes channels into output_dim.

  Args:
    x: a mtf.Tensor of format NWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    min_relative_pos: an integer, the inclusive minimum relative position of
      the depthwise filter, where zero means the left end of the filter
      aligns with the left end of the input.
    max_relative_pos: an integer, the inclusive maximum relative position of
      the depthwise filter, where zero means the right end of the filter
      aligns with the right end of the input.
    depthwise_filter_initializer_scale: a positive float, the scale of the
      initializer for the depthwise filter.
    pointwise_filter_initializer_scale: a positive float, the scale of the
      initializer for the pointwise filter.
    name: a string used for tf.variable_scope.
    use_bias: a bool, whether to use bias in the convolutions.
    kernel_depth_weights: an optional list of kernel weight tensors. The list
      contains one element for each relative position in the kernel. Each
      element has a width equal to the dimension over which the separable
      conv operation is being "separated"

  Returns:
    a mtf.Tensor of format NWO, where O is the output dimension.
  """
  channels_dim = x.shape.dims[-1]
  positions_dim = x.shape.dims[-2]
  with tf.variable_scope(name, default_name="separable_conv1d"):
    depthwise_out = _depthwise_conv1d_hack(
        x,
        depth_dim=channels_dim,
        length_dim=positions_dim,
        min_relative_pos=min_relative_pos,
        max_relative_pos=max_relative_pos,
        use_bias=use_bias,
        initializer_scale=depthwise_filter_initializer_scale,
        kernel_depth_weights=kernel_depth_weights)
    return dense(
        depthwise_out,
        new_dims=[output_dim],
        reduced_dims=[channels_dim],
        name="pointwise_dense",
        use_bias=use_bias,
        kernel_initializer=VarianceScalingInitializer(
            scale=pointwise_filter_initializer_scale))
def conv2d(x, output_dim, filter_size=(3, 3),
           strides=(1, 1), padding="SAME", filter_initializer=None,
           variable_dtype=None, name=None):
  """2D Convolution.

  Args:
    x: a mtf.Tensor of format NHWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format [filter_height, filter_width].
    strides: a list or tuple in format [stride_height, stride_width].
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  filter_h = mtf.Dimension("fh", filter_size[0])
  filter_w = mtf.Dimension("fw", filter_size[1])
  in_dim = x.shape[-1]
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
  with tf.variable_scope(name, default_name="conv2d"):
    kernel = mtf.get_variable(
        x.mesh, "kernel", [filter_h, filter_w, in_dim, output_dim],
        initializer=filter_initializer, dtype=variable_dtype)
    # Strides of 1 in the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv2dOperation(x, kernel, full_strides, padding).outputs[0]
def conv2d_with_blocks(
    x, output_dim, filter_size=(3, 3),
    strides=(1, 1), padding="SAME",
    h_blocks_dim=None, w_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """2D Convolution with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter height and width.

  Currently, only "SAME" padding with dilation rate of 1 is supported.

  Args:
    x: a Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format [filter_height, filter_width].
    strides: a list or tuple in format [stride_height, stride_width].
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    h_blocks_dim: Dimension representing number of height blocks.
    w_blocks_dim: Dimension representing number of width blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
  """
  # If h_blocks_dim and w_blocks_dim are not split, directly call conv2d.
  if h_blocks_dim is None and w_blocks_dim is None:
    return conv2d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv2d_with_blocks requires padding=SAME")
  # Halo exchange for h_blocks and w_blocks. The asymmetric split
  # [(k - 1) // 2, k // 2] also handles even filter sizes.
  h_dim, w_dim = x.shape.dims[-3:-1]
  for blocks_dim, block_size_dim, halo_size in [
      (h_blocks_dim, h_dim, [(filter_size[0] - 1) // 2, filter_size[0] // 2]),
      (w_blocks_dim, w_dim, [(filter_size[1] - 1) // 2, filter_size[1] // 2])]:
    if halo_size != [0, 0]:
      # Route debug output through tf.logging (as done elsewhere in this
      # file) instead of a bare print(), so it can be filtered/silenced.
      tf.logging.info(
          "#halo check# %s, %s, %s, halo_size=%s"
          % (blocks_dim, block_size_dim, filter_size, halo_size))
      if blocks_dim is not None:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
      else:
        x = mtf.pad(x, halo_size, block_size_dim.name)
  # Halos have been materialized, so run the conv with VALID padding.
  return conv2d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv2d_transpose(x, output_dim,
                     filter_size=(2, 2), strides=(2, 2),
                     padding="SAME", filter_initializer=None,
                     variable_dtype=None, name=None):
  """2D Transposed Convolution.

  Args:
    x: a mtf.Tensor of format NHWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format [filter_height, filter_width].
      Only filter_size of (2, 2) is tested.
    strides: a list or tuple in format [stride_height, stride_width].
      Only strides of (2, 2) is tested.
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  filter_h = mtf.Dimension("fh", filter_size[0])
  filter_w = mtf.Dimension("fw", filter_size[1])
  in_dim = x.shape[-1]
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
  with tf.variable_scope(name, default_name="conv2d_transpose"):
    # Note the transposed kernel layout: [fh, fw, out_dim, in_dim].
    kernel = mtf.get_variable(
        x.mesh, "kernel", [filter_h, filter_w, output_dim, in_dim],
        initializer=filter_initializer, dtype=variable_dtype)
    # Strides of 1 in the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv2dTransposeOperation(
        x, kernel, full_strides, padding).outputs[0]
def conv2d_transpose_with_blocks(
    x, output_dim, filter_size=(2, 2),
    strides=(2, 2), padding="SAME",
    h_blocks_dim=None, w_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """2D Transposed Convolution with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter depth and height.

  Currently, only "SAME" padding with dilation rate of 1 is supported. Only
  even-sized filters are supported.

  Args:
    x: a Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_height, filter_width]. Only filter_size of (2, 2) is tested.
    strides: a list or tuple in format
      [stride_height, stride_width]. Only strides of (2, 2) is tested.
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    h_blocks_dim: Dimension representing number of height blocks.
    w_blocks_dim: Dimension representing number of width blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
  """
  # If h_blocks_dim and w_blocks_dim are not split, directly call conv2d_trans.
  if h_blocks_dim is None and w_blocks_dim is None:
    return conv2d_transpose(
        x, output_dim, filter_size, strides, padding, filter_initializer,
        variable_dtype, name)
  # Now only supports even-sized filters.
  assert filter_size[0] % 2 == 0
  assert filter_size[1] % 2 == 0
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError(
        "conv2d_transpose_with_blocks requires padding=SAME")
  # Halo exchange for h_blocks and w_blocks.
  # For a transposed conv with even filter k and stride k, each block needs
  # k // 2 - 1 extra elements from its neighbor (0 for the tested k == 2).
  # TODO(lehou): figure out the halo_size in general cases.
  h_dim, w_dim = x.shape.dims[-3:-1]
  for blocks_dim, block_size_dim, halo_size in [
      (h_blocks_dim, h_dim, filter_size[0] // 2 - 1),
      (w_blocks_dim, w_dim, filter_size[1] // 2 - 1)]:
    if halo_size > 0:
      if blocks_dim is not None:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
      else:
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
  # Halos are in place, so run with VALID padding.
  return conv2d_transpose(
      x, output_dim, filter_size, strides, "VALID", filter_initializer,
      variable_dtype, name)
def conv3d(x, output_dim, filter_size=(3, 3, 3),
           strides=(1, 1, 1), padding="SAME",
           filter_initializer=None,
           variable_dtype=None, name=None):
  """3D Convolution.

  Args:
    x: a mtf.Tensor of format NDHWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  spatial_dims = [mtf.Dimension(dim_name, size)
                  for dim_name, size in zip(("fd", "fh", "fw"), filter_size)]
  in_dim = x.shape[-1]
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
  with tf.variable_scope(name, default_name="conv3d"):
    kernel = mtf.get_variable(
        x.mesh, "kernel", spatial_dims + [in_dim, output_dim],
        initializer=filter_initializer, dtype=variable_dtype)
    # Strides of 1 in the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv3dOperation(x, kernel, full_strides, padding).outputs[0]
def conv3d_to_dense(x, mesh_size=(2, 2), name=None):
  """Undo spatial blocking on the depth and height axes of a 3D activation.

  For each of the depth and height dimensions, if the corresponding mesh
  size is larger than 1 (i.e. that axis was split across processors), call
  mtf.conv3d_to_dense on that axis to merge the blocks back into a dense
  dimension.

  Args:
    x: a mtf.Tensor whose last four dimensions are [d, h, w, channels].
    mesh_size: a tuple (d_procs, h_procs), the number of processors the
      depth and height axes are split over.
    name: unused; accepted for interface consistency with the other conv
      helpers in this file.

  Returns:
    a mtf.Tensor.
  """
  del name  # unused
  # Only the depth and height axes are blocked; width is left untouched.
  d_dim, h_dim = x.shape.dims[-4:-2]
  for block_size_dim, num_procs in ((d_dim, mesh_size[0]),
                                    (h_dim, mesh_size[1])):
    if num_procs > 1:
      x = mtf.conv3d_to_dense(x, block_size_dim)
  return x
def conv3d_with_MPI(
    x, output_dim, filter_size=(2, 2, 2),
    strides=(1, 1, 1), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D convolution with spatial partitioning via MPI halo exchange.

  Variant of conv3d_with_blocks that performs the halo exchange through
  mtf.halo_exchange_with_MPI and pads the w dimension asymmetrically with
  [(k - 1) // 2, k // 2] (the odd-filter asserts are commented out below,
  so even-sized filters are presumably intended to work — verify).

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
    padding: string, "SAME". "VALID" is not currently supported.
    d_blocks_dim: Dimension representing number of depth blocks.
    h_blocks_dim: Dimension representing number of height blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    a mtf.Tensor.
  """
  # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  # assert filter_size[0] % 2 == 1
  # assert filter_size[1] % 2 == 1
  # assert filter_size[2] % 2 == 1
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv3d_with_blocks requires padding=SAME")
  # Halo exchange for d_blocks and h_blocks.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  for blocks_dim, block_size_dim, halo_size in [
      (d_blocks_dim, d_dim, filter_size[0] // 2),
      (h_blocks_dim, h_dim, filter_size[1] // 2)]:
    if halo_size > 0:
      if blocks_dim is not None:
        # MPI-based exchange instead of the mesh-collective halo_exchange.
        x = mtf.halo_exchange_with_MPI(x, blocks_dim, block_size_dim, halo_size)
      else:
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
  # Pad w dimension with zeros.
  #x = mtf.pad(x, [filter_size[2] // 2, filter_size[2] // 2],
  #            dim_name=w_dim.name, name="conv3d_pad_w_dim")
  # Asymmetric pad: for even filter widths the extra element goes on the
  # right; for odd widths this equals the symmetric pad above.
  x = mtf.pad(x, [(filter_size[2] - 1 ) // 2, filter_size[2] // 2],
              dim_name=w_dim.name, name="conv3d_pad_w_dim")
  return conv3d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv3d_with_blocks(
    x, output_dim, filter_size=(3, 3, 3),
    strides=(1, 1, 1), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D Convolution with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as d_blocks_dim and h_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter depth and height.

  Currently, only "SAME" padding with dilation rate of 1 is supported. Only
  splitting along the depth and height dimensions are supported.

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    d_blocks_dim: Dimension representing number of depth blocks.
    h_blocks_dim: Dimension representing number of height blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
       d_dim, h_dim, w_dim, out_channels_dim]
  """
  # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  # Only odd-sized filters are supported: the symmetric halo/pad of k // 2
  # below assumes an odd k.
  assert filter_size[0] % 2 == 1
  assert filter_size[1] % 2 == 1
  assert filter_size[2] % 2 == 1
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv3d_with_blocks requires padding=SAME")
  # Halo exchange for d_blocks and h_blocks.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  for blocks_dim, block_size_dim, halo_size in [
      (d_blocks_dim, d_dim, filter_size[0] // 2),
      (h_blocks_dim, h_dim, filter_size[1] // 2)]:
    if halo_size > 0:
      if blocks_dim is not None:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
      else:
        # Axis is not actually split: plain zero padding is equivalent.
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
  # Pad w dimension with zeros.
  x = mtf.pad(x, [filter_size[2] // 2, filter_size[2] // 2],
              dim_name=w_dim.name, name="conv3d_pad_w_dim")
  # Halos and pads are materialized, so run the conv with VALID padding.
  return conv3d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv3d_with_blocks_ymzk(
    x, output_dim, filter_size=(3, 3, 3),
    strides=(1, 1, 1), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D Convolution with spatial partitioning (ymzk variant).

  Modified copy of conv3d_with_blocks: the odd-filter asserts are disabled
  and the halo/pad sizes use the asymmetric split [(k - 1) // 2, k // 2],
  presumably to support even-sized filters as well (uses
  mtf.halo_exchange_ymzk, which takes a two-element halo list) — verify
  against that helper's contract.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as d_blocks_dim and h_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter depth and height.

  Currently, only "SAME" padding with dilation rate of 1 is supported. Only
  splitting along the depth and height dimensions are supported.

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    d_blocks_dim: Dimension representing number of depth blocks.
    h_blocks_dim: Dimension representing number of height blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
       d_dim, h_dim, w_dim, out_channels_dim]
  """
  # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  #ymzk
  #assert filter_size[0] % 2 == 1
  #assert filter_size[1] % 2 == 1
  #assert filter_size[2] % 2 == 1
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv3d_with_blocks requires padding=SAME")
  # Halo exchange for d_blocks and h_blocks.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  # for blocks_dim, block_size_dim, halo_size in [
  #     (d_blocks_dim, d_dim, [(filter_size[0] - 1) // 2, filter_size[0] // 2])]:
  for blocks_dim, block_size_dim, halo_size in [
      (d_blocks_dim, d_dim, [(filter_size[0] - 1) // 2, filter_size[0] // 2]),
      (h_blocks_dim, h_dim, [(filter_size[1] - 1) // 2, filter_size[1] // 2])]:
    tf.logging.info("#3d# %s, %s, %s, %s, %s, %s"% (x, output_dim, filter_size, blocks_dim, block_size_dim, halo_size))
    #print("#3d#", x, output_dim, filter_size, blocks_dim, block_size_dim, halo_size)
    if halo_size != [0, 0]:
      if blocks_dim is not None:
        x = mtf.halo_exchange_ymzk(x, blocks_dim, block_size_dim, halo_size)
      else:
        # Axis not split: zero padding is equivalent to the exchange.
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
  tf.logging.info("#3d_exch_after# %s" %x)
  # Pad w dimension with zeros.
  #x = mtf.pad(x, [(filter_size[2] - 1) // 2, filter_size[2] // 2],
  # Asymmetric pad: for even filter widths the extra element goes right.
  x = mtf.pad(x, [(filter_size[2] - 1 ) // 2, filter_size[2] // 2],
              dim_name=w_dim.name, name="conv3d_pad_w_dim")
  #x = mtf.pad(x, [(filter_size[2] - 1 ) // 2, filter_size[2] // 2],
  #            dim_name=h_dim.name, name="conv3d_pad_h_dim")
  return conv3d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv3d_transpose(x, output_dim,
                     filter_size=(2, 2, 2), strides=(2, 2, 2),
                     padding="SAME", filter_initializer=None,
                     variable_dtype=None, name=None):
  """3D Transposed Convolution.

  Args:
    x: a mtf.Tensor of format NDHWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
      Only filter_size of (2, 2, 2) is tested.
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
      Only strides of (2, 2, 2) is tested.
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  spatial_dims = [mtf.Dimension(dim_name, size)
                  for dim_name, size in zip(("fd", "fh", "fw"), filter_size)]
  in_dim = x.shape[-1]
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
  with tf.variable_scope(name, default_name="conv3d_transpose"):
    # Note the transposed kernel layout: [fd, fh, fw, out_dim, in_dim].
    kernel = mtf.get_variable(
        x.mesh, "kernel", spatial_dims + [output_dim, in_dim],
        initializer=filter_initializer, dtype=variable_dtype)
    # Strides of 1 in the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv3dTransposeOperation(
        x, kernel, full_strides, padding).outputs[0]
def conv3d_transpose_with_blocks(
    x, output_dim, filter_size=(2, 2, 2),
    strides=(2, 2, 2), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D Transposed Convolution with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as d_blocks_dim and h_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter depth and height.

  Currently, only "SAME" padding with dilation rate of 1 is supported. Only
  splitting along the depth and height dimensions are supported.

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
      Only filter_size of (2, 2, 2) is tested.
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
      Only strides of (2, 2, 2) is tested.
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    d_blocks_dim: Dimension representing number of depth blocks.
    h_blocks_dim: Dimension representing number of height blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
       d_dim, h_dim, w_dim, out_channels_dim]
  """
  # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d_trans.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d_transpose(
        x, output_dim, filter_size, strides, padding, filter_initializer,
        variable_dtype, name)
  # Now only supports even-sized filters.
  assert filter_size[0] % 2 == 0
  assert filter_size[1] % 2 == 0
  assert filter_size[2] % 2 == 0
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError(
        "conv3d_transpose_with_blocks requires padding=SAME")
  # Halo exchange for d_blocks and h_blocks.
  # For even filter k with stride k, each block needs k // 2 - 1 neighbor
  # elements (0 for the tested k == 2).
  # TODO(lehou): figure out the halo_size in general cases.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  for blocks_dim, block_size_dim, halo_size in [
      (d_blocks_dim, d_dim, filter_size[0] // 2 - 1),
      (h_blocks_dim, h_dim, filter_size[1] // 2 - 1)]:
    if halo_size > 0:
      if blocks_dim is not None:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
      else:
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
  # Pad w dimension with zeros.
  x = mtf.pad(x, [filter_size[2] // 2 - 1, filter_size[2] // 2 - 1],
              dim_name=w_dim.name, name="conv3d_trans_pad_w_dim")
  # Halos and pads are materialized, so run with VALID padding.
  return conv3d_transpose(
      x, output_dim, filter_size, strides, "VALID", filter_initializer,
      variable_dtype, name)
def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
  """Layer normalization over dimension dim.

  Args:
    x: a mtf.Tensor whose shape contains dim.
    dim: a mtf.Dimension to normalize over.
    epsilon: a small floating point number for numerical stability.
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor with same shape as x.
  """
  with tf.variable_scope(name + "/layer_norm"):
    stats_shape = x.shape - dim
    mean = mtf.reduce_mean(x, output_shape=stats_shape)
    centered = x - mean
    variance = mtf.reduce_mean(mtf.square(centered), output_shape=stats_shape)
    normalized = centered * mtf.rsqrt(variance + epsilon)
    # Learned per-element gain and shift along dim.
    gain = mtf.get_variable(
        x.mesh,
        "layer_norm_scale",
        mtf.Shape([dim]),
        initializer=tf.ones_initializer(),
        activation_dtype=x.dtype)
    shift = mtf.get_variable(
        x.mesh,
        "layer_norm_bias",
        mtf.Shape([dim]),
        initializer=tf.zeros_initializer(),
        activation_dtype=x.dtype)
    return normalized * gain + shift
def batch_norm(x, is_training, momentum, epsilon=1e-9,
               dims_idx_start=0, dims_idx_end=-1,
               init_zero=False, name=None):
  """Batch normalization.

  Args:
    x: a mtf.Tensor whose shape contains [batch_dim, ..., dim]
    is_training: a boolean, whether mode is training.
    momentum: a floating point number, specifying batch norm decay value.
    epsilon: a floating point number.
    dims_idx_start: an integer. Dimension with indices in
      [dims_idx_start, dims_idx_end - 1] will be normalized.
    dims_idx_end: an integer. Dimension with indices in
      [dims_idx_start, dims_idx_end - 1] will be normalized.
    init_zero: a boolean, whether to initialize scale with 0's or 1's.
    name: a string used for tf.variable_scope.

  Returns:
    a tuple (normalized mtf.Tensor with same shape as x,
             list of mtf assign ops that update the moving statistics;
             empty when is_training is False). The caller is responsible
             for running the update ops.
  """
  with tf.variable_scope(name, default_name="batch_norm", values=[x]):
    # init_zero=True is useful e.g. for zero-initializing the last BN of a
    # residual branch so the block starts as identity.
    if init_zero:
      gamma_initializer = tf.zeros_initializer()
    else:
      gamma_initializer = tf.ones_initializer()
    norm_dim = x.shape.dims[dims_idx_start:dims_idx_end]
    # Statistics and the learned scale/bias live on the non-normalized dims.
    reduced_shape = x.shape - norm_dim
    scale = mtf.get_variable(
        x.mesh,
        "batch_norm_scale",
        reduced_shape,
        initializer=gamma_initializer,
        activation_dtype=x.dtype)
    bias = mtf.get_variable(
        x.mesh,
        "batch_norm_bias",
        reduced_shape,
        initializer=tf.zeros_initializer(),
        activation_dtype=x.dtype)
    # Non-trainable running statistics used at eval/test time.
    moving_mean = mtf.get_variable(
        x.mesh, "bn_moving_mean", reduced_shape,
        initializer=tf.random_normal_initializer(stddev=1.0),
        activation_dtype=x.dtype,
        trainable=False)
    moving_variance = mtf.get_variable(
        x.mesh, "bn_moving_variance",
        reduced_shape, initializer=tf.ones_initializer(),
        activation_dtype=x.dtype,
        trainable=False)
    # At training time, calculate mean and variance and normalize across batch
    # dim.
    if is_training:
      mean = mtf.reduce_mean(x, output_shape=reduced_shape)
      variance = mtf.reduce_mean(
          mtf.square(x - mean), output_shape=reduced_shape)
      norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
      # Update running mean and running variance.
      # TODO(lehou): do not return update_ops; handle them inside MTF.
      bn_stats_update_ops = []
      bn_stats_update_ops.append(mtf.assign(
          moving_mean, momentum * moving_mean + (1 - momentum) * mean,
          name="{}/bn_mean_update".format(name)))
      bn_stats_update_ops.append(mtf.assign(
          moving_variance,
          momentum * moving_variance + (1 - momentum) * variance,
          name="{}/bn_var_update".format(name)))
    else:
      # At eval and test time, use the running mean and variance.
      norm_x = (x - moving_mean) * mtf.rsqrt(moving_variance + epsilon)
      bn_stats_update_ops = []
    return (norm_x * scale) + bias, bn_stats_update_ops
def softmax_cross_entropy_with_logits(logits, targets, vocab_dim, z_loss=0.0):
  """Per-example softmax cross-entropy loss.

  `logits` is a Tensor with floating-point dtype, containing the predicted
  relative log probabilities of the classes.

  Either hard targets or soft targets are supported.  For hard targets,
  `targets` is a Tensor with integer dtype whose values are in the range
  [0, vocab_dim.size); it should have the same set of dimensions as `logits`,
  but without `vocab_dim`.  For soft targets, `targets` is a Tensor with
  floating-point dtype and the same dimensions as `logits`; reducing
  `targets` along `vocab_dim` should result in all ones.

  If z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is
  the partition function.  Example value: z_loss=1e-4.  Two uses of z_loss:
  - To keep the logits from drifting too far from zero, which can cause
    unacceptable roundoff errors in bfloat16.
  - To encourage the logits to be normalized log-probabilities.

  Args:
    logits: a mtf.Tensor whose shape contains vocab_dim
    targets: a mtf.Tensor representing hard or soft targets (see above)
    vocab_dim: a mtf.Dimension
    z_loss: a float

  Returns:
    a mtf.Tensor whose shape is equal to logits.shape - vocab_dim

  Raises:
    ValueError: if the shapes do not match.
  """
  if targets.dtype.is_integer:
    # Hard targets: validate that targets covers exactly the non-vocab dims
    # of logits, then expand the class ids to a one-hot distribution.
    if (set(targets.shape.dims)
        != set(logits.shape.dims).difference([vocab_dim])):
      raise ValueError(
          "softmax_cross_entropy_with_logits with hard targets "
          "dims in targets=%s should be dims in logits=%s other than "
          "vocab_dim=%s" % (targets, logits, vocab_dim))
    targets = mtf.one_hot(targets, vocab_dim, dtype=logits.dtype)
  elif set(targets.shape.dims) != set(logits.shape.dims):
    # Soft targets must have exactly the same set of dims as logits.
    raise ValueError(
        "softmax_cross_entropy_with_logits with soft targets "
        "dims in targets=%s should be dims in logits=%s"% (targets, logits))
  if vocab_dim not in logits.shape.dims:
    raise ValueError("vocab_dim must be in logits.shape.dims")
  # log(z): log of the partition function, reduced over the vocabulary.
  log_z = mtf.reduce_logsumexp(logits, vocab_dim)
  log_softmax = logits - log_z
  # Cross-entropy: -sum_v targets[v] * log_softmax[v].
  loss = mtf.negative(
      mtf.reduce_sum(log_softmax * targets, reduced_dim=vocab_dim))
  if z_loss != 0:
    # Auxiliary penalty on |log(z)| (see docstring).
    loss += z_loss * mtf.square(log_z)
  return loss
def sigmoid_cross_entropy_with_logits(logits, targets):
  """Sigmoid cross-entropy loss.

  Computes, elementwise, the numerically stable form of
    z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
  which is max(x, 0) - x * z + log(1 + exp(-|x|)).

  Args:
    logits: a mtf.Tensor
    targets: a mtf.Tensor with the same shape as logits

  Returns:
    a mtf.Tensor whose shape is equal to logits.shape

  Raises:
    ValueError: if the shapes do not match.
  """
  if logits.shape != targets.shape:
    # Bug fix: the two message fragments previously concatenated with no
    # separator, producing "...targets shapelogits=...".
    raise ValueError(
        "logits shape must equal targets shape "
        "logits=%s targets=%s" % (logits.to_string, targets.to_string))
  x = logits
  z = targets
  # relu(x) - x*z + log(1 + exp(-|x|)) avoids overflow for large |x|.
  return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x)))
def weights_nonzero(targets, dtype=tf.float32):
  """Return a mask of the given dtype: 1 where targets != 0, else 0."""
  def _mask_fn(x):
    # Applied slicewise to the laid-out tensor.
    return tf.cast(tf.not_equal(x, 0), dtype)
  return mtf.cwise(_mask_fn, [targets], output_dtype=dtype,
                   name="weights_nonzero")
def dense_relu_dense(x,
                     hidden_channels,
                     dropout=0.0,
                     dropout_broadcast_dims=None,
                     master_dtype=tf.float32,
                     slice_dtype=tf.float32, name=None):
  """Two-layer feed-forward block: dense -> ReLU -> (dropout) -> dense.

  The output has the same number of channels as the input.

  Args:
    x: a mtf.Tensor
    hidden_channels: a mtf.Dimension - channels in the hidden layer
    dropout: an optional float
    dropout_broadcast_dims: an optional list of mtf.Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string

  Returns:
    a mtf.Tensor with the same shape as x.
  """
  with tf.variable_scope(name, default_name="dense_relu_dense"):
    # Project back to the input's channel dimension at the end.
    io_channels = x.shape.dims[-1]
    hidden = dense(x, hidden_channels,
                   use_bias=False, activation=mtf.relu,
                   master_dtype=master_dtype, slice_dtype=slice_dtype,
                   name="wi")
    if dropout != 0.0:
      keep_prob = 1.0 - dropout
      hidden = mtf.dropout(
          hidden, keep_prob,
          noise_shape=hidden.shape - dropout_broadcast_dims)
    return dense(hidden, io_channels, use_bias=False, activation=None,
                 master_dtype=master_dtype, slice_dtype=slice_dtype,
                 name="wo")
def local_1d_halo_exchange(k, v, num_w_blocks, w_dim, mask_right):
  """Halo exchange for keys and values for Local 1D attention.

  When the width is blocked (num_w_blocks is not None), neighboring blocks'
  keys/values are exchanged across the mesh; otherwise the halo is emulated
  by zero-padding along w_dim. With mask_right only the left halo is needed.
  """
  if num_w_blocks is not None:
    exchange = mtf.left_halo_exchange if mask_right else mtf.halo_exchange
    k = exchange(k, num_w_blocks, w_dim, w_dim.size)
    v = exchange(v, num_w_blocks, w_dim, w_dim.size)
  else:
    padding = [w_dim, None] if mask_right else [w_dim, w_dim]
    k = mtf.pad(k, padding, w_dim.name)
    v = mtf.pad(v, padding, w_dim.name)
  return k, v
def local_self_attention_spatial_blocks(
    query_antecedent,
    kv_channels,
    heads,
    memory_w_dim=None,
    mask_right=False,
    master_dtype=tf.float32,
    slice_dtype=tf.float32,
    name=None):
  """Attention to the source position and a neighborhood to the left or right.

  The sequence is divided into blocks of length block_size.
  Attention for a given query position can only see memory positions
  less than or equal to the query position, in the corresponding block
  and the previous block.

  Args:
    query_antecedent: a mtf.Tensor; per the code below the expected shape is
      [batch, num_w_blocks, w_dim, io_channels]
      (the docstring previously claimed a 6-D [batch, num_h_blocks,
      num_w_blocks, h_dim, w_dim, io_channels] shape, which does not match
      how dims are unpacked here).
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    memory_w_dim: mtf Dimension, for the memory width block.
      NOTE(review): this argument is overwritten below before use and is
      effectively ignored — confirm with callers.
    mask_right: bool, flag specifying whether we mask out attention to the right
      for the decoder.
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    a Tensor of shape [batch, num_w_blocks, w_dim, io_channels]

  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="multihead_attention",
      values=[query_antecedent]):
    w_dim, io_channels = query_antecedent.shape.dims[-2:]
    batch, num_w_blocks = query_antecedent.shape.dims[:2]
    # Shared q/k/v/o projection weights, each [heads, io_channels, kv_channels].
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    # Rename dimensions for the memory height and width.
    memory_antecedent = mtf.rename_dimension(
        query_antecedent, w_dim.name, "memory_" + w_dim.name)
    memory_w_dim = memory_antecedent.shape.dims[-2]
    # Call einsum over the query and memory to get query q, keys k and values v.
    q = mtf.einsum(
        [query_antecedent, wq],
        mtf.Shape([batch, heads, num_w_blocks, w_dim, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape([batch, heads, num_w_blocks, memory_w_dim, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape([batch, heads, num_w_blocks, memory_w_dim, kv_channels]))
    # Halo exchange so each block also sees the previous block's keys/values.
    k, v = local_1d_halo_exchange(k, v, num_w_blocks, memory_w_dim, mask_right)
    # Calculate the causal mask to avoid peeking into the future. We compute
    # this once and reuse it for all blocks since the block_size is known.
    mask = None
    if mask_right:
      mask = attention_bias_local_block(
          query_antecedent.mesh, w_dim, memory_w_dim)
    output = dot_product_attention(q, k, v, mask=mask)
    # Project attention output back to io_channels.
    return mtf.einsum(
        [output, wo], mtf.Shape([batch, num_w_blocks, w_dim, io_channels]))
def masked_local_attention_1d(x,
                              kv_channels,
                              heads,
                              window_size=128,
                              master_dtype=tf.float32,
                              slice_dtype=tf.float32,
                              length_per_split=None,
                              return_kv=None,
                              params=None,
                              name=None):
  """Attention to the source position and a neighborhood to the left of it.

  Attention for a given query position p can only see memory positions
  in the range (p - window_size, p].

  Args:
    x: a mtf.Tensor with shape batch_dims + [length, io_channels]
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    window_size: an integer
    master_dtype: a tf.dtype (deprecated - use params arg)
    slice_dtype: a tf.dtype (deprecated - use params arg)
    length_per_split: an optional integer indicating the part of the length
      dimension per processor.  You can omit if the length dimension is not
      split.
    return_kv: an optional list onto which to append the computed k and v
      (side effect: the list is mutated in place).
    params: an optional quadruple of Tensors (see multihead_attention_params())
    name: an optional string.

  Returns:
    a Tensor with the same shape as x

  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="masked_local_attention_1d", values=[x]):
    batch_dims = x.shape.dims[:-2]
    length, io_channels = x.shape.dims[-2:]
    # Use caller-provided projection params if given, else create variables.
    if params is None:
      wq, wk, wv, wo = multihead_attention_vars(
          x.mesh, heads, io_channels, kv_channels,
          master_dtype, slice_dtype, x.dtype)
    else:
      wq, wk, wv, wo = params
    # Get query q, keys k and values v.
    qkv_shape = mtf.Shape(batch_dims + [heads, length, kv_channels])
    q = mtf.einsum([x, wq], qkv_shape)
    k = mtf.einsum([x, wk], qkv_shape)
    v = mtf.einsum([x, wv], qkv_shape)
    if return_kv is not None:
      return_kv.extend([k, v])
    # Choose a suitable block size.
    # We choose the greatest divisor of length_per_split less than or equal
    # to max(window_size, 128)
    if length_per_split is None:
      length_per_split = length.size
    block_length = max(window_size, 128)
    while length_per_split % block_length != 0:
      block_length -= 1
    query_block_length = mtf.Dimension("query_block_length", block_length)
    memory_block_length = mtf.Dimension("memory_block_length", block_length)
    # The num_blocks dimension gets the same name as the length dimension,
    # so it will be split in the same way.
    num_blocks = mtf.Dimension(length.name, length.size // block_length)
    q_shape = batch_dims + [heads, num_blocks, query_block_length, kv_channels]
    kv_shape = batch_dims + [
        heads, num_blocks, memory_block_length, kv_channels]
    q = mtf.reshape(q, q_shape)
    k = mtf.reshape(k, kv_shape)
    v = mtf.reshape(v, kv_shape)
    # augment the keys and values for each block with keys and values for
    # the previous window_size timesteps.
    k = mtf.left_halo_exchange(k, num_blocks, memory_block_length, window_size)
    v = mtf.left_halo_exchange(v, num_blocks, memory_block_length, window_size)
    padded_memory_block_length = mtf.Dimension(
        "memory_block_length", window_size + block_length)
    # Positions within a (padded) memory block / query block, used to build
    # a single bias pattern shared by every block.
    mpos = mtf.range(x.mesh, padded_memory_block_length, tf.float32)
    qpos = mtf.range(x.mesh, query_block_length, tf.float32) + window_size
    # prevent looking forward
    mask = mtf.cast(mtf.greater(mpos, qpos), x.dtype) * -1e9
    # prevent looking >=block_length timesteps backward
    mask += mtf.cast(mtf.less_equal(mpos, qpos - block_length), x.dtype) * -1e9
    # Note: The first window_size-1 positions can see back into pre-time
    # where all the keys and values are zero.  We could mask this out, but we
    # don't.
    o = dot_product_attention(q, k, v, mask=mask)
    # Undo the blocking and project back to io_channels.
    o = mtf.reshape(o, batch_dims + [heads, length, kv_channels])
    return mtf.einsum([o, wo], mtf.Shape(batch_dims + [length, io_channels]))
def masked_local_attention_1d_incremental(x,
                                          prev_k,
                                          prev_v,
                                          step_num,
                                          master_dtype=None,
                                          slice_dtype=None,
                                          params=None,
                                          name=None):
  """Incremental local self-attention (one decode step).

  Incremental version of masked_local_attention_1d().  The key/value caches
  are ring buffers of size window_length indexed by step_num modulo
  window_length.

  Args:
    x: a mtf.Tensor with shape [batch..., io_channels]
    prev_k: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]
    prev_v: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]
    step_num: mtf Scalar with dtype tf.int32
    master_dtype: a tf.dtype (deprecated)
    slice_dtype: a tf.dtype (deprecated)
    params: a quadruple of Tensors (see multihead_attention_params())
    name: an optional string.

  Returns:
    y: A mtf.Tensor with shape [batch..., io_channels]
    new_k: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]
    new_v: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = x.shape.dims[:-1]
  io_channels = x.shape.dims[-1]
  heads, window_length, kv_channels = prev_k.shape.dims[-3:]
  with tf.variable_scope(name, default_name="masked_local_attention_1d"):
    if params is None:
      wq, wk, wv, wo = multihead_attention_vars(
          x.mesh, heads, io_channels, kv_channels,
          master_dtype, slice_dtype, x.dtype)
    else:
      wq, wk, wv, wo = params
    # Single-position projections (no length dimension).
    q = mtf.einsum([x, wq], mtf.Shape(batch_dims + [heads, kv_channels]))
    k = mtf.einsum([x, wk], mtf.Shape(batch_dims + [heads, kv_channels]))
    v = mtf.einsum([x, wv], mtf.Shape(batch_dims + [heads, kv_channels]))
    # One-hot selector of the ring-buffer slot for this step.
    current_position = mtf.equal(
        mtf.range(x.mesh, window_length, dtype=tf.int32),
        mtf.mod(step_num, window_length.size))
    # Overwrite that slot of the caches with the new k/v.
    k = mtf.where(current_position, k, prev_k, output_shape=prev_k.shape)
    v = mtf.where(current_position, v, prev_v, output_shape=prev_v.shape)
    # All cached positions are attendable; no extra mask is needed.
    o = dot_product_attention(q, k, v, mask=None)
    y = mtf.einsum([o, wo], x.shape)
    return y, k, v
def local_2d_halo_exchange(k, v, num_h_blocks, h_dim,
                           num_w_blocks, w_dim, mask_right):
  """Halo exchange for keys and values for Local 2D attention.

  Applies the 1D halo exchange (or zero-padding fallback when the dimension
  is not blocked) independently along the height and then the width axes.
  With mask_right only the left halo is fetched/padded on each axis.
  """
  for blocks_dim, block_size_dim, halo_size in [
      (num_h_blocks, h_dim, h_dim.size),
      (num_w_blocks, w_dim, w_dim.size)]:
    # shape of k is [num_h_blocks, num_w_blocks, h_dim, w_dim, kv_channels]
    if halo_size > 0:
      if blocks_dim is not None:
        # Blocked axis: exchange across the mesh.
        if mask_right:
          k = mtf.left_halo_exchange(k, blocks_dim, block_size_dim, halo_size)
          v = mtf.left_halo_exchange(v, blocks_dim, block_size_dim, halo_size)
        else:
          k = mtf.halo_exchange(k, blocks_dim, block_size_dim, halo_size)
          v = mtf.halo_exchange(v, blocks_dim, block_size_dim, halo_size)
      else:
        # Unblocked axis: emulate the halo with zero padding.
        if mask_right:
          k = mtf.pad(k, [halo_size, None], block_size_dim.name)
          v = mtf.pad(v, [halo_size, None], block_size_dim.name)
        else:
          k = mtf.pad(k, [halo_size, halo_size], block_size_dim.name)
          v = mtf.pad(v, [halo_size, halo_size], block_size_dim.name)
  return k, v
def local_2d_self_attention_spatial_blocks(query_antecedent,
                                           kv_channels,
                                           heads,
                                           memory_h_dim=None,
                                           memory_w_dim=None,
                                           mask_right=False,
                                           master_dtype=tf.float32,
                                           slice_dtype=tf.float32,
                                           name=None):
  """Attention to the source position and a neighborhood to the left or right.

  The sequence is divided into blocks of length block_size.
  Attention for a given query position can only see memory positions
  less than or equal to the query position, in the corresponding block
  and the previous block.

  Args:
    query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
      num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
      query_length, but a different name.
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    memory_h_dim: mtf Dimension, for the memory height block.
      NOTE(review): overwritten below before use — effectively ignored.
    memory_w_dim: mtf Dimension, for the memory width block.
      NOTE(review): overwritten below before use — effectively ignored.
    mask_right: bool, flag specifying whether we mask out attention to the right
      for the decoder.
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    a Tensor of shape
    [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]

  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="multihead_attention", values=[query_antecedent]):
    h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
    batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
    # Shared q/k/v/o projection weights, each [heads, io_channels, kv_channels].
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    # Rename dimensions for the memory height and width.
    memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name,
                                             "memory_" + h_dim.name)
    memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name,
                                             "memory_" + w_dim.name)
    memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
    # Call einsum over the query and memory to get query q, keys k and values v.
    q = mtf.einsum([query_antecedent, wq],
                   mtf.Shape([
                       batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim,
                       kv_channels
                   ]))
    k = mtf.einsum([memory_antecedent, wk],
                   mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
                              memory_h_dim, memory_w_dim, kv_channels]))
    v = mtf.einsum([memory_antecedent, wv],
                   mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
                              memory_h_dim, memory_w_dim, kv_channels]))
    # Halo exchange so each block also sees neighboring blocks' keys/values.
    k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
                                  num_w_blocks, memory_w_dim, mask_right)
    # Calculate the causal mask to avoid peeking into the future. We compute
    # this once and reuse it for all blocks since the block_size is known.
    mask = None
    if mask_right:
      mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
                                           memory_h_dim, memory_w_dim)
    output = dot_product_attention(q, k, v, mask=mask)
    # Project attention output back to io_channels.
    return mtf.einsum(
        [output, wo],
        mtf.Shape(
            [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]))
def rename_length_to_memory_length(
    x, length_name="length", memory_length_name="memory_length"):
  """Return `x` with dimension `length_name` renamed to `memory_length_name`."""
  renamed = mtf.rename_dimension(x, length_name, memory_length_name)
  return renamed
def multihead_attention_vars(
    mesh, heads, io_channels, kv_channels,
    master_dtype, slice_dtype, activation_dtype):
  """Deprecated version of multihead_attention_params with combine=True."""
  variable_dtype = mtf.VariableDType(
      master_dtype, slice_dtype, activation_dtype)
  return multihead_attention_params(
      mesh, heads, io_channels, kv_channels, variable_dtype, combine=True)
def multihead_attention_params(mesh, heads, io_channels, kv_channels,
                               variable_dtype, combine=False):
  """Create Parameters for Multihead Attention.

  If the combine flag is set to True, then we create only one variable
  which stacks together all of the parameters.  Otherwise, we create four
  separate variables.

  Args:
    mesh: a Mesh
    heads: a Dimension
    io_channels: a Dimension
    kv_channels: a Dimension
    variable_dtype: a mtf.VariableDType
    combine: a boolean

  Returns:
    wq: a Tensor with shape [heads, io_channels, kv_channels]
    wk: a Tensor with shape [heads, io_channels, kv_channels]
    wv: a Tensor with shape [heads, io_channels, kv_channels]
    wo: a Tensor with shape [heads, io_channels, kv_channels]
  """
  qkvo = mtf.Dimension("qkvo", 4)
  # Per-matrix init stddevs (fan-in style scaling).
  qk_stddev = (io_channels.size ** -0.5) * (kv_channels.size ** -0.25)
  v_stddev = io_channels.size ** -0.5
  # TODO(noam): should be: o_stddev = (kv_channels.size * heads.size) ** -0.5
  # verify that this still works and change it.
  o_stddev = (io_channels.size * heads.size) ** -0.5
  if combine:
    # Single stacked variable [4, heads, io, kv]; the custom initializer
    # applies a different stddev to each of the four slices.
    def qkvo_initializer(shape,
                         dtype=None,
                         partition_info=None,
                         verify_shape=None):
      del partition_info, verify_shape
      return tf.random_normal(shape, dtype=dtype) * tf.reshape(
          tf.cast([qk_stddev, qk_stddev, v_stddev, o_stddev],
                  dtype or tf.float32), [4, 1, 1, 1])
    var = mtf.get_variable(
        mesh, "qkvo", mtf.Shape([qkvo, heads, io_channels, kv_channels]),
        initializer=qkvo_initializer, dtype=variable_dtype)
    # Split the stacked variable back into the four matrices.
    return mtf.unstack(var, qkvo)
  else:
    return [mtf.get_variable(  # pylint: disable=g-complex-comprehension
        mesh, name, mtf.Shape([heads, io_channels, kv_channels]),
        initializer=tf.random_normal_initializer(stddev=stddev),
        dtype=variable_dtype) for name, stddev in zip(
            ["q", "k", "v", "o"],
            [qk_stddev, qk_stddev, v_stddev, o_stddev])]
def dot_product_attention(q,
                          k,
                          v,
                          mask,
                          dropout=0.0,
                          dropout_broadcast_dims=None,
                          extra_logit=None):
  """Dot-product attention.

  Args:
    q: Tensor with shape [...., length_q, depth_k]. Typically leading
      dimensions are [batch, heads].
    k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
      match with q.
    v: Tensor with shape [..., length_kv, depth_v]. Leading dimensions must
      match with q.
    mask: mask Tensor (see attention_mask())
    dropout: a float.
    dropout_broadcast_dims: an optional list of mtf.Dimension
    extra_logit: an optional scalar or tensor

  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  memory_dim = k.shape.dims[-2]
  # q . k^T, contracting over depth_k.
  logits = mtf.einsum([q, k], mtf.Shape(q.shape.dims[:-1] + [memory_dim]))
  if mask is not None:
    logits += mask
  weights = mtf.softmax(logits, memory_dim, extra_logit=extra_logit)
  if dropout != 0.0:
    weights = mtf.dropout(
        weights, 1.0 - dropout,
        noise_shape=weights.shape - dropout_broadcast_dims)
  # Weighted sum of values.
  value_dim = v.shape.dims[-1]
  return mtf.einsum([weights, v],
                    mtf.Shape(q.shape.dims[:-1] + [value_dim]))
def multihead_attention(query_antecedent,
                        memory_antecedent,
                        mask,
                        kv_channels,
                        heads,
                        dropout=0.0,
                        dropout_broadcast_dims=None,
                        master_dtype=tf.float32,
                        slice_dtype=tf.float32,
                        name="multihead_attention"):
  """Multihead scaled-dot-product attention with input/output transformations.

  In order to use only one variable containing the four weight matrices
  packed together, we insist that the query and memory antecedents have the
  same dimensionality (io_channels) and that the keys and values have the
  same dimensionality (kv_channels).

  Args:
    query_antecedent: a mtf.Tensor with shape
      [<batch_dims>, query_length, io_channels]
    memory_antecedent: a mtf.Tensor with shape
      [batch, memory_length, io_channels] (optional; if None, self-attention
      is performed over query_antecedent)
    mask: mask Tensor (see attention_mask())
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    dropout: a floating point value
    dropout_broadcast_dims: an optional list of mtf.Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    A mtf.Tensor with shape [batch, query_length, io_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = query_antecedent.shape.dims[:-2]
  query_length, io_channels = query_antecedent.shape.dims[-2:]
  with tf.variable_scope(name,
                         default_name="multihead_attention",
                         values=[query_antecedent, memory_antecedent]):
    # Shared q/k/v/o projection weights, each [heads, io_channels, kv_channels].
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    if memory_antecedent is None:
      # Self-attention: attend over the (renamed) query sequence itself.
      memory_antecedent = rename_length_to_memory_length(
          query_antecedent, query_length.name)
    memory_batch_dims = memory_antecedent.shape.dims[:-2]
    memory_length, memory_channels = memory_antecedent.shape.dims[-2:]
    if memory_batch_dims != batch_dims:
      raise ValueError("memory batch must equal query batch")
    if memory_channels != io_channels:
      raise ValueError("memory channels must equal query channels")
    q = mtf.einsum(
        [query_antecedent, wq],
        mtf.Shape(batch_dims + [heads, query_length, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    o = dot_product_attention(
        q, k, v, mask, dropout, dropout_broadcast_dims)
    # Combine heads and project back to io_channels.
    return mtf.einsum(
        [o, wo], mtf.Shape(batch_dims + [query_length, io_channels]))
def multihead_self_attention_incremental(query_antecedent,
                                         prev_k,
                                         prev_v,
                                         step_num,
                                         master_dtype,
                                         slice_dtype,
                                         name="multihead_attention"):
  """Incremental self-attention (one decode step).

  In order to use only one variable containing the four weight matrices
  packed together, we insist that the query and memory antecedents have the
  same dimensionality (io_channels) and that the keys and values have the
  same dimensionality (kv_channels).

  Args:
    query_antecedent: a mtf.Tensor with shape [batch..., io_channels]
    prev_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
    prev_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
    step_num: mtf Scalar with dtype tf.int32
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    y: A mtf.Tensor with shape [batch..., io_channels]
    new_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
    new_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = query_antecedent.shape.dims[:-1]
  io_channels = query_antecedent.shape.dims[-1]
  heads, memory_length, kv_channels = prev_k.shape.dims[-3:]
  with tf.variable_scope(name, default_name="multihead_attention"):
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    memory_antecedent = query_antecedent
    # Single-position projections (no length dimension).
    q = mtf.einsum(
        [query_antecedent, wq],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    # Write this step's k/v into position step_num of the caches
    # (one_hot zeroes out every other position of the update).
    k = prev_k + mtf.multiply(
        k, mtf.one_hot(step_num, memory_length, dtype=prev_k.dtype),
        output_shape=prev_k.shape)
    v = prev_v + mtf.multiply(
        v, mtf.one_hot(step_num, memory_length, dtype=prev_v.dtype),
        output_shape=prev_v.shape)
    # Mask out cache positions beyond the current step.
    mask = mtf.cast(
        mtf.greater(mtf.range(
            query_antecedent.mesh, memory_length, dtype=tf.int32), step_num),
        q.dtype) * -1e9
    o = dot_product_attention(q, k, v, mask)
    y = mtf.einsum([o, wo], query_antecedent.shape)
    return y, k, v
def multihead_encdec_attention_incremental(query_antecedent,
                                           wq, wo, k, v,
                                           mask,
                                           name="multihead_attention"):
  """Incremental attention over a precomputed encoder memory (one decode step).

  The encoder keys `k` and values `v` are fixed, so only the query
  projection and the output projection are computed here.  memory_dims is a
  subset of query_dims.

  Args:
    query_antecedent: a mtf.Tensor with shape query_dims + [io_channels]
    wq: a mtf.Tensor with shape [heads, io_channels, kv_channels]
    wo: a mtf.Tensor with shape [heads, io_channels, kv_channels]
    k: memory_dims + [heads, memory_length, kv_channels]
    v: memory_dims + [heads, memory_length, kv_channels]
    mask: mask Tensor (see attention_mask())
    name: an optional string.

  Returns:
    A mtf.Tensor with shape [batch, qlen, io_channels]
  """
  heads, _, kv_channels = k.shape.dims[-3:]
  query_dims = query_antecedent.shape.dims[:-1]
  with tf.variable_scope(name, default_name="multihead_attention"):
    q_shape = mtf.Shape(query_dims + [heads, kv_channels])
    q = mtf.einsum([query_antecedent, wq], q_shape)
    attended = dot_product_attention(q, k, v, mask)
    return mtf.einsum([attended, wo], query_antecedent.shape)
def attention_mask_ignore_padding(inputs, dtype=tf.float32):
  """Bias for encoder-decoder attention: -1e9 at padding (id 0) positions.

  Args:
    inputs: a mtf.Tensor with shape [..., length_dim]
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., memory_length_dim]
  """
  memory_inputs = rename_length_to_memory_length(inputs)
  is_padding = mtf.equal(memory_inputs, 0)
  return mtf.cast(is_padding, dtype) * -1e9
def attention_mask_autoregressive(query_pos, dtype=tf.float32):
  """Bias for self-attention where attention to the right is disallowed.

  Args:
    query_pos: a mtf.Tensor with shape [..., length_dim]
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., length_dim, memory_length_dim]
  """
  memory_pos = rename_length_to_memory_length(query_pos)
  # A memory position strictly after the query position is illegal.
  in_future = mtf.less(query_pos, memory_pos)
  return mtf.cast(in_future, dtype) * -1e9
def attention_mask_same_segment(
    query_segment, memory_segment=None, dtype=tf.float32):
  """Bias for attention where attention between segments is disallowed.

  Args:
    query_segment: a mtf.Tensor with shape [..., length_dim]
    memory_segment: a mtf.Tensor with shape [..., memory_length_dim];
      defaults to query_segment when not supplied.
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., length_dim, memory_length_dim]
  """
  memory_segment = rename_length_to_memory_length(
      memory_segment or query_segment)
  cross_segment = mtf.not_equal(query_segment, memory_segment)
  return mtf.cast(cross_segment, dtype) * -1e9
def attention_bias_local_block(mesh, block_length, memory_length,
                               dtype=tf.int32):
  """Bias for local-block attention where attention to the right is disallowed.

  The bias is built from two pieces: the previous block (which does not
  overlap the query block) is fully visible, while within the current block
  every memory position to the right of the query position is masked.

  Args:
    mesh: a MeshTensorflow object
    block_length: a mtf.Dimension
    memory_length: a mtf.Dimension
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [block_length, memory_length]
  """
  memory_length = mtf.Dimension(memory_length.name, block_length.size)
  # Previous block: all-zeros => no masking.
  prev_block_visible = mtf.zeros(
      mesh, [block_length, memory_length], dtype=dtype)
  query_pos = mtf.range(mesh, block_length, dtype=dtype)
  memory_pos = mtf.range(mesh, memory_length, dtype=dtype)
  # Current block: mask positions strictly to the right of the query.
  current_block = mtf.cast(mtf.less(query_pos, memory_pos), dtype=dtype)
  bias = mtf.concat([prev_block_visible, current_block], memory_length.name)
  return mtf.cast(bias, dtype=tf.float32) * -1e9
def attention_bias_local_2d_block(mesh,
                                  h_dim,
                                  w_dim,
                                  memory_h_dim,
                                  memory_w_dim,
                                  dtype=tf.int32):
  """Bias for 2D local-block attention where attention to the right is masked.

  Create the bias matrix by using two separate masks, one for the memory part
  which doesn't overlap with the query and second which interacts with the
  query and should be disallowed to look to the right of the current query
  position.

  Args:
    mesh: a MeshTensorflow object
    h_dim: a mtf.Dimension
    w_dim: a mtf.Dimension
    memory_h_dim: a mtf.Dimension
    memory_w_dim: a mtf.Dimension
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [block_length, memory_length]
  """
  memory_height = mtf.Dimension(memory_h_dim.name, h_dim.size)
  memory_width = mtf.Dimension(memory_w_dim.name, w_dim.size)
  # Fully visible (all-zeros) regions for the non-overlapping memory parts.
  mask_top_visible = mtf.zeros(mesh, [h_dim, memory_height], dtype=dtype)
  mask_left_visible = mtf.zeros(mesh, [w_dim, memory_width], dtype=dtype)
  # NOTE(review): this compares a range over memory_height with a range over
  # memory_width (broadcast to 2D), and the first range hard-codes tf.int32
  # rather than `dtype` — verify this is the intended query mask.
  mask_query = mtf.greater(
      mtf.range(mesh, memory_height, dtype=tf.int32),
      mtf.range(mesh, memory_width, dtype=dtype))
  width_mask = mtf.concat([mask_left_visible, mask_query], memory_width.name)
  mask = mtf.cast(
      mtf.concat([mask_top_visible, width_mask], memory_height.name),
      dtype=tf.float32) * -1e9
  return mask
def multiplicative_jitter(x, epsilon=1e-2):
  """Multiply values by a random number between 1-epsilon and 1+epsilon.

  Makes models more resilient to rounding errors introduced by bfloat16.
  This seems particularly important for logits.

  Args:
    x: a mtf.Tensor
    epsilon: a floating point value

  Returns:
    a mtf.Tensor with the same type and shape as x.
  """
  if epsilon == 0:
    # No jitter requested: return the input unchanged.
    return x
  noise = mtf.random_uniform(
      x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0 + epsilon,
      dtype=x.dtype)
  return x * noise
def multihead_self_attention_memory_compressed(x,
                                               mask_right,
                                               compression_factor,
                                               kv_channels,
                                               heads,
                                               dropout=0.0,
                                               dropout_broadcast_dims=None,
                                               master_dtype=tf.float32,
                                               slice_dtype=tf.float32,
                                               name="multihead_attention"):
  """Memory-compressed self-attention.

  The memory is first average-pooled (strided) to make it shorter by
  a factor of compression_factor.

  Args:
    x: a mtf.Tensor with shape
      [<batch_dims>, query_length, io_channels]
    mask_right: a boolean
    compression_factor: an integer
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    dropout: a floating point value
    dropout_broadcast_dims: an optional list of mtf.Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    A mtf.Tensor with shape [batch, query_length, io_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = x.shape.dims[:-2]
  length, io_channels = x.shape.dims[-2:]
  with tf.variable_scope(name,
                         default_name="compressed_attention",
                         values=[x]):
    wq, wk, wv, wo = multihead_attention_vars(
        x.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, x.dtype)
    # Shorten the memory by averaging groups of compression_factor positions.
    memory_antecedent = compress_mean(x, length, compression_factor)
    memory_antecedent = rename_length_to_memory_length(memory_antecedent)
    memory_length = memory_antecedent.shape.dims[-2]
    q = mtf.einsum(
        [x, wq],
        mtf.Shape(batch_dims + [heads, length, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    if mask_right:
      # A compressed memory position covers the original positions up to
      # (index * factor + factor - 1); mask it if that extends past the query.
      query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
      memory_pos = (
          mtf.range(x.mesh, memory_length, dtype=tf.int32) * compression_factor
          + (compression_factor - 1))
      mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
    else:
      mask = None
    # extra_logit=0.0 lets the softmax attend to "nothing".
    o = dot_product_attention(
        q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
    return mtf.einsum(
        [o, wo], mtf.Shape(batch_dims + [length, io_channels]))
def compress_mean(x, dim, compression_factor):
  """Compress a dimension by averaging non-overlapping groups of positions.

  Args:
    x: a Tensor
    dim: a dimension in x.shape (its size must be divisible by
      compression_factor for the reshape below to be valid)
    compression_factor: an integer

  Returns:
    a Tensor
  """
  pos = x.shape.dims.index(dim)
  compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
  factor_dim = mtf.Dimension(
      "compression_factor", compression_factor)
  # Split [..., dim, ...] into [..., dim//factor, factor, ...] and average
  # over the factor dimension.
  split_shape = (x.shape.dims[:pos]
                 + [compressed_dim, factor_dim]
                 + x.shape.dims[pos + 1:])
  grouped = mtf.reshape(x, split_shape)
  return mtf.reduce_mean(grouped, reduced_dim=factor_dim)
def embedding_weights(mesh,
                      vocab_dim,
                      output_dim,
                      variable_dtype,
                      name="embedding",
                      ensemble_dim=None,
                      initializer=None):
  """Create the variable holding an embedding table.

  Args:
    mesh: a mtf.Mesh
    vocab_dim: a mtf.Dimension (vocabulary axis)
    output_dim: a mtf.Dimension (embedding-size axis)
    variable_dtype: a mtf.VariableDType
    name: a string, variable name
    ensemble_dim: an optional mtf.Dimension or list of mtf.Dimension
    initializer: an optional tf initializer (defaults to random normal)

  Returns:
    a mtf.Tensor of shape ensemble_dims + [vocab_dim, output_dim]
  """
  # Normalize ensemble_dim to a (possibly empty) list of dimensions.
  if not ensemble_dim:
    ensemble_dims = []
  elif isinstance(ensemble_dim, list):
    ensemble_dims = ensemble_dim
  else:
    ensemble_dims = [ensemble_dim]
  weight_shape = mtf.Shape(ensemble_dims) + [vocab_dim, output_dim]
  if initializer is None:
    initializer = tf.random_normal_initializer()
  return mtf.get_variable(
      mesh, name, weight_shape, dtype=variable_dtype, initializer=initializer)
def embedding(indices, vocab_dim, output_dim, variable_dtype, name="embedding"):
  """Look up embeddings for `indices` in a learned table."""
  table = embedding_weights(
      indices.mesh, vocab_dim, output_dim, variable_dtype, name)
  return mtf.gather(table, indices, vocab_dim)
def max_pool2d(x, ksize=(2, 2), name="max_pool2d"):
  """2D max pooling over the HW dimensions of an NHWC tensor.

  There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 3].
  Only non-overlapping pooling is supported (strides == ksize), and the
  input HW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  # A 1x1 window with stride 1 is the identity; skip building an op.
  if tuple(ksize) == (1, 1):
    return x
  pool_op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="MAX_2D", name=name)
  return pool_op.outputs[0]
def max_pool3d(x, ksize=(2, 2, 2), name="max_pool3d"):
  """3D max pooling over the DHW dimensions of an NDHWC tensor.

  There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 10, 3].
  Only non-overlapping pooling is supported (strides == ksize), and the
  input DHW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  # A 1x1x1 window with stride 1 is the identity; skip building an op.
  if tuple(ksize) == (1, 1, 1):
    return x
  pool_op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="MAX_3D", name=name)
  return pool_op.outputs[0]
def avg_pool2d(x, ksize=(2, 2), name="avg_pool2d"):
  """2D average pooling over the HW dimensions of an NHWC tensor.

  There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 3].
  Only non-overlapping pooling is supported (strides == ksize), and the
  input HW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  # A 1x1 window with stride 1 is the identity; skip building an op.
  if tuple(ksize) == (1, 1):
    return x
  pool_op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="AVG_2D", name=name)
  return pool_op.outputs[0]
def avg_pool3d(x, ksize=(2, 2, 2), name="avg_pool3d"):
  """3D average pooling over the DHW dimensions of an NDHWC tensor.

  There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 10, 3].
  Only non-overlapping pooling is supported (strides == ksize), and the
  input DHW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  # A 1x1x1 window with stride 1 is the identity; skip building an op.
  if tuple(ksize) == (1, 1, 1):
    return x
  pool_op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="AVG_3D", name=name)
  return pool_op.outputs[0]
def _reversible_half_residual_grad(
    explicit_inputs, all_inputs, forward_operations, outputs, output_grads):
  """Backpropagation function for a revnet.

  Custom-gradient counterpart of `_half_residual_and_swap`
  (https://arxiv.org/abs/1707.04585).  Instead of storing the forward
  activations, it re-runs `f` on x2 and reconstructs x1 = y1 - f(x2) from the
  forward relation y1 = x1 + f(x2).

  Args:
    explicit_inputs: the four explicit inputs of the forward function
      (x1, x1_backwards, x2, x2_backwards).
    all_inputs: explicit_inputs followed by extra (implicitly captured)
      inputs such as variables used by f.
    forward_operations: operations recorded for the forward computation; the
      last one must be the mtf.AddOperation producing y1.
    outputs: the forward outputs (y2, y2_backwards, y1, y1_backwards).
    output_grads: gradients w.r.t. the outputs.  The *_backwards slots carry
      activations reconstructed by the downstream layer (presumably None for
      the final layer, in which case the stored activations are used).

  Returns:
    a list matching all_inputs:
    [dx1, reconstructed x1, dx2, reconstructed x2] + extra-input gradients.

  Raises:
    ValueError: if the last forward operation is not an addition.
  """
  x1, _, x2, _ = explicit_inputs
  extra_inputs = all_inputs[len(explicit_inputs):]
  _, _, y1, _ = outputs
  dy2, dy2_backwards, dy1, dy1_backwards = output_grads
  # last operation should be an addition to produce y1
  if not isinstance(forward_operations[-1], mtf.AddOperation):
    raise ValueError("expected an addition here")
  f_ops = forward_operations[:-1]
  orig_fx2 = f_ops[-1].outputs[0]
  orig_x2 = x2
  # Prefer activations reconstructed by the downstream layer (if provided)
  # over the stored forward activations.
  if dy2_backwards is not None:
    x2 = dy2_backwards
  if dy1_backwards is not None:
    y1 = dy1_backwards
  graph = all_inputs[0].graph
  # Recompute f(x2) by cloning the forward ops with x2 substituted.
  f_again_ops, mapping = graph.clone_operations(f_ops, {orig_x2: x2})
  fx2 = mapping[orig_fx2]
  # Reconstruct x1 from y1 = x1 + f(x2).
  x1 = y1 - fx2
  # Backprop dy1 through the recomputed f to get gradients w.r.t. x2 and the
  # extra inputs (e.g. variables inside f).
  grads = mtf.gradients(ys=[fx2], xs=[x2] + extra_inputs, grad_ys=[dy1],
                        operations=f_again_ops)
  dx2 = dy2 + grads[0]
  extra_inputs_grads = grads[1:]
  dx1 = dy1
  return [dx1, x1, dx2, x2] + extra_inputs_grads
def _half_residual_and_swap(x1, x1_backwards, x2, x2_backwards, f=None):
return x2, x2_backwards, x1 + f(x2), x1_backwards
def reversible_half_residual_and_swap(x1,
                                      x1_backwards,
                                      x2,
                                      x2_backwards,
                                      f,
                                      recompute_grads=True):
  """Building block of a revnet (https://arxiv.org/abs/1707.04585).

  All input and output Tensors share the same shape and dtype.  The forward
  computation is:

    y1 = x1 + f(x2)
    y2 = x2

  The *_backwards tensors are bookkeeping for backpropagation: pass None for
  the first layer, then thread each layer's outputs into the next, e.g.:

    x1, x1_backwards, x2, x2_backwards = x, None, x, None
    for f in my_functions:
      x1, x1_backwards, x2, x2_backwards = mtf.layers.reversible_half_residual(
          x1, x1_backwards, x2, x2_backwards)
    y = (x1 + x2) / 2

  Args:
    x1: a Tensor
    x1_backwards: a Tensor or None
    x2: a Tensor
    x2_backwards: a Tensor or None
    f: a function from Tensor to Tensor
    recompute_grads: a boolean; if True, use a custom gradient that
      reconstructs activations instead of storing them

  Returns:
    y2: a Tensor
    y2_backwards: a Tensor
    y1: a Tensor
    y1_backwards: a Tensor
  """
  if not recompute_grads:
    return _half_residual_and_swap(x1, x1_backwards, x2, x2_backwards, f)
  backwards1 = mtf.zeros_like(x1) if x1_backwards is None else x1_backwards
  backwards2 = mtf.zeros_like(x2) if x2_backwards is None else x2_backwards
  forward_fn = functools.partial(_half_residual_and_swap, f=f)
  return mtf.custom_gradient(
      forward_fn,
      _reversible_half_residual_grad,
      [x1, backwards1, x2, backwards2])
|
# coding=utf-8
# Copyright 2020 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers implemented in Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gin
from mesh_tensorflow import ops_with_redefined_builtins as mtf
import tensorflow.compat.v1 as tf
@gin.configurable
def unit_scaling_convention(value=False):
  """Whether the unit-scaling convention is enabled (configure via gin).

  Under this convention, all weights are initialized with unit variance and
  the outputs of most contractions (matmul/einsum) are divided by the square
  root of the sizes of the contracting dimensions, instead of folding that
  scaling into the weight initialization (the inverse-square-root convention
  often attributed to http://proceedings.mlr.press/v9/glorot10a.html).

  Both conventions aim to keep activation scale roughly constant from layer
  to layer (typically RMS=O(1)); they differ in whether the scaling lives in
  the parameters (theirs) or as an explicit multiplier on the activations
  (ours).  Scaling the activations has three advantages:

  1. Adaptive optimizers need not account for differently-scaled parameters,
     since all parameters live on the same unit scale.
  2. There is no error-prone coordination between variable initializers and
     the optimizer about each variable's expected scale (usually guessed from
     dimension order).
  3. Contractions without parameters (e.g. attention logits; see section
     3.2.1 of https://arxiv.org/abs/1706.03762) also benefit from dividing by
     the square root of the contracting dimensions, and the habit carries
     over.

  Note: when switching to this convention it is probably necessary to raise
  the learning rate, since larger parameters need larger updates.  Adafactor
  is an exception, as it scales updates relative to current parameter values.

  Args:
    value: a boolean

  Returns:
    a boolean
  """
  return value
def us_einsum(xs, *args, **kwargs):
  """Einsum that optionally applies the unit-scaling convention.

  When unit_scaling_convention() is enabled, the result is divided by the
  square root of the product of the contracted dimension sizes.

  Args:
    xs: a list of mtf.Tensor
    *args: arguments to mtf.einsum
    **kwargs: keyword arguments to mtf.einsum

  Returns:
    a mtf.Tensor
  """
  out = mtf.einsum(xs, *args, **kwargs)
  if unit_scaling_convention():
    # Contracted dimensions are those present in some input but absent from
    # the output.
    input_dims = set(sum([x.shape.dims for x in xs], []))
    contracted = [d for d in input_dims if d not in out.shape.dims]
    out *= mtf.Shape(contracted).size ** -0.5
  return out
def dense(x,
          new_dims,
          reduced_dims=None,
          expert_dims=None,
          use_bias=True,
          activation=None,
          master_dtype=tf.float32,
          slice_dtype=tf.float32,
          variable_dtype=None,
          kernel_initializer=None,
          kernel_weights=None,
          name=None):
  """Dense layer doing (kernel*x + bias) computation.

  Args:
    x: a mtf.Tensor of shape [..., reduced_dims].
    new_dims: a list of mtf.Dimension.
    reduced_dims: a list of mtf.Dimensions of x to be reduced.
      If omitted (deprecated interface), we reduce the last dimension.
    expert_dims: an optional list of mtf.Dimension which represent different
      experts. Different experts get different weights.
    use_bias: a boolean, whether to add bias.
    activation: an optional function from mtf.Tensor to mtf.Tensor
    master_dtype: a tf.dtype (deprecated - use variable_dtype)
    slice_dtype: a tf.dtype (deprecated - use variable_dtype)
    variable_dtype: a mtf.VariableDType
    kernel_initializer: an initializer for kernel variable.
    kernel_weights: mtf.Tensor weights matrix to use for dense computation
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor of shape [..., new_dims].
  """
  # Allow a single Dimension to be passed for new_dims.
  if not isinstance(new_dims, list):
    new_dims = [new_dims]
  if variable_dtype is None:
    # Legacy path: build the variable dtype from the deprecated
    # master/slice dtypes and the activation dtype of x.
    variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
  if expert_dims is None:
    expert_dims = []
  if reduced_dims is None:
    # Deprecated interface: reduce the last dimension of x.
    tf.logging.warning(
        "Deprecation warning - it is recommended to pass reduced_dims "
        "explicitly to mtf.layers.dense() so as not to depend on dimension "
        "order. To silence this warning, explicitly pass "
        "reduced_dims=x.shape.dims[-1:] (in scope %s)"
        % tf.get_variable_scope().name)
    reduced_dims = x.shape.dims[-1:]
  # if any reduced dims have the same names as new dims, first change these
  # dimension names in the input so as to avoid name conflict in the weight
  # matrix.
  # (copy the list so the caller's list is not mutated)
  reduced_dims = reduced_dims[:]
  for i in range(len(reduced_dims)):
    if reduced_dims[i] in new_dims:
      original_name = reduced_dims[i].name
      tmp_name = "_" + original_name
      reduced_dims[i] = mtf.Dimension(tmp_name, reduced_dims[i].size)
      x = mtf.rename_dimension(x, original_name, tmp_name)
  # The output keeps all non-reduced dims of x and appends new_dims.
  output_shape = mtf.Shape([d for d in x.shape.dims if d not in reduced_dims] +
                           new_dims)
  if not kernel_weights:
    # No caller-supplied kernel: create (or reuse) the kernel variable.
    kernel_weights = get_dense_kernel_weights(x, new_dims, reduced_dims,
                                              expert_dims, kernel_initializer,
                                              name, variable_dtype,
                                              master_dtype, slice_dtype)
  with tf.variable_scope(name, default_name="dense"):
    # us_einsum applies the unit-scaling convention if enabled.
    y = us_einsum([x, kernel_weights], output_shape)
    if use_bias:
      b = mtf.get_variable(
          x.mesh,
          "bias",
          mtf.Shape(expert_dims + new_dims),
          initializer=tf.zeros_initializer(),
          dtype=variable_dtype)
      y += b
    if activation is not None:
      y = activation(y)
    return y
def get_dense_kernel_weights(x,
                             new_dims,
                             reduced_dims,
                             expert_dims,
                             kernel_initializer,
                             name=None,
                             variable_dtype=None,
                             master_dtype=tf.float32,
                             slice_dtype=tf.float32):
  """Create the kernel variable for a dense layer.

  Args:
    x: a mtf.Tensor.
    new_dims: a list of mtf.Dimension.
    reduced_dims: a list of mtf.Dimensions of x to be reduced.
    expert_dims: an optional list of mtf.Dimension which represent different
      experts. Different experts get different weights.
    kernel_initializer: an initializer for the kernel variable, or a
      DenseInitializer, or None (defaults to VarianceScalingInitializer).
    name: a string used for tf.variable_scope.
    variable_dtype: a mtf.VariableDType
    master_dtype: a tf.dtype (deprecated - use variable_dtype)
    slice_dtype: a tf.dtype (deprecated - use variable_dtype)

  Returns:
    a mtf.Tensor, the kernel cast to x.dtype.
  """
  if variable_dtype is None:
    variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
  kernel_shape = mtf.Shape(expert_dims + reduced_dims + new_dims)
  with tf.variable_scope(name, default_name="dense"):
    initializer = kernel_initializer
    if initializer is None:
      initializer = VarianceScalingInitializer()
    # A DenseInitializer adapts its scale to the contraction dimensions.
    if isinstance(initializer, DenseInitializer):
      initializer = initializer(reduced_dims, new_dims)
    kernel = mtf.get_variable(
        x.mesh,
        "kernel",
        kernel_shape,
        initializer=initializer,
        dtype=variable_dtype)
    return mtf.cast(kernel, x.dtype)
def dense_product(x,
                  reduced_dims,
                  new_dims,
                  activation_functions=None,
                  name="dense_product",
                  **kwargs):
  """Component-wise product of multiple dense layers.

  e.g. if activation_functions=["linear", "sigmoid"], then this implements
  Gated Linear Units https://arxiv.org/pdf/1612.08083.pdf

  Args:
    x: a Tensor
    reduced_dims: a list of Dimensions.
    new_dims: a list of Dimensions.
    activation_functions: a list of activation functions (or a singleton)
      Each can be a either:
        - a callable function from Tensor to Tensor
        - a string function name from namespace mtf)
        - None or "linear", meaning no activation function
    name: an optional string
    **kwargs: additional kwargs for mtf.layers.dense()

  Returns:
    a Tensor, the element-wise product of the factor layers.
  """
  if not isinstance(activation_functions, list):
    activation_functions = [activation_functions]
  multiple_factors = len(activation_functions) > 1
  factors = []
  for idx, act in enumerate(activation_functions):
    # Resolve string/None activation specs to callables (or no-op).
    if act == "linear":
      act = None
    elif isinstance(act, str):
      act = getattr(mtf, act)
    layer_name = "%s_%d" % (name, idx) if multiple_factors else name
    factors.append(
        dense(x,
              reduced_dims=reduced_dims,
              new_dims=new_dims,
              activation=act,
              name=layer_name,
              **kwargs))
  return functools.reduce(mtf.multiply, factors)
class DenseInitializer(object):
  """Base class for initializers that can be passed to dense().

  Subclasses override __call__, mapping (reduced_dims, new_dims) to a
  tf initializer instance.
  """

  def __call__(self, reduced_dims, new_dims):
    raise NotImplementedError("not implemented")
@gin.configurable
class VarianceScalingInitializer(DenseInitializer):
  """Initializer capable of adapting its scale to the shape of weights.

  With `distribution="normal"`, samples are drawn from a truncated normal
  distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
    1.0 if unit_scaling_convention() is turned on
    otherwise:
      number of input units in the weight tensor, if mode = "fan_in"
      number of output units, if mode = "fan_out"
      average of the numbers of input and output units, if mode = "fan_avg"

  With `distribution="uniform"`,
  samples are drawn from a uniform distribution
  within [-limit, limit], with `limit = sqrt(3 * scale / n)`.

  # Arguments
    scale: Scaling factor (positive float).
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: Random distribution to use. One of "normal", "uniform".
  """

  def __init__(self, scale=1.0,
               mode="fan_in",
               distribution="normal"):
    self.scale = scale
    self.mode = mode.lower()
    self.distribution = distribution.lower()

  def __call__(self, reduced_dims, new_dims):
    """Return a tf initializer for a kernel with the given dimensions.

    Args:
      reduced_dims: a list of mtf.Dimension (contracted inputs).
      new_dims: a list of mtf.Dimension (outputs).

    Returns:
      a tf initializer.

    Raises:
      ValueError: on invalid mode/distribution, or if "fan_out"/"fan_avg"
        is combined with the unit-scaling convention.
    """
    fan_in = mtf.list_product(d.size for d in reduced_dims)
    fan_out = mtf.list_product(d.size for d in new_dims)
    scale = self.scale
    if self.mode == "fan_in":
      # Under the unit-scaling convention, fan-in scaling is applied to the
      # activations instead (see us_einsum), so keep unit variance here.
      if not unit_scaling_convention():
        scale /= max(1., fan_in)
    elif self.mode == "fan_out":
      if unit_scaling_convention():
        raise ValueError("Unit scaling convention only works with \"fan_in\"")
      scale /= max(1., fan_out)
    elif self.mode == "fan_avg":
      if unit_scaling_convention():
        raise ValueError("Unit scaling convention only works with \"fan_in\"")
      scale /= max(1., float(fan_in + fan_out) / 2)
    else:
      # Fixed typo in the error message ("expected on of" -> "expected one
      # of").
      raise ValueError(
          "Invalid `mode` argument: "
          "expected one of {\"fan_in\", \"fan_out\", \"fan_avg\"} "
          "but got %s" % (self.mode,))
    stddev = scale ** 0.5
    if self.distribution == "normal":
      return tf.truncated_normal_initializer(stddev=stddev)
    elif self.distribution == "uniform":
      limit = stddev * 3. ** 0.5
      return tf.random_uniform_initializer(minval=-limit, maxval=limit)
    else:
      raise ValueError("Invalid `distribution` argument: "
                       "expected one of {\"normal\", \"uniform\"} "
                       "but got %s" % (self.distribution,))
def conv1d(x, output_dim, filter_size=3, stride=1, **kw_args):
  """1D Convolution.

  Implemented by inserting a dummy height dimension of size 1 and running a
  (1 x filter_size) 2D convolution.

  Args:
    x: a mtf.Tensor of format NWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a positive integer, the filter width.
    stride: a positive integer, the stride.
    **kw_args: optional keyword arguments to mtf.layers.conv2d.

  Returns:
    a mtf.Tensor of format NWO, where O is the output dimension.
  """
  fake_height_dim = mtf.Dimension("fake_height", 1)
  nhwc = mtf.reshape(
      x, mtf.Shape(x.shape.dims[:-2] + [fake_height_dim] + x.shape.dims[-2:]))
  conv_out = conv2d(
      nhwc,
      output_dim,
      filter_size=(1, filter_size),
      strides=(1, stride),
      **kw_args)
  # Drop the dummy height dimension and replace the input channel dimension
  # with the output dimension.
  kept_dims = [
      d for d in nhwc.shape.dims
      if d != fake_height_dim and d != nhwc.shape.dims[-1]
  ]
  return mtf.reshape(conv_out, mtf.Shape(kept_dims + [output_dim]))
def _depthwise_conv1d_hack(x,
                           depth_dim,
                           length_dim,
                           min_relative_pos=-1,
                           max_relative_pos=1,
                           name=None,
                           use_bias=True,
                           initializer_scale=1.0,
                           kernel_depth_weights=None):
  """Hacky 1d depthwise convolution built from shifts and per-depth denses.

  For each tap position in [min_relative_pos, max_relative_pos], the input is
  shifted along length_dim and passed through a dense layer with per-depth
  (expert) weights; the tap outputs are summed.

  Args:
    x: a mtf.Tensor
    depth_dim: mtf.Dimension,
    length_dim: mtf.Dimension,
    min_relative_pos: int, min relative position,
    max_relative_pos: int, max relative position,
    name: str, variable_scope name,
    use_bias: Bool, whether to use bias (applied only on the first tap),
    initializer_scale: int, initializer scale,
    kernel_depth_weights: an optional list of kernel weight tensors, one per
      relative position; each has a width equal to the depth over which the
      separable conv operation is being "separated"

  Returns:
    an mtf.Tensor
  """
  accumulated = 0
  kernel_size = max_relative_pos - min_relative_pos + 1
  with tf.variable_scope(name, default_name="depthwise_conv_hack"):
    for tap in range(kernel_size):
      offset = min_relative_pos + tap
      shifted = mtf.shift(x, -offset, length_dim, wrap=False)
      tap_weights = (kernel_depth_weights[tap]
                     if kernel_depth_weights else None)
      accumulated += dense(
          shifted,
          new_dims=[],
          reduced_dims=[],
          expert_dims=[depth_dim],
          kernel_weights=tap_weights,
          name="depthwise_dense_%d" % tap,
          # Bias only once, on the first tap, so it is added a single time.
          use_bias=use_bias and (tap == 0),
          kernel_initializer=VarianceScalingInitializer(
              scale=initializer_scale / kernel_size))
  return accumulated
def separable_conv1d(x,
                     output_dim,
                     min_relative_pos=-1,
                     max_relative_pos=1,
                     depthwise_filter_initializer_scale=1.0,
                     pointwise_filter_initializer_scale=1.0,
                     name=None,
                     use_bias=True,
                     kernel_depth_weights=None):
  """1-D convolution with separable filters.

  A depthwise pass (one filter per channel) followed by a pointwise 1x1
  projection to output_dim.  The filter size is
  `max_relative_pos - min_relative_pos + 1`.

  Args:
    x: a mtf.Tensor of format NWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    min_relative_pos: an integer, the inclusive minimum relative position of
      the depthwise filter; zero means the left end of the filter aligns with
      the left end of the input.
    max_relative_pos: an integer, the inclusive maximum relative position of
      the depthwise filter; zero means the right end of the filter aligns
      with the right end of the input.
    depthwise_filter_initializer_scale: a positive float, initializer scale
      for the depthwise filter.
    pointwise_filter_initializer_scale: a positive float, initializer scale
      for the pointwise filter.
    name: a string used for tf.variable_scope.
    use_bias: a bool, whether to use bias in the convolutions.
    kernel_depth_weights: an optional list of kernel weight tensors, one per
      relative position; each has a width equal to the dimension over which
      the separable conv operation is being "separated"

  Returns:
    a mtf.Tensor of format NWO, where O is the output dimension.
  """
  length_dim, depth_dim = x.shape.dims[-2:]
  with tf.variable_scope(name, default_name="separable_conv1d"):
    depthwise_out = _depthwise_conv1d_hack(
        x,
        depth_dim=depth_dim,
        length_dim=length_dim,
        min_relative_pos=min_relative_pos,
        max_relative_pos=max_relative_pos,
        use_bias=use_bias,
        initializer_scale=depthwise_filter_initializer_scale,
        kernel_depth_weights=kernel_depth_weights)
    return dense(
        depthwise_out,
        new_dims=[output_dim],
        reduced_dims=[depth_dim],
        name="pointwise_dense",
        use_bias=use_bias,
        kernel_initializer=VarianceScalingInitializer(
            scale=pointwise_filter_initializer_scale))
def conv2d(x, output_dim, filter_size=(3, 3),
           strides=(1, 1), padding="SAME", filter_initializer=None,
           variable_dtype=None, name=None):
  """2D Convolution.

  Args:
    x: a mtf.Tensor of format NHWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format [filter_height, filter_width].
    strides: a list or tuple in format [stride_height, stride_width].
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  input_dim = x.shape[-1]
  filter_h = mtf.Dimension("fh", filter_size[0])
  filter_w = mtf.Dimension("fw", filter_size[1])
  with tf.variable_scope(name, default_name="conv2d"):
    if variable_dtype is None:
      variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
    kernel = mtf.get_variable(
        x.mesh, "kernel", [filter_h, filter_w, input_dim, output_dim],
        initializer=filter_initializer, dtype=variable_dtype)
    # Stride 1 in the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv2dOperation(x, kernel, full_strides, padding).outputs[0]
def conv2d_with_blocks(
    x, output_dim, filter_size=(3, 3),
    strides=(1, 1), padding="SAME",
    h_blocks_dim=None, w_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """2D Convolution with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter height and width.

  Currently, only "SAME" padding with dilation rate of 1 is supported.

  Args:
    x: a Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format [filter_height, filter_width].
    strides: a list or tuple in format [stride_height, stride_width].
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    h_blocks_dim: Dimension representing number of height blocks.
    w_blocks_dim: Dimension representing number of width blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]

  Raises:
    NotImplementedError: if padding is not "SAME".
  """
  # If h_blocks_dim and w_blocks_dim are not split, directly call conv2d.
  if h_blocks_dim is None and w_blocks_dim is None:
    return conv2d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv2d_with_blocks requires padding=SAME")
  # Halo exchange for h_blocks and w_blocks.  The [left, right] halo sizes
  # below handle both odd and even filter sizes.
  # (Removed a leftover debug print that wrote to stdout on every call.)
  h_dim, w_dim = x.shape.dims[-3:-1]
  for blocks_dim, block_size_dim, halo_size in [
      (h_blocks_dim, h_dim, [(filter_size[0] - 1) // 2, filter_size[0] // 2]),
      (w_blocks_dim, w_dim, [(filter_size[1] - 1) // 2, filter_size[1] // 2])]:
    if halo_size != [0, 0]:
      if blocks_dim is not None:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
      else:
        x = mtf.pad(x, halo_size, block_size_dim.name)
  # After exchanging/padding halos, a VALID convolution yields SAME output.
  return conv2d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv2d_transpose(x, output_dim,
                     filter_size=(2, 2), strides=(2, 2),
                     padding="SAME", filter_initializer=None,
                     variable_dtype=None, name=None):
  """2D Transposed Convolution.

  Args:
    x: a mtf.Tensor of format NHWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_height, filter_width]. Only filter_size of (2, 2) is tested.
    strides: a list or tuple in format
      [stride_height, stride_width]. Only strides of (2, 2) is tested.
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  input_dim = x.shape[-1]
  filter_h = mtf.Dimension("fh", filter_size[0])
  filter_w = mtf.Dimension("fw", filter_size[1])
  with tf.variable_scope(name, default_name="conv2d_transpose"):
    if variable_dtype is None:
      variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
    # Note the [fh, fw, output, input] kernel layout for transposed conv.
    kernel = mtf.get_variable(
        x.mesh, "kernel", [filter_h, filter_w, output_dim, input_dim],
        initializer=filter_initializer, dtype=variable_dtype)
    # Stride 1 in the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv2dTransposeOperation(
        x, kernel, full_strides, padding).outputs[0]
def conv2d_transpose_with_blocks(
    x, output_dim, filter_size=(2, 2),
    strides=(2, 2), padding="SAME",
    h_blocks_dim=None, w_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """2D Transposed Convolution with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter depth and height.

  Currently, only "SAME" padding with dilation rate of 1 is supported. Only
  splitting along the depth and height dimensions are supported.

  Args:
    x: a Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_height, filter_width]. Only filter_size of (2, 2) is tested.
    strides: a list or tuple in format
      [stride_height, stride_width]. Only strides of (2, 2) is tested.
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    h_blocks_dim: Dimension representing number of height blocks.
    w_blocks_dim: Dimension representing number of width blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
  """
  # Without block splitting, fall back to a plain transposed convolution.
  if h_blocks_dim is None and w_blocks_dim is None:
    return conv2d_transpose(
        x, output_dim, filter_size, strides, padding, filter_initializer,
        variable_dtype, name)
  # Now only supports even-sized filters.
  assert filter_size[0] % 2 == 0
  assert filter_size[1] % 2 == 0
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError(
        "conv2d_transpose_with_blocks requires padding=SAME")
  # Halo exchange for h_blocks and w_blocks.
  # TODO(lehou): figure out the halo_size in general cases.
  h_dim, w_dim = x.shape.dims[-3:-1]
  halo_specs = [(h_blocks_dim, h_dim, filter_size[0] // 2 - 1),
                (w_blocks_dim, w_dim, filter_size[1] // 2 - 1)]
  for blocks_dim, block_size_dim, halo_size in halo_specs:
    if halo_size > 0:
      if blocks_dim is not None:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
      else:
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
  return conv2d_transpose(
      x, output_dim, filter_size, strides, "VALID", filter_initializer,
      variable_dtype, name)
def conv3d(x, output_dim, filter_size=(3, 3, 3),
           strides=(1, 1, 1), padding="SAME",
           filter_initializer=None,
           variable_dtype=None, name=None):
  """3D Convolution.

  Args:
    x: a mtf.Tensor of format NDHWC.
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  input_dim = x.shape[-1]
  filter_dims = [mtf.Dimension("fd", filter_size[0]),
                 mtf.Dimension("fh", filter_size[1]),
                 mtf.Dimension("fw", filter_size[2])]
  with tf.variable_scope(name, default_name="conv3d"):
    if variable_dtype is None:
      variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
    kernel = mtf.get_variable(
        x.mesh, "kernel", filter_dims + [input_dim, output_dim],
        initializer=filter_initializer, dtype=variable_dtype)
    # Stride 1 in the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv3dOperation(x, kernel, full_strides, padding).outputs[0]
def conv3d_to_dense(x, mesh_size=(2,2), name=None):
  """Apply mtf.conv3d_to_dense along split depth/height block dimensions.

  NOTE(review): despite the name, this does not call dense(); it invokes
  mtf.conv3d_to_dense on the depth and height dimensions whenever the
  corresponding mesh size is > 1.  Presumably a layout-conversion helper for
  spatially-partitioned conv3d outputs -- confirm against callers.

  Args:
    x: a Tensor; assumes dims[-4:-1] are (depth, height, width) -- TODO
      confirm.
    mesh_size: a tuple (d_procs, h_procs) of processor counts for the depth
      and height dimensions.
    name: unused -- presumably kept for signature symmetry with the other
      conv helpers.

  Returns:
    a Tensor.
  """
  # w_dim is unpacked but never used: only the depth and height dimensions
  # are converted.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  for block_size_dim, num_procs in [
      (d_dim, mesh_size[0]),
      (h_dim, mesh_size[1])]:
    if num_procs > 1:
      x = mtf.conv3d_to_dense(x, block_size_dim)
  return x
def conv3d_with_MPI(
    x, output_dim, filter_size=(2, 2, 2),
    strides=(1, 1, 1), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D Convolution with spatial partitioning using MPI halo exchange.

  Like conv3d_with_blocks, but uses mtf.halo_exchange_with_MPI for the
  exchange along split depth/height block dimensions, and pads the
  (unsplit) width dimension explicitly so a VALID convolution produces
  SAME-style output.  Unlike conv3d_with_blocks, odd filter sizes are not
  required: the width padding is asymmetric ((fw - 1) // 2 left,
  fw // 2 right) to support even filters.

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
    padding: string, "SAME". "VALID" is not currently supported.
    d_blocks_dim: Dimension representing number of depth blocks.
    h_blocks_dim: Dimension representing number of height blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    a Tensor.

  Raises:
    NotImplementedError: if padding is not "SAME".
  """
  # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    # Fixed error message: it previously named conv3d_with_blocks.
    raise NotImplementedError("conv3d_with_MPI requires padding=SAME")
  # Halo exchange for d_blocks and h_blocks.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  for blocks_dim, block_size_dim, halo_size in [
      (d_blocks_dim, d_dim, filter_size[0] // 2),
      (h_blocks_dim, h_dim, filter_size[1] // 2)]:
    if halo_size > 0:
      if blocks_dim is not None:
        x = mtf.halo_exchange_with_MPI(x, blocks_dim, block_size_dim, halo_size)
      else:
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
  # Pad the w dimension with zeros (asymmetric split supports even filters).
  x = mtf.pad(x, [(filter_size[2] - 1 ) // 2, filter_size[2] // 2],
              dim_name=w_dim.name, name="conv3d_pad_w_dim")
  return conv3d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv3d_with_blocks(
    x, output_dim, filter_size=(3, 3, 3),
    strides=(1, 1, 1), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D convolution with spatial partitioning.

  The image is decomposed into blocks; the block-count dimensions
  d_blocks_dim and h_blocks_dim may be split along a mesh axis.  When a
  dimension is split, a halo exchange gives each block the slices of its
  neighbors that the convolution window needs; when it is not split, the
  dimension is zero-padded instead.  After the halo/pad step, a "VALID"
  convolution yields the "SAME"-padded result per block.

  Only "SAME" padding with dilation rate 1 and odd filter sizes is
  supported, and only the depth and height dimensions may be split.

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, the output channel dimension.
    filter_size: [filter_depth, filter_height, filter_width]; all odd.
    strides: [stride_depth, stride_height, stride_width].
    padding: string, must be "SAME".
    d_blocks_dim: Dimension for the number of depth blocks (or None).
    h_blocks_dim: Dimension for the number of height blocks (or None).
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
    [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
     d_dim, h_dim, w_dim, out_channels_dim]

  Raises:
    NotImplementedError: if padding is not "SAME".
  """
  # No spatial split: plain conv3d handles it.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  # The symmetric halo sizes below assume odd filters in every direction.
  assert filter_size[0] % 2 == 1
  assert filter_size[1] % 2 == 1
  assert filter_size[2] % 2 == 1
  if padding != "SAME":
    raise NotImplementedError("conv3d_with_blocks requires padding=SAME")
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  for blocks_dim, block_size_dim, halo_size in (
      (d_blocks_dim, d_dim, filter_size[0] // 2),
      (h_blocks_dim, h_dim, filter_size[1] // 2)):
    if halo_size > 0:
      if blocks_dim is None:
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
      else:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
  # Width is never block-split; zero-pad it symmetrically.
  x = mtf.pad(x, [filter_size[2] // 2, filter_size[2] // 2],
              dim_name=w_dim.name, name="conv3d_pad_w_dim")
  return conv3d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv3d_with_blocks_ymzk(
    x, output_dim, filter_size=(3, 3, 3),
    strides=(1, 1, 1), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D Convolution with spatial partitioning (asymmetric-halo variant).

  Like conv3d_with_blocks, but drops the odd-filter-size requirement: the
  depth/height halo and the width padding are asymmetric
  ((f - 1) // 2 before, f // 2 after), and the exchange is delegated to
  mtf.halo_exchange_ymzk.  Also emits tf.logging debug output for each
  exchanged dimension.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as d_blocks_dim and h_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution.

  Currently, only "SAME" padding with dilation rate of 1 is supported. Only
  splitting along the depth and height dimensions are supported.

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, indicating the output channel dimension.
    filter_size: a list or tuple in format
      [filter_depth, filter_height, filter_width].
    strides: a list or tuple in format
      [stride_depth, stride_height, stride_width].
    padding: string, "SAME". The type of padding algorithm to use.
      "Valid" is not currently supported.
    d_blocks_dim: Dimension representing number of depth blocks.
    h_blocks_dim: Dimension representing number of height blocks.
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
    [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
     d_dim, h_dim, w_dim, out_channels_dim]
  """
  # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d(x, output_dim,
                  filter_size, strides, padding, filter_initializer,
                  variable_dtype, name)
  #ymzk
  # Unlike conv3d_with_blocks, even filter sizes are deliberately allowed:
  #assert filter_size[0] % 2 == 1
  #assert filter_size[1] % 2 == 1
  #assert filter_size[2] % 2 == 1
  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv3d_with_blocks requires padding=SAME")
  # Halo exchange for d_blocks and h_blocks.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  # for blocks_dim, block_size_dim, halo_size in [
  #     (d_blocks_dim, d_dim, [(filter_size[0] - 1) // 2, filter_size[0] // 2])]:
  # halo_size here is a [before, after] pair; asymmetric for even filters.
  for blocks_dim, block_size_dim, halo_size in [
      (d_blocks_dim, d_dim, [(filter_size[0] - 1) // 2, filter_size[0] // 2]),
      (h_blocks_dim, h_dim, [(filter_size[1] - 1) // 2, filter_size[1] // 2])]:
    tf.logging.info("#3d# %s, %s, %s, %s, %s, %s"% (x, output_dim, filter_size, blocks_dim, block_size_dim, halo_size))
    #print("#3d#", x, output_dim, filter_size, blocks_dim, block_size_dim, halo_size)
    if halo_size != [0, 0]:
      if blocks_dim is not None:
        x = mtf.halo_exchange_ymzk(x, blocks_dim, block_size_dim, halo_size)
      else:
        # NOTE(review): halo_size is a list here, so this passes
        # [[before, after], [before, after]] — confirm mtf.pad accepts that.
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
    tf.logging.info("#3d_exch_after# %s" %x)
  # Pad w dimension with zeros (asymmetric for even filter widths).
  #x = mtf.pad(x, [(filter_size[2] - 1) // 2, filter_size[2] // 2],
  x = mtf.pad(x, [(filter_size[2] - 1 ) // 2, filter_size[2] // 2],
              dim_name=w_dim.name, name="conv3d_pad_w_dim")
  #x = mtf.pad(x, [(filter_size[2] - 1 ) // 2, filter_size[2] // 2],
  #            dim_name=h_dim.name, name="conv3d_pad_h_dim")
  return conv3d(x, output_dim,
                filter_size, strides, "VALID", filter_initializer,
                variable_dtype, name)
def conv3d_transpose(x, output_dim,
                     filter_size=(2, 2, 2), strides=(2, 2, 2),
                     padding="SAME", filter_initializer=None,
                     variable_dtype=None, name=None):
  """3D transposed (fractionally-strided) convolution.

  Args:
    x: a mtf.Tensor in NDHWC layout.
    output_dim: a mtf.Dimension, the output channel dimension.
    filter_size: [filter_depth, filter_height, filter_width];
      only (2, 2, 2) is tested.
    strides: [stride_depth, stride_height, stride_width];
      only (2, 2, 2) is tested.
    padding: either "SAME" or "VALID".
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor.
  """
  # Transposed-conv kernel layout: [..., output_dim, input_dim].
  kernel_dims = [mtf.Dimension("fd", filter_size[0]),
                 mtf.Dimension("fh", filter_size[1]),
                 mtf.Dimension("fw", filter_size[2]),
                 output_dim,
                 x.shape[-1]]
  with tf.variable_scope(name, default_name="conv3d_transpose"):
    if variable_dtype is None:
      variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
    kernel = mtf.get_variable(
        x.mesh, "kernel", kernel_dims,
        initializer=filter_initializer, dtype=variable_dtype)
    # Unit stride over the batch and channel dimensions.
    full_strides = [1] + list(strides) + [1]
    return mtf.Conv3dTransposeOperation(
        x, kernel, full_strides, padding).outputs[0]
def conv3d_transpose_with_blocks(
    x, output_dim, filter_size=(2, 2, 2),
    strides=(2, 2, 2), padding="SAME",
    d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
    variable_dtype=None, name=None):
  """3D transposed convolution with spatial partitioning.

  The image is decomposed into blocks; the block-count dimensions
  d_blocks_dim and h_blocks_dim may be split along a mesh axis.  Split
  dimensions receive a halo exchange of size filter // 2 - 1, unsplit ones
  are zero-padded by the same amount, and the width dimension (never split)
  is always zero-padded.  A "VALID" transposed convolution then produces
  the per-block "SAME" result.

  Only "SAME" padding with even filter sizes is supported, and only the
  depth and height dimensions may be split.

  Args:
    x: a Tensor of shape
      [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
    output_dim: a mtf.Dimension, the output channel dimension.
    filter_size: [filter_depth, filter_height, filter_width]; all even.
      Only (2, 2, 2) is tested.
    strides: [stride_depth, stride_height, stride_width].
      Only (2, 2, 2) is tested.
    padding: string, must be "SAME".
    d_blocks_dim: Dimension for the number of depth blocks (or None).
    h_blocks_dim: Dimension for the number of height blocks (or None).
    filter_initializer: the initializer for tf.get_variable.
    variable_dtype: a mtf.VariableDType
    name: a name for the operation (optional).

  Returns:
    A Tensor of shape
    [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
     d_dim, h_dim, w_dim, out_channels_dim]

  Raises:
    NotImplementedError: if padding is not "SAME".
  """
  # No spatial split: plain transposed convolution handles it.
  if d_blocks_dim is None and h_blocks_dim is None:
    return conv3d_transpose(
        x, output_dim, filter_size, strides, padding, filter_initializer,
        variable_dtype, name)
  # The halo sizes below assume even-sized filters.
  assert filter_size[0] % 2 == 0
  assert filter_size[1] % 2 == 0
  assert filter_size[2] % 2 == 0
  if padding != "SAME":
    raise NotImplementedError(
        "conv3d_transpose_with_blocks requires padding=SAME")
  # TODO(lehou): figure out the halo_size in general cases.
  d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
  for blocks_dim, block_size_dim, halo_size in (
      (d_blocks_dim, d_dim, filter_size[0] // 2 - 1),
      (h_blocks_dim, h_dim, filter_size[1] // 2 - 1)):
    if halo_size > 0:
      if blocks_dim is None:
        x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
      else:
        x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
  # Width is never block-split; zero-pad it instead.
  x = mtf.pad(x, [filter_size[2] // 2 - 1, filter_size[2] // 2 - 1],
              dim_name=w_dim.name, name="conv3d_trans_pad_w_dim")
  return conv3d_transpose(
      x, output_dim, filter_size, strides, "VALID", filter_initializer,
      variable_dtype, name)
def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
  """Layer normalization along a single dimension.

  Normalizes x to zero mean and unit variance over `dim`, then applies a
  learned per-position scale and bias of shape [dim].

  Args:
    x: a mtf.Tensor whose shape contains dim.
    dim: a mtf.Dimension to normalize over.
    epsilon: a small float added to the variance for numerical stability.
    name: a string used for tf.variable_scope.

  Returns:
    a mtf.Tensor with the same shape as x.
  """
  with tf.variable_scope(name + "/layer_norm"):
    gain = mtf.get_variable(
        x.mesh,
        "layer_norm_scale",
        mtf.Shape([dim]),
        initializer=tf.ones_initializer(),
        activation_dtype=x.dtype)
    shift = mtf.get_variable(
        x.mesh,
        "layer_norm_bias",
        mtf.Shape([dim]),
        initializer=tf.zeros_initializer(),
        activation_dtype=x.dtype)
    stats_shape = x.shape - dim
    mu = mtf.reduce_mean(x, output_shape=stats_shape)
    var = mtf.reduce_mean(mtf.square(x - mu), output_shape=stats_shape)
    return (x - mu) * mtf.rsqrt(var + epsilon) * gain + shift
def batch_norm(x, is_training, momentum, epsilon=1e-9,
               dims_idx_start=0, dims_idx_end=-1,
               init_zero=False, name=None):
  """Batch normalization over a contiguous range of dimensions.

  Dimensions with indices in [dims_idx_start, dims_idx_end - 1] are
  normalized away; the remaining dimensions parameterize the scale, bias
  and running statistics.  In training mode the current batch statistics
  are used and decayed into the running averages; otherwise the frozen
  running statistics are used.

  Args:
    x: a mtf.Tensor whose shape contains [batch_dim, ..., dim].
    is_training: a boolean, whether mode is training.
    momentum: a floating point number, the decay of the running statistics.
    epsilon: a small float added to the variance.
    dims_idx_start: an integer, index of the first normalized dimension.
    dims_idx_end: an integer; dimensions up to (exclusive) this index are
      normalized.
    init_zero: a boolean; if True, initialize the scale with zeros
      instead of ones.
    name: a string used for tf.variable_scope.

  Returns:
    a pair of (a mtf.Tensor with the same shape as x,
               a list of statistics update ops; empty outside training).
  """
  with tf.variable_scope(name, default_name="batch_norm", values=[x]):
    scale_init = (tf.zeros_initializer() if init_zero
                  else tf.ones_initializer())
    norm_dims = x.shape.dims[dims_idx_start:dims_idx_end]
    stats_shape = x.shape - norm_dims
    scale = mtf.get_variable(
        x.mesh,
        "batch_norm_scale",
        stats_shape,
        initializer=scale_init,
        activation_dtype=x.dtype)
    bias = mtf.get_variable(
        x.mesh,
        "batch_norm_bias",
        stats_shape,
        initializer=tf.zeros_initializer(),
        activation_dtype=x.dtype)
    moving_mean = mtf.get_variable(
        x.mesh, "bn_moving_mean", stats_shape,
        initializer=tf.random_normal_initializer(stddev=1.0),
        activation_dtype=x.dtype,
        trainable=False)
    moving_variance = mtf.get_variable(
        x.mesh, "bn_moving_variance",
        stats_shape, initializer=tf.ones_initializer(),
        activation_dtype=x.dtype,
        trainable=False)
    bn_stats_update_ops = []
    if is_training:
      # Training: normalize with the current batch statistics and decay
      # them into the running averages.
      mean = mtf.reduce_mean(x, output_shape=stats_shape)
      variance = mtf.reduce_mean(
          mtf.square(x - mean), output_shape=stats_shape)
      norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
      # TODO(lehou): do not return update_ops; handle them inside MTF.
      bn_stats_update_ops.append(mtf.assign(
          moving_mean, momentum * moving_mean + (1 - momentum) * mean,
          name="{}/bn_mean_update".format(name)))
      bn_stats_update_ops.append(mtf.assign(
          moving_variance,
          momentum * moving_variance + (1 - momentum) * variance,
          name="{}/bn_var_update".format(name)))
    else:
      # Eval/test: normalize with the frozen running statistics.
      norm_x = (x - moving_mean) * mtf.rsqrt(moving_variance + epsilon)
    return (norm_x * scale) + bias, bn_stats_update_ops
def softmax_cross_entropy_with_logits(logits, targets, vocab_dim, z_loss=0.0):
  """Per-example softmax cross-entropy loss.

  Supports hard targets (integer class ids, with the same dimensions as
  logits minus vocab_dim) and soft targets (a floating-point distribution
  with the same dimensions as logits; reducing over vocab_dim should give
  all ones).

  If z_loss is nonzero, an extra penalty z_loss * log(z)^2 is added, where
  z is the softmax partition function.  Example value: z_loss=1e-4.  This
  keeps logits from drifting far from zero (which causes unacceptable
  roundoff in bfloat16) and encourages normalized log-probabilities.

  Args:
    logits: a mtf.Tensor whose shape contains vocab_dim
    targets: a mtf.Tensor representing hard or soft targets (see above)
    vocab_dim: a mtf.Dimension
    z_loss: a float

  Returns:
    a mtf.Tensor whose shape is equal to logits.shape - vocab_dim

  Raises:
    ValueError: if the shapes do not match.
  """
  logits_dims = set(logits.shape.dims)
  if targets.dtype.is_integer:
    # Hard targets: class ids become a one-hot distribution over vocab_dim.
    if set(targets.shape.dims) != logits_dims.difference([vocab_dim]):
      raise ValueError(
          "softmax_cross_entropy_with_logits with hard targets "
          "dims in targets=%s should be dims in logits=%s other than "
          "vocab_dim=%s" % (targets, logits, vocab_dim))
    targets = mtf.one_hot(targets, vocab_dim, dtype=logits.dtype)
  elif set(targets.shape.dims) != logits_dims:
    raise ValueError(
        "softmax_cross_entropy_with_logits with soft targets "
        "dims in targets=%s should be dims in logits=%s"% (targets, logits))
  if vocab_dim not in logits.shape.dims:
    raise ValueError("vocab_dim must be in logits.shape.dims")
  log_partition = mtf.reduce_logsumexp(logits, vocab_dim)
  log_probs = logits - log_partition
  loss = mtf.negative(
      mtf.reduce_sum(log_probs * targets, reduced_dim=vocab_dim))
  if z_loss != 0:
    loss += z_loss * mtf.square(log_partition)
  return loss
def sigmoid_cross_entropy_with_logits(logits, targets):
  """Per-element sigmoid cross-entropy loss.

  Uses the numerically stable formulation
    max(x, 0) - x * z + log(1 + exp(-|x|))
  which avoids overflow of exp() for large-magnitude logits.

  Args:
    logits: a mtf.Tensor
    targets: a mtf.Tensor with the same shape as logits

  Returns:
    a mtf.Tensor whose shape is equal to logits.shape

  Raises:
    ValueError: if the shapes do not match.
  """
  if logits.shape != targets.shape:
    # Bug fix: the two implicitly-concatenated message fragments previously
    # had no separator, producing "...targets shapelogits=...".
    raise ValueError(
        "logits shape must equal targets shape "
        "logits=%s targets=%s" % (logits.to_string, targets.to_string))
  x = logits
  z = targets
  return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x)))
def weights_nonzero(targets, dtype=tf.float32):
  """Return a mask that is 1 where `targets` is nonzero and 0 elsewhere.

  Args:
    targets: a mtf.Tensor.
    dtype: the tf.DType of the returned mask.

  Returns:
    a mtf.Tensor of `dtype` with the same shape as targets.
  """
  def _mask(t):
    return tf.cast(tf.not_equal(t, 0), dtype)
  return mtf.cwise(_mask, [targets], output_dtype=dtype,
                   name="weights_nonzero")
def dense_relu_dense(x,
                     hidden_channels,
                     dropout=0.0,
                     dropout_broadcast_dims=None,
                     master_dtype=tf.float32,
                     slice_dtype=tf.float32, name=None):
  """Feed-forward layer: dense projection, ReLU, then projection back.

  The output has the same number of channels as the input.  Dropout, when
  enabled, is applied to the hidden activations only.

  Args:
    x: a mtf.Tensor
    hidden_channels: a mtf.Dimension - channels in the hidden layer
    dropout: an optional float, the fraction of hidden units to drop
    dropout_broadcast_dims: an optional list of mtf.Dimension over which
      the dropout noise is broadcast
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string

  Returns:
    a mtf.Tensor with the same shape as x.
  """
  with tf.variable_scope(name, default_name="dense_relu_dense"):
    out_channels = x.shape.dims[-1]
    hidden = dense(x, hidden_channels,
                   use_bias=False, activation=mtf.relu,
                   master_dtype=master_dtype, slice_dtype=slice_dtype,
                   name="wi")
    if dropout != 0.0:
      keep_prob = 1.0 - dropout
      hidden = mtf.dropout(
          hidden, keep_prob,
          noise_shape=hidden.shape - dropout_broadcast_dims)
    return dense(hidden, out_channels, use_bias=False, activation=None,
                 master_dtype=master_dtype, slice_dtype=slice_dtype,
                 name="wo")
def local_1d_halo_exchange(k, v, num_w_blocks, w_dim, mask_right):
  """Halo exchange of keys and values for blocked local 1D attention.

  When the block-count dimension is split (num_w_blocks is not None) each
  block gathers a full neighboring block via halo exchange; otherwise the
  tensors are padded instead.  With mask_right (causal/decoder attention)
  only the left neighbor is used.
  """
  if num_w_blocks is None:
    right = None if mask_right else w_dim
    k = mtf.pad(k, [w_dim, right], w_dim.name)
    v = mtf.pad(v, [w_dim, right], w_dim.name)
  else:
    exchange = mtf.left_halo_exchange if mask_right else mtf.halo_exchange
    k = exchange(k, num_w_blocks, w_dim, w_dim.size)
    v = exchange(v, num_w_blocks, w_dim, w_dim.size)
  return k, v
def local_self_attention_spatial_blocks(
    query_antecedent,
    kv_channels,
    heads,
    memory_w_dim=None,
    mask_right=False,
    master_dtype=tf.float32,
    slice_dtype=tf.float32,
    name=None):
  """Blocked local 1D self-attention over the width dimension.

  The sequence is divided into blocks; a query position attends to memory
  positions within its own block and, after the halo exchange, within the
  neighboring block(s).  With mask_right, attention is additionally masked
  so a query cannot see memory positions to its right (decoder-style).

  Args:
    query_antecedent: a mtf.Tensor with shape
      [batch, num_w_blocks, w_dim, io_channels].
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    memory_w_dim: mtf Dimension, for the memory width block.
    mask_right: bool, whether to mask out attention to the right
      for the decoder.
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    a Tensor with the same shape as query_antecedent.

  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="multihead_attention",
      values=[query_antecedent]):
    w_dim, io_channels = query_antecedent.shape.dims[-2:]
    batch, num_w_blocks = query_antecedent.shape.dims[:2]
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    # Keys/values use a "memory_"-prefixed width dimension so the attention
    # einsum contracts over memory positions rather than query positions.
    memory_antecedent = mtf.rename_dimension(
        query_antecedent, w_dim.name, "memory_" + w_dim.name)
    memory_w_dim = memory_antecedent.shape.dims[-2]
    def _project(antecedent, weight, length_dim):
      """Project io_channels -> (heads, kv_channels) along length_dim."""
      return mtf.einsum(
          [antecedent, weight],
          mtf.Shape([batch, heads, num_w_blocks, length_dim, kv_channels]))
    q = _project(query_antecedent, wq, w_dim)
    k = _project(memory_antecedent, wk, memory_w_dim)
    v = _project(memory_antecedent, wv, memory_w_dim)
    # Bring in the neighboring memory block(s).
    k, v = local_1d_halo_exchange(k, v, num_w_blocks, memory_w_dim, mask_right)
    # The causal mask is computed once and reused across blocks since the
    # block size is static.
    mask = None
    if mask_right:
      mask = attention_bias_local_block(
          query_antecedent.mesh, w_dim, memory_w_dim)
    output = dot_product_attention(q, k, v, mask=mask)
    return mtf.einsum(
        [output, wo], mtf.Shape([batch, num_w_blocks, w_dim, io_channels]))
def masked_local_attention_1d(x,
                              kv_channels,
                              heads,
                              window_size=128,
                              master_dtype=tf.float32,
                              slice_dtype=tf.float32,
                              length_per_split=None,
                              return_kv=None,
                              params=None,
                              name=None):
  """Attention to the source position and a neighborhood to the left of it.

  Attention for a given query position p can only see memory positions
  in the range (p - window_size, p].

  Args:
    x: a mtf.Tensor with shape batch_dims + [length, io_channels]
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    window_size: an integer
    master_dtype: a tf.dtype (deprecated - use params arg)
    slice_dtype: a tf.dtype (deprecated - use params arg)
    length_per_split: an optional integer indicating the part of the length
      dimension per processor.  You can omit if the length dimension is not
      split.
    return_kv: an optional list onto which to append the computed k and v.
    params: an optional quadruple of Tensors (see multihead_attention_params())
    name: an optional string.

  Returns:
    a Tensor with the same shape as x

  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="masked_local_attention_1d", values=[x]):
    batch_dims = x.shape.dims[:-2]
    length, io_channels = x.shape.dims[-2:]
    # Either reuse caller-supplied projection weights or create fresh ones.
    if params is None:
      wq, wk, wv, wo = multihead_attention_vars(
          x.mesh, heads, io_channels, kv_channels,
          master_dtype, slice_dtype, x.dtype)
    else:
      wq, wk, wv, wo = params
    # Get query q, keys k and values v.
    qkv_shape = mtf.Shape(batch_dims + [heads, length, kv_channels])
    q = mtf.einsum([x, wq], qkv_shape)
    k = mtf.einsum([x, wk], qkv_shape)
    v = mtf.einsum([x, wv], qkv_shape)
    if return_kv is not None:
      return_kv.extend([k, v])
    # Choose a suitable block size.
    # We choose the greatest divisor of length_per_split less than or equal
    # to max(window_size, 128)
    if length_per_split is None:
      length_per_split = length.size
    block_length = max(window_size, 128)
    while length_per_split % block_length != 0:
      block_length -= 1
    query_block_length = mtf.Dimension("query_block_length", block_length)
    memory_block_length = mtf.Dimension("memory_block_length", block_length)
    # The num_blocks dimension gets the same name as the length dimension,
    # so it will be split in the same way.
    num_blocks = mtf.Dimension(length.name, length.size // block_length)
    # Reshape [length] into [num_blocks, block_length].
    q_shape = batch_dims + [heads, num_blocks, query_block_length, kv_channels]
    kv_shape = batch_dims + [
        heads, num_blocks, memory_block_length, kv_channels]
    q = mtf.reshape(q, q_shape)
    k = mtf.reshape(k, kv_shape)
    v = mtf.reshape(v, kv_shape)
    # augment the keys and values for each block with keys and values for
    # the previous window_size timesteps.
    k = mtf.left_halo_exchange(k, num_blocks, memory_block_length, window_size)
    v = mtf.left_halo_exchange(v, num_blocks, memory_block_length, window_size)
    padded_memory_block_length = mtf.Dimension(
        "memory_block_length", window_size + block_length)
    # Positions within a block: memory spans the halo plus the block, and
    # queries are offset by window_size so position 0 lines up.
    mpos = mtf.range(x.mesh, padded_memory_block_length, tf.float32)
    qpos = mtf.range(x.mesh, query_block_length, tf.float32) + window_size
    # prevent looking forward
    mask = mtf.cast(mtf.greater(mpos, qpos), x.dtype) * -1e9
    # prevent looking >=block_length timesteps backward
    mask += mtf.cast(mtf.less_equal(mpos, qpos - block_length), x.dtype) * -1e9
    # Note: The first window_size-1 positions can see back into pre-time
    # where all the keys and values are zero.  We could mask this out, but we
    # don't.
    o = dot_product_attention(q, k, v, mask=mask)
    o = mtf.reshape(o, batch_dims + [heads, length, kv_channels])
    return mtf.einsum([o, wo], mtf.Shape(batch_dims + [length, io_channels]))
def masked_local_attention_1d_incremental(x,
                                          prev_k,
                                          prev_v,
                                          step_num,
                                          master_dtype=None,
                                          slice_dtype=None,
                                          params=None,
                                          name=None):
  """One decode step of masked local 1D self-attention.

  Incremental version of masked_local_attention_1d(): the keys/values of
  the trailing window are kept in a ring buffer indexed by
  step_num mod window_length.

  Args:
    x: a mtf.Tensor with shape [batch..., io_channels]
    prev_k: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]
    prev_v: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]
    step_num: mtf Scalar with dtype tf.int32
    master_dtype: a tf.dtype (deprecated)
    slice_dtype: a tf.dtype (deprecated)
    params: a quadruple of Tensors (see multihead_attention_params())
    name: an optional string.

  Returns:
    y: A mtf.Tensor with shape [batch..., io_channels]
    new_k: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]
    new_v: mtf.Tensor with shape
      [batch..., heads, window_length, kv_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = x.shape.dims[:-1]
  io_channels = x.shape.dims[-1]
  heads, window_length, kv_channels = prev_k.shape.dims[-3:]
  with tf.variable_scope(name, default_name="masked_local_attention_1d"):
    if params is not None:
      wq, wk, wv, wo = params
    else:
      wq, wk, wv, wo = multihead_attention_vars(
          x.mesh, heads, io_channels, kv_channels,
          master_dtype, slice_dtype, x.dtype)
    qkv_shape = mtf.Shape(batch_dims + [heads, kv_channels])
    q = mtf.einsum([x, wq], qkv_shape)
    k = mtf.einsum([x, wk], qkv_shape)
    v = mtf.einsum([x, wv], qkv_shape)
    # Overwrite the ring-buffer slot for the current step with the fresh
    # key/value; all other window positions are carried over unchanged.
    current_position = mtf.equal(
        mtf.range(x.mesh, window_length, dtype=tf.int32),
        mtf.mod(step_num, window_length.size))
    k = mtf.where(current_position, k, prev_k, output_shape=prev_k.shape)
    v = mtf.where(current_position, v, prev_v, output_shape=prev_v.shape)
    o = dot_product_attention(q, k, v, mask=None)
    return mtf.einsum([o, wo], x.shape), k, v
def local_2d_halo_exchange(k, v, num_h_blocks, h_dim,
                           num_w_blocks, w_dim, mask_right):
  """Halo exchange of keys and values for blocked local 2D attention.

  Applies a full-block halo (halo size equal to the block size) along both
  the height and width dimensions.  Split dimensions use a collective halo
  exchange; unsplit ones are padded instead.  With mask_right (causal
  attention) only the left/upper neighbor is gathered.
  """
  for blocks_dim, block_size_dim, halo_size in (
      (num_h_blocks, h_dim, h_dim.size),
      (num_w_blocks, w_dim, w_dim.size)):
    # shape of k is [num_h_blocks, num_w_blocks, h_dim, w_dim, kv_channels]
    if halo_size <= 0:
      continue
    if blocks_dim is None:
      right_pad = None if mask_right else halo_size
      k = mtf.pad(k, [halo_size, right_pad], block_size_dim.name)
      v = mtf.pad(v, [halo_size, right_pad], block_size_dim.name)
    else:
      exchange = mtf.left_halo_exchange if mask_right else mtf.halo_exchange
      k = exchange(k, blocks_dim, block_size_dim, halo_size)
      v = exchange(v, blocks_dim, block_size_dim, halo_size)
  return k, v
def local_2d_self_attention_spatial_blocks(query_antecedent,
                                           kv_channels,
                                           heads,
                                           memory_h_dim=None,
                                           memory_w_dim=None,
                                           mask_right=False,
                                           master_dtype=tf.float32,
                                           slice_dtype=tf.float32,
                                           name=None):
  """Blocked local 2D self-attention over height and width blocks.

  The image is divided into blocks; a query position attends to memory
  positions within its own block and, after the halo exchange, within the
  neighboring blocks.  With mask_right, a causal mask restricts attention
  for the decoder.

  Args:
    query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
      num_w_blocks, h_dim, w_dim, io_channels].
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    memory_h_dim: mtf Dimension, for the memory height block.
    memory_w_dim: mtf Dimension, for the memory width block.
    mask_right: bool, whether to mask out attention to the right
      for the decoder.
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    a Tensor of shape
    [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]

  Raises:
    ValueError: if channels or depth don't match.
  """
  with tf.variable_scope(
      name, default_name="multihead_attention", values=[query_antecedent]):
    h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
    batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    # Keys/values live on "memory_"-prefixed copies of the spatial dims so
    # the attention einsum contracts over memory positions.
    memory_antecedent = mtf.rename_dimension(
        query_antecedent, h_dim.name, "memory_" + h_dim.name)
    memory_antecedent = mtf.rename_dimension(
        memory_antecedent, w_dim.name, "memory_" + w_dim.name)
    memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
    def _project(antecedent, weight, row_dim, col_dim):
      """Project io_channels -> (heads, kv_channels) on a spatial grid."""
      return mtf.einsum(
          [antecedent, weight],
          mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
                     row_dim, col_dim, kv_channels]))
    q = _project(query_antecedent, wq, h_dim, w_dim)
    k = _project(memory_antecedent, wk, memory_h_dim, memory_w_dim)
    v = _project(memory_antecedent, wv, memory_h_dim, memory_w_dim)
    # Bring in neighboring memory blocks.
    k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
                                  num_w_blocks, memory_w_dim, mask_right)
    # The causal mask is computed once and reused for all blocks since the
    # block size is static.
    mask = None
    if mask_right:
      mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
                                           memory_h_dim, memory_w_dim)
    output = dot_product_attention(q, k, v, mask=mask)
    return mtf.einsum(
        [output, wo],
        mtf.Shape(
            [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]))
def rename_length_to_memory_length(
    x, length_name="length", memory_length_name="memory_length"):
  """Rename the length dimension of x (e.g. "length" -> "memory_length").

  Used to give keys/values a dimension name distinct from the queries' so
  that attention einsums contract over the memory dimension.

  Args:
    x: a mtf.Tensor whose shape contains a dimension named length_name.
    length_name: a string, the current name of the dimension.
    memory_length_name: a string, the new name of the dimension.

  Returns:
    a mtf.Tensor identical to x except for the renamed dimension.
  """
  return mtf.rename_dimension(x, length_name, memory_length_name)
def multihead_attention_vars(
    mesh, heads, io_channels, kv_channels,
    master_dtype, slice_dtype, activation_dtype):
  """Deprecated version of multihead_attention_params with combine=True.

  Wraps the three dtypes into a mtf.VariableDType and creates the four
  attention weight matrices stacked inside a single variable.

  Args:
    mesh: a Mesh
    heads: a Dimension
    io_channels: a Dimension
    kv_channels: a Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    activation_dtype: a tf.dtype

  Returns:
    a quadruple (wq, wk, wv, wo) of Tensors, each with shape
    [heads, io_channels, kv_channels].
  """
  combined_dtype = mtf.VariableDType(master_dtype, slice_dtype,
                                     activation_dtype)
  return multihead_attention_params(
      mesh, heads, io_channels, kv_channels, combined_dtype, combine=True)
def multihead_attention_params(mesh, heads, io_channels, kv_channels,
                               variable_dtype, combine=False):
  """Create the q/k/v/o weight matrices for multihead attention.

  If combine is True, a single stacked variable of shape
  [qkvo=4, heads, io_channels, kv_channels] is created and unstacked;
  otherwise four separate variables are created.  Each matrix gets its own
  initializer stddev.

  Args:
    mesh: a Mesh
    heads: a Dimension
    io_channels: a Dimension
    kv_channels: a Dimension
    variable_dtype: a mtf.VariableDType
    combine: a boolean

  Returns:
    wq: a Tensor with shape [heads, io_channels, kv_channels]
    wk: a Tensor with shape [heads, io_channels, kv_channels]
    wv: a Tensor with shape [heads, io_channels, kv_channels]
    wo: a Tensor with shape [heads, io_channels, kv_channels]
  """
  qk_stddev = (io_channels.size ** -0.5) * (kv_channels.size ** -0.25)
  v_stddev = io_channels.size ** -0.5
  # TODO(noam): should be: o_stddev = (kv_channels.size * heads.size) ** -0.5
  # verify that this still works and change it.
  o_stddev = (io_channels.size * heads.size) ** -0.5
  stddevs = [qk_stddev, qk_stddev, v_stddev, o_stddev]
  if not combine:
    params = []
    for var_name, stddev in zip(["q", "k", "v", "o"], stddevs):
      params.append(mtf.get_variable(
          mesh, var_name, mtf.Shape([heads, io_channels, kv_channels]),
          initializer=tf.random_normal_initializer(stddev=stddev),
          dtype=variable_dtype))
    return params
  qkvo = mtf.Dimension("qkvo", 4)
  def qkvo_initializer(shape,
                       dtype=None,
                       partition_info=None,
                       verify_shape=None):
    """Scale each of the four stacked matrices by its own stddev."""
    del partition_info, verify_shape
    scales = tf.reshape(tf.cast(stddevs, dtype or tf.float32), [4, 1, 1, 1])
    return tf.random_normal(shape, dtype=dtype) * scales
  stacked = mtf.get_variable(
      mesh, "qkvo", mtf.Shape([qkvo, heads, io_channels, kv_channels]),
      initializer=qkvo_initializer, dtype=variable_dtype)
  return mtf.unstack(stacked, qkvo)
def dot_product_attention(q,
                          k,
                          v,
                          mask,
                          dropout=0.0,
                          dropout_broadcast_dims=None,
                          extra_logit=None):
  """Dot-product attention.

  Args:
    q: Tensor with shape [...., length_q, depth_k]. Typically leading
      dimensions are [batch, heads].
    k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
      match with q.
    v: Tensor with shape [..., length_kv, depth_v]. Leading dimensions must
      match with q.
    mask: mask Tensor (see attention_mask()), or None
    dropout: a float.
    dropout_broadcast_dims: an optional list of mtf.Dimension
    extra_logit: an optional scalar or tensor

  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  memory_dim = k.shape.dims[-2]
  # Contract q and k over depth_k, producing one logit per memory position.
  logits = mtf.einsum([q, k], mtf.Shape(q.shape.dims[:-1] + [memory_dim]))
  if mask is not None:
    logits = logits + mask
  weights = mtf.softmax(logits, memory_dim, extra_logit=extra_logit)
  if dropout != 0.0:
    weights = mtf.dropout(
        weights, 1.0 - dropout,
        noise_shape=weights.shape - dropout_broadcast_dims)
  value_dim = v.shape.dims[-1]
  # Weighted sum of values over the memory positions.
  return mtf.einsum([weights, v], mtf.Shape(q.shape.dims[:-1] + [value_dim]))
def multihead_attention(query_antecedent,
                        memory_antecedent,
                        mask,
                        kv_channels,
                        heads,
                        dropout=0.0,
                        dropout_broadcast_dims=None,
                        master_dtype=tf.float32,
                        slice_dtype=tf.float32,
                        name="multihead_attention"):
  """Multihead scaled-dot-product attention with input/output transformations.
  In order to use only one variable containing the four weight matrices
  packed together, we insist that the query and memory antecedents have the
  same dimensionality (io_channels) and that the keys and values have the
  same dimensionality (kv_channels).
  Args:
    query_antecedent: a mtf.Tensor with shape
      [<batch_dims>, query_length, io_channels]
    memory_antecedent: a mtf.Tensor with shape
      [batch, memory_length, io_channels] (optional - pass None for
      self-attention over query_antecedent)
    mask: mask Tensor (see attention_mask())
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    dropout: a floating point value
    dropout_broadcast_dims: an optional list of mtf.Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.
  Returns:
    A mtf.Tensor with shape [batch, query_length, io_channels]
  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = query_antecedent.shape.dims[:-2]
  query_length, io_channels = query_antecedent.shape.dims[-2:]
  with tf.variable_scope(name,
                         default_name="multihead_attention",
                         values=[query_antecedent, memory_antecedent]):
    # Projection weights, each shaped [heads, io_channels, kv_channels].
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    if memory_antecedent is None:
      # Self-attention: attend to the query sequence itself, with its length
      # dimension renamed so q and k/v lengths stay distinct in einsums.
      memory_antecedent = rename_length_to_memory_length(
          query_antecedent, query_length.name)
    memory_batch_dims = memory_antecedent.shape.dims[:-2]
    memory_length, memory_channels = memory_antecedent.shape.dims[-2:]
    if memory_batch_dims != batch_dims:
      raise ValueError("memory batch must equal query batch")
    if memory_channels != io_channels:
      raise ValueError("memory channels must equal query channels")
    # q: [<batch_dims>, heads, query_length, kv_channels]
    q = mtf.einsum(
        [query_antecedent, wq],
        mtf.Shape(batch_dims + [heads, query_length, kv_channels]))
    # k, v: [<batch_dims>, heads, memory_length, kv_channels]
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    o = dot_product_attention(
        q, k, v, mask, dropout, dropout_broadcast_dims)
    # Output projection also sums over the heads dimension.
    return mtf.einsum(
        [o, wo], mtf.Shape(batch_dims + [query_length, io_channels]))
def multihead_self_attention_incremental(query_antecedent,
                                         prev_k,
                                         prev_v,
                                         step_num,
                                         master_dtype,
                                         slice_dtype,
                                         name="multihead_attention"):
  """Incremental self-attention (one decode step).
  In order to use only one variable containing the four weight matrices
  packed together, we insist that the query and memory antecedents have the
  same dimensionality (io_channels) and that the keys and values have the
  same dimensionality (kv_channels).
  Args:
    query_antecedent: a mtf.Tensor with shape [batch..., io_channels]
    prev_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
      (the key cache accumulated over previous decode steps)
    prev_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
      (the value cache accumulated over previous decode steps)
    step_num: mtf Scalar with dtype tf.int32
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.
  Returns:
    y: A mtf.Tensor with shape [batch..., io_channels]
    new_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
    new_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = query_antecedent.shape.dims[:-1]
  io_channels = query_antecedent.shape.dims[-1]
  heads, memory_length, kv_channels = prev_k.shape.dims[-3:]
  with tf.variable_scope(name, default_name="multihead_attention"):
    wq, wk, wv, wo = multihead_attention_vars(
        query_antecedent.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, query_antecedent.dtype)
    memory_antecedent = query_antecedent
    # Single-position projections (no length dimension on q/k/v here).
    q = mtf.einsum(
        [query_antecedent, wq],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, kv_channels]))
    # Write this step's k/v into position step_num of the caches: the
    # one_hot selects that slot, leaving all other positions unchanged.
    k = prev_k + mtf.multiply(
        k, mtf.one_hot(step_num, memory_length, dtype=prev_k.dtype),
        output_shape=prev_k.shape)
    v = prev_v + mtf.multiply(
        v, mtf.one_hot(step_num, memory_length, dtype=prev_v.dtype),
        output_shape=prev_v.shape)
    # Mask out positions beyond step_num (not yet written / future tokens).
    mask = mtf.cast(
        mtf.greater(mtf.range(
            query_antecedent.mesh, memory_length, dtype=tf.int32), step_num),
        q.dtype) * -1e9
    o = dot_product_attention(q, k, v, mask)
    y = mtf.einsum([o, wo], query_antecedent.shape)
    return y, k, v
def multihead_encdec_attention_incremental(query_antecedent,
                                           wq, wo, k, v,
                                           mask,
                                           name="multihead_attention"):
  """Incremental attention over a fixed encoder memory (one decode step).

  The keys and values come from the encoder and are precomputed; only the
  query projection and output projection are applied here.
  memory_dims is a subset of query_dims.

  Args:
    query_antecedent: a mtf.Tensor with shape query_dims + [io_channels]
    wq: a mtf.Tensor with shape [heads, io_channels, kv_channels]
    wo: a mtf.Tensor with shape [heads, io_channels, kv_channels]
    k: memory_dims + [heads, memory_length, kv_channels]
    v: memory_dims + [heads, memory_length, kv_channels]
    mask: mask Tensor (see attention_mask())
    name: an optional string.

  Returns:
    A mtf.Tensor with shape [batch, qlen, io_channels]
  """
  heads, _, kv_channels = k.shape.dims[-3:]
  query_dims = query_antecedent.shape.dims[:-1]
  q_shape = mtf.Shape(query_dims + [heads, kv_channels])
  with tf.variable_scope(name, default_name="multihead_attention"):
    queries = mtf.einsum([query_antecedent, wq], q_shape)
    attended = dot_product_attention(queries, k, v, mask)
    return mtf.einsum([attended, wo], query_antecedent.shape)
def attention_mask_ignore_padding(inputs, dtype=tf.float32):
  """Bias for encoder-decoder attention: block attention to padding tokens.

  Args:
    inputs: a mtf.Tensor with shape [..., length_dim]
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., memory_length_dim] containing -1e9 at
    padding (zero-id) positions and 0 elsewhere.
  """
  memory_inputs = rename_length_to_memory_length(inputs)
  is_padding = mtf.equal(memory_inputs, 0)
  return mtf.cast(is_padding, dtype) * -1e9
def attention_mask_autoregressive(query_pos, dtype=tf.float32):
  """Bias for self-attention where attention to the right is disallowed.

  Args:
    query_pos: a mtf.Tensor with shape [..., length_dim]
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., length_dim, memory_length_dim] containing
    -1e9 where the memory position lies to the right of the query position.
  """
  memory_pos = rename_length_to_memory_length(query_pos)
  looks_forward = mtf.less(query_pos, memory_pos)
  return mtf.cast(looks_forward, dtype) * -1e9
def attention_mask_same_segment(
    query_segment, memory_segment=None, dtype=tf.float32):
  """Bias for attention where attention between segments is disallowed.

  Args:
    query_segment: a mtf.Tensor with shape [..., length_dim]
    memory_segment: an optional mtf.Tensor with shape
      [..., memory_length_dim]; defaults to query_segment.
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [..., length_dim, memory_length_dim] containing
    -1e9 where query and memory positions belong to different segments.
  """
  if memory_segment is None:
    memory_segment = query_segment
  memory_segment = rename_length_to_memory_length(memory_segment)
  cross_segment = mtf.not_equal(query_segment, memory_segment)
  return mtf.cast(cross_segment, dtype) * -1e9
def attention_bias_local_block(mesh, block_length, memory_length,
                               dtype=tf.int32):
  """Bias for attention for local blocks where attention to right is disallowed.

  The bias matrix is built from two parts: a fully-visible part for the
  memory that precedes the current block, and an autoregressive part for
  the positions inside the current block (no looking right of the query).

  Args:
    mesh: a MeshTensorflow object
    block_length: a mtf.Dimension
    memory_length: a mtf.Dimension
    dtype: a tf.dtype

  Returns:
    a mtf.Tensor with shape [block_length, memory_length]
  """
  memory_length = mtf.Dimension(memory_length.name, block_length.size)
  # Previous-block memory is entirely visible: zero bias.
  visible_part = mtf.zeros(mesh, [block_length, memory_length], dtype=dtype)
  # Within the current block, disallow attending past the query position.
  query_part = mtf.cast(
      mtf.less(mtf.range(mesh, block_length, dtype=dtype),
               mtf.range(mesh, memory_length, dtype=dtype)),
      dtype=dtype)
  combined = mtf.concat([visible_part, query_part], memory_length.name)
  return mtf.cast(combined, dtype=tf.float32) * -1e9
def attention_bias_local_2d_block(mesh,
                                  h_dim,
                                  w_dim,
                                  memory_h_dim,
                                  memory_w_dim,
                                  dtype=tf.int32):
  """Bias for attention for local blocks where attention to right is disallowed.
  Create the bias matrix by using two separate masks, one for the memory part
  which doesn't overlap with the query and second which interacts with the query
  and should be disallowed to look to the right of the current query position.
  Args:
    mesh: a MeshTensorflow object
    h_dim: a mtf.Dimension
    w_dim: a mtf.Dimension
    memory_h_dim: a mtf.Dimension
    memory_w_dim: a mtf.Dimension
    dtype: a tf.dtype
  Returns:
    a mtf.Tensor with shape [block_length, memory_length]
  """
  # Memory dims are resized to match the query block dims.
  memory_height = mtf.Dimension(memory_h_dim.name, h_dim.size)
  memory_width = mtf.Dimension(memory_w_dim.name, w_dim.size)
  # Fully-visible (zero-bias) regions above and to the left of the block.
  mask_top_visible = mtf.zeros(mesh, [h_dim, memory_height], dtype=dtype)
  mask_left_visible = mtf.zeros(mesh, [w_dim, memory_width], dtype=dtype)
  # NOTE(review): this compares a range over memory_height with a range over
  # memory_width — presumably assumes h_dim.size == w_dim.size (square
  # blocks); confirm against callers.
  mask_query = mtf.greater(
      mtf.range(mesh, memory_height, dtype=tf.int32),
      mtf.range(mesh, memory_width, dtype=dtype))
  width_mask = mtf.concat([mask_left_visible, mask_query], memory_width.name)
  mask = mtf.cast(
      mtf.concat([mask_top_visible, width_mask], memory_height.name),
      dtype=tf.float32) * -1e9
  return mask
def multiplicative_jitter(x, epsilon=1e-2):
  """Multiply values by a random number between 1-epsilon and 1+epsilon.

  Makes models more resilient to rounding errors introduced by bfloat16.
  This seems particularly important for logits.

  Args:
    x: a mtf.Tensor
    epsilon: a floating point value

  Returns:
    a mtf.Tensor with the same type and shape as x.
  """
  if epsilon == 0:
    # No-op: avoid generating a random tensor at all.
    return x
  jitter = mtf.random_uniform(
      x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0 + epsilon,
      dtype=x.dtype)
  return x * jitter
def multihead_self_attention_memory_compressed(x,
                                               mask_right,
                                               compression_factor,
                                               kv_channels,
                                               heads,
                                               dropout=0.0,
                                               dropout_broadcast_dims=None,
                                               master_dtype=tf.float32,
                                               slice_dtype=tf.float32,
                                               name="multihead_attention"):
  """Memory-compressed self-attention.
  The memory is first average-pooled (strided) to make it shorter by
  a factor of compression_factor.
  Args:
    x: a mtf.Tensor with shape
      [<batch_dims>, query_length, io_channels]
    mask_right: a boolean, whether to apply a causal (no-look-right) mask
    compression_factor: an integer
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    dropout: a floating point value
    dropout_broadcast_dims: an optional list of mtf.Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.
  Returns:
    A mtf.Tensor with shape [batch, query_length, io_channels]
  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = x.shape.dims[:-2]
  length, io_channels = x.shape.dims[-2:]
  with tf.variable_scope(name,
                         default_name="compressed_attention",
                         values=[x]):
    wq, wk, wv, wo = multihead_attention_vars(
        x.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, x.dtype)
    # Shorten the memory by group-averaging, then rename its length dim so
    # it contracts independently of the query length in the einsums below.
    memory_antecedent = compress_mean(x, length, compression_factor)
    memory_antecedent = rename_length_to_memory_length(memory_antecedent)
    memory_length = memory_antecedent.shape.dims[-2]
    q = mtf.einsum(
        [x, wq],
        mtf.Shape(batch_dims + [heads, length, kv_channels]))
    k = mtf.einsum(
        [memory_antecedent, wk],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    v = mtf.einsum(
        [memory_antecedent, wv],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    if mask_right:
      query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
      # Each compressed memory slot covers compression_factor original
      # positions; its effective position is that of its last member, so a
      # slot is masked if any position it summarizes lies right of the query.
      memory_pos = (
          mtf.range(x.mesh, memory_length, dtype=tf.int32) * compression_factor
          + (compression_factor - 1))
      mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
    else:
      mask = None
    # extra_logit=0.0 lets the softmax attend to "nothing" when all memory
    # slots are masked out.
    o = dot_product_attention(
        q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
    return mtf.einsum(
        [o, wo], mtf.Shape(batch_dims + [length, io_channels]))
def compress_mean(x, dim, compression_factor):
  """Compress a dimension by taking group means.

  Splits `dim` into groups of size `compression_factor` and replaces each
  group with its mean.  `dim.size` must be divisible by compression_factor
  (integer division is used for the compressed size).

  Args:
    x: a Tensor
    dim: a dimension in x.shape
    compression_factor: an integer

  Returns:
    a Tensor in which `dim` is shrunk by `compression_factor`.
  """
  old_dims = x.shape.dims
  axis = old_dims.index(dim)
  compressed = mtf.Dimension(dim.name, dim.size // compression_factor)
  group = mtf.Dimension("compression_factor", compression_factor)
  # Split dim into [compressed, group], then average away the group dim.
  reshaped = mtf.reshape(
      x, old_dims[:axis] + [compressed, group] + old_dims[axis + 1:])
  return mtf.reduce_mean(reshaped, reduced_dim=group)
def embedding_weights(mesh,
                      vocab_dim,
                      output_dim,
                      variable_dtype,
                      name="embedding",
                      ensemble_dim=None,
                      initializer=None):
  """Create an embedding weight variable, optionally with ensemble dims.

  Args:
    mesh: a Mesh
    vocab_dim: a mtf.Dimension
    output_dim: a mtf.Dimension
    variable_dtype: a mtf.VariableDType
    name: a string
    ensemble_dim: an optional mtf.Dimension or list of mtf.Dimension,
      prepended to the variable shape
    initializer: an optional tf initializer (defaults to random normal)

  Returns:
    a mtf variable with shape ensemble_dims + [vocab_dim, output_dim]
  """
  if not ensemble_dim:
    ensemble_dims = []
  elif isinstance(ensemble_dim, list):
    ensemble_dims = ensemble_dim
  else:
    ensemble_dims = [ensemble_dim]
  shape = mtf.Shape(ensemble_dims) + [vocab_dim, output_dim]
  if initializer is None:
    initializer = tf.random_normal_initializer()
  return mtf.get_variable(
      mesh, name, shape, dtype=variable_dtype, initializer=initializer)
def embedding(indices, vocab_dim, output_dim, variable_dtype, name="embedding"):
  """Embedding layer: look up rows of an embedding table by indices."""
  table = embedding_weights(
      indices.mesh, vocab_dim, output_dim, variable_dtype, name)
  return mtf.gather(table, indices, vocab_dim)
def max_pool2d(x, ksize=(2, 2), name="max_pool2d"):
  """2D max pooling over the HW dimensions of an [N...HWC] tensor.

  Only non-overlapping pooling is supported (strides == ksize), and the
  input HW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  if tuple(ksize) == (1, 1):
    return x  # identity pooling: nothing to do
  op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="MAX_2D", name=name)
  return op.outputs[0]
def max_pool3d(x, ksize=(2, 2, 2), name="max_pool3d"):
  """3D max pooling over the DHW dimensions of an [N...DHWC] tensor.

  Only non-overlapping pooling is supported (strides == ksize), and the
  input DHW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  if tuple(ksize) == (1, 1, 1):
    return x  # identity pooling: nothing to do
  op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="MAX_3D", name=name)
  return op.outputs[0]
def avg_pool2d(x, ksize=(2, 2), name="avg_pool2d"):
  """2D average pooling over the HW dimensions of an [N...HWC] tensor.

  Only non-overlapping pooling is supported (strides == ksize), and the
  input HW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  if tuple(ksize) == (1, 1):
    return x  # identity pooling: nothing to do
  op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="AVG_2D", name=name)
  return op.outputs[0]
def avg_pool3d(x, ksize=(2, 2, 2), name="avg_pool3d"):
  """3D average pooling over the DHW dimensions of an [N...DHWC] tensor.

  Only non-overlapping pooling is supported (strides == ksize), and the
  input DHW dimensions must be divisible by ksize.

  Args:
    x: a Tensor
    ksize: kernel size. A list or tuple
    name: an optional string

  Returns:
    a Tensor
  """
  if tuple(ksize) == (1, 1, 1):
    return x  # identity pooling: nothing to do
  op = mtf.PoolOperation(
      x, ksize, strides=ksize, pool_fn_string="AVG_3D", name=name)
  return op.outputs[0]
def _reversible_half_residual_grad(
    explicit_inputs, all_inputs, forward_operations, outputs, output_grads):
  """Backpropagation function for a revnet half-step (see
  reversible_half_residual_and_swap).

  Reconstructs the input activations from the outputs instead of storing
  them, re-runs f on the reconstructed input, and backpropagates through
  that recomputation.
  """
  x1, _, x2, _ = explicit_inputs
  # Captured tensors used by f beyond the explicit inputs (e.g. parameters).
  extra_inputs = all_inputs[len(explicit_inputs):]
  _, _, y1, _ = outputs
  dy2, dy2_backwards, dy1, dy1_backwards = output_grads
  # last operation should be an addition to produce y1
  if not isinstance(forward_operations[-1], mtf.AddOperation):
    raise ValueError("expected an addition here")
  f_ops = forward_operations[:-1]
  orig_fx2 = f_ops[-1].outputs[0]
  orig_x2 = x2
  # If the downstream layer passed back reconstructed activations, prefer
  # those over the stored forward-pass tensors.
  if dy2_backwards is not None:
    x2 = dy2_backwards
  if dy1_backwards is not None:
    y1 = dy1_backwards
  graph = all_inputs[0].graph
  # Recompute f(x2) on the (possibly reconstructed) x2.
  f_again_ops, mapping = graph.clone_operations(f_ops, {orig_x2: x2})
  fx2 = mapping[orig_fx2]
  # Invert the forward pass: x1 = y1 - f(x2).
  x1 = y1 - fx2
  grads = mtf.gradients(ys=[fx2], xs=[x2] + extra_inputs, grad_ys=[dy1],
                        operations=f_again_ops)
  # x2 fed both f and the pass-through lane, so its grad has two terms.
  dx2 = dy2 + grads[0]
  extra_inputs_grads = grads[1:]
  dx1 = dy1
  # Also return the reconstructed x1/x2 so the previous layer can use them.
  return [dx1, x1, dx2, x2] + extra_inputs_grads
def _half_residual_and_swap(x1, x1_backwards, x2, x2_backwards, f=None):
return x2, x2_backwards, x1 + f(x2), x1_backwards
def reversible_half_residual_and_swap(x1,
                                      x1_backwards,
                                      x2,
                                      x2_backwards,
                                      f,
                                      recompute_grads=True):
  """Building block of a revnet (https://arxiv.org/abs/1707.04585).

  All input and output Tensors share the same shape and dtype.  The forward
  computation is:
    y1 = x1 + f(x2)
    y2 = x2

  The x1_backwards / x2_backwards tensors carry reconstructed activations
  during backpropagation.  Pass None for the first layer, then thread each
  layer's outputs into the next:

    x1, x1_backwards, x2, x2_backwards = x, None, x, None
    for f in my_functions:
      x1, x1_backwards, x2, x2_backwards = mtf.layers.reversible_half_residual(
          x1, x1_backwards, x2, x2_backwards)
    y = (x1 + x2) / 2

  Args:
    x1: a Tensor
    x1_backwards: a Tensor or None
    x2: a Tensor
    x2_backwards: a Tensor or None
    f: a function from Tensor to Tensor
    recompute_grads: a boolean - if True, activations are reconstructed
      during backprop instead of stored (memory-saving revnet behavior)

  Returns:
    y2: a Tensor
    y2_backwards: a Tensor
    y1: a Tensor
    y1_backwards: a Tensor
  """
  if not recompute_grads:
    # Plain (non-reversible) path: ordinary autodiff through f.
    return _half_residual_and_swap(x1, x1_backwards, x2, x2_backwards, f)
  # The custom-gradient machinery needs concrete tensors for the backward
  # lanes; zeros stand in on the first layer.
  x1_backwards = mtf.zeros_like(x1) if x1_backwards is None else x1_backwards
  x2_backwards = mtf.zeros_like(x2) if x2_backwards is None else x2_backwards
  return mtf.custom_gradient(
      functools.partial(_half_residual_and_swap, f=f),
      _reversible_half_residual_grad,
      [x1, x1_backwards, x2, x2_backwards])
|
en
| 0.76343
|
# coding=utf-8 # Copyright 2020 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Layers implemented in Mesh TensorFlow. Turn this on with gin to enable the unit-scaling convention. TODO(noam): turn this comment into a position paper and post to arxiv Under the unit-scaling convention, all weights are initialized with unit variance, and the outputs of most contractions (matmul/einsum operations) are divided by the square-root of the sizes of the contracting dimensions. This differs from the typical inverse-square-root weight-initalization convention often attributed to http://proceedings.mlr.press/v9/glorot10a.html in which weights are typically initialized according to a distribution with mean zero and standard-deviation equal to the inverse-square-root of the contracting dimension(s). Under both conventions, the purpose of the inverse-square-root scaling is so that activations in a layer should be scaled similarly to the activations in the previous layer. (Typically, models are initialized so that activations in all layers should have RMS=O(1)). The difference between the two conventions is whether this scaling happens in the parameters (their way), or as an explicit multiplier on the activations (our way). In our opinion, parameter-scaling (their way) has three main disadvantages: 1. Optimizers need to be aware of differently-scaled parameters. This is because the learning-rates of adaptive optimizers represent target step-sizes for the parameters. 
The desired step size for a parameter logically depends on the scale of the parameter itself, and so one typically needs to lower the learning-rate when the layers get bigger and the parameters get consequently smaller. Under the unit-scaling convention, this is unnecessary, since all parameters are on the same unit scale. 2. It is often unwieldy from an engineering standpoint to communicate to both the variable initializers and to the optimizer what the scale of the variable should be. Typically, the variable initializer guesses this by inferring from the dimension order which dimension of the variable might represent contracting dimensions. This is highly error-prone. 3. Sometimes contractions happen without being associated with parameters, as in neural attention. It may be important here too to divide by the square root of the contracting dimensions, in order to maintain activation scale. See the discussion in section 3.2.1 of https://arxiv.org/abs/1706.03762 Being in the habit of scaling the outputs of contractions in this way makes it more likely to remember to do the same thing in these circumstances. Note: When switching to the unit-scaling convention, it is probably necessary to raise the learning rate, since larger parameters need larger updates. An exception is when using Adafactor, which by default scales the updates relative to the scale of the current parameter values. Args: value: a boolean Returns: a boolean Einsum with optional unit-scaling convention. If the unit-scaling convention is enabled, then divide the output by the square-root of the product of the contracting dimensions. Args: xs: a list of mtf.Tensor *args: arguments to mtf.einsum **kwargs: keyword arguments to mtf.einsum Returns: a mtf.Tensor Dense layer doing (kernel*x + bias) computation. Args: x: a mtf.Tensor of shape [..., reduced_dims]. new_dims: a list of mtf.Dimension. reduced_dims: a list of mtf.Dimensions of x to be reduced. 
If omitted (deprecated interface), we reduce the last dimension. expert_dims: an optional list of mtf.Dimension which represent different experts. Different experts get different weights. use_bias: a boolean, whether to add bias. activation: an optional function from mtf.Tensor to mtf.Tensor master_dtype: a tf.dtype (deprecated - use variable_dtype) slice_dtype: a tf.dtype (deprecated - use variable_dtype) variable_dtype: a mtf.VariableDType kernel_initializer: an initializer for kernel variable. kernel_weights: mtf.Tensor weights matrix to use for dense computation name: a string used for tf.variable_scope. Returns: a mtf.Tensor of shape [..., new_dims]. # if any reduced dims have the same names as new dims, first change these # dimension names in the input so as to avoid name conflict in the weight # matrix. Create w matrix variable. Args: x: a mtf.Tensor. new_dims: a list of mtf.Dimension. reduced_dims: a list of mtf.Dimensions of x to be reduced. expert_dims: an optional list of mtf.Dimension which represent different experts. Different experts get different weights. kernel_initializer: an initializer for kernel variable. name: a string used for tf.variable_scope. variable_dtype: a mtf.VariableDType master_dtype: a tf.dtype (deprecated - use variable_dtype) slice_dtype: a tf.dtype (deprecated - use variable_dtype) Returns: a mtf.Tensor. Component-wise product of multiple dense layers. e.g. if activation_functions=["linear", "sigmoid"], then this implements Gated Linear Units https://arxiv.org/pdf/1612.08083.pdf Args: x: a Tensor reduced_dims: a list of Dimensions. new_dims: a list of Dimensions. activation_functions: a list of activation functions (or a singleton) Each can be a either: - a callable function from Tensor to Tensor - a string function name from namespace mtf) - None or "linear", meaning no activation function name: an optional string **kwargs: additional kwargs for mtf.layers.dense() Initializer that can be passed to dense(). 
The __call__ function takes reduced_dims and new_dims and returns a tf initializer class. Initializer capable of adapting its scale to the shape of weights. With `distribution="normal"`, samples are drawn from a truncated normal distribution centered on zero, with `stddev = sqrt(scale / n)` where n is: 1.0 if unit_scaling_convention() is turned on otherwise: number of input units in the weight tensor, if mode = "fan_in" number of output units, if mode = "fan_out" average of the numbers of input and output units, if mode = "fan_avg" With `distribution="uniform"`, samples are drawn from a uniform distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`. # Arguments scale: Scaling factor (positive float). mode: One of "fan_in", "fan_out", "fan_avg". distribution: Random distribution to use. One of "normal", "uniform". seed: A Python integer. Used to seed the random generator. 1D Convolution. Args: x: a mtf.Tensor of format NWC. output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a positive integer, the filter width. stride: a positive integer, the stride. **kw_args: optional keyword arguments to mtf.layers.conv2d. Returns: a mtf.Tensor of format NWO, where O is the output dimension. Hacky version of a 1d depthwise convolution. Args: x: a mtf.Tensor depth_dim: mtf.Dimension, length_dim: mtf.Dimension, min_relative_pos: int, min relative position, max_relative_pos: int, max relative position, name: str, variable_scope name, use_bias: Bool, whether to use bias, initializer_scale: int, initalizer scale, kernel_depth_weights: an optional list of kernel weight tensors. The list contains one element for each relative position in the kernel. Each element has a width equal to the depth over which the separable conv operation is being "separated" Returns: an mtf.Tensor 1-D convolution with separable filters. The filter size will be `max_relative_pos - min_relative_pos + 1`. Args: x: a mtf.Tensor of format NWC. 
output_dim: a mtf.Dimension, indicating the output channel dimension. min_relative_pos: an integer, the inclusive minimum relative positive of the depthwise filter, where a relative position of zero means the left end of the filter aligns with the left end of the input. max_relative_pos: an integer, the inclusive maximum relative position of the depthwise filter, where a relative position of zero means the right end of the filter aligns with the right end of the input. depthwise_filter_initializer_scale: a positive float, the scale of the initializer for the depthwise filter. pointwise_filter_initializer_scale: a positive float, the scale of the initializer for the pointwise filter. name: a string used for tf.variable_scope. use_bias: a bool, whether to use bias in the convolutions. kernel_depth_weights: an optional list of kernel weight tensors. The list contains one element for each relative position in the kernel. Each element has a width equal to the dimension over which the separable conv operation is being "separated" Returns: a mtf.Tensor of format NWO, where O is the output dimension. 2D Convolution. Args: x: a mtf.Tensor of format NHWC. output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_height, filter_width]. strides: a list or tuple in format [stride_height, stride_width]. padding: either "SAME" or "VALID". filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a string used for tf.variable_scope. Returns: a mtf.Tensor. # Pad stride in batch and channel dimensions. 2D Convolution with spatial partitioning. Spatial partitioning is implemented by decomposing the image into blocks. Block dimensions represented as h_blocks_dim and w_blocks_dim can be split along the mesh axis. If split, then we do a halo exchange where each block receives the part of the image from its left and right neighbors necessary to do the convolution. 
Exchange can involve complete or partial blocks depending on the filter height and width. Currently, only "SAME" padding with dilation rate of 1 is supported. Args: x: a Tensor of shape [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim] output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_height, filter_width]. strides: a list or tuple in format [stride_height, stride_width]. padding: string, "SAME". The type of padding algorithm to use. "Valid" is not currently supported. h_blocks_dim: Dimension representing number of height blocks. w_blocks_dim: Dimension representing number of witdh blocks. filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a name for the operation (optional). Returns: A Tensor of shape [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim] # If h_blocks_dim and w_blocks_dim are not split, directly call conv2d. #assert filter_size[0] % 2 == 1 #assert filter_size[1] % 2 == 1 # Padding 'VALID' is not supported yet. # Halo exchange for h_blocks and w_blocks. #", blocks_dim, block_size_dim, filter_size, "halo_size=", halo_size) 2D Transposed Convolution. Args: x: a mtf.Tensor of format NHWC. output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_height, filter_width]. Only filter_size of (2, 2) is tested. strides: a list or tuple in format [stride_height, stride_width]. Only strides of (2, 2) is tested. padding: either "SAME" or "VALID". filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a string used for tf.variable_scope. Returns: a mtf.Tensor. # Pad stride in batch and channel dimensions. 2D Transposed Convolution with spatial partitioning. Spatial partitioning is implemented by decomposing the image into blocks. 
Block dimensions represented as h_blocks_dim and w_blocks_dim can be split along the mesh axis. If split, then we do a halo exchange where each block receives the part of the image from its left and right neighbors necessary to do the convolution. Exchange can involve complete or partial blocks depending on the filter depth and height. Currently, only "SAME" padding with dilation rate of 1 is supported. Only splitting along the depth and height dimensions are supported. Args: x: a Tensor of shape [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channel_dim] output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_height, filter_width]. Only filter_size of (2, 2) is tested. strides: a list or tuple in format [stride_height, stride_width]. Only strides of (2, 2) is tested. padding: string, "SAME". The type of padding algorithm to use. "Valid" is not currently supported. h_blocks_dim: Dimension representing number of height blocks. w_blocks_dim: Dimension representing number of width blocks. filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a name for the operation (optional). Returns: A Tensor of shape [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim] # If h_blocks_dim and w_blocks_dim are not split, directly call conv2d_trans. # Now only supports even-sized filters. # Padding 'VALID' is not supported yet. # Halo exchange for h_blocks and w_blocks. # TODO(lehou): figure out the halo_size in general cases. 3D Convolution. Args: x: a mtf.Tensor of format NDHWC. output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_depth, filter_height, filter_width]. strides: a list or tuple in format [stride_depth, stride_height, stride_width]. padding: either "SAME" or "VALID". filter_initializer: the initializer for tf.get_variable. 
variable_dtype: a mtf.VariableDType name: a string used for tf.variable_scope. Returns: a mtf.Tensor. # Pad stride in batch and channel dimensions. # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d. # assert filter_size[0] % 2 == 1 # assert filter_size[1] % 2 == 1 # assert filter_size[2] % 2 == 1 # Padding 'VALID' is not supported yet. # Halo exchange for d_blocks and h_blocks. # Pad w dimension with zeros. #x = mtf.pad(x, [filter_size[2] // 2, filter_size[2] // 2], # dim_name=w_dim.name, name="conv3d_pad_w_dim") 3D Convolution with spatial partitioning. Spatial partitioning is implemented by decomposing the image into blocks. Block dimensions represented as d_blocks_dim and h_blocks_dim can be split along the mesh axis. If split, then we do a halo exchange where each block receives the part of the image from its left and right neighbors necessary to do the convolution. Exchange can involve complete or partial blocks depending on the filter depth and height. Currently, only "SAME" padding with dilation rate of 1 is supported. Only splitting along the depth and height dimensions are supported. Args: x: a Tensor of shape [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim] output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_depth, filter_height, filter_width]. strides: a list or tuple in format [stride_depth, stride_height, stride_width]. padding: string, "SAME". The type of padding algorithm to use. "Valid" is not currently supported. d_blocks_dim: Dimension representing number of depth blocks. h_blocks_dim: Dimension representing number of height blocks. filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a name for the operation (optional). 
Returns: A Tensor of shape [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim, d_dim, h_dim, w_dim, out_channels_dim] # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d. # Padding 'VALID' is not supported yet. # Halo exchange for d_blocks and h_blocks. # Pad w dimension with zeros. 3D Convolution with spatial partitioning. Spatial partitioning is implemented by decomposing the image into blocks. Block dimensions represented as d_blocks_dim and h_blocks_dim can be split along the mesh axis. If split, then we do a halo exchange where each block receives the part of the image from its left and right neighbors necessary to do the convolution. Exchange can involve complete or partial blocks depending on the filter depth and height. Currently, only "SAME" padding with dilation rate of 1 is supported. Only splitting along the depth and height dimensions are supported. Args: x: a Tensor of shape [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim] output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_depth, filter_height, filter_width]. strides: a list or tuple in format [stride_depth, stride_height, stride_width]. padding: string, "SAME". The type of padding algorithm to use. "Valid" is not currently supported. d_blocks_dim: Dimension representing number of depth blocks. h_blocks_dim: Dimension representing number of height blocks. filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a name for the operation (optional). Returns: A Tensor of shape [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim, d_dim, h_dim, w_dim, out_channels_dim] # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d. #ymzk #assert filter_size[0] % 2 == 1 #assert filter_size[1] % 2 == 1 #assert filter_size[2] % 2 == 1 # Padding 'VALID' is not supported yet. # Halo exchange for d_blocks and h_blocks. 
# for blocks_dim, block_size_dim, halo_size in [ # (d_blocks_dim, d_dim, [(filter_size[0] - 1) // 2, filter_size[0] // 2])]: # %s, %s, %s, %s, %s, %s"% (x, output_dim, filter_size, blocks_dim, block_size_dim, halo_size)) #print("#3d#", x, output_dim, filter_size, blocks_dim, block_size_dim, halo_size) # %s" %x) # Pad w dimension with zeros. #x = mtf.pad(x, [(filter_size[2] - 1) // 2, filter_size[2] // 2], #x = mtf.pad(x, [(filter_size[2] - 1 ) // 2, filter_size[2] // 2], # dim_name=h_dim.name, name="conv3d_pad_h_dim") 3D Transposed Convolution. Args: x: a mtf.Tensor of format NDHWC. output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_depth, filter_height, filter_width]. Only filter_size of (2, 2, 2) is tested. strides: a list or tuple in format [stride_depth, stride_height, stride_width]. Only strides of (2, 2, 2) is tested. padding: either "SAME" or "VALID". filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a string used for tf.variable_scope. Returns: a mtf.Tensor. # Pad stride in batch and channel dimensions. 3D Transposed Convolution with spatial partitioning. Spatial partitioning is implemented by decomposing the image into blocks. Block dimensions represented as d_blocks_dim and h_blocks_dim can be split along the mesh axis. If split, then we do a halo exchange where each block receives the part of the image from its left and right neighbors necessary to do the convolution. Exchange can involve complete or partial blocks depending on the filter depth and height. Currently, only "SAME" padding with dilation rate of 1 is supported. Only splitting along the depth and height dimensions are supported. Args: x: a Tensor of shape [batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim] output_dim: a mtf.Dimension, indicating the output channel dimension. filter_size: a list or tuple in format [filter_depth, filter_height, filter_width]. 
Only filter_size of (2, 2, 2) is tested. strides: a list or tuple in format [stride_depth, stride_height, stride_width]. Only strides of (2, 2, 2) is tested. padding: string, "SAME". The type of padding algorithm to use. "Valid" is not currently supported. d_blocks_dim: Dimension representing number of depth blocks. h_blocks_dim: Dimension representing number of height blocks. filter_initializer: the initializer for tf.get_variable. variable_dtype: a mtf.VariableDType name: a name for the operation (optional). Returns: A Tensor of shape [batch, d_blocks_dim, h_blocks_dim, w_blocks_dim, d_dim, h_dim, w_dim, out_channels_dim] # If d_blocks_dim and h_blocks_dim are not split, directly call conv3d_trans. # Now only supports even-sized filters. # Padding 'VALID' is not supported yet. # Halo exchange for d_blocks and h_blocks. # TODO(lehou): figure out the halo_size in general cases. # Pad w dimension with zeros. Layer normalization over dimension dim. Args: x: a mtf.Tensor whose shape contains dim. dim: a mtf.Dimension epsilon: a floating point number name: a string used for tf.variable_scope. Returns: a mtf.Tensor with same shape as x. Batch normalization. Args: x: a mtf.Tensor whose shape contains [batch_dim, ..., dim] is_training: a boolean, whether mode is training. momentum: a floating point number, specifying batch norm decay value. epsilon: a floating point number. dims_idx_start: an integer. Dimension with indices in [dims_idx_start, dims_idx_end - 1] will be normalized. dims_idx_end: an integer. Dimension with indices in [dims_idx_start, dims_idx_end - 1] will be normalized. init_zero: a boolean, whether to initialize scale with 0's or 1's. name: a string used for tf.variable_scope. Returns: a mtf.Tensor with same shape as x. # At training time, calculate mean and variance and normalize across batch # dim. # Update running mean and running variance. # TODO(lehou): do not return update_ops; handle them inside MTF. 
# At eval and test time, use the running mean and variance. Per-example softmax loss. `logits` is a Tensor with floating-point dtype, containing the predicted relative log probabilities of the classes. Either hard targets or soft targets are supported. In the case of hard targets, `targets` is a Tensor with integer dtype whose values are in the range [0, vocab_dim.size). `targets` should have the same set of dimensions as `logits`, but without `vocab_dim`. In the case of soft targets, `targets` is a Tensor with floating point dtype and the same dimensions as `logits. Reducing `targets` along `vocab_dim` should result in all ones. if z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is the partition function. Example value: z_loss=1e-4. Two uses of z_loss are: - To keep the logits from drifting too far from zero, which can cause unacceptable roundoff errors in bfloat16. - To encourage the logits to be normalized log-probabilities. Args: logits: a mtf.Tensor whose shape contains vocab_dim targets: a mtf.Tensor representing hard or soft targets (see comments) vocab_dim: a mtf.Dimension z_loss: a float Returns: a mtf.Tensor whose shape is equal to logits.shape - vocab_dim Raises: ValueError: if the shapes do not match. # hard targets Sigmoid cross-entropy loss. Args: logits: a mtf.Tensor targets: a mtf.Tensor with the same shape as logits Returns: a mtf.Tensor whose shape is equal to logits.shape Raises: ValueError: if the shapes do not match. Hidden layer with ReLU activation followed by linear projection. The output has the same number of channels as the input. Args: x: a mtf.Tensor hidden_channels: a mtf.Dimension - channels in the hidden layer dropout: an optional float dropout_broadcast_dims: an optional list of mtf.Dimension master_dtype: a tf.dtype slice_dtype: a tf.dtype name: an optional string Returns: a mtf.Tensor with the same shape as x. Halo exchange for keys and values for Local 1D attention. 
Attention to the source position and a neighborhood to the left or right. The sequence is divided into blocks of length block_size. Attention for a given query position can only see memory positions less than or equal to the query position, in the corresponding block and the previous block. Args: query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels] must have the same size as query_length, but a different name. kv_channels: a mtf.Dimension (the size of the key and value vectors) heads: a mtf.Dimension (the number of heads) memory_w_dim: mtf Dimension, for the memory width block. mask_right: bool, flag specifying whether we mask out attention to the right for the decoder. master_dtype: a tf.dtype slice_dtype: a tf.dtype name: an optional string. Returns: a Tensor of shape [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels] Raises: ValueError: if channels or depth don't match. # Rename dimensions for the memory height and width. # Call einsum over the query and memory to get query q, keys k and values v. # Halo exchange for memory blocks. # Calculate the causal mask to avoid peeking into the future. We compute # this once and reuse it for all blocks since the block_size is known. Attention to the source position and a neighborhood to the left of it. Attention for a given query position p can only see memory positions in the range (p - window_size, p]. Args: x: a mtf.Tensor with shape batch_dims + [length, io_channels] kv_channels: a mtf.Dimension (the size of the key and value vectors) heads: a mtf.Dimension (the number of heads) window_size: an integer master_dtype: a tf.dtype (deprecated - use params arg) slice_dtype: a tf.dtype (deprecated - use params arg) length_per_split: an optional integer indicating the part of the length dimension per processor. You can omit if the length dimension is not split. return_kv: an optional list onto which to append the computed k and v. 
params: an optional quadruple of Tensors (see multihead_attention_params()) name: an optional string. Returns: a Tensor with the same shape as x Raises: ValueError: if channels or depth don't match. # Get query q, keys k and values v. # Choose a suitable block size. # We choose the greatest divisor of length_per_split less than or equal # to max(window_size, 128) # The num_blocks dimension gets the same name as the length dimension, # so it will be split in the same way. # augment the keys and values for each block with keys and values for # the previous window_size timesteps. # prevent looking forward # prevent looking >=block_length timesteps backward # Note: The first window_size-1 positions can see back into pre-time # where all the keys and values are zero. We could mask this out, but we # don't. Incremental local self-attention (one decode step). Incremental version of masked_local_attention_1d() Args: x: a mtf.Tensor with shape [batch..., io_channels] prev_k: mtf.Tensor with shape [batch..., heads, window_length, kv_channels] prev_v: mtf.Tensor with shape [batch..., heads, window_length, kv_channels] step_num: mtf Scalar with dtype tf.int32 master_dtype: a tf.dtype (deprecated) slice_dtype: a tf.dtype (deprecated) params: a quadruple of Tensors (see multihead_attention_params()) name: an optional string. Returns: y: A mtf.Tensor with shape [batch..., io_channels] new_k: mtf.Tensor with shape [batch..., heads, window_length, kv_channels] new_v: mtf.Tensor with shape [batch..., heads, window_length, kv_channels] Raises: ValueError: if the dimensions do not match. Halo exchange for keys and values for Local 2D attention. # shape of k is [num_h_blocks, num_w_blocks, h_dim, w_dim, kv_channels] Attention to the source position and a neighborhood to the left or right. The sequence is divided into blocks of length block_size. 
Attention for a given query position can only see memory positions less than or equal to the query position, in the corresponding block and the previous block. Args: query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels] must have the same size as query_length, but a different name. kv_channels: a mtf.Dimension (the size of the key and value vectors) heads: a mtf.Dimension (the number of heads) memory_h_dim: mtf Dimension, for the memory height block. memory_w_dim: mtf Dimension, for the memory width block. mask_right: bool, flag specifying whether we mask out attention to the right for the decoder. master_dtype: a tf.dtype slice_dtype: a tf.dtype name: an optional string. Returns: a Tensor of shape [batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels] Raises: ValueError: if channels or depth don't match. # Rename dimensions for the memory height and width. # Call einsum over the query and memory to get query q, keys k and values v. # Halo exchange for memory blocks. # Calculate the causal mask to avoid peeking into the future. We compute # this once and reuse it for all blocks since the block_size is known. Deprecated version of multihead_attention_params with combine=True. Create Parameters for Multihead Attention. If the combine flag is set to True, then we create only one variable which stacks together all of the parameters. Otherwise, we create four separate variables. Args: mesh: a Mesh heads: a Dimension io_channels: a Dimension kv_channels: a Dimension variable_dtype: a mtf.VariableDType combine: a boolean Returns: wq: a Tensor with shape [heads, io_channels, kv_channels] wk: a Tensor with shape [heads, io_channels, kv_channels] wv: a Tensor with shape [heads, io_channels, kv_channels] wo: a Tensor with shape [heads, io_channels, kv_channels] # TODO(noam): should be: o_stddev = (kv_channels.size * heads.size) ** -0.5 # verify that this still works and change it. 
# pylint: disable=g-complex-comprehension Dot-product attention. Args: q: Tensor with shape [...., length_q, depth_k]. Typically leading dimensions are [batch, heads]. k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must match with q. v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match with q. mask: mask Tensor (see attention_mask()) dropout: a float. dropout_broadcast_dims: an optional list of mtf.Dimension extra_logit: an optional scalar or tensor Returns: Tensor with shape [..., length_q, depth_v]. Multihead scaled-dot-product attention with input/output transformations. In order to use only one variable containing the four weight matrices packed together, we insist that the query and memory antecedents have the same dimensionality (io_channels) and that the keys and values have the same dimensionality (kv_channels). Args: query_antecedent: a mtf.Tensor with shape [<batch_dims>, query_length, io_channels] memory_antecedent: a mtf.Tensor with shape [batch, memory_length, io_channels] (optional) mask: mask Tensor (see attention_mask()) kv_channels: a mtf.Dimension (the size of the key and value vectors) heads: a mtf.Dimension (the number of heads) dropout: a floating point value dropout_broadcast_dims: an optional list of mtf.Dimension master_dtype: a tf.dtype slice_dtype: a tf.dtype name: an optional string. Returns: A mtf.Tensor with shape [batch, query_length, io_channels] Raises: ValueError: if the dimensions do not match. Incremental self-attention (one decode step). In order to use only one variable containing the four weight matrices packed together, we insist that the query and memory antecedents have the same dimensionality (io_channels) and that the keys and values have the same dimensionality (kv_channels). 
Args: query_antecedent: a mtf.Tensor with shape [batch..., io_channels] prev_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels] prev_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels] step_num: mtf Scalar with dtype tf.int32 master_dtype: a tf.dtype slice_dtype: a tf.dtype name: an optional string. Returns: y: A mtf.Tensor with shape [batch..., io_channels] new_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels] new_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels] Raises: ValueError: if the dimensions do not match. Incremental attention over encoder (one decode step). In order to use only one variable containing the four weight matrices packed together, we insist that the query and memory antecedents have the same dimensionality (io_channels) and that the keys and values have the same dimensionality (kv_channels). memory_dims is a subset of query_dims Args: query_antecedent: a mtf.Tensor with shape query_dims + [io_channels] wq: a mtf.Tensor with shape [heads, io_channels, kv_channels] wo: a mtf.Tensor with shape [heads, io_channels, kv_channels] k: memory_dims + [heads, memory_length, kv_channels] v: memory_dims + [heads, memory_length, kv_channels] mask: mask Tensor (see attention_mask()) name: an optional string. Returns: A mtf.Tensor with shape [batch, qlen, io_channels] Bias for encoder-decoder attention. Args: inputs: a mtf.Tensor with shape [..., length_dim] dtype: a tf.dtype Returns: a mtf.Tensor with shape [..., memory_length_dim] Bias for self-attention where attention to the right is disallowed. Args: query_pos: a mtf.Tensor with shape [..., length_dim] dtype: a tf.dtype Returns: a mtf.Tensor with shape [..., length_dim, memory_length_dim] Bias for attention where attention between segments is disallowed. 
Args: query_segment: a mtf.Tensor with shape [..., length_dim] memory_segment: a mtf.Tensor with shape [..., memory_length_dim] dtype: a tf.dtype Returns: a mtf.Tensor with shape [..., length_dim, memory_length_dim] Bias for attention for local blocks where attention to right is disallowed. Create the bias matrix by using two separate masks, one for the memory part which doesn't overlap with the query and second which interacts with the query and should be disallowed to look to the right of the current query position. Args: mesh: a MeshTensorflow object block_length: a mtf.Dimension memory_length: a mtf.Dimension dtype: a tf.dtype Returns: a mtf.Tensor with shape [block_length, memory_length] Bias for attention for local blocks where attention to right is disallowed. Create the bias matrix by using two separate masks, one for the memory part which doesn't overlap with the query and second which interacts with the query and should be disallowed to look to the right of the current query position. Args: mesh: a MeshTensorflow object h_dim: a mtf.Dimension w_dim: a mtf.Dimension memory_h_dim: a mtf.Dimension memory_w_dim: a mtf.Dimension dtype: a tf.dtype Returns: a mtf.Tensor with shape [block_length, memory_length] Multiply values by a random number between 1-epsilon and 1+epsilon. Makes models more resilient to rounding errors introduced by bfloat16. This seems particularly important for logits. Args: x: a mtf.Tensor epsilon: a floating point value Returns: a mtf.Tensor with the same type and shape as x. Memory-compressed self-attention. The memory is first average-pooled (strided) to make it shorter by a factor of compression_factor. 
Args: x: a mtf.Tensor with shape [<batch_dims>, query_length, io_channels] mask_right: a boolean compression_factor: an integer kv_channels: a mtf.Dimension (the size of the key and value vectors) heads: a mtf.Dimension (the number of heads) dropout: a floating point value dropout_broadcast_dims: an optional list of mtf.Dimension master_dtype: a tf.dtype slice_dtype: a tf.dtype name: an optional string. Returns: A mtf.Tensor with shape [batch, query_length, io_channels] Raises: ValueError: if the dimensions do not match. Compress by taking group means. Args: x: a Tensor dim: a dimension in x.shape compression_factor: an integer Returns: a Tensor Embedding weights. Embedding layer. 2D max pooling. Pooling is applied on the HW dimensions. We assume the dimensions of x is [NHWC]. There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 3]. Currently we only support unoverlapping pooling: strides == ksize. Also the input HW dimensions must be divisible by ksize. Args: x: a Tensor ksize: kernel size. A list or tuple name: an optional string Returns: a Tensor 3D max pooling. Pooling is applied on the DHW dimensions. We assume the dimensions of x is [NDHWC]. There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 10, 3]. Currently we only support unoverlapping pooling: strides == ksize. Also the input DHW dimensions must be divisible by ksize. Args: x: a Tensor ksize: kernel size. A list or tuple name: an optional string Returns: a Tensor 2D average pooling. Pooling is applied on the HW dimensions. We assume the dimensions of x is [NHWC]. There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 3]. Currently we only support unoverlapping pooling: strides == ksize. Also the input HW dimensions must be divisible by ksize. Args: x: a Tensor ksize: kernel size. A list or tuple name: an optional string Returns: a Tensor 3D average pooling. Pooling is applied on the DHW dimensions. We assume the dimensions of x is [NDHWC]. 
There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 10, 3]. Currently we only support unoverlapping pooling: strides == ksize. Also the input DHW dimensions must be divisible by ksize. Args: x: a Tensor ksize: kernel size. A list or tuple name: an optional string Returns: a Tensor Backpropagation function for a revnet. # last operation should be an addition to produce y1 Building block of a revnet. https://arxiv.org/abs/1707.04585 All the inputs and output Tensors have the same shape and dtype. The forward computation is: y1 = x1 + f(x2) y2 = x2 The x1_backwards and x2_backwards tensors are used by backpropagation. None should be passed for the first layer, then the outputs of each layer should be passed to the next. Example usage: x1, x1_backwards, x2, x2_backwards = x, None, x, None for f in my_functions: x1, x1_backwards, x2, x2_backwards = mtf.layers.reversible_half_residual( x1, x1_backwards, x2, x2_backwards) y = (x1 + x2) / 2 Args: x1: a Tensor x1_backwards: a Tensor or None x2: a Tensor x2_backwards: a Tensor or None f: a function from Tensor to Tensor recompute_grads: a boolean Returns: y2: a Tensor y2_backwards: a Tensor y1: a Tensor y1_backwards: a Tensor
| 2.351554
| 2
|
tests/features/stubbing.py
|
flexmock/flexmock
| 15
|
6629566
|
<reponame>flexmock/flexmock
"""Tests for flexmock stubbing feature."""
# pylint: disable=missing-docstring,no-self-use,no-member
from flexmock import flexmock
from flexmock._api import flexmock_teardown
from tests import some_module
class StubbingTestCase:
def test_use_replace_with_for_callable_shortcut_kwargs(self):
class Foo:
def method(self):
return "bar"
instance = Foo()
flexmock(instance, method=lambda: "baz")
assert instance.method() == "baz"
def test_should_replace_attributes_that_are_instances_of_classes(self):
class Class1:
pass
class Class2:
class1 = Class1()
class2 = Class2()
flexmock(class2, class1="test")
assert class2.class1 == "test"
def test_replace_non_callable_instance_attributes(self):
class FooClass:
def __init__(self):
self.attribute = 1
instance1 = FooClass()
instance2 = FooClass()
flexmock(instance1, attribute=2)
flexmock(instance2, attribute=1)
assert instance1.attribute == 2
flexmock_teardown()
assert instance1.attribute == 1
def test_replace_non_callable_module_attributes(self):
flexmock(some_module, MODULE_LEVEL_ATTRIBUTE="yay")
assert some_module.MODULE_LEVEL_ATTRIBUTE == "yay"
flexmock_teardown()
assert some_module.MODULE_LEVEL_ATTRIBUTE == "test"
def test_replace_non_callable_class_attributes(self):
class FooClass:
attribute = 1
instance1 = FooClass()
instance2 = FooClass()
flexmock(instance1, attribute=2)
assert instance1.attribute == 2
assert instance2.attribute == 1
flexmock_teardown()
assert instance1.attribute == 1
def test_fake_object_takes_properties(self):
fake1 = flexmock(bar=property(lambda self: "baz"))
fake2 = flexmock(foo=property(lambda self: "baz"))
assert fake1.bar == "baz"
assert fake2.foo == "baz"
|
"""Tests for flexmock stubbing feature."""
# pylint: disable=missing-docstring,no-self-use,no-member
from flexmock import flexmock
from flexmock._api import flexmock_teardown
from tests import some_module
class StubbingTestCase:
def test_use_replace_with_for_callable_shortcut_kwargs(self):
class Foo:
def method(self):
return "bar"
instance = Foo()
flexmock(instance, method=lambda: "baz")
assert instance.method() == "baz"
def test_should_replace_attributes_that_are_instances_of_classes(self):
class Class1:
pass
class Class2:
class1 = Class1()
class2 = Class2()
flexmock(class2, class1="test")
assert class2.class1 == "test"
def test_replace_non_callable_instance_attributes(self):
class FooClass:
def __init__(self):
self.attribute = 1
instance1 = FooClass()
instance2 = FooClass()
flexmock(instance1, attribute=2)
flexmock(instance2, attribute=1)
assert instance1.attribute == 2
flexmock_teardown()
assert instance1.attribute == 1
def test_replace_non_callable_module_attributes(self):
flexmock(some_module, MODULE_LEVEL_ATTRIBUTE="yay")
assert some_module.MODULE_LEVEL_ATTRIBUTE == "yay"
flexmock_teardown()
assert some_module.MODULE_LEVEL_ATTRIBUTE == "test"
def test_replace_non_callable_class_attributes(self):
class FooClass:
attribute = 1
instance1 = FooClass()
instance2 = FooClass()
flexmock(instance1, attribute=2)
assert instance1.attribute == 2
assert instance2.attribute == 1
flexmock_teardown()
assert instance1.attribute == 1
def test_fake_object_takes_properties(self):
fake1 = flexmock(bar=property(lambda self: "baz"))
fake2 = flexmock(foo=property(lambda self: "baz"))
assert fake1.bar == "baz"
assert fake2.foo == "baz"
|
en
| 0.876259
|
Tests for flexmock stubbing feature. # pylint: disable=missing-docstring,no-self-use,no-member
| 2.427031
| 2
|
src/qrcode/pyqart/art/qart.py
|
lapinozz/ArtCoder
| 15
|
6629567
|
<reponame>lapinozz/ArtCoder<filename>src/qrcode/pyqart/art/qart.py
# Added at : 2016.8.2
# Author : 7sDream
# Usage : Accept data and source image, make a QArt.
import itertools
from random import randint
from .source import QArtSourceImage
from .bitblock import BitBlock
from ..qr import QrData, QrPainter
from ..qr.data.numbers import Numbers
from ..qr.painter.point import QrPointType
from ..qr.ec import RSEncoder
from ..common import Bits, BIT_PER_CW
import PIL.Image as Image
__all__ = ['QArtist']
INF = float('inf')
class QArtist(QrPainter):
def __init__(self, data, contentValues, contentPriorities, version=None, mask=None, level=0, rotation=0,
dither=False, only_data=False, rand=False, higher_first=False,
dy=None, dx=None):
#assert isinstance(contentValues, (str, QArtSourceImage, Image.Image))
#if not isinstance(img, QArtSourceImage):
img = QArtSourceImage(contentValues, contentPriorities)
#self.source = img
self.dy = dy
self.dx = dx
self._only_data = bool(only_data)
self._higher_first = bool(higher_first)
if not isinstance(data, QrData):
data = QrData(data, level)
#data = QrData(url + '#', level)
super().__init__(data, version, mask, rotation)
args, _, _ = self.get_params()
print('Processing input image...', end='', flush=True)
self._targets = img.to_targets(
self.canvas, args, bool(dither), rand, dy, dx)
self.dither = dither
print('Done.')
self._bits = None
@property
def bits(self):
if self._bits is not None:
return self._bits
args, available, used = self.get_params()
cci_length = args.cci_length_of(Numbers)
available_for_number = available - 4 - cci_length
used += 4 + cci_length
if available_for_number < 4:
return super().bits
else:
numbers_count = available_for_number // 10 * 3
remaining = available_for_number % 10
if remaining >= 7:
numbers_count += 2
remaining -= 7
elif remaining >= 4:
numbers_count += 1
remaining -= 4
upper = args.dcwc * BIT_PER_CW - remaining
#self._data.put_numbers('0' * numbers_count)
"""
upper = 0
numbers_count = 0
"""
while True:
bits = super().bits
di = 0
eci = args.dcwc * BIT_PER_CW
ecbc = args.eccwcpb * BIT_PER_CW
data_bits = Bits()
ec_bits = Bits()
for i in range(args.bc):
dbc = args.dcwcof(i) * BIT_PER_CW
low = 0
high = dbc
if di < used:
low = used - di
if low >= dbc:
data_bits.extend(bits, di, dbc)
ec_bits.extend(bits, eci, ecbc)
di += dbc
eci += ecbc
continue
if di + dbc > upper:
high = upper - di
if high <= 0:
data_bits.extend(bits, di, dbc)
ec_bits.extend(bits, eci, ecbc)
di += dbc
eci += ecbc
continue
if not self._only_data:
#print('Create BitBlock', '{i}/{bc}...'.format(i=i+1, bc=args.bc,), end='', flush=True)
block = BitBlock(bits, di, dbc, eci, ecbc)
else:
block = Bits.copy_from(bits, di, dbc)
# Lock uncontrollable bits
locked_bits = set()
if not self._only_data:
for j in itertools.chain(range(0, low), range(high, dbc)):
assert block.set(j, bits[di + j])
else:
for j in itertools.chain(range(0, low), range(high, dbc)):
locked_bits.add(j)
targets_index = list(range(di, di+dbc))
if not self._only_data:
targets_index.extend(range(eci, eci+ecbc))
def compare(x):
t = self._targets[x]
if t.is_hard_zero():
if self._higher_first:
return INF
else:
return -1
else:
return t.contrast
targets_index = sorted(targets_index, key=compare,
reverse=self._higher_first)
for target_index in targets_index:
target = self._targets[target_index]
point = target.point
fill = target.fill
if point.invert:
fill = not fill
if target.is_hard_zero():
fill = False
if point.type is QrPointType.DATA:
index = point.offset - di
else:
assert point.type is QrPointType.CORRECTION
index = point.offset - eci + dbc
if not self._only_data:
block.set(index, fill)
elif index not in locked_bits:
block[index] = fill
if not self._only_data:
new_block_bits = block.bits()
data_bits.extend(new_block_bits, 0, dbc)
ec_bits.extend(new_block_bits, dbc, ecbc)
else:
data_bits.extend(block)
ec_bits.extend(RSEncoder.encode(block, ecbc // 8, True))
di += dbc
eci += ecbc
error_count = 0
numbers = ''
for i in range(0, numbers_count, 3):
if i + 3 > numbers_count:
count = [None, 4, 7][numbers_count - i]
else:
count = 10
offset = used + i // 3 * 10
value = Bits.copy_from(data_bits, offset, count)
value = value.as_int
if count == 10 and value >= 1000:
rand_pos = randint(0, 4)
hard_zero_pos = offset + rand_pos
self._targets[hard_zero_pos].set_hard_zero()
error_count += 1
value -= 2**(9 - rand_pos)
elif count == 7 and value >= 100:
rand_pos = randint(0, 1)
hard_zero_pos = offset + rand_pos
self._targets[hard_zero_pos].set_hard_zero()
error_count += 1
value -= 2**(6 - rand_pos)
elif count == 4 and value >= 10:
hard_zero_pos = offset
self._targets[hard_zero_pos].set_hard_zero()
error_count += 1
value -= 8
numbers += str(value).rjust(count // 3, '0')
#print('Error count', error_count, end='')
if error_count == 0:
#print(', send to printer.')
data_bits.extend(ec_bits)
self._bits = data_bits
return data_bits
else:
#print(', restart.')
pass
|
# Added at : 2016.8.2
# Author : 7sDream
# Usage : Accept data and source image, make a QArt.
import itertools
from random import randint
from .source import QArtSourceImage
from .bitblock import BitBlock
from ..qr import QrData, QrPainter
from ..qr.data.numbers import Numbers
from ..qr.painter.point import QrPointType
from ..qr.ec import RSEncoder
from ..common import Bits, BIT_PER_CW
import PIL.Image as Image
__all__ = ['QArtist']
INF = float('inf')
class QArtist(QrPainter):
    """QR painter that blends a source image into the QR module pattern.

    Extends :class:`QrPainter`: bits of the data / error-correction
    codewords that the image wants to control are rewritten toward the
    image's per-bit "targets", and the numeric filler section is then
    validated digit-group by digit-group; any group that overflows its
    decimal range gets a forced-zero bit and the whole encode restarts
    until every group is in range.
    """
    def __init__(self, data, contentValues, contentPriorities, version=None, mask=None, level=0, rotation=0,
                 dither=False, only_data=False, rand=False, higher_first=False,
                 dy=None, dx=None):
        """Build the artist and precompute per-bit image targets.

        Args:
            data: payload; wrapped as ``QrData(data, level)`` unless it is
                already a ``QrData`` instance.
            contentValues, contentPriorities: pixel values and per-pixel
                priorities fed to ``QArtSourceImage``.
            version, mask, rotation: QR parameters forwarded to ``QrPainter``.
            level: error-correction level used when wrapping ``data``.
            dither: forwarded to target generation (and kept on ``self``).
            only_data: if true, only data codewords are image-controlled and
                EC codewords are recomputed from them with ``RSEncoder``.
            rand, higher_first, dy, dx: target-generation tuning options —
                exact semantics live in ``QArtSourceImage.to_targets``.
        """
        #assert isinstance(contentValues, (str, QArtSourceImage, Image.Image))
        #if not isinstance(img, QArtSourceImage):
        img = QArtSourceImage(contentValues, contentPriorities)
        #self.source = img
        self.dy = dy
        self.dx = dx
        self._only_data = bool(only_data)
        self._higher_first = bool(higher_first)
        if not isinstance(data, QrData):
            data = QrData(data, level)
        #data = QrData(url + '#', level)
        super().__init__(data, version, mask, rotation)
        args, _, _ = self.get_params()
        print('Processing input image...', end='', flush=True)
        # One target per controllable bit position, aligned with the canvas.
        self._targets = img.to_targets(
            self.canvas, args, bool(dither), rand, dy, dx)
        self.dither = dither
        print('Done.')
        # Cache for the computed bit stream (filled by the `bits` property).
        self._bits = None
    @property
    def bits(self):
        """Lazily compute (and cache) the final art-adjusted bit stream.

        The spare capacity is treated as a Numbers-mode filler section of
        ``numbers_count`` digits; the loop paints image targets into each
        block, then validates every digit group, forcing "hard zeros" on
        overflowing groups and restarting until no errors remain.
        """
        if self._bits is not None:
            return self._bits
        args, available, used = self.get_params()
        # 4 mode-indicator bits + character-count indicator precede the digits.
        cci_length = args.cci_length_of(Numbers)
        available_for_number = available - 4 - cci_length
        used += 4 + cci_length
        if available_for_number < 4:
            # Too little slack to host even one digit group: plain QR output.
            return super().bits
        else:
            # 10 bits encode 3 digits; 7 bits -> 2 digits; 4 bits -> 1 digit.
            numbers_count = available_for_number // 10 * 3
            remaining = available_for_number % 10
            if remaining >= 7:
                numbers_count += 2
                remaining -= 7
            elif remaining >= 4:
                numbers_count += 1
                remaining -= 4
            # Data bits past `upper` are padding we must not touch.
            upper = args.dcwc * BIT_PER_CW - remaining
            #self._data.put_numbers('0' * numbers_count)
            """
            upper = 0
            numbers_count = 0
            """
        while True:
            bits = super().bits
            di = 0                              # cursor into data bits
            eci = args.dcwc * BIT_PER_CW        # cursor into EC bits
            ecbc = args.eccwcpb * BIT_PER_CW    # EC bits per block
            data_bits = Bits()
            ec_bits = Bits()
            for i in range(args.bc):
                dbc = args.dcwcof(i) * BIT_PER_CW
                # [low, high) is the image-controllable span of this block.
                low = 0
                high = dbc
                if di < used:
                    low = used - di
                    if low >= dbc:
                        # Block lies entirely inside the real payload: copy as-is.
                        data_bits.extend(bits, di, dbc)
                        ec_bits.extend(bits, eci, ecbc)
                        di += dbc
                        eci += ecbc
                        continue
                if di + dbc > upper:
                    high = upper - di
                    if high <= 0:
                        # Block lies entirely inside trailing padding: copy as-is.
                        data_bits.extend(bits, di, dbc)
                        ec_bits.extend(bits, eci, ecbc)
                        di += dbc
                        eci += ecbc
                        continue
                if not self._only_data:
                    #print('Create BitBlock', '{i}/{bc}...'.format(i=i+1, bc=args.bc,), end='', flush=True)
                    # BitBlock keeps data and EC bits consistent on every set().
                    block = BitBlock(bits, di, dbc, eci, ecbc)
                else:
                    block = Bits.copy_from(bits, di, dbc)
                # Lock uncontrollable bits
                locked_bits = set()
                if not self._only_data:
                    for j in itertools.chain(range(0, low), range(high, dbc)):
                        assert block.set(j, bits[di + j])
                else:
                    for j in itertools.chain(range(0, low), range(high, dbc)):
                        locked_bits.add(j)
                targets_index = list(range(di, di+dbc))
                if not self._only_data:
                    targets_index.extend(range(eci, eci+ecbc))
                def compare(x):
                    # Sort key: hard zeros go to the extreme end so they are
                    # painted last/first as `higher_first` dictates; otherwise
                    # order by the target's contrast.
                    t = self._targets[x]
                    if t.is_hard_zero():
                        if self._higher_first:
                            return INF
                        else:
                            return -1
                    else:
                        return t.contrast
                targets_index = sorted(targets_index, key=compare,
                                       reverse=self._higher_first)
                for target_index in targets_index:
                    target = self._targets[target_index]
                    point = target.point
                    fill = target.fill
                    if point.invert:
                        fill = not fill
                    if target.is_hard_zero():
                        # Forced to 0 by a previous digit-range violation.
                        fill = False
                    if point.type is QrPointType.DATA:
                        index = point.offset - di
                    else:
                        assert point.type is QrPointType.CORRECTION
                        # EC bits live after the dbc data bits in the block.
                        index = point.offset - eci + dbc
                    if not self._only_data:
                        block.set(index, fill)
                    elif index not in locked_bits:
                        block[index] = fill
                if not self._only_data:
                    new_block_bits = block.bits()
                    data_bits.extend(new_block_bits, 0, dbc)
                    ec_bits.extend(new_block_bits, dbc, ecbc)
                else:
                    data_bits.extend(block)
                    # Recompute EC codewords since only data bits were edited.
                    ec_bits.extend(RSEncoder.encode(block, ecbc // 8, True))
                di += dbc
                eci += ecbc
            # Validate the Numbers filler: each 10/7/4-bit group must decode
            # to fewer than 1000/100/10 respectively.
            error_count = 0
            numbers = ''
            for i in range(0, numbers_count, 3):
                if i + 3 > numbers_count:
                    count = [None, 4, 7][numbers_count - i]
                else:
                    count = 10
                offset = used + i // 3 * 10
                value = Bits.copy_from(data_bits, offset, count)
                value = value.as_int
                if count == 10 and value >= 1000:
                    # Zero one of the 5 high bits at random to pull the group
                    # back into range on the next pass.
                    rand_pos = randint(0, 4)
                    hard_zero_pos = offset + rand_pos
                    self._targets[hard_zero_pos].set_hard_zero()
                    error_count += 1
                    value -= 2**(9 - rand_pos)
                elif count == 7 and value >= 100:
                    rand_pos = randint(0, 1)
                    hard_zero_pos = offset + rand_pos
                    self._targets[hard_zero_pos].set_hard_zero()
                    error_count += 1
                    value -= 2**(6 - rand_pos)
                elif count == 4 and value >= 10:
                    hard_zero_pos = offset
                    self._targets[hard_zero_pos].set_hard_zero()
                    error_count += 1
                    value -= 8
                numbers += str(value).rjust(count // 3, '0')
            #print('Error count', error_count, end='')
            if error_count == 0:
                #print(', send to printer.')
                data_bits.extend(ec_bits)
                self._bits = data_bits
                return data_bits
            else:
                #print(', restart.')
                # New hard zeros were recorded; repaint from scratch.
                pass
|
en
| 0.351437
|
# Added at : 2016.8.2 # Author : 7sDream # Usage : Accept data and source image, make a QArt. #assert isinstance(contentValues, (str, QArtSourceImage, Image.Image)) #if not isinstance(img, QArtSourceImage): #self.source = img #data = QrData(url + '#', level) #self._data.put_numbers('0' * numbers_count) upper = 0 numbers_count = 0 #print('Create BitBlock', '{i}/{bc}...'.format(i=i+1, bc=args.bc,), end='', flush=True) # Lock uncontrollable bits #print('Error count', error_count, end='') #print(', send to printer.') #print(', restart.')
| 2.555007
| 3
|
day05/1-xpath.py
|
Mhh123/spider
| 0
|
6629568
|
# Demo: querying a local HTML file with XPath via lxml.etree.
from lxml import etree
# Build the ElementTree object from the HTML file
tree = etree.parse('xpath.html')
print(tree)
#<lxml.etree._ElementTree object at 0x000000000252B388>
# /text() selects only the direct text children of each matched div
ret = tree.xpath('//div[@class="hero"]/text()')
"""
扩展:
def xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables): # real signature unknown; restored from __doc__
xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables)
XPath evaluate in context of document.
``namespaces`` is an optional dictionary with prefix to namespace URI
mappings, used by XPath. ``extensions`` defines additional extension
functions.
Returns a list (nodeset), or bool, float or string.
In case of a list result, return Element for element nodes,
string for text and attribute values.
Note: if you are going to apply multiple XPath expressions
against the same document, it is more efficient to use
XPathEvaluator directly.
pass
"""
# //text() additionally descends into nested elements for their text nodes
ret_1 = tree.xpath('//div[@class="hero"]//text()')
# print(ret_1)
# string = ''.join(ret).replace('\t','').replace('\n','')
# print(ret)
# print(string)
|
# Demo: querying a local HTML file with XPath via lxml.etree.
from lxml import etree
# Build the ElementTree object from the HTML file
tree = etree.parse('xpath.html')
print(tree)
#<lxml.etree._ElementTree object at 0x000000000252B388>
# /text() selects only the direct text children of each matched div
ret = tree.xpath('//div[@class="hero"]/text()')
"""
扩展:
def xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables): # real signature unknown; restored from __doc__
xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables)
XPath evaluate in context of document.
``namespaces`` is an optional dictionary with prefix to namespace URI
mappings, used by XPath. ``extensions`` defines additional extension
functions.
Returns a list (nodeset), or bool, float or string.
In case of a list result, return Element for element nodes,
string for text and attribute values.
Note: if you are going to apply multiple XPath expressions
against the same document, it is more efficient to use
XPathEvaluator directly.
pass
"""
# //text() additionally descends into nested elements for their text nodes
ret_1 = tree.xpath('//div[@class="hero"]//text()')
# print(ret_1)
# string = ''.join(ret).replace('\t','').replace('\n','')
# print(ret)
# print(string)
|
en
| 0.551892
|
# 生成tree对象 #<lxml.etree._ElementTree object at 0x000000000252B388> 扩展:
def xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables): # real signature unknown; restored from __doc__
xpath(self, _path, namespaces=None, extensions=None, smart_strings=True, **_variables)
XPath evaluate in context of document.
``namespaces`` is an optional dictionary with prefix to namespace URI
mappings, used by XPath. ``extensions`` defines additional extension
functions.
Returns a list (nodeset), or bool, float or string.
In case of a list result, return Element for element nodes,
string for text and attribute values.
Note: if you are going to apply multiple XPath expressions
against the same document, it is more efficient to use
XPathEvaluator directly.
pass # print(ret_1) # string = ''.join(ret).replace('\t','').replace('\n','') # print(ret) # print(string)
| 3.41762
| 3
|
backtracking/minimax.py
|
jenia90/Python
| 21
|
6629569
|
<gh_stars>10-100
from __future__ import annotations
import math
""" Minimax helps to achieve maximum score in a game by checking all possible moves
depth is current depth in game tree.
nodeIndex is index of current node in scores[].
if move is of maximizer return true else false
leaves of game tree is stored in scores[]
height is maximum height of Game tree
"""
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """Return the optimal leaf value reachable from ``node_index``.

    The game tree is a complete binary tree whose leaves are ``scores``;
    the maximizer and minimizer alternate levels starting with ``is_max``.

    >>> import math
    >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    >>> height = math.log(len(scores), 2)
    >>> minimax(0, 0, True, scores, height)
    65
    >>> minimax(-1, 0, True, scores, height)
    Traceback (most recent call last):
        ...
    ValueError: Depth cannot be less than 0
    >>> minimax(0, 0, True, [], 2)
    Traceback (most recent call last):
        ...
    ValueError: Scores cannot be empty
    >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
    >>> height = math.log(len(scores), 2)
    >>> minimax(0, 0, True, scores, height)
    12
    >>> minimax('1', 2, True, [], 2 )
    Traceback (most recent call last):
        ...
    TypeError: '<' not supported between instances of 'str' and 'int'
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    # At the leaf level the score is read directly.
    if depth == height:
        return scores[node_index]
    child_depth = depth + 1
    left, right = node_index * 2, node_index * 2 + 1
    # Maximizer picks the larger child value, minimizer the smaller.
    choose = max if is_max else min
    return choose(
        minimax(child_depth, left, not is_max, scores, height),
        minimax(child_depth, right, not is_max, scores, height),
    )
def main():
    """Demo entry point: print the optimal value for a sample score list."""
    leaf_scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    tree_height = math.log(len(leaf_scores), 2)
    optimal = minimax(0, 0, True, leaf_scores, tree_height)
    print("Optimal value : ", end="")
    print(optimal)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
|
from __future__ import annotations
import math
""" Minimax helps to achieve maximum score in a game by checking all possible moves
depth is current depth in game tree.
nodeIndex is index of current node in scores[].
if move is of maximizer return true else false
leaves of game tree is stored in scores[]
height is maximum height of Game tree
"""
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """Return the optimal leaf value reachable from ``node_index``.

    The game tree is a complete binary tree whose leaves are ``scores``;
    the maximizer and minimizer alternate levels starting with ``is_max``.

    >>> import math
    >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    >>> height = math.log(len(scores), 2)
    >>> minimax(0, 0, True, scores, height)
    65
    >>> minimax(-1, 0, True, scores, height)
    Traceback (most recent call last):
        ...
    ValueError: Depth cannot be less than 0
    >>> minimax(0, 0, True, [], 2)
    Traceback (most recent call last):
        ...
    ValueError: Scores cannot be empty
    >>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
    >>> height = math.log(len(scores), 2)
    >>> minimax(0, 0, True, scores, height)
    12
    >>> minimax('1', 2, True, [], 2 )
    Traceback (most recent call last):
        ...
    TypeError: '<' not supported between instances of 'str' and 'int'
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    # At the leaf level the score is read directly.
    if depth == height:
        return scores[node_index]
    child_depth = depth + 1
    left, right = node_index * 2, node_index * 2 + 1
    # Maximizer picks the larger child value, minimizer the smaller.
    choose = max if is_max else min
    return choose(
        minimax(child_depth, left, not is_max, scores, height),
        minimax(child_depth, right, not is_max, scores, height),
    )
def main():
    """Demo entry point: print the optimal value for a sample score list."""
    leaf_scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    tree_height = math.log(len(leaf_scores), 2)
    optimal = minimax(0, 0, True, leaf_scores, tree_height)
    print("Optimal value : ", end="")
    print(optimal)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
|
en
| 0.682908
|
Minimax helps to achieve maximum score in a game by checking all possible moves depth is current depth in game tree. nodeIndex is index of current node in scores[]. if move is of maximizer return true else false leaves of game tree is stored in scores[] height is maximum height of Game tree >>> import math >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423] >>> height = math.log(len(scores), 2) >>> minimax(0, 0, True, scores, height) 65 >>> minimax(-1, 0, True, scores, height) Traceback (most recent call last): ... ValueError: Depth cannot be less than 0 >>> minimax(0, 0, True, [], 2) Traceback (most recent call last): ... ValueError: Scores cannot be empty >>> scores = [3, 5, 2, 9, 12, 5, 23, 23] >>> height = math.log(len(scores), 2) >>> minimax(0, 0, True, scores, height) 12 >>> minimax('1', 2, True, [], 2 ) Traceback (most recent call last): ... TypeError: '<' not supported between instances of 'str' and 'int'
| 4.173225
| 4
|
tests/test_games/test_go_fish/test_game.py
|
joedaws/motherbrain
| 0
|
6629570
|
<gh_stars>0
import pytest
from motherbrain.games.go_fish import INITIAL_HAND_SIZE_MAP
from motherbrain.games.run import create_game
from motherbrain import MOTHERBRAIN_PATH
import yaml
from yaml import Loader
import os
@pytest.fixture
def game():
    """Build a go-fish game instance from the packaged ``random.yaml`` config."""
    cfg_file = os.path.join(
        MOTHERBRAIN_PATH, 'games/go_fish/config/', 'random.yaml')
    # Parse the YAML configuration shipped with the package.
    with open(cfg_file, 'rb') as fh:
        cfg = yaml.load(fh, Loader=Loader)
    return create_game(cfg)
def test_attributes(game):
    """The game object exposes truthy state, player count, and turn members."""
    for attr_name in ("state", "num_players", "turn"):
        assert getattr(game, attr_name)
def test_deal(game):
    """After dealing, hands, observations, and deck size are all consistent."""
    game.reset()
    game.deal()
    expected_hand = INITIAL_HAND_SIZE_MAP[game.num_players]
    # Every player holds a full starting hand.
    for player in game.state.players:
        assert len(player.hand) == expected_hand
    # Each player's view of every opponent: full hand length, no known ranks.
    for _player, obs in game.state.observations.items():
        for opponent in obs.opponents:
            assert obs.observed_hand_len[opponent].hand_len == expected_hand
            assert obs.observed_ranks[opponent].ranks['2'] == 0
    # Deck lost 4*5 cards — presumably 4 players x 5 cards for this config;
    # TODO confirm against INITIAL_HAND_SIZE_MAP.
    assert len(game.state.deck.cards) == 52 - 4 * 5
def test_turn(game):
    """A freshly reset and dealt game can execute a turn without raising."""
    for step in (game.reset, game.deal, game.turn):
        step()
|
import pytest
from motherbrain.games.go_fish import INITIAL_HAND_SIZE_MAP
from motherbrain.games.run import create_game
from motherbrain import MOTHERBRAIN_PATH
import yaml
from yaml import Loader
import os
@pytest.fixture
def game():
    """Build a go-fish game instance from the packaged ``random.yaml`` config."""
    cfg_file = os.path.join(
        MOTHERBRAIN_PATH, 'games/go_fish/config/', 'random.yaml')
    # Parse the YAML configuration shipped with the package.
    with open(cfg_file, 'rb') as fh:
        cfg = yaml.load(fh, Loader=Loader)
    return create_game(cfg)
def test_attributes(game):
    """The game object exposes truthy state, player count, and turn members."""
    for attr_name in ("state", "num_players", "turn"):
        assert getattr(game, attr_name)
def test_deal(game):
    """After dealing, hands, observations, and deck size are all consistent."""
    game.reset()
    game.deal()
    expected_hand = INITIAL_HAND_SIZE_MAP[game.num_players]
    # Every player holds a full starting hand.
    for player in game.state.players:
        assert len(player.hand) == expected_hand
    # Each player's view of every opponent: full hand length, no known ranks.
    for _player, obs in game.state.observations.items():
        for opponent in obs.opponents:
            assert obs.observed_hand_len[opponent].hand_len == expected_hand
            assert obs.observed_ranks[opponent].ranks['2'] == 0
    # Deck lost 4*5 cards — presumably 4 players x 5 cards for this config;
    # TODO confirm against INITIAL_HAND_SIZE_MAP.
    assert len(game.state.deck.cards) == 52 - 4 * 5
def test_turn(game):
    """A freshly reset and dealt game can execute a turn without raising."""
    for step in (game.reset, game.deal, game.turn):
        step()
|
en
| 0.869422
|
Parse configs, create game, and play. # load config # load game # check player hands # check observations # check that deck has correct amount of cards
| 2.246996
| 2
|
Lib/tkinter/test/test_tkinter/test_simpledialog.py
|
Kshitijkrishnadas/haribol
| 2,441
|
6629571
|
import unittest
import tkinter
from test.support import requires, run_unittest, swap_attr
from tkinter.test.support import AbstractDefaultRootTest
from tkinter.simpledialog import Dialog, askinteger
requires('gui')
# Verifies that simpledialog helpers honour tkinter's default-root rules:
# they fail without a root, work while one exists, and fail again once
# NoDefaultRoot() disables implicit root creation.
class DefaultRootTest(AbstractDefaultRootTest, unittest.TestCase):
    def test_askinteger(self):
        # No default root exists yet -> the dialog cannot be created.
        self.assertRaises(RuntimeError, askinteger, "Go To Line", "Line number")
        root = tkinter.Tk()
        # Patch wait_window so the dialog closes immediately instead of blocking.
        with swap_attr(Dialog, 'wait_window', lambda self, w: w.destroy()):
            askinteger("Go To Line", "Line number")
        root.destroy()
        tkinter.NoDefaultRoot()
        # With implicit root creation disabled, the call must fail again.
        self.assertRaises(RuntimeError, askinteger, "Go To Line", "Line number")
# Test classes collected by test.support.run_unittest when a GUI is available.
tests_gui = (DefaultRootTest,)
if __name__ == "__main__":
    run_unittest(*tests_gui)
|
import unittest
import tkinter
from test.support import requires, run_unittest, swap_attr
from tkinter.test.support import AbstractDefaultRootTest
from tkinter.simpledialog import Dialog, askinteger
requires('gui')
# Verifies that simpledialog helpers honour tkinter's default-root rules:
# they fail without a root, work while one exists, and fail again once
# NoDefaultRoot() disables implicit root creation.
class DefaultRootTest(AbstractDefaultRootTest, unittest.TestCase):
    def test_askinteger(self):
        # No default root exists yet -> the dialog cannot be created.
        self.assertRaises(RuntimeError, askinteger, "Go To Line", "Line number")
        root = tkinter.Tk()
        # Patch wait_window so the dialog closes immediately instead of blocking.
        with swap_attr(Dialog, 'wait_window', lambda self, w: w.destroy()):
            askinteger("Go To Line", "Line number")
        root.destroy()
        tkinter.NoDefaultRoot()
        # With implicit root creation disabled, the call must fail again.
        self.assertRaises(RuntimeError, askinteger, "Go To Line", "Line number")
# Test classes collected by test.support.run_unittest when a GUI is available.
tests_gui = (DefaultRootTest,)
if __name__ == "__main__":
    run_unittest(*tests_gui)
|
none
| 1
| 2.556062
| 3
|
|
mmcv/fileio/io.py
|
imabackstabber/mmcv
| 0
|
6629572
|
# Copyright (c) OpenMMLab. All rights reserved.
from io import BytesIO, StringIO
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, TextIO, Union
from ..utils import is_list_of
from .file_client import FileClient
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
FileLikeObject = Union[TextIO, StringIO, BytesIO]
file_handlers = {
'json': JsonHandler(),
'yaml': YamlHandler(),
'yml': YamlHandler(),
'pickle': PickleHandler(),
'pkl': PickleHandler()
}
def load(file: Union[str, Path, FileLikeObject],
         file_format: Optional[str] = None,
         file_client_args: Optional[Dict] = None,
         **kwargs):
    """Load data from json/yaml/pickle files.
    This method provides a unified api for loading data from serialized files.
    Note:
        In v1.3.16 and later, ``load`` supports loading data from serialized
        files those can be storaged in different backends.
    Args:
        file (str or :obj:`Path` or file-like object): Filename or a file-like
            object.
        file_format (str, optional): If not specified, the file format will be
            inferred from the file extension, otherwise use the specified one.
            Currently supported formats include "json", "yaml/yml" and
            "pickle/pkl".
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
    Examples:
        >>> load('/path/of/your/file') # file is storaged in disk
        >>> load('https://path/of/your/file') # file is storaged in Internet
        >>> load('s3://path/of/your/file') # file is storaged in petrel
    Returns:
        The content from the file.
    """
    if isinstance(file, Path):
        file = str(file)
    # Infer the format from the filename extension when not given explicitly.
    if file_format is None and isinstance(file, str):
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')
    handler = file_handlers[file_format]
    # Annotation only; `f` is bound inside the `with` statements below.
    f: FileLikeObject
    if isinstance(file, str):
        # Fetch the whole payload via FileClient (disk/HTTP/petrel backend)
        # into an in-memory buffer, text or binary as the handler requires.
        file_client = FileClient.infer_client(file_client_args, file)
        if handler.str_like:
            with StringIO(file_client.get_text(file)) as f:
                obj = handler.load_from_fileobj(f, **kwargs)
        else:
            with BytesIO(file_client.get(file)) as f:
                obj = handler.load_from_fileobj(f, **kwargs)
    elif hasattr(file, 'read'):
        # Already a readable file-like object: hand it straight to the handler.
        obj = handler.load_from_fileobj(file, **kwargs)
    else:
        raise TypeError('"file" must be a filepath str or a file-object')
    return obj
def dump(obj: Any,
         file: Optional[Union[str, Path, FileLikeObject]] = None,
         file_format: Optional[str] = None,
         file_client_args: Optional[Dict] = None,
         **kwargs):
    """Dump data to json/yaml/pickle strings or files.
    This method provides a unified api for dumping data as strings or to files,
    and also supports custom arguments for each file format.
    Note:
        In v1.3.16 and later, ``dump`` supports dumping data as strings or to
        files which is saved to different backends.
    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): If not
            specified, then the object is dumped to a str, otherwise to a file
            specified by the filename or file-like object.
        file_format (str, optional): Same as :func:`load`.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
    Examples:
        >>> dump('hello world', '/path/of/your/file') # disk
        >>> dump('hello world', 's3://path/of/your/file') # ceph or petrel
    Returns:
        bool: True for success, False otherwise.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        # Infer the format from the extension; without a filename the caller
        # must say which serializer to use.
        if isinstance(file, str):
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError(
                'file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')
    # Annotation only; `f` is bound inside the `with` statements below.
    f: FileLikeObject
    handler = file_handlers[file_format]
    if file is None:
        # No destination: return the serialized representation directly.
        return handler.dump_to_str(obj, **kwargs)
    elif isinstance(file, str):
        # Serialize into an in-memory buffer, then push it through FileClient
        # to whatever backend the path maps to (disk/ceph/petrel/...).
        file_client = FileClient.infer_client(file_client_args, file)
        if handler.str_like:
            with StringIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put_text(f.getvalue(), file)
        else:
            with BytesIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put(f.getvalue(), file)
    elif hasattr(file, 'write'):
        # Already a writable file-like object: write into it directly.
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def _register_handler(handler: BaseFileHandler,
                      file_formats: Union[str, List[str]]) -> None:
    """Map one or more file extensions onto ``handler``.

    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): Extension(s) the handler serves.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(
            f'handler must be a child of BaseFileHandler, not {type(handler)}')
    # Normalize a single extension to a one-element list.
    formats = [file_formats] if isinstance(file_formats, str) else file_formats
    if not is_list_of(formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    file_handlers.update((ext, handler) for ext in formats)
def register_handler(file_formats: Union[str, list], **kwargs) -> Callable:
    """Return a class decorator that registers the decorated handler class.

    ``**kwargs`` are forwarded to the handler class constructor.
    """
    def decorator(handler_cls):
        _register_handler(handler_cls(**kwargs), file_formats)
        return handler_cls
    return decorator
|
# Copyright (c) OpenMMLab. All rights reserved.
from io import BytesIO, StringIO
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, TextIO, Union
from ..utils import is_list_of
from .file_client import FileClient
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
FileLikeObject = Union[TextIO, StringIO, BytesIO]
file_handlers = {
'json': JsonHandler(),
'yaml': YamlHandler(),
'yml': YamlHandler(),
'pickle': PickleHandler(),
'pkl': PickleHandler()
}
def load(file: Union[str, Path, FileLikeObject],
         file_format: Optional[str] = None,
         file_client_args: Optional[Dict] = None,
         **kwargs):
    """Load data from json/yaml/pickle files.
    This method provides a unified api for loading data from serialized files.
    Note:
        In v1.3.16 and later, ``load`` supports loading data from serialized
        files those can be storaged in different backends.
    Args:
        file (str or :obj:`Path` or file-like object): Filename or a file-like
            object.
        file_format (str, optional): If not specified, the file format will be
            inferred from the file extension, otherwise use the specified one.
            Currently supported formats include "json", "yaml/yml" and
            "pickle/pkl".
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
    Examples:
        >>> load('/path/of/your/file') # file is storaged in disk
        >>> load('https://path/of/your/file') # file is storaged in Internet
        >>> load('s3://path/of/your/file') # file is storaged in petrel
    Returns:
        The content from the file.
    """
    if isinstance(file, Path):
        file = str(file)
    # Infer the format from the filename extension when not given explicitly.
    if file_format is None and isinstance(file, str):
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')
    handler = file_handlers[file_format]
    # Annotation only; `f` is bound inside the `with` statements below.
    f: FileLikeObject
    if isinstance(file, str):
        # Fetch the whole payload via FileClient (disk/HTTP/petrel backend)
        # into an in-memory buffer, text or binary as the handler requires.
        file_client = FileClient.infer_client(file_client_args, file)
        if handler.str_like:
            with StringIO(file_client.get_text(file)) as f:
                obj = handler.load_from_fileobj(f, **kwargs)
        else:
            with BytesIO(file_client.get(file)) as f:
                obj = handler.load_from_fileobj(f, **kwargs)
    elif hasattr(file, 'read'):
        # Already a readable file-like object: hand it straight to the handler.
        obj = handler.load_from_fileobj(file, **kwargs)
    else:
        raise TypeError('"file" must be a filepath str or a file-object')
    return obj
def dump(obj: Any,
         file: Optional[Union[str, Path, FileLikeObject]] = None,
         file_format: Optional[str] = None,
         file_client_args: Optional[Dict] = None,
         **kwargs):
    """Dump data to json/yaml/pickle strings or files.
    This method provides a unified api for dumping data as strings or to files,
    and also supports custom arguments for each file format.
    Note:
        In v1.3.16 and later, ``dump`` supports dumping data as strings or to
        files which is saved to different backends.
    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): If not
            specified, then the object is dumped to a str, otherwise to a file
            specified by the filename or file-like object.
        file_format (str, optional): Same as :func:`load`.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
    Examples:
        >>> dump('hello world', '/path/of/your/file') # disk
        >>> dump('hello world', 's3://path/of/your/file') # ceph or petrel
    Returns:
        bool: True for success, False otherwise.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        # Infer the format from the extension; without a filename the caller
        # must say which serializer to use.
        if isinstance(file, str):
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError(
                'file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')
    # Annotation only; `f` is bound inside the `with` statements below.
    f: FileLikeObject
    handler = file_handlers[file_format]
    if file is None:
        # No destination: return the serialized representation directly.
        return handler.dump_to_str(obj, **kwargs)
    elif isinstance(file, str):
        # Serialize into an in-memory buffer, then push it through FileClient
        # to whatever backend the path maps to (disk/ceph/petrel/...).
        file_client = FileClient.infer_client(file_client_args, file)
        if handler.str_like:
            with StringIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put_text(f.getvalue(), file)
        else:
            with BytesIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put(f.getvalue(), file)
    elif hasattr(file, 'write'):
        # Already a writable file-like object: write into it directly.
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
def _register_handler(handler: BaseFileHandler,
                      file_formats: Union[str, List[str]]) -> None:
    """Map one or more file extensions onto ``handler``.

    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): Extension(s) the handler serves.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(
            f'handler must be a child of BaseFileHandler, not {type(handler)}')
    # Normalize a single extension to a one-element list.
    formats = [file_formats] if isinstance(file_formats, str) else file_formats
    if not is_list_of(formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    file_handlers.update((ext, handler) for ext in formats)
def register_handler(file_formats: Union[str, list], **kwargs) -> Callable:
    """Return a class decorator that registers the decorated handler class.

    ``**kwargs`` are forwarded to the handler class constructor.
    """
    def decorator(handler_cls):
        _register_handler(handler_cls(**kwargs), file_formats)
        return handler_cls
    return decorator
|
en
| 0.73342
|
# Copyright (c) OpenMMLab. All rights reserved. Load data from json/yaml/pickle files. This method provides a unified api for loading data from serialized files. Note: In v1.3.16 and later, ``load`` supports loading data from serialized files those can be storaged in different backends. Args: file (str or :obj:`Path` or file-like object): Filename or a file-like object. file_format (str, optional): If not specified, the file format will be inferred from the file extension, otherwise use the specified one. Currently supported formats include "json", "yaml/yml" and "pickle/pkl". file_client_args (dict, optional): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Default: None. Examples: >>> load('/path/of/your/file') # file is storaged in disk >>> load('https://path/of/your/file') # file is storaged in Internet >>> load('s3://path/of/your/file') # file is storaged in petrel Returns: The content from the file. Dump data to json/yaml/pickle strings or files. This method provides a unified api for dumping data as strings or to files, and also supports custom arguments for each file format. Note: In v1.3.16 and later, ``dump`` supports dumping data as strings or to files which is saved to different backends. Args: obj (any): The python object to be dumped. file (str or :obj:`Path` or file-like object, optional): If not specified, then the object is dumped to a str, otherwise to a file specified by the filename or file-like object. file_format (str, optional): Same as :func:`load`. file_client_args (dict, optional): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Default: None. Examples: >>> dump('hello world', '/path/of/your/file') # disk >>> dump('hello world', 's3://path/of/your/file') # ceph or petrel Returns: bool: True for success, False otherwise. Register a handler for some file extensions. Args: handler (:obj:`BaseFileHandler`): Handler to be registered. 
file_formats (str or list[str]): File formats to be handled by this handler.
| 2.575313
| 3
|
blodiator/graf/grfnodecore.py
|
MansourM61/Blodiator
| 0
|
6629573
|
<filename>blodiator/graf/grfnodecore.py<gh_stars>0
'''
********************************************************************************
Python Script: grfnodecore Module
Writter: <NAME>
Date: 7 Feburary 2019
This Python script is compatible with Python 3.x.
The script is used to define GrfNodeCore class the node in
Blodiator. The node used in the block diagrams is handled by this class.
GrfNode GrfConnector GrfBlock
| | |
| | |
GrfNodeCore GrfConnectorCore GrfBlockCore
| | |
| | |
|_____________GrfObject______________|
History:
Ver 0.0.7: 29 January 2019;
first code
Ver 0.0.8: 7 Feburary 2019;
1- center changing is fixed.
Ver 0.0.31: 24 June 2019;
1- logging is added.
Ver 0.0.32: 28 June 2019;
1- Class is changed to GrfNodeCore.
Ver 0.0.36: 3 July 2019;
1- node/connector loading properties are added.
********************************************************************************
'''
import tkinter as tk
from ..etc import cntsheetcanavs
from ..etc import coloredtext
from . import grfobject
from ..grafix import gfxcircle
# ColoredText output settings used when this module writes log messages.
style = 'normal'
fg = 'purple'
bg = 'black'
src = 'GrfNodeCore: '  # prefix identifying this module in log output
#################################################
# Module-level defaults for node construction and rendering.
DEF_NAME = 'node' # default name
CENTER = (300, 300) # default center coordinate
SIZE = 10 # default size
CAT = 'graph' # default category
COLOR_NORMAL = ('black', 'black') # default color for normal state
COLOR_DISABLED = ('pink', 'pink') # default color for disabled state
COLOR_SELECTED = ('red', 'red') # default color for selected state
COLOR_ERRONEOUS = ('yellow', 'yellow') # default color for erroneous state
BRUSH_NORMAL = (1.0, []) # default line thickness and style for normal state
BRUSH_DISABLED = (1.0, []) # default line thickness and style for disabled state
BRUSH_SELECTED = (1.0, []) # default line thickness and style for selected state
BRUSH_ERRONEOUS = (1.0, []) # default line thickness and style for erroneous state
MODE = ('normal', 'disabled', 'selected', 'erroneous') # states of the object
POSITION = ('N', 'S', 'E', 'W') # position of the port
BOUNDARY_MARGIN = 0 # default boundary margin
IN_PORT = ['0', None, None, None] # default input port
OUT_PORT = [ ['0', None, None, None], ['0', None, None, None] ] # default output port
SIG_TYPE = ('none', 'logical', 'electrical', 'optical') # available signal type
CON_COLOR = {'none': 'black', 'logical': 'blue', 'electrical': 'green', 'optical': 'red'} # connector colour
#################################################
# GrfNodeCore class: this is the node class for blockdiagram objects
# {
class GrfNodeCore(grfobject.GrfObject):
"""
Node item in the Blodiator.
Define an instance of 'GrfNodeCore' with appropriate arguments:
sheetCanvas = an instance of the canvas. For Blodiator it is an instance of CntSheetCanvas
tag = a string used by tkinter to identify the block
label = item label
cat = a string showing the category of the block
inPort = list containing information about input ports of the item
outPort = list containing information about output ports of the item
con_type = connection type: ('none', 'logical', 'electrical', 'optical')
color_type = a tuple containing colors for different coneection type
mode = state of the object: 'normal', 'disabled', 'selected', 'erroneous'
std = standard output which is an instance of 'ColoredText' class
This class contains the required fundamental functions for drawing and managing
node item in Blodiator. Functions such as updating center, color,
brush, tag, label, input and output ports, mode, etc are defined in this class.
"""
version = '0.0.36' # version of the class
# < class functions section >
# < class functions section >
# < inherited functions section >
# __init__ func: initialiser dunar
# {
def __init__(self, sheetCanvas=None, cat=CAT, label=DEF_NAME, center=CENTER,
size=SIZE, mode=MODE[0], inPort=IN_PORT, outPort=OUT_PORT,
con_type=SIG_TYPE[0], color_type=CON_COLOR, std=None):
"""
Construct a GrfNodeCore
input:
sheetCanvas = an instance of the canvas. For Blodiator it is an instance of CntSheetCanvas
tag = a string used by tkinter to identify the block
label = item label
center = center of the block
cat = a string showing the category of the block
inPort = list containing information about input ports of the item
outPort = list containing information about output ports of the item
con_type = connection type: ('none', 'logical', 'electrical', 'optical')
color_type = a tuple containing colors for different coneection type
mode = state of the object: 'normal', 'disabled', 'selected', 'erroneous'
std = standard output which is an instance of 'ColoredText' class
output: none
"""
if std is None:
print(src + ': Please specify a standard output for messages!')
exit()
else:
self.std = std
self.std.Print('Initialising GrfNodeCore', fg, bg, style, src)
self.__size = size
colorList = [COLOR_NORMAL, COLOR_DISABLED, COLOR_SELECTED, COLOR_ERRONEOUS]
brushList = [BRUSH_NORMAL, BRUSH_DISABLED, BRUSH_SELECTED, BRUSH_ERRONEOUS]
colorset = dict(zip(MODE, colorList))
brushset = dict(zip(MODE, brushList))
super(GrfNodeCore, self).__init__(sheetCanvas=sheetCanvas, cat=cat, label=label, center=center,
mode=mode, colorset=colorset, brushset=brushset, color_type=color_type,
inPort=inPort, outPort=outPort, con_type=con_type, std=std)
# } __init__ func
# __repr__ func: repr dunar
# {
def __repr__(self):
"""
Class repr dunar function.
"""
txt = super(GrfNodeCore, self).__repr__()
txt += '; size = {0}'.format(self.__size) # generate formatted text
return txt
# } __repr__ func
# __str__ func: str dunar
# {
def __str__(self):
"""
Class str dunar function.
"""
txt = super(GrfNodeCore, self).__str__()
inID = self.inPort[0]
outID = [P[0] for P in self.outPort]
txt += '; input ID = {0}; output ID = {1}'.format(inID, outID) # generate formatted text
return txt
# } __str__ func
# __contains__ func: in dunar
# {
def __contains__(self, arg):
"""
Class in dunar function.
"""
return arg in self.grfx[0]
# } __contains__ func
# < inherited functions section >
# < class functions section >
# initBlock func: initialise the object
# {
def initBlock(self):
"""
Initialises the node graphics on the canvas
input: none
output: none
"""
self.std.Print('Setting up GrfNodeCore', fg, bg, style, src)
tag = self.label + '-circle'
obj = []
obj.append(gfxcircle.GfxCircle(sheetCanvas=self.sheetCanvas, tag=tag, center=self.center,
size=self.__size, mode=self.mode, std=self.std))
obj[0].colorset = self.colorset
obj[0].brushset = self.brushset
self.grfx = obj
self.update_bbox()
self.update_color()
self.update_brush()
# } initBlock func
# update_bbox func: update the bounding box
# {
def update_bbox(self):
"""
Updates the node bounding box
input: none
output: none
"""
centX, centY = self.center
brush_thickness = self.brush[0]
margin = self.__size + brush_thickness + BOUNDARY_MARGIN
self.bbox = [int(centX - margin), int(centY - margin),
int(centX + margin), int(centY + margin)]
# } update_bbox func
# update_center func: update the center
# {
def update_center(self):
"""
Updates the node center
input: none
output: none
"""
self.grfx[0].center = self.center
self.update_bbox()
# } update_center func
# update_color func: update the color
# {
def update_color(self):
"""
Updates the connector color set (outline and filling colors for all different modes)
input: none
output: none
"""
colorset = self.colorset
self.grfx[0].colorset = colorset
pass
# } update_color func
# update_brush func: update the brush
# {
def update_brush(self):
"""
Updates the connector brush set (line thickness and style for all different modes)
input: none
output: none
"""
brushset = self.brushset
self.grfx[0].brushset = brushset
pass
# } update_brush func
# < class functions section >
# < getter and setter functions section >
# property: inPortID
# inPortID getter func: input port id getter
# {
@property
def inPortID(self):
"""
Class property getter: input port ID
"""
return self.inPort[0]
# } inPortID getter func
# inPortID setter func: input port id setter
# {
@inPortID.setter
def inPortID(self, inPort_id):
"""
Class property setter: input port ID
"""
self.__inPort_id = inPort_id
inPort = [inPort_id, None, None, None]
self.inPort = inPort
# } inPortID setter func
# property: size
# size getter func: output size getter
# {
@property
def size(self):
"""
Class property getter: node size
"""
return self.__size
# } size getter func
# size setter func: size setter
# {
@size.setter
def size(self, size):
"""
Class property setter: node size
"""
self.__size = size
self.grfx[0].size = size
# } size setter func
# property: outPortID
# outPortID getter func: output port id getter
# {
@property
def outPortID(self):
"""
Class property getter: output port ID
"""
return self.outPort[0]
# } outPortID getter func
# outPortID setter func: output port id setter
# {
@outPortID.setter
def outPortID(self, outPort_id):
"""
Class property setter: output port ID
"""
self.__outPort_id = outPort_id
outPort = [outPort_id, None, None, None]
self.outPort = outPort
# } outPortID setter func
# < getter and setter functions section >
# } GrfNodeCore class
# main func: contains code to test GrfNodeCore class
# {
def main():
CT = coloredtext.ColoredText()
root = tk.Tk()
root.geometry("600x600")
root.title('Sheet Test Bench')
canvas = tk.Canvas(root, width=600, height=600)
canvas.pack()
obj = GrfNodeCore(sheetCanvas=canvas, label='graph', std=CT)
obj.draw()
# CT.Print(repr(obj))
CT.Print('\n')
obj.mode = MODE[2]
# CT.Print(repr(obj))
obj.center = (100, 100)
obj.mode = MODE[0]
obj.center = (300, 100)
print(obj.bbox)
root.mainloop()
# } main func
if __name__ == '__main__':
main()
|
<filename>blodiator/graf/grfnodecore.py<gh_stars>0
'''
********************************************************************************
Python Script: grfnodecore Module
Writter: <NAME>
Date: 7 Feburary 2019
This Python script is compatible with Python 3.x.
The script is used to define GrfNodeCore class the node in
Blodiator. The node used in the block diagrams is handled by this class.
GrfNode GrfConnector GrfBlock
| | |
| | |
GrfNodeCore GrfConnectorCore GrfBlockCore
| | |
| | |
|_____________GrfObject______________|
History:
Ver 0.0.7: 29 January 2019;
first code
Ver 0.0.8: 7 Feburary 2019;
1- center changing is fixed.
Ver 0.0.31: 24 June 2019;
1- logging is added.
Ver 0.0.32: 28 June 2019;
1- Class is changed to GrfNodeCore.
Ver 0.0.36: 3 July 2019;
1- node/connector loading properties are added.
********************************************************************************
'''
import tkinter as tk
from ..etc import cntsheetcanavs
from ..etc import coloredtext
from . import grfobject
from ..grafix import gfxcircle
style = 'normal'
fg = 'purple'
bg = 'black'
src = 'GrfNodeCore: '
#################################################
DEF_NAME = 'node' # default name
CENTER = (300, 300) # default center coordinate
SIZE = 10 # default size
CAT = 'graph' # default category
COLOR_NORMAL = ('black', 'black') # default color for normal state
COLOR_DISABLED = ('pink', 'pink') # default color for disabled state
COLOR_SELECTED = ('red', 'red') # default color for selected state
COLOR_ERRONEOUS = ('yellow', 'yellow') # default color for erroneous state
BRUSH_NORMAL = (1.0, []) # default line thickness and style for normal state
BRUSH_DISABLED = (1.0, []) # default line thickness and style for disabled state
BRUSH_SELECTED = (1.0, []) # default line thickness and style for selected state
BRUSH_ERRONEOUS = (1.0, []) # default line thickness and style for erroneous state
MODE = ('normal', 'disabled', 'selected', 'erroneous') # states of the object
POSITION = ('N', 'S', 'E', 'W') # position of the port
BOUNDARY_MARGIN = 0 # default boundary margin
IN_PORT = ['0', None, None, None] # default input port
OUT_PORT = [ ['0', None, None, None], ['0', None, None, None] ] # default output port
SIG_TYPE = ('none', 'logical', 'electrical', 'optical') # available signal type
CON_COLOR = {'none': 'black', 'logical': 'blue', 'electrical': 'green', 'optical': 'red'} # connector colour
#################################################
# GrfNodeCore class: this is the node class for blockdiagram objects
# {
class GrfNodeCore(grfobject.GrfObject):
"""
Node item in the Blodiator.
Define an instance of 'GrfNodeCore' with appropriate arguments:
sheetCanvas = an instance of the canvas. For Blodiator it is an instance of CntSheetCanvas
tag = a string used by tkinter to identify the block
label = item label
cat = a string showing the category of the block
inPort = list containing information about input ports of the item
outPort = list containing information about output ports of the item
con_type = connection type: ('none', 'logical', 'electrical', 'optical')
color_type = a tuple containing colors for different coneection type
mode = state of the object: 'normal', 'disabled', 'selected', 'erroneous'
std = standard output which is an instance of 'ColoredText' class
This class contains the required fundamental functions for drawing and managing
node item in Blodiator. Functions such as updating center, color,
brush, tag, label, input and output ports, mode, etc are defined in this class.
"""
version = '0.0.36' # version of the class
# < class functions section >
# < class functions section >
# < inherited functions section >
# __init__ func: initialiser dunar
# {
def __init__(self, sheetCanvas=None, cat=CAT, label=DEF_NAME, center=CENTER,
size=SIZE, mode=MODE[0], inPort=IN_PORT, outPort=OUT_PORT,
con_type=SIG_TYPE[0], color_type=CON_COLOR, std=None):
"""
Construct a GrfNodeCore
input:
sheetCanvas = an instance of the canvas. For Blodiator it is an instance of CntSheetCanvas
tag = a string used by tkinter to identify the block
label = item label
center = center of the block
cat = a string showing the category of the block
inPort = list containing information about input ports of the item
outPort = list containing information about output ports of the item
con_type = connection type: ('none', 'logical', 'electrical', 'optical')
color_type = a tuple containing colors for different coneection type
mode = state of the object: 'normal', 'disabled', 'selected', 'erroneous'
std = standard output which is an instance of 'ColoredText' class
output: none
"""
if std is None:
print(src + ': Please specify a standard output for messages!')
exit()
else:
self.std = std
self.std.Print('Initialising GrfNodeCore', fg, bg, style, src)
self.__size = size
colorList = [COLOR_NORMAL, COLOR_DISABLED, COLOR_SELECTED, COLOR_ERRONEOUS]
brushList = [BRUSH_NORMAL, BRUSH_DISABLED, BRUSH_SELECTED, BRUSH_ERRONEOUS]
colorset = dict(zip(MODE, colorList))
brushset = dict(zip(MODE, brushList))
super(GrfNodeCore, self).__init__(sheetCanvas=sheetCanvas, cat=cat, label=label, center=center,
mode=mode, colorset=colorset, brushset=brushset, color_type=color_type,
inPort=inPort, outPort=outPort, con_type=con_type, std=std)
# } __init__ func
# __repr__ func: repr dunar
# {
def __repr__(self):
"""
Class repr dunar function.
"""
txt = super(GrfNodeCore, self).__repr__()
txt += '; size = {0}'.format(self.__size) # generate formatted text
return txt
# } __repr__ func
# __str__ func: str dunar
# {
def __str__(self):
"""
Class str dunar function.
"""
txt = super(GrfNodeCore, self).__str__()
inID = self.inPort[0]
outID = [P[0] for P in self.outPort]
txt += '; input ID = {0}; output ID = {1}'.format(inID, outID) # generate formatted text
return txt
# } __str__ func
# __contains__ func: in dunar
# {
def __contains__(self, arg):
"""
Class in dunar function.
"""
return arg in self.grfx[0]
# } __contains__ func
# < inherited functions section >
# < class functions section >
# initBlock func: initialise the object
# {
def initBlock(self):
"""
Initialises the node graphics on the canvas
input: none
output: none
"""
self.std.Print('Setting up GrfNodeCore', fg, bg, style, src)
tag = self.label + '-circle'
obj = []
obj.append(gfxcircle.GfxCircle(sheetCanvas=self.sheetCanvas, tag=tag, center=self.center,
size=self.__size, mode=self.mode, std=self.std))
obj[0].colorset = self.colorset
obj[0].brushset = self.brushset
self.grfx = obj
self.update_bbox()
self.update_color()
self.update_brush()
# } initBlock func
# update_bbox func: update the bounding box
# {
def update_bbox(self):
"""
Updates the node bounding box
input: none
output: none
"""
centX, centY = self.center
brush_thickness = self.brush[0]
margin = self.__size + brush_thickness + BOUNDARY_MARGIN
self.bbox = [int(centX - margin), int(centY - margin),
int(centX + margin), int(centY + margin)]
# } update_bbox func
# update_center func: update the center
# {
def update_center(self):
"""
Updates the node center
input: none
output: none
"""
self.grfx[0].center = self.center
self.update_bbox()
# } update_center func
# update_color func: update the color
# {
def update_color(self):
"""
Updates the connector color set (outline and filling colors for all different modes)
input: none
output: none
"""
colorset = self.colorset
self.grfx[0].colorset = colorset
pass
# } update_color func
# update_brush func: update the brush
# {
def update_brush(self):
"""
Updates the connector brush set (line thickness and style for all different modes)
input: none
output: none
"""
brushset = self.brushset
self.grfx[0].brushset = brushset
pass
# } update_brush func
# < class functions section >
# < getter and setter functions section >
# property: inPortID
# inPortID getter func: input port id getter
# {
@property
def inPortID(self):
"""
Class property getter: input port ID
"""
return self.inPort[0]
# } inPortID getter func
# inPortID setter func: input port id setter
# {
@inPortID.setter
def inPortID(self, inPort_id):
"""
Class property setter: input port ID
"""
self.__inPort_id = inPort_id
inPort = [inPort_id, None, None, None]
self.inPort = inPort
# } inPortID setter func
# property: size
# size getter func: output size getter
# {
@property
def size(self):
"""
Class property getter: node size
"""
return self.__size
# } size getter func
# size setter func: size setter
# {
@size.setter
def size(self, size):
"""
Class property setter: node size
"""
self.__size = size
self.grfx[0].size = size
# } size setter func
# property: outPortID
# outPortID getter func: output port id getter
# {
@property
def outPortID(self):
"""
Class property getter: output port ID
"""
return self.outPort[0]
# } outPortID getter func
# outPortID setter func: output port id setter
# {
@outPortID.setter
def outPortID(self, outPort_id):
"""
Class property setter: output port ID
"""
self.__outPort_id = outPort_id
outPort = [outPort_id, None, None, None]
self.outPort = outPort
# } outPortID setter func
# < getter and setter functions section >
# } GrfNodeCore class
# main func: contains code to test GrfNodeCore class
# {
def main():
CT = coloredtext.ColoredText()
root = tk.Tk()
root.geometry("600x600")
root.title('Sheet Test Bench')
canvas = tk.Canvas(root, width=600, height=600)
canvas.pack()
obj = GrfNodeCore(sheetCanvas=canvas, label='graph', std=CT)
obj.draw()
# CT.Print(repr(obj))
CT.Print('\n')
obj.mode = MODE[2]
# CT.Print(repr(obj))
obj.center = (100, 100)
obj.mode = MODE[0]
obj.center = (300, 100)
print(obj.bbox)
root.mainloop()
# } main func
if __name__ == '__main__':
main()
|
en
| 0.537055
|
********************************************************************************
Python Script: grfnodecore Module
Writter: <NAME>
Date: 7 Feburary 2019
This Python script is compatible with Python 3.x.
The script is used to define GrfNodeCore class the node in
Blodiator. The node used in the block diagrams is handled by this class.
GrfNode GrfConnector GrfBlock
| | |
| | |
GrfNodeCore GrfConnectorCore GrfBlockCore
| | |
| | |
|_____________GrfObject______________|
History:
Ver 0.0.7: 29 January 2019;
first code
Ver 0.0.8: 7 Feburary 2019;
1- center changing is fixed.
Ver 0.0.31: 24 June 2019;
1- logging is added.
Ver 0.0.32: 28 June 2019;
1- Class is changed to GrfNodeCore.
Ver 0.0.36: 3 July 2019;
1- node/connector loading properties are added.
******************************************************************************** ################################################# # default name # default center coordinate # default size # default category # default color for normal state # default color for disabled state # default color for selected state # default color for erroneous state # default line thickness and style for normal state # default line thickness and style for disabled state # default line thickness and style for selected state # default line thickness and style for erroneous state # states of the object # position of the port # default boundary margin # default input port # default output port # available signal type # connector colour ################################################# # GrfNodeCore class: this is the node class for blockdiagram objects # { Node item in the Blodiator.
Define an instance of 'GrfNodeCore' with appropriate arguments:
sheetCanvas = an instance of the canvas. For Blodiator it is an instance of CntSheetCanvas
tag = a string used by tkinter to identify the block
label = item label
cat = a string showing the category of the block
inPort = list containing information about input ports of the item
outPort = list containing information about output ports of the item
con_type = connection type: ('none', 'logical', 'electrical', 'optical')
color_type = a tuple containing colors for different coneection type
mode = state of the object: 'normal', 'disabled', 'selected', 'erroneous'
std = standard output which is an instance of 'ColoredText' class
This class contains the required fundamental functions for drawing and managing
node item in Blodiator. Functions such as updating center, color,
brush, tag, label, input and output ports, mode, etc are defined in this class. # version of the class # < class functions section > # < class functions section > # < inherited functions section > # __init__ func: initialiser dunar # { Construct a GrfNodeCore
input:
sheetCanvas = an instance of the canvas. For Blodiator it is an instance of CntSheetCanvas
tag = a string used by tkinter to identify the block
label = item label
center = center of the block
cat = a string showing the category of the block
inPort = list containing information about input ports of the item
outPort = list containing information about output ports of the item
con_type = connection type: ('none', 'logical', 'electrical', 'optical')
color_type = a tuple containing colors for different coneection type
mode = state of the object: 'normal', 'disabled', 'selected', 'erroneous'
std = standard output which is an instance of 'ColoredText' class
output: none # } __init__ func # __repr__ func: repr dunar # { Class repr dunar function. # generate formatted text # } __repr__ func # __str__ func: str dunar # { Class str dunar function. # generate formatted text # } __str__ func # __contains__ func: in dunar # { Class in dunar function. # } __contains__ func # < inherited functions section > # < class functions section > # initBlock func: initialise the object # { Initialises the node graphics on the canvas
input: none
output: none # } initBlock func # update_bbox func: update the bounding box # { Updates the node bounding box
input: none
output: none # } update_bbox func # update_center func: update the center # { Updates the node center
input: none
output: none # } update_center func # update_color func: update the color # { Updates the connector color set (outline and filling colors for all different modes)
input: none
output: none # } update_color func # update_brush func: update the brush # { Updates the connector brush set (line thickness and style for all different modes)
input: none
output: none # } update_brush func # < class functions section > # < getter and setter functions section > # property: inPortID # inPortID getter func: input port id getter # { Class property getter: input port ID # } inPortID getter func # inPortID setter func: input port id setter # { Class property setter: input port ID # } inPortID setter func # property: size # size getter func: output size getter # { Class property getter: node size # } size getter func # size setter func: size setter # { Class property setter: node size # } size setter func # property: outPortID # outPortID getter func: output port id getter # { Class property getter: output port ID # } outPortID getter func # outPortID setter func: output port id setter # { Class property setter: output port ID # } outPortID setter func # < getter and setter functions section > # } GrfNodeCore class # main func: contains code to test GrfNodeCore class # { # CT.Print(repr(obj)) # CT.Print(repr(obj)) # } main func
| 2.091034
| 2
|
爬虫/第一章/获取页面.py
|
Aloof-0/codesr
| 1
|
6629574
|
# -*- coding: utf-8 -*-
# @Time : 2020/7/19 13:02
# @Author : Frosty
# @Email : <EMAIL>
# @File : 获取页面.py
# @Time : 2020/7/19 13:02
# @Software: PyCharm
import requests
a = "http://ntlias-stu.boxuegu.com/#/login" #定义link为目标网易地址
r = requests.get(a)
r.raise_for_status()
r.encoding = r.apparent_encoding
print(r.text)
print("文本编码:",r.encoding)
|
# -*- coding: utf-8 -*-
# @Time : 2020/7/19 13:02
# @Author : Frosty
# @Email : <EMAIL>
# @File : 获取页面.py
# @Time : 2020/7/19 13:02
# @Software: PyCharm
import requests
a = "http://ntlias-stu.boxuegu.com/#/login" #定义link为目标网易地址
r = requests.get(a)
r.raise_for_status()
r.encoding = r.apparent_encoding
print(r.text)
print("文本编码:",r.encoding)
|
zh
| 0.254938
|
# -*- coding: utf-8 -*- # @Time : 2020/7/19 13:02 # @Author : Frosty # @Email : <EMAIL> # @File : 获取页面.py # @Time : 2020/7/19 13:02 # @Software: PyCharm #/login" #定义link为目标网易地址
| 2.120203
| 2
|
Code/options.py
|
skywolf829/ASMRSR
| 4
|
6629575
|
<reponame>skywolf829/ASMRSR
import os
import json
class Options():
def get_default():
opt = {}
# Input info
opt["mode"] = "2D" # What SinGAN to use - 2D, 2DTV, or 3D
opt["feat_model"] = "RDN" # What SinGAN to use - 2D or 3D
opt["upscale_model"] = "LIIF" # What SinGAN to use - 2D or 3D
opt['residual_weighing'] = False
opt["data_folder"] = "TrainingData/Snickers"
opt['scaling_mode'] = None # magnitude, channel, learned, none
opt['load_data_at_start'] = False
opt['single_shot'] = False
opt["save_folder"] = "SavedModels"
opt["save_name"] = "Temp" # Folder that the model will be saved to
opt["num_channels"] = 3
opt["spatial_downscale_ratio"] = 0.5 # Spatial downscale ratio between levels
opt["min_dimension_size"] = 16 # Smallest a dimension can go to upscale from
opt["cropping_resolution"] = 48
opt["time_cropping_resolution"] = 48
opt["train_date_time"] = None # The day/time the model was trained (finish time)
opt['fine_tuning'] = False
opt['dataset_name'] = "isotropic1024coarse"
opt['num_dataset_timesteps'] = 100
opt['x_resolution'] = 1024
opt['y_resolution'] = 1024
opt['z_resolution'] = 1
opt['t_resolution'] = 400
opt['ts_skip'] = 10
opt['num_dims'] = 3
opt['random_flipping'] = True
opt['num_networked_workers'] = 4
opt["num_workers"] = 2
# generator info
opt["num_blocks"] = 5
opt['num_discrim_blocks'] = 5
opt["base_num_kernels"] = 64 # Num of kernels in smallest scale conv layers
opt["pre_padding"] = False # Padding on conv layers in the GAN
opt["kernel_size"] = 3
opt["padding"] = 1
opt["stride"] = 1
opt['conv_groups'] = 1
opt['separate_chans'] = False
opt['B'] = 0.2
opt['num_lstm_layers'] = 3
opt['training_seq_length'] = 3
opt['temporal_direction'] = "forward"
opt['temporal_generator'] = "TSRTVD"
opt['feat_grid_channels'] = 16
opt['feat_grid_x'] = 32
opt['feat_grid_y'] = 32
opt['feat_grid_z'] = 32
opt['num_positional_encoding_terms'] = 6
opt["n"] = 0 # Number of scales in the heirarchy, defined by the input and min_dimension_size
opt["resolutions"] = [] # The scales for the GAN
opt["downsample_mode"] = "average_pooling"
opt["upsample_mode"] = "trilinear"
opt['scale_factor_start'] = 1.0
opt['scale_factor_end'] = 4.0
opt["train_distributed"] = False
opt["device"] = "cuda:0"
opt["gpus_per_node"] = 8
opt["num_nodes"] = 1
opt["ranking"] = 0
opt["save_generators"] = True
opt["save_discriminators"] = True
opt["physical_constraints"] = "none"
opt["patch_size"] = 128
opt["training_patch_size"] = 96
opt["regularization"] = "GP" #Either TV (total variation) or GP (gradient penalty) or SN
# GAN training info
opt["alpha_1"] = 1 # Reconstruction loss coefficient
opt["alpha_2"] = 0.1 # Adversarial loss coefficient
opt["alpha_3"] = 0 # Soft physical loss coefficient
opt["alpha_4"] = 0 # mag_and_angle loss
opt["alpha_5"] = 0 # first derivative loss coeff
opt["alpha_6"] = 0 # Lagrangian transport loss
opt["adaptive_streamlines"] = False
opt['streamline_res'] = 100
opt['streamline_length'] = 5
opt['periodic'] = False
opt["generator_steps"] = 1
opt["discriminator_steps"] = 1
opt["epochs"] = 1000
opt["minibatch"] = 1 # Minibatch for training
opt["g_lr"] = 0.001 # Learning rate for GAN generator
opt["d_lr"] = 0.0004 # Learning rate for GAN discriminator
opt["beta_1"] = 0.5
opt["beta_2"] = 0.999
opt["gamma"] = 0.5
# Info during training (to continue if it stopped)
opt["scale_in_training"] = 0
opt["iteration_number"] = 0
opt["epoch_number"] = 0
opt["save_every"] = 1000
opt["save_training_loss"] = True
return opt
def save_options(opt, save_location):
with open(os.path.join(save_location, "options.json"), 'w') as fp:
json.dump(opt, fp, sort_keys=True, indent=4)
def load_options(load_location):
opt = Options.get_default()
print(load_location)
if not os.path.exists(load_location):
print("%s doesn't exist, load failed" % load_location)
return
if os.path.exists(os.path.join(load_location, "options.json")):
with open(os.path.join(load_location, "options.json"), 'r') as fp:
opt2 = json.load(fp)
else:
print("%s doesn't exist, load failed" % "options.json")
return
# For forward compatibility with new attributes in the options file
for attr in opt2.keys():
opt[attr] = opt2[attr]
return opt
|
import os
import json
class Options():
def get_default():
opt = {}
# Input info
opt["mode"] = "2D" # What SinGAN to use - 2D, 2DTV, or 3D
opt["feat_model"] = "RDN" # What SinGAN to use - 2D or 3D
opt["upscale_model"] = "LIIF" # What SinGAN to use - 2D or 3D
opt['residual_weighing'] = False
opt["data_folder"] = "TrainingData/Snickers"
opt['scaling_mode'] = None # magnitude, channel, learned, none
opt['load_data_at_start'] = False
opt['single_shot'] = False
opt["save_folder"] = "SavedModels"
opt["save_name"] = "Temp" # Folder that the model will be saved to
opt["num_channels"] = 3
opt["spatial_downscale_ratio"] = 0.5 # Spatial downscale ratio between levels
opt["min_dimension_size"] = 16 # Smallest a dimension can go to upscale from
opt["cropping_resolution"] = 48
opt["time_cropping_resolution"] = 48
opt["train_date_time"] = None # The day/time the model was trained (finish time)
opt['fine_tuning'] = False
opt['dataset_name'] = "isotropic1024coarse"
opt['num_dataset_timesteps'] = 100
opt['x_resolution'] = 1024
opt['y_resolution'] = 1024
opt['z_resolution'] = 1
opt['t_resolution'] = 400
opt['ts_skip'] = 10
opt['num_dims'] = 3
opt['random_flipping'] = True
opt['num_networked_workers'] = 4
opt["num_workers"] = 2
# generator info
opt["num_blocks"] = 5
opt['num_discrim_blocks'] = 5
opt["base_num_kernels"] = 64 # Num of kernels in smallest scale conv layers
opt["pre_padding"] = False # Padding on conv layers in the GAN
opt["kernel_size"] = 3
opt["padding"] = 1
opt["stride"] = 1
opt['conv_groups'] = 1
opt['separate_chans'] = False
opt['B'] = 0.2
opt['num_lstm_layers'] = 3
opt['training_seq_length'] = 3
opt['temporal_direction'] = "forward"
opt['temporal_generator'] = "TSRTVD"
opt['feat_grid_channels'] = 16
opt['feat_grid_x'] = 32
opt['feat_grid_y'] = 32
opt['feat_grid_z'] = 32
opt['num_positional_encoding_terms'] = 6
opt["n"] = 0 # Number of scales in the heirarchy, defined by the input and min_dimension_size
opt["resolutions"] = [] # The scales for the GAN
opt["downsample_mode"] = "average_pooling"
opt["upsample_mode"] = "trilinear"
opt['scale_factor_start'] = 1.0
opt['scale_factor_end'] = 4.0
opt["train_distributed"] = False
opt["device"] = "cuda:0"
opt["gpus_per_node"] = 8
opt["num_nodes"] = 1
opt["ranking"] = 0
opt["save_generators"] = True
opt["save_discriminators"] = True
opt["physical_constraints"] = "none"
opt["patch_size"] = 128
opt["training_patch_size"] = 96
opt["regularization"] = "GP" #Either TV (total variation) or GP (gradient penalty) or SN
# GAN training info
opt["alpha_1"] = 1 # Reconstruction loss coefficient
opt["alpha_2"] = 0.1 # Adversarial loss coefficient
opt["alpha_3"] = 0 # Soft physical loss coefficient
opt["alpha_4"] = 0 # mag_and_angle loss
opt["alpha_5"] = 0 # first derivative loss coeff
opt["alpha_6"] = 0 # Lagrangian transport loss
opt["adaptive_streamlines"] = False
opt['streamline_res'] = 100
opt['streamline_length'] = 5
opt['periodic'] = False
opt["generator_steps"] = 1
opt["discriminator_steps"] = 1
opt["epochs"] = 1000
opt["minibatch"] = 1 # Minibatch for training
opt["g_lr"] = 0.001 # Learning rate for GAN generator
opt["d_lr"] = 0.0004 # Learning rate for GAN discriminator
opt["beta_1"] = 0.5
opt["beta_2"] = 0.999
opt["gamma"] = 0.5
# Info during training (to continue if it stopped)
opt["scale_in_training"] = 0
opt["iteration_number"] = 0
opt["epoch_number"] = 0
opt["save_every"] = 1000
opt["save_training_loss"] = True
return opt
def save_options(opt, save_location):
with open(os.path.join(save_location, "options.json"), 'w') as fp:
json.dump(opt, fp, sort_keys=True, indent=4)
def load_options(load_location):
opt = Options.get_default()
print(load_location)
if not os.path.exists(load_location):
print("%s doesn't exist, load failed" % load_location)
return
if os.path.exists(os.path.join(load_location, "options.json")):
with open(os.path.join(load_location, "options.json"), 'r') as fp:
opt2 = json.load(fp)
else:
print("%s doesn't exist, load failed" % "options.json")
return
# For forward compatibility with new attributes in the options file
for attr in opt2.keys():
opt[attr] = opt2[attr]
return opt
|
en
| 0.854827
|
# Input info # What SinGAN to use - 2D, 2DTV, or 3D # What SinGAN to use - 2D or 3D # What SinGAN to use - 2D or 3D # magnitude, channel, learned, none # Folder that the model will be saved to # Spatial downscale ratio between levels # Smallest a dimension can go to upscale from # The day/time the model was trained (finish time) # generator info # Num of kernels in smallest scale conv layers # Padding on conv layers in the GAN # Number of scales in the heirarchy, defined by the input and min_dimension_size # The scales for the GAN #Either TV (total variation) or GP (gradient penalty) or SN # GAN training info # Reconstruction loss coefficient # Adversarial loss coefficient # Soft physical loss coefficient # mag_and_angle loss # first derivative loss coeff # Lagrangian transport loss # Minibatch for training # Learning rate for GAN generator # Learning rate for GAN discriminator # Info during training (to continue if it stopped) # For forward compatibility with new attributes in the options file
| 2.46134
| 2
|
contak/migrations/0001_initial.py
|
dasfrosty/contak-service
| 0
|
6629576
|
# Generated by Django 3.1.5 on 2021-01-31 19:08
import django.db.models.deletion
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Contact",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("first_name", models.TextField()),
("last_name", models.TextField()),
("note", models.TextField(blank=True, null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"ordering": ["last_name", "first_name", "id"],
},
),
]
|
# Generated by Django 3.1.5 on 2021-01-31 19:08
import django.db.models.deletion
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Contact",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("first_name", models.TextField()),
("last_name", models.TextField()),
("note", models.TextField(blank=True, null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"ordering": ["last_name", "first_name", "id"],
},
),
]
|
en
| 0.813028
|
# Generated by Django 3.1.5 on 2021-01-31 19:08
| 1.698571
| 2
|
training/pytorch/structured/custom_containers/gpu/trainer/experiment.py
|
gogasca/ai-platform-samples-1
| 0
|
6629577
|
<filename>training/pytorch/structured/custom_containers/gpu/trainer/experiment.py<gh_stars>0
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the \"License\");
# you may not use this file except in compliance with the License.\n",
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an \"AS IS\" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import inputs
import model
def train(sequential_model, train_loader, criterion, optimizer, epoch):
"""Create the training loop for one epoch. Read the data from the
dataloader, calculate the loss, and update the DNN. Lastly, display some
statistics about the performance of the DNN during training.
Args:
sequential_model: The neural network that you are training, based on
nn.Module
train_loader: The training dataset
criterion: The loss function used during training
optimizer: The selected optmizer to update parameters and gradients
epoch: The current epoch that the training loop is on
"""
sequential_model.train()
running_loss = 0.0
for batch_index, data in enumerate(train_loader):
features = data['features']
target = data['target']
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = sequential_model(features)
loss = criterion(outputs, target)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if batch_index % 10 == 9: # print every 10 mini-batches
print('[epoch: %d, batch: %5d] loss: %.3f' %
(epoch, batch_index + 1, running_loss / 10))
running_loss = 0.0
def test(sequential_model, test_loader, criterion):
"""Test / Evaluate the DNNs performance with a test / eval dataset.
Read the data from the dataloader and calculate the loss. Lastly,
display some statistics about the performance of the DNN during testing.
Args:
sequential_model: The neural network that you are testing, based on
nn.Module
test_loader: The test / evaluation dataset
criterion: The loss function
"""
sequential_model.eval()
test_loss = 0.0
correct = 0
with torch.no_grad():
for _, data in enumerate(test_loader, 0):
features = data['features']
target = data['target']
output = sequential_model(features)
# sum up batch loss
test_loss += criterion(output, target)
# compute accuracy for a binary classifier
# Values > 0.5 = 1
# Values <= 0.5 = 0
correct += ((output > 0.5) == (target > 0.5)).sum().item()
# get the average loss for the test set.
test_loss /= (len(test_loader.sampler) / test_loader.batch_size)
# print statistics
print('\nTest set:\n\tAverage loss: {:.4f}'.format(test_loss))
print('\tAccuracy: {}/{} ({:.0f}%)\n'.format(
correct,
len(test_loader.sampler),
100. * correct / len(test_loader.sampler)))
def run(args):
"""Load the data, train, evaluate, and export the model for serving and
evaluating.
Args:
args: experiment parameters.
"""
cuda_availability = torch.cuda.is_available()
if cuda_availability:
device = torch.device('cuda:{}'.format(torch.cuda.current_device()))
else:
device = 'cpu'
print('\n*************************')
print('`cuda` available: {}'.format(cuda_availability))
print('Current Device: {}'.format(device))
print('*************************\n')
torch.manual_seed(args.seed)
# Open our dataset
train_loader, test_loader, eval_loader = inputs.load_data(args, device)
# Create the model, loss function, and optimizer
sequential_model, criterion, optimizer = model.create(args, device)
# Train / Test the model
for epoch in range(1, args.num_epochs + 1):
train(sequential_model, train_loader, criterion, optimizer, epoch)
test(sequential_model, test_loader, criterion)
# Evalutate the model
print("Evaluate the model using the evaluation dataset")
test(sequential_model, eval_loader, criterion)
# Export the trained model
torch.save(sequential_model.state_dict(), args.model_name)
# Save the model to GCS
if args.job_dir:
inputs.save_model(args)
|
<filename>training/pytorch/structured/custom_containers/gpu/trainer/experiment.py<gh_stars>0
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the \"License\");
# you may not use this file except in compliance with the License.\n",
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an \"AS IS\" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import inputs
import model
def train(sequential_model, train_loader, criterion, optimizer, epoch):
"""Create the training loop for one epoch. Read the data from the
dataloader, calculate the loss, and update the DNN. Lastly, display some
statistics about the performance of the DNN during training.
Args:
sequential_model: The neural network that you are training, based on
nn.Module
train_loader: The training dataset
criterion: The loss function used during training
optimizer: The selected optmizer to update parameters and gradients
epoch: The current epoch that the training loop is on
"""
sequential_model.train()
running_loss = 0.0
for batch_index, data in enumerate(train_loader):
features = data['features']
target = data['target']
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = sequential_model(features)
loss = criterion(outputs, target)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if batch_index % 10 == 9: # print every 10 mini-batches
print('[epoch: %d, batch: %5d] loss: %.3f' %
(epoch, batch_index + 1, running_loss / 10))
running_loss = 0.0
def test(sequential_model, test_loader, criterion):
"""Test / Evaluate the DNNs performance with a test / eval dataset.
Read the data from the dataloader and calculate the loss. Lastly,
display some statistics about the performance of the DNN during testing.
Args:
sequential_model: The neural network that you are testing, based on
nn.Module
test_loader: The test / evaluation dataset
criterion: The loss function
"""
sequential_model.eval()
test_loss = 0.0
correct = 0
with torch.no_grad():
for _, data in enumerate(test_loader, 0):
features = data['features']
target = data['target']
output = sequential_model(features)
# sum up batch loss
test_loss += criterion(output, target)
# compute accuracy for a binary classifier
# Values > 0.5 = 1
# Values <= 0.5 = 0
correct += ((output > 0.5) == (target > 0.5)).sum().item()
# get the average loss for the test set.
test_loss /= (len(test_loader.sampler) / test_loader.batch_size)
# print statistics
print('\nTest set:\n\tAverage loss: {:.4f}'.format(test_loss))
print('\tAccuracy: {}/{} ({:.0f}%)\n'.format(
correct,
len(test_loader.sampler),
100. * correct / len(test_loader.sampler)))
def run(args):
"""Load the data, train, evaluate, and export the model for serving and
evaluating.
Args:
args: experiment parameters.
"""
cuda_availability = torch.cuda.is_available()
if cuda_availability:
device = torch.device('cuda:{}'.format(torch.cuda.current_device()))
else:
device = 'cpu'
print('\n*************************')
print('`cuda` available: {}'.format(cuda_availability))
print('Current Device: {}'.format(device))
print('*************************\n')
torch.manual_seed(args.seed)
# Open our dataset
train_loader, test_loader, eval_loader = inputs.load_data(args, device)
# Create the model, loss function, and optimizer
sequential_model, criterion, optimizer = model.create(args, device)
# Train / Test the model
for epoch in range(1, args.num_epochs + 1):
train(sequential_model, train_loader, criterion, optimizer, epoch)
test(sequential_model, test_loader, criterion)
# Evalutate the model
print("Evaluate the model using the evaluation dataset")
test(sequential_model, eval_loader, criterion)
# Export the trained model
torch.save(sequential_model.state_dict(), args.model_name)
# Save the model to GCS
if args.job_dir:
inputs.save_model(args)
|
en
| 0.826408
|
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not use this file except in compliance with the License.\n", # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Create the training loop for one epoch. Read the data from the dataloader, calculate the loss, and update the DNN. Lastly, display some statistics about the performance of the DNN during training. Args: sequential_model: The neural network that you are training, based on nn.Module train_loader: The training dataset criterion: The loss function used during training optimizer: The selected optmizer to update parameters and gradients epoch: The current epoch that the training loop is on # zero the parameter gradients # forward + backward + optimize # print statistics # print every 10 mini-batches Test / Evaluate the DNNs performance with a test / eval dataset. Read the data from the dataloader and calculate the loss. Lastly, display some statistics about the performance of the DNN during testing. Args: sequential_model: The neural network that you are testing, based on nn.Module test_loader: The test / evaluation dataset criterion: The loss function # sum up batch loss # compute accuracy for a binary classifier # Values > 0.5 = 1 # Values <= 0.5 = 0 # get the average loss for the test set. # print statistics Load the data, train, evaluate, and export the model for serving and evaluating. Args: args: experiment parameters. # Open our dataset # Create the model, loss function, and optimizer # Train / Test the model # Evalutate the model # Export the trained model # Save the model to GCS
| 3.018635
| 3
|
flask_turnkey/auth.py
|
DommertTech/flask-turnkey
| 0
|
6629578
|
<filename>flask_turnkey/auth.py<gh_stars>0
# Flask-TurnKey Version 0.0.1
# Auth.py
from flask_turboduck.auth import Auth
from app import app, db
from models import User
# Authentication wrapper for TurboDuck
auth = Auth(app, db, user_model=User)
|
<filename>flask_turnkey/auth.py<gh_stars>0
# Flask-TurnKey Version 0.0.1
# Auth.py
from flask_turboduck.auth import Auth
from app import app, db
from models import User
# Authentication wrapper for TurboDuck
auth = Auth(app, db, user_model=User)
|
en
| 0.489926
|
# Flask-TurnKey Version 0.0.1 # Auth.py # Authentication wrapper for TurboDuck
| 1.688651
| 2
|
tests/test_pabotlib.py
|
kitschyboy/pabot
| 0
|
6629579
|
<filename>tests/test_pabotlib.py
import unittest
import os
from pabot import pabotlib
from robot.running.context import EXECUTION_CONTEXTS
from robot.running.namespace import Namespace
from robot.running.model import TestSuite
from robot.variables import Variables
class PabotLibTests(unittest.TestCase):
def setUp(self):
builtinmock = lambda: 0
builtinmock.get_variable_value = lambda *args: None
self._runs = 0
def runned(*args):
self._runs += 1
builtinmock.run_keyword = runned
pabotlib.BuiltIn = lambda: builtinmock
self.builtinmock = builtinmock
def test_pabotlib_listener_path(self):
lib = pabotlib.PabotLib()
lib._start_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "Suite")
lib._start_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite.Test")
lib._start_keyword("Keyword1", {})
self.assertEqual(lib._path, "Suite.Test.0")
lib._end_keyword("Keyword1", {})
lib._start_keyword("Keyword2", {})
self.assertEqual(lib._path, "Suite.Test.1")
lib._end_keyword("Keyword2", {})
self.assertEqual(lib._path, "Suite.Test")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_listener_when_dynamic_import_with_import_library(self):
lib = pabotlib.PabotLib()
lib._end_keyword("Import Library", {})
self.assertEqual(lib._path, "0")
lib._start_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0.1")
lib._end_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0")
lib._start_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0.2")
lib._end_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0")
lib._end_keyword("Big word", {})
self.assertEqual(lib._path, "1")
lib._start_keyword("Little word", {})
self.assertEqual(lib._path, "1.1")
lib._end_keyword("Little word", {})
self.assertEqual(lib._path, "1")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_listener_from_start_keyword(self):
lib = pabotlib.PabotLib()
# Don't know if this is possible.
lib._start_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0.0")
lib._end_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0")
lib._start_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0.1")
lib._end_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0")
lib._end_keyword("Big word", {})
self.assertEqual(lib._path, "1")
lib._start_keyword("Little word", {})
self.assertEqual(lib._path, "1.1")
lib._end_keyword("Little word", {})
self.assertEqual(lib._path, "1")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_listener_from_end_keywords(self):
lib = pabotlib.PabotLib()
lib._end_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0")
lib._end_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "1")
lib._end_keyword("Big word", {})
self.assertEqual(lib._path, "2")
lib._start_keyword("Little word", {})
self.assertEqual(lib._path, "2.1")
lib._end_keyword("Little word", {})
self.assertEqual(lib._path, "2")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_set_get_parallel_value(self):
lib = pabotlib.PabotLib()
lib.set_parallel_value_for_key("key", 1)
value = lib.get_parallel_value_for_key("key")
self.assertEqual(value, 1)
def test_pabotlib_run_only_once(self):
lib = pabotlib.PabotLib()
self.assertEqual(self._runs, 0)
lib.run_only_once("keyword")
self.assertEqual(self._runs, 1)
lib.run_only_once("keyword")
self.assertEqual(self._runs, 1)
def test_pabotlib_run_on_last_process(self):
lib = pabotlib.PabotLib()
self.assertEqual(self._runs, 0)
self.builtinmock.get_variable_value = lambda *args: "0"
lib.run_on_last_process("keyword")
self.assertEqual(self._runs, 0)
self.builtinmock.get_variable_value = lambda *args: "1"
lib.get_parallel_value_for_key = lambda *args: 1
lib.run_on_last_process("keyword")
self.assertEqual(self._runs, 1)
def test_pabotlib_run_on_last_process_defaults_to_running(self):
lib = pabotlib.PabotLib()
self.assertEqual(self._runs, 0)
lib.run_on_last_process("keyword")
self.assertEqual(self._runs, 1)
def test_acquire_and_release_lock(self):
lib = pabotlib.PabotLib()
self.assertTrue(lib.acquire_lock("lockname"))
self.assertTrue(lib.acquire_lock("lock2"))
lib.release_lock("lockname")
self.assertTrue(lib.acquire_lock("lockname"))
lib.release_lock("lock2")
lib.release_lock("lockname")
def test_releasing_lock_on_close(self):
lib = pabotlib.PabotLib()
self.assertTrue(lib.acquire_lock("somelock"))
self.assertTrue(lib.acquire_lock("somelock2"))
self.assertTrue("somelock" in lib._locks)
self.assertTrue("somelock2" in lib._locks)
lib._close()
self.assertTrue("somelock" not in lib._locks)
self.assertTrue("somelock2" not in lib._locks)
def test_acquire_and_release_valueset(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set()
self.assertIn(
vals, ["MyValueSet", "TestSystemWithLasers", "TestSystemWithTachyonCannon"]
)
value = lib.get_value_from_set("key")
try:
lib.get_value_from_set("nokey")
raise RuntimeError("This should not go here")
except AssertionError:
pass
lib.release_value_set()
self.assertEqual(value, "someval")
try:
lib.get_value_from_set("key")
raise RuntimeError("This should not go here")
except AssertionError:
pass
def test_acquire_and_disable_valueset(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set()
self.assertIn(
vals, ["MyValueSet", "TestSystemWithLasers", "TestSystemWithTachyonCannon"]
)
lib.disable_value_set()
vals2 = lib.acquire_value_set()
self.assertNotEqual(vals, vals2)
lib.release_value_set()
def test_acquire_and_release_valueset_with_tag(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set("laser")
self.assertEqual(vals, "TestSystemWithLasers")
value = lib.get_value_from_set("noise")
self.assertEqual(value, "zapp")
lib.release_value_set()
vals = lib.acquire_value_set("tachyon")
self.assertEqual(vals, "TestSystemWithTachyonCannon")
value = lib.get_value_from_set("noise")
self.assertEqual(value, "zump")
lib.release_value_set()
def test_acquire_and_release_valueset_with_shared_tag(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set("commontag")
self.assertIn(vals, ["TestSystemWithLasers", "TestSystemWithTachyonCannon"])
value = lib.get_value_from_set("commonval")
lib.release_value_set()
self.assertEqual(value, "true")
def test_reacquire_valueset(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
lib.acquire_value_set()
try:
lib.acquire_value_set()
self.fail("Should have thrown an exception")
except ValueError:
pass
finally:
lib.release_value_set()
def test_trying_to_acquire_valueset_with_none_existing_tag(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
try:
lib.acquire_value_set("none-existing-tag")
self.fail("Should have thrown an exception")
except ValueError:
pass
def _output(self):
output = lambda: 0
output.start_keyword = output.end_keyword = lambda *a: 0
output.fail = output.debug = output.trace = lambda *a: 0
return output
def _create_ctx(self):
suite = TestSuite()
variables = Variables()
EXECUTION_CONTEXTS._contexts = []
EXECUTION_CONTEXTS.start_suite(
suite, Namespace(variables, suite, suite.resource), self._output()
)
if __name__ == "__main__":
unittest.main()
|
<filename>tests/test_pabotlib.py
import unittest
import os
from pabot import pabotlib
from robot.running.context import EXECUTION_CONTEXTS
from robot.running.namespace import Namespace
from robot.running.model import TestSuite
from robot.variables import Variables
class PabotLibTests(unittest.TestCase):
def setUp(self):
builtinmock = lambda: 0
builtinmock.get_variable_value = lambda *args: None
self._runs = 0
def runned(*args):
self._runs += 1
builtinmock.run_keyword = runned
pabotlib.BuiltIn = lambda: builtinmock
self.builtinmock = builtinmock
def test_pabotlib_listener_path(self):
lib = pabotlib.PabotLib()
lib._start_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "Suite")
lib._start_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite.Test")
lib._start_keyword("Keyword1", {})
self.assertEqual(lib._path, "Suite.Test.0")
lib._end_keyword("Keyword1", {})
lib._start_keyword("Keyword2", {})
self.assertEqual(lib._path, "Suite.Test.1")
lib._end_keyword("Keyword2", {})
self.assertEqual(lib._path, "Suite.Test")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_listener_when_dynamic_import_with_import_library(self):
lib = pabotlib.PabotLib()
lib._end_keyword("Import Library", {})
self.assertEqual(lib._path, "0")
lib._start_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0.1")
lib._end_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0")
lib._start_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0.2")
lib._end_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0")
lib._end_keyword("Big word", {})
self.assertEqual(lib._path, "1")
lib._start_keyword("Little word", {})
self.assertEqual(lib._path, "1.1")
lib._end_keyword("Little word", {})
self.assertEqual(lib._path, "1")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_listener_from_start_keyword(self):
lib = pabotlib.PabotLib()
# Don't know if this is possible.
lib._start_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0.0")
lib._end_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0")
lib._start_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0.1")
lib._end_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "0")
lib._end_keyword("Big word", {})
self.assertEqual(lib._path, "1")
lib._start_keyword("Little word", {})
self.assertEqual(lib._path, "1.1")
lib._end_keyword("Little word", {})
self.assertEqual(lib._path, "1")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_listener_from_end_keywords(self):
lib = pabotlib.PabotLib()
lib._end_keyword("Some Keyword", {})
self.assertEqual(lib._path, "0")
lib._end_keyword("Some Keyword 2", {})
self.assertEqual(lib._path, "1")
lib._end_keyword("Big word", {})
self.assertEqual(lib._path, "2")
lib._start_keyword("Little word", {})
self.assertEqual(lib._path, "2.1")
lib._end_keyword("Little word", {})
self.assertEqual(lib._path, "2")
lib._end_test("Test", {"longname": "Suite.Test"})
self.assertEqual(lib._path, "Suite")
lib._end_suite("Suite", {"longname": "Suite"})
self.assertEqual(lib._path, "")
lib._close()
def test_pabotlib_set_get_parallel_value(self):
lib = pabotlib.PabotLib()
lib.set_parallel_value_for_key("key", 1)
value = lib.get_parallel_value_for_key("key")
self.assertEqual(value, 1)
def test_pabotlib_run_only_once(self):
lib = pabotlib.PabotLib()
self.assertEqual(self._runs, 0)
lib.run_only_once("keyword")
self.assertEqual(self._runs, 1)
lib.run_only_once("keyword")
self.assertEqual(self._runs, 1)
def test_pabotlib_run_on_last_process(self):
lib = pabotlib.PabotLib()
self.assertEqual(self._runs, 0)
self.builtinmock.get_variable_value = lambda *args: "0"
lib.run_on_last_process("keyword")
self.assertEqual(self._runs, 0)
self.builtinmock.get_variable_value = lambda *args: "1"
lib.get_parallel_value_for_key = lambda *args: 1
lib.run_on_last_process("keyword")
self.assertEqual(self._runs, 1)
def test_pabotlib_run_on_last_process_defaults_to_running(self):
lib = pabotlib.PabotLib()
self.assertEqual(self._runs, 0)
lib.run_on_last_process("keyword")
self.assertEqual(self._runs, 1)
def test_acquire_and_release_lock(self):
lib = pabotlib.PabotLib()
self.assertTrue(lib.acquire_lock("lockname"))
self.assertTrue(lib.acquire_lock("lock2"))
lib.release_lock("lockname")
self.assertTrue(lib.acquire_lock("lockname"))
lib.release_lock("lock2")
lib.release_lock("lockname")
def test_releasing_lock_on_close(self):
lib = pabotlib.PabotLib()
self.assertTrue(lib.acquire_lock("somelock"))
self.assertTrue(lib.acquire_lock("somelock2"))
self.assertTrue("somelock" in lib._locks)
self.assertTrue("somelock2" in lib._locks)
lib._close()
self.assertTrue("somelock" not in lib._locks)
self.assertTrue("somelock2" not in lib._locks)
def test_acquire_and_release_valueset(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set()
self.assertIn(
vals, ["MyValueSet", "TestSystemWithLasers", "TestSystemWithTachyonCannon"]
)
value = lib.get_value_from_set("key")
try:
lib.get_value_from_set("nokey")
raise RuntimeError("This should not go here")
except AssertionError:
pass
lib.release_value_set()
self.assertEqual(value, "someval")
try:
lib.get_value_from_set("key")
raise RuntimeError("This should not go here")
except AssertionError:
pass
def test_acquire_and_disable_valueset(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set()
self.assertIn(
vals, ["MyValueSet", "TestSystemWithLasers", "TestSystemWithTachyonCannon"]
)
lib.disable_value_set()
vals2 = lib.acquire_value_set()
self.assertNotEqual(vals, vals2)
lib.release_value_set()
def test_acquire_and_release_valueset_with_tag(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set("laser")
self.assertEqual(vals, "TestSystemWithLasers")
value = lib.get_value_from_set("noise")
self.assertEqual(value, "zapp")
lib.release_value_set()
vals = lib.acquire_value_set("tachyon")
self.assertEqual(vals, "TestSystemWithTachyonCannon")
value = lib.get_value_from_set("noise")
self.assertEqual(value, "zump")
lib.release_value_set()
def test_acquire_and_release_valueset_with_shared_tag(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
vals = lib.acquire_value_set("commontag")
self.assertIn(vals, ["TestSystemWithLasers", "TestSystemWithTachyonCannon"])
value = lib.get_value_from_set("commonval")
lib.release_value_set()
self.assertEqual(value, "true")
def test_reacquire_valueset(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
lib.acquire_value_set()
try:
lib.acquire_value_set()
self.fail("Should have thrown an exception")
except ValueError:
pass
finally:
lib.release_value_set()
def test_trying_to_acquire_valueset_with_none_existing_tag(self):
lib = pabotlib.PabotLib()
lib._values = lib._parse_values(
resourcefile=os.path.join("tests", "resourcefile.dat")
)
try:
lib.acquire_value_set("none-existing-tag")
self.fail("Should have thrown an exception")
except ValueError:
pass
def _output(self):
output = lambda: 0
output.start_keyword = output.end_keyword = lambda *a: 0
output.fail = output.debug = output.trace = lambda *a: 0
return output
def _create_ctx(self):
suite = TestSuite()
variables = Variables()
EXECUTION_CONTEXTS._contexts = []
EXECUTION_CONTEXTS.start_suite(
suite, Namespace(variables, suite, suite.resource), self._output()
)
if __name__ == "__main__":
unittest.main()
|
en
| 0.917934
|
# Don't know if this is possible.
| 2.535568
| 3
|
ansible/lib/ansible/modules/extras/packaging/os/apk.py
|
kiv-box/kafka
| 0
|
6629580
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, <NAME> <https://github.com/kbrebanov>
# Based on pacman (Afterburn <http://github.com/afterburn>, <NAME> <<EMAIL>>)
# and apt (<NAME> <<EMAIL>>>) modules.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
version_added: "2.0"
options:
name:
description:
- A package name, like C(foo), or mutliple packages, like C(foo, bar).
required: false
default: null
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
required: false
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
- Update repository indexes. Can be run with other steps or on it's own.
required: false
default: no
choices: [ "yes", "no" ]
upgrade:
description:
- Upgrade all installed packages to their latest version.
required: false
default: no
choices: [ "yes", "no" ]
notes:
- '"name" and "upgrade" are mutually exclusive.'
'''
EXAMPLES = '''
# Update repositories and install "foo" package
- apk: name=foo update_cache=yes
# Update repositories and install "foo" and "bar" packages
- apk: name=foo,bar update_cache=yes
# Remove "foo" package
- apk: name=foo state=absent
# Remove "foo" and "bar" packages
- apk: name=foo,bar state=absent
# Install the package "foo"
- apk: name=foo state=present
# Install the packages "foo" and "bar"
- apk: name=foo,bar state=present
# Update repositories and update package "foo" to latest version
- apk: name=foo state=latest update_cache=yes
# Update repositories and update packages "foo" and "bar" to latest versions
- apk: name=foo,bar state=latest update_cache=yes
# Update all installed packages to the latest versions
- apk: upgrade=yes
# Update repositories as a separate step
- apk: update_cache=yes
'''
import os
import re
def update_package_db(module):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
module.fail_json(msg="could not update package db")
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = "(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (name)
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = "^%s: virtual meta package" % (name)
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to upgrade packages")
if re.search('^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded")
module.exit_json(changed=True, msg="upgraded packages")
def install_packages(module, names, state):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_package(module, name):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install) + " ".join(to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages))
module.exit_json(changed=True, msg="installed %s package(s)" % (packages))
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names))
module.exit_json(changed=True, msg="removed %s package(s)" % (names))
# ==========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name = dict(type='list'),
update_cache = dict(default='no', type='bool'),
upgrade = dict(default='no', type='bool'),
),
required_one_of = [['name', 'update_cache', 'upgrade']],
mutually_exclusive = [['name', 'upgrade']],
supports_check_mode = True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module)
if not p['name']:
module.exit_json(changed=True, msg='updated repository indexes')
if p['upgrade']:
upgrade_packages(module)
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
# Import module snippets.
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, <NAME> <https://github.com/kbrebanov>
# Based on pacman (Afterburn <http://github.com/afterburn>, <NAME> <<EMAIL>>)
# and apt (<NAME> <<EMAIL>>>) modules.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
version_added: "2.0"
options:
name:
description:
- A package name, like C(foo), or mutliple packages, like C(foo, bar).
required: false
default: null
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
required: false
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
- Update repository indexes. Can be run with other steps or on it's own.
required: false
default: no
choices: [ "yes", "no" ]
upgrade:
description:
- Upgrade all installed packages to their latest version.
required: false
default: no
choices: [ "yes", "no" ]
notes:
- '"name" and "upgrade" are mutually exclusive.'
'''
EXAMPLES = '''
# Update repositories and install "foo" package
- apk: name=foo update_cache=yes
# Update repositories and install "foo" and "bar" packages
- apk: name=foo,bar update_cache=yes
# Remove "foo" package
- apk: name=foo state=absent
# Remove "foo" and "bar" packages
- apk: name=foo,bar state=absent
# Install the package "foo"
- apk: name=foo state=present
# Install the packages "foo" and "bar"
- apk: name=foo,bar state=present
# Update repositories and update package "foo" to latest version
- apk: name=foo state=latest update_cache=yes
# Update repositories and update packages "foo" and "bar" to latest versions
- apk: name=foo,bar state=latest update_cache=yes
# Update all installed packages to the latest versions
- apk: upgrade=yes
# Update repositories as a separate step
- apk: update_cache=yes
'''
import os
import re
def update_package_db(module):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
module.fail_json(msg="could not update package db")
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = "(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (name)
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = "^%s: virtual meta package" % (name)
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to upgrade packages")
if re.search('^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded")
module.exit_json(changed=True, msg="upgraded packages")
def install_packages(module, names, state):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_package(module, name):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install) + " ".join(to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages))
module.exit_json(changed=True, msg="installed %s package(s)" % (packages))
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names))
module.exit_json(changed=True, msg="removed %s package(s)" % (names))
# ==========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name = dict(type='list'),
update_cache = dict(default='no', type='bool'),
upgrade = dict(default='no', type='bool'),
),
required_one_of = [['name', 'update_cache', 'upgrade']],
mutually_exclusive = [['name', 'upgrade']],
supports_check_mode = True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module)
if not p['name']:
module.exit_json(changed=True, msg='updated repository indexes')
if p['upgrade']:
upgrade_packages(module)
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
# Import module snippets.
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
en
| 0.706808
|
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, <NAME> <https://github.com/kbrebanov> # Based on pacman (Afterburn <http://github.com/afterburn>, <NAME> <<EMAIL>>) # and apt (<NAME> <<EMAIL>>>) modules. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. --- module: apk short_description: Manages apk packages description: - Manages I(apk) packages for Alpine Linux. version_added: "2.0" options: name: description: - A package name, like C(foo), or mutliple packages, like C(foo, bar). required: false default: null state: description: - Indicates the desired package(s) state. - C(present) ensures the package(s) is/are present. - C(absent) ensures the package(s) is/are absent. - C(latest) ensures the package(s) is/are present and the latest version(s). required: false default: present choices: [ "present", "absent", "latest" ] update_cache: description: - Update repository indexes. Can be run with other steps or on it's own. required: false default: no choices: [ "yes", "no" ] upgrade: description: - Upgrade all installed packages to their latest version. required: false default: no choices: [ "yes", "no" ] notes: - '"name" and "upgrade" are mutually exclusive.' 
# Update repositories and install "foo" package - apk: name=foo update_cache=yes # Update repositories and install "foo" and "bar" packages - apk: name=foo,bar update_cache=yes # Remove "foo" package - apk: name=foo state=absent # Remove "foo" and "bar" packages - apk: name=foo,bar state=absent # Install the package "foo" - apk: name=foo state=present # Install the packages "foo" and "bar" - apk: name=foo,bar state=present # Update repositories and update package "foo" to latest version - apk: name=foo state=latest update_cache=yes # Update repositories and update packages "foo" and "bar" to latest versions - apk: name=foo,bar state=latest update_cache=yes # Update all installed packages to the latest versions - apk: upgrade=yes # Update repositories as a separate step - apk: update_cache=yes # Check if virtual package # Get virtual package dependencies # ========================================== # Main control flow. # Set LANG env since we parse stdout # normalize the state parameter # Import module snippets.
| 1.576836
| 2
|
test_gtcls_ges.py
|
Wangsj18/ctcx_recognition
| 0
|
6629581
|
import glob
import tensorflow as tf
import sys
import parameters as pa
import rnn_network
import argparse
import numpy as np
import os
import video_utils as vu
import metrics.edit_distance as ed
import itertools
import time
from tqdm import tqdm
import scipy.spatial.distance as dist
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
def getTime(time1=0):
if not time1:
return time.time()
else:
interval = time.time() - time1
return time.time(), interval
def infer_npy_slide_extravoting(base_name):
feats = ['cooc_map','jc'] # ['cooc_map', 'jc']
time_step = 5 * 15
batch_size = 1
num_classes = 9
if pa.rnn_saved_jcla_folder != "":
jcla_path = os.path.join(pa.rnn_saved_jcla_folder, base_name + '.npy')
jcla_npy = np.load(jcla_path)
time_len = jcla_npy.shape[0]
jcla = np.zeros((jcla_npy.shape[0] + time_step - 1, jcla_npy.shape[1]))
jcla[(time_step - 1):,:] = jcla_npy
else:
joints_path = os.path.join(pa.rnn_saved_joints_folder, base_name + '.npy')
tjc_npy = np.load(joints_path)
time_len = tjc_npy.shape[0]
tjc = np.zeros((tjc_npy.shape[0] + time_step - 1, tjc_npy.shape[1], tjc_npy.shape[2]))
tjc[(time_step - 1):, :, :] = tjc_npy
cooc_path = os.path.join(pa.rnn_saved_cooc_folder, base_name + '.npy')
cooc_npy = np.load(cooc_path)
#cls_path = os.path.join(pa.label_abs_folder, base_name + '.csv')
cls_path = os.path.join(pa.rnn_predicted_abs_cls_folder, base_name + '.csv')
print("cls path:", cls_path)
cls_list = vu.load_label(cls_path)
cooc = np.zeros((cooc_npy.shape[0] + time_step - 1, cooc_npy.shape[1], cooc_npy.shape[2]))
cooc[(time_step - 1):, :, :] = cooc_npy
cooc = np.expand_dims(cooc, -1)
cls = np.zeros((len(cls_list) + time_step - 1, ))
cls[(time_step - 1):, ] = cls_list
runtime_profile = {
'rec': [],
'vote': [],
}
# batch_time_feature holder:
tf_btf_1 = tf.placeholder(tf.float32, [batch_size, time_step, 17, 17, 1])
tf_btf_2 = tf.placeholder(tf.float32, [batch_size, time_step, 30])
tf_btf_3 = tf.placeholder(tf.int32, [batch_size, time_step])
# batch_time label(classes) holder
tf_btl = tf.placeholder(tf.int32, [batch_size, time_step])
with tf.variable_scope("ges_net"):
# b t c(0/1)
btl_onehot = tf.one_hot(tf_btl, num_classes, axis=-1)
btf3_onehot = tf.one_hot(tf_btf_3, 5, axis=-1)
if len(feats) == 2:
pred = rnn_network.build_fusion_network_map([tf_btf_1, tf_btf_2, btf3_onehot], num_classes)
elif len(feats) == 1:
if 'jc' in feats:
pred = rnn_network.build_network_one_feat_cls([tf_btf_2, btf3_onehot], pa.lstm_units, num_classes, name='jc_net')
elif 'cooc_map' in feats:
pred = rnn_network.build_network_coocmap_cls([tf_btf_1, btf3_onehot], num_classes, name='coocmap_net')
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
rnn_saver = tf.train.Saver()
rnn_ckpt = tf.train.get_checkpoint_state(pa.checkpoint_path+'/best')
if rnn_ckpt:
rnn_saver.restore(sess, rnn_ckpt.model_checkpoint_path)
print("Loading weights from:", pa.checkpoint_path+'/best')
else:
raise RuntimeError("No check point save file.")
btc_pred = tf.transpose(pred, [1, 0, 2]) # TBC to BTC
bt_pred = tf.argmax(btc_pred, 2)
print("Save to:", pa.rnn_predicted_out_folder)
pred_list = []
M = -7
cand_list = []
num_step_desc = tqdm(range(time_len))
for step in num_step_desc:
ckpt_time = getTime()
btf_3 = np.zeros((batch_size, time_step))
btf_3[0, :] = cls[step:step + time_step]
if len(feats) == 2:
btf_1 = np.zeros((batch_size, time_step, 17, 17, 1))
btf_1[0, :, :, :, :] = cooc[step:step + time_step, :, :, :]
if pa.rnn_saved_jcla_folder != "":
btf_2 = np.zeros((batch_size, time_step, jcla.shape[-1]))
btf_2[0, :, :] = jcla[step:step + time_step, :]
else:
btjc = np.zeros((batch_size, time_step, 18, 3))
btjc[0, :, :, :] = tjc[step:step + time_step, :, :]
btf_2 = vu.extract_less_bone_length_joint_angle_sign(btjc)
feed_dict = {tf_btf_1: btf_1, tf_btf_2: btf_2, tf_btf_3: btf_3}
elif len(feats) == 1:
if "cooc_map" in feats:
btf_1 = np.zeros((batch_size, time_step, 17, 17, 1))
btf_1[0, :, :, :, :] = cooc[step:step + time_step, :, :, :]
feed_dict = {tf_btf_1: btf_1, tf_btf_3: btf_3}
else:
if pa.rnn_saved_jcla_folder != "":
btf_2 = np.zeros((batch_size, time_step, jcla.shape[-1]))
btf_2[0, :, :] = jcla[step:step + time_step, :]
else:
btjc = np.zeros((batch_size, time_step, 18, 3))
btjc[0, :, :, :] = tjc[step:step + time_step, :, :]
btf_2 = vu.extract_less_bone_length_joint_angle_sign(btjc)
feed_dict = {tf_btf_2: btf_2, tf_btf_3: btf_3}
bt_pred_num = sess.run(bt_pred, feed_dict=feed_dict)
pred_result = bt_pred_num[0, M:]
ckpt_time, rec_time = getTime(ckpt_time)
runtime_profile['rec'].append(rec_time)
voting_result = max_voting(pred_result)
cand_list.append(voting_result)
if len(cand_list) < 7:
pred_list.append(voting_result)
else:
assert len(cand_list) == 7
sec_voting_result = max_voting(cand_list)
pred_list.append(sec_voting_result)
del cand_list[0]
ckpt_time, vote_time = getTime(ckpt_time)
runtime_profile['vote'].append(vote_time)
num_step_desc.set_description('rec time: {rec:.4f}'.format(rec=np.mean(runtime_profile['rec'])))
def save_label(label_list, csv_file):
"""
:param label_list: a list of int
:param csv_file:
:return:
"""
str_list = ["%d" % e for e in label_list]
str_line = ",".join(str_list)
with open(csv_file, 'w') as label_file:
label_file.write(str_line)
print("saved: %s" % csv_file)
save_path = os.path.join(pa.rnn_predicted_out_folder, base_name + ".csv")
save_label(pred_list, save_path)
def max_voting(voting_list):
"""
:param voting_list: a list of int
:return: the most common value
"""
if not isinstance(voting_list, list):
voting_list = voting_list.tolist()
voting_results = {}
for i in voting_list:
voting_results = update_dict_count(i, voting_results)
voting_tuple = sorted(zip(voting_results.values(), voting_results.keys()))
max_value = voting_tuple[-1][0]
max_candidates = []
for j in voting_results.keys():
if voting_results[j] == max_value:
max_candidates.append(j)
if len(max_candidates) == 1:
return max_candidates[0]
else:
voting_list_reverse = voting_list[::-1]
idx = [voting_list_reverse.index(x) for x in max_candidates]
output = voting_list_reverse[min(idx)]
return output
def update_dict_count(key, dict):
if key not in dict.keys():
dict[key] = 1
else:
dict[key] += 1
return dict
def predict_from_test_folder():
with open(pa.test_list) as f:
csv_list_test = f.readlines()
for label_path in tqdm(csv_list_test):
base_folder = label_path[-8:-5]
print("Predict: %s" % base_folder)
tf.reset_default_graph()
infer_npy_slide_extravoting(base_folder)
def run_edit_distance_on_predict_out():
with open(pa.test_list) as f:
label_files = f.readlines()
sum_n, sum_i, sum_d, sum_s = 0, 0, 0, 0
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label(ges_path)
pred_label = vu.load_label(pred_path)
gt_group = itertools.groupby(gt_label)
gt_group = [k for k, g in gt_group]
pred_group = itertools.groupby(pred_label)
pred_group = [k for k, g in pred_group]
S, D, I = ed.SDI(pred_group, gt_group)
N = len(gt_group)
acc = (N - I - D - S) / N
print("%s - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (pred_name, N, S, D, I, acc))
# Sum
sum_n = sum_n + N
sum_i = sum_i + I
sum_d = sum_d + D
sum_s = sum_s + S
sum_acc = (sum_n - sum_i - sum_d - sum_s) / sum_n
print("OVERALL - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (sum_n, sum_s, sum_d, sum_i, sum_acc))
def compute_f1score():
with open(pa.test_list) as f:
label_files = f.readlines()
gt_list = []
pred_list = []
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label(ges_path)
pred_label = vu.load_label(pred_path)
gt_list.extend(gt_label)
pred_list.extend(pred_label)
precision = precision_score(gt_list, pred_list, average="macro")
recall = recall_score(gt_list, pred_list, average="macro")
accuracy = accuracy_score(gt_list, pred_list)
f1score = f1_score(gt_list, pred_list, average="macro")
print("OVERALL precision -", precision)
print("OVERALL recall -", recall)
print("OVERALL accuracy -", accuracy)
print("OVERALL f1score -", f1score)
def command_edit_accuracy():
with open(pa.test_list) as f:
label_files = f.readlines()
sum_n, sum_i, sum_d, sum_s = 0, 0, 0, 0
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
cls_path = os.path.join(pa.label_abs_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label_cls_ges(cls_path, ges_path, 0)
pred_cls_path = os.path.join(pa.rnn_predicted_abs_cls_folder, pred_name)
pred_label = vu.load_label_cls_ges(pred_cls_path, pred_path, pa.label_delay_frames)
gt_group = itertools.groupby(gt_label)
gt_group = [k for k, g in gt_group]
pred_group = itertools.groupby(pred_label)
pred_group = [k for k, g in pred_group]
S, D, I = ed.SDI(pred_group, gt_group)
N = len(gt_group)
acc = (N - I - D - S) / N
print("%s - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (pred_name, N, S, D, I, acc))
# Sum
sum_n = sum_n + N
sum_i = sum_i + I
sum_d = sum_d + D
sum_s = sum_s + S
sum_acc = (sum_n - sum_i - sum_d - sum_s) / sum_n
print("OVERALL - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (sum_n, sum_s, sum_d, sum_i, sum_acc))
def command_f1_score():
with open(pa.test_list) as f:
label_files = f.readlines()
gt_list = []
pred_list = []
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
cls_path = os.path.join(pa.label_abs_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label_cls_ges(cls_path, ges_path, 0)
pred_cls_path = os.path.join(pa.rnn_predicted_abs_cls_folder, pred_name)
pred_label = vu.load_label_cls_ges(pred_cls_path, pred_path, pa.label_delay_frames)
gt_list.extend(gt_label)
pred_list.extend(pred_label)
precision = precision_score(gt_list, pred_list, average="macro")
recall = recall_score(gt_list, pred_list, average="macro")
accuracy = accuracy_score(gt_list, pred_list)
f1score = f1_score(gt_list, pred_list, average="macro")
print("OVERALL precision -", precision)
print("OVERALL recall -", recall)
print("OVERALL accuracy -", accuracy)
print("OVERALL f1score -", f1score)
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser(description='detect gestures')
parser.add_argument("-p", help="Predict videos from test folder", default=False, action="store_true")
parser.add_argument("-e", help="Compute Edit Distance of predicted labels and ground truth labels", default=False,
action="store_true")
parser.add_argument("-f", help="Compute F1 score",
default=False, action="store_true")
parser.add_argument("-ce", help="Compute command edit accuracy",
default=False, action="store_true")
parser.add_argument("-cf", help="Compute command f1 score",
default=False, action="store_true")
args = parser.parse_args()
if args.p:
predict_from_test_folder()
elif args.e:
run_edit_distance_on_predict_out()
elif args.f:
compute_f1score()
elif args.ce:
command_edit_accuracy()
elif args.cf:
command_f1_score()
else:
print("Please specify an argument.")
|
import glob
import tensorflow as tf
import sys
import parameters as pa
import rnn_network
import argparse
import numpy as np
import os
import video_utils as vu
import metrics.edit_distance as ed
import itertools
import time
from tqdm import tqdm
import scipy.spatial.distance as dist
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
def getTime(time1=0):
if not time1:
return time.time()
else:
interval = time.time() - time1
return time.time(), interval
def infer_npy_slide_extravoting(base_name):
feats = ['cooc_map','jc'] # ['cooc_map', 'jc']
time_step = 5 * 15
batch_size = 1
num_classes = 9
if pa.rnn_saved_jcla_folder != "":
jcla_path = os.path.join(pa.rnn_saved_jcla_folder, base_name + '.npy')
jcla_npy = np.load(jcla_path)
time_len = jcla_npy.shape[0]
jcla = np.zeros((jcla_npy.shape[0] + time_step - 1, jcla_npy.shape[1]))
jcla[(time_step - 1):,:] = jcla_npy
else:
joints_path = os.path.join(pa.rnn_saved_joints_folder, base_name + '.npy')
tjc_npy = np.load(joints_path)
time_len = tjc_npy.shape[0]
tjc = np.zeros((tjc_npy.shape[0] + time_step - 1, tjc_npy.shape[1], tjc_npy.shape[2]))
tjc[(time_step - 1):, :, :] = tjc_npy
cooc_path = os.path.join(pa.rnn_saved_cooc_folder, base_name + '.npy')
cooc_npy = np.load(cooc_path)
#cls_path = os.path.join(pa.label_abs_folder, base_name + '.csv')
cls_path = os.path.join(pa.rnn_predicted_abs_cls_folder, base_name + '.csv')
print("cls path:", cls_path)
cls_list = vu.load_label(cls_path)
cooc = np.zeros((cooc_npy.shape[0] + time_step - 1, cooc_npy.shape[1], cooc_npy.shape[2]))
cooc[(time_step - 1):, :, :] = cooc_npy
cooc = np.expand_dims(cooc, -1)
cls = np.zeros((len(cls_list) + time_step - 1, ))
cls[(time_step - 1):, ] = cls_list
runtime_profile = {
'rec': [],
'vote': [],
}
# batch_time_feature holder:
tf_btf_1 = tf.placeholder(tf.float32, [batch_size, time_step, 17, 17, 1])
tf_btf_2 = tf.placeholder(tf.float32, [batch_size, time_step, 30])
tf_btf_3 = tf.placeholder(tf.int32, [batch_size, time_step])
# batch_time label(classes) holder
tf_btl = tf.placeholder(tf.int32, [batch_size, time_step])
with tf.variable_scope("ges_net"):
# b t c(0/1)
btl_onehot = tf.one_hot(tf_btl, num_classes, axis=-1)
btf3_onehot = tf.one_hot(tf_btf_3, 5, axis=-1)
if len(feats) == 2:
pred = rnn_network.build_fusion_network_map([tf_btf_1, tf_btf_2, btf3_onehot], num_classes)
elif len(feats) == 1:
if 'jc' in feats:
pred = rnn_network.build_network_one_feat_cls([tf_btf_2, btf3_onehot], pa.lstm_units, num_classes, name='jc_net')
elif 'cooc_map' in feats:
pred = rnn_network.build_network_coocmap_cls([tf_btf_1, btf3_onehot], num_classes, name='coocmap_net')
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
rnn_saver = tf.train.Saver()
rnn_ckpt = tf.train.get_checkpoint_state(pa.checkpoint_path+'/best')
if rnn_ckpt:
rnn_saver.restore(sess, rnn_ckpt.model_checkpoint_path)
print("Loading weights from:", pa.checkpoint_path+'/best')
else:
raise RuntimeError("No check point save file.")
btc_pred = tf.transpose(pred, [1, 0, 2]) # TBC to BTC
bt_pred = tf.argmax(btc_pred, 2)
print("Save to:", pa.rnn_predicted_out_folder)
pred_list = []
M = -7
cand_list = []
num_step_desc = tqdm(range(time_len))
for step in num_step_desc:
ckpt_time = getTime()
btf_3 = np.zeros((batch_size, time_step))
btf_3[0, :] = cls[step:step + time_step]
if len(feats) == 2:
btf_1 = np.zeros((batch_size, time_step, 17, 17, 1))
btf_1[0, :, :, :, :] = cooc[step:step + time_step, :, :, :]
if pa.rnn_saved_jcla_folder != "":
btf_2 = np.zeros((batch_size, time_step, jcla.shape[-1]))
btf_2[0, :, :] = jcla[step:step + time_step, :]
else:
btjc = np.zeros((batch_size, time_step, 18, 3))
btjc[0, :, :, :] = tjc[step:step + time_step, :, :]
btf_2 = vu.extract_less_bone_length_joint_angle_sign(btjc)
feed_dict = {tf_btf_1: btf_1, tf_btf_2: btf_2, tf_btf_3: btf_3}
elif len(feats) == 1:
if "cooc_map" in feats:
btf_1 = np.zeros((batch_size, time_step, 17, 17, 1))
btf_1[0, :, :, :, :] = cooc[step:step + time_step, :, :, :]
feed_dict = {tf_btf_1: btf_1, tf_btf_3: btf_3}
else:
if pa.rnn_saved_jcla_folder != "":
btf_2 = np.zeros((batch_size, time_step, jcla.shape[-1]))
btf_2[0, :, :] = jcla[step:step + time_step, :]
else:
btjc = np.zeros((batch_size, time_step, 18, 3))
btjc[0, :, :, :] = tjc[step:step + time_step, :, :]
btf_2 = vu.extract_less_bone_length_joint_angle_sign(btjc)
feed_dict = {tf_btf_2: btf_2, tf_btf_3: btf_3}
bt_pred_num = sess.run(bt_pred, feed_dict=feed_dict)
pred_result = bt_pred_num[0, M:]
ckpt_time, rec_time = getTime(ckpt_time)
runtime_profile['rec'].append(rec_time)
voting_result = max_voting(pred_result)
cand_list.append(voting_result)
if len(cand_list) < 7:
pred_list.append(voting_result)
else:
assert len(cand_list) == 7
sec_voting_result = max_voting(cand_list)
pred_list.append(sec_voting_result)
del cand_list[0]
ckpt_time, vote_time = getTime(ckpt_time)
runtime_profile['vote'].append(vote_time)
num_step_desc.set_description('rec time: {rec:.4f}'.format(rec=np.mean(runtime_profile['rec'])))
def save_label(label_list, csv_file):
"""
:param label_list: a list of int
:param csv_file:
:return:
"""
str_list = ["%d" % e for e in label_list]
str_line = ",".join(str_list)
with open(csv_file, 'w') as label_file:
label_file.write(str_line)
print("saved: %s" % csv_file)
save_path = os.path.join(pa.rnn_predicted_out_folder, base_name + ".csv")
save_label(pred_list, save_path)
def max_voting(voting_list):
"""
:param voting_list: a list of int
:return: the most common value
"""
if not isinstance(voting_list, list):
voting_list = voting_list.tolist()
voting_results = {}
for i in voting_list:
voting_results = update_dict_count(i, voting_results)
voting_tuple = sorted(zip(voting_results.values(), voting_results.keys()))
max_value = voting_tuple[-1][0]
max_candidates = []
for j in voting_results.keys():
if voting_results[j] == max_value:
max_candidates.append(j)
if len(max_candidates) == 1:
return max_candidates[0]
else:
voting_list_reverse = voting_list[::-1]
idx = [voting_list_reverse.index(x) for x in max_candidates]
output = voting_list_reverse[min(idx)]
return output
def update_dict_count(key, dict):
if key not in dict.keys():
dict[key] = 1
else:
dict[key] += 1
return dict
def predict_from_test_folder():
with open(pa.test_list) as f:
csv_list_test = f.readlines()
for label_path in tqdm(csv_list_test):
base_folder = label_path[-8:-5]
print("Predict: %s" % base_folder)
tf.reset_default_graph()
infer_npy_slide_extravoting(base_folder)
def run_edit_distance_on_predict_out():
with open(pa.test_list) as f:
label_files = f.readlines()
sum_n, sum_i, sum_d, sum_s = 0, 0, 0, 0
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label(ges_path)
pred_label = vu.load_label(pred_path)
gt_group = itertools.groupby(gt_label)
gt_group = [k for k, g in gt_group]
pred_group = itertools.groupby(pred_label)
pred_group = [k for k, g in pred_group]
S, D, I = ed.SDI(pred_group, gt_group)
N = len(gt_group)
acc = (N - I - D - S) / N
print("%s - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (pred_name, N, S, D, I, acc))
# Sum
sum_n = sum_n + N
sum_i = sum_i + I
sum_d = sum_d + D
sum_s = sum_s + S
sum_acc = (sum_n - sum_i - sum_d - sum_s) / sum_n
print("OVERALL - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (sum_n, sum_s, sum_d, sum_i, sum_acc))
def compute_f1score():
with open(pa.test_list) as f:
label_files = f.readlines()
gt_list = []
pred_list = []
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label(ges_path)
pred_label = vu.load_label(pred_path)
gt_list.extend(gt_label)
pred_list.extend(pred_label)
precision = precision_score(gt_list, pred_list, average="macro")
recall = recall_score(gt_list, pred_list, average="macro")
accuracy = accuracy_score(gt_list, pred_list)
f1score = f1_score(gt_list, pred_list, average="macro")
print("OVERALL precision -", precision)
print("OVERALL recall -", recall)
print("OVERALL accuracy -", accuracy)
print("OVERALL f1score -", f1score)
def command_edit_accuracy():
with open(pa.test_list) as f:
label_files = f.readlines()
sum_n, sum_i, sum_d, sum_s = 0, 0, 0, 0
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
cls_path = os.path.join(pa.label_abs_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label_cls_ges(cls_path, ges_path, 0)
pred_cls_path = os.path.join(pa.rnn_predicted_abs_cls_folder, pred_name)
pred_label = vu.load_label_cls_ges(pred_cls_path, pred_path, pa.label_delay_frames)
gt_group = itertools.groupby(gt_label)
gt_group = [k for k, g in gt_group]
pred_group = itertools.groupby(pred_label)
pred_group = [k for k, g in pred_group]
S, D, I = ed.SDI(pred_group, gt_group)
N = len(gt_group)
acc = (N - I - D - S) / N
print("%s - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (pred_name, N, S, D, I, acc))
# Sum
sum_n = sum_n + N
sum_i = sum_i + I
sum_d = sum_d + D
sum_s = sum_s + S
sum_acc = (sum_n - sum_i - sum_d - sum_s) / sum_n
print("OVERALL - N:%d S:%d, D:%d, I:%d, ACC:%.4f" % (sum_n, sum_s, sum_d, sum_i, sum_acc))
def command_f1_score():
with open(pa.test_list) as f:
label_files = f.readlines()
gt_list = []
pred_list = []
for label in label_files:
label = label[:-1]
pred_name = os.path.basename(label)
pred_path = os.path.join(pa.rnn_predicted_out_folder, pred_name)
cls_path = os.path.join(pa.label_abs_folder, pred_name)
ges_path = os.path.join(pa.label_ges_folder, pred_name)
gt_label = vu.load_label_cls_ges(cls_path, ges_path, 0)
pred_cls_path = os.path.join(pa.rnn_predicted_abs_cls_folder, pred_name)
pred_label = vu.load_label_cls_ges(pred_cls_path, pred_path, pa.label_delay_frames)
gt_list.extend(gt_label)
pred_list.extend(pred_label)
precision = precision_score(gt_list, pred_list, average="macro")
recall = recall_score(gt_list, pred_list, average="macro")
accuracy = accuracy_score(gt_list, pred_list)
f1score = f1_score(gt_list, pred_list, average="macro")
print("OVERALL precision -", precision)
print("OVERALL recall -", recall)
print("OVERALL accuracy -", accuracy)
print("OVERALL f1score -", f1score)
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser(description='detect gestures')
parser.add_argument("-p", help="Predict videos from test folder", default=False, action="store_true")
parser.add_argument("-e", help="Compute Edit Distance of predicted labels and ground truth labels", default=False,
action="store_true")
parser.add_argument("-f", help="Compute F1 score",
default=False, action="store_true")
parser.add_argument("-ce", help="Compute command edit accuracy",
default=False, action="store_true")
parser.add_argument("-cf", help="Compute command f1 score",
default=False, action="store_true")
args = parser.parse_args()
if args.p:
predict_from_test_folder()
elif args.e:
run_edit_distance_on_predict_out()
elif args.f:
compute_f1score()
elif args.ce:
command_edit_accuracy()
elif args.cf:
command_f1_score()
else:
print("Please specify an argument.")
|
en
| 0.597734
|
# ['cooc_map', 'jc'] #cls_path = os.path.join(pa.label_abs_folder, base_name + '.csv') # batch_time_feature holder: # batch_time label(classes) holder # b t c(0/1) # TBC to BTC :param label_list: a list of int
:param csv_file:
:return: :param voting_list: a list of int
:return: the most common value # Sum # Sum
| 1.781272
| 2
|
coding/learn_import/importlib_01_demo.py
|
yatao91/learning_road
| 3
|
6629582
|
<filename>coding/learn_import/importlib_01_demo.py
# -*- coding: utf-8 -*-
import importlib
import sys
print(sys.modules)
|
<filename>coding/learn_import/importlib_01_demo.py
# -*- coding: utf-8 -*-
import importlib
import sys
print(sys.modules)
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.607624
| 2
|
config.py
|
maxowner1024/w12scan-client
| 0
|
6629583
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/21 10:08 PM
# @Author : w8ay
# @File : config.py
# 程序运行的线程数
import os
THREAD_NUM = 25
DEBUG = False
# LOGGER_LEVEL = 1 if DEBUG else 2
LOGGER_LEVEL = 1
# Ip的缓存数量
NUM_CACHE_IP = 77
# 域名的缓存数量
NUM_CACHE_DOMAIN = 2
# Masscan相关
MASSCAN_RATE = 3000 # masscan 的速率
MASSCAN_DEFAULT_PORT = "21,22,23,80-90,161,389,443,445,873,1099,1433,1521,1900,2082,2083,2222,2601,2604,3128,3306," \
"3311,3312,3389,4440,4848,5432,5560,5900,5901,5902,6082,6379,7001-7010,7778,8080-8090,8649," \
"8888,9000,9200,10000,11211,27017,28017,50000,50030,50060 "
MASSCAN_FULL_SCAN = True # 是否扫描全端口
# WEB Restful接口地址
WEB_INTERFACE = os.environ.get("WEB_INTERFACE", default="http://127.0.0.1:8000/")
WEB_INTERFACE_KEY = "hello-w12scan!"
# WEB POCS repository 提供指纹识别对应的poc仓库
WEB_REPOSITORY = "https://github.com/boy-hack/airbug"
# 是否用 plugins目录下的插件进行扫描探,为false将不会进行探测和使用airbug项目进行攻击
IS_START_PLUGINS = True
# reids数据库
REDIS_HOST = os.environ.get("REDIS_HOST", default="127.0.0.1:6379")
REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", default="")
# 该扫描节点的名称(自定义)
NODE_NAME = "w12_node_{0}".format(os.environ.get("NODE_NAME", "bilibibi"))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/21 10:08 PM
# @Author : w8ay
# @File : config.py
# 程序运行的线程数
import os
THREAD_NUM = 25
DEBUG = False
# LOGGER_LEVEL = 1 if DEBUG else 2
LOGGER_LEVEL = 1
# Ip的缓存数量
NUM_CACHE_IP = 77
# 域名的缓存数量
NUM_CACHE_DOMAIN = 2
# Masscan相关
MASSCAN_RATE = 3000 # masscan 的速率
MASSCAN_DEFAULT_PORT = "21,22,23,80-90,161,389,443,445,873,1099,1433,1521,1900,2082,2083,2222,2601,2604,3128,3306," \
"3311,3312,3389,4440,4848,5432,5560,5900,5901,5902,6082,6379,7001-7010,7778,8080-8090,8649," \
"8888,9000,9200,10000,11211,27017,28017,50000,50030,50060 "
MASSCAN_FULL_SCAN = True # 是否扫描全端口
# WEB Restful接口地址
WEB_INTERFACE = os.environ.get("WEB_INTERFACE", default="http://127.0.0.1:8000/")
WEB_INTERFACE_KEY = "hello-w12scan!"
# WEB POCS repository 提供指纹识别对应的poc仓库
WEB_REPOSITORY = "https://github.com/boy-hack/airbug"
# 是否用 plugins目录下的插件进行扫描探,为false将不会进行探测和使用airbug项目进行攻击
IS_START_PLUGINS = True
# reids数据库
REDIS_HOST = os.environ.get("REDIS_HOST", default="127.0.0.1:6379")
REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", default="")
# 该扫描节点的名称(自定义)
NODE_NAME = "w12_node_{0}".format(os.environ.get("NODE_NAME", "bilibibi"))
|
zh
| 0.735797
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 2019/1/21 10:08 PM # @Author : w8ay # @File : config.py # 程序运行的线程数 # LOGGER_LEVEL = 1 if DEBUG else 2 # Ip的缓存数量 # 域名的缓存数量 # Masscan相关 # masscan 的速率 # 是否扫描全端口 # WEB Restful接口地址 # WEB POCS repository 提供指纹识别对应的poc仓库 # 是否用 plugins目录下的插件进行扫描探,为false将不会进行探测和使用airbug项目进行攻击 # reids数据库 # 该扫描节点的名称(自定义)
| 1.899176
| 2
|
scripts/chi2014/send_email.py
|
soyapark-kaist/confer
| 1
|
6629584
|
#!/usr/bin/python
import sys, os, operator, smtplib, re
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send_email(addr, subject, msg_body):
email_subject = subject
from_addr="<EMAIL>"
to_addr = [addr]
msg = MIMEMultipart()
msg['From'] = 'Confer Team <<EMAIL>>'
msg['To'] = addr
msg['Subject'] = email_subject
msg.attach(MIMEText(msg_body))
smtp_conn = smtplib.SMTP_SSL('localhost', 25)
smtp_conn.sendmail(from_addr, to_addr, msg.as_string())
smtp_conn.close()
def send_survey_email():
f = open(sys.argv[1]).read()
names = re.split('\n', f)
subject = "Confer@CHI 2014 -- make interesting connections!"
for name in names:
tokens = re.split(',', name.strip())
tokens = map(lambda x: x.strip(), tokens)
print tokens
msg_body = """
Dear %s,
We're pleased that you're using Confer to mark the papers you want to see at CHI 2014. Did you know Confer can also introduce you to *people* you ought to meet while you're there? Confer has identified a number of individuals whose paper selections suggest that they share your research interests. If you enable Confer's meetups feature, these people will be able to find you and introduce themselves! Just go to http://confer.csail.mit.edu/chi2014/meetups and enable the meetups feature to start making some interesting connections at CHI 2014.
Best,
The Confer Team
<EMAIL>
""" %(tokens[1])
send_email(tokens[0], subject, msg_body)
def main():
send_survey_email()
if __name__ == '__main__':
main()
|
#!/usr/bin/python
import sys, os, operator, smtplib, re
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send_email(addr, subject, msg_body):
email_subject = subject
from_addr="<EMAIL>"
to_addr = [addr]
msg = MIMEMultipart()
msg['From'] = 'Confer Team <<EMAIL>>'
msg['To'] = addr
msg['Subject'] = email_subject
msg.attach(MIMEText(msg_body))
smtp_conn = smtplib.SMTP_SSL('localhost', 25)
smtp_conn.sendmail(from_addr, to_addr, msg.as_string())
smtp_conn.close()
def send_survey_email():
f = open(sys.argv[1]).read()
names = re.split('\n', f)
subject = "Confer@CHI 2014 -- make interesting connections!"
for name in names:
tokens = re.split(',', name.strip())
tokens = map(lambda x: x.strip(), tokens)
print tokens
msg_body = """
Dear %s,
We're pleased that you're using Confer to mark the papers you want to see at CHI 2014. Did you know Confer can also introduce you to *people* you ought to meet while you're there? Confer has identified a number of individuals whose paper selections suggest that they share your research interests. If you enable Confer's meetups feature, these people will be able to find you and introduce themselves! Just go to http://confer.csail.mit.edu/chi2014/meetups and enable the meetups feature to start making some interesting connections at CHI 2014.
Best,
The Confer Team
<EMAIL>
""" %(tokens[1])
send_email(tokens[0], subject, msg_body)
def main():
send_survey_email()
if __name__ == '__main__':
main()
|
en
| 0.937394
|
#!/usr/bin/python Dear %s, We're pleased that you're using Confer to mark the papers you want to see at CHI 2014. Did you know Confer can also introduce you to *people* you ought to meet while you're there? Confer has identified a number of individuals whose paper selections suggest that they share your research interests. If you enable Confer's meetups feature, these people will be able to find you and introduce themselves! Just go to http://confer.csail.mit.edu/chi2014/meetups and enable the meetups feature to start making some interesting connections at CHI 2014. Best, The Confer Team <EMAIL>
| 3.498651
| 3
|
gfirefly/netconnect/manager.py
|
handsome3163/H2Dgame-Firefly
| 675
|
6629585
|
#coding:utf8
'''
Created on 2014-2-23
连接管理器
@author: lan (www.9miao.com)
'''
from gtwisted.utils import log
from connection import Connection
class ConnectionManager:
''' 连接管理器
@param _connections: dict {connID:conn Object}管理的所有连接
'''
def __init__(self):
'''初始化
@param _connections: dict {connID:conn Object}
'''
self._connections = {}
def getNowConnCnt(self):
'''获取当前连接数量'''
return len(self._connections.items())
def addConnection(self, conn):
'''加入一条连接
@param _conn: Conn object
'''
_conn = Connection(conn)
if self._connections.has_key(_conn.id):
raise Exception("系统记录冲突")
self._connections[_conn.id] = _conn
def dropConnectionByID(self, connID):
'''更加连接的id删除连接实例
@param connID: int 连接的id
'''
try:
del self._connections[connID]
except Exception as e:
log.msg(str(e))
def getConnectionByID(self, connID):
"""根据ID获取一条连接
@param connID: int 连接的id
"""
return self._connections.get(connID,None)
def loseConnection(self,connID):
"""根据连接ID主动端口与客户端的连接
"""
conn = self.getConnectionByID(connID)
if conn:
conn.loseConnection()
def pushObject(self,topicID , msg, sendList):
"""主动推送消息
"""
for target in sendList:
try:
conn = self.getConnectionByID(target)
if conn:
conn.safeToWriteData(topicID,msg)
except Exception,e:
log.err(str(e))
|
#coding:utf8
'''
Created on 2014-2-23
连接管理器
@author: lan (www.9miao.com)
'''
from gtwisted.utils import log
from connection import Connection
class ConnectionManager:
''' 连接管理器
@param _connections: dict {connID:conn Object}管理的所有连接
'''
def __init__(self):
'''初始化
@param _connections: dict {connID:conn Object}
'''
self._connections = {}
def getNowConnCnt(self):
'''获取当前连接数量'''
return len(self._connections.items())
def addConnection(self, conn):
'''加入一条连接
@param _conn: Conn object
'''
_conn = Connection(conn)
if self._connections.has_key(_conn.id):
raise Exception("系统记录冲突")
self._connections[_conn.id] = _conn
def dropConnectionByID(self, connID):
'''更加连接的id删除连接实例
@param connID: int 连接的id
'''
try:
del self._connections[connID]
except Exception as e:
log.msg(str(e))
def getConnectionByID(self, connID):
"""根据ID获取一条连接
@param connID: int 连接的id
"""
return self._connections.get(connID,None)
def loseConnection(self,connID):
"""根据连接ID主动端口与客户端的连接
"""
conn = self.getConnectionByID(connID)
if conn:
conn.loseConnection()
def pushObject(self,topicID , msg, sendList):
"""主动推送消息
"""
for target in sendList:
try:
conn = self.getConnectionByID(target)
if conn:
conn.safeToWriteData(topicID,msg)
except Exception,e:
log.err(str(e))
|
zh
| 0.691325
|
#coding:utf8 Created on 2014-2-23 连接管理器 @author: lan (www.9miao.com) 连接管理器 @param _connections: dict {connID:conn Object}管理的所有连接 初始化 @param _connections: dict {connID:conn Object} 获取当前连接数量 加入一条连接 @param _conn: Conn object 更加连接的id删除连接实例 @param connID: int 连接的id 根据ID获取一条连接 @param connID: int 连接的id 根据连接ID主动端口与客户端的连接 主动推送消息
| 2.42632
| 2
|
trydjango_1/Blog/url.py
|
KumarPython/Django-Projects
| 0
|
6629586
|
from django.urls import path
from .views import ArticleListView,ArticleDetailView,ArticleCreateView,ArticleUpdateView,ArticleDeleteView
app_name='blog'
urlpatterns = [
path('', ArticleListView.as_view(), name='article-list'),
path('<int:pk>/', ArticleDetailView.as_view(), name='article-detail'),
path('<int:pk>/update/', ArticleUpdateView.as_view(), name='article-update'),
path('<int:pk>/delete/', ArticleDeleteView.as_view(), name='article-delete'),
path('create/', ArticleCreateView.as_view(), name='article-create'),
]
|
from django.urls import path
from .views import ArticleListView,ArticleDetailView,ArticleCreateView,ArticleUpdateView,ArticleDeleteView
app_name='blog'
urlpatterns = [
path('', ArticleListView.as_view(), name='article-list'),
path('<int:pk>/', ArticleDetailView.as_view(), name='article-detail'),
path('<int:pk>/update/', ArticleUpdateView.as_view(), name='article-update'),
path('<int:pk>/delete/', ArticleDeleteView.as_view(), name='article-delete'),
path('create/', ArticleCreateView.as_view(), name='article-create'),
]
|
none
| 1
| 1.885908
| 2
|
|
src/controllers/usuario.py
|
erickymoreno/Site-Secretaria-de-Obras
| 0
|
6629587
|
from src import app, db, login_manager
from flask import render_template, redirect, url_for, request, session
from flask_login import login_required, current_user, login_user, logout_user
from src.models.tables import Usuario, Endereco
import bcrypt, sys
@app.route("/cadastro", methods=["GET", "POST"])
def cadastro():
if request.method =="GET":
mensagem = request.args.get("mensagem")
return render_template("cadastro.html")
if request.method == "POST":
nome = request.form["inputNome"]
nome+= " " + request.form["inputSobrenome"]
cpf = request.form["inputCpf"]
data_de_nascimento = request.form["inputDataDeNascimento"]
email = request.form["inputEmail"]
if request.form["inputSenha"] == request.form["inputConfirmaSenha"]:
senha = request.form["inputSenha"]
senhaEcriptada = bcrypt.hashpw(senha.encode("UTF-8"), bcrypt.gensalt())
else:
mensagem = "As senhas não correspondem"
return render_template("cadastro.html", mensagem=mensagem)
cep = request.form["inputCep"]
rua = request.form["inputRua"]
bairro = request.form["inputBairro"]
numero = request.form["inputNumero"]
referencia = request.form["inputReferencia"]
telefone = request.form["inputTelefone"]
usuario = Usuario(nome=nome, cpf=cpf, data_de_nascimento=data_de_nascimento, email=email, senha=<PASSWORD>haEcriptada, telefone=telefone)
db.session.add(usuario)
db.session.commit()
endereco= Endereco(rua=rua, numero=numero, bairro=bairro, cep=cep, referencia=referencia, usuario_id=usuario.id)
db.session.add(endereco)
db.session.commit()
return render_template("cadastro.html")
@login_manager.user_loader
def get_user(usuario_id):
return Usuario.query.filter_by(id=usuario_id).first()
@app.route("/login", methods=["POST", "GET"])
def login():
if request.method == "GET":
mensagem = request.args.get("mensagem")
return render_template("login.html", mensagem=mensagem)
if request.method == "POST":
email = request.form["inputEmail"]
senha = request.form["inputSenha"]
usuario = Usuario.query.filter_by(email=email).first()
autozidado = False
if usuario:
autorizado = bcrypt.checkpw(
senha.encode("utf8"), usuario.senha.encode("utf8")
)
if not usuario or not autorizado:
mensagem = "Email ou senha incorreto"
return render_template("login.html", mensagem=mensagem)
else:
login_user(usuario)
return redirect("/home")
|
from src import app, db, login_manager
from flask import render_template, redirect, url_for, request, session
from flask_login import login_required, current_user, login_user, logout_user
from src.models.tables import Usuario, Endereco
import bcrypt, sys
@app.route("/cadastro", methods=["GET", "POST"])
def cadastro():
if request.method =="GET":
mensagem = request.args.get("mensagem")
return render_template("cadastro.html")
if request.method == "POST":
nome = request.form["inputNome"]
nome+= " " + request.form["inputSobrenome"]
cpf = request.form["inputCpf"]
data_de_nascimento = request.form["inputDataDeNascimento"]
email = request.form["inputEmail"]
if request.form["inputSenha"] == request.form["inputConfirmaSenha"]:
senha = request.form["inputSenha"]
senhaEcriptada = bcrypt.hashpw(senha.encode("UTF-8"), bcrypt.gensalt())
else:
mensagem = "As senhas não correspondem"
return render_template("cadastro.html", mensagem=mensagem)
cep = request.form["inputCep"]
rua = request.form["inputRua"]
bairro = request.form["inputBairro"]
numero = request.form["inputNumero"]
referencia = request.form["inputReferencia"]
telefone = request.form["inputTelefone"]
usuario = Usuario(nome=nome, cpf=cpf, data_de_nascimento=data_de_nascimento, email=email, senha=<PASSWORD>haEcriptada, telefone=telefone)
db.session.add(usuario)
db.session.commit()
endereco= Endereco(rua=rua, numero=numero, bairro=bairro, cep=cep, referencia=referencia, usuario_id=usuario.id)
db.session.add(endereco)
db.session.commit()
return render_template("cadastro.html")
@login_manager.user_loader
def get_user(usuario_id):
return Usuario.query.filter_by(id=usuario_id).first()
@app.route("/login", methods=["POST", "GET"])
def login():
if request.method == "GET":
mensagem = request.args.get("mensagem")
return render_template("login.html", mensagem=mensagem)
if request.method == "POST":
email = request.form["inputEmail"]
senha = request.form["inputSenha"]
usuario = Usuario.query.filter_by(email=email).first()
autozidado = False
if usuario:
autorizado = bcrypt.checkpw(
senha.encode("utf8"), usuario.senha.encode("utf8")
)
if not usuario or not autorizado:
mensagem = "Email ou senha incorreto"
return render_template("login.html", mensagem=mensagem)
else:
login_user(usuario)
return redirect("/home")
|
none
| 1
| 2.613284
| 3
|
|
52-54-feedparser/my-code-RSS-feeds/URL_namedtuples.py
|
Anthlis/My_100_Days_Of_Python
| 2
|
6629588
|
import collections
Measurement = collections.namedtuple(
'Measurement',
'temp, lat, long, quality')
def get_meas():
data = [
Measurement(11.2, 19.2, 11.1, 'hard'),
Measurement(22.5, 44.234, 19.02, 'strong'),
Measurement(13.5, 2, 3, 'soft'),
]
return data
def main():
for i, idx in enumerate(get_meas(), start=1):
print(f"{i}. temp={idx.temp}, lat={idx.lat}, long={idx.long} and quality={idx.quality}")
if __name__ == '__main__':
main()
|
import collections
Measurement = collections.namedtuple(
'Measurement',
'temp, lat, long, quality')
def get_meas():
data = [
Measurement(11.2, 19.2, 11.1, 'hard'),
Measurement(22.5, 44.234, 19.02, 'strong'),
Measurement(13.5, 2, 3, 'soft'),
]
return data
def main():
for i, idx in enumerate(get_meas(), start=1):
print(f"{i}. temp={idx.temp}, lat={idx.lat}, long={idx.long} and quality={idx.quality}")
if __name__ == '__main__':
main()
|
none
| 1
| 3.147843
| 3
|
|
fabfile.py
|
fle-internal/content-provenance
| 0
|
6629589
|
import json
import os
import time
import requests
from jinja2 import Template
from fabric.api import env, task, local, sudo, run
from fabric.api import get, put, require
from fabric.colors import red, green, blue, yellow
from fabric.context_managers import cd, prefix, show, hide, shell_env
from fabric.contrib.files import exists, sed, upload_template
from fabric.utils import puts
# PREREQUISITES
# 1. SusOps engineer be part of the GCP project kolibri-demo-servers
# 2. The username $USER must be one of the default accounts created on instances, see:
# https://console.cloud.google.com/compute/metadata?project=kolibri-demo-servers
# FAB SETTTINGS
################################################################################
env.user = os.environ.get('USER') # assume ur local username == remote username
CONFIG_DIR = './config'
# CONSTANTS
################################################################################
env.roledefs = {
'alejandro-demo': { 'hosts':['172.16.58.3'] }
}
SAMPLE_CURATED_CHANNELS = [
# curated
'32b5fc156a7d46ddb8cea9663a1871be',
'591b7e1bc89645ef846c1685a7dd7b50',
'335b8e0ed8a3426580e4c58f62810d25',
'fdab6fb66ba24d05acd011e85bdb36ba',
'54aa253a3266416da0c847e16e64aa7b',
'e006726b1f35487eb7b2aa7cb11caf4c',
'8111ac9ab99646a1be9984f13b29167d',
'0543f0f0516b4eeebf281854e80d3e14',
'ae8f138108c1410aa4c6d8bf734ebf57',
'be30cd98263244768c8684320441eecb',
'0a9cd3c76a36402e87d6bf80a997901f',
'6f63fe92ad1044fdb3b3c17d54d0978e',
'9e5305326ed742d0892479dea825a514',
'292583c17e6d4199b81f0423bec58766',
'34fd6722dd734687bc5291fc717d2d7f',
'a68a5bf4aa8a475197658c7a0da528c7',
]
SAMPLE_CHANNELS_INFO = {
'Camara Tanzania': {
'a68a5bf4aa8a475197658c7a0da528c7': 'Camara Education Tanzania'
},
'KICD': {
'591b7e1bc89645ef846c1685a7dd7b50': 'KICD Biology Curriculum (DRAFT)',
'32b5fc156a7d46ddb8cea9663a1871be': 'KICD Chemistry Curriculum (DRAFT)',
'335b8e0ed8a3426580e4c58f62810d25': 'KICD Life Skills Curriculum (DRAFT)',
'fdab6fb66ba24d05acd011e85bdb36ba': 'KICD Mathematics Curriculum (DRAFT)',
'54aa253a3266416da0c847e16e64aa7b': 'KICD Physics Curriculum (DRAFT)'
},
'Nalanda India': {
'ae8f138108c1410aa4c6d8bf734ebf57': 'Nalanda Math',
# 'be30cd98263244768c8684320441eecb': 'Math Olympiad (The Nalanda Project)', # not PUBLISHed
# '0a9cd3c76a36402e87d6bf80a997901f': 'Maharashtra 6,7,8', # not PUBLISHed
'6f63fe92ad1044fdb3b3c17d54d0978e': 'BodhaGuru CBSE English Channel',
# '9e5305326ed742d0892479dea825a514': 'CBSE English Medium Class 3 to 8', # filtered out because does not load
# '292583c17e6d4199b81f0423bec58766': 'CBSE KA English Class 6 to 9', # filtered because unique constraint fails
'34fd6722dd734687bc5291fc717d2d7f': 'CBSE Khan Academy Math 6-9 (English)'
},
'UNICEF Uganda': {
'e006726b1f35487eb7b2aa7cb11caf4c': 'Secondary School',
'8111ac9ab99646a1be9984f13b29167d': 'Youth Center',
# '0543f0f0516b4eeebf281854e80d3e14': 'Teacher Resources' # skipped because not publshing
}
}
# GET DATA FROM STUDIO
################################################################################
@task
def download_sankey_graph_data_file(channel_id, destdir='data_v2'):
filename = channel_id + '.json'
url = 'https://studio.learningequality.org/content/exports/importsdata/sankey/' + filename
response = requests.get(url)
destpath = os.path.join(destdir, filename)
with open(destpath, 'wb') as destfile:
destfile.write(response.content)
puts(green('Saved Sankey graph data file ' + destpath))
@task
def download_sankey_graph_data():
for channel_id in SAMPLE_CURATED_CHANNELS:
download_sankey_graph_data_file(channel_id)
# BUILD
################################################################################
@task
def build():
local('rm -rf build/*')
local('cp -r components build/')
local('cp -r fonts build/')
local('cp sankeyTest.js build/')
local('cp style.css build/')
for org, channels_info in SAMPLE_CHANNELS_INFO.items():
for channel_id, channel_name in channels_info.items():
print(channel_id)
assert channel_id in SAMPLE_CURATED_CHANNELS
build_page(channel_id)
build_listing_page(SAMPLE_CHANNELS_INFO)
def get_channel_data(channel_id):
filename = channel_id + '.json'
data_path = 'data_v2/' + filename
# load channel_data
graph_data = json.load(open(data_path))
channel_data = graph_data['nodes'][channel_id]
return channel_data
def build_page(channel_id):
destdir = 'build/' + channel_id
local('mkdir ' + destdir)
local('mkdir ' + destdir + '/data_v2')
# copy data into place
filename = channel_id + '.json'
data_path = 'data_v2/' + filename
local('cp ' + data_path + ' ' + destdir+'/data_v2/')
# load channel_data
channel_data = get_channel_data(channel_id)
# load template
template_path = 'index_html_template.jinja2'
template_src = open(template_path).read()
template = Template(template_src)
# render template
index_html = template.render(
title='Import counts for channel ' + channel_data['name'] + ' (' + channel_data['channel_id'] + ')',
data_path=data_path,
)
with open(os.path.join(destdir, 'index.html'), 'w') as outf:
outf.write(index_html)
def build_listing_page(sample_channels_info):
# load template
template_path = 'listing_template.jinja2'
template_src = open(template_path).read()
template = Template(template_src)
# render template
index_html = template.render(
title='Studio Channel content import provenanance',
description='These charts show the aggregate counts of content is imported from to create channels aligned to particular curriculum in different countries.',
sample_channels_info=sample_channels_info,
)
with open(os.path.join('build', 'index.html'), 'w') as outf:
outf.write(index_html)
# DEPLOY
################################################################################
DEPLOY_ZIPNAME = 'webroot.zip'
@task
def deploy():
if os.path.exists(DEPLOY_ZIPNAME):
os.remove(DEPLOY_ZIPNAME)
local('zip -r %s build/' % DEPLOY_ZIPNAME)
remote_zip_path = '/var/www/' + DEPLOY_ZIPNAME
if exists(remote_zip_path):
print('removing old zip', remote_zip_path)
sudo('rm %s' % remote_zip_path)
put(DEPLOY_ZIPNAME, remote_zip_path)
with cd('/var/www/'):
sudo('unzip %s ' % DEPLOY_ZIPNAME)
sudo('rm -rf importcounts')
sudo('mv build importcounts')
# cleanup zip files
if exists(remote_zip_path):
print('removing old zip', remote_zip_path)
sudo('rm %s' % remote_zip_path)
if os.path.exists(DEPLOY_ZIPNAME):
os.remove(DEPLOY_ZIPNAME)
|
import json
import os
import time
import requests
from jinja2 import Template
from fabric.api import env, task, local, sudo, run
from fabric.api import get, put, require
from fabric.colors import red, green, blue, yellow
from fabric.context_managers import cd, prefix, show, hide, shell_env
from fabric.contrib.files import exists, sed, upload_template
from fabric.utils import puts
# PREREQUISITES
# 1. SusOps engineer be part of the GCP project kolibri-demo-servers
# 2. The username $USER must be one of the default accounts created on instances, see:
# https://console.cloud.google.com/compute/metadata?project=kolibri-demo-servers
# FAB SETTTINGS
################################################################################
env.user = os.environ.get('USER') # assume ur local username == remote username
CONFIG_DIR = './config'
# CONSTANTS
################################################################################
env.roledefs = {
'alejandro-demo': { 'hosts':['172.16.58.3'] }
}
SAMPLE_CURATED_CHANNELS = [
# curated
'32b5fc156a7d46ddb8cea9663a1871be',
'591b7e1bc89645ef846c1685a7dd7b50',
'335b8e0ed8a3426580e4c58f62810d25',
'fdab6fb66ba24d05acd011e85bdb36ba',
'54aa253a3266416da0c847e16e64aa7b',
'e006726b1f35487eb7b2aa7cb11caf4c',
'8111ac9ab99646a1be9984f13b29167d',
'0543f0f0516b4eeebf281854e80d3e14',
'ae8f138108c1410aa4c6d8bf734ebf57',
'be30cd98263244768c8684320441eecb',
'0a9cd3c76a36402e87d6bf80a997901f',
'6f63fe92ad1044fdb3b3c17d54d0978e',
'9e5305326ed742d0892479dea825a514',
'292583c17e6d4199b81f0423bec58766',
'34fd6722dd734687bc5291fc717d2d7f',
'a68a5bf4aa8a475197658c7a0da528c7',
]
SAMPLE_CHANNELS_INFO = {
'Camara Tanzania': {
'a68a5bf4aa8a475197658c7a0da528c7': 'Camara Education Tanzania'
},
'KICD': {
'591b7e1bc89645ef846c1685a7dd7b50': 'KICD Biology Curriculum (DRAFT)',
'32b5fc156a7d46ddb8cea9663a1871be': 'KICD Chemistry Curriculum (DRAFT)',
'335b8e0ed8a3426580e4c58f62810d25': 'KICD Life Skills Curriculum (DRAFT)',
'fdab6fb66ba24d05acd011e85bdb36ba': 'KICD Mathematics Curriculum (DRAFT)',
'54aa253a3266416da0c847e16e64aa7b': 'KICD Physics Curriculum (DRAFT)'
},
'Nalanda India': {
'ae8f138108c1410aa4c6d8bf734ebf57': 'Nalanda Math',
# 'be30cd98263244768c8684320441eecb': 'Math Olympiad (The Nalanda Project)', # not PUBLISHed
# '0a9cd3c76a36402e87d6bf80a997901f': 'Maharashtra 6,7,8', # not PUBLISHed
'6f63fe92ad1044fdb3b3c17d54d0978e': 'BodhaGuru CBSE English Channel',
# '9e5305326ed742d0892479dea825a514': 'CBSE English Medium Class 3 to 8', # filtered out because does not load
# '292583c17e6d4199b81f0423bec58766': 'CBSE KA English Class 6 to 9', # filtered because unique constraint fails
'34fd6722dd734687bc5291fc717d2d7f': 'CBSE Khan Academy Math 6-9 (English)'
},
'UNICEF Uganda': {
'e006726b1f35487eb7b2aa7cb11caf4c': 'Secondary School',
'8111ac9ab99646a1be9984f13b29167d': 'Youth Center',
# '0543f0f0516b4eeebf281854e80d3e14': 'Teacher Resources' # skipped because not publshing
}
}
# GET DATA FROM STUDIO
################################################################################
@task
def download_sankey_graph_data_file(channel_id, destdir='data_v2'):
    """Fetch the Sankey import-graph JSON for one channel from Studio.

    Saves the response body to ``<destdir>/<channel_id>.json``.
    NOTE(review): assumes ``destdir`` already exists and does not check the
    HTTP status code -- a failed request would silently save an error body.
    """
    filename = channel_id + '.json'
    url = 'https://studio.learningequality.org/content/exports/importsdata/sankey/' + filename
    response = requests.get(url)
    destpath = os.path.join(destdir, filename)
    with open(destpath, 'wb') as destfile:
        destfile.write(response.content)
    puts(green('Saved Sankey graph data file ' + destpath))
@task
def download_sankey_graph_data():
    """Download the Sankey graph JSON file for every curated sample channel."""
    for cid in SAMPLE_CURATED_CHANNELS:
        download_sankey_graph_data_file(cid)
# BUILD
################################################################################
@task
def build():
    """Rebuild the static site under build/ from templates and channel data.

    Clears build/, copies static assets in, renders one chart page per
    sample channel, then renders the top-level listing page.
    """
    local('rm -rf build/*')
    local('cp -r components build/')
    local('cp -r fonts build/')
    local('cp sankeyTest.js build/')
    local('cp style.css build/')
    for org, channels_info in SAMPLE_CHANNELS_INFO.items():
        for channel_id, channel_name in channels_info.items():
            print(channel_id)
            # every channel we render must also be in the curated list
            assert channel_id in SAMPLE_CURATED_CHANNELS
            build_page(channel_id)
    build_listing_page(SAMPLE_CHANNELS_INFO)
def get_channel_data(channel_id):
    """Load the Sankey graph JSON for ``channel_id`` and return its channel node.

    Reads ``data_v2/<channel_id>.json`` (as written by
    ``download_sankey_graph_data_file``) and returns the dict stored under
    ``graph_data['nodes'][channel_id]``.

    :param channel_id: Studio channel id (hex string, also the filename stem).
    :returns: dict of channel metadata (at least ``name`` and ``channel_id``,
        as used by ``build_page``).
    """
    data_path = 'data_v2/' + channel_id + '.json'
    # use a context manager so the file handle is closed promptly;
    # the original ``json.load(open(...))`` leaked the handle
    with open(data_path) as datafile:
        graph_data = json.load(datafile)
    return graph_data['nodes'][channel_id]
def build_page(channel_id):
    """Render the per-channel chart page into build/<channel_id>/index.html.

    Copies the channel's Sankey JSON next to the page so the chart can load
    it with a relative path, then renders the jinja2 template with the
    channel name in the title.
    """
    destdir = 'build/' + channel_id
    local('mkdir ' + destdir)
    local('mkdir ' + destdir + '/data_v2')
    # copy data into place (relative data_v2/ path is reused by the template)
    filename = channel_id + '.json'
    data_path = 'data_v2/' + filename
    local('cp ' + data_path + ' ' + destdir+'/data_v2/')
    # load channel_data (only needed for the page title)
    channel_data = get_channel_data(channel_id)
    # load template
    template_path = 'index_html_template.jinja2'
    template_src = open(template_path).read()
    template = Template(template_src)
    # render template
    index_html = template.render(
        title='Import counts for channel ' + channel_data['name'] + ' (' + channel_data['channel_id'] + ')',
        data_path=data_path,
    )
    with open(os.path.join(destdir, 'index.html'), 'w') as outf:
        outf.write(index_html)
def build_listing_page(sample_channels_info):
    """Render the top-level listing page to build/index.html.

    :param sample_channels_info: mapping of organisation name to a dict of
        ``{channel_id: channel_name}`` (see ``SAMPLE_CHANNELS_INFO``).
    """
    # load template
    template_path = 'listing_template.jinja2'
    template_src = open(template_path).read()
    template = Template(template_src)
    # render template
    index_html = template.render(
        title='Studio Channel content import provenanance',
        description='These charts show the aggregate counts of content is imported from to create channels aligned to particular curriculum in different countries.',
        sample_channels_info=sample_channels_info,
    )
    with open(os.path.join('build', 'index.html'), 'w') as outf:
        outf.write(index_html)
# DEPLOY
################################################################################
DEPLOY_ZIPNAME = 'webroot.zip'
@task
def deploy():
    """Zip build/, upload it, and swap it in as /var/www/importcounts.

    Order matters: any stale remote zip is removed before upload, the fresh
    zip is unpacked remotely, and only then is the old ``importcounts`` dir
    replaced by the new ``build`` dir.
    """
    if os.path.exists(DEPLOY_ZIPNAME):
        os.remove(DEPLOY_ZIPNAME)
    local('zip -r %s build/' % DEPLOY_ZIPNAME)
    remote_zip_path = '/var/www/' + DEPLOY_ZIPNAME
    if exists(remote_zip_path):
        print('removing old zip', remote_zip_path)
        sudo('rm %s' % remote_zip_path)
    put(DEPLOY_ZIPNAME, remote_zip_path)
    with cd('/var/www/'):
        sudo('unzip %s ' % DEPLOY_ZIPNAME)
        sudo('rm -rf importcounts')
        sudo('mv build importcounts')
    # cleanup zip files (both the remote copy and the local one)
    if exists(remote_zip_path):
        print('removing old zip', remote_zip_path)
        sudo('rm %s' % remote_zip_path)
    if os.path.exists(DEPLOY_ZIPNAME):
        os.remove(DEPLOY_ZIPNAME)
|
de
| 0.254892
|
# PREREQUISITES # 1. SusOps engineer be part of the GCP project kolibri-demo-servers # 2. The username $USER must be one of the default accounts created on instances, see: # https://console.cloud.google.com/compute/metadata?project=kolibri-demo-servers # FAB SETTTINGS ################################################################################ # assume ur local username == remote username # CONSTANTS ################################################################################ # curated # 'be30cd98263244768c8684320441eecb': 'Math Olympiad (The Nalanda Project)', # not PUBLISHed # '0a9cd3c76a36402e87d6bf80a997901f': 'Maharashtra 6,7,8', # not PUBLISHed # '9e5305326ed742d0892479dea825a514': 'CBSE English Medium Class 3 to 8', # filtered out because does not load # '292583c17e6d4199b81f0423bec58766': 'CBSE KA English Class 6 to 9', # filtered because unique constraint fails # '0543f0f0516b4eeebf281854e80d3e14': 'Teacher Resources' # skipped because not publshing # GET DATA FROM STUDIO ################################################################################ # BUILD ################################################################################ # load channel_data # copy data into place # load channel_data # load template # render template # load template # render template # DEPLOY ################################################################################ # cleanup zip files
| 1.708157
| 2
|
pyutils/test_mvm.py
|
eltrompetero/maxent_fim
| 0
|
6629590
|
# =============================================================================================== #
# Test module for Median Voter Model.
# Author: <NAME>, <EMAIL>
# =============================================================================================== #
from .mvm import *
from importlib import import_module
def test_corr():
    """corr(n) must match the pair correlation of the exact MVM distribution."""
    for n in [5,7,9,11]:
        # compare analytic corr(n) against brute-force enumeration of states
        assert np.isclose(pair_corr(bin_states(n,True), weights=create_mvm_p(n, 1))[1][0],
                          corr(n)[0])
def test_couplings():
    """Check couplings() both forward (couplings -> correlations) and inverse
    (fit couplings back from correlations generated by a random J)."""
    np.random.seed(0)  # fixed seed so the random couplings are reproducible
    J = np.random.normal(size=4, scale=.5)
    for n in [5,7,9,11]:
        smo_fun, smop_fun, soo_fun, soop_fun, _ = setup_maxent(n)
        Jmo, Joo = couplings(n)
        # forward check: solved couplings reproduce the MVM correlations
        assert np.isclose([smo_fun(Jmo,Jmo,Joo,Joo), soo_fun(Jmo,Jmo,Joo,Joo)],
                          np.array(corr(n)), atol=1e-7).all()
        print("Test passed: numerically solved couplings return expected correlations for n=%d."%n)
        # inverse check: fit couplings to the correlations generated from J
        Js, soln = couplings(n, data_corr=(smo_fun(*J), smop_fun(*J), soo_fun(*J), soop_fun(*J)),
                             full_output=True)
        # couplings do not have to be so accurate to match correlations
        # but numerical precision becomes a noticeable issue even for n=11
        #assert np.isclose(J, Js, atol=1e-2).all(), (np.linalg.norm(J - Js), J, Js, soln['message'])
        corr1 = np.array([smo_fun(*J), smop_fun(*J), soo_fun(*J), soop_fun(*J)])
        corr2 = np.array([smo_fun(*Js), smop_fun(*Js), soo_fun(*Js), soop_fun(*Js)])
        corrErr = np.linalg.norm(corr1-corr2)
        assert corrErr<1e-6, corrErr
        print("Test passed: original couplings returned for n=%d."%n)
def test_setup_maxent():
    """setup_maxent's correlation/pk functions must agree with the full
    pairwise maxent model evaluated by the ConIII package."""
    np.random.seed(0)  # reproducible random couplings
    Jmo, Jmop, Joo, Jop = np.random.normal(size=4, scale=.3)
    nRange = [5,7,9,11]
    for i,n in enumerate(nRange):
        ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
        # build the full coupling vector (fields zero, blocks per coupling type)
        hJ = np.zeros(n+n*(n-1)//2)
        hJ[n:2*n-1] = Jmo
        hJ[n] = Jmop
        hJ[2*n-1:] = Joo
        hJ[2*n-1:2*n-1+n-2] = Jop
        sisjME = ising.calc_observables(hJ)
        # extract corresponding pairwise correlations from full pairwise maxent model
        smoME, smopME, sooME, sopME = sisjME[n+1], sisjME[n], sisjME[-1], sisjME[n+n-1]
        smo, smop, soo, sop, pk = setup_maxent(n)
        assert np.isclose( smoME, smo(Jmo, Jmop, Joo, Jop) )
        assert np.isclose( smopME, smop(Jmo, Jmop, Joo, Jop) )
        assert np.isclose( sooME, soo(Jmo, Jmop, Joo, Jop) )
        assert np.isclose( sopME, sop(Jmo, Jmop, Joo, Jop) )
        # also check the distribution over majority sizes k
        p = ising.p(hJ)
        k = bin_states(n).sum(1)
        k[k<n/2] = n - k[k<n/2]
        pkME = np.array([p[k==i].sum() for i in range(n//2+1,n+1)])
        assert np.isclose( pkME, pk(Jmo, Jmop, Joo, Jop) ).all()
    print("Test passed: Pairwise correlations agree with ConIII module.")
def test_setup_mo_perturbation():
    """Verify setup_mo_perturbation's correlation coefficients: zero couplings
    give zero correlations, and random couplings agree with ConIII."""
    logPartitionList, sMOpcoeffs, sMOcoeffs, sOOpcoeffs, sOOcoeffs = setup_mo_perturbation(5, 0, 0, 0, 0)
    # when couplings are zero, every pairwise correlation should be zero (this checks the coeffs)
    assert np.isclose(coeffs_to_corr(sMOpcoeffs, logPartitionList), 0)
    assert np.isclose(coeffs_to_corr(sMOcoeffs, logPartitionList), 0)
    assert np.isclose(coeffs_to_corr(sOOpcoeffs, logPartitionList), 0)
    assert np.isclose(coeffs_to_corr(sOOcoeffs, logPartitionList), 0)
    # check equations for pairwise correlations with ConIII module
    J = np.random.normal(size=4, scale=.3)
    # coupling order: Jmop, Jmo, Joop, Joo
    nRange = [5,7,9,11,13]
    for i,n in enumerate(nRange):
        ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
        # build the full coupling vector (fields zero, blocks per coupling type)
        hJ = np.zeros(n+n*(n-1)//2)
        hJ[n] = J[0]
        hJ[n+1:n+n-1] = J[1]
        hJ[n+n-1:n+n-1+n-2] = J[2]
        hJ[3*n-3:] = J[3]
        sisjME = ising.calc_observables(hJ)
        sisjME = sisjME[n], sisjME[n+1], sisjME[n+n-1], sisjME[-1]
        logPartitionList, sMOpcoeffs, sMOcoeffs, sOOpcoeffs, sOOcoeffs = setup_mo_perturbation(n, *J)
        sisj = (coeffs_to_corr(sMOpcoeffs, logPartitionList),
                coeffs_to_corr(sMOcoeffs, logPartitionList),
                coeffs_to_corr(sOOpcoeffs, logPartitionList),
                coeffs_to_corr(sOOcoeffs, logPartitionList))
        assert np.isclose(sisj,sisjME).all()
def test_setup_oo_perturbation():
    """Verify setup_oo_perturbation's correlation coefficients: zero couplings
    give zero correlations, and random couplings agree with ConIII."""
    (logPartitionList,
     sMOpcoeffs,
     sMOcoeffs,
     sO1O2pcoeffs,
     sOOpcoeffs,
     sOOcoeffs) = setup_oo_perturbation(5, *np.zeros(5))
    # when couplings are zero, every pairwise correlation should be zero (this checks the coeffs)
    assert np.isclose(coeffs_to_corr(sMOpcoeffs, logPartitionList), 0)
    assert np.isclose(coeffs_to_corr(sMOcoeffs, logPartitionList), 0)
    assert np.isclose(coeffs_to_corr(sO1O2pcoeffs, logPartitionList), 0)
    assert np.isclose(coeffs_to_corr(sOOpcoeffs, logPartitionList), 0)
    assert np.isclose(coeffs_to_corr(sOOcoeffs, logPartitionList), 0)
    # check equations for pairwise correlations with ConIII module
    J = np.random.normal(size=5, scale=.3)
    # coupling order: Jmop, Jmo, Jo1o2, Joop, Joo
    nRange = [5,7,9,11,13]
    for i,n in enumerate(nRange):
        ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
        # build the full coupling vector (fields zero, blocks per coupling type)
        hJ = np.zeros(n+n*(n-1)//2)
        hJ[n:n+2] = J[0]
        hJ[n+2:n+n-1] = J[1]
        hJ[n+n-1] = J[2]
        hJ[n+n:n+n-1+n-2+n-3] = J[3]
        hJ[4*n-6:] = J[4]
        sisjME = ising.calc_observables(hJ)
        sisjME = sisjME[[n,n+2,2*n-1,2*n,4*n-6]]
        (logPartitionList,
         sMOpcoeffs,
         sMOcoeffs,
         sO1O2pcoeffs,
         sOOpcoeffs,
         sOOcoeffs) = setup_oo_perturbation(n, *J)
        sisj = (coeffs_to_corr(sMOpcoeffs, logPartitionList),
                coeffs_to_corr(sMOcoeffs, logPartitionList),
                coeffs_to_corr(sO1O2pcoeffs, logPartitionList),
                coeffs_to_corr(sOOpcoeffs, logPartitionList),
                coeffs_to_corr(sOOcoeffs, logPartitionList))
        assert np.isclose(sisj,sisjME).all()
def test_setup_perturbation(n=9):
    """Perturbed-coupling correlations from setup_perturbation must match the
    full pairwise maxent calculation in ConIII for random perturbations."""
    rng = np.random.RandomState(0)  # reproducible perturbations
    Jpair = couplings(n)
    for i in range(10):
        # perturb the two coupling classes around the MVM solution
        J = np.zeros(12)
        J[[0,1,9,10]] = Jpair[0] + rng.normal()
        J[J==0] = Jpair[1] + rng.normal()
        # calculate sisj using simplified model
        logPartitionList, kList, sisjCoeffs = setup_perturbation(J, n)
        sisj = np.array([coeffs_to_corr(coeffs, logPartitionList)
                         for coeffs in sisjCoeffs])
        sisj = squareform(square_J(sisj, n))
        # compare with ConIII (fields prepended as zeros)
        Jasvec = np.insert(squareform(square_J(J, n)), np.zeros(n, dtype=int), 0)
        ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
        sisjME = ising.calc_observables(Jasvec)[n:]
        assert np.linalg.norm(sisj-sisjME)<1e-13
    print("Test passed: pairwise correlations are numerically in agreement with ConIII calculation.")
|
# =============================================================================================== #
# Test module for Median Voter Model.
# Author: <NAME>, <EMAIL>
# =============================================================================================== #
from .mvm import *
from importlib import import_module
def test_corr():
for n in [5,7,9,11]:
assert np.isclose(pair_corr(bin_states(n,True), weights=create_mvm_p(n, 1))[1][0],
corr(n)[0])
def test_couplings():
np.random.seed(0)
J = np.random.normal(size=4, scale=.5)
for n in [5,7,9,11]:
smo_fun, smop_fun, soo_fun, soop_fun, _ = setup_maxent(n)
Jmo, Joo = couplings(n)
assert np.isclose([smo_fun(Jmo,Jmo,Joo,Joo), soo_fun(Jmo,Jmo,Joo,Joo)],
np.array(corr(n)), atol=1e-7).all()
print("Test passed: numerically solved couplings return expected correlations for n=%d."%n)
Js, soln = couplings(n, data_corr=(smo_fun(*J), smop_fun(*J), soo_fun(*J), soop_fun(*J)),
full_output=True)
# couplings do not have to be so accurate to match correlations
# but numerical precision becomes a noticeable issue even for n=11
#assert np.isclose(J, Js, atol=1e-2).all(), (np.linalg.norm(J - Js), J, Js, soln['message'])
corr1 = np.array([smo_fun(*J), smop_fun(*J), soo_fun(*J), soop_fun(*J)])
corr2 = np.array([smo_fun(*Js), smop_fun(*Js), soo_fun(*Js), soop_fun(*Js)])
corrErr = np.linalg.norm(corr1-corr2)
assert corrErr<1e-6, corrErr
print("Test passed: original couplings returned for n=%d."%n)
def test_setup_maxent():
np.random.seed(0)
Jmo, Jmop, Joo, Jop = np.random.normal(size=4, scale=.3)
nRange = [5,7,9,11]
for i,n in enumerate(nRange):
ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
hJ = np.zeros(n+n*(n-1)//2)
hJ[n:2*n-1] = Jmo
hJ[n] = Jmop
hJ[2*n-1:] = Joo
hJ[2*n-1:2*n-1+n-2] = Jop
sisjME = ising.calc_observables(hJ)
# extract corresponding pairwise correlations from full pairwise maxent model
smoME, smopME, sooME, sopME = sisjME[n+1], sisjME[n], sisjME[-1], sisjME[n+n-1]
smo, smop, soo, sop, pk = setup_maxent(n)
assert np.isclose( smoME, smo(Jmo, Jmop, Joo, Jop) )
assert np.isclose( smopME, smop(Jmo, Jmop, Joo, Jop) )
assert np.isclose( sooME, soo(Jmo, Jmop, Joo, Jop) )
assert np.isclose( sopME, sop(Jmo, Jmop, Joo, Jop) )
p = ising.p(hJ)
k = bin_states(n).sum(1)
k[k<n/2] = n - k[k<n/2]
pkME = np.array([p[k==i].sum() for i in range(n//2+1,n+1)])
assert np.isclose( pkME, pk(Jmo, Jmop, Joo, Jop) ).all()
print("Test passed: Pairwise correlations agree with ConIII module.")
def test_setup_mo_perturbation():
logPartitionList, sMOpcoeffs, sMOcoeffs, sOOpcoeffs, sOOcoeffs = setup_mo_perturbation(5, 0, 0, 0, 0)
# when couplings are zero, every pairwise correlation should be zero (this checks the coeffs)
assert np.isclose(coeffs_to_corr(sMOpcoeffs, logPartitionList), 0)
assert np.isclose(coeffs_to_corr(sMOcoeffs, logPartitionList), 0)
assert np.isclose(coeffs_to_corr(sOOpcoeffs, logPartitionList), 0)
assert np.isclose(coeffs_to_corr(sOOcoeffs, logPartitionList), 0)
# check equations for pairwise correlations with ConIII module
J = np.random.normal(size=4, scale=.3)
# Jmop, Jmo, Joop, Joo
nRange = [5,7,9,11,13]
for i,n in enumerate(nRange):
ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
hJ = np.zeros(n+n*(n-1)//2)
hJ[n] = J[0]
hJ[n+1:n+n-1] = J[1]
hJ[n+n-1:n+n-1+n-2] = J[2]
hJ[3*n-3:] = J[3]
sisjME = ising.calc_observables(hJ)
sisjME = sisjME[n], sisjME[n+1], sisjME[n+n-1], sisjME[-1]
logPartitionList, sMOpcoeffs, sMOcoeffs, sOOpcoeffs, sOOcoeffs = setup_mo_perturbation(n, *J)
sisj = (coeffs_to_corr(sMOpcoeffs, logPartitionList),
coeffs_to_corr(sMOcoeffs, logPartitionList),
coeffs_to_corr(sOOpcoeffs, logPartitionList),
coeffs_to_corr(sOOcoeffs, logPartitionList))
assert np.isclose(sisj,sisjME).all()
def test_setup_oo_perturbation():
(logPartitionList,
sMOpcoeffs,
sMOcoeffs,
sO1O2pcoeffs,
sOOpcoeffs,
sOOcoeffs) = setup_oo_perturbation(5, *np.zeros(5))
# when couplings are zero, every pairwise correlation should be zero (this checks the coeffs)
assert np.isclose(coeffs_to_corr(sMOpcoeffs, logPartitionList), 0)
assert np.isclose(coeffs_to_corr(sMOcoeffs, logPartitionList), 0)
assert np.isclose(coeffs_to_corr(sO1O2pcoeffs, logPartitionList), 0)
assert np.isclose(coeffs_to_corr(sOOpcoeffs, logPartitionList), 0)
assert np.isclose(coeffs_to_corr(sOOcoeffs, logPartitionList), 0)
# check equations for pairwise correlations with ConIII module
J = np.random.normal(size=5, scale=.3)
# Jmop, Jmo, Jo1o2, Joop, Joo
nRange = [5,7,9,11,13]
for i,n in enumerate(nRange):
ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
hJ = np.zeros(n+n*(n-1)//2)
hJ[n:n+2] = J[0]
hJ[n+2:n+n-1] = J[1]
hJ[n+n-1] = J[2]
hJ[n+n:n+n-1+n-2+n-3] = J[3]
hJ[4*n-6:] = J[4]
sisjME = ising.calc_observables(hJ)
sisjME = sisjME[[n,n+2,2*n-1,2*n,4*n-6]]
(logPartitionList,
sMOpcoeffs,
sMOcoeffs,
sO1O2pcoeffs,
sOOpcoeffs,
sOOcoeffs) = setup_oo_perturbation(n, *J)
sisj = (coeffs_to_corr(sMOpcoeffs, logPartitionList),
coeffs_to_corr(sMOcoeffs, logPartitionList),
coeffs_to_corr(sO1O2pcoeffs, logPartitionList),
coeffs_to_corr(sOOpcoeffs, logPartitionList),
coeffs_to_corr(sOOcoeffs, logPartitionList))
assert np.isclose(sisj,sisjME).all()
def test_setup_perturbation(n=9):
rng = np.random.RandomState(0)
Jpair = couplings(n)
for i in range(10):
J = np.zeros(12)
J[[0,1,9,10]] = Jpair[0] + rng.normal()
J[J==0] = Jpair[1] + rng.normal()
# calculate sisj using simplified model
logPartitionList, kList, sisjCoeffs = setup_perturbation(J, n)
sisj = np.array([coeffs_to_corr(coeffs, logPartitionList)
for coeffs in sisjCoeffs])
sisj = squareform(square_J(sisj, n))
Jasvec = np.insert(squareform(square_J(J, n)), np.zeros(n, dtype=int), 0)
ising = import_module('coniii.ising_eqn.ising_eqn_%d_sym'%n)
sisjME = ising.calc_observables(Jasvec)[n:]
assert np.linalg.norm(sisj-sisjME)<1e-13
print("Test passed: pairwise correlations are numerically in agreement with ConIII calculation.")
|
en
| 0.574623
|
# =============================================================================================== # # Test module for Median Voter Model. # Author: <NAME>, <EMAIL> # =============================================================================================== # # couplings do not have to be so accurate to match correlations # but numerical precision becomes a noticeable issue even for n=11 #assert np.isclose(J, Js, atol=1e-2).all(), (np.linalg.norm(J - Js), J, Js, soln['message']) # extract corresponding pairwise correlations from full pairwise maxent model # when couplings are zero, every pairwise correlation should be zero (this checks the coeffs) # check equations for pairwise correlations with ConIII module # Jmop, Jmo, Joop, Joo # when couplings are zero, every pairwise correlation should be zero (this checks the coeffs) # check equations for pairwise correlations with ConIII module # Jmop, Jmo, Jo1o2, Joop, Joo # calculate sisj using simplified model
| 1.96354
| 2
|
runner.py
|
almightyvaidy/justcheckingin
| 0
|
6629591
|
<reponame>almightyvaidy/justcheckingin
# -*- coding: utf-8 -*-
"""
Created on Thu May 6 11:36:44 2021
@author: vaidy
"""
# what the the external dat input that need to supplied to the objects instatiated using this class
class runner:
    """Toy model of a runner; the behaviour methods are stubs.

    NOTE(review): the original ``__init__`` returned ``self``, which makes
    ``runner(x)`` raise TypeError (``__init__`` must return None), and the
    methods lacked ``self`` so they could not be called on an instance.
    Both defects are fixed here; the stub methods still do nothing.
    """

    def __init__(self, x):
        # store the external input; __init__ must not return a value
        self.x = x

    def health(self):
        """Stub: health state -- not implemented yet."""
        return

    def sleep(self):
        """Stub: sleeping -- not implemented yet."""
        return

    def strech(self):
        """Stub: stretching -- not implemented yet (misspelled name kept for callers)."""
        return

    def run(self):
        # call distance because you increase distance when you run
        # you get tired when you run
        # you lose energy while running
        # your joints get damaged when running with some probability
        return

    def poop(self):
        """Stub: not implemented yet."""
        return

    def body(self):
        """Stub: not implemented yet."""
        return
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 6 11:36:44 2021
@author: vaidy
"""
# what the the external dat input that need to supplied to the objects instatiated using this class
class runner:
def __init__(self,x):
return self
def health():
return
def sleep():
return
def strech():
return
def run():
# call ditance becasue the you increase distance when you run
# you get tired when you run
# you lose energy while running
# your joints get damage when running with some probability
return
def poop():
return
def body():
return
|
en
| 0.87715
|
# -*- coding: utf-8 -*- Created on Thu May 6 11:36:44 2021
@author: vaidy # what the the external dat input that need to supplied to the objects instatiated using this class # call ditance becasue the you increase distance when you run # you get tired when you run # you lose energy while running # your joints get damage when running with some probability
| 3.178951
| 3
|
var/spack/repos/builtin/packages/krims/package.py
|
LiamBindle/spack
| 2,360
|
6629592
|
<filename>var/spack/repos/builtin/packages/krims/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Krims(CMakePackage):
    """The bucket of Krimskrams every C or C++ project needs"""

    homepage = "https://lazyten.org/krims"
    url = "https://github.com/lazyten/krims/archive/v0.2.1.tar.gz"
    git = "https://github.com/lazyten/krims.git"

    maintainers = ['mfherbst']

    #
    # Versions
    #
    version("develop", branch="master")
    version("0.2.1", sha256="baac8de392e6c2a73a535f71596f51d4a80a08d9c0ecbf9a2d72d1d70dd17999")

    #
    # Variants
    #
    # Library build type
    variant("build_type", default="DebugRelease",
            description="The build type to build",
            values=("Debug", "Release", "DebugRelease"))
    variant("shared", default=True,
            description="Build shared libraries (else the static one)")

    # Components
    variant("examples", default=False,
            description="Compile examples")

    #
    # Conflicts
    #
    # Only builds on clang > 3.5 and gcc > 4.8
    conflicts("%intel", msg="krims only builds with gcc and clang")
    conflicts("%gcc@:4.8")
    conflicts("%clang@:3.5")

    #
    # patch
    #
    # float80 is enable only x86_64
    patch('float80.patch')

    #
    # Dependencies
    #
    depends_on("cmake@3:", type="build")

    #
    # Settings and cmake cache
    #
    def cmake_args(self):
        """Return the CMake cache arguments derived from the active variants."""
        spec = self.spec
        args = [
            "-DAUTOCHECKOUT_MISSING_REPOS=OFF",
            # str(bool) yields "True"/"False", which CMake accepts as ON/OFF
            "-DBUILD_SHARED_LIBS=" + str("+shared" in spec),
            # TODO Hard-disable tests for now, since rapidcheck not in Spack
            "-DKRIMS_ENABLE_TESTS=OFF",
            "-DKRIMS_ENABLE_EXAMPLES=" + str("+examples" in spec),
        ]
        return args
|
<filename>var/spack/repos/builtin/packages/krims/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Krims(CMakePackage):
"""The bucket of Krimskrams every C or C++ project needs"""
homepage = "https://lazyten.org/krims"
url = "https://github.com/lazyten/krims/archive/v0.2.1.tar.gz"
git = "https://github.com/lazyten/krims.git"
maintainers = ['mfherbst']
#
# Versions
#
version("develop", branch="master")
version("0.2.1", sha256="baac8de392e6c2a73a535f71596f51d4a80a08d9c0ecbf9a2d72d1d70dd17999")
#
# Variants
#
# Library build type
variant("build_type", default="DebugRelease",
description="The build type to build",
values=("Debug", "Release", "DebugRelease"))
variant("shared", default=True,
description="Build shared libraries (else the static one)")
# Components
variant("examples", default=False,
description="Compile examples")
#
# Conflicts
#
# Only builds on clang > 3.5 and gcc > 4.8
conflicts("%intel", msg="krims only builds with gcc and clang")
conflicts("%gcc@:4.8")
conflicts("%clang@:3.5")
#
# patch
#
# float80 is enable only x86_64
patch('float80.patch')
#
# Dependencies
#
depends_on("cmake@3:", type="build")
#
# Settings and cmake cache
#
def cmake_args(self):
spec = self.spec
args = [
"-DAUTOCHECKOUT_MISSING_REPOS=OFF",
#
"-DBUILD_SHARED_LIBS=" + str("+shared" in spec),
# TODO Hard-disable tests for now, since rapidcheck not in Spack
"-DKRIMS_ENABLE_TESTS=OFF",
"-DKRIMS_ENABLE_EXAMPLES=" + str("+examples" in spec),
]
return args
|
en
| 0.712513
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) The bucket of Krimskrams every C or C++ project needs # # Versions # # # Variants # # Library build type # Components # # Conflicts # # Only builds on clang > 3.5 and gcc > 4.8 # # patch # # float80 is enable only x86_64 # # Dependencies # # # Settings and cmake cache # # # TODO Hard-disable tests for now, since rapidcheck not in Spack
| 1.48622
| 1
|
carlo/restrictions.py
|
ahitrin/carlo
| 0
|
6629593
|
<reponame>ahitrin/carlo
def eq(first, second):
    """Pair the two operands of an equality restriction as a tuple."""
    pair = first, second
    return pair
def ge(first, second):
    """Stub restriction; returns None.

    NOTE(review): presumably a 'greater or equal' restriction between the
    two operands, but it is not implemented yet -- confirm intended semantics.
    """
    pass
def cardinal_many(*args):
    """Stub restriction; returns None.

    NOTE(review): presumably a cardinality ('many') restriction over *args,
    but it is not implemented yet -- confirm intended semantics.
    """
    pass
|
def eq(first, second):
return (first, second)
def ge(first, second):
pass
def cardinal_many(*args):
pass
|
none
| 1
| 1.993688
| 2
|
|
esphome/cpp_helpers.py
|
OttoWinter/esphomeyaml
| 249
|
6629594
|
import logging
from esphome.const import (
CONF_DISABLED_BY_DEFAULT,
CONF_ENTITY_CATEGORY,
CONF_ICON,
CONF_INTERNAL,
CONF_NAME,
CONF_SETUP_PRIORITY,
CONF_UPDATE_INTERVAL,
CONF_TYPE_ID,
)
# pylint: disable=unused-import
from esphome.core import coroutine, ID, CORE
from esphome.types import ConfigType
from esphome.cpp_generator import add, get_variable
from esphome.cpp_types import App
from esphome.util import Registry, RegistryEntry
_LOGGER = logging.getLogger(__name__)
async def gpio_pin_expression(conf):
    """Generate an expression for the given pin option.

    This is a coroutine, you must await it with a 'await' expression!

    Returns None when no pin config was given.
    """
    if conf is None:
        return None
    from esphome import pins

    # a platform-specific pin schema (matched by key in conf) takes precedence
    for key, (func, _) in pins.PIN_SCHEMA_REGISTRY.items():
        if key in conf:
            return await coroutine(func)(conf)
    # fall back to the default schema of the current target platform
    return await coroutine(pins.PIN_SCHEMA_REGISTRY[CORE.target_platform][0])(conf)
async def register_component(var, config):
    """Register the given obj as a component.

    This is a coroutine, you must await it with a 'await' expression!

    :param var: The variable representing the component.
    :param config: The configuration for the component.
    :raises ValueError: if the variable's ID was not declared as a Component
        or was registered twice.
    """
    import inspect

    id_ = str(var.base)
    if id_ not in CORE.component_ids:
        raise ValueError(
            f"Component ID {id_} was not declared to inherit from Component, or was registered twice. Please create a bug report with your configuration."
        )
    # each component may only be registered once
    CORE.component_ids.remove(id_)
    if CONF_SETUP_PRIORITY in config:
        add(var.set_setup_priority(config[CONF_SETUP_PRIORITY]))
    if CONF_UPDATE_INTERVAL in config:
        add(var.set_update_interval(config[CONF_UPDATE_INTERVAL]))

    # Set component source by inspecting the stack and getting the callee module
    # https://stackoverflow.com/a/1095621
    name = None
    try:
        for frm in inspect.stack()[1:]:
            mod = inspect.getmodule(frm[0])
            if mod is None:
                continue
            name = mod.__name__
            if name.startswith("esphome.components."):
                name = name[len("esphome.components.") :]
                break
            if name == "esphome.automation":
                name = "automation"
                # continue looking further up in stack in case we find a better one
            if name == "esphome.coroutine":
                # Only works for async-await coroutine syntax
                break
    except (KeyError, AttributeError, IndexError) as e:
        # stack inspection is best-effort; failure only loses source attribution
        _LOGGER.warning(
            "Error while finding name of component, please report this", exc_info=e
        )
    if name is not None:
        add(var.set_component_source(name))
    add(App.register_component(var))
    return var
async def register_parented(var, value):
    """Set ``var``'s parent, resolving ``value`` to a variable first if it is an ID."""
    parent = await get_variable(value) if isinstance(value, ID) else value
    add(var.set_parent(parent))
async def setup_entity(var, config):
    """Set up generic properties of an Entity"""
    # name and disabled-by-default are always present; the rest are optional
    add(var.set_name(config[CONF_NAME]))
    add(var.set_disabled_by_default(config[CONF_DISABLED_BY_DEFAULT]))
    for conf_key, setter in (
        (CONF_INTERNAL, var.set_internal),
        (CONF_ICON, var.set_icon),
        (CONF_ENTITY_CATEGORY, var.set_entity_category),
    ):
        if conf_key in config:
            add(setter(config[conf_key]))
def extract_registry_entry_config(registry, full_config):
    # type: (Registry, ConfigType) -> RegistryEntry
    """Return ``(registry_entry, sub_config)`` for the first key of
    ``full_config`` that is present in ``registry``.

    NOTE(review): raises StopIteration if no key matches -- callers are
    expected to pass validated configs where exactly one key matches.
    """
    key, config = next((k, v) for k, v in full_config.items() if k in registry)
    return registry[key], config
async def build_registry_entry(registry, full_config):
    """Build the code object for the registry entry selected by ``full_config``.

    Looks up the matching entry, then awaits its coroutine builder with the
    entry's sub-config and the declared type ID.
    """
    registry_entry, config = extract_registry_entry_config(registry, full_config)
    type_id = full_config[CONF_TYPE_ID]
    builder = registry_entry.coroutine_fun
    return await builder(config, type_id)
async def build_registry_list(registry, config):
    """Build every entry in ``config`` against ``registry``, preserving order."""
    return [await build_registry_entry(registry, entry) for entry in config]
|
import logging
from esphome.const import (
CONF_DISABLED_BY_DEFAULT,
CONF_ENTITY_CATEGORY,
CONF_ICON,
CONF_INTERNAL,
CONF_NAME,
CONF_SETUP_PRIORITY,
CONF_UPDATE_INTERVAL,
CONF_TYPE_ID,
)
# pylint: disable=unused-import
from esphome.core import coroutine, ID, CORE
from esphome.types import ConfigType
from esphome.cpp_generator import add, get_variable
from esphome.cpp_types import App
from esphome.util import Registry, RegistryEntry
_LOGGER = logging.getLogger(__name__)
async def gpio_pin_expression(conf):
"""Generate an expression for the given pin option.
This is a coroutine, you must await it with a 'await' expression!
"""
if conf is None:
return None
from esphome import pins
for key, (func, _) in pins.PIN_SCHEMA_REGISTRY.items():
if key in conf:
return await coroutine(func)(conf)
return await coroutine(pins.PIN_SCHEMA_REGISTRY[CORE.target_platform][0])(conf)
async def register_component(var, config):
"""Register the given obj as a component.
This is a coroutine, you must await it with a 'await' expression!
:param var: The variable representing the component.
:param config: The configuration for the component.
"""
import inspect
id_ = str(var.base)
if id_ not in CORE.component_ids:
raise ValueError(
f"Component ID {id_} was not declared to inherit from Component, or was registered twice. Please create a bug report with your configuration."
)
CORE.component_ids.remove(id_)
if CONF_SETUP_PRIORITY in config:
add(var.set_setup_priority(config[CONF_SETUP_PRIORITY]))
if CONF_UPDATE_INTERVAL in config:
add(var.set_update_interval(config[CONF_UPDATE_INTERVAL]))
# Set component source by inspecting the stack and getting the callee module
# https://stackoverflow.com/a/1095621
name = None
try:
for frm in inspect.stack()[1:]:
mod = inspect.getmodule(frm[0])
if mod is None:
continue
name = mod.__name__
if name.startswith("esphome.components."):
name = name[len("esphome.components.") :]
break
if name == "esphome.automation":
name = "automation"
# continue looking further up in stack in case we find a better one
if name == "esphome.coroutine":
# Only works for async-await coroutine syntax
break
except (KeyError, AttributeError, IndexError) as e:
_LOGGER.warning(
"Error while finding name of component, please report this", exc_info=e
)
if name is not None:
add(var.set_component_source(name))
add(App.register_component(var))
return var
async def register_parented(var, value):
if isinstance(value, ID):
paren = await get_variable(value)
else:
paren = value
add(var.set_parent(paren))
async def setup_entity(var, config):
"""Set up generic properties of an Entity"""
add(var.set_name(config[CONF_NAME]))
add(var.set_disabled_by_default(config[CONF_DISABLED_BY_DEFAULT]))
if CONF_INTERNAL in config:
add(var.set_internal(config[CONF_INTERNAL]))
if CONF_ICON in config:
add(var.set_icon(config[CONF_ICON]))
if CONF_ENTITY_CATEGORY in config:
add(var.set_entity_category(config[CONF_ENTITY_CATEGORY]))
def extract_registry_entry_config(registry, full_config):
# type: (Registry, ConfigType) -> RegistryEntry
key, config = next((k, v) for k, v in full_config.items() if k in registry)
return registry[key], config
async def build_registry_entry(registry, full_config):
registry_entry, config = extract_registry_entry_config(registry, full_config)
type_id = full_config[CONF_TYPE_ID]
builder = registry_entry.coroutine_fun
return await builder(config, type_id)
async def build_registry_list(registry, config):
actions = []
for conf in config:
action = await build_registry_entry(registry, conf)
actions.append(action)
return actions
|
en
| 0.756162
|
# pylint: disable=unused-import Generate an expression for the given pin option. This is a coroutine, you must await it with a 'await' expression! Register the given obj as a component. This is a coroutine, you must await it with a 'await' expression! :param var: The variable representing the component. :param config: The configuration for the component. # Set component source by inspecting the stack and getting the callee module # https://stackoverflow.com/a/1095621 # continue looking further up in stack in case we find a better one # Only works for async-await coroutine syntax Set up generic properties of an Entity # type: (Registry, ConfigType) -> RegistryEntry
| 1.995244
| 2
|
survey_bot/bot_admin/migrations/0003_auto_20211019_0858.py
|
eisichenko/Survey-Service
| 0
|
6629595
|
# Generated by Django 3.2.7 on 2021-10-19 08:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefines several bot_admin fields as the
    TextFields shown below (max_length / null / verbose_name as listed)."""

    dependencies = [
        ('bot_admin', '0002_telegrampoll_options_text'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student',
            name='group',
            field=models.TextField(max_length=100, verbose_name='Group'),
        ),
        migrations.AlterField(
            model_name='student',
            name='real_name',
            field=models.TextField(max_length=100, verbose_name='Real name'),
        ),
        migrations.AlterField(
            model_name='telegrammessage',
            name='answer',
            # 4096 matches Telegram's maximum message length
            field=models.TextField(max_length=4096, null=True, verbose_name='Question answer'),
        ),
        migrations.AlterField(
            model_name='telegrammessage',
            name='text',
            field=models.TextField(max_length=4096, verbose_name='Message text'),
        ),
        migrations.AlterField(
            model_name='telegrampoll',
            name='question',
            field=models.TextField(max_length=300, verbose_name='Poll question'),
        ),
    ]
|
# Generated by Django 3.2.7 on 2021-10-19 08:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot_admin', '0002_telegrampoll_options_text'),
]
operations = [
migrations.AlterField(
model_name='student',
name='group',
field=models.TextField(max_length=100, verbose_name='Group'),
),
migrations.AlterField(
model_name='student',
name='real_name',
field=models.TextField(max_length=100, verbose_name='Real name'),
),
migrations.AlterField(
model_name='telegrammessage',
name='answer',
field=models.TextField(max_length=4096, null=True, verbose_name='Question answer'),
),
migrations.AlterField(
model_name='telegrammessage',
name='text',
field=models.TextField(max_length=4096, verbose_name='Message text'),
),
migrations.AlterField(
model_name='telegrampoll',
name='question',
field=models.TextField(max_length=300, verbose_name='Poll question'),
),
]
|
en
| 0.856759
|
# Generated by Django 3.2.7 on 2021-10-19 08:58
| 1.62802
| 2
|
maquinaria/alquileres/views/__init__.py
|
CFredy9/Maquinaria
| 0
|
6629596
|
from .alquileres import *
|
from .alquileres import *
|
none
| 1
| 1.266523
| 1
|
|
src/odinapi/views/read_smiles.py
|
Odin-SMR/odin-api
| 0
|
6629597
|
from datetime import datetime
from dateutil.relativedelta import relativedelta
from h5py import File
import numpy as np
def read_smiles_file(
    file, date, species, file_index,
    smiles_basepath_pattern='/vds-data/ISS_SMILES_Level2/{0}/v2.4',
):
    """Read a single scan from an ISS/SMILES level-2 HDF-EOS file.

    Parameters:
        file: HDF5 file name (joined onto the derived directory path).
        date: date string; chars 0-4 (year) and 5-7 (month) select the
            <base>/<year>/<month>/ directory.
        species: species name; selects both the directory and the swath.
        file_index: scan index within the file (int or int-like string).
        smiles_basepath_pattern: base directory pattern formatted with
            *species*.

    Returns a dict with 'data_fields' and 'geolocation_fields' sub-dicts,
    each reduced to the single scan at *file_index* and converted to plain
    Python lists/scalars.  A 'MJD' entry (Modified Julian Date) is derived
    from the file's 'Time' field.
    """
    # stdlib timedelta replaces third-party dateutil.relativedelta here;
    # plain seconds arithmetic needs no calendar-aware deltas.
    from datetime import timedelta

    file_index = int(file_index)
    smiles_datapath = smiles_basepath_pattern.format(species)
    year = date[0:4]
    month = date[5:7]
    smiles_datapath = "{0}/{1}/{2}/".format(smiles_datapath, year, month)
    ifile = smiles_datapath + file
    data = dict()
    data_fields = dict()
    geolocation_fields = dict()
    with File(ifile, 'r') as f:
        fdata = f['HDFEOS']['SWATHS'][species]
        for key in fdata['Data Fields']:
            data_fields[key] = np.array(fdata['Data Fields'][key])
        for key in fdata['Geolocation Fields']:
            geolocation_fields[key] = np.array(
                fdata['Geolocation Fields'][key])
    # 'Time' is seconds since 1958-01-01 in this product; convert to MJD
    # (days since 1858-11-17).  Epochs and scale hoisted out of the loop.
    smiles_epoch = datetime(1958, 1, 1)
    mjd_epoch = datetime(1858, 11, 17)
    sec_per_day = 24 * 60 * 60.0
    mjd = [
        ((smiles_epoch + timedelta(seconds=float(t)) - mjd_epoch)
         .total_seconds() / sec_per_day)
        for t in geolocation_fields['Time']
    ]
    geolocation_fields['MJD'] = np.array(mjd)
    data['data_fields'] = data_fields
    data['geolocation_fields'] = geolocation_fields
    # Reduce every field to the single requested scan, as plain Python types.
    for key in data['data_fields']:
        data['data_fields'][key] = data[
            'data_fields'][key][file_index].tolist()
    geoloc = data['geolocation_fields']
    for key in geoloc:
        if key not in ['Altitude']:
            try:
                geoloc[key] = geoloc[key][file_index].tolist()
            except AttributeError:
                # Scalar entries (e.g. the MJD list element) have no .tolist().
                geoloc[key] = geoloc[key][file_index]
            if isinstance(geoloc[key], bytes):
                geoloc[key] = geoloc[key].decode()
        else:
            # Altitude is a per-file axis: keep the full vector.
            geoloc[key] = geoloc[key].tolist()
    return data
|
from datetime import datetime
from dateutil.relativedelta import relativedelta
from h5py import File
import numpy as np
def read_smiles_file(
file, date, species, file_index,
smiles_basepath_pattern='/vds-data/ISS_SMILES_Level2/{0}/v2.4',
):
file_index = int(file_index)
smiles_datapath = smiles_basepath_pattern.format(species)
year = date[0:4]
month = date[5:7]
smiles_datapath = "{0}/{1}/{2}/".format(smiles_datapath, year, month)
ifile = smiles_datapath + file
data = dict()
data_fields = dict()
geolocation_fields = dict()
with File(ifile, 'r') as f:
fdata = f['HDFEOS']['SWATHS'][species]
for key in fdata['Data Fields']:
data_fields[key] = np.array(fdata['Data Fields'][key])
for key in fdata['Geolocation Fields']:
geolocation_fields[key] = np.array(
fdata['Geolocation Fields'][key])
# transform the mls date to MJD and add to dict
mjd = []
smiles_date0 = datetime(1958, 1, 1)
for time_i in geolocation_fields['Time']:
date_i = smiles_date0 + relativedelta(seconds=time_i)
mjd_i = date_i - datetime(1858, 11, 17)
sec_per_day = 24*60*60.0
mjd.append(mjd_i.total_seconds() / sec_per_day)
geolocation_fields['MJD'] = np.array(mjd)
data['data_fields'] = data_fields
data['geolocation_fields'] = geolocation_fields
# select data from the given index
for key in data['data_fields']:
data['data_fields'][key] = data[
'data_fields'][key][file_index].tolist()
geoloc = data['geolocation_fields']
for key in geoloc:
if key not in ['Altitude']:
try:
geoloc[key] = geoloc[key][file_index].tolist()
except AttributeError:
geoloc[key] = geoloc[key][file_index]
if isinstance(geoloc[key], bytes):
geoloc[key] = geoloc[key].decode()
else:
geoloc[key] = geoloc[key].tolist()
return data
|
en
| 0.771378
|
# transform the mls date to MJD and add to dict # select data from the given index
| 2.301073
| 2
|
bin/mkjsonapi.py
|
bd4/monster-hunter-scripts
| 2
|
6629598
|
<reponame>bd4/monster-hunter-scripts
#!/usr/bin/env python3
import os
import json
import sys
import errno
import urllib.request, urllib.parse, urllib.error
import argparse
import _pathfix
from mhapi.db import MHDB
from mhapi import model
ENTITIES = """item weapon monster armor
skilltree skill decoration
horn_melody wyporium""".split()
def parse_args(argv=None):
    """Parse command-line arguments; *argv* defaults to sys.argv[1:]."""
    arg_parser = argparse.ArgumentParser(
        description=(
            "Create static JSON files that mimic a REST API"
            " for monster hunter data"
        )
    )
    arg_parser.add_argument(
        "-o", "--outpath",
        help="output base directory, defaults to web/jsonapi/ in project root")
    arg_parser.add_argument("-g", "--game", help="game, one of 4u, gu, gen")
    arg_parser.add_argument("entities", nargs="*", help=", ".join(ENTITIES))
    return arg_parser.parse_args(argv)
def mkdirs_p(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    An already-existing path is silently accepted; other OS errors
    propagate.
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        # FileExistsError is exactly the errno.EEXIST case (PEP 3151).
        pass
SAFE_CHARS = " &'+\""
def file_path(path, model_object, alt_name_field=None):
    """Return the JSON file path for *model_object* under *path*.

    The basename is the percent-quoted value of *alt_name_field* when a
    non-empty field name is given, otherwise the object's id.
    """
    if not alt_name_field:
        basename = str(model_object.id)
    else:
        basename = urllib.parse.quote(
            model_object[alt_name_field].encode("utf8"), SAFE_CHARS)
    return os.path.join(path, "%s.json" % basename)
def write_list_file(path, model_list):
    """Write <path>/_list.json holding each model's list-view data."""
    payload = [entry.as_list_data() for entry in model_list]
    with open(os.path.join(path, "_list.json"), "w") as out:
        json.dump(payload, out, cls=model.ModelJSONEncoder, indent=2)
def write_index_file(path, indexes):
    """Write one <path>/_index_<key>.json file per index in *indexes*."""
    for index_name, index_data in indexes.items():
        index_file = os.path.join(path, "_index_%s.json" % index_name)
        with open(index_file, "w") as out:
            json.dump(index_data, out, cls=model.ModelJSONEncoder, indent=2)
def write_all_file(path, all_data):
    """Write <path>/_all.json containing the full data of every model."""
    with open(os.path.join(path, "_all.json"), "w") as out:
        json.dump(all_data, out, cls=model.ModelJSONEncoder, indent=2)
def write_map_file(path, map_data):
    """Write <path>/_map.json with an id -> data mapping."""
    with open(os.path.join(path, "_map.json"), "w") as out:
        json.dump(map_data, out, cls=model.ModelJSONEncoder, indent=2)
def monster_json(db, path):
    """Write per-monster JSON files (with damage data) plus list/index files."""
    monsters = db.get_monsters()
    mkdirs_p(path)
    write_list_file(path, monsters)
    indexes = {}
    for m in monsters:
        monster_path = file_path(path, m)
        m.update_indexes(indexes)
        data = m.as_data()
        # Embed the monster's damage data, flagged with its breakable parts.
        damage = db.get_monster_damage(m.id)
        damage.set_breakable(db.get_monster_breaks(m.id))
        data["damage"] = damage.as_data()
        with open(monster_path, "w") as f:
            json.dump(data, f, cls=model.ModelJSONEncoder, indent=2)
    write_index_file(path, indexes)
def armor_json(db, path):
    """Write per-armor JSON files plus list/index/_all files."""
    armors = db.get_armors()
    mkdirs_p(path)
    write_list_file(path, armors)
    all_data = []
    indexes = {}
    for a in armors:
        armor_path = file_path(path, a)
        a.update_indexes(indexes)
        skills = db.get_item_skills(a.id)
        if not skills:
            # Data-quality warning only; the armor is still emitted.
            print("WARN: armor '%s' (%d) has no skills" % (a.name, a.id))
        a.set_skills(skills)
        all_data.append(a.as_data())
        with open(armor_path, "w") as f:
            a.json_dump(f)
    write_index_file(path, indexes)
    write_all_file(path, all_data)
def decoration_json(db, path):
    """Write per-decoration JSON files plus list/index/_all files."""
    decorations = db.get_decorations()
    mkdirs_p(path)
    write_list_file(path, decorations)
    all_data = []
    indexes = {}
    for a in decorations:
        decoration_path = file_path(path, a)
        a.update_indexes(indexes)
        skills = db.get_item_skills(a.id)
        if not skills:
            # Data-quality warning only; the decoration is still emitted.
            print("WARN: decoration '%s' (%d) has no skills" % (a.name, a.id))
        a.set_skills(skills)
        all_data.append(a.as_data())
        with open(decoration_path, "w") as f:
            a.json_dump(f)
    write_index_file(path, indexes)
    write_all_file(path, all_data)
def skill_json(db, path):
    """Write per-skill JSON files plus list/index files."""
    skills = db.get_skills()
    mkdirs_p(path)
    write_list_file(path, skills)
    indexes = {}
    for s in skills:
        s.update_indexes(indexes)
        skill_path = file_path(path, s)
        with open(skill_path, "w") as f:
            s.json_dump(f)
    write_index_file(path, indexes)
def skilltree_json(db, path):
    """Write per-skill-tree JSON files, each with its matching decorations."""
    skill_trees = db.get_skill_trees()
    mkdirs_p(path)
    write_list_file(path, skill_trees)
    all_data = {}
    for st in skill_trees:
        # Attach the decorations that grant this skill tree, with their skills.
        ds = db.get_decorations_by_skills([st.id])
        for d in ds:
            d.set_skills(db.get_item_skills(d.id))
        st.set_decorations(ds)
        skilltree_path = file_path(path, st)
        # _all.json is keyed by tree name rather than id.
        all_data[st.name] = st
        with open(skilltree_path, "w") as f:
            st.json_dump(f)
    write_all_file(path, all_data)
def weapon_json(db, path):
    """Write per-weapon JSON and upgrade-tree files plus list/index/_all.

    For each weapon also emits <id>_tree.json with its upgrade cost paths.
    Horn melodies are cached per note-combination to avoid repeated queries.
    """
    weapons = db.get_weapons()
    mkdirs_p(path)
    write_list_file(path, weapons)
    item_stars = model.ItemStars(db)
    all_data = []
    melodies = {}  # horn_notes -> list of melody dicts (query cache)
    indexes = {}
    for w in weapons:
        weapon_path = file_path(path, w)
        w.update_indexes(indexes)
        data = w.as_data()
        child_weapons = db.get_weapons_by_parent(w.id)
        data["children"] = [dict(id=c.id, name=c.name) for c in child_weapons]
        if w.horn_notes:
            if w.horn_notes not in melodies:
                melodies[w.horn_notes] = [
                    dict(song=melody.song, effect1=melody.effect1)
                    for melody in db.get_horn_melodies_by_notes(w.horn_notes)
                ]
            data["horn_melodies"] = melodies[w.horn_notes]
        stars = item_stars.get_weapon_stars(w)
        data["village_stars"] = stars["Village"]
        data["guild_stars"] = stars["Guild"]
        data["permit_stars"] = stars["Permit"]
        data["arena_stars"] = stars["Arena"]
        all_data.append(data)
        with open(weapon_path, "w") as f:
            json.dump(data, f, cls=model.ModelJSONEncoder, indent=2)
        tree_path = os.path.join(path, "%s_tree.json" % w.id)
        costs = model.get_costs(db, w)
        for cost in costs:
            # NOTE: the comprehension's `w` shadows the loop variable, but
            # only inside the comprehension scope (Python 3 semantics).
            cost["path"] = [dict(name=w.name, id=w.id)
                            for w in cost["path"]]
        with open(tree_path, "w") as f:
            json.dump(costs, f, cls=model.ModelJSONEncoder, indent=2)
    write_index_file(path, indexes)
    write_all_file(path, all_data)
def item_json(db, path):
    """Write per-item JSON files plus list/index files.

    For 4U the wyporium trade data is included in the item query.
    """
    if db.game == "4u":
        items = db.get_items(wyporium=True)
    else:
        items = db.get_items()
    mkdirs_p(path)
    write_list_file(path, items)
    indexes = {}
    for item in items:
        item_path = file_path(path, item)
        item.update_indexes(indexes)
        with open(item_path, "w") as f:
            item.json_dump(f)
    write_index_file(path, indexes)
def wyporium_json(db, path):
    """Write <path>/_map.json mapping item id -> wyporium trade info.

    Each entry carries the item's id and name plus every key of its data
    dict prefixed with "wyporium".
    """
    trade_map = {}
    for item in db.get_wyporium_trades():
        entry = dict(id=item.id, name=item.name)
        for k, v in item.as_data().items():
            if k.startswith("wyporium"):
                entry[k] = v
        trade_map[item.id] = entry
    # (leftover debug print of trade_map removed)
    mkdirs_p(path)
    write_map_file(path, trade_map)
def horn_melody_json(db, path):
    """Write horn-melody index files (no per-melody files).

    Only ~143 rows total, so the indexes carry all the data.
    """
    # only 143 rows, just do index with all data
    melodies = db.get_horn_melodies()
    mkdirs_p(path)
    indexes = {}
    for melody in melodies:
        melody.update_indexes(indexes)
    write_index_file(path, indexes)
def main():
    """Entry point: emit JSON files for the selected (or all) entities."""
    args = parse_args()
    db = MHDB(game=args.game, include_item_components=True)
    if not args.outpath:
        args.outpath = os.path.join(_pathfix.web_path, "jsonapi")
    if args.entities:
        for entity in args.entities:
            if entity not in ENTITIES:
                print("Unknown entity: %s" % entity)
                sys.exit(1)
    else:
        # Copy so the later remove() can't mutate the module-level ENTITIES
        # (the original assigned the shared list directly).
        args.entities = list(ENTITIES)
    # The wyporium only exists in MH4U.  Guard the removal: the original
    # unconditional remove() raised ValueError when an explicit entity list
    # didn't include "wyporium".
    if db.game != "4u" and "wyporium" in args.entities:
        args.entities.remove("wyporium")
    for entity in args.entities:
        fn = globals()["%s_json" % entity]
        fn(db, os.path.join(args.outpath, entity))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import os
import json
import sys
import errno
import urllib.request, urllib.parse, urllib.error
import argparse
import _pathfix
from mhapi.db import MHDB
from mhapi import model
ENTITIES = """item weapon monster armor
skilltree skill decoration
horn_melody wyporium""".split()
def parse_args(argv=None):
parser = argparse.ArgumentParser(description=
"Create static JSON files that mimic a REST API for monster hunter data"
)
parser.add_argument("-o", "--outpath",
help="output base directory, defaults to web/jsonapi/"
" in project root")
parser.add_argument("-g", "--game", help="game, one of 4u, gu, gen")
parser.add_argument("entities", nargs="*",
help=", ".join(ENTITIES))
return parser.parse_args(argv)
def mkdirs_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
SAFE_CHARS = " &'+\""
def file_path(path, model_object, alt_name_field=None):
if alt_name_field:
key = urllib.parse.quote(model_object[alt_name_field].encode("utf8"),
SAFE_CHARS)
else:
key = str(model_object.id)
return os.path.join(path, "%s.json" % key)
def write_list_file(path, model_list):
list_path = os.path.join(path, "_list.json")
with open(list_path, "w") as f:
json.dump([o.as_list_data() for o in model_list],
f, cls=model.ModelJSONEncoder, indent=2)
def write_index_file(path, indexes):
for key, data in indexes.items():
index_path = os.path.join(path, "_index_%s.json" % key)
with open(index_path, "w") as f:
json.dump(data, f, cls=model.ModelJSONEncoder, indent=2)
def write_all_file(path, all_data):
all_path = os.path.join(path, "_all.json")
with open(all_path, "w") as f:
json.dump(all_data, f, cls=model.ModelJSONEncoder, indent=2)
def write_map_file(path, map_data):
map_path = os.path.join(path, "_map.json")
with open(map_path, "w") as f:
json.dump(map_data, f, cls=model.ModelJSONEncoder, indent=2)
def monster_json(db, path):
monsters = db.get_monsters()
mkdirs_p(path)
write_list_file(path, monsters)
indexes = {}
for m in monsters:
monster_path = file_path(path, m)
m.update_indexes(indexes)
data = m.as_data()
damage = db.get_monster_damage(m.id)
damage.set_breakable(db.get_monster_breaks(m.id))
data["damage"] = damage.as_data()
with open(monster_path, "w") as f:
json.dump(data, f, cls=model.ModelJSONEncoder, indent=2)
write_index_file(path, indexes)
def armor_json(db, path):
armors = db.get_armors()
mkdirs_p(path)
write_list_file(path, armors)
all_data = []
indexes = {}
for a in armors:
armor_path = file_path(path, a)
a.update_indexes(indexes)
skills = db.get_item_skills(a.id)
if not skills:
print("WARN: armor '%s' (%d) has no skills" % (a.name, a.id))
a.set_skills(skills)
all_data.append(a.as_data())
with open(armor_path, "w") as f:
a.json_dump(f)
write_index_file(path, indexes)
write_all_file(path, all_data)
def decoration_json(db, path):
decorations = db.get_decorations()
mkdirs_p(path)
write_list_file(path, decorations)
all_data = []
indexes = {}
for a in decorations:
decoration_path = file_path(path, a)
a.update_indexes(indexes)
skills = db.get_item_skills(a.id)
if not skills:
print("WARN: decoration '%s' (%d) has no skills" % (a.name, a.id))
a.set_skills(skills)
all_data.append(a.as_data())
with open(decoration_path, "w") as f:
a.json_dump(f)
write_index_file(path, indexes)
write_all_file(path, all_data)
def skill_json(db, path):
skills = db.get_skills()
mkdirs_p(path)
write_list_file(path, skills)
indexes = {}
for s in skills:
s.update_indexes(indexes)
skill_path = file_path(path, s)
with open(skill_path, "w") as f:
s.json_dump(f)
write_index_file(path, indexes)
def skilltree_json(db, path):
skill_trees = db.get_skill_trees()
mkdirs_p(path)
write_list_file(path, skill_trees)
all_data = {}
for st in skill_trees:
ds = db.get_decorations_by_skills([st.id])
for d in ds:
d.set_skills(db.get_item_skills(d.id))
st.set_decorations(ds)
skilltree_path = file_path(path, st)
all_data[st.name] = st
with open(skilltree_path, "w") as f:
st.json_dump(f)
write_all_file(path, all_data)
def weapon_json(db, path):
weapons = db.get_weapons()
mkdirs_p(path)
write_list_file(path, weapons)
item_stars = model.ItemStars(db)
all_data = []
melodies = {}
indexes = {}
for w in weapons:
weapon_path = file_path(path, w)
w.update_indexes(indexes)
data = w.as_data()
child_weapons = db.get_weapons_by_parent(w.id)
data["children"] = [dict(id=c.id, name=c.name) for c in child_weapons]
if w.horn_notes:
if w.horn_notes not in melodies:
melodies[w.horn_notes] = [
dict(song=melody.song, effect1=melody.effect1)
for melody in db.get_horn_melodies_by_notes(w.horn_notes)
]
data["horn_melodies"] = melodies[w.horn_notes]
stars = item_stars.get_weapon_stars(w)
data["village_stars"] = stars["Village"]
data["guild_stars"] = stars["Guild"]
data["permit_stars"] = stars["Permit"]
data["arena_stars"] = stars["Arena"]
all_data.append(data)
with open(weapon_path, "w") as f:
json.dump(data, f, cls=model.ModelJSONEncoder, indent=2)
tree_path = os.path.join(path, "%s_tree.json" % w.id)
costs = model.get_costs(db, w)
for cost in costs:
cost["path"] = [dict(name=w.name, id=w.id)
for w in cost["path"]]
with open(tree_path, "w") as f:
json.dump(costs, f, cls=model.ModelJSONEncoder, indent=2)
write_index_file(path, indexes)
write_all_file(path, all_data)
def item_json(db, path):
if db.game == "4u":
items = db.get_items(wyporium=True)
else:
items = db.get_items()
mkdirs_p(path)
write_list_file(path, items)
indexes = {}
for item in items:
item_path = file_path(path, item)
item.update_indexes(indexes)
with open(item_path, "w") as f:
item.json_dump(f)
write_index_file(path, indexes)
def wyporium_json(db, path):
trade_map = {}
for item in db.get_wyporium_trades():
trade_map[item.id] = dict(id=item.id,
name=item.name)
all_data = item.as_data()
for k in all_data.keys():
if not k.startswith("wyporium"):
continue
trade_map[item.id][k] = all_data[k]
print(trade_map)
mkdirs_p(path)
write_map_file(path, trade_map)
def horn_melody_json(db, path):
# only 143 rows, just do index with all data
melodies = db.get_horn_melodies()
mkdirs_p(path)
indexes = {}
for melody in melodies:
melody.update_indexes(indexes)
write_index_file(path, indexes)
def main():
args = parse_args()
db = MHDB(game=args.game, include_item_components=True)
if not args.outpath:
args.outpath = os.path.join(_pathfix.web_path, "jsonapi")
if args.entities:
for entity in args.entities:
if entity not in ENTITIES:
print("Unknown entity: %s" % entity)
sys.exit(1)
else:
args.entities = ENTITIES
if db.game != "4u":
args.entities.remove("wyporium")
for entity in args.entities:
fn = globals()["%s_json" % entity]
fn(db, os.path.join(args.outpath, entity))
if __name__ == '__main__':
main()
|
en
| 0.39181
|
#!/usr/bin/env python3 item weapon monster armor skilltree skill decoration horn_melody wyporium # only 143 rows, just do index with all data
| 2.680153
| 3
|
test_pe_core/test_compute_unit.py
|
zbelateche/ee272_cgra
| 1
|
6629599
|
import glob
import pytest
import pe
import fault
import os
from random import randint
import inspect
from pe_core import pe_core_genesis2
import glob
import itertools
import magma as m
# Generate the PE
pe_core = pe_core_genesis2.pe_core_wrapper.generator()()
pe_compute_unit = m.DefineFromVerilogFile(
'genesis_verif/test_pe_comp_unq1.sv')[0]
_tester = fault.Tester(pe_compute_unit)
_tester.compile(target='verilator', directory="test_pe_core/build",
include_directories=["../../genesis_verif"],
magma_output="verilog",
flags=['-Wno-fatal'])
# Bug fix: `scope="module"` was written as a function *parameter*, where
# pytest treats it as a fixture request, not a scope.  The scope belongs in
# the decorator.
@pytest.fixture(scope="module")
def tester():
    """Module-scoped fault.Tester wrapping the compiled compute unit."""
    return _tester
def teardown_module():
    # Cleanup PE genesis2 collateral
    # Remove generated genesis_* directories and tool droppings left in cwd.
    for item in glob.glob('genesis_*'):
        os.system(f"rm -r {item}")
    os.system(f"rm PEtest_pe")
    os.system(f"rm PECOMPtest_pe_comp_unq1")
    os.system(f"rm REGMODEtest_opt_reg")
    os.system(f"rm REGMODEtest_opt_reg_file")
# Partition the functional-model ops exported by `pe` into those that take a
# `signed` parameter and those that don't.
ops = []
signed_ops = []
for _op_name, _op_fn in inspect.getmembers(pe, inspect.isfunction):
    bucket = (signed_ops
              if "signed" in inspect.signature(_op_fn).parameters
              else ops)
    bucket.append(_op_name)
def pytest_generate_tests(metafunc):
    """Parametrize tests over the op lists collected from the `pe` module."""
    if 'op' in metafunc.fixturenames:
        metafunc.parametrize("op", ops)
    if 'signed_op' in metafunc.fixturenames:
        # Signed ops are additionally run in both signed and unsigned modes.
        metafunc.parametrize("signed_op", signed_ops)
        metafunc.parametrize("signed", [True, False])
    if 'const_value' in metafunc.fixturenames:
        metafunc.parametrize("const_value", range(16))
    if 'strategy' in metafunc.fixturenames:
        metafunc.parametrize("strategy", ["complete", "random"])
build_dir = "test_pe_core/build"
def run_test(functional_model, strategy, tester, signed):
    """Drive the RTL compute unit against *functional_model*.

    strategy "complete" sweeps (op_a, op_b, op_d_p) triples; "random"
    samples 256 random 4-bit operand triples.
    NOTE(review): "complete" iterates operand values 0..N-1 (N=4), not the
    full 0..2**N-1 range used by "random" -- confirm that is intentional.
    """
    tester.clear()
    pe_compute_unit = tester.circuit
    N = 4
    _iter = None
    if strategy == "complete":
        _iter = itertools.product(range(0, N), range(0, N), range(0, 2))
    elif strategy == "random":
        n = 256
        _iter = [
            (randint(0, (1 << N) - 1), randint(0, (1 << N) - 1), randint(0, 1))
            for _ in range(n)
        ]
    tester.poke(pe_compute_unit.op_code, functional_model.instruction)
    for op_a, op_b, op_d_p in _iter:
        tester.poke(pe_compute_unit.op_a, op_a)
        tester.poke(pe_compute_unit.op_b, op_b)
        tester.poke(pe_compute_unit.op_d_p, op_d_p)
        tester.eval()
        # Expect the RTL outputs to match the functional model's ALU result.
        res, res_p = functional_model._alu(op_a=op_a, op_b=op_b, op_d_p=op_d_p)
        tester.expect(pe_compute_unit.res, res)
        tester.expect(pe_compute_unit.res_p, res_p)
        tester.eval()
    tester.run(target='verilator')
def test_op(op, strategy, tester):
    """Verify an unsigned op against its functional model."""
    functional_model = getattr(pe, op)()
    run_test(functional_model, strategy, tester, False)
def test_signed_op(signed_op, signed, strategy, tester):
    """Verify a signed-capable op in both signed and unsigned modes."""
    functional_model = getattr(pe, signed_op)(signed)
    run_test(functional_model, strategy, tester, signed)
|
import glob
import pytest
import pe
import fault
import os
from random import randint
import inspect
from pe_core import pe_core_genesis2
import glob
import itertools
import magma as m
# Generate the PE
pe_core = pe_core_genesis2.pe_core_wrapper.generator()()
pe_compute_unit = m.DefineFromVerilogFile(
'genesis_verif/test_pe_comp_unq1.sv')[0]
_tester = fault.Tester(pe_compute_unit)
_tester.compile(target='verilator', directory="test_pe_core/build",
include_directories=["../../genesis_verif"],
magma_output="verilog",
flags=['-Wno-fatal'])
@pytest.fixture
def tester(scope="module"):
return _tester
def teardown_module():
# Cleanup PE genesis2 collateral
for item in glob.glob('genesis_*'):
os.system(f"rm -r {item}")
os.system(f"rm PEtest_pe")
os.system(f"rm PECOMPtest_pe_comp_unq1")
os.system(f"rm REGMODEtest_opt_reg")
os.system(f"rm REGMODEtest_opt_reg_file")
ops, signed_ops = [], []
for name, op in inspect.getmembers(pe, inspect.isfunction):
signature = inspect.signature(op)
if "signed" in signature.parameters:
signed_ops.append(name)
else:
ops.append(name)
def pytest_generate_tests(metafunc):
if 'op' in metafunc.fixturenames:
metafunc.parametrize("op", ops)
if 'signed_op' in metafunc.fixturenames:
metafunc.parametrize("signed_op", signed_ops)
metafunc.parametrize("signed", [True, False])
if 'const_value' in metafunc.fixturenames:
metafunc.parametrize("const_value", range(16))
if 'strategy' in metafunc.fixturenames:
metafunc.parametrize("strategy", ["complete", "random"])
build_dir = "test_pe_core/build"
def run_test(functional_model, strategy, tester, signed):
tester.clear()
pe_compute_unit = tester.circuit
N = 4
_iter = None
if strategy == "complete":
_iter = itertools.product(range(0, N), range(0, N), range(0, 2))
elif strategy == "random":
n = 256
_iter = [
(randint(0, (1 << N) - 1), randint(0, (1 << N) - 1), randint(0, 1))
for _ in range(n)
]
tester.poke(pe_compute_unit.op_code, functional_model.instruction)
for op_a, op_b, op_d_p in _iter:
tester.poke(pe_compute_unit.op_a, op_a)
tester.poke(pe_compute_unit.op_b, op_b)
tester.poke(pe_compute_unit.op_d_p, op_d_p)
tester.eval()
res, res_p = functional_model._alu(op_a=op_a, op_b=op_b, op_d_p=op_d_p)
tester.expect(pe_compute_unit.res, res)
tester.expect(pe_compute_unit.res_p, res_p)
tester.eval()
tester.run(target='verilator')
def test_op(op, strategy, tester):
functional_model = getattr(pe, op)()
run_test(functional_model, strategy, tester, False)
def test_signed_op(signed_op, signed, strategy, tester):
functional_model = getattr(pe, signed_op)(signed)
run_test(functional_model, strategy, tester, signed)
|
en
| 0.327374
|
# Generate the PE # Cleanup PE genesis2 collateral
| 1.87233
| 2
|
src/cfnlint/rules/resources/properties/VpcId.py
|
LukasMusebrink/cfn-python-lint
| 0
|
6629600
|
<filename>src/cfnlint/rules/resources/properties/VpcId.py
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class VpcId(CloudFormationLintRule):
    """Check if VPC Parameters are of correct type"""
    id = 'W2505'
    shortdesc = 'Check if VpcID Parameters have the correct type'
    # Typo fix: "innapropriate" -> "inappropriate".
    description = 'See if there are any refs for VpcId to a parameter ' + \
                  'of inappropriate type. Appropriate Types are ' + \
                  '[AWS::EC2::VPC::Id, AWS::SSM::Parameter::Value<AWS::EC2::VPC::Id>]'
    tags = ['base', 'parameters', 'vpcid']

    def match(self, cfn):
        """Check CloudFormation VpcId Parameters.

        Returns a RuleMatch for every VpcId that refs a parameter of the
        wrong type, and for every VpcId whose value is a multi-key map.
        """
        matches = []
        # All 'VpcId' keys anywhere under the Resources section.
        trees = [t for t in cfn.search_deep_keys('VpcId')
                 if t[0] == 'Resources']
        parameters = cfn.get_parameter_names()
        allowed_types = [
            'AWS::EC2::VPC::Id',
            'AWS::SSM::Parameter::Value<AWS::EC2::VPC::Id>'
        ]
        fix_param_types = set()
        for tree in trees:
            obj = tree[-1]
            if not isinstance(obj, dict):
                continue
            if len(obj) == 1:
                # Single-key map: flag only {'Ref': <param>} of a bad type.
                if 'Ref' in obj:
                    paramname = obj['Ref']
                    if paramname in parameters:
                        param = cfn.template['Parameters'][paramname]
                        if 'Type' in param:
                            if param['Type'] not in allowed_types:
                                fix_param_types.add(paramname)
            else:
                # Typo fix in message: "Innappropriate" -> "Inappropriate".
                message = 'Inappropriate map found for vpcid on %s' % (
                    '/'.join(map(str, tree[:-1])))
                matches.append(RuleMatch(tree[:-1], message))
        # One match per misused parameter, reported at its declaration.
        for paramname in fix_param_types:
            message = 'Parameter %s should be of type %s' % (
                paramname, ', '.join(map(str, allowed_types)))
            matches.append(RuleMatch(['Parameters', paramname], message))
        return matches
|
<filename>src/cfnlint/rules/resources/properties/VpcId.py
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class VpcId(CloudFormationLintRule):
"""Check if VPC Parameters are of correct type"""
id = 'W2505'
shortdesc = 'Check if VpcID Parameters have the correct type'
description = 'See if there are any refs for VpcId to a parameter ' + \
'of innapropriate type. Appropriate Types are ' + \
'[AWS::EC2::VPC::Id, AWS::SSM::Parameter::Value<AWS::EC2::VPC::Id>]'
tags = ['base', 'parameters', 'vpcid']
def match(self, cfn):
"""Check CloudFormation VpcId Parameters"""
matches = list()
# Build the list of refs
trees = cfn.search_deep_keys('VpcId')
parameters = cfn.get_parameter_names()
allowed_types = [
'AWS::EC2::VPC::Id',
'AWS::SSM::Parameter::Value<AWS::EC2::VPC::Id>'
]
fix_param_types = set()
trees = [x for x in trees if x[0] == 'Resources']
for tree in trees:
obj = tree[-1]
if isinstance(obj, dict):
if len(obj) == 1:
for key in obj:
if key == 'Ref':
paramname = obj[key]
if paramname in parameters:
param = cfn.template['Parameters'][paramname]
if 'Type' in param:
paramtype = param['Type']
if paramtype not in allowed_types:
fix_param_types.add(paramname)
else:
message = 'Innappropriate map found for vpcid on %s' % (
'/'.join(map(str, tree[:-1])))
matches.append(RuleMatch(tree[:-1], message))
for paramname in fix_param_types:
message = 'Parameter %s should be of type %s' % (paramname, ', '.join(map(str, allowed_types)))
tree = ['Parameters', paramname]
matches.append(RuleMatch(tree, message))
return matches
|
en
| 0.73976
|
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Check if VPC Parameters are of correct type Check CloudFormation VpcId Parameters # Build the list of refs
| 2.169868
| 2
|
var/spack/repos/builtin/packages/py-spacy-models-en-core-web-sm/package.py
|
jeanbez/spack
| 0
|
6629601
|
<reponame>jeanbez/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PySpacyModelsEnCoreWebSm(PythonPackage):
    """English multi-task CNN trained on OntoNotes. Assigns context-specific
    token vectors, POS tags, dependency parse and named entities."""
    homepage = "https://spacy.io/models/en#en_core_web_sm"
    url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.3.1/en_core_web_sm-2.3.1.tar.gz"
    version('2.3.1', sha256='06c80936324012d1223291d2af41a5229e746dc2dee8fe31a532666ee3d18aaa')
    version('2.2.5', sha256='60b69065c97fd2e4972c33300205e1dead3501d2e0bfd6a182c3a033e337caee')
    depends_on('py-setuptools', type='build')
    # Each model release pairs with a specific spaCy version range, hence the
    # matched when= constraints.
    depends_on('py-spacy@2.2.2:', type=('build', 'run'), when='@:2.2.5')
    depends_on('py-spacy@2.3.0:2.3', type=('build', 'run'), when='@2.3.1:')
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PySpacyModelsEnCoreWebSm(PythonPackage):
"""English multi-task CNN trained on OntoNotes. Assigns context-specific
token vectors, POS tags, dependency parse and named entities."""
homepage = "https://spacy.io/models/en#en_core_web_sm"
url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.3.1/en_core_web_sm-2.3.1.tar.gz"
version('2.3.1', sha256='06c80936324012d1223291d2af41a5229e746dc2dee8fe31a532666ee3d18aaa')
version('2.2.5', sha256='60b69065c97fd2e4972c33300205e1dead3501d2e0bfd6a182c3a033e337caee')
depends_on('py-setuptools', type='build')
depends_on('py-spacy@2.2.2:', type=('build', 'run'), when='@:2.2.5')
depends_on('py-spacy@2.3.0:2.3', type=('build', 'run'), when='@2.3.1:')
|
en
| 0.656869
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) English multi-task CNN trained on OntoNotes. Assigns context-specific token vectors, POS tags, dependency parse and named entities. #en_core_web_sm"
| 1.44223
| 1
|
mitmproxy/addons/proxyserver.py
|
chenzhenguo/mitmproxy
| 2
|
6629602
|
import asyncio
import warnings
from typing import Optional
from mitmproxy import controller, ctx, flow, log, master, options, platform
from mitmproxy.flow import Error
from mitmproxy.proxy import commands
from mitmproxy.proxy import server
from mitmproxy.utils import asyncio_utils, human
class AsyncReply(controller.Reply):
    """
    Reply variant that is safe to await from a coroutine.

    controller.Reply.q.get() is blocking, which we definitely want to avoid in
    a coroutine. This stub adds a .done asyncio.Event() that can be used
    instead.
    """

    def __init__(self, *args):
        # Set once the reply has been committed; awaited by handle_hook().
        self.done = asyncio.Event()
        # Remember the loop so commit() can signal it from another thread.
        self.loop = asyncio.get_event_loop()
        super().__init__(*args)

    def commit(self):
        super().commit()
        try:
            # commit() may run off the event-loop thread, hence threadsafe.
            self.loop.call_soon_threadsafe(self.done.set)
        except RuntimeError:  # pragma: no cover
            pass  # event loop may already be closed.

    def kill(self, force=False):  # pragma: no cover
        warnings.warn("reply.kill() is deprecated, set the error attribute instead.", DeprecationWarning, stacklevel=2)
        self.obj.error = flow.Error(Error.KILLED_MESSAGE)
class ProxyConnectionHandler(server.StreamConnectionHandler):
    """Per-connection handler that dispatches proxy-core hooks to the addon chain."""
    master: master.Master

    def __init__(self, master, r, w, options):
        self.master = master
        super().__init__(r, w, options)
        # Prefix every log line with the client address for correlation.
        self.log_prefix = f"{human.format_address(self.client.peername)}: "

    async def handle_hook(self, hook: commands.StartHook) -> None:
        # Disarm the connection timeout while addons process the hook, which
        # can legitimately take longer than the idle timeout.
        with self.timeout_watchdog.disarm():
            # We currently only support single-argument hooks.
            data, = hook.args()
            data.reply = AsyncReply(data)
            await self.master.addons.handle_lifecycle(hook)
            # Block until some addon (or the default handling) commits the reply.
            await data.reply.done.wait()
            data.reply = None

    def log(self, message: str, level: str = "info") -> None:
        x = log.LogEntry(self.log_prefix + message, level)
        x.reply = controller.DummyReply()  # type: ignore
        # Fire-and-forget: log entries are delivered to addons asynchronously.
        asyncio_utils.create_task(
            self.master.addons.handle_lifecycle(log.AddLogHook(x)),
            name="ProxyConnectionHandler.log"
        )
class Proxyserver:
    """
    This addon runs the actual proxy server.
    """
    server: Optional[asyncio.AbstractServer]
    listen_port: int
    master: master.Master
    options: options.Options
    is_running: bool

    def __init__(self):
        # Serializes server restarts triggered by concurrent option changes.
        self._lock = asyncio.Lock()
        self.server = None
        self.is_running = False

    def load(self, loader):
        # Register this addon's options with mitmproxy's option loader.
        loader.add_option(
            "connection_strategy", str, "lazy",
            "Determine when server connections should be established.",
            choices=("eager", "lazy")
        )
        loader.add_option(
            "proxy_debug", bool, False,
            "Enable debug logs in the proxy core.",
        )

    def running(self):
        self.master = ctx.master
        self.options = ctx.options
        self.is_running = True
        # Kick off the initial server start now that options are available.
        self.configure(["listen_port"])

    def configure(self, updated):
        # Ignore option changes until the proxy has fully started.
        if not self.is_running:
            return
        if "mode" in updated and ctx.options.mode == "transparent":  # pragma: no cover
            platform.init_transparent_mode()
        if any(x in updated for x in ["server", "listen_host", "listen_port"]):
            # Restart the listening socket with the new settings.
            asyncio.create_task(self.refresh_server())

    async def refresh_server(self):
        # Tear down any existing server, then start a new one if enabled.
        async with self._lock:
            if self.server:
                await self.shutdown_server()
                self.server = None
            if ctx.options.server:
                if not ctx.master.addons.get("nextlayer"):
                    ctx.log.warn("Warning: Running proxyserver without nextlayer addon!")
                self.server = await asyncio.start_server(
                    self.handle_connection,
                    self.options.listen_host,
                    self.options.listen_port,
                )
                addrs = {f"http://{human.format_address(s.getsockname())}" for s in self.server.sockets}
                ctx.log.info(f"Proxy server listening at {' and '.join(addrs)}")

    async def shutdown_server(self):
        ctx.log.info("Stopping server...")
        self.server.close()
        await self.server.wait_closed()
        self.server = None

    async def handle_connection(self, r, w):
        # Name the task so it shows up usefully in asyncio debugging tools.
        asyncio_utils.set_task_debug_info(
            asyncio.current_task(),
            name=f"Proxyserver.handle_connection",
            client=w.get_extra_info('peername'),
        )
        handler = ProxyConnectionHandler(
            self.master,
            r,
            w,
            self.options
        )
        await handler.handle_client()
|
import asyncio
import warnings
from typing import Optional
from mitmproxy import controller, ctx, flow, log, master, options, platform
from mitmproxy.flow import Error
from mitmproxy.proxy import commands
from mitmproxy.proxy import server
from mitmproxy.utils import asyncio_utils, human
class AsyncReply(controller.Reply):
    """
    controller.Reply.q.get() is blocking, which we definitely want to avoid in a coroutine.
    This stub adds a .done asyncio.Event() that can be used instead.
    """
    def __init__(self, *args):
        # Set once the reply has been committed; coroutines await this.
        self.done = asyncio.Event()
        # Remember the loop so commit() can signal it from another thread.
        self.loop = asyncio.get_event_loop()
        super().__init__(*args)

    def commit(self):
        super().commit()
        try:
            # commit() may run off the event-loop thread, hence threadsafe.
            self.loop.call_soon_threadsafe(lambda: self.done.set())
        except RuntimeError:  # pragma: no cover
            pass  # event loop may already be closed.

    def kill(self, force=False):  # pragma: no cover
        warnings.warn("reply.kill() is deprecated, set the error attribute instead.", DeprecationWarning, stacklevel=2)
        self.obj.error = flow.Error(Error.KILLED_MESSAGE)
class ProxyConnectionHandler(server.StreamConnectionHandler):
    """Per-connection handler that dispatches proxy-core hooks to the addon chain."""
    master: master.Master

    def __init__(self, master, r, w, options):
        self.master = master
        super().__init__(r, w, options)
        # Prefix every log line with the client address for correlation.
        self.log_prefix = f"{human.format_address(self.client.peername)}: "

    async def handle_hook(self, hook: commands.StartHook) -> None:
        # Disarm the connection timeout while addons process the hook, which
        # can legitimately take longer than the idle timeout.
        with self.timeout_watchdog.disarm():
            # We currently only support single-argument hooks.
            data, = hook.args()
            data.reply = AsyncReply(data)
            await self.master.addons.handle_lifecycle(hook)
            # Block until some addon (or the default handling) commits the reply.
            await data.reply.done.wait()
            data.reply = None

    def log(self, message: str, level: str = "info") -> None:
        x = log.LogEntry(self.log_prefix + message, level)
        x.reply = controller.DummyReply()  # type: ignore
        # Fire-and-forget: log entries are delivered to addons asynchronously.
        asyncio_utils.create_task(
            self.master.addons.handle_lifecycle(log.AddLogHook(x)),
            name="ProxyConnectionHandler.log"
        )
class Proxyserver:
    """
    This addon runs the actual proxy server.
    """
    server: Optional[asyncio.AbstractServer]
    listen_port: int
    master: master.Master
    options: options.Options
    is_running: bool

    def __init__(self):
        # Serializes server restarts triggered by concurrent option changes.
        self._lock = asyncio.Lock()
        self.server = None
        self.is_running = False

    def load(self, loader):
        # Register this addon's options with mitmproxy's option loader.
        loader.add_option(
            "connection_strategy", str, "lazy",
            "Determine when server connections should be established.",
            choices=("eager", "lazy")
        )
        loader.add_option(
            "proxy_debug", bool, False,
            "Enable debug logs in the proxy core.",
        )

    def running(self):
        self.master = ctx.master
        self.options = ctx.options
        self.is_running = True
        # Kick off the initial server start now that options are available.
        self.configure(["listen_port"])

    def configure(self, updated):
        # Ignore option changes until the proxy has fully started.
        if not self.is_running:
            return
        if "mode" in updated and ctx.options.mode == "transparent":  # pragma: no cover
            platform.init_transparent_mode()
        if any(x in updated for x in ["server", "listen_host", "listen_port"]):
            # Restart the listening socket with the new settings.
            asyncio.create_task(self.refresh_server())

    async def refresh_server(self):
        # Tear down any existing server, then start a new one if enabled.
        async with self._lock:
            if self.server:
                await self.shutdown_server()
                self.server = None
            if ctx.options.server:
                if not ctx.master.addons.get("nextlayer"):
                    ctx.log.warn("Warning: Running proxyserver without nextlayer addon!")
                self.server = await asyncio.start_server(
                    self.handle_connection,
                    self.options.listen_host,
                    self.options.listen_port,
                )
                addrs = {f"http://{human.format_address(s.getsockname())}" for s in self.server.sockets}
                ctx.log.info(f"Proxy server listening at {' and '.join(addrs)}")

    async def shutdown_server(self):
        ctx.log.info("Stopping server...")
        self.server.close()
        await self.server.wait_closed()
        self.server = None

    async def handle_connection(self, r, w):
        # Name the task so it shows up usefully in asyncio debugging tools.
        asyncio_utils.set_task_debug_info(
            asyncio.current_task(),
            name=f"Proxyserver.handle_connection",
            client=w.get_extra_info('peername'),
        )
        handler = ProxyConnectionHandler(
            self.master,
            r,
            w,
            self.options
        )
        await handler.handle_client()
|
en
| 0.794092
|
controller.Reply.q.get() is blocking, which we definitely want to avoid in a coroutine. This stub adds a .done asyncio.Event() that can be used instead. # pragma: no cover # event loop may already be closed. # pragma: no cover # We currently only support single-argument hooks. # type: ignore This addon runs the actual proxy server. # pragma: no cover
| 2.055914
| 2
|
test.py
|
Sanjana7395/face_segmentation
| 22
|
6629603
|
import os
import numpy as np
import cv2
import argparse
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras import Input
from model import u_net
from preprocessing.preprocess_utils import display
from experiments import lip_hair_color
def make_confusion_matrix(cf, categories,
                          group_names=None,
                          count=True,
                          percent=True,
                          color_bar=True,
                          xy_ticks=True,
                          xy_plot_labels=True,
                          sum_stats=True,
                          fig_size=None,
                          c_map='Blues',
                          title=None):
    """ Render *cf* as an annotated seaborn heat map.

    :param cf: confusion matrix (numpy array).
    :param categories: class labels for the axes.
    :param group_names: optional per-cell labels (must have cf.size entries).
    :param count: show the raw count in each cell.
    :param percent: show each cell as a percentage of its row total.
    :param color_bar: show the heat-map colour bar.
    :param xy_ticks: show tick labels on both axes.
    :param xy_plot_labels: show the True/Predicted axis titles.
    :param sum_stats: append overall accuracy under the x axis.
    :param fig_size: figure size tuple (matplotlib default if None).
    :param c_map: colour scheme name.
    :param title: optional plot title.
    """
    empty = [''] * cf.size
    # Per-cell label fragments; each falls back to the empty list when disabled.
    names = (["{}\n".format(n) for n in group_names]
             if group_names and len(group_names) == cf.size else empty)
    counts = ["{0:0.0f}\n".format(v) for v in cf.flatten()] if count else empty
    if percent:
        # Row-normalised share of each cell, flattened in row-major order.
        percents = ["{0:.2%}".format(cf[r][c] / cf[r].sum())
                    for r in range(np.size(cf, 0))
                    for c in range(np.size(cf, 1))]
    else:
        percents = empty
    box_labels = np.asarray(
        [(a + b + c).strip() for a, b, c in zip(names, counts, percents)]
    ).reshape(cf.shape[0], cf.shape[1])
    # Accuracy = trace / total observations.
    stats_text = ("\n\nAccuracy={0:0.2%}".format(np.trace(cf) / float(np.sum(cf)))
                  if sum_stats else "")
    if fig_size is None:
        # Fall back to matplotlib's configured default size.
        fig_size = plt.rcParams.get('figure.figsize')
    if not xy_ticks:
        # seaborn hides tick labels when given False.
        categories = False
    plt.figure(figsize=fig_size)
    sns.heatmap(cf, annot=box_labels, fmt="",
                cmap=c_map, cbar=color_bar,
                xticklabels=categories,
                yticklabels=categories)
    if xy_plot_labels:
        plt.ylabel('True label')
        plt.xlabel('Predicted label' + stats_text)
    else:
        plt.xlabel(stats_text)
    if title:
        plt.title(title)
def plot_confusion_matrix(predictions, masks, path):
    """ Visualize confusion matrix.
    :param predictions: predicted output of the model.
    :type predictions: array
    :param masks: true masks of the images.
    :type masks: array
    :param path: directory to store the output
    :type path: str
    """
    print('[INFO] Plotting confusion matrix...')
    # Flatten to per-pixel label vectors before computing the matrix.
    corr = confusion_matrix(masks.ravel(), predictions.ravel())
    make_confusion_matrix(corr,
                          categories=['bg', 'skin', 'nose', 'eye_g', 'l_eye',
                                      'r_eye', 'l_brow', 'r_brow', 'l_ear',
                                      'r_ear', 'mouth', 'u_lip',
                                      'l_lip', 'hair', 'hat', 'ear_r',
                                      'neck_l', 'neck', 'cloth'],
                          count=True,
                          percent=False,
                          color_bar=False,
                          xy_ticks=True,
                          xy_plot_labels=True,
                          sum_stats=True,
                          fig_size=(20, 18),
                          c_map='coolwarm',
                          title='Confusion matrix')
    # error correction - cropped heat map: widen y-limits so the first and
    # last heat-map rows are not clipped.
    b, t = plt.ylim()  # discover the values for bottom and top
    b += 0.5  # Add 0.5 to the bottom
    t -= 0.5  # Subtract 0.5 from the top
    plt.ylim(b, t)  # update the ylim(bottom, top) values
    plt.savefig(os.path.join(path, 'confusion_matrix.png'))
    print('[ACTION] See results/visualization/confusion_matrix.png')
def plot_mask(prediction, mask, norm_image):
    """ Plot segmentation mask for the given image.
    :param prediction: predicted output of the model.
    :type prediction: array
    :param mask: true masks of the images.
    :type mask: array
    :param norm_image: original image.
    :type norm_image: array
    """
    # Undo the 0-1 normalisation back to 8-bit pixel values.
    image = (norm_image * 255.).astype(np.uint8)
    im_base = np.zeros((256, 256, 3), dtype=np.uint8)
    # Paint every pixel of class idx with its colour from the module-level
    # color_list (channel order presumably BGR since cv2 is used -- TODO confirm).
    for idx, color in enumerate(color_list):
        im_base[prediction == idx] = color
    # Blend the colour mask on top of the original image (in place into im_base).
    cv2.addWeighted(im_base, 0.8, image, 1, 0, im_base)
    display([image, mask, im_base],
            ['Original image', 'True mask', 'Predicted mask'],
            'predict')
def test(image, masks, action, color='red'):
    """ Used to plot either confusion matrix or predicted mask or apply makeup.
    :param image: original image.
    :type image: bytearray
    :param masks: true segmentation masks.
    :type masks: array
    :param action: user input specifying confusion matrix/mask
                   prediction/applying makeup.
    :type action: str
    :param color: if action is applying makeup, then color to apply.
                  Defaults to red.
    :type color: str
    """
    # Rebuild the U-Net and restore the trained weights from MODEL_DIR.
    input_img = Input(shape=(256, 256, 3), name='img')
    model = u_net.get_u_net(input_img, num_classes=19)
    model.load_weights(os.path.join(MODEL_DIR, 'u_net.h5'))
    print('[INFO] Predicting ...')
    predictions = model.predict(image)
    # Per-pixel class id = argmax over the 19 class channels.
    predictions = np.argmax(predictions, axis=-1)
    # Class ids of the face parts that can be recoloured.
    table = {
        'hair': 13,
        'upper_lip': 11,
        'lower_lip': 12
    }
    # Makeup colours (channel order presumably matches the images handled by
    # cv2 -- TODO confirm).
    colors = {
        'red': [212, 34, 34],
        'purple': [128, 51, 125],
        'pink': [247, 32, 125]
    }
    # Redirect to the function of specified action.  `sample` is a
    # module-level index chosen in the __main__ block.
    if action == 'confusion_matrix':
        print('[INFO] Plotting confusion matrix ...')
        plot_confusion_matrix(predictions, masks, VISUALIZATION_DIR)
    elif action == 'mask':
        print('[INFO] Plotting segmentation mask ...')
        plot_mask(predictions[sample], masks[sample], image[sample])
    elif action == 'hair_color':
        print('[INFO] Applying hair color ...')
        parts = [table['hair']]
        changed = lip_hair_color.color_change(image[sample],
                                              predictions[sample],
                                              parts, colors[color])
        display([image[sample], changed], 'hair')
    elif action == "lip_color":
        print('[INFO] Applying lip color ...')
        parts = [table['upper_lip'], table['lower_lip']]
        changed = lip_hair_color.color_change(image[sample],
                                              predictions[sample],
                                              parts, colors[color])
        display([image[sample], changed], 'lip')
def main():
    """ Define user arguments, load the cached test set and dispatch to test().
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--visualize", type=str, required=True,
                    choices=("confusion_matrix", "mask",
                             "hair_color", "lip_color"),
                    help="type of model")
    ap.add_argument("-c", "--color", type=str,
                    choices=("red", "pink", "purple"),
                    help="color to apply")
    args = vars(ap.parse_args())
    # One-off preprocessing that produced the cached .npy files loaded below:
    # print('[INFO] Getting test data...')
    # test_data = get_test()
    # imgs = []
    # masks = []
    # for img, label in test_data:
    #     for i in img:
    #         i = np.array(i, dtype='float32')
    #         imgs.append(i)
    #     for j in label:
    #         j = np.array(j, dtype='float32')
    #         masks.append(j)
    # images = np.array(imgs)
    # masks = np.array(masks)
    # np.save('data/test_images.npy', images)
    # np.save('data/test_mask.npy', masks)
    # Load test images
    images = np.load('data/test_images.npy')
    masks = np.load('data/test_mask.npy')
    test(images, masks, args["visualize"], args["color"])
if __name__ == '__main__':
    # Output and model locations used by the functions above.
    VISUALIZATION_DIR = 'results/visualization/'
    MODEL_DIR = 'results/models/'
    # One colour per segmentation class id (indices match plot_mask);
    # channel order presumably BGR given the cv2 pipeline -- TODO confirm.
    color_list = [[0, 0, 0], [204, 0, 0], [255, 140, 26],
                  [204, 204, 0], [51, 51, 255], [204, 0, 204],
                  [0, 255, 255], [255, 204, 204], [102, 51, 0],
                  [255, 0, 0], [102, 204, 0], [255, 255, 0],
                  [0, 0, 153], [0, 0, 204], [255, 51, 153],
                  [0, 204, 204], [0, 51, 0], [255, 153, 51],
                  [0, 204, 0]]
    # Index of the test image used for single-sample visualisations.
    sample = 4
    main()
|
import os
import numpy as np
import cv2
import argparse
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras import Input
from model import u_net
from preprocessing.preprocess_utils import display
from experiments import lip_hair_color
def make_confusion_matrix(cf, categories,
                          group_names=None,
                          count=True,
                          percent=True,
                          color_bar=True,
                          xy_ticks=True,
                          xy_plot_labels=True,
                          sum_stats=True,
                          fig_size=None,
                          c_map='Blues',
                          title=None):
    """ Render *cf* as an annotated seaborn heat map.

    :param cf: confusion matrix (numpy array).
    :param categories: class labels for the axes.
    :param group_names: optional per-cell labels (must have cf.size entries).
    :param count: show the raw count in each cell.
    :param percent: show each cell as a percentage of its row total.
    :param color_bar: show the heat-map colour bar.
    :param xy_ticks: show tick labels on both axes.
    :param xy_plot_labels: show the True/Predicted axis titles.
    :param sum_stats: append overall accuracy under the x axis.
    :param fig_size: figure size tuple (matplotlib default if None).
    :param c_map: colour scheme name.
    :param title: optional plot title.
    """
    empty = [''] * cf.size
    # Per-cell label fragments; each falls back to the empty list when disabled.
    names = (["{}\n".format(n) for n in group_names]
             if group_names and len(group_names) == cf.size else empty)
    counts = ["{0:0.0f}\n".format(v) for v in cf.flatten()] if count else empty
    if percent:
        # Row-normalised share of each cell, flattened in row-major order.
        percents = ["{0:.2%}".format(cf[r][c] / cf[r].sum())
                    for r in range(np.size(cf, 0))
                    for c in range(np.size(cf, 1))]
    else:
        percents = empty
    box_labels = np.asarray(
        [(a + b + c).strip() for a, b, c in zip(names, counts, percents)]
    ).reshape(cf.shape[0], cf.shape[1])
    # Accuracy = trace / total observations.
    stats_text = ("\n\nAccuracy={0:0.2%}".format(np.trace(cf) / float(np.sum(cf)))
                  if sum_stats else "")
    if fig_size is None:
        # Fall back to matplotlib's configured default size.
        fig_size = plt.rcParams.get('figure.figsize')
    if not xy_ticks:
        # seaborn hides tick labels when given False.
        categories = False
    plt.figure(figsize=fig_size)
    sns.heatmap(cf, annot=box_labels, fmt="",
                cmap=c_map, cbar=color_bar,
                xticklabels=categories,
                yticklabels=categories)
    if xy_plot_labels:
        plt.ylabel('True label')
        plt.xlabel('Predicted label' + stats_text)
    else:
        plt.xlabel(stats_text)
    if title:
        plt.title(title)
def plot_confusion_matrix(predictions, masks, path):
    """ Visualize confusion matrix.
    :param predictions: predicted output of the model.
    :type predictions: array
    :param masks: true masks of the images.
    :type masks: array
    :param path: directory to store the output
    :type path: str
    """
    print('[INFO] Plotting confusion matrix...')
    # Flatten to per-pixel label vectors before computing the matrix.
    corr = confusion_matrix(masks.ravel(), predictions.ravel())
    make_confusion_matrix(corr,
                          categories=['bg', 'skin', 'nose', 'eye_g', 'l_eye',
                                      'r_eye', 'l_brow', 'r_brow', 'l_ear',
                                      'r_ear', 'mouth', 'u_lip',
                                      'l_lip', 'hair', 'hat', 'ear_r',
                                      'neck_l', 'neck', 'cloth'],
                          count=True,
                          percent=False,
                          color_bar=False,
                          xy_ticks=True,
                          xy_plot_labels=True,
                          sum_stats=True,
                          fig_size=(20, 18),
                          c_map='coolwarm',
                          title='Confusion matrix')
    # error correction - cropped heat map: widen y-limits so the first and
    # last heat-map rows are not clipped.
    b, t = plt.ylim()  # discover the values for bottom and top
    b += 0.5  # Add 0.5 to the bottom
    t -= 0.5  # Subtract 0.5 from the top
    plt.ylim(b, t)  # update the ylim(bottom, top) values
    plt.savefig(os.path.join(path, 'confusion_matrix.png'))
    print('[ACTION] See results/visualization/confusion_matrix.png')
def plot_mask(prediction, mask, norm_image):
    """ Plot segmentation mask for the given image.
    :param prediction: predicted output of the model.
    :type prediction: array
    :param mask: true masks of the images.
    :type mask: array
    :param norm_image: original image.
    :type norm_image: array
    """
    # Undo the 0-1 normalisation back to 8-bit pixel values.
    image = (norm_image * 255.).astype(np.uint8)
    im_base = np.zeros((256, 256, 3), dtype=np.uint8)
    # Paint every pixel of class idx with its colour from the module-level
    # color_list (channel order presumably BGR since cv2 is used -- TODO confirm).
    for idx, color in enumerate(color_list):
        im_base[prediction == idx] = color
    # Blend the colour mask on top of the original image (in place into im_base).
    cv2.addWeighted(im_base, 0.8, image, 1, 0, im_base)
    display([image, mask, im_base],
            ['Original image', 'True mask', 'Predicted mask'],
            'predict')
def test(image, masks, action, color='red'):
    """ Used to plot either confusion matrix or predicted mask or apply makeup.
    :param image: original image.
    :type image: bytearray
    :param masks: true segmentation masks.
    :type masks: array
    :param action: user input specifying confusion matrix/mask
                   prediction/applying makeup.
    :type action: str
    :param color: if action is applying makeup, then color to apply.
                  Defaults to red.
    :type color: str
    """
    # Rebuild the U-Net and restore the trained weights from MODEL_DIR.
    input_img = Input(shape=(256, 256, 3), name='img')
    model = u_net.get_u_net(input_img, num_classes=19)
    model.load_weights(os.path.join(MODEL_DIR, 'u_net.h5'))
    print('[INFO] Predicting ...')
    predictions = model.predict(image)
    # Per-pixel class id = argmax over the 19 class channels.
    predictions = np.argmax(predictions, axis=-1)
    # Class ids of the face parts that can be recoloured.
    table = {
        'hair': 13,
        'upper_lip': 11,
        'lower_lip': 12
    }
    # Makeup colours (channel order presumably matches the images handled by
    # cv2 -- TODO confirm).
    colors = {
        'red': [212, 34, 34],
        'purple': [128, 51, 125],
        'pink': [247, 32, 125]
    }
    # Redirect to the function of specified action.  `sample` is a
    # module-level index chosen in the __main__ block.
    if action == 'confusion_matrix':
        print('[INFO] Plotting confusion matrix ...')
        plot_confusion_matrix(predictions, masks, VISUALIZATION_DIR)
    elif action == 'mask':
        print('[INFO] Plotting segmentation mask ...')
        plot_mask(predictions[sample], masks[sample], image[sample])
    elif action == 'hair_color':
        print('[INFO] Applying hair color ...')
        parts = [table['hair']]
        changed = lip_hair_color.color_change(image[sample],
                                              predictions[sample],
                                              parts, colors[color])
        display([image[sample], changed], 'hair')
    elif action == "lip_color":
        print('[INFO] Applying lip color ...')
        parts = [table['upper_lip'], table['lower_lip']]
        changed = lip_hair_color.color_change(image[sample],
                                              predictions[sample],
                                              parts, colors[color])
        display([image[sample], changed], 'lip')
def main():
    """ Define user arguments, load the cached test set and dispatch to test().
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--visualize", type=str, required=True,
                    choices=("confusion_matrix", "mask",
                             "hair_color", "lip_color"),
                    help="type of model")
    ap.add_argument("-c", "--color", type=str,
                    choices=("red", "pink", "purple"),
                    help="color to apply")
    args = vars(ap.parse_args())
    # One-off preprocessing that produced the cached .npy files loaded below:
    # print('[INFO] Getting test data...')
    # test_data = get_test()
    # imgs = []
    # masks = []
    # for img, label in test_data:
    #     for i in img:
    #         i = np.array(i, dtype='float32')
    #         imgs.append(i)
    #     for j in label:
    #         j = np.array(j, dtype='float32')
    #         masks.append(j)
    # images = np.array(imgs)
    # masks = np.array(masks)
    # np.save('data/test_images.npy', images)
    # np.save('data/test_mask.npy', masks)
    # Load test images
    images = np.load('data/test_images.npy')
    masks = np.load('data/test_mask.npy')
    test(images, masks, args["visualize"], args["color"])
if __name__ == '__main__':
    # Output and model locations used by the functions above.
    VISUALIZATION_DIR = 'results/visualization/'
    MODEL_DIR = 'results/models/'
    # One colour per segmentation class id (indices match plot_mask);
    # channel order presumably BGR given the cv2 pipeline -- TODO confirm.
    color_list = [[0, 0, 0], [204, 0, 0], [255, 140, 26],
                  [204, 204, 0], [51, 51, 255], [204, 0, 204],
                  [0, 255, 255], [255, 204, 204], [102, 51, 0],
                  [255, 0, 0], [102, 204, 0], [255, 255, 0],
                  [0, 0, 153], [0, 0, 204], [255, 51, 153],
                  [0, 204, 204], [0, 51, 0], [255, 153, 51],
                  [0, 204, 0]]
    # Index of the test image used for single-sample visualisations.
    sample = 4
    main()
|
en
| 0.500422
|
Code to generate text within each box and beautify confusion matrix. :param cf: Confusion matrix. :type cf: numpy array :param categories: array of classes. :type categories: numpy array :param group_names: classes in the project. :type group_names: numpy array :param count: whether to display the count of each class. :type count: boolean :param percent: whether to display percentage for each class. :type percent: boolean :param color_bar: whether to display color bar for the heat map. :type color_bar: boolean :param xy_ticks: whether to display xy labels. :type xy_ticks: boolean :param xy_plot_labels: whether to display xy title. :type xy_plot_labels: boolean :param sum_stats: whether to display overall accuracy. :type sum_stats: boolean :param fig_size: size of the plot. :type fig_size: tuple :param c_map: color scheme to use. :type c_map: str :param title: Title of the plot. :type title: str # CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS # Accuracy is sum of diagonal divided by total observations # SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS # Get default figure size if not set # Do not show categories if xyticks is False # MAKE THE HEAT MAP VISUALIZATION Visualize confusion matrix. :param predictions: predicted output of the model. :type predictions: array :param masks: true masks of the images. :type masks: array :param path: directory to store the output :type path: str # error correction - cropped heat map # discover the values for bottom and top # Add 0.5 to the bottom # Subtract 0.5 from the top # update the ylim(bottom, top) values PLot segmentation mask for the given image. :param prediction: predicted output of the model. :type prediction: array :param mask: true masks of the images. :type mask: array :param norm_image: original image. :type norm_image: array Used to plot either confusion matrix or predicted mask or apply makeup. :param image: original image. :type image: bytearray :param masks: true segmentation masks. 
:type masks: array :param action: user input specifying confusion matrix/mask prediction/applying makeup. :type action: str :param color: if action is applying makeup, then color to apply. Defaults to red. :type color: str # Redirect to the function of specified action. Define user arguments. # print('[INFO] Getting test data...') # test_data = get_test() # imgs = [] # masks = [] # for img, label in test_data: # for i in img: # i = np.array(i, dtype='float32') # imgs.append(i) # for j in label: # j = np.array(j, dtype='float32') # masks.append(j) # images = np.array(imgs) # masks = np.array(masks) # np.save('data/test_images.npy', images) # np.save('data/test_mask.npy', masks) # Load test images
| 3.079378
| 3
|
src/new_main.py
|
lorespaul/TextRecognition
| 0
|
6629604
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import ndimage
from os import path, sys
from FilePaths import FilePaths
from DataLoader import DataLoader, Batch
from WordsLoaderDataset import WordsLoaderDataset
from SamplePreprocessor import preprocess
def build_model():
    """Build and compile the CNN + BiLSTM word-recognition network.

    Input: 128x32 grayscale images rescaled to [0, 1]; output: 32 time steps
    of 80 logits each (80 presumably equals the character-vocabulary size --
    TODO confirm against the dataset's class_names).
    """
    model = keras.Sequential([
        # Normalise pixel values and add an explicit channel dimension.
        keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=(128, 32)),
        keras.layers.Reshape((128, 32, 1)),
        # Convolutional feature extractor; pooling reduces 128x32 down to 32x1
        # while the channel count grows to 256.
        keras.layers.Conv2D(32, (5, 5), activation='relu', padding='same', input_shape=(128, 32, 1)),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Conv2D(64, (5, 5), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((1, 2)),
        keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((1, 2)),
        keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((1, 2)),
        # Collapse to a (32, 256) sequence for the recurrent layer.
        keras.layers.Reshape((32, 256)),
        keras.layers.Bidirectional(keras.layers.LSTM(256, return_sequences=True)),
        keras.layers.Dense(80, activation='relu'),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(80)
    ])
    # Raw logits output: the loss applies softmax internally (from_logits=True).
    model.compile(
        optimizer='adam',
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy']
    )
    return model
# def show_first_image():
#     img = preprocess(cv2.imread(FilePaths.fnInfer, cv2.IMREAD_GRAYSCALE), (128, 32))
#     # img = preprocess(cv2.imread('../data/words/a01/a01-000u/a01-000u-01-02.png', cv2.IMREAD_GRAYSCALE), (128, 32))
#     plt.figure()
#     plt.imshow(img, cmap=plt.cm.binary)
#     plt.colorbar()
#     plt.grid(False)
#     plt.show()
# show_first_image()
# sys.exit(0)
# Training / inference hyper-parameters.
batch_size = 32
img_size = (128, 32)
max_text_len = 32
model = build_model()
# model.summary()
# Word-image dataset; class_names is the character vocabulary used for decoding.
loader = WordsLoaderDataset(FilePaths.fnTrain, batch_size, img_size, max_text_len)
train_ds = loader.get_train_dataset(img_size)
val_ds = loader.get_validation_dataset(img_size)
char_list = train_ds.class_names
print('-----------Char list-----------------', train_ds.class_names)
# Resume from the latest checkpoint if one exists; training only happens in
# the else branch when no checkpoint is found.
checkpoint_dir = path.dirname(FilePaths.fnCheckpoint)
lastest_cp = tf.train.latest_checkpoint(checkpoint_dir)
if lastest_cp is not None:
    model.load_weights(lastest_cp)
else:
    cp_callback = keras.callbacks.ModelCheckpoint(
        filepath=FilePaths.fnCheckpoint,
        save_weights_only=True,
        save_best_only=True,
        verbose=1
    )
    # Cache/shuffle/prefetch for input-pipeline throughput.
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
    val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
    model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=5,
        callbacks=[cp_callback]
    )
# Softmax on top of the logits model to obtain per-step class probabilities.
probability_model = keras.Sequential([
    model,
    keras.layers.Softmax()
])
# probability_model.summary()
img = preprocess(cv2.imread(FilePaths.fnInfer, cv2.IMREAD_GRAYSCALE), img_size)
predictions = probability_model.predict(np.array([img]))
prediction = predictions[0]
# Greedy decode: pick the most likely character at each time step.
word_predicted = ''
for i in range(len(prediction)):
    step = prediction[i]
    word_predicted += char_list[np.argmax(step)]
word_predicted = word_predicted.strip()
# Show the (rotated) input image with the decoded word as the axis label.
plt.figure()
rotate_img = ndimage.rotate(img, 90)
plt.imshow(rotate_img, origin='lower', cmap=plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.xlabel(word_predicted)
plt.show()
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import ndimage
from os import path, sys
from FilePaths import FilePaths
from DataLoader import DataLoader, Batch
from WordsLoaderDataset import WordsLoaderDataset
from SamplePreprocessor import preprocess
def build_model():
    """Build and compile the CNN + BiLSTM word-recognition network.

    Input: 128x32 grayscale images rescaled to [0, 1]; output: 32 time steps
    of 80 logits each (80 presumably equals the character-vocabulary size --
    TODO confirm against the dataset's class_names).
    """
    model = keras.Sequential([
        # Normalise pixel values and add an explicit channel dimension.
        keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=(128, 32)),
        keras.layers.Reshape((128, 32, 1)),
        # Convolutional feature extractor; pooling reduces 128x32 down to 32x1
        # while the channel count grows to 256.
        keras.layers.Conv2D(32, (5, 5), activation='relu', padding='same', input_shape=(128, 32, 1)),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Conv2D(64, (5, 5), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((1, 2)),
        keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((1, 2)),
        keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        keras.layers.MaxPooling2D((1, 2)),
        # Collapse to a (32, 256) sequence for the recurrent layer.
        keras.layers.Reshape((32, 256)),
        keras.layers.Bidirectional(keras.layers.LSTM(256, return_sequences=True)),
        keras.layers.Dense(80, activation='relu'),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(80)
    ])
    # Raw logits output: the loss applies softmax internally (from_logits=True).
    model.compile(
        optimizer='adam',
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy']
    )
    return model
# def show_first_image():
#     img = preprocess(cv2.imread(FilePaths.fnInfer, cv2.IMREAD_GRAYSCALE), (128, 32))
#     # img = preprocess(cv2.imread('../data/words/a01/a01-000u/a01-000u-01-02.png', cv2.IMREAD_GRAYSCALE), (128, 32))
#     plt.figure()
#     plt.imshow(img, cmap=plt.cm.binary)
#     plt.colorbar()
#     plt.grid(False)
#     plt.show()
# show_first_image()
# sys.exit(0)
# Training / inference hyper-parameters.
batch_size = 32
img_size = (128, 32)
max_text_len = 32
model = build_model()
# model.summary()
# Word-image dataset; class_names is the character vocabulary used for decoding.
loader = WordsLoaderDataset(FilePaths.fnTrain, batch_size, img_size, max_text_len)
train_ds = loader.get_train_dataset(img_size)
val_ds = loader.get_validation_dataset(img_size)
char_list = train_ds.class_names
print('-----------Char list-----------------', train_ds.class_names)
# Resume from the latest checkpoint if one exists; training only happens in
# the else branch when no checkpoint is found.
checkpoint_dir = path.dirname(FilePaths.fnCheckpoint)
lastest_cp = tf.train.latest_checkpoint(checkpoint_dir)
if lastest_cp is not None:
    model.load_weights(lastest_cp)
else:
    cp_callback = keras.callbacks.ModelCheckpoint(
        filepath=FilePaths.fnCheckpoint,
        save_weights_only=True,
        save_best_only=True,
        verbose=1
    )
    # Cache/shuffle/prefetch for input-pipeline throughput.
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
    val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
    model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=5,
        callbacks=[cp_callback]
    )
# Softmax on top of the logits model to obtain per-step class probabilities.
probability_model = keras.Sequential([
    model,
    keras.layers.Softmax()
])
# probability_model.summary()
img = preprocess(cv2.imread(FilePaths.fnInfer, cv2.IMREAD_GRAYSCALE), img_size)
predictions = probability_model.predict(np.array([img]))
prediction = predictions[0]
# Greedy decode: pick the most likely character at each time step.
word_predicted = ''
for i in range(len(prediction)):
    step = prediction[i]
    word_predicted += char_list[np.argmax(step)]
word_predicted = word_predicted.strip()
# Show the (rotated) input image with the decoded word as the axis label.
plt.figure()
rotate_img = ndimage.rotate(img, 90)
plt.imshow(rotate_img, origin='lower', cmap=plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.xlabel(word_predicted)
plt.show()
|
en
| 0.248393
|
# def show_first_image(): # img = preprocess(cv2.imread(FilePaths.fnInfer, cv2.IMREAD_GRAYSCALE), (128, 32)) # # img = preprocess(cv2.imread('../data/words/a01/a01-000u/a01-000u-01-02.png', cv2.IMREAD_GRAYSCALE), (128, 32)) # plt.figure() # plt.imshow(img, cmap=plt.cm.binary) # plt.colorbar() # plt.grid(False) # plt.show() # show_first_image() # sys.exit(0) # model.summary() # probability_model.summary()
| 2.556963
| 3
|
setup.py
|
suAdminWen/cc-api
| 6
|
6629605
|
from setuptools import find_packages, setup
with open('README.md', 'rb') as f:
long_description = f.read().decode('utf-8')
with open('requirements-prod.txt') as f:
requirements = [l for l in f.read().splitlines() if l]
setup(
name='flask_cc_api',
version='0.9.0.dev1',
description="libs for flask-cc api projects",
long_description=long_description,
author='wen',
author_email='<EMAIL>',
packages=find_packages(exclude=('tests', 'tests.*')),
install_requires=requirements,
package_data={'': ['*.yaml']},
include_package_data=True,
zip_safe=False,
python_requires='>=3.5',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
entry_points='''
[console_scripts]
flask_cc_api=flask_cc_api.cli.main:cli
''',
)
|
from setuptools import find_packages, setup
with open('README.md', 'rb') as f:
long_description = f.read().decode('utf-8')
with open('requirements-prod.txt') as f:
requirements = [l for l in f.read().splitlines() if l]
setup(
name='flask_cc_api',
version='0.9.0.dev1',
description="libs for flask-cc api projects",
long_description=long_description,
author='wen',
author_email='<EMAIL>',
packages=find_packages(exclude=('tests', 'tests.*')),
install_requires=requirements,
package_data={'': ['*.yaml']},
include_package_data=True,
zip_safe=False,
python_requires='>=3.5',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
entry_points='''
[console_scripts]
flask_cc_api=flask_cc_api.cli.main:cli
''',
)
|
fr
| 0.126668
|
[console_scripts] flask_cc_api=flask_cc_api.cli.main:cli
| 1.377354
| 1
|
deepnlp/__init__.py
|
taishanfuxiao/NLP-
| 1,440
|
6629606
|
<reponame>taishanfuxiao/NLP-<filename>deepnlp/__init__.py<gh_stars>1000+
""" Deepnlp Package """
from __future__ import unicode_literals
__version__ = '0.1.7'
__license__ = 'MIT'
from deepnlp import downloader
from deepnlp import model_util
# global function for download pre-trained model from github
# https://github.com/rockingdingo
download = downloader.download
register_model = model_util.register_model
|
""" Deepnlp Package """
from __future__ import unicode_literals
__version__ = '0.1.7'
__license__ = 'MIT'
from deepnlp import downloader
from deepnlp import model_util
# global function for download pre-trained model from github
# https://github.com/rockingdingo
download = downloader.download
register_model = model_util.register_model
|
en
| 0.748069
|
Deepnlp Package # global function for download pre-trained model from github # https://github.com/rockingdingo
| 1.755931
| 2
|
tests/integration_tests/test_api.py
|
hfurkanbozkurt/ludwig
| 0
|
6629607
|
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import tempfile
from unittest import mock
import pytest
import torch
from ludwig.api import LudwigModel
from ludwig.utils.data_utils import read_csv
from tests.integration_tests.utils import (
category_feature,
ENCODERS,
generate_data,
get_weights,
run_api_experiment,
sequence_feature,
)
def run_api_experiment_separated_datasets(input_features, output_features, data_csv):
"""Helper method to avoid code repetition in running an experiment.
:param input_features: input schema
:param output_features: output schema
:param data_csv: path to data
:return: None
"""
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
"training": {"epochs": 2},
}
model = LudwigModel(config)
# Training with dataframe
data_df = read_csv(data_csv)
train_df = data_df.sample(frac=0.8)
test_df = data_df.drop(train_df.index).sample(frac=0.5)
validation_df = data_df.drop(train_df.index).drop(test_df.index)
basename, ext = os.path.splitext(data_csv)
train_fname = basename + ".train" + ext
val_fname = basename + ".validation" + ext
test_fname = basename + ".test" + ext
output_dirs = []
try:
train_df.to_csv(train_fname)
validation_df.to_csv(val_fname)
test_df.to_csv(test_fname)
# Training with csv
_, _, output_dir = model.train(
training_set=train_fname,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_fname,
validation_set=val_fname,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_fname,
validation_set=val_fname,
test_set=test_fname,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, output_dir = model.predict(dataset=test_fname)
output_dirs.append(output_dir)
finally:
# Remove results/intermediate data saved to disk
os.remove(train_fname)
os.remove(val_fname)
os.remove(test_fname)
for output_dir in output_dirs:
shutil.rmtree(output_dir, ignore_errors=True)
output_dirs = []
try:
_, _, output_dir = model.train(
training_set=train_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_df,
validation_set=validation_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_df,
validation_set=validation_df,
test_set=test_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, output_dir = model.predict(dataset=data_df)
output_dirs.append(output_dir)
finally:
for output_dir in output_dirs:
shutil.rmtree(output_dir, ignore_errors=True)
def test_api_intent_classification(csv_filename):
# Single sequence input, single category output
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
# Generate test data
rel_path = generate_data(input_features, output_features, csv_filename)
for encoder in ENCODERS:
input_features[0]["encoder"] = encoder
run_api_experiment(input_features, output_features, data_csv=rel_path)
def test_api_intent_classification_separated(csv_filename):
# Single sequence input, single category output
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
# Generate test data
rel_path = generate_data(input_features, output_features, csv_filename)
for encoder in ENCODERS:
input_features[0]["encoder"] = encoder
run_api_experiment_separated_datasets(input_features, output_features, data_csv=rel_path)
def test_api_train_online(csv_filename):
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
data_csv = generate_data(input_features, output_features, csv_filename)
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
}
model = LudwigModel(config)
for i in range(2):
model.train_online(dataset=data_csv)
model.predict(dataset=data_csv)
def test_api_training_set(tmpdir):
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
data_csv = generate_data(input_features, output_features, os.path.join(tmpdir, "dataset.csv"))
val_csv = shutil.copyfile(data_csv, os.path.join(tmpdir, "validation.csv"))
test_csv = shutil.copyfile(data_csv, os.path.join(tmpdir, "test.csv"))
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "fc_size": 14},
}
model = LudwigModel(config)
model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)
model.predict(dataset=test_csv)
# Train again, this time the HDF5 cache will be used
model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)
def test_api_training_determinism(tmpdir):
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
data_csv = generate_data(input_features, output_features, os.path.join(tmpdir, "dataset.csv"))
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "fc_size": 14},
}
# Train the model 3 times:
#
# 1. seed x
# 2. seed y
# 3. seed x
#
# Check that models (1) and (3) produce the same weights,
# but (1) and (2) do not
rand_x = 42
rand_y = 24
model_1 = LudwigModel(config)
model_1.train(dataset=data_csv, output_directory=tmpdir, random_seed=rand_x)
model_2 = LudwigModel(config)
model_2.train(dataset=data_csv, output_directory=tmpdir, random_seed=rand_y)
model_3 = LudwigModel(config)
model_3.train(dataset=data_csv, output_directory=tmpdir, random_seed=rand_x)
model_weights_1 = get_weights(model_1.model)
model_weights_2 = get_weights(model_2.model)
model_weights_3 = get_weights(model_3.model)
divergence = False
for weight_1, weight_2 in zip(model_weights_1, model_weights_2):
if not torch.allclose(weight_1, weight_2):
divergence = True
break
assert divergence, "model_1 and model_2 have identical weights with different seeds!"
for weight_1, weight_3 in zip(model_weights_1, model_weights_3):
assert torch.allclose(weight_1, weight_3)
def run_api_commands(
input_features,
output_features,
data_csv,
output_dir,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=False,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
skip_collect_predictions=False,
skip_collect_overall_stats=False,
):
"""Helper method to avoid code repetition in running an experiment.
:param input_features: input schema
:param output_features: output schema
:param data_csv: path to data
:return: None
"""
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
"training": {"epochs": 2},
}
model = LudwigModel(config)
# Training with csv
model.train(
dataset=data_csv,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
output_directory=output_dir,
)
model.predict(
dataset=data_csv,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
output_directory=output_dir,
)
model.evaluate(
dataset=data_csv,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
collect_predictions=not skip_collect_predictions,
collect_overall_stats=not skip_collect_overall_stats,
output_directory=output_dir,
)
model.experiment(
dataset=data_csv,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
skip_collect_predictions=skip_collect_predictions,
skip_collect_overall_stats=skip_collect_overall_stats,
output_directory=output_dir,
)
@pytest.mark.parametrize("skip_save_training_description", [False, True])
@pytest.mark.parametrize("skip_save_training_statistics", [False, True])
@pytest.mark.parametrize("skip_save_model", [False, True])
@pytest.mark.parametrize("skip_save_progress", [False, True])
@pytest.mark.parametrize("skip_save_log", [False, True])
@pytest.mark.parametrize("skip_save_processed_input", [False, True])
def test_api_skip_parameters_train(
csv_filename,
skip_save_training_description,
skip_save_training_statistics,
skip_save_model,
skip_save_progress,
skip_save_log,
skip_save_processed_input,
):
# Single sequence input, single category output
input_features = [category_feature(vocab_size=5)]
output_features = [category_feature(vocab_size=5)]
with tempfile.TemporaryDirectory() as output_dir:
# Generate test data
rel_path = generate_data(input_features, output_features, os.path.join(output_dir, csv_filename))
run_api_commands(
input_features,
output_features,
data_csv=rel_path,
output_dir=output_dir,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
)
@pytest.mark.parametrize("skip_save_unprocessed_output", [False, True])
@pytest.mark.parametrize("skip_save_predictions", [False, True])
def test_api_skip_parameters_predict(
csv_filename,
skip_save_unprocessed_output,
skip_save_predictions,
):
# Single sequence input, single category output
input_features = [category_feature(vocab_size=5)]
output_features = [category_feature(vocab_size=5)]
with tempfile.TemporaryDirectory() as output_dir:
# Generate test data
rel_path = generate_data(input_features, output_features, os.path.join(output_dir, csv_filename))
run_api_commands(
input_features,
output_features,
data_csv=rel_path,
output_dir=output_dir,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
)
@pytest.mark.parametrize("skip_save_unprocessed_output", [False, True])
@pytest.mark.parametrize("skip_save_predictions", [False, True])
@pytest.mark.parametrize("skip_save_eval_stats", [False, True])
@pytest.mark.parametrize("skip_collect_predictions", [False, True])
@pytest.mark.parametrize("skip_collect_overall_stats", [False, True])
def test_api_skip_parameters_evaluate(
csv_filename,
skip_save_unprocessed_output,
skip_save_predictions,
skip_save_eval_stats,
skip_collect_predictions,
skip_collect_overall_stats,
):
# Single sequence input, single category output
input_features = [category_feature(vocab_size=5)]
output_features = [category_feature(vocab_size=5)]
with tempfile.TemporaryDirectory() as output_dir:
# Generate test data
rel_path = generate_data(input_features, output_features, os.path.join(output_dir, csv_filename))
run_api_commands(
input_features,
output_features,
data_csv=rel_path,
output_dir=output_dir,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
skip_collect_predictions=skip_collect_predictions,
skip_collect_overall_stats=skip_collect_overall_stats,
)
def test_api_callbacks(csv_filename):
mock_callback = mock.Mock()
epochs = 2
batch_size = 8
num_examples = 32
with tempfile.TemporaryDirectory() as output_dir:
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
"training": {"epochs": epochs, "batch_size": batch_size},
}
model = LudwigModel(config, callbacks=[mock_callback])
data_csv = generate_data(
input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples
)
val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "validation.csv"))
test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "test.csv"))
model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)
assert mock_callback.on_epoch_start.call_count == epochs
assert mock_callback.on_epoch_end.call_count == epochs
assert mock_callback.on_validation_start.call_count == epochs
assert mock_callback.on_validation_end.call_count == epochs
assert mock_callback.on_test_start.call_count == epochs
assert mock_callback.on_test_end.call_count == epochs
assert mock_callback.on_batch_start.call_count == epochs * (num_examples / batch_size)
assert mock_callback.on_batch_end.call_count == epochs * (num_examples / batch_size)
|
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import tempfile
from unittest import mock
import pytest
import torch
from ludwig.api import LudwigModel
from ludwig.utils.data_utils import read_csv
from tests.integration_tests.utils import (
category_feature,
ENCODERS,
generate_data,
get_weights,
run_api_experiment,
sequence_feature,
)
def run_api_experiment_separated_datasets(input_features, output_features, data_csv):
"""Helper method to avoid code repetition in running an experiment.
:param input_features: input schema
:param output_features: output schema
:param data_csv: path to data
:return: None
"""
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
"training": {"epochs": 2},
}
model = LudwigModel(config)
# Training with dataframe
data_df = read_csv(data_csv)
train_df = data_df.sample(frac=0.8)
test_df = data_df.drop(train_df.index).sample(frac=0.5)
validation_df = data_df.drop(train_df.index).drop(test_df.index)
basename, ext = os.path.splitext(data_csv)
train_fname = basename + ".train" + ext
val_fname = basename + ".validation" + ext
test_fname = basename + ".test" + ext
output_dirs = []
try:
train_df.to_csv(train_fname)
validation_df.to_csv(val_fname)
test_df.to_csv(test_fname)
# Training with csv
_, _, output_dir = model.train(
training_set=train_fname,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_fname,
validation_set=val_fname,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_fname,
validation_set=val_fname,
test_set=test_fname,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, output_dir = model.predict(dataset=test_fname)
output_dirs.append(output_dir)
finally:
# Remove results/intermediate data saved to disk
os.remove(train_fname)
os.remove(val_fname)
os.remove(test_fname)
for output_dir in output_dirs:
shutil.rmtree(output_dir, ignore_errors=True)
output_dirs = []
try:
_, _, output_dir = model.train(
training_set=train_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_df,
validation_set=validation_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, _, output_dir = model.train(
training_set=train_df,
validation_set=validation_df,
test_set=test_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
)
output_dirs.append(output_dir)
_, output_dir = model.predict(dataset=data_df)
output_dirs.append(output_dir)
finally:
for output_dir in output_dirs:
shutil.rmtree(output_dir, ignore_errors=True)
def test_api_intent_classification(csv_filename):
# Single sequence input, single category output
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
# Generate test data
rel_path = generate_data(input_features, output_features, csv_filename)
for encoder in ENCODERS:
input_features[0]["encoder"] = encoder
run_api_experiment(input_features, output_features, data_csv=rel_path)
def test_api_intent_classification_separated(csv_filename):
# Single sequence input, single category output
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
# Generate test data
rel_path = generate_data(input_features, output_features, csv_filename)
for encoder in ENCODERS:
input_features[0]["encoder"] = encoder
run_api_experiment_separated_datasets(input_features, output_features, data_csv=rel_path)
def test_api_train_online(csv_filename):
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
data_csv = generate_data(input_features, output_features, csv_filename)
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
}
model = LudwigModel(config)
for i in range(2):
model.train_online(dataset=data_csv)
model.predict(dataset=data_csv)
def test_api_training_set(tmpdir):
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
data_csv = generate_data(input_features, output_features, os.path.join(tmpdir, "dataset.csv"))
val_csv = shutil.copyfile(data_csv, os.path.join(tmpdir, "validation.csv"))
test_csv = shutil.copyfile(data_csv, os.path.join(tmpdir, "test.csv"))
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "fc_size": 14},
}
model = LudwigModel(config)
model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)
model.predict(dataset=test_csv)
# Train again, this time the HDF5 cache will be used
model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)
def test_api_training_determinism(tmpdir):
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
data_csv = generate_data(input_features, output_features, os.path.join(tmpdir, "dataset.csv"))
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "fc_size": 14},
}
# Train the model 3 times:
#
# 1. seed x
# 2. seed y
# 3. seed x
#
# Check that models (1) and (3) produce the same weights,
# but (1) and (2) do not
rand_x = 42
rand_y = 24
model_1 = LudwigModel(config)
model_1.train(dataset=data_csv, output_directory=tmpdir, random_seed=rand_x)
model_2 = LudwigModel(config)
model_2.train(dataset=data_csv, output_directory=tmpdir, random_seed=rand_y)
model_3 = LudwigModel(config)
model_3.train(dataset=data_csv, output_directory=tmpdir, random_seed=rand_x)
model_weights_1 = get_weights(model_1.model)
model_weights_2 = get_weights(model_2.model)
model_weights_3 = get_weights(model_3.model)
divergence = False
for weight_1, weight_2 in zip(model_weights_1, model_weights_2):
if not torch.allclose(weight_1, weight_2):
divergence = True
break
assert divergence, "model_1 and model_2 have identical weights with different seeds!"
for weight_1, weight_3 in zip(model_weights_1, model_weights_3):
assert torch.allclose(weight_1, weight_3)
def run_api_commands(
input_features,
output_features,
data_csv,
output_dir,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=False,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
skip_collect_predictions=False,
skip_collect_overall_stats=False,
):
"""Helper method to avoid code repetition in running an experiment.
:param input_features: input schema
:param output_features: output schema
:param data_csv: path to data
:return: None
"""
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
"training": {"epochs": 2},
}
model = LudwigModel(config)
# Training with csv
model.train(
dataset=data_csv,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
output_directory=output_dir,
)
model.predict(
dataset=data_csv,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
output_directory=output_dir,
)
model.evaluate(
dataset=data_csv,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
collect_predictions=not skip_collect_predictions,
collect_overall_stats=not skip_collect_overall_stats,
output_directory=output_dir,
)
model.experiment(
dataset=data_csv,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
skip_collect_predictions=skip_collect_predictions,
skip_collect_overall_stats=skip_collect_overall_stats,
output_directory=output_dir,
)
@pytest.mark.parametrize("skip_save_training_description", [False, True])
@pytest.mark.parametrize("skip_save_training_statistics", [False, True])
@pytest.mark.parametrize("skip_save_model", [False, True])
@pytest.mark.parametrize("skip_save_progress", [False, True])
@pytest.mark.parametrize("skip_save_log", [False, True])
@pytest.mark.parametrize("skip_save_processed_input", [False, True])
def test_api_skip_parameters_train(
csv_filename,
skip_save_training_description,
skip_save_training_statistics,
skip_save_model,
skip_save_progress,
skip_save_log,
skip_save_processed_input,
):
# Single sequence input, single category output
input_features = [category_feature(vocab_size=5)]
output_features = [category_feature(vocab_size=5)]
with tempfile.TemporaryDirectory() as output_dir:
# Generate test data
rel_path = generate_data(input_features, output_features, os.path.join(output_dir, csv_filename))
run_api_commands(
input_features,
output_features,
data_csv=rel_path,
output_dir=output_dir,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
)
@pytest.mark.parametrize("skip_save_unprocessed_output", [False, True])
@pytest.mark.parametrize("skip_save_predictions", [False, True])
def test_api_skip_parameters_predict(
csv_filename,
skip_save_unprocessed_output,
skip_save_predictions,
):
# Single sequence input, single category output
input_features = [category_feature(vocab_size=5)]
output_features = [category_feature(vocab_size=5)]
with tempfile.TemporaryDirectory() as output_dir:
# Generate test data
rel_path = generate_data(input_features, output_features, os.path.join(output_dir, csv_filename))
run_api_commands(
input_features,
output_features,
data_csv=rel_path,
output_dir=output_dir,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
)
@pytest.mark.parametrize("skip_save_unprocessed_output", [False, True])
@pytest.mark.parametrize("skip_save_predictions", [False, True])
@pytest.mark.parametrize("skip_save_eval_stats", [False, True])
@pytest.mark.parametrize("skip_collect_predictions", [False, True])
@pytest.mark.parametrize("skip_collect_overall_stats", [False, True])
def test_api_skip_parameters_evaluate(
csv_filename,
skip_save_unprocessed_output,
skip_save_predictions,
skip_save_eval_stats,
skip_collect_predictions,
skip_collect_overall_stats,
):
# Single sequence input, single category output
input_features = [category_feature(vocab_size=5)]
output_features = [category_feature(vocab_size=5)]
with tempfile.TemporaryDirectory() as output_dir:
# Generate test data
rel_path = generate_data(input_features, output_features, os.path.join(output_dir, csv_filename))
run_api_commands(
input_features,
output_features,
data_csv=rel_path,
output_dir=output_dir,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
skip_collect_predictions=skip_collect_predictions,
skip_collect_overall_stats=skip_collect_overall_stats,
)
def test_api_callbacks(csv_filename):
mock_callback = mock.Mock()
epochs = 2
batch_size = 8
num_examples = 32
with tempfile.TemporaryDirectory() as output_dir:
input_features = [sequence_feature(reduce_output="sum")]
output_features = [category_feature(vocab_size=5, reduce_input="sum")]
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
"training": {"epochs": epochs, "batch_size": batch_size},
}
model = LudwigModel(config, callbacks=[mock_callback])
data_csv = generate_data(
input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples
)
val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "validation.csv"))
test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "test.csv"))
model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)
assert mock_callback.on_epoch_start.call_count == epochs
assert mock_callback.on_epoch_end.call_count == epochs
assert mock_callback.on_validation_start.call_count == epochs
assert mock_callback.on_validation_end.call_count == epochs
assert mock_callback.on_test_start.call_count == epochs
assert mock_callback.on_test_end.call_count == epochs
assert mock_callback.on_batch_start.call_count == epochs * (num_examples / batch_size)
assert mock_callback.on_batch_end.call_count == epochs * (num_examples / batch_size)
|
en
| 0.722161
|
# Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Helper method to avoid code repetition in running an experiment. :param input_features: input schema :param output_features: output schema :param data_csv: path to data :return: None # Training with dataframe # Training with csv # Remove results/intermediate data saved to disk # Single sequence input, single category output # Generate test data # Single sequence input, single category output # Generate test data # Train again, this time the HDF5 cache will be used # Train the model 3 times: # # 1. seed x # 2. seed y # 3. seed x # # Check that models (1) and (3) produce the same weights, # but (1) and (2) do not Helper method to avoid code repetition in running an experiment. :param input_features: input schema :param output_features: output schema :param data_csv: path to data :return: None # Training with csv # Single sequence input, single category output # Generate test data # Single sequence input, single category output # Generate test data # Single sequence input, single category output # Generate test data
| 2.172477
| 2
|
example/h2o_angle.py
|
NLESC-JCER/DeepQMC
| 6
|
6629608
|
import numpy as np
from deepqmc.wavefunction.wf_orbital import Orbital
from deepqmc.solver.solver_orbital import SolverOrbital
from deepqmc.sampler.metropolis import Metropolis
from deepqmc.wavefunction.molecule import Molecule
def rot_mat(angles):
dA = (angles[1]-angles[0])*np.pi/180
c, s = np.cos(dA), np.sin(dA)
return np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
def bend_molecule(mol, r, index=1):
mol.atom_coords[index] = (r @ np.array(
mol.atom_coords[index])).astype('float32')
return mol
# define the molecule
mol = Molecule(atom='water_line.xyz', unit='angs',
basis_type='gto', basis='sto-3g')
# define the wave function
wf = Orbital(mol, kinetic_jacobi=True)
# sampler
sampler = Metropolis(nwalkers=1000, nstep=5000, step_size=0.5,
ndim=wf.ndim, nelec=wf.nelec, move='one')
# solver
solver = SolverOrbital(wf=wf, sampler=sampler)
pos, e, v = solver.single_point()
sampler.nstep = 500
angles = np.linspace(0, 90, 10)
R = rot_mat(angles)
for iA in range(len(angles)):
# define the wave function
wf = Orbital(mol, kinetic_jacobi=True)
solver.wf = wf
pos, e, v = solver.single_point(pos=pos)
# bend the mol
mol = bend_molecule(mol, R)
|
import numpy as np
from deepqmc.wavefunction.wf_orbital import Orbital
from deepqmc.solver.solver_orbital import SolverOrbital
from deepqmc.sampler.metropolis import Metropolis
from deepqmc.wavefunction.molecule import Molecule
def rot_mat(angles):
dA = (angles[1]-angles[0])*np.pi/180
c, s = np.cos(dA), np.sin(dA)
return np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
def bend_molecule(mol, r, index=1):
mol.atom_coords[index] = (r @ np.array(
mol.atom_coords[index])).astype('float32')
return mol
# define the molecule
mol = Molecule(atom='water_line.xyz', unit='angs',
basis_type='gto', basis='sto-3g')
# define the wave function
wf = Orbital(mol, kinetic_jacobi=True)
# sampler
sampler = Metropolis(nwalkers=1000, nstep=5000, step_size=0.5,
ndim=wf.ndim, nelec=wf.nelec, move='one')
# solver
solver = SolverOrbital(wf=wf, sampler=sampler)
pos, e, v = solver.single_point()
sampler.nstep = 500
angles = np.linspace(0, 90, 10)
R = rot_mat(angles)
for iA in range(len(angles)):
# define the wave function
wf = Orbital(mol, kinetic_jacobi=True)
solver.wf = wf
pos, e, v = solver.single_point(pos=pos)
# bend the mol
mol = bend_molecule(mol, R)
|
en
| 0.421635
|
# define the molecule # define the wave function # sampler # solver # define the wave function # bend the mol
| 2.461828
| 2
|
sjfxsz/codes/chap6.py
|
SaronZhou/python
| 0
|
6629609
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 19:50:38 2019
@author: admin
"""
import numpy as np
import pandas as pd
### pandas数据处理 ###
# 数据准备、数据转换、数据聚合
## 数据准备 ##
# 加载、组装:合并,拼接,组合、变形(轴向旋转)、删除
# 合并 #
# merge()函数
frame1 = pd.DataFrame({'id':['ball','pencil','pen','mug','ashtray'],
'price':[12.33,11.44,33.21,13.23,33.62]})
frame2 = pd.DataFrame({'id':['pencil','pencil','ball','pen'],
'color':['white','red','red','black']})
frame1
frame2
# 合并frame1与frame2
pd.merge(frame1, frame2)
# 指定基于哪一列合并,增加on选项
frame1 = pd.DataFrame({'id':['ball','pencil','pen','mug','ashtray'],
'color': ['white','red','red','black','green'],
'brand':['OMG','ABC','ABC','POD','POD']})
frame2 = pd.DataFrame({'id':['pencil','pencil','ball','pen'],
'brand':['OMG','POD','ABC','POD']})
frame1
frame2
# frame1与frame2有两个相同列名的列,对其执行合并操作得到空DataFrame对象
pd.merge(frame1, frame2)
# 指定其合并操作标准
pd.merge(frame1, frame2, on='id')
pd.merge(frame1, frame2, on='brand')
# 使用left_on和right_on指定frame1和frame2的基准列,即以frame1中id与frame2中是sid执行合并操作
frame2.columns = ['brand','sid']
pd.merge(frame1, frame2, left_on='id', right_on='brand')
# merge()函数默认执行内连接操作,how选项可以指定连接方式
frame2.columns = ['id','brand']
pd.merge(frame1, frame2, on='id')
pd.merge(frame1, frame2, on='id', how='outer')
pd.merge(frame1, frame2, on='id', how='left')
pd.merge(frame1, frame2, on='id', how='right')
# 合并多个键
pd.merge(frame1, frame2, on=['id','brand'], how='left')
# 根据索引合并
# 将left_index与right_index选项设置为Ture,可将索引而非键作为合并的基准
pd.merge(frame1,frame2,left_index=True,right_index=True)
# DataFrame对象的join()函数更适合根据索引进行合并,可以用于合并多个索引相同或索引相同但列却不一致的DataFrame对象
frame2.columns = ['brand2','id2']
frame1.join(frame2)
## 拼接 ##
# numpy中concatenate()函数
array1 = np.arange(9).reshape((3,3))
array2 = array1 + 6
np.concatenate([array1, array2])
np.concatenate([array1, array2], axis=1)
# pandas中concat()函数实现按轴拼接的功能
ser1 = pd.Series(np.random.rand(4), index=[1,2,3,4])
ser2 = pd.Series(np.random.rand(4), index=[5,6,7,8])
# 默认按照axis=0拼接数据
pd.concat([ser1, ser2])
# 结果中无重复数据,实际上执行的是外连接操作
ser3 = pd.concat([ser1,ser2], axis=1)
pd.concat([ser1,ser3], axis=1, join='inner')
# 在用于拼接的轴上创建等级索引,keys选项
pd.concat([ser1,ser2], keys=[1,2])
# axis=1时,指定的键变为DataFrame对象的列名
pd.concat([ser1,ser2], axis=1, keys=[1,2])
frame1 = pd.DataFrame(np.random.rand(9).reshape(3,3), index=[1,2,3],
columns=['A','B','C'])
frame2 = pd.DataFrame(np.random.rand(9).reshape(3,3), index=[4,5,6],
columns=['A','B','C'])
pd.concat([frame1, frame2])
pd.concat([frame1,frame2], axis=1)
# 组合 #
# combine函数可用来组合series对象并对其数据
ser1 = pd.Series(np.random.rand(5), index=[1,2,3,4,5])
ser2 = pd.Series(np.random.rand(4), index=[2,4,5,6])
ser1
ser2
# 相同索引处使用的是ser1的值
ser1.combine_first(ser2)
# 相同索引处使用的是ser2的值
ser2.combine_first(ser1)
# 进行部分合并,索引值1,2,3,4使用的都是ser1的值
ser1[:4].combine_first(ser2[:4])
# 轴向旋转 #
# 轴向旋转有两个基础操作:入栈-旋转数据结构,将列转换为行、出栈-行转为列
# 按等级索引旋转
frame1 = pd.DataFrame(np.arange(9).reshape(3,3), index=['white','red','black'],
columns=['ball','pen','pencil'])
# 列转为行,得到一个series对象
ser1 = frame1.stack()
ser1.unstack()
# 出栈操作可应用于不同的层级,为unstack函数传入表示层级的编号或名称
ser1.unstack(0)
ser1.unstack(1)
# 长格式转换为宽格式 pivot()函数,可以使用键的一列或多列作为参数
# 长格式:各列都有数据项,每一列后面的数据常常会根前面的有所重复,并且常常为列表形式,有一行行数据组成
longframe = pd.DataFrame({'color':['white','white','white','red','red','red','black','black','black'],
'item':['ball','pen','mug','ball','pen','mug','ball','pen','mug'],
'value':np.random.rand(9)})
longframe
longframe.pivot('color','item')
longframe.pivot('item','color')
# 删除 #
frame1 = pd.DataFrame(np.arange(9).reshape(3,3), index=['white','black','red'],
columns=['ball','en','pencil'])
# 删除一列 del
del frame1['ball']
frame1
# 删除一行drop函数
frame1.drop('white')
## 数据转换 ##
# 删除重复元素 #
dframe = pd.DataFrame({'color': ['white','white','red','red','white'],
'value':[2,1,3,3,2]})
dframe
# duplicated()函数可用来检测重复的行,返回元素为布尔型的Series对象
# 每个元素对应一行,如果该行与其他行重复则元素为True
dframe.duplicated()
# 查找重复的行
dframe[dframe.duplicated()]
# drop_duplicates()函数返回删除重复行后的DataFrame对象
dframe.drop_duplicates()
# 映射 #
# repacle():替换元素、map()新建一列、rename():替换索引
# 用映射替换元素
frame = pd.DataFrame({'item':['ball','mug','pen','pencil','ashtray'],
'color':['white','rosso','verde','black','yellow'],
'price':[5.56,4.20,1.30,0.56,2.75]})
frame
# 用新元素替换不正确的元素,需要定义一组映射关系,旧元素作为键,新元素作为值
newcolors = {'rosso':'red', 'verde':'green'}
frame.replace(newcolors)
# 将NaN替换为正确的元素
ser = pd.Series([1,3,np.nan,4,6,np.nan,2])
ser.replace(np.nan, 0)
# 用映射添加元素
frame = pd.DataFrame({'item':['ball','mug','pen','pencil','ashtray'],
'color':['white','red','green','black','yellow']})
price = {'ball':5.56, 'mug':4.20, 'bottle':1.3, 'scissors':3.41, 'pen':1.30, 'pencil':0.56, 'ashtray':2.75}
# map()函数可应用于series对象或是dataframe对象的一列,接收一个函数或表示映射关系的字典对象作为参数。
# item列应用映射关系,字典price作为参数
frame['price'] = frame['item'].map(price)
# 重命名轴索引
# rename()函数以表示映射关系的字典对象作为参数,替换轴的索引标签
reindex = {0:'first', 1:'second', 2:'third', 3:'fourth', 4:'fifth'}
frame.rename(reindex)
# 若要重命名各列,必须使用columns选项
recolumn = {'item':'object', 'price':'value'}
frame.rename(index=reindex, columns=recolumn)
frame.rename(index={1:'first'}, columns={'item':'object'})
# rename函数返回一个新的dataframe对象,原对象保持不变,如果要修改调用函数对象本身,可将inplace选项设置为True
## 离散化和面元划分 ##
frame.rename(columns={'item':'object'}, inplace=True)
frame
# 若实验数据范围为0~100,分为四部分即四个面元(bin)
# 定义一个数组,存储用于面元划分的各数值
bins = [0,25,50,75,100]
# 对results数组应用cut()函数,传入bins变量作为参数
results = np.random.randint(0, high=100, size=20)
cat = pd.cut(results, bins)
cat
# 显示第几个面元的index值
cat.codes
# cat()函数返回的对象为categorical类别型类型,可以看作一个字符串数组,元素为面元的名称
# levels数组为不同内部类别的名称,labels数组的元素数量跟results数组相同,labels数组的个数字表示results数组元素所属的面元
# 2.7cat.levels
# 2.7cat.labels
# 每个面元出现的次数
pd.value_counts(cat)
# 可以用字符串数组指定面元的名称,将其赋给cut函数的labels选项
bin_names = ['unlikely','less likely','likely','high likely']
pd.cut(results, bins, labels=bin_names)
# 若不指定面元的各界限,只传入一个整数作为参数,cut函数就会按照指定的数字,把数组元素取值范围划分为几部分
pd.cut(results, 5)
# 邓频分箱qcut()函数直接将样本划分为5个面元,每个面元样本数量相等,区间大小不等
quintiles = pd.qcut(results, 5)
pd.value_counts(quintiles)
# 异常值检测和过滤
# 生成3列每列1000个服从标准正态分布的随机数
randframe = pd.DataFrame(np.random.randn(1000, 3))
randframe.describe()
randframe.std()
# any函数
randframe[(np.abs(randframe) > (3*randframe.std())).any(1)]
## 排序 ##
# np.random.permutation()函数
nframe = pd.DataFrame(np.arange(25).reshape(5,5))
new_order = np.random.permutation(5)
new_order
# 对象所有行应用take函数,将新次序传给它
nframe.take(new_order)
# 对对象的一部分进行排序操作
new_order = [3,4,2]
nframe.take(new_order)
# 随机取样
# 从[0,5)内选取三个数可重复]
sample = np.random.randint(0, len(nframe), 3)
sample
nframe.take(sample)
## 字符串处理 ##
# 内置字符串处理方法 #
# split()函数分割
text = '16 Boston Avenue, Boston'
text.split(',')
address, city = [s.strip() for s in text.split(',')]
# 字符串拼接
address + ',' + city
# 拼接数量多时,join()函数
strings = ['a','a','b','b','c','c']
';'.join(strings)
# 查找字符串
# in关键字
'Boston' in text
# index()、find()
text.index('Boston')
text.find('Boston')
# 没能查找到子串时,index()函数报错,find()函数返回-1
text.index('aaa')
text.find('aaa')
# 字符串在文本中出现的次数
text.count('o')
text.count('Boston')
# 替换子串
text.replace('Avenue', 'Street')
# 空字符替换子串,相当于删除子串
text.replace('1', '')
# 正则表达式 #
# re模块用于操作regex对象
# re模块函数:模式匹配、替换、切分
import re
text = "This is an\t odd \n text!"
# 表示一个或多个空白字符的正则表达式为\s+,将正则表达式作为分隔符
re.split('\s+', text)
# 调用re.split()函数时,首先编译正则表达式,随后作为参数传入的文本上调用split函数
# 先用re.compile()函数编译正则表达式
regex = re.compile('\s+')
regex.split(text)
# findall()函数可匹配文本中所有符合正则表达式的子串,返回一个列表
text = "This is my address: 16 Bolton Avenue, Boston"
re.findall('A\w+', text)
re.findall('[A,a]\w+', text)
# search()函数返回第一处符合模式的子串,返回子串在字符串中的开始和结束位置
search = re.search('[A,a]\w+', text)
search.start()
search.end()
text[search.start(): search.end()]
# match()函数从字符串开头开始匹配,若第一个字符不匹配就不会再搜索字符串内部,若没能找到不会返回任何对象
re.match('[A,a]\w+', text)
match = re.match('T\w+', text)
text[match.start():match.end()]
## 数据聚合 ##
# groupby过程 #
# 分组、用函数处理、合并
# 实例 #
frame = pd.DataFrame({'color':['w','r','g','r','g'],
'object':['pen','pencil','pencil','ashtray','pen'],
'price1':[5.5,4.2,1.3,0.5,2.7],
'price2':[4.7,4.1,1.6,0.7,3.1]})
# 使用color列的组标签计算price1列的均值
group = frame['price1'].groupby(frame['color'])
# groups属性查看分组情况
group.groups
group.mean()
group.sum()
# 等级分组
# 使用多个键,按照等级关系分组
ggroup = frame['price1'].groupby([frame['color'], frame['object']])
ggroup.groups
ggroup.sum()
frame[['price1','price2']].groupby(frame['color']).mean()
frame.groupby(frame['color']).mean()
## 组迭代 ##
for name, group in frame.groupby('color'):
print(name)
print(group)
# 链式转换 #
result1 = frame['price1'].groupby(frame['color']).mean()
type(result1)
result2 = frame.groupby(frame['color']).mean()
type(result2)
# 分组操作的灵活性
frame['price1'].groupby(frame['color']).mean()
frame.groupby(frame['color'])['price'].mean()
(frame.groupby(frame['color']).mean())['price1']
# 列名添加前缀
means = frame.groupby(frame['color']).mean().add_prefix('mean_')
means
# 分组函数
group = frame.groupby('color')
group['price1'].quantile(0.6)
# 自定义聚合函数,将其作为参数传给agg()函数
def range(series):
return series.max() - series.min()
group['price1'].agg(range)
group.agg(range)
# 使用多个聚合函数
group['price1'].agg(['mean', 'std', range])
## 高级数据聚合 ##
# transform()函数、apply()函数
frame = pd.DataFrame({'color':['w','r','g','r','g'],
'price1':[5.5,4.2,1.3,0.5,2.7],
'price2':[4.7,4.1,1.6,0.7,3.1]})
sums = frame.groupby('color').sum().add_prefix('tot_')
pd.merge(frame, sums, left_on='color', right_index=True)
# transform()函数的参数必须生成一个标量聚合
frame.groupby('color').transform(np.sum).add_prefix('tot_')
frame = pd.DataFrame({'color':['w','r','g','r','g'],
'status':['u','u','d','d','d'],
'price1':[5.5,4.2,1.3,0.5,2.7],
'price2':[4.7,4.1,1.6,0.7,3.1]})
frame.groupby(['color','status']).apply(lambda x:x.max())
reindex = {0:'first', 1:'second', 2:'third', 3:'fourth', 4:'fifth'}
recolumn = {'item':'object', 'price':'value'}
frame.rename(index=reindex, columns=recolumn)
temp = pd.date_range('1/1/2015', periods=10, freq='H')
timeseries = pd.Series(np.random.rand(10), index=temp)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 19:50:38 2019
@author: admin
"""
import numpy as np
import pandas as pd
### pandas数据处理 ###
# 数据准备、数据转换、数据聚合
## 数据准备 ##
# 加载、组装:合并,拼接,组合、变形(轴向旋转)、删除
# 合并 #
# merge()函数
frame1 = pd.DataFrame({'id':['ball','pencil','pen','mug','ashtray'],
'price':[12.33,11.44,33.21,13.23,33.62]})
frame2 = pd.DataFrame({'id':['pencil','pencil','ball','pen'],
'color':['white','red','red','black']})
frame1
frame2
# 合并frame1与frame2
pd.merge(frame1, frame2)
# 指定基于哪一列合并,增加on选项
frame1 = pd.DataFrame({'id':['ball','pencil','pen','mug','ashtray'],
'color': ['white','red','red','black','green'],
'brand':['OMG','ABC','ABC','POD','POD']})
frame2 = pd.DataFrame({'id':['pencil','pencil','ball','pen'],
'brand':['OMG','POD','ABC','POD']})
frame1
frame2
# frame1与frame2有两个相同列名的列,对其执行合并操作得到空DataFrame对象
pd.merge(frame1, frame2)
# 指定其合并操作标准
pd.merge(frame1, frame2, on='id')
pd.merge(frame1, frame2, on='brand')
# 使用left_on和right_on指定frame1和frame2的基准列,即以frame1中id与frame2中是sid执行合并操作
frame2.columns = ['brand','sid']
pd.merge(frame1, frame2, left_on='id', right_on='brand')
# merge()函数默认执行内连接操作,how选项可以指定连接方式
frame2.columns = ['id','brand']
pd.merge(frame1, frame2, on='id')
pd.merge(frame1, frame2, on='id', how='outer')
pd.merge(frame1, frame2, on='id', how='left')
pd.merge(frame1, frame2, on='id', how='right')
# 合并多个键
pd.merge(frame1, frame2, on=['id','brand'], how='left')
# 根据索引合并
# 将left_index与right_index选项设置为Ture,可将索引而非键作为合并的基准
pd.merge(frame1,frame2,left_index=True,right_index=True)
# DataFrame对象的join()函数更适合根据索引进行合并,可以用于合并多个索引相同或索引相同但列却不一致的DataFrame对象
frame2.columns = ['brand2','id2']
frame1.join(frame2)
## 拼接 ##
# numpy中concatenate()函数
array1 = np.arange(9).reshape((3,3))
array2 = array1 + 6
np.concatenate([array1, array2])
np.concatenate([array1, array2], axis=1)
# pandas中concat()函数实现按轴拼接的功能
ser1 = pd.Series(np.random.rand(4), index=[1,2,3,4])
ser2 = pd.Series(np.random.rand(4), index=[5,6,7,8])
# 默认按照axis=0拼接数据
pd.concat([ser1, ser2])
# 结果中无重复数据,实际上执行的是外连接操作
ser3 = pd.concat([ser1,ser2], axis=1)
pd.concat([ser1,ser3], axis=1, join='inner')
# 在用于拼接的轴上创建等级索引,keys选项
pd.concat([ser1,ser2], keys=[1,2])
# axis=1时,指定的键变为DataFrame对象的列名
pd.concat([ser1,ser2], axis=1, keys=[1,2])
frame1 = pd.DataFrame(np.random.rand(9).reshape(3,3), index=[1,2,3],
columns=['A','B','C'])
frame2 = pd.DataFrame(np.random.rand(9).reshape(3,3), index=[4,5,6],
columns=['A','B','C'])
pd.concat([frame1, frame2])
pd.concat([frame1,frame2], axis=1)
# 组合 #
# combine函数可用来组合series对象并对其数据
ser1 = pd.Series(np.random.rand(5), index=[1,2,3,4,5])
ser2 = pd.Series(np.random.rand(4), index=[2,4,5,6])
ser1
ser2
# 相同索引处使用的是ser1的值
ser1.combine_first(ser2)
# 相同索引处使用的是ser2的值
ser2.combine_first(ser1)
# 进行部分合并,索引值1,2,3,4使用的都是ser1的值
ser1[:4].combine_first(ser2[:4])
# 轴向旋转 #
# 轴向旋转有两个基础操作:入栈-旋转数据结构,将列转换为行、出栈-行转为列
# 按等级索引旋转
frame1 = pd.DataFrame(np.arange(9).reshape(3,3), index=['white','red','black'],
columns=['ball','pen','pencil'])
# 列转为行,得到一个series对象
ser1 = frame1.stack()
ser1.unstack()
# 出栈操作可应用于不同的层级,为unstack函数传入表示层级的编号或名称
ser1.unstack(0)
ser1.unstack(1)
# 长格式转换为宽格式 pivot()函数,可以使用键的一列或多列作为参数
# 长格式:各列都有数据项,每一列后面的数据常常会根前面的有所重复,并且常常为列表形式,有一行行数据组成
longframe = pd.DataFrame({'color':['white','white','white','red','red','red','black','black','black'],
'item':['ball','pen','mug','ball','pen','mug','ball','pen','mug'],
'value':np.random.rand(9)})
longframe
longframe.pivot('color','item')
longframe.pivot('item','color')
# 删除 #
frame1 = pd.DataFrame(np.arange(9).reshape(3,3), index=['white','black','red'],
columns=['ball','en','pencil'])
# 删除一列 del
del frame1['ball']
frame1
# 删除一行drop函数
frame1.drop('white')
## 数据转换 ##
# 删除重复元素 #
dframe = pd.DataFrame({'color': ['white','white','red','red','white'],
'value':[2,1,3,3,2]})
dframe
# duplicated()函数可用来检测重复的行,返回元素为布尔型的Series对象
# 每个元素对应一行,如果该行与其他行重复则元素为True
dframe.duplicated()
# 查找重复的行
dframe[dframe.duplicated()]
# drop_duplicates()函数返回删除重复行后的DataFrame对象
dframe.drop_duplicates()
# 映射 #
# repacle():替换元素、map()新建一列、rename():替换索引
# 用映射替换元素
frame = pd.DataFrame({'item':['ball','mug','pen','pencil','ashtray'],
'color':['white','rosso','verde','black','yellow'],
'price':[5.56,4.20,1.30,0.56,2.75]})
frame
# 用新元素替换不正确的元素,需要定义一组映射关系,旧元素作为键,新元素作为值
newcolors = {'rosso':'red', 'verde':'green'}
frame.replace(newcolors)
# 将NaN替换为正确的元素
ser = pd.Series([1,3,np.nan,4,6,np.nan,2])
ser.replace(np.nan, 0)
# 用映射添加元素
frame = pd.DataFrame({'item':['ball','mug','pen','pencil','ashtray'],
'color':['white','red','green','black','yellow']})
price = {'ball':5.56, 'mug':4.20, 'bottle':1.3, 'scissors':3.41, 'pen':1.30, 'pencil':0.56, 'ashtray':2.75}
# map()函数可应用于series对象或是dataframe对象的一列,接收一个函数或表示映射关系的字典对象作为参数。
# item列应用映射关系,字典price作为参数
frame['price'] = frame['item'].map(price)
# 重命名轴索引
# rename()函数以表示映射关系的字典对象作为参数,替换轴的索引标签
reindex = {0:'first', 1:'second', 2:'third', 3:'fourth', 4:'fifth'}
frame.rename(reindex)
# 若要重命名各列,必须使用columns选项
recolumn = {'item':'object', 'price':'value'}
frame.rename(index=reindex, columns=recolumn)
frame.rename(index={1:'first'}, columns={'item':'object'})
# rename函数返回一个新的dataframe对象,原对象保持不变,如果要修改调用函数对象本身,可将inplace选项设置为True
## 离散化和面元划分 ##
frame.rename(columns={'item':'object'}, inplace=True)
frame
# 若实验数据范围为0~100,分为四部分即四个面元(bin)
# 定义一个数组,存储用于面元划分的各数值
bins = [0,25,50,75,100]
# 对results数组应用cut()函数,传入bins变量作为参数
results = np.random.randint(0, high=100, size=20)
cat = pd.cut(results, bins)
cat
# 显示第几个面元的index值
cat.codes
# cat()函数返回的对象为categorical类别型类型,可以看作一个字符串数组,元素为面元的名称
# levels数组为不同内部类别的名称,labels数组的元素数量跟results数组相同,labels数组的个数字表示results数组元素所属的面元
# 2.7cat.levels
# 2.7cat.labels
# 每个面元出现的次数
pd.value_counts(cat)
# 可以用字符串数组指定面元的名称,将其赋给cut函数的labels选项
bin_names = ['unlikely','less likely','likely','high likely']
pd.cut(results, bins, labels=bin_names)
# 若不指定面元的各界限,只传入一个整数作为参数,cut函数就会按照指定的数字,把数组元素取值范围划分为几部分
pd.cut(results, 5)
# 邓频分箱qcut()函数直接将样本划分为5个面元,每个面元样本数量相等,区间大小不等
quintiles = pd.qcut(results, 5)
pd.value_counts(quintiles)
# 异常值检测和过滤
# 生成3列每列1000个服从标准正态分布的随机数
randframe = pd.DataFrame(np.random.randn(1000, 3))
randframe.describe()
randframe.std()
# any函数
randframe[(np.abs(randframe) > (3*randframe.std())).any(1)]
## 排序 ##
# np.random.permutation()函数
nframe = pd.DataFrame(np.arange(25).reshape(5,5))
new_order = np.random.permutation(5)
new_order
# 对象所有行应用take函数,将新次序传给它
nframe.take(new_order)
# 对对象的一部分进行排序操作
new_order = [3,4,2]
nframe.take(new_order)
# 随机取样
# 从[0,5)内选取三个数可重复]
sample = np.random.randint(0, len(nframe), 3)
sample
nframe.take(sample)
## 字符串处理 ##
# 内置字符串处理方法 #
# split()函数分割
text = '16 Boston Avenue, Boston'
text.split(',')
address, city = [s.strip() for s in text.split(',')]
# 字符串拼接
address + ',' + city
# 拼接数量多时,join()函数
strings = ['a','a','b','b','c','c']
';'.join(strings)
# 查找字符串
# in关键字
'Boston' in text
# index()、find()
text.index('Boston')
text.find('Boston')
# 没能查找到子串时,index()函数报错,find()函数返回-1
text.index('aaa')
text.find('aaa')
# 字符串在文本中出现的次数
text.count('o')
text.count('Boston')
# 替换子串
text.replace('Avenue', 'Street')
# 空字符替换子串,相当于删除子串
text.replace('1', '')
# 正则表达式 #
# re模块用于操作regex对象
# re模块函数:模式匹配、替换、切分
import re
text = "This is an\t odd \n text!"
# 表示一个或多个空白字符的正则表达式为\s+,将正则表达式作为分隔符
re.split('\s+', text)
# 调用re.split()函数时,首先编译正则表达式,随后作为参数传入的文本上调用split函数
# 先用re.compile()函数编译正则表达式
regex = re.compile('\s+')
regex.split(text)
# findall()函数可匹配文本中所有符合正则表达式的子串,返回一个列表
text = "This is my address: 16 Bolton Avenue, Boston"
re.findall('A\w+', text)
re.findall('[A,a]\w+', text)
# search()函数返回第一处符合模式的子串,返回子串在字符串中的开始和结束位置
search = re.search('[A,a]\w+', text)
search.start()
search.end()
text[search.start(): search.end()]
# match()函数从字符串开头开始匹配,若第一个字符不匹配就不会再搜索字符串内部,若没能找到不会返回任何对象
re.match('[A,a]\w+', text)
match = re.match('T\w+', text)
text[match.start():match.end()]
## 数据聚合 ##
# groupby过程 #
# 分组、用函数处理、合并
# 实例 #
frame = pd.DataFrame({'color':['w','r','g','r','g'],
'object':['pen','pencil','pencil','ashtray','pen'],
'price1':[5.5,4.2,1.3,0.5,2.7],
'price2':[4.7,4.1,1.6,0.7,3.1]})
# 使用color列的组标签计算price1列的均值
group = frame['price1'].groupby(frame['color'])
# groups属性查看分组情况
group.groups
group.mean()
group.sum()
# 等级分组
# 使用多个键,按照等级关系分组
ggroup = frame['price1'].groupby([frame['color'], frame['object']])
ggroup.groups
ggroup.sum()
frame[['price1','price2']].groupby(frame['color']).mean()
frame.groupby(frame['color']).mean()
## 组迭代 ##
for name, group in frame.groupby('color'):
print(name)
print(group)
# 链式转换 #
result1 = frame['price1'].groupby(frame['color']).mean()
type(result1)
result2 = frame.groupby(frame['color']).mean()
type(result2)
# 分组操作的灵活性
frame['price1'].groupby(frame['color']).mean()
frame.groupby(frame['color'])['price'].mean()
(frame.groupby(frame['color']).mean())['price1']
# 列名添加前缀
means = frame.groupby(frame['color']).mean().add_prefix('mean_')
means
# 分组函数
group = frame.groupby('color')
group['price1'].quantile(0.6)
# 自定义聚合函数,将其作为参数传给agg()函数
def range(series):
return series.max() - series.min()
group['price1'].agg(range)
group.agg(range)
# 使用多个聚合函数
group['price1'].agg(['mean', 'std', range])
## 高级数据聚合 ##
# transform()函数、apply()函数
frame = pd.DataFrame({'color':['w','r','g','r','g'],
'price1':[5.5,4.2,1.3,0.5,2.7],
'price2':[4.7,4.1,1.6,0.7,3.1]})
sums = frame.groupby('color').sum().add_prefix('tot_')
pd.merge(frame, sums, left_on='color', right_index=True)
# transform()函数的参数必须生成一个标量聚合
frame.groupby('color').transform(np.sum).add_prefix('tot_')
frame = pd.DataFrame({'color':['w','r','g','r','g'],
'status':['u','u','d','d','d'],
'price1':[5.5,4.2,1.3,0.5,2.7],
'price2':[4.7,4.1,1.6,0.7,3.1]})
frame.groupby(['color','status']).apply(lambda x:x.max())
reindex = {0:'first', 1:'second', 2:'third', 3:'fourth', 4:'fifth'}
recolumn = {'item':'object', 'price':'value'}
frame.rename(index=reindex, columns=recolumn)
temp = pd.date_range('1/1/2015', periods=10, freq='H')
timeseries = pd.Series(np.random.rand(10), index=temp)
|
zh
| 0.944465
|
# -*- coding: utf-8 -*- Created on Tue Feb 26 19:50:38 2019 @author: admin ### pandas数据处理 ### # 数据准备、数据转换、数据聚合 ## 数据准备 ## # 加载、组装:合并,拼接,组合、变形(轴向旋转)、删除 # 合并 # # merge()函数 # 合并frame1与frame2 # 指定基于哪一列合并,增加on选项 # frame1与frame2有两个相同列名的列,对其执行合并操作得到空DataFrame对象 # 指定其合并操作标准 # 使用left_on和right_on指定frame1和frame2的基准列,即以frame1中id与frame2中是sid执行合并操作 # merge()函数默认执行内连接操作,how选项可以指定连接方式 # 合并多个键 # 根据索引合并 # 将left_index与right_index选项设置为Ture,可将索引而非键作为合并的基准 # DataFrame对象的join()函数更适合根据索引进行合并,可以用于合并多个索引相同或索引相同但列却不一致的DataFrame对象 ## 拼接 ## # numpy中concatenate()函数 # pandas中concat()函数实现按轴拼接的功能 # 默认按照axis=0拼接数据 # 结果中无重复数据,实际上执行的是外连接操作 # 在用于拼接的轴上创建等级索引,keys选项 # axis=1时,指定的键变为DataFrame对象的列名 # 组合 # # combine函数可用来组合series对象并对其数据 # 相同索引处使用的是ser1的值 # 相同索引处使用的是ser2的值 # 进行部分合并,索引值1,2,3,4使用的都是ser1的值 # 轴向旋转 # # 轴向旋转有两个基础操作:入栈-旋转数据结构,将列转换为行、出栈-行转为列 # 按等级索引旋转 # 列转为行,得到一个series对象 # 出栈操作可应用于不同的层级,为unstack函数传入表示层级的编号或名称 # 长格式转换为宽格式 pivot()函数,可以使用键的一列或多列作为参数 # 长格式:各列都有数据项,每一列后面的数据常常会根前面的有所重复,并且常常为列表形式,有一行行数据组成 # 删除 # # 删除一列 del # 删除一行drop函数 ## 数据转换 ## # 删除重复元素 # # duplicated()函数可用来检测重复的行,返回元素为布尔型的Series对象 # 每个元素对应一行,如果该行与其他行重复则元素为True # 查找重复的行 # drop_duplicates()函数返回删除重复行后的DataFrame对象 # 映射 # # repacle():替换元素、map()新建一列、rename():替换索引 # 用映射替换元素 # 用新元素替换不正确的元素,需要定义一组映射关系,旧元素作为键,新元素作为值 # 将NaN替换为正确的元素 # 用映射添加元素 # map()函数可应用于series对象或是dataframe对象的一列,接收一个函数或表示映射关系的字典对象作为参数。 # item列应用映射关系,字典price作为参数 # 重命名轴索引 # rename()函数以表示映射关系的字典对象作为参数,替换轴的索引标签 # 若要重命名各列,必须使用columns选项 # rename函数返回一个新的dataframe对象,原对象保持不变,如果要修改调用函数对象本身,可将inplace选项设置为True ## 离散化和面元划分 ## # 若实验数据范围为0~100,分为四部分即四个面元(bin) # 定义一个数组,存储用于面元划分的各数值 # 对results数组应用cut()函数,传入bins变量作为参数 # 显示第几个面元的index值 # cat()函数返回的对象为categorical类别型类型,可以看作一个字符串数组,元素为面元的名称 # levels数组为不同内部类别的名称,labels数组的元素数量跟results数组相同,labels数组的个数字表示results数组元素所属的面元 # 2.7cat.levels # 2.7cat.labels # 每个面元出现的次数 # 可以用字符串数组指定面元的名称,将其赋给cut函数的labels选项 # 若不指定面元的各界限,只传入一个整数作为参数,cut函数就会按照指定的数字,把数组元素取值范围划分为几部分 # 邓频分箱qcut()函数直接将样本划分为5个面元,每个面元样本数量相等,区间大小不等 # 异常值检测和过滤 # 生成3列每列1000个服从标准正态分布的随机数 # any函数 ## 排序 ## # 
np.random.permutation()函数 # 对象所有行应用take函数,将新次序传给它 # 对对象的一部分进行排序操作 # 随机取样 # 从[0,5)内选取三个数可重复] ## 字符串处理 ## # 内置字符串处理方法 # # split()函数分割 # 字符串拼接 # 拼接数量多时,join()函数 # 查找字符串 # in关键字 # index()、find() # 没能查找到子串时,index()函数报错,find()函数返回-1 # 字符串在文本中出现的次数 # 替换子串 # 空字符替换子串,相当于删除子串 # 正则表达式 # # re模块用于操作regex对象 # re模块函数:模式匹配、替换、切分 # 表示一个或多个空白字符的正则表达式为\s+,将正则表达式作为分隔符 # 调用re.split()函数时,首先编译正则表达式,随后作为参数传入的文本上调用split函数 # 先用re.compile()函数编译正则表达式 # findall()函数可匹配文本中所有符合正则表达式的子串,返回一个列表 # search()函数返回第一处符合模式的子串,返回子串在字符串中的开始和结束位置 # match()函数从字符串开头开始匹配,若第一个字符不匹配就不会再搜索字符串内部,若没能找到不会返回任何对象 ## 数据聚合 ## # groupby过程 # # 分组、用函数处理、合并 # 实例 # # 使用color列的组标签计算price1列的均值 # groups属性查看分组情况 # 等级分组 # 使用多个键,按照等级关系分组 ## 组迭代 ## # 链式转换 # # 分组操作的灵活性 # 列名添加前缀 # 分组函数 # 自定义聚合函数,将其作为参数传给agg()函数 # 使用多个聚合函数 ## 高级数据聚合 ## # transform()函数、apply()函数 # transform()函数的参数必须生成一个标量聚合
| 2.65185
| 3
|
prueba.py
|
marlonvillaverde/condominio
| 0
|
6629610
|
' primero tenemos la lista con los diccionarios de atletas y el pais'
lista = []
lista.append( { 'nombre': 'Aaron', 'pais': 'Argentina'} )
lista.append( { 'nombre': 'Alain', 'pais': 'Alemania'} )
lista.append( { 'nombre': 'Juan', 'pais': 'Argelia'} )
lista.append( { 'nombre': 'Clevey', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Betty', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Davide', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Betty', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Luis', 'pais': 'Argelia'} )
lista.append( { 'nombre': 'Merry', 'pais': 'Alemania'} )
lista.append( { 'nombre': 'Trina', 'pais': 'Aargentina'} )
lista.append( { 'nombre': 'Nelly', 'pais': 'Andorra'} )
lista.append( { 'nombre': 'Carol', 'pais': 'Australia'} )
paises=[]
cuenta = 0
for elemento in lista:
variable = 0
for pais in paises:
if pais['nombre'] == elemento['pais']:
pais['cuenta'] = pais['cuenta']+1
variable = 1
if variable == 0:
paises.append({'nombre': elemento['pais'], 'cuenta': 1})
cuenta = cuenta + 1
for elemento in paises:
print (elemento)
|
' primero tenemos la lista con los diccionarios de atletas y el pais'
lista = []
lista.append( { 'nombre': 'Aaron', 'pais': 'Argentina'} )
lista.append( { 'nombre': 'Alain', 'pais': 'Alemania'} )
lista.append( { 'nombre': 'Juan', 'pais': 'Argelia'} )
lista.append( { 'nombre': 'Clevey', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Betty', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Davide', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Betty', 'pais': 'Austria'} )
lista.append( { 'nombre': 'Luis', 'pais': 'Argelia'} )
lista.append( { 'nombre': 'Merry', 'pais': 'Alemania'} )
lista.append( { 'nombre': 'Trina', 'pais': 'Aargentina'} )
lista.append( { 'nombre': 'Nelly', 'pais': 'Andorra'} )
lista.append( { 'nombre': 'Carol', 'pais': 'Australia'} )
paises=[]
cuenta = 0
for elemento in lista:
variable = 0
for pais in paises:
if pais['nombre'] == elemento['pais']:
pais['cuenta'] = pais['cuenta']+1
variable = 1
if variable == 0:
paises.append({'nombre': elemento['pais'], 'cuenta': 1})
cuenta = cuenta + 1
for elemento in paises:
print (elemento)
|
none
| 1
| 3.684686
| 4
|
|
use_cases/MarzV3/backend/marzv2/admin.py
|
einshoe/ssv-examples
| 0
|
6629611
|
<reponame>einshoe/ssv-examples<filename>use_cases/MarzV3/backend/marzv2/admin.py
from django.contrib import admin
# Register your models here.
# Register your models here.
from .models import FitsFiles
from .models import JsonFiles
from .models import TemplateSpectra
from .models import EmissionSpectra
admin.site.register(FitsFiles)
admin.site.register(JsonFiles)
admin.site.register(TemplateSpectra)
admin.site.register(EmissionSpectra)
|
from django.contrib import admin
# Register your models here.
# Register your models here.
from .models import FitsFiles
from .models import JsonFiles
from .models import TemplateSpectra
from .models import EmissionSpectra
admin.site.register(FitsFiles)
admin.site.register(JsonFiles)
admin.site.register(TemplateSpectra)
admin.site.register(EmissionSpectra)
|
en
| 0.969169
|
# Register your models here. # Register your models here.
| 1.361419
| 1
|
opus/application/apps/help/test_help.py
|
fyellin/pds-opus
| 10
|
6629612
|
# help/test_help.py
import logging
from unittest import TestCase
from django.core.cache import cache
from django.http import Http404
from django.test import RequestFactory
from help.views import (api_about,
api_citing_opus,
api_api_guide,
api_faq,
api_gettingstarted,
api_splash,
api_volumes)
import settings
class helpTests(TestCase):
def setUp(self):
self.maxDiff = None
settings.OPUS_FAKE_API_DELAYS = 0
settings.OPUS_FAKE_SERVER_ERROR404_PROBABILITY = 0
settings.OPUS_FAKE_SERVER_ERROR500_PROBABILITY = 0
logging.disable(logging.ERROR)
cache.clear()
self.factory = RequestFactory()
def tearDown(self):
logging.disable(logging.NOTSET)
########################################
######### api_about UNIT TESTS #########
########################################
def test__api_about_no_request(self):
"[test_help.py] api_about: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/about.html'):
api_about(None, 'html')
def test__api_about_no_get(self):
"[test_help.py] api_about: no GET"
request = self.factory.get('__help/about.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/about.html'):
api_about(request, 'html')
##########################################
######### api_volumes UNIT TESTS #########
##########################################
def test__api_volumes_no_request(self):
"[test_help.py] api_volumes: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/volumes.html'):
api_volumes(None, 'html')
def test__api_volumes_no_get(self):
"[test_help.py] api_volumes: no GET"
request = self.factory.get('__help/volumes.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/volumes.html'):
api_volumes(request, 'html')
######################################
######### api_faq UNIT TESTS #########
######################################
def test__api_faq_no_request(self):
"[test_help.py] api_faq: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/faq.html'):
api_faq(None, 'html')
def test__api_faq_no_get(self):
"[test_help.py] api_faq: no GET"
request = self.factory.get('__help/faq.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/faq.html'):
api_faq(request, 'html')
########################################
######### api_guide UNIT TESTS #########
########################################
def test__api_api_guide_no_request(self):
"[test_help.py] api_api_guide: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/apiguide.html'):
api_api_guide(None, 'html')
def test__api_api_guide_no_get(self):
"[test_help.py] api_api_guide: no GET"
request = self.factory.get('__help/apiguide.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/apiguide.html'):
api_api_guide(request, 'html')
#################################################
######### api_gettingstarted UNIT TESTS #########
#################################################
def test__api_gettingstarted_no_request(self):
"[test_help.py] api_gettingstarted: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/gettingstarted.html'):
api_gettingstarted(None, 'html')
def test__api_gettingstarted_no_get(self):
"[test_help.py] api_gettingstarted: no GET"
request = self.factory.get('__help/gettingstarted.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/gettingstarted.html'):
api_gettingstarted(request, 'html')
#########################################
######### api_splash UNIT TESTS #########
#########################################
def test__api_splash_no_request(self):
"[test_help.py] api_splash: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/splash.html'):
api_splash(None)
def test__api_splash_no_get(self):
"[test_help.py] api_splash: no GET"
request = self.factory.get('__help/splash.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/splash.html'):
api_splash(request)
##############################################
######### api_citing_opus UNIT TESTS #########
##############################################
def test__api_citing_opus_no_request(self):
"[test_help.py] api_citing_opus: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/citing.html'):
api_citing_opus(None, 'html')
def test__api_citing_opus_no_get(self):
"[test_help.py] api_citing_opus: no GET"
request = self.factory.get('__help/citing.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/citing.html'):
api_citing_opus(request, 'html')
|
# help/test_help.py
import logging
from unittest import TestCase
from django.core.cache import cache
from django.http import Http404
from django.test import RequestFactory
from help.views import (api_about,
api_citing_opus,
api_api_guide,
api_faq,
api_gettingstarted,
api_splash,
api_volumes)
import settings
class helpTests(TestCase):
def setUp(self):
self.maxDiff = None
settings.OPUS_FAKE_API_DELAYS = 0
settings.OPUS_FAKE_SERVER_ERROR404_PROBABILITY = 0
settings.OPUS_FAKE_SERVER_ERROR500_PROBABILITY = 0
logging.disable(logging.ERROR)
cache.clear()
self.factory = RequestFactory()
def tearDown(self):
logging.disable(logging.NOTSET)
########################################
######### api_about UNIT TESTS #########
########################################
def test__api_about_no_request(self):
"[test_help.py] api_about: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/about.html'):
api_about(None, 'html')
def test__api_about_no_get(self):
"[test_help.py] api_about: no GET"
request = self.factory.get('__help/about.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/about.html'):
api_about(request, 'html')
##########################################
######### api_volumes UNIT TESTS #########
##########################################
def test__api_volumes_no_request(self):
"[test_help.py] api_volumes: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/volumes.html'):
api_volumes(None, 'html')
def test__api_volumes_no_get(self):
"[test_help.py] api_volumes: no GET"
request = self.factory.get('__help/volumes.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/volumes.html'):
api_volumes(request, 'html')
######################################
######### api_faq UNIT TESTS #########
######################################
def test__api_faq_no_request(self):
"[test_help.py] api_faq: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/faq.html'):
api_faq(None, 'html')
def test__api_faq_no_get(self):
"[test_help.py] api_faq: no GET"
request = self.factory.get('__help/faq.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/faq.html'):
api_faq(request, 'html')
########################################
######### api_guide UNIT TESTS #########
########################################
def test__api_api_guide_no_request(self):
"[test_help.py] api_api_guide: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/apiguide.html'):
api_api_guide(None, 'html')
def test__api_api_guide_no_get(self):
"[test_help.py] api_api_guide: no GET"
request = self.factory.get('__help/apiguide.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/apiguide.html'):
api_api_guide(request, 'html')
#################################################
######### api_gettingstarted UNIT TESTS #########
#################################################
def test__api_gettingstarted_no_request(self):
"[test_help.py] api_gettingstarted: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/gettingstarted.html'):
api_gettingstarted(None, 'html')
def test__api_gettingstarted_no_get(self):
"[test_help.py] api_gettingstarted: no GET"
request = self.factory.get('__help/gettingstarted.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/gettingstarted.html'):
api_gettingstarted(request, 'html')
#########################################
######### api_splash UNIT TESTS #########
#########################################
def test__api_splash_no_request(self):
"[test_help.py] api_splash: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/splash.html'):
api_splash(None)
def test__api_splash_no_get(self):
"[test_help.py] api_splash: no GET"
request = self.factory.get('__help/splash.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/splash.html'):
api_splash(request)
##############################################
######### api_citing_opus UNIT TESTS #########
##############################################
def test__api_citing_opus_no_request(self):
"[test_help.py] api_citing_opus: no request"
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/citing.html'):
api_citing_opus(None, 'html')
def test__api_citing_opus_no_get(self):
"[test_help.py] api_citing_opus: no GET"
request = self.factory.get('__help/citing.html')
request.GET = None
with self.assertRaisesRegex(Http404,
r'Internal error \(No request was provided\) for /__help/citing.html'):
api_citing_opus(request, 'html')
|
de
| 0.784306
|
# help/test_help.py ######################################## ######### api_about UNIT TESTS ######### ######################################## ########################################## ######### api_volumes UNIT TESTS ######### ########################################## ###################################### ######### api_faq UNIT TESTS ######### ###################################### ######################################## ######### api_guide UNIT TESTS ######### ######################################## ################################################# ######### api_gettingstarted UNIT TESTS ######### ################################################# ######################################### ######### api_splash UNIT TESTS ######### ######################################### ############################################## ######### api_citing_opus UNIT TESTS ######### ##############################################
| 2.125292
| 2
|
setup.py
|
simbuerg/benchbuild
| 0
|
6629613
|
<filename>setup.py
#!/usr/bin/env python3
from setuptools import setup, find_packages
extra_files = [
"templates/compiler.py.inc",
"templates/run_static.py.inc",
"templates/run_dynamic.py.inc",
"templates/slurm-prepare-node.sh.inc",
"templates/slurm-cleanup-node.sh.inc"
]
sql_extra_files = [
"func.compare_region_wise2.sql",
"func.experiments.sql",
"func.recompilation.sql",
"func.run_regions.sql",
"func.total_dyncov_clean.sql",
"func.total_speedup.sql",
"func.compare_region_wise.sql",
"func.project_region_time.sql",
"func.run_durations.sql",
"func.speedup.sql",
"func.total_dyncov.sql",
"func.pj-test-eval.sql"
]
setup(
name='benchbuild',
version='1.3.2',
url='https://github.com/PolyJIT/benchbuild',
packages=find_packages(exclude=["docs", "extern", "filters", "linker",
"src", "statistics", "tests", "results"]),
package_data={"benchbuild.utils": extra_files,
"benchbuild": sql_extra_files},
include_package_data=True,
install_requires=[
"lazy==1.2", "SQLAlchemy==1.0.4", "dill==0.2.4", "plumbum>=1.5.0",
"regex==2015.5.28", "wheel==0.24.0", "parse==1.6.6",
"virtualenv==13.1.0", "sphinxcontrib-napoleon", "psycopg2",
"sqlalchemy-migrate", "six>=1.7.0", "psutil>=4.0.0", "pylint>=1.5.5",
"seaborn>=0.7.1", "pandas>=0.19.2", "matplotlib==1.5.3"
],
author="<NAME>",
author_email="<EMAIL>",
description="This is the experiment driver for the benchbuild study",
license="MIT",
entry_points={
'console_scripts': ['benchbuild=benchbuild.driver:main',
'container=benchbuild.container:main']
},
classifiers=[
'Development Status :: 4 - Beta', 'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
keywords="benchbuild experiments run-time", )
|
<filename>setup.py
#!/usr/bin/env python3
from setuptools import setup, find_packages
extra_files = [
"templates/compiler.py.inc",
"templates/run_static.py.inc",
"templates/run_dynamic.py.inc",
"templates/slurm-prepare-node.sh.inc",
"templates/slurm-cleanup-node.sh.inc"
]
sql_extra_files = [
"func.compare_region_wise2.sql",
"func.experiments.sql",
"func.recompilation.sql",
"func.run_regions.sql",
"func.total_dyncov_clean.sql",
"func.total_speedup.sql",
"func.compare_region_wise.sql",
"func.project_region_time.sql",
"func.run_durations.sql",
"func.speedup.sql",
"func.total_dyncov.sql",
"func.pj-test-eval.sql"
]
setup(
name='benchbuild',
version='1.3.2',
url='https://github.com/PolyJIT/benchbuild',
packages=find_packages(exclude=["docs", "extern", "filters", "linker",
"src", "statistics", "tests", "results"]),
package_data={"benchbuild.utils": extra_files,
"benchbuild": sql_extra_files},
include_package_data=True,
install_requires=[
"lazy==1.2", "SQLAlchemy==1.0.4", "dill==0.2.4", "plumbum>=1.5.0",
"regex==2015.5.28", "wheel==0.24.0", "parse==1.6.6",
"virtualenv==13.1.0", "sphinxcontrib-napoleon", "psycopg2",
"sqlalchemy-migrate", "six>=1.7.0", "psutil>=4.0.0", "pylint>=1.5.5",
"seaborn>=0.7.1", "pandas>=0.19.2", "matplotlib==1.5.3"
],
author="<NAME>",
author_email="<EMAIL>",
description="This is the experiment driver for the benchbuild study",
license="MIT",
entry_points={
'console_scripts': ['benchbuild=benchbuild.driver:main',
'container=benchbuild.container:main']
},
classifiers=[
'Development Status :: 4 - Beta', 'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
keywords="benchbuild experiments run-time", )
|
fr
| 0.221828
|
#!/usr/bin/env python3
| 1.256367
| 1
|
steemd-export/etc/unicode.py
|
metaperl/vivacoin
| 0
|
6629614
|
<reponame>metaperl/vivacoin
import pprint
a = u'bats\u00E0'
pprint.pformat(a) # Works
str(a) # Works
print(a) # UnicodeEncodeError: 'ascii' codec can't encode character '\xe0' in position 4: ordinal not in range(128)
|
import pprint
a = u'bats\u00E0'
pprint.pformat(a) # Works
str(a) # Works
print(a) # UnicodeEncodeError: 'ascii' codec can't encode character '\xe0' in position 4: ordinal not in range(128)
|
en
| 0.665277
|
# Works # Works # UnicodeEncodeError: 'ascii' codec can't encode character '\xe0' in position 4: ordinal not in range(128)
| 2.597328
| 3
|
code.py
|
git-gagan/python-mini-challenges
| 0
|
6629615
|
# --------------
#Code starts here
'''def palindrome(num):
while(num):
num=int(num)+1
num=str(num)
if num[::-1]==num:
return num
res=palindrome(13456)
print(res)'''
#Function to check for palindrome
def palindrome_check(num):
num=str(num)
return (num[::-1]==num)
#Function to find the smallest palindrome
def palindrome(num):
while(1):
num=num+1
if palindrome_check(num):
return num
# --------------
#Code starts here
def a_scramble(str_1,str_2):
str_1=str_1.lower().replace(" ","")
print(str_2)
for i in str_2.lower():
if i in str_1:
str_1=str_1.replace(i,"",1)
print(str_1)
continue
else:
return False
return True
a=a_scramble("eatcher","teacher")
print(a)
# --------------
#Code starts here
def check_fib(num):
li=[0,1]
for i in range(2,int(num/2)):
li.append(li[i-1]+li[i-2])
if num in li:
return True
return False
res=check_fib(377)
print(res)
# --------------
#Code starts here
#Function to compress string
def compress(word):
word=word.lower()
mist=[]
l=0
while(l<len(word)):
m=word[l]
j=0
while(l<len(word) and word[l]==m):
j=j+1
l=l+1
mist.append(m)
mist.append(str(j))
return ''.join(mist)
#Code ends here
# --------------
#Code starts here
def k_distinct(string,k):
s=''
for v in string.lower():
if v not in s:
s+=v
if len(s)==k:
return True
return False
res=k_distinct('Falafel',5)
print(res)
|
# --------------
#Code starts here
'''def palindrome(num):
while(num):
num=int(num)+1
num=str(num)
if num[::-1]==num:
return num
res=palindrome(13456)
print(res)'''
#Function to check for palindrome
def palindrome_check(num):
num=str(num)
return (num[::-1]==num)
#Function to find the smallest palindrome
def palindrome(num):
while(1):
num=num+1
if palindrome_check(num):
return num
# --------------
#Code starts here
def a_scramble(str_1,str_2):
str_1=str_1.lower().replace(" ","")
print(str_2)
for i in str_2.lower():
if i in str_1:
str_1=str_1.replace(i,"",1)
print(str_1)
continue
else:
return False
return True
a=a_scramble("eatcher","teacher")
print(a)
# --------------
#Code starts here
def check_fib(num):
li=[0,1]
for i in range(2,int(num/2)):
li.append(li[i-1]+li[i-2])
if num in li:
return True
return False
res=check_fib(377)
print(res)
# --------------
#Code starts here
#Function to compress string
def compress(word):
word=word.lower()
mist=[]
l=0
while(l<len(word)):
m=word[l]
j=0
while(l<len(word) and word[l]==m):
j=j+1
l=l+1
mist.append(m)
mist.append(str(j))
return ''.join(mist)
#Code ends here
# --------------
#Code starts here
def k_distinct(string,k):
s=''
for v in string.lower():
if v not in s:
s+=v
if len(s)==k:
return True
return False
res=k_distinct('Falafel',5)
print(res)
|
en
| 0.38884
|
# -------------- #Code starts here def palindrome(num):
while(num):
num=int(num)+1
num=str(num)
if num[::-1]==num:
return num
res=palindrome(13456)
print(res) #Function to check for palindrome #Function to find the smallest palindrome # -------------- #Code starts here # -------------- #Code starts here # -------------- #Code starts here #Function to compress string #Code ends here # -------------- #Code starts here
| 3.716428
| 4
|
init_repo.py
|
Serfentum/xcms_finder
| 0
|
6629616
|
<reponame>Serfentum/xcms_finder
from pathlib import Path
import git
def init_repo(repo_clone_url, path, version):
"""
Clone repo from url to specified path, dir with it will be named as version
:param repo_clone_url: str - url from gihub to clone
:param path: str - path, where dir with repo will be places
:param version: str - future name of repo dir
:return: git.repo.base.Repo, str - repository object and path to the correspondent local repository
"""
# Create path for repo
local_repo = Path(path) / version
local_repo = local_repo.expanduser()
# Initialize repository
repo = git.Repo.clone_from(repo_clone_url, local_repo)
return repo, local_repo
|
from pathlib import Path
import git
def init_repo(repo_clone_url, path, version):
"""
Clone repo from url to specified path, dir with it will be named as version
:param repo_clone_url: str - url from gihub to clone
:param path: str - path, where dir with repo will be places
:param version: str - future name of repo dir
:return: git.repo.base.Repo, str - repository object and path to the correspondent local repository
"""
# Create path for repo
local_repo = Path(path) / version
local_repo = local_repo.expanduser()
# Initialize repository
repo = git.Repo.clone_from(repo_clone_url, local_repo)
return repo, local_repo
|
en
| 0.749447
|
Clone repo from url to specified path, dir with it will be named as version :param repo_clone_url: str - url from gihub to clone :param path: str - path, where dir with repo will be places :param version: str - future name of repo dir :return: git.repo.base.Repo, str - repository object and path to the correspondent local repository # Create path for repo # Initialize repository
| 3.118162
| 3
|
lib/model/faster_rcnn/tracking_utils.py
|
lzhangbj/Tracking
| 0
|
6629617
|
import torch
import numpy as np
EPISILON = 1e-6
# we can track dynamic infomation
# naively, we can also directly insert all information into LSTM
def TrackingDynamicInfos(prevROIFeature, prevROI, currROIFeature, currROI, kernel=5):
'''
calculate the dynamic movement info and feed into our TrackingLocGRU
input length are all tracking module capacity
input features should be the same shape for convenience
we use the matchTrans principle here.
inputs
@prevROIFeature: info : tracking objects' features in previous frame
type : torch float tensor
shape : (numObjects, C, H, W) h=w=32
@prevROI: info : previous frames tracking objects' rois
type : torch tensor int
shape : (numObjects, 4) which dim 2 contains (x1, y1, x2, y2)
@currROIFeature: info : tracking objects' features in current frame
type : torch float tensor
shape : (numObjects, C, H, W)
@currROI: info : current frames tracking objects' rois
type : torch tensor int
shape : (numObjects, 4) which dim 2 contains (x1, y1, x2, y2)
return
@trackingDynamicInfos: type : torch float tensor
shape : (numObjects, 3*, H, W), dim 1 contains (deltaX, deltaY) wrt previous frame
'''
numObjects, C, H, W = prevROIFeature.size()
assert prevROIFeature.size() == currROIFeature.size(), [prevROIFeature.size(), currROIFeature.size()]
# assert H == 16 and W == 32, W
assert len(prevROI.size()) == 2 and prevROI.size(1) == 4, prevROI.size()
assert len(currROI.size()) == 2 and currROI.size(1) == 4, currROI.size()
trackingDynamicInfos = prevROIFeature.new(numObjects, 3*kernel*kernel, H, W).zero_()
trackingLocInfo = prevROIFeature.new(numObjects, 2, 2, H, W).zero_()
for i in torch.arange(numObjects):
# if tracking object exist in last frame
# we calculate info
prevROIXLoc = torch.arange(W).float()
prevROIXLoc = prevROIXLoc*(prevROI[i, 2] - prevROI[i, 0])/(W-1) + prevROI[i, 0]
assert prevROIXLoc.size(0) == W, prevROIXLoc.size(0)
prevROIXLoc = prevROIXLoc.expand(H, -1)
currROIXLoc = torch.arange(W).float()
currROIXLoc = currROIXLoc*(currROI[i, 2] - currROI[i, 0])/(W-1) + currROI[i, 0]
assert currROIXLoc.size(0) == W, currROIXLoc.size(0)
currROIXLoc = currROIXLoc.expand(H, -1)
prevROIYLoc = torch.arange(H).float()
prevROIYLoc = prevROIYLoc*(prevROI[i, 3] - prevROI[i, 1])/(H-1) + prevROI[i, 1]
assert prevROIYLoc.size(0) == H, prevROIYLoc.size(0)
prevROIYLoc = prevROIYLoc.expand(W, -1)
prevROIYLoc = prevROIYLoc.transpose(1, 0).contiguous()
currROIYLoc = torch.arange(H).float()
currROIYLoc = currROIYLoc*(currROI[i, 3] - currROI[i, 1])/(H-1) + currROI[i, 1]
assert currROIYLoc.size(0) == H, currROIYLoc.size(0)
currROIYLoc = currROIYLoc.expand(W, -1)
currROIYLoc = currROIYLoc.transpose(1, 0).contiguous()
trackingLocInfo[i, 0, 0] = prevROIXLoc
trackingLocInfo[i, 0, 1] = prevROIYLoc
trackingLocInfo[i, 1, 0] = currROIXLoc
trackingLocInfo[i, 1, 1] = currROIYLoc
k_min = int(-(kernel-1)/2)
k_max = int((kernel+1)/2)
for i in torch.arange(k_min, k_max):
for j in torch.arange(k_min, k_max):
compare_prev_features = prevROIFeature.new(prevROIFeature.size()).zero_()
compare_prev_loc = trackingLocInfo.new(numObjects, 2, H, W).zero_()
compare_prev_features[:, :, max(0, -i):min(H-i, H), max(0,-j):min(W-j, W)] = \
prevROIFeature[:, :, max(0,i):min(H+i, H), max(0,j):min(W+j,W)]
# assert compare_prev_loc[:, 0].size() == trackingLocInfo[:, 0, 0].size() and trackingLocInfo[:, 0, 0].size() == prevROI[:, 2].size(),\
# [compare_prev_loc.size(), trackingLocInfo.size(), prevROI.size()]
compare_prev_loc[:, 0] = trackingLocInfo[:, 0, 0] +(i.float()*(prevROI[:, 2] - prevROI[:, 0])/(W-1)).view(-1, 1, 1)
compare_prev_loc[:, 1] = trackingLocInfo[:, 0, 1] +(j.float()*(prevROI[:, 3] - prevROI[:, 1])/(H-1)).view(-1, 1, 1)
# print([ (3*((i-k_min)*kernel + (j-k_min))).item(), (3*((i-k_min)*kernel + (j-k_min))+2).item()])
# print(trackingDynamicInfos[:, 3*((i-k_min)*kernel + (j-k_min)):3*((i-k_min)*kernel + (j-k_min))+2].size())
trackingDynamicInfos[:, 3*((i-k_min)*kernel + (j-k_min)):3*((i-k_min)*kernel + (j-k_min))+2] = \
trackingLocInfo[:, 1]-compare_prev_loc
temp = compare_prev_features*currROIFeature
trackingDynamicInfos[:, 3*((i-k_min)*kernel + (j-k_min))+2] = torch.sum(temp, dim=1)
del compare_prev_features
del compare_prev_loc
del temp
# torch.cuda.empty_cache()
return trackingDynamicInfos
def clip_tracking_boxes(boxes, im_info):
'''
im_info : [h,w]
'''
boxes[:,0::4].clamp_(0, im_info[1]-1)
boxes[:,1::4].clamp_(0, im_info[0]-1)
boxes[:,2::4].clamp_(0, im_info[1]-1)
boxes[:,3::4].clamp_(0, im_info[0]-1)
return boxes
def tracking_boxes_validation_check(boxes):
count=0
valid_indexes =-boxes.new(boxes.size(0)).fill_(1).long()
for i in torch.arange(boxes.size(0)):
if boxes[i, 2]<=boxes[i, 0] or boxes[i, 3]<=boxes[i, 1]:
boxes[i] = 0
else:
valid_indexes[count] = i
count+=1
valid_indexes = valid_indexes[:count]
return boxes, valid_indexes
|
import torch
import numpy as np
EPISILON = 1e-6
# we can track dynamic infomation
# naively, we can also directly insert all information into LSTM
def TrackingDynamicInfos(prevROIFeature, prevROI, currROIFeature, currROI, kernel=5):
'''
calculate the dynamic movement info and feed into our TrackingLocGRU
input length are all tracking module capacity
input features should be the same shape for convenience
we use the matchTrans principle here.
inputs
@prevROIFeature: info : tracking objects' features in previous frame
type : torch float tensor
shape : (numObjects, C, H, W) h=w=32
@prevROI: info : previous frames tracking objects' rois
type : torch tensor int
shape : (numObjects, 4) which dim 2 contains (x1, y1, x2, y2)
@currROIFeature: info : tracking objects' features in current frame
type : torch float tensor
shape : (numObjects, C, H, W)
@currROI: info : current frames tracking objects' rois
type : torch tensor int
shape : (numObjects, 4) which dim 2 contains (x1, y1, x2, y2)
return
@trackingDynamicInfos: type : torch float tensor
shape : (numObjects, 3*, H, W), dim 1 contains (deltaX, deltaY) wrt previous frame
'''
numObjects, C, H, W = prevROIFeature.size()
assert prevROIFeature.size() == currROIFeature.size(), [prevROIFeature.size(), currROIFeature.size()]
# assert H == 16 and W == 32, W
assert len(prevROI.size()) == 2 and prevROI.size(1) == 4, prevROI.size()
assert len(currROI.size()) == 2 and currROI.size(1) == 4, currROI.size()
trackingDynamicInfos = prevROIFeature.new(numObjects, 3*kernel*kernel, H, W).zero_()
trackingLocInfo = prevROIFeature.new(numObjects, 2, 2, H, W).zero_()
for i in torch.arange(numObjects):
# if tracking object exist in last frame
# we calculate info
prevROIXLoc = torch.arange(W).float()
prevROIXLoc = prevROIXLoc*(prevROI[i, 2] - prevROI[i, 0])/(W-1) + prevROI[i, 0]
assert prevROIXLoc.size(0) == W, prevROIXLoc.size(0)
prevROIXLoc = prevROIXLoc.expand(H, -1)
currROIXLoc = torch.arange(W).float()
currROIXLoc = currROIXLoc*(currROI[i, 2] - currROI[i, 0])/(W-1) + currROI[i, 0]
assert currROIXLoc.size(0) == W, currROIXLoc.size(0)
currROIXLoc = currROIXLoc.expand(H, -1)
prevROIYLoc = torch.arange(H).float()
prevROIYLoc = prevROIYLoc*(prevROI[i, 3] - prevROI[i, 1])/(H-1) + prevROI[i, 1]
assert prevROIYLoc.size(0) == H, prevROIYLoc.size(0)
prevROIYLoc = prevROIYLoc.expand(W, -1)
prevROIYLoc = prevROIYLoc.transpose(1, 0).contiguous()
currROIYLoc = torch.arange(H).float()
currROIYLoc = currROIYLoc*(currROI[i, 3] - currROI[i, 1])/(H-1) + currROI[i, 1]
assert currROIYLoc.size(0) == H, currROIYLoc.size(0)
currROIYLoc = currROIYLoc.expand(W, -1)
currROIYLoc = currROIYLoc.transpose(1, 0).contiguous()
trackingLocInfo[i, 0, 0] = prevROIXLoc
trackingLocInfo[i, 0, 1] = prevROIYLoc
trackingLocInfo[i, 1, 0] = currROIXLoc
trackingLocInfo[i, 1, 1] = currROIYLoc
k_min = int(-(kernel-1)/2)
k_max = int((kernel+1)/2)
for i in torch.arange(k_min, k_max):
for j in torch.arange(k_min, k_max):
compare_prev_features = prevROIFeature.new(prevROIFeature.size()).zero_()
compare_prev_loc = trackingLocInfo.new(numObjects, 2, H, W).zero_()
compare_prev_features[:, :, max(0, -i):min(H-i, H), max(0,-j):min(W-j, W)] = \
prevROIFeature[:, :, max(0,i):min(H+i, H), max(0,j):min(W+j,W)]
# assert compare_prev_loc[:, 0].size() == trackingLocInfo[:, 0, 0].size() and trackingLocInfo[:, 0, 0].size() == prevROI[:, 2].size(),\
# [compare_prev_loc.size(), trackingLocInfo.size(), prevROI.size()]
compare_prev_loc[:, 0] = trackingLocInfo[:, 0, 0] +(i.float()*(prevROI[:, 2] - prevROI[:, 0])/(W-1)).view(-1, 1, 1)
compare_prev_loc[:, 1] = trackingLocInfo[:, 0, 1] +(j.float()*(prevROI[:, 3] - prevROI[:, 1])/(H-1)).view(-1, 1, 1)
# print([ (3*((i-k_min)*kernel + (j-k_min))).item(), (3*((i-k_min)*kernel + (j-k_min))+2).item()])
# print(trackingDynamicInfos[:, 3*((i-k_min)*kernel + (j-k_min)):3*((i-k_min)*kernel + (j-k_min))+2].size())
trackingDynamicInfos[:, 3*((i-k_min)*kernel + (j-k_min)):3*((i-k_min)*kernel + (j-k_min))+2] = \
trackingLocInfo[:, 1]-compare_prev_loc
temp = compare_prev_features*currROIFeature
trackingDynamicInfos[:, 3*((i-k_min)*kernel + (j-k_min))+2] = torch.sum(temp, dim=1)
del compare_prev_features
del compare_prev_loc
del temp
# torch.cuda.empty_cache()
return trackingDynamicInfos
def clip_tracking_boxes(boxes, im_info):
'''
im_info : [h,w]
'''
boxes[:,0::4].clamp_(0, im_info[1]-1)
boxes[:,1::4].clamp_(0, im_info[0]-1)
boxes[:,2::4].clamp_(0, im_info[1]-1)
boxes[:,3::4].clamp_(0, im_info[0]-1)
return boxes
def tracking_boxes_validation_check(boxes):
count=0
valid_indexes =-boxes.new(boxes.size(0)).fill_(1).long()
for i in torch.arange(boxes.size(0)):
if boxes[i, 2]<=boxes[i, 0] or boxes[i, 3]<=boxes[i, 1]:
boxes[i] = 0
else:
valid_indexes[count] = i
count+=1
valid_indexes = valid_indexes[:count]
return boxes, valid_indexes
|
en
| 0.523442
|
# we can track dynamic infomation # naively, we can also directly insert all information into LSTM calculate the dynamic movement info and feed into our TrackingLocGRU input length are all tracking module capacity input features should be the same shape for convenience we use the matchTrans principle here. inputs @prevROIFeature: info : tracking objects' features in previous frame type : torch float tensor shape : (numObjects, C, H, W) h=w=32 @prevROI: info : previous frames tracking objects' rois type : torch tensor int shape : (numObjects, 4) which dim 2 contains (x1, y1, x2, y2) @currROIFeature: info : tracking objects' features in current frame type : torch float tensor shape : (numObjects, C, H, W) @currROI: info : current frames tracking objects' rois type : torch tensor int shape : (numObjects, 4) which dim 2 contains (x1, y1, x2, y2) return @trackingDynamicInfos: type : torch float tensor shape : (numObjects, 3*, H, W), dim 1 contains (deltaX, deltaY) wrt previous frame # assert H == 16 and W == 32, W # if tracking object exist in last frame # we calculate info # assert compare_prev_loc[:, 0].size() == trackingLocInfo[:, 0, 0].size() and trackingLocInfo[:, 0, 0].size() == prevROI[:, 2].size(),\ # [compare_prev_loc.size(), trackingLocInfo.size(), prevROI.size()] # print([ (3*((i-k_min)*kernel + (j-k_min))).item(), (3*((i-k_min)*kernel + (j-k_min))+2).item()]) # print(trackingDynamicInfos[:, 3*((i-k_min)*kernel + (j-k_min)):3*((i-k_min)*kernel + (j-k_min))+2].size()) # torch.cuda.empty_cache() im_info : [h,w]
| 2.854706
| 3
|
chirp/library/import_transaction.py
|
chirpradio/chirpradio-machine
| 8
|
6629618
|
import os
import sys
from chirp.common.printing import cprint
from chirp.library import album
from chirp.library import audio_file
from chirp.library import import_file
from chirp.library import ufid
class ImportTransaction(object):
def __init__(self, db, volume, import_timestamp, tmp_prefix,
dry_run=True):
self._db = db
self._volume = volume
self._import_timestamp = import_timestamp
self._tmp_prefix = tmp_prefix
self._dry_run = dry_run
self.total_size_in_bytes = 0
self.num_albums = 0
self._all_au_files = []
@property
def num_tracks(self):
return len(self._all_au_files)
def add_album(self, alb, new_album_name=None):
# Plug in the volume and import timestamp for this transaction.
alb.set_volume_and_import_timestamp(
self._volume, self._import_timestamp)
alb.ensure_payloads()
cprint(u'Adding Album "%s"' % alb.title())
sys.stdout.flush()
# Write the files to our temporary prefix.
for au_file in alb.all_au_files:
# Might raise an ImportFileError.
if not self._dry_run:
import_file.write_file(au_file, self._tmp_prefix)
# We forget the payloads immediately to save RAM.
alb.drop_payloads()
# Everything checks out!
self._all_au_files.extend(alb.all_au_files)
self.num_albums += 1
self.total_size_in_bytes += sum(
au.frame_size for au in alb.all_au_files)
def commit(self, target_prefix):
if self._dry_run:
return
# Start a database transaction to add the files.
txn = self._db.begin_add(self._volume, self._import_timestamp)
# Write each new file into the database.
for au_file in self._all_au_files:
txn.add(au_file)
ufid_prefix = ufid.ufid_prefix(self._volume, self._import_timestamp)
# Strip off trailing "/"
if ufid_prefix.endswith("/"):
ufid_prefix = ufid_prefix[:-1]
tmp_dir = os.path.join(self._tmp_prefix, ufid_prefix)
real_dir = os.path.join(target_prefix, ufid_prefix)
cprint("*** Committing %d albums / %d bytes" % (
self.num_albums, self.total_size_in_bytes))
cprint("*** tmp_dir=%s" % tmp_dir)
cprint("*** real_dir=%s" % real_dir)
sys.stdout.flush()
os.renames(tmp_dir, real_dir)
txn.commit()
# Write out a list of source files that were just committed.
out = open(os.path.join(real_dir, "_source_files"), "w")
for path in sorted(af.path for af in self._all_au_files):
out.write(path)
out.write("\n")
out.close()
|
import os
import sys
from chirp.common.printing import cprint
from chirp.library import album
from chirp.library import audio_file
from chirp.library import import_file
from chirp.library import ufid
class ImportTransaction(object):
def __init__(self, db, volume, import_timestamp, tmp_prefix,
dry_run=True):
self._db = db
self._volume = volume
self._import_timestamp = import_timestamp
self._tmp_prefix = tmp_prefix
self._dry_run = dry_run
self.total_size_in_bytes = 0
self.num_albums = 0
self._all_au_files = []
@property
def num_tracks(self):
return len(self._all_au_files)
def add_album(self, alb, new_album_name=None):
# Plug in the volume and import timestamp for this transaction.
alb.set_volume_and_import_timestamp(
self._volume, self._import_timestamp)
alb.ensure_payloads()
cprint(u'Adding Album "%s"' % alb.title())
sys.stdout.flush()
# Write the files to our temporary prefix.
for au_file in alb.all_au_files:
# Might raise an ImportFileError.
if not self._dry_run:
import_file.write_file(au_file, self._tmp_prefix)
# We forget the payloads immediately to save RAM.
alb.drop_payloads()
# Everything checks out!
self._all_au_files.extend(alb.all_au_files)
self.num_albums += 1
self.total_size_in_bytes += sum(
au.frame_size for au in alb.all_au_files)
def commit(self, target_prefix):
if self._dry_run:
return
# Start a database transaction to add the files.
txn = self._db.begin_add(self._volume, self._import_timestamp)
# Write each new file into the database.
for au_file in self._all_au_files:
txn.add(au_file)
ufid_prefix = ufid.ufid_prefix(self._volume, self._import_timestamp)
# Strip off trailing "/"
if ufid_prefix.endswith("/"):
ufid_prefix = ufid_prefix[:-1]
tmp_dir = os.path.join(self._tmp_prefix, ufid_prefix)
real_dir = os.path.join(target_prefix, ufid_prefix)
cprint("*** Committing %d albums / %d bytes" % (
self.num_albums, self.total_size_in_bytes))
cprint("*** tmp_dir=%s" % tmp_dir)
cprint("*** real_dir=%s" % real_dir)
sys.stdout.flush()
os.renames(tmp_dir, real_dir)
txn.commit()
# Write out a list of source files that were just committed.
out = open(os.path.join(real_dir, "_source_files"), "w")
for path in sorted(af.path for af in self._all_au_files):
out.write(path)
out.write("\n")
out.close()
|
en
| 0.882771
|
# Plug in the volume and import timestamp for this transaction. # Write the files to our temporary prefix. # Might raise an ImportFileError. # We forget the payloads immediately to save RAM. # Everything checks out! # Start a database transaction to add the files. # Write each new file into the database. # Strip off trailing "/" # Write out a list of source files that were just committed.
| 2.420492
| 2
|
src/strategy.py
|
tobby2002/ebisu
| 0
|
6629619
|
<reponame>tobby2002/ebisu<filename>src/strategy.py
# coding: UTF-8
import os
import random
import math
import re
import numpy
import time
from hyperopt import hp
from src import highest, lowest, sma, crossover, crossunder, over, under, last, rci, rsi, double_ema, ema, triple_ema, wma, \
ssma, hull, logger, notify, atr, willr, bbands, supertrend, heikinashi
from src.bitmex import BitMex
from src.bitmex_stub import BitMexStub
from src.bot import Bot
from src.gmail_sub import GmailSub
import pandas as pd
class Will_Rci(Bot):
    """Williams %R multi-period strategy filtered by a long-period RCI.

    Goes long when several short/medium Williams %R readings are deeply
    oversold (any of nine alternative combinations) AND the rescaled
    long-period RCI is below -80; the short side is the mirror image.
    Entries are placed via ``self.exchange.entry`` at market.
    """

    # NOTE(review): class-level flags shared across instances; only the
    # commented-out close logic below ever used them.
    inlong = False
    inshort = False

    def __init__(self):
        # Run on 1-minute bars.
        Bot.__init__(self, '1m')

    def options(self):
        # Hyperopt search space for the three RCI lookback lengths.
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 21, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 21, 34, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 34, 55, 1),
        }

    def strategy(self, open, close, high, low, volume):
        # Fixed order size in contracts (exchange lot sizing is bypassed).
        # lot = self.exchange.get_lot()
        lot = 100
        itv_s = self.input('rcv_short_len', int, 21)
        itv_m = self.input('rcv_medium_len', int, 34)
        itv_l = self.input('rcv_long_len', int, 55)
        rci_s = rci(close, itv_s)
        rci_m = rci(close, itv_m)
        rci_l = rci(close, itv_l)
        # Rescale RCI from its native range into -100..0 so the thresholds
        # below are comparable with the Williams %R values.
        ra = rci_s[-1] / 2 - 50
        rb = rci_m[-1] / 2 - 50
        rc = rci_l[-1] / 2 - 50
        # Williams %R (-100..0) over five Fibonacci-flavoured periods:
        # a/b/c are short-to-medium, x/y are very long context filters.
        a = willr(high, low, close, period=55)
        b = willr(high, low, close, period=144)
        c = willr(high, low, close, period=610)
        x = willr(high, low, close, period=4181)
        y = willr(high, low, close, period=6785)
        # Nine alternative oversold configurations; any one qualifies.
        # NOTE(review): thresholds are hand-tuned magic numbers.
        buycon1 = True if (a[-1] < -97 and (b[-1] < -97 or c[-1] < -97) and (x[-1] < -80 or y[-1] < -80)) else False
        buycon2 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -90) and (x[-1] > -35 or y[-1] > -35)) else False
        buycon3 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] > -70) and (x[-1] > -50 or y[-1] > -25)) else False
        buycon4 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -97) and (x[-1] > -50 or y[-1] > -50)) else False
        buycon5 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -75) and (x[-1] > -25 or y[-1] > -25)) else False
        buycon6 = True if ((b[-1] + 100) * (c[-1] + 100) == 0 and (c[-1] < -75 and x[-1] > -30 or y[-1] > -30)) else False
        buycon7 = True if ((b[-1] + 100) == 0 and (c[-1] > -30 and x[-1] > -30 or y[-1] > -30)) else False
        buycon8 = True if c[-1] < -97 else False
        buycon9 = True if a[-1] < -97 and b[-1] < -97 and c[-1] > -50 else False
        # Mirror-image overbought configurations for the short side.
        # NOTE(review): sellcon6/7 test b*c == 0 directly (%R top is 0),
        # unlike buycon6/7 which offset by +100 (%R bottom is -100).
        sellcon1 = True if (a[-1] > -3 and (b[-1] > -3 or c[-1] > -3) and (x[-1] > -20 or y[-1] > -20)) else False
        sellcon2 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -10) and (x[-1] < -65 or y[-1] < -65)) else False
        sellcon3 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -30) and (x[-1] < -50 or y[-1] < -75)) else False
        sellcon4 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -3) and (x[-1] < -50 or y[-1] < -50)) else False
        sellcon5 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -25) and (x[-1] < -75 or y[-1] < -75)) else False
        sellcon6 = True if (((b[-1]) * (c[-1])) == 0 and c[-1] > -25 and (x[-1] < -70 or y[-1] < -70)) else False
        sellcon7 = True if ((b[-1]) == 0 and (c[-1] < -70 and x[-1] < -70 or y[-1] < -70)) else False
        sellcon8 = True if c[-1] > -3 else False
        sellcon9 = True if a[-1] > -3 and b[-1] > -3 and c[-1] < -50 else False
        # Long RCI gate: only trade in the direction of the stretched RCI.
        buyRCIfillerCon = True if rc < -80 else False
        sellRCIfillerCon = True if rc > -20 else False
        buyWillfilterCon = buycon1 or buycon2 or buycon3 or buycon4 or buycon5 or buycon6 or buycon7 or buycon8 or buycon9
        sellWillFilrerCon = sellcon1 or sellcon2 or sellcon3 or sellcon4 or sellcon5 or sellcon6 or sellcon7 or sellcon8 or sellcon9
        # set condition
        buyCons = buyWillfilterCon and buyRCIfillerCon
        sellCons = sellWillFilrerCon and sellRCIfillerCon
        buyCon = True if buyCons else False
        sellCon = True if sellCons else False
        # Close conditions: opposite %R signal closes the position.
        # NOTE(review): computed but unused in this class -- only the
        # commented-out close logic below consumed them.
        buyCloseCon = sellWillFilrerCon
        sellCloseCon = buyWillfilterCon
        if buyCon:
            self.exchange.entry("Long", True, lot)
        if sellCon:
            self.exchange.entry("Short", False, lot)
        # if buyCon:
        #     self.exchange.entry("Long", True, lot)
        #     self.inlong = True
        # if buyCloseCon and self.inlong:
        #     self.exchange.close_all()
        #     self.inlong = False
        # if sellCon:
        #     self.exchange.entry("Short", False, lot)
        #     self.inshort = True
        # if sellCloseCon and self.inlong:
        #     self.exchange.close_all()
        #     self.inshort = False
# Channel breakout ("doten" stop-and-reverse) strategy.
class Doten(Bot):
    """Go long on a break above the N-bar high, short below the N-bar low."""

    def __init__(self):
        # Run on 1-minute bars.
        Bot.__init__(self, '1m')

    def options(self):
        """Hyperopt search space for the channel lookback length."""
        return {
            'length': hp.randint('length', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lookback = self.input('length', int, 9)
        size = self.exchange.get_lot()
        half = round(size / 2)
        channel_top = last(highest(high, lookback))
        channel_bottom = last(lowest(low, lookback))
        self.exchange.plot('up', channel_top, 'b')
        self.exchange.plot('dn', channel_bottom, 'r')
        # Stop entries straddle the channel: whichever edge trades first
        # triggers, half the lot per side.
        self.exchange.entry("Long", True, half, stop=channel_top)
        self.exchange.entry("Short", False, half, stop=channel_bottom)
# SMA crossover strategy.
class SMA(Bot):
    """Dual moving-average crossover: long on golden cross, short on dead cross."""

    def __init__(self):
        # Run on 1-minute bars.
        Bot.__init__(self, '1m')

    def options(self):
        """Hyperopt search space for the two SMA lengths."""
        return {
            'fast_len': hp.quniform('fast_len', 1, 30, 1),
            'slow_len': hp.quniform('slow_len', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        size = self.exchange.get_lot()
        len_fast = self.input('fast_len', int, 9)
        len_slow = self.input('slow_len', int, 16)
        ma_fast = sma(close, len_fast)
        ma_slow = sma(close, len_slow)
        # Enter long when the fast average crosses above the slow one;
        # enter short on the opposite cross.
        if crossover(ma_fast, ma_slow):
            self.exchange.entry("Long", True, size)
        if crossunder(ma_fast, ma_slow):
            self.exchange.entry("Short", False, size)
class YYY(Bot):
    """SMA-cross dip/rip strategy with a long-SMA trend filter.

    Buys dead crosses while the long trend SMA is rising and sells golden
    crosses while it is falling, using post-only limit orders placed just
    inside the current market price.  Exits are placed as opposite-side
    post-only orders conditioned on the reverse cross.
    """

    def __init__(self):
        # Run on 1-minute bars.
        Bot.__init__(self, '1m')

    def options(self):
        """Hyperopt search space for the fast/slow SMA lengths."""
        return {
            'fast_len': hp.quniform('fast_len', 1, 200, 1),
            'slow_len': hp.quniform('slow_len', 1, 600, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 10))
        # Fixed order size overrides the exchange lot sizing.
        lot = 100
        logger.info('lot:%s' % lot)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        fast_len = self.input('fast_len', int, 5)
        slow_len = self.input('slow_len', int, 18)
        # BUG FIX: this previously read the 'slow_len' parameter again, so
        # any user-supplied slow_len silently replaced the 1200-bar trend
        # length.  Use a distinct 'trend_len' key.
        trend_len = self.input('trend_len', int, 1200)
        logger.info('fast_len:%s' % fast_len)
        logger.info('slow_len:%s' % slow_len)
        fast_sma = sma(close, fast_len)
        slow_sma = sma(close, slow_len)
        trend_sma = sma(close, trend_len)
        # Trend direction from the slope of the long SMA over 3/10 bars.
        # NOTE(review): the two conditions can both be true (e.g. up over
        # 3 bars, down over 10), so uptrend and downtrend are not exclusive.
        uptrend = False
        downtrend = False
        if trend_sma[-1] > trend_sma[-3] or trend_sma[-1] > trend_sma[-10]:
            uptrend = True
        if trend_sma[-1] < trend_sma[-3] or trend_sma[-1] < trend_sma[-10]:
            downtrend = True
        golden_cross = crossover(fast_sma, slow_sma)
        dead_cross = crossunder(fast_sma, slow_sma)
        logger.info('golden_cross:%s' % golden_cross)
        logger.info('dead_cross:%s' % dead_cross)
        logger.info('price:%s' % price)
        logger.info('trend_sma:%s' % trend_sma[-1])
        logger.info('uptrend : %s' % str(uptrend))
        logger.info('downtrend : %s' % str(downtrend))
        # long: buy the dip (dead cross) while the long trend is up.
        if dead_cross and uptrend:
            self.exchange.order("Long", True, lot, limit=price-0.5, when=True, post_only=True)
            logger.info('in dead_cross and uptrend for long')
        if bitmex.get_whichpositon() == 'LONG':
            # Exit the long on the next golden cross (acts like a stop).
            self.exchange.order("Long", False, lot, limit=price + 0.5, when=golden_cross, post_only=True)
        # short: sell the rip (golden cross) while the long trend is down.
        if golden_cross and downtrend:
            # FIX: log message previously said "uptrend" in this branch.
            logger.info('in golden_cross and downtrend for short')
            self.exchange.entry("Short", False, lot, limit=price+0.5, when=True, post_only=True)
        if bitmex.get_whichpositon() == 'SHORT':
            self.exchange.order("Short", True, lot, limit=price-0.5, stop=(price-0.5), when=dead_cross, post_only=True)
        logger.info('--------------------------------------------------')
# supertrend
class SuperTrend(Bot):
    """SuperTrend crossover strategy on 15-minute bars.

    Enters long/short at market when price crosses the SuperTrend line,
    and logs realised P&L statistics on every evaluation.
    """

    # NOTE(review): evaluated at class-definition (import) time and makes a
    # network call to the exchange -- consider moving into __init__.
    prebalance = BitMex(threading=False).get_balance()
    # Count of strategy evaluations that reached the stats section.
    dealcount = 0

    def __init__(self):
        # Run on 15-minute bars.
        Bot.__init__(self, '15m')

    def options(self):
        # Hyperopt search space for the SuperTrend multiplier and period.
        # NOTE(review): hp.randint is called with 4 positional args here,
        # matching the rest of this file -- verify against the installed
        # hyperopt version's signature.
        return {
            'factor': hp.randint('factor', 1, 30, 1),
            'period': hp.randint('period', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 100))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        factor = self.input('factor', int, 3)
        period = self.input('period', int, 7)
        # NOTE(review): atrvar is computed but unused.
        atrvar = atr(high, low, close, period=period)
        # change defval to the bar size in minutes, e.g. 5 minutes --> 5
        resolution = self.input(defval=15, title="resolution", type=int)
        # Fetch OHLC at the chosen resolution (see __init__ timeframe).
        source = self.exchange.security(str(resolution) + 'm')
        supertrenddf = supertrend(source, factor, period)
        logger.info('price:%s\n' % price)
        logger.info('supertrend value:%s' % supertrenddf['SuperTrend'][-1])
        logger.info('supertrend Upper Band:%s' % supertrenddf['Upper Band'][-1])
        logger.info('supertrend Lower Band:%s' % supertrenddf['Lower Band'][-1])
        logger.info('supertrenddf[Trend][-1]:%s' % supertrenddf['Trend'][-1])
        logger.info('supertrenddf[TSL][-1]:%s' % supertrenddf['TSL'][-1])
        logger.info('supertrenddf[ATR][-1]:%s' % supertrenddf['ATR'][-1])
        # Signal: price crossing the SuperTrend line, confirmed by the
        # latest close being on the new side of the line.
        longCondition_supertrend = crossover(close, supertrenddf['SuperTrend']) and close[-1] > supertrenddf['SuperTrend'][-1]
        shortCondition_supertrend = crossunder(close, supertrenddf['SuperTrend']) and close[-1] < supertrenddf['SuperTrend'][-1]
        if longCondition_supertrend:
            self.exchange.entry("Long", True, lot)
            logger.info('longCondition_supertrend:%s\n' % longCondition_supertrend)
        elif shortCondition_supertrend:
            self.exchange.entry("Short", False, lot)
            logger.info('shortCondition_supertrend:%s\n' % shortCondition_supertrend)
        else:
            # self.exchange.close_all()
            logger.info('Condition_supertrend:%s\n' % 'else')
        # P&L bookkeeping.
        # NOTE(review): statement grouping reconstructed from an
        # indentation-mangled source -- dealcount may originally have been
        # incremented only in the else branch; confirm against history.
        self.dealcount += 1
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('dealcount:%s' % self.dealcount)
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
class DoubleSuperRSI(Bot):  # logic: https://stock79.tistory.com/177
    """Dual-SuperTrend + RSI(2)/RSI(50) strategy with position management.

    Uses a tight SuperTrend (factor 3) and a wide one (factor 20): entries
    are placed at the midpoint between the two lines, adds/closes are
    driven by RSI(2) extremes and a 50-bar SMA.
    """

    # NOTE(review): evaluated at class-definition (import) time and makes a
    # network call to the exchange -- consider moving into __init__.
    prebalance = BitMex(threading=False).get_balance()
    # Count of strategy evaluations.
    dealcount = 0

    def __init__(self):
        # Run on 1-minute bars.
        Bot.__init__(self, '1m')

    def options(self):
        # Hyperopt search space (only 'length' is exposed for tuning).
        return {
            'length': hp.randint('length', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        # for test lot
        lot = int(round(lot / 50))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        # NOTE(review): position_avg_price is fetched but never used.
        position_avg_price = bitmex.get_position_avg_price()
        # variants settings
        rsi2_len = self.input('length', int, 2)
        rsi50_len = self.input('length50', int, 50)
        rsi2 = rsi(close, rsi2_len)
        rsi50 = rsi(close, rsi50_len)
        # Tight SuperTrend (factor 3) and wide SuperTrend (factor 20).
        factor = self.input('factor', int, 3)
        period = self.input('period', int, 7)
        factor2 = self.input('factor2', int, 20)
        period2 = self.input('period2', int, 7)
        # change defval to the bar size in minutes, e.g. 5 minutes --> 5
        resolution = self.input(defval=1, title="resolution", type=int)
        # Fetch OHLC at the chosen resolution (compare with __init__).
        source = self.exchange.security(str(resolution) + 'm')
        supertrenddf = supertrend(source, factor, period)
        supertrenddf2 = supertrend(source, factor2, period2)
        print('supertrenddf:%s' % supertrenddf)
        print('supertrenddf2:%s' % supertrenddf2)
        fast_len = self.input('fast_len', int, 5)
        half_len = self.input('half_len', int, 50)
        slow_len = self.input('slow_len', int, 200)
        fast_sma = sma(close, fast_len)
        half_sma = sma(close, half_len)
        slow_sma = sma(close, slow_len)
        # conditions
        sma_long = over(fast_sma[-1], slow_sma[-1])
        sma_short = under(fast_sma[-1], slow_sma[-1])
        super_long = over(close[-1], supertrenddf['SuperTrend'][-1])
        super_short = under(close[-1], supertrenddf['SuperTrend'][-1])
        supertrendtrend = supertrenddf['Trend'][-1]
        super2_long = over(close[-1], supertrenddf2['SuperTrend'][-1])
        super2_short = under(close[-1], supertrenddf2['SuperTrend'][-1])
        supertrendtrend2 = supertrenddf2['Trend'][-1]
        # Midpoint between the two SuperTrend lines; used as entry limit.
        super_centerline = (supertrenddf['SuperTrend'][-1] + supertrenddf2['SuperTrend'][-1])/2
        rsi2_overbought = over(rsi2[-1], 95)
        rsi2_oversold = under(rsi2[-1], 5)
        # NOTE(review): rsi50_over/rsi50_under are computed but unused.
        rsi50_over = over(rsi50[-1], 50)
        rsi50_under = under(rsi50[-1], 50)
        price_under = under(price, half_sma[-1])
        price_over = over(price, half_sma[-1])
        half_before = over(close[-1], half_sma[-1])
        half_after = under(close[-1], half_sma[-1])
        # show infomations
        logger.info('price: %s' % price)
        logger.info('fast_sma[-1]: %s' % fast_sma[-1])
        logger.info('slow_sma[-1]: %s' % slow_sma[-1])
        logger.info('sma_long: %s' % sma_long)
        logger.info('sma_short: %s' % sma_short)
        logger.info('super_long: %s' % super_long)
        logger.info('super_short: %s' % super_short)
        logger.info('sma_trend: %s\n' % supertrendtrend)
        logger.info('supertrend value:%s' % supertrenddf['SuperTrend'][-1])
        logger.info('supertrend Upper Band:%s' % supertrenddf['Upper Band'][-1])
        logger.info('supertrend Lower Band:%s' % supertrenddf['Lower Band'][-1])
        logger.info('supertrenddf[Trend][-1]:%s' % supertrenddf['Trend'][-1])
        logger.info('supertrenddf[TSL][-1]:%s' % supertrenddf['TSL'][-1])
        logger.info('supertrenddf[ATR][-1]:%s\n' % supertrenddf['ATR'][-1])
        logger.info('supertrend2 value:%s' % supertrenddf2['SuperTrend'][-1])
        logger.info('supertrend2 Upper Band:%s' % supertrenddf2['Upper Band'][-1])
        logger.info('supertrend2 Lower Band:%s' % supertrenddf2['Lower Band'][-1])
        logger.info('supertrenddf2[Trend][-1]:%s' % supertrenddf2['Trend'][-1])
        logger.info('supertrenddf2[TSL][-1]:%s' % supertrenddf2['TSL'][-1])
        logger.info('supertrenddf2[ATR][-1]:%s\n' % supertrenddf2['ATR'][-1])
        logger.info('supertrenddf[SuperTrend][-1]:%s + supertrenddf2[SuperTrend][-1]:%s ' % (supertrenddf['SuperTrend'][-1], supertrenddf2['SuperTrend'][-1]))
        logger.info('super_centerline: %s' % super_centerline)
        logger.info('rsi2[-1 ]%s' % rsi2[-1])
        logger.info('rsi50[-1 ]%s' % rsi50[-1])
        logger.info('rsi2_oversold: %s' % rsi2_oversold)
        logger.info('rsi2_overbought: %s' % rsi2_overbought)
        logger.info('price_under: %s' % price_under)
        logger.info('price_over: %s' % price_over)
        logger.info('half_before: %s' % half_before)
        logger.info('half_after: %s' % half_after)
        logger.info('get_whichpositon(): %s' % bitmex.get_whichpositon())
        logger.info('position_size(): %s' % bitmex.get_position_size())
        # entry
        if super2_long:
            logger.info('+ + + + + LONG + + + + + LONG + + + + + LONG + + + + + ')
            if bitmex.get_whichpositon() is None:  # and (not supertrendtrend and supertrendtrend2) and rsi2_overbought:
                logger.info('postion condition > None')
                # Cancel any stale short order, then rest a long limit at
                # the SuperTrend midpoint.
                if bitmex.get_open_order('Short'):
                    self.exchange.cancel('Short')
                self.exchange.entry("Long", True, lot, limit=math.ceil(super_centerline), post_only=True)
            elif bitmex.get_whichpositon() == 'LONG':
                logger.info('postion condition > LONG')
                if supertrendtrend and supertrendtrend2 and rsi2_oversold:  # closing
                    logger.info('postion condition > LONG > Closing')
                    self.exchange.order("Long", False, abs(bitmex.get_position_size()), limit=price + 2.5, post_only=True)
                elif rsi2_overbought:  # add more entry
                    logger.info('postion condition > LONG > Rsi2 overbout')
                    self.exchange.entry("LongAdd", True, lot, limit=price - 0.5, post_only=True)
                elif super_short:  # stop loss
                    # NOTE(review): this "stop loss" branch *adds* long
                    # exposure at market instead of reducing it -- confirm
                    # the intended side.
                    logger.info('postion condition > LONG > super_short(stop loss)')
                    self.exchange.entry("Long", True, lot)
                    self.exchange.entry("LongAdd", True, lot)
                else:
                    logger.info('postion condition > LONG > else')
                    self.exchange.order("Long", False, abs(bitmex.get_position_size()), limit=price + 10, post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('cancel SHORT on long trend')
                # self.exchange.cancel_all()
                # NOTE(review): close_all() is called twice here --
                # presumably redundant; verify against the exchange stub.
                self.exchange.close_all()
                self.exchange.close_all()
            else:
                logger.info('Super Shot --> Else')
        if super2_short:
            logger.info('- - - - - SHORT - - - - - SHORT - - - - - SHORT - - - - - ')
            if bitmex.get_whichpositon() is None:  # and rsi2_overbought and price_over:
                logger.info('postion condition > None')
                # Cancel any stale long order, then rest a short limit at
                # the SuperTrend midpoint.
                if bitmex.get_open_order('Long'):
                    self.exchange.cancel('Long')
                self.exchange.entry("Short", False, lot, limit=math.floor(super_centerline), post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('postion condition > SHORT')
                if price_under:  # closing
                    logger.info('postion condition > SHORT > price_under(closing)')
                    self.exchange.order("Short", True, abs(bitmex.get_position_size()), limit=price-2.5, when=price_under, post_only=True)
                elif rsi2_oversold:  # add more entry
                    logger.info('postion condition > SHORT > rsi2_oversold(add more entry)')
                    self.exchange.entry("ShortAdd", False, lot, limit=price - 0.5, post_only=True)
                elif super_long:  # stop loss
                    # NOTE(review): entry("Short", True, ...) passes the
                    # *buy* side under the Short id -- confirm intent.
                    logger.info('postion condition > SHORT > super_short(stop loss)')
                    self.exchange.entry("Short", True, lot)
                    self.exchange.entry("ShortAdd", True, lot)
                else:
                    logger.info('postion condition > SHORT > else')
                    self.exchange.order("Short", True, abs(bitmex.get_position_size()), limit=price - 10, post_only=True)
            elif bitmex.get_whichpositon() == 'LONG':
                logger.info('cancel LONG on short trend')
                self.exchange.close_all()
            else:
                logger.info('Super Shot --> Else')
        # P&L bookkeeping on every evaluation.
        self.dealcount += 1
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('dealcount:%s' % self.dealcount)
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
# Williams %R and RCI strategy (limit-order variant of Will_Rci).
class Willr(Bot):
    """Five-period Williams %R + long-RCI filter with post-only limit entries.

    Same signal logic as Will_Rci, but tracks open-position state
    (inlong/inshort), closes on the opposite signal, and enters/doubles
    with post-only limit orders instead of market entries.
    """

    # NOTE(review): evaluated at class-definition (import) time and makes a
    # network call to the exchange -- consider moving into __init__.
    prebalance = BitMex(threading=False).get_balance()
    # Number of strategy evaluations so far (used to cancel stale orders
    # on the very first run).
    start = 0
    # NOTE(review): pre_fb0/pre_fb100/firstlong/firstshort are unused in
    # this class (copied from WillnFibo).
    pre_fb0 = 0
    pre_fb100 = 0
    inlong = False
    inshort = False
    firstlong = False
    firstshort = False

    def __init__(self):
        # Run on 1-minute bars.
        Bot.__init__(self, '1m')

    def options(self):
        """Hyperopt search space for the three RCI lookback lengths."""
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 21, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 21, 34, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 34, 55, 1),
        }

    def strategy(self, open, close, high, low, volume):
        start = time.time()  # record start time
        self.start += 1
        # NOTE(review): never set to True in this class; the elif below is
        # therefore dead (the timezone-change logic lives in WillnFibo).
        flg_changed_timezone = False
        # lot = self.exchange.get_lot()
        # for test lot
        # lot = int(round(lot / 20))
        lot = 100
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        itv_s = self.input('rcv_short_len', int, 21)
        itv_m = self.input('rcv_medium_len', int, 34)
        itv_l = self.input('rcv_long_len', int, 55)
        rci_s = rci(close, itv_s)
        rci_m = rci(close, itv_m)
        rci_l = rci(close, itv_l)
        # Rescale RCI into -100..0 so the thresholds below are comparable
        # with the Williams %R values.
        ra = rci_s[-1] / 2 - 50
        rb = rci_m[-1] / 2 - 50
        rc = rci_l[-1] / 2 - 50
        # Williams %R (-100..0) over five Fibonacci-flavoured periods.
        a = willr(high, low, close, period=55)
        b = willr(high, low, close, period=144)
        c = willr(high, low, close, period=610)
        x = willr(high, low, close, period=4181)
        y = willr(high, low, close, period=6785)
        logger.info('-----------------price / lot ----------------')
        logger.info('price:%s' % price)
        logger.info('lot:%s' % str(lot))
        logger.info('-----------------o h l c v ----------------')
        logger.info('open:%s' % open[-1])
        logger.info('high:%s' % high[-1])
        logger.info('low:%s' % low[-1])
        logger.info('close:%s' % close[-1])
        logger.info('volume:%s' % volume[-1])
        logger.info('-----------------a b c x y ----------------')
        logger.info('willr_a : %s' % a[-1])
        logger.info('willr_b : %s' % b[-1])
        logger.info('willr_c : %s' % c[-1])
        logger.info('willr_x : %s' % x[-1])
        logger.info('willr_y : %s' % y[-1])
        logger.info('willr_rc : %s' % rc)
        # Nine alternative oversold configurations; any one qualifies.
        buycon1 = True if (a[-1] < -97 and (b[-1] < -97 or c[-1] < -97) and (x[-1] < -80 or y[-1] < -80)) else False
        buycon2 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -90) and (x[-1] > -35 or y[-1] > -35)) else False
        buycon3 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] > -70) and (x[-1] > -50 or y[-1] > -25)) else False
        buycon4 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -97) and (x[-1] > -50 or y[-1] > -50)) else False
        buycon5 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -75) and (x[-1] > -25 or y[-1] > -25)) else False
        buycon6 = True if ((b[-1] + 100) * (c[-1] + 100) == 0 and (c[-1] < -75 and x[-1] > -30 or y[-1] > -30)) else False
        buycon7 = True if ((b[-1] + 100) == 0 and (c[-1] > -30 and x[-1] > -30 or y[-1] > -30)) else False
        buycon8 = True if c[-1] < -97 else False
        buycon9 = True if a[-1] < -97 and b[-1] < -97 and c[-1] > -50 else False
        # Mirror-image overbought configurations for the short side.
        sellcon1 = True if (a[-1] > -3 and (b[-1] > -3 or c[-1] > -3) and (x[-1] > -20 or y[-1] > -20)) else False
        sellcon2 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -10) and (x[-1] < -65 or y[-1] < -65)) else False
        sellcon3 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -30) and (x[-1] < -50 or y[-1] < -75)) else False
        sellcon4 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -3) and (x[-1] < -50 or y[-1] < -50)) else False
        sellcon5 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -25) and (x[-1] < -75 or y[-1] < -75)) else False
        sellcon6 = True if (((b[-1]) * (c[-1])) == 0 and c[-1] > -25 and (x[-1] < -70 or y[-1] < -70)) else False
        sellcon7 = True if ((b[-1]) == 0 and (c[-1] < -70 and x[-1] < -70 or y[-1] < -70)) else False
        sellcon8 = True if c[-1] > -3 else False
        sellcon9 = True if a[-1] > -3 and b[-1] > -3 and c[-1] < -50 else False
        # Long RCI gate: only trade in the direction of the stretched RCI.
        buyRCIfillerCon = True if rc < -80 else False
        sellRCIfillerCon = True if rc > -20 else False
        buyWillfilterCon = buycon1 or buycon2 or buycon3 or buycon4 or buycon5 or buycon6 or buycon7 or buycon8 or buycon9
        sellWillFilrerCon = sellcon1 or sellcon2 or sellcon3 or sellcon4 or sellcon5 or sellcon6 or sellcon7 or sellcon8 or sellcon9
        # set condition
        buyCons = buyWillfilterCon and buyRCIfillerCon
        sellCons = sellWillFilrerCon and sellRCIfillerCon
        buyCon = True if buyCons else False
        sellCon = True if sellCons else False
        # An opposite %R signal closes the open position.
        buyCloseCon = sellWillFilrerCon
        sellCloseCon = buyWillfilterCon
        logger.info('-----------------inlong / inshort ----------------')
        logger.info('inlong:%s' % self.inlong)
        logger.info('inshort:%s' % self.inshort)
        logger.info('-----------------buyCon / sellCon ----------------')
        logger.info('buyCon:%s' % buyCon)
        logger.info('sellCon:%s' % sellCon)
        logger.info('buyCloseCon:%s' % buyCloseCon)
        logger.info('sellCloseCon:%s' % sellCloseCon)
        logger.info('bitmex.get_whichpositon():%s' % bitmex.get_whichpositon())
        logger.info('bitmex.get_position_size():%s' % bitmex.get_position_size())
        # Resynchronize local state flags with the actual exchange position.
        if bitmex.get_position_size() > 0:
            logger.info('-- >> bitmex.get_position_size > 0 --')
            self.inlong = True
        elif bitmex.get_position_size() < 0:
            logger.info('-- >> bitmex.get_position_size < 0 --')
            self.inshort = True
        # Cancel stale resting orders on the very first evaluation.
        if self.start==1:
            logger.info('-- self.start==1 --')
            self.exchange.cancel_all()
        elif (flg_changed_timezone):
            logger.info('-- (flg_changed_timezone')
            self.exchange.cancel_all()
        # init: clear flags if the exchange reports no open position.
        if bitmex.get_whichpositon() is None and (self.inlong is True or self.inshort is True):
            logger.info('-- (flg_changed_timezone >> init: inlone --> %s, inshort --> %s' % (self.inlong, self.inshort))
            self.inlong = False
            self.inshort = False
        else:
            logger.info('-- else and pass --')
            pass
        # Close on the opposite signal.
        if (buyCloseCon) and (self.inlong):
            logger.info('-- (buyCloseCon) and (self.inlong) --')
            self.exchange.close_all()
            self.inlong = False
        if (sellCloseCon) and (self.inshort):
            logger.info('-- (sellCloseCon) and (self.inshort) --')
            self.exchange.close_all()
            self.inshort = False
        # Long entry: rest a post-only limit just under the market; double
        # the size when a position already exists.
        if (buyCon) and (not self.inlong):
            logger.info('if (buyCon) and (not self.inlong)::')
            if price <= close[-1]:
                logger.info('>> in +++ price <= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
                if bitmex.get_position_size() != 0:
                    # NOTE(review): position_size is negative for shorts,
                    # making the doubled quantity negative -- confirm the
                    # exchange wrapper's handling.
                    logger.info('-- bitmex.get_position_size() != 0 --')
                    self.exchange.order("Long", True, bitmex.get_position_size()*2, limit=price-0.5, post_only=True)
                else:
                    logger.info('-- bitmex.get_position_size() != 0 / else --')
                    self.exchange.order("Long", True, lot, limit=price-0.5, post_only=True)
            elif price < low[-1]:
                logger.info('-- price < low[-1] --')
                self.exchange.order("Long", True, lot, limit=price-0.5, post_only=True)
            else:
                pass
        # Short entry: mirror of the long entry above.
        if (sellCon) and (not self.inshort):
            logger.info('if (sellCon) and (not self.inlong)::')
            if price >= close[-1]:
                logger.info('>> in +++ price >= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
                if bitmex.get_position_size() != 0:
                    logger.info('-- bitmex.get_position_size() != 0 --')
                    self.exchange.order("Short", False, bitmex.get_position_size()*2, limit=price+0.5, post_only=True)
                else:
                    logger.info('-- bitmex.get_position_size() != 0 / else --')
                    self.exchange.order("Short", False, lot, limit=price+0.5, post_only=True)
            elif price > high[-1]:
                logger.info('-- price > high[-1] --')
                # BUG FIX: this sell-side breakout order was tagged "Long"
                # (copy-paste from the buy branch), so it netted against
                # the Long order id instead of opening a Short.
                self.exchange.order("Short", False, lot, limit=price+0.5, post_only=True)
            else:
                pass
        # P&L bookkeeping and timing.
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('----------------- realised_pnl ---------')
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info("time2 : %s" % str(time.time() - start))
        logger.info('----------------- END ---------------- END ----------------')
# <NAME> and Fibo
class WillnFibo(Bot):
prebalance = BitMex(threading=False).get_balance()
start = 0
pre_fb0 = 0
pre_fb100 = 0
inlong = False
inshort = False
firstlong = False
firstshort = False
    def __init__(self):
        # Run on 1-minute bars.
        Bot.__init__(self, '1m')
    def options(self):
        # Hyperopt search space for the three RCI lookback lengths
        # (narrower ranges than Will_Rci / Willr).
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 5, 15, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 10, 20, 1),
        }
def strategy(self, open, close, high, low, volume):
start = time.time() # 시작 시간 저장
self.start += 1
flg_changed_timezone = False
# lot = self.exchange.get_lot()
# # for test lot
# # lot = int(round(lot / 20))
lot = 100
bitmex = BitMex(threading=False)
price = bitmex.get_market_price()
# channel breakout for 1D
resolution_d = self.input(defval=1, title="resolution", type=int)
source_d = self.exchange.security(str(resolution_d) + 'd')
series_high_d = source_d['high'].values
series_low_d = source_d['low'].values
up = last(highest(series_high_d, 1))
dn = last(lowest(series_low_d, 1))
logger.info("time1 :%s" % str(time.time() - start))
start = time.time() # 시작 시간 저장
# self.exchange.entry("ChLong", True, round(lot), stop=up)
# self.exchange.entry("ChShort", False, round(lot), stop=dn)
# fibo for 1h
resolution = self.input(defval=1, title="resolution", type=int)
source = self.exchange.security(str(resolution) + 'h')
series_high = source['high'].values
series_low = source['low'].values
fb100 = last(highest(series_high, 1)) # 1시간 1, 1D의 경우는 resolution도 변경
fb0 = last(lowest(series_low, 1))
logger.info('resolution: %s' % resolution)
logger.info('fb100_resol: %s' % fb100)
logger.info('fb0_resol: %s' % fb0)
logger.info('self.pre_fb100: %s' % self.pre_fb100)
logger.info('self.pre_fb0: %s' % self.pre_fb0)
if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
flg_changed_timezone = True
logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
if bitmex.get_whichpositon() is None:
self.exchange.cancel_all()
fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100)
fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100)
fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100)
fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382)
fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618)
fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00)
# willr for five willilams
a = willr(high, low, close, period=55)
b = willr(high, low, close, period=144)
c = willr(high, low, close, period=610)
x = willr(high, low, close, period=4181)
y = willr(high, low, close, period=6785)
# logger.info('---- a ----')
# for i in range(1, 5):
# logger.info('a [%s] *******: %s' % (-i, a[-i]))
# logger.info('---- b ----')
# for i in range(1, 5):
# logger.info('b [%s] *******: %s' % (-i, b[-i]))
# logger.info('---- c ----')
# for i in range(1, 5):
# logger.info('c [%s] *******: %s' % (-i, c[-i]))
# logger.info('---- x ----')
# for i in range(1, 5):
# logger.info('x [%s] *******: %s' % (-i, x[-i]))
# logger.info('---- y ----')
# for i in range(1, 5):
# logger.info('x [%s] *******: %s' % (-i, y[-i]))
logger.info('-----------------price / lot ----------------')
logger.info('price:%s' % price)
logger.info('lot:%s' % str(lot))
logger.info('-----------------o h l c v ----------------')
logger.info('open:%s' % open[-1])
logger.info('high:%s' % high[-1])
logger.info('low:%s' % low[-1])
logger.info('close:%s' % close[-1])
logger.info('volume:%s' % volume[-1])
logger.info('-----------------a b c x y ----------------')
logger.info('willr_a : %s' % a[-1])
logger.info('willr_b : %s' % b[-1])
logger.info('willr_c : %s' % c[-1])
logger.info('willr_x : %s' % x[-1])
logger.info('willr_y : %s' % y[-1])
buycon1 = True if (a[-1] < -97 and (b[-1] < -97 or c[-1] < -97) and (x[-1] < -80 or y[-1] < -80)) else False
buycon2 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -90) and (x[-1] > -35 or y[-1] > -35)) else False
buycon3 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] > -70) and (x[-1] > -50 or y[-1] > -25)) else False
buycon4 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -97) and (x[-1] > -50 or y[-1] > -50)) else False
buycon5 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -75) and (x[-1] > -25 or y[-1] > -25)) else False
buycon6 = True if ((b[-1] + 100) * (c[-1] + 100) == 0 and (c[-1] < -75 and x[-1] > -30 or y[-1] > -30)) else False
buycon7 = True if ((b[-1] + 100) == 0 and (c[-1] > -30 and x[-1] > -30 or y[-1] > -30)) else False
buycon8 = True if c[-1] < -97 else False
buycon9 = True if a[-1] < -97 and b[-1] < -97 and c[-1] > -50 else False
sellcon1 = True if (a[-1] > -3 and (b[-1] > -3 or c[-1] > -3) and (x[-1] > -20 or y[-1] > -20)) else False
sellcon2 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -10) and (x[-1] < -65 or y[-1] < -65)) else False
sellcon3 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -30) and (x[-1] < -50 or y[-1] < -75)) else False
sellcon4 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -3) and (x[-1] < -50 or y[-1] < -50)) else False
sellcon5 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -25) and (x[-1] < -75 or y[-1] < -75)) else False
sellcon6 = True if (((b[-1]) * (c[-1])) == 0 and c[-1] > -25 and (x[-1] < -70 or y[-1] < -70)) else False
sellcon7 = True if ((b[-1]) == 0 and (c[-1] < -70 and x[-1] < -70 or y[-1] < -70)) else False
sellcon8 = True if c[-1] > -3 else False
sellcon9 = True if a[-1] > -3 and b[-1] > -3 and c[-1] < -50 else False
buyCon = True if buycon1 or buycon2 or buycon3 or buycon4 or buycon5 or buycon6 or buycon7 or buycon8 or buycon9 else False
sellCon = True if sellcon1 or sellcon2 or sellcon3 or sellcon4 or sellcon5 or sellcon6 or sellcon7 or sellcon8 or sellcon9 else False
buyCloseCon = True if a[-1] > -10 else False
sellCloseCon = True if a[-1] < -90 else False
logger.info('-----------------inlong / inshort ----------------')
logger.info('inlong:%s' % self.inlong)
logger.info('inshort:%s' % self.inshort)
logger.info('-----------------buyCon / sellCon ----------------')
logger.info('buyCon:%s' % buyCon)
logger.info('sellCon:%s' % sellCon)
logger.info('buyCloseCon:%s' % buyCloseCon)
logger.info('sellCloseCon:%s' % sellCloseCon)
# if self.inlong:
# self.inlong = True
#
# if self.inshort:
# self.inshort = True
fb100_4h = last(highest(series_high, 4)) # 1시간 1, 1D의 경우는 resolution도 변경
fb0_4h = last(lowest(series_low, 4))
fiboBuyCon = True if fb0 <= fb0_4h else False
logger.info('fiboBuyCon:%s' % fiboBuyCon)
fiboSellCon = True if fb100 >= fb100_4h else False
logger.info('fiboSellCon:%s' % fiboSellCon)
logger.info('bitmex.get_whichpositon():%s' % bitmex.get_whichpositon())
logger.info('bitmex.get_position_size():%s' % bitmex.get_position_size())
# if bitmex.get_whichpositon() is not None:
# logger.info('-- bitmex.get_whichpositon is not None --')
if bitmex.get_position_size() > 0:
logger.info('-- >> bitmex.get_position_size > 0 --')
self.inlong = True
elif bitmex.get_position_size() < 0:
logger.info('-- >> bitmex.get_position_size < 0 --')
self.inshort = True
if self.start==1:
logger.info('-- self.start==1 --')
self.exchange.cancel_all()
if fiboBuyCon:
logger.info('if fiboBuyCon:%s' % fiboBuyCon)
self.exchange.order("FLong", True, lot, limit=fb062, post_only=True)
if fiboSellCon:
logger.info('if fiboSellCon:%s' % fiboSellCon)
self.exchange.order("FShort", False, lot, limit=fb162, post_only=True)
if price < up:
logger.info('price < up: %s' % up)
self.exchange.order("ChLong", True, lot, stop=up)
if price > dn:
logger.info('price > dn: %s' % dn)
self.exchange.order("ChShort", False, lot, stop=dn)
elif (flg_changed_timezone): # and (not self.inlong)) and (not self.inshort):
logger.info('-- (flg_changed_timezone') #and (not self.inlong)) and (not self.inshort) --')
self.exchange.cancel_all()
# init
if bitmex.get_whichpositon() is None and (self.inlong is True or self.inshort is True):
logger.info('-- (flg_changed_timezone >> init: inlone --> %s, inshort --> %s' % (self.inlong, self.inshort))
self.inlong = False
self.inshort = False
# set fibo conditions
if fiboBuyCon:
logger.info('if fiboBuyCon:%s' % fiboBuyCon)
self.exchange.order("FLong", True, lot, limit=fb062, post_only=True)
if fiboSellCon:
logger.info('if fiboSellCon:%s' % fiboSellCon)
self.exchange.order("FShort", False, lot, limit=fb162, post_only=True)
if price < up:
logger.info('price < up: %s' % up)
self.exchange.order("ChLong", True, lot, stop=up)
if price > dn:
logger.info('price > dn: %s' % dn)
self.exchange.order("ChShort", False, lot, stop=dn)
# elif (flg_changed_timezone and self.inlong and not self.inshort):
# logger.info('-- (flg_changed_timezone and self.inlong and not self.inshort) --')
# self.exchange.order("FShort", False, lot, limit=fb200, post_only=True)
# elif (flg_changed_timezone and not self.inlong and self.inshort):
# logger.info('-- (flg_changed_timezone and not self.inlong and self.inshort) --')
# self.exchange.order("FLong", True, lot, limit=fb0100, post_only=True)
else:
logger.info('-- else and pass --')
pass
if (buyCloseCon) and (self.inlong):
# self.exchange.close("Long")
logger.info('-- (buyCloseCon) and (self.inlong) --')
self.exchange.close_all()
self.inlong = False
if (sellCloseCon) and (self.inshort):
# self.exchange.close("Short")
logger.info('-- (sellCloseCon) and (self.inshort) --')
self.exchange.close_all()
self.inshort = False
if (buyCon) and (not self.inlong):
logger.info('if (buyCon) and (not self.inlong)::')
if price <= close[-1]:
logger.info('>> in +++ price <= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
if bitmex.get_position_size() != 0:
logger.info('-- bitmex.get_position_size() != 0 --')
self.exchange.order("Long", True, bitmex.get_position_size()*2, limit=price-0.5, post_only=True)
else:
logger.info('-- bitmex.get_position_size() != 0 / else --')
self.exchange.order("Long", True, lot, limit=price-0.5, post_only=True)
# self.inlong = True
else:
pass
if (sellCon) and (not self.inshort):
logger.info('if (sellCon) and (not self.inlong)::')
if price >= close[-1]:
logger.info('>> in +++ price >= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
if bitmex.get_position_size() != 0:
logger.info('-- bitmex.get_position_size() != 0 --')
self.exchange.order("Short", False, bitmex.get_position_size()*2, limit=price+0.5, post_only=True)
else:
logger.info('-- bitmex.get_position_size() != 0 / else --')
self.exchange.order("Short", False, lot, limit=price+0.5, post_only=True)
# self.inshort = True
else:
pass
# save pre-timezone's fb0, fb100 values
self.pre_fb0 = fb0
self.pre_fb100 = fb100
diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
realised_pnl = bitmex.get_margin()['realisedPnl']
logger.info('----------------- realised_pnl ---------')
logger.info('prebalance():%s' % self.prebalance)
logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
logger.info('diff:%s' % diff)
logger.info('realised_pnl:%s' % realised_pnl)
logger.info("time2 : %s" % str(time.time() - start))
logger.info('----------------- END ---------------- END ----------------')
# rci
class Rci(Bot):
    """RCI (Rank Correlation Index) mean-reversion strategy on 5m candles.

    Uses three RCI periods: short/medium for the entry trigger and the
    long period as a trend filter.  Closes everything when the medium
    RCI rolls over from either extreme band.
    """

    def __init__(self):
        # Run the bot on the 5-minute bin.
        Bot.__init__(self, '5m')

    def options(self):
        # Hyperopt search space for the three RCI lookback lengths.
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 5, 15, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 10, 20, 1),
        }

    def strategy(self, open, close, high, low, volume):
        """One tick: compute the three RCIs and enter/exit accordingly."""
        lot = self.exchange.get_lot()
        itv_s = self.input('rcv_short_len', int, 5)
        itv_m = self.input('rcv_medium_len', int, 9)
        itv_l = self.input('rcv_long_len', int, 15)
        rci_s = rci(close, itv_s)
        rci_m = rci(close, itv_m)
        rci_l = rci(close, itv_l)
        # Long: short or medium RCI deep in the oversold band and falling,
        # while the long RCI is below -10 but turning up.
        # BUGFIX: was `rci_l[-2] > rci_l[-2]` (always False), which made the
        # long entry unreachable; mirror the short condition's slope test.
        long = ((-80 > rci_s[-1] > rci_s[-2]) or (-82 > rci_m[-1] > rci_m[-2])) \
            and (rci_l[-1] < -10 and rci_l[-1] > rci_l[-2])
        # Short: overbought short RCI rising, or medium RCI deep and falling,
        # while the long RCI is above 10 but turning down.
        short = ((80 < rci_s[-1] < rci_s[-2]) or (rci_m[-1] < -82 and rci_m[-1] < rci_m[-2])) \
            and (10 < rci_l[-1] < rci_l[-2])
        # Exit: medium RCI rolling over from either extreme band.
        close_all = 80 < rci_m[-1] < rci_m[-2] or -80 > rci_m[-1] > rci_m[-2]
        if long:
            self.exchange.entry("Long", True, lot)
        elif short:
            self.exchange.entry("Short", False, lot)
        elif close_all:
            self.exchange.close_all()
# Fibonacci Retracement & Expansion Strategy
# class Fibo(Bot):
# prebalance = BitMex(threading=False).get_balance()
# start = 0
# pre_fb0 = 0
# pre_fb100 = 0
# idx = 0
# def __init__(self):
# Bot.__init__(self, '1m')
#
# def options(self):
# return {
# 'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
# }
#
# def strategy(self, open, close, high, low, volume):
# self.start += 1
# flg_changed_timezone = False
#
# lot = self.exchange.get_lot()
# # for test lot
# # lot = int(round(lot / 20))
# lot = 500
# bitmex = BitMex(threading=False)
# price = bitmex.get_market_price()
#
#
# sma_base_l = self.input('sma_short_len', int, 200)
#
# resolution = self.input(defval=5, title="resolution", type=int) # defval 변경, 예) 5분 --> 5, 'm' or 1시간 1, 'h', 1Day 1, 'd'
# source = self.exchange.security(str(resolution) + 'm') # def __init__ 비교
# logger.info('source: %s' % source)
#
# series_high = source['high'].values
# series_low = source['low'].values
#
# fb100 = last(highest(series_high, 1)) # 1시간 1, 1D의 경우는 resolution도 변경
# fb0 = last(lowest(series_low, 1))
#
# logger.info('resolution: %s' % resolution)
# logger.info('fb100_resol: %s' % fb100)
# logger.info('self.pre_fb100: %s' % self.pre_fb100)
# logger.info('fb0_resol: %s' % fb0)
# logger.info('self.pre_fb0: %s' % self.pre_fb0)
#
#
#
# # for test
# # fb100 = price + 15
# # fb0 = price - 15
#
# # 최근 1시간을 본봉단위로 획득
# # fibo_l = self.input('length', int, 1440) # 1Day = 60min * 24hr
# # fibo_l = self.input('length', int, 60) # 1Day = 60min * 24hr
# # fibo100 = last(highest(high, fibo_l))
# # fibo0 = last(lowest(low, fibo_l))
#
# fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
# fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
# fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
#
# fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100)
# fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100)
# fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100)
#
# fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382)
# fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618)
# fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00)
#
# qty= bitmex.get_position_size()
#
# # 익손평가
# longstatus = bitmex.get_position_avg_price() - fb0
# shortstatus = bitmex.get_position_avg_price() - fb100
# gprice = price
#
# # if bitmex.get_whichpositon() == 'LONG' and longstatus > 0:
# # qL0 = lot * 1
# # qS100 = abs(qty) + lot * 1
# # gprice = price - 1
# # elif bitmex.get_whichpositon() == 'SHORT'and shortstatus > 0:
# # qL0 = abs(qty) + lot * 1
# # qS100 = lot * 1
# # gprice = price + 1
# # else:
# # qL0 = lot * 1
# # qS100 = lot * 1
#
# qS100 = lot*1
# qL0 = lot*1
#
# if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
# flg_changed_timezone = True
# logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
# if bitmex.get_whichpositon() is None:
# self.exchange.cancel_all()
#
#
# if self.start == 1:
# # short position
# self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, post_only=True)
# # self.exchange.order("S162"+str(self.idx), False, lot*1, limit=fb162, post_only=True)
# self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, post_only=True)
# self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, post_only=True)
#
# # long position
# self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, post_only=True)
# self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, post_only=True)
# # self.exchange.order("L062"+str(self.idx), True, lot*1, limit=fb062, post_only=True)
# self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, post_only=True)
#
#
# L0 = bitmex.get_open_order("L0"+str(self.idx))
# L038 = bitmex.get_open_order("L038"+str(self.idx))
# L062 = bitmex.get_open_order("L062"+str(self.idx))
# L0100 = bitmex.get_open_order("L0100"+str(self.idx))
#
# S200 = bitmex.get_open_order("S200"+str(self.idx))
# S162 = bitmex.get_open_order("S162"+str(self.idx))
# S138 = bitmex.get_open_order("S138"+str(self.idx))
# S100 = bitmex.get_open_order("S100"+str(self.idx))
#
# #
# # logger.info('(L0 is None): %s' % (L0 is None))
# if flg_changed_timezone is True:
# self.idx += 1
#
# # 이전 self.idx-1 타임존의 기본 주문만 취소, 나머지 역지정 된것 들은 그냥 둔다.
# # self.exchange.cancel("L0"+str(self.idx-1))
# # self.exchange.cancel("L038"+str(self.idx-1))
# # self.exchange.cancel("L062"+str(self.idx-1))
# # self.exchange.cancel("L0100"+str(self.idx-1))
# # self.exchange.cancel("S200"+str(self.idx-1))
# # self.exchange.cancel("S162"+str(self.idx-1))
# # self.exchange.cancel("S138"+str(self.idx-1))
# # self.exchange.cancel("S100"+str(self.idx-1))
# self.exchange.cancel_all()
# longshort = True
# if bitmex.get_position_size() > 0:
# longshort = False
# if bitmex.get_position_size() < 0:
# longshort = True
#
# logger.info('bitmex.get_position_size(): %s' % bitmex.get_position_size())
# if bitmex.get_position_size() != 0:
# self.exchange.order("Garbage", longshort, bitmex.get_position_size(), limit=gprice, post_only=True)
#
# # self.exchange.cancel_all()
# # self.exchange.close_all() # entry order
# # long position
#
# if price > fb0:
# logger.info('price > fb0:%')
# logger.info('flg_changed_timezone: %s' % flg_changed_timezone)
# self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, when=(L0 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, when=(L038 is None or flg_changed_timezone), post_only=True)
# # self.exchange.order("L062"+str(self.idx), True, lot*1, limit=fb062, when=(L062 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, when=(L0100 is None or flg_changed_timezone), post_only=True)
#
# # short position
# if price < fb100:
# logger.info('price < fb100' )
# logger.info('flg_changed_timezone: %s' % flg_changed_timezone)
#
# self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, when=(S200 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("S162"+str(self.idx), False, lot*1, limit=fb162, when=(S162 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, when=(S138 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, when=(S100 is None or flg_changed_timezone), post_only=True)
#
# L0_w = bitmex.get_open_order("L0_w"+str(self.idx))
# L038_w = bitmex.get_open_order("L038_w"+str(self.idx))
# L062_w = bitmex.get_open_order("L062_w"+str(self.idx))
# L0100_w = bitmex.get_open_order("L0100_w"+str(self.idx))
#
# S100_w = bitmex.get_open_order("S100_w"+str(self.idx))
# S138_w = bitmex.get_open_order("S138_w"+str(self.idx))
# S162_w = bitmex.get_open_order("S162_w"+str(self.idx))
# S200_w = bitmex.get_open_order("S200_w"+str(self.idx))
#
#
# # win order of stoplimit
# if price <= fb0: #and L0 is None:
# self.exchange.order("L0_w"+str(self.idx), False, lot*1, limit=fb38, stop=fb0) # post_only=True)
# logger.info('rice <= fb0: %s' % fb0)
# if price <= fb038: # and L038 is None:
# self.exchange.order("L038_w"+str(self.idx), False, lot*1, limit=fb0, stop=fb038)
# logger.info('price <= fb038: %s' % fb038)
# if price <= fb062: # and L062 is None:
# self.exchange.order("L062_w"+str(self.idx), False, lot*1, limit=fb038, stop=fb062)
# logger.info('price <= fb062: %s' % fb062)
# if price <= fb0100: # and L0100 is None:
# self.exchange.order("L0100_w"+str(self.idx), False, lot*2, limit=fb062, stop=fb0100)
# logger.info('price <= fb0100: %s' % fb0100)
#
#
# if price >= fb100: # and S100 is None:
# logger.info('price >= fb100: %s' % fb100)
# self.exchange.order("S100_w"+str(self.idx), True, lot*1, limit=fb62, stop=fb0100)
# if price >= fb138: # and S138 is None:
# self.exchange.order("S138_w"+str(self.idx), True, lot*1, limit=fb100, stop=fb138)
# logger.info('price >= fb138: %s' % fb138)
# if price >=fb162: # and S162 is None:
# self.exchange.order("S162_w"+str(self.idx), True, lot*1, limit=fb138, stop=fb162)
# logger.info('price >= fb162 %s' % fb162)
# if price >= fb200: # and S200 is None:
# self.exchange.order("S200_w"+str(self.idx), True, lot*2, limit=fb162, stop=fb200)
# logger.info('price >= fb200: %s' % fb200)
#
# # logger.info('bitmex.get_margin():%s' % bitmex.get_margin())
# # logger.info('bitmex.get_position():%s' % bitmex.get_position())
#
# self.pre_fb0 = fb0
# self.pre_fb100 = fb100
#
# # for debug
# logger.info('fb200: %s' % fb200)
# logger.info('fb162: %s' % fb162)
# logger.info('fb138: %s' % fb138)
# logger.info('fb100: %s' % fb100)
# logger.info('fb62: %s' % fb62)
# logger.info('fb50: %s' % fb50)
# logger.info('fb38: %s' % fb38)
# logger.info('fb0: %s' % fb0)
# logger.info('fb038: %s' % fb038)
# logger.info('fb062: %s' % fb062)
# logger.info('fb0100: %s' % fb0100)
#
# diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
#
# realised_pnl = bitmex.get_margin()['realisedPnl']
#
# logger.info('prebalance():%s' % self.prebalance)
# logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
# logger.info('diff:%s' % diff)
# logger.info('realised_pnl:%s' % realised_pnl)
#
# logger.info('--------------------------------------------------')
# rsi2
class RSI2(Bot): # logic https: // stock79.tistory.com / 177
    """RSI(2) pullback strategy gated by a Supertrend direction filter.

    Trend direction comes from Supertrend on 1m candles; entries are
    pullbacks signalled by the 2-period RSI, with a 5-bar SMA acting as
    the profit-taking line.
    """

    # NOTE(review): evaluated at class-definition time — importing the module
    # performs a live exchange call; confirm this is intended.
    prebalance = BitMex(threading=False).get_balance()
    dealcount = 0  # number of strategy() invocations (used only for logging)

    def __init__(self):
        # Run the bot on the 1-minute bin.
        Bot.__init__(self, '1m')

    def options(self):
        # Hyperopt search space.
        return {
            'length': hp.randint('length', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        """One tick: compute RSI/Supertrend/SMA signals and manage orders."""
        lot = self.exchange.get_lot()
        # for test lot
        lot = int(round(lot / 50))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        # variants settings
        rsi2_len = self.input('length', int, 2)
        rsi50_len = self.input('length', int, 50)
        rsi2 = rsi(close, rsi2_len)
        rsi50 = rsi(close, rsi50_len)
        factor = self.input('factor', int, 20)
        period = self.input('period', int, 7)
        resolution = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        source = self.exchange.security(str(resolution) + 'm')  # compare with def __init__
        supertrenddf = supertrend(source, factor, period)
        fast_len = self.input('fast_len', int, 5)
        half_len = self.input('half_len', int, 5)
        slow_len = self.input('slow_len', int, 200)
        fast_sma = sma(close, fast_len)
        half_sma = sma(close, half_len)
        slow_sma = sma(close, slow_len)
        # conditions
        sma_long = over(fast_sma[-1], slow_sma[-1])
        sma_short = under(fast_sma[-1], slow_sma[-1])
        super_long = over(close[-1], supertrenddf['TSL'][-1])
        super_short = under(close[-1], supertrenddf['TSL'][-1])
        super_stoploss = supertrenddf['TSL'][-1]
        supertrendtrend = supertrenddf['Trend'][-1]
        rsi2_overbought = over(rsi2[-1], 95)
        rsi2_oversold = under(rsi2[-1], 5)
        rsi50_over = over(rsi50[-1], 50)
        rsi50_under = under(rsi50[-1], 50)
        price_under = under(price, half_sma[-1])
        price_over = over(price, half_sma[-1])
        half_before = over(close[-1], half_sma[-1])
        half_after = under(close[-1], half_sma[-1])
        # show infomations
        logger.info('price: %s' % price)
        logger.info('fast_sma[-1]: %s' % fast_sma[-1])
        logger.info('slow_sma[-1]: %s' % slow_sma[-1])
        logger.info('sma_long: %s' % sma_long)
        logger.info('sma_short: %s' % sma_short)
        logger.info('super_long: %s' % super_long)
        logger.info('super_short: %s' % super_short)
        logger.info('super_stoploss: %s' % super_stoploss)
        logger.info('sma_trend: %s' % supertrendtrend)
        logger.info('rsi2[-1 ]%s' % rsi2[-1])
        logger.info('rsi50[-1 ]%s' % rsi50[-1])
        logger.info('rsi2_oversold: %s' % rsi2_oversold)
        logger.info('rsi2_overbought: %s' % rsi2_overbought)
        logger.info('price_under: %s' % price_under)
        logger.info('price_over: %s' % price_over)
        logger.info('half_before: %s' % half_before)
        logger.info('half_after: %s' % half_after)
        logger.info('get_whichpositon(): %s' % bitmex.get_whichpositon())
        logger.info('position_size(): %s' % bitmex.get_position_size())
        # entry
        if super_long: #long trend
            logger.info('+ + + + + LONG + + + + + LONG + + + + + LONG + + + + + ')
            if bitmex.get_whichpositon() is None:
                if sma_long and rsi2_oversold or price_under:
                    logger.info('postion condition > None > and all short condition order')
                    self.exchange.entry("Long", True, lot, limit=price-0.5, post_only=True)
                else:
                    logger.info('postion condition > None > default long order')
                    self.exchange.entry("Long", True, lot, limit=math.ceil(super_stoploss), post_only=True)
            elif bitmex.get_whichpositon() == 'LONG':
                logger.info('postion condition > LONG')
                if price_over: # closing
                    logger.info('postion condition > LONG > Closing')
                    self.exchange.order("Long", False, abs(bitmex.get_position_size()), limit=price + 1.5, post_only=True)
                elif rsi2_overbought: # add more entry
                    logger.info('postion condition > LONG > Rsi2 overbougt add more entry')
                    self.exchange.entry("LongAdd", True, lot, limit=price - 0.5, post_only=True)
                elif super_short: # stop loss
                    # NOTE(review): labelled "stop loss" but places BUY entries
                    # while already LONG — this adds exposure rather than
                    # closing it; verify against the Short branch below.
                    logger.info('postion condition > LONG > super_short(stop loss)')
                    self.exchange.entry("Long", True, lot)
                    self.exchange.entry("LongAdd", True, lot)
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('cancel SHORT on long trend')
                # self.exchange.cancel_all()
                self.exchange.close_all()
            else:
                # self.exchange.cancel_all()
                logger.info('Super Long --> Else')
        if super_short: # short trend
            logger.info('- - - - - SHORT - - - - - SHORT - - - - - SHORT - - - - - ')
            if bitmex.get_whichpositon() is None:
                if sma_short and rsi2_overbought or price_over:
                    logger.info('postion condition > None > and all short condition order')
                    self.exchange.entry("Short", False, lot, limit=price+0.5, post_only=True)
                else:
                    logger.info('postion condition > None > default short order')
                    self.exchange.entry("Short", False, lot, limit=math.floor(super_stoploss), post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('postion condition > SHORT')
                if price_under: # closing
                    logger.info('postion condition > SHORT > price_under(closing)')
                    self.exchange.order("Short", True, abs(bitmex.get_position_size()), limit=price - 1.5, post_only=True)
                elif rsi2_oversold: # add more entry
                    # NOTE(review): limit is price - 0.5, unlike the long-side
                    # add (price - 0.5 for a BUY); a short add would normally
                    # rest ABOVE market (price + 0.5) — confirm intent.
                    logger.info('postion condition > SHORT > rsi2_oversold(add more entry)')
                    self.exchange.entry("ShortAdd", False, lot, limit=price - 0.5, post_only=True)
                elif super_long: # stop loss
                    # NOTE(review): log says super_short but the condition is
                    # super_long; also side=True (buy) with entry() — verify
                    # that this actually flattens the short.
                    logger.info('postion condition > SHORT > super_short(stop loss)')
                    self.exchange.entry("Short", True, lot)
                    self.exchange.entry("ShortAdd", True, lot)
            elif bitmex.get_whichpositon() == 'LONG':
                logger.info('cancel LONG on short trend')
                self.exchange.close_all()
            else:
                logger.info('Super Shot --> Else')
        self.dealcount += 1
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('dealcount:%s' % self.dealcount)
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
class R2H5(Bot):
    """Heikin-Ashi crossover strategy on 1m candles.

    Builds a double-smoothed Heikin-Ashi series, smooths open/close with a
    selectable moving-average variant at four lengths (fast/slow/trend/
    longtrend), and enters long/short on the long-trend open/close
    crossover.  Only the long-trend pair is actually traded; the shorter
    lengths are computed for logging/inspection.
    """

    # Moving-average variants selectable by index via `variant_type`.
    variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull, heikinashi]
    eval_time = None  # unused in this class

    def __init__(self):
        # Run the bot on the 1-minute bin.
        Bot.__init__(self, '1m')

    def options(self):
        # Hyperopt search space for the smoothing lengths.
        return {
            'fast_len': hp.quniform('fast_len', 1, 60, 1),
            'slow_len': hp.quniform('slow_len', 1, 240, 1),
        }

    def strategy(self, open, close, high, low, volume):
        """One tick: smooth Heikin-Ashi series and enter on HA crossover."""
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 2))
        # lot = 10
        logger.info('lot:%s' % lot)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        resolution = self.input(defval=1, title="resolution", type=int)
        variant_type = self.input(defval=5, title="variant_type", type=int)  # index into self.variants (5 == ssma)
        basis_len = self.input(defval=19, title="basis_len", type=int)  # NOTE(review): unused below
        logger.info('price:%s\n' % price)
        # Earlier length experiments, kept for reference:
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 21)
        # trend_len = self.input('slow_len', int, 55)
        # longtrend_len = self.input('slow_len', int, 233)
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 55)
        # trend_len = self.input('slow_len', int, 240)
        # longtrend_len = self.input('slow_len', int, 233)
        # NOTE(review): slow/trend/longtrend all read the 'slow_len' option,
        # so only the defaults differ — confirm the option keys are intended.
        fast_len = self.input('fast_len', int, 1)
        slow_len = self.input('slow_len', int, 30)
        trend_len = self.input('slow_len', int, 60)
        longtrend_len = self.input('slow_len', int, 120)
        logger.info('fast_len:%s' % fast_len)
        logger.info('slow_len:%s' % slow_len)
        logger.info('trend_len:%s' % trend_len)
        logger.info('longtrend_len:%s' % longtrend_len)
        # for various minutes source
        source = self.exchange.security(str(resolution) + 'm')
        hadf = heikinashi(source)
        hadf_fast = heikinashi(hadf)  # Heikin-Ashi applied twice (double smoothing)
        ha_open_values = hadf_fast['HA_open'].values
        ha_close_values = hadf_fast['HA_close'].values
        variant = self.variants[variant_type]
        # Fast smoothing (logged only).
        ha_open_fast = variant(ha_open_values, fast_len)
        ha_close_fast = variant(ha_close_values, fast_len)
        haopen_fast = ha_open_fast[-1]
        haclose_fast = ha_close_fast[-1]
        haup_fast = haclose_fast > haopen_fast
        hadown_fast = haclose_fast <= haopen_fast
        logger.info('haup_fast:%s\n' % haup_fast)
        # Slow smoothing (logged only).
        ha_open_slow = variant(ha_open_values, slow_len)
        ha_close_slow = variant(ha_close_values, slow_len)
        haopen_slow = ha_open_slow[-1]
        haclose_slow = ha_close_slow[-1]
        haup_slow = haclose_slow > haopen_slow
        hadown_slow = haclose_slow <= haopen_slow
        logger.info('haup_slow:%s\n' % haup_slow)
        # Trend smoothing (logged only).
        ha_open_trend = variant(ha_open_values, trend_len)
        ha_close_trend = variant(ha_close_values, trend_len)
        haopen_trend = ha_open_trend[-1]
        haclose_trend = ha_close_trend[-1]
        haup_trend = haclose_trend > haopen_trend
        hadown_trend = haclose_trend <= haopen_trend
        logger.info('haup_trend:%s\n' % haup_trend)
        # Long-trend smoothing — this is the pair actually traded below.
        ha_open_longtrend = variant(ha_open_values, longtrend_len)
        ha_close_longtrend = variant(ha_close_values, longtrend_len)
        haopen_longtrend = ha_open_longtrend[-1]
        haclose_longtrend = ha_close_longtrend[-1]
        haup_longtrend = haclose_longtrend > haopen_longtrend
        hadown_longtrend = haclose_longtrend <= haopen_longtrend
        logger.info('haup_longtrend:%s\n' % haup_longtrend)
        # Commented-out per-resolution variants (1m / 4h / 1d sources):
        # resol_fast = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_fast = self.exchange.security(str(resol_fast) + 'm')  # see __init__
        # hadf_fast = heikinashi(source_fast, 1)
        # haopen_fast = hadf_fast['HA_open'][-1]
        # haclose_fast = hadf_fast['HA_close'][-1]
        # haup_fast = haclose_fast > haopen_fast
        # hadown_fast = haclose_fast <= haopen_fast
        # logger.info('haup_fast:%s\n' % haup_fast)
        # logger.info('hadown_fast:%s\n' % hadown_fast)
        # resol_slow = self.input(defval=4, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_slow = self.exchange.security(str(resol_slow) + 'h')  # see __init__
        # hadf_slow = heikinashi(source_slow, 1)
        # haopen_slow = hadf_slow['HA_open'][-1]
        # haclose_slow = hadf_slow['HA_close'][-1]
        # haup_slow = haclose_slow > haopen_slow
        # hadown_slow = haclose_slow <= haopen_slow
        # logger.info('haup_slow:%s\n' % haup_slow)
        # logger.info('hadown_slow:%s\n' % hadown_slow)
        # resol_trend = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_trend = self.exchange.security(str(resol_trend) + 'd')  # see __init__
        # hadf_trend = heikinashi(source_trend)
        # haopen_trend = hadf_trend['HA_open'][-1]
        # haclose_trend = hadf_trend['HA_close'][-1]
        # haup_trend = haclose_trend > haopen_trend
        # hadown_trend = haclose_trend <= haopen_trend
        # logger.info('haup_trend:%s\n' % haup_trend)
        # logger.info('hadown_trend:%s\n' % hadown_slow)
        " long "
        self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend, ha_open_longtrend))
        " short "
        self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend, ha_open_longtrend))
        # Commented-out 1h-based entry variant, kept for reference:
        # source_entry = self.exchange.security('1h')
        #
        # hadf_entry = heikinashi(source_entry)
        # hadf_trading = heikinashi(hadf_entry)
        #
        # ha_open_longtrend_entry = variant(ha_open_values, 2)  # 2h
        # ha_close_longtrend_entry = variant(ha_close_values, 2)
        #
        # haopen_longtrend_entry = ha_open_longtrend_entry[-1]
        # haclose_longtrend_entry = ha_close_longtrend_entry[-1]
        # haup_longtrend_entry = haclose_longtrend_entry > haopen_longtrend_entry
        # hadown_longtrend_entry = haclose_longtrend_entry <= haopen_longtrend_entry
        #
        # logger.info('based on 1h, 2h\n')
        # logger.info('haup_longtrend_enty:%s\n' % haup_longtrend_entry)
        # logger.info('hadown_longtrend_entry:%s\n' % hadown_longtrend_entry)
        #
        # " long "
        # self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend_entry, ha_open_longtrend_entry))
        # " short "
        # self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend_entry, ha_open_longtrend_entry))
# heikinashi
class Heikinashi(Bot):
    """Heikin-ashi trend-cross strategy.

    The 1-minute source is passed twice through the heikin-ashi transform,
    then the resulting HA open/close series are smoothed at several lengths.
    Only the longest ("longtrend") smoothing is actually traded: enter long
    when its smoothed HA close crosses over its smoothed HA open, short on
    the cross under.  The shorter lengths are computed and logged only.
    """
    # Candidate smoothing functions; ``variant_type`` indexes into this list.
    variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull, heikinashi]
    # Last-evaluated-bar slot (unused in this class; kept for Bot parity).
    eval_time = None
    def __init__(self):
        # Evaluate the strategy on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        # Hyperopt search space for the smoothing lengths.
        return {
            'fast_len': hp.quniform('fast_len', 1, 60, 1),
            'slow_len': hp.quniform('slow_len', 1, 240, 1),
        }
    def strategy(self, open, close, high, low, volume):
        """Enter on a cross of the long-trend smoothed HA close vs HA open."""
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 2))
        # lot = 10
        logger.info('lot:%s' % lot)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        resolution = self.input(defval=1, title="resolution", type=int)
        variant_type = self.input(defval=5, title="variant_type", type=int)
        basis_len = self.input(defval=19, title="basis_len", type=int)  # NOTE(review): registered but unused below
        logger.info('price:%s\n' % price)
        # Earlier length presets, kept for reference:
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 21)
        # trend_len = self.input('slow_len', int, 55)
        # longtrend_len = self.input('slow_len', int, 233)
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 55)
        # trend_len = self.input('slow_len', int, 240)
        # longtrend_len = self.input('slow_len', int, 233)
        fast_len = self.input('fast_len', int, 1)
        slow_len = self.input('slow_len', int, 30)
        trend_len = self.input('slow_len', int, 60)
        longtrend_len = self.input('slow_len', int, 120)
        logger.info('fast_len:%s' % fast_len)
        logger.info('slow_len:%s' % slow_len)
        logger.info('trend_len:%s' % trend_len)
        logger.info('longtrend_len:%s' % longtrend_len)
        # for various minutes source
        source = self.exchange.security(str(resolution) + 'm')
        # Double heikin-ashi transform, then smooth HA open/close per length.
        hadf = heikinashi(source)
        hadf_fast = heikinashi(hadf)
        ha_open_values = hadf_fast['HA_open'].values
        ha_close_values = hadf_fast['HA_close'].values
        variant = self.variants[variant_type]
        # For each length: close > open means "up" at that time scale.
        # Only longtrend is traded; the rest are logged for inspection.
        ha_open_fast = variant(ha_open_values, fast_len)
        ha_close_fast = variant(ha_close_values, fast_len)
        haopen_fast = ha_open_fast[-1]
        haclose_fast = ha_close_fast[-1]
        haup_fast = haclose_fast > haopen_fast
        hadown_fast = haclose_fast <= haopen_fast
        logger.info('haup_fast:%s\n' % haup_fast)
        ha_open_slow = variant(ha_open_values, slow_len)
        ha_close_slow = variant(ha_close_values, slow_len)
        haopen_slow = ha_open_slow[-1]
        haclose_slow = ha_close_slow[-1]
        haup_slow = haclose_slow > haopen_slow
        hadown_slow = haclose_slow <= haopen_slow
        logger.info('haup_slow:%s\n' % haup_slow)
        ha_open_trend = variant(ha_open_values, trend_len)
        ha_close_trend = variant(ha_close_values, trend_len)
        haopen_trend = ha_open_trend[-1]
        haclose_trend = ha_close_trend[-1]
        haup_trend = haclose_trend > haopen_trend
        hadown_trend = haclose_trend <= haopen_trend
        logger.info('haup_trend:%s\n' % haup_trend)
        ha_open_longtrend = variant(ha_open_values, longtrend_len)
        ha_close_longtrend = variant(ha_close_values, longtrend_len)
        haopen_longtrend = ha_open_longtrend[-1]
        haclose_longtrend = ha_close_longtrend[-1]
        haup_longtrend = haclose_longtrend > haopen_longtrend
        hadown_longtrend = haclose_longtrend <= haopen_longtrend
        logger.info('haup_longtrend:%s\n' % haup_longtrend)
        # Earlier multi-resolution variant, kept for reference:
        # resol_fast = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_fast = self.exchange.security(str(resol_fast) + 'm')  # see __init__
        # hadf_fast = heikinashi(source_fast, 1)
        # haopen_fast = hadf_fast['HA_open'][-1]
        # haclose_fast = hadf_fast['HA_close'][-1]
        # haup_fast = haclose_fast > haopen_fast
        # hadown_fast = haclose_fast <= haopen_fast
        # logger.info('haup_fast:%s\n' % haup_fast)
        # logger.info('hadown_fast:%s\n' % hadown_fast)
        # resol_slow = self.input(defval=4, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_slow = self.exchange.security(str(resol_slow) + 'h')  # see __init__
        # hadf_slow = heikinashi(source_slow, 1)
        # haopen_slow = hadf_slow['HA_open'][-1]
        # haclose_slow = hadf_slow['HA_close'][-1]
        # haup_slow = haclose_slow > haopen_slow
        # hadown_slow = haclose_slow <= haopen_slow
        # logger.info('haup_slow:%s\n' % haup_slow)
        # logger.info('hadown_slow:%s\n' % hadown_slow)
        # resol_trend = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_trend = self.exchange.security(str(resol_trend) + 'd')  # see __init__
        # hadf_trend = heikinashi(source_trend)
        # haopen_trend = hadf_trend['HA_open'][-1]
        # haclose_trend = hadf_trend['HA_close'][-1]
        # haup_trend = haclose_trend > haopen_trend
        # hadown_trend = haclose_trend <= haopen_trend
        # logger.info('haup_trend:%s\n' % haup_trend)
        # logger.info('hadown_trend:%s\n' % hadown_slow)
        " long "
        self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend, ha_open_longtrend))
        " short "
        self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend, ha_open_longtrend))
        # Earlier 1h-based entry variant, kept for reference:
        # source_entry = self.exchange.security('1h')
        #
        # hadf_entry = heikinashi(source_entry)
        # hadf_trading = heikinashi(hadf_entry)
        #
        # ha_open_longtrend_entry = variant(ha_open_values, 2)  # 2h
        # ha_close_longtrend_entry = variant(ha_close_values, 2)
        #
        # haopen_longtrend_entry = ha_open_longtrend_entry[-1]
        # haclose_longtrend_entry = ha_close_longtrend_entry[-1]
        # haup_longtrend_entry = haclose_longtrend_entry > haopen_longtrend_entry
        # hadown_longtrend_entry = haclose_longtrend_entry <= haopen_longtrend_entry
        #
        # logger.info('2h based on 1h\n')
        # logger.info('haup_longtrend_enty:%s\n' % haup_longtrend_entry)
        # logger.info('hadown_longtrend_entry:%s\n' % hadown_longtrend_entry)
        #
        # " long "
        # self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend_entry, ha_open_longtrend_entry))
        # " short "
        # self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend_entry, ha_open_longtrend_entry))
# OCC
class OCC(Bot):
    """Open/Close Cross (OCC) strategy.

    The open and close series of a resampled source are smoothed with a
    selectable moving-average variant; their latest values form a band.
    A short SMA of the close entering below/above that band triggers a
    long/short mean-reversion entry.  All positions are closed when the
    smoothed open/close divergence stays above a threshold for two
    consecutive bars.
    """
    # Candidate smoothing functions; ``variant_type`` indexes into this list.
    variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull]
    # Timestamp of the last evaluated bar; prevents re-processing the same bar.
    eval_time = None
    def __init__(self):
        # Evaluate the strategy on 1-minute candles.
        Bot.__init__(self, '1m')
    def ohlcv_len(self):
        # Number of bars of history required by the indicators.
        return 15 * 30
    def options(self):
        # Hyperopt search space.
        return {
            'variant_type': hp.quniform('variant_type', 0, len(self.variants) - 1, 1),
            'basis_len': hp.quniform('basis_len', 1, 30, 1),
            'resolution': hp.quniform('resolution', 1, 10, 1),
            'sma_len': hp.quniform('sma_len', 1, 15, 1),
            'div_threshold': hp.quniform('div_threshold', 1, 6, 0.1),
        }
    def strategy(self, open, close, high, low, volume):
        """Evaluate the newest resampled bar and place entries/exits."""
        lot = self.exchange.get_lot()
        variant_type = self.input(defval=5, title="variant_type", type=int)
        basis_len = self.input(defval=19, title="basis_len", type=int)
        resolution = self.input(defval=2, title="resolution", type=int)
        sma_len = self.input(defval=9, title="sma_len", type=int)
        div_threshold = self.input(defval=3.0, title="div_threshold", type=float)
        source = self.exchange.security(str(resolution) + 'm')
        # Skip if this bar was already evaluated.
        if self.eval_time is not None and \
                self.eval_time == source.iloc[-1].name:
            return
        series_open = source['open'].values
        series_close = source['close'].values
        variant = self.variants[variant_type]
        # Smoothed open/close; their latest values bound the band.
        val_open = variant(series_open, basis_len)
        val_close = variant(series_close, basis_len)
        if val_open[-1] > val_close[-1]:
            high_val = val_open[-1]
            low_val = val_close[-1]
        else:
            high_val = val_close[-1]
            low_val = val_open[-1]
        sma_val = sma(close, sma_len)
        self.exchange.plot('val_open', val_open[-1], 'b')
        self.exchange.plot('val_close', val_close[-1], 'r')
        logger.info("occ:sma_val[-1]:" + str(sma_val[-1]))
        logger.info("occ:low_val:" + str(low_val))
        logger.info("occ:high_val:" + str(high_val))
        logger.info("lot:" + str(lot))
        logger.info("------------")
        # Mean-reversion entries when the fast SMA leaves the band; the band
        # edge (rounded away from the entry) acts as the stop.
        self.exchange.entry("Long", True, lot, stop=math.floor(low_val), when=(sma_val[-1] < low_val))
        self.exchange.entry("Short", False, lot, stop=math.ceil(high_val), when=(sma_val[-1] > high_val))
        open_close_div = sma(numpy.abs(val_open - val_close), sma_len)
        # Close everything once the smoothed open/close divergence exceeded the
        # threshold on each of the last two bars.
        # NOTE(review): the original expression was
        #   open_close_div[-2] > div_threshold < open_close_div[-2]
        # a chained comparison that collapses to open_close_div[-2] > div_threshold
        # (both halves test the same element).  It may have been intended to
        # reference a third bar (e.g. open_close_div[-3]) — confirm intent.
        if open_close_div[-1] > div_threshold and \
                open_close_div[-2] > div_threshold:
            self.exchange.close_all()
        self.eval_time = source.iloc[-1].name
# TradingView
class TV(Bot):
    """TradingView alert strategy.

    Subscribes to a Gmail account and converts incoming TradingView alert
    e-mails into market orders: a subject containing "buy" opens a long,
    "sell" opens a short, "exit" flattens.  Only live-trade and stub modes
    are supported; hyperopt and backtest are rejected in :meth:`run`.
    """
    # Gmail subscription delivering the TradingView alert mails.
    subscriber = None
    def __init__(self):
        Bot.__init__(self, '1m')
        user_id = os.environ.get("GMAIL_ADDRESS")
        if user_id is None:
            raise Exception("Please set GMAIL_ADDRESS into env to use Trading View Strategy.")
        self.subscriber = GmailSub(user_id)
        # Only accept mails from the configured sender address.
        self.subscriber.set_from_address('<EMAIL>')
    def __on_message(self, messages):
        """Extract the alert subject from each Gmail message and act on it."""
        for message in messages:
            if 'payload' not in message:
                continue
            if 'headers' not in message['payload']:
                continue
            subject_list = [header['value']
                            for header in message['payload']['headers'] if header['name'] == 'Subject']
            if len(subject_list) == 0:
                continue
            subject = subject_list[0]
            # TradingView alert subjects carry this (Japanese) prefix; the
            # remainder of the subject is the action keyword.
            if subject.startswith('TradingViewアラート:'):
                action = subject.replace('TradingViewアラート:', '')
                self.__action(action)
    def __action(self, action):
        """Map an alert keyword (buy/sell/exit) to an exchange operation."""
        lot = self.exchange.get_lot()
        if re.search('buy', action, re.IGNORECASE):
            self.exchange.entry('Long', True, lot)
        elif re.search('sell', action, re.IGNORECASE):
            # Fix: a "sell" alert must open a short (long=False).  The original
            # passed True here, which opened a *long* position on a sell signal
            # — every other strategy in this file uses entry("Short", False, ...).
            self.exchange.entry('Short', False, lot)
        elif re.search('exit', action, re.IGNORECASE):
            self.exchange.close_all()
    def run(self):
        """Validate the run mode, create the exchange, and start listening."""
        if self.hyperopt:
            # Fix: "dose" -> "does" typo in the original message.
            raise Exception("Trading View Strategy does not support hyperopt Mode.")
        elif self.back_test:
            raise Exception("Trading View Strategy does not support backtest Mode.")
        elif self.stub_test:
            self.exchange = BitMexStub()
            logger.info(f"Bot Mode : Stub")
        else:
            self.exchange = BitMex(demo=self.test_net)
            logger.info(f"Bot Mode : Trade")
        logger.info(f"Starting Bot")
        logger.info(f"Strategy : {type(self).__name__}")
        logger.info(f"Resolution : {self.resolution()}")
        logger.info(f"Balance : {self.exchange.get_balance()}")
        notify(f"Starting Bot\n"
               f"Strategy : {type(self).__name__}\n"
               f"Resolution : {self.resolution()}\n"
               f"Balance : {self.exchange.get_balance()/100000000} XBT")
        self.subscriber.on_message(self.__on_message)
    def stop(self):
        """Stop listening for alert mails."""
        self.subscriber.stop()
# サンプル戦略
class Sample(Bot):
    """Toy strategy: each evaluation flips a coin and enters long or short."""

    def __init__(self):
        # First argument: the candle width the strategy runs on (1 minute).
        Bot.__init__(self, '1m')

    def options(self):
        # No tunable hyperparameters.
        return {}

    def strategy(self, open, close, high, low, volume):
        """Randomly open a long or a short sized at lot/1000."""
        lot = self.exchange.get_lot()
        go_long = random.randrange(2) == 0
        size = round(lot / 1000)
        if go_long:
            self.exchange.entry("Long", True, size)
            logger.info(f"Trade:Long")
        else:
            self.exchange.entry("Short", False, size)
            logger.info(f"Trade:Short")
        print(lot)
class Cross(Bot):
    """Classic SMA crossover strategy on 1-minute candles."""

    def __init__(self):
        # Evaluate on 1-minute bars.
        Bot.__init__(self, '1m')

    def options(self):
        # Hyperopt search space for the two moving-average lengths.
        return {
            'fast_len': hp.quniform('fast_len', 1, 30, 1),
            'slow_len': hp.quniform('slow_len', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        """Enter long on a golden cross and short on a dead cross."""
        lot = self.exchange.get_lot()
        fast_period = self.input('fast_len', int, 9)
        slow_period = self.input('slow_len', int, 16)
        fast_ma = sma(close, fast_period)
        slow_ma = sma(close, slow_period)
        # Golden cross: fast SMA crosses above slow SMA.
        if crossover(fast_ma, slow_ma):
            self.exchange.entry("Long", True, lot)
        # Dead cross: fast SMA crosses below slow SMA.
        if crossunder(fast_ma, slow_ma):
            self.exchange.entry("Short", False, lot)
class Fibo2(Bot):
    """Fibonacci-extension grid strategy.

    The latest bar of a resampled series defines a range low (``fb0``) and
    high (``fb100``).  Fibonacci extensions above and below the range form a
    ladder of resting limit entries; once price pierces an outer level and
    that level's entry order is gone (i.e. filled), a matching take-profit
    "watcher" order (``*_w``) is parked one level back toward the range.
    """
    # Balance captured at import time; used only for PnL logging.
    prebalance = BitMex(threading=False).get_balance()
    start = 0      # number of strategy() invocations so far
    pre_fb0 = 0    # previous window's low, for new-window detection
    pre_fb100 = 0  # previous window's high
    idx = 0        # order-id suffix, bumped on every new window
    def __init__(self):
        # Evaluate the strategy on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        # Hyperopt search space.
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
        }
    def strategy(self, open, close, high, low, volume):
        """Maintain the grid of limit orders for the current range window."""
        self.start += 1
        flg_changed_timezone = False
        lot = self.exchange.get_lot()
        # for test lot
        lot = int(round(lot / 50))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        # Resampled source; change defval to switch timeframe (e.g. 5 -> 5 minutes,
        # 1 + 'h' for hourly, 1 + 'd' for daily).
        resolution = self.input(defval=10, title="resolution", type=int)
        source = self.exchange.security(str(resolution) + 'm')  # compare with __init__ base timeframe
        series_high = source['high'].values
        series_low = source['low'].values
        # Range of the latest resampled bar (for 1h/1D also change resolution).
        fb100 = last(highest(series_high, 1))
        fb0 = last(lowest(series_low, 1))
        logger.info('resolution: %s' % resolution)
        logger.info('fb100_resol: %s' % fb100)
        logger.info('self.pre_fb100: %s' % self.pre_fb100)
        logger.info('fb0_resol: %s' % fb0)
        logger.info('self.pre_fb0: %s' % self.pre_fb0)
        # Fibonacci extensions above the range ...
        fb262 = math.ceil((fb100 - fb0) * 1.628 + fb100)
        fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100)
        fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100)
        # ... retracements inside it ...
        fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
        fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
        fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
        # ... and extensions below it.
        fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382)
        fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00)
        fb0162 = math.ceil(fb0 - (fb100 - fb0) * 1.60)
        qty = bitmex.get_position_size()
        logger.info('current position qty: %s' % qty)
        # Profit/loss assessment.  qL0/qS100 size the range-edge entries so a
        # profitable position would be flipped at the opposite edge; the order
        # lines that used them are currently disabled, values kept for re-enabling.
        longstatus = bitmex.get_position_avg_price() - fb0
        shortstatus = bitmex.get_position_avg_price() - fb100
        if bitmex.get_whichpositon() == 'LONG' and longstatus > 0:
            qL0 = lot * 1
            qS100 = abs(qty) + lot * 1
        elif bitmex.get_whichpositon() == 'SHORT' and shortstatus > 0:
            qL0 = abs(qty) + lot * 1
            qS100 = lot * 1
        else:
            qL0 = lot * 1
            qS100 = lot * 1
        # A new window started when both range edges moved.
        if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
            flg_changed_timezone = True
            logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
            logger.info('cancel_all orders because new time zone')
        # On startup and on every new window: cancel everything, flatten any
        # position via a limit order, and lay out the fresh grid of entries.
        if self.start == 1 or flg_changed_timezone:
            self.exchange.cancel_all()
            stopprice = price
            if bitmex.get_whichpositon() == 'LONG':
                if price > fb50:
                    stopprice = fb50
                    logger.info('fb50')
                else:
                    stopprice = fb0
                    logger.info('fb0')
                logger.info('CL000 stopprice: %s' % stopprice)
                logger.info('CL000 --> Clear Long')
                self.exchange.order("CL000", False, qty, limit=stopprice, post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                if price <= fb50:
                    stopprice = fb50
                    logger.info('fb50')
                else:
                    stopprice = fb100
                    logger.info('fb100')
                logger.info('CS000 stopprice: %s' % stopprice)
                self.exchange.order("CS000", True, qty, limit=stopprice, post_only=True)
            else:
                logger.info('else case when self.start == 1 or flg_changed_timezone: %s ' % bitmex.get_whichpositon())
            # Resting short entries above the range ...
            self.exchange.order("S262"+str(self.idx), False, lot*3, limit=fb262, post_only=True)
            self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, post_only=True)
            self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, post_only=True)
            # ... and long entries below it.
            self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, post_only=True)
            self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, post_only=True)
            self.exchange.order("L0162"+str(self.idx), True, lot*3, limit=fb0162, post_only=True)
        if flg_changed_timezone is True:
            self.idx += 1
        # Look up the grid orders; a missing entry order means it was filled.
        # (Several lookups are retained from experiments and currently unused.)
        L0 = bitmex.get_open_order("L0"+str(self.idx))
        L038 = bitmex.get_open_order("L038"+str(self.idx))
        L0100 = bitmex.get_open_order("L0100"+str(self.idx))
        L0162 = bitmex.get_open_order("L0162"+str(self.idx))
        S262 = bitmex.get_open_order("S262"+str(self.idx))
        S200 = bitmex.get_open_order("S200"+str(self.idx))
        S138 = bitmex.get_open_order("S138"+str(self.idx))
        S100 = bitmex.get_open_order("S100"+str(self.idx))
        S262_w = bitmex.get_open_order("S262_w"+str(self.idx))
        S200_w = bitmex.get_open_order("S200_w"+str(self.idx))
        S138_w = bitmex.get_open_order("S138_w"+str(self.idx))
        S100_w = bitmex.get_open_order("S100_w"+str(self.idx))
        L0_w = bitmex.get_open_order("L0_w"+str(self.idx))
        L038_w = bitmex.get_open_order("L038_w"+str(self.idx))
        L0100_w = bitmex.get_open_order("L0100_w"+str(self.idx))
        L0162_w = bitmex.get_open_order("L0162_w"+str(self.idx))
        # Take-profit watchers: once price pierced a level and that level's
        # entry order is gone (filled), park the exit one level back.
        if price <= fb038 and L038 is None:
            self.exchange.order("L038_w"+str(self.idx), False, lot*1, limit=fb0, post_only=True)
            logger.info('price <= fb038: %s' % fb038)
        if price <= fb0100 and L0100 is None:
            self.exchange.order("L0100_w"+str(self.idx), False, lot*1, limit=fb038, post_only=True)
            logger.info('price <= fb0100: %s' % fb0100)
        if price <= fb0162 and L0162 is None:
            # Fix: this branch originally required "L0162 is not None", re-used
            # the "L0100_w" order id, and targeted fb100 — inconsistent with
            # every other watcher branch and with the L0162_w lookup above.
            # Park the fb0162 take-profit one level back at fb0100.
            self.exchange.order("L0162_w"+str(self.idx), False, lot*2, limit=fb0100, post_only=True)
            logger.info('price <= fb0162: %s' % fb0162)
        if price >= fb138 and S138 is None:
            self.exchange.order("S138_w"+str(self.idx), True, lot*1, limit=fb100, post_only=True)
            logger.info('price >= fb138: %s' % fb138)
        if price >= fb200 and S200 is None:
            self.exchange.order("S200_w"+str(self.idx), True, lot*1, limit=fb138, post_only=True)
            logger.info('price >= fb200: %s' % fb200)
        if price >= fb262 and S262 is None:
            self.exchange.order("S262_w"+str(self.idx), True, lot*2, limit=fb200, post_only=True)
        # Remember this window's range for new-window detection.
        self.pre_fb0 = fb0
        self.pre_fb100 = fb100
        # Debug dump of the level ladder and PnL.
        logger.info('fb200: %s' % fb200)
        logger.info('fb138: %s' % fb138)
        logger.info('fb100: %s' % fb100)
        logger.info('fb62: %s' % fb62)
        logger.info('fb50: %s' % fb50)
        logger.info('fb38: %s' % fb38)
        logger.info('fb0: %s' % fb0)
        logger.info('fb038: %s' % fb038)
        logger.info('fb0100: %s' % fb0100)
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
class R2H5(Bot):
    """RSI(2) scalper with a heikin-ashi trend filter and an 'H5' trend mode.

    Modes (``self.stratey_mode`` — original spelling kept, it may be read
    elsewhere):

    * ``'R2'`` — scalp RSI(2) extremes when the smoothed heikin-ashi trends
      agree, manage open positions, and park LCatch/SCatch limit orders at
      the current hour's range edges.
    * ``'H5UP'`` / ``'H5DOWN'`` — entered when every trend length agrees and
      Williams %R confirms; ride the trend until the fast trends flip, then
      fall back to ``'R2'``.
    """
    # Balance captured at import time; used only for PnL logging.
    prebalance = BitMex(threading=False).get_balance()
    start = 0            # number of strategy() invocations
    pre_fb0 = 0          # previous hour-window low, for new-window detection
    pre_fb100 = 0        # previous hour-window high
    idx = 0              # per-window counter (only incremented here)
    stratey_mode = 'R2'  # or 'H5UP' / 'H5DOWN'
    variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull, heikinashi]
    eval_time = None
    def __init__(self):
        # Evaluate the strategy on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        # Hyperopt search space.
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
        }
    def strategy(self, open, close, high, low, volume):
        """Run one evaluation: compute indicators, then act per current mode."""
        logger.info('-------------------------strategy start-----------------------\n')
        lot = self.exchange.get_lot()
        # for test
        lot = int(round(lot / 2))
        lot = 10  # test override: fixed small size
        logger.info('lot:%s' % lot)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        logger.info('price:%s\n' % price)
        # SMAs (currently unused below; kept from experimentation).
        fast_len = self.input('fast_len', int, 20)
        slow_len = self.input('slow_len', int, 120)
        fast_sma = sma(close, fast_len)
        slow_sma = sma(close, slow_len)
        # RSI(2): the scalp trigger.
        rsi_len = self.input('rsi_len', int, 2)
        fast_rsi = rsi(close, rsi_len)
        rsi_stoplen = self.input('rsi_len', int, 5)
        fast_slow_rsi = rsi(close, rsi_len)
        # Log the last 20 RSI(2) values, flagging extremes.
        for i in range(1, 21):
            if fast_rsi[-i] >= 95:
                logger.info('fast_rsi2 ***** [%s]: %s >= 95' % (-i, fast_rsi[-i]))
            elif fast_rsi[-i] <= 5:
                logger.info('fast_rsi2 ***** [%s]: %s <= 5' % (-i, fast_rsi[-i]))
            else:
                logger.info('fast_rsi2 ***** [%s]: %s' % (-i, fast_rsi[-i]))
        # Williams %R over a long window: trend-mode confirmation.
        slow_willr = willr(high, low, close, period=960)
        logger.info('fast_willr ***** %s' % slow_willr[-1])
        # Heikin-ashi trend stack: the doubly-transformed HA open/close are
        # smoothed at six lengths; close > open means "up" at that scale.
        resolution = self.input(defval=1, title="resolution", type=int)
        variant_type = self.input(defval=5, title="variant_type", type=int)
        source = self.exchange.security(str(resolution) + 'm')
        fast_len = self.input('fast_len', int, 1)
        middle_len = self.input('middle_len', int, 5)
        slow_len = self.input('slow_len', int, 30)
        trend_len = self.input('slow_len', int, 60)
        longtrend_len = self.input('slow_len', int, 120)
        longlongtrend_len = self.input('slow_len', int, 240)
        hadf = heikinashi(source)
        hadf_fast = heikinashi(hadf)
        ha_open_values = hadf_fast['HA_open'].values
        ha_close_values = hadf_fast['HA_close'].values
        variant = self.variants[variant_type]
        ha_open_fast = variant(ha_open_values, fast_len)
        ha_close_fast = variant(ha_close_values, fast_len)
        haopen_fast = ha_open_fast[-1]
        haclose_fast = ha_close_fast[-1]
        haup_fast = haclose_fast > haopen_fast
        hadown_fast = haclose_fast <= haopen_fast
        logger.info('haup_fast:%s' % haup_fast)
        ha_open_middle = variant(ha_open_values, middle_len)
        ha_close_middle = variant(ha_close_values, middle_len)
        haopen_middle = ha_open_middle[-1]
        haclose_middle = ha_close_middle[-1]
        haup_middle = haclose_middle > haopen_middle
        hadown_middle = haclose_middle <= haopen_middle
        logger.info('haup_middle:%s' % haup_middle)
        ha_open_slow = variant(ha_open_values, slow_len)
        ha_close_slow = variant(ha_close_values, slow_len)
        haopen_slow = ha_open_slow[-1]
        haclose_slow = ha_close_slow[-1]
        haup_slow = haclose_slow > haopen_slow
        hadown_slow = haclose_slow <= haopen_slow
        logger.info('haup_slow:%s' % haup_slow)
        ha_open_trend = variant(ha_open_values, trend_len)
        ha_close_trend = variant(ha_close_values, trend_len)
        haopen_trend = ha_open_trend[-1]
        haclose_trend = ha_close_trend[-1]
        haup_trend = haclose_trend > haopen_trend
        hadown_trend = haclose_trend <= haopen_trend
        logger.info('haup_trend:%s' % haup_trend)
        ha_open_longtrend = variant(ha_open_values, longtrend_len)
        ha_close_longtrend = variant(ha_close_values, longtrend_len)
        haopen_longtrend = ha_open_longtrend[-1]
        haclose_longtrend = ha_close_longtrend[-1]
        haup_longtrend = haclose_longtrend > haopen_longtrend
        hadown_longtrend = haclose_longtrend <= haopen_longtrend
        logger.info('haup_longtrend:%s\n' % haup_longtrend)
        ha_open_longlongtrend = variant(ha_open_values, longlongtrend_len)
        ha_close_longlongtrend = variant(ha_close_values, longlongtrend_len)
        haopen_longlongtrend = ha_open_longlongtrend[-1]
        haclose_longlongtrend = ha_close_longlongtrend[-1]
        haup_longlongtrend = haclose_longlongtrend > haopen_longlongtrend
        hadown_longlongtrend = haclose_longlongtrend <= haopen_longlongtrend
        logger.info('haup_longlongtrend:%s\n' % haup_longlongtrend)
        self.start += 1
        flg_changed_timezone = False
        lot = self.exchange.get_lot()
        # for test lot
        lot = int(round(lot / 10))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        # Hourly range used for the catch orders (change defval / suffix to
        # switch timeframe; compare with __init__).
        resolution = self.input(defval=1, title="resolution", type=int)
        source = self.exchange.security(str(resolution) + 'h')
        series_high = source['high'].values
        series_low = source['low'].values
        fb100 = last(highest(series_high, 1))  # for 1h/1D also change resolution
        fb0 = last(lowest(series_low, 1))
        # Fibonacci retracements inside the hourly range.
        fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
        fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
        fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
        qty = bitmex.get_position_size()
        logger.info('current position qty: %s' % qty)
        # Profit/loss assessment (qL0/qS100 are currently unused — retained
        # from the disabled grid logic).
        longstatus = bitmex.get_position_avg_price() - fb0
        shortstatus = bitmex.get_position_avg_price() - fb100
        if bitmex.get_whichpositon() == 'LONG' and longstatus > 0:
            qL0 = lot * 1
            qS100 = abs(qty) + lot * 1
        elif bitmex.get_whichpositon() == 'SHORT' and shortstatus > 0:
            qL0 = abs(qty) + lot * 1
            qS100 = lot * 1
        else:
            qL0 = lot * 1
            qS100 = lot * 1
        # A new hourly window started when both range edges moved.
        if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
            flg_changed_timezone = True
            logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
            logger.info('cancel_all orders because new time zone')
        # for debug
        logger.info('fb100: %s' % fb100)
        logger.info('fb62: %s' % fb62)
        logger.info('fb38: %s' % fb38)
        logger.info('fb0: %s' % fb0)
        logger.info('bitmex.get_open_order(Long): %s ' % bitmex.get_open_order('Long'))
        logger.info('bitmex.get_open_order(Short): %s ' % bitmex.get_open_order('Short'))
        logger.info('bitmex.get_open_order(LCatch): %s ' % bitmex.get_open_order('LCatch'))
        logger.info('bitmex.get_open_order(SCatch): %s ' % bitmex.get_open_order('SCatch'))
        if self.stratey_mode == 'R2':
            logger.info('=============== stratey_mode = R2 ==============')
            if self.start == 1:
                self.exchange.cancel_all()
            # Normal logic: manage an open position, otherwise look for entries.
            if bitmex.get_whichpositon() == 'LONG':
                logger.info('---------------------->>LONG order status')
                logger.info('ordered price:%s' % bitmex.get_position_avg_price())
                if fast_rsi[-1] >= 95:  # take-profit stop logic
                    logger.info('>>fast_rsi[-1] >= 95')
                    self.exchange.order('StopLong', False, qty, limit=price + 0.5, post_only=True)
                elif hadown_slow and hadown_trend:  # stop-loss logic: slow trend flipped
                    logger.info('>>hadown_trend and hadown_longtrend Long -> Short : slow trend changed')
                    self.exchange.close_all()
                    self.exchange.cancel_all()
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('---------------------->>SHORT order status')
                logger.info('ordered price:%s' % bitmex.get_position_avg_price())
                if fast_rsi[-1] <= 5:  # take-profit stop logic
                    logger.info('>>fast_rsi[-1] <= 5')
                    self.exchange.order('StopShort', True, qty, limit=price - 0.5, post_only=True)
                elif haup_slow and haup_trend:  # stop-loss logic: slow trend flipped
                    logger.info('>>haup_trend and haup_longtrend Short -> Long : slow trend changed')
                    self.exchange.close_all()
                    self.exchange.cancel_all()
            else:
                logger.info('else: %s ' % bitmex.get_whichpositon())
                # No position: park a dip/spike "catch" limit order at the
                # hour's range edge, cancelling stale opposite-trend catches.
                if haup_trend and haup_longtrend:
                    logger.info('+++++++++++ LLLLLLong Trend +++++++++++++++')
                    if bitmex.get_open_order('LCatch') is not None:  # catch shooting logic
                        logger.info('There are LongOver orders , now changed trend, and cancel all')
                        self.exchange.cancel_all()
                    self.exchange.order('SCatch', True, lot, limit=fb0, post_only=True)
                elif hadown_trend and hadown_longtrend:
                    logger.info('- - - - - - SSSSSSort Trend - - - - - - -')
                    if bitmex.get_open_order('SCatch') is not None:  # catch shooting logic
                        logger.info('There are ShortOver orders , now changed trend, and cancel all')
                        self.exchange.cancel_all()
                    self.exchange.order('LCatch', False, lot, limit=fb100, post_only=True)
                # RSI(2) extreme in the direction of the trend -> scalp entry.
                if haup_slow and haup_trend and haup_longtrend and fast_rsi[-1] <= 5:  # entry long condition on Short Trend
                    logger.info('Now Short conditions: fast_rsi[-1] <= 5 0k --> %s' % fast_rsi[-1])
                    self.exchange.order('Long', True, lot, limit=price - 0.5, post_only=True)
                elif hadown_slow and hadown_trend and hadown_longtrend and fast_rsi[-1] >= 95:  # entry short condition on Long Trend
                    logger.info('Now Long conditions: fast_rsi[-1] >= 95 0k --> %s' % fast_rsi[-1])
                    self.exchange.order('Short', False, lot, limit=price + 0.5, post_only=True)
            # Mode switch: all six trend lengths agree plus %R confirmation.
            if (slow_willr[-1] < -30) and haup_fast and haup_middle and haup_slow and haup_trend and haup_longtrend and haup_longlongtrend:
                logger.info('in for H5UP')
                self.stratey_mode = 'H5UP'
                self.exchange.order('H5UP', True, lot)
                bitmex.cancel_all()
            elif (slow_willr[-1] > -70) and hadown_fast and hadown_middle and hadown_slow and hadown_trend and hadown_longtrend and hadown_longlongtrend:
                # Fix: the last term originally repeated hadown_longtrend; the
                # symmetric H5UP trigger above and the H5DOWN re-entry check
                # below both test the 240-length flag (…_longlongtrend).
                logger.info('in for H5DOWN')
                self.stratey_mode = 'H5DOWN'
                self.exchange.order('H5DOWN', False, lot)
                bitmex.cancel_all()
        elif self.stratey_mode == 'H5UP':
            logger.info('=============== stratey_mode = H5UP ==============')
            if bitmex.get_whichpositon() != 'LONG' and (slow_willr[-1] < -30) and haup_fast and haup_middle and haup_slow and haup_trend and haup_longtrend and haup_longlongtrend:
                self.exchange.order('H5UP', True, lot)
            elif bitmex.get_whichpositon() != 'LONG' and not haup_fast and not haup_middle:
                # back to R2 mode
                self.stratey_mode = 'R2'
                bitmex.cancel_all()
            # NOTE(review): `and` binds tighter than `or`, so the stop branch
            # below can fire even without a LONG position when the right-hand
            # trend clause is true — confirm whether the position check was
            # meant to gate both alternatives.
            elif bitmex.get_whichpositon() == 'LONG' and (slow_willr[-1] > -11) or (haup_fast and not haup_middle and not haup_slow and not haup_longtrend):
                # stop order and back to R2 mode
                self.exchange.order('H5UPStop', False, lot)
                self.stratey_mode = 'R2'
                bitmex.cancel_all()
        elif self.stratey_mode == 'H5DOWN':
            logger.info('=============== stratey_mode = H5DOWN ==============')
            if bitmex.get_whichpositon() != 'SHORT' and (slow_willr[-1] > -70) and (hadown_fast and hadown_middle and hadown_slow and hadown_trend and hadown_longtrend and hadown_longlongtrend):
                self.exchange.order('H5DOWN', False, lot)
            elif bitmex.get_whichpositon() != 'SHORT' and not hadown_fast and not hadown_middle:
                # back to R2 mode
                self.stratey_mode = 'R2'
                bitmex.cancel_all()
            # NOTE(review): same and/or precedence concern as the H5UP stop.
            elif bitmex.get_whichpositon() == 'SHORT' and (slow_willr[-1] < -91) or (not hadown_fast and not hadown_middle and not hadown_slow and not hadown_longtrend):
                # stop order and back to R2 mode
                self.exchange.order('H5DOWNStop', True, lot)
                self.stratey_mode = 'R2'
                bitmex.cancel_all()
        else:
            logger.info('=============== stratey_mode = Else ==============')
            # back to R2 mode
            self.stratey_mode = 'R2'
            bitmex.cancel_all()
        if flg_changed_timezone is True:
            self.idx += 1
        # save pre-timezone's fb0, fb100 values
        self.pre_fb0 = fb0
        self.pre_fb100 = fb100
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('-------------------------strategy end-----------------------\n')
# ---- NOTE(review): stray '|' removed (file-concatenation artifact; a lone '|' at module level is a SyntaxError). A second script with its own header and imports follows. ----
# coding: UTF-8
import os
import random
import math
import re
import numpy
import time
from hyperopt import hp
from src import highest, lowest, sma, crossover, crossunder, over, under, last, rci, rsi, double_ema, ema, triple_ema, wma, \
ssma, hull, logger, notify, atr, willr, bbands, supertrend, heikinashi
from src.bitmex import BitMex
from src.bitmex_stub import BitMexStub
from src.bot import Bot
from src.gmail_sub import GmailSub
import pandas as pd
class Will_Rci(Bot):
    """Williams %R confluence strategy gated by a long-period RCI filter.

    Five Williams %R lookbacks (55/144/610/4181/6785 bars) are combined
    into nine alternative "buy" and nine "sell" confluence patterns; the
    long RCI, rescaled into the %R range, restricts entries to oversold
    (buy) / overbought (sell) regimes.
    """
    # Position flags; assigned here but only consulted by the disabled
    # close-tracking logic at the bottom of strategy().
    inlong = False
    inshort = False
    def __init__(self):
        # Run on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        """Hyperopt search space for the three RCI lookback lengths."""
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 21, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 21, 34, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 34, 55, 1),
        }
    def strategy(self, open, close, high, low, volume):
        """Evaluate %R/RCI confluence on the latest bar and submit entries.

        Uses a fixed 100-contract lot; the exchange lot lookup is disabled
        below.
        """
        # logger.info('strategy start ctime : %s' % time.ctime())
        # start = time.time()  # timing scaffold, disabled
        # lot = self.exchange.get_lot()
        lot = 100
        itv_s = self.input('rcv_short_len', int, 21)
        itv_m = self.input('rcv_medium_len', int, 34)
        itv_l = self.input('rcv_long_len', int, 55)
        rci_s = rci(close, itv_s)
        rci_m = rci(close, itv_m)
        rci_l = rci(close, itv_l)
        # Rescale RCI from [-100, 100] to [-100, 0] so its thresholds are
        # directly comparable with Williams %R values.  Only rc (long RCI)
        # is used below; ra/rb are computed but unused.
        ra = rci_s[-1] / 2 - 50
        rb = rci_m[-1] / 2 - 50
        rc = rci_l[-1] / 2 - 50
        # willr for five willilams
        a = willr(high, low, close, period=55)
        b = willr(high, low, close, period=144)
        c = willr(high, low, close, period=610)
        x = willr(high, low, close, period=4181)
        y = willr(high, low, close, period=6785)
        # (debug dumps of the last few %R values, disabled)
        # for i in range(1, 5):
        #     logger.info('a [%s] *******: %s' % (-i, a[-i]))
        #     ... same for b, c, x, y ...
        # Nine oversold-confluence patterns across the five %R periods.
        # %R ranges -100..0: values near -100 are oversold, near 0 overbought.
        buycon1 = True if (a[-1] < -97 and (b[-1] < -97 or c[-1] < -97) and (x[-1] < -80 or y[-1] < -80)) else False
        buycon2 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -90) and (x[-1] > -35 or y[-1] > -35)) else False
        buycon3 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] > -70) and (x[-1] > -50 or y[-1] > -25)) else False
        buycon4 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -97) and (x[-1] > -50 or y[-1] > -50)) else False
        buycon5 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -75) and (x[-1] > -25 or y[-1] > -25)) else False
        buycon6 = True if ((b[-1] + 100) * (c[-1] + 100) == 0 and (c[-1] < -75 and x[-1] > -30 or y[-1] > -30)) else False
        buycon7 = True if ((b[-1] + 100) == 0 and (c[-1] > -30 and x[-1] > -30 or y[-1] > -30)) else False
        buycon8 = True if c[-1] < -97 else False
        buycon9 = True if a[-1] < -97 and b[-1] < -97 and c[-1] > -50 else False
        # Nine overbought-confluence patterns mirroring the buy set.
        # NOTE(review): sellcon6/7 test b*c == 0 directly, while buycon6/7
        # test (b+100)*(c+100) == 0 — presumably the overbought (0) vs
        # oversold (-100) ends of the %R range; confirm intent.
        sellcon1 = True if (a[-1] > -3 and (b[-1] > -3 or c[-1] > -3) and (x[-1] > -20 or y[-1] > -20)) else False
        sellcon2 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -10) and (x[-1] < -65 or y[-1] < -65)) else False
        sellcon3 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -30) and (x[-1] < -50 or y[-1] < -75)) else False
        sellcon4 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -3) and (x[-1] < -50 or y[-1] < -50)) else False
        sellcon5 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -25) and (x[-1] < -75 or y[-1] < -75)) else False
        sellcon6 = True if (((b[-1]) * (c[-1])) == 0 and c[-1] > -25 and (x[-1] < -70 or y[-1] < -70)) else False
        sellcon7 = True if ((b[-1]) == 0 and (c[-1] < -70 and x[-1] < -70 or y[-1] < -70)) else False
        sellcon8 = True if c[-1] > -3 else False
        sellcon9 = True if a[-1] > -3 and b[-1] > -3 and c[-1] < -50 else False
        # Long-RCI regime filter: buy only when deeply oversold, sell only
        # when near overbought.
        buyRCIfillerCon = True if rc < -80 else False
        sellRCIfillerCon = True if rc > -20 else False
        buyWillfilterCon = buycon1 or buycon2 or buycon3 or buycon4 or buycon5 or buycon6 or buycon7 or buycon8 or buycon9
        sellWillFilrerCon = sellcon1 or sellcon2 or sellcon3 or sellcon4 or sellcon5 or sellcon6 or sellcon7 or sellcon8 or sellcon9
        # set condition
        buyCons = buyWillfilterCon and buyRCIfillerCon
        sellCons = sellWillFilrerCon and sellRCIfillerCon
        buyCon = True if buyCons else False
        sellCon = True if sellCons else False
        # buyCloseCon = sellRCIfillerCon
        buyCloseCon = sellWillFilrerCon  # computed but unused (close logic disabled below)
        # sellCloseCon = buyRCIfillerCon
        sellCloseCon = buyWillfilterCon  # computed but unused (close logic disabled below)
        if buyCon:
            self.exchange.entry("Long", True, lot)
        if sellCon:
            self.exchange.entry("Short", False, lot)
        # (alternative entry/close tracking via inlong/inshort, disabled)
        # if buyCon:
        #     self.exchange.entry("Long", True, lot)
        #     self.inlong = True
        # if buyCloseCon and self.inlong:
        #     self.exchange.close_all()
        #     self.inlong = False
        # if sellCon:
        #     self.exchange.entry("Short", False, lot)
        #     self.inshort = True
        # if sellCloseCon and self.inlong:
        #     self.exchange.close_all()
        #     self.inshort = False
        # logger.info('all strategy processing time : %s' % str(time.time() - start))
# channel break out
class Doten(Bot):
    """Stop-and-reverse channel breakout ("doten") strategy.

    Places a long stop entry at the highest high and a short stop entry at
    the lowest low of the last ``length`` bars, each for half the lot, so
    a breakout in either direction flips the position.
    """
    def __init__(self):
        # Run on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        """Hyperopt search space for the channel lookback length.

        BUG FIX: hp.randint takes (label, upper) or (label, low, high);
        the original passed a spurious 4th argument, which raises
        TypeError as soon as the space is sampled.
        """
        return {
            'length': hp.randint('length', 1, 30),
        }
    def strategy(self, open, close, high, low, volume):
        """Refresh the breakout stop orders for the current channel."""
        lot = self.exchange.get_lot()
        length = self.input('length', int, 9)
        # Channel bounds over the lookback window.
        up = last(highest(high, length))
        dn = last(lowest(low, length))
        self.exchange.plot('up', up, 'b')
        self.exchange.plot('dn', dn, 'r')
        # Half the lot each way; whichever stop triggers first sets the
        # direction, and the opposite stop reverses it later.
        self.exchange.entry("Long", True, round(lot / 2), stop=up)
        self.exchange.entry("Short", False, round(lot / 2), stop=dn)
# sma cross
class SMA(Bot):
    """Classic dual simple-moving-average crossover strategy.

    Goes long on a golden cross (fast SMA crossing above slow) and short
    on a dead cross (fast SMA crossing below slow).
    """
    def __init__(self):
        # Work on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        """Hyperopt search space for the two SMA lengths."""
        return {
            'fast_len': hp.quniform('fast_len', 1, 30, 1),
            'slow_len': hp.quniform('slow_len', 1, 30, 1),
        }
    def strategy(self, open, close, high, low, volume):
        """Enter long/short on fast/slow SMA crossovers."""
        lot = self.exchange.get_lot()
        fast_period = self.input('fast_len', int, 9)
        slow_period = self.input('slow_len', int, 16)
        fast_line = sma(close, fast_period)
        slow_line = sma(close, slow_period)
        # Fast line crossing above the slow line -> bullish entry.
        if crossover(fast_line, slow_line):
            self.exchange.entry("Long", True, lot)
        # Fast line crossing below the slow line -> bearish entry.
        if crossunder(fast_line, slow_line):
            self.exchange.entry("Short", False, lot)
class YYY(Bot):
    """SMA crossover entries filtered by a long-term SMA trend direction.

    Enters long on a dead cross while the 1200-bar SMA slopes up, and
    short on a golden cross while it slopes down, using post-only limit
    orders half a tick away from the market price.
    """
    def __init__(self):
        # Run on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        """Hyperopt search space for the two crossover SMA lengths."""
        return {
            'fast_len': hp.quniform('fast_len', 1, 200, 1),
            'slow_len': hp.quniform('slow_len', 1, 600, 1),
        }
    def strategy(self, open, close, high, low, volume):
        """Evaluate trend + crossover and place post-only limit orders."""
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 10))
        lot = 100
        logger.info('lot:%s' % lot)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        fast_len = self.input('fast_len', int, 5)
        slow_len = self.input('slow_len', int, 18)
        # BUG FIX: this previously reused the 'slow_len' key, so the trend
        # SMA silently picked up the slow-SMA length instead of 1200.
        trend_len = self.input('trend_len', int, 1200)
        logger.info('fast_len:%s' % fast_len)
        logger.info('slow_len:%s' % slow_len)
        fast_sma = sma(close, fast_len)
        slow_sma = sma(close, slow_len)
        trend_sma = sma(close, trend_len)
        # Trend direction from the slope of the long SMA over 3/10 bars.
        # Note both flags can be true simultaneously (e.g. up vs 3 bars ago
        # but down vs 10 bars ago).
        uptrend = False
        downtrend = False
        if trend_sma[-1] > trend_sma[-3] or trend_sma[-1] > trend_sma[-10]:
            uptrend = True
        if trend_sma[-1] < trend_sma[-3] or trend_sma[-1] < trend_sma[-10]:
            downtrend = True
        golden_cross = crossover(fast_sma, slow_sma)
        dead_cross = crossunder(fast_sma, slow_sma)
        logger.info('golden_cross:%s' % golden_cross)
        logger.info('dead_cross:%s' % dead_cross)
        logger.info('price:%s' % price)
        logger.info('trend_sma:%s' % trend_sma[-1])
        logger.info('uptrend : %s' % str(uptrend))
        logger.info('downtrend : %s' % str(downtrend))
        # long: buy the dip (dead cross) inside an uptrend.
        if dead_cross and uptrend:
            self.exchange.order("Long", True, lot, limit=price-0.5, when=True, post_only=True)
            logger.info('in dead_cross and uptrend for long')
        if bitmex.get_whichpositon() == 'LONG':
            # Exit the long on the next golden cross (acts like a stop/take).
            self.exchange.order("Long", False, lot, limit=price + 0.5, when=golden_cross, post_only=True)  # similar stop function
        # short: sell the pop (golden cross) inside a downtrend.
        # NOTE(review): this branch uses entry() while the long branch uses
        # order() — presumably intentional, but confirm the asymmetry.
        if golden_cross and downtrend:
            logger.info('in golden_cross and uptrend for short')
            self.exchange.entry("Short", False, lot, limit=price+0.5, when=True, post_only=True)
        if bitmex.get_whichpositon() == 'SHORT':
            # Exit the short on the next dead cross.
            self.exchange.order("Short", True, lot, limit=price-0.5, stop=(price-0.5), when=dead_cross, post_only=True)
        logger.info('--------------------------------------------------')
# supertrend
class SuperTrend(Bot):
    """SuperTrend crossover strategy on 15-minute candles.

    Enters long when price crosses above the SuperTrend line and short
    when it crosses below, logging balance/PnL diagnostics each cycle.
    """
    # Balance snapshot taken at import time, used as the PnL baseline.
    prebalance = BitMex(threading=False).get_balance()
    # Number of strategy cycles executed (incremented every call).
    dealcount = 0
    def __init__(self):
        # Run on 15-minute candles.
        Bot.__init__(self, '15m')
    def options(self):
        """Hyperopt search space for the SuperTrend factor and ATR period.

        BUG FIX: hp.randint takes (label, upper) or (label, low, high);
        the original passed a spurious 4th argument, which raises
        TypeError as soon as the space is sampled.
        """
        return {
            'factor': hp.randint('factor', 1, 30),
            'period': hp.randint('period', 1, 30),
        }
    def strategy(self, open, close, high, low, volume):
        """Compute SuperTrend on the 15m series and trade its crossovers."""
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 100))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        factor = self.input('factor', int, 3)
        period = self.input('period', int, 7)
        atrvar = atr(high, low, close, period=period)
        # up = (high + low) / 2 - (factor * atr(high, low, close, period=period))
        # dn = (high + low) / 2 + (factor * atr(high, low, close, period=period))
        # logger.info('atrvar: %s' % atrvar[-1])
        resolution = self.input(defval=15, title="resolution", type=int)  # change defval to match the bar interval, e.g. 5 minutes -> 5
        source = self.exchange.security(str(resolution) + 'm')  # see __init__
        supertrenddf = supertrend(source, factor, period)
        logger.info('price:%s\n' % price)
        logger.info('supertrend value:%s' % supertrenddf['SuperTrend'][-1])
        logger.info('supertrend Upper Band:%s' % supertrenddf['Upper Band'][-1])
        logger.info('supertrend Lower Band:%s' % supertrenddf['Lower Band'][-1])
        logger.info('supertrenddf[Trend][-1]:%s' % supertrenddf['Trend'][-1])
        logger.info('supertrenddf[TSL][-1]:%s' % supertrenddf['TSL'][-1])
        logger.info('supertrenddf[ATR][-1]:%s' % supertrenddf['ATR'][-1])
        # Cross of close through the SuperTrend line, confirmed by the
        # latest close being on the new side of the line.
        longCondition_supertrend = crossover(close, supertrenddf['SuperTrend']) and close[-1] > supertrenddf['SuperTrend'][-1]
        shortCondition_supertrend = crossunder(close, supertrenddf['SuperTrend']) and close[-1] < supertrenddf['SuperTrend'][-1]
        if longCondition_supertrend:
            self.exchange.entry("Long", True, lot)
            logger.info('longCondition_supertrend:%s\n' % longCondition_supertrend)
        elif shortCondition_supertrend:
            self.exchange.entry("Short", False, lot)
            logger.info('shortCondition_supertrend:%s\n' % shortCondition_supertrend)
        else:
            # self.exchange.close_all()
            logger.info('Condition_supertrend:%s\n' % 'else')
        # Diagnostics: cycle count and balance drift vs the import-time baseline.
        self.dealcount += 1
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('dealcount:%s' % self.dealcount)
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
class DoubleSuperRSI(Bot): # logic https: // stock79.tistory.com / 177
    """Double-SuperTrend + RSI(2)/RSI(50) mean-reversion strategy.

    Two SuperTrend lines (tight factor=3 and wide factor=20) define a
    channel; entries are placed at the channel centerline and managed with
    RSI(2) overbought/oversold add-ons and SMA-based exits.
    """
    # Balance snapshot taken at import time, used as the PnL baseline.
    prebalance = BitMex(threading=False).get_balance()
    # Number of strategy cycles executed (incremented every call).
    dealcount = 0
    def __init__(self):
        # Run on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        """Hyperopt search space for the RSI length.

        BUG FIX: hp.randint takes (label, upper) or (label, low, high);
        the original passed a spurious 4th argument, which raises
        TypeError as soon as the space is sampled.
        """
        return {
            'length': hp.randint('length', 1, 30),
        }
    def strategy(self, open, close, high, low, volume):
        """Place centerline entries and manage adds/exits per position side."""
        lot = self.exchange.get_lot()
        # for test lot
        lot = int(round(lot / 50))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        position_avg_price = bitmex.get_position_avg_price()
        # variants settings
        rsi2_len = self.input('length', int, 2)
        rsi50_len = self.input('length50', int, 50)
        rsi2 = rsi(close, rsi2_len)
        rsi50 = rsi(close, rsi50_len)
        factor = self.input('factor', int, 3)
        period = self.input('period', int, 7)
        factor2 = self.input('factor2', int, 20)
        period2 = self.input('period2', int, 7)
        resolution = self.input(defval=1, title="resolution", type=int)  # change defval to match the bar interval, e.g. 5 minutes -> 5
        source = self.exchange.security(str(resolution) + 'm')  # see __init__
        supertrenddf = supertrend(source, factor, period)
        supertrenddf2 = supertrend(source, factor2, period2)
        print('supertrenddf:%s' % supertrenddf)
        print('supertrenddf2:%s' % supertrenddf2)
        fast_len = self.input('fast_len', int, 5)
        half_len = self.input('half_len', int, 50)
        slow_len = self.input('slow_len', int, 200)
        fast_sma = sma(close, fast_len)
        half_sma = sma(close, half_len)
        slow_sma = sma(close, slow_len)
        # conditions
        sma_long = over(fast_sma[-1], slow_sma[-1])
        sma_short = under(fast_sma[-1], slow_sma[-1])
        super_long = over(close[-1], supertrenddf['SuperTrend'][-1])
        super_short = under(close[-1], supertrenddf['SuperTrend'][-1])
        supertrendtrend = supertrenddf['Trend'][-1]
        super2_long = over(close[-1], supertrenddf2['SuperTrend'][-1])
        super2_short = under(close[-1], supertrenddf2['SuperTrend'][-1])
        supertrendtrend2 = supertrenddf2['Trend'][-1]
        # Midpoint between the tight and wide SuperTrend lines; used as the
        # limit price for fresh entries.
        super_centerline = (supertrenddf['SuperTrend'][-1] + supertrenddf2['SuperTrend'][-1])/2
        rsi2_overbought = over(rsi2[-1], 95)
        rsi2_oversold = under(rsi2[-1], 5)
        rsi50_over = over(rsi50[-1], 50)
        rsi50_under = under(rsi50[-1], 50)
        price_under = under(price, half_sma[-1])
        price_over = over(price, half_sma[-1])
        half_before = over(close[-1], half_sma[-1])
        half_after = under(close[-1], half_sma[-1])
        # show infomations
        logger.info('price: %s' % price)
        logger.info('fast_sma[-1]: %s' % fast_sma[-1])
        logger.info('slow_sma[-1]: %s' % slow_sma[-1])
        logger.info('sma_long: %s' % sma_long)
        logger.info('sma_short: %s' % sma_short)
        logger.info('super_long: %s' % super_long)
        logger.info('super_short: %s' % super_short)
        logger.info('sma_trend: %s\n' % supertrendtrend)
        logger.info('supertrend value:%s' % supertrenddf['SuperTrend'][-1])
        logger.info('supertrend Upper Band:%s' % supertrenddf['Upper Band'][-1])
        logger.info('supertrend Lower Band:%s' % supertrenddf['Lower Band'][-1])
        logger.info('supertrenddf[Trend][-1]:%s' % supertrenddf['Trend'][-1])
        logger.info('supertrenddf[TSL][-1]:%s' % supertrenddf['TSL'][-1])
        logger.info('supertrenddf[ATR][-1]:%s\n' % supertrenddf['ATR'][-1])
        logger.info('supertrend2 value:%s' % supertrenddf2['SuperTrend'][-1])
        logger.info('supertrend2 Upper Band:%s' % supertrenddf2['Upper Band'][-1])
        logger.info('supertrend2 Lower Band:%s' % supertrenddf2['Lower Band'][-1])
        logger.info('supertrenddf2[Trend][-1]:%s' % supertrenddf2['Trend'][-1])
        logger.info('supertrenddf2[TSL][-1]:%s' % supertrenddf2['TSL'][-1])
        logger.info('supertrenddf2[ATR][-1]:%s\n' % supertrenddf2['ATR'][-1])
        logger.info('supertrenddf[SuperTrend][-1]:%s + supertrenddf2[SuperTrend][-1]:%s ' % (supertrenddf['SuperTrend'][-1], supertrenddf2['SuperTrend'][-1]))
        logger.info('super_centerline: %s' % super_centerline)
        logger.info('rsi2[-1 ]%s' % rsi2[-1])
        logger.info('rsi50[-1 ]%s' % rsi50[-1])
        logger.info('rsi2_oversold: %s' % rsi2_oversold)
        logger.info('rsi2_overbought: %s' % rsi2_overbought)
        logger.info('price_under: %s' % price_under)
        logger.info('price_over: %s' % price_over)
        logger.info('half_before: %s' % half_before)
        logger.info('half_after: %s' % half_after)
        logger.info('get_whichpositon(): %s' % bitmex.get_whichpositon())
        logger.info('position_size(): %s' % bitmex.get_position_size())
        # entry
        if super2_long:
            logger.info('+ + + + + LONG + + + + + LONG + + + + + LONG + + + + + ')
            if bitmex.get_whichpositon() is None: # and (not supertrendtrend and supertrendtrend2) and rsi2_overbought:
                logger.info('postion condition > None')
                if bitmex.get_open_order('Short'):
                    self.exchange.cancel('Short')
                self.exchange.entry("Long", True, lot, limit=math.ceil(super_centerline), post_only=True)
            elif bitmex.get_whichpositon() == 'LONG':
                logger.info('postion condition > LONG')
                if supertrendtrend and supertrendtrend2 and rsi2_oversold: # closing
                    logger.info('postion condition > LONG > Closing')
                    self.exchange.order("Long", False, abs(bitmex.get_position_size()), limit=price + 2.5, post_only=True)
                elif rsi2_overbought: # add more entry
                    logger.info('postion condition > LONG > Rsi2 overbout')
                    self.exchange.entry("LongAdd", True, lot, limit=price - 0.5, post_only=True)
                elif super_short: # stop loss
                    # NOTE(review): despite the "stop loss" label, these
                    # entry() calls buy MORE while long — confirm whether
                    # a close was intended here.
                    logger.info('postion condition > LONG > super_short(stop loss)')
                    self.exchange.entry("Long", True, lot)
                    self.exchange.entry("LongAdd", True, lot)
                else:
                    logger.info('postion condition > LONG > else')
                    self.exchange.order("Long", False, abs(bitmex.get_position_size()), limit=price + 10, post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('cancel SHORT on long trend')
                # self.exchange.cancel_all()
                # close_all() is issued twice — presumably a deliberate retry;
                # confirm it is idempotent on the exchange side.
                self.exchange.close_all()
                self.exchange.close_all()
            else:
                logger.info('Super Shot --> Else')
        if super2_short:
            logger.info('- - - - - SHORT - - - - - SHORT - - - - - SHORT - - - - - ')
            if bitmex.get_whichpositon() is None: #and rsi2_overbought and price_over:
                logger.info('postion condition > None')
                if bitmex.get_open_order('Long'):
                    self.exchange.cancel('Long')
                self.exchange.entry("Short", False, lot, limit=math.floor(super_centerline), post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('postion condition > SHORT')
                if price_under: # closing
                    logger.info('postion condition > SHORT > price_under(closing)')
                    self.exchange.order("Short", True, abs(bitmex.get_position_size()), limit=price-2.5, when=price_under, post_only=True)
                elif rsi2_oversold: # add more entry
                    logger.info('postion condition > SHORT > rsi2_oversold(add more entry)')
                    self.exchange.entry("ShortAdd", False, lot, limit=price - 0.5, post_only=True)
                elif super_long: # stop loss
                    # NOTE(review): these entries use side=True (buy) on the
                    # "Short"/"ShortAdd" ids — confirm this is meant to buy
                    # back the short rather than open a new one.
                    logger.info('postion condition > SHORT > super_short(stop loss)')
                    self.exchange.entry("Short", True, lot)
                    self.exchange.entry("ShortAdd", True, lot)
                else:
                    logger.info('postion condition > SHORT > else')
                    self.exchange.order("Short", True, abs(bitmex.get_position_size()), limit=price - 10, post_only=True)
            elif bitmex.get_whichpositon() == 'LONG':
                logger.info('cancel LONG on short trend')
                self.exchange.close_all()
            else:
                logger.info('Super Shot --> Else')
        # Diagnostics: cycle count and balance drift vs the import-time baseline.
        self.dealcount += 1
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('dealcount:%s' % self.dealcount)
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
# <NAME> and rci
class Willr(Bot):
    """Williams %R confluence strategy with position-state tracking.

    Same five-%R / nine-pattern confluence logic as Will_Rci, but entries
    are post-only limit orders placed half a tick away from the market,
    positions are closed on the opposite confluence signal, and position
    state is re-synchronized from the exchange each cycle.
    """
    # Balance snapshot taken at import time, used as the PnL baseline.
    prebalance = BitMex(threading=False).get_balance()
    # Strategy-cycle counter (1 on the first call; triggers cancel_all once).
    start = 0
    # Previous timezone's fibo levels; kept for parity with WillnFibo but
    # never written here, so flg_changed_timezone stays False.
    pre_fb0 = 0
    pre_fb100 = 0
    # Position flags, re-synced from the exchange every cycle.
    inlong = False
    inshort = False
    firstlong = False
    firstshort = False
    def __init__(self):
        # Run on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        """Hyperopt search space for the three RCI lookback lengths."""
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 21, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 21, 34, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 34, 55, 1),
        }
    def strategy(self, open, close, high, low, volume):
        """Evaluate confluence, sync position state, and manage orders."""
        start = time.time()  # record wall-clock start for the timing log below
        self.start += 1
        flg_changed_timezone = False  # never set True here (see class attrs)
        # lot = self.exchange.get_lot()
        # for test lot
        # lot = int(round(lot / 20))
        lot = 100
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        itv_s = self.input('rcv_short_len', int, 21)
        itv_m = self.input('rcv_medium_len', int, 34)
        itv_l = self.input('rcv_long_len', int, 55)
        rci_s = rci(close, itv_s)
        rci_m = rci(close, itv_m)
        rci_l = rci(close, itv_l)
        # Rescale RCI from [-100, 100] to [-100, 0] to match the %R range;
        # only rc (long RCI) is used below.
        ra = rci_s[-1] / 2 - 50
        rb = rci_m[-1] / 2 - 50
        rc = rci_l[-1] / 2 - 50
        # willr for five willilams
        a = willr(high, low, close, period=55)
        b = willr(high, low, close, period=144)
        c = willr(high, low, close, period=610)
        x = willr(high, low, close, period=4181)
        y = willr(high, low, close, period=6785)
        logger.info('-----------------price / lot ----------------')
        logger.info('price:%s' % price)
        logger.info('lot:%s' % str(lot))
        logger.info('-----------------o h l c v ----------------')
        logger.info('open:%s' % open[-1])
        logger.info('high:%s' % high[-1])
        logger.info('low:%s' % low[-1])
        logger.info('close:%s' % close[-1])
        logger.info('volume:%s' % volume[-1])
        logger.info('-----------------a b c x y ----------------')
        logger.info('willr_a : %s' % a[-1])
        logger.info('willr_b : %s' % b[-1])
        logger.info('willr_c : %s' % c[-1])
        logger.info('willr_x : %s' % x[-1])
        logger.info('willr_y : %s' % y[-1])
        logger.info('willr_rc : %s' % rc)
        # Nine oversold-confluence patterns across the five %R periods
        # (%R ranges -100..0; near -100 oversold, near 0 overbought).
        buycon1 = True if (a[-1] < -97 and (b[-1] < -97 or c[-1] < -97) and (x[-1] < -80 or y[-1] < -80)) else False
        buycon2 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -90) and (x[-1] > -35 or y[-1] > -35)) else False
        buycon3 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] > -70) and (x[-1] > -50 or y[-1] > -25)) else False
        buycon4 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -97) and (x[-1] > -50 or y[-1] > -50)) else False
        buycon5 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -75) and (x[-1] > -25 or y[-1] > -25)) else False
        buycon6 = True if ((b[-1] + 100) * (c[-1] + 100) == 0 and (c[-1] < -75 and x[-1] > -30 or y[-1] > -30)) else False
        buycon7 = True if ((b[-1] + 100) == 0 and (c[-1] > -30 and x[-1] > -30 or y[-1] > -30)) else False
        buycon8 = True if c[-1] < -97 else False
        buycon9 = True if a[-1] < -97 and b[-1] < -97 and c[-1] > -50 else False
        # Nine overbought-confluence patterns mirroring the buy set.
        sellcon1 = True if (a[-1] > -3 and (b[-1] > -3 or c[-1] > -3) and (x[-1] > -20 or y[-1] > -20)) else False
        sellcon2 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -10) and (x[-1] < -65 or y[-1] < -65)) else False
        sellcon3 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -30) and (x[-1] < -50 or y[-1] < -75)) else False
        sellcon4 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -3) and (x[-1] < -50 or y[-1] < -50)) else False
        sellcon5 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -25) and (x[-1] < -75 or y[-1] < -75)) else False
        sellcon6 = True if (((b[-1]) * (c[-1])) == 0 and c[-1] > -25 and (x[-1] < -70 or y[-1] < -70)) else False
        sellcon7 = True if ((b[-1]) == 0 and (c[-1] < -70 and x[-1] < -70 or y[-1] < -70)) else False
        sellcon8 = True if c[-1] > -3 else False
        sellcon9 = True if a[-1] > -3 and b[-1] > -3 and c[-1] < -50 else False
        # buyCloseCon = True if a[-1] > -10 else False
        # sellCloseCon = True if a[-1] < -90 else False
        # Long-RCI regime filter.
        buyRCIfillerCon = True if rc < -80 else False
        sellRCIfillerCon = True if rc > -20 else False
        buyWillfilterCon = buycon1 or buycon2 or buycon3 or buycon4 or buycon5 or buycon6 or buycon7 or buycon8 or buycon9
        sellWillFilrerCon = sellcon1 or sellcon2 or sellcon3 or sellcon4 or sellcon5 or sellcon6 or sellcon7 or sellcon8 or sellcon9
        # set condition
        buyCons = buyWillfilterCon and buyRCIfillerCon
        sellCons = sellWillFilrerCon and sellRCIfillerCon
        buyCon = True if buyCons else False
        sellCon = True if sellCons else False
        # buyCloseCon = sellRCIfillerCon
        buyCloseCon = sellWillFilrerCon
        # sellCloseCon = buyRCIfillerCon
        sellCloseCon = buyWillfilterCon
        logger.info('-----------------inlong / inshort ----------------')
        logger.info('inlong:%s' % self.inlong)
        logger.info('inshort:%s' % self.inshort)
        logger.info('-----------------buyCon / sellCon ----------------')
        logger.info('buyCon:%s' % buyCon)
        logger.info('sellCon:%s' % sellCon)
        logger.info('buyCloseCon:%s' % buyCloseCon)
        logger.info('sellCloseCon:%s' % sellCloseCon)
        logger.info('bitmex.get_whichpositon():%s' % bitmex.get_whichpositon())
        logger.info('bitmex.get_position_size():%s' % bitmex.get_position_size())
        # Re-sync the in-position flags from the live exchange position.
        if bitmex.get_position_size() > 0:
            logger.info('-- >> bitmex.get_position_size > 0 --')
            self.inlong = True
        elif bitmex.get_position_size() < 0:
            logger.info('-- >> bitmex.get_position_size < 0 --')
            self.inshort = True
        if self.start == 1:
            # First cycle: clear any stale orders left from a previous run.
            logger.info('-- self.start==1 --')
            self.exchange.cancel_all()
        elif (flg_changed_timezone):
            # Dead branch: flg_changed_timezone is never set True above.
            logger.info('-- (flg_changed_timezone')
            self.exchange.cancel_all()
        # init
        if bitmex.get_whichpositon() is None and (self.inlong is True or self.inshort is True):
            logger.info('-- (flg_changed_timezone >> init: inlone --> %s, inshort --> %s' % (self.inlong, self.inshort))
            self.inlong = False
            self.inshort = False
        else:
            logger.info('-- else and pass --')
            pass
        # Close the position on the opposite confluence signal.
        if (buyCloseCon) and (self.inlong):
            # self.exchange.close("Long")
            logger.info('-- (buyCloseCon) and (self.inlong) --')
            self.exchange.close_all()
            self.inlong = False
        if (sellCloseCon) and (self.inshort):
            # self.exchange.close("Short")
            logger.info('-- (sellCloseCon) and (self.inshort) --')
            self.exchange.close_all()
            self.inshort = False
        # Long entries: post-only limit half a tick below market.
        if (buyCon) and (not self.inlong):
            logger.info('if (buyCon) and (not self.inlong)::')
            if price <= close[-1]:
                logger.info('>> in +++ price <= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
                if bitmex.get_position_size() != 0:
                    # NOTE(review): if the open position is short, size*2 is
                    # negative here — confirm order() handles that as intended.
                    logger.info('-- bitmex.get_position_size() != 0 --')
                    self.exchange.order("Long", True, bitmex.get_position_size()*2, limit=price-0.5, post_only=True)
                else:
                    logger.info('-- bitmex.get_position_size() != 0 / else --')
                    self.exchange.order("Long", True, lot, limit=price-0.5, post_only=True)
            elif price < low[-1]:
                logger.info('-- price < low[-1] --')
                self.exchange.order("Long", True, lot, limit=price-0.5, post_only=True)
            else:
                pass
        # Short entries: post-only limit half a tick above market.
        if (sellCon) and (not self.inshort):
            # BUG FIX: log message previously said 'not self.inlong'.
            logger.info('if (sellCon) and (not self.inshort)::')
            if price >= close[-1]:
                logger.info('>> in +++ price >= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
                if bitmex.get_position_size() != 0:
                    logger.info('-- bitmex.get_position_size() != 0 --')
                    self.exchange.order("Short", False, bitmex.get_position_size()*2, limit=price+0.5, post_only=True)
                else:
                    logger.info('-- bitmex.get_position_size() != 0 / else --')
                    self.exchange.order("Short", False, lot, limit=price+0.5, post_only=True)
            elif price > high[-1]:
                logger.info('-- price > high[-1] --')
                # BUG FIX: this order was submitted with id "Long" (copy-paste
                # from the buy branch), which would replace the long order
                # instead of placing a short one.
                self.exchange.order("Short", False, lot, limit=price+0.5, post_only=True)
            else:
                pass
        # Diagnostics: balance drift vs the import-time baseline.
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('----------------- realised_pnl ---------')
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info("time2 : %s" % str(time.time() - start))
        logger.info('----------------- END ---------------- END ----------------')
# <NAME> and Fibo
class WillnFibo(Bot):
    """Combined Williams %R + Fibonacci strategy on 1-minute candles.

    Each tick it: recomputes the 1D high/low channel and the 1h fibo
    extremes, detects the start of a new 1h zone by comparing against the
    previous zone's extremes, mirrors the exchange position into
    ``inlong``/``inshort``, places fibo/channel orders on the first run or
    on a zone change, closes positions on Williams %R exit levels, and
    places scale-in limit orders when the composite Williams %R entry
    conditions fire.
    """
    # balance snapshot taken once, at class-definition (module import) time
    prebalance = BitMex(threading=False).get_balance()
    # number of strategy() invocations so far (1 on the first tick)
    start = 0
    # previous 1h zone's fibo extremes; both 0 until the first tick completes
    pre_fb0 = 0
    pre_fb100 = 0
    # local mirror of the exchange position direction
    inlong = False
    inshort = False
    # not referenced anywhere in this class (reserved/unused)
    firstlong = False
    firstshort = False

    def __init__(self):
        # run on the 1-minute bin
        Bot.__init__(self, '1m')

    def options(self):
        # hyperopt search space (used only when optimizing parameters)
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 5, 15, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 10, 20, 1),
        }

    def strategy(self, open, close, high, low, volume):
        start = time.time()  # record start time (for the time1/time2 logs)
        self.start += 1
        flg_changed_timezone = False
        # lot = self.exchange.get_lot()
        # # for test lot
        # # lot = int(round(lot / 20))
        lot = 100  # fixed order quantity (dynamic lot sizing commented out above)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        # channel breakout for 1D: up/dn are the last daily bar's extremes
        resolution_d = self.input(defval=1, title="resolution", type=int)
        source_d = self.exchange.security(str(resolution_d) + 'd')
        series_high_d = source_d['high'].values
        series_low_d = source_d['low'].values
        up = last(highest(series_high_d, 1))
        dn = last(lowest(series_low_d, 1))
        logger.info("time1 :%s" % str(time.time() - start))
        start = time.time()  # reset start time for the second timing log
        # self.exchange.entry("ChLong", True, round(lot), stop=up)
        # self.exchange.entry("ChShort", False, round(lot), stop=dn)
        # fibo for 1h: fb100/fb0 are the last hourly bar's extremes
        resolution = self.input(defval=1, title="resolution", type=int)
        source = self.exchange.security(str(resolution) + 'h')
        series_high = source['high'].values
        series_low = source['low'].values
        fb100 = last(highest(series_high, 1))  # last 1h bar; for 1D also change the resolution above
        fb0 = last(lowest(series_low, 1))
        logger.info('resolution: %s' % resolution)
        logger.info('fb100_resol: %s' % fb100)
        logger.info('fb0_resol: %s' % fb0)
        logger.info('self.pre_fb100: %s' % self.pre_fb100)
        logger.info('self.pre_fb0: %s' % self.pre_fb0)
        # a new 1h zone started when both extremes moved (pre_fb0 == 0 means first tick)
        if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
            flg_changed_timezone = True
            logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
            if bitmex.get_whichpositon() is None:
                self.exchange.cancel_all()
        # fibo retracement (fb38..fb62), expansion above (fb138..fb200) and
        # below (fb038..fb0100); only fb062 and fb162 are used for orders here
        fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
        fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
        fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
        fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100)
        fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100)
        fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100)
        fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382)
        fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618)
        fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00)
        # willr for five willilams: %R on fibonacci-number periods
        a = willr(high, low, close, period=55)
        b = willr(high, low, close, period=144)
        c = willr(high, low, close, period=610)
        x = willr(high, low, close, period=4181)
        y = willr(high, low, close, period=6785)
        # logger.info('---- a ----')
        # for i in range(1, 5):
        #     logger.info('a [%s] *******: %s' % (-i, a[-i]))
        # logger.info('---- b ----')
        # for i in range(1, 5):
        #     logger.info('b [%s] *******: %s' % (-i, b[-i]))
        # logger.info('---- c ----')
        # for i in range(1, 5):
        #     logger.info('c [%s] *******: %s' % (-i, c[-i]))
        # logger.info('---- x ----')
        # for i in range(1, 5):
        #     logger.info('x [%s] *******: %s' % (-i, x[-i]))
        # logger.info('---- y ----')
        # for i in range(1, 5):
        #     logger.info('x [%s] *******: %s' % (-i, y[-i]))
        logger.info('-----------------price / lot ----------------')
        logger.info('price:%s' % price)
        logger.info('lot:%s' % str(lot))
        logger.info('-----------------o h l c v ----------------')
        logger.info('open:%s' % open[-1])
        logger.info('high:%s' % high[-1])
        logger.info('low:%s' % low[-1])
        logger.info('close:%s' % close[-1])
        logger.info('volume:%s' % volume[-1])
        logger.info('-----------------a b c x y ----------------')
        logger.info('willr_a : %s' % a[-1])
        logger.info('willr_b : %s' % b[-1])
        logger.info('willr_c : %s' % c[-1])
        logger.info('willr_x : %s' % x[-1])
        logger.info('willr_y : %s' % y[-1])
        # composite entry conditions: short %R deeply oversold/overbought,
        # filtered by the longer-period %R levels (any single condition fires)
        buycon1 = True if (a[-1] < -97 and (b[-1] < -97 or c[-1] < -97) and (x[-1] < -80 or y[-1] < -80)) else False
        buycon2 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -90) and (x[-1] > -35 or y[-1] > -35)) else False
        buycon3 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] > -70) and (x[-1] > -50 or y[-1] > -25)) else False
        buycon4 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -97) and (x[-1] > -50 or y[-1] > -50)) else False
        buycon5 = True if (a[-1] < -97 and (b[-1] < -97 and c[-1] < -75) and (x[-1] > -25 or y[-1] > -25)) else False
        buycon6 = True if ((b[-1] + 100) * (c[-1] + 100) == 0 and (c[-1] < -75 and x[-1] > -30 or y[-1] > -30)) else False
        buycon7 = True if ((b[-1] + 100) == 0 and (c[-1] > -30 and x[-1] > -30 or y[-1] > -30)) else False
        buycon8 = True if c[-1] < -97 else False
        buycon9 = True if a[-1] < -97 and b[-1] < -97 and c[-1] > -50 else False
        sellcon1 = True if (a[-1] > -3 and (b[-1] > -3 or c[-1] > -3) and (x[-1] > -20 or y[-1] > -20)) else False
        sellcon2 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -10) and (x[-1] < -65 or y[-1] < -65)) else False
        sellcon3 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -30) and (x[-1] < -50 or y[-1] < -75)) else False
        sellcon4 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] > -3) and (x[-1] < -50 or y[-1] < -50)) else False
        sellcon5 = True if (a[-1] > -3 and (b[-1] > -3 and c[-1] < -25) and (x[-1] < -75 or y[-1] < -75)) else False
        sellcon6 = True if (((b[-1]) * (c[-1])) == 0 and c[-1] > -25 and (x[-1] < -70 or y[-1] < -70)) else False
        sellcon7 = True if ((b[-1]) == 0 and (c[-1] < -70 and x[-1] < -70 or y[-1] < -70)) else False
        sellcon8 = True if c[-1] > -3 else False
        sellcon9 = True if a[-1] > -3 and b[-1] > -3 and c[-1] < -50 else False
        buyCon = True if buycon1 or buycon2 or buycon3 or buycon4 or buycon5 or buycon6 or buycon7 or buycon8 or buycon9 else False
        sellCon = True if sellcon1 or sellcon2 or sellcon3 or sellcon4 or sellcon5 or sellcon6 or sellcon7 or sellcon8 or sellcon9 else False
        # exit triggers on the short-period %R only
        buyCloseCon = True if a[-1] > -10 else False
        sellCloseCon = True if a[-1] < -90 else False
        logger.info('-----------------inlong / inshort ----------------')
        logger.info('inlong:%s' % self.inlong)
        logger.info('inshort:%s' % self.inshort)
        logger.info('-----------------buyCon / sellCon ----------------')
        logger.info('buyCon:%s' % buyCon)
        logger.info('sellCon:%s' % sellCon)
        logger.info('buyCloseCon:%s' % buyCloseCon)
        logger.info('sellCloseCon:%s' % sellCloseCon)
        # if self.inlong:
        #     self.inlong = True
        #
        # if self.inshort:
        #     self.inshort = True
        # fibo order filters: current 1h extreme vs the last 4 hourly bars
        fb100_4h = last(highest(series_high, 4))  # last 1h bars; for 1D also change the resolution
        fb0_4h = last(lowest(series_low, 4))
        fiboBuyCon = True if fb0 <= fb0_4h else False
        logger.info('fiboBuyCon:%s' % fiboBuyCon)
        fiboSellCon = True if fb100 >= fb100_4h else False
        logger.info('fiboSellCon:%s' % fiboSellCon)
        logger.info('bitmex.get_whichpositon():%s' % bitmex.get_whichpositon())
        logger.info('bitmex.get_position_size():%s' % bitmex.get_position_size())
        # if bitmex.get_whichpositon() is not None:
        #     logger.info('-- bitmex.get_whichpositon is not None --')
        # mirror the live position into the local flags (never reset here)
        if bitmex.get_position_size() > 0:
            logger.info('-- >> bitmex.get_position_size > 0 --')
            self.inlong = True
        elif bitmex.get_position_size() < 0:
            logger.info('-- >> bitmex.get_position_size < 0 --')
            self.inshort = True
        if self.start==1:
            # first tick: clear everything and seed fibo + channel orders
            logger.info('-- self.start==1 --')
            self.exchange.cancel_all()
            if fiboBuyCon:
                logger.info('if fiboBuyCon:%s' % fiboBuyCon)
                self.exchange.order("FLong", True, lot, limit=fb062, post_only=True)
            if fiboSellCon:
                logger.info('if fiboSellCon:%s' % fiboSellCon)
                self.exchange.order("FShort", False, lot, limit=fb162, post_only=True)
            if price < up:
                logger.info('price < up: %s' % up)
                self.exchange.order("ChLong", True, lot, stop=up)
            if price > dn:
                logger.info('price > dn: %s' % dn)
                self.exchange.order("ChShort", False, lot, stop=dn)
        elif (flg_changed_timezone):  # and (not self.inlong)) and (not self.inshort):
            # new 1h zone: cancel open orders, reset flags if flat, re-seed orders
            logger.info('-- (flg_changed_timezone')  #and (not self.inlong)) and (not self.inshort) --')
            self.exchange.cancel_all()
            # init
            if bitmex.get_whichpositon() is None and (self.inlong is True or self.inshort is True):
                logger.info('-- (flg_changed_timezone >> init: inlone --> %s, inshort --> %s' % (self.inlong, self.inshort))
                self.inlong = False
                self.inshort = False
            # set fibo conditions
            if fiboBuyCon:
                logger.info('if fiboBuyCon:%s' % fiboBuyCon)
                self.exchange.order("FLong", True, lot, limit=fb062, post_only=True)
            if fiboSellCon:
                logger.info('if fiboSellCon:%s' % fiboSellCon)
                self.exchange.order("FShort", False, lot, limit=fb162, post_only=True)
            if price < up:
                logger.info('price < up: %s' % up)
                self.exchange.order("ChLong", True, lot, stop=up)
            if price > dn:
                logger.info('price > dn: %s' % dn)
                self.exchange.order("ChShort", False, lot, stop=dn)
        # elif (flg_changed_timezone and self.inlong and not self.inshort):
        #     logger.info('-- (flg_changed_timezone and self.inlong and not self.inshort) --')
        #     self.exchange.order("FShort", False, lot, limit=fb200, post_only=True)
        # elif (flg_changed_timezone and not self.inlong and self.inshort):
        #     logger.info('-- (flg_changed_timezone and not self.inlong and self.inshort) --')
        #     self.exchange.order("FLong", True, lot, limit=fb0100, post_only=True)
        else:
            logger.info('-- else and pass --')
            pass
        # exits: flatten on the %R exit level for the held direction
        if (buyCloseCon) and (self.inlong):
            # self.exchange.close("Long")
            logger.info('-- (buyCloseCon) and (self.inlong) --')
            self.exchange.close_all()
            self.inlong = False
        if (sellCloseCon) and (self.inshort):
            # self.exchange.close("Short")
            logger.info('-- (sellCloseCon) and (self.inshort) --')
            self.exchange.close_all()
            self.inshort = False
        # entries: post-only limit just inside the market, doubling any
        # existing position size
        if (buyCon) and (not self.inlong):
            logger.info('if (buyCon) and (not self.inlong)::')
            if price <= close[-1]:
                logger.info('>> in +++ price <= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
                if bitmex.get_position_size() != 0:
                    logger.info('-- bitmex.get_position_size() != 0 --')
                    # NOTE(review): get_position_size() is negative when short, so
                    # size*2 can be a negative quantity here — confirm intended
                    self.exchange.order("Long", True, bitmex.get_position_size()*2, limit=price-0.5, post_only=True)
                else:
                    logger.info('-- bitmex.get_position_size() != 0 / else --')
                    self.exchange.order("Long", True, lot, limit=price-0.5, post_only=True)
                # self.inlong = True
            else:
                pass
        if (sellCon) and (not self.inshort):
            logger.info('if (sellCon) and (not self.inlong)::')
            if price >= close[-1]:
                logger.info('>> in +++ price >= close[-1] and ++++ get_position_size: %s' % bitmex.get_position_size())
                if bitmex.get_position_size() != 0:
                    logger.info('-- bitmex.get_position_size() != 0 --')
                    # NOTE(review): same sign concern as the long branch above
                    self.exchange.order("Short", False, bitmex.get_position_size()*2, limit=price+0.5, post_only=True)
                else:
                    logger.info('-- bitmex.get_position_size() != 0 / else --')
                    self.exchange.order("Short", False, lot, limit=price+0.5, post_only=True)
                # self.inshort = True
            else:
                pass
        # save pre-timezone's fb0, fb100 values
        self.pre_fb0 = fb0
        self.pre_fb100 = fb100
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('----------------- realised_pnl ---------')
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info("time2 : %s" % str(time.time() - start))
        logger.info('----------------- END ---------------- END ----------------')
# rci
class Rci(Bot):
    """RCI (Rank Correlation Index) strategy on 5-minute candles.

    Enters long when the short- or medium-period RCI is deeply oversold and
    rising while the long-period RCI is below -10 and rising; enters short
    on the mirrored overbought/falling conditions; closes everything when
    the medium RCI rolls over at an extreme.
    """

    def __init__(self):
        # trade on the 5-minute bin
        Bot.__init__(self, '5m')

    def options(self):
        # hyperopt search space for the three RCI lengths
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
            'rcv_medium_len': hp.quniform('rcv_medium_len', 5, 15, 1),
            'rcv_long_len': hp.quniform('rcv_long_len', 10, 20, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        itv_s = self.input('rcv_short_len', int, 5)
        itv_m = self.input('rcv_medium_len', int, 9)
        itv_l = self.input('rcv_long_len', int, 15)
        rci_s = rci(close, itv_s)
        rci_m = rci(close, itv_m)
        rci_l = rci(close, itv_l)
        # BUGFIX: the long-side trend filter compared rci_l[-2] with itself
        # (rci_l[-2] > rci_l[-2]), which is always False and made the long
        # entry unreachable.  Mirror the short condition instead: long-period
        # RCI below -10 and rising.
        long = ((-80 > rci_s[-1] > rci_s[-2]) or (-82 > rci_m[-1] > rci_m[-2])) \
            and (rci_l[-1] < -10 and rci_l[-1] > rci_l[-2])
        short = ((80 < rci_s[-1] < rci_s[-2]) or (rci_m[-1] < -82 and rci_m[-1] < rci_m[-2])) \
            and (10 < rci_l[-1] < rci_l[-2])
        # flatten when the medium RCI reverses at either extreme
        close_all = 80 < rci_m[-1] < rci_m[-2] or -80 > rci_m[-1] > rci_m[-2]
        if long:
            self.exchange.entry("Long", True, lot)
        elif short:
            self.exchange.entry("Short", False, lot)
        elif close_all:
            self.exchange.close_all()
# Fibonacci Retracement & Expansion Strategy
# class Fibo(Bot):
# prebalance = BitMex(threading=False).get_balance()
# start = 0
# pre_fb0 = 0
# pre_fb100 = 0
# idx = 0
# def __init__(self):
# Bot.__init__(self, '1m')
#
# def options(self):
# return {
# 'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
# }
#
# def strategy(self, open, close, high, low, volume):
# self.start += 1
# flg_changed_timezone = False
#
# lot = self.exchange.get_lot()
# # for test lot
# # lot = int(round(lot / 20))
# lot = 500
# bitmex = BitMex(threading=False)
# price = bitmex.get_market_price()
#
#
# sma_base_l = self.input('sma_short_len', int, 200)
#
# resolution = self.input(defval=5, title="resolution", type=int) # defval 변경, 예) 5분 --> 5, 'm' or 1시간 1, 'h', 1Day 1, 'd'
# source = self.exchange.security(str(resolution) + 'm') # def __init__ 비교
# logger.info('source: %s' % source)
#
# series_high = source['high'].values
# series_low = source['low'].values
#
# fb100 = last(highest(series_high, 1)) # 1시간 1, 1D의 경우는 resolution도 변경
# fb0 = last(lowest(series_low, 1))
#
# logger.info('resolution: %s' % resolution)
# logger.info('fb100_resol: %s' % fb100)
# logger.info('self.pre_fb100: %s' % self.pre_fb100)
# logger.info('fb0_resol: %s' % fb0)
# logger.info('self.pre_fb0: %s' % self.pre_fb0)
#
#
#
# # for test
# # fb100 = price + 15
# # fb0 = price - 15
#
# # 최근 1시간을 본봉단위로 획득
# # fibo_l = self.input('length', int, 1440) # 1Day = 60min * 24hr
# # fibo_l = self.input('length', int, 60) # 1Day = 60min * 24hr
# # fibo100 = last(highest(high, fibo_l))
# # fibo0 = last(lowest(low, fibo_l))
#
# fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
# fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
# fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
#
# fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100)
# fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100)
# fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100)
#
# fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382)
# fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618)
# fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00)
#
# qty= bitmex.get_position_size()
#
# # 익손평가
# longstatus = bitmex.get_position_avg_price() - fb0
# shortstatus = bitmex.get_position_avg_price() - fb100
# gprice = price
#
# # if bitmex.get_whichpositon() == 'LONG' and longstatus > 0:
# # qL0 = lot * 1
# # qS100 = abs(qty) + lot * 1
# # gprice = price - 1
# # elif bitmex.get_whichpositon() == 'SHORT'and shortstatus > 0:
# # qL0 = abs(qty) + lot * 1
# # qS100 = lot * 1
# # gprice = price + 1
# # else:
# # qL0 = lot * 1
# # qS100 = lot * 1
#
# qS100 = lot*1
# qL0 = lot*1
#
# if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
# flg_changed_timezone = True
# logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
# if bitmex.get_whichpositon() is None:
# self.exchange.cancel_all()
#
#
# if self.start == 1:
# # short position
# self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, post_only=True)
# # self.exchange.order("S162"+str(self.idx), False, lot*1, limit=fb162, post_only=True)
# self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, post_only=True)
# self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, post_only=True)
#
# # long position
# self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, post_only=True)
# self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, post_only=True)
# # self.exchange.order("L062"+str(self.idx), True, lot*1, limit=fb062, post_only=True)
# self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, post_only=True)
#
#
# L0 = bitmex.get_open_order("L0"+str(self.idx))
# L038 = bitmex.get_open_order("L038"+str(self.idx))
# L062 = bitmex.get_open_order("L062"+str(self.idx))
# L0100 = bitmex.get_open_order("L0100"+str(self.idx))
#
# S200 = bitmex.get_open_order("S200"+str(self.idx))
# S162 = bitmex.get_open_order("S162"+str(self.idx))
# S138 = bitmex.get_open_order("S138"+str(self.idx))
# S100 = bitmex.get_open_order("S100"+str(self.idx))
#
# #
# # logger.info('(L0 is None): %s' % (L0 is None))
# if flg_changed_timezone is True:
# self.idx += 1
#
# # 이전 self.idx-1 타임존의 기본 주문만 취소, 나머지 역지정 된것 들은 그냥 둔다.
# # self.exchange.cancel("L0"+str(self.idx-1))
# # self.exchange.cancel("L038"+str(self.idx-1))
# # self.exchange.cancel("L062"+str(self.idx-1))
# # self.exchange.cancel("L0100"+str(self.idx-1))
# # self.exchange.cancel("S200"+str(self.idx-1))
# # self.exchange.cancel("S162"+str(self.idx-1))
# # self.exchange.cancel("S138"+str(self.idx-1))
# # self.exchange.cancel("S100"+str(self.idx-1))
# self.exchange.cancel_all()
# longshort = True
# if bitmex.get_position_size() > 0:
# longshort = False
# if bitmex.get_position_size() < 0:
# longshort = True
#
# logger.info('bitmex.get_position_size(): %s' % bitmex.get_position_size())
# if bitmex.get_position_size() != 0:
# self.exchange.order("Garbage", longshort, bitmex.get_position_size(), limit=gprice, post_only=True)
#
# # self.exchange.cancel_all()
# # self.exchange.close_all() # entry order
# # long position
#
# if price > fb0:
# logger.info('price > fb0:%')
# logger.info('flg_changed_timezone: %s' % flg_changed_timezone)
# self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, when=(L0 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, when=(L038 is None or flg_changed_timezone), post_only=True)
# # self.exchange.order("L062"+str(self.idx), True, lot*1, limit=fb062, when=(L062 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, when=(L0100 is None or flg_changed_timezone), post_only=True)
#
# # short position
# if price < fb100:
# logger.info('price < fb100' )
# logger.info('flg_changed_timezone: %s' % flg_changed_timezone)
#
# self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, when=(S200 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("S162"+str(self.idx), False, lot*1, limit=fb162, when=(S162 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, when=(S138 is None or flg_changed_timezone), post_only=True)
# self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, when=(S100 is None or flg_changed_timezone), post_only=True)
#
# L0_w = bitmex.get_open_order("L0_w"+str(self.idx))
# L038_w = bitmex.get_open_order("L038_w"+str(self.idx))
# L062_w = bitmex.get_open_order("L062_w"+str(self.idx))
# L0100_w = bitmex.get_open_order("L0100_w"+str(self.idx))
#
# S100_w = bitmex.get_open_order("S100_w"+str(self.idx))
# S138_w = bitmex.get_open_order("S138_w"+str(self.idx))
# S162_w = bitmex.get_open_order("S162_w"+str(self.idx))
# S200_w = bitmex.get_open_order("S200_w"+str(self.idx))
#
#
# # win order of stoplimit
# if price <= fb0: #and L0 is None:
# self.exchange.order("L0_w"+str(self.idx), False, lot*1, limit=fb38, stop=fb0) # post_only=True)
# logger.info('rice <= fb0: %s' % fb0)
# if price <= fb038: # and L038 is None:
# self.exchange.order("L038_w"+str(self.idx), False, lot*1, limit=fb0, stop=fb038)
# logger.info('price <= fb038: %s' % fb038)
# if price <= fb062: # and L062 is None:
# self.exchange.order("L062_w"+str(self.idx), False, lot*1, limit=fb038, stop=fb062)
# logger.info('price <= fb062: %s' % fb062)
# if price <= fb0100: # and L0100 is None:
# self.exchange.order("L0100_w"+str(self.idx), False, lot*2, limit=fb062, stop=fb0100)
# logger.info('price <= fb0100: %s' % fb0100)
#
#
# if price >= fb100: # and S100 is None:
# logger.info('price >= fb100: %s' % fb100)
# self.exchange.order("S100_w"+str(self.idx), True, lot*1, limit=fb62, stop=fb0100)
# if price >= fb138: # and S138 is None:
# self.exchange.order("S138_w"+str(self.idx), True, lot*1, limit=fb100, stop=fb138)
# logger.info('price >= fb138: %s' % fb138)
# if price >=fb162: # and S162 is None:
# self.exchange.order("S162_w"+str(self.idx), True, lot*1, limit=fb138, stop=fb162)
# logger.info('price >= fb162 %s' % fb162)
# if price >= fb200: # and S200 is None:
# self.exchange.order("S200_w"+str(self.idx), True, lot*2, limit=fb162, stop=fb200)
# logger.info('price >= fb200: %s' % fb200)
#
# # logger.info('bitmex.get_margin():%s' % bitmex.get_margin())
# # logger.info('bitmex.get_position():%s' % bitmex.get_position())
#
# self.pre_fb0 = fb0
# self.pre_fb100 = fb100
#
# # for debug
# logger.info('fb200: %s' % fb200)
# logger.info('fb162: %s' % fb162)
# logger.info('fb138: %s' % fb138)
# logger.info('fb100: %s' % fb100)
# logger.info('fb62: %s' % fb62)
# logger.info('fb50: %s' % fb50)
# logger.info('fb38: %s' % fb38)
# logger.info('fb0: %s' % fb0)
# logger.info('fb038: %s' % fb038)
# logger.info('fb062: %s' % fb062)
# logger.info('fb0100: %s' % fb0100)
#
# diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
#
# realised_pnl = bitmex.get_margin()['realisedPnl']
#
# logger.info('prebalance():%s' % self.prebalance)
# logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
# logger.info('diff:%s' % diff)
# logger.info('realised_pnl:%s' % realised_pnl)
#
# logger.info('--------------------------------------------------')
# rsi2
class RSI2(Bot):  # logic: https://stock79.tistory.com/177
    """RSI(2) mean-reversion strategy filtered by Supertrend and SMAs.

    Trades in the direction of the Supertrend: opens/averages into
    positions on RSI(2) extremes, closes against a short SMA, and
    closes opposite-direction positions when the trend flips.
    """
    # balance snapshot taken once, at class-definition (module import) time
    prebalance = BitMex(threading=False).get_balance()
    # number of strategy() invocations so far
    dealcount = 0

    def __init__(self):
        # run on the 1-minute bin
        Bot.__init__(self, '1m')

    def options(self):
        # hyperopt search space (used only when optimizing parameters)
        return {
            'length': hp.randint('length', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        # for test lot
        lot = int(round(lot / 50))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        # variants settings
        # NOTE(review): both lengths read the same 'length' input key and
        # differ only in their defaults (2 and 50) — confirm intended
        rsi2_len = self.input('length', int, 2)
        rsi50_len = self.input('length', int, 50)
        rsi2 = rsi(close, rsi2_len)
        rsi50 = rsi(close, rsi50_len)
        factor = self.input('factor', int, 20)
        period = self.input('period', int, 7)
        resolution = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        source = self.exchange.security(str(resolution) + 'm')  # compare with the bin set in __init__
        supertrenddf = supertrend(source, factor, period)
        fast_len = self.input('fast_len', int, 5)
        half_len = self.input('half_len', int, 5)
        slow_len = self.input('slow_len', int, 200)
        fast_sma = sma(close, fast_len)
        half_sma = sma(close, half_len)
        slow_sma = sma(close, slow_len)
        # conditions
        sma_long = over(fast_sma[-1], slow_sma[-1])
        sma_short = under(fast_sma[-1], slow_sma[-1])
        super_long = over(close[-1], supertrenddf['TSL'][-1])
        super_short = under(close[-1], supertrenddf['TSL'][-1])
        super_stoploss = supertrenddf['TSL'][-1]
        supertrendtrend = supertrenddf['Trend'][-1]
        rsi2_overbought = over(rsi2[-1], 95)
        rsi2_oversold = under(rsi2[-1], 5)
        rsi50_over = over(rsi50[-1], 50)
        rsi50_under = under(rsi50[-1], 50)
        price_under = under(price, half_sma[-1])
        price_over = over(price, half_sma[-1])
        half_before = over(close[-1], half_sma[-1])
        half_after = under(close[-1], half_sma[-1])
        # show infomations
        logger.info('price: %s' % price)
        logger.info('fast_sma[-1]: %s' % fast_sma[-1])
        logger.info('slow_sma[-1]: %s' % slow_sma[-1])
        logger.info('sma_long: %s' % sma_long)
        logger.info('sma_short: %s' % sma_short)
        logger.info('super_long: %s' % super_long)
        logger.info('super_short: %s' % super_short)
        logger.info('super_stoploss: %s' % super_stoploss)
        logger.info('sma_trend: %s' % supertrendtrend)
        logger.info('rsi2[-1 ]%s' % rsi2[-1])
        logger.info('rsi50[-1 ]%s' % rsi50[-1])
        logger.info('rsi2_oversold: %s' % rsi2_oversold)
        logger.info('rsi2_overbought: %s' % rsi2_overbought)
        logger.info('price_under: %s' % price_under)
        logger.info('price_over: %s' % price_over)
        logger.info('half_before: %s' % half_before)
        logger.info('half_after: %s' % half_after)
        logger.info('get_whichpositon(): %s' % bitmex.get_whichpositon())
        logger.info('position_size(): %s' % bitmex.get_position_size())
        # entry
        if super_long:  # long trend
            logger.info('+ + + + + LONG + + + + + LONG + + + + + LONG + + + + + ')
            if bitmex.get_whichpositon() is None:
                if sma_long and rsi2_oversold or price_under:
                    logger.info('postion condition > None > and all short condition order')
                    self.exchange.entry("Long", True, lot, limit=price-0.5, post_only=True)
                else:
                    logger.info('postion condition > None > default long order')
                    self.exchange.entry("Long", True, lot, limit=math.ceil(super_stoploss), post_only=True)
            elif bitmex.get_whichpositon() == 'LONG':
                logger.info('postion condition > LONG')
                if price_over:  # closing
                    logger.info('postion condition > LONG > Closing')
                    self.exchange.order("Long", False, abs(bitmex.get_position_size()), limit=price + 1.5, post_only=True)
                elif rsi2_overbought:  # add more entry
                    logger.info('postion condition > LONG > Rsi2 overbougt add more entry')
                    self.exchange.entry("LongAdd", True, lot, limit=price - 0.5, post_only=True)
                elif super_short:  # stop loss
                    # NOTE(review): the "stop loss" branch re-enters long
                    # (entry(..., True, ...)) rather than closing — confirm intended
                    logger.info('postion condition > LONG > super_short(stop loss)')
                    self.exchange.entry("Long", True, lot)
                    self.exchange.entry("LongAdd", True, lot)
            elif bitmex.get_whichpositon() == 'SHORT':
                # opposite position during a long trend: flatten it
                logger.info('cancel SHORT on long trend')
                # self.exchange.cancel_all()
                self.exchange.close_all()
            else:
                # self.exchange.cancel_all()
                logger.info('Super Long --> Else')
        if super_short:  # short trend
            logger.info('- - - - - SHORT - - - - - SHORT - - - - - SHORT - - - - - ')
            if bitmex.get_whichpositon() is None:
                if sma_short and rsi2_overbought or price_over:
                    logger.info('postion condition > None > and all short condition order')
                    self.exchange.entry("Short", False, lot, limit=price+0.5, post_only=True)
                else:
                    logger.info('postion condition > None > default short order')
                    self.exchange.entry("Short", False, lot, limit=math.floor(super_stoploss), post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                logger.info('postion condition > SHORT')
                if price_under:  # closing
                    logger.info('postion condition > SHORT > price_under(closing)')
                    self.exchange.order("Short", True, abs(bitmex.get_position_size()), limit=price - 1.5, post_only=True)
                elif rsi2_oversold:  # add more entry
                    # NOTE(review): short add uses limit=price - 0.5 while the long
                    # add also uses price - 0.5 — confirm whether price + 0.5 was meant
                    logger.info('postion condition > SHORT > rsi2_oversold(add more entry)')
                    self.exchange.entry("ShortAdd", False, lot, limit=price - 0.5, post_only=True)
                elif super_long:  # stop loss
                    # NOTE(review): entry("Short", True, ...) flips the side flag to
                    # buy here — verify intended stop-loss behavior
                    logger.info('postion condition > SHORT > super_short(stop loss)')
                    self.exchange.entry("Short", True, lot)
                    self.exchange.entry("ShortAdd", True, lot)
            elif bitmex.get_whichpositon() == 'LONG':
                # opposite position during a short trend: flatten it
                logger.info('cancel LONG on short trend')
                self.exchange.close_all()
            else:
                logger.info('Super Shot --> Else')
        self.dealcount += 1
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('dealcount:%s' % self.dealcount)
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
class R2H5(Bot):
    """Heikin-Ashi crossover strategy on smoothed double-HA candles.

    Builds doubly-smoothed Heikin-Ashi open/close series, applies a
    selectable moving-average variant at several lengths, and enters
    long/short on the long-trend HA close/open crossover.
    """
    # selectable smoothing functions, indexed by the variant_type input
    variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull, heikinashi]
    eval_time = None

    def __init__(self):
        # run on the 1-minute bin
        Bot.__init__(self, '1m')

    def options(self):
        # hyperopt search space (used only when optimizing parameters)
        return {
            'fast_len': hp.quniform('fast_len', 1, 60, 1),
            'slow_len': hp.quniform('slow_len', 1, 240, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 2))
        # lot = 10
        logger.info('lot:%s' % lot)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        resolution = self.input(defval=1, title="resolution", type=int)
        variant_type = self.input(defval=5, title="variant_type", type=int)
        # NOTE(review): basis_len is read but never used below — confirm
        basis_len = self.input(defval=19, title="basis_len", type=int)
        logger.info('price:%s\n' % price)
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 21)
        # trend_len = self.input('slow_len', int, 55)
        # longtrend_len = self.input('slow_len', int, 233)
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 55)
        # trend_len = self.input('slow_len', int, 240)
        # longtrend_len = self.input('slow_len', int, 233)
        # NOTE(review): trend_len and longtrend_len reuse the 'slow_len'
        # input key (only defaults differ) — confirm intended
        fast_len = self.input('fast_len', int, 1)
        slow_len = self.input('slow_len', int, 30)
        trend_len = self.input('slow_len', int, 60)
        longtrend_len = self.input('slow_len', int, 120)
        logger.info('fast_len:%s' % fast_len)
        logger.info('slow_len:%s' % slow_len)
        logger.info('trend_len:%s' % trend_len)
        logger.info('longtrend_len:%s' % longtrend_len)
        # for various minutes source
        source = self.exchange.security(str(resolution) + 'm')
        # double Heikin-Ashi: HA of the HA candles
        hadf = heikinashi(source)
        hadf_fast = heikinashi(hadf)
        ha_open_values = hadf_fast['HA_open'].values
        ha_close_values = hadf_fast['HA_close'].values
        variant = self.variants[variant_type]
        # fast smoothing of the HA open/close
        ha_open_fast = variant(ha_open_values, fast_len)
        ha_close_fast = variant(ha_close_values, fast_len)
        haopen_fast = ha_open_fast[-1]
        haclose_fast = ha_close_fast[-1]
        haup_fast = haclose_fast > haopen_fast
        hadown_fast = haclose_fast <= haopen_fast
        logger.info('haup_fast:%s\n' % haup_fast)
        # slow smoothing
        ha_open_slow = variant(ha_open_values, slow_len)
        ha_close_slow = variant(ha_close_values, slow_len)
        haopen_slow = ha_open_slow[-1]
        haclose_slow = ha_close_slow[-1]
        haup_slow = haclose_slow > haopen_slow
        hadown_slow = haclose_slow <= haopen_slow
        logger.info('haup_slow:%s\n' % haup_slow)
        # trend smoothing
        ha_open_trend = variant(ha_open_values, trend_len)
        ha_close_trend = variant(ha_close_values, trend_len)
        haopen_trend = ha_open_trend[-1]
        haclose_trend = ha_close_trend[-1]
        haup_trend = haclose_trend > haopen_trend
        hadown_trend = haclose_trend <= haopen_trend
        logger.info('haup_trend:%s\n' % haup_trend)
        # long-trend smoothing: the only series actually used for entries below
        ha_open_longtrend = variant(ha_open_values, longtrend_len)
        ha_close_longtrend = variant(ha_close_values, longtrend_len)
        haopen_longtrend = ha_open_longtrend[-1]
        haclose_longtrend = ha_close_longtrend[-1]
        haup_longtrend = haclose_longtrend > haopen_longtrend
        hadown_longtrend = haclose_longtrend <= haopen_longtrend
        logger.info('haup_longtrend:%s\n' % haup_longtrend)
        # resol_fast = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_fast = self.exchange.security(str(resol_fast) + 'm')  # see __init__
        # hadf_fast = heikinashi(source_fast, 1)
        # haopen_fast = hadf_fast['HA_open'][-1]
        # haclose_fast = hadf_fast['HA_close'][-1]
        # haup_fast = haclose_fast > haopen_fast
        # hadown_fast = haclose_fast <= haopen_fast
        # logger.info('haup_fast:%s\n' % haup_fast)
        # logger.info('hadown_fast:%s\n' % hadown_fast)
        # resol_slow = self.input(defval=4, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_slow = self.exchange.security(str(resol_slow) + 'h')  # see __init__
        # hadf_slow = heikinashi(source_slow, 1)
        # haopen_slow = hadf_slow['HA_open'][-1]
        # haclose_slow = hadf_slow['HA_close'][-1]
        # haup_slow = haclose_slow > haopen_slow
        # hadown_slow = haclose_slow <= haopen_slow
        # logger.info('haup_slow:%s\n' % haup_slow)
        # logger.info('hadown_slow:%s\n' % hadown_slow)
        # resol_trend = self.input(defval=1, title="resolution", type=int)  # change defval, e.g. 5 minutes --> 5
        # source_trend = self.exchange.security(str(resol_trend) + 'd')  # see __init__
        # hadf_trend = heikinashi(source_trend)
        # haopen_trend = hadf_trend['HA_open'][-1]
        # haclose_trend = hadf_trend['HA_close'][-1]
        # haup_trend = haclose_trend > haopen_trend
        # hadown_trend = haclose_trend <= haopen_trend
        # logger.info('haup_trend:%s\n' % haup_trend)
        # logger.info('hadown_trend:%s\n' % hadown_slow)
        " long "
        self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend, ha_open_longtrend))
        " short "
        self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend, ha_open_longtrend))
        # source_entry = self.exchange.security('1h')
        #
        # hadf_entry = heikinashi(source_entry)
        # hadf_trading = heikinashi(hadf_entry)
        #
        # ha_open_longtrend_entry = variant(ha_open_values, 2)  # 2h
        # ha_close_longtrend_entry = variant(ha_close_values, 2)
        #
        # haopen_longtrend_entry = ha_open_longtrend_entry[-1]
        # haclose_longtrend_entry = ha_close_longtrend_entry[-1]
        # haup_longtrend_entry = haclose_longtrend_entry > haopen_longtrend_entry
        # hadown_longtrend_entry = haclose_longtrend_entry <= haopen_longtrend_entry
        #
        # logger.info('1h기준 2h\n')
        # logger.info('haup_longtrend_enty:%s\n' % haup_longtrend_entry)
        # logger.info('hadown_longtrend_entry:%s\n' % hadown_longtrend_entry)
        #
        # " long "
        # self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend_entry, ha_open_longtrend_entry))
        # " short "
        # self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend_entry, ha_open_longtrend_entry))
# heikinashi
class Heikinashi(Bot):
    """Heikin-Ashi multi-window trend strategy.

    Applies the Heikin-Ashi transform twice to 1-minute candles, smooths the
    resulting HA open/close with a selectable moving-average variant at four
    window lengths (fast/slow/trend/longtrend), and enters on a crossover or
    crossunder of the longest-window smoothed close/open pair.
    """
    # Smoothing functions selectable by index via the `variant_type` input.
    variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull, heikinashi]
    # Timestamp of the last evaluated candle (not used by this strategy).
    eval_time = None
    def __init__(self):
        # Run the bot on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        # Hyperopt search space for parameter optimization.
        return {
            'fast_len': hp.quniform('fast_len', 1, 60, 1),
            'slow_len': hp.quniform('slow_len', 1, 240, 1),
        }
    def strategy(self, open, close, high, low, volume):
        # Position size supplied by the exchange wrapper.
        lot = self.exchange.get_lot()
        # for test
        # lot = int(round(lot / 2))
        # lot = 10
        logger.info('lot:%s' % lot)
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        resolution = self.input(defval=1, title="resolution", type=int)
        variant_type = self.input(defval=5, title="variant_type", type=int)
        # NOTE(review): basis_len is read but never used below — confirm intent.
        basis_len = self.input(defval=19, title="basis_len", type=int)
        logger.info('price:%s\n' % price)
        # Earlier parameter sets kept for reference:
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 21)
        # trend_len = self.input('slow_len', int, 55)
        # longtrend_len = self.input('slow_len', int, 233)
        # fast_len = self.input('fast_len', int, 1)
        # slow_len = self.input('slow_len', int, 55)
        # trend_len = self.input('slow_len', int, 240)
        # longtrend_len = self.input('slow_len', int, 233)
        # NOTE(review): trend_len and longtrend_len reuse the 'slow_len' input
        # key; under hyperopt all three would receive the same sampled value.
        # Confirm whether distinct keys were intended.
        fast_len = self.input('fast_len', int, 1)
        slow_len = self.input('slow_len', int, 30)
        trend_len = self.input('slow_len', int, 60)
        longtrend_len = self.input('slow_len', int, 120)
        logger.info('fast_len:%s' % fast_len)
        logger.info('slow_len:%s' % slow_len)
        logger.info('trend_len:%s' % trend_len)
        logger.info('longtrend_len:%s' % longtrend_len)
        # for various minutes source
        source = self.exchange.security(str(resolution) + 'm')
        # Apply the Heikin-Ashi transform twice for extra smoothing.
        hadf = heikinashi(source)
        hadf_fast = heikinashi(hadf)
        ha_open_values = hadf_fast['HA_open'].values
        ha_close_values = hadf_fast['HA_close'].values
        variant = self.variants[variant_type]
        # For each window: smooth HA open/close; the market is "up" when the
        # smoothed close exceeds the smoothed open on the latest bar.
        ha_open_fast = variant(ha_open_values, fast_len)
        ha_close_fast = variant(ha_close_values, fast_len)
        haopen_fast = ha_open_fast[-1]
        haclose_fast = ha_close_fast[-1]
        haup_fast = haclose_fast > haopen_fast
        hadown_fast = haclose_fast <= haopen_fast
        logger.info('haup_fast:%s\n' % haup_fast)
        ha_open_slow = variant(ha_open_values, slow_len)
        ha_close_slow = variant(ha_close_values, slow_len)
        haopen_slow = ha_open_slow[-1]
        haclose_slow = ha_close_slow[-1]
        haup_slow = haclose_slow > haopen_slow
        hadown_slow = haclose_slow <= haopen_slow
        logger.info('haup_slow:%s\n' % haup_slow)
        ha_open_trend = variant(ha_open_values, trend_len)
        ha_close_trend = variant(ha_close_values, trend_len)
        haopen_trend = ha_open_trend[-1]
        haclose_trend = ha_close_trend[-1]
        haup_trend = haclose_trend > haopen_trend
        hadown_trend = haclose_trend <= haopen_trend
        logger.info('haup_trend:%s\n' % haup_trend)
        ha_open_longtrend = variant(ha_open_values, longtrend_len)
        ha_close_longtrend = variant(ha_close_values, longtrend_len)
        haopen_longtrend = ha_open_longtrend[-1]
        haclose_longtrend = ha_close_longtrend[-1]
        haup_longtrend = haclose_longtrend > haopen_longtrend
        hadown_longtrend = haclose_longtrend <= haopen_longtrend
        logger.info('haup_longtrend:%s\n' % haup_longtrend)
        # Earlier multi-resolution variants kept for reference
        # (change defval, e.g. 5 minutes -> 5; see __init__ for the base width):
        # resol_fast = self.input(defval=1, title="resolution", type=int)
        # source_fast = self.exchange.security(str(resol_fast) + 'm')
        # hadf_fast = heikinashi(source_fast, 1)
        # haopen_fast = hadf_fast['HA_open'][-1]
        # haclose_fast = hadf_fast['HA_close'][-1]
        # haup_fast = haclose_fast > haopen_fast
        # hadown_fast = haclose_fast <= haopen_fast
        # logger.info('haup_fast:%s\n' % haup_fast)
        # logger.info('hadown_fast:%s\n' % hadown_fast)
        # resol_slow = self.input(defval=4, title="resolution", type=int)
        # source_slow = self.exchange.security(str(resol_slow) + 'h')
        # hadf_slow = heikinashi(source_slow, 1)
        # haopen_slow = hadf_slow['HA_open'][-1]
        # haclose_slow = hadf_slow['HA_close'][-1]
        # haup_slow = haclose_slow > haopen_slow
        # hadown_slow = haclose_slow <= haopen_slow
        # logger.info('haup_slow:%s\n' % haup_slow)
        # logger.info('hadown_slow:%s\n' % hadown_slow)
        # resol_trend = self.input(defval=1, title="resolution", type=int)
        # source_trend = self.exchange.security(str(resol_trend) + 'd')
        # hadf_trend = heikinashi(source_trend)
        # haopen_trend = hadf_trend['HA_open'][-1]
        # haclose_trend = hadf_trend['HA_close'][-1]
        # haup_trend = haclose_trend > haopen_trend
        # hadown_trend = haclose_trend <= haopen_trend
        # logger.info('haup_trend:%s\n' % haup_trend)
        # logger.info('hadown_trend:%s\n' % hadown_trend)
        # NOTE: the bare string literals below are no-op statements that the
        # original author used as section markers; kept verbatim.
        " long "
        self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend, ha_open_longtrend))
        " short "
        self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend, ha_open_longtrend))
        # Alternative 1h-based entry logic kept for reference (2h windows on 1h bars):
        # source_entry = self.exchange.security('1h')
        #
        # hadf_entry = heikinashi(source_entry)
        # hadf_trading = heikinashi(hadf_entry)
        #
        # ha_open_longtrend_entry = variant(ha_open_values, 2)  # 2h
        # ha_close_longtrend_entry = variant(ha_close_values, 2)
        #
        # haopen_longtrend_entry = ha_open_longtrend_entry[-1]
        # haclose_longtrend_entry = ha_close_longtrend_entry[-1]
        # haup_longtrend_entry = haclose_longtrend_entry > haopen_longtrend_entry
        # hadown_longtrend_entry = haclose_longtrend_entry <= haopen_longtrend_entry
        #
        # logger.info('haup_longtrend_enty:%s\n' % haup_longtrend_entry)
        # logger.info('hadown_longtrend_entry:%s\n' % hadown_longtrend_entry)
        #
        # " long "
        # self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend_entry, ha_open_longtrend_entry))
        # " short "
        # self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend_entry, ha_open_longtrend_entry))
# OCC
class OCC(Bot):
    """Open/close channel strategy.

    Builds a smoothed open/close channel from the chosen resolution's candles
    and fades the short-term SMA when it leaves the channel; flattens all
    positions when the open/close divergence exceeds a threshold.
    """
    # Smoothing functions selectable by index via the `variant_type` input.
    variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull]
    # Timestamp of the last evaluated candle, used to skip duplicate bars.
    eval_time = None

    def __init__(self):
        # Run the bot on 1-minute candles.
        Bot.__init__(self, '1m')

    def ohlcv_len(self):
        # Number of 1-minute candles to retain (450 bars).
        return 15 * 30

    def options(self):
        # Hyperopt search space for parameter optimization.
        return {
            'variant_type': hp.quniform('variant_type', 0, len(self.variants) - 1, 1),
            'basis_len': hp.quniform('basis_len', 1, 30, 1),
            'resolution': hp.quniform('resolution', 1, 10, 1),
            'sma_len': hp.quniform('sma_len', 1, 15, 1),
            'div_threshold': hp.quniform('div_threshold', 1, 6, 0.1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        variant_type = self.input(defval=5, title="variant_type", type=int)
        basis_len = self.input(defval=19, title="basis_len", type=int)
        resolution = self.input(defval=2, title="resolution", type=int)
        sma_len = self.input(defval=9, title="sma_len", type=int)
        div_threshold = self.input(defval=3.0, title="div_threshold", type=float)
        source = self.exchange.security(str(resolution) + 'm')
        # Skip if this candle was already evaluated on a previous call.
        if self.eval_time is not None and \
                self.eval_time == source.iloc[-1].name:
            return
        series_open = source['open'].values
        series_close = source['close'].values
        variant = self.variants[variant_type]
        val_open = variant(series_open, basis_len)
        val_close = variant(series_close, basis_len)
        # Channel bounds: the higher/lower of the smoothed open and close.
        if val_open[-1] > val_close[-1]:
            high_val = val_open[-1]
            low_val = val_close[-1]
        else:
            high_val = val_close[-1]
            low_val = val_open[-1]
        sma_val = sma(close, sma_len)
        self.exchange.plot('val_open', val_open[-1], 'b')
        self.exchange.plot('val_close', val_close[-1], 'r')
        logger.info("occ:sma_val[-1]:" + str(sma_val[-1]))
        logger.info("occ:low_val:" + str(low_val))
        logger.info("occ:high_val:" + str(high_val))
        logger.info("lot:" + str(lot))
        logger.info("------------")
        # Mean-reversion entries: long below the channel, short above it,
        # with protective stops at the nearest integer channel bound.
        self.exchange.entry("Long", True, lot, stop=math.floor(low_val), when=(sma_val[-1] < low_val))
        self.exchange.entry("Short", False, lot, stop=math.ceil(high_val), when=(sma_val[-1] > high_val))
        # Flatten when the smoothed open/close divergence stays above the
        # threshold for two consecutive bars.
        open_close_div = sma(numpy.abs(val_open - val_close), sma_len)
        # BUGFIX: the original second condition read
        #     open_close_div[-2] > div_threshold < open_close_div[-2]
        # — a chained comparison whose two halves are identical; it reduces to
        # the single test below (behavior unchanged).  NOTE(review): if a
        # threshold *crossing* was intended, the second test should instead be
        # `open_close_div[-2] <= div_threshold` — confirm with the author.
        if open_close_div[-1] > div_threshold and \
                open_close_div[-2] > div_threshold:
            self.exchange.close_all()
        self.eval_time = source.iloc[-1].name
# TradingView
# TradingView alert bridge: polls Gmail for TradingView alert mails and turns
# the alert subject into buy/sell/exit orders on the exchange.
class TV(Bot):
    # Gmail subscription used to receive TradingView alert mails.
    subscriber = None

    def __init__(self):
        Bot.__init__(self, '1m')
        user_id = os.environ.get("GMAIL_ADDRESS")
        if user_id is None:
            raise Exception("Please set GMAIL_ADDRESS into env to use Trading View Strategy.")
        self.subscriber = GmailSub(user_id)
        # Only accept alert mails from TradingView's sender address.
        self.subscriber.set_from_address('<EMAIL>')

    def __on_message(self, messages):
        # Extract the Subject header of each mail and dispatch TradingView alerts.
        for message in messages:
            if 'payload' not in message:
                continue
            if 'headers' not in message['payload']:
                continue
            subject_list = [header['value']
                            for header in message['payload']['headers'] if header['name'] == 'Subject']
            if len(subject_list) == 0:
                continue
            subject = subject_list[0]
            # 'TradingViewアラート:' is TradingView's fixed (Japanese) alert
            # subject prefix; the remainder of the subject is the action text.
            if subject.startswith('TradingViewアラート:'):
                action = subject.replace('TradingViewアラート:', '')
                self.__action(action)

    def __action(self, action):
        # Map alert text onto an order: buy -> long, sell -> short, exit -> flat.
        lot = self.exchange.get_lot()
        if re.search('buy', action, re.IGNORECASE):
            self.exchange.entry('Long', True, lot)
        elif re.search('sell', action, re.IGNORECASE):
            # BUGFIX: a 'sell' alert must open a short (long=False); the
            # original passed True here and opened a long, duplicating the
            # 'buy' branch.  Every other strategy in this file enters shorts
            # with ("Short", False, ...).
            self.exchange.entry('Short', False, lot)
        elif re.search('exit', action, re.IGNORECASE):
            self.exchange.close_all()

    def run(self):
        # This strategy is event-driven (mail based): only live and stub modes
        # are meaningful, so reject hyperopt and backtest up front.
        if self.hyperopt:
            raise Exception("Trading View Strategy does not support hyperopt Mode.")
        elif self.back_test:
            raise Exception("Trading View Strategy does not support backtest Mode.")
        elif self.stub_test:
            self.exchange = BitMexStub()
            logger.info(f"Bot Mode : Stub")
        else:
            self.exchange = BitMex(demo=self.test_net)
            logger.info(f"Bot Mode : Trade")
        logger.info(f"Starting Bot")
        logger.info(f"Strategy : {type(self).__name__}")
        logger.info(f"Resolution : {self.resolution()}")
        logger.info(f"Balance : {self.exchange.get_balance()}")
        notify(f"Starting Bot\n"
               f"Strategy : {type(self).__name__}\n"
               f"Resolution : {self.resolution()}\n"
               f"Balance : {self.exchange.get_balance()/100000000} XBT")
        self.subscriber.on_message(self.__on_message)

    def stop(self):
        # Stop polling the Gmail subscription.
        self.subscriber.stop()
# Sample strategy
class Sample(Bot):
    """Toy strategy: flips a coin every bar and enters long or short."""

    def __init__(self):
        # First argument: the candle width the strategy runs on (1-minute bars).
        Bot.__init__(self, '1m')

    def options(self):
        # No tunable parameters for this sample.
        return {}

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        size = round(lot / 1000)
        go_long = random.randrange(2) == 0
        if go_long:
            self.exchange.entry("Long", True, size)
            logger.info(f"Trade:Long")
        else:
            self.exchange.entry("Short", False, size)
            logger.info(f"Trade:Short")
        print(lot)
class Cross(Bot):
    """Classic dual-SMA crossover: long on a golden cross, short on a dead cross."""

    def __init__(self):
        # Trade on 1-minute candles.
        Bot.__init__(self, '1m')

    def options(self):
        # Hyperopt search space for the two SMA window lengths.
        return {
            'fast_len': hp.quniform('fast_len', 1, 30, 1),
            'slow_len': hp.quniform('slow_len', 1, 30, 1),
        }

    def strategy(self, open, close, high, low, volume):
        lot = self.exchange.get_lot()
        short_window = self.input('fast_len', int, 9)
        long_window = self.input('slow_len', int, 16)
        quick = sma(close, short_window)
        lazy = sma(close, long_window)
        # Golden cross -> open long; dead cross -> open short.
        if crossover(quick, lazy):
            self.exchange.entry("Long", True, lot)
        if crossunder(quick, lazy):
            self.exchange.entry("Short", False, lot)
# Fibo2: Fibonacci-grid strategy.  Each bar derives a Fibonacci level grid
# from the high/low of the latest `resolution`-minute candle and maintains
# resting limit orders at the extension levels; once price trades through an
# outer level (its grid order filled), it parks a take-profit ("_w") counter-
# order one level back toward the range.
class Fibo2(Bot):
    # Balance snapshot taken at class-definition time, used for PnL logging.
    prebalance = BitMex(threading=False).get_balance()
    start = 0        # strategy() invocation counter; equals 1 on the first bar
    pre_fb0 = 0      # previous bar's fib-0 (candle low) level
    pre_fb100 = 0    # previous bar's fib-100 (candle high) level
    idx = 0          # suffix appended to order ids, bumped when the grid moves

    def __init__(self):
        # Evaluate on 1-minute candles.
        Bot.__init__(self, '1m')

    def options(self):
        # Hyperopt search space for parameter optimization.
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
        }

    def strategy(self, open, close, high, low, volume):
        self.start += 1
        flg_changed_timezone = False
        lot = self.exchange.get_lot()
        # for test lot
        lot = int(round(lot / 50))
        bitmex = BitMex(threading=False)
        price = bitmex.get_market_price()
        # Candle width of the grid; change defval (e.g. 5 minutes -> 5) or
        # switch the suffix below to 'h'/'d' for hourly/daily grids.
        resolution = self.input(defval=10, title="resolution", type=int)
        source = self.exchange.security(str(resolution) + 'm')  # cf. __init__
        series_high = source['high'].values
        series_low = source['low'].values
        # fb100/fb0: high/low of the most recent candle of the chosen width.
        fb100 = last(highest(series_high, 1))
        fb0 = last(lowest(series_low, 1))
        logger.info('resolution: %s' % resolution)
        logger.info('fb100_resol: %s' % fb100)
        logger.info('self.pre_fb100: %s' % self.pre_fb100)
        logger.info('fb0_resol: %s' % fb0)
        logger.info('self.pre_fb0: %s' % self.pre_fb0)
        # Fibonacci extensions above fb100 (short side) and below fb0 (long side).
        fb262 = math.ceil((fb100 - fb0) * 1.628 + fb100)
        fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100)
        fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100)
        fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
        fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
        fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
        fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382)
        fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00)
        fb0162 = math.ceil(fb0 - (fb100 - fb0) * 1.60)
        qty = bitmex.get_position_size()
        logger.info('current position qty: %s' % qty)
        # Profit/loss assessment: average entry price relative to the grid edges.
        # NOTE(review): qL0/qS100 are referenced only by order lines that were
        # disabled in the original source; kept for when those are re-enabled.
        longstatus = bitmex.get_position_avg_price() - fb0
        shortstatus = bitmex.get_position_avg_price() - fb100
        if bitmex.get_whichpositon() == 'LONG' and longstatus > 0:
            qL0 = lot * 1
            qS100 = abs(qty) + lot * 1
        elif bitmex.get_whichpositon() == 'SHORT' and shortstatus > 0:
            qL0 = abs(qty) + lot * 1
            qS100 = lot * 1
        else:
            qL0 = lot * 1
            qS100 = lot * 1
        # A new grid "time zone" starts once both edges have moved.
        if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
            flg_changed_timezone = True
            logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
            logger.info('cancel_all orders because new time zone')
        # On the very first bar, or whenever the grid moved, reset all orders
        # and lay out a fresh grid.
        if self.start == 1 or flg_changed_timezone:
            self.exchange.cancel_all()
            # First flatten any open position at a sensible limit level.
            stopprice = price
            if bitmex.get_whichpositon() == 'LONG':
                if price > fb50:
                    stopprice = fb50
                    logger.info('fb50')
                else:
                    stopprice = fb0
                    logger.info('fb0')
                logger.info('CL000 stopprice: %s' % stopprice)
                logger.info('CL000 --> Clear Long')
                self.exchange.order("CL000", False, qty, limit=stopprice, post_only=True)
            elif bitmex.get_whichpositon() == 'SHORT':
                if price <= fb50:
                    stopprice = fb50
                    logger.info('fb50')
                else:
                    stopprice = fb100
                    logger.info('fb100')
                logger.info('CS000 stopprice: %s' % stopprice)
                self.exchange.order("CS000", True, qty, limit=stopprice, post_only=True)
            else:
                logger.info('else case when self.start == 1 or flg_changed_timezone: %s ' % bitmex.get_whichpositon())
            # Short grid above the candle high.
            self.exchange.order("S262"+str(self.idx), False, lot*3, limit=fb262, post_only=True)
            self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, post_only=True)
            self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, post_only=True)
            # Long grid below the candle low.
            self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, post_only=True)
            self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, post_only=True)
            self.exchange.order("L0162"+str(self.idx), True, lot*3, limit=fb0162, post_only=True)
        if flg_changed_timezone is True:
            self.idx += 1
        # Look up which grid orders are still open for the current idx; a
        # missing order means it has filled.  (Unused "_w"/S100/L0 lookups
        # from the original were dropped — they were never read.)
        L038 = bitmex.get_open_order("L038"+str(self.idx))
        L0100 = bitmex.get_open_order("L0100"+str(self.idx))
        L0162 = bitmex.get_open_order("L0162"+str(self.idx))
        S262 = bitmex.get_open_order("S262"+str(self.idx))
        S200 = bitmex.get_open_order("S200"+str(self.idx))
        S138 = bitmex.get_open_order("S138"+str(self.idx))
        # Once price trades through a long level whose grid order is gone
        # (filled), park a take-profit one level back toward the range.
        if price <= fb038 and L038 is None:
            self.exchange.order("L038_w"+str(self.idx), False, lot*1, limit=fb0, post_only=True)
            logger.info('price <= fb038: %s' % fb038)
        if price <= fb0100 and L0100 is None:
            self.exchange.order("L0100_w"+str(self.idx), False, lot*1, limit=fb038, post_only=True)
            logger.info('price <= fb0100: %s' % fb0100)
        # BUGFIX: this branch originally tested `L0162 is not None`, re-issued
        # the duplicate id "L0100_w", and used limit=fb100.  Per the pattern of
        # the sibling branches (and the disabled reference code that ordered
        # "L0160_w" with limit=fb0100), it must fire when the L0162 grid order
        # has filled and park "L0162_w" one level back at fb0100.
        if price <= fb0162 and L0162 is None:
            self.exchange.order("L0162_w"+str(self.idx), False, lot*2, limit=fb0100, post_only=True)
            logger.info('price <= fb0162: %s' % fb0162)
        # Mirror logic for the short levels above the range.
        if price >= fb138 and S138 is None:
            self.exchange.order("S138_w"+str(self.idx), True, lot*1, limit=fb100, post_only=True)
            logger.info('price >= fb138: %s' % fb138)
        if price >= fb200 and S200 is None:
            self.exchange.order("S200_w"+str(self.idx), True, lot*1, limit=fb138, post_only=True)
            logger.info('price >= fb200: %s' % fb200)
        if price >= fb262 and S262 is None:
            self.exchange.order("S262_w"+str(self.idx), True, lot*2, limit=fb200, post_only=True)
        # Remember this bar's grid edges for time-zone change detection.
        self.pre_fb0 = fb0
        self.pre_fb100 = fb100
        # Debug output: grid levels and realized PnL.
        logger.info('fb200: %s' % fb200)
        logger.info('fb138: %s' % fb138)
        logger.info('fb100: %s' % fb100)
        logger.info('fb62: %s' % fb62)
        logger.info('fb50: %s' % fb50)
        logger.info('fb38: %s' % fb38)
        logger.info('fb0: %s' % fb0)
        logger.info('fb038: %s' % fb038)
        logger.info('fb0100: %s' % fb0100)
        diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
        realised_pnl = bitmex.get_margin()['realisedPnl']
        logger.info('prebalance():%s' % self.prebalance)
        logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
        logger.info('diff:%s' % diff)
        logger.info('realised_pnl:%s' % realised_pnl)
        logger.info('--------------------------------------------------')
class R2H5(Bot):
prebalance = BitMex(threading=False).get_balance()
start = 0
pre_fb0 = 0
pre_fb100 = 0
idx = 0
stratey_mode = 'R2' # or H5
variants = [sma, ema, double_ema, triple_ema, wma, ssma, hull, heikinashi]
eval_time = None
    def __init__(self):
        # Run the bot on 1-minute candles.
        Bot.__init__(self, '1m')
    def options(self):
        # Hyperopt search space for parameter optimization.
        return {
            'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1),
        }
def strategy(self, open, close, high, low, volume):
logger.info('-------------------------strategy start-----------------------\n')
lot = self.exchange.get_lot()
# for test
lot = int(round(lot / 2))
lot = 10
logger.info('lot:%s' % lot)
bitmex = BitMex(threading=False)
price = bitmex.get_market_price()
logger.info('price:%s\n' % price)
fast_len = self.input('fast_len', int, 20)
slow_len = self.input('slow_len', int, 120)
fast_sma = sma(close, fast_len)
slow_sma = sma(close, slow_len)
# sma
# for i in range(1, 5):
# logger.info('fast_sma 20 [%s] *******: %s' % (-i, fast_sma[-i]))
# logger.info('slow_sma 120 ******: %s' % slow_sma[-1])
# rsi
rsi_len = self.input('rsi_len', int, 2)
fast_rsi = rsi(close, rsi_len)
rsi_stoplen = self.input('rsi_len', int, 5)
fast_slow_rsi = rsi(close, rsi_len)
for i in range(1, 21):
if fast_rsi[-i] >= 95:
logger.info('fast_rsi2 ***** [%s]: %s >= 95' % (-i, fast_rsi[-i]))
elif fast_rsi[-i] <= 5:
logger.info('fast_rsi2 ***** [%s]: %s <= 5' % (-i, fast_rsi[-i]))
else:
logger.info('fast_rsi2 ***** [%s]: %s' % (-i, fast_rsi[-i]))
# willr
slow_willr = willr(high, low, close, period=960)
logger.info('fast_willr ***** %s' % slow_willr[-1])
# bband
# bband_len = self.input('bbandup_len', int, 20)
# bbup, bbmid, bblow = bbands(close, timeperiod=bband_len, nbdevup=2, nbdevdn=2, matype=0)
# for i in range(1, 2):
# logger.info('fast_bband ***** [%s], bbup: %s, bbmid: %s, bblow: %s' % (-i, bbup[-i], bbmid[-i], bblow[-i]))
# heikinashi
resolution = self.input(defval=1, title="resolution", type=int)
variant_type = self.input(defval=5, title="variant_type", type=int)
source = self.exchange.security(str(resolution) + 'm')
fast_len = self.input('fast_len', int, 1)
middle_len = self.input('middle_len', int, 5)
slow_len = self.input('slow_len', int, 30)
trend_len = self.input('slow_len', int, 60)
longtrend_len = self.input('slow_len', int, 120)
longlongtrend_len = self.input('slow_len', int, 240)
hadf = heikinashi(source)
hadf_fast = heikinashi(hadf)
ha_open_values = hadf_fast['HA_open'].values
ha_close_values = hadf_fast['HA_close'].values
variant = self.variants[variant_type]
ha_open_fast = variant(ha_open_values, fast_len)
ha_close_fast = variant(ha_close_values, fast_len)
haopen_fast = ha_open_fast[-1]
haclose_fast = ha_close_fast[-1]
haup_fast = haclose_fast > haopen_fast
hadown_fast = haclose_fast <= haopen_fast
logger.info('haup_fast:%s' % haup_fast)
ha_open_middle = variant(ha_open_values, middle_len)
ha_close_middle = variant(ha_close_values, middle_len)
haopen_middle = ha_open_middle[-1]
haclose_middle = ha_close_middle[-1]
haup_middle = haclose_middle > haopen_middle
hadown_middle = haclose_middle <= haopen_middle
logger.info('haup_middle:%s' % haup_middle)
ha_open_slow = variant(ha_open_values, slow_len)
ha_close_slow = variant(ha_close_values, slow_len)
haopen_slow = ha_open_slow[-1]
haclose_slow = ha_close_slow[-1]
haup_slow = haclose_slow > haopen_slow
hadown_slow = haclose_slow <= haopen_slow
logger.info('haup_slow:%s' % haup_slow)
ha_open_trend = variant(ha_open_values, trend_len)
ha_close_trend = variant(ha_close_values, trend_len)
haopen_trend = ha_open_trend[-1]
haclose_trend = ha_close_trend[-1]
haup_trend = haclose_trend > haopen_trend
hadown_trend = haclose_trend <= haopen_trend
logger.info('haup_trend:%s' % haup_trend)
ha_open_longtrend = variant(ha_open_values, longtrend_len)
ha_close_longtrend = variant(ha_close_values, longtrend_len)
haopen_longtrend = ha_open_longtrend[-1]
haclose_longtrend = ha_close_longtrend[-1]
haup_longtrend = haclose_longtrend > haopen_longtrend
hadown_longtrend = haclose_longtrend <= haopen_longtrend
logger.info('haup_longtrend:%s\n' % haup_longtrend)
ha_open_longlongtrend = variant(ha_open_values, longlongtrend_len)
ha_close_longlongtrend = variant(ha_close_values, longlongtrend_len)
haopen_longlongtrend = ha_open_longlongtrend[-1]
haclose_longlongtrend = ha_close_longlongtrend[-1]
haup_longlongtrend = haclose_longlongtrend > haopen_longlongtrend
hadown_longlongtrend = haclose_longlongtrend <= haopen_longlongtrend
logger.info('haup_longlongtrend:%s\n' % haup_longlongtrend)
# resolutionh = self.input(defval=1, title="resolution", type=int)
# variant_type = self.input(defval=5, title="variant_type", type=int)
# sourceh = self.exchange.security(str(resolutionh) + 'h')
#
# hadf_h = heikinashi(sourceh)
# hadf_longtrend_h = heikinashi(hadf_h)
#
# ha_open_values_h = hadf_longtrend_h['HA_open'].values
# ha_close_values_h = hadf_longtrend_h['HA_close'].values
# variant = self.variants[variant_type]
#
# ha_open_longtrend_h = variant(ha_open_values_h, 4) # 1시간 1, 2시간 2
# ha_close_longtrend_h = variant(ha_close_values_h, 4)
# haopen_longtrend_h = ha_open_longtrend_h[-1]
# haclose_longtrend_h = ha_close_longtrend_h[-1]
# haup_longtrend_h = haclose_longtrend_h > haopen_longtrend_h
# hadown_longtrend_h = haclose_longtrend_h <= haopen_longtrend_h
# # logger.info('haup_longtrend_h:%s\n' % haup_longtrend_h)
self.start += 1
flg_changed_timezone = False
lot = self.exchange.get_lot()
# for test lot
lot = int(round(lot / 10))
# lot = 1
bitmex = BitMex(threading=False)
price = bitmex.get_market_price()
resolution = self.input(defval=1, title="resolution", type=int) # defval 변경, 예) 5분 --> 5, 'm' or 1시간 1, 'h', 1Day 1, 'd'
source = self.exchange.security(str(resolution) + 'h') # def __init__ 비교
series_high = source['high'].values
series_low = source['low'].values
fb100 = last(highest(series_high, 1)) # 1시간 1, 1D의 경우는 resolution도 변경
fb0 = last(lowest(series_low, 1))
# for test
# fb100 = price + 15
# fb0 = price - 15
# fb262 = math.ceil((fb100 - fb0) * 1.628 + fb100)
# fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100)
# fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100)
# fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100)
fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0)
fb50 = math.ceil((fb100 - fb0) / 2 + fb0)
fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0)
# fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382)
# fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618)
# fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00)
# fb0162 = math.ceil(fb0 - (fb100 - fb0) * 1.60)
qty= bitmex.get_position_size()
logger.info('current position qty: %s' % qty)
# 익손평가
longstatus = bitmex.get_position_avg_price() - fb0
shortstatus = bitmex.get_position_avg_price() - fb100
if bitmex.get_whichpositon() == 'LONG' and longstatus > 0:
qL0 = lot * 1
qS100 = abs(qty) + lot * 1
elif bitmex.get_whichpositon() == 'SHORT'and shortstatus > 0:
qL0 = abs(qty) + lot * 1
qS100 = lot * 1
else:
qL0 = lot * 1
qS100 = lot * 1
if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100:
flg_changed_timezone = True
logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone)
logger.info('cancel_all orders because new time zone')
# when this program start, execute only once
# if self.start == 1 or flg_changed_timezone:
# for debug
logger.info('fb100: %s' % fb100)
logger.info('fb62: %s' % fb62)
# logger.info('fb50: %s' % fb50)
logger.info('fb38: %s' % fb38)
logger.info('fb0: %s' % fb0)
logger.info('bitmex.get_open_order(Long): %s ' % bitmex.get_open_order('Long'))
logger.info('bitmex.get_open_order(Short): %s ' % bitmex.get_open_order('Short'))
logger.info('bitmex.get_open_order(LCatch): %s ' % bitmex.get_open_order('LCatch'))
logger.info('bitmex.get_open_order(SCatch): %s ' % bitmex.get_open_order('SCatch'))
if self.stratey_mode == 'R2':
logger.info('=============== stratey_mode = R2 ==============')
if self.start == 1:
self.exchange.cancel_all()
# 공격적인 로직 추가시
# if haup_middle and haup_slow and haup_trend and haup_longtrend and fast_rsi[-1] <= 20: # entry long condition on Short Trend
# logger.info('Now Short conditions: fast_rsi[-1] <= 10 0k --> %s' % fast_rsi[-1])
# self.exchange.order('Long', True, lot, limit=price - 0.5, post_only=True)
# elif hadown_middle and hadown_slow and hadown_trend and hadown_longtrend and fast_rsi[-1] >= 80: # # entry short condition on Long Trend
# logger.info('Now Long conditions: fast_rsi[-1] >= 90 0k --> %s' % fast_rsi[-1])
# self.exchange.order('Short', False, lot, limit=price + 0.5, post_only=True)
# 일반적인 로직
if bitmex.get_whichpositon() == 'LONG':
logger.info('---------------------->>LONG order status')
logger.info('ordered price:%s' % bitmex.get_position_avg_price())
if fast_rsi[-1] >= 95: # 스탑로직
logger.info('>>fast_rsi[-1] >= 95')
self.exchange.order('StopLong', False, qty, limit=price + 0.5, post_only=True)
elif hadown_slow and hadown_trend: # 손절로직
logger.info('>>hadown_trend and hadown_longtrend Long -> Short : slow trend changed')
self.exchange.close_all()
self.exchange.cancel_all()
# # 돌파로직
# if hadown_fast and hadown_middle and hadown_slow: # 모두 숏이면
# self.exchange.order('ChBrkShort', False, qty*2)
# self.exchange.cancel_all()
# else:
# self.exchange.close_all()
# elif hadown_trend and hadown_longtrend: # 손절로직 or 돌파로직
# logger.info('>>hadown_trend and hadown_longtrend Long -> Short : trend changed')
# # 돌파로직
# if hadown_fast and hadown_middle and hadown_slow: # 모두 숏이면
# self.exchange.order('ChBrkShort', False, qty*2)
# self.exchange.cancel_all()
# else:
# self.exchange.close_all()
# else:
# logger.info('>>StopLong')
# if price < fb100: # 초과익절 로직
# self.exchange.order('StopLong1', False, qty, limit=fb100, post_only=True)
# # self.exchange.order('StopLong2', False, qty, limit=fb62, post_only=True)
elif bitmex.get_whichpositon() == 'SHORT':
logger.info('---------------------->>SHORT order status')
logger.info('ordered price:%s' % bitmex.get_position_avg_price())
if fast_rsi[-1] <= 5: # 스탑로직
logger.info('>>fast_rsi[-1] <= 5')
self.exchange.order('StopShort', True, qty, limit=price - 0.5, post_only=True)
elif haup_slow and haup_trend: # 손절로직
logger.info('>>haup_trend and haup_longtrend Short -> Long : slow trend changed')
self.exchange.close_all()
self.exchange.cancel_all()
# elif haup_trend and haup_longtrend: # 손절로직 or 돌파로직
# logger.info('>>haup_trend and haup_longtrend Short -> Long : trend changed')
# # 돌파로직
# if haup_fast and haup_middle and haup_slow: # 모두 롱이면
# self.exchange.order('ChBrkLong', True, qty*2)
# self.exchange.cancel_all()
# else:
# self.exchange.close_all()
# else:
# logger.info('>>StopShort')
# if price > fb0: # 초과익절로직
# self.exchange.order('StopShort1', False, qty, limit=fb0, post_only=True)
# # self.exchange.order('StopShort2', False, qty, limit=fb38, post_only=True)
# logger.info('>>StopShort')
else:
logger.info('else: %s ' % bitmex.get_whichpositon())
if haup_trend and haup_longtrend:
logger.info('+++++++++++ LLLLLLong Trend +++++++++++++++')
if bitmex.get_open_order('LCatch') is not None: # catch shooting logic
logger.info('There are LongOver orders , now changed trend, and cancel all')
self.exchange.cancel_all()
self.exchange.order('SCatch', True, lot, limit=fb0, post_only=True)
elif hadown_trend and hadown_longtrend:
logger.info('- - - - - - SSSSSSort Trend - - - - - - -')
if bitmex.get_open_order('SCatch') is not None: # catch shooting logic
logger.info('There are ShortOver orders , now changed trend, and cancel all')
self.exchange.cancel_all()
self.exchange.order('LCatch', False, lot, limit=fb100, post_only=True)
# if bitmex.get_open_order('LCatch') is not None and haup_trend:
# bitmex.cancel('LCatch')
#
# if bitmex.get_open_order('SCatch') is not None and hadown_trend:
# bitmex.cancel('SCatch')
if haup_slow and haup_trend and haup_longtrend and fast_rsi[-1] <= 5: # entry long condition on Short Trend
logger.info('Now Short conditions: fast_rsi[-1] <= 5 0k --> %s' % fast_rsi[-1])
self.exchange.order('Long', True, lot, limit=price - 0.5, post_only=True)
elif hadown_slow and hadown_trend and hadown_longtrend and fast_rsi[-1] >= 95: # # entry short condition on Long Trend
logger.info('Now Long conditions: fast_rsi[-1] >= 95 0k --> %s' % fast_rsi[-1])
self.exchange.order('Short', False, lot, limit=price + 0.5, post_only=True)
if (slow_willr[-1] < -30) and haup_fast and haup_middle and haup_slow and haup_trend and haup_longtrend and haup_longlongtrend:
logger.info('in for H5UP')
self.stratey_mode = 'H5UP'
self.exchange.order('H5UP', True, lot)
bitmex.cancel_all()
elif (slow_willr[-1] > -70) and hadown_fast and hadown_middle and hadown_slow and hadown_trend and hadown_longtrend and hadown_longtrend:
logger.info('in for H5DOWN')
self.stratey_mode = 'H5DOWN'
self.exchange.order('H5DOWN', False, lot)
bitmex.cancel_all()
elif self.stratey_mode == 'H5UP':
logger.info('=============== stratey_mode = H5UP ==============')
if bitmex.get_whichpositon() != 'LONG' and (slow_willr[-1] < -30) and haup_fast and haup_middle and haup_slow and haup_trend and haup_longtrend and haup_longlongtrend:
self.exchange.order('H5UP', True, lot)
elif bitmex.get_whichpositon() != 'LONG' and not haup_fast and not haup_middle:
# back to R2 mode
self.stratey_mode = 'R2'
bitmex.cancel_all()
elif bitmex.get_whichpositon() == 'LONG' and (slow_willr[-1] > -11) or (haup_fast and not haup_middle and not haup_slow and not haup_longtrend):
# stop order and back to R2 mode
self.exchange.order('H5UPStop', False, lot)
self.stratey_mode = 'R2'
bitmex.cancel_all()
elif self.stratey_mode == 'H5DOWN':
logger.info('=============== stratey_mode = H5DOWN ==============')
if bitmex.get_whichpositon() != 'SHORT' and (slow_willr[-1] > -70) and (hadown_fast and hadown_middle and hadown_slow and hadown_trend and hadown_longtrend and hadown_longlongtrend):
self.exchange.order('H5DOWN', False, lot)
elif bitmex.get_whichpositon() != 'SHORT' and not hadown_fast and not hadown_middle:
# back to R2 mode
self.stratey_mode = 'R2'
bitmex.cancel_all()
elif bitmex.get_whichpositon() == 'SHORT' and (slow_willr[-1] < -91) or (not hadown_fast and not hadown_middle and not hadown_slow and not hadown_longtrend): # and not hadown_slow): #(fast_slow_rsi <= 93) or
# stop order and back to R2 mode
self.exchange.order('H5DOWNStop', True, lot)
self.stratey_mode = 'R2'
bitmex.cancel_all()
else:
logger.info('=============== stratey_mode = Else ==============')
# back to R2 mode
self.stratey_mode = 'R2'
bitmex.cancel_all()
if flg_changed_timezone is True:
self.idx += 1
# save pre-timezone's fb0, fb100 values
self.pre_fb0 = fb0
self.pre_fb100 = fb100
diff = (abs(bitmex.get_balance() - abs(self.prebalance)))
realised_pnl = bitmex.get_margin()['realisedPnl']
logger.info('prebalance():%s' % self.prebalance)
logger.info('bitmex.get_balance():%s' % bitmex.get_balance())
logger.info('diff:%s' % diff)
logger.info('realised_pnl:%s' % realised_pnl)
logger.info('-------------------------strategy end-----------------------\n')
|
en
| 0.32305
|
# coding: UTF-8 # logger.info('strategy start ctime : %s' % time.ctime()) # start = time.time() # 시작 시간 저장 # lot = self.exchange.get_lot() # willr for five willilams # logger.info('---- a ----') # for i in range(1, 5): # logger.info('a [%s] *******: %s' % (-i, a[-i])) # logger.info('---- b ----') # for i in range(1, 5): # logger.info('b [%s] *******: %s' % (-i, b[-i])) # logger.info('---- c ----') # for i in range(1, 5): # logger.info('c [%s] *******: %s' % (-i, c[-i])) # logger.info('---- x ----') # for i in range(1, 5): # logger.info('x [%s] *******: %s' % (-i, x[-i])) # logger.info('---- y ----') # for i in range(1, 5): # logger.info('x [%s] *******: %s' % (-i, y[-i])) # set condition # buyCloseCon = sellRCIfillerCon # sellCloseCon = buyRCIfillerCon # if buyCon: # self.exchange.entry("Long", True, lot) # self.inlong = True # if buyCloseCon and self.inlong: # self.exchange.close_all() # self.inlong = False # if sellCon: # self.exchange.entry("Short", False, lot) # self.inshort = True # if sellCloseCon and self.inlong: # self.exchange.close_all() # self.inshort = False # logger.info('all strategy processing time : %s' % str(time.time() - start)) # channel break out # sma cross # for test # lot = int(round(lot / 10)) # long # similar stop function # short # supertrend # for test # lot = int(round(lot / 100)) # up = (high + low) / 2 - (factor * atr(high, low, close, period=period)) # logger.info('up:%s\n' % up) # dn = (high + low) / 2 + (factor * atr(high, low, close, period=period)) # logger.info('atrvar: %s' % atrvar[-1]) # defval 변경, 예) 5분 --> 5 # init 참고 # logger.info('supertrend:%s' % supertrenddf.describe()) # logger.info('supertrend:%s' % supertrenddf.columns) # logger.info('source:%s\n' % source[-1]) # self.exchange.close_all() # logger.info('bitmex.get_margin():%s' % bitmex.get_margin()) # logger.info('bitmex.get_position():%s' % bitmex.get_position()) # logger.info('bitmex.get_balance():%s' % bitmex.get_balance()) # logger.info('get_pre_prebalance:%s' % 
get_pre_prebalance(self.prebalance, bitmex.get_balance())) # # self.exchange.close_all() # # self.exchange.cancel_all() # logic https: // stock79.tistory.com / 177 # for test lot # variants settings # defval 변경, 예) 5분 --> 5 # def __init__ 비교 # conditions # show infomations # entry # and (not supertrendtrend and supertrendtrend2) and rsi2_overbought: # closing # add more entry # stop loss # self.exchange.cancel_all() #and rsi2_overbought and price_over: # closing # add more entry # stop loss # <NAME> and rci # 시작 시간 저장 # lot = self.exchange.get_lot() # for test lot # lot = int(round(lot / 20)) # willr for five willilams # logger.info('---- a ----') # for i in range(1, 5): # logger.info('a [%s] *******: %s' % (-i, a[-i])) # logger.info('---- b ----') # for i in range(1, 5): # logger.info('b [%s] *******: %s' % (-i, b[-i])) # logger.info('---- c ----') # for i in range(1, 5): # logger.info('c [%s] *******: %s' % (-i, c[-i])) # logger.info('---- x ----') # for i in range(1, 5): # logger.info('x [%s] *******: %s' % (-i, x[-i])) # logger.info('---- y ----') # for i in range(1, 5): # logger.info('x [%s] *******: %s' % (-i, y[-i])) # buyCloseCon = True if a[-1] > -10 else False # sellCloseCon = True if a[-1] < -90 else False # set condition # buyCloseCon = sellRCIfillerCon # sellCloseCon = buyRCIfillerCon # init # self.exchange.close("Long") # self.exchange.close("Short") # <NAME> and Fibo # 시작 시간 저장 # lot = self.exchange.get_lot() # # for test lot # # lot = int(round(lot / 20)) # channel breakout for 1D # 시작 시간 저장 # self.exchange.entry("ChLong", True, round(lot), stop=up) # self.exchange.entry("ChShort", False, round(lot), stop=dn) # fibo for 1h # 1시간 1, 1D의 경우는 resolution도 변경 # willr for five willilams # logger.info('---- a ----') # for i in range(1, 5): # logger.info('a [%s] *******: %s' % (-i, a[-i])) # logger.info('---- b ----') # for i in range(1, 5): # logger.info('b [%s] *******: %s' % (-i, b[-i])) # logger.info('---- c ----') # for i in range(1, 5): # 
logger.info('c [%s] *******: %s' % (-i, c[-i])) # logger.info('---- x ----') # for i in range(1, 5): # logger.info('x [%s] *******: %s' % (-i, x[-i])) # logger.info('---- y ----') # for i in range(1, 5): # logger.info('x [%s] *******: %s' % (-i, y[-i])) # if self.inlong: # self.inlong = True # # if self.inshort: # self.inshort = True # 1시간 1, 1D의 경우는 resolution도 변경 # if bitmex.get_whichpositon() is not None: # logger.info('-- bitmex.get_whichpositon is not None --') # and (not self.inlong)) and (not self.inshort): #and (not self.inlong)) and (not self.inshort) --') # init # set fibo conditions # elif (flg_changed_timezone and self.inlong and not self.inshort): # logger.info('-- (flg_changed_timezone and self.inlong and not self.inshort) --') # self.exchange.order("FShort", False, lot, limit=fb200, post_only=True) # elif (flg_changed_timezone and not self.inlong and self.inshort): # logger.info('-- (flg_changed_timezone and not self.inlong and self.inshort) --') # self.exchange.order("FLong", True, lot, limit=fb0100, post_only=True) # self.exchange.close("Long") # self.exchange.close("Short") # self.inlong = True # self.inshort = True # save pre-timezone's fb0, fb100 values # rci # Fibonacci Retracement & Expansion Strategy # class Fibo(Bot): # prebalance = BitMex(threading=False).get_balance() # start = 0 # pre_fb0 = 0 # pre_fb100 = 0 # idx = 0 # def __init__(self): # Bot.__init__(self, '1m') # # def options(self): # return { # 'rcv_short_len': hp.quniform('rcv_short_len', 1, 10, 1), # } # # def strategy(self, open, close, high, low, volume): # self.start += 1 # flg_changed_timezone = False # # lot = self.exchange.get_lot() # # for test lot # # lot = int(round(lot / 20)) # lot = 500 # bitmex = BitMex(threading=False) # price = bitmex.get_market_price() # # # sma_base_l = self.input('sma_short_len', int, 200) # # resolution = self.input(defval=5, title="resolution", type=int) # defval 변경, 예) 5분 --> 5, 'm' or 1시간 1, 'h', 1Day 1, 'd' # source = 
self.exchange.security(str(resolution) + 'm') # def __init__ 비교 # logger.info('source: %s' % source) # # series_high = source['high'].values # series_low = source['low'].values # # fb100 = last(highest(series_high, 1)) # 1시간 1, 1D의 경우는 resolution도 변경 # fb0 = last(lowest(series_low, 1)) # # logger.info('resolution: %s' % resolution) # logger.info('fb100_resol: %s' % fb100) # logger.info('self.pre_fb100: %s' % self.pre_fb100) # logger.info('fb0_resol: %s' % fb0) # logger.info('self.pre_fb0: %s' % self.pre_fb0) # # # # # for test # # fb100 = price + 15 # # fb0 = price - 15 # # # 최근 1시간을 본봉단위로 획득 # # fibo_l = self.input('length', int, 1440) # 1Day = 60min * 24hr # # fibo_l = self.input('length', int, 60) # 1Day = 60min * 24hr # # fibo100 = last(highest(high, fibo_l)) # # fibo0 = last(lowest(low, fibo_l)) # # fb62 = math.ceil((fb100 - fb0) * 0.618 + fb0) # fb38 = math.ceil((fb100 - fb0) * 0.382 + fb0) # fb50 = math.ceil((fb100 - fb0) / 2 + fb0) # # fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100) # fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100) # fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100) # # fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382) # fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618) # fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00) # # qty= bitmex.get_position_size() # # # 익손평가 # longstatus = bitmex.get_position_avg_price() - fb0 # shortstatus = bitmex.get_position_avg_price() - fb100 # gprice = price # # # if bitmex.get_whichpositon() == 'LONG' and longstatus > 0: # # qL0 = lot * 1 # # qS100 = abs(qty) + lot * 1 # # gprice = price - 1 # # elif bitmex.get_whichpositon() == 'SHORT'and shortstatus > 0: # # qL0 = abs(qty) + lot * 1 # # qS100 = lot * 1 # # gprice = price + 1 # # else: # # qL0 = lot * 1 # # qS100 = lot * 1 # # qS100 = lot*1 # qL0 = lot*1 # # if self.pre_fb0 != 0 and fb0 != self.pre_fb0 and fb100 != self.pre_fb100: # flg_changed_timezone = True # logger.info('+++++++ flg_changed_timezone: %s' % flg_changed_timezone) # if bitmex.get_whichpositon() is 
None: # self.exchange.cancel_all() # # # if self.start == 1: # # short position # self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, post_only=True) # # self.exchange.order("S162"+str(self.idx), False, lot*1, limit=fb162, post_only=True) # self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, post_only=True) # self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, post_only=True) # # # long position # self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, post_only=True) # self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, post_only=True) # # self.exchange.order("L062"+str(self.idx), True, lot*1, limit=fb062, post_only=True) # self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, post_only=True) # # # L0 = bitmex.get_open_order("L0"+str(self.idx)) # L038 = bitmex.get_open_order("L038"+str(self.idx)) # L062 = bitmex.get_open_order("L062"+str(self.idx)) # L0100 = bitmex.get_open_order("L0100"+str(self.idx)) # # S200 = bitmex.get_open_order("S200"+str(self.idx)) # S162 = bitmex.get_open_order("S162"+str(self.idx)) # S138 = bitmex.get_open_order("S138"+str(self.idx)) # S100 = bitmex.get_open_order("S100"+str(self.idx)) # # # # # logger.info('(L0 is None): %s' % (L0 is None)) # if flg_changed_timezone is True: # self.idx += 1 # # # 이전 self.idx-1 타임존의 기본 주문만 취소, 나머지 역지정 된것 들은 그냥 둔다. 
# # self.exchange.cancel("L0"+str(self.idx-1)) # # self.exchange.cancel("L038"+str(self.idx-1)) # # self.exchange.cancel("L062"+str(self.idx-1)) # # self.exchange.cancel("L0100"+str(self.idx-1)) # # self.exchange.cancel("S200"+str(self.idx-1)) # # self.exchange.cancel("S162"+str(self.idx-1)) # # self.exchange.cancel("S138"+str(self.idx-1)) # # self.exchange.cancel("S100"+str(self.idx-1)) # self.exchange.cancel_all() # longshort = True # if bitmex.get_position_size() > 0: # longshort = False # if bitmex.get_position_size() < 0: # longshort = True # # logger.info('bitmex.get_position_size(): %s' % bitmex.get_position_size()) # if bitmex.get_position_size() != 0: # self.exchange.order("Garbage", longshort, bitmex.get_position_size(), limit=gprice, post_only=True) # # # self.exchange.cancel_all() # # self.exchange.close_all() # entry order # # long position # # if price > fb0: # logger.info('price > fb0:%') # logger.info('flg_changed_timezone: %s' % flg_changed_timezone) # self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, when=(L0 is None or flg_changed_timezone), post_only=True) # self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, when=(L038 is None or flg_changed_timezone), post_only=True) # # self.exchange.order("L062"+str(self.idx), True, lot*1, limit=fb062, when=(L062 is None or flg_changed_timezone), post_only=True) # self.exchange.order("L0100"+str(self.idx), True, lot*2, limit=fb0100, when=(L0100 is None or flg_changed_timezone), post_only=True) # # # short position # if price < fb100: # logger.info('price < fb100' ) # logger.info('flg_changed_timezone: %s' % flg_changed_timezone) # # self.exchange.order("S200"+str(self.idx), False, lot*2, limit=fb200, when=(S200 is None or flg_changed_timezone), post_only=True) # self.exchange.order("S162"+str(self.idx), False, lot*1, limit=fb162, when=(S162 is None or flg_changed_timezone), post_only=True) # self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, when=(S138 is None or 
flg_changed_timezone), post_only=True) # self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, when=(S100 is None or flg_changed_timezone), post_only=True) # # L0_w = bitmex.get_open_order("L0_w"+str(self.idx)) # L038_w = bitmex.get_open_order("L038_w"+str(self.idx)) # L062_w = bitmex.get_open_order("L062_w"+str(self.idx)) # L0100_w = bitmex.get_open_order("L0100_w"+str(self.idx)) # # S100_w = bitmex.get_open_order("S100_w"+str(self.idx)) # S138_w = bitmex.get_open_order("S138_w"+str(self.idx)) # S162_w = bitmex.get_open_order("S162_w"+str(self.idx)) # S200_w = bitmex.get_open_order("S200_w"+str(self.idx)) # # # # win order of stoplimit # if price <= fb0: #and L0 is None: # self.exchange.order("L0_w"+str(self.idx), False, lot*1, limit=fb38, stop=fb0) # post_only=True) # logger.info('rice <= fb0: %s' % fb0) # if price <= fb038: # and L038 is None: # self.exchange.order("L038_w"+str(self.idx), False, lot*1, limit=fb0, stop=fb038) # logger.info('price <= fb038: %s' % fb038) # if price <= fb062: # and L062 is None: # self.exchange.order("L062_w"+str(self.idx), False, lot*1, limit=fb038, stop=fb062) # logger.info('price <= fb062: %s' % fb062) # if price <= fb0100: # and L0100 is None: # self.exchange.order("L0100_w"+str(self.idx), False, lot*2, limit=fb062, stop=fb0100) # logger.info('price <= fb0100: %s' % fb0100) # # # if price >= fb100: # and S100 is None: # logger.info('price >= fb100: %s' % fb100) # self.exchange.order("S100_w"+str(self.idx), True, lot*1, limit=fb62, stop=fb0100) # if price >= fb138: # and S138 is None: # self.exchange.order("S138_w"+str(self.idx), True, lot*1, limit=fb100, stop=fb138) # logger.info('price >= fb138: %s' % fb138) # if price >=fb162: # and S162 is None: # self.exchange.order("S162_w"+str(self.idx), True, lot*1, limit=fb138, stop=fb162) # logger.info('price >= fb162 %s' % fb162) # if price >= fb200: # and S200 is None: # self.exchange.order("S200_w"+str(self.idx), True, lot*2, limit=fb162, stop=fb200) # 
logger.info('price >= fb200: %s' % fb200) # # # logger.info('bitmex.get_margin():%s' % bitmex.get_margin()) # # logger.info('bitmex.get_position():%s' % bitmex.get_position()) # # self.pre_fb0 = fb0 # self.pre_fb100 = fb100 # # # for debug # logger.info('fb200: %s' % fb200) # logger.info('fb162: %s' % fb162) # logger.info('fb138: %s' % fb138) # logger.info('fb100: %s' % fb100) # logger.info('fb62: %s' % fb62) # logger.info('fb50: %s' % fb50) # logger.info('fb38: %s' % fb38) # logger.info('fb0: %s' % fb0) # logger.info('fb038: %s' % fb038) # logger.info('fb062: %s' % fb062) # logger.info('fb0100: %s' % fb0100) # # diff = (abs(bitmex.get_balance() - abs(self.prebalance))) # # realised_pnl = bitmex.get_margin()['realisedPnl'] # # logger.info('prebalance():%s' % self.prebalance) # logger.info('bitmex.get_balance():%s' % bitmex.get_balance()) # logger.info('diff:%s' % diff) # logger.info('realised_pnl:%s' % realised_pnl) # # logger.info('--------------------------------------------------') # rsi2 # logic https: // stock79.tistory.com / 177 # for test lot # variants settings # defval 변경, 예) 5분 --> 5 # def __init__ 비교 # conditions # show infomations # entry #long trend # closing # add more entry # stop loss # self.exchange.cancel_all() # self.exchange.cancel_all() # short trend # closing # add more entry # stop loss # for test # lot = int(round(lot / 2)) # lot = 10 # fast_len = self.input('fast_len', int, 1) # slow_len = self.input('slow_len', int, 21) # trend_len = self.input('slow_len', int, 55) # longtrend_len = self.input('slow_len', int, 233) # fast_len = self.input('fast_len', int, 1) # slow_len = self.input('slow_len', int, 55) # trend_len = self.input('slow_len', int, 240) # longtrend_len = self.input('slow_len', int, 233) # for various minutes source # resol_fast = self.input(defval=1, title="resolution", type=int) # defval 변경, 예) 5분 --> 5 # source_fast = self.exchange.security(str(resol_fast) + 'm') # init 참고 # hadf_fast = heikinashi(source_fast, 1) # 
haopen_fast = hadf_fast['HA_open'][-1] # haclose_fast = hadf_fast['HA_close'][-1] # haup_fast = haclose_fast > haopen_fast # hadown_fast = haclose_fast <= haopen_fast # logger.info('haup_fast:%s\n' % haup_fast) # logger.info('hadown_fast:%s\n' % hadown_fast) # resol_slow = self.input(defval=4, title="resolution", type=int) # defval 변경, 예) 5분 --> 5 # source_slow = self.exchange.security(str(resol_slow) + 'h') # init 참고 # hadf_slow = heikinashi(source_slow, 1) # haopen_slow = hadf_slow['HA_open'][-1] # haclose_slow = hadf_slow['HA_close'][-1] # haup_slow = haclose_slow > haopen_slow # hadown_slow = haclose_slow <= haopen_slow # logger.info('haup_slow:%s\n' % haup_slow) # logger.info('hadown_slow:%s\n' % hadown_slow) # resol_trend = self.input(defval=1, title="resolution", type=int) # defval 변경, 예) 5분 --> 5 # source_trend = self.exchange.security(str(resol_trend) + 'd') # init 참고:wq!:wq! # hadf_trend = heikinashi(source_trend) # haopen_trend = hadf_trend['HA_open'][-1] # haclose_trend = hadf_trend['HA_close'][-1] # haup_trend = haclose_trend > haopen_trend # hadown_trend = haclose_trend <= haopen_trend # logger.info('haup_trend:%s\n' % haup_trend) # logger.info('hadown_trend:%s\n' % hadown_slow) # source_entry = self.exchange.security('1h') # # hadf_entry = heikinashi(source_entry) # hadf_trading = heikinashi(hadf_entry) # # ha_open_longtrend_entry = variant(ha_open_values, 2) # 2h # ha_close_longtrend_entry = variant(ha_close_values, 2) # # haopen_longtrend_entry = ha_open_longtrend_entry[-1] # haclose_longtrend_entry = ha_close_longtrend_entry[-1] # haup_longtrend_entry = haclose_longtrend_entry > haopen_longtrend_entry # hadown_longtrend_entry = haclose_longtrend_entry <= haopen_longtrend_entry # # logger.info('1h기준 2h\n') # logger.info('haup_longtrend_enty:%s\n' % haup_longtrend_entry) # logger.info('hadown_longtrend_entry:%s\n' % hadown_longtrend_entry) # # " long " # self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend_entry, 
ha_open_longtrend_entry)) # " short " # self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend_entry, ha_open_longtrend_entry)) # heikinashi # for test # lot = int(round(lot / 2)) # lot = 10 # fast_len = self.input('fast_len', int, 1) # slow_len = self.input('slow_len', int, 21) # trend_len = self.input('slow_len', int, 55) # longtrend_len = self.input('slow_len', int, 233) # fast_len = self.input('fast_len', int, 1) # slow_len = self.input('slow_len', int, 55) # trend_len = self.input('slow_len', int, 240) # longtrend_len = self.input('slow_len', int, 233) # for various minutes source # resol_fast = self.input(defval=1, title="resolution", type=int) # defval 변경, 예) 5분 --> 5 # source_fast = self.exchange.security(str(resol_fast) + 'm') # init 참고 # hadf_fast = heikinashi(source_fast, 1) # haopen_fast = hadf_fast['HA_open'][-1] # haclose_fast = hadf_fast['HA_close'][-1] # haup_fast = haclose_fast > haopen_fast # hadown_fast = haclose_fast <= haopen_fast # logger.info('haup_fast:%s\n' % haup_fast) # logger.info('hadown_fast:%s\n' % hadown_fast) # resol_slow = self.input(defval=4, title="resolution", type=int) # defval 변경, 예) 5분 --> 5 # source_slow = self.exchange.security(str(resol_slow) + 'h') # init 참고 # hadf_slow = heikinashi(source_slow, 1) # haopen_slow = hadf_slow['HA_open'][-1] # haclose_slow = hadf_slow['HA_close'][-1] # haup_slow = haclose_slow > haopen_slow # hadown_slow = haclose_slow <= haopen_slow # logger.info('haup_slow:%s\n' % haup_slow) # logger.info('hadown_slow:%s\n' % hadown_slow) # resol_trend = self.input(defval=1, title="resolution", type=int) # defval 변경, 예) 5분 --> 5 # source_trend = self.exchange.security(str(resol_trend) + 'd') # init 참고:wq!:wq! 
# hadf_trend = heikinashi(source_trend) # haopen_trend = hadf_trend['HA_open'][-1] # haclose_trend = hadf_trend['HA_close'][-1] # haup_trend = haclose_trend > haopen_trend # hadown_trend = haclose_trend <= haopen_trend # logger.info('haup_trend:%s\n' % haup_trend) # logger.info('hadown_trend:%s\n' % hadown_slow) # source_entry = self.exchange.security('1h') # # hadf_entry = heikinashi(source_entry) # hadf_trading = heikinashi(hadf_entry) # # ha_open_longtrend_entry = variant(ha_open_values, 2) # 2h # ha_close_longtrend_entry = variant(ha_close_values, 2) # # haopen_longtrend_entry = ha_open_longtrend_entry[-1] # haclose_longtrend_entry = ha_close_longtrend_entry[-1] # haup_longtrend_entry = haclose_longtrend_entry > haopen_longtrend_entry # hadown_longtrend_entry = haclose_longtrend_entry <= haopen_longtrend_entry # # logger.info('1h기준 2h\n') # logger.info('haup_longtrend_enty:%s\n' % haup_longtrend_entry) # logger.info('hadown_longtrend_entry:%s\n' % hadown_longtrend_entry) # # " long " # self.exchange.entry("Long", True, lot, when=crossover(ha_close_longtrend_entry, ha_open_longtrend_entry)) # " short " # self.exchange.entry("Short", False, lot, when=crossunder(ha_close_longtrend_entry, ha_open_longtrend_entry)) # OCC # TradingView # サンプル戦略 # 第一引数: 戦略で使う足幅 # 1分足で直近10期間の情報を戦略で必要とする場合 # for test lot # lot = 1 # defval 변경, 예) 5분 --> 5, 'm' or 1시간 1, 'h', 1Day 1, 'd' # def __init__ 비교 # 1시간 1, 1D의 경우는 resolution도 변경 # logger.info('source: %s' % source) # for test # fb100 = price + 15 # fb0 = price - 15 # 최근 1시간을 본봉단위로 획득 # fibo_l = self.input('length', int, 1440) # 1Day = 60min * 24hr # fibo_l = self.input('length', int, 60) # 1Day = 60min * 24hr # fibo100 = last(highest(high, fibo_l)) # fibo0 = last(lowest(low, fibo_l)) # fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100) # fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618) # 익손평가 # when this program start, execute only once # short position # self.exchange.order("S162"+str(self.idx), False, lot*2, limit=fb162, 
post_only=True) # self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, post_only=True) # long position # self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, post_only=True) # self.exchange.order("L062"+str(self.idx), True, lot*2, limit=fb062, post_only=True) # L062 = bitmex.get_open_order("L062"+str(self.idx)) # S162 = bitmex.get_open_order("S162"+str(self.idx)) # S162_w = bitmex.get_open_order("S162_w"+str(self.idx)) # L062_w = bitmex.get_open_order("L062_w"+str(self.idx)) # # logger.info('(L0 is None): %s' % (L0 is None)) # new entry order # new short position # if flg_changed_timezone or price < fb100: # self.exchange.order("S262"+str(self.idx), False, lot*2, limit=fb262, when=(S262 is None), post_only=True) # self.exchange.order("S200"+str(self.idx), False, lot*1, limit=fb200, when=(S200 is None), post_only=True) # # self.exchange.order("S162"+str(self.idx), False, lot*2, limit=fb162, when=(S162 is None), post_only=True) # self.exchange.order("S138"+str(self.idx), False, lot*1, limit=fb138, when=(S138 is None), post_only=True) # # self.exchange.order("S100"+str(self.idx), False, qS100, limit=fb100, when=(S100 is None), post_only=True) # # # new long position # if flg_changed_timezone or price > fb0: # # self.exchange.order("L0"+str(self.idx), True, qL0, limit=fb0, when=(L0 is None), post_only=True) # self.exchange.order("L038"+str(self.idx), True, lot*1, limit=fb038, when=(L038 is None), post_only=True) # # self.exchange.order("L062"+str(self.idx), True, lot*1, limit=fb062, when=(L062 is None), post_only=True) # self.exchange.order("L0100"+str(self.idx), True, lot*1, limit=fb0100, when=(L0100 is None), post_only=True) # self.exchange.order("L0162"+str(self.idx), True, lot*2, limit=fb0162, when=(L0162 is None), post_only=True) # self.exchange.order("L038_w"+str(self.idx), False, lot*1, limit=fb0, when=fb038, post_only=True) # self.exchange.order("L0100_w" + str(self.idx), False, lot * 1, limit=fb038, when=fb0100, post_only=True) # 
self.exchange.order("L0160_w" + str(self.idx), False, lot * 2, limit=fb0100, when=fb0162, post_only=True) # # self.exchange.order("S138_w" + str(self.idx), True, lot * 1, limit=fb100, when=fb138, post_only=True) # self.exchange.order("S200_w" + str(self.idx), True, lot * 1, limit=fb138, when=fb200, post_only=True) # self.exchange.order("S262_w" + str(self.idx), True, lot * 2, limit=fb200, when=fb262, post_only=True) # stop order # if price < fb0 and L0 is None: # self.exchange.order("L0_w"+str(self.idx), False, lot*1, limit=fb38, post_only=True) # logger.info('rice <= fb0: %s' % fb0) # if price < fb062 and L062 is None: # self.exchange.order("L062_w"+str(self.idx), False, lot*2, limit=fb038, post_only=True) # logger.info('price <= fb062: %s' % fb062) # if price > fb100 and S100 is None: # logger.info('price >= fb100: %s' % fb100) # self.exchange.order("S100_w"+str(self.idx), True, lot*1, limit=fb62, post_only=True) # if price > fb162 and S162 is None: # self.exchange.order("S162_w"+str(self.idx), True, lot*2, limit=fb138, post_only=True) # logger.info('price >= fb162 %s' % fb162) # logger.info('price >= fb262: %s' % fb262) # logger.info('bitmex.get_margin():%s' % bitmex.get_margin()) # logger.info('bitmex.get_position():%s' % bitmex.get_position()) # save pre-timezone's fb0, fb100 values # for debug # logger.info('fb162: %s' % fb162) # logger.info('fb062: %s' % fb062) # or H5 # for test # sma # for i in range(1, 5): # logger.info('fast_sma 20 [%s] *******: %s' % (-i, fast_sma[-i])) # logger.info('slow_sma 120 ******: %s' % slow_sma[-1]) # rsi # willr # bband # bband_len = self.input('bbandup_len', int, 20) # bbup, bbmid, bblow = bbands(close, timeperiod=bband_len, nbdevup=2, nbdevdn=2, matype=0) # for i in range(1, 2): # logger.info('fast_bband ***** [%s], bbup: %s, bbmid: %s, bblow: %s' % (-i, bbup[-i], bbmid[-i], bblow[-i])) # heikinashi # resolutionh = self.input(defval=1, title="resolution", type=int) # variant_type = self.input(defval=5, title="variant_type", 
type=int) # sourceh = self.exchange.security(str(resolutionh) + 'h') # # hadf_h = heikinashi(sourceh) # hadf_longtrend_h = heikinashi(hadf_h) # # ha_open_values_h = hadf_longtrend_h['HA_open'].values # ha_close_values_h = hadf_longtrend_h['HA_close'].values # variant = self.variants[variant_type] # # ha_open_longtrend_h = variant(ha_open_values_h, 4) # 1시간 1, 2시간 2 # ha_close_longtrend_h = variant(ha_close_values_h, 4) # haopen_longtrend_h = ha_open_longtrend_h[-1] # haclose_longtrend_h = ha_close_longtrend_h[-1] # haup_longtrend_h = haclose_longtrend_h > haopen_longtrend_h # hadown_longtrend_h = haclose_longtrend_h <= haopen_longtrend_h # # logger.info('haup_longtrend_h:%s\n' % haup_longtrend_h) # for test lot # lot = 1 # defval 변경, 예) 5분 --> 5, 'm' or 1시간 1, 'h', 1Day 1, 'd' # def __init__ 비교 # 1시간 1, 1D의 경우는 resolution도 변경 # for test # fb100 = price + 15 # fb0 = price - 15 # fb262 = math.ceil((fb100 - fb0) * 1.628 + fb100) # fb200 = math.ceil((fb100 - fb0) * 1.0 + fb100) # fb162 = math.ceil((fb100 - fb0) * 0.618 + fb100) # fb138 = math.ceil((fb100 - fb0) * 0.382 + fb100) # fb038 = math.ceil(fb0 - (fb100 - fb0) * 0.382) # fb062 = math.ceil(fb0 - (fb100 - fb0) * 0.618) # fb0100 = math.ceil(fb0 - (fb100 - fb0) * 1.00) # fb0162 = math.ceil(fb0 - (fb100 - fb0) * 1.60) # 익손평가 # when this program start, execute only once # if self.start == 1 or flg_changed_timezone: # for debug # logger.info('fb50: %s' % fb50) # 공격적인 로직 추가시 # if haup_middle and haup_slow and haup_trend and haup_longtrend and fast_rsi[-1] <= 20: # entry long condition on Short Trend # logger.info('Now Short conditions: fast_rsi[-1] <= 10 0k --> %s' % fast_rsi[-1]) # self.exchange.order('Long', True, lot, limit=price - 0.5, post_only=True) # elif hadown_middle and hadown_slow and hadown_trend and hadown_longtrend and fast_rsi[-1] >= 80: # # entry short condition on Long Trend # logger.info('Now Long conditions: fast_rsi[-1] >= 90 0k --> %s' % fast_rsi[-1]) # self.exchange.order('Short', False, lot, 
limit=price + 0.5, post_only=True) # 일반적인 로직 # 스탑로직 # 손절로직 # # 돌파로직 # if hadown_fast and hadown_middle and hadown_slow: # 모두 숏이면 # self.exchange.order('ChBrkShort', False, qty*2) # self.exchange.cancel_all() # else: # self.exchange.close_all() # elif hadown_trend and hadown_longtrend: # 손절로직 or 돌파로직 # logger.info('>>hadown_trend and hadown_longtrend Long -> Short : trend changed') # # 돌파로직 # if hadown_fast and hadown_middle and hadown_slow: # 모두 숏이면 # self.exchange.order('ChBrkShort', False, qty*2) # self.exchange.cancel_all() # else: # self.exchange.close_all() # else: # logger.info('>>StopLong') # if price < fb100: # 초과익절 로직 # self.exchange.order('StopLong1', False, qty, limit=fb100, post_only=True) # # self.exchange.order('StopLong2', False, qty, limit=fb62, post_only=True) # 스탑로직 # 손절로직 # elif haup_trend and haup_longtrend: # 손절로직 or 돌파로직 # logger.info('>>haup_trend and haup_longtrend Short -> Long : trend changed') # # 돌파로직 # if haup_fast and haup_middle and haup_slow: # 모두 롱이면 # self.exchange.order('ChBrkLong', True, qty*2) # self.exchange.cancel_all() # else: # self.exchange.close_all() # else: # logger.info('>>StopShort') # if price > fb0: # 초과익절로직 # self.exchange.order('StopShort1', False, qty, limit=fb0, post_only=True) # # self.exchange.order('StopShort2', False, qty, limit=fb38, post_only=True) # logger.info('>>StopShort') # catch shooting logic # catch shooting logic # if bitmex.get_open_order('LCatch') is not None and haup_trend: # bitmex.cancel('LCatch') # # if bitmex.get_open_order('SCatch') is not None and hadown_trend: # bitmex.cancel('SCatch') # entry long condition on Short Trend # # entry short condition on Long Trend # back to R2 mode # stop order and back to R2 mode # back to R2 mode # and not hadown_slow): #(fast_slow_rsi <= 93) or # stop order and back to R2 mode # back to R2 mode # save pre-timezone's fb0, fb100 values
| 2.063665
| 2
|
tests/ut/python/exec/resnet_example.py
|
unseenme/mindspore
| 2
|
6629620
|
<reponame>unseenme/mindspore<gh_stars>1-10
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
resnet50 example
"""
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine
def conv3x3(in_channels, out_channels, stride=1, padding=1):
    """3x3 convolution with constant (0.01) weight init for deterministic tests."""
    weight = Tensor(np.ones([out_channels, in_channels, 3, 3]).astype(np.float32) * 0.01)
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=3, stride=stride, padding=padding, weight_init=weight)
def conv1x1(in_channels, out_channels, stride=1, padding=0):
    """1x1 convolution with constant (0.01) weight init for deterministic tests."""
    weight = Tensor(np.ones([out_channels, in_channels, 1, 1]).astype(np.float32) * 0.01)
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=1, stride=stride, padding=padding, weight_init=weight)
def bn_with_initialize(out_channels):
    """BatchNorm2d whose statistics and affine parameters are all set to 0.01.

    Constant initialisation keeps forward passes deterministic in tests
    (no random init).
    """
    # BUG FIX (clarity): `(out_channels)` was a bare-parenthesised int, not a
    # tuple; make the intended 1-D shape explicit.
    shape = (out_channels,)
    mean = Tensor(np.ones(shape).astype(np.float32) * 0.01)
    var = Tensor(np.ones(shape).astype(np.float32) * 0.01)
    beta = Tensor(np.ones(shape).astype(np.float32) * 0.01)
    gamma = Tensor(np.ones(shape).astype(np.float32) * 0.01)
    return nn.BatchNorm2d(num_features=out_channels,
                          beta_init=beta,
                          gamma_init=gamma,
                          moving_mean_init=mean,
                          moving_var_init=var)
class ResidualBlock(nn.Cell):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) with expansion factor 4.

    When ``down_sample`` is True, the identity branch is projected with a
    strided 1x1 conv + BN so its shape matches the main branch.
    """
    # Block output channels are `expansion` times the bottleneck width.
    expansion = 4

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 down_sample=False):
        super(ResidualBlock, self).__init__()
        out_chls = out_channels // self.expansion  # bottleneck width
        self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
        self.bn1 = bn_with_initialize(out_chls)
        self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=1)
        self.bn2 = bn_with_initialize(out_chls)
        self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
        self.bn3 = bn_with_initialize(out_channels)
        self.relu = nn.ReLU()
        self.downsample = down_sample
        # Projection path is always constructed but only applied when
        # `down_sample` is True.
        self.conv_down_sample = conv1x1(in_channels, out_channels,
                                        stride=stride, padding=0)
        self.bn_down_sample = bn_with_initialize(out_channels)
        self.add = P.TensorAdd()

    def construct(self, x):
        """Forward pass: main branch plus the (optionally projected) identity.

        :param x: input feature map
        :return: activated residual sum
        """
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample:
            identity = self.conv_down_sample(identity)
            identity = self.bn_down_sample(identity)
        out = self.add(out, identity)
        out = self.relu(out)
        return out
class MakeLayer3(nn.Cell):
    """Residual stage of 3 blocks; the first block downsamples/projects."""

    def __init__(self, block, in_channels, out_channels, stride):
        super(MakeLayer3, self).__init__()
        self.block_down_sample = block(in_channels, out_channels,
                                       stride=stride, down_sample=True)
        self.block1 = block(out_channels, out_channels, stride=1)
        self.block2 = block(out_channels, out_channels, stride=1)

    def construct(self, x):
        """Apply the three residual blocks in sequence."""
        x = self.block_down_sample(x)
        x = self.block1(x)
        x = self.block2(x)
        return x
class MakeLayer4(nn.Cell):
    """Residual stage of 4 blocks; the first block downsamples/projects."""

    def __init__(self, block, in_channels, out_channels, stride):
        super(MakeLayer4, self).__init__()
        self.block_down_sample = block(in_channels, out_channels,
                                       stride=stride, down_sample=True)
        self.block1 = block(out_channels, out_channels, stride=1)
        self.block2 = block(out_channels, out_channels, stride=1)
        self.block3 = block(out_channels, out_channels, stride=1)

    def construct(self, x):
        """Apply the four residual blocks in sequence."""
        x = self.block_down_sample(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        return x
class MakeLayer6(nn.Cell):
    """Residual stage of 6 blocks; the first block downsamples/projects."""

    def __init__(self, block, in_channels, out_channels, stride):
        super(MakeLayer6, self).__init__()
        self.block_down_sample = block(in_channels, out_channels,
                                       stride=stride, down_sample=True)
        self.block1 = block(out_channels, out_channels, stride=1)
        self.block2 = block(out_channels, out_channels, stride=1)
        self.block3 = block(out_channels, out_channels, stride=1)
        self.block4 = block(out_channels, out_channels, stride=1)
        self.block5 = block(out_channels, out_channels, stride=1)

    def construct(self, x):
        """Apply the six residual blocks in sequence."""
        x = self.block_down_sample(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        return x
class ResNet50(nn.Cell):
    """ResNet-50 built from bottleneck residual blocks.

    Stage layout is the standard [3, 4, 6, 3] configuration; all weights are
    constant-initialised (0.01) so forward passes are deterministic in tests.
    """

    def __init__(self, block, num_classes=100):
        super(ResNet50, self).__init__()
        # Stem: 7x7 stride-2 convolution, BN, ReLU, 3x3 stride-2 max pool.
        weight_conv = Tensor(np.ones([64, 3, 7, 7]).astype(np.float32) * 0.01)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, weight_init=weight_conv)
        self.bn1 = bn_with_initialize(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
        # Four residual stages of 3, 4, 6 and 3 blocks respectively.
        self.layer1 = MakeLayer3(
            block, in_channels=64, out_channels=256, stride=1)
        self.layer2 = MakeLayer4(
            block, in_channels=256, out_channels=512, stride=2)
        self.layer3 = MakeLayer6(
            block, in_channels=512, out_channels=1024, stride=2)
        self.layer4 = MakeLayer3(
            block, in_channels=1024, out_channels=2048, stride=2)
        self.avgpool = nn.AvgPool2d(7, 1)
        self.flatten = nn.Flatten()
        # 512 * block.expansion == 2048, the channel count after layer4.
        weight_fc = Tensor(np.ones([num_classes, 512 * block.expansion]).astype(np.float32) * 0.01)
        bias_fc = Tensor(np.ones([num_classes]).astype(np.float32) * 0.01)
        self.fc = nn.Dense(512 * block.expansion, num_classes, weight_init=weight_fc, bias_init=bias_fc)

    def construct(self, x):
        """Forward pass from an image batch to class logits.

        :param x: input image batch
        :return: logits of shape (batch, num_classes)
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x
def resnet50():
    """Factory: ResNet-50 with bottleneck blocks and 10 output classes."""
    return ResNet50(ResidualBlock, 10)
@non_graph_engine
def test_compile():
    """Smoke test: run a single forward pass of ResNet-50 on a dummy batch."""
    net = resnet50()
    input_data = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32) * 0.01)
    output = net(input_data)
    print(output.asnumpy())
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
resnet50 example
"""
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine
def conv3x3(in_channels, out_channels, stride=1, padding=1):
"""3x3 convolution """
weight = Tensor(np.ones([out_channels, in_channels, 3, 3]).astype(np.float32) * 0.01)
return nn.Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=padding, weight_init=weight)
def conv1x1(in_channels, out_channels, stride=1, padding=0):
"""1x1 convolution"""
weight = Tensor(np.ones([out_channels, in_channels, 1, 1]).astype(np.float32) * 0.01)
return nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride, padding=padding, weight_init=weight)
def bn_with_initialize(out_channels):
shape = (out_channels)
mean = Tensor(np.ones(shape).astype(np.float32) * 0.01)
var = Tensor(np.ones(shape).astype(np.float32) * 0.01)
beta = Tensor(np.ones(shape).astype(np.float32) * 0.01)
gamma = Tensor(np.ones(shape).astype(np.float32) * 0.01)
return nn.BatchNorm2d(num_features=out_channels,
beta_init=beta,
gamma_init=gamma,
moving_mean_init=mean,
moving_var_init=var)
class ResidualBlock(nn.Cell):
"""
residual Block
"""
expansion = 4
def __init__(self,
in_channels,
out_channels,
stride=1,
down_sample=False):
super(ResidualBlock, self).__init__()
out_chls = out_channels // self.expansion
self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
self.bn1 = bn_with_initialize(out_chls)
self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=1)
self.bn2 = bn_with_initialize(out_chls)
self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
self.bn3 = bn_with_initialize(out_channels)
self.relu = nn.ReLU()
self.downsample = down_sample
self.conv_down_sample = conv1x1(in_channels, out_channels,
stride=stride, padding=0)
self.bn_down_sample = bn_with_initialize(out_channels)
self.add = P.TensorAdd()
def construct(self, x):
"""
:param x:
:return:
"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample:
identity = self.conv_down_sample(identity)
identity = self.bn_down_sample(identity)
out = self.add(out, identity)
out = self.relu(out)
return out
class MakeLayer3(nn.Cell):
"""
make resnet50 3 layers
"""
def __init__(self, block, in_channels, out_channels, stride):
super(MakeLayer3, self).__init__()
self.block_down_sample = block(in_channels, out_channels,
stride=stride, down_sample=True)
self.block1 = block(out_channels, out_channels, stride=1)
self.block2 = block(out_channels, out_channels, stride=1)
def construct(self, x):
x = self.block_down_sample(x)
x = self.block1(x)
x = self.block2(x)
return x
class MakeLayer4(nn.Cell):
"""
make resnet50 4 layers
"""
def __init__(self, block, in_channels, out_channels, stride):
super(MakeLayer4, self).__init__()
self.block_down_sample = block(in_channels, out_channels,
stride=stride, down_sample=True)
self.block1 = block(out_channels, out_channels, stride=1)
self.block2 = block(out_channels, out_channels, stride=1)
self.block3 = block(out_channels, out_channels, stride=1)
def construct(self, x):
x = self.block_down_sample(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
return x
class MakeLayer6(nn.Cell):
"""
make resnet50 6 layers
"""
def __init__(self, block, in_channels, out_channels, stride):
super(MakeLayer6, self).__init__()
self.block_down_sample = block(in_channels, out_channels,
stride=stride, down_sample=True)
self.block1 = block(out_channels, out_channels, stride=1)
self.block2 = block(out_channels, out_channels, stride=1)
self.block3 = block(out_channels, out_channels, stride=1)
self.block4 = block(out_channels, out_channels, stride=1)
self.block5 = block(out_channels, out_channels, stride=1)
def construct(self, x):
x = self.block_down_sample(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
return x
class ResNet50(nn.Cell):
"""
resnet nn.Cell
"""
def __init__(self, block, num_classes=100):
super(ResNet50, self).__init__()
weight_conv = Tensor(np.ones([64, 3, 7, 7]).astype(np.float32) * 0.01)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, weight_init=weight_conv)
self.bn1 = bn_with_initialize(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
self.layer1 = MakeLayer3(
block, in_channels=64, out_channels=256, stride=1)
self.layer2 = MakeLayer4(
block, in_channels=256, out_channels=512, stride=2)
self.layer3 = MakeLayer6(
block, in_channels=512, out_channels=1024, stride=2)
self.layer4 = MakeLayer3(
block, in_channels=1024, out_channels=2048, stride=2)
self.avgpool = nn.AvgPool2d(7, 1)
self.flatten = nn.Flatten()
weight_fc = Tensor(np.ones([num_classes, 512 * block.expansion]).astype(np.float32) * 0.01)
bias_fc = Tensor(np.ones([num_classes]).astype(np.float32) * 0.01)
self.fc = nn.Dense(512 * block.expansion, num_classes, weight_init=weight_fc, bias_init=bias_fc)
def construct(self, x):
"""
:param x:
:return:
"""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = self.flatten(x)
x = self.fc(x)
return x
def resnet50():
return ResNet50(ResidualBlock, 10)
@non_graph_engine
def test_compile():
net = resnet50()
input_data = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32) * 0.01)
output = net(input_data)
print(output.asnumpy())
|
en
| 0.777406
|
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ resnet50 example 3x3 convolution 1x1 convolution residual Block :param x: :return: make resnet50 3 layers make resnet50 4 layers make resnet50 6 layers resnet nn.Cell :param x: :return:
| 2.312243
| 2
|
youdao.py
|
bobbyesh/youdao-wrapper
| 0
|
6629621
|
<reponame>bobbyesh/youdao-wrapper<filename>youdao.py<gh_stars>0
# api.py
'''Defines a YouDaoAPI class for accessing the public
youdao.com api.
Instantiating With API Key
==========================
>>> youdao = YouDaoAPI(keyfrom, key)
Instantiating With Config File
==============================
Create a keys.ini in the following format,
replace <keyfrom> and <key> with the keyfrom
and key that youdao provided:
# keys.ini
[DEFAULT]
keyfrom = <keyfrom>
key = <key>
>>> youdao = YouDaoAPI.from_config('keys.ini')
Making Queries
==============
The `get` method returns a dictionary with the
word, its definitions, and the pinyin.
After instantiating with one of the above methods:
>>> query = youdao.get('你好')
>>> query
{'word': '你好', 'pinyin': 'nǐ hǎo', 'def': ['hello;hi']}
'''
from urllib.parse import quote
import requests
import configparser
'''
..TODO: Implement YouDaoWord and YouDaoSentence.
'''
class YouDaoWord(object):
    """Value object holding one youdao word lookup result."""

    def __init__(self, word, definitions, pinyin):
        self.word = word                # the queried word
        self.definitions = definitions  # list of definition strings
        self.pinyin = pinyin            # pinyin reading

    def __repr__(self):
        # Added for debuggability; does not change the public interface.
        return '%s(word=%r, definitions=%r, pinyin=%r)' % (
            type(self).__name__, self.word, self.definitions, self.pinyin)
class YouDaoSentence(object):
    """Placeholder for sentence-level results (see module TODO; not implemented)."""
    pass
class YouDaoAPI(object):
    """Thin client for the public youdao.com translation API.

    ..TODO: Detect when the translation doesn't actually exist.
    """

    # Mapping of youdao `error_code` values to human-readable messages.
    error_messages = {
        0: 'Normal',
        20: 'The request string was too long',
        30: 'Unable to efficiently translate',
        40: 'Unsupported language type',
        50: 'Invalid key',
        60: 'No dictionary results',
    }

    def __init__(self, keyfrom, key):
        self.keyfrom = keyfrom
        self.key = key  # fixed stray space: was `self. key = key`
        self.response = None  # last requests.Response, set by get_json/get_jsonp

    @classmethod
    def from_config(cls, config_file):
        """Build a client from an INI file with `keyfrom` and `key` in [DEFAULT]."""
        config = configparser.ConfigParser()
        config.read(config_file)
        keyfrom = config['DEFAULT']['keyfrom']
        key = config['DEFAULT']['key']
        return cls(keyfrom, key)

    def get_word(self, word):
        """Look up a single word.

        Returns a dict {'def': [...], 'pinyin': str, 'word': str}. Raises if
        the API response lacks a 'basic' section (e.g. a multi-word query).
        """
        json = self.get_json(word)
        if 'basic' not in json:
            raise Exception('This request did not return basic definitions. '
                            'Perhaps it was not a single word?')
        definitions = json['basic']['explains']
        pinyin = json['basic']['phonetic']
        word = json['query']
        return {'def': definitions,
                'pinyin': pinyin,
                'word': word}

    def get_json(self, query):
        """GET the API with doctype=json and return the decoded body."""
        url = self.url(query)
        self.response = requests.get(url)
        return self.response.json()

    def get_jsonp(self, query):
        """GET the API with doctype=jsonp and return the decoded body."""
        url = self.url(query, doctype='jsonp')
        self.response = requests.get(url)
        # BUG FIX: was `return response.json()`, a NameError on the bare name.
        return self.response.json()

    def json(self):
        """Return the JSON of the last response; requires a prior request."""
        if not self.response:
            raise Exception('No response object yet, must call get() first.')
        return self.response.json()

    def url(self, query, doctype='json'):
        """Build the full request URL with the query percent-encoded."""
        return ('http://fanyi.youdao.com/openapi.do?keyfrom=%s&key=%s'
                '&type=data&doctype=%s&version=1.1&q=') % (self.keyfrom, self.key, doctype) + quote(query)

    def handle_error(self, json):
        """Raise a descriptive exception for any non-zero API error code."""
        code = json['error_code']
        if code != 0:
            raise Exception(self.error_messages[code])
def test():
    """Smoke test: look up sample words using keys from keys.ini.

    NOTE(review): performs real network I/O against the youdao API and
    requires a local keys.ini with valid credentials.
    """
    words = ['\u8ffd\u9010']
    youdao = YouDaoAPI.from_config('keys.ini')
    jsons = []
    for w in words:
        j = youdao.get_word(w)
        jsons.append(j)
    print(jsons)
if __name__ == '__main__':
test()
|
# api.py
'''Defines a YouDaoAPI class for accessing the public
youdao.com api.
Instantiating With API Key
==========================
>>> youdao = YouDaoAPI(keyfrom, key)
Instantiating With Config File
==============================
Create a keys.ini in the following format,
replace <keyfrom> and <key> with the keyfrom
and key that youdao provided:
# keys.ini
[DEFAULT]
keyfrom = <keyfrom>
key = <key>
>>> youdao = YouDaoAPI.from_config('keys.ini')
Making Queries
==============
The `get` method returns a dictionary with the
word, its definitions, and the pinyin.
After instantiating with one of the above methods:
>>> query = youdao.get('你好')
>>> query
{'word': '你好', 'pinyin': 'nǐ hǎo', 'def': ['hello;hi']}
'''
from urllib.parse import quote
import requests
import configparser
'''
..TODO: Implement YouDaoWord and YouDaoSentence.
'''
class YouDaoWord(object):
def __init__(self, word, definitions, pinyin):
self.word = word
self.definitions = definitions
self.pinyin = pinyin
class YouDaoSentence(object):
pass
class YouDaoAPI(object):
'''
..TODO: Detect when the translation doesn't actually exist.
'''
error_messages = {
0: 'Normal',
20: 'The request string was too long',
30: 'Unable to efficiently translate',
40: 'Unsupported language type',
50: 'Invalid key',
60: 'No dictionary results',
}
def __init__(self, keyfrom, key):
self.keyfrom = keyfrom
self. key = key
self.response = None
@classmethod
def from_config(cls, config_file):
config = configparser.ConfigParser()
config.read(config_file)
keyfrom = config['DEFAULT']['keyfrom']
key = config['DEFAULT']['key']
return cls(keyfrom, key)
def get_word(self, word):
json = self.get_json(word)
if 'basic' not in json:
raise Exception('This request did not return basic definitions. '
'Perhaps it was not a single word?')
definitions = json['basic']['explains']
pinyin = json['basic']['phonetic']
word = json['query']
return {'def': definitions,
'pinyin': pinyin,
'word': word}
def get_json(self, query):
url = self.url(query)
self.response= requests.get(url)
return self.response.json()
def get_jsonp(self, query):
url = self.url(query, doctype='jsonp')
self.response= requests.get(url)
return response.json()
def json(self):
if not self.response:
raise Exception('No response object yet, must call get() first.')
return self.response.json()
def url(self, query, doctype='json'):
return ('http://fanyi.youdao.com/openapi.do?keyfrom=%s&key=%s'
'&type=data&doctype=%s&version=1.1&q=') % (self.keyfrom, self.key, doctype) + quote(query)
def handle_error(self, json):
code = json['error_code']
if code != 0:
raise Exception( self.error_messages[code] )
def test():
words = [ '追逐']
youdao = YouDaoAPI.from_config('keys.ini')
jsons = []
for w in words:
j = youdao.get_word(w)
jsons.append(j)
print(jsons)
if __name__ == '__main__':
test()
|
en
| 0.697633
|
# api.py Defines a YouDaoAPI class for accessing the public youdao.com api. Instantiating With API Key ========================== >>> youdao = YouDaoAPI(keyfrom, key) Instantiating With Config File ============================== Create a keys.ini in the following format, replace <keyfrom> and <key> with the keyfrom and key that youdao provided: # keys.ini [DEFAULT] keyfrom = <keyfrom> key = <key> >>> youdao = YouDaoAPI.from_config('keys.ini') Making Queries ============== The `get` method returns a dictionary with the word, its definitions, and the pinyin. After instantiating with one of the above methods: >>> query = youdao.get('你好') >>> query {'word': '你好', 'pinyin': 'nǐ hǎo', 'def': ['hello;hi']} ..TODO: Implement YouDaoWord and YouDaoSentence. ..TODO: Detect when the translation doesn't actually exist.
| 3.311736
| 3
|
PyMoCapViewer/skeletons/__init__.py
|
justamad/PyMoCapViewer
| 1
|
6629622
|
<gh_stars>1-10
from .skeleton_loader import get_skeleton_definition_for_camera
|
from .skeleton_loader import get_skeleton_definition_for_camera
|
none
| 1
| 1.09642
| 1
|
|
AssistantControl/google_home_led_pattern.py
|
cwalker/Assistants
| 28
|
6629623
|
<reponame>cwalker/Assistants
#!/usr/bin/env python
def GoogleHomeLedPattern(show):
    """Return the Google-Home-style LED pattern as a flat list of 9 values.

    The ``show`` argument is accepted for interface compatibility but unused.
    """
    pattern = [0] * 9
    # Light LEDs 0 and 4 at level 1 and LED 8 at level 2.
    for index, level in ((0, 1), (4, 1), (8, 2)):
        pattern[index] = level
    return pattern
|
#!/usr/bin/env python
def GoogleHomeLedPattern(show):
basis = [0] * 3 * 3
basis[0] = 1
basis[4] = 1
basis[8] = 2
return basis
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.581985
| 3
|
Pytorch/config.py
|
khshayarr/DREAM-1
| 44
|
6629624
|
<reponame>khshayarr/DREAM-1
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
class Config(object):
    """Paths and hyper-parameters for the DREAM next-basket model."""

    def __init__(self):
        # Dataset and output locations.
        self.TRAININGSET_DIR = '../data/Train.json'
        self.VALIDATIONSET_DIR = '../data/Validation.json'
        self.TESTSET_DIR = '../data/Test.json'
        self.NEG_SAMPLES = '../data/neg_sample.pickle'
        self.MODEL_DIR = 'runs/'

        # Training settings.
        self.cuda = False
        self.clip = 10
        self.epochs = 200
        self.batch_size = 256
        self.seq_len = 12
        self.learning_rate = 0.01  # initial learning rate
        self.log_interval = 1      # number of batches between two log lines

        # Model architecture.
        self.basket_pool_type = 'avg'  # one of ['avg', 'max']
        self.rnn_type = 'LSTM'         # one of ['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU']
        self.rnn_layer_num = 2
        self.dropout = 0.5
        self.num_product = 26991 + 1   # product vocabulary size for the embedding layer
        self.embedding_dim = 32        # product embedding dimension
        self.neg_num = 500             # number of negative samples
        self.top_k = 10                # K used for top-K evaluation
|
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
class Config(object):
def __init__(self):
self.TRAININGSET_DIR = '../data/Train.json'
self.VALIDATIONSET_DIR = '../data/Validation.json'
self.TESTSET_DIR = '../data/Test.json'
self.NEG_SAMPLES = '../data/neg_sample.pickle'
self.MODEL_DIR = 'runs/'
self.cuda = False
self.clip = 10
self.epochs = 200
self.batch_size = 256
self.seq_len = 12
self.learning_rate = 0.01 # Initial Learning Rate
self.log_interval = 1 # num of batches between two logging
self.basket_pool_type = 'avg' # ['avg', 'max']
self.rnn_type = 'LSTM' # ['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU']
self.rnn_layer_num = 2
self.dropout = 0.5
self.num_product = 26991+1 # 商品数目,用于定义 Embedding Layer
self.embedding_dim = 32 # 商品表征维数, 用于定义 Embedding Layer
self.neg_num = 500 # 负采样个数
self.top_k = 10 # Top K 取值
|
en
| 0.525711
|
# -*- coding:utf-8 -*- # Initial Learning Rate # num of batches between two logging # ['avg', 'max'] # ['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU'] # 商品数目,用于定义 Embedding Layer # 商品表征维数, 用于定义 Embedding Layer # 负采样个数 # Top K 取值
| 2.230546
| 2
|
Router.py
|
bachnguyenhuu/RENAT
| 65
|
6629625
|
<filename>Router.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2017-2020 NTT Communications
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netsnmp
import time
import sys,os,glob
import re
import csv
import json
import inspect
import Common
from VChannel import VChannel
from importlib import import_module
import robot.libraries.DateTime as DateTime
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.BuiltIn import RobotNotRunningError
from types import MethodType
# expose following keywords
# __all__ = ['switch','cmd','exec_file','snap','snap_diff','xrun','follow_mib']
class Router(object):
    """ A class provides keywords for router control. An instance of Router
    class automatically assigned methods of a VChannel class (*Note*: this is not
    an inheritance but rather 1-to-1 relation)

    See [./VChannel.html|VChannel] for more details about `VChannel`.

    Device's ``type`` is defined in master ``device.yaml``. The system will load
    appropriate modules for each device.

    Details about keywords provided by modules could be found in document of each
    module likes:
    - [./router_mod_juniper.html|Juniper module]
    - [./router_mod_cisco.html|Cisco module]
    - [./router_mod_gr.html|GR module]

    Keywords provides by above module could be executed through `Xrun` keyword
    or directly called from ``Router``.

    Examples:
    | Router.`Switch` | vmx12 |
    | Router.`Xrun` | Load Config |
    | Router.`Load Config` | |
    """
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
    ROBOT_LIBRARY_VERSION = Common.version()

    def __init__(self):
        # Make the RENAT installation folder importable so router_mod.* loads.
        folder = os.path.dirname(__file__)
        sys.path.append(folder)
        try:
            self._vchannel = BuiltIn().get_library_instance('VChannel')
            if self._vchannel is None:
                raise Exception("Could not find an instance of VChannel. Need import VChannel first")
            else:
                # Mirror every public VChannel method onto this instance
                # (1-to-1 delegation, not inheritance).
                keyword_list = inspect.getmembers(self._vchannel, inspect.ismethod)
                for keyword, body in keyword_list:
                    if not keyword.startswith('_'):
                        setattr(self, keyword, body)
                # sync the name with the current VChannel instance
                self._cur_name = self._vchannel._current_name

            # Expose every public function found in router_mod/*.py as a bound
            # method that dispatches through self.xrun().
            mod_list = glob.glob(Common.get_renat_path() + '/router_mod/*.py')
            keyword_list = []
            for item in mod_list:
                if item.startswith('_'):
                    continue
                mod_name = os.path.basename(item).replace('.py', '')
                mod = import_module('router_mod.' + mod_name)
                cmd_list = inspect.getmembers(mod, inspect.isfunction)
                for cmd, data in cmd_list:
                    if not cmd.startswith('_') and cmd not in keyword_list:
                        keyword_list.append(cmd)

                        def gen_xrun(cmd):
                            # Factory so each generated method captures its own
                            # `cmd` value (avoids the late-binding closure bug).
                            def _xrun(self, *args, **kwargs):
                                return self.xrun(cmd, *args, **kwargs)
                            return _xrun
                        setattr(self, cmd, MethodType(gen_xrun(cmd), self))
        except RobotNotRunningError as e:
            Common.err("WARN: RENAT is not running")

    def xrun(self, cmd, *args, **kwargs):
        """ Runs the vendor independent keywords.
        Parametes:
        - ``cmd``: a keyword
        - ``args``: other argumemts
        Examples:
        | Router.`Xrun` | Flap Interface | ge-0/0/0 |

        This keyword will then actually calling the correspond keyword for the device type.
        """
        channel = self.get_current_channel()
        node = channel['node']
        type_list = re.split(r'-|_', channel['type'])
        mod_name = ''
        type_list_length = len(type_list)
        mod_cmd = cmd.lower().replace(' ', '_')
        # Walk from the most specific module name back to the most generic one
        # until a module that implements the keyword is found.
        for i in range(0, type_list_length):
            mod_name = '_'.join(type_list[0:type_list_length - i])
            try:
                mod = import_module('router_mod.' + mod_name)
                if hasattr(mod, mod_cmd):
                    break
            except ImportError:
                BuiltIn().log("    Could not find `%s`, try another one" % mod_name)
        BuiltIn().log("    using `%s` mod for command `%s`" % (mod_name, cmd))
        result = getattr(mod, mod_cmd)(self, *args, **kwargs)
        return result

    def follow_mib(self, node_list, wait_time='10s', interval_time='5s',
                   len='12', percentile='80', threshold='75', max_len='300', factor='1'):
        """ Waits until all the nodes defined in ``node_list`` become ``stable``.
        Stableness is checked by SNMP polling result. The MIB list is define by
        ``mib`` in ``node`` section
        Parameter:
        - ``wait_time(1)``: the time before the evaluation starting
        - ``interval_time(2)``: interval between SNMP polling time
        - ``threshold``: below this value is evaluated as ``stable``
        - ``len(3)``: the size of the evaluation window (number of values that
          are used in each valuation)
        - ``percentile``: real useful percentage of data (ignore top
          ``100-percentile`` percent)
        - ``max_len(4)``: maximum waiting ``lend`` for this checking
        | time sequence: --(1)--|-(2)-|-----|-----|----|-----|-----|
        |                       <--------(3)----------> poll  poll
        |                             <--------(3)---------->
        |                <---------------------(4)---------->
        """
        time.sleep(DateTime.convert_time(wait_time))
        interval = DateTime.convert_time(interval_time)
        data = {}
        for node in node_list:
            device = Common.LOCAL['node'][node]['device']
            type = Common.GLOBAL['device'][device]['type']
            data[node] = {}
            data[node]['ip'] = Common.GLOBAL['device'][device]['ip']
            data[node]['community'] = Common.GLOBAL['snmp-template'][type]['community']
            data[node]['mib-file'] = Common.mib_for_node(node)
            # Use a context manager so the MIB file is closed even on errors
            # (was an unclosed open()/close() pair on the JSON parse path).
            with open(data[node]['mib-file']) as f:
                data[node]['oid_list'] = json.load(f)['miblist']
            data[node]['poller'] = netsnmp.SNMPSession(data[node]['ip'], data[node]['community'])
            data[node]['monitor'] = []
        # Prime the evaluation window with `len` samples per OID.
        # NOTE: `len` is a (string) keyword argument shadowing the builtin;
        # kept for backward compatibility with existing test suites.
        for i in range(int(len)):
            for node in node_list:
                for oid in data[node]['oid_list']:
                    try:
                        value = float(data[node]['poller'].get(oid['oid'])[0][2])
                    except:
                        value = 0.0
                    data[node]['monitor'].insert(0, value)
            time.sleep(interval)
        stable = False
        count = 0
        BuiltIn().log("Stable checking ...")
        max_len_value = int(max_len)
        while not stable and count < max_len_value:
            stable = True
            for node in node_list:
                for oid in data[node]['oid_list']:
                    try:
                        # BUG FIX: the indexing belongs on the poll *result*,
                        # not on the OID string (was `get(oid['oid'][0][2])`),
                        # matching the priming loop above.
                        value = float(data[node]['poller'].get(oid['oid'])[0][2])
                    except:
                        value = 0.0
                    # Sliding window: push the newest sample, drop the oldest.
                    data[node]['monitor'].insert(0, value)
                    data[node]['monitor'].pop()
                stable = stable and Common.is_stable(data[node]['monitor'], float(threshold), int(percentile))
                BuiltIn().log("node = %s stable = %s" % (node, stable))
                BuiltIn().log(",".join(str(i) for i in data[node]['monitor']))
            count += 1
            time.sleep(interval)
        if count < max_len_value:
            BuiltIn().log("Stable checking normally finished")
        else:
            BuiltIn().log("Stable checking forcibly finished")
|
<filename>Router.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2017-2020 NTT Communications
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netsnmp
import time
import sys,os,glob
import re
import csv
import json
import inspect
import Common
from VChannel import VChannel
from importlib import import_module
import robot.libraries.DateTime as DateTime
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.BuiltIn import RobotNotRunningError
from types import MethodType
# expose following keywords
# __all__ = ['switch','cmd','exec_file','snap','snap_diff','xrun','follow_mib']
class Router(object):
""" A class provides keywords for router control. An instance of Router
class automatically assigned methods of a VChannel class (*Note*: this is not
an inheritance but rather 1-to-1 relation)
See [./VChannel.html|VChannel] for more details about `VChannel`.
Device's ``type`` is defined in master ``device.yaml``. The system will load
appropriate modules for each device.
Details about keywords provided by modules could be found in document of each
module likes:
- [./router_mod_juniper.html|Juniper module]
- [./router_mod_cisco.html|Cisco module]
- [./router_mod_gr.html|GR module]
Keywords provides by above module could be executed through `Xrun` keyword
or directly called from ``Router``.
Examples:
| Router.`Switch` | vmx12 |
| Router.`Xrun` | Load Config |
| Router.`Load Config` | |
"""
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LIBRARY_VERSION = Common.version()
def __init__(self):
folder = os.path.dirname(__file__)
sys.path.append(folder)
try:
self._vchannel = BuiltIn().get_library_instance('VChannel')
if self._vchannel is None:
raise Exception("Could not find an instance of VChannel. Need import VChannel first")
else:
keyword_list = inspect.getmembers(self._vchannel,inspect.ismethod)
for keyword,body in keyword_list:
if not keyword.startswith('_'):
setattr(self,keyword,body)
# sync the nanme with current VChannel instance
self._cur_name = self._vchannel._current_name
#
mod_list = glob.glob(Common.get_renat_path() + '/router_mod/*.py')
keyword_list = []
for item in mod_list:
# BuiltIn().log_to_console(item)
if item.startswith('_'): continue
mod_name = os.path.basename(item).replace('.py','')
# BuiltIn().log_to_console(mod_name)
mod = import_module('router_mod.' + mod_name)
cmd_list = inspect.getmembers(mod, inspect.isfunction)
for cmd,data in cmd_list:
if not cmd.startswith('_') and cmd not in keyword_list:
keyword_list.append(cmd)
# BuiltIn().log_to_console(' ' + cmd)
def gen_xrun(cmd):
def _xrun(self,*args,**kwargs):
return self.xrun(cmd,*args,**kwargs)
return _xrun
setattr(self,cmd,MethodType(gen_xrun(cmd),self))
except RobotNotRunningError as e:
Common.err("WARN: RENAT is not running")
def xrun(self,cmd,*args,**kwargs):
""" Runs the vendor independent keywords.
Parametes:
- ``cmd``: a keyword
- ``args``: other argumemts
Examples:
| Router.`Xrun` | Flap Interface | ge-0/0/0 |
This keyword will then actually calling the correspond keyword for the device type.
"""
channel = self.get_current_channel()
node = channel['node']
type_list = re.split(r'-|_', channel['type'])
mod_name = ''
type_list_length = len(type_list)
mod_cmd = cmd.lower().replace(' ','_')
# go back from detail mod to common mode
for i in range(0,type_list_length):
mod_name = '_'.join(type_list[0:type_list_length-i])
try:
mod = import_module('router_mod.'+ mod_name)
if hasattr(mod,mod_cmd):
break
except ImportError:
BuiltIn().log(" Could not find `%s`, try another one" % mod_name)
BuiltIn().log(" using `%s` mod for command `%s`" % (mod_name,cmd))
result = getattr(mod,mod_cmd)(self,*args,**kwargs)
return result
def follow_mib( self,node_list,wait_time='10s',interval_time='5s',\
len='12',percentile='80',threshold='75',max_len='300',factor = '1'):
""" Waits until all the nodes defined in ``node_list`` become ``stable``.
Stableness is checked by SNMP polling result. The MIB list is define by
``mib`` in ``node`` section
Parameter:
- ``wait_time(1)``: the time before the evaluation starting
- ``interval_time(2)``: interval between SNMP polling time
- ``threshold``: below this value is evaluated as ``stable``
- ``len(3)``: the size of the evaluation window (number of values that
are used in each valuation)
- ``percentile``: real useful percentage of data (ignore top
``100-percentile`` percent)
- ``max_len(4)``: maximum waiting ``lend`` for this checking
| time sequence: --(1)--|-(2)-|-----|-----|----|-----|-----|
| <--------(3)----------> poll poll
| <--------(3)---------->
| <---------------------(4)---------->
"""
time.sleep(DateTime.convert_time(wait_time))
interval = DateTime.convert_time(interval_time)
data = {}
for node in node_list:
device = Common.LOCAL['node'][node]['device']
type = Common.GLOBAL['device'][device]['type']
data[node] = {}
data[node]['ip'] = Common.GLOBAL['device'][device]['ip']
data[node]['community'] = Common.GLOBAL['snmp-template'][type]['community']
data[node]['mib-file'] = Common.mib_for_node(node)
f = open(data[node]['mib-file'])
data[node]['oid_list'] = json.load(f)['miblist']
f.close()
data[node]['poller'] = netsnmp.SNMPSession(data[node]['ip'], data[node]['community'])
data[node]['monitor'] = []
for i in range(int(len)):
for node in node_list:
for oid in data[node]['oid_list']:
try:
value = float(data[node]['poller'].get(oid['oid'])[0][2])
except:
value = 0.0
data[node]['monitor'].insert(0,value)
time.sleep(interval)
stable = False
count = 0
BuiltIn().log("Stable checking ...")
max_len_value = int(max_len)
while not stable and count < max_len_value:
stable = True
for node in node_list:
for oid in data[node]['oid_list']:
try:
value = float(data[node]['poller'].get(oid['oid'][0][2]))
except:
value = 0.0
data[node]['monitor'].insert(0,value)
data[node]['monitor'].pop()
stable = stable and Common.is_stable(data[node]['monitor'],float(threshold), int(percentile))
BuiltIn().log("node = %s stable = %s" % (node,stable))
BuiltIn().log(",".join(str(i) for i in data[node]['monitor']))
count += 1
time.sleep(interval)
if count < max_len_value:
BuiltIn().log("Stable checking normaly finished")
else:
BuiltIn().log("Stable chekcing forcely finsined")
|
en
| 0.703923
|
# -*- coding: utf-8 -*- # Copyright 2017-2020 NTT Communications # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # expose following keywords # __all__ = ['switch','cmd','exec_file','snap','snap_diff','xrun','follow_mib'] A class provides keywords for router control. An instance of Router class automatically assigned methods of a VChannel class (*Note*: this is not an inheritance but rather 1-to-1 relation) See [./VChannel.html|VChannel] for more details about `VChannel`. Device's ``type`` is defined in master ``device.yaml``. The system will load appropriate modules for each device. Details about keywords provided by modules could be found in document of each module likes: - [./router_mod_juniper.html|Juniper module] - [./router_mod_cisco.html|Cisco module] - [./router_mod_gr.html|GR module] Keywords provides by above module could be executed through `Xrun` keyword or directly called from ``Router``. Examples: | Router.`Switch` | vmx12 | | Router.`Xrun` | Load Config | | Router.`Load Config` | | # sync the nanme with current VChannel instance # # BuiltIn().log_to_console(item) # BuiltIn().log_to_console(mod_name) # BuiltIn().log_to_console(' ' + cmd) Runs the vendor independent keywords. Parametes: - ``cmd``: a keyword - ``args``: other argumemts Examples: | Router.`Xrun` | Flap Interface | ge-0/0/0 | This keyword will then actually calling the correspond keyword for the device type. 
# go back from detail mod to common mode Waits until all the nodes defined in ``node_list`` become ``stable``. Stableness is checked by SNMP polling result. The MIB list is define by ``mib`` in ``node`` section Parameter: - ``wait_time(1)``: the time before the evaluation starting - ``interval_time(2)``: interval between SNMP polling time - ``threshold``: below this value is evaluated as ``stable`` - ``len(3)``: the size of the evaluation window (number of values that are used in each valuation) - ``percentile``: real useful percentage of data (ignore top ``100-percentile`` percent) - ``max_len(4)``: maximum waiting ``lend`` for this checking | time sequence: --(1)--|-(2)-|-----|-----|----|-----|-----| | <--------(3)----------> poll poll | <--------(3)----------> | <---------------------(4)---------->
| 2.06172
| 2
|
Node_init.py
|
dugdmitry/adhoc_routing
| 26
|
6629626
|
<filename>Node_init.py<gh_stars>10-100
#!/usr/bin/python
"""
@package Node_init
Created on Sep 25, 2014
@author: <NAME>
This module is a starting point of the program. It performs two main operations - first, provides methods for correctly
daemonizing the application after start, second, provides the main initialization point for all supporting threads and
handlers, used by the the program, as well as the de-construction routine after killing the daemon.
"""
# Import necessary python modules from the standard library
import sys
import os
import time
import atexit
from signal import SIGINT, SIGTERM
# Import the necessary modules of the program
import RoutingManager
import DataHandler
import RouteTable
import Transport
# Get DEV name from the default configuration file
from conf import DEV, SET_TOPOLOGY_FLAG
# Import module for handling the logging
import routing_logging
# Default daemon parameters
## @var REDIRECT_TO
# This constant defines a string with an absolute path to the stdout file of the daemon.
# In case if the program crashes, the last crash output will be written in this file.
REDIRECT_TO = routing_logging.PATH_TO_LOGS + "crash_output.log"
## @var PIDFILE_PATH
# This constant defines a string with an absolute path to the daemon's pid file.
PIDFILE_PATH = "/var/run/routing_daemon.pid"
# Path to a topology configuration
## @var ABSOLUTE_PATH
# This constant is a string with an absolute path to the program's main directory.
ABSOLUTE_PATH = routing_logging.ABSOLUTE_PATH
## @var TOPOLOGY_PATH
# This constant is a string with an absolute path to the file with pre-defined network topology.
# This file will be used for incoming frames filtering if the "SET_TOPOLOGY_FLAG" in the conf.py
# configuration file will be set to True.
TOPOLOGY_PATH = ABSOLUTE_PATH + "/topology.conf"
# Set root logger
## @var ROUTING_LOG
# Contains a reference to routing_logging.LogWrapper object of the main root logger for writing
# the log messages of the main module.
ROUTING_LOG = routing_logging.create_routing_log("routing.log", "root")
## A class used for creating and managing the application daemon.
# A generic daemon class for starting the main running function by overriding the run() method.
class Daemon:
## Constructor
# @param self The object pointer.
# @param pidfile An absolute path to the pid file of the process.
# @param stdin Path for stdin forwarding
# @param stdout Path for stdout forwarding
# @param stderr Path for stderr forwarding
def __init__(self, pidfile, stdin="/dev/null", stdout=REDIRECT_TO, stderr=REDIRECT_TO):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
# Erase all output from previous daemons
f = open(REDIRECT_TO, "w")
f.write("\n" + "-" * 100 + "\n")
f.close()
## Daemonize the process and do all the routine related to that
# @param self The object pointer.
def daemonize(self):
# Fork the process
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir(ABSOLUTE_PATH)
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
# se = file(self.stderr, 'a+', 0)
se = file(self.stderr, 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.del_pid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
## Delete the pid file.
# @param self The object pointer.
def del_pid(self):
os.remove(self.pidfile)
## Start the daemon.
# @param self The object pointer.
def start(self):
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
## Stop the daemon
# @param self The object pointer.
def stop(self):
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
# SIGINT needed for correctly quitting the threads (e.g., deleting the uds socket file) before killing
os.kill(pid, SIGINT)
time.sleep(0.1)
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
## Restart the daemon.
# @param self The object pointer.
def restart(self):
self.stop()
self.start()
## Default method for overriding by the child class which inherited the Daemon.
# It will be called after the process has been daemonized by start() or restart().
# @param self The object pointer.
def run(self):
pass
## Generic routing class.
# This is a generic routing class which initializes all the supporting classes, and runs the process in the main loop.
# It also catches the SIGINT signals from the daemon when the program shuts down.
class RoutingDaemon(Daemon):
## Main run method.
# @param self The object pointer.
def run(self):
# Initialize and start the log thread
routing_logging.init_log_thread()
ROUTING_LOG.info("Running the routing instance...")
# Get mac address of the network interface
node_mac = Transport.get_mac(DEV)
# Get a list of neighbors MAC addresses to be accepted (if the TOPOLOGY_FLAG is True).
topology_neighbors = self.get_topology_neighbors(node_mac)
# Creating a transport for communication with a virtual interface
app_transport = Transport.VirtualTransport()
# Creating a transport for communication with network physical interface
raw_transport = Transport.RawTransport(DEV, node_mac, topology_neighbors)
# Create a RouteTable object
table = RouteTable.Table(node_mac)
# Create data handler thread to process all incoming and outgoing messages
data_handler = DataHandler.DataHandler(app_transport, raw_transport, table)
# Creating thread for live configuration / interaction with the running program
uds_server = RoutingManager.Manager(table)
try:
# Start data handler thread
data_handler.run()
# Start uds_server thread
uds_server.start()
while True:
packet = app_transport.recv_from_app()
data_handler.app_handler.process_packet(packet)
# Catch SIGINT signal, raised by the daemon
except KeyboardInterrupt:
# Stop the handlers
data_handler.stop_threads()
# Stop UDS server
uds_server.quit()
# Stop the log thread
routing_logging.stop_log_thread()
return 0
## Get the topology neighbors of a given node from the topology_list.
# It is needed for correct filtering of the incoming frames from the raw socket.
# @param self The object pointer.
# @param node_mac The MAC address in a form "xx:xx:xx:xx:xx:xx" of the node's physical network interface used for
# communication.
def get_topology_neighbors(self, node_mac):
# Open a default topology file, if it exists
try:
f = open(TOPOLOGY_PATH, "r")
except IOError:
# No such file on this path
ROUTING_LOG.warning("Could not open default topology file!!!")
if SET_TOPOLOGY_FLAG:
ROUTING_LOG.warning("All incoming frames will be filtered out!!!")
return list()
data = f.read()[:-1]
entries = data.split("\n\n")
for ent in entries:
arr = ent.split("\n")
if arr[0] == node_mac:
neighbors = arr[1:]
return neighbors
# If nothing was found, return an empty list
return list()
if __name__ == "__main__":
## @var routing
# Main routing daemon object.
routing = RoutingDaemon(PIDFILE_PATH)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
routing.start()
elif 'stop' == sys.argv[1]:
routing.stop()
elif 'restart' == sys.argv[1]:
routing.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
<filename>Node_init.py<gh_stars>10-100
#!/usr/bin/python
"""
@package Node_init
Created on Sep 25, 2014
@author: <NAME>
This module is a starting point of the program. It performs two main operations - first, provides methods for correctly
daemonizing the application after start, second, provides the main initialization point for all supporting threads and
handlers, used by the the program, as well as the de-construction routine after killing the daemon.
"""
# Import necessary python modules from the standard library
import sys
import os
import time
import atexit
from signal import SIGINT, SIGTERM
# Import the necessary modules of the program
import RoutingManager
import DataHandler
import RouteTable
import Transport
# Get DEV name from the default configuration file
from conf import DEV, SET_TOPOLOGY_FLAG
# Import module for handling the logging
import routing_logging
# Default daemon parameters
## @var REDIRECT_TO
# This constant defines a string with an absolute path to the stdout file of the daemon.
# In case if the program crashes, the last crash output will be written in this file.
REDIRECT_TO = routing_logging.PATH_TO_LOGS + "crash_output.log"
## @var PIDFILE_PATH
# This constant defines a string with an absolute path to the daemon's pid file.
PIDFILE_PATH = "/var/run/routing_daemon.pid"
# Path to a topology configuration
## @var ABSOLUTE_PATH
# This constant is a string with an absolute path to the program's main directory.
ABSOLUTE_PATH = routing_logging.ABSOLUTE_PATH
## @var TOPOLOGY_PATH
# This constant is a string with an absolute path to the file with pre-defined network topology.
# This file will be used for incoming frames filtering if the "SET_TOPOLOGY_FLAG" in the conf.py
# configuration file will be set to True.
TOPOLOGY_PATH = ABSOLUTE_PATH + "/topology.conf"
# Set root logger
## @var ROUTING_LOG
# Contains a reference to routing_logging.LogWrapper object of the main root logger for writing
# the log messages of the main module.
ROUTING_LOG = routing_logging.create_routing_log("routing.log", "root")
## A class used for creating and managing the application daemon.
# A generic daemon class for starting the main running function by overriding the run() method.
class Daemon:
## Constructor
# @param self The object pointer.
# @param pidfile An absolute path to the pid file of the process.
# @param stdin Path for stdin forwarding
# @param stdout Path for stdout forwarding
# @param stderr Path for stderr forwarding
def __init__(self, pidfile, stdin="/dev/null", stdout=REDIRECT_TO, stderr=REDIRECT_TO):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
# Erase all output from previous daemons
f = open(REDIRECT_TO, "w")
f.write("\n" + "-" * 100 + "\n")
f.close()
## Daemonize the process and do all the routine related to that
# @param self The object pointer.
def daemonize(self):
# Fork the process
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir(ABSOLUTE_PATH)
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
# se = file(self.stderr, 'a+', 0)
se = file(self.stderr, 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.del_pid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
## Delete the pid file.
# @param self The object pointer.
def del_pid(self):
os.remove(self.pidfile)
## Start the daemon.
# @param self The object pointer.
def start(self):
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
## Stop the daemon
# @param self The object pointer.
def stop(self):
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
# SIGINT needed for correctly quitting the threads (e.g., deleting the uds socket file) before killing
os.kill(pid, SIGINT)
time.sleep(0.1)
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
## Restart the daemon.
# @param self The object pointer.
def restart(self):
self.stop()
self.start()
## Default method for overriding by the child class which inherited the Daemon.
# It will be called after the process has been daemonized by start() or restart().
# @param self The object pointer.
def run(self):
pass
## Generic routing class.
# This is a generic routing class which initializes all the supporting classes, and runs the process in the main loop.
# It also catches the SIGINT signals from the daemon when the program shuts down.
class RoutingDaemon(Daemon):
## Main run method.
# @param self The object pointer.
def run(self):
# Initialize and start the log thread
routing_logging.init_log_thread()
ROUTING_LOG.info("Running the routing instance...")
# Get mac address of the network interface
node_mac = Transport.get_mac(DEV)
# Get a list of neighbors MAC addresses to be accepted (if the TOPOLOGY_FLAG is True).
topology_neighbors = self.get_topology_neighbors(node_mac)
# Creating a transport for communication with a virtual interface
app_transport = Transport.VirtualTransport()
# Creating a transport for communication with network physical interface
raw_transport = Transport.RawTransport(DEV, node_mac, topology_neighbors)
# Create a RouteTable object
table = RouteTable.Table(node_mac)
# Create data handler thread to process all incoming and outgoing messages
data_handler = DataHandler.DataHandler(app_transport, raw_transport, table)
# Creating thread for live configuration / interaction with the running program
uds_server = RoutingManager.Manager(table)
try:
# Start data handler thread
data_handler.run()
# Start uds_server thread
uds_server.start()
while True:
packet = app_transport.recv_from_app()
data_handler.app_handler.process_packet(packet)
# Catch SIGINT signal, raised by the daemon
except KeyboardInterrupt:
# Stop the handlers
data_handler.stop_threads()
# Stop UDS server
uds_server.quit()
# Stop the log thread
routing_logging.stop_log_thread()
return 0
## Get the topology neighbors of a given node from the topology_list.
# It is needed for correct filtering of the incoming frames from the raw socket.
# @param self The object pointer.
# @param node_mac The MAC address in a form "xx:xx:xx:xx:xx:xx" of the node's physical network interface used for
# communication.
def get_topology_neighbors(self, node_mac):
# Open a default topology file, if it exists
try:
f = open(TOPOLOGY_PATH, "r")
except IOError:
# No such file on this path
ROUTING_LOG.warning("Could not open default topology file!!!")
if SET_TOPOLOGY_FLAG:
ROUTING_LOG.warning("All incoming frames will be filtered out!!!")
return list()
data = f.read()[:-1]
entries = data.split("\n\n")
for ent in entries:
arr = ent.split("\n")
if arr[0] == node_mac:
neighbors = arr[1:]
return neighbors
# If nothing was found, return an empty list
return list()
if __name__ == "__main__":
## @var routing
# Main routing daemon object.
routing = RoutingDaemon(PIDFILE_PATH)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
routing.start()
elif 'stop' == sys.argv[1]:
routing.stop()
elif 'restart' == sys.argv[1]:
routing.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
en
| 0.737475
|
#!/usr/bin/python @package Node_init Created on Sep 25, 2014 @author: <NAME> This module is a starting point of the program. It performs two main operations - first, provides methods for correctly daemonizing the application after start, second, provides the main initialization point for all supporting threads and handlers, used by the the program, as well as the de-construction routine after killing the daemon. # Import necessary python modules from the standard library # Import the necessary modules of the program # Get DEV name from the default configuration file # Import module for handling the logging # Default daemon parameters ## @var REDIRECT_TO # This constant defines a string with an absolute path to the stdout file of the daemon. # In case if the program crashes, the last crash output will be written in this file. ## @var PIDFILE_PATH # This constant defines a string with an absolute path to the daemon's pid file. # Path to a topology configuration ## @var ABSOLUTE_PATH # This constant is a string with an absolute path to the program's main directory. ## @var TOPOLOGY_PATH # This constant is a string with an absolute path to the file with pre-defined network topology. # This file will be used for incoming frames filtering if the "SET_TOPOLOGY_FLAG" in the conf.py # configuration file will be set to True. # Set root logger ## @var ROUTING_LOG # Contains a reference to routing_logging.LogWrapper object of the main root logger for writing # the log messages of the main module. ## A class used for creating and managing the application daemon. # A generic daemon class for starting the main running function by overriding the run() method. ## Constructor # @param self The object pointer. # @param pidfile An absolute path to the pid file of the process. 
# @param stdin Path for stdin forwarding # @param stdout Path for stdout forwarding # @param stderr Path for stderr forwarding # Erase all output from previous daemons ## Daemonize the process and do all the routine related to that # @param self The object pointer. # Fork the process # exit first parent #1 failed: %d (%s)\n" % (e.errno, e.strerror)) # decouple from parent environment # do second fork # exit from second parent #2 failed: %d (%s)\n" % (e.errno, e.strerror)) # redirect standard file descriptors # se = file(self.stderr, 'a+', 0) # write pidfile ## Delete the pid file. # @param self The object pointer. ## Start the daemon. # @param self The object pointer. # Check for a pidfile to see if the daemon already runs # Start the daemon ## Stop the daemon # @param self The object pointer. # Get the pid from the pidfile # not an error in a restart # Try killing the daemon process # SIGINT needed for correctly quitting the threads (e.g., deleting the uds socket file) before killing ## Restart the daemon. # @param self The object pointer. ## Default method for overriding by the child class which inherited the Daemon. # It will be called after the process has been daemonized by start() or restart(). # @param self The object pointer. ## Generic routing class. # This is a generic routing class which initializes all the supporting classes, and runs the process in the main loop. # It also catches the SIGINT signals from the daemon when the program shuts down. ## Main run method. # @param self The object pointer. # Initialize and start the log thread # Get mac address of the network interface # Get a list of neighbors MAC addresses to be accepted (if the TOPOLOGY_FLAG is True). 
# Creating a transport for communication with a virtual interface # Creating a transport for communication with network physical interface # Create a RouteTable object # Create data handler thread to process all incoming and outgoing messages # Creating thread for live configuration / interaction with the running program # Start data handler thread # Start uds_server thread # Catch SIGINT signal, raised by the daemon # Stop the handlers # Stop UDS server # Stop the log thread ## Get the topology neighbors of a given node from the topology_list. # It is needed for correct filtering of the incoming frames from the raw socket. # @param self The object pointer. # @param node_mac The MAC address in a form "xx:xx:xx:xx:xx:xx" of the node's physical network interface used for # communication. # Open a default topology file, if it exists # No such file on this path # If nothing was found, return an empty list ## @var routing # Main routing daemon object.
| 2.491256
| 2
|
Scripts/core/native/animation/arb.py
|
velocist/TS4CheatsInfo
| 0
|
6629627
|
<reponame>velocist/TS4CheatsInfo<filename>Scripts/core/native/animation/arb.py
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\native\animation\arb.py
# Compiled at: 2017-08-04 23:38:31
# Size of source mod 2**32: 18313 bytes
from _animation import TRACK_NORMAL
from _math import Transform
from collections import namedtuple
import _math, api_config, enum
_get_next_tag_id = None
_get_current_tag_set = None
def set_tag_functions(get_id, get_set):
global _get_current_tag_set
global _get_next_tag_id
_get_next_tag_id = get_id
_get_current_tag_set = get_set
class BlockOnAnimationTag:
__slots__ = [
'tag']
def __init__(self):
self.tag = _get_next_tag_id()
def __enter__(self):
_get_current_tag_set().add(self.tag)
return self.tag
def __exit__(self, exc_type, exc_val, exc_tb):
_get_current_tag_set().remove(self.tag)
return False
def params_repr(params):
l = []
for key, value in params.items():
if isinstance(key, tuple):
key = key[1] + ':' + key[0]
l.append('{}={}'.format(key, value))
l.sort()
l = ', '.join(l)
l = '[' + l + ']'
return l
class ClipEventType(enum.Int, export=False):
Invalid = 0
Parent = 1
Unparent = 2
Sound = 3
Script = 4
Effect = 5
Visibility = 6
FootPlant = 7
CreateProp = 8
DestroyProp = 9
StopEffect = 10
BlockTransition = 11
Snap = 12
Reaction = 13
DoubleModifierSound = 14
DspInterval = 15
MaterialState = 16
GeometryState = 17
FocusCompatability = 18
SuppressLipSync = 19
Censor = 20
ServerSoundStart = 21
ServerSoundStop = 22
EnableFacialOverlay = 23
FadeObject = 24
AdvanceFlipBook = 3433208315
TimelineScript = 3466999690
ClientLocationCapture = 4151022839
ClientLocationRestore = 271806351
def event_types_match(a, b):
if a == b:
return True
if a == ClipEventType.Script or a == ClipEventType.TimelineScript:
if not b == ClipEventType.Script:
if b == ClipEventType.TimelineScript:
return True
return False
class ArbEventData:
__slots__ = [
'event_type', 'event_id', 'event_data', 'actors']
_no_actors = ()
def __init__(self, event_type, event_id, event_data, actors=None):
self.event_type = event_type
self.event_id = event_id
self.event_data = event_data
self.actors = actors or self._no_actors
class _ArbEventHandler:
__slots__ = [
'callback', 'event_type', 'event_id']
def __init__(self, callback, event_type, event_id):
self.callback = callback
self.event_type = event_type
self.event_id = event_id
try:
from _animation import ArbBase, EVENT_TIME_FROM_END, EVENT_TIME_FROM_START
from _animation import CENSOREVENT_STATE_OFF, CENSOREVENT_STATE_TORSO, CENSOREVENT_STATE_TORSOPELVIS, CENSOREVENT_STATE_PELVIS, CENSOREVENT_STATE_FULLBODY, CENSOREVENT_STATE_RHAND, CENSOREVENT_STATE_LHAND, CENSOREVENT_STATE_TODDLERPELVIS
except:
CENSOREVENT_STATE_OFF = 0
CENSOREVENT_STATE_TORSO = 1
CENSOREVENT_STATE_TORSOPELVIS = 2
CENSOREVENT_STATE_PELVIS = 3
CENSOREVENT_STATE_FULLBODY = 4
CENSOREVENT_STATE_RHAND = 5
CENSOREVENT_STATE_LHAND = 6
CENSOREVENT_STATE_TODDLERPELVIS = 7
class ArbBase:
def schedule(self, actor_id, controller, priority=10000, blend_in=-1.0, blend_out=-1.0):
pass
def _actors(self):
return []
def _events(self):
return []
def _get_boundary_conditions(self, actor_id):
pass
def _begin_synchronized_group(self):
pass
def _end_synchronized_group(self):
pass
def get_estimated_duration(self):
return 1.0
def _get_timing(self):
return (1.0, 1.0, 0.0)
def is_valid(self):
return True
def _add_custom_event(self, actor_id, base_time, time_in_secs, event_id, allow_create_stub=False):
return True
def _ends_in_looping_content(self, actor_id, min_track_id):
return False
class _ArbSyncGroup:
def __init__(self, arb):
self.arb = arb
def __enter__(self):
if self.arb._in_sync_group:
raise NotImplementedError('Starting a sync-group within another sync-group. Nesting is not supported.')
self.arb._begin_synchronized_group()
self.arb._in_sync_group = True
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.arb._in_sync_group:
raise RuntimeError('Ending a sync-group while not within a sync-group.')
self.arb._end_synchronized_group()
self.arb._in_sync_group = False
return False
class _EventHandlerRecord(namedtuple('__EventHandlerRecord', ('clip_name', 'event_type', 'event_id', 'callbacks',
'event_data', 'tag', 'errors'))):
__slots__ = ()
RequiredSlot = namedtuple('RequiredSlot', ('actor_id', 'target_id', 'joint_hash'))
class BoundaryConditionInfo(namedtuple('_BoundaryConditionInfo', ('asm_name', 'params', 'actor_name', 'from_state',
'to_state'))):
def __str__(self):
params = params_repr(self.params)
return '{0.asm_name}: actor {0.actor_name} from {0.from_state} to {0.to_state} with parameters {1}'.format(self, params)
api_config.register_native_support('native.animation.arb.BoundaryConditionInfo')
class BoundaryCondition:
    """Pre/post spatial constraints for an animation.

    Each side pairs a reference object id, a transform (None means
    indeterminate), and a joint-name hash; `required_slots` lists the
    (child, object, joint-hash) slots the animation must occupy.
    """

    __slots__ = ('pre_condition_transform', 'post_condition_transform', 'pre_condition_reference_object_id',
                 'post_condition_reference_object_id', 'pre_condition_reference_joint_name_hash',
                 'post_condition_reference_joint_name_hash', 'required_slots', 'debug_info')

    def __init__(self, pre_condition_reference_object_id, pre_condition_transform, pre_condition_reference_joint_name_hash, post_condition_reference_object_id, post_condition_transform, post_condition_reference_joint_name_hash, required_slots):
        # Pre-condition side.
        self.pre_condition_reference_object_id = pre_condition_reference_object_id
        self.pre_condition_transform = pre_condition_transform
        self.pre_condition_reference_joint_name_hash = pre_condition_reference_joint_name_hash
        # Post-condition side.
        self.post_condition_reference_object_id = post_condition_reference_object_id
        self.post_condition_transform = post_condition_transform
        self.post_condition_reference_joint_name_hash = post_condition_reference_joint_name_hash
        self.required_slots = required_slots
        # Populated externally when debugging information is available.
        self.debug_info = None

    def __repr__(self):

        def fmt_obj_id(obj_id):
            return 'None' if obj_id is None else '0x{:x}'.format(obj_id)

        def fmt_transform(transform):
            return transform if transform is not None else 'Indeterminate'

        return '<BoundaryCondition {} {} {} {}>'.format(
            fmt_obj_id(self.pre_condition_reference_object_id),
            fmt_transform(self.pre_condition_transform),
            fmt_obj_id(self.post_condition_reference_object_id),
            fmt_transform(self.post_condition_transform))
class NativeArb(ArbBase):
    """Script-side wrapper over the native Animation Request Block (ARB).

    Adds Python event-handler registration and dispatch, plus convenience
    accessors, on top of the native ArbBase interface.
    """

    def __init__(self):
        # True while inside a synchronized() context; guards against nesting.
        self._in_sync_group = False
        # Registered _ArbEventHandler instances awaiting dispatch.
        self._handlers = []
        # Records for events that had no matching handler (filled elsewhere).
        self.unhandled_event_records = []

    def get_boundary_conditions(self, actor):
        """Return a BoundaryCondition for *actor*, or None when the native
        layer reports no boundary data."""
        boundaries = self._get_boundary_conditions(actor.id)
        if not boundaries:
            return
        else:
            # Native call yields a flat 9-tuple; unpack positionally.
            # NOTE(review): the 'joing' typo is preserved from the original.
            pre_condition_reference_object_id, pre_condition_reference_joing_name_hash, pre_condition_surface_object_id, pre_condition_surface_joint_name_hash, pre_condition_surface_child_id, pre_condition_transform, post_condition_reference_object_id, post_condition_reference_joint_name_hash, post_condition_transform = boundaries
            if pre_condition_surface_object_id == 0 or pre_condition_surface_child_id == 0:
                # A zero id means there is no surface requirement.
                required_slots = ()
            else:
                required_slots = (
                 (
                  pre_condition_surface_child_id, pre_condition_surface_object_id, pre_condition_surface_joint_name_hash),)
            return BoundaryCondition(pre_condition_reference_object_id, pre_condition_transform, pre_condition_reference_joing_name_hash, post_condition_reference_object_id, post_condition_transform, post_condition_reference_joint_name_hash, required_slots)

    def add_custom_event(self, actor_id, time_in_secs, event_id):
        """Schedule a custom event; negative times are measured from the end."""
        if time_in_secs >= 0:
            base_time = EVENT_TIME_FROM_START
        else:
            base_time = EVENT_TIME_FROM_END
        return self._add_custom_event(actor_id, base_time, abs(time_in_secs), event_id, False)

    def ends_in_looping_content(self, actor_id, min_track_id=TRACK_NORMAL):
        # Thin pass-through to the native query.
        return self._ends_in_looping_content(actor_id, min_track_id)

    def synchronized(self):
        """Return a context manager that brackets a synchronized group."""
        return _ArbSyncGroup(self)

    def register_event_handler(self, handler_method, handler_type=None, handler_id=None):
        """Register *handler_method* for events matching handler_type/handler_id."""
        self._handlers.append(_ArbEventHandler(handler_method, handler_type, handler_id))

    def handle_events(self, events=None, event_context=None):
        """Dispatch *events* (default: the native event list) to registered
        handlers and return the per-event _EventHandlerRecord list.

        Handlers that fire are removed afterwards (one-shot semantics).
        """
        if events is None:
            events = self._events()
        event_records = []
        actors = self._actors()
        # Process ClientLocationCapture events first; the stable sort keeps
        # all other events in their original order.
        events = tuple(sorted(events, key=(lambda e: e[0] != ClipEventType.ClientLocationCapture)))
        handlers_to_delete = []
        for event_type, event_id, event_data in events:
            # NOTE(review): this filter looks like a decompilation artifact --
            # as written it requires handler.event_id to be None AND unequal
            # to event_id simultaneously; the original source most likely read
            # (event_type is None or types match) and (event_id is None or
            # handler.event_id == event_id).  Preserved verbatim here.
            applicable_handlers = [handler for handler in self._handlers if not handler.event_type is None if event_types_match(handler.event_type, event_type) if not handler.event_id == event_id if handler.event_id is None]
            if applicable_handlers:
                with BlockOnAnimationTag() as (tag):
                    errors = []
                    clip_name = event_data.get('clip_name', 'unknown clip')
                    callback_strings = [str(handler.callback) for handler in applicable_handlers]
                    event_records.append(_EventHandlerRecord(clip_name, event_type, event_id, callback_strings, event_data, tag, errors))
                    data = ArbEventData(event_type, event_id, event_data, actors)
                    for handler in applicable_handlers:
                        # Sentinel survives if the callback raises inside
                        # event_context and the context swallows the error.
                        result = 'Exception raised.'
                        if event_context is not None:
                            with event_context:
                                result = handler.callback(data)
                        else:
                            result = handler.callback(data)
                        handlers_to_delete.append(handler)
                        if result is not None:
                            # NOTE(review): decompiled short-circuit chain;
                            # records *result* as an error only when it is
                            # neither a str nor truthy -- confirm intent.
                            isinstance(result, str) or result or errors.append(result)
                        continue
        # Drop every handler that fired (one-shot dispatch).
        for handler in handlers_to_delete:
            if handler in self._handlers:
                self._handlers.remove(handler)
        return event_records

    def get_timing(self):
        # Returns the native 3-tuple; exact field semantics are not
        # derivable from this file (presumably duration/scale/offset).
        return self._get_timing()

    def append(self, arb, safe_mode=True, force_sync=False):
        """Append another arb's content; on success adopt its handlers too.

        NOTE(review): relies on a native self._append that the pure-Python
        fallback ArbBase does not define.
        """
        if self._append(arb, safe_mode=safe_mode, force_sync=force_sync):
            self._handlers.extend(arb._handlers)
            return True
        return False
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\native\animation\arb.py
# Compiled at: 2017-08-04 23:38:31
# Size of source mod 2**32: 18313 bytes
from _animation import TRACK_NORMAL
from _math import Transform
from collections import namedtuple
import _math, api_config, enum
_get_next_tag_id = None
_get_current_tag_set = None
def set_tag_functions(get_id, get_set):
    """Install the module-level tag hooks used by BlockOnAnimationTag.

    get_id: zero-argument callable producing a fresh tag id.
    get_set: zero-argument callable returning the mutable set of active tags.
    """
    global _get_next_tag_id, _get_current_tag_set
    _get_next_tag_id = get_id
    _get_current_tag_set = get_set
class BlockOnAnimationTag:
    """Context manager holding a blocking animation tag for its scope.

    The tag id is taken once at construction; entering adds it to the
    module's active-tag set and exiting removes it again.
    """

    __slots__ = [
     'tag']

    def __init__(self):
        # Grab a fresh id from the installed generator (see set_tag_functions).
        self.tag = _get_next_tag_id()

    def __enter__(self):
        active_tags = _get_current_tag_set()
        active_tags.add(self.tag)
        return self.tag

    def __exit__(self, exc_type, exc_val, exc_tb):
        active_tags = _get_current_tag_set()
        active_tags.remove(self.tag)
        return False  # never suppress exceptions
def params_repr(params):
    """Render an ASM parameter mapping as a stable, sorted '[k=v, ...]' string."""
    rendered = []
    for key, value in params.items():
        if isinstance(key, tuple):
            # Tuple keys are (name, actor); display them as 'actor:name'.
            key = key[1] + ':' + key[0]
        rendered.append('{}={}'.format(key, value))
    return '[' + ', '.join(sorted(rendered)) + ']'
class ClipEventType(enum.Int, export=False):
    """Kinds of events that may be embedded in an animation clip.

    NOTE(review): presumably mirrors the native engine's enumeration; the
    large trailing values look like 32-bit name hashes -- confirm before
    renumbering any member.
    """
    Invalid = 0
    Parent = 1
    Unparent = 2
    Sound = 3
    Script = 4
    Effect = 5
    Visibility = 6
    FootPlant = 7
    CreateProp = 8
    DestroyProp = 9
    StopEffect = 10
    BlockTransition = 11
    Snap = 12
    Reaction = 13
    DoubleModifierSound = 14
    DspInterval = 15
    MaterialState = 16
    GeometryState = 17
    FocusCompatability = 18
    SuppressLipSync = 19
    Censor = 20
    ServerSoundStart = 21
    ServerSoundStop = 22
    EnableFacialOverlay = 23
    FadeObject = 24
    # Hash-valued entries (see class docstring):
    AdvanceFlipBook = 3433208315
    TimelineScript = 3466999690
    ClientLocationCapture = 4151022839
    ClientLocationRestore = 271806351
def event_types_match(a, b):
    """Return True when clip event types *a* and *b* should be treated as equal.

    Script and TimelineScript events are interchangeable; every other type
    matches only on exact equality.
    """
    if a == b:
        return True
    # NOTE(review): the decompiled original only matched
    # Script -> TimelineScript and returned False for the reverse pairing;
    # the symmetric membership test below restores the evident intent.
    script_kinds = (ClipEventType.Script, ClipEventType.TimelineScript)
    return a in script_kinds and b in script_kinds
class ArbEventData:
    """Bundle describing a single clip event handed to a script handler."""

    __slots__ = [
     'event_type', 'event_id', 'event_data', 'actors']

    # Shared empty tuple so 'actors' is always iterable without allocating.
    _no_actors = ()

    def __init__(self, event_type, event_id, event_data, actors=None):
        self.event_type = event_type
        self.event_id = event_id
        self.event_data = event_data
        self.actors = self._no_actors if not actors else actors
class _ArbEventHandler:
__slots__ = [
'callback', 'event_type', 'event_id']
def __init__(self, callback, event_type, event_id):
self.callback = callback
self.event_type = event_type
self.event_id = event_id
try:
    from _animation import ArbBase, EVENT_TIME_FROM_END, EVENT_TIME_FROM_START
    from _animation import CENSOREVENT_STATE_OFF, CENSOREVENT_STATE_TORSO, CENSOREVENT_STATE_TORSOPELVIS, CENSOREVENT_STATE_PELVIS, CENSOREVENT_STATE_FULLBODY, CENSOREVENT_STATE_RHAND, CENSOREVENT_STATE_LHAND, CENSOREVENT_STATE_TODDLERPELVIS
except ImportError:
    # Catch only ImportError -- the original bare 'except:' would also have
    # swallowed SystemExit/KeyboardInterrupt.  Fall back to pure-Python stubs
    # so this module can be imported outside the game client (tooling/tests).
    CENSOREVENT_STATE_OFF = 0
    CENSOREVENT_STATE_TORSO = 1
    CENSOREVENT_STATE_TORSOPELVIS = 2
    CENSOREVENT_STATE_PELVIS = 3
    CENSOREVENT_STATE_FULLBODY = 4
    CENSOREVENT_STATE_RHAND = 5
    CENSOREVENT_STATE_LHAND = 6
    CENSOREVENT_STATE_TODDLERPELVIS = 7

    # NOTE(review): EVENT_TIME_FROM_START / EVENT_TIME_FROM_END get no
    # fallback values here, so NativeArb.add_custom_event would raise
    # NameError on this path -- confirm whether stubs should be added.

    class ArbBase:
        """Inert stand-in mirroring the native ArbBase interface."""

        def schedule(self, actor_id, controller, priority=10000, blend_in=-1.0, blend_out=-1.0):
            pass

        def _actors(self):
            return []

        def _events(self):
            return []

        def _get_boundary_conditions(self, actor_id):
            pass

        def _begin_synchronized_group(self):
            pass

        def _end_synchronized_group(self):
            pass

        def get_estimated_duration(self):
            return 1.0

        def _get_timing(self):
            return (1.0, 1.0, 0.0)

        def is_valid(self):
            return True

        def _add_custom_event(self, actor_id, base_time, time_in_secs, event_id, allow_create_stub=False):
            return True

        def _ends_in_looping_content(self, actor_id, min_track_id):
            return False
class _ArbSyncGroup:
def __init__(self, arb):
self.arb = arb
def __enter__(self):
if self.arb._in_sync_group:
raise NotImplementedError('Starting a sync-group within another sync-group. Nesting is not supported.')
self.arb._begin_synchronized_group()
self.arb._in_sync_group = True
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.arb._in_sync_group:
raise RuntimeError('Ending a sync-group while not within a sync-group.')
self.arb._end_synchronized_group()
self.arb._in_sync_group = False
return False
class _EventHandlerRecord(namedtuple('__EventHandlerRecord', ('clip_name', 'event_type', 'event_id', 'callbacks',
                                                              'event_data', 'tag', 'errors'))):
    # Immutable log entry for one dispatched clip event: the clip it came
    # from, the handlers that ran (as strings), the blocking animation tag
    # held during dispatch, and any error values the handlers reported.
    __slots__ = ()


# (actor_id, target_id, joint_hash) triple naming a slot an animation requires.
RequiredSlot = namedtuple('RequiredSlot', ('actor_id', 'target_id', 'joint_hash'))
class BoundaryConditionInfo(namedtuple('_BoundaryConditionInfo', ('asm_name', 'params', 'actor_name', 'from_state',
                                                                  'to_state'))):
    """Debug description of the ASM transition that produced a boundary condition."""

    def __str__(self):
        rendered_params = params_repr(self.params)
        return '{0.asm_name}: actor {0.actor_name} from {0.from_state} to {0.to_state} with parameters {1}'.format(self, rendered_params)
api_config.register_native_support('native.animation.arb.BoundaryConditionInfo')
class BoundaryCondition:
    """Pre/post spatial constraints for an animation.

    Each side pairs a reference object id, a transform (None means
    indeterminate), and a joint-name hash; `required_slots` lists the
    (child, object, joint-hash) slots the animation must occupy.
    """

    __slots__ = ('pre_condition_transform', 'post_condition_transform', 'pre_condition_reference_object_id',
                 'post_condition_reference_object_id', 'pre_condition_reference_joint_name_hash',
                 'post_condition_reference_joint_name_hash', 'required_slots', 'debug_info')

    def __init__(self, pre_condition_reference_object_id, pre_condition_transform, pre_condition_reference_joint_name_hash, post_condition_reference_object_id, post_condition_transform, post_condition_reference_joint_name_hash, required_slots):
        # Pre-condition side.
        self.pre_condition_reference_object_id = pre_condition_reference_object_id
        self.pre_condition_transform = pre_condition_transform
        self.pre_condition_reference_joint_name_hash = pre_condition_reference_joint_name_hash
        # Post-condition side.
        self.post_condition_reference_object_id = post_condition_reference_object_id
        self.post_condition_transform = post_condition_transform
        self.post_condition_reference_joint_name_hash = post_condition_reference_joint_name_hash
        self.required_slots = required_slots
        # Populated externally when debugging information is available.
        self.debug_info = None

    def __repr__(self):

        def fmt_obj_id(obj_id):
            return 'None' if obj_id is None else '0x{:x}'.format(obj_id)

        def fmt_transform(transform):
            return transform if transform is not None else 'Indeterminate'

        return '<BoundaryCondition {} {} {} {}>'.format(
            fmt_obj_id(self.pre_condition_reference_object_id),
            fmt_transform(self.pre_condition_transform),
            fmt_obj_id(self.post_condition_reference_object_id),
            fmt_transform(self.post_condition_transform))
class NativeArb(ArbBase):
    """Script-side wrapper over the native Animation Request Block (ARB).

    Adds Python event-handler registration and dispatch, plus convenience
    accessors, on top of the native ArbBase interface.
    """

    def __init__(self):
        # True while inside a synchronized() context; guards against nesting.
        self._in_sync_group = False
        # Registered _ArbEventHandler instances awaiting dispatch.
        self._handlers = []
        # Records for events that had no matching handler (filled elsewhere).
        self.unhandled_event_records = []

    def get_boundary_conditions(self, actor):
        """Return a BoundaryCondition for *actor*, or None when the native
        layer reports no boundary data."""
        boundaries = self._get_boundary_conditions(actor.id)
        if not boundaries:
            return
        else:
            # Native call yields a flat 9-tuple; unpack positionally.
            # NOTE(review): the 'joing' typo is preserved from the original.
            pre_condition_reference_object_id, pre_condition_reference_joing_name_hash, pre_condition_surface_object_id, pre_condition_surface_joint_name_hash, pre_condition_surface_child_id, pre_condition_transform, post_condition_reference_object_id, post_condition_reference_joint_name_hash, post_condition_transform = boundaries
            if pre_condition_surface_object_id == 0 or pre_condition_surface_child_id == 0:
                # A zero id means there is no surface requirement.
                required_slots = ()
            else:
                required_slots = (
                 (
                  pre_condition_surface_child_id, pre_condition_surface_object_id, pre_condition_surface_joint_name_hash),)
            return BoundaryCondition(pre_condition_reference_object_id, pre_condition_transform, pre_condition_reference_joing_name_hash, post_condition_reference_object_id, post_condition_transform, post_condition_reference_joint_name_hash, required_slots)

    def add_custom_event(self, actor_id, time_in_secs, event_id):
        """Schedule a custom event; negative times are measured from the end."""
        if time_in_secs >= 0:
            base_time = EVENT_TIME_FROM_START
        else:
            base_time = EVENT_TIME_FROM_END
        return self._add_custom_event(actor_id, base_time, abs(time_in_secs), event_id, False)

    def ends_in_looping_content(self, actor_id, min_track_id=TRACK_NORMAL):
        # Thin pass-through to the native query.
        return self._ends_in_looping_content(actor_id, min_track_id)

    def synchronized(self):
        """Return a context manager that brackets a synchronized group."""
        return _ArbSyncGroup(self)

    def register_event_handler(self, handler_method, handler_type=None, handler_id=None):
        """Register *handler_method* for events matching handler_type/handler_id."""
        self._handlers.append(_ArbEventHandler(handler_method, handler_type, handler_id))

    def handle_events(self, events=None, event_context=None):
        """Dispatch *events* (default: the native event list) to registered
        handlers and return the per-event _EventHandlerRecord list.

        Handlers that fire are removed afterwards (one-shot semantics).
        """
        if events is None:
            events = self._events()
        event_records = []
        actors = self._actors()
        # Process ClientLocationCapture events first; the stable sort keeps
        # all other events in their original order.
        events = tuple(sorted(events, key=(lambda e: e[0] != ClipEventType.ClientLocationCapture)))
        handlers_to_delete = []
        for event_type, event_id, event_data in events:
            # NOTE(review): this filter looks like a decompilation artifact --
            # as written it requires handler.event_id to be None AND unequal
            # to event_id simultaneously; the original source most likely read
            # (event_type is None or types match) and (event_id is None or
            # handler.event_id == event_id).  Preserved verbatim here.
            applicable_handlers = [handler for handler in self._handlers if not handler.event_type is None if event_types_match(handler.event_type, event_type) if not handler.event_id == event_id if handler.event_id is None]
            if applicable_handlers:
                with BlockOnAnimationTag() as (tag):
                    errors = []
                    clip_name = event_data.get('clip_name', 'unknown clip')
                    callback_strings = [str(handler.callback) for handler in applicable_handlers]
                    event_records.append(_EventHandlerRecord(clip_name, event_type, event_id, callback_strings, event_data, tag, errors))
                    data = ArbEventData(event_type, event_id, event_data, actors)
                    for handler in applicable_handlers:
                        # Sentinel survives if the callback raises inside
                        # event_context and the context swallows the error.
                        result = 'Exception raised.'
                        if event_context is not None:
                            with event_context:
                                result = handler.callback(data)
                        else:
                            result = handler.callback(data)
                        handlers_to_delete.append(handler)
                        if result is not None:
                            # NOTE(review): decompiled short-circuit chain;
                            # records *result* as an error only when it is
                            # neither a str nor truthy -- confirm intent.
                            isinstance(result, str) or result or errors.append(result)
                        continue
        # Drop every handler that fired (one-shot dispatch).
        for handler in handlers_to_delete:
            if handler in self._handlers:
                self._handlers.remove(handler)
        return event_records

    def get_timing(self):
        # Returns the native 3-tuple; exact field semantics are not
        # derivable from this file (presumably duration/scale/offset).
        return self._get_timing()

    def append(self, arb, safe_mode=True, force_sync=False):
        """Append another arb's content; on success adopt its handlers too.

        NOTE(review): relies on a native self._append that the pure-Python
        fallback ArbBase does not define.
        """
        if self._append(arb, safe_mode=safe_mode, force_sync=force_sync):
            self._handlers.extend(arb._handlers)
            return True
        return False
|
en
| 0.48796
|
# uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Core\native\animation\arb.py # Compiled at: 2017-08-04 23:38:31 # Size of source mod 2**32: 18313 bytes
| 1.915517
| 2
|
src/Practice/Algorithms/Greedy/Minimum Absolute Difference in an Array.py
|
neelkamath/hackerrank-solutions
| 0
|
6629628
|
<gh_stars>0
#!/bin/python3
import math
import os
import random
import re
import sys
def minimumAbsoluteDifference(arr):
    """Return the minimum absolute difference between any pair of elements.

    After sorting, the closest pair must be adjacent, so one pass over the
    neighbouring gaps suffices (O(n log n) overall).  Sorts *arr* in place,
    matching the original behaviour.
    """
    if len(arr) < 2:
        return 1e9  # preserve the original's sentinel for degenerate input
    arr.sort()
    # min() over adjacent gaps also fixes the original's bug of returning
    # the 1e9 sentinel whenever every gap exceeded 1e9.
    return min(b - a for a, b in zip(arr, arr[1:]))
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())  # declared element count; only consumed to advance stdin
    arr = list(map(int, input().rstrip().split()))
    result = minimumAbsoluteDifference(arr)
    fptr.write(str(result) + '\n')
    fptr.close()
|
#!/bin/python3
import math
import os
import random
import re
import sys
def minimumAbsoluteDifference(arr):
    """Return the minimum absolute difference between any pair of elements.

    After sorting, the closest pair must be adjacent, so one pass over the
    neighbouring gaps suffices (O(n log n) overall).  Sorts *arr* in place,
    matching the original behaviour.
    """
    if len(arr) < 2:
        return 1e9  # preserve the original's sentinel for degenerate input
    arr.sort()
    # min() over adjacent gaps also fixes the original's bug of returning
    # the 1e9 sentinel whenever every gap exceeded 1e9.
    return min(b - a for a, b in zip(arr, arr[1:]))
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())  # declared element count; only consumed to advance stdin
    arr = list(map(int, input().rstrip().split()))
    result = minimumAbsoluteDifference(arr)
    fptr.write(str(result) + '\n')
    fptr.close()
|
ru
| 0.16812
|
#!/bin/python3
| 3.089164
| 3
|
team/games/__init__.py
|
ReubenJ/ulti_stats
| 0
|
6629629
|
<filename>team/games/__init__.py
from .game import Game
from .point import Point
|
<filename>team/games/__init__.py
from .game import Game
from .point import Point
|
none
| 1
| 1.153423
| 1
|
|
tests/functional/Surfaces/Surface.py
|
jmikeowen/Spheral
| 22
|
6629630
|
<filename>tests/functional/Surfaces/Surface.py
#ATS:t1 = test( SELF, "--CRKSPH True --cfl 0.25 --clearDirectories True --steps=2 --nx=50 --ny=50 --checkAnswer=True --detectSurfaces=True --detectThreshold=0.99 --sweepAngle=pi/4.0 --detectRange=2.0", label="Surface Detection Test -- 2-D (serial)")
from math import *
import mpi
import os, sys, shutil
from Spheral2d import *
from SpheralTestUtilities import *
from findLastRestart import *
import SpheralPointmeshSiloDump
from GenerateNodeDistribution2d import *
title("Surface Detection Test")
class Rejecter(object):
    """Node-distribution rejecter that carves a circular hole at the origin.

    Keeps only points whose distance from the origin exceeds ``radius``;
    the parallel x/y/mass/H lists are filtered in lockstep.
    """

    def __init__(self, radius):
        self.radius = radius

    def __call__(self, x, y, m, H):
        # zip replaces the original Python-2-only xrange index loop, so this
        # now also runs under Python 3; behaviour is identical.
        nX = []
        nY = []
        nM = []
        nH = []
        for xi, yi, mi, Hi in zip(x, y, m, H):
            if sqrt(xi * xi + yi * yi) > self.radius:
                nX.append(xi)
                nY.append(yi)
                nM.append(mi)
                nH.append(Hi)
        return nX, nY, nM, nH
class dSurface(object):
    """Periodic-work callable that dumps surface-detection diagnostics.

    Each invocation rewrites ``file`` with one row per internal node of
    ``nodes``.  ``renormMat`` is retained but not called by default.
    """

    def __init__(self, nodes, db, Kern, Bf, Sf, hydro, file):
        self.nodes = nodes
        self.db = db
        self.Kern = Kern
        self.Bf = Bf
        self.Sf = Sf
        self.hydro = hydro
        self.file = file

    def __call__(self, cycle, time, dt):
        # Signature matches Spheral's periodic-work interface.
        #self.renormMat()
        self.momentNorm()

    def renormMat(self):
        """Write the per-node renormalization eigenvalue diagnostic.

        Builds the CRKSPH-style correction matrix B_i from each node's
        neighbours and records the smaller eigenvalue magnitude.
        """
        # 'with' closes the handle (the original leaked it); range replaces
        # the Python-2-only xrange, identical under Python 2.
        with open(self.file, 'w') as f:
            f.write("i\tSi\txi\n")
            self.db.updateConnectivityMap(True)
            cm = self.db.connectivityMap()
            for i in range(self.nodes.numInternalNodes):
                xi = self.nodes.positions()[i]
                Hi = self.nodes.Hfield()[i]
                neighbors = cm.connectivityForNode(self.nodes, i)
                Bi = Tensor.zero
                Vi = self.hydro.volume()[0][i]  # NOTE(review): currently unused
                for j in neighbors[0]:
                    xj = self.nodes.positions()[j]
                    xij = xj - xi
                    Hj = self.nodes.Hfield()[j]
                    Vj = self.hydro.volume()[0][j] # this could be done better
                    gWj = Hj*xij.unitVector()*self.Kern.gradValue((Hj*xij).magnitude(), Hj.Determinant())
                    Bij = gWj.dyad(xij)*Vj
                    Bi += Bij
                Bi = Bi.Inverse()
                Ei = Bi.eigenValues()
                Si = min(abs(Ei[0]), abs(Ei[1]))
                f.write("%d\t%f\t%f\n" % (i, Si, xi.magnitude()))

    def momentNorm(self):
        """Write the zeroth/first RK moment magnitudes for every internal node."""
        with open(self.file, 'w') as f:
            f.write("i\tSi\txi\tSSi\n")
            for i in range(self.nodes.numInternalNodes):
                xi = self.nodes.positions()[i]
                m0i = self.hydro.m0()[0][i]
                m1i = self.hydro.m1()[0][i]
                f.write("%d\t%f\t%f\t%f\n" % (i, m0i, xi.magnitude(), m1i.magnitude()))
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(lattice = True,
nx = 50,
ny = 50,
rmin = 0.0,
rmax = 1.0,
nPerh = 1.01,
rho0 = 1.0,
eps0 = 0.0,
gamma = 5.0/3.0,
mu = 1.0,
rhomin = 1.0e-8,
holeRadius = 0.5,
ASPH = False,
CRKSPH = True,
SPH = True,
XSPH = False,
filter = 0,
KernelConstructor = NBSplineKernel,
order = 7,
# Hydro
Qconstructor = MonaghanGingoldViscosity2d,
correctionOrder = LinearOrder,
Cl = 1.0,
Cq = 2.0,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-4,
negligibleSoundSpeed = 1e-5,
csMultiplier = 0.1,
hmin = 0.004,
hmax = 10.0,
hminratio = 0.1,
compatibleEnergy = False,
gradhCorrection = False,
HEvolution = IdealH,
sumForMassDensity = RigorousSumDensity,
densityUpdate = RigorousSumDensity,
HUpdate = IdealH,
linearInExpansion = False,
volumeType = RKVoronoiVolume,
# Timestep constraints
cfl = 0.5,
deltaPhi = 0.01,
domainIndependent = False,
# Integrator
IntegratorConstructor = CheapSynchronousRK2Integrator,
goalTime = 1.0,
dt = 0.0001,
dtMin = 1.0e-5,
dtMax = 1.0e5,
dtGrowth = 2.0,
maxSteps = None,
steps = None,
statsStep = 10,
redistributeStep = 500,
restartStep = 500,
restoreCycle = None,
smoothIters = 0,
rigorousBoundaries = True,
dtverbose = False,
vizCycle = 1,
vizTime = 1.0e5,
vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState,
clearDirectories = False,
renormFile = "renorm.txt",
detectSurfaces = False,
detectRange = 2.0,
sweepAngle = pi/4.0,
detectThreshold = 0.99,
checkAnswer = False,
)
if CRKSPH:
Qconstructor = LimitedMonaghanGingoldViscosity2d
if ASPH:
HydroConstructor = ACRKSPHHydro
else:
HydroConstructor = CRKSPHHydro
else:
if ASPH:
HydroConstructor = ASPHHydro
else:
HydroConstructor = SPHHydro
dataDir = "surface-%i-%i" % (nx,ny)
dataDir = os.path.join(dataDir, "CRK=%s-nPerh=%f" % (CRKSPH,nPerh))
dataDir = os.path.join(dataDir, "Cl=%f-Cq=%f" % (Cl,Cq))
restartBaseName = "%s/SurfaceTest-%i-%i" % (dataDir,nx,ny)
vizDir = os.path.join(dataDir, "visit")
vizBaseName = "SurfaceTest"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if clearDirectories and os.path.exists(dataDir):
shutil.rmtree(dataDir)
if not os.path.exists(dataDir):
os.makedirs(dataDir)
if not os.path.exists(vizDir):
os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
if restoreCycle is None:
restoreCycle = findLastRestart(restartBaseName)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
if KernelConstructor==NBSplineKernel:
Wbase = NBSplineKernel(order)
else:
Wbase = KernelConstructor()
WT = TableKernel(KernelConstructor(order), 1000)
WTPi = TableKernel(KernelConstructor(order), 1000)
output('WT')
output('WTPi')
kernelExtent = WT.kernelExtent
output("WT")
nodes1 = makeFluidNodeList("nodes1", eos,
hmin = hmin,
hmax = hmax,
nPerh = nPerh,
kernelExtent = kernelExtent,
rhoMin = rhomin)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
pos = nodes1.positions()
vel = nodes1.velocity()
mass = nodes1.mass()
eps = nodes1.specificThermalEnergy()
H = nodes1.Hfield()
if restoreCycle is None:
if lattice == True:
xmin = (-1.0, -1.0)
xmax = (1.0, 1.0)
myRejecter = Rejecter(holeRadius)
generator = GenerateNodeDistribution2d(nx,ny,rho0,"lattice",
rmin = rmin,
rmax = rmax,
xmin = xmin,
xmax = xmax,
theta = 2*pi,
nNodePerh = nPerh,
SPH = (not ASPH),
rejecter = myRejecter)
if mpi.procs > 1:
from VoronoiDistributeNodes import distribueNodes2d
else:
from DistributeNodes import distributeNodes2d
distributeNodes2d((nodes1,generator))
output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.SUM)")
for nodeID in xrange(nodes1.numInternalNodes):
eps[nodeID] = eps0
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes1)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
Bf = db.newFluidTensorFieldList(Tensor.zero, "Normalization")
Sf = db.newFluidScalarFieldList(0.0, "Surface")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq, linearInExpansion)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
output("q.linearInExpansion")
output("q.quadraticInExpansion")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if CRKSPH:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
filter = filter,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
correctionOrder = correctionOrder,
volumeType = volumeType,
HUpdate = HEvolution,
detectSurfaces = detectSurfaces,
detectThreshold = detectThreshold,
sweepAngle = sweepAngle,
detectRange = detectRange)
else:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
evolveTotalEnergy = evolveTotalEnergy,
gradhCorrection = gradhCorrection,
correctVelocityGradient = correctVelocityGradient,
densityUpdate = densityUpdate,
XSPH = XSPH,
HUpdate = HEvolution)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.XSPH")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Construct the surface detection periodic work function
#-------------------------------------------------------------------------------
#ds = detectSurface(nodes1,db,WT,Bf,Sf,hydro,renormFile)
#dsFreq = 1
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the one physics package.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
for p in packages:
integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
if dtMin:
integrator.dtMin = dtMin
if dtMax:
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.dtGrowth")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep,
restartStep = restartStep,
restartBaseName = restartBaseName,
restoreCycle = restoreCycle,
vizMethod = vizMethod,
vizBaseName = "surface-test-%ix%i" % (nx, ny),
vizDir = vizDir,
vizStep = vizCycle,
vizTime = vizTime,
SPH = (not ASPH))
output("control")
#control.appendPeriodicWork(ds,dsFreq)
#-------------------------------------------------------------------------------
# Finally run the problem and plot the results.
#-------------------------------------------------------------------------------
if steps is not None:
    control.step(steps)
else:
    control.advance(goalTime, maxSteps)
if checkAnswer:
    # Count the nodes the hydro flagged as surface points and compare with
    # the known-good total for this configuration.
    sp = hydro.surfacePoint()
    count = 0
    for i in range(nodes1.numInternalNodes):
        if sp[0][i] == 1:
            count += 1
    if count != 212:
        # Parenthesized raise/print keep this file runnable under both
        # Python 2 and Python 3 (the original used Py2-only statements).
        raise ValueError("The surface detection algorithm failed!")
    else:
        print("Surface Detection PASSED.")
|
<filename>tests/functional/Surfaces/Surface.py
#ATS:t1 = test( SELF, "--CRKSPH True --cfl 0.25 --clearDirectories True --steps=2 --nx=50 --ny=50 --checkAnswer=True --detectSurfaces=True --detectThreshold=0.99 --sweepAngle=pi/4.0 --detectRange=2.0", label="Surface Detection Test -- 2-D (serial)")
from math import *
import mpi
import os, sys, shutil
from Spheral2d import *
from SpheralTestUtilities import *
from findLastRestart import *
import SpheralPointmeshSiloDump
from GenerateNodeDistribution2d import *
title("Surface Detection Test")
class Rejecter(object):
    """Node-distribution rejecter that carves a circular hole at the origin.

    Keeps only points whose distance from the origin exceeds ``radius``;
    the parallel x/y/mass/H lists are filtered in lockstep.
    """

    def __init__(self, radius):
        self.radius = radius

    def __call__(self, x, y, m, H):
        # zip replaces the original Python-2-only xrange index loop, so this
        # now also runs under Python 3; behaviour is identical.
        nX = []
        nY = []
        nM = []
        nH = []
        for xi, yi, mi, Hi in zip(x, y, m, H):
            if sqrt(xi * xi + yi * yi) > self.radius:
                nX.append(xi)
                nY.append(yi)
                nM.append(mi)
                nH.append(Hi)
        return nX, nY, nM, nH
class dSurface(object):
    """Periodic-work callable that dumps surface-detection diagnostics.

    Each invocation rewrites ``file`` with one row per internal node of
    ``nodes``.  ``renormMat`` is retained but not called by default.
    """

    def __init__(self, nodes, db, Kern, Bf, Sf, hydro, file):
        self.nodes = nodes
        self.db = db
        self.Kern = Kern
        self.Bf = Bf
        self.Sf = Sf
        self.hydro = hydro
        self.file = file

    def __call__(self, cycle, time, dt):
        # Signature matches Spheral's periodic-work interface.
        #self.renormMat()
        self.momentNorm()

    def renormMat(self):
        """Write the per-node renormalization eigenvalue diagnostic.

        Builds the CRKSPH-style correction matrix B_i from each node's
        neighbours and records the smaller eigenvalue magnitude.
        """
        # 'with' closes the handle (the original leaked it); range replaces
        # the Python-2-only xrange, identical under Python 2.
        with open(self.file, 'w') as f:
            f.write("i\tSi\txi\n")
            self.db.updateConnectivityMap(True)
            cm = self.db.connectivityMap()
            for i in range(self.nodes.numInternalNodes):
                xi = self.nodes.positions()[i]
                Hi = self.nodes.Hfield()[i]
                neighbors = cm.connectivityForNode(self.nodes, i)
                Bi = Tensor.zero
                Vi = self.hydro.volume()[0][i]  # NOTE(review): currently unused
                for j in neighbors[0]:
                    xj = self.nodes.positions()[j]
                    xij = xj - xi
                    Hj = self.nodes.Hfield()[j]
                    Vj = self.hydro.volume()[0][j] # this could be done better
                    gWj = Hj*xij.unitVector()*self.Kern.gradValue((Hj*xij).magnitude(), Hj.Determinant())
                    Bij = gWj.dyad(xij)*Vj
                    Bi += Bij
                Bi = Bi.Inverse()
                Ei = Bi.eigenValues()
                Si = min(abs(Ei[0]), abs(Ei[1]))
                f.write("%d\t%f\t%f\n" % (i, Si, xi.magnitude()))

    def momentNorm(self):
        """Write the zeroth/first RK moment magnitudes for every internal node."""
        with open(self.file, 'w') as f:
            f.write("i\tSi\txi\tSSi\n")
            for i in range(self.nodes.numInternalNodes):
                xi = self.nodes.positions()[i]
                m0i = self.hydro.m0()[0][i]
                m1i = self.hydro.m1()[0][i]
                f.write("%d\t%f\t%f\t%f\n" % (i, m0i, xi.magnitude(), m1i.magnitude()))
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(lattice = True,
nx = 50,
ny = 50,
rmin = 0.0,
rmax = 1.0,
nPerh = 1.01,
rho0 = 1.0,
eps0 = 0.0,
gamma = 5.0/3.0,
mu = 1.0,
rhomin = 1.0e-8,
holeRadius = 0.5,
ASPH = False,
CRKSPH = True,
SPH = True,
XSPH = False,
filter = 0,
KernelConstructor = NBSplineKernel,
order = 7,
# Hydro
Qconstructor = MonaghanGingoldViscosity2d,
correctionOrder = LinearOrder,
Cl = 1.0,
Cq = 2.0,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-4,
negligibleSoundSpeed = 1e-5,
csMultiplier = 0.1,
hmin = 0.004,
hmax = 10.0,
hminratio = 0.1,
compatibleEnergy = False,
gradhCorrection = False,
HEvolution = IdealH,
sumForMassDensity = RigorousSumDensity,
densityUpdate = RigorousSumDensity,
HUpdate = IdealH,
linearInExpansion = False,
volumeType = RKVoronoiVolume,
# Timestep constraints
cfl = 0.5,
deltaPhi = 0.01,
domainIndependent = False,
# Integrator
IntegratorConstructor = CheapSynchronousRK2Integrator,
goalTime = 1.0,
dt = 0.0001,
dtMin = 1.0e-5,
dtMax = 1.0e5,
dtGrowth = 2.0,
maxSteps = None,
steps = None,
statsStep = 10,
redistributeStep = 500,
restartStep = 500,
restoreCycle = None,
smoothIters = 0,
rigorousBoundaries = True,
dtverbose = False,
vizCycle = 1,
vizTime = 1.0e5,
vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState,
clearDirectories = False,
renormFile = "renorm.txt",
detectSurfaces = False,
detectRange = 2.0,
sweepAngle = pi/4.0,
detectThreshold = 0.99,
checkAnswer = False,
)
if CRKSPH:
Qconstructor = LimitedMonaghanGingoldViscosity2d
if ASPH:
HydroConstructor = ACRKSPHHydro
else:
HydroConstructor = CRKSPHHydro
else:
if ASPH:
HydroConstructor = ASPHHydro
else:
HydroConstructor = SPHHydro
dataDir = "surface-%i-%i" % (nx,ny)
dataDir = os.path.join(dataDir, "CRK=%s-nPerh=%f" % (CRKSPH,nPerh))
dataDir = os.path.join(dataDir, "Cl=%f-Cq=%f" % (Cl,Cq))
restartBaseName = "%s/SurfaceTest-%i-%i" % (dataDir,nx,ny)
vizDir = os.path.join(dataDir, "visit")
vizBaseName = "SurfaceTest"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if clearDirectories and os.path.exists(dataDir):
shutil.rmtree(dataDir)
if not os.path.exists(dataDir):
os.makedirs(dataDir)
if not os.path.exists(vizDir):
os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
if restoreCycle is None:
restoreCycle = findLastRestart(restartBaseName)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
if KernelConstructor==NBSplineKernel:
Wbase = NBSplineKernel(order)
else:
Wbase = KernelConstructor()
WT = TableKernel(KernelConstructor(order), 1000)
WTPi = TableKernel(KernelConstructor(order), 1000)
output('WT')
output('WTPi')
kernelExtent = WT.kernelExtent
output("WT")
nodes1 = makeFluidNodeList("nodes1", eos,
hmin = hmin,
hmax = hmax,
nPerh = nPerh,
kernelExtent = kernelExtent,
rhoMin = rhomin)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
pos = nodes1.positions()
vel = nodes1.velocity()
mass = nodes1.mass()
eps = nodes1.specificThermalEnergy()
H = nodes1.Hfield()
if restoreCycle is None:
if lattice == True:
xmin = (-1.0, -1.0)
xmax = (1.0, 1.0)
myRejecter = Rejecter(holeRadius)
generator = GenerateNodeDistribution2d(nx,ny,rho0,"lattice",
rmin = rmin,
rmax = rmax,
xmin = xmin,
xmax = xmax,
theta = 2*pi,
nNodePerh = nPerh,
SPH = (not ASPH),
rejecter = myRejecter)
if mpi.procs > 1:
from VoronoiDistributeNodes import distribueNodes2d
else:
from DistributeNodes import distributeNodes2d
distributeNodes2d((nodes1,generator))
output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.SUM)")
for nodeID in xrange(nodes1.numInternalNodes):
eps[nodeID] = eps0
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes1)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
Bf = db.newFluidTensorFieldList(Tensor.zero, "Normalization")
Sf = db.newFluidScalarFieldList(0.0, "Surface")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq, linearInExpansion)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
output("q.linearInExpansion")
output("q.quadraticInExpansion")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if CRKSPH:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
filter = filter,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
correctionOrder = correctionOrder,
volumeType = volumeType,
HUpdate = HEvolution,
detectSurfaces = detectSurfaces,
detectThreshold = detectThreshold,
sweepAngle = sweepAngle,
detectRange = detectRange)
else:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
evolveTotalEnergy = evolveTotalEnergy,
gradhCorrection = gradhCorrection,
correctVelocityGradient = correctVelocityGradient,
densityUpdate = densityUpdate,
XSPH = XSPH,
HUpdate = HEvolution)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.XSPH")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Construct the surface detection periodic work function
#-------------------------------------------------------------------------------
#ds = detectSurface(nodes1,db,WT,Bf,Sf,hydro,renormFile)
#dsFreq = 1
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the one physics package.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
for p in packages:
integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
if dtMin:
integrator.dtMin = dtMin
if dtMax:
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.dtGrowth")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep,
restartStep = restartStep,
restartBaseName = restartBaseName,
restoreCycle = restoreCycle,
vizMethod = vizMethod,
vizBaseName = "surface-test-%ix%i" % (nx, ny),
vizDir = vizDir,
vizStep = vizCycle,
vizTime = vizTime,
SPH = (not ASPH))
output("control")
#control.appendPeriodicWork(ds,dsFreq)
#-------------------------------------------------------------------------------
# Finally run the problem and plot the results.
#-------------------------------------------------------------------------------
if not steps is None:
control.step(steps)
else:
control.advance(goalTime,maxSteps)
if checkAnswer:
sp = hydro.surfacePoint()
count = 0
for i in xrange(nodes1.numInternalNodes):
if sp[0][i] == 1:
count += 1
if not count == 212:
raise ValueError, "The surface detection algorithm failed!"
else:
print "Surface Detection PASSED."
|
en
| 0.135963
|
#ATS:t1 = test( SELF, "--CRKSPH True --cfl 0.25 --clearDirectories True --steps=2 --nx=50 --ny=50 --checkAnswer=True --detectSurfaces=True --detectThreshold=0.99 --sweepAngle=pi/4.0 --detectRange=2.0", label="Surface Detection Test -- 2-D (serial)") #self.renormMat() # this could be done better #------------------------------------------------------------------------------- # Generic problem parameters #------------------------------------------------------------------------------- # Hydro # Timestep constraints # Integrator #------------------------------------------------------------------------------- # Check if the necessary output directories exist. If not, create them. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # If we're restarting, find the set of most recent restart files. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Material properties. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Interpolation kernels. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Set the node properties. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Construct a DataBase to hold our node list #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Construct the artificial viscosity. 
#------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Construct the hydro physics object. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Construct the surface detection periodic work function #------------------------------------------------------------------------------- #ds = detectSurface(nodes1,db,WT,Bf,Sf,hydro,renormFile) #dsFreq = 1 #------------------------------------------------------------------------------- # Construct a time integrator, and add the one physics package. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Build the controller. #------------------------------------------------------------------------------- #control.appendPeriodicWork(ds,dsFreq) #------------------------------------------------------------------------------- # Finally run the problem and plot the results. #-------------------------------------------------------------------------------
| 2.26308
| 2
|
geni-lib/samples/userdata.py
|
AERPAW-Platform-Control/gateway
| 0
|
6629631
|
<reponame>AERPAW-Platform-Control/gateway
#!/usr/bin/env python
"""geni-lib sample: attach Emulab user data to a node and a link in a request RSpec."""
import geni.portal as portal
import geni.rspec.pg as rspec
import geni.rspec.igext as IG
import geni.rspec.emulab as emulab
from lxml import etree as ET
# The portal context collects user-tunable parameters; the request holds resources.
pc = portal.Context()
request = rspec.Request()
pc.defineParameter("param1", "dsc1", portal.ParameterType.INTEGER, 1)
pc.defineParameter("param2", "dsc2", portal.ParameterType.STRING, "value2")
params = pc.bindParameters()
# Build a small XML fragment to demonstrate that user-data values may be XML elements.
ele2 = ET.Element("xmlstuff")
ET.SubElement(ele2, "evenmorexml")
node1 = IG.XenVM("node1")
iface1 = node1.addInterface("if1")
# Add user data to node1 in a single line
node1.UserData(emulab.UserDataSet({"data1":ele2, "data2":"val2"}))
link = rspec.Link("link")
link.addInterface(iface1)
# Add user data to link over several lines
linkdata = emulab.UserDataSet()
linkdata.addData("linkdata1", "val1")
linkdata.addData("linkdata2", "val2")
link.UserData(linkdata)
request.addResource(node1)
request.addResource(link)
# Validate the bound parameters, then emit the final request RSpec XML to stdout.
pc.verifyParameters()
pc.printRequestRSpec(request)
|
#!/usr/bin/env python
import geni.portal as portal
import geni.rspec.pg as rspec
import geni.rspec.igext as IG
import geni.rspec.emulab as emulab
from lxml import etree as ET
pc = portal.Context()
request = rspec.Request()
pc.defineParameter("param1", "dsc1", portal.ParameterType.INTEGER, 1)
pc.defineParameter("param2", "dsc2", portal.ParameterType.STRING, "value2")
params = pc.bindParameters()
ele2 = ET.Element("xmlstuff")
ET.SubElement(ele2, "evenmorexml")
node1 = IG.XenVM("node1")
iface1 = node1.addInterface("if1")
# Add user data to node1 in a single line
node1.UserData(emulab.UserDataSet({"data1":ele2, "data2":"val2"}))
link = rspec.Link("link")
link.addInterface(iface1)
# Add user data to link over several lines
linkdata = emulab.UserDataSet()
linkdata.addData("linkdata1", "val1")
linkdata.addData("linkdata2", "val2")
link.UserData(linkdata)
request.addResource(node1)
request.addResource(link)
pc.verifyParameters()
pc.printRequestRSpec(request)
|
en
| 0.840745
|
#!/usr/bin/env python # Add user data to node1 in a single line # Add user data to link over several lines
| 2.253593
| 2
|
project/routes.py
|
DillonEnge/tack-board-api
| 0
|
6629632
|
<filename>project/routes.py
from project import views
def setup_routes(app):
    """Register all API views on *app* before the server starts."""
    @app.listener('before_server_start')
    def init_routes(app, loop):
        # Table-driven registration: each class-based view is mounted at its path.
        route_table = [
            ('/events', views.EventsView),
            ('/tags', views.TagsView),
            ('/profiles', views.ProfilesView),
            ('/users', views.UsersView),
            ('/polls', views.PollsView),
            ('/groups', views.GroupsView),
            ('/selections', views.SelectionsView),
        ]
        for path, view_cls in route_table:
            app.add_route(view_cls.as_view(), path)
|
<filename>project/routes.py
from project import views
def setup_routes(app):
@app.listener('before_server_start')
def init_routes(app, loop):
app.add_route(views.EventsView.as_view(), '/events')
app.add_route(views.TagsView.as_view(), '/tags')
app.add_route(views.ProfilesView.as_view(), '/profiles')
app.add_route(views.UsersView.as_view(), '/users')
app.add_route(views.PollsView.as_view(), '/polls')
app.add_route(views.GroupsView.as_view(), '/groups')
app.add_route(views.SelectionsView.as_view(), '/selections')
|
none
| 1
| 2.244296
| 2
|
|
templates/tencent-flask/app.py
|
timqian/components
| 0
|
6629633
|
# -*- coding: utf8 -*-
"""Minimal Flask app template intended to run behind a serverless gateway."""
import json
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route("/")
def index():
    """Root endpoint; returns a plain greeting string."""
    return "Hello Flask"
@app.route('/user', methods = ['POST'])
def addUser():
    """Echo back the user object posted in the request body."""
    # we must get request body from cloud function event;
    # NOTE(review): the gateway apparently injects the original invocation
    # event into the WSGI environ under 'event' — confirm against the runtime.
    event = request.environ['event']
    user = json.loads(event['body'])
    return jsonify(data=user)
@app.route("/user", methods = ['GET'])
def listUser():
    """Return a hard-coded list of sample users."""
    users = [{'name': 'test1'}, {'name': 'test2'}]
    return jsonify(data=users)
@app.route("/user/<id>", methods = ['GET'])
def getUser(id):
    """Return a fixed sample user; the *id* path parameter is currently ignored."""
    return jsonify(data={'name': 'test1'})
|
# -*- coding: utf8 -*-
import json
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route("/")
def index():
return "Hello Flask"
@app.route('/user', methods = ['POST'])
def addUser():
# we must get request body from clound function event;
event = request.environ['event']
user = json.loads(event['body'])
return jsonify(data=user)
@app.route("/user", methods = ['GET'])
def listUser():
users = [{'name': 'test1'}, {'name': 'test2'}]
return jsonify(data=users)
@app.route("/user/<id>", methods = ['GET'])
def getUser(id):
return jsonify(data={'name': 'test1'})
|
en
| 0.743448
|
# -*- coding: utf8 -*- # we must get request body from clound function event;
| 3.115031
| 3
|
elasticsearch/_async/client/fleet.py
|
Conky5/elasticsearch-py
| 4
|
6629634
|
<gh_stars>1-10
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class FleetClient(NamespacedClient):
    @query_params("checkpoints", "timeout", "wait_for_advance", "wait_for_index")
    async def global_checkpoints(self, index, params=None, headers=None):
        """
        Fetch the current global checkpoints for an index. Intended for
        internal use by the fleet server project.

        .. warning::
            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg index: The name of the index.
        :arg checkpoints: Comma separated list of checkpoints
        :arg timeout: Timeout to wait for global checkpoint to advance
            Default: 30s
        :arg wait_for_advance: Whether to wait for the global checkpoint
            to advance past the specified current checkpoints Default: false
        :arg wait_for_index: Whether to wait for the target index to
            exist and all primary shards be active Default: false
        """
        # A missing/empty index cannot form a valid request path.
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        endpoint = _make_path(index, "_fleet", "global_checkpoints")
        return await self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class FleetClient(NamespacedClient):
@query_params("checkpoints", "timeout", "wait_for_advance", "wait_for_index")
async def global_checkpoints(self, index, params=None, headers=None):
"""
Returns the current global checkpoints for an index. This API is design for
internal use by the fleet server project.
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg index: The name of the index.
:arg checkpoints: Comma separated list of checkpoints
:arg timeout: Timeout to wait for global checkpoint to advance
Default: 30s
:arg wait_for_advance: Whether to wait for the global checkpoint
to advance past the specified current checkpoints Default: false
:arg wait_for_index: Whether to wait for the target index to
exist and all primary shards be active Default: false
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"GET",
_make_path(index, "_fleet", "global_checkpoints"),
params=params,
headers=headers,
)
|
en
| 0.839862
|
# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. .. warning:: This API is **experimental** so may include breaking changes or be removed in a future version :arg index: The name of the index. :arg checkpoints: Comma separated list of checkpoints :arg timeout: Timeout to wait for global checkpoint to advance Default: 30s :arg wait_for_advance: Whether to wait for the global checkpoint to advance past the specified current checkpoints Default: false :arg wait_for_index: Whether to wait for the target index to exist and all primary shards be active Default: false
| 1.948322
| 2
|
tests/GUI/test_scan_viewer.py
|
nhsx-mirror/skunkworks-ct-alignment-lesion-detection
| 2
|
6629635
|
<filename>tests/GUI/test_scan_viewer.py<gh_stars>1-10
import mock
import numpy as np
import pytest
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from PySide2.QtWidgets import QVBoxLayout, QWidget, QSizePolicy
from ai_ct_scans.GUI import ScanViewer
from ai_ct_scans.GUI.scan_viewer import SCAN_COLOURS
class TestScanColours:
    """Sanity checks on the SCAN_COLOURS palette constant."""
    def test_is_list(self):
        assert isinstance(SCAN_COLOURS, list)
    def test_items_are_right_size(self):
        # Every palette entry must be a 4-component (RGBA) colour.
        assert all(len(colour) == 4 for colour in SCAN_COLOURS)
@pytest.fixture
def viewer_widget(qtbot):
    """Provide a fresh ScanViewer, registered with qtbot for Qt cleanup."""
    window = ScanViewer()
    qtbot.addWidget(window)
    return window
class TestScanViewerUI:
    """Static checks on the ScanViewer widget's Qt class, sizing and layout."""
    def test_viewer_widget_is_qwidget(self):
        assert issubclass(ScanViewer, QWidget)
    def test_ui_size_policy(self, viewer_widget):
        # The viewer should grow to fill available space in both directions.
        assert viewer_widget.sizePolicy() == QSizePolicy(
            QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding
        )
    def test_set_layout(self, viewer_widget):
        assert isinstance(viewer_widget.ui.layout, QVBoxLayout)
    def test_initialise_gl_viewer_widget(self, viewer_widget):
        assert isinstance(viewer_widget.ui.viewer, gl.GLViewWidget)
    def test_gl_viewer_widget_added_to_layout(self, viewer_widget):
        # The GL viewer must be the first (and only expected) item in the layout.
        assert viewer_widget.ui.layout.itemAt(0).widget() == viewer_widget.ui.viewer
class TestScanViewer:
    """Checks on ScanViewer's initial (empty) state before any data is set."""
    def test_volume_plot_initialised_to_none(self, viewer_widget):
        assert viewer_widget.volume_plot is None
    def test_viewer_data_initialised_to_none(self, viewer_widget):
        assert viewer_widget.viewer_data is None
class TestSetData:
    """Tests for ScanViewer.set_data: volume-plot creation and origin centering."""
    def test_set_data_initialises_volume_plot(self, viewer_widget):
        data = {
            "scan_1": np.ones((2, 3, 4), dtype=np.float64),
            "scan_2": np.ones((2, 3, 4), dtype=np.float64) * 2,
        }
        assert viewer_widget.volume_plot is None
        viewer_widget.set_data(data)
        assert isinstance(viewer_widget.volume_plot, pg.opengl.GLVolumeItem)
    def test_centre_plot_origin(self, viewer_widget):
        # Unknown if this can be checked natively without patching.
        # Expected translation is minus half of each data extent (2, 3, 4)/2.
        with mock.patch.object(pg.opengl.GLVolumeItem, "translate") as mock_translate:
            data = {
                "scan_1": np.ones((2, 3, 4), dtype=np.float64),
                "scan_2": np.ones((2, 3, 4), dtype=np.float64) * 2,
            }
            viewer_widget.set_data(data)
            mock_translate.assert_called_with(-1, -1.5, -2)
    def test_centre_plot_origin_different(self, viewer_widget):
        # Unknown if this can be checked natively without patching.
        with mock.patch.object(pg.opengl.GLVolumeItem, "translate") as mock_translate:
            data = {
                "scan_1": np.ones((2, 6, 10), dtype=np.float64),
                "scan_2": np.ones((2, 6, 10), dtype=np.float64) * 2,
            }
            viewer_widget.set_data(data)
            mock_translate.assert_called_with(-1, -3, -5)
    @pytest.mark.skip("GL errors unresolved for itemsAt for region on viewer widget.")
    def test_adds_item_to_volume_plot(self, viewer_widget):
        data = {"scan_1": np.ones((2, 3, 4), dtype=np.float64)}
        viewer_widget.set_data(data)
        assert viewer_widget.ui.viewer.itemsAt(region=(0, 0, 1, 1)) == [
            viewer_widget.volume_plot
        ]
class TestPreProcessData:
    """Tests for the static ScanViewer.pre_process_data helper."""
    def test_returns_numpy_array(self):
        data = {"scan_1": np.ones((2, 3, 4), dtype=np.float64)}
        assert isinstance(ScanViewer.pre_process_data(data), np.ndarray)
    def test_returns_correct_dimensions(self):
        data = {"scan_1": np.ones((2, 3, 4), dtype=np.float64)}
        # Shape should be x, y, z, RGBA
        assert ScanViewer.pre_process_data(data).shape == (2, 3, 4, 4)
class TestDisplayData:
    """Tests for ScanViewer.display_data: navigation-slice highlighting and
    slicing through the volume along each of the three orientations
    (0 = axial / axis 0, 1 = coronal / axis 1, 2 = saggital / axis 2).

    The expected RGBA value [255, 255, 255, 64] shows the navigation slice is
    rendered as semi-transparent white; all other voxels stay fully transparent.
    """
    def test_sets_plot_data(self, viewer_widget):
        # Unknown if this can be checked natively without patching.
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((1, 2, 2), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice_through_scan=False, show_navigation_slice=False
            )
            # call_args_list[1] is the display_data call (index 0 is set_data's).
            print(mock_set_data.call_args_list[1][0][0])
            np.testing.assert_array_equal(
                mock_set_data.call_args_list[1][0][0], viewer_widget.viewer_data
            )
    @pytest.mark.parametrize(
        "slice, expected",
        [
            (0, [[[255, 255, 255, 64]]]),
            (5, [[[255, 255, 255, 64]]]),
            (10, [[[255, 255, 255, 64]]]),
        ],
        ids=["test slice 0", "test slice 5", "test_slice 10"],
    )
    def test_sets_plot_data_with_plane(self, viewer_widget, slice, expected):
        mock_set_data = mock.MagicMock()
        viewer_widget.volume_plot = mock.MagicMock()
        viewer_widget.volume_plot.setData = mock_set_data
        viewer_widget.viewer_data = np.zeros(((15, 1, 1, 4)), dtype=np.float64)
        viewer_widget.display_data(
            slice, slice_through_scan=False, show_navigation_slice=True
        )
        # The selected slice along axis 0 is highlighted ...
        np.testing.assert_array_equal(
            mock_set_data.call_args_list[0][0][0][slice, :, :, :], expected
        )
        # ... and every other slice remains fully transparent.
        for other_slice in range(viewer_widget.viewer_data.shape[0]):
            if other_slice != slice:
                np.testing.assert_array_equal(
                    mock_set_data.call_args_list[0][0][0][other_slice, :, :, :],
                    [[[0, 0, 0, 0]]],
                )
    @pytest.mark.parametrize(
        "slice, expected",
        [
            (0, [[[255, 255, 255, 64]]]),
            (5, [[[255, 255, 255, 64]]]),
            (10, [[[255, 255, 255, 64]]]),
        ],
        ids=["test slice 0", "test slice 5", "test_slice 10"],
    )
    def test_sets_plot_data_with_plane_coronal(self, viewer_widget, slice, expected):
        mock_set_data = mock.MagicMock()
        viewer_widget.volume_plot = mock.MagicMock()
        viewer_widget.volume_plot.setData = mock_set_data
        viewer_widget.viewer_data = np.zeros(((1, 15, 1, 4)), dtype=np.float64)
        viewer_widget.display_data(
            slice, orientation=1, slice_through_scan=False, show_navigation_slice=True
        )
        np.testing.assert_array_equal(
            mock_set_data.call_args_list[0][0][0][:, slice, :, :], expected
        )
        for other_slice in range(viewer_widget.viewer_data.shape[1]):
            if other_slice != slice:
                np.testing.assert_array_equal(
                    mock_set_data.call_args_list[0][0][0][:, other_slice, :, :],
                    [[[0, 0, 0, 0]]],
                )
    @pytest.mark.parametrize(
        "slice, expected",
        [
            (0, [[[255, 255, 255, 64]]]),
            (5, [[[255, 255, 255, 64]]]),
            (10, [[[255, 255, 255, 64]]]),
        ],
        ids=["test slice 0", "test slice 5", "test_slice 10"],
    )
    def test_sets_plot_data_with_plane_saggital(self, viewer_widget, slice, expected):
        mock_set_data = mock.MagicMock()
        viewer_widget.volume_plot = mock.MagicMock()
        viewer_widget.volume_plot.setData = mock_set_data
        viewer_widget.viewer_data = np.zeros(((1, 1, 15, 4)), dtype=np.float64)
        viewer_widget.display_data(
            slice, orientation=2, slice_through_scan=False, show_navigation_slice=True
        )
        np.testing.assert_array_equal(
            mock_set_data.call_args_list[0][0][0][:, :, slice, :], expected
        )
        for other_slice in range(viewer_widget.viewer_data.shape[2]):
            if other_slice != slice:
                np.testing.assert_array_equal(
                    mock_set_data.call_args_list[0][0][0][:, :, other_slice, :],
                    [[[0, 0, 0, 0]]],
                )
    @pytest.mark.parametrize(
        "slice", [(0), (5), (10)], ids=["test slice 0", "test slice 5", "test_slice 10"]
    )
    def test_slice_through_volume(self, viewer_widget, slice):
        # Slicing through the scan crops the displayed volume from the slice onward.
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((15, 1, 1), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice, slice_through_scan=True, show_navigation_slice=False
            )
            assert mock_set_data.call_args_list[1][0][0].shape[0] == 15 - slice
    @pytest.mark.parametrize(
        "slice", [(0), (5), (10)], ids=["test slice 0", "test slice 5", "test_slice 10"]
    )
    def test_slice_through_volume_coronal(self, viewer_widget, slice):
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((1, 15, 1), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice,
                orientation=1,
                slice_through_scan=True,
                show_navigation_slice=False,
            )
            assert mock_set_data.call_args_list[1][0][0].shape[1] == 15 - slice
    @pytest.mark.parametrize(
        "slice", [(0), (5), (10)], ids=["test slice 0", "test slice 5", "test_slice 10"]
    )
    def test_slice_through_volume_saggital(self, viewer_widget, slice):
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((1, 1, 15), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice,
                orientation=2,
                slice_through_scan=True,
                show_navigation_slice=False,
            )
            assert mock_set_data.call_args_list[1][0][0].shape[2] == 15 - slice
    def test_does_not_set_plot_data(self, viewer_widget):
        # Unknown if this can be checked natively without patching.
        # With no viewer data, display_data must be a no-op on the plot.
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            viewer_widget.viewer_data = None
            viewer_widget.display_data()
            assert mock_set_data.called is False
class TestConvertForViewer:
    """Tests for the static ScanViewer.convert_for_viewer helper."""
    def test_returns_numpy_array(self):
        data = np.ones((2, 3, 4), dtype=np.float64)
        assert isinstance(ScanViewer.convert_for_viewer(data), np.ndarray)
    def test_returns_correct_dimensions(self):
        data = np.ones((2, 3, 4), dtype=np.float64)
        # Shape should be x, y, z, RGBA
        assert ScanViewer.convert_for_viewer(data).shape == (2, 3, 4, 4)
|
<filename>tests/GUI/test_scan_viewer.py<gh_stars>1-10
import mock
import numpy as np
import pytest
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from PySide2.QtWidgets import QVBoxLayout, QWidget, QSizePolicy
from ai_ct_scans.GUI import ScanViewer
from ai_ct_scans.GUI.scan_viewer import SCAN_COLOURS
class TestScanColours:
def test_is_list(self):
assert isinstance(SCAN_COLOURS, list)
def test_items_are_right_size(self):
for colour in SCAN_COLOURS:
assert len(colour) == 4
@pytest.fixture
def viewer_widget(qtbot):
window = ScanViewer()
qtbot.addWidget(window)
return window
class TestScanViewerUI:
def test_viewer_widget_is_qwidget(self):
assert issubclass(ScanViewer, QWidget)
def test_ui_size_policy(self, viewer_widget):
assert viewer_widget.sizePolicy() == QSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding
)
def test_set_layout(self, viewer_widget):
assert isinstance(viewer_widget.ui.layout, QVBoxLayout)
def test_initialise_gl_viewer_widget(self, viewer_widget):
assert isinstance(viewer_widget.ui.viewer, gl.GLViewWidget)
def test_gl_viewer_widget_added_to_layout(self, viewer_widget):
assert viewer_widget.ui.layout.itemAt(0).widget() == viewer_widget.ui.viewer
class TestScanViewer:
def test_volume_plot_initialised_to_none(self, viewer_widget):
assert viewer_widget.volume_plot is None
def test_viewer_data_initialised_to_none(self, viewer_widget):
assert viewer_widget.viewer_data is None
class TestSetData:
def test_set_data_initialises_volume_plot(self, viewer_widget):
data = {
"scan_1": np.ones((2, 3, 4), dtype=np.float64),
"scan_2": np.ones((2, 3, 4), dtype=np.float64) * 2,
}
assert viewer_widget.volume_plot is None
viewer_widget.set_data(data)
assert isinstance(viewer_widget.volume_plot, pg.opengl.GLVolumeItem)
def test_centre_plot_origin(self, viewer_widget):
# Unknown if this can be checked natively without patching.
with mock.patch.object(pg.opengl.GLVolumeItem, "translate") as mock_translate:
data = {
"scan_1": np.ones((2, 3, 4), dtype=np.float64),
"scan_2": np.ones((2, 3, 4), dtype=np.float64) * 2,
}
viewer_widget.set_data(data)
mock_translate.assert_called_with(-1, -1.5, -2)
def test_centre_plot_origin_different(self, viewer_widget):
# Unknown if this can be checked natively without patching.
with mock.patch.object(pg.opengl.GLVolumeItem, "translate") as mock_translate:
data = {
"scan_1": np.ones((2, 6, 10), dtype=np.float64),
"scan_2": np.ones((2, 6, 10), dtype=np.float64) * 2,
}
viewer_widget.set_data(data)
mock_translate.assert_called_with(-1, -3, -5)
@pytest.mark.skip("GL errors unresolved for itemsAt for region on viewer widget.")
def test_adds_item_to_volume_plot(self, viewer_widget):
data = {"scan_1": np.ones((2, 3, 4), dtype=np.float64)}
viewer_widget.set_data(data)
assert viewer_widget.ui.viewer.itemsAt(region=(0, 0, 1, 1)) == [
viewer_widget.volume_plot
]
class TestPreProcessData:
def test_returns_numpy_array(self):
data = {"scan_1": np.ones((2, 3, 4), dtype=np.float64)}
assert isinstance(ScanViewer.pre_process_data(data), np.ndarray)
def test_returns_correct_dimensions(self):
data = {"scan_1": np.ones((2, 3, 4), dtype=np.float64)}
# Shape should be x, y, z, RGBA
assert ScanViewer.pre_process_data(data).shape == (2, 3, 4, 4)
class TestDisplayData:
    """Tests for ScanViewer.display_data across orientations and slicing modes."""

    def test_sets_plot_data(self, viewer_widget):
        # Unknown if this can be checked natively without patching.
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((1, 2, 2), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice_through_scan=False, show_navigation_slice=False
            )
            print(mock_set_data.call_args_list[1][0][0])
            # Index 1: presumably set_data() issues the first setData call and
            # display_data() the second — TODO confirm against ScanViewer.
            np.testing.assert_array_equal(
                mock_set_data.call_args_list[1][0][0], viewer_widget.viewer_data
            )

    @pytest.mark.parametrize(
        "slice, expected",
        [
            (0, [[[255, 255, 255, 64]]]),
            (5, [[[255, 255, 255, 64]]]),
            (10, [[[255, 255, 255, 64]]]),
        ],
        ids=["test slice 0", "test slice 5", "test_slice 10"],
    )
    def test_sets_plot_data_with_plane(self, viewer_widget, slice, expected):
        # NOTE(review): the `slice` parameter shadows the builtin of the same name.
        mock_set_data = mock.MagicMock()
        viewer_widget.volume_plot = mock.MagicMock()
        viewer_widget.volume_plot.setData = mock_set_data
        viewer_widget.viewer_data = np.zeros(((15, 1, 1, 4)), dtype=np.float64)
        viewer_widget.display_data(
            slice, slice_through_scan=False, show_navigation_slice=True
        )
        # The selected slice (axis 0) is highlighted white at alpha 64.
        np.testing.assert_array_equal(
            mock_set_data.call_args_list[0][0][0][slice, :, :, :], expected
        )
        # Every other slice must remain fully transparent.
        for other_slice in range(viewer_widget.viewer_data.shape[0]):
            if other_slice != slice:
                np.testing.assert_array_equal(
                    mock_set_data.call_args_list[0][0][0][other_slice, :, :, :],
                    [[[0, 0, 0, 0]]],
                )

    @pytest.mark.parametrize(
        "slice, expected",
        [
            (0, [[[255, 255, 255, 64]]]),
            (5, [[[255, 255, 255, 64]]]),
            (10, [[[255, 255, 255, 64]]]),
        ],
        ids=["test slice 0", "test slice 5", "test_slice 10"],
    )
    def test_sets_plot_data_with_plane_coronal(self, viewer_widget, slice, expected):
        # Same as the axial test but the highlighted slice runs along axis 1.
        mock_set_data = mock.MagicMock()
        viewer_widget.volume_plot = mock.MagicMock()
        viewer_widget.volume_plot.setData = mock_set_data
        viewer_widget.viewer_data = np.zeros(((1, 15, 1, 4)), dtype=np.float64)
        viewer_widget.display_data(
            slice, orientation=1, slice_through_scan=False, show_navigation_slice=True
        )
        np.testing.assert_array_equal(
            mock_set_data.call_args_list[0][0][0][:, slice, :, :], expected
        )
        for other_slice in range(viewer_widget.viewer_data.shape[1]):
            if other_slice != slice:
                np.testing.assert_array_equal(
                    mock_set_data.call_args_list[0][0][0][:, other_slice, :, :],
                    [[[0, 0, 0, 0]]],
                )

    @pytest.mark.parametrize(
        "slice, expected",
        [
            (0, [[[255, 255, 255, 64]]]),
            (5, [[[255, 255, 255, 64]]]),
            (10, [[[255, 255, 255, 64]]]),
        ],
        ids=["test slice 0", "test slice 5", "test_slice 10"],
    )
    def test_sets_plot_data_with_plane_saggital(self, viewer_widget, slice, expected):
        # Same as the axial test but the highlighted slice runs along axis 2.
        mock_set_data = mock.MagicMock()
        viewer_widget.volume_plot = mock.MagicMock()
        viewer_widget.volume_plot.setData = mock_set_data
        viewer_widget.viewer_data = np.zeros(((1, 1, 15, 4)), dtype=np.float64)
        viewer_widget.display_data(
            slice, orientation=2, slice_through_scan=False, show_navigation_slice=True
        )
        np.testing.assert_array_equal(
            mock_set_data.call_args_list[0][0][0][:, :, slice, :], expected
        )
        for other_slice in range(viewer_widget.viewer_data.shape[2]):
            if other_slice != slice:
                np.testing.assert_array_equal(
                    mock_set_data.call_args_list[0][0][0][:, :, other_slice, :],
                    [[[0, 0, 0, 0]]],
                )

    @pytest.mark.parametrize(
        "slice", [(0), (5), (10)], ids=["test slice 0", "test slice 5", "test_slice 10"]
    )
    def test_slice_through_volume(self, viewer_widget, slice):
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((15, 1, 1), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice, slice_through_scan=True, show_navigation_slice=False
            )
            # Slicing through the scan crops axis 0 from `slice` onwards.
            assert mock_set_data.call_args_list[1][0][0].shape[0] == 15 - slice

    @pytest.mark.parametrize(
        "slice", [(0), (5), (10)], ids=["test slice 0", "test slice 5", "test_slice 10"]
    )
    def test_slice_through_volume_coronal(self, viewer_widget, slice):
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((1, 15, 1), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice,
                orientation=1,
                slice_through_scan=True,
                show_navigation_slice=False,
            )
            # Orientation 1 crops along axis 1 instead.
            assert mock_set_data.call_args_list[1][0][0].shape[1] == 15 - slice

    @pytest.mark.parametrize(
        "slice", [(0), (5), (10)], ids=["test slice 0", "test slice 5", "test_slice 10"]
    )
    def test_slice_through_volume_saggital(self, viewer_widget, slice):
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            data = {"scan_1": np.zeros((1, 1, 15), dtype=np.float64)}
            viewer_widget.set_data(data)
            viewer_widget.display_data(
                slice,
                orientation=2,
                slice_through_scan=True,
                show_navigation_slice=False,
            )
            # Orientation 2 crops along axis 2 instead.
            assert mock_set_data.call_args_list[1][0][0].shape[2] == 15 - slice

    def test_does_not_set_plot_data(self, viewer_widget):
        # Unknown if this can be checked natively without patching.
        with mock.patch.object(pg.opengl.GLVolumeItem, "setData") as mock_set_data:
            viewer_widget.viewer_data = None
            viewer_widget.display_data()
            # With no data loaded, display_data must be a no-op.
            assert mock_set_data.called is False
class TestConvertForViewer:
    """Tests for ScanViewer.convert_for_viewer."""

    def test_returns_numpy_array(self):
        volume = np.ones((2, 3, 4), dtype=np.float64)
        converted = ScanViewer.convert_for_viewer(volume)
        assert isinstance(converted, np.ndarray)

    def test_returns_correct_dimensions(self):
        volume = np.ones((2, 3, 4), dtype=np.float64)
        # An RGBA channel is appended: (x, y, z) -> (x, y, z, 4).
        assert ScanViewer.convert_for_viewer(volume).shape == (2, 3, 4, 4)
|
en
| 0.770385
|
# Unknown if this can be checked natively without patching. # Unknown if this can be checked natively without patching. # Shape should be x, y, z, RGBA # Unknown if this can be checked natively without patching. # Unknown if this can be checked natively without patching. # Shape should be x, y, z, RGBA
| 2.118635
| 2
|
trseeker/tools/trs_groups.py
|
ad3002/Lyrebird
| 0
|
6629636
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#@created: 07.09.2011
#@author: <NAME>
#@contact: <EMAIL>
'''
Functions related to TRs group analysis.
'''
from trseeker.tools.statistics import get_mean
def get_index(i):
    ''' Get next family index: 'A'..'Z' for 0-25, then 'X<i>'.'''
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return letters[i] if i < len(letters) else "X%s" % i
def get_popular(s):
    ''' Get most frequent element of list (ties broken by the larger element).'''
    counts = {}
    for item in s:
        counts[item] = counts.get(item, 0) + 1
    ranked = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    return ranked[0][0]
def get_family_name(trf_objs, seen_units):
    ''' Get unit and letter for family.

    Picks the unit (monomer) length whose entries maximise
    mean(pmatch) * count; ties are broken towards the smaller unit.
    Mutates and returns *seen_units* (unit -> number of families seen)
    so repeated units get successive letters ('A', 'B', ...).

    Fix: the original referenced `right_u` before assignment (NameError)
    when the first score equalled the running maximum of 0.
    '''
    # read units to pmatch
    units = {}
    for trf_obj in trf_objs:
        units.setdefault(trf_obj.trf_period, []).append(trf_obj.trf_pmatch)
    # find optimal unit
    max_pmatch = 0
    right_u = None
    for u in units:
        m = get_mean(units[u]) * len(units[u])
        if right_u is None or m > max_pmatch:
            max_pmatch = m
            right_u = u
        elif m == max_pmatch:
            # tie: prefer the smaller unit length
            right_u = min(u, right_u)
    # assign the next letter for this unit ('A' for a first occurrence)
    if right_u not in seen_units:
        seen_units[right_u] = 0
    else:
        seen_units[right_u] += 1
    letter = get_index(seen_units[right_u])
    return right_u, letter, seen_units
def join_families_with_common(families):
    ''' Join families (sets) with common members.

    Returns a list of member lists, sorted by size (largest first).

    Fixes: `xrange` (Python-2-only) replaced by `range`; the merge now
    repeats until stable, so transitive overlaps (e.g. {1}, {2}, {1,2})
    collapse into one family instead of leaving a partial merge.
    '''
    n = len(families)
    for i in range(n):
        if not families[i]:
            continue
        merged_any = True
        # Re-scan after each absorption: families[i] may now overlap
        # sets it previously missed.
        while merged_any:
            merged_any = False
            for j in range(i + 1, n):
                if not families[j]:
                    continue
                if families[i].intersection(families[j]):
                    families[i] = families[i].union(families[j])
                    families[j] = None
                    merged_any = True
    families = [x for x in families if x]
    for i in range(len(families)):
        families[i] = list(families[i])
    families.sort(key=lambda x: len(x), reverse=True)
    return families
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#@created: 07.09.2011
#@author: <NAME>
#@contact: <EMAIL>
'''
Functions related to TRs group analysis.
'''
from trseeker.tools.statistics import get_mean
def get_index(i):
    ''' Get next family index: 'A'..'Z' for 0-25, then 'X<i>'.'''
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return letters[i] if i < len(letters) else "X%s" % i
def get_popular(s):
    ''' Get most frequent element of list (ties broken by the larger element).'''
    counts = {}
    for item in s:
        counts[item] = counts.get(item, 0) + 1
    ranked = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    return ranked[0][0]
def get_family_name(trf_objs, seen_units):
    ''' Get unit and letter for family.

    Picks the unit (monomer) length whose entries maximise
    mean(pmatch) * count; ties are broken towards the smaller unit.
    Mutates and returns *seen_units* (unit -> number of families seen)
    so repeated units get successive letters ('A', 'B', ...).

    Fix: the original referenced `right_u` before assignment (NameError)
    when the first score equalled the running maximum of 0.
    '''
    # read units to pmatch
    units = {}
    for trf_obj in trf_objs:
        units.setdefault(trf_obj.trf_period, []).append(trf_obj.trf_pmatch)
    # find optimal unit
    max_pmatch = 0
    right_u = None
    for u in units:
        m = get_mean(units[u]) * len(units[u])
        if right_u is None or m > max_pmatch:
            max_pmatch = m
            right_u = u
        elif m == max_pmatch:
            # tie: prefer the smaller unit length
            right_u = min(u, right_u)
    # assign the next letter for this unit ('A' for a first occurrence)
    if right_u not in seen_units:
        seen_units[right_u] = 0
    else:
        seen_units[right_u] += 1
    letter = get_index(seen_units[right_u])
    return right_u, letter, seen_units
def join_families_with_common(families):
    ''' Join families (sets) with common members.

    Returns a list of member lists, sorted by size (largest first).

    Fixes: `xrange` (Python-2-only) replaced by `range`; the merge now
    repeats until stable, so transitive overlaps (e.g. {1}, {2}, {1,2})
    collapse into one family instead of leaving a partial merge.
    '''
    n = len(families)
    for i in range(n):
        if not families[i]:
            continue
        merged_any = True
        # Re-scan after each absorption: families[i] may now overlap
        # sets it previously missed.
        while merged_any:
            merged_any = False
            for j in range(i + 1, n):
                if not families[j]:
                    continue
                if families[i].intersection(families[j]):
                    families[i] = families[i].union(families[j])
                    families[j] = None
                    merged_any = True
    families = [x for x in families if x]
    for i in range(len(families)):
        families[i] = list(families[i])
    families.sort(key=lambda x: len(x), reverse=True)
    return families
|
en
| 0.764794
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # #@created: 07.09.2011 #@author: <NAME> #@contact: <EMAIL> Functions related to TRs group analysis. Get next family index. Get most frequent element of list. Get unit and letter for family. # read units to pmatch # find optimal unit # change unit Join families with common members.
| 3.554101
| 4
|
vkwave/api/methods/leads.py
|
XIDY-Dex/vkwave
| 222
|
6629637
|
from vkwave.types.responses import *
from ._category import Category
from ._utils import get_params
class Leads(Category):
    """Wrappers for the VK ``leads.*`` API methods."""

    async def check_user(
        self,
        lead_id: int,
        return_raw_response: bool = False,
        test_result: typing.Optional[int] = None,
        test_mode: typing.Optional[bool] = None,
        auto_start: typing.Optional[bool] = None,
        age: typing.Optional[int] = None,
        country: typing.Optional[str] = None,
    ) -> typing.Union[dict, LeadsCheckUserResponse]:
        """Check whether the user can start the lead.

        :param lead_id: - Lead ID.
        :param test_result: - Value to be return in 'result' field when test mode is used.
        :param test_mode:
        :param auto_start:
        :param age: - User age.
        :param country: - User country code.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("checkUser", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsCheckUserResponse(**raw_result)

    async def complete(
        self,
        vk_sid: str,
        secret: str,
        return_raw_response: bool = False,
        comment: typing.Optional[str] = None,
    ) -> typing.Union[dict, LeadsCompleteResponse]:
        """Finish the lead for the current session.

        :param vk_sid: - Session obtained as GET parameter when session started.
        :param secret: - Secret key from the lead testing interface.
        :param comment: - Comment text.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("complete", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsCompleteResponse(**raw_result)

    async def get_stats(
        self,
        lead_id: int,
        return_raw_response: bool = False,
        secret: typing.Optional[str] = None,
        date_start: typing.Optional[str] = None,
        date_end: typing.Optional[str] = None,
    ) -> typing.Union[dict, LeadsGetStatsResponse]:
        """Return statistics for a lead.

        :param lead_id: - Lead ID.
        :param secret: - Secret key obtained from the lead testing interface.
        :param date_start: - Day to start stats from (YYYY_MM_DD, e.g.2011-09-17).
        :param date_end: - Day to finish stats (YYYY_MM_DD, e.g.2011-09-17).
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("getStats", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsGetStatsResponse(**raw_result)

    async def get_users(
        self,
        offer_id: int,
        secret: str,
        return_raw_response: bool = False,
        offset: typing.Optional[int] = None,
        count: typing.Optional[int] = None,
        status: typing.Optional[int] = None,
        reverse: typing.Optional[BaseBoolInt] = None,
    ) -> typing.Union[dict, LeadsGetUsersResponse]:
        """List users who have interacted with an offer.

        :param offer_id: - Offer ID.
        :param secret: - Secret key obtained in the lead testing interface.
        :param offset: - Offset needed to return a specific subset of results.
        :param count: - Number of results to return.
        :param status: - Action type. Possible values: *'0' — start,, *'1' — finish,, *'2' — blocking users,, *'3' — start in a test mode,, *'4' — finish in a test mode.
        :param reverse: - Sort order. Possible values: *'1' — chronological,, *'0' — reverse chronological.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("getUsers", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsGetUsersResponse(**raw_result)

    async def metric_hit(
        self,
        data: str,
        return_raw_response: bool = False,
    ) -> typing.Union[dict, LeadsMetricHitResponse]:
        """Register a metric hit.

        :param data: - Metric data obtained in the lead interface.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("metricHit", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsMetricHitResponse(**raw_result)

    async def start(
        self,
        lead_id: int,
        secret: str,
        return_raw_response: bool = False,
        uid: typing.Optional[int] = None,
        aid: typing.Optional[int] = None,
        test_mode: typing.Optional[bool] = None,
        force: typing.Optional[bool] = None,
    ) -> typing.Union[dict, LeadsStartResponse]:
        """Start the lead for a user.

        :param lead_id: - Lead ID.
        :param secret: - Secret key from the lead testing interface.
        :param uid:
        :param aid:
        :param test_mode:
        :param force:
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("start", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsStartResponse(**raw_result)
|
from vkwave.types.responses import *
from ._category import Category
from ._utils import get_params
class Leads(Category):
    """Wrappers for the VK ``leads.*`` API methods."""

    async def check_user(
        self,
        lead_id: int,
        return_raw_response: bool = False,
        test_result: typing.Optional[int] = None,
        test_mode: typing.Optional[bool] = None,
        auto_start: typing.Optional[bool] = None,
        age: typing.Optional[int] = None,
        country: typing.Optional[str] = None,
    ) -> typing.Union[dict, LeadsCheckUserResponse]:
        """Check whether the user can start the lead.

        :param lead_id: - Lead ID.
        :param test_result: - Value to be return in 'result' field when test mode is used.
        :param test_mode:
        :param auto_start:
        :param age: - User age.
        :param country: - User country code.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("checkUser", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsCheckUserResponse(**raw_result)

    async def complete(
        self,
        vk_sid: str,
        secret: str,
        return_raw_response: bool = False,
        comment: typing.Optional[str] = None,
    ) -> typing.Union[dict, LeadsCompleteResponse]:
        """Finish the lead for the current session.

        :param vk_sid: - Session obtained as GET parameter when session started.
        :param secret: - Secret key from the lead testing interface.
        :param comment: - Comment text.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("complete", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsCompleteResponse(**raw_result)

    async def get_stats(
        self,
        lead_id: int,
        return_raw_response: bool = False,
        secret: typing.Optional[str] = None,
        date_start: typing.Optional[str] = None,
        date_end: typing.Optional[str] = None,
    ) -> typing.Union[dict, LeadsGetStatsResponse]:
        """Return statistics for a lead.

        :param lead_id: - Lead ID.
        :param secret: - Secret key obtained from the lead testing interface.
        :param date_start: - Day to start stats from (YYYY_MM_DD, e.g.2011-09-17).
        :param date_end: - Day to finish stats (YYYY_MM_DD, e.g.2011-09-17).
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("getStats", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsGetStatsResponse(**raw_result)

    async def get_users(
        self,
        offer_id: int,
        secret: str,
        return_raw_response: bool = False,
        offset: typing.Optional[int] = None,
        count: typing.Optional[int] = None,
        status: typing.Optional[int] = None,
        reverse: typing.Optional[BaseBoolInt] = None,
    ) -> typing.Union[dict, LeadsGetUsersResponse]:
        """List users who have interacted with an offer.

        :param offer_id: - Offer ID.
        :param secret: - Secret key obtained in the lead testing interface.
        :param offset: - Offset needed to return a specific subset of results.
        :param count: - Number of results to return.
        :param status: - Action type. Possible values: *'0' — start,, *'1' — finish,, *'2' — blocking users,, *'3' — start in a test mode,, *'4' — finish in a test mode.
        :param reverse: - Sort order. Possible values: *'1' — chronological,, *'0' — reverse chronological.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("getUsers", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsGetUsersResponse(**raw_result)

    async def metric_hit(
        self,
        data: str,
        return_raw_response: bool = False,
    ) -> typing.Union[dict, LeadsMetricHitResponse]:
        """Register a metric hit.

        :param data: - Metric data obtained in the lead interface.
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("metricHit", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsMetricHitResponse(**raw_result)

    async def start(
        self,
        lead_id: int,
        secret: str,
        return_raw_response: bool = False,
        uid: typing.Optional[int] = None,
        aid: typing.Optional[int] = None,
        test_mode: typing.Optional[bool] = None,
        force: typing.Optional[bool] = None,
    ) -> typing.Union[dict, LeadsStartResponse]:
        """Start the lead for a user.

        :param lead_id: - Lead ID.
        :param secret: - Secret key from the lead testing interface.
        :param uid:
        :param aid:
        :param test_mode:
        :param force:
        :param return_raw_response: - return result at dict
        :return:
        """
        raw_result = await self.api_request("start", get_params(locals()))
        if return_raw_response:
            return raw_result
        return LeadsStartResponse(**raw_result)
|
en
| 0.58122
|
:param lead_id: - Lead ID. :param test_result: - Value to be return in 'result' field when test mode is used. :param test_mode: :param auto_start: :param age: - User age. :param country: - User country code. :param return_raw_response: - return result at dict :return: :param vk_sid: - Session obtained as GET parameter when session started. :param secret: - Secret key from the lead testing interface. :param comment: - Comment text. :param return_raw_response: - return result at dict :return: :param lead_id: - Lead ID. :param secret: - Secret key obtained from the lead testing interface. :param date_start: - Day to start stats from (YYYY_MM_DD, e.g.2011-09-17). :param date_end: - Day to finish stats (YYYY_MM_DD, e.g.2011-09-17). :param return_raw_response: - return result at dict :return: :param offer_id: - Offer ID. :param secret: - Secret key obtained in the lead testing interface. :param offset: - Offset needed to return a specific subset of results. :param count: - Number of results to return. :param status: - Action type. Possible values: *'0' — start,, *'1' — finish,, *'2' — blocking users,, *'3' — start in a test mode,, *'4' — finish in a test mode. :param reverse: - Sort order. Possible values: *'1' — chronological,, *'0' — reverse chronological. :param return_raw_response: - return result at dict :return: :param data: - Metric data obtained in the lead interface. :param return_raw_response: - return result at dict :return: :param lead_id: - Lead ID. :param secret: - Secret key from the lead testing interface. :param uid: :param aid: :param test_mode: :param force: :param return_raw_response: - return result at dict :return:
| 2.042422
| 2
|
p322_Coin_Change.py
|
bzhou26/leetcode_sol
| 0
|
6629638
|
'''
- Leetcode problem: 322
- Difficulty: Medium
- Brief problem description:
You are given coins of different denominations and a total amount of money amount. Write a function to compute the
fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any
combination of the coins, return -1.
Example 1:
Input: coins = [1, 2, 5], amount = 11
Output: 3
Explanation: 11 = 5 + 5 + 1
Example 2:
Input: coins = [2], amount = 3
Output: -1
Note:
You may assume that you have an infinite number of each kind of coin.
- Solution Summary:
Typical DP solution, for example, coins are [1, 2, 5], amount = 11
F(1) = -1
F(2) = -1
.
.
.
F(6) = min (F(6-1)+1, F(6-2)+1, F(6-5)+1)
.
.
.
F(11) = min (F(11-1)+1, F(11-2)+1, F(11-5)+1)
NOTE: Only calculate when F(n) != -1 because -1 means not possible.
- Used Resources:
--- <NAME>
'''
class Solution:
    def coinChange(self, coins, amount) -> int:
        """Return the fewest coins summing to *amount*, or -1 if impossible.

        Bottom-up DP: ``best[t]`` is the minimum coin count for total ``t``,
        with ``float('inf')`` as the "unreachable" sentinel instead of the
        original hand-rolled -1 bookkeeping. O(amount * len(coins)) time.
        """
        INF = float('inf')
        best = [0] + [INF] * amount
        for total in range(1, amount + 1):
            # inf + 1 stays inf, so unreachable totals propagate naturally.
            best[total] = min(
                (best[total - c] for c in coins if c <= total), default=INF
            ) + 1
        return -1 if best[amount] == INF else best[amount]
if __name__ == "__main__":
solution = Solution()
testList = [186,419,83,408]
print(solution.coinChange(testList, 6249))
|
'''
- Leetcode problem: 322
- Difficulty: Medium
- Brief problem description:
You are given coins of different denominations and a total amount of money amount. Write a function to compute the
fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any
combination of the coins, return -1.
Example 1:
Input: coins = [1, 2, 5], amount = 11
Output: 3
Explanation: 11 = 5 + 5 + 1
Example 2:
Input: coins = [2], amount = 3
Output: -1
Note:
You may assume that you have an infinite number of each kind of coin.
- Solution Summary:
Typical DP solution, for example, coins are [1, 2, 5], amount = 11
F(1) = -1
F(2) = -1
.
.
.
F(6) = min (F(6-1)+1, F(6-2)+1, F(6-5)+1)
.
.
.
F(11) = min (F(11-1)+1, F(11-2)+1, F(11-5)+1)
NOTE: Only calculate when F(n) != -1 because -1 means not possible.
- Used Resources:
--- <NAME>
'''
class Solution:
    def coinChange(self, coins, amount) -> int:
        """Return the fewest coins summing to *amount*, or -1 if impossible.

        Bottom-up DP: ``best[t]`` is the minimum coin count for total ``t``,
        with ``float('inf')`` as the "unreachable" sentinel instead of the
        original hand-rolled -1 bookkeeping. O(amount * len(coins)) time.
        """
        INF = float('inf')
        best = [0] + [INF] * amount
        for total in range(1, amount + 1):
            # inf + 1 stays inf, so unreachable totals propagate naturally.
            best[total] = min(
                (best[total - c] for c in coins if c <= total), default=INF
            ) + 1
        return -1 if best[amount] == INF else best[amount]
if __name__ == "__main__":
solution = Solution()
testList = [186,419,83,408]
print(solution.coinChange(testList, 6249))
|
en
| 0.789325
|
- Leetcode problem: 322 - Difficulty: Medium - Brief problem description: You are given coins of different denominations and a total amount of money amount. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1. Example 1: Input: coins = [1, 2, 5], amount = 11 Output: 3 Explanation: 11 = 5 + 5 + 1 Example 2: Input: coins = [2], amount = 3 Output: -1 Note: You may assume that you have an infinite number of each kind of coin. - Solution Summary: Typical DP solution, for example, coins are [1, 2, 5], amount = 11 F(1) = -1 F(2) = -1 . . . F(6) = min (F(6-1)+1, F(6-2)+1, F(6-5)+1) . . . F(11) = min (F(11-1)+1, F(11-2)+1, F(11-5)+1) NOTE: Only calculate when F(n) != -1 because -1 means not possible. - Used Resources: --- <NAME>
| 3.673626
| 4
|
palettes/theme.py
|
dirmeier/palettes
| 1
|
6629639
|
<filename>palettes/theme.py
import matplotlib as mpl
def set_theme():
    """
    Apply the package's custom matplotlib theme.

    Removes the right and top axis spines and styles the grid, fonts,
    legend, ticks and axis-label locations.
    """
    theme = {
        "axes.linewidth": 1.5,
        "axes.spines.right": False,
        "axes.spines.top": False,
        "font.size": 15,
        "grid.color": "grey",
        "grid.linewidth": 0.5,
        "legend.fontsize": "smaller",
        "legend.loc": "center right",
        "xtick.labelsize": "x-small",
        "ytick.labelsize": "x-small",
        "xaxis.labellocation": "right",
        "yaxis.labellocation": "top",
    }
    mpl.rcParams.update(theme)
|
<filename>palettes/theme.py
import matplotlib as mpl
def set_theme():
    """
    Apply the package's custom matplotlib theme.

    Removes the right and top axis spines and styles the grid, fonts,
    legend, ticks and axis-label locations.
    """
    theme = {
        "axes.linewidth": 1.5,
        "axes.spines.right": False,
        "axes.spines.top": False,
        "font.size": 15,
        "grid.color": "grey",
        "grid.linewidth": 0.5,
        "legend.fontsize": "smaller",
        "legend.loc": "center right",
        "xtick.labelsize": "x-small",
        "ytick.labelsize": "x-small",
        "xaxis.labellocation": "right",
        "yaxis.labellocation": "top",
    }
    mpl.rcParams.update(theme)
|
en
| 0.540287
|
Set a custom theme Theme removes axis spines right and top and adds a grid
| 2.735384
| 3
|
src/DTW.py
|
Ishanh2000/CS698F
| 2
|
6629640
|
# AUM SHREEGANESHAAYA NAMAH|| AUM SHREEHANUMATE NAMAH|| AUM SHREEHANUMATE NAMAH||
import csv
def DTW(x=None, y=None):
    """ Dynamic Time Warping on the two signals `x` and `y`.

    Returns ``(total_cost, alignment)`` where ``alignment`` is a list of
    0-based index pairs matching samples of *x* to samples of *y*, or
    ``None`` when either signal is empty.

    Fixes: mutable default arguments (``x=[]``) replaced with a ``None``
    sentinel (backward compatible — calls without arguments still return
    ``None``); ``== None`` replaced with ``is None``.
    """
    x = [] if x is None else x
    y = [] if y is None else y
    M, N = len(x), len(y)  # assume signals x and y have similar variance
    if (M < 1) or (N < 1):
        return None  # can do nothing

    INF = float('inf')
    # cost[i][j] = [accumulated cost, parent cell] for prefix lengths (i, j).
    cost = [[None] * (N + 1) for _ in range(M + 1)]
    cost[0][0] = [0, None]
    for i in range(1, M + 1):
        cost[i][0] = [INF, ((i - 1), 0)]
    for j in range(1, N + 1):
        cost[0][j] = [INF, (0, (j - 1))]

    for i in range(1, M + 1):
        for j in range(1, N + 1):
            # Ties prefer up over diagonal over left (first minimum wins),
            # matching the original strict-< update order.
            add, parent = min(
                (cost[i - 1][j][0], ((i - 1), j)),
                (cost[i - 1][j - 1][0], ((i - 1), (j - 1))),
                (cost[i][j - 1][0], (i, (j - 1))),
                key=lambda cp: cp[0],
            )
            cost[i][j] = [((y[j - 1] - x[i - 1]) ** 2 + add), parent]

    totalCost = cost[M][N][0]
    # Backtrack parent pointers from (M, N) to the (0, 0) sentinel.
    alignment = [(M, N)]
    while True:
        i, j = alignment[-1]
        parent = cost[i][j][1]
        if parent is None:
            break
        alignment.append(parent)
    alignment.reverse()
    # Drop the sentinel and convert to 0-based sample indices.
    alignment = [(a[0] - 1, a[1] - 1) for a in alignment[1:]]
    return totalCost, alignment
def DTW_from_file(pPath = "", qPath = ""):
""" Assume `pPath` and `qPath` are valid absolute paths for now. """
p, q = [], []
with open(pPath, "r") as pf:
csv_reader = list(csv.reader(pf))[1:] # ignore first row (headers)
for row in csv_reader:
p.append([ float(v) for v in row ])
with open(qPath, "r") as qf:
csv_reader = list(csv.reader(qf))[1:] # ignore first row (headers)
for row in csv_reader:
q.append([ float(v) for v in row ])
# interploation of p and q may be required - see screenshot and ask Prof.
scores = {
"ax" : DTW([entry[1] for entry in p], [entry[1] for entry in q]),
"ay" : DTW([entry[2] for entry in p], [entry[2] for entry in q]),
"az" : DTW([entry[3] for entry in p], [entry[3] for entry in q]),
"aT" : DTW([entry[4] for entry in p], [entry[4] for entry in q]),
}
return scores
|
# AUM SHREEGANESHAAYA NAMAH|| AUM SHREEHANUMATE NAMAH|| AUM SHREEHANUMATE NAMAH||
import csv
def DTW(x=None, y=None):
    """ Dynamic Time Warping on the two signals `x` and `y`.

    Returns ``(total_cost, alignment)`` where ``alignment`` is a list of
    0-based index pairs matching samples of *x* to samples of *y*, or
    ``None`` when either signal is empty.

    Fixes: mutable default arguments (``x=[]``) replaced with a ``None``
    sentinel (backward compatible — calls without arguments still return
    ``None``); ``== None`` replaced with ``is None``.
    """
    x = [] if x is None else x
    y = [] if y is None else y
    M, N = len(x), len(y)  # assume signals x and y have similar variance
    if (M < 1) or (N < 1):
        return None  # can do nothing

    INF = float('inf')
    # cost[i][j] = [accumulated cost, parent cell] for prefix lengths (i, j).
    cost = [[None] * (N + 1) for _ in range(M + 1)]
    cost[0][0] = [0, None]
    for i in range(1, M + 1):
        cost[i][0] = [INF, ((i - 1), 0)]
    for j in range(1, N + 1):
        cost[0][j] = [INF, (0, (j - 1))]

    for i in range(1, M + 1):
        for j in range(1, N + 1):
            # Ties prefer up over diagonal over left (first minimum wins),
            # matching the original strict-< update order.
            add, parent = min(
                (cost[i - 1][j][0], ((i - 1), j)),
                (cost[i - 1][j - 1][0], ((i - 1), (j - 1))),
                (cost[i][j - 1][0], (i, (j - 1))),
                key=lambda cp: cp[0],
            )
            cost[i][j] = [((y[j - 1] - x[i - 1]) ** 2 + add), parent]

    totalCost = cost[M][N][0]
    # Backtrack parent pointers from (M, N) to the (0, 0) sentinel.
    alignment = [(M, N)]
    while True:
        i, j = alignment[-1]
        parent = cost[i][j][1]
        if parent is None:
            break
        alignment.append(parent)
    alignment.reverse()
    # Drop the sentinel and convert to 0-based sample indices.
    alignment = [(a[0] - 1, a[1] - 1) for a in alignment[1:]]
    return totalCost, alignment
def DTW_from_file(pPath = "", qPath = ""):
""" Assume `pPath` and `qPath` are valid absolute paths for now. """
p, q = [], []
with open(pPath, "r") as pf:
csv_reader = list(csv.reader(pf))[1:] # ignore first row (headers)
for row in csv_reader:
p.append([ float(v) for v in row ])
with open(qPath, "r") as qf:
csv_reader = list(csv.reader(qf))[1:] # ignore first row (headers)
for row in csv_reader:
q.append([ float(v) for v in row ])
# interploation of p and q may be required - see screenshot and ask Prof.
scores = {
"ax" : DTW([entry[1] for entry in p], [entry[1] for entry in q]),
"ay" : DTW([entry[2] for entry in p], [entry[2] for entry in q]),
"az" : DTW([entry[3] for entry in p], [entry[3] for entry in q]),
"aT" : DTW([entry[4] for entry in p], [entry[4] for entry in q]),
}
return scores
|
en
| 0.737106
|
# AUM SHREEGANESHAAYA NAMAH|| AUM SHREEHANUMATE NAMAH|| AUM SHREEHANUMATE NAMAH|| Dynamic Time Warping on the two signals `x` and `y` # assume signals x and y have similar variance # can do nothing # Initialization # [cost, parent] # [cost, parent] # Algorithm Assume `pPath` and `qPath` are valid absolute paths for now. # ignore first row (headers) # ignore first row (headers) # interploation of p and q may be required - see screenshot and ask Prof.
| 2.850636
| 3
|
biobb_structure_utils/utils/extract_molecule.py
|
bioexcel/biobb_structure_utils
| 1
|
6629641
|
<reponame>bioexcel/biobb_structure_utils
#!/usr/bin/env python3
"""Module containing the ExtractMolecule class and the command line interface."""
import argparse
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_common.command_wrapper import cmd_wrapper
from biobb_structure_utils.utils.common import *
class ExtractMolecule():
"""
| biobb_structure_utils ExtractMolecule
| This class is a wrapper of the Structure Checking tool to extract a molecule from a 3D structure.
| Wrapper for the `Structure Checking <https://github.com/bioexcel/biobb_structure_checking>`_ tool to extract a molecule from a 3D structure.
Args:
input_structure_path (str): Input structure file path. File type: input. `Sample file <https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/extract_molecule.pdb>`_. Accepted formats: pdb (edam:format_1476).
output_molecule_path (str): Output molecule file path. File type: output. `Sample file <https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/ref_extract_molecule.pdb>`_. Accepted formats: pdb (edam:format_1476).
properties (dic - Python dictionary object containing the tool parameters, not input/output files):
* **molecule_type** (*string*) - ("all") type of molecule to be extracted. If all, only waters and ligands will be removed from the original structure. Values: all, protein, na, dna, rna, chains.
* **chains** (*list*) - (None) if chains selected in **molecule_type**, specify them here, e.g: ["A", "C", "N"].
* **check_structure_path** (*string*) - ("check_structure") path to the check_structure application
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
Examples:
This is a use example of how to use the building block from Python::
from biobb_structure_utils.utils.extract_molecule import extract_molecule
prop = {
'molecule_type': 'chains',
'chains': ['A', 'N', 'F']
}
extract_molecule(input_structure_path='/path/to/myStructure.pdb,
output_molecule_path='/path/to/newMolecule.pdb',
properties=prop)
Info:
* wrapped_software:
* name: Structure Checking from MDWeb
* version: >=3.0.3
* license: Apache-2.0
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
    def __init__(self, input_structure_path, output_molecule_path,
                 properties=None, **kwargs) -> None:
        """Store paths and tool properties; no work is done until launch()."""
        properties = properties or {}

        # Input/Output files
        self.input_structure_path = str(input_structure_path)
        self.output_molecule_path = str(output_molecule_path)

        # Properties specific for BB
        # molecule_type: all | protein | na | dna | rna | chains
        self.molecule_type = properties.get('molecule_type', 'all')
        # chains: only meaningful when molecule_type == 'chains'
        self.chains = properties.get('chains', [])
        # Path to the check_structure executable
        self.check_structure_path = properties.get('check_structure_path', 'check_structure')
        self.properties = properties

        # Common in all BB
        self.can_write_console_log = properties.get('can_write_console_log', True)
        self.global_log = properties.get('global_log', None)
        self.prefix = properties.get('prefix', None)
        self.step = properties.get('step', None)
        self.path = properties.get('path', '')
        self.remove_tmp = properties.get('remove_tmp', True)
        self.restart = properties.get('restart', False)
def check_data_params(self, out_log, err_log):
    """ Checks all the input/output paths and parameters """
    # The check_* helpers (from biobb_structure_utils.utils.common) validate and
    # may normalise the paths; err_log is accepted for interface symmetry but
    # not used here.
    self.input_structure_path = check_input_path(self.input_structure_path, out_log, self.__class__.__name__)
    self.output_molecule_path = check_output_path(self.output_molecule_path, out_log, self.__class__.__name__)
def create_command_list(self, command_list_path):
    """Write the check_structure instruction file and return its path.

    Always removes ligands and waters; for a non-'all' molecule_type a
    chain-selection instruction is appended as well.
    """
    commands = ['ligands --remove All', 'water --remove Yes']
    if self.molecule_type != 'all':
        if self.molecule_type == 'chains':
            selection = ','.join(self.chains)
        else:
            selection = self.molecule_type
        commands.append('chains --select ' + selection)
    with open(command_list_path, 'w') as handle:
        handle.write(''.join(cmd.strip() + '\n' for cmd in commands))
    return command_list_path
@launchlogger
def launch(self) -> int:
    """Execute the :class:`ExtractMolecule <utils.extract_molecule.ExtractMolecule>` utils.extract_molecule.ExtractMolecule object.

    Returns the check_structure process return code (0 on success).
    """
    # Get local loggers from launchlogger decorator
    out_log = getattr(self, 'out_log', None)
    err_log = getattr(self, 'err_log', None)
    # check input/output paths and parameters
    self.check_data_params(out_log, err_log)
    # Check the properties
    fu.check_properties(self, self.properties)
    # Restart if needed.
    # Fixed: the original read the non-existent attribute
    # self.output_structure_path, which raised AttributeError whenever
    # restart=True; the output of this building block is output_molecule_path.
    if self.restart:
        output_file_list = [self.output_molecule_path]
        if fu.check_complete_files(output_file_list):
            fu.log('Restart is enabled, this step: %s will be skipped' % self.step, out_log, self.global_log)
            return 0
    # create temporary folder
    self.tmp_folder = fu.create_unique_dir()
    fu.log('Creating %s temporary folder' % self.tmp_folder, out_log)
    # create command list file consumed by check_structure's command_list mode
    self.create_command_list(self.tmp_folder + '/extract_prot.lst')
    # run command line
    cmd = [self.check_structure_path,
           '-i', self.input_structure_path,
           '-o', self.output_molecule_path,
           '--force_save',
           '--non_interactive',
           'command_list', '--list', self.tmp_folder + '/extract_prot.lst']
    returncode: int = cmd_wrapper.CmdWrapper(cmd, out_log, err_log, self.global_log).launch()
    # remove temporary folder
    if self.remove_tmp:
        fu.rm(self.tmp_folder)
        fu.log('Removing %s temporary folder' % self.tmp_folder, out_log)
    return returncode
def extract_molecule(input_structure_path: str, output_molecule_path: str, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`ExtractMolecule <utils.extract_molecule.ExtractMolecule>` class and
    execute the :meth:`launch() <utils.extract_molecule.ExtractMolecule.launch>` method."""
    wrapper = ExtractMolecule(input_structure_path=input_structure_path,
                              output_molecule_path=output_molecule_path,
                              properties=properties, **kwargs)
    return wrapper.launch()
def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(
        description="Extract a molecule from a 3D structure.",
        formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('-c', '--config', required=False,
                        help="This file can be a YAML file, JSON file or JSON string")
    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('-i', '--input_structure_path', required=True,
                               help="Input structure file path. Accepted formats: pdb.")
    required_args.add_argument('-o', '--output_molecule_path', required=True,
                               help="Output heteroatom file path. Accepted formats: pdb.")
    args = parser.parse_args()
    properties = settings.ConfReader(config=args.config if args.config else None).get_prop_dic()
    # Specific call of each building block
    extract_molecule(input_structure_path=args.input_structure_path,
                     output_molecule_path=args.output_molecule_path,
                     properties=properties)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
"""Module containing the ExtractMolecule class and the command line interface."""
import argparse
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_common.command_wrapper import cmd_wrapper
from biobb_structure_utils.utils.common import *
class ExtractMolecule():
    """
    | biobb_structure_utils ExtractMolecule
    | This class is a wrapper of the Structure Checking tool to extract a molecule from a 3D structure.
    | Wrapper for the `Structure Checking <https://github.com/bioexcel/biobb_structure_checking>`_ tool to extract a molecule from a 3D structure.

    Args:
        input_structure_path (str): Input structure file path. File type: input. Accepted formats: pdb (edam:format_1476).
        output_molecule_path (str): Output molecule file path. File type: output. Accepted formats: pdb (edam:format_1476).
        properties (dic - Python dictionary object containing the tool parameters, not input/output files):
            * **molecule_type** (*string*) - ("all") type of molecule to be extracted. If all, only waters and ligands will be removed from the original structure. Values: all, protein, na, dna, rna, chains.
            * **chains** (*list*) - (None) if chains selected in **molecule_type**, specify them here, e.g: ["A", "C", "N"].
            * **check_structure_path** (*string*) - ("check_structure") path to the check_structure application
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_structure_utils.utils.extract_molecule import extract_molecule
            prop = {
                'molecule_type': 'chains',
                'chains': ['A', 'N', 'F']
            }
            extract_molecule(input_structure_path='/path/to/myStructure.pdb',
                             output_molecule_path='/path/to/newMolecule.pdb',
                             properties=prop)

    Info:
        * wrapped_software:
            * name: Structure Checking from MDWeb
            * version: >=3.0.3
            * license: Apache-2.0
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl
    """

    def __init__(self, input_structure_path, output_molecule_path,
                 properties=None, **kwargs) -> None:
        properties = properties or {}
        # Input/Output files
        self.input_structure_path = str(input_structure_path)
        self.output_molecule_path = str(output_molecule_path)
        # Properties specific for BB
        self.molecule_type = properties.get('molecule_type', 'all')  # all|protein|na|dna|rna|chains
        self.chains = properties.get('chains', [])  # only used when molecule_type == 'chains'
        self.check_structure_path = properties.get('check_structure_path', 'check_structure')
        self.properties = properties
        # Common in all BB (workflow bookkeeping shared by every building block)
        self.can_write_console_log = properties.get('can_write_console_log', True)
        self.global_log = properties.get('global_log', None)
        self.prefix = properties.get('prefix', None)
        self.step = properties.get('step', None)
        self.path = properties.get('path', '')
        self.remove_tmp = properties.get('remove_tmp', True)
        self.restart = properties.get('restart', False)

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.input_structure_path = check_input_path(self.input_structure_path, out_log, self.__class__.__name__)
        self.output_molecule_path = check_output_path(self.output_molecule_path, out_log, self.__class__.__name__)

    def create_command_list(self, command_list_path):
        """ Creates a command list file as a input for structure checking """
        instructions_list = ['ligands --remove All', 'water --remove Yes']
        if self.molecule_type != 'all':
            # 'chains' takes an explicit comma-separated selection; the other
            # molecule types (protein/na/dna/rna) are passed through verbatim.
            if self.molecule_type == 'chains':
                instructions_list.append('chains --select ' + ','.join(self.chains))
            else:
                instructions_list.append('chains --select ' + self.molecule_type)
        with open(command_list_path, 'w') as clp:
            for line in instructions_list:
                clp.write(line.strip() + '\n')
        return command_list_path

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`ExtractMolecule <utils.extract_molecule.ExtractMolecule>` utils.extract_molecule.ExtractMolecule object.

        Returns the check_structure process return code (0 on success).
        """
        # Get local loggers from launchlogger decorator
        out_log = getattr(self, 'out_log', None)
        err_log = getattr(self, 'err_log', None)
        # check input/output paths and parameters
        self.check_data_params(out_log, err_log)
        # Check the properties
        fu.check_properties(self, self.properties)
        # Restart if needed.
        # Fixed: the original read the non-existent attribute
        # self.output_structure_path, which raised AttributeError whenever
        # restart=True; the output of this building block is output_molecule_path.
        if self.restart:
            output_file_list = [self.output_molecule_path]
            if fu.check_complete_files(output_file_list):
                fu.log('Restart is enabled, this step: %s will be skipped' % self.step, out_log, self.global_log)
                return 0
        # create temporary folder
        self.tmp_folder = fu.create_unique_dir()
        fu.log('Creating %s temporary folder' % self.tmp_folder, out_log)
        # create command list file consumed by check_structure's command_list mode
        self.create_command_list(self.tmp_folder + '/extract_prot.lst')
        # run command line
        cmd = [self.check_structure_path,
               '-i', self.input_structure_path,
               '-o', self.output_molecule_path,
               '--force_save',
               '--non_interactive',
               'command_list', '--list', self.tmp_folder + '/extract_prot.lst']
        returncode: int = cmd_wrapper.CmdWrapper(cmd, out_log, err_log, self.global_log).launch()
        # remove temporary folder
        if self.remove_tmp:
            fu.rm(self.tmp_folder)
            fu.log('Removing %s temporary folder' % self.tmp_folder, out_log)
        return returncode
def extract_molecule(input_structure_path: str, output_molecule_path: str, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`ExtractMolecule <utils.extract_molecule.ExtractMolecule>` class and
    execute the :meth:`launch() <utils.extract_molecule.ExtractMolecule.launch>` method."""
    block = ExtractMolecule(input_structure_path=input_structure_path,
                            output_molecule_path=output_molecule_path,
                            properties=properties, **kwargs)
    return block.launch()
def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(
        description="Extract a molecule from a 3D structure.",
        formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('-c', '--config', required=False,
                        help="This file can be a YAML file, JSON file or JSON string")
    # Required, building-block-specific arguments
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('-i', '--input_structure_path', required=True,
                               help="Input structure file path. Accepted formats: pdb.")
    required_args.add_argument('-o', '--output_molecule_path', required=True,
                               help="Output heteroatom file path. Accepted formats: pdb.")
    args = parser.parse_args()
    config = args.config if args.config else None
    properties = settings.ConfReader(config=config).get_prop_dic()
    # Delegate to the module-level wrapper function
    extract_molecule(input_structure_path=args.input_structure_path,
                     output_molecule_path=args.output_molecule_path,
                     properties=properties)
if __name__ == '__main__':
main()
|
en
| 0.469493
|
#!/usr/bin/env python3 Module containing the ExtractMolecule class and the command line interface. | biobb_structure_utils ExtractMolecule | This class is a wrapper of the Structure Checking tool to extract a molecule from a 3D structure. | Wrapper for the `Structure Checking <https://github.com/bioexcel/biobb_structure_checking>`_ tool to extract a molecule from a 3D structure. Args: input_structure_path (str): Input structure file path. File type: input. `Sample file <https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/data/utils/extract_molecule.pdb>`_. Accepted formats: pdb (edam:format_1476). output_molecule_path (str): Output molecule file path. File type: output. `Sample file <https://github.com/bioexcel/biobb_structure_utils/raw/master/biobb_structure_utils/test/reference/utils/ref_extract_molecule.pdb>`_. Accepted formats: pdb (edam:format_1476). properties (dic - Python dictionary object containing the tool parameters, not input/output files): * **molecule_type** (*string*) - ("all") type of molecule to be extracted. If all, only waters and ligands will be removed from the original structure. Values: all, protein, na, dna, rna, chains. * **chains** (*list*) - (None) if chains selected in **molecule_type**, specify them here, e.g: ["A", "C", "N"]. * **check_structure_path** (*string*) - ("check_structure") path to the check_structure application * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files. * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist. 
Examples: This is a use example of how to use the building block from Python:: from biobb_structure_utils.utils.extract_molecule import extract_molecule prop = { 'molecule_type': 'chains', 'chains': ['A', 'N', 'F'] } extract_molecule(input_structure_path='/path/to/myStructure.pdb, output_molecule_path='/path/to/newMolecule.pdb', properties=prop) Info: * wrapped_software: * name: Structure Checking from MDWeb * version: >=3.0.3 * license: Apache-2.0 * ontology: * name: EDAM * schema: http://edamontology.org/EDAM.owl # Input/Output files # Properties specific for BB # Common in all BB Checks all the input/output paths and parameters Creates a command list file as a input for structure checking Execute the :class:`ExtractMolecule <utils.extract_molecule.ExtractMolecule>` utils.extract_molecule.ExtractMolecule object. # Get local loggers from launchlogger decorator # check input/output paths and parameters # Check the properties #Restart if needed # create temporary folder # create command list file # run command line # remove temporary folder Execute the :class:`ExtractMolecule <utils.extract_molecule.ExtractMolecule>` class and execute the :meth:`launch() <utils.extract_molecule.ExtractMolecule.launch>` method. Command line execution of this building block. Please check the command line documentation. #Specific args of each building block #Specific call of each building block
| 2.694645
| 3
|
.github/scripts/check_all_icons.py
|
EnisMulic/devicon
| 0
|
6629642
|
<reponame>EnisMulic/devicon<gh_stars>0
from pathlib import Path
import json
# pycharm complains that build_assets is an unresolved ref
# don't worry about it, the script still runs
from build_assets import filehandler
if __name__ == "__main__":
    """
    Use as a cmd line script to check all the icons of the devicon.json.
    """
    # Resolve repository-relative paths so the script works from the repo root.
    devicon_json_path = str(Path("./devicon.json").resolve())
    icons_folder_path = str(Path("./icons").resolve())
    with open(devicon_json_path) as json_file:
        devicon_json = json.load(json_file)
    # The result is unused; presumably get_svgs_paths() fails loudly when an
    # icon listed in devicon.json has no SVG, which is what makes this a
    # "check" -- verify against build_assets.filehandler.
    svgs = filehandler.get_svgs_paths(devicon_json, icons_folder_path)
|
from pathlib import Path
import json
# pycharm complains that build_assets is an unresolved ref
# don't worry about it, the script still runs
from build_assets import filehandler
if __name__ == "__main__":
    """
    Use as a cmd line script to check all the icons of the devicon.json.
    """
    json_path = Path("./devicon.json").resolve()
    icons_dir = Path("./icons").resolve()
    with open(str(json_path)) as handle:
        devicon_json = json.load(handle)
    svgs = filehandler.get_svgs_paths(devicon_json, str(icons_dir))
|
en
| 0.936897
|
# pycharm complains that build_assets is an unresolved ref # don't worry about it, the script still runs Use as a cmd line script to check all the icons of the devicon.json.
| 2.582874
| 3
|
jupyter-notebooks/analysis-ready-data/visual.py
|
BradNeuberg/notebooks
| 483
|
6629643
|
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
"""
The NDVI values will range from -1 to 1. You want to use a diverging color scheme to visualize the data,
and you want to center the colorbar at a defined midpoint. The class below allows you to normalize the colorbar.
"""
class MidpointNormalize(colors.Normalize):
    """
    Normalise the colorbar so that diverging bars work there way either side from a prescribed midpoint value)
    e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
    Credit: <NAME>, http://chris35wills.github.io/matplotlib_diverging_colorbar/
    Credit: https://stackoverflow.com/a/48598564
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        # Data value that should map to the centre (0.5) of the colormap.
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Note that I'm ignoring clipping and other edge cases here.
        # process_value() is only used to obtain the mask of the input; the
        # masked entries are propagated onto the interpolated result below.
        result, is_scalar = self.process_value(value)
        # Piecewise-linear map of [vmin, midpoint, vmax] onto [0, 0.5, 1] so
        # the midpoint always lands on the centre of a diverging colormap.
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
def show_ndvi(ndvi, figsize=(20, 10)):
    """Display an NDVI array with a diverging colormap centred at 0
    and a horizontal colorbar.

    :param ndvi: 2-D array of NDVI values; NaNs are tolerated (nan-safe range).
    :param figsize: matplotlib figure size in inches.
    """
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # diverging color scheme chosen from https://matplotlib.org/users/colormaps.html
    cmap = plt.cm.RdYlGn
    # NaN-safe data range so missing pixels do not break the colour scaling.
    mmin = np.nanmin(ndvi)
    mmax = np.nanmax(ndvi)
    mid = 0
    cax = ax.imshow(ndvi, cmap=cmap, clim=(mmin, mmax),
                    norm=MidpointNormalize(midpoint=mid, vmin=mmin, vmax=mmax))
    ax.axis('off')
    ax.set_title('Normalized Difference Vegetation Index', fontsize=18, fontweight='bold')
    cbar = fig.colorbar(cax, orientation='horizontal', shrink=0.5)
    plt.show()
|
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
"""
The NDVI values will range from -1 to 1. You want to use a diverging color scheme to visualize the data,
and you want to center the colorbar at a defined midpoint. The class below allows you to normalize the colorbar.
"""
class MidpointNormalize(colors.Normalize):
    """Colorbar normalization that pins a chosen midpoint to the centre
    of a diverging colormap.

    e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
    Credit: <NAME>, http://chris35wills.github.io/matplotlib_diverging_colorbar/
    Credit: https://stackoverflow.com/a/48598564
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        super().__init__(vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Clipping and other edge cases are deliberately not handled; the
        # mask of the processed input is carried over to the output.
        processed, _ = self.process_value(value)
        anchors_in = [self.vmin, self.midpoint, self.vmax]
        anchors_out = [0, 0.5, 1]
        interpolated = np.interp(value, anchors_in, anchors_out)
        return np.ma.array(interpolated, mask=processed.mask, copy=False)
def show_ndvi(ndvi, figsize=(20, 10)):
    """Render an NDVI raster with the diverging RdYlGn colormap centred at 0,
    a bold title, and a horizontal colorbar."""
    fig = plt.figure(figsize=figsize)
    axis = fig.add_subplot(111)
    # diverging color scheme chosen from https://matplotlib.org/users/colormaps.html
    colormap = plt.cm.RdYlGn
    # NaN-safe data range
    lo = np.nanmin(ndvi)
    hi = np.nanmax(ndvi)
    image = axis.imshow(ndvi, cmap=colormap, clim=(lo, hi),
                        norm=MidpointNormalize(midpoint=0, vmin=lo, vmax=hi))
    axis.axis('off')
    axis.set_title('Normalized Difference Vegetation Index', fontsize=18, fontweight='bold')
    fig.colorbar(image, orientation='horizontal', shrink=0.5)
    plt.show()
|
en
| 0.640895
|
The NDVI values will range from -1 to 1. You want to use a diverging color scheme to visualize the data, and you want to center the colorbar at a defined midpoint. The class below allows you to normalize the colorbar. Normalise the colorbar so that diverging bars work there way either side from a prescribed midpoint value) e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100)) Credit: <NAME>, http://chris35wills.github.io/matplotlib_diverging_colorbar/ Credit: https://stackoverflow.com/a/48598564 # Note that I'm ignoring clipping and other edge cases here. # diverging color scheme chosen from https://matplotlib.org/users/colormaps.html
| 3.42392
| 3
|
PBO_ 18130/latihan_42_list2.py
|
viraditty09/PBO
| 0
|
6629644
|
# Compute and display the average of a list of course marks.
nilai_matakuliah = [70, 80, 90, 90, 13]
rata_rata = sum(nilai_matakuliah) / len(nilai_matakuliah)
print("Nilai Matakuliah=", nilai_matakuliah)
print("Nilai rata_rata=", rata_rata)
|
# List of course marks ("nilai matakuliah").
nilai_matakuliah=[70,80,90,90,13]
# Arithmetic mean of the marks.
rata_rata= (sum(nilai_matakuliah)/len(nilai_matakuliah))
print("Nilai Matakuliah=", nilai_matakuliah)
print("Nilai rata_rata=", rata_rata)
|
none
| 1
| 3.126622
| 3
|
|
bot/match/commands/swap_handler.py
|
monkeydg/POG-bot
| 2
|
6629645
|
from .command import InstantiatedCommand, Command, picking_states
from match.classes import CaptainValidator
from match.common import get_check_captain
from match import MatchStatus
from display import AllStrings as disp, ContextWrapper
import modules.roles as roles
from classes import Player
class SwapHandler(InstantiatedCommand):
    """Match command that swaps two picked players between opposing teams.

    The swap must be confirmed by the other team's captain unless requested
    by an admin, in which case it is applied immediately.
    """

    def __init__(self, obj):
        # NOTE(review): `self` is passed explicitly in addition to the bound
        # super() call -- presumably InstantiatedCommand expects the owning
        # instance plus the command coroutine; confirm against its signature.
        super().__init__(self, self.swap)
        self.validator = None  # CaptainValidator armed while a swap awaits confirmation
        self.factory = obj     # command factory giving access to the match and sibling commands

    @property
    def match(self):
        # The match object is owned by the factory, not by this command.
        return self.factory.match

    def on_start(self):
        self.validator = CaptainValidator(self.match)

        @self.validator.confirm
        async def do_swap(ctx, p_1, p_2):
            # Runs once the opposing captain (or an admin) has confirmed:
            # exchange the two players between their teams.
            team1 = p_1.team
            team2 = p_2.team
            team1.swap_player(p_1, p_2)
            team2.swap_player(p_2, p_1)
            p_1.change_team(team2)
            p_2.change_team(team1)
            try:
                ctx = self.match.get_process_attr('get_current_context')(ctx)
            except AttributeError:
                # The current match process may not expose a context getter.
                pass
            await disp.SWAP_OK.send(ctx, p_1.mention, p_2.mention, match=self.match.proxy)
            if self.match.status is MatchStatus.IS_WAITING:
                self.match.plugin_manager.on_teams_updated()

    def on_clean(self, hard=False):
        # Drop any pending confirmation; with hard=True also discard the validator.
        if self.validator:
            self.validator.clean()
        if hard:
            self.validator = None

    def on_team_ready(self, team):
        # A team becoming ready invalidates any pending swap request.
        if self.validator:
            self.validator.clean()

    @Command.command(*picking_states)
    async def swap(self, ctx, args):
        """Handle the swap command: validate the two mentioned players, then
        apply the swap directly (admin) or ask the other captain to confirm."""
        captain = None
        if not roles.is_admin(ctx.author):
            # Non-admins must be a captain of this match (turn not enforced).
            captain = await get_check_captain(ctx, self.match, check_turn=False)
            if not captain:
                return
        if len(ctx.message.mentions) != 2:
            await disp.SWAP_MENTION_2.send(ctx)
            return
        players = list()
        for mention in ctx.message.mentions:
            p = Player.get(mention.id)
            if not p:
                await disp.RM_NOT_IN_DB.send(ctx)
                return
            # Both players must be active participants of THIS match.
            if not(p.match and p.active and p.match is self.match.proxy):
                await disp.SWAP_NO.send(ctx, p.mention)
                return
            if p.active.is_playing:
                # Un-ready the player's team so its roster may change again.
                p.active.team.captain.is_turn = True
                p.active.team.on_team_ready(False)
            players.append(p.active)
        if players[0].team is players[1].team:
            await disp.SWAP_SAME_TEAM.send(ctx)
            return
        # Can't have a sub command running at the same time
        self.factory.sub.on_clean()
        self.factory.bench.on_clean()
        if roles.is_admin(ctx.author):
            await self.validator.force_confirm(ctx, p_1=players[0], p_2=players[1])
            return
        else:
            other_captain = self.match.teams[captain.team.id - 1].captain
            ctx = self.validator.arm(self.match.channel, captain, p_1=players[0], p_2=players[1])
            await disp.SWAP_OK_CONFIRM.send(ctx, other_captain.mention)
|
from .command import InstantiatedCommand, Command, picking_states
from match.classes import CaptainValidator
from match.common import get_check_captain
from match import MatchStatus
from display import AllStrings as disp, ContextWrapper
import modules.roles as roles
from classes import Player
class SwapHandler(InstantiatedCommand):
def __init__(self, obj):
super().__init__(self, self.swap)
self.validator = None
self.factory = obj
@property
def match(self):
return self.factory.match
def on_start(self):
self.validator = CaptainValidator(self.match)
@self.validator.confirm
async def do_swap(ctx, p_1, p_2):
team1 = p_1.team
team2 = p_2.team
team1.swap_player(p_1, p_2)
team2.swap_player(p_2, p_1)
p_1.change_team(team2)
p_2.change_team(team1)
try:
ctx = self.match.get_process_attr('get_current_context')(ctx)
except AttributeError:
pass
await disp.SWAP_OK.send(ctx, p_1.mention, p_2.mention, match=self.match.proxy)
if self.match.status is MatchStatus.IS_WAITING:
self.match.plugin_manager.on_teams_updated()
def on_clean(self, hard=False):
if self.validator:
self.validator.clean()
if hard:
self.validator = None
def on_team_ready(self, team):
if self.validator:
self.validator.clean()
@Command.command(*picking_states)
async def swap(self, ctx, args):
captain = None
if not roles.is_admin(ctx.author):
captain = await get_check_captain(ctx, self.match, check_turn=False)
if not captain:
return
if len(ctx.message.mentions) != 2:
await disp.SWAP_MENTION_2.send(ctx)
return
players = list()
for mention in ctx.message.mentions:
p = Player.get(mention.id)
if not p:
await disp.RM_NOT_IN_DB.send(ctx)
return
if not(p.match and p.active and p.match is self.match.proxy):
await disp.SWAP_NO.send(ctx, p.mention)
return
if p.active.is_playing:
p.active.team.captain.is_turn = True
p.active.team.on_team_ready(False)
players.append(p.active)
if players[0].team is players[1].team:
await disp.SWAP_SAME_TEAM.send(ctx)
return
# Can't have a sub command running at the same time
self.factory.sub.on_clean()
self.factory.bench.on_clean()
if roles.is_admin(ctx.author):
await self.validator.force_confirm(ctx, p_1=players[0], p_2=players[1])
return
else:
other_captain = self.match.teams[captain.team.id - 1].captain
ctx = self.validator.arm(self.match.channel, captain, p_1=players[0], p_2=players[1])
await disp.SWAP_OK_CONFIRM.send(ctx, other_captain.mention)
|
en
| 0.920323
|
# Can't have a sub command running at the same time
| 2.212224
| 2
|
src/waldur_core/core/utils.py
|
waldur/waldur-mastermind
| 4
|
6629646
|
import calendar
import datetime
import functools
import importlib
import os
import re
import time
import unicodedata
import uuid
import warnings
from collections import OrderedDict
from itertools import chain
from operator import itemgetter
import jwt
import requests
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMultiAlternatives
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import F
from django.db.models.sql.query import get_order_dir
from django.http import QueryDict
from django.template import Context
from django.template.loader import get_template, render_to_string
from django.urls import resolve
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text
from django.utils.lru_cache import lru_cache
from geopy.geocoders import Nominatim
from requests.packages.urllib3 import exceptions
from rest_framework.settings import api_settings
def flatten(*xs):
    """Concatenate the given iterables into a single flat tuple."""
    return tuple(item for seq in xs for item in seq)
def sort_dict(unsorted_dict):
    """
    Return a OrderedDict ordered by key names from the :unsorted_dict:
    """
    # OrderedDict preserves the insertion order of the pre-sorted items.
    return OrderedDict(sorted(unsorted_dict.items(), key=itemgetter(0)))
def datetime_to_timestamp(datetime):
    """Convert a naive datetime to an integer Unix timestamp.

    NOTE(review): the parameter shadows the ``datetime`` module name, and
    time.mktime interprets the timetuple in the *local* timezone.
    """
    return int(time.mktime(datetime.timetuple()))
def timestamp_to_datetime(timestamp, replace_tz=True):
    """Convert a Unix timestamp to a datetime.

    :param replace_tz: when True, attach the current Django timezone to the
        result; replace() only sets tzinfo, it does not shift the wall clock.
    """
    dt = datetime.datetime.fromtimestamp(int(timestamp))
    if replace_tz:
        dt = dt.replace(tzinfo=timezone.get_current_timezone())
    return dt
def timeshift(**kwargs):
    """Current time (microseconds zeroed) shifted by the given timedelta
    keyword arguments, e.g. ``timeshift(days=1, hours=-2)``."""
    return timezone.now().replace(microsecond=0) + datetime.timedelta(**kwargs)
def hours_in_month(month=None, year=None):
    """Number of hours in the given month; defaults to the current month/year."""
    today = datetime.datetime.now()
    month = month or today.month
    year = year or today.year
    _, days_in_month = calendar.monthrange(year, month)
    return 24 * days_in_month
def month_start(date):
    """Timezone-aware datetime at midnight on the first day of *date*'s month."""
    return timezone.make_aware(
        datetime.datetime(day=1, month=date.month, year=date.year)
    )
def month_end(date):
    """Timezone-aware datetime for the last representable instant of *date*'s month."""
    days_in_month = calendar.monthrange(date.year, date.month)[1]
    last_day_of_month = datetime.date(
        month=date.month, year=date.year, day=days_in_month
    )
    # time.max is 23:59:59.999999 -- combined with the month's final day.
    last_second_of_month = datetime.datetime.combine(
        last_day_of_month, datetime.time.max
    )
    return timezone.make_aware(last_second_of_month, timezone.get_current_timezone())
def pwgen(pw_len=16):
    """Generate a random password with the given length.

    Allowed chars does not have "I" or "O" or letters and
    digits that look similar -- just to avoid confusion.
    """
    allowed_chars = 'abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789'
    return get_random_string(pw_len, allowed_chars)
def serialize_instance(instance):
    """ Serialize Django model instance """
    # Format: "<app_label>.<model>:<pk>" -- force_text(instance._meta) yields
    # the dotted model label that deserialize_instance feeds to apps.get_model().
    model_name = force_text(instance._meta)
    return '{}:{}'.format(model_name, instance.pk)
def deserialize_instance(serialized_instance):
    """ Deserialize Django model instance """
    # Inverse of serialize_instance(); the manager lookup raises the model's
    # DoesNotExist when the referenced row is gone.
    model_name, pk = serialized_instance.split(':')
    model = apps.get_model(model_name)
    return model._default_manager.get(pk=pk)
def serialize_class(cls):
    """ Serialize Python class as '<module>:<name>' """
    return cls.__module__ + ':' + cls.__name__
def deserialize_class(serilalized_cls):
    """ Deserialize a 'module:name' string into the named attribute """
    module_name, cls_name = serilalized_cls.split(':')
    return getattr(importlib.import_module(module_name), cls_name)
def clear_url(url):
    """ Remove domain and protocol from url """
    if not url.startswith('http'):
        return url
    # Drop "<scheme>://<host>" and keep the path, re-rooted at '/'.
    return '/' + url.split('/', 3)[-1]
def get_model_from_resolve_match(match):
    """Return the model behind a resolved URL match, preferring the view
    class's queryset model over its explicit model attribute."""
    view_cls = match.func.cls
    if view_cls.queryset is None:
        return view_cls.model
    return view_cls.queryset.model
def instance_from_url(url, user=None):
    """ Restore instance from URL """
    # XXX: This circular dependency will be removed then filter_queryset_for_user
    # will be moved to model manager method
    from waldur_core.structure.managers import filter_queryset_for_user

    # Strip scheme/host, resolve the path against the URLconf, then fetch the
    # object by the captured URL kwargs; with a user, scope the queryset first.
    url = clear_url(url)
    match = resolve(url)
    model = get_model_from_resolve_match(match)
    queryset = model.objects.all()
    if user is not None:
        queryset = filter_queryset_for_user(model.objects.all(), user)
    return queryset.get(**match.kwargs)
def get_detail_view_name(model):
    """Return the DRF detail route name for *model* ('<url_name>-detail')."""
    if model is NotImplemented:
        raise AttributeError('Cannot get detail view name for not implemented model')
    url_name = getattr(model, 'get_url_name', None)
    if callable(url_name):
        return '%s-detail' % url_name()
    return '%s-detail' % model.__name__.lower()
def get_list_view_name(model):
    """Return the DRF list route name for *model* ('<url_name>-list')."""
    if model is NotImplemented:
        raise AttributeError('Cannot get list view name for not implemented model')
    url_name = getattr(model, 'get_url_name', None)
    if callable(url_name):
        return '%s-list' % url_name()
    return '%s-list' % model.__name__.lower()
def get_fake_context():
    """Minimal serializer context -- an unsaved user and an ad-hoc fake GET
    request class -- for rendering serializers outside a real request cycle."""
    user = get_user_model()()
    request = type(
        'R', (object,), {'method': 'GET', 'user': user, 'query_params': QueryDict()}
    )
    return {'request': request, 'user': user}
def camel_case_to_underscore(name):
    """Convert CamelCase (including acronym runs) to snake_case."""
    # First pass splits before a capitalized word, second pass splits a
    # lower/digit-to-upper boundary; then lowercase everything.
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
def silent_call(name, *args, **options):
    """Run a Django management command, discarding its stdout.

    Fixed: the original opened os.devnull without ever closing it, leaking a
    file descriptor on every call; the with-block guarantees closure.
    """
    with open(os.devnull, 'w') as devnull:
        call_command(name, stdout=devnull, *args, **options)
def format_text(template_name, context):
    """Render a Django template to plain text with autoescaping disabled,
    stripping surrounding whitespace."""
    template = get_template(template_name).template
    return template.render(Context(context, autoescape=False)).strip()
def send_mail_with_attachment(
    subject,
    body,
    to,
    from_email=None,
    html_message=None,
    filename=None,
    attachment=None,
    content_type='text/plain',
    bcc=None,
):
    """Send an email with an optional HTML alternative and one optional attachment.

    :param to: list of recipient addresses.
    :param from_email: defaults to settings.DEFAULT_FROM_EMAIL when falsy.
    :param html_message: if given, attached as a text/html alternative part.
    :param filename: attachment name; the attachment is only added when set.
    :param content_type: MIME type of the attachment.
    :return: number of successfully sent messages (0 or 1).
    """
    from_email = from_email or settings.DEFAULT_FROM_EMAIL
    email = EmailMultiAlternatives(
        subject=subject, body=body, to=to, from_email=from_email, bcc=bcc
    )
    if html_message:
        email.attach_alternative(html_message, 'text/html')
    if filename:
        email.attach(filename, attachment, content_type)
    return email.send()
def broadcast_mail(
    app,
    event_type,
    context,
    recipient_list,
    filename=None,
    attachment=None,
    content_type='text/plain',
    bcc=None,
):
    """
    Shorthand to format email message from template file and sent it to all recipients.

    It is assumed that there are there are 3 templates available for event type in application.
    For example, if app is 'users' and event_type is 'invitation_rejected', then there should be 3 files:

    1) users/invitation_rejected_subject.txt is template for email subject
    2) users/invitation_rejected_message.txt is template for email body as text
    3) users/invitation_rejected_message.html is template for email body as HTML

    By default, built-in Django send_mail is used, all members
    of the recipient list will see the other recipients in the 'To' field.
    Contrary to this, we're using explicit loop in order to ensure that
    recipients would NOT see the other recipients.

    :param app: prefix for template filename.
    :param event_type: postfix for template filename.
    :param context: dictionary passed to the template for rendering.
    :param recipient_list: list of strings, each an email address.
    :param filename: name of the attached file
    :param attachment: content of attachment
    :param content_type: the content type of attachment
    :param bcc: optional list of BCC addresses forwarded to each message.
    """
    subject_template_name = '%s/%s_subject.txt' % (app, event_type)
    subject = format_text(subject_template_name, context)
    text_template_name = '%s/%s_message.txt' % (app, event_type)
    text_message = format_text(text_template_name, context)
    html_template_name = '%s/%s_message.html' % (app, event_type)
    html_message = render_to_string(html_template_name, context)
    # One message per recipient so recipients never see each other (see above).
    for recipient in recipient_list:
        send_mail_with_attachment(
            subject,
            text_message,
            to=[recipient],
            html_message=html_message,
            filename=filename,
            attachment=attachment,
            content_type=content_type,
            bcc=bcc,
        )
def get_ordering(request):
    """Return the ordering parameter extracted from the HTTP request, if any."""
    ordering_param = api_settings.ORDERING_PARAM
    return request.query_params.get(ordering_param)
def order_with_nulls(queryset, field):
    """
    Order *queryset* by *field*, pushing NULLs to the natural end:
    ascending sorts place NULL values first, descending sorts place them last.
    """
    col, order = get_order_dir(field)
    if order == 'DESC':
        ordering = F(col).desc(nulls_last=True)
    else:
        ordering = F(col).asc(nulls_first=True)
    return queryset.order_by(ordering)
def is_uuid_like(val):
    """Return True when *val* can be parsed as a UUID, False otherwise."""
    try:
        uuid.UUID(val)
        return True
    except (TypeError, ValueError, AttributeError):
        return False
def chunks(xs, n):
    """
    Split *xs* into consecutive chunks of at most *n* items.

    >> chunks(range(10), 4)
    [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]

    :param xs: arbitrary list
    :param n: chunk size
    :return: list of lists
    """
    starts = range(0, len(xs), n)
    return [xs[start : start + n] for start in starts]
def create_batch_fetcher(fetcher):
    """
    Decorator that fetches resources from a backend API in evenly sized chunks.

    Splitting the request avoids a too-long-HTTP-request error; the merged
    result is the same as calling fetcher(items) directly.

    :param fetcher: function which accepts a list of items (for example UUIDs)
        and returns a list of results (for example projects with those UUIDs)
    :return: function with the same signature as fetcher
    """
    @functools.wraps(fetcher)
    def wrapped(items):
        """Fetch *items* chunk by chunk and merge the partial results."""
        chunk_size = settings.WALDUR_CORE['HTTP_CHUNK_SIZE']
        merged = []
        for batch in chunks(items, chunk_size):
            merged.extend(fetcher(batch))
        return merged
    return wrapped
class DryRunCommand(BaseCommand):
    """Management command base class that adds a ``--dry-run`` flag."""
    def add_arguments(self, parser):
        """Register the shared ``--dry-run`` switch on the command parser."""
        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='Don\'t make any changes, instead show what objects would be created.',
        )
def encode_jwt_token(data, api_secret_code=None):
    """
    Encode Python dictionary as JWT token.
    :param data: Dictionary with payload.
    :param api_secret_code: optional string, application secret key is used by default.
    :return: JWT token string with encoded and signed data.
    """
    if api_secret_code is None:
        api_secret_code = settings.SECRET_KEY
    # DjangoJSONEncoder lets payloads carry dates/decimals/UUIDs that plain
    # json.dumps would reject; algorithm must match decode_jwt_token's list.
    return jwt.encode(
        data, api_secret_code, algorithm='HS256', json_encoder=DjangoJSONEncoder
    )
def decode_jwt_token(encoded_data, api_secret_code=None):
    """
    Decode JWT token string to Python dictionary.
    :param encoded_data: JWT token string with encoded and signed data.
    :param api_secret_code: optional string, application secret key is used by default.
    :return: Dictionary with payload.
    """
    if api_secret_code is None:
        api_secret_code = settings.SECRET_KEY
    # Explicit algorithms list guards against algorithm-confusion attacks.
    return jwt.decode(encoded_data, api_secret_code, algorithms=['HS256'])
def normalize_unicode(data):
    """Strip accents by NFKD-decomposing *data* and dropping non-ASCII marks."""
    decomposed = unicodedata.normalize(u'NFKD', data)
    return decomposed.encode('ascii', 'ignore').decode('utf8')
# Pattern for sizes like "5", "5K", "5M"; the optional letter scales the number.
UNIT_PATTERN = re.compile(r'(\d+)([KMGTP]?)')
UNITS = {
    'K': 2 ** 10,
    'M': 2 ** 20,
    'G': 2 ** 30,
    'T': 2 ** 40,
    # 'P' is accepted by UNIT_PATTERN, so it needs a factor here as well;
    # it was missing, which made parse_int('5P') raise KeyError.
    'P': 2 ** 50,
}
def parse_int(value):
    """
    Parse strings like '5K' into integers using binary unit factors
    (e.g. '5K' -> 5 * 1024 = 5120); returns 0 when *value* does not
    start with digits.
    """
    match = UNIT_PATTERN.match(value)
    if match is None:
        return 0
    amount, unit = match.groups()
    factor = UNITS[unit] if unit else 1
    return int(amount) * factor
class QuietSession(requests.Session):
    """Session class that suppresses warning about unsafe TLS sessions and clogging the logs.

    Inspired by: https://github.com/kennethreitz/requests/issues/2214#issuecomment-110366218
    """
    def request(self, *args, **kwargs):
        """Delegate to Session.request, silencing TLS warnings when verify is off."""
        verify = kwargs.get('verify', self.verify)
        if verify:
            return super(QuietSession, self).request(*args, **kwargs)
        with warnings.catch_warnings():
            # urllib3 1.10 and lower does not define InsecurePlatformWarning.
            if hasattr(exceptions, 'InsecurePlatformWarning'):
                warnings.simplefilter('ignore', exceptions.InsecurePlatformWarning)
            warnings.simplefilter('ignore', exceptions.InsecureRequestWarning)
            return super(QuietSession, self).request(*args, **kwargs)
def get_lat_lon_from_address(address):
    """
    Geocode *address* via the OpenStreetMap Nominatim service.

    Returns a (latitude, longitude) tuple, or None (implicitly) when the
    address cannot be resolved. Performs a network request on every call.
    """
    geo_locator = Nominatim(user_agent='waldur')
    location = geo_locator.geocode(address)
    if location:
        return location.latitude, location.longitude
def format_homeport_link(format_str='', **kwargs):
    """Build an absolute homeport frontend URL from a path template and kwargs."""
    # HOMEPORT_URL comes from settings; format placeholders in *format_str*
    # are filled from the keyword arguments.
    link = settings.WALDUR_CORE['HOMEPORT_URL'] + format_str
    return link.format(**kwargs)
@lru_cache(maxsize=1)
def get_system_robot():
    """
    Return the active staff user named 'system_robot', or None if missing.

    The result (including a None miss) is cached for the process lifetime,
    so a robot account created later is not picked up until restart.
    """
    from waldur_core.core import models
    try:
        return models.User.objects.get(
            username='system_robot', is_staff=True, is_active=True
        )
    except ObjectDoesNotExist:
        return
|
import calendar
import datetime
import functools
import importlib
import os
import re
import time
import unicodedata
import uuid
import warnings
from collections import OrderedDict
from itertools import chain
from operator import itemgetter
import jwt
import requests
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMultiAlternatives
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import F
from django.db.models.sql.query import get_order_dir
from django.http import QueryDict
from django.template import Context
from django.template.loader import get_template, render_to_string
from django.urls import resolve
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text
from django.utils.lru_cache import lru_cache
from geopy.geocoders import Nominatim
from requests.packages.urllib3 import exceptions
from rest_framework.settings import api_settings
def flatten(*xs):
    """Concatenate the given iterables into a single flat tuple."""
    merged = chain.from_iterable(xs)
    return tuple(merged)
def sort_dict(unsorted_dict):
    """
    Return an OrderedDict with the items of :unsorted_dict: sorted by key name.
    """
    ordered_items = sorted(unsorted_dict.items(), key=itemgetter(0))
    return OrderedDict(ordered_items)
def datetime_to_timestamp(datetime):
    """Convert a naive datetime to an integer Unix timestamp (local time)."""
    # NOTE: the parameter shadows the module name inside this function only.
    local_struct = datetime.timetuple()
    return int(time.mktime(local_struct))
def timestamp_to_datetime(timestamp, replace_tz=True):
    """
    Convert a Unix timestamp into a datetime.

    When *replace_tz* is True the current Django timezone is attached via
    ``replace`` (the clock value itself is not shifted).
    """
    dt = datetime.datetime.fromtimestamp(int(timestamp))
    if replace_tz:
        dt = dt.replace(tzinfo=timezone.get_current_timezone())
    return dt
def timeshift(**kwargs):
    """Return now() shifted by the given timedelta kwargs, with microseconds zeroed."""
    return timezone.now().replace(microsecond=0) + datetime.timedelta(**kwargs)
def hours_in_month(month=None, year=None):
    """Return the number of hours in the given month (defaults to the current one)."""
    today = datetime.datetime.now()
    month = month or today.month
    year = year or today.year
    _, num_days = calendar.monthrange(year, month)
    return num_days * 24
def month_start(date):
    """Return an aware datetime for midnight on the first day of *date*'s month."""
    return timezone.make_aware(
        datetime.datetime(day=1, month=date.month, year=date.year)
    )
def month_end(date):
    """Return an aware datetime for the last representable moment of *date*'s month."""
    days_in_month = calendar.monthrange(date.year, date.month)[1]
    last_day_of_month = datetime.date(
        month=date.month, year=date.year, day=days_in_month
    )
    # Combine with time.max (23:59:59.999999) to reach the very end of the day.
    last_second_of_month = datetime.datetime.combine(
        last_day_of_month, datetime.time.max
    )
    return timezone.make_aware(last_second_of_month, timezone.get_current_timezone())
def pwgen(pw_len=16):
    """Generate a random password with the given length.

    Allowed chars does not have "I" or "O" or letters and
    digits that look similar -- just to avoid confusion.
    """
    # get_random_string draws from Django's random source; the three adjacent
    # string literals are concatenated into a single allowed alphabet.
    return get_random_string(
        pw_len, 'abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789'
    )
def serialize_instance(instance):
    """ Serialize Django model instance into an "app.model:pk" reference string """
    model_name = force_text(instance._meta)
    return '{}:{}'.format(model_name, instance.pk)
def deserialize_instance(serialized_instance):
    """ Deserialize Django model instance from an "app.model:pk" reference string """
    model_name, pk = serialized_instance.split(':')
    model = apps.get_model(model_name)
    return model._default_manager.get(pk=pk)
def serialize_class(cls):
    """ Serialize a Python class into a "module:name" reference string """
    module_name = cls.__module__
    return '{}:{}'.format(module_name, cls.__name__)
def deserialize_class(serilalized_cls):
    """ Deserialize a Python class from a "module:name" reference string """
    module_name, cls_name = serilalized_cls.split(':')
    target_module = importlib.import_module(module_name)
    return getattr(target_module, cls_name)
def clear_url(url):
    """ Remove domain and protocol from url, keeping only the path part """
    if not url.startswith('http'):
        return url
    return '/' + url.split('/', 3)[-1]
def get_model_from_resolve_match(match):
    """Return the model class behind a resolved URL match's view."""
    view_cls = match.func.cls
    if view_cls.queryset is None:
        return view_cls.model
    return view_cls.queryset.model
def instance_from_url(url, user=None):
    """ Restore instance from URL.

    Resolves *url* to a view, derives its model and fetches the object by the
    URL kwargs; when *user* is given, the queryset is permission-filtered first.
    """
    # XXX: This circular dependency will be removed then filter_queryset_for_user
    # will be moved to model manager method
    from waldur_core.structure.managers import filter_queryset_for_user
    url = clear_url(url)
    match = resolve(url)
    model = get_model_from_resolve_match(match)
    queryset = model.objects.all()
    if user is not None:
        queryset = filter_queryset_for_user(model.objects.all(), user)
    return queryset.get(**match.kwargs)
def get_detail_view_name(model):
    """Return the REST detail route name for *model* (e.g. 'project-detail')."""
    if model is NotImplemented:
        raise AttributeError('Cannot get detail view name for not implemented model')
    get_url_name = getattr(model, 'get_url_name', None)
    if callable(get_url_name):
        return '%s-detail' % get_url_name()
    return '%s-detail' % model.__name__.lower()
def get_list_view_name(model):
    """Return the REST list route name for *model* (e.g. 'project-list')."""
    if model is NotImplemented:
        raise AttributeError('Cannot get list view name for not implemented model')
    get_url_name = getattr(model, 'get_url_name', None)
    if callable(get_url_name):
        return '%s-list' % get_url_name()
    return '%s-list' % model.__name__.lower()
def get_fake_context():
    """Build a minimal serializer context: anonymous user plus a GET request stub."""
    user = get_user_model()()
    # NOTE(review): 'request' here is the dynamically created class object
    # itself, not an instance; attribute access still works for the stubbed
    # fields (method, user, query_params) that serializers read.
    request = type(
        'R', (object,), {'method': 'GET', 'user': user, 'query_params': QueryDict()}
    )
    return {'request': request, 'user': user}
def camel_case_to_underscore(name):
    """Convert CamelCase to snake_case, keeping acronym runs like 'HTTP' intact."""
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
def silent_call(name, *args, **options):
    """Invoke a management command, discarding its stdout output.

    :param name: management command name.
    :param args: positional arguments forwarded to the command.
    :param options: keyword options forwarded to the command.
    """
    # Use a context manager so the devnull handle is closed after the call;
    # the original left the file object open (a resource leak).
    with open(os.devnull, 'w') as devnull:
        call_command(name, stdout=devnull, *args, **options)
def format_text(template_name, context):
    """Render a text template with autoescaping disabled and strip surrounding whitespace."""
    template = get_template(template_name).template
    return template.render(Context(context, autoescape=False)).strip()
def send_mail_with_attachment(
    subject,
    body,
    to,
    from_email=None,
    html_message=None,
    filename=None,
    attachment=None,
    content_type='text/plain',
    bcc=None,
):
    """
    Send a plain-text email with optional HTML alternative and attachment.

    :param subject: message subject line.
    :param body: plain-text message body.
    :param to: list of recipient addresses.
    :param from_email: sender address; defaults to settings.DEFAULT_FROM_EMAIL.
    :param html_message: optional HTML body attached as an alternative part.
    :param filename: name of the attached file, if any.
    :param attachment: content of the attachment.
    :param content_type: MIME type of the attachment.
    :param bcc: optional list of blind-carbon-copy addresses.
    :return: number of successfully delivered messages (0 or 1).
    """
    from_email = from_email or settings.DEFAULT_FROM_EMAIL
    email = EmailMultiAlternatives(
        subject=subject, body=body, to=to, from_email=from_email, bcc=bcc
    )
    if html_message:
        email.attach_alternative(html_message, 'text/html')
    if filename:
        email.attach(filename, attachment, content_type)
    return email.send()
def broadcast_mail(
app,
event_type,
context,
recipient_list,
filename=None,
attachment=None,
content_type='text/plain',
bcc=None,
):
"""
Shorthand to format email message from template file and sent it to all recipients.
It is assumed that there are there are 3 templates available for event type in application.
For example, if app is 'users' and event_type is 'invitation_rejected', then there should be 3 files:
1) users/invitation_rejected_subject.txt is template for email subject
2) users/invitation_rejected_message.txt is template for email body as text
3) users/invitation_rejected_message.html is template for email body as HTML
By default, built-in Django send_mail is used, all members
of the recipient list will see the other recipients in the 'To' field.
Contrary to this, we're using explicit loop in order to ensure that
recipients would NOT see the other recipients.
:param app: prefix for template filename.
:param event_type: postfix for template filename.
:param context: dictionary passed to the template for rendering.
:param recipient_list: list of strings, each an email address.
:param filename: name of the attached file
:param attachment: content of attachment
:param content_type: the content type of attachment
"""
subject_template_name = '%s/%s_subject.txt' % (app, event_type)
subject = format_text(subject_template_name, context)
text_template_name = '%s/%s_message.txt' % (app, event_type)
text_message = format_text(text_template_name, context)
html_template_name = '%s/%s_message.html' % (app, event_type)
html_message = render_to_string(html_template_name, context)
for recipient in recipient_list:
send_mail_with_attachment(
subject,
text_message,
to=[recipient],
html_message=html_message,
filename=filename,
attachment=attachment,
content_type=content_type,
bcc=bcc,
)
def get_ordering(request):
"""
Extract ordering from HTTP request.
"""
return request.query_params.get(api_settings.ORDERING_PARAM)
def order_with_nulls(queryset, field):
"""
If sorting order is ascending, then NULL values come first,
if sorting order is descending, then NULL values come last.
"""
col, order = get_order_dir(field)
descending = True if order == 'DESC' else False
if descending:
return queryset.order_by(F(col).desc(nulls_last=True))
else:
return queryset.order_by(F(col).asc(nulls_first=True))
def is_uuid_like(val):
"""
Check if value looks like a valid UUID.
"""
try:
uuid.UUID(val)
except (TypeError, ValueError, AttributeError):
return False
else:
return True
def chunks(xs, n):
"""
Split list to evenly sized chunks
>> chunks(range(10), 4)
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
:param xs: arbitrary list
:param n: chunk size
:return: list of lists
"""
return [xs[i : i + n] for i in range(0, len(xs), n)]
def create_batch_fetcher(fetcher):
"""
Decorator to simplify code for chunked fetching.
It fetches resources from backend API in evenly sized chunks.
It is needed in order to avoid too long HTTP request error.
Essentially, it gives the same result as fetcher(items) but does not throw an error.
:param fetcher: fetcher: function which accepts list of items and returns list of results,
for example, list of UUIDs and returns list of projects with given UUIDs
:return: function with the same signature as fetcher
"""
@functools.wraps(fetcher)
def wrapped(items):
"""
:param items: list of items for request, for example, list of UUIDs
:return: merged list of results
"""
result = []
for chunk in chunks(items, settings.WALDUR_CORE['HTTP_CHUNK_SIZE']):
result.extend(fetcher(chunk))
return result
return wrapped
class DryRunCommand(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--dry-run',
action='store_true',
help='Don\'t make any changes, instead show what objects would be created.',
)
def encode_jwt_token(data, api_secret_code=None):
"""
Encode Python dictionary as JWT token.
:param data: Dictionary with payload.
:param api_secret_code: optional string, application secret key is used by default.
:return: JWT token string with encoded and signed data.
"""
if api_secret_code is None:
api_secret_code = settings.SECRET_KEY
return jwt.encode(
data, api_secret_code, algorithm='HS256', json_encoder=DjangoJSONEncoder
)
def decode_jwt_token(encoded_data, api_secret_code=None):
"""
Decode JWT token string to Python dictionary.
:param encoded_data: JWT token string with encoded and signed data.
:param api_secret_code: optional string, application secret key is used by default.
:return: Dictionary with payload.
"""
if api_secret_code is None:
api_secret_code = settings.SECRET_KEY
return jwt.decode(encoded_data, api_secret_code, algorithms=['HS256'])
def normalize_unicode(data):
return unicodedata.normalize(u'NFKD', data).encode('ascii', 'ignore').decode('utf8')
UNIT_PATTERN = re.compile(r'(\d+)([KMGTP]?)')
UNITS = {
'K': 2 ** 10,
'M': 2 ** 20,
'G': 2 ** 30,
'T': 2 ** 40,
}
def parse_int(value):
"""
Convert 5K to 5000.
"""
match = re.match(UNIT_PATTERN, value)
if not match:
return 0
value = int(match.group(1))
unit = match.group(2)
if unit:
factor = UNITS[unit]
else:
factor = 1
return factor * value
class QuietSession(requests.Session):
"""Session class that suppresses warning about unsafe TLS sessions and clogging the logs.
Inspired by: https://github.com/kennethreitz/requests/issues/2214#issuecomment-110366218
"""
def request(self, *args, **kwargs):
if not kwargs.get('verify', self.verify):
with warnings.catch_warnings():
if hasattr(
exceptions, 'InsecurePlatformWarning'
): # urllib3 1.10 and lower does not have this warning
warnings.simplefilter('ignore', exceptions.InsecurePlatformWarning)
warnings.simplefilter('ignore', exceptions.InsecureRequestWarning)
return super(QuietSession, self).request(*args, **kwargs)
else:
return super(QuietSession, self).request(*args, **kwargs)
def get_lat_lon_from_address(address):
geo_locator = Nominatim(user_agent='waldur')
location = geo_locator.geocode(address)
if location:
return location.latitude, location.longitude
def format_homeport_link(format_str='', **kwargs):
link = settings.WALDUR_CORE['HOMEPORT_URL'] + format_str
return link.format(**kwargs)
@lru_cache(maxsize=1)
def get_system_robot():
from waldur_core.core import models
try:
return models.User.objects.get(
username='system_robot', is_staff=True, is_active=True
)
except ObjectDoesNotExist:
return
|
en
| 0.776835
|
Return a OrderedDict ordered by key names from the :unsorted_dict: # sort items before inserting them into a dict Generate a random password with the given length. Allowed chars does not have "I" or "O" or letters and digits that look similar -- just to avoid confusion. Serialize Django model instance Deserialize Django model instance Serialize Python class Deserialize Python class Remove domain and protocol from url Restore instance from URL # XXX: This circular dependency will be removed then filter_queryset_for_user # will be moved to model manager method Shorthand to format email message from template file and sent it to all recipients. It is assumed that there are there are 3 templates available for event type in application. For example, if app is 'users' and event_type is 'invitation_rejected', then there should be 3 files: 1) users/invitation_rejected_subject.txt is template for email subject 2) users/invitation_rejected_message.txt is template for email body as text 3) users/invitation_rejected_message.html is template for email body as HTML By default, built-in Django send_mail is used, all members of the recipient list will see the other recipients in the 'To' field. Contrary to this, we're using explicit loop in order to ensure that recipients would NOT see the other recipients. :param app: prefix for template filename. :param event_type: postfix for template filename. :param context: dictionary passed to the template for rendering. :param recipient_list: list of strings, each an email address. :param filename: name of the attached file :param attachment: content of attachment :param content_type: the content type of attachment Extract ordering from HTTP request. If sorting order is ascending, then NULL values come first, if sorting order is descending, then NULL values come last. Check if value looks like a valid UUID. 
Split list to evenly sized chunks >> chunks(range(10), 4) [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] :param xs: arbitrary list :param n: chunk size :return: list of lists Decorator to simplify code for chunked fetching. It fetches resources from backend API in evenly sized chunks. It is needed in order to avoid too long HTTP request error. Essentially, it gives the same result as fetcher(items) but does not throw an error. :param fetcher: fetcher: function which accepts list of items and returns list of results, for example, list of UUIDs and returns list of projects with given UUIDs :return: function with the same signature as fetcher :param items: list of items for request, for example, list of UUIDs :return: merged list of results Encode Python dictionary as JWT token. :param data: Dictionary with payload. :param api_secret_code: optional string, application secret key is used by default. :return: JWT token string with encoded and signed data. Decode JWT token string to Python dictionary. :param encoded_data: JWT token string with encoded and signed data. :param api_secret_code: optional string, application secret key is used by default. :return: Dictionary with payload. Convert 5K to 5000. Session class that suppresses warning about unsafe TLS sessions and clogging the logs. Inspired by: https://github.com/kennethreitz/requests/issues/2214#issuecomment-110366218 # urllib3 1.10 and lower does not have this warning
| 1.879041
| 2
|
train_resnet.py
|
BTajini/Resnet-Theano
| 1
|
6629647
|
#!/usr/bin/env python
from __future__ import print_function
import sys, os, time, string
import numpy as np
np.random.seed(1234)
import theano
import theano.tensor as T
import lasagne
from utils import *
sys.setrecursionlimit(10000)
print("Config mode " + theano.config.mode)
import argparse
# Command-line configuration for the training run.
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=500)
parser.add_argument("--num_epochs", type=int, default=300)
parser.add_argument("--n", type=int, default=18, help="The total of residual blocks will be 3 times this value n.")
parser.add_argument("--width", type=int, default=1, help="For normal block only. Converting Resnet to Wide ResNet.")
parser.add_argument("--num_filters", type=int, default=16, help="Number of filter units for first convolution")
parser.add_argument('--block', default="normal", choices=["normal","bottleneck","dense", "dense_fast"])
parser.add_argument('--dataset', default="mnist", choices=['mnist', 'cifar10', 'food101'])
args = parser.parse_args()
num_classes = 10
# Pick the dataloader matching the requested dataset; food101 additionally
# swaps in the deeper ResNet-101 model and its class count.
# NOTE(review): for --dataset food101 no Dataloader is imported, so the
# Dataloader(...) calls below would raise NameError -- confirm against repo.
if args.dataset == "mnist" :
    from dataloader.mnist import Dataloader
elif args.dataset == "cifar10":
    from dataloader.cifar10 import Dataloader
else:
    from models.resnet101 import model
    num_classes = 101
train_data_loader = Dataloader(args.batch_size, "train")
test_data_loader = Dataloader(args.batch_size, "test")
print("Data shape:")
print(train_data_loader.shape())
print("ResNet with {} residual blocks.".format(args.n))
# NOTE(review): the `elif` condition below duplicates the `if` condition, so
# the "Width factor" branch is unreachable; these checks were presumably meant
# to test args.block ("bottleneck" vs "normal") rather than args.dataset.
# The `from models.resnets import model` import also only runs in the else
# branch, yet `model` is used unconditionally below -- confirm intent.
if (args.dataset == "mnist") or (args.dataset == "cifar10"):
    print("Bottleneck block. Depth {}".format(9*args.n+2))
elif (args.dataset == "mnist") or (args.dataset == "cifar10"):
    print("Width factor {}, no bottleneck. Depth {}.".format(args.width,6*args.n+2))
else:
    print("Dense block. Depth {}.".format(3*args.n+1))
    from models.resnets import model
# Build the network graph and the symbolic training/validation expressions.
network = model(train_data_loader.shape(), n=args.n, num_classes=num_classes, num_filters=args.num_filters, width=args.width, block=args.block)
describe(network)
print("Compiling")
input_var = T.tensor4()
target_var = T.ivector()
prediction = lasagne.layers.get_output(network, input_var)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
params = lasagne.layers.get_all_params(network, trainable=True)
# Epoch -> learning rate; epochs not listed keep the last value that was set.
learning_rate_schedule = {
    0: 0.0001, # low initial learning rate as described in paper
    2: 0.01,
    100: 0.001,
    150: 0.0001
}
learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=learning_rate, momentum=0.9)
# Deterministic pass (dropout/batch-norm in inference mode) for evaluation.
test_prediction = lasagne.layers.get_output(network, input_var, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                        target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                  dtype=theano.config.floatX)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
print("Starting training...")
# Per-epoch validation history; NOTE(review): these names are later rebound to
# scalar accumulators for the final evaluation, which clobbers the history
# after it has been saved to res_test_3.npz.
test_acc, test_err = [], []
for epoch in range(args.num_epochs):
    # Apply the scheduled learning rate at the epochs that define one.
    if epoch in learning_rate_schedule:
        lr = np.float32(learning_rate_schedule[epoch])
        print(" setting learning rate to %.7f" % lr)
        learning_rate.set_value(lr)
    train_err = 0
    train_batches = 0
    start_time = time.time()
    for inputs, targets in progress(train_data_loader.next_minibatch(),
            desc='Epoch %d/%d, Batch ' % (epoch + 1, args.num_epochs),
            total=train_data_loader.nb_batches):
        train_err += train_fn(inputs, targets)
        train_batches += 1
    # Full validation pass after every epoch.
    val_err = 0
    val_acc = 0
    val_batches = 0
    for inputs, targets in test_data_loader.next_minibatch():
        err, acc = val_fn(inputs, targets)
        val_err += err
        val_acc += acc
        val_batches += 1
    test_acc += [val_acc / val_batches]
    test_err += [val_err / val_batches]
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, args.num_epochs, time.time() - start_time))
    print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
    print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
    print(" validation accuracy:\t\t{:.2f} %".format(
        val_acc / val_batches * 100))
    # Checkpoint metrics and weights every epoch (files are overwritten).
    np.savez("res_test_3",test_acc=test_acc,test_err=test_err)
    np.savez("weights", *lasagne.layers.get_all_param_values(network))
# Final evaluation on the test set with the last saved weights.
test_err = 0
test_acc = 0
test_batches = 0
for inputs, targets in test_data_loader.next_minibatch():
    err, acc = val_fn(inputs, targets)
    test_err += err
    test_acc += acc
    test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
    test_acc / test_batches * 100))
|
#!/usr/bin/env python
from __future__ import print_function
import sys, os, time, string
import numpy as np
np.random.seed(1234)
import theano
import theano.tensor as T
import lasagne
from utils import *
sys.setrecursionlimit(10000)
print("Config mode " + theano.config.mode)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=500)
parser.add_argument("--num_epochs", type=int, default=300)
parser.add_argument("--n", type=int, default=18, help="The total of residual blocks will be 3 times this value n.")
parser.add_argument("--width", type=int, default=1, help="For normal block only. Converting Resnet to Wide ResNet.")
parser.add_argument("--num_filters", type=int, default=16, help="Number of filter units for first convolution")
parser.add_argument('--block', default="normal", choices=["normal","bottleneck","dense", "dense_fast"])
parser.add_argument('--dataset', default="mnist", choices=['mnist', 'cifar10', 'food101'])
args = parser.parse_args()
num_classes = 10
if args.dataset == "mnist" :
from dataloader.mnist import Dataloader
elif args.dataset == "cifar10":
from dataloader.cifar10 import Dataloader
else:
from models.resnet101 import model
num_classes = 101
train_data_loader = Dataloader(args.batch_size, "train")
test_data_loader = Dataloader(args.batch_size, "test")
print("Data shape:")
print(train_data_loader.shape())
print("ResNet with {} residual blocks.".format(args.n))
if (args.dataset == "mnist") or (args.dataset == "cifar10"):
print("Bottleneck block. Depth {}".format(9*args.n+2))
elif (args.dataset == "mnist") or (args.dataset == "cifar10"):
print("Width factor {}, no bottleneck. Depth {}.".format(args.width,6*args.n+2))
else:
print("Dense block. Depth {}.".format(3*args.n+1))
from models.resnets import model
network = model(train_data_loader.shape(), n=args.n, num_classes=num_classes, num_filters=args.num_filters, width=args.width, block=args.block)
describe(network)
print("Compiling")
input_var = T.tensor4()
target_var = T.ivector()
prediction = lasagne.layers.get_output(network, input_var)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
params = lasagne.layers.get_all_params(network, trainable=True)
learning_rate_schedule = {
0: 0.0001, # low initial learning rate as described in paper
2: 0.01,
100: 0.001,
150: 0.0001
}
learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=learning_rate, momentum=0.9)
test_prediction = lasagne.layers.get_output(network, input_var, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
print("Starting training...")
test_acc, test_err = [], []
for epoch in range(args.num_epochs):
if epoch in learning_rate_schedule:
lr = np.float32(learning_rate_schedule[epoch])
print(" setting learning rate to %.7f" % lr)
learning_rate.set_value(lr)
train_err = 0
train_batches = 0
start_time = time.time()
for inputs, targets in progress(train_data_loader.next_minibatch(),
desc='Epoch %d/%d, Batch ' % (epoch + 1, args.num_epochs),
total=train_data_loader.nb_batches):
train_err += train_fn(inputs, targets)
train_batches += 1
val_err = 0
val_acc = 0
val_batches = 0
for inputs, targets in test_data_loader.next_minibatch():
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
test_acc += [val_acc / val_batches]
test_err += [val_err / val_batches]
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, args.num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
np.savez("res_test_3",test_acc=test_acc,test_err=test_err)
np.savez("weights", *lasagne.layers.get_all_param_values(network))
test_err = 0
test_acc = 0
test_batches = 0
for inputs, targets in test_data_loader.next_minibatch():
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
|
en
| 0.914821
|
#!/usr/bin/env python # low initial learning rate as described in paper
| 2.322146
| 2
|
src/apps/kms/backend/controllers/client/ClientsGetController.py
|
parada3desu/foxy-key-broker
| 0
|
6629648
|
from http import HTTPStatus
from fastapi.encoders import jsonable_encoder
from starlette.requests import Request
from starlette.responses import JSONResponse
from src.apps.kms.backend.controllers.KmsController import KmsController
from src.contexts.kms.clients.application.findall.FindClientsByCriteriaQuery import FindClientsByCriteriaQuery
from src.contexts.shared.Infrastructure.parsers.parse_dict_format_to_criteria import parse_dict_to_criteria
from src.contexts.shared.domain.Query import Query
from src.contexts.shared.domain.QueryBus import QueryBus
from src.contexts.shared.domain.Response import Response
class ClientsGetController(KmsController):
    """HTTP controller that lists KMS clients filtered by query-string criteria."""
    def __init__(
        self,
        query_bus: QueryBus,
    ):
        # Query bus used to dispatch read-side queries to their handlers.
        self._query_bus = query_bus
    async def run(self, req: Request) -> JSONResponse:
        """Translate query params into a criteria query and return matching clients."""
        # Parse filters / ordering / limit out of the raw query string.
        query_params = dict(req.query_params)
        filters, order_by, limit = parse_dict_to_criteria(query_params)
        query: Query = FindClientsByCriteriaQuery(filters, order_by, limit)
        res: Response = await self._query_bus.ask(query)
        # Convert the domain response into JSON-serializable primitives.
        json_compatible_item_data = jsonable_encoder(res.to_primitives())
        return JSONResponse(status_code=HTTPStatus.OK, content=json_compatible_item_data)
|
from http import HTTPStatus
from fastapi.encoders import jsonable_encoder
from starlette.requests import Request
from starlette.responses import JSONResponse
from src.apps.kms.backend.controllers.KmsController import KmsController
from src.contexts.kms.clients.application.findall.FindClientsByCriteriaQuery import FindClientsByCriteriaQuery
from src.contexts.shared.Infrastructure.parsers.parse_dict_format_to_criteria import parse_dict_to_criteria
from src.contexts.shared.domain.Query import Query
from src.contexts.shared.domain.QueryBus import QueryBus
from src.contexts.shared.domain.Response import Response
class ClientsGetController(KmsController):
def __init__(
self,
query_bus: QueryBus,
):
self._query_bus = query_bus
async def run(self, req: Request) -> JSONResponse:
query_params = dict(req.query_params)
filters, order_by, limit = parse_dict_to_criteria(query_params)
query: Query = FindClientsByCriteriaQuery(filters, order_by, limit)
res: Response = await self._query_bus.ask(query)
json_compatible_item_data = jsonable_encoder(res.to_primitives())
return JSONResponse(status_code=HTTPStatus.OK, content=json_compatible_item_data)
|
none
| 1
| 2.049393
| 2
|
|
nebulousAD/modimpacket/pcapfile.py
|
BraveLittleRoaster/nebulousAD
| 130
|
6629649
|
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
from nebulousAD.modimpacket import ImpactPacket, ImpactDecoder, structure
# Apparent layer indices within a decoded packet stack (e.g. the TCP header
# of an Ethernet/IP/TCP packet would be layer O_TCP). Not used in this file;
# presumably consumed by callers together with ImpactDecoder -- TODO confirm.
O_ETH = 0
O_IP = 1
O_ARP = 1
O_UDP = 2
O_TCP = 2
O_ICMP = 2
O_UDP_DATA = 3
O_ICMP_DATA = 3
# libpcap magic number 0xa1b2c3d4, stored little-endian. NOTE(review): the
# leading '"' looks like impacket Structure format syntax marking the field
# as a fixed literal constant rather than part of the magic itself -- confirm
# against structure.py before changing.
MAGIC = '"\xD4\xC3\xB2\xA1'
class PCapFileHeader(structure.Structure):
    # Global libpcap file header (the first record of a capture file).
    # Field formats use impacket's Structure mini-language: '<H'/'<L'/'<l'
    # are little-endian unsigned/signed ints with '=' defaults.
    structure = (
        ('magic', MAGIC),                   # file magic; also encodes byte order
        ('versionMajor', '<H=2'),           # pcap format version 2.4
        ('versionMinor', '<H=4'),
        ('GMT2localCorrection', '<l=0'),    # "thiszone" offset; 0 in practice
        ('timeAccuracy', '<L=0'),           # "sigfigs"; always 0 in practice
        ('maxLength', '<L=0xffff'),         # snaplen: max bytes saved per packet
        ('linkType', '<L=1'),               # data-link type; 1 presumably = Ethernet -- confirm
        ('packets','*:=[]'),
    )
class PCapFilePacket(structure.Structure):
    # Per-packet record header followed by the raw captured bytes.
    structure = (
        ('tsec', '<L=0'),               # timestamp: whole seconds
        ('tmsec', '<L=0'),              # timestamp: sub-second part
        ('savedLength', '<L-data'),     # bytes actually stored; '-data' presumably derives it from len(data) -- confirm
        ('realLength', '<L-data'),      # original length on the wire
        ('data',':'),                   # raw packet bytes
    )
    def __init__(self, *args, **kargs):
        structure.Structure.__init__(self, *args, **kargs)
        # Start with an empty payload so the 'data' field always exists.
        self['data'] = ''
class PcapFile:
    """Reader/writer for libpcap capture files.

    The global file header is parsed lazily on first read access and written
    lazily (at offset 0) before the first packet is written.
    """

    def __init__(self, fileName=None, mode='rb'):
        # fileName may be None: the caller can attach a file object later
        # via setFile() (e.g. a socket-like or in-memory stream).
        if fileName is not None:  # was `not fileName is None`
            self.file = open(fileName, mode)
        self.hdr = None
        self.wroteHeader = False

    def reset(self):
        """Rewind to the start of the file and forget the cached header."""
        self.hdr = None
        self.file.seek(0)

    def close(self):
        self.file.close()

    def fileno(self):
        return self.file.fileno()

    def setFile(self, file):
        self.file = file

    def setSnapLen(self, snapLen):
        """Set the per-packet snapshot length recorded in the file header."""
        self.createHeaderOnce()
        self.hdr['maxLength'] = snapLen

    def getSnapLen(self):
        self.readHeaderOnce()
        return self.hdr['maxLength']

    def setLinkType(self, linkType):
        """Set the data-link type field of the file header."""
        self.createHeaderOnce()
        self.hdr['linkType'] = linkType

    def getLinkType(self):
        self.readHeaderOnce()
        return self.hdr['linkType']

    def readHeaderOnce(self):
        # Parse the global header only on first use.
        if self.hdr is None:
            self.hdr = PCapFileHeader.fromFile(self.file)

    def createHeaderOnce(self):
        if self.hdr is None:
            self.hdr = PCapFileHeader()

    def writeHeaderOnce(self):
        # Emit the global header exactly once, at offset 0, before packet data.
        if not self.wroteHeader:
            self.wroteHeader = True
            self.file.seek(0)
            self.createHeaderOnce()
            self.file.write(str(self.hdr))

    def read(self):
        """Return the next PCapFilePacket, or None at end of file.

        Any parse/IO failure is treated as end of file (best-effort reader).
        """
        self.readHeaderOnce()
        try:
            pkt = PCapFilePacket.fromFile(self.file)
            pkt['data'] = self.file.read(pkt['savedLength'])
            return pkt
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; deliberate best-effort EOF behavior kept.
            return None

    def write(self, pkt):
        """Append one packet record, writing the file header first if needed."""
        self.writeHeaderOnce()
        self.file.write(str(pkt))

    def packets(self):
        """Yield every packet from the start of the file."""
        self.reset()
        while True:
            answer = self.read()
            if answer is None:
                break
            yield answer
|
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
from nebulousAD.modimpacket import ImpactPacket, ImpactDecoder, structure
# Apparent layer indices within a decoded packet stack (e.g. the TCP header
# of an Ethernet/IP/TCP packet would be layer O_TCP). Not used in this file;
# presumably consumed by callers together with ImpactDecoder -- TODO confirm.
O_ETH = 0
O_IP = 1
O_ARP = 1
O_UDP = 2
O_TCP = 2
O_ICMP = 2
O_UDP_DATA = 3
O_ICMP_DATA = 3
# libpcap magic number 0xa1b2c3d4, stored little-endian. NOTE(review): the
# leading '"' looks like impacket Structure format syntax marking the field
# as a fixed literal constant rather than part of the magic itself -- confirm
# against structure.py before changing.
MAGIC = '"\xD4\xC3\xB2\xA1'
class PCapFileHeader(structure.Structure):
    # Global libpcap file header (the first record of a capture file).
    # Field formats use impacket's Structure mini-language: '<H'/'<L'/'<l'
    # are little-endian unsigned/signed ints with '=' defaults.
    structure = (
        ('magic', MAGIC),                   # file magic; also encodes byte order
        ('versionMajor', '<H=2'),           # pcap format version 2.4
        ('versionMinor', '<H=4'),
        ('GMT2localCorrection', '<l=0'),    # "thiszone" offset; 0 in practice
        ('timeAccuracy', '<L=0'),           # "sigfigs"; always 0 in practice
        ('maxLength', '<L=0xffff'),         # snaplen: max bytes saved per packet
        ('linkType', '<L=1'),               # data-link type; 1 presumably = Ethernet -- confirm
        ('packets','*:=[]'),
    )
class PCapFilePacket(structure.Structure):
    # Per-packet record header followed by the raw captured bytes.
    structure = (
        ('tsec', '<L=0'),               # timestamp: whole seconds
        ('tmsec', '<L=0'),              # timestamp: sub-second part
        ('savedLength', '<L-data'),     # bytes actually stored; '-data' presumably derives it from len(data) -- confirm
        ('realLength', '<L-data'),      # original length on the wire
        ('data',':'),                   # raw packet bytes
    )
    def __init__(self, *args, **kargs):
        structure.Structure.__init__(self, *args, **kargs)
        # Start with an empty payload so the 'data' field always exists.
        self['data'] = ''
class PcapFile:
    """Reader/writer for libpcap capture files.

    The global file header is parsed lazily on first read access and written
    lazily (at offset 0) before the first packet is written.
    """

    def __init__(self, fileName=None, mode='rb'):
        # fileName may be None: the caller can attach a file object later
        # via setFile() (e.g. a socket-like or in-memory stream).
        if fileName is not None:  # was `not fileName is None`
            self.file = open(fileName, mode)
        self.hdr = None
        self.wroteHeader = False

    def reset(self):
        """Rewind to the start of the file and forget the cached header."""
        self.hdr = None
        self.file.seek(0)

    def close(self):
        self.file.close()

    def fileno(self):
        return self.file.fileno()

    def setFile(self, file):
        self.file = file

    def setSnapLen(self, snapLen):
        """Set the per-packet snapshot length recorded in the file header."""
        self.createHeaderOnce()
        self.hdr['maxLength'] = snapLen

    def getSnapLen(self):
        self.readHeaderOnce()
        return self.hdr['maxLength']

    def setLinkType(self, linkType):
        """Set the data-link type field of the file header."""
        self.createHeaderOnce()
        self.hdr['linkType'] = linkType

    def getLinkType(self):
        self.readHeaderOnce()
        return self.hdr['linkType']

    def readHeaderOnce(self):
        # Parse the global header only on first use.
        if self.hdr is None:
            self.hdr = PCapFileHeader.fromFile(self.file)

    def createHeaderOnce(self):
        if self.hdr is None:
            self.hdr = PCapFileHeader()

    def writeHeaderOnce(self):
        # Emit the global header exactly once, at offset 0, before packet data.
        if not self.wroteHeader:
            self.wroteHeader = True
            self.file.seek(0)
            self.createHeaderOnce()
            self.file.write(str(self.hdr))

    def read(self):
        """Return the next PCapFilePacket, or None at end of file.

        Any parse/IO failure is treated as end of file (best-effort reader).
        """
        self.readHeaderOnce()
        try:
            pkt = PCapFilePacket.fromFile(self.file)
            pkt['data'] = self.file.read(pkt['savedLength'])
            return pkt
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; deliberate best-effort EOF behavior kept.
            return None

    def write(self, pkt):
        """Append one packet record, writing the file header first if needed."""
        self.writeHeaderOnce()
        self.file.write(str(pkt))

    def packets(self):
        """Yield every packet from the start of the file."""
        self.reset()
        while True:
            answer = self.read()
            if answer is None:
                break
            yield answer
|
en
| 0.820879
|
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved. # # This software is provided under under a slightly modified version # of the Apache Software License. See the accompanying LICENSE file # for more information. #
| 1.861159
| 2
|
tests/test_cudata.py
|
bryancatanzaro/copperhead
| 98
|
6629650
|
#
# Copyright 2012 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import numpy as np
from copperhead import *
import unittest
from recursive_equal import recursive_equal
class CudataTest(unittest.TestCase):
    """Checks that cuarray round-trips flat and nested data built from numpy
    arrays and plain Python lists, preserving values, structure and scalar
    types, and that recursive_equal detects mismatches."""

    def testNumpyFlat(self):
        a = np.array([1, 2, 3, 4, 5])
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def testPythonFlat(self):
        a = [2.78, 3.14, 1.62]
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def testNumpyNested(self):
        a = [[np.array([1, 2]), np.array([3, 4, 5])],
             [np.array([6, 7, 8, 9]), np.array([10, 11, 12, 13, 14]),
              np.array([15, 16, 17, 18, 19, 20])]]
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def testPythonNested(self):
        a = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def deref_type_check(self, np_type):
        # Helper: a single-element cuarray must dereference to the same
        # scalar type and value as the numpy array it was built from.
        a = np.array([1], dtype=np_type)
        b = cuarray(a)
        self.assertTrue(type(a[0]) == type(b[0]))
        self.assertTrue(a[0] == b[0])

    def testInt32(self):
        self.deref_type_check(np.int32)

    def testInt64(self):
        self.deref_type_check(np.int64)

    def testFloat32(self):
        self.deref_type_check(np.float32)

    def testFloat64(self):
        self.deref_type_check(np.float64)

    def testBool(self):
        # np.bool was a deprecated alias for the builtin bool and was removed
        # in NumPy 1.24; np.bool_ yields the identical array dtype and keeps
        # this test running on modern NumPy.
        self.deref_type_check(np.bool_)

    def testStr(self):
        a = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        self.assertEqual(str(a), str(cuarray(a)))

    def testUnequalLength(self):
        a = [1, 2, 3]
        b = [1, 2, 3, 4]
        self.assertFalse(recursive_equal(a, cuarray(b)))
        self.assertFalse(recursive_equal(b, cuarray(a)))

    def testUnequalContent(self):
        a = [1, 2, 3]
        b = [3, 2, 1]
        self.assertFalse(recursive_equal(a, cuarray(b)))

    def testUnequalNested(self):
        a = [[1, 2], [3, 4, 5]]
        b = [[1, 2], [3, 4, 5, 6]]
        self.assertFalse(recursive_equal(a, cuarray(b)))

    def testUnequalTriplyNested(self):
        a = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        b = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9, 10], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        self.assertFalse(recursive_equal(a, cuarray(b)))
# Allow running this test module directly: `python test_cudata.py`.
if __name__ == '__main__':
    unittest.main()
|
#
# Copyright 2012 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import numpy as np
from copperhead import *
import unittest
from recursive_equal import recursive_equal
class CudataTest(unittest.TestCase):
    """Checks that cuarray round-trips flat and nested data built from numpy
    arrays and plain Python lists, preserving values, structure and scalar
    types, and that recursive_equal detects mismatches."""

    def testNumpyFlat(self):
        a = np.array([1, 2, 3, 4, 5])
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def testPythonFlat(self):
        a = [2.78, 3.14, 1.62]
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def testNumpyNested(self):
        a = [[np.array([1, 2]), np.array([3, 4, 5])],
             [np.array([6, 7, 8, 9]), np.array([10, 11, 12, 13, 14]),
              np.array([15, 16, 17, 18, 19, 20])]]
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def testPythonNested(self):
        a = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        self.assertTrue(recursive_equal(a, cuarray(a)))

    def deref_type_check(self, np_type):
        # Helper: a single-element cuarray must dereference to the same
        # scalar type and value as the numpy array it was built from.
        a = np.array([1], dtype=np_type)
        b = cuarray(a)
        self.assertTrue(type(a[0]) == type(b[0]))
        self.assertTrue(a[0] == b[0])

    def testInt32(self):
        self.deref_type_check(np.int32)

    def testInt64(self):
        self.deref_type_check(np.int64)

    def testFloat32(self):
        self.deref_type_check(np.float32)

    def testFloat64(self):
        self.deref_type_check(np.float64)

    def testBool(self):
        # np.bool was a deprecated alias for the builtin bool and was removed
        # in NumPy 1.24; np.bool_ yields the identical array dtype and keeps
        # this test running on modern NumPy.
        self.deref_type_check(np.bool_)

    def testStr(self):
        a = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        self.assertEqual(str(a), str(cuarray(a)))

    def testUnequalLength(self):
        a = [1, 2, 3]
        b = [1, 2, 3, 4]
        self.assertFalse(recursive_equal(a, cuarray(b)))
        self.assertFalse(recursive_equal(b, cuarray(a)))

    def testUnequalContent(self):
        a = [1, 2, 3]
        b = [3, 2, 1]
        self.assertFalse(recursive_equal(a, cuarray(b)))

    def testUnequalNested(self):
        a = [[1, 2], [3, 4, 5]]
        b = [[1, 2], [3, 4, 5, 6]]
        self.assertFalse(recursive_equal(a, cuarray(b)))

    def testUnequalTriplyNested(self):
        a = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        b = [[[1, 2], [3, 4, 5]],
             [[6, 7, 8, 9, 10], [10, 11, 12, 13, 14],
              [15, 16, 17, 18, 19, 20]]]
        self.assertFalse(recursive_equal(a, cuarray(b)))
# Allow running this test module directly: `python test_cudata.py`.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.842436
|
# # Copyright 2012 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #
| 2.254641
| 2
|