hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf796a89fa0259c1dee3144423a91d1cc364eb6 | 445 | py | Python | fastapi/__init__.py | albertfreist/fastapi | c278139de797d7c57474ecddf84d2c3cbe8cf03c | [
"MIT"
] | 4 | 2020-01-16T09:12:40.000Z | 2020-01-16T09:12:50.000Z | fastapi/__init__.py | albertfreist/fastapi | c278139de797d7c57474ecddf84d2c3cbe8cf03c | [
"MIT"
] | 1 | 2021-03-25T23:19:19.000Z | 2021-03-25T23:19:19.000Z | fastapi/__init__.py | albertfreist/fastapi | c278139de797d7c57474ecddf84d2c3cbe8cf03c | [
"MIT"
] | null | null | null | """FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.46.0"
from starlette.background import BackgroundTasks
from .applications import FastAPI
from .datastructures import UploadFile
from .exceptions import HTTPException
from .param_functions import (
Body,
Cookie,
Depends,
File,
Form,
Header,
Path,
Query,
Security,
)
from .routing import APIRouter
| 20.227273 | 92 | 0.72809 |
acf797cda9e2b4d17741cb95dd326484ecc3f255 | 6,940 | py | Python | query_tracer/modules/sql.py | morlandi/django-query-tracer | 18a750678c8cc2e7498d58cbed254b1c1522bf52 | [
"MIT"
] | 4 | 2017-11-03T09:31:55.000Z | 2021-07-14T02:40:03.000Z | query_tracer/modules/sql.py | morlandi/django-query-tracer | 18a750678c8cc2e7498d58cbed254b1c1522bf52 | [
"MIT"
] | 2 | 2019-04-04T12:55:48.000Z | 2020-09-27T18:50:28.000Z | query_tracer/modules/sql.py | morlandi/django-query-tracer | 18a750678c8cc2e7498d58cbed254b1c1522bf52 | [
"MIT"
] | 1 | 2019-04-04T12:50:18.000Z | 2019-04-04T12:50:18.000Z | """
Based on initial work from django-debug-toolbar
"""
import re
from datetime import datetime
try:
from django.db import connections
except ImportError:
# Django version < 1.2
from django.db import connection
connections = {'default': connection}
try:
from django.db.backends import utils # renamed in django 1.7
except ImportError:
from django.db.backends import util as utils # removed in django 1.9
from django.conf import settings as django_settings
#from django.template import Node
from query_tracer.modules import QueryTracerModule
#from query_tracer.utils.stack import tidy_stacktrace, get_template_info
from query_tracer.utils.time import ms_from_timedelta
from query_tracer import settings
try:
    import sqlparse
except ImportError:
    # Fallback stub when the optional sqlparse package is not installed:
    # format() becomes a no-op that returns the SQL text unchanged.
    class sqlparse:
        @staticmethod
        def format(text, *args, **kwargs):
            return text
_sql_fields_re = re.compile(r'SELECT .*? FROM')
_sql_aggregates_re = re.compile(r'SELECT .*?(COUNT|SUM|AVERAGE|MIN|MAX).*? FROM')


def truncate_sql(sql, aggregates=True):
    """Collapse the column list of a SELECT statement to 'SELECT ... FROM'.

    When *aggregates* is False, statements whose column list contains an
    aggregate function (COUNT/SUM/AVERAGE/MIN/MAX) are returned untouched.
    """
    preserve = not aggregates and _sql_aggregates_re.match(sql)
    if preserve:
        return sql
    return _sql_fields_re.sub('SELECT ... FROM', sql)
# # TODO:This should be set in the toolbar loader as a default and panels should
# # get a copy of the toolbar object with access to its config dictionary
# SQL_WARNING_THRESHOLD = getattr(settings, 'QUERYTRACER_CONFIG', {}) \
# .get('SQL_WARNING_THRESHOLD', 500)
# Probe for django-debug-toolbar purely to set the `debug_toolbar` flag;
# the imported DatabaseStatTracker name is unconditionally rebound below.
try:
    from debug_toolbar.panels.sql import DatabaseStatTracker
    debug_toolbar = True
except ImportError:
    debug_toolbar = False
import django
#version = float('.'.join([str(x) for x in django.VERSION[:2]]))
#if version >= 1.6:
# Version comparison fix required after Django 1.10
# Encode (major, minor) as major*100 + minor so that e.g. Django 1.10 (110)
# compares greater than 1.6 (106); the old float comparison broke at 1.10.
version = django.VERSION[0] * 100 + django.VERSION[1]
if version >= 106:
    # Django >= 1.6: subclass the plain CursorWrapper.
    DatabaseStatTracker = utils.CursorWrapper
else:
    DatabaseStatTracker = utils.CursorDebugWrapper
class DatabaseStatTracker(DatabaseStatTracker):
    """
    Replacement for CursorDebugWrapper which outputs information as it happens.

    ``logger`` is injected by SQLRealTimeModule.process_init(); ``queries``
    is a class-level list that the module resets at the start of each request.
    """
    logger = None
    queries = []

    def execute(self, sql, params=()):
        """Log the interpolated SQL (subject to the filter/truncate settings),
        run it, then record duration, row count and the query itself."""
        if params is None:
            formatted_sql = sql
        elif isinstance(params, dict):
            formatted_sql = sql % params
        else:
            formatted_sql = sql % tuple(params)

        # `message` doubles as the "should this query be reported?" flag for
        # the post-execution logging below.  Initialised here so the finally
        # block cannot hit an unbound local when self.logger was unset at the
        # time execution started.
        message = None
        if self.logger:
            message = formatted_sql
            if settings.QUERYTRACER_FILTER_OUT_SQL:
                if any(filter_.search(message) for filter_ in settings.QUERYTRACER_FILTER_OUT_SQL):
                    message = None
            # Only apply the FILTER_IN patterns when FILTER_OUT has not
            # already dropped the message (searching None would raise).
            if message is not None and settings.QUERYTRACER_FILTER_IN_SQL:
                if not all(filter_.search(message) for filter_ in settings.QUERYTRACER_FILTER_IN_SQL):
                    message = None
            if message is not None:
                if settings.QUERYTRACER_TRUNCATE_SQL:
                    message = truncate_sql(message, aggregates=settings.QUERYTRACER_TRUNCATE_AGGREGATES)
                message = sqlparse.format(message, reindent=True, keyword_case='upper')
                self.logger.debug(message)
        start = datetime.now()
        try:
            return super(DatabaseStatTracker, self).execute(sql, params)
        finally:
            stop = datetime.now()
            duration = ms_from_timedelta(stop - start)
            # Only report the row count when the query itself was reported
            # and it ran longer than the (optional) minimum duration.
            if self.logger and (not settings.QUERYTRACER_SQL_MIN_DURATION
                    or duration > settings.QUERYTRACER_SQL_MIN_DURATION):
                if self.cursor.rowcount >= 0 and message is not None:
                    self.logger.debug('Found %s matching rows', self.cursor.rowcount, duration=duration)
            # With DEBUG on (or debug-toolbar active) Django records queries
            # itself, so keep our own list instead of duplicating into db.queries.
            if not (debug_toolbar or django_settings.DEBUG):
                self.db.queries.append({
                    'sql': formatted_sql,
                    'time': duration,
                })
            else:
                self.queries.append({
                    'sql': formatted_sql,
                    'time': duration,
                })

    def executemany(self, sql, param_list):
        """As execute(), for batched statements; logs once for the batch."""
        start = datetime.now()
        try:
            return super(DatabaseStatTracker, self).executemany(sql, param_list)
        finally:
            stop = datetime.now()
            duration = ms_from_timedelta(stop - start)
            if self.logger:
                message = sqlparse.format(sql, reindent=True, keyword_case='upper')
                message = 'Executed %s times\n' % message
                self.logger.debug(message, duration=duration)
                self.logger.debug('Found %s matching rows', self.cursor.rowcount, duration=duration, id='query')
            # Fixed to test django_settings.DEBUG, matching execute() above;
            # the original referenced settings.DEBUG on the query_tracer
            # settings module, which is not where Django's DEBUG flag lives.
            if not (debug_toolbar or django_settings.DEBUG):
                self.db.queries.append({
                    'sql': '%s times: %s' % (len(param_list), sql),
                    'time': duration,
                })
            else:
                self.queries.append({
                    'sql': '%s times: %s' % (len(param_list), sql),
                    'time': duration,
                })
class SQLRealTimeModule(QueryTracerModule):
    """
    Outputs SQL queries as they happen.

    Works by monkey-patching utils.CursorDebugWrapper with our
    DatabaseStatTracker subclass for the duration of a request.
    """
    logger_name = 'sql'

    def process_init(self, request):
        # Install the tracking wrapper (once) and reset its per-request
        # query list; the logger is attached at class level so every cursor
        # created during this request reports through it.
        if not issubclass(utils.CursorDebugWrapper, DatabaseStatTracker):
            self.old_cursor = utils.CursorDebugWrapper
            utils.CursorDebugWrapper = DatabaseStatTracker
        utils.CursorDebugWrapper.queries = []
        DatabaseStatTracker.logger = self.logger

    def process_complete(self, request):
        if issubclass(utils.CursorDebugWrapper, DatabaseStatTracker):
            # Mimic SQLSummaryModule
            queries = utils.CursorDebugWrapper.queries
            num_queries = len(queries)
            if num_queries:
                unique = set([s['sql'] for s in queries])
                prompt = 'SQL Summary %s ' % ('.' * 58)
                self.logger.info(prompt + '[%(calls)s queries with %(dupes)s duplicates]' % dict(
                    calls=num_queries,
                    dupes=num_queries - len(unique),
                    #), duration=sum(float(c.get('time', 0)) for c in queries) * 1000)
                ))
            # Restore the wrapper saved in process_init.  NOTE(review):
            # assumes process_init ran on this same instance first -- verify.
            utils.CursorDebugWrapper = self.old_cursor
class SQLSummaryModule(QueryTracerModule):
    """
    Outputs a summary of the SQL queries executed during the request.
    """
    logger_name = 'sql'

    def process_complete(self, request):
        # Gather the queries Django recorded on every configured connection.
        queries = []
        for alias in connections:
            queries.extend(connections[alias].queries)
        total = len(queries)
        if not total:
            return
        distinct_sql = set(q['sql'] for q in queries)
        total_ms = sum(float(q.get('time', 0)) for q in queries) * 1000
        self.logger.info(
            '%(calls)s queries with %(dupes)s duplicates' % {
                'calls': total,
                'dupes': total - len(distinct_sql),
            },
            duration=total_ms,
        )
| 34.356436 | 112 | 0.606772 |
acf7988d559b2aa481b253dbd5e55f0b314722ec | 930 | py | Python | application/migrations/0012_auto_20210416_1711.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 6 | 2021-05-12T08:40:36.000Z | 2022-01-25T08:31:06.000Z | application/migrations/0012_auto_20210416_1711.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 14 | 2021-05-12T09:03:08.000Z | 2021-06-10T13:18:52.000Z | application/migrations/0012_auto_20210416_1711.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 1 | 2021-05-14T20:54:15.000Z | 2021-05-14T20:54:15.000Z | # Generated by Django 3.2 on 2021-04-16 16:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the surrogate `id` columns and
    # promotes `checkin.member` (one-to-one) and `telegramgroup.title` to
    # primary keys.

    dependencies = [
        ('application', '0011_auto_20210415_1507'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='checkin',
            name='id',
        ),
        migrations.RemoveField(
            model_name='telegramgroup',
            name='id',
        ),
        migrations.AlterField(
            model_name='checkin',
            name='member',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='application.member'),
        ),
        migrations.AlterField(
            model_name='telegramgroup',
            name='title',
            field=models.CharField(max_length=255, primary_key=True, serialize=False, unique=True),
        ),
    ]
| 28.181818 | 144 | 0.601075 |
acf798cfb4d953a2e536ba986f8b8e7a76bfb57f | 1,163 | py | Python | examples/hardware/dsme/analyze.py | CometOS/CometOS | bfb77e185db023b4a3cce945343636d60a389e7d | [
"BSD-3-Clause"
] | 3 | 2019-01-14T19:08:07.000Z | 2021-01-19T11:57:16.000Z | examples/hardware/dsme/analyze.py | CometOS/CometOS | bfb77e185db023b4a3cce945343636d60a389e7d | [
"BSD-3-Clause"
] | 1 | 2016-11-11T14:35:03.000Z | 2016-11-11T14:35:03.000Z | examples/hardware/dsme/analyze.py | CometOS/CometOS | bfb77e185db023b4a3cce945343636d60a389e7d | [
"BSD-3-Clause"
] | 2 | 2020-09-14T08:27:09.000Z | 2020-10-19T14:41:48.000Z | #!/usr/bin/env python
import re
import sys

# Per-sender transmission statistics, and the set of sequence numbers
# already seen from each sender (for duplicate detection).
nodes = {}
seqs = {}

with open(sys.argv[1]) as log:
    for l in log:
        # Lines of interest look like: !<sender>!<receiver>!<T|R>!<d|->!<seq>
        m = re.search('!(0x.*)!(0x.*)!(.)!(.)!(.*)', l)
        if not m:
            continue
        sender = m.group(1)
        receiver = m.group(2)
        direction = m.group(3)
        dummy = (m.group(4) == "d")
        seq = m.group(5)
        if dummy:
            # Dummy frames do not count towards the statistics.
            continue
        if sender not in nodes:
            nodes[sender] = {"sent": 0, "duplicates": 0, "received": 0}
            seqs[sender] = set()
        if direction == "T":
            nodes[sender]["sent"] += 1
        elif direction == "R":
            if seq in seqs[sender]:
                nodes[sender]["duplicates"] += 1
            else:
                nodes[sender]["received"] += 1
                seqs[sender].add(seq)

for n in nodes:
    sent = nodes[n]["sent"]
    # Packet reception ratio; guard against senders that were only ever
    # received (sent == 0), which previously raised ZeroDivisionError.
    nodes[n]["PRR"] = nodes[n]["received"] / float(sent) if sent else 0.0

# print() with a single argument works on both Python 2 and 3; the original
# used the Python-2-only `print nodes` statement.
print(nodes)
acf7995a185a8556d7365f2215aad6736cd7a12e | 791 | py | Python | python3/yaml/yaml1.py | jtraver/dev | c7cd2181594510a8fa27e7325566ed2d79371624 | [
"MIT"
] | null | null | null | python3/yaml/yaml1.py | jtraver/dev | c7cd2181594510a8fa27e7325566ed2d79371624 | [
"MIT"
] | null | null | null | python3/yaml/yaml1.py | jtraver/dev | c7cd2181594510a8fa27e7325566ed2d79371624 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#!/usr/bin/python
"""Round-trip a dict of component versions through a YAML file.

Dumps `tags1` to tags1.yml, reads it back, and reports whether the
round trip preserved the data.
"""
import yaml

print("yaml = %s" % str(yaml))
print("yaml = %s" % dir(yaml))

# http://stackoverflow.com/questions/12470665/yaml-writing-data-into-yaml-through-python
tags1 = {
    'aerospike-client-c': '3.0.74',
    'aerospike-server': '3.3.10',
    'aerospike-server-enterprise': '3.3.10',
    'aerospike-tools': '3.3.10',
    'client': '2.1.40',
    'devtools': '1.0.0',
    'release': '1.0.11',
    'dev': '0.0.1'  # for development testing only
}

with open('tags1.yml', 'w') as outfile:
    outfile.write(yaml.dump(tags1, default_flow_style=False))

# The original called the Python 2 `file()` builtin, which does not exist on
# Python 3 (the shebang requests python3); open the file explicitly instead,
# and close it deterministically via the context manager.
with open('tags1.yml') as infile:
    tags2 = yaml.load(infile, Loader=yaml.FullLoader)
print("tags2 = %s" % str(tags2))

if tags1 == tags2:
    print("load and dump worked")
else:
    print("load and dump did not work")
acf7995c6e4eb9224b0ff8f1276a6dc122d2e657 | 42,901 | py | Python | src/gqrcode/parse.py | atareao/gqrcode | fc5a89c13c4b9d98a3a28a16ddeba1175922f240 | [
"MIT"
] | 7 | 2019-05-25T18:49:35.000Z | 2022-02-10T13:41:37.000Z | src/gqrcode/parse.py | atareao/gqrcode | fc5a89c13c4b9d98a3a28a16ddeba1175922f240 | [
"MIT"
] | null | null | null | src/gqrcode/parse.py | atareao/gqrcode | fc5a89c13c4b9d98a3a28a16ddeba1175922f240 | [
"MIT"
] | 5 | 2017-07-19T17:17:53.000Z | 2020-07-05T07:50:57.000Z | r'''Parse strings using a specification based on the Python format() syntax.
``parse()`` is the opposite of ``format()``
The module is set up to only export ``parse()``, ``search()`` and
``findall()`` when ``import *`` is used:
>>> from parse import *
From there it's a simple thing to parse a string:
>>> parse("It's {}, I love it!", "It's spam, I love it!")
<Result ('spam',) {}>
>>> _[0]
'spam'
Or to search a string for some pattern:
>>> search('Age: {:d}\n', 'Name: Rufus\nAge: 42\nColor: red\n')
<Result (42,) {}>
Or find all the occurrences of some pattern in a string:
>>> ''.join(r.fixed[0] for r in findall(">{}<", "<p>the <b>bold</b> text</p>"))
'the bold text'
If you're going to use the same pattern to match lots of strings you can
compile it once:
>>> from parse import compile
>>> p = compile("It's {}, I love it!")
>>> print(p)
<Parser "It's {}, I love it!">
>>> p.parse("It's spam, I love it!")
<Result ('spam',) {}>
("compile" is not exported for ``import *`` usage as it would override the
built-in ``compile()`` function)
Format Syntax
-------------
A basic version of the `Format String Syntax`_ is supported with anonymous
(fixed-position), named and formatted fields::
{[field name]:[format spec]}
Field names must be a valid Python identifiers, including dotted names;
element indexes imply dictionaries (see below for example).
Numbered fields are also not supported: the result of parsing will include
the parsed fields in the order they are parsed.
The conversion of fields to types other than strings is done based on the
type in the format specification, which mirrors the ``format()`` behaviour.
There are no "!" field conversions like ``format()`` has.
Some simple parse() format string examples:
>>> parse("Bring me a {}", "Bring me a shrubbery")
<Result ('shrubbery',) {}>
>>> r = parse("The {} who say {}", "The knights who say Ni!")
>>> print(r)
<Result ('knights', 'Ni!') {}>
>>> print(r.fixed)
('knights', 'Ni!')
>>> r = parse("Bring out the holy {item}", "Bring out the holy hand grenade")
>>> print(r)
<Result () {'item': 'hand grenade'}>
>>> print(r.named)
{'item': 'hand grenade'}
>>> print(r['item'])
hand grenade
Dotted names and indexes are possible though the application must make
additional sense of the result:
>>> r = parse("Mmm, {food.type}, I love it!", "Mmm, spam, I love it!")
>>> print(r)
<Result () {'food.type': 'spam'}>
>>> print(r.named)
{'food.type': 'spam'}
>>> print(r['food.type'])
spam
>>> r = parse("My quest is {quest[name]}", "My quest is to seek the holy grail!")
>>> print(r)
<Result () {'quest': {'name': 'to seek the holy grail!'}}>
>>> print(r['quest'])
{'name': 'to seek the holy grail!'}
>>> print(r['quest']['name'])
to seek the holy grail!
Format Specification
--------------------
Most often a straight format-less ``{}`` will suffice where a more complex
format specification might have been used.
Most of `format()`'s `Format Specification Mini-Language`_ is supported:
[[fill]align][0][width][.precision][type]
The differences between `parse()` and `format()` are:
- The align operators will cause spaces (or specified fill character) to be
stripped from the parsed value. The width is not enforced; it just indicates
there may be whitespace or "0"s to strip.
- Numeric parsing will automatically handle a "0b", "0o" or "0x" prefix.
That is, the "#" format character is handled automatically by d, b, o
and x formats. For "d" any will be accepted, but for the others the correct
prefix must be present if at all.
- Numeric sign is handled automatically.
- The thousands separator is handled automatically if the "n" type is used.
- The types supported are a slightly different mix to the format() types. Some
format() types come directly over: "d", "n", "%", "f", "e", "b", "o" and "x".
In addition some regular expression character group types "D", "w", "W", "s"
and "S" are also available.
- The "e" and "g" types are case-insensitive so there is not need for
the "E" or "G" types.
===== =========================================== ========
Type Characters Matched Output
===== =========================================== ========
w Letters and underscore str
W Non-letter and underscore str
s Whitespace str
S Non-whitespace str
d Digits (effectively integer numbers) int
D Non-digit str
n Numbers with thousands separators (, or .) int
% Percentage (converted to value/100.0) float
f Fixed-point numbers float
e Floating-point numbers with exponent float
e.g. 1.1e-10, NAN (all case insensitive)
g General number format (either d, f or e) float
b Binary numbers int
o Octal numbers int
x Hexadecimal numbers (lower and upper case) int
ti ISO 8601 format date/time datetime
e.g. 1972-01-20T10:21:36Z ("T" and "Z"
optional)
te RFC2822 e-mail format date/time datetime
e.g. Mon, 20 Jan 1972 10:21:36 +1000
tg Global (day/month) format date/time datetime
e.g. 20/1/1972 10:21:36 AM +1:00
ta US (month/day) format date/time datetime
e.g. 1/20/1972 10:21:36 PM +10:30
tc ctime() format date/time datetime
e.g. Sun Sep 16 01:03:52 1973
th HTTP log format date/time datetime
e.g. 21/Nov/2011:00:07:11 +0000
ts Linux system log format date/time datetime
e.g. Nov 9 03:37:44
tt Time time
e.g. 10:21:36 PM -5:30
===== =========================================== ========
Some examples of typed parsing with ``None`` returned if the typing
does not match:
>>> parse('Our {:d} {:w} are...', 'Our 3 weapons are...')
<Result (3, 'weapons') {}>
>>> parse('Our {:d} {:w} are...', 'Our three weapons are...')
>>> parse('Meet at {:tg}', 'Meet at 1/2/2011 11:00 PM')
<Result (datetime.datetime(2011, 2, 1, 23, 0),) {}>
And messing about with alignment:
>>> parse('with {:>} herring', 'with a herring')
<Result ('a',) {}>
>>> parse('spam {:^} spam', 'spam lovely spam')
<Result ('lovely',) {}>
Note that the "center" alignment does not test to make sure the value is
centered - it just strips leading and trailing whitespace.
Some notes for the date and time types:
- the presence of the time part is optional (including ISO 8601, starting
at the "T"). A full datetime object will always be returned; the time
will be set to 00:00:00. You may also specify a time without seconds.
- when a seconds amount is present in the input fractions will be parsed
to give microseconds.
- except in ISO 8601 the day and month digits may be 0-padded.
- the date separator for the tg and ta formats may be "-" or "/".
- named months (abbreviations or full names) may be used in the ta and tg
formats in place of numeric months.
- as per RFC 2822 the e-mail format may omit the day (and comma), and the
seconds but nothing else.
- hours greater than 12 will be happily accepted.
- the AM/PM are optional, and if PM is found then 12 hours will be added
to the datetime object's hours amount - even if the hour is greater
than 12 (for consistency.)
- in ISO 8601 the "Z" (UTC) timezone part may be a numeric offset
- timezones are specified as "+HH:MM" or "-HH:MM". The hour may be one or two
digits (0-padded is OK.) Also, the ":" is optional.
- the timezone is optional in all except the e-mail format (it defaults to
UTC.)
- named timezones are not handled yet.
Note: attempting to match too many datetime fields in a single parse() will
currently result in a resource allocation issue. A TooManyFields exception
will be raised in this instance. The current limit is about 15. It is hoped
that this limit will be removed one day.
.. _`Format String Syntax`:
http://docs.python.org/library/string.html#format-string-syntax
.. _`Format Specification Mini-Language`:
http://docs.python.org/library/string.html#format-specification-mini-language
Result and Match Objects
------------------------
The result of a ``parse()`` and ``search()`` operation is either ``None`` (no match), a
``Result`` instance or a ``Match`` instance if ``evaluate_result`` is False.
The ``Result`` instance has three attributes:
fixed
A tuple of the fixed-position, anonymous fields extracted from the input.
named
A dictionary of the named fields extracted from the input.
spans
A dictionary mapping the names and fixed position indices matched to a
2-tuple slice range of where the match occurred in the input.
The span does not include any stripped padding (alignment or width).
The ``Match`` instance has one method:
evaluate_result()
Generates and returns a ``Result`` instance for this ``Match`` object.
Custom Type Conversions
-----------------------
If you wish to have matched fields automatically converted to your own type you
may pass in a dictionary of type conversion information to ``parse()`` and
``compile()``.
The converter will be passed the field string matched. Whatever it returns
will be substituted in the ``Result`` instance for that field.
Your custom type conversions may override the builtin types if you supply one
with the same identifier.
>>> def shouty(string):
... return string.upper()
...
>>> parse('{:shouty} world', 'hello world', dict(shouty=shouty))
<Result ('HELLO',) {}>
If the type converter has the optional ``pattern`` attribute, it is used as
regular expression for better pattern matching (instead of the default one).
>>> def parse_number(text):
... return int(text)
>>> parse_number.pattern = r'\d+'
>>> parse('Answer: {number:Number}', 'Answer: 42', dict(Number=parse_number))
<Result () {'number': 42}>
>>> _ = parse('Answer: {:Number}', 'Answer: Alice', dict(Number=parse_number))
>>> assert _ is None, "MISMATCH"
You can also use the ``with_pattern(pattern)`` decorator to add this
information to a type converter function:
>>> from parse import with_pattern
>>> @with_pattern(r'\d+')
... def parse_number(text):
... return int(text)
>>> parse('Answer: {number:Number}', 'Answer: 42', dict(Number=parse_number))
<Result () {'number': 42}>
A more complete example of a custom type might be:
>>> yesno_mapping = {
... "yes": True, "no": False,
... "on": True, "off": False,
... "true": True, "false": False,
... }
>>> @with_pattern(r"|".join(yesno_mapping))
... def parse_yesno(text):
... return yesno_mapping[text.lower()]
----
**Version history (in brief)**:
- 1.8.2 clarify message on invalid format specs (thanks Rick Teachey)
- 1.8.1 ensure bare hexadecimal digits are not matched
- 1.8.0 support manual control over result evaluation (thanks Timo Furrer)
- 1.7.0 parse dict fields (thanks Mark Visser) and adapted to allow
more than 100 re groups in Python 3.5+ (thanks David King)
- 1.6.6 parse Linux system log dates (thanks Alex Cowan)
- 1.6.5 handle precision in float format (thanks Levi Kilcher)
- 1.6.4 handle pipe "|" characters in parse string (thanks Martijn Pieters)
- 1.6.3 handle repeated instances of named fields, fix bug in PM time
overflow
- 1.6.2 fix logging to use local, not root logger (thanks Necku)
- 1.6.1 be more flexible regarding matched ISO datetimes and timezones in
general, fix bug in timezones without ":" and improve docs
- 1.6.0 add support for optional ``pattern`` attribute in user-defined types
(thanks Jens Engel)
- 1.5.3 fix handling of question marks
- 1.5.2 fix type conversion error with dotted names (thanks Sebastian Thiel)
- 1.5.1 implement handling of named datetime fields
- 1.5 add handling of dotted field names (thanks Sebastian Thiel)
- 1.4.1 fix parsing of "0" in int conversion (thanks James Rowe)
- 1.4 add __getitem__ convenience access on Result.
- 1.3.3 fix Python 2.5 setup.py issue.
- 1.3.2 fix Python 3.2 setup.py issue.
- 1.3.1 fix a couple of Python 3.2 compatibility issues.
- 1.3 added search() and findall(); removed compile() from ``import *``
export as it overwrites builtin.
- 1.2 added ability for custom and override type conversions to be
provided; some cleanup
- 1.1.9 to keep things simpler number sign is handled automatically;
significant robustification in the face of edge-case input.
- 1.1.8 allow "d" fields to have number base "0x" etc. prefixes;
fix up some field type interactions after stress-testing the parser;
implement "%" type.
- 1.1.7 Python 3 compatibility tweaks (2.5 to 2.7 and 3.2 are supported).
- 1.1.6 add "e" and "g" field types; removed redundant "h" and "X";
removed need for explicit "#".
- 1.1.5 accept textual dates in more places; Result now holds match span
positions.
- 1.1.4 fixes to some int type conversion; implemented "=" alignment; added
date/time parsing with a variety of formats handled.
- 1.1.3 type conversion is automatic based on specified field types. Also added
"f" and "n" types.
- 1.1.2 refactored, added compile() and limited ``from parse import *``
- 1.1.1 documentation improvements
- 1.1.0 implemented more of the `Format Specification Mini-Language`_
and removed the restriction on mixing fixed-position and named fields
- 1.0.0 initial release
This code is copyright 2012-2017 Richard Jones <richard@python.org>
See the end of the source file for the license of use.
'''
__version__ = '1.8.2'
# yes, I now have two problems
import re
import sys
from datetime import datetime, time, tzinfo, timedelta
from functools import partial
import logging
__all__ = 'parse search findall with_pattern'.split()
log = logging.getLogger(__name__)
def with_pattern(pattern):
    """Attach a regular expression pattern matcher to a custom type converter
    function.

    The decorated converter gains a :attr:`pattern` attribute, which the
    parser uses as the regular expression for matching the field::

        @with_pattern(r"\\d+")
        def parse_number(text):
            return int(text)

    :param pattern: regular expression pattern (as text)
    :return: wrapped function
    """
    def attach(converter):
        converter.pattern = pattern
        return converter
    return attach
def int_convert(base):
    '''Build a converter turning matched text into an integer.

    The text may carry a leading sign and a "0b"/"0o"/"0x" base prefix
    (which overrides *base*); any characters that are not valid digits
    for the effective base are discarded before conversion.
    '''
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    prefix_bases = {'b': 2, 'B': 2, 'o': 8, 'O': 8, 'x': 16, 'X': 16}

    def convert(string, match, base=base):
        sign = -1 if string[0] == '-' else 1
        # An explicit base prefix ("0x" etc.) overrides the requested base.
        if string[0] == '0' and len(string) > 1:
            base = prefix_bases.get(string[1], base)
        # Strip everything that is not a valid digit for this base
        # (thousands separators, the prefix letter itself, ...).
        allowed = digits[:base]
        cleaned = re.sub('[^%s]' % allowed, '', string.lower())
        return sign * int(cleaned, base)

    return convert
def percentage(string, match):
    # "95.5%" -> 0.955: drop the trailing '%' and scale into a fraction.
    numeric_part = string[:-1]
    return float(numeric_part) / 100.
class FixedTzOffset(tzinfo):
    """A tzinfo with a constant offset, in minutes east of UTC."""

    ZERO = timedelta(0)

    def __init__(self, offset, name):
        # offset: minutes east of UTC; name: display string, e.g. "+10:30"
        self._offset = timedelta(minutes=offset)
        self._name = name

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<%s %s %s>' % (cls_name, self._name, self._offset)

    def utcoffset(self, dt):
        # Constant, regardless of the datetime queried.
        return self._offset

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving.
        return self.ZERO

    def __eq__(self, other):
        return self._name == other._name and self._offset == other._offset
# Month-name (abbreviated and full) to month-number lookup used by
# date_convert() when a named month was captured.
MONTHS_MAP = dict(
    Jan=1, January=1,
    Feb=2, February=2,
    Mar=3, March=3,
    Apr=4, April=4,
    May=5,
    Jun=6, June=6,
    Jul=7, July=7,
    Aug=8, August=8,
    Sep=9, September=9,
    Oct=10, October=10,
    Nov=11, November=11,
    Dec=12, December=12
)

# Regex fragments shared by the datetime field patterns.
DAYS_PAT = '(Mon|Tue|Wed|Thu|Fri|Sat|Sun)'
MONTHS_PAT = '(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)'
ALL_MONTHS_PAT = '(%s)' % '|'.join(MONTHS_MAP)
TIME_PAT = r'(\d{1,2}:\d{1,2}(:\d{1,2}(\.\d+)?)?)'  # HH:MM[:SS[.ffffff]]
AM_PAT = r'(\s+[AP]M)'  # optional AM/PM suffix
TZ_PAT = r'(\s+[-+]\d\d?:?\d\d)'  # numeric timezone offset, ":" optional
def date_convert(string, match, ymd=None, mdy=None, dmy=None,
        d_m_y=None, hms=None, am=None, tz=None, mm=None, dd=None):
    '''Convert the incoming string containing some date / time info into a
    datetime instance.

    The keyword arguments are regex group indexes into ``match`` saying
    where each component was captured:

    - ymd / mdy / dmy: one group holding the whole date in that order
    - d_m_y: a 3-tuple of group indexes for day, month and year
    - mm / dd: month and day groups (year defaults to the current year)
    - hms / am / tz: time-of-day, AM/PM marker and timezone offset groups

    If no date component is given, a ``time`` instance is returned
    instead of a ``datetime``.
    '''
    groups = match.groups()
    time_only = False
    if mm and dd:
        # Only month/day were captured: assume the current year.
        y=datetime.today().year
        m=groups[mm]
        d=groups[dd]
    elif ymd is not None:
        y, m, d = re.split('[-/\s]', groups[ymd])
    elif mdy is not None:
        m, d, y = re.split('[-/\s]', groups[mdy])
    elif dmy is not None:
        d, m, y = re.split('[-/\s]', groups[dmy])
    elif d_m_y is not None:
        d, m, y = d_m_y
        d = groups[d]
        m = groups[m]
        y = groups[y]
    else:
        time_only = True
    # Time-of-day defaults to midnight when absent.
    H = M = S = u = 0
    if hms is not None and groups[hms]:
        t = groups[hms].split(':')
        if len(t) == 2:
            H, M = t
        else:
            H, M, S = t
            if '.' in S:
                # Fractional seconds become microseconds.
                S, u = S.split('.')
                u = int(float('.' + u) * 1000000)
            S = int(S)
        H = int(H)
        M = int(M)
    day_incr = False
    if am is not None:
        am = groups[am]
        if am and am.strip() == 'PM':
            # "PM" always adds 12 hours (even past 12, for consistency);
            # overflowing past 23 rolls into the next day.
            H += 12
            if H > 23:
                day_incr = True
                H -= 24
    if tz is not None:
        tz = groups[tz]
        if tz == 'Z':
            tz = FixedTzOffset(0, 'UTC')
        elif tz:
            tz = tz.strip()
            if tz.isupper():
                # TODO use the awesome python TZ module?
                pass
            else:
                # Numeric offset: "+H:MM", "+HH:MM", "+HMM" or "+HHMM".
                sign = tz[0]
                if ':' in tz:
                    tzh, tzm = tz[1:].split(':')
                elif len(tz) == 4:  # 'snnn'
                    tzh, tzm = tz[1], tz[2:4]
                else:
                    tzh, tzm = tz[1:3], tz[3:5]
                offset = int(tzm) + int(tzh) * 60
                if sign == '-':
                    offset = -offset
                tz = FixedTzOffset(offset, tz)
    if time_only:
        d = time(H, M, S, u, tzinfo=tz)
    else:
        y = int(y)
        if m.isdigit():
            m = int(m)
        else:
            # Named month (e.g. "Jan" or "January").
            m = MONTHS_MAP[m]
        d = int(d)
        d = datetime(y, m, d, H, M, S, u, tzinfo=tz)
        if day_incr:
            # PM overflow detected above: push to the following day.
            d = d + timedelta(days=1)
    return d
class TooManyFields(ValueError):
    """Raised when the format needs more regex groups than `re` supports."""
    pass
class RepeatedNameError(ValueError):
    """Raised when a named field is repeated with conflicting type specs."""
    pass
# note: {} are handled separately
# note: I don't use r'' here because Sublime Text 2 syntax highlight has a fit
# Matches regex metacharacters in literal format-string text so they can be
# escaped before being embedded in the generated expression.
REGEX_SAFETY = re.compile('([?\\\\.[\]()*+\^$!\|])')

# allowed field types: the single-character types plus the two-character
# datetime types ("ti", "te", "ta", "tg", "tc", "th", "ts", "tt")
ALLOWED_TYPES = set(list('nbox%fegwWdDsS') +
    ['t' + c for c in 'ieahgcts'])
def extract_format(aformat, extra_types):
    '''Pull apart the format [[fill]align][0][width][.precision][type]

    Returns ``locals()``, i.e. a dict keyed by the local variable names
    below ('fill', 'align', 'zero', 'width', 'aformat', ...).
    NOTE(review): the 'precision' key is only present when the spec
    contained a '.', and the leftover type string is returned under the
    'aformat' key -- the (unseen) caller must look it up under that name;
    verify against Parser._handle_field.
    '''
    fill = align = None
    if aformat[0] in '<>=^':
        # Bare alignment character, no fill.
        align = aformat[0]
        aformat = aformat[1:]
    elif len(aformat) > 1 and aformat[1] in '<>=^':
        # Fill character followed by alignment.
        fill = aformat[0]
        align = aformat[1]
        aformat = aformat[2:]
    zero = False
    if aformat and aformat[0] == '0':
        zero = True
        aformat = aformat[1:]
    # Consume the (optional) width digits.
    width = ''
    while aformat:
        if not aformat[0].isdigit():
            break
        width += aformat[0]
        aformat = aformat[1:]
    if aformat.startswith('.'):
        # Precision isn't needed but we need to capture it so that
        # the ValueError isn't raised.
        aformat = aformat[1:]  # drop the '.'
        precision = ''
        while aformat:
            if not aformat[0].isdigit():
                break
            precision += aformat[0]
            aformat = aformat[1:]
    # the rest is the type, if present
    if aformat and aformat not in ALLOWED_TYPES and aformat not in extra_types:
        raise ValueError('format spec %r not recognised' % aformat)
    return locals()
# Splits a format string into literal text and "{...}" field tokens,
# also matching the "{{" / "}}" brace escapes.
PARSE_RE = re.compile(r"""({{|}}|{\w*(?:(?:\.\w+)|(?:\[[^\]]+\]))*(?::[^}]+)?})""")
class Parser(object):
'''Encapsulate a format string that may be used to parse other strings.
'''
    def __init__(self, aformat, extra_types={}):
        '''Compile *aformat* into the matching regular expression.

        NOTE(review): the mutable default for *extra_types* is shared
        across calls; it appears to only be read, so safe in practice.
        '''
        # a mapping of a name as in {hello.world} to a regex-group compatible
        # name, like hello__world Its used to prevent the transformation of
        # name-to-group and group to name to fail subtly, such as in:
        # hello_.world-> hello___world->hello._world
        self._group_to_name_map = {}
        # also store the original field name to group name mapping to allow
        # multiple instances of a name in the format string
        self._name_to_group_map = {}
        # and to sanity check the repeated instances store away the first
        # field type specification for the named field
        self._name_types = {}
        self._format = aformat
        self._extra_types = extra_types
        self._fixed_fields = []
        self._named_fields = []
        self._group_index = 0
        self._type_conversions = {}
        self._expression = self._generate_expression()
        # The anchored/unanchored compiled regexes are built lazily by the
        # _match_re / _search_re properties.
        self.__search_re = None
        self.__match_re = None

        log.debug('format %r -> %r' % (aformat, self._expression))
def __repr__(self):
if len(self._format) > 20:
return '<%s %r>' % (self.__class__.__name__,
self._format[:17] + '...')
return '<%s %r>' % (self.__class__.__name__, self._format)
@property
def _search_re(self):
if self.__search_re is None:
try:
self.__search_re = re.compile(self._expression,
re.IGNORECASE | re.DOTALL)
except AssertionError:
# access error through sys to keep py3k and backward compat
e = str(sys.exc_info()[1])
if e.endswith('this version only supports 100 named groups'):
raise TooManyFields('sorry, you are attempting to parse '
'too many complex fields')
return self.__search_re
@property
def _match_re(self):
if self.__match_re is None:
expression = '^%s$' % self._expression
try:
self.__match_re = re.compile(expression,
re.IGNORECASE | re.DOTALL)
except AssertionError:
# access error through sys to keep py3k and backward compat
e = str(sys.exc_info()[1])
if e.endswith('this version only supports 100 named groups'):
raise TooManyFields('sorry, you are attempting to parse '
'too many complex fields')
except re.error:
raise NotImplementedError("Group names (e.g. (?P<name>) can "
"cause failure, as they are not escaped properly: '%s'" %
expression)
return self.__match_re
def parse(self, string, evaluate_result=True):
'''Match my format to the string exactly.
Return a Result or Match instance or None if there's no match.
'''
m = self._match_re.match(string)
if m is None:
return None
if evaluate_result:
return self.evaluate_result(m)
return Match(self, m)
def search(self, string, pos=0, endpos=None, evaluate_result=True):
'''Search the string for my format.
Optionally start the search at "pos" character index and limit the
search to a maximum index of endpos - equivalent to
search(string[:endpos]).
If the ``evaluate_result`` argument is set to ``False`` a
Match instance is returned instead of the actual Result instance.
Return either a Result instance or None if there's no match.
'''
if endpos is None:
endpos = len(string)
m = self._search_re.search(string, pos, endpos)
if m is None:
return None
if evaluate_result:
return self.evaluate_result(m)
return Match(self, m)
def findall(self, string, pos=0, endpos=None, extra_types={}, evaluate_result=True):
'''Search "string" for the all occurrances of "format".
Optionally start the search at "pos" character index and limit the
search to a maximum index of endpos - equivalent to
search(string[:endpos]).
Returns an iterator that holds Result or Match instances for each format match
found.
'''
if endpos is None:
endpos = len(string)
return ResultIterator(self, string, pos, endpos, evaluate_result=evaluate_result)
def _expand_named_fields(self, named_fields):
result = {}
for field, value in named_fields.items():
# split 'aaa[bbb][ccc]...' into 'aaa' and '[bbb][ccc]...'
basename, subkeys = re.match(r'([^\[]+)(.*)', field).groups()
# create nested dictionaries {'aaa': {'bbb': {'ccc': ...}}}
d = result
k = basename
if subkeys:
for subkey in re.findall(r'\[[^\]]+\]', subkeys):
d = d.setdefault(k,{})
k = subkey[1:-1]
# assign the value to the last key
d[k] = value
return result
def evaluate_result(self, m):
'''Generate a Result instance for the given regex match object'''
# ok, figure the fixed fields we've pulled out and type convert them
fixed_fields = list(m.groups())
for n in self._fixed_fields:
if n in self._type_conversions:
fixed_fields[n] = self._type_conversions[n](fixed_fields[n], m)
fixed_fields = tuple(fixed_fields[n] for n in self._fixed_fields)
# grab the named fields, converting where requested
groupdict = m.groupdict()
named_fields = {}
name_map = {}
for k in self._named_fields:
korig = self._group_to_name_map[k]
name_map[korig] = k
if k in self._type_conversions:
value = self._type_conversions[k](groupdict[k], m)
else:
value = groupdict[k]
named_fields[korig] = value
# now figure the match spans
spans = dict((n, m.span(name_map[n])) for n in named_fields)
spans.update((i, m.span(n + 1))
for i, n in enumerate(self._fixed_fields))
# and that's our result
return Result(fixed_fields, self._expand_named_fields(named_fields), spans)
def _regex_replace(self, match):
return '\\' + match.group(1)
def _generate_expression(self):
# turn my _format attribute into the _expression attribute
e = []
for part in PARSE_RE.split(self._format):
if not part:
continue
elif part == '{{':
e.append(r'\{')
elif part == '}}':
e.append(r'\}')
elif part[0] == '{':
# this will be a braces-delimited field to handle
e.append(self._handle_field(part))
else:
# just some text to match
e.append(REGEX_SAFETY.sub(self._regex_replace, part))
return ''.join(e)
def _to_group_name(self, field):
# return a version of field which can be used as capture group, even
# though it might contain '.'
group = field.replace('.', '_').replace('[', '_').replace(']', '_')
# make sure we don't collide ("a.b" colliding with "a_b")
n = 1
while group in self._group_to_name_map:
n += 1
if '.' in field:
group = field.replace('.', '_' * n)
elif '_' in field:
group = field.replace('_', '_' * n)
else:
raise KeyError('duplicated group name %r' % (field, ))
# save off the mapping
self._group_to_name_map[group] = field
self._name_to_group_map[field] = group
return group
def _handle_field(self, field):
# first: lose the braces
field = field[1:-1]
# now figure whether this is an anonymous or named field, and whether
# there's any format specification
aformat = ''
if field and field[0].isalpha():
if ':' in field:
name, aformat = field.split(':')
else:
name = field
if name in self._name_to_group_map:
if self._name_types[name] != aformat:
raise RepeatedNameError('field type %r for field "%s" '
'does not match previous seen type %r' % (aformat,
name, self._name_types[name]))
group = self._name_to_group_map[name]
# match previously-seen value
return '(?P=%s)' % group
else:
group = self._to_group_name(name)
self._name_types[name] = aformat
self._named_fields.append(group)
# this will become a group, which must not contain dots
wrap = '(?P<%s>%%s)' % group
else:
self._fixed_fields.append(self._group_index)
wrap = '(%s)'
if ':' in field:
aformat = field[1:]
group = self._group_index
# simplest case: no type specifier ({} or {name})
if not aformat:
self._group_index += 1
return wrap % '.+?'
# decode the format specification
aformat = extract_format(aformat, self._extra_types)
# figure type conversions, if any
atype = aformat['type']
is_numeric = atype and atype in 'n%fegdobh'
if atype in self._extra_types:
type_converter = self._extra_types[atype]
s = getattr(type_converter, 'pattern', r'.+?')
def f(string, m):
return type_converter(string)
self._type_conversions[group] = f
elif atype == 'n':
s = '\d{1,3}([,.]\d{3})*'
self._group_index += 1
self._type_conversions[group] = int_convert(10)
elif atype == 'b':
s = '(0[bB])?[01]+'
self._type_conversions[group] = int_convert(2)
self._group_index += 1
elif atype == 'o':
s = '(0[oO])?[0-7]+'
self._type_conversions[group] = int_convert(8)
self._group_index += 1
elif atype == 'x':
s = '(0[xX])?[0-9a-fA-F]+'
self._type_conversions[group] = int_convert(16)
self._group_index += 1
elif atype == '%':
s = r'\d+(\.\d+)?%'
self._group_index += 1
self._type_conversions[group] = percentage
elif atype == 'f':
s = r'\d+\.\d+'
self._type_conversions[group] = lambda s, m: float(s)
elif atype == 'e':
s = r'\d+\.\d+[eE][-+]?\d+|nan|NAN|[-+]?inf|[-+]?INF'
self._type_conversions[group] = lambda s, m: float(s)
elif atype == 'g':
s = r'\d+(\.\d+)?([eE][-+]?\d+)?|nan|NAN|[-+]?inf|[-+]?INF'
self._group_index += 2
self._type_conversions[group] = lambda s, m: float(s)
elif atype == 'd':
s = r'\d+|0[xX][0-9a-fA-F]+|\d+|0[bB][01]+|0[oO][0-7]+'
self._type_conversions[group] = int_convert(10)
elif atype == 'ti':
s = r'(\d{4}-\d\d-\d\d)((\s+|T)%s)?(Z|\s*[-+]\d\d:?\d\d)?' % \
TIME_PAT
n = self._group_index
self._type_conversions[group] = partial(date_convert, ymd=n + 1,
hms=n + 4, tz=n + 7)
self._group_index += 7
elif atype == 'tg':
s = r'(\d{1,2}[-/](\d{1,2}|%s)[-/]\d{4})(\s+%s)?%s?%s?' % (
ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT)
n = self._group_index
self._type_conversions[group] = partial(date_convert, dmy=n + 1,
hms=n + 5, am=n + 8, tz=n + 9)
self._group_index += 9
elif atype == 'ta':
s = r'((\d{1,2}|%s)[-/]\d{1,2}[-/]\d{4})(\s+%s)?%s?%s?' % (
ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT)
n = self._group_index
self._type_conversions[group] = partial(date_convert, mdy=n + 1,
hms=n + 5, am=n + 8, tz=n + 9)
self._group_index += 9
elif atype == 'te':
# this will allow microseconds through if they're present, but meh
s = r'(%s,\s+)?(\d{1,2}\s+%s\s+\d{4})\s+%s%s' % (DAYS_PAT,
MONTHS_PAT, TIME_PAT, TZ_PAT)
n = self._group_index
self._type_conversions[group] = partial(date_convert, dmy=n + 3,
hms=n + 5, tz=n + 8)
self._group_index += 8
elif atype == 'th':
# slight flexibility here from the stock Apache format
s = r'(\d{1,2}[-/]%s[-/]\d{4}):%s%s' % (MONTHS_PAT, TIME_PAT,
TZ_PAT)
n = self._group_index
self._type_conversions[group] = partial(date_convert, dmy=n + 1,
hms=n + 3, tz=n + 6)
self._group_index += 6
elif atype == 'tc':
s = r'(%s)\s+%s\s+(\d{1,2})\s+%s\s+(\d{4})' % (
DAYS_PAT, MONTHS_PAT, TIME_PAT)
n = self._group_index
self._type_conversions[group] = partial(date_convert,
d_m_y=(n + 4, n + 3, n + 8), hms=n + 5)
self._group_index += 8
elif atype == 'tt':
s = r'%s?%s?%s?' % (TIME_PAT, AM_PAT, TZ_PAT)
n = self._group_index
self._type_conversions[group] = partial(date_convert, hms=n + 1,
am=n + 4, tz=n + 5)
self._group_index += 5
elif atype == 'ts':
s = r'%s(\s+)(\d+)(\s+)(\d{1,2}:\d{1,2}:\d{1,2})?' % (MONTHS_PAT)
n = self._group_index
self._type_conversions[group] = partial(date_convert, mm=n+1, dd=n+3,
hms=n + 5)
self._group_index += 5
elif atype:
s = r'\%s+' % atype
else:
s = '.+?'
align = aformat['align']
fill = aformat['fill']
# handle some numeric-specific things like fill and sign
if is_numeric:
# prefix with something (align "=" trumps zero)
if align == '=':
# special case - align "=" acts like the zero above but with
# configurable fill defaulting to "0"
if not fill:
fill = '0'
s = '%s*' % fill + s
elif aformat['zero']:
s = '0*' + s
# allow numbers to be prefixed with a sign
s = r'[-+ ]?' + s
if not fill:
fill = ' '
# Place into a group now - this captures the value we want to keep.
# Everything else from now is just padding to be stripped off
if wrap:
s = wrap % s
self._group_index += 1
if aformat['width']:
# all we really care about is that if the format originally
# specified a width then there will probably be padding - without
# an explicit alignment that'll mean right alignment with spaces
# padding
if not align:
align = '>'
if fill in '.\+?*[](){}^$':
fill = '\\' + fill
# align "=" has been handled
if align == '<':
s = '%s%s*' % (s, fill)
elif align == '>':
s = '%s*%s' % (fill, s)
elif align == '^':
s = '%s*%s%s*' % (fill, s, fill)
return s
class Result(object):
    '''Container for the values extracted by a parse() or search().

    Positional (fixed) matches are accessed with result[index]; named
    matches with result['name'].
    '''
    def __init__(self, fixed, named, spans):
        self.fixed = fixed
        self.named = named
        self.spans = spans
    def __getitem__(self, item):
        # integer keys address the positional results; anything else is
        # treated as a named-field lookup
        if not isinstance(item, int):
            return self.named[item]
        return self.fixed[item]
    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<%s %r %r>' % (cls_name, self.fixed, self.named)
class Match(object):
    '''Lazy wrapper around a regex match produced by parse() or search().

    Exposes the parser and the raw regex match object so the caller can
    evaluate the result later via evaluate_result().
    '''
    def __init__(self, parser, match):
        self.parser = parser
        self.match = match
    def evaluate_result(self):
        '''Produce the Result instance for the wrapped regex match.'''
        parser, match = self.parser, self.match
        return parser.evaluate_result(match)
class ResultIterator(object):
    '''Iterator returned by findall().

    Yields one Result (or Match, when evaluate_result is False) per
    occurrence of the format in the searched string.
    '''
    def __init__(self, parser, string, pos, endpos, evaluate_result=True):
        self.parser = parser
        self.string = string
        self.pos = pos
        self.endpos = endpos
        self.evaluate_result = evaluate_result
    def __iter__(self):
        return self
    def __next__(self):
        match = self.parser._search_re.search(self.string, self.pos, self.endpos)
        if match is None:
            raise StopIteration()
        # resume the next search where this match finished
        self.pos = match.end()
        if not self.evaluate_result:
            return Match(self.parser, match)
        return self.parser.evaluate_result(match)
    # pre-py3k compat
    next = __next__
def parse(aformat, string, extra_types=None, evaluate_result=True):
    '''Using "format" attempt to pull values from "string".
    The format must match the string contents exactly. If the value
    you're looking for is instead just a part of the string use
    search().
    If ``evaluate_result`` is True the return value will be an Result instance with two attributes:
     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string
    If ``evaluate_result`` is False the return value will be a Match instance with one method:
     .evaluate_result() - This will return a Result instance like you would get
                          with ``evaluate_result`` set to True
    If the format is invalid a ValueError will be raised.
    See the module documentation for the use of "extra_types".
    In the case there is no match parse() will return None.
    '''
    # None sentinel instead of a shared mutable {} default
    extra_types = {} if extra_types is None else extra_types
    return Parser(aformat, extra_types=extra_types).parse(
        string, evaluate_result=evaluate_result)
def search(aformat, string, pos=0, endpos=None, extra_types=None, evaluate_result=True):
    '''Search "string" for the first occurance of "format".
    The format may occur anywhere within the string. If
    instead you wish for the format to exactly match the string
    use parse().
    Optionally start the search at "pos" character index and limit the search
    to a maximum index of endpos - equivalent to search(string[:endpos]).
    If ``evaluate_result`` is True the return value will be an Result instance with two attributes:
     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string
    If ``evaluate_result`` is False the return value will be a Match instance with one method:
     .evaluate_result() - This will return a Result instance like you would get
                          with ``evaluate_result`` set to True
    If the format is invalid a ValueError will be raised.
    See the module documentation for the use of "extra_types".
    In the case there is no match search() will return None.
    '''
    # None sentinel instead of a shared mutable {} default
    extra_types = {} if extra_types is None else extra_types
    return Parser(aformat, extra_types=extra_types).search(
        string, pos, endpos, evaluate_result=evaluate_result)
def findall(aformat, string, pos=0, endpos=None, extra_types=None, evaluate_result=True):
    '''Search "string" for the all occurrances of "format".
    You will be returned an iterator that holds Result instances
    for each format match found.
    Optionally start the search at "pos" character index and limit the search
    to a maximum index of endpos - equivalent to search(string[:endpos]).
    If ``evaluate_result`` is True each returned Result instance has two attributes:
     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string
    If ``evaluate_result`` is False each returned value is a Match instance with one method:
     .evaluate_result() - This will return a Result instance like you would get
                          with ``evaluate_result`` set to True
    If the aformat is invalid a ValueError will be raised.
    See the module documentation for the use of "extra_types".
    '''
    # None sentinel instead of a shared mutable {} default
    extra_types = {} if extra_types is None else extra_types
    return Parser(aformat, extra_types=extra_types).findall(
        string, pos, endpos, evaluate_result=evaluate_result)
def compile(aformat, extra_types=None):
    '''Create a Parser instance to parse "format".
    The resultant Parser has a method .parse(string) which
    behaves in the same manner as parse(format, string).
    Use this function if you intend to parse many strings
    with the same format.
    See the module documentation for the use of "extra_types".
    Returns a Parser instance.
    '''
    # NOTE: intentionally shadows the builtin compile(), mirroring re.compile.
    # None sentinel instead of a shared mutable {} default
    extra_types = {} if extra_types is None else extra_types
    return Parser(aformat, extra_types=extra_types)
# Copyright (c) 2012-2013 Richard Jones <richard@python.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# vim: set filetype=python ts=4 sw=4 et si tw=75
| 35.514073 | 113 | 0.594205 |
acf799d9f7613d628b55d2283aa732ada0533721 | 32,896 | py | Python | ivy/functional/ivy/general.py | rush2406/ivy | 4ec75e0d3ff861116711d0496c96a060440b668e | [
"Apache-2.0"
] | null | null | null | ivy/functional/ivy/general.py | rush2406/ivy | 4ec75e0d3ff861116711d0496c96a060440b668e | [
"Apache-2.0"
] | null | null | null | ivy/functional/ivy/general.py | rush2406/ivy | 4ec75e0d3ff861116711d0496c96a060440b668e | [
"Apache-2.0"
] | null | null | null | """
Collection of general Ivy functions.
"""
# global
import gc
import math
import einops
import inspect
import numpy as np
from numbers import Number
from typing import Callable, Any, Union, List, Tuple, Dict, Iterable, Optional
# local
import ivy
from ivy.functional.ivy.device import dev
from ivy.framework_handler import current_framework as _cur_framework
# Per-function result caches populated by cache_fn()
FN_CACHE = dict()
# Positive infinity; used by value_is_nan() for inf/-inf checks
INF = float('inf')
# presumably a default timeout in seconds for module operations -- not used
# in this visible section; verify against the rest of the file
TIMEOUT = 15.0
# presumably the default temporary-file directory -- not used in this
# visible section; verify against the rest of the file
TMP_DIR = '/tmp'
def get_referrers_recursive(item, depth=0, max_depth=None, seen_set=None, local_set=None):
    # Recursively walk gc referrers of `item`, building an ivy.Container tree
    # keyed by object id, for debugging reference leaks. `seen_set` tracks ids
    # already visited globally; `local_set` tracks ids on the current path.
    seen_set = ivy.default(seen_set, set())
    local_set = ivy.default(local_set, set())
    ret_cont = ivy.Container(
        repr=str(item).replace(' ', ''), alphabetical_keys=False, keyword_color_dict={'repr': 'magenta'})
    # filter out the dict of this function's own locals, which gc reports as
    # a referrer (recognised by it containing all four of these keys)
    referrers = [ref for ref in gc.get_referrers(item) if
                 not (isinstance(ref, dict) and
                      min([k in ref for k in ['depth', 'max_depth', 'seen_set', 'local_set']]))]
    local_set.add(str(id(referrers)))
    for ref in referrers:
        ref_id = str(id(ref))
        # skip cycles on the current path and closure cells
        if ref_id in local_set or hasattr(ref, 'cell_contents'):
            continue
        seen = ref_id in seen_set
        seen_set.add(ref_id)
        # deferred recursion so it only runs when the branch below needs it
        refs_rec = lambda: get_referrers_recursive(ref, depth + 1, max_depth, seen_set, local_set)
        this_repr = 'tracked' if seen else str(ref).replace(' ', '')
        if not seen and (not max_depth or depth < max_depth):
            val = ivy.Container(
                repr=this_repr, alphabetical_keys=False, keyword_color_dict={'repr': 'magenta'})
            refs = refs_rec()
            for k, v in refs.items():
                val[k] = v
        else:
            # already-seen or depth-limited branches are recorded as a string
            val = this_repr
        ret_cont[str(ref_id)] = val
    return ret_cont
def is_native_array(x: Any, exclusive: bool = False) -> bool:
    """
    Determines whether the input x is an array native to the current backend.

    :param x: The input to check
    :type x: any
    :param exclusive: Whether to check if the data type is exclusively an array,
                      rather than a variable or traced array.
    :type exclusive: bool, optional
    :return: Boolean, whether or not x is an array.
    """
    try:
        framework = _cur_framework(x)
        return framework.is_native_array(x, exclusive)
    except ValueError:
        # no backend could be resolved for x, so it cannot be a native array
        return False
def is_ivy_array(x: Any, exclusive: bool = False) -> bool:
    """
    Determines whether the input x is an ivy.Array wrapping a native array.

    :param x: The input to check
    :type x: any
    :param exclusive: Whether to check if the data type is exclusively an array,
                      rather than a variable or traced array.
    :type exclusive: bool, optional
    :return: Boolean, whether or not x is an ivy.Array.
    """
    if not isinstance(x, ivy.Array):
        return False
    return ivy.is_native_array(x.data, exclusive)
# noinspection PyShadowingNames
def copy_array(x: Union[ivy.Array, ivy.NativeArray]) -> Union[ivy.Array, ivy.NativeArray]:
    """
    Creates and returns a copy of the input array.

    :param x: The array to copy
    :type x: array
    :return: A copy of the input array.
    """
    framework = _cur_framework(x)
    return framework.copy_array(x)
def array_equal(x0: Union[ivy.Array, ivy.NativeArray], x1: Union[ivy.Array, ivy.NativeArray]) -> bool:
    """
    Determines whether two arrays are equal element-wise across all elements.

    :param x0: The first input array to compare.
    :type x0: array
    :param x1: The second input array to compare.
    :type x1: array
    :return: Boolean, whether or not the input arrays are equal across all elements.
    """
    framework = _cur_framework(x0)
    return framework.array_equal(x0, x1)
def arrays_equal(xs: List[Union[ivy.Array, ivy.NativeArray]]) -> bool:
    """
    Determines whether all input arrays are equal across all elements.

    :param xs: Sequence of arrays to compare for equality
    :type xs: sequence of arrays
    :return: Boolean, whether or not all of the input arrays are equal across
             all elements. An empty or single-element sequence is trivially
             equal, so True is returned (previously an empty sequence raised
             IndexError on ``xs[0]``).
    """
    if not xs:
        return True
    first = xs[0]
    # compare every other array against the first
    return all(array_equal(first, other) for other in xs[1:])
def all_equal(*xs: Iterable[Any], equality_matrix: bool = False)\
        -> Union[bool, Union[ivy.Array, ivy.NativeArray]]:
    """
    Determines whether the inputs are all equal.

    :param xs: inputs to compare.
    :type xs: any
    :param equality_matrix: Whether to return a matrix of equalities comparing each input with every other.
                            Default is False.
    :type equality_matrix: bool, optional
    :return: Boolean, whether or not the inputs are equal, or matrix array of booleans if equality_matrix=True is set.
    """
    # arrays are compared element-wise via array_equal; everything else with ==
    equality_fn = ivy.array_equal if ivy.is_native_array(xs[0]) else lambda a, b: a == b
    if equality_matrix:
        num_arrays = len(xs)
        mat = [[None for _ in range(num_arrays)] for _ in range(num_arrays)]
        for i, xa in enumerate(xs):
            # only compare each unordered pair once; j runs from i upwards
            for j_, xb in enumerate(xs[i:]):
                j = j_ + i
                res = equality_fn(xa, xb)
                if ivy.is_native_array(res):
                    # noinspection PyTypeChecker
                    res = ivy.to_scalar(res)
                # the matrix is symmetric, so fill both (i, j) and (j, i)
                # noinspection PyTypeChecker
                mat[i][j] = res
                # noinspection PyTypeChecker
                mat[j][i] = res
        return ivy.array(mat)
    x0 = xs[0]
    for x in xs[1:]:
        if not equality_fn(x0, x):
            return False
    return True
def to_numpy(x: Union[ivy.Array, ivy.NativeArray]) -> np.ndarray:
    """
    Converts the input array into a numpy array.

    :param x: Input array.
    :type x: array
    :return: A numpy array.
    """
    framework = _cur_framework(x)
    return framework.to_numpy(x)
def to_scalar(x: Union[ivy.Array, ivy.NativeArray]) -> Number:
    """
    Converts a single-element array into a Python scalar.

    :param x: Input array with a single element.
    :type x: array
    :return: A scalar.
    """
    framework = _cur_framework(x)
    return framework.to_scalar(x)
def to_list(x: Union[ivy.Array, ivy.NativeArray]) -> List:
    """
    Creates a (possibly nested) list from the input array.

    :param x: Input array.
    :type x: array
    :return: A list representation of the input array.
    """
    framework = _cur_framework(x)
    return framework.to_list(x)
def clip_vector_norm(x: Union[ivy.Array, ivy.NativeArray], max_norm: float, p: float = 2.0)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Clips (limits) the vector p-norm of an array.

    :param x: Input array containing elements to clip.
    :type x: array
    :param max_norm: The maximum value of the array norm.
    :type max_norm: float
    :param p: The p-value for computing the p-norm. Default is 2.
    :type p: float, optional
    :return: An array with the vector norm downscaled to the max norm if needed.
    """
    vector_norm = ivy.vector_norm(x, keepdims=True, ord=p)
    scale = ivy.stable_divide(max_norm, vector_norm)
    # only scale down; a scale >= 1 means the norm is already within bounds
    if scale < 1:
        return scale * x
    return x
def clip_matrix_norm(x: Union[ivy.Array, ivy.NativeArray], max_norm: float, p: float = 2.0)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Clips (limits) the matrix norm of an array.

    :param x: Input array containing elements to clip.
    :type x: array
    :param max_norm: The maximum value of the array norm.
    :type max_norm: float
    :param p: The p-value for computing the p-norm. Default is 2.
    :type p: float, optional
    :return: An array with the matrix norm downscaled to the max norm if needed.
    """
    norms = ivy.matrix_norm(x, p, keepdims=True)
    # BUGFIX: previously ivy.maximum was used here, which never clipped
    # (ratio capped at >= 1) and amplified small-norm matrices. Clipping
    # requires min(max_norm / norm, 1), matching clip_vector_norm above.
    ratios = ivy.minimum(ivy.stable_divide(max_norm, norms), 1.)
    return ratios * x
def floormod(x: Union[ivy.Array, ivy.NativeArray], y: Union[ivy.Array, ivy.NativeArray])\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Returns the element-wise remainder of division.

    :param x: Input array to floormod.
    :type x: array
    :param y: Denominator input for floormod.
    :type y: array
    :return: An array of the same shape and type as x, with the elements floor modded.
    """
    framework = _cur_framework(x)
    return framework.floormod(x, y)
def unstack(x: Union[ivy.Array, ivy.NativeArray], axis: int, keepdims: bool = False)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Unpacks the given dimension of a rank-R array into rank-(R-1) arrays.

    :param x: Input array to unstack.
    :type x: array
    :param axis: Axis for which to unpack the array.
    :type axis: int
    :param keepdims: Whether to keep dimension 1 in the unstack dimensions. Default is False.
    :type keepdims: bool, optional
    :return: List of arrays, unpacked along specified dimensions.
    """
    framework = _cur_framework(x)
    return framework.unstack(x, axis, keepdims)
def fourier_encode(x: Union[ivy.Array, ivy.NativeArray], max_freq: Union[float, Union[ivy.Array, ivy.NativeArray]],
                   num_bands: int = 4, linear: bool = False, concat: bool = True, flatten: bool = False)\
        -> Union[ivy.Array, ivy.NativeArray, Tuple]:
    """
    Pads an array with fourier encodings.

    :param x: Input array to encode.
    :type x: array
    :param max_freq: The maximum frequency of the encoding.
    :type max_freq: float
    :param num_bands: The number of frequency bands for the encoding. Default is 4.
    :type num_bands: int, optional
    :param linear: Whether to space the frequency bands linearly as opposed to geometrically. Default is False.
    :type linear: bool, optional
    :param concat: Whether to concatenate the position, sin and cos values, or return seperately. Default is True.
    :type concat: bool, optional
    :param flatten: Whether to flatten the position dimension into the batch dimension. Default is False.
    :type flatten: bool, optional
    :return: New array with the final dimension expanded, and the encodings stored in this channel.
    """
    x_in = x
    dim = x.shape[-1]
    # add a trailing axis so each position is broadcast against every band
    x = ivy.expand_dims(x, -1)
    orig_x = x
    if linear:
        # evenly-spaced frequency bands from 1 to max_freq/2
        scales = ivy.linspace(1., max_freq / 2, num_bands, dev=dev(x))
    else:
        # geometrically-spaced bands; torch logspace needs an array argument
        # when max_freq is a plain float
        if ivy.backend == 'torch' and isinstance(max_freq,float):
            scales = ivy.logspace(0., ivy.log(ivy.array(max_freq / 2)) / math.log(10), num_bands, base=10, dev=dev(x))
        else:
            scales = ivy.logspace(0., ivy.log(max_freq / 2) / math.log(10), num_bands, base=10, dev=dev(x))
    scales = ivy.astype(scales, ivy.dtype(x))
    # left-pad scales with singleton axes so it broadcasts against x
    scales = scales[(*((None,) * (len(x.shape) - len(scales.shape))), Ellipsis)]
    x = x * scales * math.pi
    sin_x = ivy.sin(x)
    cos_x = ivy.cos(x)
    if flatten:
        # collapse the per-position band axis into the feature axis
        orig_x = x_in
        sin_x = ivy.reshape(sin_x, [-1, num_bands*dim])
        cos_x = ivy.reshape(cos_x, [-1, num_bands*dim])
    if concat:
        return ivy.concat([orig_x, sin_x, cos_x], -1)
    return sin_x, cos_x
def value_is_nan(x: Union[ivy.Array, ivy.NativeArray, Number], include_infs: bool = True)\
        -> bool:
    """
    Determine whether the single valued array or scalar is of nan type.

    :param x: The input to check Input array.
    :type x: array
    :param include_infs: Whether to include infs and -infs in the check. Default is True.
    :type include_infs: bool, optional
    :return: Boolean as to whether the input value is a nan or not.
    """
    x_scalar = ivy.to_scalar(x) if ivy.is_native_array(x) else x
    # NaN is the only value that does not compare equal to itself
    if x_scalar != x_scalar:
        return True
    # BUGFIX: parenthesised the inf check. The previous
    # `include_infs and x == INF or x == -INF` bound `and` tighter than
    # `or`, so -inf was reported as nan even when include_infs=False.
    if include_infs and (x_scalar == INF or x_scalar == -INF):
        return True
    return False
def has_nans(x: Union[ivy.Array, ivy.NativeArray], include_infs: bool = True)\
        -> bool:
    """
    Determine whether the array contains any nans, as well as infs or -infs if specified.

    :param x: Input array.
    :type x: array
    :param include_infs: Whether to include infs and -infs in the check. Default is True.
    :type include_infs: bool, optional
    :return: Boolean as to whether the array contains nans.
    """
    # a nan/inf anywhere in x makes the sum nan/inf
    total = ivy.sum(x)
    return value_is_nan(total, include_infs)
def exists(x: Any) -> bool:
    """
    Simple check as to whether the input is None or not.

    :param x: Input to check.
    :type x: any
    :return: True if x is not None, else False.
    """
    if x is None:
        return False
    return True
def default(x: Any, default_val: Any, catch_exceptions: bool = False, rev: bool = False, with_callable: bool = False)\
        -> Any:
    """
    Returns x provided it exists (is not None), else returns the default value.

    :param x: Input which may or may not exist (be None).
    :type x: value if catch_exceptions=False else callable
    :param default_val: The default value.
    :type default_val: any
    :param catch_exceptions: Whether to catch exceptions from callable x. Default is False.
    :type catch_exceptions: bool, optional
    :param rev: Whether to reverse the input x and default_val. Default is False.
    :type rev: bool, optional
    :param with_callable: Whether either of the arguments might be callable functions. Default is False.
    :type with_callable: bool, optional
    :return: x if x exists (is not None), else default.
    """
    # catching exceptions implies the inputs may be callables
    with_callable = catch_exceptions or with_callable
    if rev:
        x, default_val = default_val, x
    x_callable = with_callable and callable(x)
    default_callable = with_callable and callable(default_val)
    if catch_exceptions:
        # noinspection PyBroadException
        try:
            x = x() if x_callable else x
        except Exception:
            # the candidate failed, fall back to the default
            return default_val() if default_callable else default_val
    else:
        x = x() if x_callable else x
    if x is not None:
        return x
    return default_val() if default_callable else default_val
def shape_to_tuple(shape: Union[int, Tuple[int], List[int]]):
    """
    Returns a tuple representation of the input shape.

    :param shape: The shape input to convert to tuple representation.
    :return: The shape in tuple representation
    """
    # a bare int represents a 1-D shape
    is_scalar = isinstance(shape, int)
    return (shape,) if is_scalar else tuple(shape)
def try_else_none(fn):
    """
    Try and return the function, otherwise return None if an exception was raised during function execution.

    :param fn: Function to try and call and return.
    :type fn: callable
    """
    # delegate to default() with exception catching enabled
    result = default(fn, None, catch_exceptions=True)
    return result
def arg_names(receiver):
    """
    Get the expected keyword arguments for a function or class constructor.

    :param receiver: The function or class to inspect.
    :return: List of parameter names, in declaration order.
    """
    signature = inspect.signature(receiver)
    return [name for name in signature.parameters]
def match_kwargs(kwargs, *receivers, allow_duplicates=False):
    """
    Match keyword arguments to either class or function receivers.

    :param kwargs: Keyword arguments to match.
    :type kwargs: dict of any
    :param receivers: Functions and/or classes to match the keyword arguments to.
    :type receivers: callables and/or classes
    :param allow_duplicates: Whether to allow one keyword argument to be used for multiple receivers. Default is False.
    :type allow_duplicates: bool, optional
    :return: Sequence of keyword arguments split as best as possible.
    """
    split_kwargs = list()
    for receiver in receivers:
        expected = arg_names(receiver)
        matched = {k: v for k, v in kwargs.items() if k in expected}
        if not allow_duplicates:
            # consume matched keys so later receivers cannot claim them;
            # note this mutates the caller's kwargs dict
            for key in matched:
                del kwargs[key]
        split_kwargs.append(matched)
    # unwrap when there is only a single receiver
    if len(split_kwargs) == 1:
        return split_kwargs[0]
    return split_kwargs
def cache_fn(func: Callable) -> Callable:
    """
    Wrap a function so that repeated calls with the same arguments return a
    previously cached output instead of recomputing.

    :param func: The function to wrap, whose output should be cached for later.
    :type func: callable
    :return: The newly cache wrapped function.
    """
    global FN_CACHE
    if func not in FN_CACHE:
        FN_CACHE[func] = dict()
    def cached_fn(*args, **kwargs):
        # build a string cache key from the stringified args and kwargs
        key_parts = [str(i) + ', ' for i in args]
        key_parts.append(' kw, ')
        key_parts.extend(str(i) + ', ' for i in sorted(kwargs.items()))
        key = ''.join(key_parts)
        cache = FN_CACHE[func]
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return cached_fn
def current_framework_str() -> Union[str, None]:
    """
    Return the string of the current globally set framework. Returns None if no framework is set.

    :return: The framework string.
    """
    fw = _cur_framework()
    return None if fw is None else fw.current_framework_str()
def einops_rearrange(x: Union[ivy.Array, ivy.NativeArray], pattern: str, **axes_lengths: Dict[str, int])\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Perform einops rearrange operation on input array x.

    :param x: Input array to be re-arranged.
    :type x: array
    :param pattern: Rearrangement pattern.
    :type pattern: str
    :param axes_lengths: Any additional specifications for dimensions.
    :type axes_lengths: keyword parameter args
    :return: New array with einops.rearrange having been applied.
    """
    rearranged = einops.rearrange(x, pattern, **axes_lengths)
    return rearranged
def einops_reduce(x: Union[ivy.Array, ivy.NativeArray], pattern: str, reduction: Union[str, Callable],
                  **axes_lengths: Dict[str, int]) -> Union[ivy.Array, ivy.NativeArray]:
    """
    Perform einops reduce operation on input array x.

    This is a thin, framework-agnostic pass-through to einops.reduce.

    :param x: Input array to be reduced.
    :type x: array
    :param pattern: Reduction pattern.
    :type pattern: str
    :param reduction: One of available reductions ('min', 'max', 'sum', 'mean', 'prod'), or callable.
    :type reduction: str or callable
    :param axes_lengths: Any additional specifications for dimensions, forwarded verbatim to einops.
    :type axes_lengths: keyword parameter args
    :return: New array with einops.reduce having been applied.
    """
    return einops.reduce(x, pattern, reduction, **axes_lengths)
def einops_repeat(x: Union[ivy.Array, ivy.NativeArray], pattern: str, **axes_lengths: Dict[str, int])\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Perform einops repeat operation on input array x.

    This is a thin, framework-agnostic pass-through to einops.repeat.

    :param x: Input array to be repeated.
    :type x: array
    :param pattern: Rearrangement pattern.
    :type pattern: str
    :param axes_lengths: Any additional specifications for dimensions, forwarded verbatim to einops.
    :type axes_lengths: keyword parameter args
    :return: New array with einops.repeat having been applied.
    """
    return einops.repeat(x, pattern, **axes_lengths)
def get_min_denominator()\
        -> float:
    """
    Get the global minimum denominator used by ivy for numerically stable division.

    :return: The current global minimum denominator.
    """
    # noinspection PyProtectedMember
    return ivy._MIN_DENOMINATOR
def set_min_denominator(val: float)\
        -> None:
    """
    Set the global minimum denominator used by ivy for numerically stable division.

    This value is the default offset added to the denominator inside stable_divide
    when no per-call override is given.

    :param val: The new value to set the minimum denominator to.
    :type val: float
    """
    ivy._MIN_DENOMINATOR = val
def get_min_base()\
        -> float:
    """
    Get the global minimum base used by ivy for numerically stable power raising.

    :return: The current global minimum base.
    """
    # noinspection PyProtectedMember
    return ivy._MIN_BASE
def set_min_base(val: float)\
        -> None:
    """
    Set the global minimum base used by ivy for numerically stable power raising.

    This value is the default offset added to the base inside stable_pow
    when no per-call override is given.

    :param val: The new value to set the minimum base to.
    :type val: float
    """
    ivy._MIN_BASE = val
def stable_divide(numerator: Any, denominator: Any, min_denominator: float = None) -> Any:
    """
    Divide the numerator by the denominator, adding a small constant to the denominator for numerical stability.

    :param numerator: The numerator of the division.
    :type numerator: any valid numerator, including containers
    :param denominator: The denominator of the division.
    :type denominator: any valid denominator, including containers
    :param min_denominator: The minimum denominator to use, use global ivy._MIN_DENOMINATOR by default.
    :type min_denominator: float, optional
    :return: The new item following the numerically stable division.
    """
    # noinspection PyProtectedMember
    eps = default(min_denominator, ivy._MIN_DENOMINATOR)
    return numerator / (denominator + eps)
def stable_pow(base: Any, exponent: Any, min_base: float = None)\
        -> Any:
    """
    Raise the base to the power of the exponent, with min_base added to the base for numerical stability.

    :param base: The base of the power operation.
    :type base: any valid base, including containers
    :param exponent: The exponent of the power operation.
    :type exponent: any valid exponent, including containers
    :param min_base: The minimum base to use, use global ivy._MIN_BASE by default.
    :type min_base: float, optional
    :return: The new item following the numerically stable power operation.
    """
    # noinspection PyProtectedMember
    return (base + default(min_base, ivy._MIN_BASE)) ** exponent
def get_all_arrays_in_memory():
    """
    Return a list of all native arrays which are currently alive, found by scanning the garbage collector.
    """
    live_arrays = []
    for candidate in gc.get_objects():
        # is_native_array can itself raise for exotic objects surfaced by the
        # garbage collector, so any failure is treated as "not an array".
        # noinspection PyBroadException
        try:
            if ivy.is_native_array(candidate):
                live_arrays.append(candidate)
        except Exception:
            pass
    return live_arrays
def num_arrays_in_memory():
    """
    Return the number of native arrays which are currently alive.
    """
    live_arrays = get_all_arrays_in_memory()
    return len(live_arrays)
def print_all_arrays_in_memory():
    """
    Print the type and shape of every array which is currently alive.
    """
    for arr in get_all_arrays_in_memory():
        print(type(arr), arr.shape)
def set_queue_timeout(timeout):
    """
    Set the global queue timeout value (in seconds). Default value without this function being called is 10 seconds.

    :param timeout: The timeout to set in seconds.
    :type timeout: float, optional
    """
    global TIMEOUT
    TIMEOUT = timeout
def queue_timeout():
    """
    Get the global queue timeout value (in seconds). Default value without this function being called is 10 seconds.

    :return: The current global queue timeout in seconds.
    """
    # Reading a module-level name needs no `global` declaration; the redundant
    # `global TIMEOUT` statement has been removed.
    return TIMEOUT
def tmp_dir():
    """
    Return the directory for saving temporary files.

    :return: The current global temporary directory.
    """
    return TMP_DIR
def set_tmp_dir(tmp_dr):
    """
    Set the directory for saving temporary files.

    :param tmp_dr: The new directory to use for saving temporary files.
    """
    global TMP_DIR
    TMP_DIR = tmp_dr
def container_types():
    """
    Return all framework-specific types which should be hierarchically parsed in an ivy.Container. Such types must adopt
    a key-value structure, and exposes public methods .keys(), .values() and items().

    :return: List of framework-specific container-like types, or an empty list.
    """
    # NOTE(review): the ValueError is presumably raised by _cur_framework() when no
    # backend framework is set — confirm; in that case there are no types to report.
    # noinspection PyBroadException
    try:
        return _cur_framework().container_types()
    except ValueError:
        return []
def inplace_arrays_supported(f=None):
    """
    Determine whether inplace arrays are supported for the current backend framework.

    :param f: Backend framework argument; not used by this function.
    :return: Boolean, whether or not inplace arrays are supported.
    """
    return _cur_framework().inplace_arrays_supported()
def inplace_variables_supported(f=None):
    """
    Determine whether inplace variables are supported for the current backend framework.

    :param f: Backend framework argument; not used by this function.
    :return: Boolean, whether or not inplace variables are supported.
    """
    return _cur_framework().inplace_variables_supported()
def supports_inplace(x):
    """
    Determine whether inplace operations are supported for the data type of x.

    :param x: Input variable or array to check for inplace support for.
    :type x: variable or array
    :return: Boolean, whether or not inplace operations are supported for x.
    :raises Exception: If x is neither a variable nor a native array.
    """
    # Variables and native arrays can differ in their inplace support, so each
    # kind is routed to its own backend capability query.
    if ivy.is_variable(x):
        return ivy.inplace_variables_supported()
    if ivy.is_native_array(x):
        return ivy.inplace_arrays_supported()
    raise Exception('Input x must be either a variable or an array.')
def assert_supports_inplace(x):
    """
    Assert that inplace operations are supported for x, raising an exception otherwise.

    :param x: Input variable or array to check for inplace support for.
    :type x: variable or array
    :return: True if support, raises exception otherwise
    """
    if ivy.supports_inplace(x):
        return True
    raise Exception('Inplace operations are not supported {} types with {} backend'.format(
        type(x), ivy.current_framework_str()))
def inplace_update(x, val, f=None):
    """
    Perform in-place update for the input variable.

    :param x: The variable to update.
    :type x: variable
    :param val: The array to update the variable with.
    :type val: array
    :param f: Backend framework argument; not used by this function, the backend is resolved from x.
    :return: The variable following the in-place update.
    """
    return _cur_framework(x).inplace_update(x, val)
def inplace_decrement(x, val, f=None):
    """
    Perform in-place decrement for the input variable.

    :param x: The variable to decrement.
    :type x: variable
    :param val: The array to decrement the variable with.
    :type val: array
    :param f: Backend framework argument; not used by this function, the backend is resolved from x.
    :return: The variable following the in-place decrement.
    """
    return _cur_framework(x).inplace_decrement(x, val)
def inplace_increment(x, val, f=None):
    """
    Perform in-place increment for the input variable.

    :param x: The variable to increment.
    :type x: variable
    :param val: The array to increment the variable with.
    :type val: array
    :param f: Backend framework argument; not used by this function, the backend is resolved from x.
    :return: The variable following the in-place increment.
    """
    return _cur_framework(x).inplace_increment(x, val)
def cumsum(x: Union[ivy.Array, ivy.NativeArray], axis: int = 0)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Returns the cumulative sum of the elements along a given axis.

    The computation is delegated to the current backend framework, resolved from x.

    :param x: Input array.
    :type x: array
    :param axis: Axis along which the cumulative sum is computed. By default 0.
    :type axis: int
    :return: Input array with cumulatively summed elements along axis.
    """
    return _cur_framework(x).cumsum(x, axis)
def cumprod(x: Union[ivy.Array, ivy.NativeArray], axis: int = 0, exclusive: bool = False)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Returns the cumulative product of the elements along a given axis.

    The computation is delegated to the current backend framework, resolved from x.

    :param x: Input array.
    :type x: array
    :param axis: Axis along which the cumulative product is computed. By default 0.
    :type axis: int
    :param exclusive: Whether to perform the cumprod exclusively. Defaults is False.
    :type exclusive: bool, optional
    :return: Input array with cumulatively multiplied elements along axis.
    """
    return _cur_framework(x).cumprod(x, axis, exclusive)
# noinspection PyShadowingNames
def scatter_flat(indices: Union[ivy.Array, ivy.NativeArray], updates: Union[ivy.Array, ivy.NativeArray],
                 size: Optional[int] = None, tensor: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
                 reduction: str = 'sum', dev: ivy.Device = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Scatter flat updates into a new flat array according to flat indices.

    :param indices: Indices for the new values to occupy.
    :type indices: array
    :param size: The size of the result.
    :type size: int, optional
    :param updates: Values for the new array to hold.
    :type updates: array
    :param tensor: The tensor in which to scatter the results, default is None, in which case the size is used to
                   scatter into a zeros array.
    :type tensor: array, optional
    :param reduction: The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
    :type reduction: str
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as updates if None.
    :type dev: ivy.Device, optional
    :return: New array of given shape, with the values scattered at the indices.
    """
    return _cur_framework(indices).scatter_flat(indices, updates, size, tensor, reduction, dev)
# noinspection PyShadowingNames
def scatter_nd(indices: Union[ivy.Array, ivy.NativeArray], updates: Union[ivy.Array, ivy.NativeArray],
               shape: Optional[Iterable[int]] = None, tensor: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
               reduction: str = 'sum', dev: ivy.Device = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Scatter updates into a new array according to indices.

    :param indices: Indices for the new values to occupy.
    :type indices: array
    :param updates: Values for the new array to hold.
    :type updates: array
    :param shape: The shape of the result. Default is None, in which case tensor argument must be provided.
    :type shape: sequence of ints
    :param tensor: The tensor in which to scatter the results, default is None, in which case the shape arg is used to
                   scatter into a zeros array.
    :type tensor: array, optional
    :param reduction: The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
    :type reduction: str
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as updates if None.
    :type dev: ivy.Device, optional
    :return: New array of given shape, with the values scattered at the indices.
    """
    return _cur_framework(indices).scatter_nd(indices, updates, shape, tensor, reduction, dev)
# noinspection PyShadowingNames
def gather(params: Union[ivy.Array, ivy.NativeArray], indices: Union[ivy.Array, ivy.NativeArray], axis: int = -1,
           dev: ivy.Device = None) -> Union[ivy.Array, ivy.NativeArray]:
    """
    Gather slices from params at axis according to indices.

    :param params: The array from which to gather values.
    :type params: array
    :param indices: Index array.
    :type indices: array
    :param axis: The axis from which to gather from. Default is -1.
    :type axis: int, optional
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as params if None.
    :type dev: ivy.Device, optional
    :return: New array with the values gathered at the specified indices along the specified axis.
    """
    return _cur_framework(params).gather(params, indices, axis, dev)
# noinspection PyShadowingNames
def gather_nd(params: Union[ivy.Array, ivy.NativeArray], indices: Union[ivy.Array, ivy.NativeArray],
              dev: ivy.Device = None) -> Union[ivy.Array, ivy.NativeArray]:
    """
    Gather slices from params into a array with shape specified by indices.

    :param params: The array from which to gather values.
    :type params: array
    :param indices: Index array.
    :type indices: array
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as params if None.
    :type dev: ivy.Device, optional
    :return: New array of given shape, with the values gathered at the indices.
    """
    return _cur_framework(params).gather_nd(params, indices, dev)
def multiprocessing(context: str = None):
    """
    Return framework-specific multi-processing module.

    NOTE(review): this function shadows the standard-library ``multiprocessing`` module name within this module.

    :param context: The context of the multiprocessing, either fork, forkserver or spawn. Default is None.
    :type context: str, optional
    :return: Multiprocessing module
    """
    return _cur_framework().multiprocessing(context)
def indices_where(x: Union[ivy.Array, ivy.NativeArray])\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Returns indices of true elements in an input boolean array.

    :param x: Boolean array, for which indices are desired.
    :type x: array
    :return: Indices for where the boolean array is True.
    """
    return _cur_framework(x).indices_where(x)
# noinspection PyShadowingNames
def one_hot(indices: Union[ivy.Array, ivy.NativeArray], depth: int, dev: ivy.Device = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """
    Returns a one-hot array

    :param indices: Indices for where the ones should be scattered *[batch_shape, dim]*
    :type indices: array
    :param depth: Scalar defining the depth of the one-hot dimension.
    :type depth: int
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as indices if None.
    :type dev: ivy.Device, optional
    :return: One-hot array with ones placed at the given indices along a trailing dimension of size depth.
    """
    return _cur_framework(indices).one_hot(indices, depth, dev)
def shape(x: Union[ivy.Array, ivy.NativeArray], as_array: bool = False)\
        -> Iterable[int]:
    """
    Returns the shape of the array x.

    :param x: Input array to infer the shape of.
    :type x: array
    :param as_array: Whether to return the shape as an array, default False.
    :type as_array: bool, optional
    :return: Shape of the array
    """
    return _cur_framework(x).shape(x, as_array)
def get_num_dims(x: Union[ivy.Array, ivy.NativeArray], as_array: bool = False) -> int:
    """
    Returns the number of dimensions of the array x.

    :param x: Input array to infer the number of dimensions for.
    :type x: array
    :param as_array: Whether to return the result as an array, default False.
    :type as_array: bool, optional
    :return: Number of dimensions of the array
    """
    return _cur_framework(x).get_num_dims(x, as_array)
| 34.446073 | 130 | 0.66765 |
acf79a448b2928411fd4bacff537d3376a15dd7c | 4,653 | py | Python | main_windows.py | sebdisdv/HdrProject | f2b7ec83aa83f1ca533101e1e6f183c4399a3311 | [
"MIT"
] | null | null | null | main_windows.py | sebdisdv/HdrProject | f2b7ec83aa83f1ca533101e1e6f183c4399a3311 | [
"MIT"
] | null | null | null | main_windows.py | sebdisdv/HdrProject | f2b7ec83aa83f1ca533101e1e6f183c4399a3311 | [
"MIT"
] | null | null | null |
import cv2
import numpy as np
import exhaustive_ace
import windowed_ace
import debevec
import gradient
import exposure_fusion
import os.path as path
from utils import get_exposure, get_dataset_info, create_folders
from PIL import Image
from consolemenu import SelectionMenu
from termcolor import colored
from typing import List
ALGORITHMS = ["ACE", "ACE_Windowed", "Debevec", "Mertens"]
def select_algorithm():
algorithm_selection = SelectionMenu(
ALGORITHMS,
"Select which algorithm to use",
show_exit_option=False,
clear_screen=True,
)
algorithm_selection.show()
algorithm_selection.join()
return algorithm_selection.selected_option
def select_dataset(names):
dataset_selection = SelectionMenu(
names, "Select which dataset to use", show_exit_option=False, clear_screen=False
)
dataset_selection.show()
dataset_selection.join()
return dataset_selection.selected_option
def select_image(names):
img_selection = SelectionMenu(
names, "Select which image to use", show_exit_option=False, clear_screen=False
)
img_selection.show()
img_selection.join()
return img_selection.selected_option
def select_quit():
quit = SelectionMenu(
["No", "Yes"],
"Do you want to quit?",
show_exit_option=False,
clear_screen=False,
)
quit.show()
quit.exit()
return quit.selected_option
class HdrImplementations:
def __init__(self, dataset_name: str, imgs_names: List[str]) -> None:
self.dataset_name = dataset_name
# self.settings = json.load(open("settings.json"))
self.images_paths = [
path.join("Dataset", dataset_name, img) for img in imgs_names
]
self.images = [cv2.imread(im) for im in self.images_paths]
self.exposure_times = [get_exposure(Image.open(im)) for im in self.images_paths]
self.tonemapAlgo = cv2.createTonemapDrago(1.0, 0.7)
self.result_merge = None
self.result_img = None
def applyDebevecArt(self):
merge = cv2.createMergeDebevec()
self.result_merge = merge.process(self.images, times=self.exposure_times.copy())
def applyAceWindowed(self, image_index, window):
self.result_img = windowed_ace.compute(self.images_paths[image_index], window)
def applyAceExhaustive(self, image_index):
self.result_img = exhaustive_ace.compute(self.images_paths[image_index])
def applyDebevec(self):
self.result_merge = debevec.compute(self.images, self.exposure_times)
self.result_img = self.tonemapAlgo.process(self.result_merge.copy())
if self.dataset_name == "Stella":
self.result_img = 3 * self.result_img
self.result_img = self.result_img * 255
self.result_img = np.clip(self.result_img, 0, 255).astype("uint8")
def applyGradient(self):
self.result_img = gradient.compute(self.images)
def applyExpFusion(self):
self.result_img = exposure_fusion.compute(self.images)
def save_image(self, name):
if self.result_img is not None:
cv2.imwrite(
path.join("Results", self.dataset_name, f"{name}.jpg"), self.result_img
)
def main(names, info):
while True:
algo_index = select_algorithm()
dataset_index = select_dataset(names)
img_index = -1
if algo_index <= 1:
img_index = select_image(info[names[dataset_index]])
hdr = HdrImplementations(
dataset_name=names[dataset_index], imgs_names=info[names[dataset_index]]
)
print(f"Algorithm selected {ALGORITHMS[algo_index]}")
print(f"Dataset selected {names[dataset_index]}")
name_res = input("Insert name for the resulting image: ")
if ALGORITHMS[algo_index] == "ACE":
hdr.applyAceExhaustive(img_index)
elif ALGORITHMS[algo_index] == "ACE_Windowed":
window = int(input("Insert window size in the range 100 <= w <= 250: "))
window = np.clip(window, 100, 250)
hdr.applyAceWindowed(img_index, window)
elif ALGORITHMS[algo_index] == "Debevec":
hdr.applyDebevec()
elif ALGORITHMS[algo_index] == "Mertens":
hdr.applyExpFusion()
hdr.save_image(name_res)
print(
colored(
f"\nImage has been saved in Results/{names[dataset_index]}", "green"
)
)
if select_quit():
exit()
if __name__ == "__main__":
names, info = get_dataset_info()
create_folders(names)
main(names, info)
| 28.2 | 88 | 0.65893 |
acf79e27231d185c066989c86d12b387684aebff | 2,145 | py | Python | examples/retrieval/evaluation/dense/evaluate_faiss_dense.py | svakulenk0/beir | 16f3554c45b33a2c7dd47fe6e67c93055626c074 | [
"Apache-2.0"
] | 1 | 2021-05-20T18:22:29.000Z | 2021-05-20T18:22:29.000Z | examples/retrieval/evaluation/dense/evaluate_faiss_dense.py | svakulenk0/beir | 16f3554c45b33a2c7dd47fe6e67c93055626c074 | [
"Apache-2.0"
] | null | null | null | examples/retrieval/evaluation/dense/evaluate_faiss_dense.py | svakulenk0/beir | 16f3554c45b33a2c7dd47fe6e67c93055626c074 | [
"Apache-2.0"
] | null | null | null | from beir import util, LoggingHandler
from beir.retrieval import models
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval.evaluation import EvaluateRetrieval
from beir.retrieval.search.dense import DenseRetrievalFaissSearch as DRFS
import pathlib, os
import logging
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
#### Download nfcorpus.zip dataset and unzip the dataset
dataset = "nfcorpus.zip"
url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}".format(dataset)
out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), "datasets")
data_path = util.download_and_unzip(url, out_dir)
#### Provide the data_path where nfcorpus has been downloaded and unzipped
corpus, queries, qrels = GenericDataLoader(data_path).load(split="test")
#### Defining our FAISS index
#### Number of clusters used for faiss. Select a value 4*sqrt(N) to 16*sqrt(N)
#### https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
n_clusters = 1024
#### Number of clusters to explorer at search time. We will search for nearest neighbors in 3 clusters.
nprobe = 3
model = DRFS(model=models.SentenceBERT("distilroberta-base-msmarco-v2"), n_clusters=n_clusters, nprobe=nprobe)
retriever = EvaluateRetrieval(model)
#### Retrieve dense results (format of results is identical to qrels)
results = retriever.retrieve(corpus, queries)
#### Evaluate your retrieval using NDCG@k, MAP@K ...
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
#### Retrieval Example ####
query_id, scores_dict = random.choice(list(results.items()))
print("Query : %s\n" % queries[query_id])
scores = sorted(scores_dict.items(), key=lambda item: item[1], reverse=True)
for rank in range(10):
doc_id = scores[rank][0]
print("Doc %d: %s [%s] - %s\n" % (rank+1, doc_id, corpus[doc_id].get("title"), corpus[doc_id].get("text"))) | 42.058824 | 111 | 0.729604 |
acf79e2b7ae1be8ab06d2653c248600178490057 | 1,949 | py | Python | toolkit/LM6d_devkit/LM6d_1_calc_extents.py | THU-DA-6D-Pose-Group/mx-DeepIM | f1c850e5f8f75f1051a89c40daff9185870020f5 | [
"Apache-2.0"
] | 229 | 2018-09-08T08:38:59.000Z | 2022-03-29T07:09:22.000Z | toolkit/LM6d_devkit/LM6d_1_calc_extents.py | greatwallet/mx-DeepIM | 74b6df2e3f6be7d6fed23ba2f553dab5ae950700 | [
"Apache-2.0"
] | 59 | 2018-09-13T20:10:36.000Z | 2021-01-08T12:22:27.000Z | toolkit/LM6d_devkit/LM6d_1_calc_extents.py | greatwallet/mx-DeepIM | 74b6df2e3f6be7d6fed23ba2f553dab5ae950700 | [
"Apache-2.0"
] | 59 | 2018-09-08T07:56:33.000Z | 2022-03-25T22:01:42.000Z | # --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Gu Wang
# --------------------------------------------------------
"""
For more precise evaluation, use the diameters in models_info.txt
"""
from __future__ import print_function, division
import os
import sys
import numpy as np
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.join(cur_dir, "../.."))
version = "v1"
model_root = os.path.join(cur_dir, "../../data/LINEMOD_6D/LM6d_converted/models/LM6d_render_{}/models".format(version))
print("target path: {}".format(model_root))
idx2class = {
1: "ape",
2: "benchvise",
3: "bowl",
4: "camera",
5: "can",
6: "cat",
7: "cup",
8: "driller",
9: "duck",
10: "eggbox",
11: "glue",
12: "holepuncher",
13: "iron",
14: "lamp",
15: "phone",
}
classes = idx2class.values()
classes = sorted(classes)
def class2idx(class_name, idx2class=idx2class):
for k, v in idx2class.items():
if v == class_name:
return k
def load_object_points():
points = {}
for cls_idx, cls_name in idx2class.items():
point_file = os.path.join(model_root, cls_name, "points.xyz")
assert os.path.exists(point_file), "Path does not exist: {}".format(point_file)
points[cls_name] = np.loadtxt(point_file)
return points
def write_extents():
points_dict = load_object_points()
extents = np.zeros((len(classes), 3))
for i, cls_name in enumerate(classes):
extents[i, :] = 2 * np.max(np.abs(points_dict[cls_name]), 0)
# print(extents)
extent_file = os.path.join(model_root, "extents.txt")
# with open(extent_file, 'w') as f:
np.savetxt(extent_file, extents, fmt="%.6f", delimiter=" ")
if __name__ == "__main__":
write_extents()
print("{} finished".format(__file__))
| 26.69863 | 119 | 0.606978 |
acf79e6b4586304c866d219c9ca799c3383a907e | 2,759 | py | Python | scripts/pyoof_effelsberg_mpi.py | tcassanelli/pyoof | 94d1e324837ededf2b1886ed1ebdfcebd2fa7474 | [
"BSD-3-Clause"
] | 13 | 2017-06-23T11:19:43.000Z | 2021-07-21T03:31:37.000Z | scripts/pyoof_effelsberg_mpi.py | tcassanelli/pyoof | 94d1e324837ededf2b1886ed1ebdfcebd2fa7474 | [
"BSD-3-Clause"
] | null | null | null | scripts/pyoof_effelsberg_mpi.py | tcassanelli/pyoof | 94d1e324837ededf2b1886ed1ebdfcebd2fa7474 | [
"BSD-3-Clause"
] | 3 | 2019-05-02T06:18:04.000Z | 2020-03-13T16:04:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Tomas Cassanelli
import numpy as np
import glob
from astropy import units as u
from pyoof import aperture, telgeometry, fit_zpoly, extract_data_effelsberg
import mpi4py.rc
mpi4py.rc.threads = False
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# telescope = [blockage, delta, pr, name]
pr = 50 * u.m
telescope = dict(
effelsberg_20deg=[
telgeometry.block_effelsberg(alpha=20 * u.deg),
telgeometry.opd_effelsberg,
pr,
'effelsberg (20 deg blockage)'
],
effelsberg_10deg=[
telgeometry.block_effelsberg(alpha=10 * u.deg),
telgeometry.opd_effelsberg,
pr,
'effelsberg (10 deg blockage)'
],
effelsberg_0deg=[
telgeometry.block_effelsberg(alpha=0 * u.deg),
telgeometry.opd_effelsberg,
pr,
'effelsberg (0 deg blockage)'
],
effelsberg_sr_only=[
telgeometry.block_manual(
pr=50 * u.m, sr=3.25 * u.m, a=0 * u.m, L=0 * u.m),
telgeometry.opd_effelsberg,
pr,
'effelsberg (sub-reflector only blockage)'
],
effelsberg_empty=[
telgeometry.block_manual(
pr=50 * u.m, sr=0 * u.m, a=0 * u.m, L=0 * u.m),
telgeometry.opd_effelsberg,
pr,
'effelsberg (no blockage)'
]
)
def compute_phase_error(pathfits, order_max):
"""
Uses fit_zpoly and calculates the actuators at the Effelsberg telescope.
"""
data_info, data_obs = extract_data_effelsberg(pathfits)
[name, pthto, obs_object, obs_date, freq, wavel, d_z, meanel] = data_info
[beam_data, u_data, v_data] = data_obs
for configuration in telescope.keys():
# for configuration in ['effelsberg_empty']:
fit_zpoly(
data_info=data_info,
data_obs=[beam_data, u_data, v_data],
order_max=order_max,
illum_func=aperture.illum_pedestal,
# illum_func=aperture.illum_gauss,
telescope=telescope[configuration],
fit_previous=True, # True is recommended
resolution=2 ** 8, # standard is 2 ** 8
box_factor=5, # box_size = 5 * pr
config_params_file=None, # default or add path config_file.yaml
make_plots=True, # for now testing only the software
verbose=2,
work_dir='/scratch/v/vanderli/cassane/OOFH7'
)
comm.Barrier()
pth2data = '/home/v/vanderli/cassane/data/pyoof_data/*/*.fits'
files = glob.glob(pth2data)
files_per_rank = np.array_split(files, size)
for _f in files_per_rank[rank]:
compute_phase_error(pathfits=_f, order_max=6)
| 29.351064 | 77 | 0.618702 |
acf79f622a3a8125c9ce3067a959ab0b0dc83129 | 6,573 | py | Python | loaner/web_app/backend/testing/loanertest.py | McDiesel/loaner | dcf5fa640ee9059a814650fa4432fa1116df78e9 | [
"Apache-2.0"
] | null | null | null | loaner/web_app/backend/testing/loanertest.py | McDiesel/loaner | dcf5fa640ee9059a814650fa4432fa1116df78e9 | [
"Apache-2.0"
] | null | null | null | loaner/web_app/backend/testing/loanertest.py | McDiesel/loaner | dcf5fa640ee9059a814650fa4432fa1116df78e9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for tests in loaner.example.com."""
import datetime
import mock
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import testbed
from absl.testing import absltest
import endpoints
from loaner.web_app import constants
from loaner.web_app.backend.auth import permissions
from loaner.web_app.backend.lib import action_loader
from loaner.web_app.backend.lib import events
from loaner.web_app.backend.models import user_model
USER_DOMAIN = constants.APP_DOMAIN
USER_EMAIL = 'user@{}'.format(USER_DOMAIN)
SUPER_ADMIN_EMAIL = 'daredevil@{}'.format(USER_DOMAIN)
TECHNICAL_ADMIN_EMAIL = 'technical-admin@{}'.format(USER_DOMAIN)
OPERATIONAL_ADMIN_EMAIL = 'operational-admin@{}'.format(USER_DOMAIN)
TECHNICIAN_EMAIL = 'technician@{}'.format(USER_DOMAIN)
TEST_DIR_DEVICE_DEFAULT = {
# A test device response from the Cloud Directory API in the Default OU.
'deviceId': 'unique_id',
'serialNumber': '123456',
'status': 'ACTIVE',
'lastSync': datetime.datetime.utcnow(),
'model': 'HP Chromebook 13 G1',
'orgUnitPath': constants.ORG_UNIT_DICT['DEFAULT'],
}
TEST_DIR_DEVICE_GUEST = {
# A test device response from the Cloud Directory API in the Default OU.
'deviceId': 'unique_id',
'serialNumber': '123456',
'status': 'ACTIVE',
'lastSync': datetime.datetime.utcnow(),
'model': 'HP Chromebook 13 G1',
'orgUnitPath': constants.ORG_UNIT_DICT['GUEST'],
}
TEST_DIR_DEVICE1 = {
# A test device response from the Cloud Directory API to test OU moves.
'deviceId': 'unique_id',
'serialNumber': '123456',
'status': 'ACTIVE',
'lastSync': datetime.datetime.utcnow(),
'model': 'HP Chromebook 13 G1',
'orgUnitPath': '/',
}
TEST_DIR_DEVICE2 = {
# A second test device response from the Cloud Directory API.
'deviceId': 'unique_id2',
'serialNumber': '654321',
'status': 'ACTIVE',
'lastSync': datetime.datetime.utcnow(),
'model': 'HP Chromebook 13 G1',
'orgUnitPath': constants.ORG_UNIT_DICT['DEFAULT'],
}
class TestCase(absltest.TestCase):
"""Base test case."""
def setUp(self):
"""Set up the environment for testing."""
super(TestCase, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_user_stub()
self.login_user()
taskqueue_patcher = mock.patch.object(taskqueue, 'add')
self.addCleanup(taskqueue_patcher.stop)
taskqueue_patcher.start()
# events.raise_event() raises an exception if there are no events in
# datastore, and it's called often in the model methods, many of which are
# used in testing. When you want to test that method specifically, first run
# stop() on this patcher; be sure to run start() again before end of test.
self.testbed.mock_raiseevent = mock.Mock()
self.testbed.raise_event_patcher = mock.patch.object(
events, 'raise_event', self.testbed.mock_raiseevent)
self.addCleanup(self.testbed.raise_event_patcher.stop)
self.testbed.raise_event_patcher.start()
def login_user(
self, email=USER_EMAIL, user_id='1', organization=None, is_admin=False):
"""Login a User for return of mocked users.get_current_user."""
self.logout_user()
if organization is None:
organization = email.split('@')[-1] if email else ''
self.testbed.setup_env(
user_email=email,
user_id=user_id,
user_organization=organization,
user_is_admin=('1' if is_admin else '0'),
overwrite=True)
def logout_user(self):
"""Logs out the current user."""
self.testbed.setup_env(
user_email='',
user_id='',
overwrite=True)
def tearDown(self):
"""Tear down the testing environment."""
self.logout_user()
self.testbed.deactivate()
super(TestCase, self).tearDown()
class EndpointsTestCase(TestCase):
  """Base test case for Endpoints."""

  def setUp(self):
    super(EndpointsTestCase, self).setUp()
    # Patch endpoints.get_current_user so tests control the logged-in user.
    get_user_patcher = mock.patch.object(endpoints, 'get_current_user')
    self.mock_endpoints_get_current_user = get_user_patcher.start()
    self.mock_endpoints_get_current_user.return_value = None
    self.addCleanup(get_user_patcher.stop)

  def login_endpoints_user(self, email=USER_EMAIL):
    """Logs in a User for return of mocked endpoints.get_current_user."""
    self.mock_endpoints_get_current_user.return_value = users.User(email)
    user_model.User.get_user(email=email)

  def login_admin_endpoints_user(self, email=SUPER_ADMIN_EMAIL):
    """Logs in an admin with all roles."""
    self.mock_endpoints_get_current_user.return_value = users.User(email)
    all_roles = [
        permissions.TECHNICAL_ADMIN_ROLE.name,
        permissions.OPERATIONAL_ADMIN_ROLE.name,
        permissions.TECHNICIAN_ROLE.name,
        permissions.USER_ROLE.name
    ]
    user_model.User.get_user(email=email, opt_roles=all_roles)
class ActionTestCase(TestCase):
  """Base test caser for action modules."""

  def setUp(self):
    """Checks imported modules for an action module and includes class."""
    super(ActionTestCase, self).setUp()
    try:
      self.testing_action
    except AttributeError:
      raise EnvironmentError(
          'Create a TestCase setUp method that sets a variable named '
          'self.testing_action containing the name of the action module you '
          'wish to test, then runs the superclass setUp method.')
    # Load the requested action module and keep its action object around.
    loaded_actions = action_loader.load_actions([self.testing_action])  # pylint: disable=no-member
    if not loaded_actions:
      raise EnvironmentError(
          'The unit test must import at least one valid action module. Verify '
          'that self.testing_action is a string that is the name of a module '
          'in the actions directory.')
    self.action = loaded_actions.get(self.testing_action)
def main():
  # Entry point wrapper; delegates to absltest's runner.
  absltest.main()
if __name__ == '__main__':
  # Delegate to main() (which calls absltest.main()) so direct execution
  # and runner wrappers follow the same code path.
  main()
| 33.881443 | 92 | 0.709113 |
acf79fd2af7c4477e435e2ef1829f51f7b0d5452 | 4,591 | py | Python | Python/github/github.py | maxxxxxdlp/code_share | 4f9375bf4bdf6048b54b22bd1fa0d3ad010de7ef | [
"MIT"
] | null | null | null | Python/github/github.py | maxxxxxdlp/code_share | 4f9375bf4bdf6048b54b22bd1fa0d3ad010de7ef | [
"MIT"
] | 33 | 2021-07-11T22:55:42.000Z | 2022-01-07T23:23:43.000Z | Python/github/github.py | maxxxxxdlp/code_share | 4f9375bf4bdf6048b54b22bd1fa0d3ad010de7ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import subprocess
import webbrowser
import re
from pathlib import Path
## Find Repository Root
# Walk upward from the current directory until a directory containing a
# .git/ folder is found; bail out when the filesystem root is reached.
search_directory = os.getcwd()
git_folder = None
while True:
    git_folder = os.path.join(search_directory, ".git/")
    if os.path.isdir(git_folder):
        break
    if str(search_directory) == "/":
        print("Not a git repository")
        exit(-1)
    # NOTE: search_directory becomes a pathlib.Path after the first
    # iteration; os.path.join and str() above handle both str and Path.
    search_directory = Path(search_directory).parent.absolute()
## Run script in Github URL to CLI mode if URL was provided
re_github_url = r"github\.com\/(?P<login>[^/]+)/(?P<repository>[^/]+)/?(?:(?P<type>tree|blob)/(?P<branch>[^/]+/?(?P<path>.*)))?"
# Bug fix: re.search() returns None for a non-matching argument; calling
# .groupdict() on it directly crashed with AttributeError. Report the bad
# URL explicitly instead.
parsed_url = None
if len(sys.argv) == 2:
    url_match = re.search(re_github_url, sys.argv[1])
    if url_match is None:
        print("Not a valid GitHub URL: %s" % sys.argv[1])
        exit(-1)
    parsed_url = url_match.groupdict()
if parsed_url:
    # groupdict() always contains every named group (None when unmatched).
    path = parsed_url["path"] if "path" in parsed_url else ""
    if not path:
        path = "/"
    full_path = os.path.join(search_directory, path)
    # A "blob" URL that does not end in "/" points at a file.
    is_file = (
        "type" in parsed_url
        and parsed_url["type"] == "blob"
        and not path.endswith("/")
    )
    directory = os.path.dirname(full_path) if is_file else full_path
    # Emit a shell command for the caller to eval: cd plus open/list.
    print(f"cd {directory} && ", end="")
    if is_file:
        editor = (
            os.environ["EDITOR"] if "EDITOR" in os.environ else "open"
        )
        file_name = os.path.basename(path)
        print(f"{editor} {file_name}")
    else:
        print(
            os.environ["LIST_FILES"]
            if "LIST_FILES" in os.environ
            else "ls"
        )
    exit(0)
## Run script in CLI to GitHub URL mode
# Pair each "-x" flag with the argument that follows it, e.g.
# ["prog", "-r", "origin", "-b", "dev"] -> {"-r": "origin", "-b": "dev"}.
arguments = {
    parameter: value
    for parameter, value in zip(sys.argv, sys.argv[1:])
    if parameter.startswith("-")
}
refs_folder = os.path.join(git_folder, "refs/remotes/")
if not os.path.isdir(refs_folder):
    print("No remote is set for this repository")
    exit(-1)
# Each subdirectory of refs/remotes/ is a configured remote.
remotes = [
    item
    for item in os.listdir(refs_folder)
    if os.path.isdir(os.path.join(refs_folder, item))
]
if len(remotes) == 0:
    print("No remote is set for this repository")
    exit(-1)
# Remote selection: explicit -r flag wins, then "origin", then the first one.
if "-r" in arguments:
    if arguments["-r"] not in remotes:
        print("Invalid remote specified")
        exit(-1)
    preferred_remote = arguments["-r"]
elif "origin" in remotes:
    preferred_remote = "origin"
else:
    print("Using %s as a remote" % remotes[0])
    preferred_remote = remotes[0]
# Resolve the remote's URL via git config and strip a trailing ".git".
origin_url = (
    subprocess.check_output(
        ["git", "config", "--get", "remote.%s.url" % preferred_remote]
    )
    .strip()
    .decode("utf-8")
)
if origin_url.endswith(".git"):
    origin_url = origin_url[: -len(".git")]
if not origin_url:
    print("Unable to get origin url")
    exit(-1)
# Every entry under refs/remotes/<remote>/ except HEAD is a branch ref.
origin_folder = os.path.join(refs_folder, preferred_remote)
branches = [
    item for item in os.listdir(origin_folder) if item != "HEAD"
]
if "-b" in arguments:
preferred_branch = arguments["-b"]
endswith = preferred_branch.startswith(".")
startswith = preferred_branch.endswith(".")
if startswith or endswith:
matched_branches = [
branch
for branch in branches
if (
(
not startswith
or branch.startswith(preferred_branch[:-1])
)
and (
not endswith
or branch.endswith(preferred_branch[1:])
)
)
]
if len(matched_branches) == 1:
preferred_branch = matched_branches[0]
elif len(matched_branches) > 1:
print("Matched multiple branches: %s" % matched_branches)
exit(0)
else:
print("No branches matched")
exit(0)
else:
preferred_branch = (
subprocess.check_output(["git", "branch", "--show-current"])
.strip()
.decode("utf-8")
)
if not preferred_branch:
if len(branches) == 0:
print("No branches found for this repository")
exit(-1)
elif "master" in remotes:
preferred_branch = "master"
elif "main" in remotes:
preferred_branch = "main"
else:
print("Using %s as a branch" % branches[0])
preferred_branch = branches[0]
# Path of the current directory relative to the repository root.
relative_path = os.getcwd()[len(str(search_directory)) + 1:]


# PEP 8 (E731): use a def rather than an assigned lambda.
def get_url(file_name):
    """Return the GitHub URL for the current directory or a file in it."""
    return os.path.join(
        origin_url,
        "blob" if file_name else "tree",
        preferred_branch,
        relative_path,
        file_name,
    )


if "-f" in arguments:
    url = get_url(arguments["-f"])
else:
    url = get_url("")
print("Opening %s" % url)
webbrowser.open_new_tab(url)
acf79fd6e606937f2d8a697af1cb1a93a2878dfd | 975 | py | Python | PasswordClassifier.py | JasonPap/hawaian-hacker | b92d41a692661dbbcbef0d0538a536395e0dcfdd | [
"MIT"
] | null | null | null | PasswordClassifier.py | JasonPap/hawaian-hacker | b92d41a692661dbbcbef0d0538a536395e0dcfdd | [
"MIT"
] | null | null | null | PasswordClassifier.py | JasonPap/hawaian-hacker | b92d41a692661dbbcbef0d0538a536395e0dcfdd | [
"MIT"
] | null | null | null | from sklearn import svm
from Password import Password
class PasswordClassifier:
    """One-class SVM that decides whether a guess looks password-like."""

    def __init__(self):
        # One-class SVM: trained on positive (password) examples only.
        self.clf = svm.OneClassSVM(kernel='rbf', gamma=0.1, nu=0.35)

    def train(self, TrainingFile):
        """Fit the classifier on letter frequencies of words in TrainingFile."""
        training_data = []
        with open(TrainingFile) as inputFile:
            for word in inputFile:
                sample = Password(word)
                sample.analyze_frequencies()
                training_data.append(sample.letter_frequencies.values())
        self.clf.fit(training_data)

    def predict(self, guess):
        """Return True when the guess is classified as password-like."""
        candidate = Password(guess)
        candidate.analyze_frequencies()
        features = [candidate.letter_frequencies.values()]
        # The +400 offset shifts the SVM decision boundary (tuned empirically).
        if self.clf.decision_function(features) + 400 > 0:
            return True
        return False
acf7a0b0448b007c8987e52421605dca5e4273dc | 2,222 | py | Python | scripts/setup/generate_secrets.py | enterstudio/zulip | f6f8f1fe36ac7d82bc0a5effc00a47e460f0b325 | [
"Apache-2.0"
] | null | null | null | scripts/setup/generate_secrets.py | enterstudio/zulip | f6f8f1fe36ac7d82bc0a5effc00a47e460f0b325 | [
"Apache-2.0"
] | null | null | null | scripts/setup/generate_secrets.py | enterstudio/zulip | f6f8f1fe36ac7d82bc0a5effc00a47e460f0b325 | [
"Apache-2.0"
] | 1 | 2017-03-19T14:40:16.000Z | 2017-03-19T14:40:16.000Z | #!/usr/bin/env python2.7
# This tools generates local_settings_generated.py using the template
from __future__ import print_function
import sys, os, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
from zerver.lib.utils import generate_random_token
# Run from the repository root so relative output paths resolve correctly.
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
CAMO_CONFIG_FILENAME = '/etc/default/camo'
# Secret names that get a freshly generated random token each.
AUTOGENERATED_SETTINGS = ['shared_secret', 'avatar_salt', 'rabbitmq_password', 'local_database_password',
                          'initial_password_salt']
def generate_camo_config_file(camo_key):
    """Write the Camo daemon config to /etc/default/camo with the given key."""
    config_text = """ENABLED=yes
PORT=9292
CAMO_KEY=%s
""" % (camo_key,)
    with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
        camo_file.write(config_text)
    print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
    """Return a 50-character random key (same recipe as Django's startproject)."""
    allowed_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, allowed_chars)
def generate_secrets(development=False):
    """Write the auto-generated secrets config file.

    In development mode the file lands in the source tree; in production it
    is written to /etc/zulip and the Camo config file is generated as well.
    """
    if development:
        output_filename = "zproject/dev-secrets.conf"
    else:
        output_filename = "/etc/zulip/zulip-secrets.conf"

    def config_line(var, value):
        # One "name = value" line of the ini-style secrets file.
        return "%s = %s\n" % (var, value)

    lines = ['[secrets]\n']
    for name in AUTOGENERATED_SETTINGS:
        lines.append(config_line(name, generate_random_token(64)))
    lines.append(config_line('secret_key', generate_django_secretkey()))
    camo_key = get_random_string(64)
    lines.append(config_line('camo_key', camo_key))
    if not development:
        # Write the Camo config file directly
        generate_camo_config_file(camo_key)
    with open(output_filename, 'w') as out:
        out.write("".join(lines))
    print("Generated %s with auto-generated secrets!" % (output_filename,))
if __name__ == '__main__':
    # Pass -d/--development to write the secrets into the source tree.
    development = len(sys.argv) > 1 and sys.argv[1] in ('-d', '--development')
    generate_secrets(development)
| 31.742857 | 105 | 0.706121 |
acf7a0b19798633063e23dc53edc56d30cecdde8 | 588 | py | Python | lessons/python/cpu-multiprocessing/joblib_tutorial_2_multiple_varying_args.py | vmzhang/studyGroup | d49ddc32bdd7ac91d73cb8890154e1965d1dcfd0 | [
"Apache-2.0"
] | 105 | 2015-06-22T15:23:19.000Z | 2022-03-30T12:20:09.000Z | lessons/python/cpu-multiprocessing/joblib_tutorial_2_multiple_varying_args.py | vmzhang/studyGroup | d49ddc32bdd7ac91d73cb8890154e1965d1dcfd0 | [
"Apache-2.0"
] | 314 | 2015-06-18T22:10:34.000Z | 2022-02-09T16:47:52.000Z | lessons/python/cpu-multiprocessing/joblib_tutorial_2_multiple_varying_args.py | vmzhang/studyGroup | d49ddc32bdd7ac91d73cb8890154e1965d1dcfd0 | [
"Apache-2.0"
] | 142 | 2015-06-18T22:11:53.000Z | 2022-02-03T16:14:43.000Z | from joblib import Parallel, delayed, cpu_count
def f(x, y):
    """Return the product of x and y (toy workload for the parallel demo)."""
    return y * x
if __name__ == '__main__':
    # Build paired argument lists: f is called as f(x, y) for each pair.
    y_vals = list(range(10,20))
    x_vals = list(range(10))
    inputs = list(zip(x_vals,y_vals))
    print('Input :', inputs)
    # Sequential baseline using the built-in map for comparison.
    builtin_outputs = list(map(f, x_vals, y_vals))
    print('Built-in:', builtin_outputs)
    #number of threads to use
    pool_size = cpu_count()
    print("Number of processes:", pool_size)
    #can specify backend parameter to use threads
    # Each delayed(f)(x, y) call is dispatched to a joblib worker.
    outputs = Parallel(n_jobs=pool_size)(delayed(f)(x,y) for x,y in inputs)
    print('Pool:', outputs)
acf7a1e44dd9535fcf5f412fb81de40ffe8bf20e | 769 | py | Python | setup.py | ProjectQ-Framework/FermiLib-Plugin-PySCF | 3716a84de478b77d038868376a9fe71e8efd3ad2 | [
"Apache-2.0"
] | 10 | 2017-06-22T19:36:33.000Z | 2022-01-05T18:55:36.000Z | setup.py | ProjectQ-Framework/FermiLib-Plugin-PySCF | 3716a84de478b77d038868376a9fe71e8efd3ad2 | [
"Apache-2.0"
] | 3 | 2017-06-30T06:59:24.000Z | 2017-07-06T05:47:10.000Z | setup.py | ProjectQ-Framework/FermiLib-Plugin-PySCF | 3716a84de478b77d038868376a9fe71e8efd3ad2 | [
"Apache-2.0"
] | 6 | 2017-06-07T16:42:53.000Z | 2022-01-05T18:55:40.000Z | from setuptools import setup, find_packages
# This reads the __version__ variable from fermilibpluginpyscf/_version.py.
# Use context managers so the file handles are closed promptly (the original
# left three handles unclosed).
with open('fermilibpluginpyscf/_version.py') as version_file:
    exec(version_file.read())

# Readme file as long_description:
with open('README.rst') as readme_file:
    long_description = readme_file.read()

# Read in requirements.txt
with open('requirements.txt') as requirements_file:
    requirements = requirements_file.readlines()
requirements = [r.strip() for r in requirements]

setup(
    name='fermilibpluginpyscf',
    version=__version__,
    author='FermiLib plugin PySCF developers',
    author_email='fermilib@projectq.ch',
    url='http://www.projectq.ch',
    description='A plugin allowing FermiLib to interface with PySCF.',
    long_description=long_description,
    install_requires=requirements,
    license='Apache 2',
    packages=find_packages()
)
| 32.041667 | 74 | 0.755527 |
acf7a248f8f79362f01d38771595e7bbf72c7098 | 3,942 | py | Python | applicationinsights/channel/contracts/User.py | allieus/ApplicationInsights-Python | 5345a414ff6879ed0a44d090ae01c66e6fffeeec | [
"MIT"
] | 1 | 2019-05-26T12:52:55.000Z | 2019-05-26T12:52:55.000Z | applicationinsights/channel/contracts/User.py | allieus/ApplicationInsights-Python | 5345a414ff6879ed0a44d090ae01c66e6fffeeec | [
"MIT"
] | null | null | null | applicationinsights/channel/contracts/User.py | allieus/ApplicationInsights-Python | 5345a414ff6879ed0a44d090ae01c66e6fffeeec | [
"MIT"
] | 1 | 2019-05-26T12:56:38.000Z | 2019-05-26T12:56:38.000Z | import collections
import copy
from .Utils import _write_complex_object
class User(object):
    """Data contract class for type User.

    Property values are stored sparsely: only values that differ from
    _defaults live in _values, and setting a property back to its default
    removes the stored entry again. The four properties below all share
    that behavior, so it is factored into _get/_set helpers.
    """
    _defaults = collections.OrderedDict([
        ('ai.user.accountAcquisitionDate', None),
        ('ai.user.accountId', None),
        ('ai.user.userAgent', None),
        ('ai.user.id', None)
    ])

    def __init__(self):
        """Initializes a new instance of the class.
        """
        self._values = {
        }
        self._initialize()

    def _get(self, key):
        """Return the stored value for key, falling back to its default."""
        if key in self._values:
            return self._values[key]
        return self._defaults[key]

    def _set(self, key, value):
        """Store value for key; remove the entry when reset to the default."""
        if value == self._defaults[key] and key in self._values:
            del self._values[key]
        else:
            self._values[key] = value

    @property
    def account_acquisition_date(self):
        """The account_acquisition_date property.

        Returns:
            (string). the property value. (defaults to: None)
        """
        return self._get('ai.user.accountAcquisitionDate')

    @account_acquisition_date.setter
    def account_acquisition_date(self, value):
        """The account_acquisition_date property.

        Args:
            value (string). the property value.
        """
        self._set('ai.user.accountAcquisitionDate', value)

    @property
    def account_id(self):
        """The account_id property.

        Returns:
            (string). the property value. (defaults to: None)
        """
        return self._get('ai.user.accountId')

    @account_id.setter
    def account_id(self, value):
        """The account_id property.

        Args:
            value (string). the property value.
        """
        self._set('ai.user.accountId', value)

    @property
    def user_agent(self):
        """The user_agent property.

        Returns:
            (string). the property value. (defaults to: None)
        """
        return self._get('ai.user.userAgent')

    @user_agent.setter
    def user_agent(self, value):
        """The user_agent property.

        Args:
            value (string). the property value.
        """
        self._set('ai.user.userAgent', value)

    @property
    def id(self):
        """The id property.

        Returns:
            (string). the property value. (defaults to: None)
        """
        return self._get('ai.user.id')

    @id.setter
    def id(self, value):
        """The id property.

        Args:
            value (string). the property value.
        """
        self._set('ai.user.id', value)

    def _initialize(self):
        """Initializes the current instance of the object.
        """
        pass

    def write(self):
        """Writes the contents of this object and returns the content as a dict object.

        Returns:
            (dict). the object that represents the same data as the current instance.
        """
        return _write_complex_object(self._defaults, self._values)
| 31.03937 | 122 | 0.567478 |
acf7a2bc0a20878dcd6a5010c1d510b707f23e06 | 2,053 | py | Python | WMIAdventure/admin/models/tests.py | Michael-Czekanski/WMIAdventure-1 | ea812b13de0cd6c47c541cbede2d016a7837b4b8 | [
"Apache-2.0"
] | 2 | 2021-05-26T15:12:33.000Z | 2021-12-09T17:17:19.000Z | WMIAdventure/admin/models/tests.py | Michael-Czekanski/WMIAdventure-1 | ea812b13de0cd6c47c541cbede2d016a7837b4b8 | [
"Apache-2.0"
] | 558 | 2021-05-27T05:41:23.000Z | 2022-02-27T21:50:54.000Z | WMIAdventure/admin/models/tests.py | Michael-Czekanski/WMIAdventure-1 | ea812b13de0cd6c47c541cbede2d016a7837b4b8 | [
"Apache-2.0"
] | 4 | 2021-05-26T15:09:29.000Z | 2022-03-13T15:28:07.000Z | from unittest import TestCase
from models.Card import Card
from models.Model import Model
class ModelTestCase(TestCase):
    """Tests for the generic Model attribute wrapper."""

    def setUp(self) -> None:
        self.dict = {'key1': 'val1', 'key2': 'val2'}

    def test_create(self):
        """Every dict entry becomes an attribute on the Model."""
        model = Model(self.dict)
        for key, expected in self.dict.items():
            self.assertEqual(getattr(model, key), expected)
class CardModelTestCase(TestCase):
    """Tests for the Card model built from API-style dicts."""

    def setUp(self) -> None:
        # Sample card as if it came from the API
        self.dict = {
            'id': 1, 'name': 'test', 'subject': 'testsubject', 'image': None,
            'tooltip': 'testtooltip',
            'levels': [
                {
                    'level': 1, 'next_level_cost': 1,
                    'effects': [
                        {'card_effect': 1, 'target': 1, 'power': 1.0,
                         'range': 1.0}
                    ]
                }
            ]
        }

    def test_creation(self):
        """Card copies scalar fields plus nested level and effect data."""
        card = Card(self.dict)
        # Top-level card attributes.
        for field in ('id', 'name', 'subject', 'image', 'tooltip'):
            self.assertEqual(getattr(card, field), self.dict.get(field))
        # First (and only) level object.
        expected_level = self.dict.get('levels')[0]
        actual_level = card.levels[0]
        self.assertEqual(actual_level.level, expected_level.get('level'))
        self.assertEqual(actual_level.next_level_cost,
                         expected_level.get('next_level_cost'))
        # First effect inside that level.
        expected_effect = expected_level.get('effects')[0]
        actual_effect = actual_level.effects[0]
        for field in ('card_effect', 'target', 'power', 'range'):
            self.assertEqual(getattr(actual_effect, field),
                             expected_effect.get(field))
acf7a32c42723e0c9e80e580fb889ed2fc61477e | 35,115 | py | Python | tests/keras/test_callbacks.py | oneandwholly/keras | dc9db6b494a037db15967d8585a8941be46c0b0e | [
"MIT"
] | 20 | 2018-07-16T12:43:24.000Z | 2020-12-15T08:37:35.000Z | tests/keras/test_callbacks.py | oneandwholly/keras | dc9db6b494a037db15967d8585a8941be46c0b0e | [
"MIT"
] | 13 | 2018-10-15T10:09:28.000Z | 2019-01-07T04:48:27.000Z | tests/keras/test_callbacks.py | oneandwholly/keras | dc9db6b494a037db15967d8585a8941be46c0b0e | [
"MIT"
] | 22 | 2018-08-30T14:12:06.000Z | 2021-07-03T19:43:15.000Z | import os
import multiprocessing
import numpy as np
import pytest
from numpy.testing import assert_allclose
from csv import reader
from csv import Sniffer
import shutil
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, add, dot, Lambda, Layer
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling1D
from keras.layers import GlobalAveragePooling2D
from keras.layers import BatchNormalization
from keras.utils.test_utils import get_test_data
from keras.utils.generic_utils import to_list
from keras.utils.generic_utils import unpack_singleton
from keras import backend as K
from keras.utils import np_utils
# Prefer the stdlib mock (Python 3); fall back to the external package.
# Catch only ImportError -- a bare except would also swallow
# KeyboardInterrupt/SystemExit and hide unrelated failures.
try:
    from unittest.mock import patch
except ImportError:
    from mock import patch
# Shared dimensions/sizes for the toy classification problems below.
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20
def data_generator(x, y, batch_size):
    """Yield successive (x, y) batches forever, cycling over the data."""
    x = to_list(x)
    y = to_list(y)
    num_batches = len(x[0]) // batch_size
    batch_index = 0
    while True:
        start = batch_index * batch_size
        end = start + batch_size
        # unpack_singleton collapses a one-element list back to its element.
        x_batch = unpack_singleton([array[start:end] for array in x])
        y_batch = unpack_singleton([array[start:end] for array in y])
        yield x_batch, y_batch
        batch_index = (batch_index + 1) % num_batches
# Changing the default arguments of get_test_data.
def get_data_callbacks(num_train=train_samples,
                       num_test=test_samples,
                       input_shape=(input_dim,),
                       classification=True,
                       num_classes=num_classes):
    """Wrapper around get_test_data with this module's defaults baked in."""
    return get_test_data(num_train=num_train,
                         num_test=num_test,
                         input_shape=input_shape,
                         classification=classification,
                         num_classes=num_classes)
def test_TerminateOnNaN():
    """TerminateOnNaN stops training at the first NaN/inf loss."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    # Huge constant initial weights make the loss overflow immediately.
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
    # Training must stop after the very first epoch.
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    # case 2: same behavior through fit_generator.
    history = model.fit_generator(data_generator(X_train, y_train, batch_size),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0])
def test_stop_training_csv(tmpdir):
    """CSVLogger records the NaN epoch before TerminateOnNaN stops training."""
    np.random.seed(1337)
    fp = str(tmpdir / 'test.csv')
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()

    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
    model = Sequential()
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
        # NOTE: shadows the module-level data_generator. Yields real batches
        # for three passes over the data, then switches to all-NaN batches
        # to trigger TerminateOnNaN mid-training.
        i = 0
        max_batch_index = len(X_train) // batch_size
        tot = 0
        while 1:
            if tot > 3 * len(X_train):
                yield (np.ones([batch_size, input_dim]) * np.nan,
                       np.ones([batch_size, num_classes]) * np.nan)
            else:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            tot += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train) // batch_size,
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    # The CSV log's final row must contain the nan loss.
    values = []
    with open(fp) as f:
        for x in reader(f):
            values.append(x)
    assert 'nan' in values[-1], 'The last epoch was not logged.'
    os.remove(fp)
def test_ModelCheckpoint(tmpdir):
    """ModelCheckpoint writes files per its monitor/mode/period settings."""
    np.random.seed(1337)
    filepath = str(tmpdir / 'checkpoint.h5')
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    # case 1: default 'auto' mode on val_loss, save every epoch.
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 2: explicit 'min' mode.
    mode = 'min'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 3: 'max' mode on val_acc.
    mode = 'max'
    monitor = 'val_acc'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 4: only save when the monitored metric improves.
    save_best_only = True
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)

    # case 5: checkpoint every 2 epochs with an epoch-templated filename.
    save_best_only = False
    period = 2
    mode = 'auto'
    # Bug fix: write into tmpdir rather than the current working directory,
    # so a failing run cannot leave checkpoint files behind in the repo.
    filepath = str(tmpdir / 'checkpoint.{epoch:02d}.h5')
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode,
                                      period=period)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
    assert os.path.isfile(filepath.format(epoch=2))
    assert os.path.isfile(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    # Everything written into tmpdir must have been cleaned up above.
    assert not tmpdir.listdir()
def test_EarlyStopping():
    """Smoke test: EarlyStopping runs cleanly in 'max' and 'auto' modes."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    mode = 'max'
    monitor = 'val_acc'
    patience = 0
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    # Only checks that fitting with the callback does not raise.
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)

    mode = 'auto'
    monitor = 'val_acc'
    patience = 2
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
def test_EarlyStopping_reuse():
    """A single EarlyStopping instance can be reused across fit() calls."""
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience
def test_EarlyStopping_patience():
    """Training stops once the monitor fails to improve for `patience` epochs."""

    class DummyModel(object):
        # Minimal stand-in exposing only what EarlyStopping touches.
        def __init__(self):
            self.stop_training = False

        def get_weights(self):
            return []

        def set_weights(self, weights):
            pass

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    early_stop.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040, 0.1019]

    # Should stop after epoch 3,
    # as the loss has not improved after patience=2 epochs.
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        if early_stop.model.stop_training:
            break
    assert epochs_trained == 3
def test_EarlyStopping_baseline():
    """`baseline` must be reached within `patience` epochs or training stops."""

    class DummyModel(object):
        # Minimal stand-in exposing only what EarlyStopping touches.
        def __init__(self):
            self.stop_training = False

        def get_weights(self):
            return []

        def set_weights(self, weights):
            pass

    def baseline_tester(acc_levels):
        # Feed an accuracy sequence to the callback and count epochs run.
        early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75,
                                             patience=2)
        early_stop.model = DummyModel()
        epochs_trained = 0
        early_stop.on_train_begin()
        for epoch in range(len(acc_levels)):
            epochs_trained += 1
            early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]})
            if early_stop.model.stop_training:
                break
        return epochs_trained

    acc_levels = [0.55, 0.76, 0.81, 0.81]
    baseline_met = baseline_tester(acc_levels)
    acc_levels = [0.55, 0.74, 0.81, 0.81]
    baseline_not_met = baseline_tester(acc_levels)

    # All epochs should run because baseline was met in second epoch
    assert baseline_met == 4
    # Baseline was not met by second epoch and should stop
    assert baseline_not_met == 2
def test_EarlyStopping_final_weights():
    """Without restore_best_weights the model keeps the last epoch's weights."""

    class DummyModel(object):
        # Uses an integer "weights" slot to record which epoch's weights
        # are currently set on the model.
        def __init__(self):
            self.stop_training = False
            self.weights = -1

        def get_weights(self):
            return self.weights

        def set_weights(self, weights):
            self.weights = weights

        def set_weight_to_epoch(self, epoch):
            self.weights = epoch

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    early_stop.model = DummyModel()

    losses = [0.2, 0.15, 0.1, 0.11, 0.12]

    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.model.set_weight_to_epoch(epoch=epoch)
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})

        if early_stop.model.stop_training:
            break

    # The best configuration is in the epoch 2 (loss = 0.1000),
    # so with patience=2 we need to end up at epoch 4
    assert early_stop.model.get_weights() == 4
def test_EarlyStopping_final_weights_when_restoring_model_weights():
    """With restore_best_weights=True the best epoch's weights are restored."""

    class DummyModel(object):
        # Uses an integer "weights" slot to record which epoch's weights
        # are currently set on the model.
        def __init__(self):
            self.stop_training = False
            self.weights = -1

        def get_weights(self):
            return self.weights

        def set_weights(self, weights):
            self.weights = weights

        def set_weight_to_epoch(self, epoch):
            self.weights = epoch

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2,
                                         restore_best_weights=True)
    early_stop.model = DummyModel()

    losses = [0.2, 0.15, 0.1, 0.11, 0.12]

    # The best configuration is in the epoch 2 (loss = 0.1000).

    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.model.set_weight_to_epoch(epoch=epoch)
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})

        if early_stop.model.stop_training:
            break

    # The best configuration is in epoch 2 (loss = 0.1000),
    # and while patience = 2, we're restoring the best weights,
    # so we end up at the epoch with the best weights, i.e. epoch 2
    assert early_stop.model.get_weights() == 2
def test_LearningRateScheduler():
    """The scheduled lr 1/(1+epoch) ends at 0.2 after 5 epochs."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    # Last scheduled lr was 1 / (1 + 4) = 0.2.
    assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
def test_ReduceLROnPlateau():
    """A large min_delta forces an lr reduction; min_delta=0 leaves lr alone."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        # Re-seed so both scenarios start from identical weights.
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        min_delta=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    # lr was reduced once: 0.1 * factor(0.1) = 0.01.
    assert_allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        min_delta=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    # With min_delta=0 the loss keeps "improving", so lr is untouched.
    assert_allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
def test_ReduceLROnPlateau_patience():
    """The lr is only reduced after `patience` epochs without improvement."""

    class DummyOptimizer(object):
        # Only the lr variable is needed by the callback.
        def __init__(self):
            self.lr = K.variable(1.0)

    class DummyModel(object):
        def __init__(self):
            self.optimizer = DummyOptimizer()

    reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    patience=2)
    reduce_on_plateau.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040]
    lrs = []

    for epoch in range(len(losses)):
        reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))

    # The learning rates should be 1.0 except the last one
    assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0
def test_ReduceLROnPlateau_backwards_compatibility():
    """The deprecated `epsilon` kwarg must emit a deprecation warning and be
    remapped onto the new `min_delta` attribute."""
    import warnings
    with warnings.catch_warnings(record=True) as ws:
        reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13)
        # Check if warnings are disabled
        if os.environ.get("PYTHONWARNINGS") != "ignore":
            assert "`epsilon` argument is deprecated" in str(ws[0].message)
    # The legacy attribute must be gone; its value lives on as `min_delta`.
    assert not hasattr(reduce_on_plateau, 'epsilon')
    assert hasattr(reduce_on_plateau, 'min_delta')
    assert reduce_on_plateau.min_delta == 1e-13
def test_CSVLogger(tmpdir):
    """CSVLogger must create a log file with the requested separator,
    append without duplicating the header, and tolerate callback reuse."""
    np.random.seed(1337)
    filepath = str(tmpdir / 'log.tsv')
    sep = '\t'
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    # case 1, create new file with defined separator
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)

    assert os.path.isfile(filepath)
    with open(filepath) as csvfile:
        # Sniffer infers the dialect; it must match the separator we set.
        dialect = Sniffer().sniff(csvfile.read())
    assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)

    # case 3, reuse of CSVLogger object
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    import re
    with open(filepath) as csvfile:
        list_lines = csvfile.readlines()
        for line in list_lines:
            # 5 columns (epoch + 4 metrics) -> 4 separators per row.
            assert line.count(sep) == 4
        # 1 header + 4 data rows (1 + 1 + 2 epochs across the three fits).
        assert len(list_lines) == 5
        output = " ".join(list_lines)
        # The header must not be repeated when appending.
        assert len(re.findall('epoch', output)) == 1

    os.remove(filepath)
    assert not tmpdir.listdir()
@pytest.mark.parametrize('update_freq', ['batch', 'epoch', 9])
def test_TensorBoard(tmpdir, update_freq):
    """Smoke-test the TensorBoard callback (including a stateful metric)
    across fit/fit_generator, with and without validation data, for every
    supported `update_freq` mode."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    class DummyStatefulMetric(Layer):
        # Constant stateful metric: exercises TensorBoard's handling of
        # stateful metrics without affecting training.

        def __init__(self, name='dummy_stateful_metric', **kwargs):
            super(DummyStatefulMetric, self).__init__(name=name, **kwargs)
            self.stateful = True
            self.state = K.variable(value=0, dtype='int32')

        def reset_states(self):
            pass

        def __call__(self, y_true, y_pred):
            return self.state

    inp = Input((input_dim,))
    hidden = Dense(num_hidden, activation='relu')(inp)
    hidden = Dropout(0.1)(hidden)
    hidden = BatchNormalization()(hidden)
    output = Dense(num_classes, activation='softmax')(hidden)
    model = Model(inputs=inp, outputs=output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy', DummyStatefulMetric()])

    # we must generate new callbacks for each test, as they aren't stateless
    def callbacks_factory(histogram_freq, embeddings_freq=1):
        return [callbacks.TensorBoard(log_dir=filepath,
                                      histogram_freq=histogram_freq,
                                      write_images=True, write_grads=True,
                                      embeddings_freq=embeddings_freq,
                                      embeddings_layer_names=['dense_1'],
                                      embeddings_data=X_test,
                                      batch_size=5,
                                      update_freq=update_freq)]

    # fit without validation data
    model.fit(X_train, y_train, batch_size=batch_size,
              callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0),
              epochs=3)

    # fit with validation data and accuracy
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=callbacks_factory(histogram_freq=0), epochs=2)

    # fit generator without validation data
    train_generator = data_generator(X_train, y_train, batch_size)
    model.fit_generator(train_generator, len(X_train), epochs=2,
                        callbacks=callbacks_factory(histogram_freq=0,
                                                    embeddings_freq=0))

    # fit generator with validation data and accuracy
    train_generator = data_generator(X_train, y_train, batch_size)
    model.fit_generator(train_generator, len(X_train), epochs=2,
                        validation_data=(X_test, y_test),
                        callbacks=callbacks_factory(histogram_freq=1))

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):
    """With histogram_freq > 0, TensorBoard needs concrete validation
    arrays: fitting without them (or with a validation *generator*) must
    raise a ValueError mentioning validation_data."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    inp = Input((input_dim,))
    hidden = Dense(num_hidden, activation='relu')(inp)
    hidden = Dropout(0.1)(hidden)
    output = Dense(num_classes, activation='softmax')(hidden)
    model = Model(inputs=inp, outputs=output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # we must generate new callbacks for each test, as they aren't stateless
    def callbacks_factory(histogram_freq, embeddings_freq=1):
        return [callbacks.TensorBoard(log_dir=filepath,
                                      histogram_freq=histogram_freq,
                                      write_images=True, write_grads=True,
                                      embeddings_freq=embeddings_freq,
                                      embeddings_layer_names=['dense_1'],
                                      embeddings_data=X_test,
                                      batch_size=5)]

    # fit without validation data should raise ValueError if histogram_freq > 0
    with pytest.raises(ValueError) as raised_exception:
        model.fit(X_train, y_train, batch_size=batch_size,
                  callbacks=callbacks_factory(histogram_freq=1), epochs=3)
    assert 'validation_data must be provided' in str(raised_exception.value)

    train_generator = data_generator(X_train, y_train, batch_size)
    validation_generator = data_generator(X_test, y_test, batch_size)

    # fit generator without validation data should raise ValueError if
    # histogram_freq > 0
    with pytest.raises(ValueError) as raised_exception:
        model.fit_generator(train_generator,
                            len(X_train), epochs=2,
                            callbacks=callbacks_factory(histogram_freq=1))
    assert 'validation_data must be provided' in str(raised_exception.value)

    # fit generator with validation data generator should raise ValueError if
    # histogram_freq > 0
    with pytest.raises(ValueError) as raised_exception:
        model.fit_generator(train_generator, len(X_train), epochs=2,
                            validation_data=validation_generator,
                            validation_steps=1,
                            callbacks=callbacks_factory(histogram_freq=1))
    assert 'validation_data must be provided' in str(raised_exception.value)
def test_TensorBoard_multi_input_output(tmpdir):
    """TensorBoard must handle models with multiple inputs/outputs and
    layers that emit a list of output tensors."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_data_callbacks(
        input_shape=(input_dim, input_dim))
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    inp1 = Input((input_dim, input_dim))
    inp2 = Input((input_dim, input_dim))
    inp_3d = add([inp1, inp2])
    inp_2d = GlobalAveragePooling1D()(inp_3d)
    # test a layer with a list of output tensors
    inp_pair = Lambda(lambda x: x)([inp_3d, inp_2d])
    hidden = dot(inp_pair, axes=-1)
    hidden = Dense(num_hidden, activation='relu')(hidden)
    hidden = Dropout(0.1)(hidden)
    output1 = Dense(num_classes, activation='softmax')(hidden)
    output2 = Dense(num_classes, activation='softmax')(hidden)
    model = Model(inputs=[inp1, inp2], outputs=[output1, output2])
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # we must generate new callbacks for each test, as they aren't stateless
    def callbacks_factory(histogram_freq, embeddings_freq=1):
        return [callbacks.TensorBoard(log_dir=filepath,
                                      histogram_freq=histogram_freq,
                                      write_images=True, write_grads=True,
                                      embeddings_freq=embeddings_freq,
                                      embeddings_layer_names=['dense_1'],
                                      embeddings_data=[X_test] * 2,
                                      batch_size=5)]

    # fit without validation data
    model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
              callbacks=callbacks_factory(histogram_freq=0, embeddings_freq=0),
              epochs=3)

    # fit with validation data and accuracy
    model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
              validation_data=([X_test] * 2, [y_test] * 2),
              callbacks=callbacks_factory(histogram_freq=1), epochs=2)

    train_generator = data_generator([X_train] * 2, [y_train] * 2, batch_size)

    # fit generator without validation data
    model.fit_generator(train_generator, len(X_train), epochs=2,
                        callbacks=callbacks_factory(histogram_freq=0,
                                                    embeddings_freq=0))

    # fit generator with validation data and accuracy
    model.fit_generator(train_generator, len(X_train), epochs=2,
                        validation_data=([X_test] * 2, [y_test] * 2),
                        callbacks=callbacks_factory(histogram_freq=1))

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
def test_TensorBoard_convnet(tmpdir):
    """TensorBoard histogram/image/gradient writing must work for a small
    convolutional model (4-D activations)."""
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_data_callbacks(
        num_train=500,
        num_test=200,
        input_shape=input_shape)
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)

    model = Sequential([
        Conv2D(filters=8, kernel_size=3,
               activation='relu',
               input_shape=input_shape),
        MaxPooling2D(pool_size=2),
        Conv2D(filters=4, kernel_size=(3, 3),
               activation='relu', padding='same'),
        GlobalAveragePooling2D(),
        Dense(num_classes, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
                                write_images=True, write_grads=True,
                                batch_size=16)
    cbks = [tsb]
    model.summary()
    history = model.fit(x_train, y_train, epochs=2, batch_size=16,
                        validation_data=(x_test, y_test),
                        callbacks=cbks,
                        verbose=0)
    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
def test_TensorBoard_display_float_from_logs(tmpdir):
    """TensorBoard must cope with plain Python floats injected into `logs`
    by another callback (not just backend tensors/arrays)."""
    filepath = str(tmpdir / 'logs')

    input_shape = (3,)
    (x_train, y_train), _ = get_data_callbacks(num_train=10,
                                               num_test=0,
                                               input_shape=input_shape)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential([
        Dense(num_classes, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop')

    class CustomCallback(callbacks.Callback):
        # Injects a raw float metric that TensorBoard must serialize.

        def on_epoch_end(self, epoch, logs=None):
            logs['test'] = 0.

    tsb = callbacks.TensorBoard(log_dir=filepath,
                                batch_size=16)
    cbks = [CustomCallback(), tsb]
    model.fit(x_train, y_train, epochs=2, batch_size=16,
              callbacks=cbks,
              verbose=0)
    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
def test_CallbackValData():
    """Callbacks must receive identical validation data (x, y, sample
    weights) whether training goes through `fit` or `fit_generator`."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    train_generator = data_generator(X_train, y_train, batch_size)
    model.fit_generator(train_generator, len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
def test_LambdaCallback():
    """`on_train_end` must fire: here it terminates a busy-looping child
    process, which must be dead once training has finished."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and
    # be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(
        on_train_end=lambda logs: p.terminate())

    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    p.join()
    assert not p.is_alive()
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
    """Regression test: TensorBoard must coexist with ReduceLROnPlateau
    (the LR value the latter injects into `logs` must be serializable)."""
    import shutil
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
def tests_RemoteMonitor():
    """RemoteMonitor must post metrics without error; `requests.post` is
    patched out so no network traffic occurs."""
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    cbks = [callbacks.RemoteMonitor()]

    with patch('requests.post'):
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
def tests_RemoteMonitorWithJsonPayload():
    """Same as tests_RemoteMonitor but with `send_as_json=True`, covering
    the JSON-body code path of the callback."""
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    cbks = [callbacks.RemoteMonitor(send_as_json=True)]

    with patch('requests.post'):
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
if __name__ == '__main__':
    # Allow running this test module directly (delegates to pytest's CLI).
    pytest.main([__file__])
| 37.316684 | 84 | 0.633575 |
acf7a3312ecce5ba3bdb2c25ab924cd575d14e9d | 553 | py | Python | results/SuperMarioBros-1-4-v0/DeepQAgent/2018-08-19_16-56/agent.py | Kautenja/playing-mario-with-deep-reinforcement-learning | bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076 | [
"MIT"
] | 57 | 2018-04-24T07:07:29.000Z | 2022-01-19T17:07:13.000Z | results/SuperMarioBros-1-4-v0/DeepQAgent/2018-08-19_16-56/agent.py | Kautenja/playing-mario-with-deep-reinforcement-learning | bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076 | [
"MIT"
] | 10 | 2018-06-07T14:29:19.000Z | 2019-07-29T13:48:03.000Z | results/SuperMarioBros-1-4-v0/DeepQAgent/2018-08-19_16-56/agent.py | Kautenja/playing-mario-with-deep-reinforcement-learning | bf61b8babfd06b6e6c26eb3694b84e8c7ff4c076 | [
"MIT"
] | 11 | 2018-09-11T23:14:37.000Z | 2021-06-30T03:56:55.000Z | DeepQAgent(
env=<FrameStackEnv<PenalizeDeathEnv<DownsampleEnv<RewardCacheEnv<BinarySpaceToDiscreteSpaceEnv<TimeLimit<SuperMarioBrosLevelEnv<SuperMarioBros-1-4-v0>>>>>>>>,
render_mode=None
replay_memory_size=1000000,
prioritized_experience_replay=False,
discount_factor=0.99,
update_frequency=4,
optimizer=<keras.optimizers.Adam object at 0x2b2badf5be80>,
exploration_rate=AnnealingVariable(initial_value=1.0, final_value=0.1, steps=2500000),
loss=huber_loss,
target_update_freq=10000,
dueling_network=False
)
| 39.5 | 162 | 0.788427 |
acf7a34bbc440004b4bd7404c7ad99a8955df01d | 6,041 | py | Python | spira/lgm/route/gradual_bend.py | cloudcalvin/spira | 2dcaef188f2bc8c3839e1b5ff0be027e0cd4908c | [
"MIT"
] | null | null | null | spira/lgm/route/gradual_bend.py | cloudcalvin/spira | 2dcaef188f2bc8c3839e1b5ff0be027e0cd4908c | [
"MIT"
] | 1 | 2021-10-17T10:18:04.000Z | 2021-10-17T10:18:04.000Z | spira/lgm/route/gradual_bend.py | cloudcalvin/spira | 2dcaef188f2bc8c3839e1b5ff0be027e0cd4908c | [
"MIT"
] | null | null | null | import spira
import numpy as np
from spira import param
from spira.lgm.route.arc_bend import Arc, ArcRoute
from spira.gdsii.utils import scale_coord_up as scu
class SubArcSeries(spira.Cell):
gdslayer = param.LayerField(number=99)
radius = param.FloatField(default=20)
# radius = param.FloatField(default=20 * 1e6)
width = param.FloatField(default=1.0)
# width = param.FloatField(default=1.0 * 1e6)
angular_coverage = param.FloatField(default=30)
num_steps = param.IntegerField(default=1)
angle_resolution = param.FloatField(default=0.1)
start_angle = param.IntegerField(default=0)
port1 = param.DataField()
port2 = param.DataField()
def _regular_bend(self, prev_port):
""" Now connect a regular bend for
the normal curved portion. """
B = Arc(shape=ArcRoute(radius=self.radius,
width=self.width,
theta=45 - np.rad2deg(self.angular_coverage),
start_angle=self.angular_coverage,
angle_resolution=self.angle_resolution,
gdslayer=spira.Layer(number=88)))
b = spira.SRef(B)
b.connect(port='P1', destination=prev_port)
p0 = b.ports['P2']
self.port2 = spira.Term(
name='P2',
midpoint=p0.midpoint,
# midpoint=scu(p0.midpoint),
width=p0.width,
orientation=p0.orientation
)
return b
def create_elementals(self, elems):
self.angular_coverage = np.deg2rad(self.angular_coverage)
inc_rad = (self.radius**-1) / self.num_steps
angle_step = self.angular_coverage / self.num_steps
print('inc_rad: {}'.format(inc_rad))
print('angle_step: {}'.format(angle_step))
arcs = []
for x in range(self.num_steps):
A = Arc(shape=ArcRoute(radius=1/((x+1)*inc_rad),
width=self.width,
theta=np.rad2deg(angle_step),
start_angle=x * np.rad2deg(angle_step),
angle_resolution=self.angle_resolution,
gdslayer=self.gdslayer))
a = spira.SRef(A)
elems += a
arcs.append(a)
if x > 0:
a.connect(port='P1', destination=prevPort)
prevPort = a.ports['P2']
self.port1 = arcs[0].ports['P1']
elems += self._regular_bend(prevPort)
return elems
def create_ports(self, ports):
ports += self.port1
ports += self.port2
return ports
class ArcSeries(spira.Cell):
gdslayer = param.LayerField(number=91)
radius = param.FloatField(default=20)
# radius = param.FloatField(default=20 * 1e6)
width = param.FloatField(default=1.0)
# width = param.FloatField(default=1.0 * 1e6)
angular_coverage = param.FloatField(default=30)
num_steps = param.IntegerField(default=1)
angle_resolution = param.FloatField(default=0.1)
start_angle = param.IntegerField(default=0)
direction = param.StringField(default='ccw')
port1 = param.DataField()
port2 = param.DataField()
subarc = SubArcSeries
def get_subarc_routes(self):
D = SubArcSeries(
gdslayer = self.gdslayer,
radius = self.radius,
width = self.width,
angular_coverage = self.angular_coverage,
num_steps = self.num_steps,
angle_resolution = self.angle_resolution,
start_angle = self.start_angle
)
s1 = spira.SRef(D)
s2 = spira.SRef(D)
s2.reflect(p1=[0,0], p2=[1,1])
s2.connect(port='P2', destination=s1.ports['P2'])
return s1, s2
def create_elementals(self, elems):
s1, s2 = self.get_subarc_routes()
elems += s1
elems += s2
return elems
def create_ports(self, ports):
s1, s2 = self.get_subarc_routes()
# ports += s1.ports['P1'].modified_copy(name='Port_1')
# ports += s2.ports['P1'].modified_copy(name='Port_2')
return ports
class GradualFractal(spira.Cell):
    """
    Creates a 90-degree bent waveguide the bending radius is
    gradually increased until it reaches the minimum
    value of the radius at the "angular coverage" angle.
    It essentially creates a smooth transition to a bent waveguide
    mode. User can control number of steps provided. Direction
    determined by start angle and cw or ccw switch with the
    default 10 "num_steps" and 15 degree coverage,
    effective radius is about 1.5*radius.
    """

    gdslayer = param.LayerField(number=91)
    radius = param.FloatField(default=20)
    # radius = param.FloatField(default=20 * 1e6)
    width = param.FloatField(default=1.0)
    # width = param.FloatField(default=1.0 * 1e6)
    angular_coverage = param.FloatField(default=20)   # degrees
    num_steps = param.IntegerField(default=5)
    angle_resolution = param.FloatField(default=0.01)
    start_angle = param.IntegerField(default=0)
    direction = param.StringField(default='ccw')      # NOTE(review): currently unused below

    def create_elementals(self, elems):
        # Delegate geometry construction to ArcSeries; parameters are
        # forwarded one-to-one.
        D = ArcSeries(
            gdslayer = self.gdslayer,
            radius = self.radius,
            width = self.width,
            angular_coverage = self.angular_coverage,
            num_steps = self.num_steps,
            angle_resolution = self.angle_resolution,
            start_angle = self.start_angle
        )
        # D.xmin, D.ymin = 0, 0
        # Orient to default settings...
        # D.reflect(p1=[0,0], p2=[1,1])
        # D.reflect(p1=[0,0], p2=[1,0])
        # D.rotate(angle=self.start_angle, center=D.center)
        # D.center = [0, 0]
        s1 = spira.SRef(D)
        elems += s1
        return elems
if __name__ == '__main__':
    # Quick visual smoke test: build a gradual bend and render it.
    # gradual = GradualFractal(radius=20*1e6, width=1*1e6)
    gradual = GradualFractal(radius=20, width=1)
    gradual.elementals  # force lazy construction of the cell's elementals
    gradual.output()
    # gradual.output(name='gradual_bend')
| 28.495283 | 68 | 0.605694 |
acf7a4fae0dc16b453916280fbac33ca2d71c445 | 2,329 | py | Python | src/utlis/summarising_features.py | GVS-Lab/genomic-scoring-breast-cancer-progression | 5b8b8d9945a1bec5f6d5b5fc03a23f6835cbc036 | [
"MIT"
] | null | null | null | src/utlis/summarising_features.py | GVS-Lab/genomic-scoring-breast-cancer-progression | 5b8b8d9945a1bec5f6d5b5fc03a23f6835cbc036 | [
"MIT"
] | null | null | null | src/utlis/summarising_features.py | GVS-Lab/genomic-scoring-breast-cancer-progression | 5b8b8d9945a1bec5f6d5b5fc03a23f6835cbc036 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
def summarise_feature_table(data):
    """Summarise the per-column distribution of a feature table.

    For every column of `data` the following statistics are computed:
    median, min, max, standard deviation (std), Coefficient of Variation
    (CV = std / median), Coefficient of Dispersion (CD = variance / median),
    Inter-Quartile Range (IQR = Q3 - Q1) and Quartile Coefficient of
    Dispersion (QCD = (Q3 - Q1) / (Q3 + Q1)).

    Args:
        data: pandas DataFrame whose columns are the features of interest.

    Returns:
        A single-row pandas DataFrame with one column per (statistic,
        feature) pair, named ``<statistic>_<feature>``, ordered as:
        median, min, max, std, CV, CD, IQR, QCD.
    """
    # Suppress divide-by-zero/invalid warnings (e.g. median == 0) locally.
    # The original code used np.seterr(all='ignore'), which permanently
    # mutates NumPy's *global* error state; errstate restores it on exit.
    with np.errstate(all='ignore'):
        median = np.array(np.median(data, axis=0))
        # NOTE: CV/CD deliberately use nanmedian (as before), while the
        # reported median uses np.median — kept for backward compatibility.
        nan_median = np.array(np.nanmedian(data, axis=0))
        q75, q25 = np.nanpercentile(data, [75, 25], axis=0)
        stats = [
            ('median', median),
            ('min', np.array(np.min(data, axis=0))),
            ('max', np.array(np.max(data, axis=0))),
            ('std', np.array(np.std(data, axis=0))),
            ('CV', np.array(np.std(data, axis=0)) / nan_median),
            ('CD', np.array(np.var(data, axis=0)) / nan_median),
            ('IQR', np.array(q75 - q25)),
            ('QCD', np.array(q75 - q25) / np.array(q75 + q25)),
        ]

    # One single-row frame per statistic, columns named '<stat>_<feature>'.
    frames = []
    for prefix, values in stats:
        frame = pd.DataFrame(values).T
        frame.columns = ['{}_{}'.format(prefix, col) for col in data]
        frames.append(frame.reset_index(drop=True))
    return pd.concat(frames, axis=1)
| 54.162791 | 207 | 0.642336 |
acf7a60c72bbc809bdfba11cd897e4fa7d65aae2 | 2,024 | py | Python | config/settings/test.py | healthainetwork/hain_site | 7a43cbdc19aa479a72a5ed644812c34dc5315d8e | [
"MIT"
] | null | null | null | config/settings/test.py | healthainetwork/hain_site | 7a43cbdc19aa479a72a5ed644812c34dc5315d8e | [
"MIT"
] | null | null | null | config/settings/test.py | healthainetwork/hain_site | 7a43cbdc19aa479a72a5ed644812c34dc5315d8e | [
"MIT"
] | null | null | null | """
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY", default="OBGpgFhg6lCgWX7EkB7N4011qLTBhQMhldCVqH7F5QnGQoU4eDpwzi2n4g2tK1xG")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
| 36.142857 | 113 | 0.546443 |
acf7a6648611747603cb9e0a98451115d116e5f9 | 7,619 | py | Python | publications/articles/record.py | Alzpeta/publications-api | 6332c1329b22ff4f494085f042e893a8a94e33df | [
"MIT"
] | null | null | null | publications/articles/record.py | Alzpeta/publications-api | 6332c1329b22ff4f494085f042e893a8a94e33df | [
"MIT"
] | 13 | 2021-01-21T14:35:29.000Z | 2021-09-01T07:53:27.000Z | publications/articles/record.py | Alzpeta/publications-api | 6332c1329b22ff4f494085f042e893a8a94e33df | [
"MIT"
] | 4 | 2021-02-23T18:17:35.000Z | 2021-07-23T14:54:13.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CESNET.
#
# CESNET OA Publication Repository is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
import datetime
import os
import uuid
from deepmerge import always_merger
from flask import url_for, jsonify, request, Response, abort
from flask_login import current_user
from invenio_db import db
from invenio_pidstore.errors import PIDDoesNotExistError, PIDDeletedError
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records_files.api import Record
from oarepo_actions.decorators import action
from oarepo_communities.converters import CommunityPIDValue
from oarepo_communities.permissions import read_permission_factory
from oarepo_communities.proxies import current_oarepo_communities
from oarepo_communities.record import CommunityRecordMixin
from oarepo_communities.views import json_abort
from oarepo_documents.api import getMetadataFromDOI
from oarepo_documents.document_json_mapping import schema_mapping
from oarepo_invenio_model import InheritedSchemaRecordMixin
from oarepo_records_draft import current_drafts
from oarepo_records_draft.record import DraftRecordMixin, InvalidRecordAllowedMixin
from oarepo_validate import SchemaKeepingRecordMixin, MarshmallowValidatedRecordMixin
from simplejson import JSONDecodeError
from .constants import (
ARTICLE_ALLOWED_SCHEMAS,
ARTICLE_PREFERRED_SCHEMA
)
from .marshmallow import ArticleMetadataSchemaV1
from .minters import article_minter
from .permissions import create_draft_object_permission_impl
# Elasticsearch index names for published, draft and combined article records.
published_index_name = 'articles-publication-article-v1.0.0'
draft_index_name = 'draft-articles-publication-article-v1.0.0'
all_index_name = 'all-articles'

# Deployment-specific index prefix (empty when INVENIO_SEARCH_INDEX_PREFIX
# is unset) applied to each of the names above.
prefixed_published_index_name = os.environ.get('INVENIO_SEARCH_INDEX_PREFIX', '') + published_index_name
prefixed_draft_index_name = os.environ.get('INVENIO_SEARCH_INDEX_PREFIX', '') + draft_index_name
prefixed_all_index_name = os.environ.get('INVENIO_SEARCH_INDEX_PREFIX', '') + all_index_name
class ArticleBaseRecord(MarshmallowValidatedRecordMixin,
                        SchemaKeepingRecordMixin,
                        InheritedSchemaRecordMixin,
                        CommunityRecordMixin,
                        Record):
    """Record class for an Article Record"""

    # JSON schemas this record may carry, and the one preferred for new records.
    ALLOWED_SCHEMAS = ARTICLE_ALLOWED_SCHEMAS
    PREFERRED_SCHEMA = ARTICLE_PREFERRED_SCHEMA
    # Marshmallow schema used to validate the record metadata.
    MARSHMALLOW_SCHEMA = ArticleMetadataSchemaV1
    # Presumably makes oarepo_validate also validate PATCHed documents —
    # confirm against oarepo_validate docs.
    VALIDATE_PATCH = True
class DOIRecordMixin:
    """Mixin adding a `from-doi/` collection action: look an article up by
    its DOI PID, falling back to CrossRef metadata resolution."""

    # PID type used for DOI persistent identifiers.
    DOI_PID_TYPE = 'doi'

    @classmethod
    @action(detail=False, url_path='from-doi/', method='post',
            permissions=create_draft_object_permission_impl)
    def from_doi(cls, **kwargs):
        """Returns an existing article record metadata by its DOI PID.

        If no record is found, tries to resolve article metadata from
        DOI using CrossRef client. Responds 401/403 on missing read
        permission and 404 when the DOI cannot be resolved at all.
        """
        doi = request.json['doi']
        doi_pid = None
        try:
            doi_pid = PersistentIdentifier.get(cls.DOI_PID_TYPE, doi)
        except PIDDoesNotExistError:
            # Unknown DOI: fall through to CrossRef resolution below.
            pass
        except PIDDeletedError:
            # Deleted record: treated the same as an unknown DOI.
            pass

        if doi_pid:
            # Found existing article record with this DOI
            record = cls.get_record(doi_pid.object_uuid)

            # Check if user has permission to read the article
            if not read_permission_factory(record).can():
                from flask_login import current_user
                # NOTE(review): this local import shadows the module-level
                # `current_user` import and looks redundant — confirm.
                if not current_user.is_authenticated:
                    abort(401)
                abort(403)

            # Get REST endpoint config and Invenio PID for the record
            endpoint = current_drafts.endpoint_for_record(record)
            pid_type = endpoint.pid_type
            pid_value = record.model.json['id']
            primary_pid = PersistentIdentifier.get(pid_type, pid_value)

            links_factory_imp = endpoint.rest.get('links_factory_imp')
            links = {}
            if links_factory_imp:
                links = links_factory_imp(primary_pid, record)

            return jsonify(dict(
                article=record.dumps(),
                links=links
            ))
        else:
            # Try to resolve record metadata from DOI with CrossRef
            metadata = None
            try:
                resolved_document = getMetadataFromDOI(doi)
                metadata = schema_mapping(existing_record=resolved_document, doi=doi)
                metadata['datasets'] = []
            except JSONDecodeError as e:
                json_abort(404, 'DOI could not be resolved: %s' % e)

            if metadata:
                return jsonify(dict(article=metadata))

        # Reached when CrossRef resolution yielded no usable metadata.
        json_abort(404, 'Article not found by given DOI.')
class ArticleRecord(InvalidRecordAllowedMixin, ArticleBaseRecord):
    """Published article record, indexed into the published-articles index."""

    index_name = published_index_name
    # JSON schema assigned to published article records.
    _schema = 'publication-article-v1.0.0.json'

    @property
    def canonical_url(self):
        """Canonical REST item URL of this record within its primary community."""
        return url_for(f'invenio_records_rest.articles_item',
                       pid_value=CommunityPIDValue(
                           self['id'],
                           current_oarepo_communities.get_primary_community_field(self)
                       ), _external=True)
class ArticleDraftRecord(DraftRecordMixin, DOIRecordMixin, ArticleBaseRecord):
    """Draft (unpublished) article record, indexed separately from the
    published records and creatable from a DOI or from raw metadata."""

    index_name = draft_index_name

    @property
    def canonical_url(self):
        """Canonical REST item URL of this draft within its primary community."""
        return url_for('invenio_records_rest.draft-articles_item',
                       pid_value=CommunityPIDValue(
                           self['id'],
                           current_oarepo_communities.get_primary_community_field(self)
                       ), _external=True)

    def validate(self, *args, **kwargs):
        """Stamp provenance metadata, then delegate to the base validation.

        Sets `created` (only once) and `creator` (the current user's email,
        or 'anonymous' for unauthenticated requests) and refreshes
        `modified` on every validation pass.
        """
        if 'created' not in self:
            self['created'] = datetime.date.today().strftime('%Y-%m-%d')
        if 'creator' not in self:
            if current_user.is_authenticated:
                self['creator'] = current_user.email
            else:
                self['creator'] = 'anonymous'
        self['modified'] = datetime.date.today().strftime('%Y-%m-%d')
        return super().validate(*args, **kwargs)

    # temporary solution todo: delet this and create own doi
    @classmethod
    @action(detail=False, url_path='without_doi/', method='post')
    def without_doi(cls, **kwargs):
        """Create and index a draft article that has no DOI yet.

        Builds minimal article metadata (title/abstract/authors/document
        type + the referencing dataset URL) from the request payload, mints
        a new article PID, persists and indexes the record, registers a
        'dpsart' PID for it and redirects (302) to the record's canonical
        URL.
        """
        changes = request.json['changes']
        authors = request.json['authors']
        datasetUrl = request.json['datasetUrl']
        article = {}
        # Title/abstract are multilingual maps keyed by language code.
        article['title'] = {changes['title_lang']: changes['title_val']}
        article['abstract'] = {changes['abstract_lang']: changes['abstract_val']}
        article['authors'] = authors
        article['document_type'] = changes['document_type']
        always_merger.merge(article, {
            "_primary_community": 'cesnet',
            "access_right_category": "success"
        })
        article['datasets'] = [datasetUrl]
        record_uuid = uuid.uuid4()
        pid = article_minter(record_uuid, article)
        record = cls.create(data=article, id_=record_uuid)
        indexer = cls.DOCUMENT_INDEXER()
        indexer.index(record)
        PersistentIdentifier.create('dpsart', pid.pid_value, object_type='rec',
                                    object_uuid=record_uuid,
                                    status=PIDStatus.REGISTERED)
        db.session.commit()
        return Response(status=302, headers={"Location": record.canonical_url})
class AllArticlesRecord(DOIRecordMixin, ArticleRecord):
    """Article record visible through the combined all-articles index."""
    # Elasticsearch index spanning both published and draft articles.
    index_name = all_index_name
| 38.872449 | 104 | 0.669248 |
acf7a98db14d8338f11b449ccefe8c0c51077dda | 2,003 | py | Python | tensorflow_examples/lite/model_maker/core/api/api_gen_test.py | duy-maimanh/examples | 67ed12fd0adbe22469b6fac916d96e27f02a7330 | [
"Apache-2.0"
] | 6,484 | 2019-02-13T21:32:29.000Z | 2022-03-31T20:50:20.000Z | tensorflow_examples/lite/model_maker/core/api/api_gen_test.py | duy-maimanh/examples | 67ed12fd0adbe22469b6fac916d96e27f02a7330 | [
"Apache-2.0"
] | 288 | 2019-02-13T22:56:03.000Z | 2022-03-24T11:15:19.000Z | tensorflow_examples/lite/model_maker/core/api/api_gen_test.py | duy-maimanh/examples | 67ed12fd0adbe22469b6fac916d96e27f02a7330 | [
"Apache-2.0"
] | 7,222 | 2019-02-13T21:39:34.000Z | 2022-03-31T22:23:54.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for API generation."""
import json
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core.api import api_gen
from tensorflow_examples.lite.model_maker.core.api import api_util
from tensorflow_examples.lite.model_maker.core.api import golden_api_doc
from tensorflow_examples.lite.model_maker.core.api import include # pylint: disable=unused-import
class ApiGenTest(tf.test.TestCase):
    """Checks that the exported API matches the checked-in golden files."""

    def test_golden_api(self):
        """Generated imports must equal the contents of `golden_api.json`."""
        actual = api_util.generate_imports()
        expected = api_gen.load_golden('golden_api.json')
        actual_json = json.dumps(actual, indent=2, sort_keys=True)
        golden_text = api_gen._read_golden_text('golden_api.json')
        failure_msg = ('Exported APIs do not match `golden_api.json`. Please check it.\n\n'
                       'Imports in json format: \n{}\n\n\n'
                       'Golden file content:\n{}\n\n').format(actual_json, golden_text)
        self.assertDictEqual(actual, expected, failure_msg)

    def test_golden_api_doc(self):
        """Package keys of the golden API and its doc table must match."""
        api_keys = list(api_gen.load_golden('golden_api.json').keys())
        doc_keys = list(golden_api_doc.DOCS.keys())
        failure_msg = ('Expect package keys are matched: \n'
                       'In `golden_api.json`: \n{}\n\n'
                       'In `golden_api_doc.py`: \n{}\n\n').format(api_keys, doc_keys)
        self.assertListEqual(api_keys, doc_keys, failure_msg)
# Run the golden-API checks when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 37.792453 | 98 | 0.734898 |
acf7a98f9e1af0775905219e9516865857f80b95 | 1,402 | py | Python | behavior/interpreter/interpreter.py | lockeCucumber/DesignPatterns | 9681aea059d6b29466077910662889801cee3703 | [
"MIT"
] | 2 | 2019-07-22T09:22:30.000Z | 2021-04-24T22:32:35.000Z | behavior/interpreter/interpreter.py | lockeCucumber/DesignPatterns | 9681aea059d6b29466077910662889801cee3703 | [
"MIT"
] | null | null | null | behavior/interpreter/interpreter.py | lockeCucumber/DesignPatterns | 9681aea059d6b29466077910662889801cee3703 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Interpreter(object):
    """Left-to-right evaluator for space-separated arithmetic expressions.

    Example: ``Interpreter("1 + 2 + 6").calculate()`` prints and returns 9.
    Operators are applied strictly left to right, with NO precedence:
    ``"10 - 2 * 3"`` evaluates to 24, not 4.
    """

    def __init__(self, command):
        # Tokens are separated by single spaces, e.g. "1 + 2 + 6".
        self.stack = command.split(' ')
        self.dynamic_stack = []

    def calculate(self):
        """Evaluate the token stream; print and return the final value.

        A lone operand is returned as the original string token.
        NOTE: partial expressions are combined with ``eval`` -- only ever
        feed trusted input to this class.
        """
        pending_op = ""
        for token in self.stack:
            if token in ['+', '-', '*', '/']:
                pending_op = token
            else:
                if pending_op:
                    left = self.dynamic_stack.pop()
                    right = token
                    # e.g. eval("1+2"); operands are small numeric literals.
                    result = eval(str(left) + pending_op + right)
                    self.dynamic_stack.append(result)
                    pending_op = ""
                else:
                    self.dynamic_stack.append(token)
        # print() with a single argument is valid in both Python 2 and 3;
        # the original used the Python-2-only print statement.
        print(self.dynamic_stack[0])
        return self.dynamic_stack[0]
def main():
    """Run a small demo of the Interpreter (prints 9)."""
    Interpreter("1 + 2 + 6").calculate()
# Allow running this module as a script.
if __name__ == '__main__':
    main()
# Interpreter Pattern: provides a way to evaluate the grammar or expressions of a language; it belongs to the behavioural patterns.
# The pattern implements an expression interface that interprets a particular context. It is used in SQL parsing, symbol-processing engines, etc.
# Intent: given a language, define a representation of its grammar together with an interpreter that uses that representation to interpret sentences in the language.
# Problem solved: building an interpreter for sentences of some fixed grammar.
# When to use: if a particular kind of problem occurs often enough, it may be worthwhile to express its instances as sentences in a simple language and build an interpreter that solves the problem by interpreting those sentences.
# How to solve: build a syntax tree; define terminal and non-terminal symbols.
# Key code: a context/environment class holding global information needed outside the interpreter, usually a hash map.
# Typical uses: compilers, evaluation of arithmetic expressions.
# Pros: 1. good extensibility and flexibility; 2. adds new ways of interpreting expressions; 3. simple grammars are easy to implement.
# Cons: 1. few applicable scenarios; 2. complex grammars are hard to maintain; 3. tends to cause class explosion; 4. relies on recursive calls.
# 使用场景: 1、可以将一个需要解释执行的语言中的句子表示为一个抽象语法树。 2、一些重复出现的问题可以用一种简单的语言来进行表达。 3、一个简单语法需要解释的场景。 | 26.45283 | 89 | 0.609843 |
acf7a99839360c619596983413b4667e281cabd5 | 9,174 | py | Python | conans/client/generators/cmake_find_package_common.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | 2 | 2019-01-09T10:01:29.000Z | 2019-01-09T10:01:31.000Z | conans/client/generators/cmake_find_package_common.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | 1 | 2019-01-09T10:09:41.000Z | 2019-01-09T10:09:41.000Z | conans/client/generators/cmake_find_package_common.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | null | null | null | import textwrap
from conans.client.generators.cmake_common import CMakeCommonMacros
target_template = """
set({name}_INCLUDE_DIRS{build_type_suffix} {deps.include_paths})
set({name}_INCLUDE_DIR{build_type_suffix} {deps.include_path})
set({name}_INCLUDES{build_type_suffix} {deps.include_paths})
set({name}_RES_DIRS{build_type_suffix} {deps.res_paths})
set({name}_DEFINITIONS{build_type_suffix} {deps.defines})
set({name}_LINKER_FLAGS{build_type_suffix}_LIST
$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:{deps.sharedlinkflags_list}>
$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:{deps.sharedlinkflags_list}>
$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:{deps.exelinkflags_list}>
)
set({name}_COMPILE_DEFINITIONS{build_type_suffix} {deps.compile_definitions})
set({name}_COMPILE_OPTIONS{build_type_suffix}_LIST "{deps.cxxflags_list}" "{deps.cflags_list}")
set({name}_LIBRARIES_TARGETS{build_type_suffix} "") # Will be filled later, if CMake 3
set({name}_LIBRARIES{build_type_suffix} "") # Will be filled later
set({name}_LIBS{build_type_suffix} "") # Same as {name}_LIBRARIES
set({name}_SYSTEM_LIBS{build_type_suffix} {deps.system_libs})
set({name}_FRAMEWORK_DIRS{build_type_suffix} {deps.framework_paths})
set({name}_FRAMEWORKS{build_type_suffix} {deps.frameworks})
set({name}_FRAMEWORKS_FOUND{build_type_suffix} "") # Will be filled later
set({name}_BUILD_MODULES_PATHS{build_type_suffix} {deps.build_modules_paths})
conan_find_apple_frameworks({name}_FRAMEWORKS_FOUND{build_type_suffix} "${{{name}_FRAMEWORKS{build_type_suffix}}}" "${{{name}_FRAMEWORK_DIRS{build_type_suffix}}}")
mark_as_advanced({name}_INCLUDE_DIRS{build_type_suffix}
{name}_INCLUDE_DIR{build_type_suffix}
{name}_INCLUDES{build_type_suffix}
{name}_DEFINITIONS{build_type_suffix}
{name}_LINKER_FLAGS{build_type_suffix}_LIST
{name}_COMPILE_DEFINITIONS{build_type_suffix}
{name}_COMPILE_OPTIONS{build_type_suffix}_LIST
{name}_LIBRARIES{build_type_suffix}
{name}_LIBS{build_type_suffix}
{name}_LIBRARIES_TARGETS{build_type_suffix})
# Find the real .lib/.a and add them to {name}_LIBS and {name}_LIBRARY_LIST
set({name}_LIBRARY_LIST{build_type_suffix} {deps.libs})
set({name}_LIB_DIRS{build_type_suffix} {deps.lib_paths})
# Gather all the libraries that should be linked to the targets (do not touch existing variables):
set(_{name}_DEPENDENCIES{build_type_suffix} "${{{name}_FRAMEWORKS_FOUND{build_type_suffix}}} ${{{name}_SYSTEM_LIBS{build_type_suffix}}} {deps_names}")
conan_package_library_targets("${{{name}_LIBRARY_LIST{build_type_suffix}}}" # libraries
"${{{name}_LIB_DIRS{build_type_suffix}}}" # package_libdir
"${{_{name}_DEPENDENCIES{build_type_suffix}}}" # deps
{name}_LIBRARIES{build_type_suffix} # out_libraries
{name}_LIBRARIES_TARGETS{build_type_suffix} # out_libraries_targets
"{build_type_suffix}" # build_type
"{name}") # package_name
set({name}_LIBS{build_type_suffix} ${{{name}_LIBRARIES{build_type_suffix}}})
foreach(_FRAMEWORK ${{{name}_FRAMEWORKS_FOUND{build_type_suffix}}})
list(APPEND {name}_LIBRARIES_TARGETS{build_type_suffix} ${{_FRAMEWORK}})
list(APPEND {name}_LIBRARIES{build_type_suffix} ${{_FRAMEWORK}})
endforeach()
foreach(_SYSTEM_LIB ${{{name}_SYSTEM_LIBS{build_type_suffix}}})
list(APPEND {name}_LIBRARIES_TARGETS{build_type_suffix} ${{_SYSTEM_LIB}})
list(APPEND {name}_LIBRARIES{build_type_suffix} ${{_SYSTEM_LIB}})
endforeach()
# We need to add our requirements too
set({name}_LIBRARIES_TARGETS{build_type_suffix} "${{{name}_LIBRARIES_TARGETS{build_type_suffix}}};{deps_names}")
set({name}_LIBRARIES{build_type_suffix} "${{{name}_LIBRARIES{build_type_suffix}}};{deps_names}")
set(CMAKE_MODULE_PATH {deps.build_paths} ${{CMAKE_MODULE_PATH}})
set(CMAKE_PREFIX_PATH {deps.build_paths} ${{CMAKE_PREFIX_PATH}})
foreach(_BUILD_MODULE_PATH ${{{name}_BUILD_MODULES_PATHS{build_type_suffix}}})
include(${{_BUILD_MODULE_PATH}})
endforeach()
"""
def find_transitive_dependencies(public_deps_names, find_modules):
    """Return CMake code that pulls in the package's public dependencies.

    ``find_modules`` selects the snippet flavour: the cmake_find_package
    generator uses a plain ``find_dependency`` call, while the
    cmake_find_package_multi generator needs the CONFIG-mode (``NO_MODULE``)
    variant with a CMake-version guard (``find_dependency`` only accepts
    extra arguments from CMake 3.9 on; see conan-io/conan#4994 and #5040).
    """
    if find_modules:  # cmake_find_package generator
        snippet = textwrap.dedent("""
            if(NOT {dep_name}_FOUND)
                find_dependency({dep_name} REQUIRED)
            else()
                message(STATUS "Dependency {dep_name} already found")
            endif()
            """)
    else:  # cmake_find_package_multi generator
        snippet = textwrap.dedent("""
            if(NOT {dep_name}_FOUND)
                if(${{CMAKE_VERSION}} VERSION_LESS "3.9.0")
                    find_package({dep_name} REQUIRED NO_MODULE)
                else()
                    find_dependency({dep_name} REQUIRED NO_MODULE)
                endif()
            else()
                message(STATUS "Dependency {dep_name} already found")
            endif()
            """)
    header = ["", "# Library dependencies", "include(CMakeFindDependencyMacro)"]
    body = [snippet.format(dep_name=name) for name in public_deps_names]
    return "\n".join(header + body)
class CMakeFindPackageCommonMacros:
    """CMake helper snippets shared by the cmake_find_package* generators.

    Each attribute holds CMake code that the generators paste verbatim into
    the produced ``Find<pkg>.cmake`` / ``<pkg>Config.cmake`` files.
    """

    # Reuse the "conan_message" macro from the common CMake generator helpers.
    conan_message = CMakeCommonMacros.conan_message

    # Resolves each Apple framework name to an absolute path (no-op when not
    # building on APPLE); aborts the configure step if one cannot be found.
    apple_frameworks_macro = textwrap.dedent("""
        macro(conan_find_apple_frameworks FRAMEWORKS_FOUND FRAMEWORKS FRAMEWORKS_DIRS)
            if(APPLE)
                foreach(_FRAMEWORK ${FRAMEWORKS})
                    # https://cmake.org/pipermail/cmake-developers/2017-August/030199.html
                    find_library(CONAN_FRAMEWORK_${_FRAMEWORK}_FOUND NAME ${_FRAMEWORK} PATHS ${FRAMEWORKS_DIRS})
                    if(CONAN_FRAMEWORK_${_FRAMEWORK}_FOUND)
                        list(APPEND ${FRAMEWORKS_FOUND} ${CONAN_FRAMEWORK_${_FRAMEWORK}_FOUND})
                    else()
                        message(FATAL_ERROR "Framework library ${_FRAMEWORK} not found in paths: ${FRAMEWORKS_DIRS}")
                    endif()
                endforeach()
            endif()
        endmacro()
    """)

    # Locates each declared library inside the package's lib dirs, creating
    # one CONAN_LIB:: imported micro-target per found library (CMake >= 3.0);
    # names not found are passed through as (assumed) system libraries.
    conan_package_library_targets = textwrap.dedent("""
        function(conan_package_library_targets libraries package_libdir deps out_libraries out_libraries_target build_type package_name)
            unset(_CONAN_ACTUAL_TARGETS CACHE)
            unset(_CONAN_FOUND_SYSTEM_LIBS CACHE)
            foreach(_LIBRARY_NAME ${libraries})
                find_library(CONAN_FOUND_LIBRARY NAME ${_LIBRARY_NAME} PATHS ${package_libdir}
                             NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
                if(CONAN_FOUND_LIBRARY)
                    conan_message(STATUS "Library ${_LIBRARY_NAME} found ${CONAN_FOUND_LIBRARY}")
                    list(APPEND _out_libraries ${CONAN_FOUND_LIBRARY})
                    if(NOT ${CMAKE_VERSION} VERSION_LESS "3.0")
                        # Create a micro-target for each lib/a found
                        set(_LIB_NAME CONAN_LIB::${package_name}_${_LIBRARY_NAME}${build_type})
                        if(NOT TARGET ${_LIB_NAME})
                            # Create a micro-target for each lib/a found
                            add_library(${_LIB_NAME} UNKNOWN IMPORTED)
                            set_target_properties(${_LIB_NAME} PROPERTIES IMPORTED_LOCATION ${CONAN_FOUND_LIBRARY})
                            set(_CONAN_ACTUAL_TARGETS ${_CONAN_ACTUAL_TARGETS} ${_LIB_NAME})
                        else()
                            conan_message(STATUS "Skipping already existing target: ${_LIB_NAME}")
                        endif()
                        list(APPEND _out_libraries_target ${_LIB_NAME})
                    endif()
                    conan_message(STATUS "Found: ${CONAN_FOUND_LIBRARY}")
                else()
                    conan_message(STATUS "Library ${_LIBRARY_NAME} not found in package, might be system one")
                    list(APPEND _out_libraries_target ${_LIBRARY_NAME})
                    list(APPEND _out_libraries ${_LIBRARY_NAME})
                    set(_CONAN_FOUND_SYSTEM_LIBS "${_CONAN_FOUND_SYSTEM_LIBS};${_LIBRARY_NAME}")
                endif()
                unset(CONAN_FOUND_LIBRARY CACHE)
            endforeach()
            if(NOT ${CMAKE_VERSION} VERSION_LESS "3.0")
                # Add all dependencies to all targets
                string(REPLACE " " ";" deps_list "${deps}")
                foreach(_CONAN_ACTUAL_TARGET ${_CONAN_ACTUAL_TARGETS})
                    set_property(TARGET ${_CONAN_ACTUAL_TARGET} PROPERTY INTERFACE_LINK_LIBRARIES "${_CONAN_FOUND_SYSTEM_LIBS};${deps_list}")
                endforeach()
            endif()
            set(${out_libraries} ${_out_libraries} PARENT_SCOPE)
            set(${out_libraries_target} ${_out_libraries_target} PARENT_SCOPE)
        endfunction()
    """)
| 53.028902 | 163 | 0.649335 |
acf7a9f2ee94f4c250223d9b9b8ec0f20b090480 | 193 | py | Python | python/testData/quickFixes/PyTypeHintsQuickFixTest/instanceCheckOnReference.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/quickFixes/PyTypeHintsQuickFixTest/instanceCheckOnReference.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/quickFixes/PyTypeHintsQuickFixTest/instanceCheckOnReference.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | from typing import Callable
class A:
pass
C = Callable[..., str]
assert issubclass(A, <error descr="Parameterized generics cannot be used with instance and class checks"><caret>C</error>) | 27.571429 | 122 | 0.740933 |
acf7aa857643205d7b316d4b41c5a58ff6f6aea8 | 888 | py | Python | setup.py | retr0-13/pypykatz | 0d689fbd819c52ca46709737b2c063f40e302832 | [
"MIT"
] | null | null | null | setup.py | retr0-13/pypykatz | 0d689fbd819c52ca46709737b2c063f40e302832 | [
"MIT"
] | null | null | null | setup.py | retr0-13/pypykatz | 0d689fbd819c52ca46709737b2c063f40e302832 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
# Application name:
name="pypykatz",
# Version number (initial):
version="0.0.2",
# Application author details:
author="Tamas Jos",
author_email="info@skelsec.com",
# Packages
packages=find_packages(),
# Include additional files into the package
include_package_data=True,
# Details
url="https://github.com/skelsec/pypykatz",
zip_safe = True,
#
# license="LICENSE.txt",
description="Python implementation of Mimikatz",
# long_description=open("README.txt").read(),
python_requires='>=3.6',
classifiers=(
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
install_requires=[
'minidump>=0.0.6',
'minikerberos>=0.0.4',
],
entry_points={
'console_scripts': [
'pypykatz = pypykatz.bin.pypykatz_main:main',
],
}
) | 18.5 | 49 | 0.688063 |
acf7ab52c61999289a4c06004aee83e5e832beb2 | 347 | py | Python | tests/resources/test_resource_paths.py | iriusrisk/startleft | 2833a09ca3c39636b34116bb0f8db0756643f05b | [
"Apache-2.0"
] | 9 | 2021-08-25T16:08:25.000Z | 2022-03-22T14:12:05.000Z | tests/resources/test_resource_paths.py | iriusrisk/startleft | 2833a09ca3c39636b34116bb0f8db0756643f05b | [
"Apache-2.0"
] | 4 | 2021-08-17T13:30:43.000Z | 2022-03-15T07:53:13.000Z | tests/resources/test_resource_paths.py | iriusrisk/startleft | 2833a09ca3c39636b34116bb0f8db0756643f05b | [
"Apache-2.0"
] | null | null | null | import os
# Absolute paths to test fixture files, resolved relative to this module.
cloudformation_for_mappings_tests_json = os.path.dirname(__file__) + '/cloudformation_for_mappings_tests.json'
# Default CloudFormation mapping shipped inside the startleft package.
default_mapping = os.path.dirname(__file__)+'/../../startleft/config/default-cloudformation-mapping.yaml'
example_json = os.path.dirname(__file__) + '/example.json'
example_yaml = os.path.dirname(__file__) + '/example.yaml'
| 43.375 | 110 | 0.792507 |
acf7abb86b472f2aca52393db50eb2ab10df95d1 | 12,051 | py | Python | models/siampose_directReg.py | Chesterfun/SiamPose | 1b984f4873d071a4049bb31067d0215624c8b807 | [
"MIT"
] | 9 | 2019-12-02T05:11:19.000Z | 2021-05-12T03:39:26.000Z | models/siampose_directReg.py | Chesterfun/SiamPose | 1b984f4873d071a4049bb31067d0215624c8b807 | [
"MIT"
] | null | null | null | models/siampose_directReg.py | Chesterfun/SiamPose | 1b984f4873d071a4049bb31067d0215624c8b807 | [
"MIT"
] | 3 | 2019-08-29T06:19:02.000Z | 2021-12-25T11:51:56.000Z | # --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.anchors import Anchors
class JointsMSELoss(nn.Module):
    """Per-joint keypoint regression loss (SmoothL1, despite the name).

    Predictions and targets are reshaped to ``(batch, num_joints, -1)`` and
    the criterion is averaged over joints.  With ``use_target_weight`` set,
    each joint's prediction/target pair is scaled by its per-joint weight
    before the loss is computed.
    """

    def __init__(self, use_target_weight, num_joints=17):
        super(JointsMSELoss, self).__init__()
        self.criterion = nn.SmoothL1Loss()
        self.use_target_weight = use_target_weight
        # Number of keypoints per instance (COCO uses 17).  Previously a
        # hard-coded constant inside forward(); parameterized for reuse.
        self.num_joints = num_joints

    def forward(self, output, target, target_weight):
        """Compute the joint-averaged loss.

        :param output: predictions, reshapeable to (batch, num_joints, -1)
        :param target: ground truth with the same total size as ``output``
        :param target_weight: per-joint weights of shape (batch, num_joints)
        """
        target = target.float()
        # (batch, num_joints) -> (batch, num_joints, 1) so it broadcasts
        # against each joint's flattened prediction.
        target_weight = torch.unsqueeze(target_weight, -1)
        batch_size = output.size(0)
        num_joints = self.num_joints
        heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1)
        heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
        loss = 0
        for idx in range(num_joints):
            heatmap_pred = heatmaps_pred[idx].squeeze()
            heatmap_gt = heatmaps_gt[idx].squeeze()
            if self.use_target_weight:
                loss += self.criterion(
                    heatmap_pred.mul(target_weight[:, idx]),
                    heatmap_gt.mul(target_weight[:, idx])
                )
            else:
                # NOTE(review): only the unweighted branch carries the 0.5
                # factor -- asymmetry preserved from the original; confirm
                # whether it is intentional.
                loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
        return loss / num_joints
class SiamMask(nn.Module):
    """Siamese tracker with an RPN head (cls + loc) and a keypoint branch.

    Subclasses are expected to plug in ``self.features`` (backbone),
    ``self.rpn_model`` and ``self.mask_model``.  NOTE(review): ``self.method``
    (read by ``_add_rpn_loss``) is never assigned in this class -- it is
    presumably set by a subclass or training script; confirm.
    """

    def __init__(self, anchors=None, o_sz=63, g_sz=127):
        super(SiamMask, self).__init__()
        self.anchors = anchors  # anchor_cfg
        # One anchor per (ratio, scale) pair at each spatial position.
        self.anchor_num = len(self.anchors["ratios"]) * len(self.anchors["scales"])
        self.anchor = Anchors(anchors)
        # Backbone / heads are injected by subclasses.
        self.features = None
        self.rpn_model = None
        self.mask_model = None
        self.o_sz = o_sz
        self.g_sz = g_sz
        self.upSample = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])
        self.kp_criterion = JointsMSELoss(True)
        self.all_anchors = None

    def set_all_anchors(self, image_center, size):
        # cx,cy,w,h
        if not self.anchor.generate_all_anchors(image_center, size):
            return
        all_anchors = self.anchor.all_anchors[1]  # cx, cy, w, h
        self.all_anchors = torch.from_numpy(all_anchors).float().cuda()
        # Keep the four coordinate planes as a list of tensors.
        self.all_anchors = [self.all_anchors[i] for i in range(4)]

    def feature_extractor(self, x):
        """Backbone features for either the template or the search crop."""
        return self.features(x)

    def rpn(self, template, search):
        """Classification and localisation predictions from the RPN head."""
        pred_cls, pred_loc = self.rpn_model(template, search)
        return pred_cls, pred_loc

    def mask(self, template, search):
        """Keypoint/mask predictions from the mask head."""
        pred_mask = self.mask_model(template, search)
        return pred_mask

    def _add_rpn_loss(self, label_cls, label_loc, lable_loc_weight, label_mask, label_mask_weight,
                      rpn_pred_cls, rpn_pred_loc, rpn_pred_mask, kp_weight, kp_criterion):
        """Compute cls/loc/keypoint losses; dispatch on ``self.method``."""
        rpn_loss_cls = select_cross_entropy_loss(rpn_pred_cls, label_cls)
        rpn_loss_loc = weight_l1_loss(rpn_pred_loc, label_loc, lable_loc_weight)
        if self.method == 'direct':
            rpn_loss_mask, pred_kp, gt_kp = select_mask_logistic_loss(rpn_pred_mask,
                                                                      label_mask,
                                                                      label_mask_weight,
                                                                      kp_weight,
                                                                      kp_criterion)
        elif self.method == 'rel1':
            rpn_loss_mask, pred_kp, gt_kp = select_mask_logistic_loss_rel1(rpn_pred_mask,
                                                                           label_mask,
                                                                           label_mask_weight,
                                                                           kp_weight,
                                                                           kp_criterion)
        else:
            raise NotImplementedError
        return rpn_loss_cls, rpn_loss_loc, rpn_loss_mask, pred_kp, gt_kp

    def run(self, template, search, softmax=False):
        """
        run network
        """
        template_feature = self.feature_extractor(template)
        search_feature = self.feature_extractor(search)
        rpn_pred_cls, rpn_pred_loc = self.rpn(template_feature, search_feature)
        rpn_pred_mask = self.mask(template_feature, search_feature)
        if softmax:
            rpn_pred_cls = self.softmax(rpn_pred_cls)
        return rpn_pred_cls, rpn_pred_loc, rpn_pred_mask, template_feature, search_feature

    def softmax(self, cls):
        """Log-softmax over the 2 fg/bg scores of each anchor."""
        b, a2, h, w = cls.size()
        cls = cls.view(b, 2, a2 // 2, h, w)
        cls = cls.permute(0, 2, 3, 4, 1).contiguous()
        cls = F.log_softmax(cls, dim=4)
        return cls

    def forward(self, input):
        """
        :param input: dict of input with keys of:
                'template': [b, 3, h1, w1], input template image.
                'search': [b, 3, h2, w2], input search image.
                'label_cls':[b, max_num_gts, 5] or None(self.training==False),
                             each gt contains x1,y1,x2,y2,class.
        :return: dict of loss, predict, accuracy
        """
        template = input['template']
        search = input['search']
        if self.training:
            label_cls = input['label_cls']
            label_loc = input['label_loc']
            lable_loc_weight = input['label_loc_weight']
            label_mask = input['label_mask']
            label_mask_weight = input['label_mask_weight']
            label_kp_weight = input['label_kp_weight']
            label_kp = input['label_kp']
        rpn_pred_cls, rpn_pred_loc, rpn_pred_mask, template_feature, search_feature = \
            self.run(template, search, softmax=self.training)
        outputs = dict()
        outputs['predict'] = [rpn_pred_loc, rpn_pred_cls, rpn_pred_mask, template_feature, search_feature]
        if self.training:
            # Note: the keypoint labels (label_kp) are passed as the "mask"
            # argument of _add_rpn_loss; label_mask itself is unused here.
            rpn_loss_cls, rpn_loss_loc, rpn_loss_mask, pred_kp, gt_kp = \
                self._add_rpn_loss(label_cls, label_loc, lable_loc_weight, label_kp, label_mask_weight,
                                   rpn_pred_cls, rpn_pred_loc, rpn_pred_mask,
                                   label_kp_weight, self.kp_criterion)
            outputs['losses'] = [rpn_loss_cls, rpn_loss_loc, rpn_loss_mask]
            outputs['predict'].append(gt_kp)
            outputs['predict'].append(pred_kp)
        # outputs['accuracy'] = [iou_acc_mean, iou_acc_5, iou_acc_7]
        return outputs

    def template(self, z):
        """Cache the exemplar features and derive the tracking kernels."""
        self.zf = self.feature_extractor(z)
        cls_kernel, loc_kernel = self.rpn_model.template(self.zf)
        return cls_kernel, loc_kernel

    def track(self, x, cls_kernel=None, loc_kernel=None, softmax=False):
        """Single tracking step on a search crop using cached kernels."""
        xf = self.feature_extractor(x)
        rpn_pred_cls, rpn_pred_loc = self.rpn_model.track(xf, cls_kernel, loc_kernel)
        if softmax:
            rpn_pred_cls = self.softmax(rpn_pred_cls)
        return rpn_pred_cls, rpn_pred_loc
def get_cls_loss(pred, label, select):
    """NLL loss over the rows of ``pred``/``label`` chosen by ``select``.

    Returns a differentiable zero tensor when the selection is empty
    (avoids nan from an empty nll_loss reduction).
    """
    if select.nelement() == 0:
        return pred.sum() * 0.
    chosen_pred = torch.index_select(pred, 0, select)
    chosen_label = torch.index_select(label, 0, select)
    return F.nll_loss(chosen_pred, chosen_label)
def select_cross_entropy_loss(pred, label):
    """Balanced anchor classification loss.

    ``pred`` is flattened to (N, 2) log-probabilities and ``label`` to (N,);
    anchors labelled 1 are positives and 0 negatives (any other value is
    ignored).  The result is the equal-weight average of the two NLL terms.
    """
    flat_pred = pred.view(-1, 2)
    flat_label = label.view(-1)
    pos_idx = Variable(flat_label.data.eq(1).nonzero().squeeze()).cuda()
    neg_idx = Variable(flat_label.data.eq(0).nonzero().squeeze()).cuda()
    pos_loss = get_cls_loss(flat_pred, flat_label, pos_idx)
    neg_loss = get_cls_loss(flat_pred, flat_label, neg_idx)
    return 0.5 * pos_loss + 0.5 * neg_loss
def weight_l1_loss(pred_loc, label_loc, loss_weight):
    """Weighted L1 localisation loss, averaged over the batch.

    :param pred_loc: [b, 4k, h, w] predicted box offsets
    :param label_loc: [b, 4, k, h, w] ground-truth offsets
    :param loss_weight: [b, k, h, w] per-anchor weights
    :return: scalar loss value
    """
    batch, _, height, width = pred_loc.size()
    # Group the 4 box coordinates of each anchor: [b, 4, k, h, w].
    grouped = pred_loc.view(batch, 4, -1, height, width)
    # Sum the absolute error over the 4 coordinates -> [b, k, h, w].
    abs_err = (grouped - label_loc).abs().sum(dim=1).view(batch, -1, height, width)
    return (abs_err * loss_weight).sum().div(batch)
def select_mask_logistic_loss(p_m, mask, weight, kp_weight, criterion, o_sz=63, g_sz=127):
    """Keypoint loss evaluated only at positive anchor positions.

    ``weight`` marks positive anchors; the per-sample keypoint targets and
    weights are broadcast across every spatial anchor position and then
    gathered where ``weight == 1`` before applying ``criterion``.
    ``o_sz``/``g_sz`` are unused in this variant.
    NOTE(review): assumes ``mask`` can be viewed as (bs, 1, 1, 1, 17, 2),
    i.e. 17 (x, y) keypoints per sample -- confirm against the caller.
    """
    # mask = mask[:, 0, :, :]
    # mask = mask.unsqueeze(1)
    # print('mask shape: ', mask.shape)
    # print('pred mask shape: ', p_m.shape)
    # print('mask weight shape: ', weight.shape)
    # print('kp weight shape: ', kp_weight.shape)
    # Broadcast the per-sample joint weights over all anchor positions.
    kp_weight_pos = kp_weight.view(kp_weight.size(0), 1, 1, 1, -1)
    kp_weight_pos = kp_weight_pos.expand(-1,
                                         weight.size(1),
                                         weight.size(2),
                                         weight.size(3),
                                         -1).contiguous()
    # (bs, 1, 25, 25, 17)
    kp_weight_pos = kp_weight_pos.view(-1, 17)
    # Same broadcast for the (x, y) keypoint targets.
    mask_weight_pos = mask.view(mask.size(0), 1, 1, 1, -1, 2)
    mask_weight_pos = mask_weight_pos.expand(-1,
                                             weight.size(1),
                                             weight.size(2),
                                             weight.size(3),
                                             -1, -1).contiguous()
    # (bs, 1, 25, 25, 17)
    mask_weight_pos = mask_weight_pos.view(-1, 17, 2)
    weight = weight.view(-1)
    # Flat indices of the positive anchors.
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    kp_weight = torch.index_select(kp_weight_pos, 0, pos)
    mask = torch.index_select(mask_weight_pos, 0, pos)
    # print('pose shape: ', pos.shape)
    # NOTE(review): the no-positives early exit returns a single tensor while
    # the normal path returns a 3-tuple -- callers must handle both.
    if pos.nelement() == 0: return p_m.sum() * 0
    if len(p_m.shape) == 4:
        # Dense prediction map: flatten to one (17, 2) row per anchor.
        p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 17, 2)
        # print('atf pred mask shape: ', p_m.shape)
        p_m = torch.index_select(p_m, 0, pos)
        # print('atf selected pred mask shape: ', p_m.shape)
    else:
        p_m = torch.index_select(p_m, 0, pos)
    loss = criterion(p_m, mask, kp_weight)
    # iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, p_m, mask  # , iou_m, iou_5, iou_7
def select_mask_logistic_loss_rel1(p_m, mask, weight, kp_weight, criterion, o_sz=63, g_sz=127):
    """Variant of ``select_mask_logistic_loss`` for spatially-varying targets.

    Here ``mask`` already has one (x, y) keypoint target per spatial
    position (first two of its 3 channels), so only the joint weights need
    broadcasting.  ``o_sz``/``g_sz`` are unused.
    NOTE(review): the view to (-1, 17, 2) implies the spatial size of
    ``mask`` matches that of ``weight`` -- confirm against the caller.
    """
    # mask shape: [bs, 3, 17, size, size]
    # Broadcast the per-sample joint weights over all anchor positions.
    kp_weight_pos = kp_weight.view(kp_weight.size(0), 1, 1, 1, -1)
    kp_weight_pos = kp_weight_pos.expand(-1,
                                         weight.size(1),
                                         weight.size(2),
                                         weight.size(3),
                                         -1).contiguous()
    # (bs, 1, 25, 25, 17)
    kp_weight_pos = kp_weight_pos.view(-1, 17)
    mask = mask[:, :2]  # [bs, 2, 17, size, size]
    # Reorder to (bs, size, size, 17, 2) so each position holds 17 (x, y).
    mask = mask.permute(0, 3, 4, 2, 1).contiguous()
    # (bs, 1, 25, 25, 17)
    mask_weight_pos = mask.view(-1, 17, 2)
    weight = weight.view(-1)
    # Flat indices of the positive anchors.
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    kp_weight = torch.index_select(kp_weight_pos, 0, pos)
    mask = torch.index_select(mask_weight_pos, 0, pos)
    # print('pose shape: ', pos.shape)
    # NOTE(review): the no-positives early exit returns a single tensor while
    # the normal path returns a 3-tuple -- callers must handle both.
    if pos.nelement() == 0: return p_m.sum() * 0
    if len(p_m.shape) == 4:
        # Dense prediction map: flatten to one (17, 2) row per anchor.
        p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 17, 2)
        # print('atf pred mask shape: ', p_m.shape)
        p_m = torch.index_select(p_m, 0, pos)
        # print('atf selected pred mask shape: ', p_m.shape)
    else:
        p_m = torch.index_select(p_m, 0, pos)
    loss = criterion(p_m, mask, kp_weight)
    # iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, p_m, mask  # , iou_m, iou_5, iou_7
def iou_measure(pred, label):
    """Mean IoU between binarised masks plus IoU@0.5 / IoU@0.7 hit rates.

    ``pred`` is thresholded at 0; ``label`` is compared against +1 per
    element.  Returns (mean IoU, fraction of rows with IoU > 0.5,
    fraction of rows with IoU > 0.7).
    """
    pred = pred.ge(0)
    # Cast the boolean masks to integers before adding: on modern PyTorch
    # eq() returns a BoolTensor, and Bool+Bool cannot represent the value 2
    # that the intersection test below relies on (the original code depended
    # on the old ByteTensor behaviour).
    mask_sum = pred.eq(1).long().add(label.eq(1).long())
    intxn = torch.sum(mask_sum == 2, dim=1).float()
    union = torch.sum(mask_sum > 0, dim=1).float()
    iou = intxn / union
    return torch.mean(iou), (torch.sum(iou > 0.5).float() / iou.shape[0]), (torch.sum(iou > 0.7).float() / iou.shape[0])
if __name__ == "__main__":
    # Smoke test with random inputs.
    # NOTE(review): this demo looks stale -- select_mask_logistic_loss now
    # requires (p_m, mask, weight, kp_weight, criterion), but only three
    # arguments are passed here, so running this file raises a TypeError.
    p_m = torch.randn(4, 63 * 63, 25, 25)
    cls = torch.randn(4, 1, 25, 25) > 0.9
    mask = torch.randn(4, 1, 255, 255) * 2 - 1
    loss = select_mask_logistic_loss(p_m, mask, cls)
    print(loss)
| 39.126623 | 120 | 0.573479 |
acf7abd77cdebf39ebbd48a4f80a69de58f687ba | 1,085 | py | Python | UTIL/DateTime.py | leelim81/Pair-Trading-Reinforcement-Learning | 0a5068bbc628288ef3c6fe09bd3026e7d7e57ae8 | [
"MIT"
] | 186 | 2019-06-11T14:56:37.000Z | 2022-03-28T17:31:36.000Z | UTIL/DateTime.py | leelim81/Pair-Trading-Reinforcement-Learning | 0a5068bbc628288ef3c6fe09bd3026e7d7e57ae8 | [
"MIT"
] | 9 | 2019-07-26T06:02:57.000Z | 2021-03-03T09:24:40.000Z | UTIL/DateTime.py | leelim81/Pair-Trading-Reinforcement-Learning | 0a5068bbc628288ef3c6fe09bd3026e7d7e57ae8 | [
"MIT"
] | 69 | 2019-06-20T09:32:46.000Z | 2022-02-24T02:08:31.000Z | import calendar
import datetime
import numpy as np
import pandas as pd
def get_dates_weekday(start_date, end_date):
    """Return every weekday (Mon-Fri) from start_date to end_date, inclusive."""
    span = (end_date - start_date).days
    all_days = (start_date + datetime.timedelta(offset) for offset in range(span + 1))
    # calendar.weekday: Monday == 0 ... Sunday == 6, so < 5 keeps Mon-Fri.
    return [day for day in all_days
            if calendar.weekday(day.year, day.month, day.day) < 5]
def get_time_section(timestamp, section_length):
    """Number consecutive groups of ``section_length`` samples within a day.

    Characters [8:10] of each timestamp string are read as the day-of-month
    (assumes 'YYYY-MM-DD ...' formatting -- not validated).  The counter
    restarts whenever the day-of-month increases by exactly one, and a new
    section starts every ``section_length`` samples.
    """
    days = np.array([int(ts[8:10]) for ts in timestamp])
    # 1 exactly where the day-of-month advanced by one vs. the previous row.
    day_advanced = np.insert(np.diff(days), 0, 0)
    section = np.zeros_like(days)
    current = 0
    for idx in range(len(section)):
        if day_advanced[idx] == 1:
            current = 0
        if idx % section_length == 0:
            current += 1
        section[idx] = current
    return section
def format_timestamp(timestamp, output_type='str', str_format='%Y-%m-%d %H:%M:%S'):
    """Parse timestamps with pandas and optionally format them back to strings.

    :param timestamp: anything ``pd.to_datetime`` accepts (typically a Series).
    :param output_type: 'str' returns formatted strings; any other value
        returns the parsed datetime values unchanged.
    :param str_format: strftime pattern used when ``output_type == 'str'``.
    """
    dt = pd.to_datetime(timestamp)
    if output_type == 'str':
        # ``pd.datetime`` was deprecated in pandas 1.0 and removed in 2.0;
        # calling strftime on each Timestamp is the supported equivalent.
        dt = dt.apply(lambda x: x.strftime(str_format))
    return dt
| 27.125 | 84 | 0.610138 |
acf7ac703319a8763633c3eefa185940d9c61b5d | 1,133 | py | Python | sequential_sentence_classification/predictor.py | WERimagin/sequence-labelling | 2be00b7d89531af0a8d65dad88daba56e0940f4b | [
"Apache-2.0"
] | 44 | 2019-09-04T23:11:49.000Z | 2022-03-13T12:16:37.000Z | sequential_sentence_classification/predictor.py | WERimagin/sequence-labelling | 2be00b7d89531af0a8d65dad88daba56e0940f4b | [
"Apache-2.0"
] | 14 | 2019-09-09T21:53:34.000Z | 2021-09-17T18:40:56.000Z | sequential_sentence_classification/predictor.py | WERimagin/sequence-labelling | 2be00b7d89531af0a8d65dad88daba56e0940f4b | [
"Apache-2.0"
] | 20 | 2019-09-21T13:36:11.000Z | 2022-02-14T00:43:09.000Z | from typing import List
from overrides import overrides
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('SeqClassificationPredictor')
class SeqClassificationPredictor(Predictor):
    """
    Predictor for the abstruct model
    """
    def predict_json(self, json_dict: JsonDict) -> JsonDict:
        """Label every sentence of one abstract; return (paper_id, pairs)."""
        paper_id = json_dict['abstract_id']
        sentences = json_dict['sentences']
        pred_labels = []
        # The reader splits long abstracts into chunks; predict per chunk.
        for sentence_chunk, _, _, _ in \
                self._dataset_reader.enforce_max_sent_per_example(sentences):
            instance = self._dataset_reader.text_to_instance(abstract_id=0, sentences=sentence_chunk)
            output = self._model.forward_on_instance(instance)
            best_indices = output['action_probs'].argmax(axis=1).tolist()
            pred_labels.extend(
                self._model.vocab.get_token_from_index(i, namespace='labels')
                for i in best_indices)
        assert len(pred_labels) == len(sentences)
        preds = list(zip(sentences, pred_labels))
        return paper_id, preds
| 40.464286 | 101 | 0.704325 |
acf7aca498fd9eb08e2f6d27dc3aa584026c94f5 | 1,252 | py | Python | app/migrations/0001_initial.py | karanverma7/OnlineStore | 4e2995547ccb1460f5d1a5329d3847ee252d045e | [
"MIT"
] | null | null | null | app/migrations/0001_initial.py | karanverma7/OnlineStore | 4e2995547ccb1460f5d1a5329d3847ee252d045e | [
"MIT"
] | null | null | null | app/migrations/0001_initial.py | karanverma7/OnlineStore | 4e2995547ccb1460f5d1a5329d3847ee252d045e | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-29 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Order and Product tables."""

    # First migration of the app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('address', models.TextField()),
                ('city', models.CharField(max_length=100)),
                # Ordered items stored as a plain CharField, not a relation.
                ('items', models.CharField(max_length=200)),
                ('orderStatus', models.BooleanField(default=False)),
                ('payment_method', models.CharField(max_length=400)),
                ('payment_data', models.CharField(max_length=400)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('price', models.FloatField()),
                ('image', models.ImageField(upload_to='')),
            ],
        ),
    ]
| 33.837838 | 114 | 0.547125 |
acf7ad0e6d495dcfde436366d126cf2587650004 | 967 | py | Python | src/pssparser/model/activity_stmt_base.py | PSSTools/py-pss-parser | c068942a770d2abbc4626f7e654f1bb405242f4d | [
"Apache-2.0"
] | 1 | 2020-03-09T21:37:13.000Z | 2020-03-09T21:37:13.000Z | src/pssparser/model/activity_stmt_base.py | PSSTools/py-pss-parser | c068942a770d2abbc4626f7e654f1bb405242f4d | [
"Apache-2.0"
] | null | null | null | src/pssparser/model/activity_stmt_base.py | PSSTools/py-pss-parser | c068942a770d2abbc4626f7e654f1bb405242f4d | [
"Apache-2.0"
] | 1 | 2020-04-21T21:02:47.000Z | 2020-04-21T21:02:47.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on Apr 13, 2020
@author: ballance
'''
class ActivityStmtBase(object):
    """Base class shared by all activity statements.

    Carries an optional textual label and an opaque data payload;
    both are unset (None) at construction time.
    """

    def __init__(self):
        self.data = None
        self.label = None
| 31.193548 | 62 | 0.730093 |
acf7ad10b65bafe63aae8d2a02af4b5f5ef4c8f1 | 973 | py | Python | neutron_lib/tests/unit/api/definitions/test_security_groups_remote_address_group.py | Joffref/neutron-lib | 4847d6728223949b0277b8fcac36bc0950dbdd8b | [
"Apache-2.0"
] | 41 | 2015-12-02T17:54:03.000Z | 2022-01-14T18:55:57.000Z | neutron_lib/tests/unit/api/definitions/test_security_groups_remote_address_group.py | Joffref/neutron-lib | 4847d6728223949b0277b8fcac36bc0950dbdd8b | [
"Apache-2.0"
] | null | null | null | neutron_lib/tests/unit/api/definitions/test_security_groups_remote_address_group.py | Joffref/neutron-lib | 4847d6728223949b0277b8fcac36bc0950dbdd8b | [
"Apache-2.0"
] | 62 | 2016-02-16T12:55:18.000Z | 2021-12-29T03:03:03.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import security_groups_remote_address_group
from neutron_lib.tests.unit.api.definitions import base
class SecurityGroupsRemoteAddressGroupDefinitionTestCase(
        base.DefinitionBaseTestCase):
    # The actual API-definition sanity checks are inherited from
    # DefinitionBaseTestCase; this subclass only points them at the
    # security-groups remote-address-group extension under test.
    extension_module = security_groups_remote_address_group
    extension_resources = ('security_group_rules',)
    extension_attributes = ('remote_address_group_id',)
| 42.304348 | 78 | 0.770812 |
acf7ad6e006b8c350f8e97bae4d8b6e253ed7f8c | 1,596 | py | Python | scripts/cscap/wx/extract_colin.py | akrherz/datateam | 2efbaa24ff2e28115eeabce9193c3d3b152068d8 | [
"MIT"
] | null | null | null | scripts/cscap/wx/extract_colin.py | akrherz/datateam | 2efbaa24ff2e28115eeabce9193c3d3b152068d8 | [
"MIT"
] | 3 | 2016-04-28T16:01:06.000Z | 2016-08-28T18:05:15.000Z | scripts/cscap/wx/extract_colin.py | akrherz/datateam | 2efbaa24ff2e28115eeabce9193c3d3b152068d8 | [
"MIT"
] | 1 | 2016-08-28T16:59:18.000Z | 2016-08-28T16:59:18.000Z | """ Extract some data for Colin
1951-2010 Annual GDDs by climate district Apr 1 - Oct 31
1951-2010 Frost-free days ...
"""
import psycopg2
import pandas as pd
# Read-only connection to the IEM COOP climate database.
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor()

# NOTE(review): this import sits mid-file in the original; left in place.
from pyiem.network import Table as NetworkTable
nt = NetworkTable(['IACLIMATE', 'MNCLIMATE', 'NDCLIMATE', 'OHCLIMATE',
                   'INCLIMATE', 'ILCLIMATE', 'MICLIMATE', 'WICLIMATE',
                   'SDCLIMATE', 'NECLIMATE', 'KSCLIMATE', 'MOCLIMATE'])
res = []
for sid in nt.sts.keys():
    # Keep only climate-district pseudo-stations (third char is 'C').
    if sid[2] != 'C':
        continue
    TABLE = "alldata_%s" % (sid[:2],)
    # Per station, 1951-2010: annual Apr 1 - Oct 31 GDD50 sum plus the
    # frost-free season length (first fall frost DOY minus last spring
    # frost DOY, with low < 32F as the frost condition).
    cursor.execute("""
    WITH gdd as (
        SELECT year, sum(gdd50(high,low)) from """+TABLE+""" WHERE
        station = %s and sday between '0401' and '1031' and year >= 1951
        and year < 2011 GROUP by year),
    ff as (
        SELECT year,
  max(case when month < 7 and low < 32 then extract(doy from day) else 0 end),
  min(case when month > 7 and low < 32 then extract(doy from day) else 366 end)
        from """+TABLE+""" WHERE station = %s and year >= 1951 and year < 2011
        GROUP by year)
    SELECT g.year, g.sum, f.min - f.max from ff f JOIN gdd g on (g.year = f.year)
    ORDER by g.year ASC
    """, (sid, sid))
    for row in cursor:
        res.append(dict(station=sid, year=row[0], gdd50=row[1],
                        frostfree=int(row[2])))
df = pd.DataFrame(res)
df.to_csv('output.csv', index=False, columns=['station', 'year', 'gdd50',
                                              'frostfree'])
| 35.466667 | 85 | 0.580201 |
acf7ae67089ddb55ed91df933241ee863caf2dbc | 3,599 | py | Python | modules/networkx/tests/test_convert_scipy.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
] | 9 | 2020-09-26T03:41:21.000Z | 2021-11-29T06:52:35.000Z | modules/networkx/tests/test_convert_scipy.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
] | 9 | 2020-08-10T19:38:03.000Z | 2022-02-24T08:41:32.000Z | modules/networkx/tests/test_convert_scipy.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
] | 3 | 2020-12-26T08:43:56.000Z | 2021-10-17T19:37:52.000Z | from nose import SkipTest
from nose.tools import assert_raises, assert_true
import networkx as nx
from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
class TestConvertNumpy(object):
    """Round-trip tests between NetworkX graphs and SciPy sparse matrices."""

    @classmethod
    def setupClass(cls):
        # Bind numpy/scipy as module globals at class-setup time so the
        # whole class is skipped when SciPy is not installed.
        global np, sp, sparse
        try:
            import numpy as np
            import scipy as sp
            import scipy.sparse as sparse
        except ImportError:
            raise SkipTest('SciPy sparse library not available.')

    def __init__(self):
        # Fixture graphs: undirected, directed, weighted, weighted-directed.
        self.G1 = barbell_graph(10, 3)
        self.G2 = cycle_graph(10, create_using=nx.DiGraph())
        self.G3 = self.create_weighted(nx.Graph())
        self.G4 = self.create_weighted(nx.DiGraph())

    def create_weighted(self, G):
        # Copy a 4-cycle into G, weighting each edge as (source node + 10).
        g = cycle_graph(4)
        e = g.edges()
        source = [u for u,v in e]
        dest = [v for u,v in e]
        weight = [s+10 for s in source]
        ex = zip(source, dest, weight)
        G.add_weighted_edges_from(ex)
        return G

    def assert_equal(self, G1, G2):
        # Graph equality by node and edge sets only (attributes ignored).
        assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
        assert_true( sorted(G1.edges())==sorted(G2.edges()) )

    def identity_conversion(self, G, A, create_using):
        # G must survive reconstruction from every sparse format of A
        # (LIL input, CSR, COO, CSC) and from its dense forms.
        GG = nx.from_scipy_sparse_matrix(A, create_using=create_using)
        self.assert_equal(G, GG)
        GW = nx.to_networkx_graph(A, create_using=create_using)
        self.assert_equal(G, GW)
        GI = create_using.__class__(A)
        self.assert_equal(G, GI)
        ACSR = A.tocsr()
        GI = create_using.__class__(ACSR)
        self.assert_equal(G, GI)
        ACOO = A.tocoo()
        GI = create_using.__class__(ACOO)
        self.assert_equal(G, GI)
        ACSC = A.tocsc()
        GI = create_using.__class__(ACSC)
        self.assert_equal(G, GI)
        AD = A.todense()
        GI = create_using.__class__(AD)
        self.assert_equal(G, GI)
        AA = A.toarray()
        GI = create_using.__class__(AA)
        self.assert_equal(G, GI)

    def test_shape(self):
        "Conversion from non-square sparse array."
        A = sp.sparse.lil_matrix([[1,2,3],[4,5,6]])
        assert_raises(nx.NetworkXError, nx.from_scipy_sparse_matrix, A)

    def test_identity_graph_matrix(self):
        "Conversion from graph to sparse matrix to graph."
        A = nx.to_scipy_sparse_matrix(self.G1)
        self.identity_conversion(self.G1, A, nx.Graph())

    def test_identity_digraph_matrix(self):
        "Conversion from digraph to sparse matrix to digraph."
        A = nx.to_scipy_sparse_matrix(self.G2)
        self.identity_conversion(self.G2, A, nx.DiGraph())

    def test_identity_weighted_graph_matrix(self):
        """Conversion from weighted graph to sparse matrix to weighted graph."""
        A = nx.to_scipy_sparse_matrix(self.G3)
        self.identity_conversion(self.G3, A, nx.Graph())

    def test_identity_weighted_digraph_matrix(self):
        """Conversion from weighted digraph to sparse matrix to weighted digraph."""
        A = nx.to_scipy_sparse_matrix(self.G4)
        self.identity_conversion(self.G4, A, nx.DiGraph())

    def test_nodelist(self):
        """Conversion from graph to sparse matrix to graph with nodelist."""
        P4 = path_graph(4)
        P3 = path_graph(3)
        nodelist = P3.nodes()
        A = nx.to_scipy_sparse_matrix(P4, nodelist=nodelist)
        GA = nx.Graph(A)
        self.assert_equal(GA, P3)
        # Make nodelist ambiguous by containing duplicates.
        nodelist += [nodelist[0]]
        assert_raises(nx.NetworkXError, nx.to_numpy_matrix, P3, nodelist=nodelist)
| 33.018349 | 84 | 0.636844 |
acf7aefc19db57c90fff4fb7ea2e7cf57e26d390 | 584 | py | Python | ObjectOrientedPython/GetterAndSetter.py | dsabhrawal/python-examples | 55b3dd6c9fd0b992bcfe3422765dc80fb143a54b | [
"MIT"
] | 1 | 2020-03-01T17:24:20.000Z | 2020-03-01T17:24:20.000Z | ObjectOrientedPython/GetterAndSetter.py | dsabhrawal/python-examples | 55b3dd6c9fd0b992bcfe3422765dc80fb143a54b | [
"MIT"
] | null | null | null | ObjectOrientedPython/GetterAndSetter.py | dsabhrawal/python-examples | 55b3dd6c9fd0b992bcfe3422765dc80fb143a54b | [
"MIT"
] | null | null | null | # private variables are preceeded by __ ex __name
class Student:
    """Student record demonstrating name-mangled "private" attributes
    reached only through explicit getter/setter methods."""

    def setName(self, name):
        # Stored as the mangled attribute _Student__name.
        self.__name = name

    def getName(self):
        return self.__name

    def setRollno(self, rollno):
        # Stored as the mangled attribute _Student__rollno.
        self.__rollno = rollno

    def getRollno(self):
        return self.__rollno

    def display(self):
        # print with a comma inserts a single separating space.
        print('Name: ', self.getName())
        print('RollNo: ', self.getRollno())
# Demo: drive the Student getters/setters.
s = Student()
# s.__name is not reachable here: double-underscore attributes are
# name-mangled to _Student__name and hidden outside the class body.
s.setName('ABC')
s.setRollno(101)
s.display()
acf7b18cb3962517ff2d4c2ecf1a5d5d44749358 | 2,817 | py | Python | Lab5/SplitSQLIntoSeparateQueries.py | DLohmann/dklugSQLAutoGrader | af4564943f61d5db6e0eb01ad7818a53eff57597 | [
"MIT"
] | null | null | null | Lab5/SplitSQLIntoSeparateQueries.py | DLohmann/dklugSQLAutoGrader | af4564943f61d5db6e0eb01ad7818a53eff57597 | [
"MIT"
] | null | null | null | Lab5/SplitSQLIntoSeparateQueries.py | DLohmann/dklugSQLAutoGrader | af4564943f61d5db6e0eb01ad7818a53eff57597 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re #regex for removing comments
# comment_remover(text) was modified from: https://stackoverflow.com/questions/241327/python-snippet-to-remove-c-and-c-comments
def comment_remover(text):
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
return re.sub(pattern, replacer, text)
#takes out all comments from a file, using regex
def remove_line_comments (str):
#newStr = re.sub(str, "--.+\n|--.+\z", "") # remove any single line comments, including any comment at the end of the file
# newStr = re.sub(str, "--.+\n", "") # remove all single line comments
# newStr = re.sub(newStr, "--.+\z", "") # remove any single line comment at the end of file
# newStr = re.sub(newStr, "", "") # remove all multi-line comments
# /\*[a-zA-Z0-9\ ]+ \*/
#lineCommentRe = re.compile ("--\ ?^Q.+$") #"--.+\n|--.+\z")
lineCommentRe = re.compile ("--.+\n|--.+\z")
newStr = re.sub(lineCommentRe, "\n", str)
return newStr
#Read "Lab_3_Queries", take out comments, and save into "Lohmann_Lab_3_Queries"
'''
#rf = open ("Lab_3_Queries", "r", encoding="utf-8")
#fileStr = rf.read()
#rf.close()
fileStr =open("Lab_3_Queries", "r", encoding="utf-8").read()#.replace("--.+\n|--.+\z", "")
fileStr = comment_remover(fileStr)
#fileStr = removeComments (fileStr)
wf = open ("Lohmann_Lab_3_Queries.sql", "w", encoding="utf-8")
wf.write(fileStr)
wf.close()
'''
#Create array of strings, where each string is a separate query
#rf = open("Lohmann_Lab_3_Queries", "r", encoding="utf-8")
'''
fileStr =open("Lab_3_Queries", "r", encoding="utf-8").read()
pattern = re.compile (r";$", re.DOTALL | re.MULTILINE)
queryStrings = re.split(pattern, fileStr) #splits by ';' character followed by newline
queryList = [query + ";" for query in queryList if (len(query) > 5)] # add back the ';' character, and remove any short, garbage strings created by regex
'''
# fileStr =open(r"C:\Users\david\OneDrive\Documents\Classes\CURRENT-SEMESTER\CSE 111 Database Systems\CSE111\Lab-3\Lohmann_Lab_3_Queries.sql", "r", encoding="utf-8").read()
fileStr =open(r"Lohmann_Lab_5_Queries.sql", "r").read()
fileStr = comment_remover(fileStr)
fileStr = remove_line_comments(fileStr)
pattern = re.compile(';', re.MULTILINE|re.DOTALL)
queryList = re.split (pattern, fileStr)
queryList = [query + ";" for query in queryList if (len(query) > 5)]
#Write each query to separate files
for i in range (len(queryList)):
print ("\n\n\nQuery # ", (i+1), " is:\n")
print (queryList[i])
wf = open("Results/" + str(i+1) + ".sql", "w")
wf.write(queryList[i])
wf.close()
| 36.584416 | 172 | 0.644302 |
acf7b1cdfd5cc6e74068c49de3b182f215b2f7ea | 1,487 | py | Python | ROS_Training/Custom Nodes/my_convertor.py | zestyoreo/ITSP-21 | 1f7cd88c62b585d04866db40beea811e23cdd732 | [
"MIT"
] | null | null | null | ROS_Training/Custom Nodes/my_convertor.py | zestyoreo/ITSP-21 | 1f7cd88c62b585d04866db40beea811e23cdd732 | [
"MIT"
] | null | null | null | ROS_Training/Custom Nodes/my_convertor.py | zestyoreo/ITSP-21 | 1f7cd88c62b585d04866db40beea811e23cdd732 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# license removed for brevity
import rospy
import math
from beginner_tutorial.msg import quaternions
from beginner_tutorial.msg import euler_angles
class q_and_e:
def __init__(self):
rospy.init_node('my_convertor', anonymous=True)
self.conv_sub = rospy.Subscriber('topic1', quaternions, self.euler_conv)
self.conv_pub = rospy.Publisher('topic2', euler_angles, queue_size=10)
self.rate = rospy.Rate(10)
self.quat=quaternions()
self.eul = euler_angles()
def euler_conv(self, data):
self.quat = data
#roll (x-axis rotation)
sinr_cosp = 2 * (self.quat.w * self.quat.x + self.quat.y * self.quat.z)
cosr_cosp = 1 - 2 * (self.quat.x * self.quat.x + self.quat.y * self.quat.y)
self.eul.roll = atan2(sinr_cosp, cosr_cosp)
#pitch (y-axis rotation)
sinp = 2 * (self.quat.w * self.quat.y - self.quat.z * self.quat.x)
if fabs(sinp) >= 1:
self.eul.pitch = copysign(3.14159/ 2, sinp) #use 90 degrees if out of range
else:
self.eul.pitch = asin(sinp);
#yaw (z-axis rotation)
siny_cosp = 2 * (self.quat.w * self.quat.z + self.quat.x * self.quat.y)
cosy_cosp = 1 - 2 * (self.quat.y * self.quat.y + self.quat.z * self.quat.z)
self.eul.yaw = atan2(siny_cosp, cosy_cosp)
def run(self):
self.conv_pub.publish(self.eul)
rospy.spin()
if __name__ == '__main__':
try:
x=q_and_e()
x.run()
except rospy.ROSInterruptException:
pass | 29.74 | 84 | 0.64694 |
acf7b2bfd1b6e053e2fb65a138a13a616c9a0b23 | 436 | py | Python | tests/hello_world/test_main.py | milancermak/aws-cd-pipeline | ae38440c6aa679e20899c5aa67a63956aa9e57bf | [
"Apache-2.0"
] | 1 | 2019-10-05T11:22:40.000Z | 2019-10-05T11:22:40.000Z | tests/hello_world/test_main.py | milancermak/aws-cd-pipeline | ae38440c6aa679e20899c5aa67a63956aa9e57bf | [
"Apache-2.0"
] | null | null | null | tests/hello_world/test_main.py | milancermak/aws-cd-pipeline | ae38440c6aa679e20899c5aa67a63956aa9e57bf | [
"Apache-2.0"
] | null | null | null | import collections
from src.hello_world import main
Context = collections.namedtuple('Context', ['function_name',
'function_version',
'invoked_function_arn'])
def test_handler():
context = Context('testname',
'testversion',
'test:arn')
result = main.handler({}, context)
assert result
| 25.647059 | 69 | 0.504587 |
acf7b4ef8b42a0371f8b29fcf971a09b9ea534f2 | 8,849 | py | Python | spiketag/base/SPKTAG.py | aliddell/spiketag | f5600126c2c6c9be319e8b808d51ea33be843909 | [
"BSD-3-Clause"
] | null | null | null | spiketag/base/SPKTAG.py | aliddell/spiketag | f5600126c2c6c9be319e8b808d51ea33be843909 | [
"BSD-3-Clause"
] | null | null | null | spiketag/base/SPKTAG.py | aliddell/spiketag | f5600126c2c6c9be319e8b808d51ea33be843909 | [
"BSD-3-Clause"
] | null | null | null | from .MUA import MUA
from .SPK import SPK
from .FET import FET
from .CLU import CLU
from .CLU import status_manager
import numpy as np
import json
import pickle
import pandas as pd
from numba import njit
@njit(cache=True)
def to_labels(grp_clu_matrix, cumsum_nclu):
'''
grp_clu_matrix: (N, 2) matrix, each row is a (grp_id, clu_id) pair
cumsum_nclu: (40,) vector, cumsum of model.nclus or spktag.nclus
'''
grp_clu_matrix = grp_clu_matrix.astype(np.int32)
N = grp_clu_matrix.shape[0]
labels = np.zeros((N,))
for i in range(N):
grp_id, clu_id = grp_clu_matrix[i]
if clu_id > 0:
labels[i] = cumsum_nclu[grp_id-1] + clu_id
return labels
class SPKTAG(object):
def __init__(self, probe=None, spk=None, fet=None, clu=None, clu_manager=None, gtimes=None, filename=None):
'''
spk : spk object
fet : fet object
clu : dictionary of clu object (each item is a channel based clu object)
clu_manager : clu manager
gtimes : dictionary of group with spike times
'''
self.probe = probe
if filename is not None: # load from file
self.fromfile(filename)
elif gtimes is not None : # construct
self.gtimes = gtimes
self.spk = spk
self.fet = fet
self.clu = clu
self.spklen = spk.spklen
self.fetlen = fet.fetlen
self.grplen = self.probe.group_len
self.ngrp = len(self.probe.grp_dict.keys())
self.clu_manager = clu_manager
self.dtype = [('t', 'int32'),
('group','int32'),
('spk', 'f4', (self.spklen, self.grplen)),
('fet','f4',(self.fetlen,)),
('clu','int32')]
else:
pass
@property
def nspk(self):
return sum([len(v) for v in self.gtimes.values()])
def build_meta(self):
meta = {}
meta["fs"] = self.probe.fs
meta["ngrp"] = self.ngrp
meta["grplen"] = self.probe.group_len
meta["fetlen"] = self.fetlen
meta["spklen"] = self.spklen
meta["clu_statelist"] = self.clu_manager.state_list
return meta
def build_hdbscan_tree(self):
treeinfo = {}
for i in range(self.ngrp):
try:
treeinfo[i] = self.clu[i]._extra_info
except:
treeinfo[i] = None
return treeinfo
def build_spktag(self):
spktag = np.zeros(self.nspk, dtype=self.dtype)
start_index = 0
for g, times in self.gtimes.items():
if times.shape[0] > 0:
end_index = start_index + len(times)
spktag['t'][start_index:end_index] = times
spktag['group'][start_index:end_index] = np.full((len(times)), g, dtype=np.int)
spktag['spk'][start_index:end_index] = self.spk[g]
spktag['fet'][start_index:end_index] = self.fet[g]
spktag['clu'][start_index:end_index] = self.clu[g].membership
start_index = end_index
return spktag
def build_spkid_matrix(self, including_noise=False):
spkid_matrix = np.hstack((self.spktag['t'].reshape(-1,1),
self.spktag['group'].reshape(-1,1),
self.spktag['fet'],
self.spktag['clu'].reshape(-1,1)))
if including_noise is False:
spkid_matrix = spkid_matrix[spkid_matrix[:,-1]!=0]
grp_clu_matrix = spkid_matrix[:, [1,-1]]
global_labels = to_labels(grp_clu_matrix, self.nclus.cumsum())
spkid_matrix[:, -1] = global_labels
spkid_matrix = pd.DataFrame(spkid_matrix).sort_values(0, ascending=True)
spkid_matrix.columns = ['frame_id','group_id','fet0','fet1','fet2','fet3','spike_id']
spkid_matrix.index = np.arange(global_labels.shape[0])
return spkid_matrix
def update(self, spk, fet, clu, gtimes):
self.spk = spk
self.fet = fet
self.clu = clu
self.gtimes = gtimes
self.build_meta()
self.build_spktag()
self.build_spkid_matrix()
def tofile(self, filename, including_noise=False):
self.meta = self.build_meta()
self.treeinfo = self.build_hdbscan_tree()
self.spktag = self.build_spktag()
self.spkid_matrix = self.build_spkid_matrix(including_noise=including_noise)
with open(filename+'.meta', 'w') as metafile:
json.dump(self.meta, metafile, indent=4)
np.save(filename+'.npy', self.treeinfo)
self.spktag.tofile(filename) # numpy to file
self.spkid_matrix.to_pickle(filename+'.pd') # pandas data frame
def fromfile(self, filename):
# meta file
with open(filename+'.meta', 'r') as metafile:
self.meta = json.load(metafile)
self.fs = self.meta['fs']
self.ngrp = self.meta['ngrp']
self.grplen = self.meta['grplen']
self.spklen = self.meta['spklen']
self.fetlen = self.meta['fetlen']
self.clu_statelist = self.meta['clu_statelist']
# condensed tree info
self.treeinfo = np.load(filename+'.npy', allow_pickle=True).item()
# spiketag
self.dtype = [('t', 'int32'),
('group', 'int32'),
('spk', 'f4', (self.spklen, self.grplen)),
('fet', 'f4', (self.fetlen,)),
('clu', 'int32')]
self.spktag = np.fromfile(filename, dtype=self.dtype)
try:
self.spkid_matrix = pd.read_pickle(filename+'.pd')
except:
pass
def tospk(self):
spkdict = {}
for g in self.gtimes.keys():
spkdict[g] = self.spktag['spk'][self.spktag['group']==g]
self.spk = SPK(spkdict)
return self.spk
def tofet(self):
fetdict = {}
for g in self.gtimes.keys():
fetdict[g] = self.spktag['fet'][self.spktag['group']==g]
self.fet = FET(fetdict)
return self.fet
def toclu(self):
cludict = {}
for g in self.gtimes.keys():
cludict[g] = CLU(self.spktag['clu'][self.spktag['group']==g], treeinfo=self.treeinfo[g])
cludict[g]._id = g
cludict[g]._state = cludict[g].s[self.clu_statelist[g]]
self.clu = cludict
return self.clu
def to_gtimes(self):
times = self.spktag['t']
groups = self.spktag['group']
gtimes = {}
for g in np.unique(groups):
gtimes[g] = times[np.where(groups == g)[0]]
self.gtimes = gtimes
return self.gtimes
@property
def done_groups(self):
return np.where(np.array(self.clu_manager.state_list) == 3)[0]
@property
def nclus(self):
self._nclus = []
for i in range(self.ngrp):
n = self.clu[i].nclu
self._nclus.append(n)
self._nclus = np.array(self._nclus) - 1
return self._nclus
def _get_label(self, grp_id, clu_id):
assert(clu_id<=self.nclus[grp_id]), "group {} contains only {} clusters".format(grp_id, self.nclus[grp_id])
if clu_id == 0:
return 0
else:
clu_offset = self.nclus.cumsum()[grp_id-1]
return clu_offset+clu_id
def get_spk_times(self, group_id, cluster_id):
'''
get spike times from a specific group with a specific cluster number
'''
idx = self.clu[group_id][cluster_id]
spk_times = self.gtimes[group_id][idx]/self.fs
return spk_times
def get_spk_time_dict(self):
'''
callable after clu_manager is initiated
'''
k = 0
spk_time_dict = {}
for grp_No, grp_state in enumerate(self.clu_manager.state_list):
if grp_state == 3: # done state
for clu_No in range(1, self.clu[grp_No].nclu):
spk_time_dict[k] = self.get_spk_times(grp_No, clu_No)
k+=1
return spk_time_dict
def load(self, filename):
self.fromfile(filename)
self.gtimes = self.to_gtimes()
self.spk = self.tospk()
self.fet = self.tofet()
self.clu = self.toclu()
self.clu_manager = status_manager()
for _clu in self.clu.values():
self.clu_manager.append(_clu)
self.spk_time_dict = self.get_spk_time_dict()
self.spk_time_array = np.array(list(self.spk_time_dict.values()))
self.n_units = len(self.spk_time_dict)
print('loading from spktag {}: {} neurons extracted'.format(filename, self.n_units))
| 34.701961 | 115 | 0.55769 |
acf7b4f98e0240917b9783b85d9a67e6ee304561 | 2,148 | py | Python | app.py | MustafaGangardiwala/task-reminder | c6abd9c217563de55cec33ec3a03fa9e931256d5 | [
"MIT"
] | null | null | null | app.py | MustafaGangardiwala/task-reminder | c6abd9c217563de55cec33ec3a03fa9e931256d5 | [
"MIT"
] | null | null | null | app.py | MustafaGangardiwala/task-reminder | c6abd9c217563de55cec33ec3a03fa9e931256d5 | [
"MIT"
] | 2 | 2020-10-01T04:39:05.000Z | 2020-10-01T07:23:44.000Z | from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Todo(db.Model):
id = db.Column(db.Integer, primary_key = True)
status = db.Column(db.Boolean, default=False)
content = db.Column(db.String(200), nullable=False)
date_created = db.Column(db.DateTime, default=datetime.now)
def __repr__(self):
return '<Task %r>' % self.id
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
task_content = request.form['content']
new_task = Todo(content=task_content)
try:
db.session.add(new_task)
db.session.commit()
return redirect('/')
except:
return 'Ther was an issue adding your task'
else:
tasks = Todo.query.order_by(Todo.date_created).all()
return render_template('index.html', tasks=tasks)
@app.route('/delete/<int:id>')
def delete(id):
task_to_delete = Todo.query.get_or_404(id)
try:
db.session.delete(task_to_delete)
db.session.commit()
return redirect('/')
except:
return 'There was issue with the deleting task'
@app.route('/done/<int:id>', methods=['POST'])
def done(id):
try:
task = Todo.query.get_or_404(id)
if request.form['done'] == 'on':
task.status = True
except:
task.status = False
try:
db.session.commit()
return redirect('/')
except:
return 'There was issue with the updating task'
@app.route('/update/<int:id>', methods=['GET', 'POST'])
def update(id):
task = Todo.query.get_or_404(id)
if request.method == 'POST':
task.content = request.form['content']
else:
return render_template('update.html', task=task)
try:
db.session.commit()
return redirect('/')
except:
return 'There was issue with the updating task'
if __name__ == "__main__":
app.run(debug=True)
| 24.976744 | 68 | 0.61406 |
acf7b7abb94d2dfa689c42de0b24cd02b82ba6be | 920 | py | Python | solutions/nim-game.py | Shuailong/Leetcode | 153aa7bc1b9ad5bbc503ddd7e0694bd2a17a2b97 | [
"MIT"
] | 3 | 2017-05-21T04:28:32.000Z | 2019-03-06T03:28:51.000Z | solutions/nim-game.py | Shuailong/Leetcode | 153aa7bc1b9ad5bbc503ddd7e0694bd2a17a2b97 | [
"MIT"
] | null | null | null | solutions/nim-game.py | Shuailong/Leetcode | 153aa7bc1b9ad5bbc503ddd7e0694bd2a17a2b97 | [
"MIT"
] | 1 | 2018-10-28T22:49:40.000Z | 2018-10-28T22:49:40.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
nim-game.py
Created by Shuailong on 2015-12-21.
https://leetcode.com/problems/nim-game/.
"""
class Solution1(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
'''Too time consuming'''
win1 = True
win2 = True
win3 = True
win = True
i = 4
while i < n+1:
win = not win1 or not win2 or not win3
win1 = win2
win2 = win3
win3 = win
i += 1
return win
class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
'''Find the law and rewrite'''
return n & 3 != 0
# return n % 4 != 0
def main():
solution = Solution()
n = 4
print solution.canWinNim(n)
if __name__ == '__main__':
main()
| 16.727273 | 50 | 0.466304 |
acf7ba9653365b1670973460f6fc26f4fdafc034 | 1,920 | py | Python | app/settings.py | ONSdigital/sdx-receipt-ctp | ff1ed23a6d5de305ee79978eafff50989adb50c6 | [
"MIT"
] | null | null | null | app/settings.py | ONSdigital/sdx-receipt-ctp | ff1ed23a6d5de305ee79978eafff50989adb50c6 | [
"MIT"
] | 15 | 2016-12-22T15:17:36.000Z | 2017-10-12T15:11:35.000Z | app/settings.py | ONSdigital/sdx-receipt-ctp | ff1ed23a6d5de305ee79978eafff50989adb50c6 | [
"MIT"
] | 1 | 2021-04-11T08:17:35.000Z | 2021-04-11T08:17:35.000Z | import logging
import os
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
LOGGING_LEVEL = logging.getLevelName(os.getenv('LOGGING_LEVEL', 'DEBUG'))
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
APP_TMP = os.path.join(APP_ROOT, 'tmp')
RECEIPT_HOST = os.getenv("CTP_RECEIPT_HOST", "http://localhost:8191")
RECEIPT_PATH = os.getenv("CTP_RECEIPT_PATH", "questionnairereceipts")
RECEIPT_USER = os.getenv("CTP_RECEIPT_USER", "gateway")
RECEIPT_PASS = os.getenv("CTP_RECEIPT_PASS", "ctp")
RABBIT_QUEUE = os.getenv('RECEIPT_CTP_QUEUE', 'ctp_receipt')
RABBIT_QUARANTINE_QUEUE = os.getenv('RECEIPT_CTP_QUARANTINE_QUEUE', 'ctp_receipt_quarantine')
RABBIT_EXCHANGE = os.getenv('RABBITMQ_EXCHANGE', 'message')
SDX_RECEIPT_CTP_SECRET = os.getenv("SDX_RECEIPT_CTP_SECRET")
if SDX_RECEIPT_CTP_SECRET is not None:
SDX_RECEIPT_CTP_SECRET = SDX_RECEIPT_CTP_SECRET.encode("ascii")
RABBIT_URL = 'amqp://{user}:{password}@{hostname}:{port}/{vhost}'.format(
hostname=os.getenv('RABBITMQ_HOST', 'rabbit'),
port=os.getenv('RABBITMQ_PORT', 5672),
user=os.getenv('RABBITMQ_DEFAULT_USER', 'rabbit'),
password=os.getenv('RABBITMQ_DEFAULT_PASS', 'rabbit'),
vhost=os.getenv('RABBITMQ_DEFAULT_VHOST', '%2f')
)
RABBIT_URL2 = 'amqp://{user}:{password}@{hostname}:{port}/{vhost}'.format(
hostname=os.getenv('RABBITMQ_HOST2', 'rabbit'),
port=os.getenv('RABBITMQ_PORT2', 5672),
user=os.getenv('RABBITMQ_DEFAULT_USER', 'rabbit'),
password=os.getenv('RABBITMQ_DEFAULT_PASS', 'rabbit'),
vhost=os.getenv('RABBITMQ_DEFAULT_VHOST', '%2f')
)
RABBIT_URLS = [RABBIT_URL, RABBIT_URL2]
# Configure the number of retries attempted before failing call
session = requests.Session()
retries = Retry(total=5, backoff_factor=0.1)
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))
| 40 | 93 | 0.757292 |
acf7bc648a06904890ced07dccdb279a9c189d7a | 1,249 | py | Python | spider_code/jiandan.py | legendsonldh/diff_spider | b5e3e4c2eb4750b7cc3997084af0ffc10db1112f | [
"MIT"
] | null | null | null | spider_code/jiandan.py | legendsonldh/diff_spider | b5e3e4c2eb4750b7cc3997084af0ffc10db1112f | [
"MIT"
] | null | null | null | spider_code/jiandan.py | legendsonldh/diff_spider | b5e3e4c2eb4750b7cc3997084af0ffc10db1112f | [
"MIT"
] | null | null | null | #-*-coding:utf-8-*-
from selenium import webdriver
import requests
import re
import os
from bs4 import BeautifulSoup
import time
os.makedirs('./meizi/ ' , exist_ok=False)
urls = ["http://jandan.net/ooxx/page-{}#comments".format(str(i)) for i in range(1, 48)]
# 定位Phantom.js 的参数设置
service_args=[]
# service_args.append('--load-images=no') ##关闭图片加载
service_args.append('--disk-cache=yes') ##开启缓存
service_args.append('--ignore-ssl-errors=true') ##忽略https错误
service_args.append('--ssl-protocol=any')##防止网站无法解析
d = webdriver.PhantomJS(service_args=service_args)
for url in urls:
d.get(url)
time.sleep(2)
print(d.current_url) # 查看链接是否污染
data = d.page_source
soup = BeautifulSoup(data, "lxml") #解析网页
img1 = soup.find_all("div",{"class":"text"})
for img in img1:
if str('gif') in str(img):
pass
else:
img_url = img.find("img",{"src": re.compile('.*?\.jpg')})['src']
r = requests.get(img_url, stream=True)
image_name = img_url.split('/')[-1]
with open('./meizi/%s' % image_name, 'wb') as f:
for chunk in r.iter_content(chunk_size=32):
f.write(chunk)
print('Saved %s' % image_name) | 32.868421 | 87 | 0.609287 |
acf7bca4042a8372a8b1060e800d4ac8178e4ef8 | 3,846 | py | Python | Day1/day1-afternoon-exercises/data_collector/gutenberg_scraper.py | klimpie94/Python-training | 7af210126cfe2e9386a8f22075ea0d7eff80daac | [
"RSA-MD"
] | null | null | null | Day1/day1-afternoon-exercises/data_collector/gutenberg_scraper.py | klimpie94/Python-training | 7af210126cfe2e9386a8f22075ea0d7eff80daac | [
"RSA-MD"
] | 1 | 2021-12-13T20:33:28.000Z | 2021-12-13T20:33:28.000Z | Day1/day1-afternoon-exercises/data_collector/gutenberg_scraper.py | klimpie94/Python-training | 7af210126cfe2e9386a8f22075ea0d7eff80daac | [
"RSA-MD"
] | 9 | 2020-02-05T10:24:12.000Z | 2020-02-10T13:08:50.000Z |
import os
from pathlib import Path
import json
import requests
import re
from bs4 import BeautifulSoup
from data_collector.scraping_utils import (
get_book_link_from_html,
get_book_name_from_html,
get_book_author_from_html,
get_book_downloads_from_html,
get_book_image_from_html)
from data_collector.custom_logger import set_logger
LOGGER = set_logger("data_collector")
CACHED_DATA_PATH = os.path.join(
os.fspath(Path(__file__).parents[0]),
"cached_data",
"popular_books.json")
def request_page_content(page_url):
'''Requests (GET) a page content.'''
page = requests.get(page_url)
assert page.status_code == 200, "Request is not successful"
return page.content
def get_book_link_list_from_page_content(page_content):
'''Finds all the book list tags in a given page content.
Args:
page_content (bytes): This is the requested page content with
requests package.
Returns:
list of book_list tags
'''
soup_object = BeautifulSoup(page_content, features="lxml")
return soup_object.findAll("li", {"class": "booklink"})
def get_book_contents_dict(book_html):
'''Creates a book contents dictionary for each book
html tag listed in the page.
Args:
book_html (bs4.element.Tag): The html tag for book
Returns:
book_dict (dict): Book dictionary with name, author
link, download count and image information.
'''
book_dict = {"book_name": None,
"book_author": None,
"book_link": None,
"book_downloads": None,
"book_image": None}
book_dict["book_link"] = get_book_link_from_html(book_html)
book_dict["book_name"] = get_book_name_from_html(book_html)
book_dict["book_author"] = get_book_author_from_html(book_html)
book_dict["book_downloads"] = get_book_downloads_from_html(book_html)
book_dict["book_image"] = get_book_image_from_html(book_html)
return book_dict
def create_main_dict_for_popular_books(popular_book_page):
'''Cretaes a main dictionary made of the popular books listed
in the Gutenberg popular books webpage.
Args:
popular_book_page (str): Popular books webpage string value.
Retruns:
dict of book contents, including name, author, download count,
image (if exists) and link.
'''
try:
gutenberg_content = request_page_content(popular_book_page)
book_list = get_book_link_list_from_page_content(gutenberg_content)
return {f"b_{idx}": get_book_contents_dict(book)
for idx, book in enumerate(book_list)}
except Exception as e:
LOGGER.info(f"{e}: cannot reach the website...")
LOGGER.info(f"Trying to use the cached dataset...")
with open(CACHED_DATA_PATH) as cached_data:
return json.load(cached_data)
def get_book_text_link(book_link_str):
    '''Resolve a book's page URL to the path of its plain-text file.

    The link must include the site prefix, e.g.
    "https://www.gutenberg.org/ebooks/46" or
    "https://www.gutenberg.org/ebooks/102".

    Args:
        book_link_str (str): Book page link as string.

    Returns:
        str or None: The href of the first ``text/plain`` anchor on the
        page, or ``None`` if the page cannot be fetched or parsed.

    Example:
        >>> get_book_text_link("https://www.gutenberg.org/ebooks/46")
        "/files/46/46-0.txt"
    '''
    try:
        content = request_page_content(book_link_str)
        parsed = BeautifulSoup(content, features="lxml")
        anchor = parsed.find("a", {"type": re.compile(r"text/plain")})
        return anchor["href"]
    except Exception:
        LOGGER.info(
            f"Cannot get the book text in:\n{book_link_str}")
        return None
| 30.768 | 75 | 0.682787 |
acf7bd402db56799ea729e96f9cc538ee11f8d35 | 3,627 | py | Python | lib/utils.py | pemami4911/EfficientMORL | 67939f06d35aa783b52bfe6f36ec9304d0bae1b7 | [
"MIT"
] | 18 | 2021-06-08T02:35:20.000Z | 2021-11-04T07:26:25.000Z | lib/utils.py | pemami4911/EfficientMORL | 67939f06d35aa783b52bfe6f36ec9304d0bae1b7 | [
"MIT"
] | null | null | null | lib/utils.py | pemami4911/EfficientMORL | 67939f06d35aa783b52bfe6f36ec9304d0bae1b7 | [
"MIT"
] | 1 | 2021-08-18T19:14:55.000Z | 2021-08-18T19:14:55.000Z | import torch
import torch.nn.functional as F
from torch.nn import init
import numpy as np
from scipy.stats import truncnorm
def truncated_normal_initializer(shape, mean, stddev):
    """Sample a float32 tensor from a normal truncated at +/- 2 std devs.

    Args:
        shape: Output tensor shape (passed to ``truncnorm.rvs`` as ``size``).
        mean: Centre used to place the truncation bounds.
        stddev: Spread used to place the truncation bounds.

    Returns:
        torch.Tensor: Samples converted from numpy, cast to float32.
    """
    lower, upper = mean - 2 * stddev, mean + 2 * stddev
    samples = truncnorm.rvs(lower, upper, size=shape)
    return torch.from_numpy(samples).float()
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.
    Modified from: https://github.com/baudm/MONet-pytorch/blob/master/models/networks.py
    Parameters:
        net (network)   -- network to be initialized
        init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)    -- scaling factor for normal, xavier and orthogonal.
    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    def init_func(m):  # define the initialization function
        classname = m.__class__.__name__
        # Only layers with a weight attribute (Conv*/Linear) are re-initialized.
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            elif init_type == 'truncated_normal':
                # Uses the scipy-based sampler defined above (std = init_gain).
                m.weight.data = truncated_normal_initializer(m.weight.shape, 0.0, stddev=init_gain)
            elif init_type == 'zeros':
                init.constant_(m.weight.data, 0.0)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm affine params: scale ~ N(1, gain), shift = 0,
            # regardless of the chosen init_type.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)
def _softplus_to_std(softplus):
softplus = torch.min(softplus, torch.ones_like(softplus)*80)
return torch.sqrt(torch.log(1. + softplus.exp()) + 1e-5)
def mvn(loc, softplus, temperature=1.0):
    """Diagonal Gaussian over the last dim (event dim 1).

    The scale is derived from `softplus` via _softplus_to_std and divided
    by `temperature` (temperature > 1 sharpens, < 1 widens).
    """
    scale = _softplus_to_std(softplus) * (1. / temperature)
    base = torch.distributions.normal.Normal(loc, scale)
    return torch.distributions.independent.Independent(base, 1)
def std_mvn(shape, device):
    """Standard diagonal Gaussian N(0, I) of `shape`, placed on `device`.

    The last dimension is the event dimension.
    """
    zeros = torch.zeros(shape).to(device)
    ones = torch.ones(shape).to(device)
    base = torch.distributions.normal.Normal(zeros, ones)
    return torch.distributions.independent.Independent(base, 1)
def gmm_negativeloglikelihood(x_t, x_loc, log_var, mask_logprobs):
    """
    Per-sample NLL of x_t under a K-component pixelwise Gaussian mixture.

    x_t: [N, C, H, W] target image
    x_loc: [N, K, C, H, W] per-slot means
    log_var: log variance (broadcastable against the squared error)
    mask_logprobs: [N, K, 1, H, W] log mixing weights per pixel

    Returns: [N] negative log-likelihood per sample.
    """
    # NLL [batch_size, 1, H, W]
    sq_err = (x_t.unsqueeze(1) - x_loc).pow(2)
    # log N(x; x_loc, log_var): [N, K, C, H, W]
    normal_ll = -0.5 * log_var - 0.5 * (sq_err / torch.exp(log_var))
    # [N, K, C, H, W] -- joint log prob of (slot k, pixel value)
    log_p_k = (mask_logprobs + normal_ll)
    # logsumexp over slots [N, C, H, W] -- marginalizes out the mixture component
    log_p = torch.logsumexp(log_p_k, dim=1)
    # [N] -- sum over channels and pixels, negate for a loss
    nll = -torch.sum(log_p, dim=[1,2,3])
    return nll
def gaussian_negativeloglikelihood(x_t, x_loc, log_var):
    """Pixelwise Gaussian NLL summed over C, H, W.

    x_t, x_loc, log_var: [N, C, H, W]
    Returns: [N] negative log-likelihood per sample
    (constant log(2*pi)/2 term omitted, as in the original).
    """
    squared_error = (x_t - x_loc).pow(2)  # [N,C,H,W]
    # log N(x; x_loc, log_var): [N,C,H,W]
    log_likelihood = -0.5 * log_var - 0.5 * (squared_error / torch.exp(log_var))
    return -torch.sum(log_likelihood, dim=[1, 2, 3])  # [N]
| 40.752809 | 103 | 0.635787 |
acf7be30d09153aed412ce3ea94690e8d1ff0b8e | 531 | py | Python | common_app/decorators.py | KameliyaN/testProject-master | a851f69fcc84116fb90d243bee24e10295e92228 | [
"MIT"
] | null | null | null | common_app/decorators.py | KameliyaN/testProject-master | a851f69fcc84116fb90d243bee24e10295e92228 | [
"MIT"
] | null | null | null | common_app/decorators.py | KameliyaN/testProject-master | a851f69fcc84116fb90d243bee24e10295e92228 | [
"MIT"
] | null | null | null | from functools import wraps
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from common_app.models import Article
def user_is_article_author_or_admin(view):
    """Decorator: run `view` only for the article's author or a superuser."""
    @wraps(view)
    def wrap(request, *args, **kwargs):
        # Look up the article targeted by the URL's primary key.
        article = Article.objects.get(pk=kwargs['pk'])
        if article.user == request.user.profile or request.user.is_superuser:
            return view(request, *args, **kwargs)
        else:
            # NOTE(review): returns HTTP 200 with a plain message;
            # raising PermissionDenied (imported at module level but unused)
            # would give a 403 instead -- confirm which is intended.
            return HttpResponse('You are not authorised')
    return wrap
| 27.947368 | 77 | 0.704331 |
acf7bfb8d44ace970ca6728d15065545118db177 | 963 | py | Python | morphtransformkun/morphtransformkun.py | ankalagigaurave/morphtransformkun-Package | ecfd9df12c548511f4ad593982b1ba4423beedad | [
"MIT"
] | null | null | null | morphtransformkun/morphtransformkun.py | ankalagigaurave/morphtransformkun-Package | ecfd9df12c548511f4ad593982b1ba4423beedad | [
"MIT"
] | null | null | null | morphtransformkun/morphtransformkun.py | ankalagigaurave/morphtransformkun-Package | ecfd9df12c548511f4ad593982b1ba4423beedad | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Making a package.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GKoEPFc861NPXvOClIDsc1Exsdu3GXrN
"""
import cv2
import numpy as np
def erosion(img, kernel_size, iteration):
    """Erode `img` with a square all-ones uint8 kernel, `iteration` times."""
    structuring_element = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.erode(img, structuring_element, iterations=iteration)
def dilation(img, kernel_size, iteration):
    """Dilate `img` with a square all-ones uint8 kernel.

    Args:
        img: Input image (numpy array).
        kernel_size (int): Side length of the square structuring element.
        iteration (int): Number of dilation passes.

    Returns:
        The dilated image.

    Bug fix: ``iterations`` was hard-coded to 1, silently ignoring the
    ``iteration`` argument; it is now honoured (matching ``erosion``).
    """
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.dilate(img, kernel, iterations=iteration)
def opening(img, kernel_size, iteration):
    """Morphological opening (erosion followed by dilation).

    Args:
        img: Input image (numpy array).
        kernel_size (int): Side length of the square structuring element.
        iteration (int): Number of opening passes.

    Returns:
        The opened image.

    Fix: the ``iteration`` parameter was accepted but never used; it is
    now forwarded to OpenCV. Behaviour for ``iteration=1`` (OpenCV's
    default) is unchanged.
    """
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=iteration)
def closing(img, kernel_size, iteration):
    """Morphological closing (dilation followed by erosion).

    Args:
        img: Input image (numpy array).
        kernel_size (int): Side length of the square structuring element.
        iteration (int): Number of closing passes.

    Returns:
        The closed image.

    Fix: the ``iteration`` parameter was accepted but never used; it is
    now forwarded to OpenCV. Behaviour for ``iteration=1`` (OpenCV's
    default) is unchanged.
    """
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=iteration)
| 26.027027 | 77 | 0.747664 |
acf7c0154eb7390dd9b31eddf527e59e23e5572a | 1,232 | py | Python | Number guessing game.py | shubhamm-06/Number-guessing-game | c2068062f64ad5dc6588c6d3893e11e2d53c2094 | [
"BSL-1.0"
] | null | null | null | Number guessing game.py | shubhamm-06/Number-guessing-game | c2068062f64ad5dc6588c6d3893e11e2d53c2094 | [
"BSL-1.0"
] | null | null | null | Number guessing game.py | shubhamm-06/Number-guessing-game | c2068062f64ad5dc6588c6d3893e11e2d53c2094 | [
"BSL-1.0"
] | null | null | null | # ********************Number Guessing Game**************************
# ----------------------
# Date : Saturday 13/11/2021
# Time : 10:15:00 AM
import random
# Secret answer for this run: a random integer from 1 to 5 inclusive.
L1 = random.randint(1,5)
def level1(num1):
    """Compare the player's guess `num1` against the global answer L1
    and print the outcome."""
    if num1 != L1:
        print("Guess again")
    else:
        print("You guessed the corrrect number!")
def hint(L1):
    """Print a riddle-style hint corresponding to the secret number `L1`.

    One hardcoded hint per possible value 1-5; prints nothing for other
    values. (The parameter shadows the module-level global of the same
    name on purpose -- callers pass the global in.)
    """
    if L1 == 1:
        print("Multiply or Divide me with any number, I will alwas give equal result . \n Who am I? \n*******************************************************************")
    elif L1 == 2 :
        print("I am smallest even number \n**************************** ")
    elif L1 == 3 :
        print("I can give you a bronze medal")
    elif L1 == 4:
        print("Square of me half of 32")
    elif L1 == 5:
        print("Multiply me with any number i will always give out 5 or zero as the last digit \n*********************************************************************")
def point():
    """Placeholder for a future scoring system; intentionally does nothing."""
    pass
# --- Two-attempt game loop ---
# Show a hint for the secret number, read the player's guess, check it.
# hint() and level1() only print, so b/c/c1 are always None.
b = hint(L1)
a = int(input("Guess the number :\n"))
c = level1(a)
# Second attempt: hint again and re-check.
hint(L1)
a1 = int(input("Guess the number :\n"))
c1 = level1(a1)
| 28 | 171 | 0.405844 |
acf7c034becf95bf750dee22c876dbe58f5b76eb | 2,801 | py | Python | docs/docs.py | eaton-lab/toyplot | 472f2f2f1bc048e485ade44d75c3ace310be4b41 | [
"BSD-3-Clause"
] | 438 | 2015-01-06T20:54:02.000Z | 2022-03-15T00:39:33.000Z | docs/docs.py | eaton-lab/toyplot | 472f2f2f1bc048e485ade44d75c3ace310be4b41 | [
"BSD-3-Clause"
] | 184 | 2015-01-26T17:04:47.000Z | 2022-02-19T16:29:00.000Z | docs/docs.py | eaton-lab/toyplot | 472f2f2f1bc048e485ade44d75c3ace310be4b41 | [
"BSD-3-Clause"
] | 45 | 2015-07-06T18:00:27.000Z | 2022-02-14T12:46:17.000Z | # Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
import numpy
import toyplot.color
import toyplot.require
def plot_luma(*args): #pragma: no cover
    """Plot the CIE L* (luma) curve of one or more colormaps on a canvas grid.

    Accepts either ``plot_luma(name, colormap)`` for a single map or
    ``plot_luma([(name, colormap), ...])`` for several. Palettes are
    wrapped in a LinearMap first. Returns the toyplot Canvas.
    """
    if len(args) == 2:
        colormaps = [(args[0], args[1])]
    else:
        colormaps = args[0]
    # Lay the plots out on a grid 4 columns wide, 150px per cell.
    grid_n = 4.0
    grid_m = numpy.ceil(len(colormaps) / grid_n)
    canvas = toyplot.Canvas(grid_n * 150, grid_m * 150)
    for index, (name, colormap) in enumerate(colormaps):
        if isinstance(colormap, toyplot.color.Palette):
            colormap = toyplot.color.LinearMap(colormap)
        # Sample 200 points across the map and take L* from CIE Lab.
        x = numpy.linspace(0, 1, 200)
        y = [toyplot.color.to_lab(color)[0] for color in colormap.colors(x, 0, 1)]
        axes = canvas.cartesian(
            grid=(grid_m, grid_n, index),
            ymin=0,
            ymax=100,
            margin=30,
            xshow=True,
            yshow=True,
            label=name,
        )
        axes.scatterplot(x, y, size=10, color=(x, colormap))
    return canvas
def prufer_tree(sequence):
    """Use a Prufer sequence to generate a tree.

    Args:
        sequence: Iterable of ints in [0, n+2), where n = len(sequence).

    Returns:
        numpy array of shape (n+1, 2): tree edges as (source, target) pairs
        over n+2 nodes.

    Raises:
        ValueError: If any value falls outside [0, n+2).
    """
    sequence = toyplot.require.integer_vector(sequence)
    n = len(sequence)
    if numpy.any(sequence < 0) or numpy.any(sequence >= n + 2):
        # Bug fix: the old message used `"... %s)" % n+2`, which formatted
        # with n and then tried to add 2 to the resulting *string*, raising
        # TypeError instead of the intended ValueError.
        raise ValueError("Sequence values must be in the range [0, %s)" % (n + 2)) # pragma: no cover
    sources = []
    targets = []
    # Standard Prufer decoding: each node's degree is 1 + its count in the
    # sequence; repeatedly join each sequence entry to the lowest leaf.
    degree = numpy.ones(n+2, dtype="int64")
    for i in sequence:
        degree[i] += 1
    for i in sequence:
        for j in numpy.arange(n+2):
            if degree[j] == 1:
                sources.append(i)
                targets.append(j)
                degree[i] -= 1
                degree[j] -= 1
                break
    # Exactly two nodes of degree 1 remain; connect them for the final edge.
    u, v = numpy.flatnonzero(degree == 1)
    sources.append(u)
    targets.append(v)
    return numpy.column_stack((sources, targets))
def barabasi_albert_graph(n=30, m=2, seed=1234):
    """Generate a graph using the preferential attachment model of
    Barabasi and Albert.

    Args:
        n: Total number of nodes.
        m: Edges added per new node (must be in [1, n)).
        seed: RNG seed for reproducibility.

    Returns:
        numpy array of shape (E, 2) holding (source, target) edge pairs.
    """
    if m < 1 or m >= n:
        raise ValueError("m must be in the range [1, n].") # pragma: no cover
    rng = numpy.random.RandomState(seed=seed)
    edge_sources = []
    edge_targets = []
    source = m
    targets = numpy.arange(m)
    # Pool of node ids, repeated proportionally to their degree, so a
    # uniform draw implements preferential attachment.
    attachment_pool = numpy.array([], dtype="int64")
    while source < n:
        for target in targets:
            edge_sources.append(source)
            edge_targets.append(target)
        attachment_pool = numpy.append(attachment_pool, targets)
        attachment_pool = numpy.append(attachment_pool, numpy.repeat(source, m))
        targets = rng.choice(attachment_pool, size=m)
        source += 1
    return numpy.column_stack((edge_sources, edge_targets))
| 30.11828 | 97 | 0.604784 |
acf7c069711815ae1fecb27a864960caea977226 | 234 | py | Python | CS 495 - Spatiotemporal Databases (IS)/queries/pointInPolygon/mapper.py | kevin-411/SIUE-Projects | 92f386abc1b44762d6b146a71a393447234d6cf2 | [
"MIT"
] | 1 | 2021-02-11T09:14:28.000Z | 2021-02-11T09:14:28.000Z | CS 495 - Spatiotemporal Databases (IS)/queries/pointInPolygon/mapper.py | kevin-411/SIUE-Projects | 92f386abc1b44762d6b146a71a393447234d6cf2 | [
"MIT"
] | null | null | null | CS 495 - Spatiotemporal Databases (IS)/queries/pointInPolygon/mapper.py | kevin-411/SIUE-Projects | 92f386abc1b44762d6b146a71a393447234d6cf2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Created on November 16, 2013
@author: Brian
'''
import sys
import raincloudregion
# Hadoop-streaming mapper body (Python 2 syntax: note the print statement).
# Parse the spatiotemporal stream on stdin into a moving rain-cloud region.
mcrc = raincloudregion.translateMovingRegion(sys.stdin)
word = mcrc.color
word = word.strip()
# Emit "<color>\t1" key/count pairs for the reducer.
print '%s\t%s' % (word, 1)
| 13.764706 | 55 | 0.709402 |
acf7c0d6a405a1de834bb7a232be1fe5051a1f4e | 422 | py | Python | savoten/repository/json_reader_writer.py | JunYamaguchi/savoten | 0dc0c54af409bb6b72bf1985c38614212b3955f4 | [
"MIT"
] | null | null | null | savoten/repository/json_reader_writer.py | JunYamaguchi/savoten | 0dc0c54af409bb6b72bf1985c38614212b3955f4 | [
"MIT"
] | 57 | 2018-04-30T05:59:43.000Z | 2019-12-08T12:16:35.000Z | savoten/repository/json_reader_writer.py | JunYamaguchi/savoten | 0dc0c54af409bb6b72bf1985c38614212b3955f4 | [
"MIT"
] | 1 | 2019-11-03T15:11:05.000Z | 2019-11-03T15:11:05.000Z | import json
import filelock
def lock(filename):
    """Return a file lock guarding `filename` (lock file "<filename>.lock",
    1 second acquire timeout). The caller acquires/releases it."""
    return filelock.FileLock(filename + ".lock", timeout=1)
def unlock(lock):
    """Release a previously acquired file lock.

    Raises:
        TypeError: If `lock` is not a ``filelock.FileLock`` instance.
    """
    if isinstance(lock, filelock.FileLock):
        lock.release()
    else:
        raise TypeError
def json_write(filename, data):
    """Serialise `data` as JSON into `filename`, overwriting any content."""
    with open(filename, "w") as handle:
        json.dump(data, handle)
def json_read(filename):
    """Load and return the JSON document stored in `filename`."""
    with open(filename, "r") as handle:
        return json.load(handle)
| 14.551724 | 59 | 0.635071 |
acf7c123ce1cf1429750fe4efe07e3ca5225783b | 2,407 | py | Python | tests/test_postgres.py | dongweiming/tortoise-orm | 468444af1414472c08984a596ec83ce5d8806e45 | [
"Apache-2.0"
] | null | null | null | tests/test_postgres.py | dongweiming/tortoise-orm | 468444af1414472c08984a596ec83ce5d8806e45 | [
"Apache-2.0"
] | null | null | null | tests/test_postgres.py | dongweiming/tortoise-orm | 468444af1414472c08984a596ec83ce5d8806e45 | [
"Apache-2.0"
] | 1 | 2019-05-10T16:22:38.000Z | 2019-05-10T16:22:38.000Z | """
Test some postgres-specific features
"""
import ssl
from tests.testmodels import Tournament
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import OperationalError
class TestTwoDatabases(test.SimpleTestCase):
    """PostgreSQL-only tests: schema-qualified tables and SSL connection options."""
    async def setUp(self):
        # Reset any Tortoise state left over from a previous test run.
        if Tortoise._inited:
            await self._tearDownDB()
        self.db_config = test.getDBConfig(app_label="models", modules=["tests.testmodels"])
        # These tests exercise asyncpg-specific credentials; skip otherwise.
        if self.db_config["connections"]["models"]["engine"] != "tortoise.backends.asyncpg":
            raise test.SkipTest("PostgreSQL only")
    async def tearDown(self) -> None:
        if Tortoise._inited:
            await Tortoise._drop_databases()
    async def test_schema(self):
        from asyncpg.exceptions import InvalidSchemaNameError
        self.db_config["connections"]["models"]["credentials"]["schema"] = "mytestschema"
        await Tortoise.init(self.db_config, _create_db=True)
        # Schema does not exist yet, so generating tables must fail.
        with self.assertRaises(InvalidSchemaNameError):
            await Tortoise.generate_schemas()
        conn = Tortoise.get_connection("models")
        await conn.execute_script("CREATE SCHEMA mytestschema;")
        await Tortoise.generate_schemas()
        tournament = await Tournament.create(name="Test")
        await Tortoise.close_connections()
        # Reconnect WITHOUT the schema option: unqualified queries now miss
        # the table, but a schema-qualified raw query still finds the row.
        del self.db_config["connections"]["models"]["credentials"]["schema"]
        await Tortoise.init(self.db_config)
        with self.assertRaises(OperationalError):
            await Tournament.filter(name="Test").first()
        conn = Tortoise.get_connection("models")
        res = await conn.execute_query(
            "SELECT id, name FROM mytestschema.tournament WHERE name='Test' LIMIT 1"
        )
        self.assertEqual(len(res), 1)
        self.assertEqual(tournament.id, res[0][0])
        self.assertEqual(tournament.name, res[0][1])
    async def test_ssl_true(self):
        # ssl=True against a server without SSL must fail to connect.
        self.db_config["connections"]["models"]["credentials"]["ssl"] = True
        with self.assertRaises(ConnectionError):
            await Tortoise.init(self.db_config)
    async def test_ssl_custom(self):
        # A custom (non-verifying) SSLContext is accepted as the ssl value,
        # but the connection is still expected to fail in this test setup.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        self.db_config["connections"]["models"]["credentials"]["ssl"] = ctx
        with self.assertRaises(ConnectionError):
            await Tortoise.init(self.db_config)
| 35.397059 | 92 | 0.673868 |
acf7c16d0a6b871c4bf280aa716faacb149cd6ae | 11,669 | py | Python | mmdet3d/models/roi_heads/point_rcnn_roi_head.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | mmdet3d/models/roi_heads/point_rcnn_roi_head.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | mmdet3d/models/roi_heads/point_rcnn_roi_head.py | ammaryasirnaich/mmdetection3d | 5e549546abbb2a7b43aab59e40e87599f61dcc4a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn import functional as F
from mmdet3d.core import AssignResult
from mmdet3d.core.bbox import bbox3d2result, bbox3d2roi
from mmdet.core import build_assigner, build_sampler
from ..builder import HEADS, build_head, build_roi_extractor
from .base_3droi_head import Base3DRoIHead
@HEADS.register_module()
class PointRCNNRoIHead(Base3DRoIHead):
    """RoI head for PointRCNN.
    Args:
        bbox_head (dict): Config of bbox_head.
        point_roi_extractor (dict): Config of RoI extractor.
        train_cfg (dict): Train configs.
        test_cfg (dict): Test configs.
        depth_normalizer (float, optional): Normalize depth feature.
            Defaults to 70.0.
        init_cfg (dict, optional): Config of initialization. Defaults to None.
    """
    def __init__(self,
                 bbox_head,
                 point_roi_extractor,
                 train_cfg,
                 test_cfg,
                 depth_normalizer=70.0,
                 pretrained=None,
                 init_cfg=None):
        super(PointRCNNRoIHead, self).__init__(
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
        self.depth_normalizer = depth_normalizer
        if point_roi_extractor is not None:
            self.point_roi_extractor = build_roi_extractor(point_roi_extractor)
        self.init_assigner_sampler()
    def init_bbox_head(self, bbox_head):
        """Initialize box head.
        Args:
            bbox_head (dict): Config dict of RoI Head.
        """
        self.bbox_head = build_head(bbox_head)
    def init_mask_head(self):
        """Initialize mask head -- PointRCNN has none, so this is a no-op."""
        pass
    def init_assigner_sampler(self):
        """Initialize assigner and sampler."""
        self.bbox_assigner = None
        self.bbox_sampler = None
        if self.train_cfg:
            # A single assigner config (dict) or one assigner per class (list).
            if isinstance(self.train_cfg.assigner, dict):
                self.bbox_assigner = build_assigner(self.train_cfg.assigner)
            elif isinstance(self.train_cfg.assigner, list):
                self.bbox_assigner = [
                    build_assigner(res) for res in self.train_cfg.assigner
                ]
            self.bbox_sampler = build_sampler(self.train_cfg.sampler)
    def forward_train(self, feats_dict, input_metas, proposal_list,
                      gt_bboxes_3d, gt_labels_3d):
        """Training forward function of PointRCNNRoIHead.
        Args:
            feats_dict (dict): Contains features from the first stage.
            input_metas (list[dict]): Meta info of each input.
            proposal_list (list[dict]): Proposal information from rpn.
                The dictionary should contain the following keys:
                - boxes_3d (:obj:`BaseInstance3DBoxes`): Proposal bboxes
                - labels_3d (torch.Tensor): Labels of proposals
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]):
                GT bboxes of each sample. The bboxes are encapsulated
                by 3D box structures.
            gt_labels_3d (list[LongTensor]): GT labels of each sample.
        Returns:
            dict: Losses from RoI RCNN head.
                - loss_bbox (torch.Tensor): Loss of bboxes
        """
        features = feats_dict['features']
        points = feats_dict['points']
        point_cls_preds = feats_dict['points_cls_preds']
        # Per-point foreground confidence: max sigmoid score over classes.
        sem_scores = point_cls_preds.sigmoid()
        point_scores = sem_scores.max(-1)[0]
        sample_results = self._assign_and_sample(proposal_list, gt_bboxes_3d,
                                                 gt_labels_3d)
        # concat the depth, semantic features and backbone features
        features = features.transpose(1, 2).contiguous()
        # Depth feature: point range normalized then shifted to ~[-0.5, ...].
        point_depths = points.norm(dim=2) / self.depth_normalizer - 0.5
        features_list = [
            point_scores.unsqueeze(2),
            point_depths.unsqueeze(2), features
        ]
        features = torch.cat(features_list, dim=2)
        bbox_results = self._bbox_forward_train(features, points,
                                                sample_results)
        losses = dict()
        losses.update(bbox_results['loss_bbox'])
        return losses
    def simple_test(self, feats_dict, img_metas, proposal_list, **kwargs):
        """Simple testing forward function of PointRCNNRoIHead.
        Note:
            This function assumes that the batch size is 1
        Args:
            feats_dict (dict): Contains features from the first stage.
            img_metas (list[dict]): Meta info of each image.
            proposal_list (list[dict]): Proposal information from rpn.
        Returns:
            dict: Bbox results of one frame.
        """
        rois = bbox3d2roi([res['boxes_3d'].tensor for res in proposal_list])
        labels_3d = [res['labels_3d'] for res in proposal_list]
        features = feats_dict['features']
        points = feats_dict['points']
        point_cls_preds = feats_dict['points_cls_preds']
        sem_scores = point_cls_preds.sigmoid()
        point_scores = sem_scores.max(-1)[0]
        # Same feature construction as forward_train: [score, depth, backbone].
        features = features.transpose(1, 2).contiguous()
        point_depths = points.norm(dim=2) / self.depth_normalizer - 0.5
        features_list = [
            point_scores.unsqueeze(2),
            point_depths.unsqueeze(2), features
        ]
        features = torch.cat(features_list, dim=2)
        batch_size = features.shape[0]
        bbox_results = self._bbox_forward(features, points, batch_size, rois)
        object_score = bbox_results['cls_score'].sigmoid()
        bbox_list = self.bbox_head.get_bboxes(
            rois,
            object_score,
            bbox_results['bbox_pred'],
            labels_3d,
            img_metas,
            cfg=self.test_cfg)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results
    def _bbox_forward_train(self, features, points, sampling_results):
        """Forward training function of roi_extractor and bbox_head.
        Args:
            features (torch.Tensor): Backbone features with depth and
                semantic features.
            points (torch.Tensor): Pointcloud.
            sampling_results (:obj:`SamplingResult`): Sampled results used
                for training.
        Returns:
            dict: Forward results including losses and predictions.
        """
        rois = bbox3d2roi([res.bboxes for res in sampling_results])
        batch_size = features.shape[0]
        bbox_results = self._bbox_forward(features, points, batch_size, rois)
        bbox_targets = self.bbox_head.get_targets(sampling_results,
                                                  self.train_cfg)
        loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
                                        bbox_results['bbox_pred'], rois,
                                        *bbox_targets)
        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results
    def _bbox_forward(self, features, points, batch_size, rois):
        """Forward function of roi_extractor and bbox_head used in both
        training and testing.
        Args:
            features (torch.Tensor): Backbone features with depth and
                semantic features.
            points (torch.Tensor): Pointcloud.
            batch_size (int): Batch size.
            rois (torch.Tensor): RoI boxes.
        Returns:
            dict: Contains predictions of bbox_head and
                features of roi_extractor.
        """
        pooled_point_feats = self.point_roi_extractor(features, points,
                                                      batch_size, rois)
        cls_score, bbox_pred = self.bbox_head(pooled_point_feats)
        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
        return bbox_results
    def _assign_and_sample(self, proposal_list, gt_bboxes_3d, gt_labels_3d):
        """Assign and sample proposals for training.
        Args:
            proposal_list (list[dict]): Proposals produced by RPN.
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth
                boxes.
            gt_labels_3d (list[torch.Tensor]): Ground truth labels
        Returns:
            list[:obj:`SamplingResult`]: Sampled results of each training
                sample.
        """
        sampling_results = []
        # bbox assign
        for batch_idx in range(len(proposal_list)):
            cur_proposal_list = proposal_list[batch_idx]
            cur_boxes = cur_proposal_list['boxes_3d']
            cur_labels_3d = cur_proposal_list['labels_3d']
            cur_gt_bboxes = gt_bboxes_3d[batch_idx].to(cur_boxes.device)
            cur_gt_labels = gt_labels_3d[batch_idx]
            batch_num_gts = 0
            # 0 is bg
            batch_gt_indis = cur_gt_labels.new_full((len(cur_boxes), ), 0)
            batch_max_overlaps = cur_boxes.tensor.new_zeros(len(cur_boxes))
            # -1 is bg
            batch_gt_labels = cur_gt_labels.new_full((len(cur_boxes), ), -1)
            # each class may have its own assigner
            if isinstance(self.bbox_assigner, list):
                for i, assigner in enumerate(self.bbox_assigner):
                    # Restrict assignment to proposals/GTs of class i only.
                    gt_per_cls = (cur_gt_labels == i)
                    pred_per_cls = (cur_labels_3d == i)
                    cur_assign_res = assigner.assign(
                        cur_boxes.tensor[pred_per_cls],
                        cur_gt_bboxes.tensor[gt_per_cls],
                        gt_labels=cur_gt_labels[gt_per_cls])
                    # gather assign_results in different class into one result
                    batch_num_gts += cur_assign_res.num_gts
                    # gt inds (1-based)
                    gt_inds_arange_pad = gt_per_cls.nonzero(
                        as_tuple=False).view(-1) + 1
                    # pad 0 for indice unassigned
                    gt_inds_arange_pad = F.pad(
                        gt_inds_arange_pad, (1, 0), mode='constant', value=0)
                    # pad -1 for indice ignore
                    gt_inds_arange_pad = F.pad(
                        gt_inds_arange_pad, (1, 0), mode='constant', value=-1)
                    # convert to 0~gt_num+2 for indices
                    gt_inds_arange_pad += 1
                    # now 0 is bg, >1 is fg in batch_gt_indis
                    batch_gt_indis[pred_per_cls] = gt_inds_arange_pad[
                        cur_assign_res.gt_inds + 1] - 1
                    batch_max_overlaps[
                        pred_per_cls] = cur_assign_res.max_overlaps
                    batch_gt_labels[pred_per_cls] = cur_assign_res.labels
                assign_result = AssignResult(batch_num_gts, batch_gt_indis,
                                             batch_max_overlaps,
                                             batch_gt_labels)
            else:  # for single class
                assign_result = self.bbox_assigner.assign(
                    cur_boxes.tensor,
                    cur_gt_bboxes.tensor,
                    gt_labels=cur_gt_labels)
            # sample boxes
            sampling_result = self.bbox_sampler.sample(assign_result,
                                                       cur_boxes.tensor,
                                                       cur_gt_bboxes.tensor,
                                                       cur_gt_labels)
            sampling_results.append(sampling_result)
        return sampling_results
| 40.658537 | 79 | 0.581884 |
acf7c1f2115b904950cb808e51602410d78f5385 | 809 | py | Python | src/globus_sdk/services/transfer/errors.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/globus_sdk/services/transfer/errors.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/globus_sdk/services/transfer/errors.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from typing import Any, List
import requests
from globus_sdk import exc
class TransferAPIError(exc.GlobusAPIError):
    """
    Error class for the Transfer API client. In addition to the
    inherited ``code`` and ``message`` instance variables, provides ``request_id``.
    :ivar request_id: Unique identifier for the request, which should be
        provided when contacting support@globus.org.
    """
    def __init__(self, r: requests.Response) -> None:
        # Default to None; the base-class __init__ triggers _load_from_json,
        # which fills request_id from the error payload when present.
        self.request_id = None
        super().__init__(r)
    def _get_args(self) -> List[Any]:
        """Extend the base error args with request_id (used for repr/str)."""
        args = super()._get_args()
        args.append(self.request_id)
        return args
    def _load_from_json(self, data: dict) -> None:
        """Populate fields from the JSON error body, including request_id."""
        super()._load_from_json(data)
        self.request_id = data.get("request_id")
acf7c1fa64eb0aec4ccf75e15fc3c72f63d1f56c | 1,190 | py | Python | case/test_case1.py | imzengyang/appiumexample | 5bb0898f831103d5564ec4766141d41f99eb5712 | [
"MIT"
] | null | null | null | case/test_case1.py | imzengyang/appiumexample | 5bb0898f831103d5564ec4766141d41f99eb5712 | [
"MIT"
] | null | null | null | case/test_case1.py | imzengyang/appiumexample | 5bb0898f831103d5564ec4766141d41f99eb5712 | [
"MIT"
] | null | null | null | #coding=utf-8
import sys
sys.path.append("E:\codes\AppiumPython")
import unittest
from util.server import Server
import multiprocessing
import time
from business.login_business import LoginBusiness
from util.write_user_command import WriteUserCommand
from base.base_driver import BaseDriver
from util.tools import Tools
# Resolve the project root once; used below to build screenshot paths.
tool = Tools()
rootpath = tool.getRootPath()
class CaseTest(unittest.TestCase):
    """Appium login test case; device parameters arrive via the constructor."""
    def __init__(self, methodName='runTest', param=None):
        super(CaseTest, self).__init__(methodName)
        # Stash the per-device parameter in a module global ("parames",
        # sic) so the classmethod hooks below can read it.
        global parames
        parames = param
        print(parames)
    @classmethod
    def setUpClass(cls):
        # Create one Appium driver per class run from the stored parameters.
        print( "setUpclass---->",parames)
        base_driver = BaseDriver()
        cls.driver = base_driver.android_driver(parames)
        cls.login_business = LoginBusiness(cls.driver,parames)
    def setUp(self):
        print ("this is setup\n")
    def test_01(self):
        self.login_business.login_token()
    def tearDown(self):
        # Take a screenshot if the test raised an exception.
        time.sleep(1)
        print( "this is teardown\n")
        # NOTE(review): sys.exc_info() in tearDown is an unreliable failure
        # probe on Python 3 -- confirm it still fires here.
        if sys.exc_info()[0]:
            self.login_business.login_handle.login_page.driver.save_screenshot( rootpath+"/jpg/test01.png")
    @classmethod
    def tearDownClass(cls):
        time.sleep(1)
        print ("this is class teardown\n")
        # cls.driver.quit()
| 22.037037 | 98 | 0.754622 |
acf7c257d80bd807e683a531402728c3fd9547f4 | 1,569 | py | Python | tools/c7n_azure/tests/test_container_service.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | 2 | 2020-01-20T19:46:28.000Z | 2020-08-19T14:20:27.000Z | tools/c7n_azure/tests/test_container_service.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | 1 | 2017-12-13T14:05:24.000Z | 2017-12-13T14:05:24.000Z | tools/c7n_azure/tests/test_container_service.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | 1 | 2016-08-10T20:17:17.000Z | 2016-08-10T20:17:17.000Z | # Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure_common import BaseTest
class ContainerServiceTest(BaseTest):
    """Cloud Custodian tests for the azure.containerservice resource type."""
    def setUp(self):
        super(ContainerServiceTest, self).setUp()
    def test_container_service_schema_validate(self):
        # Policy referencing the resource type must pass schema validation
        # without real credentials (sign_out_patch).
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-container-service',
                'resource': 'azure.containerservice'
            }, validate=True)
            self.assertTrue(p)
    def test_find_by_name(self):
        # A name-equality value filter should match exactly the one
        # pre-provisioned test cluster ("cctestacs").
        p = self.load_policy({
            'name': 'test-azure-containerservice',
            'resource': 'azure.containerservice',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'eq',
                 'value_type': 'normalize',
                 'value': 'cctestacs'}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)
| 35.659091 | 82 | 0.639261 |
acf7c4dee63129c680bfc0016b4e1c16b3d38268 | 1,640 | py | Python | src/posts/migrations/0001_initial.py | adityashetty0302/blog-app-django-drf | 6e8f67c14c275951771055eda7417da6def3c7bb | [
"MIT"
] | 1 | 2021-09-29T16:21:07.000Z | 2021-09-29T16:21:07.000Z | src/posts/migrations/0001_initial.py | adityashetty0302/blog-app-django-drf | 6e8f67c14c275951771055eda7417da6def3c7bb | [
"MIT"
] | 9 | 2020-02-12T03:20:36.000Z | 2021-09-20T17:32:42.000Z | src/posts/migrations/0001_initial.py | adityashetty0302/blog-app-django-drf | 6e8f67c14c275951771055eda7417da6def3c7bb | [
"MIT"
] | 1 | 2020-03-16T14:31:10.000Z | 2020-03-16T14:31:10.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2019-03-18 06:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import posts.models
class Migration(migrations.Migration):
    """Initial migration: creates the Post model (blog posts with images)."""
    initial = True
    dependencies = [
        # Posts reference the configured user model via a ForeignKey.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('slug', models.SlugField(unique=True)),
                # Image dimensions are auto-filled into the two int fields below.
                ('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to=posts.models.upload_location, width_field='width_field')),
                ('height_field', models.IntegerField(default=0)),
                ('width_field', models.IntegerField(default=0)),
                ('content', models.TextField()),
                ('draft', models.BooleanField(default=False)),
                ('publish', models.DateField()),
                ('read_time', models.IntegerField(default=0)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first.
                'ordering': ['-timestamp', '-updated'],
            },
        ),
    ]
| 39.047619 | 164 | 0.604268 |
acf7c5751f88a5673a09130e8deb0bdaa1085110 | 13,067 | py | Python | application.py | ademola25/catalog | eb95bc9277566e49e659e221735bf6ba9c3aff8e | [
"MIT"
] | null | null | null | application.py | ademola25/catalog | eb95bc9277566e49e659e221735bf6ba9c3aff8e | [
"MIT"
] | null | null | null | application.py | ademola25/catalog | eb95bc9277566e49e659e221735bf6ba9c3aff8e | [
"MIT"
] | null | null | null | #!/usr/bin/python
from flask import Flask, render_template, request, redirect
from flask import jsonify, url_for, flash, g
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from functools import wraps
from database_setup import Category, Base, SpaItem, User
from flask import session as login_session
import random
import string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Category in Spa Application"
# Connecting to the Database and create database session
engine = create_engine('sqlite:///spa_category.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
    """Render the login page, seeding the session with a CSRF state token."""
    alphabet = string.ascii_uppercase + string.digits
    # 32 random characters; the OAuth callback must echo this value back.
    state = ''.join(random.choice(alphabet) for _ in xrange(32))
    login_session['state'] = state
    return render_template('login.html', STATE=state)
# Connect a user via Google OAuth2 (invoked by the login page's AJAX POST)
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Exchange a one-time Google auth code for credentials and log the
    user into the session.

    Returns an HTML welcome snippet on success, or a JSON error body
    with an appropriate HTTP status on failure.
    """
    # Validate the anti-forgery state token set by showLogin().
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain the one-time authorization code from the request body.
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object.
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid by asking Google's tokeninfo.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token was issued to this application.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        print "Token's client ID does not match app's."
        response.headers['Content-Type'] = 'application/json'
        return response
    # Short-circuit when this browser session is already logged in.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(
            json.dumps('Current user is already connected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Fetch the user's profile (name, picture, e-mail) from Google.
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # Record the identity provider so disconnect() knows how to log out.
    login_session['provider'] = 'google'
    # Create a local user record on first login.
    user_id = getUserID(data["email"])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the HTML snippet the login page displays on success.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: \
            150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("you are now logged in as %s" % login_session['username'])
    print "done!"
    return output
# User Helper Functions
def createUser(login_session):
    """Persist a new User built from the session profile; return its id."""
    record = User(name=login_session['username'],
                  email=login_session['email'],
                  picture=login_session['picture'])
    session.add(record)
    session.commit()
    # Re-query so the database-assigned primary key is returned.
    stored = session.query(User).filter_by(
        email=login_session['email']).one()
    return stored.id
def getUserInfo(user_id):
    """Fetch the User row with the given primary key (raises if absent)."""
    return session.query(User).filter_by(id=user_id).one()
def getUserID(email):
    """Look up a user's primary key by e-mail address.

    Returns:
        The user's id, or None when no account matches.
    """
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        # .one() raises NoResultFound / MultipleResultsFound here.  The
        # previous bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; `except Exception` does not.
        return None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the current user's Google OAuth access token.

    Returns a JSON response: 200 on successful revocation, 401 when no
    user is connected, 400 when Google refuses to revoke the token.
    """
    # Only disconnect a connected user.
    access_token = login_session.get('access_token')
    if access_token is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] == '200':
        response = make_response(json.dumps('Successfully disconnected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        # Bug fix: the 400 was previously passed to json.dumps() (as its
        # `skipkeys` argument) instead of make_response(), so revocation
        # failures were reported with HTTP 200.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
# Decorator: redirect anonymous visitors to the login page.
def login_required(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        # A 'user_id' in the session marks an authenticated visitor.
        if 'user_id' in login_session:
            return f(*args, **kwargs)
        return redirect(url_for('showLogin'))
    return wrapper
# JSON APIs to view Spa Category Information
@app.route('/spacategory/<int:categories_id>/spaitem/JSON')
def spaCategoryJSON(categories_id):
    """JSON endpoint: every spa item belonging to one category."""
    # .one() raises (-> 500) when the category id does not exist.
    category = session.query(Category).filter_by(id=categories_id).one()
    matching = session.query(SpaItem).filter_by(categories_id=categories_id)
    return jsonify(SpaItems=[item.serialize for item in matching.all()])
@app.route('/spacategory/<int:categories_id>/spaitem/<int:spa_item_id>/JSON')
def spaItemJSON(categories_id, spa_item_id):
    """JSON endpoint: a single spa item by id."""
    item = session.query(SpaItem).filter_by(id=spa_item_id).one()
    return jsonify(Spa_Item=item.serialize)
@app.route('/spacategory/JSON')
def categoriesJSON():
    """JSON endpoint: every spa category."""
    all_categories = session.query(Category).all()
    return jsonify(categories=[cat.serialize for cat in all_categories])
# Show all spa categories
@app.route('/')
@app.route('/spacategory/')
def showCategory():
    """Home page: all categories plus every item, newest item first."""
    all_categories = session.query(Category).all()
    newest_first = session.query(SpaItem).order_by(SpaItem.id.desc())
    # Anonymous visitors get the read-only template.
    if 'username' in login_session:
        template = 'spacategory.html'
    else:
        template = 'publicspacategory.html'
    return render_template(template,
                           categories=all_categories,
                           items=newest_first)
# Show spa items
@app.route('/spacategory/<int:categories_id>/')
@app.route('/spacategory/<int:categories_id>/spaitem/')
def showSpaItem(categories_id):
    """List the items of one category (public view)."""
    current = session.query(Category).filter_by(id=categories_id).one()
    all_categories = session.query(Category).all()
    owner = getUserInfo(current.user_id)
    newest_first = session.query(SpaItem).filter_by(
        categories_id=current.id).order_by(SpaItem.id.desc())
    return render_template('publicspaitem.html',
                           categories=current, items=newest_first,
                           category=all_categories, creator=owner)
# READ - show the details of one spa item
@app.route('/spacategories/<int:categories_id>/spaitem/<int:spa_item_id>/')
def showItemDetails(categories_id, spa_item_id):
    """Show one item's details; logged-in users get the editable view."""
    parent = session.query(Category).filter_by(id=categories_id).one()
    item = session.query(SpaItem).filter_by(id=spa_item_id).one()
    owner = getUserInfo(parent.user_id)
    if 'username' in login_session:
        template = 'item_details.html'
    else:
        template = 'public_itemdetails.html'
    return render_template(template,
                           categories=parent,
                           items=item, creator=owner)
# Create a new spa item
@app.route('/spacategory/spaitem/new', methods=['GET', 'POST'])
@login_required
def newSpaItem():
    """GET: render the creation form; POST: persist the submitted item."""
    if request.method == 'POST':
        # The item is owned by the currently logged-in user.
        newItem = SpaItem(
            name=request.form['name'],
            description=request.form['description'],
            categories_id=request.form['categories_id'],
            user_id=login_session['user_id'])
        session.add(newItem)
        session.commit()
        flash('New Spa %s Item Successfully Created' % (newItem.name))
        return redirect(url_for('showCategory'))
    else:
        return render_template('newspaitem.html')
# Edit a spa item
@app.route(
    '/spacategory/<int:categories_id>/spaitem/<int:spa_item_id>/edit',
    methods=['GET', 'POST'])
@login_required
def editSpaItem(categories_id, spa_item_id):
    """GET: render the edit form; POST: apply the submitted changes.

    Only the item's owner may edit; everyone else gets a JS alert page.
    """
    editedItem = session.query(SpaItem).filter_by(id=spa_item_id).one()
    categories = session.query(Category).all()
    # Authorization check: only the owner may edit this item.
    if editedItem.user_id != login_session['user_id']:
        return "<script>function myFunction() {alert('You are not authorized"\
            "to edit this item. Please create your own item in order to edit.');"\
            "window.location = '/';}</script><body onload='myFunction()''>"
    if request.method == 'POST':
        # Empty form fields leave the stored value unchanged.
        if request.form['name']:
            editedItem.name = request.form['name']
        if request.form['description']:
            editedItem.description = request.form['description']
        session.add(editedItem)
        session.commit()
        flash('Spa Item Successfully Updated')
        return redirect(url_for(
            'showSpaItem', categories_id=categories_id,
            spa_item_id=spa_item_id))
    else:
        return render_template(
            'editspaitem.html', item=editedItem,
            categories_id=categories_id,
            spa_item_id=spa_item_id,
            categories=categories)
# Delete a spa item
@app.route(
    '/spacategory/<int:categories_id>/spaitem/<int:spa_item_id>/delete',
    methods=['GET', 'POST'])
@login_required
def deleteSpaItem(categories_id, spa_item_id):
    """GET: render the confirmation page; POST: delete the item.

    Only the item's owner may delete; everyone else gets a JS alert page.
    """
    categories = session.query(Category).filter_by(id=categories_id).one()
    itemToDelete = session.query(SpaItem).filter_by(id=spa_item_id).one()
    # Authorization check: only the owner may delete this item.
    if itemToDelete.user_id != login_session['user_id']:
        return "<script>function myFunction() {alert('You are not authorized "\
            "to delete this item. Please create your own item in order to delete"\
            " .');window.location = '/';}</script><body onload='myFunction()''>"
    if request.method == 'POST':
        session.delete(itemToDelete)
        session.commit()
        flash('Spa Item Successfully Deleted')
        return redirect(url_for('showCategory', categories_id=categories_id))
    else:
        return render_template(
            'deleteSpaitem.html', categories_id=categories_id,
            spa_item_id=spa_item_id, item=itemToDelete)
# Disconnect from Google
@app.route('/disconnect')
def disconnect():
    """Log the current user out and clear all session identity keys."""
    if 'provider' in login_session:
        # Revoke the provider-side token first, then drop its session keys.
        if login_session['provider'] == 'google':
            gdisconnect()
            del login_session['gplus_id']
            del login_session['access_token']
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        del login_session['user_id']
        del login_session['provider']
        flash("Successfully logged out.")
        return redirect(url_for('showCategory'))
    else:
        flash("You were not logged in")
        return redirect(url_for('showCategory'))
if __name__ == '__main__':
    # NOTE(review): the hard-coded secret key and debug=True are only
    # acceptable for a development exercise — never in production.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='0.0.0.0', port=8000)
| 35.997245 | 79 | 0.668784 |
acf7c5b90b1a761bbf684b68f6c13147e9b7f06c | 1,662 | py | Python | CTFd/users.py | amanbansal2709/ctfd | 941335a5e205ca818ce1758076858b628e4fa05b | [
"Apache-2.0"
] | null | null | null | CTFd/users.py | amanbansal2709/ctfd | 941335a5e205ca818ce1758076858b628e4fa05b | [
"Apache-2.0"
] | null | null | null | CTFd/users.py | amanbansal2709/ctfd | 941335a5e205ca818ce1758076858b628e4fa05b | [
"Apache-2.0"
] | 1 | 2021-12-23T14:11:15.000Z | 2021-12-23T14:11:15.000Z | from flask import request, render_template, Blueprint
from CTFd.models import Users
from CTFd.utils.decorators import authed_only
from CTFd.utils import config
from CTFd.utils.user import get_current_user
from CTFd.utils.decorators.visibility import check_account_visibility, check_score_visibility
users = Blueprint('users', __name__)
@users.route('/users')
@check_account_visibility
def listing():
    """Paginated public list of visible, non-banned users (50 per page)."""
    per_page = 50
    page = abs(request.args.get('page', 1, type=int))
    start = per_page * (page - 1)
    end = start + per_page
    visible = Users.query.filter_by(banned=False, hidden=False)
    count = visible.count()
    page_users = visible.slice(start, end).all()
    # Round up: one extra page whenever there is a remainder.
    pages = int(count / per_page) + (count % per_page > 0)
    return render_template(
        'users/users.html',
        users=page_users,
        pages=pages,
        curr_page=page
    )
@users.route('/profile')
@users.route('/user')
@authed_only
def private():
    """Render the logged-in user's own profile page."""
    current = get_current_user()
    their_solves = current.get_solves()
    their_awards = current.get_awards()
    standing = current.place
    points = current.score
    return render_template(
        'users/private.html',
        solves=their_solves,
        awards=their_awards,
        user=current,
        score=points,
        place=standing,
        score_frozen=config.is_scoreboard_frozen()
    )
@users.route('/users/<int:user_id>')
@check_account_visibility
@check_score_visibility
def public(user_id):
    """Public profile page for one visible, non-banned user (404 otherwise)."""
    target = Users.query.filter_by(
        id=user_id, banned=False, hidden=False).first_or_404()
    return render_template('users/public.html', user=target)
| 27.245902 | 95 | 0.703369 |
acf7c78f1d6a390f1386e0770acaf9a90f360d94 | 4,550 | py | Python | utils.py | jacksky64/ML-BoneSuppression | 4257d009c3c334b75e082da3b5a8f53b0b82ba7d | [
"MIT"
] | null | null | null | utils.py | jacksky64/ML-BoneSuppression | 4257d009c3c334b75e082da3b5a8f53b0b82ba7d | [
"MIT"
] | null | null | null | utils.py | jacksky64/ML-BoneSuppression | 4257d009c3c334b75e082da3b5a8f53b0b82ba7d | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import os
import cv2
from PIL import Image, ImageOps
import random
import sys
from sklearn.utils import shuffle
def crop_to_square(image, upsampling):
    """Make *image* square.

    Args:
        image: 2-D numpy array (grayscale image).
        upsampling: if True, pad the short side (centered, black border)
            up to the long side; if False, center-crop the long side
            down to the short side.

    Returns:
        A square numpy array; the input object itself when already square.

    Bug fixes vs. the previous version: the upsampling branch used to
    fall through to the numpy crop with negative/float offsets (the PIL
    padding code after the return was unreachable), and the downsampling
    branch returned a non-square result when the size difference was odd.
    """
    if image.shape[0] == image.shape[1]:
        return image
    if upsampling:
        # Pad via PIL: cropping with a box that extends past the image
        # bounds fills the new area with black pixels.
        img = Image.fromarray(image)
        target_side = max(img.size)
        # NOTE(review): with an odd size difference the // 2 split pads
        # one pixel short of target_side, matching the original intent.
        horizontal_padding = (target_side - img.size[0]) // 2
        vertical_padding = (target_side - img.size[1]) // 2
        img = img.crop((-horizontal_padding, -vertical_padding,
                        img.size[0] + horizontal_padding,
                        img.size[1] + vertical_padding))
        return np.array(img)
    # Center crop: take a target_side x target_side window.
    target_side = min(image.shape)
    start_row = (image.shape[0] - target_side) // 2
    start_col = (image.shape[1] - target_side) // 2
    return image[start_row:start_row + target_side,
                 start_col:start_col + target_side]
def extract_n_preprocess_dicom(path, size):
    """Load the image at *path*, square it by padding, and resize it to
    (size, size) with Lanczos interpolation."""
    image = extract_image(path)
    image = crop_to_square(image, upsampling=True)
    return cv2.resize(image, (size, size),
                      interpolation=cv2.INTER_LANCZOS4)
def extract_image(path):
    """Load the image at *path* as grayscale, preserving its bit depth."""
    # IMREAD_ANYDEPTH keeps 16-bit pixel data instead of truncating to 8-bit.
    return cv2.imread(path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_GRAYSCALE)
def augment_image_pair(image1, image2, size, output_path1, output_path2):
    """
    Apply one identical random augmentation (flip / pad / rotate / crop)
    to both images of a source/target pair and save the results to the
    two output paths.
    """
    image1 = Image.fromarray(image1).convert('L')
    image2 = Image.fromarray(image2).convert('L')
    # Draw all random parameters once so both images transform identically.
    offset = random.randint(0, 100)
    rotate = random.randint(-30,30)
    min_val = random.randint(0, offset+1)
    # Flip
    # NOTE(review): randint(1, 3) % 2 == 0 only for the value 2, so the
    # flip fires with probability 1/3, not 1/2 — confirm this is intended.
    if random.randint(1,3) % 2 == 0:
        image1 = image1.transpose(Image.FLIP_LEFT_RIGHT)
        image2 = image2.transpose(Image.FLIP_LEFT_RIGHT)
    # Add a black border of `offset` pixels on every side.
    image1 = ImageOps.expand(image1, offset)
    image2 = ImageOps.expand(image2, offset)
    # Rotate
    image1 = image1.rotate(rotate)
    image2 = image2.rotate(rotate)
    # Crop a size x size window at a random position inside the border.
    image1 = image1.crop((min_val, min_val, min_val+size, min_val+size))
    image2 = image2.crop((min_val, min_val, min_val+size, min_val+size))
    # Save
    image1.save(output_path1)
    image2.save(output_path2)
def extract_images(paths):
    """Load every image in *paths*; returns a list of arrays."""
    return [extract_image(one_path) for one_path in paths]
def check_and_create_dir(dir_path):
    """Create *dir_path* (including parents) if it does not already exist.

    Creation is attempted unconditionally and an existing directory is
    tolerated, avoiding the check-then-create race of the previous
    isdir()/makedirs() sequence.
    """
    try:
        os.makedirs(dir_path)
    except OSError:
        # Re-raise real failures (permissions, a file in the way, ...).
        if not os.path.isdir(dir_path):
            raise
def extract_image_path(folders, extension="png"):
    """Recursively collect files below *folders* whose name ends with
    ".<extension>" (case-insensitive).

    The previous substring test also matched names such as
    "photo.png.bak"; an explicit suffix check avoids that.
    """
    suffix = "." + extension.lower()
    images = []
    for folder in folders:
        for dir_name, _subdirs, file_list in os.walk(folder):
            for filename in file_list:
                if filename.lower().endswith(suffix):
                    images.append(os.path.join(dir_name, filename))
    return images
def extract_n_normalize_image(path):
    """Load the image at *path* as floats scaled to [0, 1] by its maximum."""
    pixels = extract_image(path)
    return pixels.astype(float) / np.amax(pixels)
def get_batch(batch_size, size, x_filenames, y_filenames):
    """Sample a random batch of image pairs.

    Shuffles the two (aligned) filename lists together, loads the first
    *batch_size* pairs, and returns two arrays shaped
    (batch_size, size, size, 1) with pixel values normalized to [0, 1].
    """
    # sklearn's shuffle permutes both lists with the same ordering.
    X, y = shuffle(x_filenames, y_filenames)
    X = X[:batch_size]
    y = y[:batch_size]
    X_images = []
    y_images = []
    for i in range(len(X)):
        X_images.append(extract_n_normalize_image(X[i]))
        y_images.append(extract_n_normalize_image(y[i]))
    # Add the trailing single-channel axis expected by the network.
    X_images = np.reshape(np.array(X_images), (batch_size, size, size, 1))
    y_images = np.reshape(np.array(y_images), (batch_size, size, size, 1))
    return (X_images, y_images)
def print_train_steps(current_step, total_steps):
    """Redraw an in-place 20-segment progress bar on stdout."""
    # Each bar segment represents 5% of the total step count.
    filled = int(current_step / (total_steps * 0.05))
    percent = int(float(current_step) * 100 / total_steps)
    bar = "=" * filled + " " * (20 - filled)
    sys.stdout.write("\r[" + bar + "] ---- Step {}/{} ----- ".format(
        current_step, total_steps) + str(percent) + "%")
    sys.stdout.flush()
def variable_to_cv2_image(varim):
    """Convert a float image in [0, 1] to a 16-bit OpenCV-style array.

    Args:
        varim: image array with float values.

    Values are scaled by 32767 and clamped to [0, 32767] before the
    cast, so out-of-range inputs saturate instead of wrapping.
    """
    scaled = (varim*32767.).clip(0, 32767)
    return scaled.astype(np.uint16)
acf7c7c4e37cebdc6e859f71614319718004532a | 1,131 | py | Python | test/python/test_logreg.py | tomMoral/BlitzL1 | e2ba9b4873b321dcfed4abb50e204706ad15ed00 | [
"MIT"
] | null | null | null | test/python/test_logreg.py | tomMoral/BlitzL1 | e2ba9b4873b321dcfed4abb50e204706ad15ed00 | [
"MIT"
] | null | null | null | test/python/test_logreg.py | tomMoral/BlitzL1 | e2ba9b4873b321dcfed4abb50e204706ad15ed00 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
from scipy import sparse
import blitzl1
def approx_equal(x, y):
  """Return True when x and y differ by less than 1e-5, else False."""
  return abs(x - y) < 1e-5
def test_SmallLogReg():
    """Regression-test blitzl1's L1 logistic regression on a 5x4 problem,
    printing a message for every check that fails."""
    blitzl1.set_use_intercept(False)
    blitzl1.set_tolerance(0.0)
    blitzl1.set_verbose(False)
    A = np.arange(20).reshape(5, 4)
    b = np.array([1, -1, -1, 1, 1])
    A = sparse.csc_matrix(A)
    prob = blitzl1.LogRegProblem(A, b)
    sol = prob.solve(2)
    # Reference values recorded from a known-good run with lambda = 2.
    if not approx_equal(sol.objective_value, 3.312655451335882):
        print "test SmallLogReg obj failed"
    if not approx_equal(sol.x[0], 0.0520996109147):
        print "test SmallLogReg x[0] failed"
    # The solver's objective must agree with one recomputed in Python.
    python_obj = sol.evaluate_loss(A, b) + 2 * np.linalg.norm(sol.x, ord=1)
    if not approx_equal(sol.objective_value, python_obj):
        print "test SmallLogReg python_obj failed"
    blitzl1.set_use_intercept(True)
    blitzl1.set_tolerance(0.0001)
    sol = prob.solve(1.5)
    blitzl1.set_tolerance(0.01)
    # Warm-starting from an accurate solution must converge in one pass.
    sol2 = prob.solve(1.5, initial_x=sol.x, initial_intercept=sol.intercept)
    if sol2._num_iterations != 1:
        print "test SmallLogReg initial conditions failed"
# Run the (print-based) test suite on import/execution.
def main():
    test_SmallLogReg()
main()
| 25.704545 | 74 | 0.71618 |
acf7c84929ac7e554a11b6eae3f16576e93adb3f | 3,707 | py | Python | qiskit/backends/aer/statevector_simulator.py | dsh0416/qiskit-terra | 5a1879f3784be4eb5d87b0fb0ef9230ca7a01b65 | [
"Apache-2.0"
] | null | null | null | qiskit/backends/aer/statevector_simulator.py | dsh0416/qiskit-terra | 5a1879f3784be4eb5d87b0fb0ef9230ca7a01b65 | [
"Apache-2.0"
] | null | null | null | qiskit/backends/aer/statevector_simulator.py | dsh0416/qiskit-terra | 5a1879f3784be4eb5d87b0fb0ef9230ca7a01b65 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name
"""
Interface to C++ quantum circuit simulator with realistic noise.
"""
import logging
import uuid
from qiskit.qobj import QobjInstruction
from .qasm_simulator import QasmSimulator
from ._simulatorerror import SimulatorError
from .aerjob import AerJob
logger = logging.getLogger(__name__)
class StatevectorSimulator(QasmSimulator):
    """C++ statevector simulator.

    Runs each circuit with a single shot and returns the final quantum
    state (captured via an internally appended snapshot instruction)
    in the result's 'statevector' data field.
    """

    DEFAULT_CONFIGURATION = {
        'name': 'statevector_simulator',
        'url': 'https://github.com/QISKit/qiskit-terra/src/qasm-simulator-cpp',
        'simulator': True,
        'local': True,
        'description': 'A C++ statevector simulator for qobj files',
        'coupling_map': 'all-to-all',
        'basis_gates': 'u1,u2,u3,cx,cz,id,x,y,z,h,s,sdg,t,tdg,rzz,load,save,snapshot'
    }

    def __init__(self, configuration=None, provider=None):
        super().__init__(configuration=configuration or self.DEFAULT_CONFIGURATION.copy(),
                         provider=provider)

    def run(self, qobj):
        """Run a qobj on the backend asynchronously; returns an AerJob."""
        job_id = str(uuid.uuid4())
        aer_job = AerJob(self, job_id, self._run_job, qobj)
        aer_job.submit()
        return aer_job

    def _run_job(self, job_id, qobj):
        """Run a Qobj on the backend and post-process the snapshot data."""
        self._validate(qobj)
        final_state_key = 32767  # Internal key for final state snapshot
        # Add final snapshots to circuits
        for experiment in qobj.experiments:
            experiment.instructions.append(
                QobjInstruction(name='snapshot', params=[final_state_key])
            )
        result = super()._run_job(job_id, qobj)
        # Replace backend name with current backend
        result.backend_name = self.name
        # Extract final state snapshot and move to 'statevector' data field
        for experiment_result in result.results.values():
            snapshots = experiment_result.snapshots
            if str(final_state_key) in snapshots:
                final_state_key = str(final_state_key)
                # Pop off final snapshot added above
                final_state = snapshots.pop(final_state_key, None)
                final_state = final_state['statevector'][0]
                # Add final state to results data
                experiment_result.data['statevector'] = final_state
                # Remove snapshot dict if empty
                if snapshots == {}:
                    experiment_result.data.pop('snapshots', None)
        return result

    def _validate(self, qobj):
        """Semantic validations of the qobj which cannot be done via schemas.
        Some of these may later move to backend schemas.
        1. No shots
        2. No measurements in the middle
        """
        # A statevector is only meaningful for a single deterministic run.
        if qobj.config.shots != 1:
            logger.info("statevector simulator only supports 1 shot. "
                        "Setting shots=1.")
            qobj.config.shots = 1
        for experiment in qobj.experiments:
            if getattr(experiment.config, 'shots', 1) != 1:
                logger.info("statevector simulator only supports 1 shot. "
                            "Setting shots=1 for circuit %s.", experiment.name)
                experiment.config.shots = 1
            # Measurement/reset would collapse the state mid-circuit.
            for op in experiment.instructions:
                if op.name in ['measure', 'reset']:
                    raise SimulatorError(
                        "In circuit {}: statevector simulator does not support "
                        "measure or reset.".format(experiment.header.name))
acf7c896a8e3a51779faded9e25ff5f7b6e04d84 | 1,189 | py | Python | Auctions/first_price_generic.py | kubkon/Phd-python | 5dccd6a107204a3b196e42205e691025539311aa | [
"MIT",
"Unlicense"
] | null | null | null | Auctions/first_price_generic.py | kubkon/Phd-python | 5dccd6a107204a3b196e42205e691025539311aa | [
"MIT",
"Unlicense"
] | null | null | null | Auctions/first_price_generic.py | kubkon/Phd-python | 5dccd6a107204a3b196e42205e691025539311aa | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/env python2.7
# encoding: utf-8
"""
first_price_generic.py
Created by Jakub Konka on 2011-03-04.
Copyright (c) 2011 Strathclyde Uni. All rights reserved.
"""
import sys
import os
import numpy as np
import scipy.integrate as integrate
import matplotlib.pyplot as plt
def main():
  """Monte-Carlo simulation of a first-price sealed-bid IPV auction and
  a plot of the seller's running average revenue vs. theory."""
  iterations = 100
  N = 2
  avg_revenue = []
  revenue = []
  for i in range(iterations):
    # Equilibrium bid with uniform valuations: b(v) = v * (N-1) / N.
    # NOTE(review): relies on Python 2 map() returning a list (for .sort()).
    bids = map(lambda x: x*(N-1)/N, (np.random.uniform(0,1) for n in range(N)))
    bids.sort()
    # First-price rule: the highest bidder wins and pays its own bid.
    revenue.append(bids[N-1])
    avg_revenue.append(np.average(revenue))
  # Plot & save the figures
  # Figure 1: Pseudo-random draws vs. Average revenue
  plt.figure()
  r1 = plt.plot(range(iterations), avg_revenue, 'ro')
  # Theoretical expected revenue of the seller for N uniform bidders.
  theory = integrate.quad(lambda t,n=N: n*(n-1)*t*(1-t)*t**(n-2), 0, 1)
  r2 = plt.axhline(theory[0], ls='dotted')
  plt.title('First-price sealed-bid IPV with N={0} bidders\n(valuations drawn from uniform distribution)'.format(N))
  plt.xlabel('Number of iterations')
  plt.ylabel('Average revenue of the seller')
  plt.legend((r1, r2), ('Numerical results', 'Theoretical prediction'), 'upper right')
  plt.savefig('fpa_avg_revenue_'+ str(N) +'.pdf')
  plt.close('all')
if __name__ == '__main__':
  main()
| 27.022727 | 115 | 0.696384 |
acf7c906416499251d800cb26a3ead1ab6e20784 | 57,965 | py | Python | subversion/tests/cmdline/tree_conflict_tests.py | markphip/subversion | b68ad49667ccd4a3fd3083c24909e6fcca4b8348 | [
"Apache-2.0"
] | null | null | null | subversion/tests/cmdline/tree_conflict_tests.py | markphip/subversion | b68ad49667ccd4a3fd3083c24909e6fcca4b8348 | [
"Apache-2.0"
] | 1 | 2016-09-14T18:22:43.000Z | 2016-09-14T18:22:43.000Z | subversion/tests/cmdline/tree_conflict_tests.py | markphip/subversion | b68ad49667ccd4a3fd3083c24909e6fcca4b8348 | [
"Apache-2.0"
] | 1 | 2020-11-04T07:25:49.000Z | 2020-11-04T07:25:49.000Z | #!/usr/bin/env python
#
# tree_conflict_tests.py: testing tree-conflict cases.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import sys, re, os, stat, traceback
# Our testing module
import svntest
from svntest import main, wc, verify
from svntest.actions import run_and_verify_svn
from svntest.actions import run_and_verify_commit
from svntest.actions import run_and_verify_resolved
from svntest.actions import run_and_verify_update
from svntest.actions import run_and_verify_status
from svntest.actions import run_and_verify_info
from svntest.actions import get_virginal_state
import shutil
import logging
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
AnyOutput = svntest.verify.AnyOutput
RegexOutput = svntest.verify.RegexOutput
RegexListOutput = svntest.verify.RegexListOutput
UnorderedOutput = svntest.verify.UnorderedOutput
AlternateOutput = svntest.verify.AlternateOutput
logger = logging.getLogger()
######################################################################
# Tests
#
# Each test must return on success or raise on failure.
#----------------------------------------------------------------------
# The tests in this file are for cases where a tree conflict is to be raised.
# (They do not check that conflicts are not raised in other cases.)
# Note: Delete, Replace and Move are presently tested together but probably
# will eventually need to be tested separately.
# A tree conflict being raised means:
# - the conflict is reported initially
# - the conflict is persistently visible
# - the conflict blocks commits until resolved
# - the conflict blocks (some?) further merges
# Desired:
# - interactive conflict resolution
# A "tree conflict on file P/F" means:
# - the operation reports action code "C" on path P/F
# - "svn status" reports status code "C" on path P/F
# - "svn info" reports details of the conflict on path P/F
# - "svn commit" fails if the user-requested targets include path P/F
# - "svn merge/update/switch" fails if it tries to modify P/F in any way
# A "tree conflict on dir P/D" means:
# - the operation reports action code "C" on path P/D
# - "svn status" reports status code "C" on path P/D
# - "svn info" reports details of the conflict on P/D
# - "svn commit" fails if it includes any part of the P/D sub-tree
# - "svn merge/up/sw" fails if it modifies any part of the P/D sub-tree
#----------------------------------------------------------------------
# Two sets of paths. The paths to be used for the destination of a copy
# or move must differ between the incoming change and the local mods,
# otherwise scenarios involving a move onto a move would conflict on the
# destination node as well as on the source, and we only want to be testing
# one thing at a time in most tests.
def incoming_paths(root_dir, parent_dir):
  """Paths of tree-conflict victims for the incoming-change side.

  The victims ("F", "D") and the copy/move destinations live under
  PARENT_DIR; the pristine sources "F1", "F3" and "D1" live under the
  shallower ROOT_DIR so several parent dirs can share them.
  """
  layout = [('F1', root_dir, "F1"),
            ('F', parent_dir, "F"),
            ('F2', parent_dir, "F2-in"),
            ('F3', root_dir, "F3"),
            ('D1', root_dir, "D1"),
            ('D', parent_dir, "D"),
            ('D2', parent_dir, "D2-in")]
  return dict((key, os.path.join(base, name))
              for key, base, name in layout)
def localmod_paths(root_dir, parent_dir):
  """Paths of tree-conflict victims for the local-modification side.

  Mirrors incoming_paths() but uses distinct copy/move destinations
  ("F2-local", "D2-local") so an incoming move and a local move never
  collide on the destination node.
  """
  layout = [('F1', root_dir, "F1"),
            ('F', parent_dir, "F"),
            ('F2', parent_dir, "F2-local"),
            ('F3', root_dir, "F3"),
            ('D1', root_dir, "D1"),
            ('D', parent_dir, "D"),
            ('D2', parent_dir, "D2-local")]
  return dict((key, os.path.join(base, name))
              for key, base, name in layout)
# Perform the action MODACTION on the WC items given by PATHS. The
# available actions can be seen within this function.
def modify(modaction, paths, is_init=True):
  """Perform the single working-copy action MODACTION on the paths in
  PATHS (a dict as returned by incoming_paths() or localmod_paths()).

  MODACTION is a two-letter code: the first letter selects a file ('f')
  or directory ('d') victim; the second selects the change -- upper-case
  letters are versioned operations (text-mod, Prop-mod, Delete, Add,
  Copy, Move) and lower-case 'a'/'d' act directly on disk, bypassing
  version control.

  IS_INIT distinguishes the repository set-up phase from the
  local-modification phase: a file copy ('fC') copies from F1 when
  initialising and from F3 otherwise, so the two phases use distinct
  copy sources.

  Raises Exception for an unknown MODACTION.
  """
  F1 = paths['F1'] # existing file to copy from
  F3 = paths['F3'] # existing file to copy from
  F = paths['F'] # target file
  F2 = paths['F2'] # non-existing file to copy/move to
  D1 = paths['D1'] # existing dir to copy from
  D = paths['D'] # target dir
  D2 = paths['D2'] # non-existing dir to copy/move to
  if modaction == 'ft': # file text-mod
    assert os.path.exists(F)
    main.file_append(F, "This is a text-mod of file F.\n")
  elif modaction == 'fP': # file Prop-mod
    assert os.path.exists(F)
    main.run_svn(None, 'pset', 'fprop1', 'A prop set on file F.', F)
  elif modaction == 'dP': # dir Prop-mod
    assert os.path.exists(D)
    main.run_svn(None, 'pset', 'dprop1', 'A prop set on dir D.', D)
  elif modaction == 'fD': # file Delete
    assert os.path.exists(F)
    main.run_svn(None, 'del', F)
  elif modaction == 'dD': # dir Delete
    assert os.path.exists(D)
    main.run_svn(None, 'del', D)
  elif modaction == 'fA': # file Add (new)
    assert os.path.exists(F)
    main.run_svn(None, 'add', F)
    main.run_svn(None, 'pset', 'fprop2', 'A prop of added file F.', F)
  elif modaction == 'dA': # dir Add (new)
    assert os.path.exists(D)
    main.run_svn(None, 'add', D)
    main.run_svn(None, 'pset', 'dprop2', 'A prop of added dir D.', D)
  elif modaction == 'fC': # file Copy (from F1 during init, F3 later)
    if is_init:
      main.run_svn(None, 'copy', F1, F)
    else:
      main.run_svn(None, 'copy', F3, F)
  elif modaction == 'dC': # dir Copy (from D1)
    main.run_svn(None, 'copy', D1, D)
  elif modaction == 'fM': # file Move (to F2)
    main.run_svn(None, 'rename', F, F2)
  elif modaction == 'dM': # dir Move (to D2)
    main.run_svn(None, 'rename', D, D2)
  elif modaction == 'fa': # file add (new) on disk
    assert not os.path.exists(F)
    main.file_write(F, "This is file F.\n")
  elif modaction == 'da': # dir add (new) on disk
    assert not os.path.exists(D)
    os.mkdir(D)
  elif modaction == 'fd': # file delete from disk
    assert os.path.exists(F)
    os.remove(F)
  elif modaction == 'dd': # dir delete from disk
    assert os.path.exists(D)
    # Bug fix: os.remove() cannot delete a directory (it raises OSError);
    # use os.rmdir().  The dirs created by 'da' are empty, so this suffices.
    os.rmdir(D)
  else:
    raise Exception("unknown modaction: '" + modaction + "'")
#----------------------------------------------------------------------
# Lists of change scenarios
#
# Each scenario expresses a change in terms of the client commands
# (including "move") that create that change. The change may exist in a
# repository, or may be applied to a WC by an "update" or "switch" or
# "merge", or may exist in a WC as a local modification.
#
# In addition, each scenario may include some local-modification actions
# that, if performed on the WC after this change, will make the disk state
# incompatible with the version-controlled state - e.g. by deleting a file
# that metadata says is present or vice-versa.
# File names:
# F1 = any existing file
# F3 = any existing file
# F = the file-path being acted on
# F2 = any non-existent file-path
# D1 = any existing dir
# D = the dir-path being acted on
# D2 = any non-existent dir-path
# P = the parent dir of F and of D
# Format of a change scenario:
# (
# list of actions to create the file/directory to be changed later,
# list of actions to make the change
# )
# Action lists to initialise the repository with a file or directory absent
# or present, to provide the starting point from which we perform the changes
# that are to be tested.
# Each scenario below is a tuple: (init_actions, change_actions), both
# lists of modify() action codes.
absent_f = []
absent_d = []
create_f = ['fa','fA']
create_d = ['da','dA']
# Scenarios that start with no existing versioned item
#
# CREATE:
# file-add(F) = add-new(F) or copy(F1,F)(and modify?)
# dir-add(D) = add-new(D)(deep?) or copy(D1,D)(and modify?)
f_adds = [
  #( absent_f, ['fa','fA'] ), ### local add-without-history: not a tree conflict
  ( absent_f, ['fC'] ),
  ( absent_f, ['fC','ft'] ), ### Fails because update seems to assume that the
  ### local file is unmodified (same as issue 1736?).
  #( absent_f, ['fC','fP'] ), # don't test all combinations, just because it's slow
]
d_adds = [
  #( absent_d, ['da','dA'] ), ### local add-without-history: not a tree conflict
  ( absent_d, ['dC'] ),
  #( absent_d, ['dC','dP'] ), # not yet
]
# Scenarios that start with an existing versioned item
#
# GO-AWAY: node is no longer at the path where it was.
# file-del(F) = del(F)
# file-move(F) = move(F,F2)
# dir-del(D) = del(D) or move(D,D2)
# Note: file-move(F) does not conflict with incoming edit
#
# REPLACE: node is no longer at the path where it was, but another node is.
# file-rpl(F) = file-del(F) + file-add(F)
# dir-rpl(D) = dir-del(D) + dir-add(D)
# Note: Schedule replace-by-different-node-type is unsupported in WC.
#
# MODIFY:
# file-mod(F) = text-mod(F) and/or prop-mod(F)
# dir-mod(D) = prop-mod(D) and/or file-mod(child-F) and/or dir-mod(child-D)
f_dels = [
  ( create_f, ['fD'] ),
]
f_moves = [
  ( create_f, ['fM'] ),
]
d_dels = [
  ( create_d, ['dD'] ),
]
d_moves = [
  ( create_d, ['dM'] ),
]
f_rpls = [
  # Don't test all possible combinations, just because it's slow
  ( create_f, ['fD','fa','fA'] ),
  ( create_f, ['fM','fC'] ),
]
d_rpls = [
  # We're not testing directory replacements yet.
  # Don't test all possible combinations, just because it's slow
  #( create_d, ['dD','dA'] ),
  #( create_d, ['dM','dC'] ),
  # Note that directory replacement differs from file replacement: the
  # schedule-delete dir is still on disk and is re-used for the re-addition.
]
f_rpl_d = [
  # File replaced by directory: not yet testable
]
d_rpl_f = [
  # Directory replaced by file: not yet testable
]
f_mods = [
  ( create_f, ['ft'] ),
  ( create_f, ['fP'] ),
  #( create_f, ['ft','fP'] ), # don't test all combinations, just because it's slow
]
d_mods = [
  ( create_d, ['dP'] ),
  # These test actions for operating on a child of the directory are not yet implemented:
  #( create_d, ['f_fA'] ),
  #( create_d, ['f_ft'] ),
  #( create_d, ['f_fP'] ),
  #( create_d, ['f_fD'] ),
  #( create_d, ['d_dP'] ),
  #( create_d, ['d_f_fA'] ),
]
#----------------------------------------------------------------------
# Set up all of the given SCENARIOS in their respective unique paths.
# This means committing their initialisation actions in r2, and then
# committing their change actions in r3 (assuming the repos was at r1).
# (See also the somewhat related svntest.actions.build_greek_tree_conflicts()
# and tree-conflicts tests using deep_trees in various other .py files.)
# SCENARIOS is a list of scenario tuples: (init_actions, change_actions).
# WC_DIR is a local path of an existing WC.
# BR_DIR is a nonexistent path within WC_DIR.
# BR_DIR and any necessary parent directories will be created, and then the
# scenario will be set up within it, and committed to the repository.
def set_up_repos(wc_dir, br_dir, scenarios):
  """Set up all of the given SCENARIOS in their respective unique paths:
  commit each scenario's initialisation actions as r2 and its change
  actions as r3 (the repository is assumed to be at r1).

  SCENARIOS is a list of (init_actions, change_actions) tuples.  WC_DIR
  is a local path of an existing WC; BR_DIR is a nonexistent path within
  WC_DIR, created here (with parents) to hold the scenarios.

  Returns (init_rev, changed_rev)."""
  if not os.path.exists(br_dir):
    main.run_svn(None, "mkdir", "--parents", br_dir)

  # Create the pre-existing shared items F1, F3 and D1 at the WC root.
  # (The parent_dir argument is irrelevant for these root-level items.)
  shared = incoming_paths(wc_dir, wc_dir)
  main.file_write(shared['F1'], "This is initially file F1.\n")
  main.file_write(shared['F3'], "This is initially file F3.\n")
  main.run_svn(None, 'add', shared['F1'], shared['F3'])
  main.run_svn(None, 'mkdir', shared['D1'])

  # For each scenario: create its unique parent dir (named after the
  # change actions) and run the initialisation actions inside it.
  for init_actions, change_actions in scenarios:
    parent = os.path.join(br_dir, "_".join(change_actions))
    main.run_svn(None, 'mkdir', '--parents', parent)
    for action in init_actions:
      modify(action, incoming_paths(wc_dir, parent))
  run_and_verify_svn(AnyOutput, [],
                     'commit', '-m', 'Initial set-up.', wc_dir)
  init_rev = 2 ### hard-coded

  # Now apply each scenario's change actions and commit them all.
  for _unused_init, change_actions in scenarios:
    parent = os.path.join(br_dir, "_".join(change_actions))
    for action in change_actions:
      modify(action, incoming_paths(wc_dir, parent))
  run_and_verify_svn(AnyOutput, [],
                     'commit', '-m', 'Action.', wc_dir)
  changed_rev = 3 ### hard-coded

  return (init_rev, changed_rev)
#----------------------------------------------------------------------
# Apply each of the changes in INCOMING_SCENARIOS to each of the local
# modifications in LOCALMOD_SCENARIOS.
# Ensure that the result in each case includes a tree conflict on the parent.
# OPERATION = 'update' or 'switch' or 'merge'
# If COMMIT_LOCAL_MODS is true, the LOCALMOD_SCENARIOS will be committed to
# the target branch before applying the INCOMING_SCENARIOS.
def ensure_tree_conflict(sbox, operation,
                         incoming_scenarios, localmod_scenarios,
                         commit_local_mods=False):
  """Apply each change in INCOMING_SCENARIOS against each local
  modification in LOCALMOD_SCENARIOS and verify that every combination
  raises a tree conflict on the victim ('F' or 'D').

  OPERATION is 'update', 'switch' or 'merge'.  If COMMIT_LOCAL_MODS is
  true, the local mods are committed to the target branch before the
  incoming changes are applied, instead of being left as uncommitted WC
  modifications.  SBOX is a sandbox; it is (re)built here."""
  sbox.build()
  wc_dir = sbox.wc_dir
  def url_of(repo_relative_path):
    return sbox.repo_url + '/' + repo_relative_path
  logger.debug("")
  logger.debug("=== Starting a set of '" + operation + "' tests.")
  # Path to source branch, relative to wc_dir.
  # Source is where the "incoming" mods are made.
  source_br = "branch1"
  logger.debug("--- Creating changes in repos")
  source_wc_dir = os.path.join(wc_dir, source_br)
  source_left_rev, source_right_rev = set_up_repos(wc_dir, source_wc_dir,
                                                   incoming_scenarios)
  head_rev = source_right_rev ### assumption
  # Local mods are the outer loop because cleaning up the WC is slow
  # ('svn revert' isn't sufficient because it leaves unversioned files)
  for _loc_init_mods, loc_action in localmod_scenarios:
    # Determine the branch (directory) in which local mods will be made.
    if operation == 'update':
      # Path to target branch (where conflicts are raised), relative to wc_dir.
      target_br = source_br
      target_start_rev = source_left_rev
    else: # switch/merge
      # Make, and work in, a "branch2" that is a copy of "branch1".
      target_br = "branch2"
      run_and_verify_svn(AnyOutput, [],
                         'copy', '-r', str(source_left_rev), url_of(source_br),
                         url_of(target_br),
                         '-m', 'Create target branch.')
      head_rev += 1
      target_start_rev = head_rev
    main.run_svn(None, 'checkout', '-r', str(target_start_rev), sbox.repo_url,
                 wc_dir)
    saved_cwd = os.getcwd()
    os.chdir(wc_dir)
    for _inc_init_mods, inc_action in incoming_scenarios:
      scen_name = "_".join(inc_action)
      source_url = url_of(source_br + '/' + scen_name)
      target_path = os.path.join(target_br, scen_name)
      logger.debug("=== " + str(inc_action) + " onto " + str(loc_action))
      logger.debug("--- Making local mods")
      for modaction in loc_action:
        modify(modaction, localmod_paths(".", target_path), is_init=False)
      if commit_local_mods:
        run_and_verify_svn(AnyOutput, [],
                           'commit', target_path,
                           '-m', 'Mods in target branch.')
        head_rev += 1
      # For update, verify the pre-condition that WC is out of date.
      # For switch/merge, there is no such precondition.
      if operation == 'update':
        logger.debug("--- Trying to commit (expecting 'out-of-date' error)")
        run_and_verify_commit(".", None, None, ".*Commit failed.*",
                              target_path)
      # NOTE: 'modaction' here is the LAST action of the local-mods loop
      # above; its first letter ('f' or 'd') identifies the victim.
      if modaction.startswith('f'):
        victim_name = 'F'
      else:
        victim_name = 'D'
      victim_path = os.path.join(target_path, victim_name)
      # Perform the operation that tries to apply incoming changes to the WC.
      # The command is expected to do something (and give some output),
      # and it should raise a conflict but not an error.
      expected_stdout = svntest.verify.ExpectedOutput(" C " + victim_path
                                                      + "\n",
                                                      match_all=False)
      # Do the main action
      if operation == 'update':
        logger.debug("--- Updating")
        run_and_verify_svn(expected_stdout, [],
                           'update', target_path)
      elif operation == 'switch':
        logger.debug("--- Switching")
        run_and_verify_svn(expected_stdout, [],
                           'switch', source_url, target_path)
      elif operation == 'merge':
        logger.debug("--- Merging")
        run_and_verify_svn(expected_stdout, [],
                           'merge',
                           '--allow-mixed-revisions',
                           '-r', str(source_left_rev) + ':' + str(source_right_rev),
                           source_url, target_path)
      else:
        raise Exception("unknown operation: '" + operation + "'")
      logger.debug("--- Checking that 'info' reports the conflict")
      # The "left" and "right" revisions recorded in the conflict differ
      # per operation (update/switch start from the target's base rev).
      if operation == 'update' or operation == 'switch':
        incoming_left_rev = target_start_rev
      else:
        incoming_left_rev = source_left_rev
      if operation == 'update' or operation == 'merge':
        incoming_right_rev = source_right_rev
      else:
        incoming_right_rev = head_rev
      expected_info = { 'Tree conflict' : '.* upon ' + operation +
          r'.* \((none|(file|dir).*' +
          re.escape(victim_name + '@' + str(incoming_left_rev)) + r')' +
          r'.* \((none|(file|dir).*' +
          re.escape(victim_name + '@' + str(incoming_right_rev)) + r')' }
      run_and_verify_info([expected_info], victim_path)
      logger.debug("--- Trying to commit (expecting 'conflict' error)")
      ### run_and_verify_commit() requires an "output_tree" argument, but
      # here we get away with passing None because we know an implementation
      # detail: namely that it's not going to look at that argument if it
      # gets the stderr that we're expecting.
      run_and_verify_commit(".", None, None, ".*conflict.*", victim_path)
      logger.debug("--- Checking that 'status' reports the conflict")
      # Two acceptable shapes: with or without a "moved to" line.
      expected_stdout = AlternateOutput([
                          RegexListOutput([
                          "^......C.* " + re.escape(victim_path) + "$",
                          "^ > .* upon " + operation] +
                          svntest.main.summary_of_conflicts(tree_conflicts=1)),
                          RegexListOutput([
                          "^......C.* " + re.escape(victim_path) + "$",
                          "^ > moved to .*",
                          "^ > .* upon " + operation] +
                          svntest.main.summary_of_conflicts(tree_conflicts=1))
                        ])
      run_and_verify_svn(expected_stdout, [],
                         'status', victim_path)
      logger.debug("--- Resolving the conflict")
      # Make sure resolving the parent does nothing.
      run_and_verify_resolved([], os.path.dirname(victim_path))
      # The real resolved call.
      run_and_verify_resolved([victim_path])
      logger.debug("--- Checking that 'status' does not report a conflict")
      exitcode, stdout, stderr = run_and_verify_svn(None, [],
                                                    'status', victim_path)
      # Column 6 of 'svn status' output is the tree-conflict indicator.
      for line in stdout:
        if line[6] == 'C': # and line.endswith(victim_path + '\n'):
          raise svntest.Failure("unexpected status C") # on victim_path
      # logger.debug("--- Committing (should now succeed)")
      # run_and_verify_svn(None, [],
      #                    'commit', '-m', '', target_path)
      # target_start_rev += 1
      logger.debug("")
    os.chdir(saved_cwd)
    # Clean up the target branch and WC
    main.run_svn(None, 'revert', '-R', wc_dir)
    main.safe_rmtree(wc_dir)
    if operation != 'update':
      run_and_verify_svn(AnyOutput, [],
                         'delete', url_of(target_br),
                         '-m', 'Delete target branch.')
      head_rev += 1
#----------------------------------------------------------------------
# Tests for update/switch affecting a file, where the incoming change
# conflicts with a scheduled change in the WC.
#
# WC state: as scheduled (no obstruction)
def up_sw_file_mod_onto_del(sbox):
  "up/sw file: modify onto del/rpl"
  # Incoming file modifications versus a local delete or replace.
  local_scenarios = f_dels + f_rpls
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', f_mods, local_scenarios)
  ensure_tree_conflict(switch_sbox, 'switch', f_mods, local_scenarios)
# Note: See UC1 in notes/tree-conflicts/use-cases.txt.
def up_sw_file_del_onto_mod(sbox):
  "up/sw file: del/rpl/mv onto modify"
  # Expected results: tree-conflict on F; no other change to the WC
  # (except possibly the other half of a move).
  # ### OR (see Nico's email <>): schedule-delete but leave F on disk
  # (can only apply with text-mod; prop-mod can't be preserved this way).
  incoming_scenarios = f_dels + f_moves + f_rpls
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', incoming_scenarios, f_mods)
  ensure_tree_conflict(switch_sbox, 'switch', incoming_scenarios, f_mods)
# Note: See UC2 in notes/tree-conflicts/use-cases.txt.
def up_sw_file_del_onto_del(sbox):
  "up/sw file: del/rpl/mv onto del/rpl/mv"
  # Both sides make the file go away or replace it.
  incoming_scenarios = f_dels + f_moves + f_rpls
  local_scenarios = f_dels + f_rpls
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', incoming_scenarios, local_scenarios)
  ensure_tree_conflict(switch_sbox, 'switch', incoming_scenarios,
                       local_scenarios)
# Note: See UC3 in notes/tree-conflicts/use-cases.txt.
def up_sw_file_add_onto_add(sbox):
  "up/sw file: add onto add"
  # File adds on both sides of the update/switch.
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', f_adds, f_adds)
  ensure_tree_conflict(switch_sbox, 'switch', f_adds, f_adds)
#----------------------------------------------------------------------
# Tests for update/switch affecting a dir, where the incoming change
# conflicts with a scheduled change in the WC.
def up_sw_dir_mod_onto_del(sbox):
  "up/sw dir: modify onto del/rpl/mv"
  # WC state: any (D necessarily exists; children may have any state)
  local_scenarios = d_dels + d_rpls
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', d_mods, local_scenarios)
  ensure_tree_conflict(switch_sbox, 'switch', d_mods, local_scenarios)
def up_sw_dir_del_onto_mod(sbox):
  "up/sw dir: del/rpl/mv onto modify"
  # WC state: any (D necessarily exists; children may have any state)
  incoming_scenarios = d_dels + d_moves + d_rpls
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', incoming_scenarios, d_mods)
  ensure_tree_conflict(switch_sbox, 'switch', incoming_scenarios, d_mods)
def up_sw_dir_del_onto_del(sbox):
  "up/sw dir: del/rpl/mv onto del/rpl/mv"
  # WC state: any (D necessarily exists; children may have any state)
  incoming_scenarios = d_dels + d_moves + d_rpls
  local_scenarios = d_dels + d_rpls
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', incoming_scenarios, local_scenarios)
  ensure_tree_conflict(switch_sbox, 'switch', incoming_scenarios,
                       local_scenarios)
# This is currently set as XFail over ra_dav because it hits
# issue #3314 'DAV can overwrite directories during copy'
#
# TRUNK@35827.DBG>svn st -v branch1
# 2 2 jrandom branch1
# 2 2 jrandom branch1\dC
# A + - 2 jrandom branch1\dC\D
#
# TRUNK@35827.DBG>svn log -r2:HEAD branch1 -v
# ------------------------------------------------------------------------
# r2 | jrandom | 2009-02-12 09:26:52 -0500 (Thu, 12 Feb 2009) | 1 line
# Changed paths:
# A /D1
# A /F1
# A /branch1
# A /branch1/dC
#
# Initial set-up.
# ------------------------------------------------------------------------
# r3 | jrandom | 2009-02-12 09:26:52 -0500 (Thu, 12 Feb 2009) | 1 line
# Changed paths:
# A /branch1/dC/D (from /D1:2)
#
# Action.
# ------------------------------------------------------------------------
#
# TRUNK@35827.DBG>svn ci -m "Should be ood" branch1
# Adding branch1\dC\D
#
# Committed revision 4.
@Issue(3314)
def up_sw_dir_add_onto_add(sbox):
  "up/sw dir: add onto add"
  # WC state: as scheduled (no obstruction)
  switch_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'update', d_adds, d_adds)
  ensure_tree_conflict(switch_sbox, 'switch', d_adds, d_adds)
#----------------------------------------------------------------------
# Tests for merge affecting a file, where the incoming change
# conflicts with the target.
def merge_file_mod_onto_not_file(sbox):
  "merge file: modify onto not-file"
  uncommitted_sbox = sbox.clone_dependent()
  # First with the "local mods" committed to the target branch...
  ensure_tree_conflict(sbox, 'merge', f_mods, f_dels + f_moves + f_rpl_d,
                       commit_local_mods=True)
  # ...then with the "local mods" left as uncommitted WC modifications.
  ensure_tree_conflict(uncommitted_sbox, 'merge', f_mods, f_dels + f_moves)
# Note: See UC4 in notes/tree-conflicts/use-cases.txt.
def merge_file_del_onto_not_same(sbox):
  "merge file: del/rpl/mv onto not-same"
  incoming_scenarios = f_dels + f_moves + f_rpls
  uncommitted_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'merge', incoming_scenarios, f_mods,
                       commit_local_mods=True)
  ensure_tree_conflict(uncommitted_sbox, 'merge', incoming_scenarios, f_mods)
# Note: See UC5 in notes/tree-conflicts/use-cases.txt.
def merge_file_del_onto_not_file(sbox):
  "merge file: del/rpl/mv onto not-file"
  incoming_scenarios = f_dels + f_moves + f_rpls
  uncommitted_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'merge', incoming_scenarios,
                       f_dels + f_moves + f_rpl_d,
                       commit_local_mods=True)
  ensure_tree_conflict(uncommitted_sbox, 'merge', incoming_scenarios,
                       f_dels + f_moves)
# Note: See UC6 in notes/tree-conflicts/use-cases.txt.
def merge_file_add_onto_not_none(sbox):
  "merge file: add onto not-none"
  uncommitted_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'merge', f_adds, f_adds,
                       commit_local_mods=True)
  ensure_tree_conflict(uncommitted_sbox, 'merge', f_adds, f_adds)
# TODO: Also test directory adds at path "F"?
#----------------------------------------------------------------------
# Tests for merge affecting a dir, where the incoming change
# conflicts with the target branch.
def merge_dir_mod_onto_not_dir(sbox):
  "merge dir: modify onto not-dir"
  uncommitted_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'merge', d_mods, d_dels + d_moves + d_rpl_f,
                       commit_local_mods=True)
  ensure_tree_conflict(uncommitted_sbox, 'merge', d_mods, d_dels + d_moves)
# Test for issue #3150 'tree conflicts with directories as victims'.
@Issue(3150)
def merge_dir_del_onto_not_same(sbox):
  "merge dir: del/rpl/mv onto not-same"
  incoming_scenarios = d_dels + d_moves + d_rpls
  uncommitted_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'merge', incoming_scenarios, d_mods,
                       commit_local_mods=True)
  ensure_tree_conflict(uncommitted_sbox, 'merge', incoming_scenarios, d_mods)
def merge_dir_del_onto_not_dir(sbox):
  "merge dir: del/rpl/mv onto not-dir"
  incoming_scenarios = d_dels + d_moves + d_rpls
  uncommitted_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'merge', incoming_scenarios,
                       d_dels + d_moves + d_rpl_f,
                       commit_local_mods=True)
  ensure_tree_conflict(uncommitted_sbox, 'merge', incoming_scenarios,
                       d_dels + d_moves)
def merge_dir_add_onto_not_none(sbox):
  "merge dir: add onto not-none"
  uncommitted_sbox = sbox.clone_dependent()
  ensure_tree_conflict(sbox, 'merge', d_adds, d_adds,
                       commit_local_mods=True)
  ensure_tree_conflict(uncommitted_sbox, 'merge', d_adds, d_adds)
# TODO: also try with file adds at path "D"?
#----------------------------------------------------------------------
@Issue(3805)
def force_del_tc_inside(sbox):
  "--force del on dir with TCs inside"
  # Set-up leaves A/C containing two tree-conflicted children; then
  # 'svn delete --force A/C' must disarm the conflicts inside it:
  # A/C <- delete with --force
  # A + C A/C/dir
  # A + C A/C/file
  sbox.build()
  wc_dir = sbox.wc_dir
  # NOTE: 'dir' and 'file' shadow Python builtins; kept as-is historically.
  C = os.path.join(wc_dir, "A", "C")
  dir = os.path.join(wc_dir, "A", "C", "dir")
  file = os.path.join(wc_dir, "A", "C", "file")
  # Add dir
  main.run_svn(None, 'mkdir', dir)
  # Add file
  content = "This is the file 'file'.\n"
  main.file_append(file, content)
  main.run_svn(None, 'add', file)
  main.run_svn(None, 'commit', '-m', 'Add dir and file', wc_dir)
  # Remove dir and file in r3.
  main.run_svn(None, 'delete', dir, file)
  main.run_svn(None, 'commit', '-m', 'Remove dir and file', wc_dir)
  # Warp back to -r2, dir and file coming back.
  main.run_svn(None, 'update', '-r2', wc_dir)
  # Set a meaningless prop on each dir and file
  run_and_verify_svn(["property 'propname' set on '" + dir + "'\n"],
                     [], 'ps', 'propname', 'propval', dir)
  run_and_verify_svn(["property 'propname' set on '" + file + "'\n"],
                     [], 'ps', 'propname', 'propval', file)
  # Update WC to HEAD, tree conflicts result dir and file
  # because there are local mods on the props.
  expected_output = wc.State(wc_dir, {
    'A/C/dir' : Item(status=' ', treeconflict='C'),
    'A/C/file' : Item(status=' ', treeconflict='C'),
    })
  expected_disk = main.greek_state.copy()
  expected_disk.add({
    'A/C/dir' : Item(props={'propname' : 'propval'}),
    'A/C/file' : Item(contents=content, props={'propname' : 'propval'}),
    })
  expected_status = get_virginal_state(wc_dir, 2)
  expected_status.tweak(wc_rev='3')
  expected_status.add({
    'A/C/dir' : Item(status='A ', wc_rev='-', copied='+', treeconflict='C'),
    'A/C/file' : Item(status='A ', wc_rev='-', copied='+', treeconflict='C'),
    })
  run_and_verify_update(wc_dir,
                        expected_output, expected_disk, expected_status,
                        check_props=True)
  # Delete A/C with --force, in effect disarming the tree-conflicts.
  run_and_verify_svn(verify.UnorderedOutput(['D ' + C + '\n',
                                             'D ' + dir + '\n',
                                             'D ' + file + '\n']),
                     [], 'delete', C, '--force')
  # Verify deletion status
  # Note: the tree conflicts are removed because we forced the delete.
  expected_status.tweak('A/C', status='D ')
  expected_status.remove('A/C/dir', 'A/C/file')
  run_and_verify_status(wc_dir, expected_status)
  # Commit, remove the "disarmed" tree-conflict.
  expected_output = wc.State(wc_dir, { 'A/C' : Item(verb='Deleting') })
  expected_status.remove('A/C')
  run_and_verify_commit(wc_dir,
                        expected_output, expected_status)
#----------------------------------------------------------------------
@Issue(3805)
def force_del_tc_is_target(sbox):
  "--force del on tree-conflicted targets"
  # Like force_del_tc_inside, but the --force delete names the
  # tree-conflicted nodes themselves rather than their parent:
  # A/C
  # A + C A/C/dir <- delete with --force
  # A + C A/C/file <- delete with --force
  sbox.build()
  wc_dir = sbox.wc_dir
  # NOTE: 'dir' and 'file' shadow Python builtins; kept as-is historically.
  C = os.path.join(wc_dir, "A", "C")
  dir = os.path.join(wc_dir, "A", "C", "dir")
  file = os.path.join(wc_dir, "A", "C", "file")
  # Add dir
  main.run_svn(None, 'mkdir', dir)
  # Add file
  content = "This is the file 'file'.\n"
  main.file_append(file, content)
  main.run_svn(None, 'add', file)
  main.run_svn(None, 'commit', '-m', 'Add dir and file', wc_dir)
  # Remove dir and file in r3.
  main.run_svn(None, 'delete', dir, file)
  main.run_svn(None, 'commit', '-m', 'Remove dir and file', wc_dir)
  # Warp back to -r2, dir and file coming back.
  main.run_svn(None, 'update', '-r2', wc_dir)
  # Set a meaningless prop on each dir and file
  run_and_verify_svn(["property 'propname' set on '" + dir + "'\n"],
                     [], 'ps', 'propname', 'propval', dir)
  run_and_verify_svn(["property 'propname' set on '" + file + "'\n"],
                     [], 'ps', 'propname', 'propval', file)
  # Update WC to HEAD, tree conflicts result dir and file
  # because there are local mods on the props.
  expected_output = wc.State(wc_dir, {
    'A/C/dir' : Item(status=' ', treeconflict='C'),
    'A/C/file' : Item(status=' ', treeconflict='C'),
    })
  expected_disk = main.greek_state.copy()
  expected_disk.add({
    'A/C/dir' : Item(props={'propname' : 'propval'}),
    'A/C/file' : Item(contents=content, props={'propname' : 'propval'}),
    })
  expected_status = get_virginal_state(wc_dir, 2)
  expected_status.tweak(wc_rev='3')
  expected_status.add({
    'A/C/dir' : Item(status='A ', wc_rev='-', copied='+', treeconflict='C'),
    'A/C/file' : Item(status='A ', wc_rev='-', copied='+', treeconflict='C'),
    })
  run_and_verify_update(wc_dir,
                        expected_output, expected_disk, expected_status,
                        check_props=True)
  # Delete nodes with --force, in effect disarming the tree-conflicts.
  run_and_verify_svn(['D ' + dir + '\n',
                      'D ' + file + '\n'],
                     [],
                     'delete', dir, file, '--force')
  # The rm --force now removes the nodes and the tree conflicts on them
  expected_status.remove('A/C/dir', 'A/C/file')
  run_and_verify_status(wc_dir, expected_status)
  # Commit, remove the "disarmed" tree-conflict.
  expected_output = wc.State(wc_dir, {})
  run_and_verify_commit(wc_dir,
                        expected_output, expected_status)
#----------------------------------------------------------------------
# A regression test to check that "rm --keep-local" on a tree-conflicted
# node leaves the WC in a valid state in which simple commands such as
# "status" do not error out. At one time the command left the WC in an
# invalid state. (Before r989189, "rm --keep-local" used to have the effect
# of "disarming" the conflict in the sense that "commit" would ignore the
# conflict.)
def query_absent_tree_conflicted_dir(sbox):
  "query an unversioned tree-conflicted dir"
  # Regression test: after "rm --keep-local" on a tree-conflicted node,
  # simple commands like "status" and "info" on it must not error out.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Some paths we'll care about
  C_path = os.path.join(wc_dir, "A", "C")
  C_C_path = os.path.join(wc_dir, "A", "C", "C")
  # Add a directory A/C/C as r2.
  main.run_svn(None, 'mkdir', C_C_path)
  main.run_svn(None, 'commit', '-m', 'Add directory A/C/C', wc_dir)
  # Remove that directory A/C/C as r3.
  main.run_svn(None, 'delete', C_C_path)
  main.run_svn(None, 'commit', '-m', 'Remove directory A/C/C', wc_dir)
  # Warp back to -r2 with the directory added.
  main.run_svn(None, 'update', '-r2', wc_dir)
  # Set a meaningless prop on A/C/C
  run_and_verify_svn(["property 'propname' set on '" + C_C_path + "'\n"],
                     [], 'ps', 'propname', 'propval', C_C_path)
  # Update WC to HEAD, a tree conflict results on A/C/C because of the
  # working prop on A/C/C.
  expected_output = wc.State(wc_dir, {
    'A/C/C' : Item(status=' ', treeconflict='C'),
    })
  expected_disk = main.greek_state.copy()
  expected_disk.add({'A/C/C' : Item(props={'propname' : 'propval'})})
  expected_status = get_virginal_state(wc_dir, 1)
  expected_status.tweak(wc_rev='3')
  expected_status.add({'A/C/C' : Item(status='A ',
                                      wc_rev='-',
                                      copied='+',
                                      treeconflict='C')})
  run_and_verify_update(wc_dir,
                        expected_output, expected_disk, expected_status,
                        check_props=True)
  # Delete A/C with --keep-local.
  run_and_verify_svn(verify.UnorderedOutput(['D ' + C_C_path + '\n',
                                             'D ' + C_path + '\n']),
                     [],
                     'delete', C_path, '--keep-local')
  expected_status.tweak('A/C', status='D ')
  expected_status.remove('A/C/C')
  run_and_verify_status(wc_dir, expected_status)
  # Try to access the absent tree-conflict as explicit target.
  # These used to fail like this:
  ## CMD: svn status -v -u -q
  ## [...]
  ## subversion/svn/status-cmd.c:248: (apr_err=155035)
  ## subversion/svn/util.c:953: (apr_err=155035)
  ## subversion/libsvn_client/status.c:270: (apr_err=155035)
  ## subversion/libsvn_wc/lock.c:607: (apr_err=155035)
  ## subversion/libsvn_wc/entries.c:1607: (apr_err=155035)
  ## subversion/libsvn_wc/wc_db.c:3288: (apr_err=155035)
  ## svn: Expected node '/.../tree_conflict_tests-20/A/C' to be added.
  # A/C/C is now unversioned, using status:
  expected_output = wc.State(wc_dir, {
    })
  run_and_verify_status(C_C_path, expected_output)
  # using info:
  run_and_verify_svn(None, ".*W155010.*The node.*was not found.*",
                     'info', C_C_path)
#----------------------------------------------------------------------
@Issue(3608)
def up_add_onto_add_revert(sbox):
  "issue #3608: reverting an add onto add conflict"
  # Two WCs add identical copies at the same paths; committing from the
  # first and updating the second raises add-onto-add tree conflicts,
  # which a revert in the second WC must fully clear.
  sbox.build()
  wc_dir = sbox.wc_dir
  wc2_dir = sbox.add_wc_path('wc2')
  svntest.actions.run_and_verify_svn(None, [], 'checkout',
                                     sbox.repo_url, wc2_dir)
  file1 = os.path.join(wc_dir, 'newfile')
  file2 = os.path.join(wc2_dir, 'newfile')
  dir1 = os.path.join(wc_dir, 'NewDir')
  dir2 = os.path.join(wc2_dir, 'NewDir')
  # Copy iota and A/C to the same new names in both working copies.
  main.run_svn(None, 'cp', os.path.join(wc_dir, 'iota'), file1)
  main.run_svn(None, 'cp', os.path.join(wc2_dir, 'iota'), file2)
  main.run_svn(None, 'cp', os.path.join(wc_dir, 'A/C'), dir1)
  main.run_svn(None, 'cp', os.path.join(wc2_dir, 'A/C'), dir2)
  sbox.simple_commit(message='Added file')
  expected_disk = main.greek_state.copy()
  expected_disk.add({
    'newfile' : Item(contents="This is the file 'iota'.\n"),
    'NewDir' : Item(),
    })
  expected_status = get_virginal_state(wc2_dir, 2)
  expected_status.add({
    'newfile' : Item(status='R ', copied='+', treeconflict='C', wc_rev='-'),
    'NewDir' : Item(status='R ', copied='+', treeconflict='C', wc_rev='-'),
    })
  run_and_verify_update(wc2_dir,
                        None, expected_disk, expected_status,
                        check_props=True)
  # Currently (r927086), this removes dir2 and file2 in a way that
  # they don't reappear after update.
  main.run_svn(None, 'revert', file2)
  main.run_svn(None, 'revert', dir2)
  expected_status = get_virginal_state(wc2_dir, 2)
  expected_status.add({
    'newfile' : Item(status=' ', wc_rev='2'),
    'NewDir' : Item(status=' ', wc_rev='2'),
    })
  # Expected behavior is that after revert + update the tree matches
  # the repository
  run_and_verify_update(wc2_dir,
                        None, expected_disk, expected_status,
                        check_props=True)
#----------------------------------------------------------------------
# Regression test for issue #3525 and #3533
#
@Issues(3525,3533)
def lock_update_only(sbox):
  "lock status update shouldn't flag tree conflict"
  # Regression test for issues #3525 and #3533: an update that (only)
  # changes the lock status of a locally-deleted file must not flag a
  # tree conflict on it.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Make a second copy of the working copy
  wc_b = sbox.add_wc_path('_b')
  svntest.actions.duplicate_dir(wc_dir, wc_b)
  fname = 'iota'
  file_path = os.path.join(sbox.wc_dir, fname)
  file_path_b = os.path.join(wc_b, fname)
  # Lock a file as wc_author, and schedule the file for deletion.
  svntest.actions.run_and_verify_svn(".*locked by user", [], 'lock',
                                     '-m', '', file_path)
  svntest.main.run_svn(None, 'delete', file_path)
  # In our other working copy, steal that lock.
  # BUG FIX: this previously locked 'file_path' (the first WC's file)
  # again; 'file_path_b' was computed but never used.  The steal must
  # happen from the second working copy for the lock-status update below
  # to be exercised.
  svntest.actions.run_and_verify_svn(".*locked by user", [], 'lock',
                                     '-m', '', '--force', file_path_b)
  # Now update the first working copy. It should appear as a no-op.
  expected_disk = main.greek_state.copy()
  expected_disk.remove('iota')
  expected_status = get_virginal_state(wc_dir, 1)
  expected_status.tweak('iota', status='D ', writelocked='K')
  run_and_verify_update(wc_dir,
                        None, expected_disk, expected_status,
                        check_props=True)
#----------------------------------------------------------------------
@Issue(3469)
def at_directory_external(sbox):
  "tree conflict at directory external"
  # Issue #3469: merging into a WC that contains a directory external.
  sbox.build()
  wc_dir = sbox.wc_dir
  # r2: create a directory external: ^/E -> ^/A
  svntest.main.run_svn(None, 'ps', 'svn:externals', '^/A E', wc_dir)
  svntest.main.run_svn(None, 'commit', '-m', 'ps', wc_dir)
  svntest.main.run_svn(None, 'update', wc_dir)
  # r3: modify ^/A/B/E/alpha
  # Use a context manager so the file is flushed and closed before svn
  # reads it (the original relied on CPython refcounting to close it).
  with open(sbox.ospath('A/B/E/alpha'), 'a') as f:
    f.write('This is still A/B/E/alpha.\n')
  svntest.main.run_svn(None, 'commit', '-m', 'file mod', wc_dir)
  svntest.main.run_svn(None, 'update', wc_dir)
  merge_rev = svntest.main.youngest(sbox.repo_dir)
  # r4: create ^/A/B/E/alpha2
  with open(sbox.ospath('A/B/E/alpha2'), 'a') as f:
    f.write("This is the file 'alpha2'.\n")
  svntest.main.run_svn(None, 'add', sbox.ospath('A/B/E/alpha2'))
  svntest.main.run_svn(None, 'commit', '-m', 'file add', wc_dir)
  svntest.main.run_svn(None, 'update', wc_dir)
  merge_rev2 = svntest.main.youngest(sbox.repo_dir)
  # r5: merge those
  svntest.main.run_svn(None, "merge", '-c', merge_rev, '^/A/B', wc_dir)
  svntest.main.run_svn(None, "merge", '-c', merge_rev2, '^/A/B', wc_dir)
#----------------------------------------------------------------------
@Issue(3779)
### This test currently passes on the current behaviour.
### However in many cases it is unclear whether the current behaviour is
### correct. Review is still required.
def actual_only_node_behaviour(sbox):
  "test behaviour with actual-only nodes"
  # An "actual-only" node exists in the working copy's conflict storage but
  # has no BASE or WORKING version ('A/foo' below, created by a tree
  # conflict during a cherry-pick merge).  This test runs every client
  # command against such a node and pins down the current behaviour.
  sbox.build()
  A_url = sbox.repo_url + '/A'
  A_copy_url = sbox.repo_url + '/A_copy'
  wc_dir = sbox.wc_dir
  foo_path = sbox.ospath('A/foo', wc_dir)
  # r2: copy ^/A -> ^/A_copy
  sbox.simple_repo_copy('A', 'A_copy')
  # r3: add a file foo on ^/A_copy branch
  wc2_dir = sbox.add_wc_path('wc2')
  foo2_path = sbox.ospath('foo', wc2_dir)
  svntest.main.run_svn(None, "checkout", A_copy_url, wc2_dir)
  svntest.main.file_write(foo2_path, "This is initially file foo.\n")
  svntest.main.run_svn(None, "add", foo2_path)
  svntest.main.run_svn(None, "commit", '-m', svntest.main.make_log_msg(),
                       foo2_path)
  # r4: make a change to foo
  svntest.main.file_append(foo2_path, "This is a new line in file foo.\n")
  svntest.main.run_svn(None, "commit", '-m', svntest.main.make_log_msg(),
                       wc2_dir)
  # cherry-pick r4 to ^/A -- the resulting tree conflict creates
  # an actual-only node for 'A/foo'
  sbox.simple_update()
  svntest.main.run_svn(None, "merge", '-c', '4', A_copy_url,
                       os.path.join(wc_dir, 'A'))
  # Attempt running various commands on foo and verify expected behavior
  # add
  expected_stdout = None
  expected_stderr = ".*foo.*is an existing item in conflict.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "add", foo_path)
  # add (with an existing obstruction of foo)
  svntest.main.file_write(foo_path, "This is an obstruction of foo.\n")
  expected_stdout = None
  expected_stderr = ".*foo.*is an existing item in conflict.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "add", foo_path)
  os.remove(foo_path) # remove obstruction
  # blame (praise, annotate, ann)
  expected_stdout = None
  expected_stderr = ".*foo.*not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "blame", foo_path)
  # cat
  expected_stdout = None
  expected_stderr = ".*foo.*not under version control.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "cat", foo_path)
  # cat -rBASE
  expected_stdout = None
  expected_stderr = ".*foo.*not under version control.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "cat", "-r", "BASE", foo_path)
  # changelist (cl)
  expected_stdout = None
  expected_stderr = ".*svn: warning: W155010: The node '.*foo' was not found."
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "changelist", "my_changelist", foo_path)
  # checkout (co)
  ### this does not error out -- needs review
  expected_stdout = None
  expected_stderr = []
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "checkout", A_copy_url, foo_path)
  ### for now, ignore the fact that checkout succeeds and remove the nested
  ### working copy so we can test more commands
  def onerror(function, path, execinfo):
    # Make read-only entries writable so rmtree can delete them.
    os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
    os.remove(path)
  shutil.rmtree(foo_path, onerror=onerror)
  # cleanup
  expected_stdout = None
  expected_stderr = ".*foo.*is not a working copy directory"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "cleanup", foo_path)
  # commit (ci)
  expected_stdout = None
  expected_stderr = ".*foo.*remains in conflict.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "commit", foo_path)
  # copy (cp)
  expected_stdout = None
  expected_stderr = ".*foo.*does not exist.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "copy", foo_path, foo_path + ".copy")
  # delete (del, remove, rm)
  expected_stdout = None
  expected_stderr = ".*foo.*is not under version control.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "delete", foo_path)
  # diff (di)
  expected_stdout = None
  expected_stderr = ".*E155.*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "diff", foo_path)
  # export
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "export", foo_path, sbox.get_tempname())
  # import
  expected_stdout = None
  expected_stderr = ".*(foo.*does not exist|Can't stat.*foo).*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "import", '-m', svntest.main.make_log_msg(),
                     foo_path, sbox.repo_url + '/foo_imported')
  # info
  expected_info = {
    'Tree conflict': 'local missing or deleted or moved away, incoming file edit upon merge.*',
    'Name': 'foo',
    'Schedule': 'normal',
    'Node Kind': 'none',
    'Path': re.escape(sbox.ospath('A/foo')),
  }
  run_and_verify_info([expected_info], foo_path)
  # list (ls)
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "list", foo_path)
  # lock
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "lock", foo_path)
  # log
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "log", foo_path)
  # merge
  # note: this is intentionally a no-op merge that does not record mergeinfo
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "merge", '--ignore-ancestry', '-c', '4',
                     A_copy_url + '/mu', foo_path)
  # mergeinfo
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "mergeinfo", A_copy_url + '/foo', foo_path)
  # mkdir
  expected_stdout = None
  expected_stderr = ".*foo.*is an existing item in conflict.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "mkdir", foo_path)
  # move (mv, rename, ren)
  expected_stdout = None
  expected_stderr = ".*foo.*does not exist.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "move", foo_path, foo_path + ".moved")
  # patch
  expected_stdout = None
  expected_stderr = ".*foo.*does not exist.*"
  patch_path = sbox.get_tempname()
  f = open(patch_path, 'w')
  patch_data = [
    "--- foo (revision 2)\n"
    "+++ foo (working copy)\n"
    "@@ -1 +1,2 @@\n"
    " foo\n"
    " +foo\n"
  ]
  for line in patch_data:
    f.write(line)
  f.close()
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "patch", patch_path, sbox.ospath("A/foo"))
  # propdel (pdel, pd)
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "propdel", "svn:eol-style", foo_path)
  # propget (pget, pg)
  expected_stdout = None
  expected_stderr = ".*foo.*is not under version control.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "propget", "svn:eol-style", foo_path)
  # proplist (plist, pl)
  expected_stdout = None
  expected_stderr = ".*foo.*is not under version control.*"
  svntest.actions.run_and_verify_svn(expected_stdout, expected_stderr,
                                     "proplist", foo_path)
  # propset (pset, ps)
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "propset", "svn:eol-style", "native", foo_path)
  # relocate
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "relocate", A_copy_url + "/foo", foo_path)
  # resolve
  expected_stdout = "Resolved conflicted state of.*foo.*"
  expected_stderr = []
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "resolve", "--accept", "working", foo_path)
  # revert the entire working copy and repeat the merge so we can test
  # more commands
  svntest.main.run_svn(None, "revert", "-R", wc_dir)
  svntest.main.run_svn(None, "merge", '-c', '4', A_copy_url,
                       os.path.join(wc_dir, 'A'))
  # revert
  expected_stdout = "Reverted.*foo.*"
  expected_stderr = []
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "revert", foo_path)
  # revert the entire working copy and repeat the merge so we can test
  # more commands
  svntest.main.run_svn(None, "revert", "-R", wc_dir)
  svntest.main.run_svn(None, "merge", '-c', '4', A_copy_url,
                       os.path.join(wc_dir, 'A'))
  # revert
  expected_stdout = "Reverted.*foo.*"
  expected_stderr = []
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "revert", "-R", foo_path)
  # revert the entire working copy and repeat the merge so we can test
  # more commands
  svntest.main.run_svn(None, "revert", "-R", wc_dir)
  svntest.main.run_svn(None, "merge", '-c', '4', A_copy_url,
                       os.path.join(wc_dir, 'A'))
  # status (stat, st)
  expected_status = wc.State(foo_path, {
    '' : Item(status='! ', treeconflict='C'),
  })
  run_and_verify_status(foo_path, expected_status)
  # switch (sw)
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "switch", A_copy_url + "/foo", foo_path)
  # unlock
  expected_stdout = None
  expected_stderr = ".*foo.*was not found.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "unlock", foo_path)
  # update (up)
  # This doesn't skip because the update is anchored at the parent of A,
  # the parent of A is not in conflict, and the update doesn't attempt to
  # change foo itself.
  expected_stdout = [
   "Updating '" + foo_path + "':\n", "At revision 4.\n"]
  expected_stderr = []
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "update", foo_path)
  # upgrade
  expected_stdout = None
  expected_stderr = ".*Can't upgrade.*foo.*"
  run_and_verify_svn(expected_stdout, expected_stderr,
                     "upgrade", foo_path)
#----------------------------------------------------------------------
# Regression test for an issue #3526 variant
#
@Issues(3526)
def update_dir_with_not_present(sbox):
  "lock status update shouldn't flag tree conflict"
  # Issue #3526 variant: after moving a file out of A/B and then deleting
  # A/B locally, the commit must fail as out-of-date first; updating must
  # NOT flag a tree conflict, and the commit must then succeed.
  sbox.build()
  wc_dir = sbox.wc_dir
  newtxt = sbox.ospath('A/B/new.txt')
  main.file_write(newtxt, 'new.txt')
  sbox.simple_add('A/B/new.txt')
  sbox.simple_commit()
  sbox.simple_move('A/B/new.txt', 'A/C/newer.txt')
  sbox.simple_commit()
  sbox.simple_rm('A/B')
  # We can't commit this without updating (ra_svn produces its own error)
  run_and_verify_svn(None,
                     "svn: (E155011|E160028|E170004): (Dir|Item).*B.*out of date",
                     'ci', '-m', '', wc_dir)
  # So we run update
  run_and_verify_svn(None, [],
                     'up', wc_dir)
  # And now we can commit
  run_and_verify_svn(None, [],
                     'ci', '-m', '', wc_dir)
def update_delete_mixed_rev(sbox):
  "update that deletes mixed-rev"
  # An incoming delete of A/B hits a mixed-revision subtree with a local
  # addition (A/B/E2); the update must flag a tree conflict on A/B, and
  # resolving to the working state must commit as multiple copies.
  sbox.build()
  wc_dir = sbox.wc_dir
  sbox.simple_move('A/B/E/alpha', 'A/B/E/alpha2')
  sbox.simple_commit()
  sbox.simple_update()
  sbox.simple_rm('A/B')
  sbox.simple_commit()
  # Create the mixed-revision state: whole tree at r1, A/B/E at r2.
  sbox.simple_update(revision=1)
  sbox.simple_update(target='A/B/E', revision=2)
  sbox.simple_mkdir('A/B/E2')
  # Update raises a tree conflict on A/B due to local mod A/B/E2
  expected_output = wc.State(wc_dir, {
      'A/B' : Item(status='  ', treeconflict='C'),
      })
  expected_disk = main.greek_state.copy()
  expected_disk.add({
      'A/B/E2'       : Item(),
      'A/B/E/alpha2' : Item(contents='This is the file \'alpha\'.\n'),
      })
  expected_disk.remove('A/B/E/alpha')
  expected_status = get_virginal_state(wc_dir, 3)
  expected_status.remove('A/B/E/alpha')
  expected_status.add({
      'A/B/E2'       : Item(status='A ', wc_rev='-'),
      'A/B/E/alpha2' : Item(status='  ', copied='+', wc_rev='-'),
      })
  expected_status.tweak('A/B',
                        status='A ', copied='+', treeconflict='C', wc_rev='-')
  expected_status.tweak('A/B/F', 'A/B/E', 'A/B/E/beta', 'A/B/lambda',
                        copied='+', wc_rev='-')
  # The entries world doesn't see a changed revision as another add
  # while the WC-NG world does...
  expected_status.tweak('A/B/E', status='A ', entry_status='  ')
  run_and_verify_update(wc_dir,
                        expected_output, expected_disk, expected_status,
                        check_props=True)
  # Resolving to working state should give a mixed-revision copy that
  # gets committed as multiple copies
  run_and_verify_resolved([sbox.ospath('A/B')], sbox.ospath('A/B'))
  expected_output = wc.State(wc_dir, {
      'A/B'    : Item(verb='Adding'),
      'A/B/E'  : Item(verb='Replacing'),
      'A/B/E2' : Item(verb='Adding'),
      })
  expected_status.tweak('A/B', 'A/B/E', 'A/B/E2', 'A/B/F', 'A/B/E/alpha2',
                        'A/B/E/beta', 'A/B/lambda',
                        status='  ', wc_rev=4, copied=None, treeconflict=None)
  run_and_verify_commit(wc_dir,
                        expected_output, expected_status)
  expected_info = {
      'Name': 'alpha2',
      'Node Kind': 'file',
  }
  run_and_verify_info([expected_info], sbox.repo_url + '/A/B/E/alpha2')
#######################################################################
# Run the tests
# list all tests here, starting with None:
# Tests exported to the svntest harness; the leading None keeps the test
# numbering 1-based (test 1 == up_sw_file_mod_onto_del).
test_list = [ None,
              up_sw_file_mod_onto_del,
              up_sw_file_del_onto_mod,
              up_sw_file_del_onto_del,
              up_sw_file_add_onto_add,
              up_sw_dir_mod_onto_del,
              up_sw_dir_del_onto_mod,
              up_sw_dir_del_onto_del,
              up_sw_dir_add_onto_add,
              merge_file_mod_onto_not_file,
              merge_file_del_onto_not_same,
              merge_file_del_onto_not_file,
              merge_file_add_onto_not_none,
              merge_dir_mod_onto_not_dir,
              merge_dir_del_onto_not_same,
              merge_dir_del_onto_not_dir,
              merge_dir_add_onto_not_none,
              force_del_tc_inside,
              force_del_tc_is_target,
              query_absent_tree_conflicted_dir,
              up_add_onto_add_revert,
              lock_update_only,
              at_directory_external,
              actual_only_node_behaviour,
              update_dir_with_not_present,
              update_delete_mixed_rev,
             ]
if __name__ == '__main__':
  # Hand the list to the shared harness; run_tests exits the process.
  svntest.main.run_tests(test_list)
  # NOTREACHED
### End of file.
| 37.517799 | 95 | 0.613491 |
acf7ca4f0601ecbb782520b69ffd4a9d449011b4 | 334 | py | Python | packages/pyright-internal/src/tests/samples/dataclass21.py | Microsoft/pyright | adf7c3e92e4540d930e3652de3c1c335855af595 | [
"MIT"
] | 3,934 | 2019-03-22T09:26:41.000Z | 2019-05-06T21:03:08.000Z | packages/pyright-internal/src/tests/samples/dataclass21.py | Microsoft/pyright | adf7c3e92e4540d930e3652de3c1c335855af595 | [
"MIT"
] | 107 | 2019-03-24T04:09:37.000Z | 2019-05-06T17:00:04.000Z | packages/pyright-internal/src/tests/samples/dataclass21.py | Microsoft/pyright | adf7c3e92e4540d930e3652de3c1c335855af595 | [
"MIT"
] | 119 | 2019-03-23T10:48:04.000Z | 2019-05-06T08:57:56.000Z | # This sample tests that a dataclass member without a type annotation
# but with a field descriptor assignment results in an error.
from dataclasses import dataclass, field
@dataclass
class MyClass:
    id: int  # ok: annotated field without a default
    x: int = field()  # ok: field() descriptor with an explicit annotation
    # This should generate an error because it will result in a runtime exception
    y = field()
| 23.857143 | 81 | 0.733533 |
acf7ccbbeed8985c88676225092ba18edd0335aa | 191 | py | Python | SnakeBackend/main/admin.py | PatrickKoss/Snake | b15ba77d7b848db48206c14d8e16f10ebcc2e808 | [
"MIT"
] | null | null | null | SnakeBackend/main/admin.py | PatrickKoss/Snake | b15ba77d7b848db48206c14d8e16f10ebcc2e808 | [
"MIT"
] | 11 | 2021-03-19T12:27:29.000Z | 2022-02-27T11:10:27.000Z | SnakeBackend/main/admin.py | PatrickKoss/Snake | b15ba77d7b848db48206c14d8e16f10ebcc2e808 | [
"MIT"
] | null | null | null | from django.contrib import admin
from leaderboard.models import SnakeCategories, Scoreboard
# Register the leaderboard models with the default Django admin site so
# entries can be inspected and edited through the admin UI.
admin.site.register(SnakeCategories)
admin.site.register(Scoreboard)
| 27.285714 | 58 | 0.842932 |
acf7ccceb7f8efa12b3787f9fd9525d716a55382 | 3,769 | py | Python | lib/custom_whatlies/language/_sense2vec_lang.py | Pliploop/NLP_Bulk_labelling_app | a9a7bf3ea5b48730b56a901a9b857322c6b1f75a | [
"MIT"
] | null | null | null | lib/custom_whatlies/language/_sense2vec_lang.py | Pliploop/NLP_Bulk_labelling_app | a9a7bf3ea5b48730b56a901a9b857322c6b1f75a | [
"MIT"
] | null | null | null | lib/custom_whatlies/language/_sense2vec_lang.py | Pliploop/NLP_Bulk_labelling_app | a9a7bf3ea5b48730b56a901a9b857322c6b1f75a | [
"MIT"
] | null | null | null | import spacy
from sense2vec import Sense2Vec, Sense2VecComponent
from lib.custom_whatlies.embedding import Embedding
from lib.custom_whatlies.embeddingset import EmbeddingSet
class Sense2VecLanguage:
    """
    Lazily fetches [Embedding][whatlies.embedding.Embedding]s or
    [EmbeddingSet][whatlies.embeddingset.EmbeddingSet]s from a sense2vec
    language backend. This object is meant for retrieval, not plotting.

    Arguments:
        sense2vec_path: path to the downloaded sense2vec vectors

    **Usage**:

    ```python
    > lang = Sense2VecLanguage(sense2vec_path="/path/to/reddit_vectors-1.1.0")
    > lang['bank|NOUN']
    > lang['bank|VERB']
    ```

    Important:
        The reddit vectors are not given by this library.
        You can find the download link [here](https://github.com/explosion/sense2vec#pretrained-vectors).

    Warning:
        This tool is temporarily not supported because sense2vec isn't supported by spaCy v3 just yet.
    """

    def __init__(self, sense2vec_path):
        self.sense2vec_path = sense2vec_path
        self.s2v = Sense2Vec().from_disk(sense2vec_path)

    def __getitem__(self, query):
        """
        Retrieve a single embedding (string query) or a set of embeddings
        (iterable of string queries).

        Arguments:
            query: single string or list of strings

        **Usage**

        ```python
        > lang = SpacyLanguage("en_core_web_md")
        > lang['duck|NOUN']
        > lang[['duck|NOUN'], ['duck|VERB']]
        ```
        """
        if not isinstance(query, str):
            # Recurse per item and bundle the results.
            return EmbeddingSet(*(self[item] for item in query))
        return Embedding(query, self.s2v[query])

    def embset_similar(self, query, n=10):
        """
        Retrieve an [EmbeddingSet][whatlies.embeddingset.EmbeddingSet] with
        the `n` items most similar to the passed query.

        Arguments:
            query: query to use
            n: the number of items you'd like to see returned

        Returns:
            An [EmbeddingSet][whatlies.embeddingset.EmbeddingSet] containing the similar embeddings.
        """
        neighbours = self.s2v.most_similar(query, n=n)
        embeddings = [self[token] for token, _ in neighbours]
        return EmbeddingSet(*embeddings, name=f"Embset[s2v similar_{n}:{query}]")

    def score_similar(self, query, n=10):
        """
        Retrieve the `n` embeddings most similar to the passed query,
        together with their similarity scores.

        Arguments:
            query: query to use
            n: the number of items you'd like to see returned

        Returns:
            A list of ([Embedding][whatlies.embedding.Embedding], score) tuples.
        """
        return [(self[token], score)
                for token, score in self.s2v.most_similar(query, n=n)]
class Sense2VecSpacyLanguage:
    """
    This object is used to lazily fetch `Embedding`s from a spaCy pipeline
    with a sense2vec component attached. Note that it is different than an
    `EmbeddingSet` in the sense it does not have anything precomputed.

    **Usage**:

    ```
    lang = Sense2VecSpacyLanguage(model_name="en_core_web_sm", sense2vec_path="/path/to/reddit_vectors-1.1.0")
    lang['bank|NOUN']
    lang['bank|VERB']
    ```
    """

    def __init__(self, model_name, sense2vec_path):
        self.nlp = spacy.load(model_name)
        s2v = Sense2VecComponent(self.nlp.vocab).from_disk(sense2vec_path)
        # BUGFIX: `Language` objects have no `add` method; pipeline
        # components are attached with `add_pipe` (spaCy v2 API, which
        # Sense2VecComponent targets).
        self.nlp.add_pipe(s2v)

    def __getitem__(self, string):
        """
        Retrieve an `Embedding` for `string`. If the string contains a span
        marked with square brackets (e.g. "the [quick fox] jumped"), only
        that span's vector is used; otherwise the whole document vector.
        """
        doc = self.nlp(string)
        vec = doc.vector
        # Locate an optional [bracketed] token span. Use None sentinels:
        # the previous `start, end = 0, -1` defaults made a span starting
        # at token 0 indistinguishable from "no span found", so it was
        # silently ignored by the `start != 0` guard.
        start, end = None, None
        for idx, word in enumerate(string.split(" ")):
            # startswith/endswith also avoid an IndexError on empty words
            # produced by consecutive spaces.
            if word.startswith("["):
                start = idx
            if word.endswith("]"):
                end = idx + 1
        if start is not None and end is not None:
            # NOTE(review): token indices are derived from a plain
            # whitespace split; assumes spaCy tokenization lines up with
            # that -- TODO confirm for punctuation-heavy input.
            vec = doc[start:end].vector
        return Embedding(string, vec)
| 31.672269 | 118 | 0.617405 |
acf7cce31a8f503c5e28bbc6ae87591dd1933812 | 1,734 | py | Python | sdk/python/tests/compiler/testdata/conditions_with_global_params.py | shrivs3/kfp-tekton | b7c1d542d43ea24e70f24a874a7c972199e8f976 | [
"Apache-2.0"
] | 102 | 2019-10-23T20:35:41.000Z | 2022-03-27T10:28:56.000Z | sdk/python/tests/compiler/testdata/conditions_with_global_params.py | shrivs3/kfp-tekton | b7c1d542d43ea24e70f24a874a7c972199e8f976 | [
"Apache-2.0"
] | 891 | 2019-10-24T04:08:17.000Z | 2022-03-31T22:45:40.000Z | sdk/python/tests/compiler/testdata/conditions_with_global_params.py | shrivs3/kfp-tekton | b7c1d542d43ea24e70f24a874a7c972199e8f976 | [
"Apache-2.0"
] | 85 | 2019-10-24T04:04:36.000Z | 2022-03-01T10:52:57.000Z | # Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl
from kfp_tekton.compiler import TektonCompiler
from kfp import components
@components.func_to_container_op
def add_numbers(a: int, b: int) -> int:
    """Containerized step: print and return the sum of two integers."""
    total = a + b
    print(total)
    return total
@components.func_to_container_op
def print_number(a: int) -> int:
    """Containerized step: echo the given integer and pass it through."""
    print(a)
    return a
@components.func_to_container_op
def notify_success():
    """Containerized step run when the pipeline condition holds."""
    print('SUCCESS!')
@components.func_to_container_op
def notify_failure():
    """Containerized step run when the pipeline condition does not hold."""
    print('FAILED!')
@components.func_to_container_op
def produce_number() -> int:
    """Containerized step: print and return a random integer in [0, 1000)."""
    # Imported inside the body so the generated component is self-contained.
    import random
    value = random.randrange(0, 1000)
    print(value)
    return value
@dsl.pipeline(name='conditions-with-global-params')
def conditions_with_global_params(n: int = 5, threshold: int = 10, lower_bound: int = 15):
    # Pipeline whose dsl.Condition branches compare a task output against
    # the global pipeline parameter `threshold`.
    add_numbers_task = add_numbers(n, lower_bound)
    print_number_task = print_number(add_numbers_task.output)
    # Exactly one of the two mutually-exclusive branches runs at runtime.
    with dsl.Condition(print_number_task.output > threshold):
        notify_success()
    with dsl.Condition(print_number_task.output <= threshold):
        notify_failure()
if __name__ == '__main__':
    # Compile the pipeline to a Tekton YAML file next to this script.
    TektonCompiler().compile(conditions_with_global_params, __file__.replace('.py', '.yaml'))
| 27.52381 | 93 | 0.739908 |
acf7cd3fb4cfe80306c7921ef5b23be0ea078ea1 | 19,190 | py | Python | web/router.py | Daia-Systems-SPA/sii-dte-py | a1030f4abf3d4004fdede88072cc8621214158bf | [
"MIT"
] | null | null | null | web/router.py | Daia-Systems-SPA/sii-dte-py | a1030f4abf3d4004fdede88072cc8621214158bf | [
"MIT"
] | null | null | null | web/router.py | Daia-Systems-SPA/sii-dte-py | a1030f4abf3d4004fdede88072cc8621214158bf | [
"MIT"
] | null | null | null | __version__ = '0.1'
import json
import datetime
import uuid
import os
from flask import render_template, jsonify, session, request, redirect, url_for, make_response, Flask
from flask_cors import cross_origin
from instance.config import APP_ROOT, DRY_RUN
from lib.zeep.sii_plugin import SiiPlugin
from lib.models.dte import DTEBuidler, DTECAF, DTECover, DTEPayload
from lib.models.sii_token import Token
from lib.pdf_generator import PDFGenerator
from lib.certificate_service import CertificateService
from lib.caf_service import CAFService
from lib.document_service import DocumentService
from lib.sii_connector_auth import SiiConnectorAuth
from lib.sii_document_uploader import SiiDocumentUploader
app = Flask(__name__, instance_relative_config=True)

# Session-signing secret. BUGFIX: the previous code derived the key from
# datetime.datetime.utcfromtimestamp(0), which is the *constant* Unix epoch
# (1970-01-01 00:00:00) -- i.e. a fixed, publicly known secret key that
# would let anyone forge session cookies. Per the original intent ("changes
# every time at application startup"), use fresh randomness instead.
app.secret_key = os.urandom(24)

# Private key/certificate material stays in process memory only, keyed by
# the per-session uid (never written to the cookie-backed session).
_key_by_uid = {}
# In-memory stores; future: move to a database.
_caf_by_uid = {}
_document_list_by_uid = {}
def redirect_url(default='index'):
    """Best-effort redirect target after an action: the explicit ?next=
    parameter if given, else the referring page, else the named fallback
    endpoint."""
    target = request.args.get('next')
    if not target:
        target = request.referrer
    if not target:
        target = url_for(default)
    return target
def is_anonymous_authorized_pages(endpoint):
    """True for endpoints an anonymous (not logged in) visitor may reach:
    the login flow, static assets, the landing page, unresolved endpoints
    (None) and every API route (endpoint names starting with 'api')."""
    if endpoint is None:
        return True
    return endpoint in ('login', 'static', 'index') or endpoint.startswith('api')
@app.before_request
def auth():
    """Gate every request: protected endpoints require an active session."""
    if is_anonymous_authorized_pages(request.endpoint):
        # Anonymous access allowed; let the request proceed.
        return None
    if 'uid' not in session:
        # Not logged in: answer HTTP 403 Forbidden.
        return "Not logged in", 403
@app.route('/login', methods=['POST'])
@cross_origin()
def login():
    # Open a session from the posted company data. Expected form fields:
    # RUT (Chilean tax id), RES (SII resolution number) and RES_Date.
    # No password check happens here; authentication towards SII is done
    # later via the uploaded certificate (see set_certificate/get_token).
    if 'RUT' in request.form:
        # Fresh opaque id; keys the in-memory per-session stores.
        session['uid'] = uuid.uuid4()
        session['RUT'] = request.form['RUT']
        session['RES'] = request.form['RES']
        session['RES_Date'] = request.form['RES_Date']
        """ TODO """
        # NOTE(review): user id hard-coded to 1 -- single-tenant for now.
        session['user_id'] = 1
        return redirect(redirect_url())
    else:
        return "Missing RUT parameter.", 400
@app.route('/logout', methods=['GET'])
@cross_origin()
def logout():
    """ Delete session """
    # Capture the session id before clearing so the in-memory stores can
    # be purged for this user too.
    uid = str(session['uid'])
    session.clear()
    try:
        # Drop key material and CAF kept only in process memory.
        del _key_by_uid[uid]
        del _caf_by_uid[uid]
    except KeyError:
        """ No certificate registered """
        pass
    return redirect(redirect_url('login'))
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
ALLOWED_CERT_EXTENSIONS = ['pfx', 'pem']
def is_valid_cert_file(filename):
    """True if *filename* carries an allowed certificate extension
    (pfx/pem). The comparison is case-insensitive so uploads named e.g.
    'CERT.PFX' are accepted as well."""
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_CERT_EXTENSIONS
ALLOWED_CAF_EXTENSIONS = ['xml', 'caf']
def is_valid_caf_file(filename):
    """True if *filename* carries an allowed CAF extension (xml/caf).
    The comparison is case-insensitive, matching is_valid_cert_file."""
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_CAF_EXTENSIONS
@app.route('/certificate', methods=['POST'])
@cross_origin()
def set_certificate():
    # Receive the user's SII digital certificate (.pfx) and its password,
    # extract the private key / certificate pair, and keep them in process
    # memory (keyed by session uid), never in the cookie-backed session.
    certificate = request.files['certificate']
    password = request.form['password']
    if is_valid_cert_file(certificate.filename):
        uid = str(session['uid'])
        """ Save in temporary location """
        certificate.filename = str(session['uid']) + '.pfx'
        filepath = APP_ROOT + 'temp/' + str(certificate.filename)
        certificate.save(filepath)
        """ Extract key and certificate """
        cert = CertificateService(pfx_file_path=filepath, pfx_password=password)
        cert.generate_certificate_and_key()
        """ Store in session """
        _key_by_uid[uid] = { 'key': cert.key, 'cert': cert.certificate }
        session['key_state'] = 'loaded'
        """ Delete """
        # Remove the temporary .pfx as soon as extraction is done.
        os.remove(filepath)
        # An empty/None key means extraction failed (wrong password?).
        if cert.key is not None and len(cert.key) > 0:
            return redirect(redirect_url())
        else:
            return "Could not extract key (Invalid password ?)", 400
    else:
        return "Valid file extensions: " + str(ALLOWED_CERT_EXTENSIONS), 400
@app.route('/token', methods=['GET'])
@cross_origin()
def get_token():
    # Two-step SII authentication: fetch a seed, sign it with the user's
    # private key, and exchange it for a session token.
    if 'key_state' in session:
        uid = str(session['uid'])
        if uid in _key_by_uid:
            """ Get seed """
            # NOTE(review): local `auth` shadows the module-level auth()
            # request hook -- harmless here, but rename would be clearer.
            auth = SiiConnectorAuth(server='palena', module=SiiConnectorAuth.GET_SEED_MODULE_ID)
            seed = auth.get_seed()
            """ Get token """
            auth = SiiConnectorAuth(server='palena',module=SiiConnectorAuth.GET_TOKEN_MODULE_ID)
            auth.set_key_and_certificate(_key_by_uid[uid]['key'], _key_by_uid[uid]['cert'])
            token_string = auth.get_token(seed)
            token = Token(token_string)
            """ Store in session """
            session['token'] = token.to_json()
            return token.to_json(), 200
    return "Certificate not loaded.", 400
@app.route('/dte', methods=['POST'])
@cross_origin()
def set_dte():
    # Build a DTE (electronic tax document) from the posted JSON payload,
    # persist it, cache the built object for PDF generation, and return
    # the database id of the stored document.
    """ Format : """
    """
    {
        'DocumentType':'52',
        'DocumentNumber': '', 'SII': '',
        'Header': {
            'Specifics': {
                'ShippingPort': '',
                'LandingPort': '',
                'MovementType': '',
                'ExpeditionType': ''
            }
        },
        'Date': '',
        'Receiver': {
            'Name': '', 'Address': '', 'Activity': '', 'RUT': '', 'City': '', 'Phone': ''
        },
        'Sender': {
            'Name': '', 'Address': '', 'Activity': '', 'RUT': '', 'City': '', 'Phone': ''
        },
        'Details':
        {
            '1': {'Code': '', 'Name': '', 'Quantity': '', 'Unit': '', 'UnitPrice': ''},
            '2': {'Code': '', 'Name': '', 'Quantity': '', 'Unit': '', 'UnitPrice': ''},
            '3': {'Code': '', 'Name': '', 'Quantity': '', 'Unit': '', 'UnitPrice': ''},
            '4': {'Code': '', 'Name': '', 'Quantity': '', 'Unit': '', 'UnitPrice': ''},
            '5': {'Code': '', 'Name': '', 'Quantity': '', 'Unit': '', 'UnitPrice': ''},
            '6': {'Code': '', 'Name': '', 'Quantity': '', 'Unit': '', 'UnitPrice': ''},
            '7': {'Code': '', 'Name': '', 'Quantity': '', 'Unit': '', 'UnitPrice': ''}
        },
        'Comment': '',
        'IVA': ''
    }
    """
    """ Get POSTed parameters, build PDF and return file """
    uid = str(session['uid'])
    pdf = PDFGenerator()
    document_service = DocumentService(session['user_id'])
    caf_service = CAFService(session['user_id'])
    form_parameters = request.get_json(force=True)
    # NOTE(review): `type` shadows the builtin; it is the DTE type code.
    type = int(form_parameters["DocumentType"])
    folio = int(form_parameters['DocumentNumber'])
    receiver_parameters = form_parameters['Receiver']
    sender_parameters = form_parameters['Sender']
    specific_header_parameters = form_parameters['Header']['Specifics']
    specific_header_parameters['DocumentNumber'] = form_parameters['DocumentNumber']
    specific_header_parameters['Comment'] = form_parameters['Comment']
    if type == 52:
        """ Especifico guia de despacho """
        # Type 52 = dispatch advice ("guia de despacho") specifics.
        specific_header_parameters['Extras'] = {}
    item_list = form_parameters['Details']
    discount_list = {}
    reference_list = {}
    if 'Discounts' in form_parameters:
        discount_list = form_parameters['Discounts']
    if 'References' in form_parameters:
        reference_list = form_parameters['References']
    builder = DTEBuidler()
    """ Bind user information """
    specific_header_parameters['User'] = {}
    specific_header_parameters['User']['Resolution'] = session['RES']
    specific_header_parameters['User']['ResolutionDate'] = session['RES_Date']
    specific_header_parameters['User']['RUT'] = session['RUT']
    caf = DTECAF(parameters={}, signature='', private_key='')
    caf_id = None
    if folio == 0:
        """ No tenemos folio asociado al documento, buscamos el siguiente folio correspondiente
        en la base de datos """
        # Folio 0 means "no folio supplied": take the next one from the
        # CAF stored in the database for this document type.
        stored_caf = caf_service.get_caf_for_document_type(type)
        caf_id = stored_caf.id
        caf.load_from_XML_string(stored_caf.file)
    else:
        try:
            """ Si el folio viene de un proveedor externo, debe estar incluido el CAF """
            # An externally supplied folio must come with its own CAF.
            caf_file = specific_header_parameters['CAF_STRING']
            caf.load_from_XML_string(caf_file)
        except:
            # NOTE(review): bare except also hides XML parsing errors,
            # not only a missing 'CAF_STRING' key.
            raise ValueError("Se especificó un numero de folio sin CAF asociado.")
    tree, pretty_dte, dte_object = builder.build(type, sender_parameters, receiver_parameters, specific_header_parameters, item_list, reference_list, discount_list, caf)
    envelope = {}
    envelope[1] = dte_object
    """ Generate cover (Caratula) """
    cover = DTECover(dtes=envelope, resolution={'Date': session['RES_Date'], 'Number': session['RES']}, user=specific_header_parameters['User'])
    """ Generate payload to be uploaded (without signature, only tagged)"""
    payload = DTEPayload(dtes=envelope, cover=cover, user={})
    document_id = dte_object.get_document_id()
    if 'InternalUniqueId' in form_parameters:
        document_id = form_parameters['InternalUniqueId']
    if uid not in _document_list_by_uid:
        _document_list_by_uid[uid] = {}
    document = document_service.get_document_by_code(document_id)
    if not document:
        document_db_id = document_service.save_document(caf_id=caf_id,\
                                                        document_number=folio,\
                                                        document_code=document_id,\
                                                        document_type=dte_object.get_document_type(),\
                                                        document_xml=payload.dump(),\
                                                        document_json=json.dumps(specific_header_parameters),\
                                                        pdf_file=None)
    else:
        document_db_id = document.id
        """ Update JSON, XML """
        document_service.update_document(document_id=document_db_id, document_xml=payload.dump(), document_json=json.dumps(specific_header_parameters))
    # Cache the built DTE so the preview/sent PDF endpoints can render it.
    _document_list_by_uid[uid][str(document_db_id)] = (dte_object, pretty_dte)
    return str(document_db_id), 200
@app.route('/caf', methods=['POST'])
@cross_origin()
def set_caf():
    """Upload a CAF (folio authorization file), cache it for the session
    and persist it to the database."""
    uid = str(session['uid'])
    caf = request.files['caf']
    if is_valid_caf_file(caf.filename):
        # Valid CAF: keep the raw bytes in memory for this session.
        caf_service = CAFService(session['user_id'])
        caf_content = caf.read()
        _caf_by_uid[uid] = caf_content
        session['caf'] = caf.filename
        # Persist to the database.
        caf_service.import_from_XML(caf_content)
        return render_template('index.html'), 200
    # BUGFIX: the original fell through and implicitly returned None for an
    # invalid extension, which makes Flask raise a 500. Answer 400 instead,
    # mirroring set_certificate's error handling.
    return "Valid file extensions: " + str(ALLOWED_CAF_EXTENSIONS), 400
@app.route('/caf', methods=['GET'])
@cross_origin()
def get_caf():
    """ Get CAF saved for current user """
    caf_service = CAFService(session['user_id'])
    cafs = caf_service.get_created_CAF()
    """ FW """
    # Project the stored CAF rows into plain dicts for the template.
    result = []
    for caf in cafs:
        result.append({
            'id': caf.id ,
            'type': caf.document_type,
            'from': caf.document_number_from,
            'to': caf.document_number_to,
            'last_number_used':caf.last_number_used
        })
    return render_template('caf.html', cafs=result), 200
@app.route("/dte/<string:document_id>/sent", methods=['GET'])
@cross_origin()
def generate_sent_pdf(document_id):
    """ Get preview from previously built document """
    uid = str(session['uid'])
    pdf = PDFGenerator()
    """ We have a document with this id """
    if uid in _document_list_by_uid and document_id in _document_list_by_uid[uid]:
        dte_object, _ = _document_list_by_uid[uid][document_id]
        _, binary_pdf = pdf.generate_binary(dte_object)
        # Serve the bytes as a downloadable PDF named after the document id.
        response = make_response(binary_pdf)
        response.headers['Content-Type'] = 'application/pdf'
        response.headers['Content-Disposition'] = \
            'attachment; filename=%s.pdf' % document_id
        return response
    else:
        return "No document " + str(document_id) + " found", 404
@app.route('/dte/<string:document_id>/preview', methods=['GET'])
@cross_origin()
def generate_preview(document_id):
    """Render a preview PDF for a document previously built via POST /dte.

    Returns the PDF bytes as an attachment named PREV<document_id>.pdf,
    or 404 when no such document exists for the current session.
    """
    uid = str(session['uid'])
    pdf = PDFGenerator()
    if uid in _document_list_by_uid and document_id in _document_list_by_uid[uid]:
        dte_object, _ = _document_list_by_uid[uid][document_id]
        _, binary_pdf = pdf.generate_binary(dte_object)
        response = make_response(binary_pdf)
        response.headers['Content-Type'] = 'application/pdf'
        # BUGFIX: '%' binds tighter than '+', so the original
        # `'...filename=%s.pdf' % "PREV" + document_id` produced
        # 'attachment; filename=PREV.pdf<id>'. Parenthesize the
        # concatenation so the id lands inside the filename.
        response.headers['Content-Disposition'] = \
            'attachment; filename=%s.pdf' % ("PREV" + document_id)
        return response
    else:
        return "No document " + str(document_id) + " found", 404
@app.route('/document/form/<int:type>', methods=['GET'])
@cross_origin()
def get_document_form(type):
    # Build a pre-filled document form from the bundled sample JSON files.
    # NOTE(review): `type` shadows the builtin; it is the DTE type code
    # taken from the URL (e.g. 52).
    uid = str(session['uid'])
    sender_parameters = {}
    receiver_parameters = {}
    specific_header_parameters = {}
    item_list = {}
    caf_service = CAFService(session['user_id'])
    path = 'test/data'
    """ Read test files """
    with open(path + '/user.json', encoding='ISO-8859-1') as json_file:
        user_parameters = json.load(json_file)
    with open(path + '/sender.json', encoding='ISO-8859-1') as json_file:
        sender_parameters = json.load(json_file)
    with open(path + '/receiver.json', encoding='ISO-8859-1') as json_file:
        receiver_parameters = json.load(json_file)
    with open(path + '/items.json', encoding='ISO-8859-1') as json_file:
        item_list = json.load(json_file)
    with open(path + '/references.json', encoding='ISO-8859-1') as json_file:
        reference_list = json.load(json_file)
    with open(path + '/specifics.json', encoding='ISO-8859-1') as json_file:
        specific_header_parameters = json.load(json_file)
    try:
        with open(path + '/discounts.json', encoding='ISO-8859-1') as json_file:
            discounts_parameters = json.load(json_file)
    except:
        # Discounts sample data is optional; fall back to an empty list.
        discounts_parameters = []
        pass
    caf = DTECAF(parameters={}, signature='', private_key='')
    stored_caf = caf_service.get_caf_for_document_type(type)
    caf.load_from_XML_string(stored_caf.file)
    builder = DTEBuidler()
    """ Bind user information """
    specific_header_parameters['User'] = {}
    specific_header_parameters['User']['Resolution'] = session['RES']
    specific_header_parameters['User']['ResolutionDate'] = session['RES_Date']
    specific_header_parameters['User']['RUT'] = session['RUT']
    _, pretty_dte, dte_object = builder.build(type, sender_parameters, receiver_parameters, specific_header_parameters, item_list, reference_list, discounts_parameters, caf)
    parameters = dte_object.to_template_parameters()
    return render_template('sii_document_form.html', parameters=parameters), 200
@app.route('/dte/<int:document_id>/folio', methods=['GET'])
def get_document_folio(document_id):
    """Return the folio (document number) of a stored document as plain text.

    Responds ("0", 204) when the document does not exist for this user.
    """
    user_id = session['user_id']
    document_service = DocumentService(user_id)
    doc = document_service.get_document(document_id)
    if doc:
        return str(doc.document_number), 200
    else:
        return "0", 204
@app.route('/document/test/<int:type>/pdf', methods=['GET'])
@cross_origin()
def generate_test_pdf(type):
    """Build a DTE of *type* from test fixtures and return it as a PDF attachment."""
    uid = str(session['uid'])
    # Get parameters, build the PDF and return the file.
    pdf = PDFGenerator()
    sender_parameters = {}
    receiver_parameters = {}
    specific_header_parameters = {}
    item_list = {}
    caf_service = CAFService(session['user_id'])
    # Read fixture files (default encoding here, unlike get_document_form).
    with open('test/data/sender.json') as json_file:
        sender_parameters = json.load(json_file)
    with open('test/data/receiver.json') as json_file:
        receiver_parameters = json.load(json_file)
    with open('test/data/items.json') as json_file:
        item_list = json.load(json_file)
    with open('test/data/specifics.json') as json_file:
        specific_header_parameters = json.load(json_file)
    caf = DTECAF(parameters={}, signature='', private_key='')
    stored_caf = caf_service.get_caf_for_document_type(type)
    caf.load_from_XML_string(stored_caf.file)
    builder = DTEBuidler()
    # Bind the logged-in user's SII resolution information.
    specific_header_parameters['User'] = {}
    specific_header_parameters['User']['Resolution'] = session['RES']
    specific_header_parameters['User']['ResolutionDate'] = session['RES_Date']
    specific_header_parameters['User']['RUT'] = session['RUT']
    # NOTE(review): this call passes no reference/discount lists, unlike
    # get_document_form -- confirm DTEBuidler.build supports this arity.
    _, pretty_dte, dte_object = builder.build(type, sender_parameters, receiver_parameters, specific_header_parameters, item_list, caf)
    pdfFilename, binary_pdf = pdf.generate_binary(dte_object)
    response = make_response(binary_pdf)
    response.headers['Content-Type'] = 'application/pdf'
    response.headers['Content-Disposition'] = \
        'attachment; filename=%s.pdf' % pdfFilename
    return response
@app.route('/dte/<string:document_id>/sii', methods=['PUT'])
@cross_origin()
def send_to_sii(document_id):
    """Send the DTE stored under *document_id* for the session user to the SII."""
    uid = str(session['uid'])
    user_id = session['user_id']
    return do_send_to_sii(document_id, uid, user_id, session)
def do_send_to_sii(document_id, uid, user_id, session):
    """Sign the stored DTE, authenticate against the SII and upload it.

    Steps: fetch the pending document, re-target its receiver RUT to the
    SII, sign it, persist the ready-to-upload XML, obtain a seed/token,
    then upload (unless DRY_RUN) while advancing the document state
    (2 = sending, 3 = sent). Returns (document_id, 200).
    """
    document_service = DocumentService(user_id)
    builder = DTEBuidler()
    # Fetch the pending document.
    doc = document_service.get_document(document_id)
    siiSignature = SiiPlugin()
    pdf = PDFGenerator()
    # Load signing key/certificate.
    # NOTE(review): takes the first cached certificate, not necessarily the
    # one belonging to user_id -- confirm the single-user assumption.
    cert_data = list(_key_by_uid.values())[0]
    siiSignature.key = cert_data['key']
    siiSignature.cert = cert_data['cert']
    # The XML declaration must be stripped before signing and re-added after.
    declare = '<?xml version="1.0" encoding="ISO-8859-1"?>'
    def remove_xml_declaration(xml):
        return xml.replace('<?xml version="1.0" encoding="ISO-8859-1"?>', '')
    payload = remove_xml_declaration(doc.xml_string)
    # To submit to the SII, RutReceptor must be the SII's own RUT.
    def change_payload_receptor_rut(new_rut:str, xml_string:str):
        import re
        REGEX_RUT_RECEP = r"<RutReceptor>(.*)</RutReceptor>"
        return re.sub(REGEX_RUT_RECEP, "<RutReceptor>" + new_rut + "</RutReceptor>", xml_string)
    # 60803000-K is the SII's RUT.
    payload_for_sii = change_payload_receptor_rut('60803000-K', payload)
    # Sign the re-targeted payload.
    ready_to_upload = siiSignature.sign_tagged_message(payload_for_sii)
    # Add the XML declaration back on top of the signed body.
    ready_to_upload = declare + '\n' + ready_to_upload
    # Persist the ready-to-upload XML (DB and a temp file for auditing).
    document_service.update_document(document_id=document_id, \
        document_xml=ready_to_upload)
    with open(APP_ROOT + 'temp/DTE_ENV_' + str(document_id) + '.xml', "w") as myXML:
        myXML.write(ready_to_upload)
    # Get an auth token: request a seed first, then exchange it for a token.
    auth = SiiConnectorAuth(server='palena', module=SiiConnectorAuth.GET_SEED_MODULE_ID)
    seed = auth.get_seed()
    auth = SiiConnectorAuth(server='palena', module=SiiConnectorAuth.GET_TOKEN_MODULE_ID)
    auth.set_key_and_certificate(cert_data['key'], cert_data['cert'])
    token_string = auth.get_token(seed)
    token = Token(token_string)
    # Advance the tracker state to "EN ENVIO" (sending).
    document_service.set_document_state(document_id=document_id, state=2, serial=token.get_token(), result='')
    if not DRY_RUN :
        # Upload the document.
        uploader = SiiDocumentUploader(url='https://palena.sii.cl/cgi_dte/UPL/DTEUpload', token=token.get_token(), application_name='DAIAERP')
        # MOCKED: user/company parameters (company RUT is hard-coded).
        result = uploader.send_document(user_rut=session['RUT'], company_rut='76087419-1', document_path=None, doc_id=document_id, document_content=ready_to_upload)
        # Advance the state to "ENVIADO" (sent), recording the upload result.
        document_service.set_document_state(document_id=document_id, state=3, serial=token.get_token(), result=str(result))
    else:
        print("DryRun habilitado, no se envió información al SII.")
    return str(document_id), 200
@app.route('/dte/<string:document_id>/sii/print', methods=['GET'])
@cross_origin()
def api_print_sent_dte(document_id):
    """Return the PDF of an already-sent DTE, generating and caching it on demand.

    Serves the cached PDF when the document row already has one; otherwise
    rebuilds the DTE objects from the stored XML, renders the PDF and stores
    it back on the document.
    """
    #user_id = session['user_id']
    pdf = PDFGenerator()
    builder = DTEBuidler()
    # NOTE(review): hard-coded user id 1 -- confirm whether the session user
    # should be used instead (see the commented line above).
    doc_service = DocumentService(int(1))
    binary_pdf = b''
    pdfFilename = ''
    dte = doc_service.get_document(document_id)
    if dte:
        if dte.pdf_file and len(dte.pdf_file) > 0:
            # Cached PDF available: serve it directly.
            binary_pdf = dte.pdf_file
            pdfFilename = "T" + str(dte.document_type) + "I" + str(dte.document_number) + ".pdf"
        else:
            # Rebuild from the stored XML (declaration stripped first),
            # render the PDF and cache it on the document row.
            payload, documents = builder.from_string(dte.xml_string.replace('<?xml version="1.0" encoding="ISO-8859-1"?>', ''))
            dtes = [c for a,b,c in documents]
            for dte_object in dtes:
                params = dte_object.to_template_parameters()
                params['Header']['Specifics'] = json.loads(dte.json_string)
                pdfFilename, binary_pdf = pdf.generate_binary(dte_object, cedible=False, preview=0, template_parameters=params)
                doc_service.update_document(document_id=document_id, \
                    pdf_file=binary_pdf)
    response = make_response(binary_pdf)
    response.headers['Content-Type'] = 'application/pdf'
    response.headers['Content-Disposition'] = \
        'attachment; filename=%s' % pdfFilename
    return response
acf7cd53e383b4fc0f29dd5c0d5e5d6a7ff32a47 | 10,579 | py | Python | mesonbuild/scripts/meson_test.py | NNemec/meson | d72a5c14f83253bafaf6b2531442d981ea1df2ed | [
"Apache-2.0"
] | null | null | null | mesonbuild/scripts/meson_test.py | NNemec/meson | d72a5c14f83253bafaf6b2531442d981ea1df2ed | [
"Apache-2.0"
] | null | null | null | mesonbuild/scripts/meson_test.py | NNemec/meson | d72a5c14f83253bafaf6b2531442d981ea1df2ed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2013-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mesonbuild
from .. import build
import sys, os, subprocess, time, datetime, pickle, multiprocessing, json
import concurrent.futures as conc
import argparse
import platform
import signal
def is_windows():
    """Return True on native Windows or a MinGW environment."""
    system = platform.system().lower()
    if system == 'windows':
        return True
    return 'mingw' in system
# Module-level state shared between the helpers below (reset by run()).
collected_logs = []  # formatted logs of tests with unexpected outcomes
error_count = 0      # number of tests whose pass/fail outcome was unexpected
options = None       # parsed CLI options, set by run()

# Command-line interface. 'args' is the pickled test-data file.
parser = argparse.ArgumentParser()
parser.add_argument('--wrapper', default=None, dest='wrapper',
                    help='wrapper to run tests with (e.g. valgrind)')
parser.add_argument('--wd', default=None, dest='wd',
                    help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
                    help='Only run tests belonging to this suite.')
parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
                    help='Do not split stderr and stdout in test logs.')
# Typo fix in the help text: "faling" -> "failing".
parser.add_argument('--print-errorlogs', default=False, action='store_true',
                    help="Whether to print failing tests' logs.")
parser.add_argument('args', nargs='+')
class TestRun():
    """Outcome of one executed test: verdict, exit code, timing and output."""

    def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd,
                 env):
        self.res = res
        self.returncode = returncode
        self.duration = duration
        self.stdo = stdo
        self.stde = stde
        self.cmd = cmd
        self.env = env
        self.should_fail = should_fail

    def get_log(self):
        """Render this run as a human-readable log fragment."""
        log = '--- command ---\n'
        if self.cmd is None:
            log += 'NONE\n'
        else:
            env_str = ' '.join("%s='%s'" % item for item in self.env.items())
            log += "\n%s %s\n" % (env_str, ' '.join(self.cmd))
        if self.stdo:
            log += '--- stdout ---\n' + self.stdo
        if self.stde:
            if not log.endswith('\n'):
                log += '\n'
            log += '--- stderr ---\n' + self.stde
        if not log.endswith('\n'):
            log += '\n'
        log += '-------\n\n'
        return log
def decode(stream):
    """Decode raw subprocess output: try UTF-8, fall back to latin-1 (lossy)."""
    for codec, errors in (('utf-8', 'strict'), ('iso-8859-1', 'ignore')):
        try:
            return stream.decode(codec, errors)
        except UnicodeDecodeError:
            continue
def write_json_log(jsonlogfile, test_name, result):
    """Append one test result as a single JSON line to the open log file."""
    entry = {
        'name': test_name,
        'stdout': result.stdo,
        'result': result.res,
        'duration': result.duration,
        'returncode': result.returncode,
        'command': result.cmd,
        'env': result.env,
    }
    # stderr is only recorded when the test actually produced any.
    if result.stde:
        entry['stderr'] = result.stde
    jsonlogfile.write(json.dumps(entry) + '\n')
def run_with_mono(fname):
    """Return True when *fname* is a CLI .exe that must be launched via mono.

    On Windows .exe files run natively; elsewhere they need the mono runtime.
    """
    # Idiom: return the boolean expression directly instead of if/return
    # True/return False.
    return fname.endswith('.exe') and not is_windows()
def run_single_test(wrap, test):
    """Execute one test, honouring wrappers, cross-compilation and timeouts.

    *wrap* is the optional wrapper command list (e.g. ['valgrind']); *test*
    is the unpickled test description. Returns a TestRun with the verdict
    ('OK', 'FAIL', 'SKIP' or 'TIMEOUT').
    """
    global options
    # Pick the launcher appropriate for the test binary.
    if test.fname[0].endswith('.jar'):
        cmd = ['java', '-jar'] + test.fname
    elif not test.is_cross and run_with_mono(test.fname[0]):
        cmd = ['mono'] + test.fname
    else:
        if test.is_cross:
            if test.exe_runner is None:
                # Can not run test on cross compiled executable
                # because there is no execute wrapper.
                cmd = None
            else:
                cmd = [test.exe_runner] + test.fname
        else:
            cmd = test.fname
    if cmd is None:
        # No way to execute this binary on the build machine: mark skipped.
        res = 'SKIP'
        duration = 0.0
        stdo = 'Not run because can not execute cross compiled binaries.'
        stde = None
        returncode = -1
    else:
        # Valgrind gets the test's valgrind-specific arguments spliced in.
        if len(wrap) > 0 and 'valgrind' in wrap[0]:
            cmd = wrap + test.valgrind_args + cmd + test.cmd_args
        else:
            cmd = wrap + cmd + test.cmd_args
        starttime = time.time()
        child_env = os.environ.copy()
        # NOTE(review): this mutates the shared test object in place when the
        # env is an EnvironmentVariables instance.
        if isinstance(test.env, build.EnvironmentVariables):
            test.env = test.env.get_env(child_env)
        child_env.update(test.env)
        if len(test.extra_paths) > 0:
            child_env['PATH'] = child_env['PATH'] + ';'.join([''] + test.extra_paths)
        # Start the child in its own session on POSIX so the whole process
        # group can be killed on timeout; Windows has no setsid.
        if is_windows():
            setsid = None
        else:
            setsid = os.setsid
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE if options and options.split else subprocess.STDOUT,
                             env=child_env,
                             cwd=test.workdir,
                             preexec_fn=setsid)
        timed_out = False
        try:
            (stdo, stde) = p.communicate(timeout=test.timeout)
        except subprocess.TimeoutExpired:
            timed_out = True
            # Python does not provide multiplatform support for
            # killing a process and all its children so we need
            # to roll our own.
            if is_windows():
                subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
            else:
                os.killpg(os.getpgid(p.pid), signal.SIGKILL)
            (stdo, stde) = p.communicate()
        endtime = time.time()
        duration = endtime - starttime
        stdo = decode(stdo)
        if stde:
            stde = decode(stde)
        # Verdict: a should-fail test passes by returning non-zero.
        if timed_out:
            res = 'TIMEOUT'
        elif (not test.should_fail and p.returncode == 0) or \
                (test.should_fail and p.returncode != 0):
            res = 'OK'
        else:
            res = 'FAIL'
        returncode = p.returncode
    return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
def print_stats(numlen, tests, name, result, i, logfile, jsonlogfile):
    """Print one aligned result line and record it in the text/JSON logs.

    Also updates the module-level error_count, and collects the full log of
    unexpected outcomes when --print-errorlogs was given.
    """
    global collected_logs, error_count, options
    # Right-align the running counter to the width of the total test count.
    startpad = ' '*(numlen - len('%d' % (i+1)))
    num = '%s%d/%d' % (startpad, i+1, len(tests))
    padding1 = ' '*(38-len(name))
    padding2 = ' '*(8-len(result.res))
    result_str = '%s %s %s%s%s%5.2f s' % \
        (num, name, padding1, result.res, padding2, result.duration)
    print(result_str)
    result_str += "\n\n" + result.get_log()
    # Unexpected outcome: failed when it should pass, or vice versa.
    if (result.returncode != 0) != result.should_fail:
        error_count += 1
        if options.print_errorlogs:
            collected_logs.append(result_str)
    logfile.write(result_str)
    write_json_log(jsonlogfile, name, result)
def drain_futures(futures):
    """Block on every queued future in order and report each result."""
    for entry in futures:
        (result, numlen, tests, name, idx, logfile, jsonlogfile) = entry
        print_stats(numlen, tests, name, result.result(), idx, logfile, jsonlogfile)
def filter_tests(suite, tests):
    """Return the tests belonging to *suite*, or all of them when suite is None."""
    if suite is None:
        return tests
    return [t for t in tests if suite in t.suite]
def run_tests(datafilename):
    """Run every test pickled in *datafilename*, writing text and JSON logs.

    Worker count comes from MESON_TESTTHREADS (default: CPU count).
    Non-parallel tests drain the queue and run inline. Returns the text log
    filename, or None when no tests are defined.
    """
    global options
    logfile_base = 'meson-logs/testlog'
    # Wrapped runs (e.g. under valgrind) get wrapper-specific log files.
    if options.wrapper is None:
        wrap = []
        logfilename = logfile_base + '.txt'
        jsonlogfilename = logfile_base+ '.json'
    else:
        wrap = [options.wrapper]
        logfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.txt'
        jsonlogfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.json'
    with open(datafilename, 'rb') as f:
        tests = pickle.load(f)
    if len(tests) == 0:
        print('No tests defined.')
        return
    # Width of the test counter, for aligned "  3/120" style output.
    numlen = len('%d' % len(tests))
    varname = 'MESON_TESTTHREADS'
    if varname in os.environ:
        try:
            num_workers = int(os.environ[varname])
        except ValueError:
            print('Invalid value in %s, using 1 thread.' % varname)
            num_workers = 1
    else:
        num_workers = multiprocessing.cpu_count()
    executor = conc.ThreadPoolExecutor(max_workers=num_workers)
    futures = []
    filtered_tests = filter_tests(options.suite, tests)
    with open(jsonlogfilename, 'w') as jsonlogfile, \
            open(logfilename, 'w') as logfile:
        logfile.write('Log of Meson test suite run on %s.\n\n' %
                      datetime.datetime.now().isoformat())
        for i, test in enumerate(filtered_tests):
            # Display name: prefix with the suite unless the suite is empty.
            if test.suite[0] == '':
                visible_name = test.name
            else:
                if options.suite is not None:
                    visible_name = options.suite + ' / ' + test.name
                else:
                    visible_name = test.suite[0] + ' / ' + test.name
            if not test.is_parallel:
                # Serial test: finish everything queued so far, then run inline.
                drain_futures(futures)
                futures = []
                res = run_single_test(wrap, test)
                print_stats(numlen, filtered_tests, visible_name, res, i,
                            logfile, jsonlogfile)
            else:
                f = executor.submit(run_single_test, wrap, test)
                futures.append((f, numlen, filtered_tests, visible_name, i,
                                logfile, jsonlogfile))
        drain_futures(futures)
    return logfilename
def run(args):
    """Entry point: parse *args*, run the pickled test suite, print failures.

    Returns the number of tests with unexpected outcomes, suitable as an
    exit status (or 1 on a usage error).
    """
    global collected_logs, error_count, options
    collected_logs = []  # To avoid state leaks when invoked multiple times (running tests in-process)
    error_count = 0
    options = parser.parse_args(args)
    if len(options.args) != 1:
        print('Test runner for Meson. Do not run on your own, mmm\'kay?')
        print('%s [data file]' % sys.argv[0])
        # BUG FIX: the original fell through after printing usage and kept
        # running with a bogus argument list; bail out with an error status.
        return 1
    if options.wd is not None:
        os.chdir(options.wd)
    datafile = options.args[0]
    logfilename = run_tests(datafile)
    if len(collected_logs) > 0:
        # Show at most 10 failing logs, each truncated to its last 100 lines.
        if len(collected_logs) > 10:
            print('\nThe output from 10 first failed tests:\n')
        else:
            print('\nThe output from the failed tests:\n')
        for log in collected_logs[:10]:
            lines = log.splitlines()
            if len(lines) > 100:
                print(lines[0])
                print('--- Listing only the last 100 lines from a long log. ---')
                lines = lines[-99:]
            for line in lines:
                print(line)
    print('Full log written to %s.' % logfilename)
    return error_count
if __name__ == '__main__':
    # Exit status is the number of tests with unexpected outcomes.
    sys.exit(run(sys.argv[1:]))
| 36.605536 | 104 | 0.570753 |
acf7cd9ff5af6e2c3d078cae887e4147ec68d8bc | 6,355 | py | Python | src/cnn_multifilter_large_task1.py | boknilev/dsl-char-cnn | 2a61b4eb8f5e5604b4042fdee21f24b73537d724 | [
"MIT"
] | 15 | 2017-02-24T03:30:49.000Z | 2021-07-14T07:54:29.000Z | src/cnn_multifilter_large_task1.py | boknilev/dsl-char-cnn | 2a61b4eb8f5e5604b4042fdee21f24b73537d724 | [
"MIT"
] | null | null | null | src/cnn_multifilter_large_task1.py | boknilev/dsl-char-cnn | 2a61b4eb8f5e5604b4042fdee21f24b73537d724 | [
"MIT"
] | 2 | 2017-03-16T02:26:48.000Z | 2019-05-01T06:01:43.000Z | '''Character CNN code for DSL 2016 task 2
Partly based on:
https://github.com/fchollet/keras/blob/master/examples/imdb_cnn.py
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
import tensorflow as tf
tf.set_random_seed(1337) # probably not needed
from keras.preprocessing import sequence
from keras.models import Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Embedding, merge
from keras.layers import Convolution1D, MaxPooling1D
#from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.utils import np_utils
#from keras.regularizers import l1, l2, l1l2, activity_l1, activity_l2, activity_l1l2
#from keras.layers.normalization import BatchNormalization
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from data import load_data, load_labels, get_task1_alphabet, task1_train_file, task1_test_file, task1_labels_file
alphabet = get_task1_alphabet()
# limit tensorflow memory usage
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))
# set tensorflow random seed for reproducibility
# model file
model_file = "cnn_model_gpu_multifilter_large_morehidden_moredrop_task1.hdf5"
# set parameters:
print('Hyperparameters:')
alphabet_size = len(alphabet) + 2 # add 2, one padding and unknown chars
print('Alphabet size:', alphabet_size)
maxlen = 400
print('Max text len:', maxlen)
batch_size = 64
print('Batch size:', batch_size)
embedding_dims = 50
print('Embedding dim:', embedding_dims)
nb_filters = [50,100,150,200,200,200,200]
print('Number of filters:', nb_filters)
filter_lengths = [1,2,3,4,5,6,7]
print('Filter lengths:', filter_lengths)
hidden_dims = 500
print('Hidden dems:', hidden_dims)
nb_epoch = 30
embedding_droupout = 0.2
print('Embedding dropout:', embedding_droupout)
fc_dropout = 0.7
print('Fully-connected dropout:', fc_dropout)
print('Loading data...')
(X_train, y_train), (X_test, y_test), num_classes = load_data(task1_train_file, task1_test_file, alphabet)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_test = np_utils.to_categorical(y_test, num_classes)
print('Build model...')
main_input = Input(shape=(maxlen,))
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
embedding_layer = Embedding(alphabet_size,
embedding_dims,
input_length=maxlen,
dropout=embedding_droupout)
embedded = embedding_layer(main_input)
# we add a Convolution1D for each filter length, which will learn nb_filters[i]
# word group filters of size filter_lengths[i]:
convs = []
for i in xrange(len(nb_filters)):
conv_layer = Convolution1D(nb_filter=nb_filters[i],
filter_length=filter_lengths[i],
border_mode='valid',
activation='relu',
subsample_length=1)
conv_out = conv_layer(embedded)
# we use max pooling:
conv_out = MaxPooling1D(pool_length=conv_layer.output_shape[1])(conv_out)
# We flatten the output of the conv layer,
# so that we can concat all conv outpus and add a vanilla dense layer:
conv_out = Flatten()(conv_out)
convs.append(conv_out)
# concat all conv outputs
x = merge(convs, mode='concat') if len(convs) > 1 else convs[0]
#concat = BatchNormalization()(concat)
# We add a vanilla hidden layer:
x = Dense(hidden_dims)(x)
x = Dropout(fc_dropout)(x)
x = Activation('relu')(x)
# We project onto number of classes output layer, and squash it with a softmax:
main_output = Dense(num_classes, activation='softmax')(x)
# finally, define the model
model = Model(input=main_input, output=main_output)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
# Training callbacks: early stopping, best-model checkpointing, TensorBoard.
# BUG FIX: patience must be an int -- Keras compares it numerically against
# its epoch wait counter; the original passed the string '10'.
stopping = EarlyStopping(monitor='val_loss', patience=10)
checkpointer = ModelCheckpoint(filepath=model_file, verbose=1, save_best_only=True)
tensorboard = TensorBoard(log_dir="./logs-multifilter-large-morehidden-moredrop-task1", write_graph=False)
model.fit(X_train, Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          validation_data=(X_test, Y_test),
          callbacks=[stopping, checkpointer, tensorboard])
# Evaluate the final (last-epoch) model on the test set.
probabilities = model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
idx2label = load_labels(task1_labels_file)
#with open('cnn_predictions.txt', 'w') as g:
#    for i in xrange(len(y_test)):
#        g.write(' '.join([str(v) for v in X_test[i]]) + '\t' + idx2label.get(y_test[i], 'ERROR') + '\t' + idx2label.get(predictions[i], 'ERROR') + '\n')
print('Performance of final model (not necessarily best model):')
print('========================================================')
cm = confusion_matrix(y_test, predictions)
print('Confusion matrix:')
print(cm)
acc = accuracy_score(y_test, predictions)
print('Accuracy score:')
print(acc)
labels = [label for (idx, label) in sorted(idx2label.items())]
score_report = classification_report(y_test, predictions, target_names=labels)
print('Score report:')
print(score_report)

# Re-load the checkpointed best model (lowest val_loss) and evaluate it too.
best_model = load_model(model_file)
probabilities = best_model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
print('Performance of best model:')
print('==========================')
cm = confusion_matrix(y_test, predictions)
print('Confusion matrix:')
print(cm)
acc = accuracy_score(y_test, predictions)
print('Accuracy score:')
print(acc)
labels = [label for (idx, label) in sorted(idx2label.items())]
score_report = classification_report(y_test, predictions, target_names=labels)
print('Score report:')
print(score_report)
| 38.053892 | 153 | 0.738316 |
acf7cdb860bb6d950ed54a0cfede389ea1e56b9c | 261 | py | Python | roglick/components/display.py | Kromey/roglick | b76202af71df0c30be0bd5f06a3428c990476e0e | [
"MIT"
] | 6 | 2015-05-05T21:28:35.000Z | 2019-04-14T13:42:38.000Z | roglick/components/display.py | Kromey/roglick | b76202af71df0c30be0bd5f06a3428c990476e0e | [
"MIT"
] | null | null | null | roglick/components/display.py | Kromey/roglick | b76202af71df0c30be0bd5f06a3428c990476e0e | [
"MIT"
] | null | null | null | from roglick.engine import colors
from roglick.engine.ecs import ComponentBase
class PositionComponent(ComponentBase):
    """Entity position on the map; defaults to the origin (0, 0)."""
    _properties = (('x', 0), ('y', 0))
class SpriteComponent(ComponentBase):
    """How an entity is drawn: its glyph and color (default: blank, white)."""
    _properties = (('glyph', ' '), ('color', colors.white))
| 21.75 | 59 | 0.701149 |
acf7cdc0924688c08be6f54f23638b02c0bb7279 | 4,159 | py | Python | vim/vim.symlink/plugin/py/lib/thrift/protocol/TProtocol.py | xsyn/dotfiles | 91684c1f50e4ba91b24e73adc39faf6b73c11f48 | [
"MIT"
] | null | null | null | vim/vim.symlink/plugin/py/lib/thrift/protocol/TProtocol.py | xsyn/dotfiles | 91684c1f50e4ba91b24e73adc39faf6b73c11f48 | [
"MIT"
] | null | null | null | vim/vim.symlink/plugin/py/lib/thrift/protocol/TProtocol.py | xsyn/dotfiles | 91684c1f50e4ba91b24e73adc39faf6b73c11f48 | [
"MIT"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.Thrift import *
class TProtocolException(TException):
    """Custom Protocol Exception class"""

    # Error category codes, carried in self.type.
    UNKNOWN = 0
    INVALID_DATA = 1
    NEGATIVE_SIZE = 2
    SIZE_LIMIT = 3
    BAD_VERSION = 4

    def __init__(self, type=UNKNOWN, message=None):
        """Create a protocol exception of category *type* with an optional message."""
        TException.__init__(self, message)
        self.type = type
class TProtocolBase:
    """Base class for Thrift protocol driver.

    Concrete protocols (binary, compact, JSON, ...) subclass this and
    implement the read*/write* methods; the stubs here are no-ops.
    skip() is generic: it reads and discards a value of any TType,
    recursing into structs, maps, sets and lists.
    """

    def __init__(self, trans):
        # trans: the underlying TTransport all reads/writes go through.
        self.trans = trans

    # ---- write stubs (override in concrete protocols) ----

    def writeMessageBegin(self, name, type, seqid):
        pass

    def writeMessageEnd(self):
        pass

    def writeStructBegin(self, name):
        pass

    def writeStructEnd(self):
        pass

    def writeFieldBegin(self, name, type, id):
        pass

    def writeFieldEnd(self):
        pass

    def writeFieldStop(self):
        pass

    def writeMapBegin(self, ktype, vtype, size):
        pass

    def writeMapEnd(self):
        pass

    def writeListBegin(self, etype, size):
        pass

    def writeListEnd(self):
        pass

    def writeSetBegin(self, etype, size):
        pass

    def writeSetEnd(self):
        pass

    def writeBool(self, bool):
        pass

    def writeByte(self, byte):
        pass

    def writeI16(self, i16):
        pass

    def writeI32(self, i32):
        pass

    def writeI64(self, i64):
        pass

    def writeDouble(self, dub):
        pass

    def writeString(self, str):
        pass

    # ---- read stubs (override in concrete protocols) ----

    def readMessageBegin(self):
        pass

    def readMessageEnd(self):
        pass

    def readStructBegin(self):
        pass

    def readStructEnd(self):
        pass

    def readFieldBegin(self):
        pass

    def readFieldEnd(self):
        pass

    def readMapBegin(self):
        pass

    def readMapEnd(self):
        pass

    def readListBegin(self):
        pass

    def readListEnd(self):
        pass

    def readSetBegin(self):
        pass

    def readSetEnd(self):
        pass

    def readBool(self):
        pass

    def readByte(self):
        pass

    def readI16(self):
        pass

    def readI32(self):
        pass

    def readI64(self):
        pass

    def readDouble(self):
        pass

    def readString(self):
        pass

    def skip(self, type):
        """Read and discard one value of *type*, recursing into containers."""
        if type == TType.STOP:
            return
        elif type == TType.BOOL:
            self.readBool()
        elif type == TType.BYTE:
            self.readByte()
        elif type == TType.I16:
            self.readI16()
        elif type == TType.I32:
            self.readI32()
        elif type == TType.I64:
            self.readI64()
        elif type == TType.DOUBLE:
            self.readDouble()
        elif type == TType.STRING:
            self.readString()
        elif type == TType.STRUCT:
            # Skip fields one by one until the STOP marker.
            name = self.readStructBegin()
            while True:
                (name, type, id) = self.readFieldBegin()
                if type == TType.STOP:
                    break
                self.skip(type)
                self.readFieldEnd()
            self.readStructEnd()
        elif type == TType.MAP:
            (ktype, vtype, size) = self.readMapBegin()
            for i in range(size):
                self.skip(ktype)
                self.skip(vtype)
            self.readMapEnd()
        elif type == TType.SET:
            (etype, size) = self.readSetBegin()
            for i in range(size):
                self.skip(etype)
            self.readSetEnd()
        elif type == TType.LIST:
            (etype, size) = self.readListBegin()
            for i in range(size):
                self.skip(etype)
            self.readListEnd()
class TProtocolFactory:
    """Factory interface: subclasses return a protocol bound to *trans*."""
    def getProtocol(self, trans):
        pass
| 20.18932 | 62 | 0.619139 |
acf7cdecd2059e91658cc87b516725170b713868 | 484 | py | Python | converter/rst/ref.py | codio/book-converter | ea3fd75f2ca8704ffad6bfcc83cf7af557bcd0b8 | [
"MIT"
] | 1 | 2019-09-14T12:22:25.000Z | 2019-09-14T12:22:25.000Z | converter/rst/ref.py | codio/book-converter | ea3fd75f2ca8704ffad6bfcc83cf7af557bcd0b8 | [
"MIT"
] | 4 | 2020-08-21T08:24:40.000Z | 2022-03-11T23:57:03.000Z | converter/rst/ref.py | codio/book-converter | ea3fd75f2ca8704ffad6bfcc83cf7af557bcd0b8 | [
"MIT"
] | null | null | null | import re
class Ref(object):
def __init__(self, source_string):
self.str = source_string
self._ref_re = re.compile(r""":(ref|chap):`(?P<name>.*?)(?P<label_name><.*?>)?`""", flags=re.DOTALL)
@staticmethod
def _ref(matchobj):
name = matchobj.group('name')
name = name.strip()
return f'Chapter: **{name}**'
def convert(self):
output = self.str
output = self._ref_re.sub(self._ref, output)
return output
| 25.473684 | 108 | 0.576446 |
acf7ce383c687038b005f8a03557b4aa882b849b | 5,355 | py | Python | discord/invite.py | Werseter/discord.py | 00a659c6526b2445162b52eaf970adbd22c6d35d | [
"MIT"
] | null | null | null | discord/invite.py | Werseter/discord.py | 00a659c6526b2445162b52eaf970adbd22c6d35d | [
"MIT"
] | 1 | 2019-01-24T09:33:42.000Z | 2019-01-24T09:33:42.000Z | discord/invite.py | Werseter/discord.py | 00a659c6526b2445162b52eaf970adbd22c6d35d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .utils import parse_time
from .mixins import Hashable
from .object import Object
class Invite(Hashable):
    """Represents a Discord :class:`Guild` or :class:`abc.GuildChannel` invite.

    Depending on the way this object was created, some of the attributes can
    have a value of ``None``.

    .. container:: operations

        .. describe:: x == y

            Checks if two invites are equal.

        .. describe:: x != y

            Checks if two invites are not equal.

        .. describe:: hash(x)

            Returns the invite hash.

        .. describe:: str(x)

            Returns the invite URL.

    Attributes
    -----------
    max_age: :class:`int`
        How long the before the invite expires in seconds. A value of 0 indicates that it doesn't expire.
    code: :class:`str`
        The URL fragment used for the invite.
    guild: :class:`Guild`
        The guild the invite is for.
    revoked: :class:`bool`
        Indicates if the invite has been revoked.
    created_at: `datetime.datetime`
        A datetime object denoting the time the invite was created.
    temporary: :class:`bool`
        Indicates that the invite grants temporary membership.
        If True, members who joined via this invite will be kicked upon disconnect.
    uses: :class:`int`
        How many times the invite has been used.
    max_uses: :class:`int`
        How many times the invite can be used.
    inviter: :class:`User`
        The user who created the invite.
    channel: :class:`abc.GuildChannel`
        The channel the invite is for.
    """

    __slots__ = ('max_age', 'code', 'guild', 'revoked', 'created_at', 'uses',
                 'temporary', 'max_uses', 'inviter', 'channel', '_state')

    def __init__(self, *, state, data):
        self._state = state
        self.max_age = data.get('max_age')
        self.code = data.get('code')
        self.guild = data.get('guild')
        self.revoked = data.get('revoked')
        self.created_at = parse_time(data.get('created_at'))
        self.temporary = data.get('temporary')
        self.uses = data.get('uses')
        self.max_uses = data.get('max_uses')
        # The inviter payload may be absent; when present, cache the user
        # object through the connection state.
        inviter_data = data.get('inviter')
        self.inviter = None if inviter_data is None else self._state.store_user(inviter_data)
        self.channel = data.get('channel')

    @classmethod
    def from_incomplete(cls, *, state, data):
        """Build an Invite from a partial API payload.

        When the guild is not in the local cache, :class:`Object` stand-ins
        are populated with the payload's name/splash data instead.
        """
        guild_id = int(data['guild']['id'])
        channel_id = int(data['channel']['id'])
        guild = state._get_guild(guild_id)
        if guild is not None:
            channel = guild.get_channel(channel_id)
        else:
            # Cache miss: fabricate lightweight stand-ins from the payload.
            guild = Object(id=guild_id)
            channel = Object(id=channel_id)
            guild.name = data['guild']['name']
            guild.splash = data['guild']['splash']
            guild.splash_url = ''
            if guild.splash:
                guild.splash_url = 'https://cdn.discordapp.com/splashes/{0.id}/{0.splash}.jpg?size=2048'.format(guild)
            channel.name = data['channel']['name']
        data['guild'] = guild
        data['channel'] = channel
        return cls(state=state, data=data)

    def __str__(self):
        return self.url

    def __repr__(self):
        return '<Invite code={0.code!r}>'.format(self)

    def __hash__(self):
        return hash(self.code)

    @property
    def id(self):
        """Returns the proper code portion of the invite."""
        return self.code

    @property
    def url(self):
        """A property that retrieves the invite URL."""
        return 'http://discord.gg/' + self.code

    async def delete(self, *, reason=None):
        """|coro|

        Revokes the instant invite.

        You must have the :attr:`~Permissions.manage_channels` permission to do this.

        Parameters
        -----------
        reason: Optional[str]
            The reason for deleting this invite. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You do not have permissions to revoke invites.
        NotFound
            The invite is invalid or expired.
        HTTPException
            Revoking the invite failed.
        """
        await self._state.http.delete_invite(self.code, reason=reason)
| 32.652439 | 118 | 0.634547 |
acf7cea5b6c3af158acfab9b9fb86ea97fb221c8 | 2,413 | py | Python | pyfunceble_docker/config/client.py | PyFunceble/docker | 5daceda6b96a397c5403e7b85be100c045cb5d7a | [
"MIT"
] | 1 | 2020-05-16T14:20:41.000Z | 2020-05-16T14:20:41.000Z | pyfunceble_docker/config/client.py | PyFunceble/docker | 5daceda6b96a397c5403e7b85be100c045cb5d7a | [
"MIT"
] | 1 | 2021-09-13T22:01:40.000Z | 2021-10-09T17:29:05.000Z | pyfunceble_docker/config/client.py | PyFunceble/docker | 5daceda6b96a397c5403e7b85be100c045cb5d7a | [
"MIT"
] | null | null | null | """
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
This file is part of the PyFunceble project.
Provides the docker clients.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2019, 2020 PyFunceble
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Third-party Docker SDK, used to talk to the local Docker daemon.
import docker

# Default registry host assumed when an image name has no explicit registry.
REGISTRY_URL = "docker.io"

# Low-level API client bound to the standard Unix socket exposed by dockerd.
# NOTE(review): assumes a Unix-socket daemon — confirm deployment targets
# (e.g. Windows named pipes or TCP daemons would need a different base_url).
docker_api_client = docker.APIClient(base_url="unix://var/run/docker.sock")
| 35.485294 | 88 | 0.585993 |
acf7cf6b85da9bf1bacf226186776e023d7e19a3 | 11,725 | py | Python | 60_Mastermind/python/mastermind.py | serranojl/basic-computer-games | b197b00deefe8b69d5b5f13d7f2003ccc9649a70 | [
"Unlicense"
] | null | null | null | 60_Mastermind/python/mastermind.py | serranojl/basic-computer-games | b197b00deefe8b69d5b5f13d7f2003ccc9649a70 | [
"Unlicense"
] | null | null | null | 60_Mastermind/python/mastermind.py | serranojl/basic-computer-games | b197b00deefe8b69d5b5f13d7f2003ccc9649a70 | [
"Unlicense"
] | null | null | null | import random
import sys
from typing import List, Union
# Global variables
# Shared game configuration/state; (re)initialised at the top of main().
colors = ["BLACK", "WHITE", "RED", "GREEN", "ORANGE", "YELLOW", "PURPLE", "TAN"]  # palette, index -> color name
color_letters = "BWRGOYPT"  # one-letter code per color, same order as `colors`
num_positions = 0  # pegs per combination (set from user input in main())
num_colors = 100  # sentinel > 8 so main()'s input loop runs at least once
human_score = 0  # cumulative moves used by the human (lower is better)
computer_score = 0  # cumulative moves used by the computer
def main() -> None:
    """Play `num_rounds` rounds of Mastermind.

    Each round has two phases: the human guesses the computer's secret
    combination, then the computer guesses the human's (by keeping a bitmap
    of still-consistent combinations). Scores accumulate the number of moves
    used; the lower total wins.
    """
    global colors, color_letters, num_positions, num_colors, human_score, computer_score
    colors = ["BLACK", "WHITE", "RED", "GREEN", "ORANGE", "YELLOW", "PURPLE", "TAN"]
    color_letters = "BWRGOYPT"
    num_colors = 100
    human_score = 0
    computer_score = 0
    # get user inputs for game conditions
    print("Mastermind")
    print("Creative Computing Morristown, New Jersey")
    while num_colors > 8:
        num_colors = int(input("Number of colors (max 8): "))  # C9 in BASIC
    num_positions = int(input("Number of positions: "))  # P9 in BASIC
    num_rounds = int(input("Number of rounds: "))  # R9 in BASIC
    possibilities = num_colors**num_positions
    all_possibilities = [1] * possibilities
    print(f"Number of possibilities {possibilities}")
    print("Color\tLetter")
    print("=====\t======")
    for element in range(0, num_colors):
        print(f"{colors[element]}\t{colors[element][0]}")
    current_round = 1
    while current_round <= num_rounds:
        print(f"Round number {current_round}")
        num_moves = 1
        guesses: List[List[Union[str, int]]] = []
        turn_over = False
        # ---- Phase 1: human guesses the computer's secret combination ----
        print("Guess my combination ...")
        # Pick the answer by its index, then step an odometer up to it
        # (see get_possibility); avoids materialising every combination.
        answer = int(possibilities * random.random())
        numeric_answer = [-1] * num_positions
        for _ in range(0, answer):
            numeric_answer = get_possibility(numeric_answer)
        # human_readable_answer = make_human_readable(numeric_answer, color_letters)
        while num_moves < 10 and not turn_over:
            print(f"Move # {num_moves} Guess : ")
            user_command = input("Guess ")
            if user_command == "BOARD":
                print_board(guesses)  # 2000
            elif user_command == "QUIT":  # 2500
                human_readable_answer = make_human_readable(
                    numeric_answer, color_letters
                )
                print(f"QUITTER! MY COMBINATION WAS: {human_readable_answer}")
                print("GOOD BYE")
                quit()
            elif len(user_command) != num_positions:  # 410
                print("BAD NUMBER OF POSITIONS")
            else:
                invalid_letters = get_invalid_letters(user_command)
                if invalid_letters > "":
                    print(f"INVALID GUESS: {invalid_letters}")
                else:
                    guess_results = compare_two_positions(
                        user_command, make_human_readable(numeric_answer, color_letters)
                    )
                    print(f"Results: {guess_results}")
                    if guess_results[1] == num_positions:  # correct guess
                        turn_over = True
                        print(f"You guessed it in {num_moves} moves!")
                        human_score = human_score + num_moves
                        print_score(computer_score, human_score)
                    else:
                        print(
                            "You have {} blacks and {} whites".format(
                                guess_results[1], guess_results[2]
                            )
                        )
                        num_moves = num_moves + 1
                        guesses.append(guess_results)
        if not turn_over:  # RAN OUT OF MOVES
            print("YOU RAN OUT OF MOVES! THAT'S ALL YOU GET!")
            print(
                "THE ACTUAL COMBINATION WAS: {}".format(
                    make_human_readable(numeric_answer, color_letters)
                )
            )
            human_score = human_score + num_moves
            print_score(computer_score, human_score)
        # ---- Phase 2: COMPUTER TURN ----
        # all_possibilities[i] == 1 means combination index i is still
        # consistent with every (blacks, whites) answer given so far.
        guesses = []
        turn_over = False
        inconsistent_information = False
        while not turn_over and not inconsistent_information:
            all_possibilities = [1] * possibilities
            num_moves = 1
            inconsistent_information = False
            print("NOW I GUESS. THINK OF A COMBINATION.")
            input("HIT RETURN WHEN READY: ")
            while num_moves < 10 and not turn_over and not inconsistent_information:
                found_guess = False
                # Start from a random index and scan (with wrap-around) for a
                # combination that hasn't been ruled out yet.
                computer_guess = int(possibilities * random.random())
                if (
                    all_possibilities[computer_guess] == 1
                ):  # random guess is possible, use it
                    found_guess = True
                    guess = computer_guess
                else:
                    for i in range(computer_guess, possibilities):
                        if all_possibilities[i] == 1:
                            found_guess = True
                            guess = i
                            break
                    if not found_guess:
                        for i in range(0, computer_guess):
                            if all_possibilities[i] == 1:
                                found_guess = True
                                guess = i
                                break
                if not found_guess:  # inconsistent info from user
                    print("YOU HAVE GIVEN ME INCONSISTENT INFORMATION.")
                    print("TRY AGAIN, AND THIS TIME PLEASE BE MORE CAREFUL.")
                    turn_over = True
                    inconsistent_information = True
                else:
                    numeric_guess = [-1] * num_positions
                    for _ in range(0, guess):
                        numeric_guess = get_possibility(numeric_guess)
                    human_readable_guess = make_human_readable(
                        numeric_guess, color_letters
                    )
                    print(f"My guess is: {human_readable_guess}")
                    blacks_str, whites_str = input(
                        "ENTER BLACKS, WHITES (e.g. 1,2): "
                    ).split(",")
                    blacks = int(blacks_str)
                    whites = int(whites_str)
                    if blacks == num_positions:  # Correct guess
                        print(f"I GOT IT IN {num_moves} MOVES")
                        turn_over = True
                        computer_score = computer_score + num_moves
                        print_score(computer_score, human_score)
                    else:
                        num_moves += 1
                        # Rule out every remaining combination that could not
                        # have produced at least this many blacks and whites.
                        for i in range(0, possibilities):
                            if all_possibilities[i] == 0:  # already ruled out
                                continue
                            numeric_possibility = [-1] * num_positions
                            for _ in range(0, i):
                                numeric_possibility = get_possibility(
                                    numeric_possibility
                                )
                            human_readable_possibility = make_human_readable(
                                numeric_possibility, color_letters
                            )  # 4000
                            comparison = compare_two_positions(
                                human_readable_possibility, human_readable_guess
                            )
                            print(comparison)
                            if (blacks > comparison[1]) or (whites > comparison[2]):  # type: ignore
                                all_possibilities[i] = 0
            if not turn_over:  # COMPUTER DID NOT GUESS
                print("I USED UP ALL MY MOVES!")
                print("I GUESS MY CPU IS JUST HAVING AN OFF DAY.")
                computer_score = computer_score + num_moves
                print_score(computer_score, human_score)
        current_round += 1
    print_score(computer_score, human_score, is_final_score=True)
    sys.exit()
# 470
def get_invalid_letters(user_command) -> str:
    """Return every character of *user_command* that is not a valid color
    letter for the current game configuration (empty string means the guess
    is valid)."""
    allowed = color_letters[:num_colors]
    return "".join(ch for ch in user_command if ch not in allowed)
# 2000
def print_board(guesses) -> None:
    """Show the human player's guess history (guess, blacks, whites) for the
    current round."""
    print("Board")
    print("Move\tGuess\tBlack White")
    for move_number, entry in enumerate(guesses, start=1):
        print(f"{move_number}\t{entry[0]}\t{entry[1]} {entry[2]}")
# 3500
# Easily the place for most optimization, since they generate every possibility
# every time when checking for potential solutions
# From the original article:
# "We did try a version that kept an actual list of all possible combinations
# (as a string array), which was significantly faster than this versionn but
# which ate tremendous amounts of memory."
def get_possibility(possibility) -> List[int]:
    """Advance *possibility* to the next combination, odometer-style.

    The list is a little-endian base-`num_colors` counter with
    `num_positions` digits; it is mutated in place and also returned.
    A list whose first entry is below 0 is treated as "uninitialised" and
    resets to the all-zero combination.
    """
    if possibility[0] <= -1:
        return [0] * num_positions
    digit = 0
    # Carry: zero out maxed digits until one can be incremented.
    while possibility[digit] >= num_colors - 1:
        possibility[digit] = 0
        digit += 1
    possibility[digit] += 1
    return possibility
# 4500
def compare_two_positions(guess: str, answer: str) -> List[Union[str, int]]:
    """Returns blacks (correct color and position) and whites (correct color only) for candidate position (guess) versus reference position (answer)."""
    # Matched characters are overwritten with low control characters
    # (chr(0), chr(1), ...) so they can never match again; the color letters
    # are all uppercase ASCII, so these markers cannot collide with them.
    # The original string is saved first so the result can report it intact.
    increment = 0
    blacks = 0
    whites = 0
    initial_guess = guess
    for pos in range(0, num_positions):
        if guess[pos] != answer[pos]:
            # Not an exact match: look for the same color elsewhere that is
            # not itself an exact match at its own position.
            for pos2 in range(0, num_positions):
                if not (
                    guess[pos] != answer[pos2] or guess[pos2] == answer[pos2]
                ):  # correct color but not correct place
                    whites = whites + 1
                    # consume both characters with distinct marker chars
                    answer = answer[:pos2] + chr(increment) + answer[pos2 + 1 :]
                    guess = guess[:pos] + chr(increment + 1) + guess[pos + 1 :]
                    increment = increment + 2
        else:  # correct color and placement
            blacks = blacks + 1
            # THIS IS DEVIOUSLY CLEVER
            guess = guess[:pos] + chr(increment + 1) + guess[pos + 1 :]
            answer = answer[:pos] + chr(increment) + answer[pos + 1 :]
            increment = increment + 2
    return [initial_guess, blacks, whites]
# 5000 + logic from 1160
def print_score(computer_score, human_score, is_final_score: bool = False) -> None:
    """Print score after each turn ends, including final score at end of game."""
    if not is_final_score:
        print("SCORE:")
    else:
        print("GAME OVER")
        print("FINAL SCORE:")
    print(f"     COMPUTER {computer_score}")
    print(f"     HUMAN    {human_score}")
# 4000, 5500, 6000 subroutines are all identical
def make_human_readable(num: List[int], color_letters) -> str:
    """Translate a numeric combination into its letter form.

    Each entry of *num* is an index into *color_letters* (it is passed
    through int() first, so float indices are tolerated as before).

    Improvement: single-pass str.join instead of repeated string
    concatenation in a loop (which is quadratic in the worst case).
    """
    return "".join(color_letters[int(digit)] for digit in num)
if __name__ == "__main__":
main()
| 43.106618 | 153 | 0.529126 |
acf7cf835079de552b223f8caa5935aa40646dc8 | 262 | py | Python | tests/test_cli.py | samuelcolvin/sasstastic | fac226d6dafab978de509986787e5b3472f3587a | [
"MIT"
] | 27 | 2020-05-20T16:42:43.000Z | 2021-04-19T04:29:06.000Z | tests/test_cli.py | samuelcolvin/sasstastic | fac226d6dafab978de509986787e5b3472f3587a | [
"MIT"
] | 8 | 2020-05-21T20:45:13.000Z | 2020-11-12T14:30:53.000Z | tests/test_cli.py | samuelcolvin/sasstastic | fac226d6dafab978de509986787e5b3472f3587a | [
"MIT"
] | 2 | 2020-05-21T13:41:19.000Z | 2021-09-23T10:45:17.000Z | from typer.testing import CliRunner
from sasstastic.cli import cli
# Shared Click/Typer test runner; invokes the CLI in-process (no subprocess).
runner = CliRunner()
def test_print_commands():
    """`--help` exits cleanly and shows the application description."""
    help_result = runner.invoke(cli, ['--help'])
    assert help_result.exit_code == 0
    assert 'Fantastic SASS and SCSS compilation' in help_result.output
| 21.833333 | 65 | 0.732824 |
acf7d0a3583b5a24a7319164fb9ace9967e88292 | 57 | py | Python | python/cendalytics/education/bp/__init__.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/cendalytics/education/bp/__init__.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/cendalytics/education/bp/__init__.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | from .education_analysis_api import EducationAnalysisAPI
| 28.5 | 56 | 0.912281 |
acf7d147ca6e2e4f4414ee5618a9b4fbb2887f90 | 2,960 | py | Python | support/cover2html.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 12 | 2015-01-15T01:13:42.000Z | 2022-03-04T21:14:27.000Z | support/cover2html.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 6 | 2015-01-19T19:46:51.000Z | 2019-02-05T20:20:26.000Z | support/cover2html.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2015-10-24T20:08:09.000Z | 2019-02-04T20:53:21.000Z | #!/usr/bin/env python
#
# Convert coverage output to HTML table.
#
# Copyright (c) 2006 Michal Kwiatkowski <michal@trivas.pl>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the author nor the names of his contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
def sort_by_cover(lines):
    """Sort coverage rows in place by ascending coverage percentage.

    Each row is a list whose index 3 holds a percentage string such as
    "85%"; the trailing "%" is stripped before numeric comparison.

    Bug fix: the original used `cmp` and `list.sort(compare)`, both of
    which exist only in Python 2; on Python 3 (which this file targets —
    see the `print()` calls) they raised NameError/TypeError. A key
    function replaces the comparator.
    """
    def get_percent(row):
        # "85%" -> 85
        return int(row[3][:-1])
    lines.sort(key=get_percent)
def make_row(line, header=False, emphasis=False):
    """Render one HTML table row from a list of cell values.

    header   -- use <th> cells instead of <td>
    emphasis -- wrap each cell value in <strong>
    """
    tag = 'th' if header else 'td'
    pieces = ['<tr>\n']
    for field in line:
        if emphasis:
            cell = '<%(tag)s><strong>%(field)s</strong></%(tag)s>\n' % \
                   {'tag': tag, 'field': field}
        else:
            cell = '<%(tag)s>%(field)s</%(tag)s>\n' % \
                   {'tag': tag, 'field': field}
        pieces.append(cell)
    pieces.append('</tr>\n')
    return ''.join(pieces)
def cover2html(text):
    """Convert `coverage` text output into an HTML table string.

    The first row becomes the header, the last row (TOTAL) is emphasised,
    dashed separator rows are dropped, and the remaining module rows are
    sorted by ascending coverage percentage.

    Bug fix: on Python 3 `map`/`filter` return iterators, so the original
    `.pop(0)`/`.pop()` calls raised AttributeError; the rows are now
    materialised as a real list first.
    """
    rows = [re.split(r'\s+', line, 4) for line in text.splitlines()]
    # Separator lines consist only of dashes: their first cell strips to "".
    rows = [row for row in rows if row[0].strip('-')]
    title_line = rows.pop(0)
    summary_line = rows.pop()
    sort_by_cover(rows)
    result = ['<table border="1">\n', make_row(title_line, header=True)]
    for row in rows:
        result.append(make_row(row))
    result.append(make_row(summary_line, emphasis=True))
    result.append('</table>\n')
    return ''.join(result)
# Filter mode: read a coverage report from stdin, write the HTML table to stdout.
if __name__ == '__main__':
    print(cover2html(sys.stdin.read()))
| 31.157895 | 79 | 0.684122 |
acf7d1c0b6ae0ff9c415989cc0213d7f67ce0763 | 1,960 | py | Python | hummingbot/connector/exchange/wazirx/wazirx_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | hummingbot/connector/exchange/wazirx/wazirx_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | hummingbot/connector/exchange/wazirx/wazirx_user_stream_tracker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | #!/usr/bin/env python
import asyncio
import logging
from typing import Optional, List
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.user_stream_tracker import UserStreamTracker
from hummingbot.connector.exchange.wazirx.wazirx_api_user_stream_data_source import WazirxAPIUserStreamDataSource
from hummingbot.connector.exchange.wazirx.wazirx_auth import WazirxAuth
from hummingbot.connector.exchange.wazirx.wazirx_constants import EXCHANGE_NAME
class WazirxUserStreamTracker(UserStreamTracker):
    """User-stream tracker for the WazirX connector.

    Lazily builds a :class:`WazirxAPIUserStreamDataSource` and pumps its
    user-stream events into the tracker's queue.
    """

    # Class-level logger cache shared by all instances.
    _cbpust_logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Bug fix: the original read/wrote `cls._bust_logger`, an attribute
        # that was never declared, so the first call raised AttributeError
        # (unless a base class happened to define it). The declared cache
        # `_cbpust_logger` is now used consistently.
        if cls._cbpust_logger is None:
            cls._cbpust_logger = logging.getLogger(__name__)
        return cls._cbpust_logger

    def __init__(
        self,
        wazirx_auth: Optional[WazirxAuth] = None,
        trading_pairs: Optional[List[str]] = None,
    ):
        """
        :param wazirx_auth: credentials used by the underlying data source.
        :param trading_pairs: pairs to track; defaults to an empty list.
            (Bug fix: the original used a mutable default argument `[]`,
            which is shared across all instances; `None` is the sentinel now.)
        """
        super().__init__()
        self._wazirx_auth: WazirxAuth = wazirx_auth
        self._trading_pairs: List[str] = [] if trading_pairs is None else trading_pairs
        self._ev_loop: asyncio.events.AbstractEventLoop = asyncio.get_event_loop()
        self._data_source: Optional[UserStreamTrackerDataSource] = None
        self._user_stream_tracking_task: Optional[asyncio.Task] = None

    @property
    def data_source(self) -> UserStreamTrackerDataSource:
        # Lazily construct the data source on first access.
        if not self._data_source:
            self._data_source = WazirxAPIUserStreamDataSource(
                wazirx_auth=self._wazirx_auth
            )
        return self._data_source

    @property
    def exchange_name(self) -> str:
        return EXCHANGE_NAME

    async def start(self):
        # Begin listening and forward events into self._user_stream.
        self._user_stream_tracking_task = asyncio.ensure_future(
            self.data_source.listen_for_user_stream(self._ev_loop, self._user_stream)
        )
        await asyncio.gather(self._user_stream_tracking_task)
acf7d3299d297f725c1260446dd47435cd862982 | 16,486 | py | Python | mc_libs/restClient.py | Locottus/Python | 8a6a864c54371fff2b9f34c3c2a69a387c6266f1 | [
"MIT"
] | null | null | null | mc_libs/restClient.py | Locottus/Python | 8a6a864c54371fff2b9f34c3c2a69a387c6266f1 | [
"MIT"
] | null | null | null | mc_libs/restClient.py | Locottus/Python | 8a6a864c54371fff2b9f34c3c2a69a387c6266f1 | [
"MIT"
] | null | null | null | import os,requests,json
#from flask_cors import CORS
#import flask
#https://realpython.com/python-requests/
#https://docs.python.org/3/library/queue.html
#local environment vars.
'''
base_url = os.environ['HealthJumpURL']
email = os.environ['HealthJumpEmail']
password = os.environ['HealthJumpPassword']
SecretKey = os.environ['HealthJumpSecretKey']
Version = os.environ['HealthJumpVersion']
ClientID = os.environ['HealthJumpClientID']
routePath = os.environ['HealthJumpServerPath']
apiPort = os.environ['HealthJumpServerApiPort']
'''
#DEV vars for reference when setting env.
# SECURITY(review): live-looking credentials are hard-coded below. Even for a
# sandbox tenant these belong in environment variables (see the commented-out
# os.environ block above) or a secrets manager — rotate them and remove from
# version control.
base_url = 'https://api.healthjump.com/hjdw/'  # API root; trailing slash required by the URL building below
email ='sandbox@healthjump.com'  # login for POST /authenticate
password = 'R-%Sx?qP%+RN69CS'  # password for POST /authenticate
SecretKey = 'yemj6bz8sskxi7wl4r2zk0ao77b2wdpvrceyoe6g'  # sent as the "Secretkey" header on every request
Version = '3.0'  # sent as the "Version" header on every request
ClientID = 'SBOX02'  # tenant id embedded in every endpoint path
routePath = r"/api/"  # Flask route prefix (server section below is commented out)
apiPort = 1600  # Flask port (unused while the server section is commented out)
def getClients(token, parm=None):
    """GET /{ClientID}/client_ids — list client ids; returns the decoded JSON body.

    token: bearer token from postAuthenticate().
    parm:  optional raw query string (no leading '?').
    Fixes: request timeout added (call could hang forever); response body is
    decoded once instead of twice (json.loads + response.json()).
    """
    print('getClients')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/client_ids'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()  # raises ValueError if the body is not JSON
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getAllergies(token, parm=None):
    """GET /{ClientID}/allergy — fetch allergy records; returns the decoded JSON body.

    Fixes: logged the wrong function name ('getClients', copy-paste leftover);
    timeout added; body decoded once instead of twice.
    """
    print('getAllergies')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/allergy'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getAppointments(token, parm=None):
    """GET /{ClientID}/appointment — fetch appointments; returns the decoded JSON body.

    Fixes: the success branch printed `response.json` (the bound method, not
    its result — the call parentheses were missing); timeout added; body
    decoded once instead of twice.
    """
    print('getAppointments')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/appointment'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getAttribution(token, parm=None):
    """GET /{ClientID}/attribution — fetch attribution records; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getAttribution')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/attribution'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getCharge(token, parm=None):
    """GET /{ClientID}/charge — fetch charge records; returns the decoded JSON body.

    parm example (see commented-out getChargeWParms below):
    "hj_modify_timestamp=btwn~2016-10-08 01:54:43~2016-10-13 20:54:43".
    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getCharge')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/charge'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
'''
def getChargeWParms(token,parm=None):
#?hj_modify_timestamp=btwn~2016-10-08 01:54:43.956800~2016-10-13 20:54:43.956820
print('getChargeWParms')
headers={"Secretkey" : SecretKey,
"Version":Version,
"Authorization":"Bearer " + token
}
response = requests.get('https://api.healthjump.com/hjdw/' + ClientID + '/charge?' + parm,headers = headers)
print(response)
print(response.headers)
#print(response.json()) #esta es la respuesta en json
if response.status_code == 200:
print('Success!')
print(json.loads(response.content))
else :
print('error')
print(json.loads(response.content))
'''
def getDemographics(token, parm=None):
    """GET /{ClientID}/demographic — fetch patient demographics; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getDemographics')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/demographic'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getDiagnosis(token, parm=None):
    """GET /{ClientID}/diagnosis — fetch diagnosis records; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getDiagnosis')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/diagnosis'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getEncounters(token, parm=None):
    """GET /{ClientID}/encounter — fetch encounters; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getEncounters')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/encounter'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getInmunization(token, parm=None):
    """GET /{ClientID}/immunization — fetch immunization records; returns the decoded JSON body.

    (Name kept as-is — callers use `getInmunization` — despite the spelling.)
    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getInmunization')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/immunization'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getLabOrder(token, parm=None):
    """GET /{ClientID}/lab_order — fetch lab orders; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getLabOrder')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/lab_order'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getLabResult(token, parm=None):
    """GET /{ClientID}/lab_result — fetch lab results; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getLabResult')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/lab_result'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getMedication(token, parm=None):
    """GET /{ClientID}/medication — fetch medication records; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getMedication')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/medication'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getPayers(token, parm=None):
    """GET /{ClientID}/payer_dim — fetch payer dimension records; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getPayers')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/payer_dim'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getProcedures(token, parm=None):
    """GET /{ClientID}/procedure — fetch procedure records; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getProcedures')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/procedure'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getProviders(token, parm=None):
    """GET /{ClientID}/provider — fetch provider records; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getProviders')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/provider'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getSocialHistory(token, parm=None):
    """GET /{ClientID}/social_history — fetch social-history records; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getSocialHistory')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/social_history'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getTransactions(token, parm=None):
    """GET /{ClientID}/transaction — fetch transactions; returns the decoded JSON body.

    Fixes: timeout added; body decoded once instead of twice.
    """
    print('getTransactions')
    headers = {"Secretkey": SecretKey,
               "Version": Version,
               "Authorization": "Bearer " + token}
    url = base_url + ClientID + '/transaction'
    if parm is not None:
        url += '?' + parm
    response = requests.get(url, headers=headers, timeout=60)
    print(response)
    print(response.headers)
    body = response.json()
    print('Success!' if response.status_code == 200 else 'error')
    print(body)
    return body
def getVitals(token, parm=None):
    """GET the /vitals endpoint for the configured client.

    token -- bearer token obtained from postAuthenticate()
    parm  -- optional raw query string, appended as-is
    Returns the decoded JSON response body.
    """
    print('getVitals')
    request_headers = {
        "Secretkey": SecretKey,
        "Version": Version,
        "Authorization": "Bearer " + token,
    }
    endpoint = base_url + ClientID + '/vitals'
    if parm is not None:
        endpoint = endpoint + '?' + parm
    response = requests.get(endpoint, headers=request_headers)
    print(response)
    print(response.headers)
    if response.status_code == 200:
        print('Success!')
    else:
        print('error')
    print(json.loads(response.content))
    return response.json()
def getUnload(token, parm=None):
    """GET the /appointment endpoint for the configured client.

    NOTE(review): despite its name, this hits '/appointment' — presumably a
    bulk-unload alias; confirm against the API documentation.

    token -- bearer token obtained from postAuthenticate()
    parm  -- optional raw query string, appended as-is
    Returns the decoded JSON response body.
    """
    print('getUnload')
    request_headers = {
        "Secretkey": SecretKey,
        "Version": Version,
        "Authorization": "Bearer " + token,
    }
    endpoint = base_url + ClientID + '/appointment'
    if parm is not None:
        endpoint = endpoint + '?' + parm
    response = requests.get(endpoint, headers=request_headers)
    print(response)
    print(response.headers)
    if response.status_code == 200:
        print('Success!')
    else:
        print('error')
    print(json.loads(response.content))
    return response.json()
# first step before request to an endpoint
def postAuthenticate():
    """Request a fresh bearer token from the authentication endpoint.

    Uses the module-level ``email`` and ``password`` credentials.
    Returns the token string on HTTP 200, or "" on any failure.

    Bugfix: the original printed ``token['token']`` before checking the
    status code, so a failed authentication raised KeyError (or a JSON
    decode error) instead of returning "".
    """
    print('asking for new token')
    body = {"email": email, "password": password}
    response = requests.post('https://api.healthjump.com/authenticate', data=body)
    if response.status_code != 200:
        # error: do not attempt to parse/print a token from a failed response
        print('error')
        return ""
    token = json.loads(response.content)
    print(token['token'])
    print('Success!')
    return token['token']
#SERVICES REST
# The string below is a disabled sketch of a Flask wrapper around these
# API helpers; it is intentionally kept as an inert string literal.
'''
from flask import Flask,request
app = Flask(__name__)
cors = CORS(app, resources={routePath + '*': {"origins": "*"}})
@app.route(routePath ,methods=['GET'])
def rootAddress():
    token = postAuthenticate()
    #hago los reqs
    return getAllergies(token)
'''
# Script entry point: currently only prints a banner; the commented lines
# show the manual-testing calls and server-startup variants used during
# development.
if __name__ == '__main__':
    #app.run(debug=True)
    print("starting")
    #app.run(ssl_context='adhoc',host='0.0.0.0', port=apiPort)
    #app.run(host='0.0.0.0', port=apiPort)
    #token = postAuthenticate()
    #getAppointments(token,"provider_id=1234564502")
    #getAppointments(token)
    #getAllergies(token)
| 25.880691 | 112 | 0.591835 |
acf7d3a6a64ae018d134508c3335d30bbcecbd06 | 244 | py | Python | vnpy/trader/gateway/bitmexGateway/__init__.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 11 | 2019-10-28T13:01:48.000Z | 2021-06-20T03:38:09.000Z | vnpy/trader/gateway/bitmexGateway/__init__.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | null | null | null | vnpy/trader/gateway/bitmexGateway/__init__.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 6 | 2019-10-28T13:16:13.000Z | 2020-09-08T08:03:41.000Z | # encoding: UTF-8
from vnpy.trader import vtConstant
from .bitmexGateway import BitmexGateway
gatewayClass = BitmexGateway
gatewayName = 'BITMEX'
gatewayDisplayName = 'BITMEX'
gatewayType = vtConstant.GATEWAYTYPE_BTC
gatewayQryEnabled = False | 24.4 | 40 | 0.827869 |
acf7d64aec20cb47d0bb2e864bfe9844ebf89c23 | 15,450 | py | Python | leopy/src/leopy/algo/leo_update.py | rpl-cmu/leo | 4ed27b169172795930a9103598144eb3ca70a405 | [
"MIT"
] | 15 | 2021-11-15T23:04:19.000Z | 2022-03-16T05:09:48.000Z | leopy/src/leopy/algo/leo_update.py | psodhi/logo | 4ed27b169172795930a9103598144eb3ca70a405 | [
"MIT"
] | null | null | null | leopy/src/leopy/algo/leo_update.py | psodhi/logo | 4ed27b169172795930a9103598144eb3ca70a405 | [
"MIT"
] | 1 | 2021-08-11T02:53:29.000Z | 2021-08-11T02:53:29.000Z | import numpy as np
import os
from datetime import datetime
import copy
from functools import partial
import pandas as pd
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from leopy.optim import cost, gtsamopt, sampler
from leopy.algo.leo_obs_models import *
from leopy.utils import dir_utils, vis_utils
from leopy.eval import quant_metrics
import logging
log = logging.getLogger(__name__)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_dataset(params, idx, dataset_mode="train"):
    """Load one JSON dataset file for the given index and split.

    Test-split indices are shifted by ``params.leo.test_idx_offset`` so
    train/test files live in disjoint numeric ranges.
    """
    offset = 0 if dataset_mode == "train" else params.leo.test_idx_offset
    filename = "{0}/{1}/{2}/{3}/{4:04d}.json".format(
        params.BASE_PATH, params.dataio.srcdir_dataset,
        params.dataio.dataset_name, dataset_mode, idx + offset)
    return dir_utils.read_file_json(filename, verbose=False)
def eval_learnt_params(theta, theta_exp, params, dataframe):
    """Evaluate learnt cost parameters ``theta`` on the test split.

    For each of ``params.leo.n_data_test`` test datapoints: run the
    optimizer with ``theta``, build the expert trajectory, and record the
    translational/rotational tracking errors both in the returned arrays
    and in ``dataframe`` rows keyed by (test data index, last step).

    NOTE(review): mutates ``params.optim.save_fig`` (sets it False) as a
    side effect and never restores it.
    """
    params.optim.save_fig = False
    n_data = params.leo.n_data_test
    traj_err_trans_test, traj_err_rot_test = np.zeros((n_data, 1)), np.zeros((n_data, 1))
    for data_idx in range(0, n_data):
        # optimizer trajectory
        x_opt, data, _, _ = optimizer_soln(theta, params, data_idx, dataset_mode="test")
        x_opt = torch.tensor(x_opt, requires_grad=True, dtype=torch.float32, device=device)
        # expert trajectory
        x_exp = get_exp_traj(data, theta_exp, params)
        # traj errors
        traj_err_trans_test[data_idx, :], traj_err_rot_test[data_idx, :] = quant_metrics.traj_error(
            xyh_est=x_opt.detach().cpu().numpy(), xyh_gt=x_exp.detach().cpu().numpy())
        # dataframe rows are indexed by (offset test data idx, final optimizer step)
        dataframe.loc[(data_idx+params.leo.test_idx_offset, params.optim.nsteps-1), 'test/err/tracking/trans'] = traj_err_trans_test[data_idx, :]
        dataframe.loc[(data_idx+params.leo.test_idx_offset, params.optim.nsteps-1), 'test/err/tracking/rot'] = traj_err_rot_test[data_idx, :]
    return traj_err_trans_test, traj_err_rot_test, dataframe
def add_tracking_errors_to_dataframe(df, x_opt, x_exp, params=None):
    """Append per-timestep tracking errors (optimized vs. expert) to ``df``.

    For push2d the state stacks object and end-effector poses, so only the
    first half of the rows are pose steps; other datasets use all rows.
    Writes 'train/err/tracking/trans' and 'train/err/tracking/rot' columns
    keyed by timestep (starting at 1) and returns the same dataframe.
    """
    if params.dataio.dataset_type == 'push2d':
        nsteps = int(0.5 * x_opt.shape[0])
    else:
        nsteps = x_opt.shape[0]
    opt_np = x_opt.detach().cpu().numpy()
    exp_np = x_exp.detach().cpu().numpy()
    for tstep in range(1, nsteps):
        # error over the trajectory prefix [0, tstep)
        err_trans, err_rot = quant_metrics.traj_error(
            xyh_est=opt_np[0:tstep, :], xyh_gt=exp_np[0:tstep, :])
        df.loc[tstep, 'train/err/tracking/trans'] = err_trans
        df.loc[tstep, 'train/err/tracking/rot'] = err_rot
    return df
def check_traj_convergence(traj_err_trans, traj_err_rot, traj_err_trans_prev, traj_err_rot_prev, params):
    """Return True when both trajectory errors have stopped changing.

    Convergence means the absolute change of the translational AND
    rotational errors since the previous LEO iteration is below the
    thresholds in ``params.leo``.
    """
    trans_delta = np.absolute(traj_err_trans - traj_err_trans_prev)
    rot_delta = np.absolute(traj_err_rot - traj_err_rot_prev)
    if (trans_delta < params.leo.eps_diff_traj_err_trans) and (rot_delta < params.leo.eps_diff_traj_err_rot):
        return True
    return False
def optimizer_soln(theta, params, data_idx, dataset_mode="train"):
    """Run the factor-graph optimizer on one datapoint under cost params ``theta``.

    Returns (x_opt, data, x_samples, dataframe) where x_opt is a single
    MAP-style trajectory (n_x x dim_x) and x_samples are Gaussian samples
    around the solution (n_samples x n_x x dim_x).
    """
    data = load_dataset(params, data_idx, dataset_mode)
    params.dataio.data_idx = "{0:04d}".format(data_idx)  # todo: switch to integer
    cost_obj = cost.Cost(data, theta, params=params, device=device)
    opt_mean, opt_cov, df_opt = gtsamopt.run_optimizer(cost_obj, params=params)
    # single trajectory at the mean (no covariance)
    x_opt = sampler.sampler_gaussian(opt_mean, cov=None)
    # tempered samples around the solution for the LEO loss
    x_samples = sampler.sampler_gaussian(
        opt_mean, opt_cov, n_samples=params.leo.n_samples, temp=params.leo.temp)
    return x_opt, data, x_samples, df_opt
def cost_optimizer(x_samples, cost_obj, params=None):
    """Cost of the optimizer solution under the current cost model.

    With ``params.leo.sampler`` enabled, averages the cost over the batch
    of sampled trajectories; otherwise evaluates a single trajectory.
    """
    if not params.leo.sampler:
        return cost_obj.costfn(x_samples, log=params.logger.cost_flag)
    total = torch.tensor([0.], requires_grad=True, device=device)
    for sample in x_samples:
        total = total + cost_obj.costfn(sample, log=params.logger.cost_flag)
    return total / x_samples.shape[0]
def cost_expert(x_exp, cost_obj, params=None):
    """Cost of the expert trajectory under the current cost model."""
    return cost_obj.costfn(x_exp)
def get_exp_traj_realizable(data, theta_exp, params):
    """Expert trajectory realizable by the optimizer under expert params.

    Runs the optimizer with the expert cost parameters ``theta_exp`` and
    returns its mean trajectory as a float32 tensor with gradients enabled.
    """
    expert_cost = cost.Cost(data, theta_exp, params=params, device=device)
    opt_mean, _, _ = gtsamopt.run_optimizer(expert_cost, params=params)
    traj = sampler.sampler_gaussian(opt_mean, cov=None)
    return torch.tensor(traj, requires_grad=True, dtype=torch.float32, device=device)
def get_exp_traj_groundtruth(data, params):
    """Expert trajectory taken from the dataset's ground-truth poses.

    push2d stacks object poses on top of end-effector poses; nav2d uses
    the robot poses directly.  Returns a float32 tensor with gradients
    enabled, or None (after printing a message) for unknown dataset types.
    """
    dataset_type = params.dataio.dataset_type
    nsteps = params.optim.nsteps
    if dataset_type == "push2d":
        obj_poses_gt = np.array(data['obj_poses_gt'][0:nsteps])
        ee_poses_gt = np.array(data['ee_poses_gt'][0:nsteps])
        x_gt = np.vstack((obj_poses_gt, ee_poses_gt))
    elif dataset_type == "nav2d":
        x_gt = np.array(data['poses_gt'][0:nsteps])
    else:
        print(f"[leo_update::get_exp_traj_groundtruth] x_exp not found for {dataset_type}")
        return
    return torch.tensor(x_gt, requires_grad=True, dtype=torch.float32, device=device)
def get_exp_traj(data, theta_exp, params):
    """Return the expert trajectory used for the LEO loss.

    Currently returns the ground-truth trajectory.  The realizable
    trajectory is still computed (it runs the optimizer with expert
    params) but its result is unused: an earlier variant blended the two
    via ``params.leo.realizability_coeff`` and was disabled due to a
    double-backward error.
    """
    _realizable = get_exp_traj_realizable(data, theta_exp, params)  # result intentionally unused
    return get_exp_traj_groundtruth(data, params)
def optimizer_update(optimizer, output):
    """Apply one gradient step of ``optimizer`` on the scalar ``output``."""
    optimizer.zero_grad()  # drop gradients from the previous step
    output.backward()      # backprop the loss
    optimizer.step()       # update the parameters
def run(params):
    """Main LEO training loop.

    Repeatedly: (1) run the factor-graph optimizer (in a process pool) on
    every training datapoint with the current learnable cost parameters
    ``theta``, (2) compare optimizer and expert trajectory costs, and
    (3) take a gradient step on theta using the loss
    ``(cost_exp - cost_opt)/n_data + l2_reg``.  Logs to tensorboard/CSV
    and optionally writes per-datapoint videos at the end.
    """
    # figs = vis_utils.init_plots(n_figs=1, interactive=params.optim.show_fig)
    print("[leo_update::run] Using device: {0}".format(device))
    params.dataio.prefix = datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
    if params.random_seed is not None:
        np.random.seed(params.random_seed)
        torch.manual_seed(params.random_seed)
    # save config params
    dir_cfg = "{0}/{1}/{2}/{3}".format(params.BASE_PATH, params.plot.dstdir, params.dataio.dataset_name, params.dataio.prefix)
    dir_utils.make_dir(dir_cfg)
    print(params.pretty())
    with open("{0}/{1}_config.txt".format(dir_cfg, params.dataio.prefix), "w") as f:
        print(params.pretty(), file=f)
    # init tensorboard visualizer
    if params.leo.tb_flag:
        tb_dir = "{0}".format(params.dataio.prefix)
        os.system('mkdir -p {0}/runs/{1}'.format(params.BASE_PATH, tb_dir))
        tb_writer = SummaryWriter("{0}/runs/{1}".format(params.BASE_PATH, tb_dir))
    # for printing cost grad, vals every leo iteration
    if (params.dataio.dataset_type == "push2d"):
        print_list = ["tactile", "qs"]
    elif (params.dataio.dataset_type == "nav2d"):
        print_list = ["odom", "gps"]
    # init theta params
    params = add_theta_exp_to_params(params)
    theta, theta_exp = init_theta(params)
    print(" ************** [leo_update::theta_init] ************** ")
    for name, param in theta.named_parameters():
        print('name: ', name)
        print(type(param))
        print('param.shape: ', param.shape)
        print('param.requires_grad: ', param.requires_grad)
        print(param)
        print('=====')
    # init leo update params
    max_iters = params.leo.max_iters
    n_data = params.leo.n_data_train
    # leo loss optimizer
    params_optimize = filter(lambda p: p.requires_grad, theta.parameters())
    optimizer = optim.Adam(params_optimize, lr=params.leo.lr, weight_decay=params.leo.lmd)
    if params.leo.lr_scheduler:
        # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=20, factor=0.5, verbose=True)
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95, verbose=True)
    # init multiprocess params
    mp.set_start_method('spawn')
    # init clipper, gradient hook
    for name, param in theta.named_parameters():
        if (param.requires_grad == True):
            # if any(word in name for word in print_list):
            param.register_hook(lambda grad: print("[leo_update::run] GRAD : {}".format(grad)))
    # collect expert trajectories
    x_exp_all = []
    for data_idx in range(0, n_data):
        data = load_dataset(params, data_idx)
        params.leo.itr = 0
        params.dataio.data_idx = "{0:04d}".format(data_idx)
        params.optim.save_fig = False
        # expert trajectory
        x_exp = get_exp_traj(data, theta_exp, params)
        x_exp_all.append(x_exp)
    # main leo loop, update theta
    df_leo_list = []
    itr = 0
    while (itr < max_iters):
        cost_opt = torch.tensor([0.], requires_grad=True, device=device)
        cost_exp = torch.tensor([0.], requires_grad=True, device=device)
        # NOTE(review): *_prev are re-initialized to 0.0 every iteration, so the
        # convergence check below always compares against 0 — confirm intent.
        mean_traj_err_trans_prev, mean_traj_err_rot_prev = 0.0, 0.0
        loss = torch.tensor([0.], requires_grad=True, device=device)
        # set config params
        params.leo.itr = itr
        params.optim.save_fig = True
        # logger
        if (params.optim.save_logger):
            logger_dir = "{0}/local/logger/{1}/leo_itr_{2:04d}".format(
                params.BASE_PATH, params.dataio.dataset_name, params.leo.itr)
            dir_utils.make_dir(logger_dir)
        # parallelized optimizer run
        pool = mp.Pool(processes=params.leo.pool_processes)
        optimizer_soln_fn = partial(optimizer_soln, copy.deepcopy(theta), params)
        data_idxs = np.arange(0, n_data)
        result_opt = pool.map(optimizer_soln_fn, data_idxs)
        pool.close()
        pool.join()
        traj_err_trans_train, traj_err_rot_train = np.zeros((n_data, 1)), np.zeros((n_data, 1))
        df_data_list = []
        for data_idx in range(0, n_data):
            # expert, optim trajs for current data idx
            x_exp = x_exp_all[data_idx]
            x_opt = result_opt[data_idx][0]
            data = result_opt[data_idx][1]
            x_samples = result_opt[data_idx][2]
            x_opt = torch.tensor(x_opt, requires_grad=True,
                                 dtype=torch.float32, device=device)
            x_samples = torch.tensor(x_samples, requires_grad=True,
                                     dtype=torch.float32, device=device)
            # optim, expert costs for current data idx
            cost_obj = cost.Cost(data, theta, params=params, device=device)
            cost_opt_curr = cost_optimizer(x_samples, cost_obj, params=params)
            cost_exp_curr = cost_expert(x_exp, cost_obj, params=None)
            # create a common data frame
            if params.logger.enable:
                df_opt = result_opt[data_idx][3]
                df_cost = cost_obj.get_dataframe()
                df = pd.concat([df_cost, df_opt], axis=1)
                df = add_tracking_errors_to_dataframe(df, x_opt, x_exp, params)
                df = df.assign(leo_loss= (cost_exp - cost_opt).item())
                df = pd.concat({data_idx: df}, names=['data_idx'])
                df_data_list.append(df)
            # sum up costs over data idxs
            cost_opt = cost_opt + cost_opt_curr
            cost_exp = cost_exp + cost_exp_curr
            # traj errors
            traj_err_trans_train[data_idx, :], traj_err_rot_train[data_idx, :] = quant_metrics.traj_error(
                xyh_est=x_opt[0:params.optim.nsteps, :].detach().cpu().numpy(), xyh_gt=x_exp[0:params.optim.nsteps, :].detach().cpu().numpy())
        # leo loss
        l2_reg = params.leo.l2_wt * theta.norm()
        loss = 1/n_data * (cost_exp - cost_opt) + l2_reg
        # test trajectory errors
        # traj_err_trans_test, traj_err_rot_test, df = eval_learnt_params(theta, theta_exp, params=params, dataframe=df)
        # concat dataframes across data idxs
        if params.logger.enable:
            df = pd.concat(df_data_list)
            df = pd.concat({itr: df}, names=['leo_itr'])
            df_leo_list.append(df)
            # NOTE(review): `name`/`param` here are stale leftovers from the
            # gradient-hook loop above, not the current iteration's values.
            print("[leo_update::train] iteration {0}/{1} VALUE {2}: {3}".format(itr, max_iters-1, name, param.data))
        mean_traj_err_trans, mean_traj_err_rot = np.mean(traj_err_trans_train), np.mean(traj_err_rot_train)
        if params.leo.tb_flag:
            tb_writer.add_scalar("loss", loss.item(), itr)
            tb_writer.add_scalar("err/tracking/trans", mean_traj_err_trans, itr)
            tb_writer.add_scalar("err/tracking/rot", mean_traj_err_rot, itr)
        for name, param in theta.named_parameters():
            # if any(word in name for word in print_list):
            if (param.requires_grad == True):
                print("[leo_update::train] iteration {0}/{1} VALUE {2}: {3}".format(itr, max_iters-1, name, param.data))
        print("[leo_update::train] iteration {0}/{1}, loss: {2}, cost_opt: {3}, cost_exp: {4}".format(
            itr, max_iters-1, loss.item(), cost_opt.item(), cost_exp.item()))
        print("[leo_update::train] iteration {0}/{1}, traj_err_trans_train: {2}, traj_err_rot_train: {3}".format(
            itr, max_iters-1, mean_traj_err_trans, mean_traj_err_rot))
        # print("[leo_update::test] fevals: {0}, traj_err_trans_test: ({1}, {2}), traj_err_rot_test: ({3}, {4})".format(
        #     itr, np.mean(traj_err_trans_test), np.std(traj_err_trans_test), np.mean(traj_err_rot_test), np.std(traj_err_rot_test)))
        if (params.leo.use_traj_convergence):
            converge_flag = check_traj_convergence(mean_traj_err_trans, mean_traj_err_rot, mean_traj_err_trans_prev, mean_traj_err_rot_prev, params)
            mean_traj_err_trans_prev, mean_traj_err_rot_prev = mean_traj_err_trans, mean_traj_err_rot
            if converge_flag: break
        optimizer_update(optimizer, loss)
        theta.min_clip()
        if params.leo.lr_scheduler:
            scheduler.step()
        # write dataframe to file
        if (params.logger.enable) & (params.logger.save_csv):
            dataframe = pd.concat(df_leo_list)
            logdir = f"{params.BASE_PATH}/local/datalogs/{params.dataio.dataset_name}/{params.dataio.prefix}"
            os.makedirs(logdir, exist_ok=True)
            dataframe.to_csv(f"{logdir}/datalog_{params.dataio.prefix}.csv")
            log.info(f"Saved logged data to {logdir}")
        itr = itr + 1
    # plotting
    if (params.leo.save_video):
        for idx in range(0, n_data):
            dataset_name_idx = "{0}_{1:04d}".format(params.dataio.dataset_name, idx)
            imgdir = "{0}/{1}/{2}".format(params.BASE_PATH, params.plot.dstdir, dataset_name_idx)
            vis_utils.write_video_ffmpeg(imgdir, "{0}/{1}".format(imgdir, "optimized_isam2"))
| 40.765172 | 148 | 0.663948 |
acf7d684515013909d9d98fd38c4cca2767ec6b2 | 5,451 | py | Python | pdfminer/pdfdevice.py | hason-contributions/pdfminer.six | fd63dbf62e291f6cba5beda968e6f9c480f20033 | [
"MIT"
] | null | null | null | pdfminer/pdfdevice.py | hason-contributions/pdfminer.six | fd63dbf62e291f6cba5beda968e6f9c480f20033 | [
"MIT"
] | null | null | null | pdfminer/pdfdevice.py | hason-contributions/pdfminer.six | fd63dbf62e291f6cba5beda968e6f9c480f20033 | [
"MIT"
] | null | null | null |
from .pdffont import PDFUnicodeNotDefined
from . import utils
## PDFDevice
##
class PDFDevice(object):
    """Base rendering device.

    Receives page/figure/tag/text events from the PDF interpreter and
    ignores all of them; subclasses override the hooks they care about.
    Holds the resource manager and the current transformation matrix.
    """

    def __init__(self, rsrcmgr):
        self.rsrcmgr = rsrcmgr
        self.ctm = None

    def __repr__(self):
        return '<PDFDevice>'

    def close(self):
        pass

    def set_ctm(self, ctm):
        """Remember the current transformation matrix."""
        self.ctm = ctm

    def begin_tag(self, tag, props=None):
        pass

    def end_tag(self):
        pass

    def do_tag(self, tag, props=None):
        pass

    def begin_page(self, page, ctm):
        pass

    def end_page(self, page):
        pass

    def begin_figure(self, name, bbox, matrix):
        pass

    def end_figure(self, name):
        pass

    def paint_path(self, graphicstate, stroke, fill, evenodd, path):
        pass

    def render_image(self, name, stream):
        pass

    def render_string(self, textstate, seq):
        pass
## PDFTextDevice
##
class PDFTextDevice(PDFDevice):
    """Device that decodes text-showing operators into per-glyph
    ``render_char`` calls, advancing the text line matrix as it goes."""

    def render_string(self, textstate, seq):
        """Render one text sequence under the current text state.

        Numbers inside ``seq`` are TJ-style positioning adjustments;
        strings are decoded through the font into character ids.
        """
        matrix = utils.mult_matrix(textstate.matrix, self.ctm)
        font = textstate.font
        fontsize = textstate.fontsize
        scaling = textstate.scaling * .01
        charspace = textstate.charspace * scaling
        # word spacing does not apply to multibyte encodings
        wordspace = 0 if font.is_multibyte() else textstate.wordspace * scaling
        rise = textstate.rise
        dxscale = .001 * fontsize * scaling
        if font.is_vertical():
            render = self.render_string_vertical
        else:
            render = self.render_string_horizontal
        textstate.linematrix = render(
            seq, matrix, textstate.linematrix, font, fontsize,
            scaling, charspace, wordspace, rise, dxscale)

    def render_string_horizontal(self, seq, matrix, pos,
                                 font, fontsize, scaling, charspace, wordspace, rise, dxscale):
        """Lay out glyphs left-to-right; returns the updated (x, y)."""
        (x, y) = pos
        pending_charspace = False
        for obj in seq:
            if utils.isnumber(obj):
                # explicit positioning adjustment
                x -= obj * dxscale
                pending_charspace = True
                continue
            for cid in font.decode(obj):
                if pending_charspace:
                    x += charspace
                x += self.render_char(utils.translate_matrix(matrix, (x, y)),
                                      font, fontsize, scaling, rise, cid)
                if cid == 32 and wordspace:
                    x += wordspace
                pending_charspace = True
        return (x, y)

    def render_string_vertical(self, seq, matrix, pos,
                               font, fontsize, scaling, charspace, wordspace, rise, dxscale):
        """Lay out glyphs top-to-bottom; returns the updated (x, y)."""
        (x, y) = pos
        pending_charspace = False
        for obj in seq:
            if utils.isnumber(obj):
                y -= obj * dxscale
                pending_charspace = True
                continue
            for cid in font.decode(obj):
                if pending_charspace:
                    y += charspace
                y += self.render_char(utils.translate_matrix(matrix, (x, y)),
                                      font, fontsize, scaling, rise, cid)
                if cid == 32 and wordspace:
                    y += wordspace
                pending_charspace = True
        return (x, y)

    def render_char(self, matrix, font, fontsize, scaling, rise, cid):
        """Hook for subclasses; returns the glyph advance (0 by default)."""
        return 0
## TagExtractor
##
class TagExtractor(PDFDevice):
    """Device that dumps marked-content tags and decoded text to ``outfp``
    as XML-ish output encoded with ``codec``."""

    def __init__(self, rsrcmgr, outfp, codec='utf-8'):
        PDFDevice.__init__(self, rsrcmgr)
        self.outfp = outfp      # binary output stream
        self.codec = codec      # encoding for emitted text
        self.pageno = 0
        self._stack = []        # open-tag stack for begin_tag/end_tag pairing
        return

    def render_string(self, textstate, seq):
        """Decode the text-showing sequence and write it to the output."""
        font = textstate.font
        text = ''
        for obj in seq:
            obj = utils.make_compat_str(obj)
            if not isinstance(obj, str):
                continue
            chars = font.decode(obj)
            for cid in chars:
                try:
                    char = font.to_unichr(cid)
                    text += char
                except PDFUnicodeNotDefined:
                    # glyph has no unicode mapping; skip it (debug print)
                    print(chars)
                    pass
        self.outfp.write(utils.enc(text, self.codec))
        return

    def begin_page(self, page, ctm):
        output = '<page id="%s" bbox="%s" rotate="%d">' % (self.pageno, utils.bbox2str(page.mediabox), page.rotate)
        self.outfp.write(utils.make_compat_bytes(output))
        return

    def end_page(self, page):
        self.outfp.write(utils.make_compat_bytes('</page>\n'))
        self.pageno += 1
        return

    def begin_tag(self, tag, props=None):
        s = ''
        if isinstance(props, dict):
            # bugfix: dict.iteritems() does not exist on Python 3 (this file
            # already uses print()); use items() instead.
            s = ''.join(' %s="%s"' % (utils.enc(k), utils.enc(str(v))) for (k, v)
                        in sorted(props.items()))
        out_s = '<%s%s>' % (utils.enc(tag.name), s)
        self.outfp.write(utils.make_compat_bytes(out_s))
        self._stack.append(tag)
        return

    def end_tag(self):
        assert self._stack
        tag = self._stack.pop(-1)
        out_s = '</%s>' % utils.enc(tag.name)
        self.outfp.write(utils.make_compat_bytes(out_s))
        return

    def do_tag(self, tag, props=None):
        # a self-closing tag: open then immediately discard from the stack
        self.begin_tag(tag, props)
        self._stack.pop(-1)
        return
| 29.625 | 115 | 0.533113 |
acf7d727af32c832f335bd60177732b023f0c674 | 786 | py | Python | source/119-Função_exponencial.py | FelixLuciano/DesSoft-2020.2 | a44063d63778329f1e1266881f20f7954ecb528b | [
"MIT"
] | null | null | null | source/119-Função_exponencial.py | FelixLuciano/DesSoft-2020.2 | a44063d63778329f1e1266881f20f7954ecb528b | [
"MIT"
] | null | null | null | source/119-Função_exponencial.py | FelixLuciano/DesSoft-2020.2 | a44063d63778329f1e1266881f20f7954ecb528b | [
"MIT"
] | null | null | null | # Função exponencial
# Se pode usar uma série para calcular o valor do e (número de Euler ou Neperiano). Basicamente a ideia é somar uma sequência de número, e conforme se avança na sequência, se chega mais perto do valor desejado. A série de Taylor para calcular e^x é:
# e^x = 1 + x + \frac{x^2}{2!} + \frac{x^3}{3!} + \frac{x^4}{4!} + \frac{x^5}{5!} + ...
# Faça uma função em Python que calcula o resultado do ex para uma série de tamanho n. Você pode supor que as entradas para x e n serão enviadas nesta ordem e sempre serão números positivos.
# O nome da sua função deve ser 'calcula_euler'.
import math
def calcula_euler(x, n):
    """Approximate e**x by summing the first n terms of its Taylor series:

        e**x = sum_{i=0}^{n-1} x**i / i!
    """
    return sum(x ** i / math.factorial(i) for i in range(n))
| 49.125 | 258 | 0.670483 |
acf7d75d549785c4b3085948519ccf05263c2ac4 | 27,835 | py | Python | test/dialect/oracle/test_reflection.py | lvillis/sqlalchemy | 889d05c444264bf1b6d11386459d3360cc529d27 | [
"MIT"
] | 5,383 | 2018-11-27T07:34:03.000Z | 2022-03-31T19:40:59.000Z | test/dialect/oracle/test_reflection.py | lvillis/sqlalchemy | 889d05c444264bf1b6d11386459d3360cc529d27 | [
"MIT"
] | 2,719 | 2018-11-27T07:55:01.000Z | 2022-03-31T22:09:44.000Z | test/dialect/oracle/test_reflection.py | lvillis/sqlalchemy | 889d05c444264bf1b6d11386459d3360cc529d27 | [
"MIT"
] | 1,056 | 2015-01-03T00:30:17.000Z | 2022-03-15T12:56:24.000Z | # coding: utf-8
from sqlalchemy import exc
from sqlalchemy import FLOAT
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Identity
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import INTEGER
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Numeric
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import Unicode
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.oracle.base import BINARY_DOUBLE
from sqlalchemy.dialects.oracle.base import BINARY_FLOAT
from sqlalchemy.dialects.oracle.base import DOUBLE_PRECISION
from sqlalchemy.dialects.oracle.base import NUMBER
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class MultiSchemaTest(fixtures.TestBase, AssertsCompiledSQL):
    """Oracle reflection across schemas and synonyms.

    setup creates parent/child tables plus several synonyms in the
    alternate test schema (requires DBA-level privileges; REFERENCES is
    granted to PUBLIC so the local schema can point FKs at the remote
    tables), then the tests reflect them with and without
    ``oracle_resolve_synonyms``.
    """
    __only_on__ = "oracle"
    __backend__ = True
    @classmethod
    def setup_test_class(cls):
        # currently assuming full DBA privs for the user.
        # don't really know how else to go here unless
        # we connect as the other user.
        with testing.db.begin() as conn:
            for stmt in (
                """
            create table %(test_schema)s.parent(
                id integer primary key,
                data varchar2(50)
            );
            COMMENT ON TABLE %(test_schema)s.parent IS 'my table comment';
            create table %(test_schema)s.child(
                id integer primary key,
                data varchar2(50),
                parent_id integer references %(test_schema)s.parent(id)
            );
            create table local_table(
                id integer primary key,
                data varchar2(50)
            );
            create synonym %(test_schema)s.ptable for %(test_schema)s.parent;
            create synonym %(test_schema)s.ctable for %(test_schema)s.child;
            create synonym %(test_schema)s_pt for %(test_schema)s.parent;
            create synonym %(test_schema)s.local_table for local_table;
            -- can't make a ref from local schema to the
            -- remote schema's table without this,
            -- *and* cant give yourself a grant !
            -- so we give it to public. ideas welcome.
            grant references on %(test_schema)s.parent to public;
            grant references on %(test_schema)s.child to public;
            """
                % {"test_schema": testing.config.test_schema}
            ).split(";"):
                if stmt.strip():
                    conn.exec_driver_sql(stmt)
    @classmethod
    def teardown_test_class(cls):
        with testing.db.begin() as conn:
            for stmt in (
                """
            drop table %(test_schema)s.child;
            drop table %(test_schema)s.parent;
            drop table local_table;
            drop synonym %(test_schema)s.ctable;
            drop synonym %(test_schema)s.ptable;
            drop synonym %(test_schema)s_pt;
            drop synonym %(test_schema)s.local_table;
            """
                % {"test_schema": testing.config.test_schema}
            ).split(";"):
                if stmt.strip():
                    conn.exec_driver_sql(stmt)
    def test_create_same_names_explicit_schema(self, metadata, connection):
        schema = testing.db.dialect.default_schema_name
        meta = metadata
        parent = Table(
            "parent",
            meta,
            Column("pid", Integer, primary_key=True),
            schema=schema,
        )
        child = Table(
            "child",
            meta,
            Column("cid", Integer, primary_key=True),
            Column("pid", Integer, ForeignKey("%s.parent.pid" % schema)),
            schema=schema,
        )
        meta.create_all(connection)
        connection.execute(parent.insert(), {"pid": 1})
        connection.execute(child.insert(), {"cid": 1, "pid": 1})
        eq_(connection.execute(child.select()).fetchall(), [(1, 1)])
    def test_reflect_alt_table_owner_local_synonym(self):
        meta = MetaData()
        parent = Table(
            "%s_pt" % testing.config.test_schema,
            meta,
            autoload_with=testing.db,
            oracle_resolve_synonyms=True,
        )
        self.assert_compile(
            parent.select(),
            "SELECT %(test_schema)s_pt.id, "
            "%(test_schema)s_pt.data FROM %(test_schema)s_pt"
            % {"test_schema": testing.config.test_schema},
        )
    def test_reflect_alt_synonym_owner_local_table(self):
        meta = MetaData()
        parent = Table(
            "local_table",
            meta,
            autoload_with=testing.db,
            oracle_resolve_synonyms=True,
            schema=testing.config.test_schema,
        )
        self.assert_compile(
            parent.select(),
            "SELECT %(test_schema)s.local_table.id, "
            "%(test_schema)s.local_table.data "
            "FROM %(test_schema)s.local_table"
            % {"test_schema": testing.config.test_schema},
        )
    def test_create_same_names_implicit_schema(self, metadata, connection):
        meta = metadata
        parent = Table(
            "parent", meta, Column("pid", Integer, primary_key=True)
        )
        child = Table(
            "child",
            meta,
            Column("cid", Integer, primary_key=True),
            Column("pid", Integer, ForeignKey("parent.pid")),
        )
        meta.create_all(connection)
        connection.execute(parent.insert(), {"pid": 1})
        connection.execute(child.insert(), {"cid": 1, "pid": 1})
        eq_(connection.execute(child.select()).fetchall(), [(1, 1)])
    def test_reflect_alt_owner_explicit(self):
        meta = MetaData()
        parent = Table(
            "parent",
            meta,
            autoload_with=testing.db,
            schema=testing.config.test_schema,
        )
        child = Table(
            "child",
            meta,
            autoload_with=testing.db,
            schema=testing.config.test_schema,
        )
        self.assert_compile(
            parent.join(child),
            "%(test_schema)s.parent JOIN %(test_schema)s.child ON "
            "%(test_schema)s.parent.id = %(test_schema)s.child.parent_id"
            % {"test_schema": testing.config.test_schema},
        )
        with testing.db.connect() as conn:
            conn.execute(
                select(parent, child).select_from(parent.join(child))
            ).fetchall()
        # check table comment (#5146)
        eq_(parent.comment, "my table comment")
    def test_reflect_table_comment(self, metadata, connection):
        local_parent = Table(
            "parent",
            metadata,
            Column("q", Integer),
            comment="my local comment",
        )
        local_parent.create(connection)
        insp = inspect(connection)
        eq_(
            insp.get_table_comment(
                "parent", schema=testing.config.test_schema
            ),
            {"text": "my table comment"},
        )
        eq_(
            insp.get_table_comment(
                "parent",
            ),
            {"text": "my local comment"},
        )
        eq_(
            insp.get_table_comment(
                "parent", schema=connection.dialect.default_schema_name
            ),
            {"text": "my local comment"},
        )
    def test_reflect_local_to_remote(self, connection):
        connection.exec_driver_sql(
            "CREATE TABLE localtable (id INTEGER "
            "PRIMARY KEY, parent_id INTEGER REFERENCES "
            "%(test_schema)s.parent(id))"
            % {"test_schema": testing.config.test_schema},
        )
        try:
            meta = MetaData()
            lcl = Table("localtable", meta, autoload_with=testing.db)
            parent = meta.tables["%s.parent" % testing.config.test_schema]
            self.assert_compile(
                parent.join(lcl),
                "%(test_schema)s.parent JOIN localtable ON "
                "%(test_schema)s.parent.id = "
                "localtable.parent_id"
                % {"test_schema": testing.config.test_schema},
            )
        finally:
            connection.exec_driver_sql("DROP TABLE localtable")
    def test_reflect_alt_owner_implicit(self):
        meta = MetaData()
        parent = Table(
            "parent",
            meta,
            autoload_with=testing.db,
            schema=testing.config.test_schema,
        )
        child = Table(
            "child",
            meta,
            autoload_with=testing.db,
            schema=testing.config.test_schema,
        )
        self.assert_compile(
            parent.join(child),
            "%(test_schema)s.parent JOIN %(test_schema)s.child "
            "ON %(test_schema)s.parent.id = "
            "%(test_schema)s.child.parent_id"
            % {"test_schema": testing.config.test_schema},
        )
        with testing.db.connect() as conn:
            conn.execute(
                select(parent, child).select_from(parent.join(child))
            ).fetchall()
    def test_reflect_alt_owner_synonyms(self, connection):
        connection.exec_driver_sql(
            "CREATE TABLE localtable (id INTEGER "
            "PRIMARY KEY, parent_id INTEGER REFERENCES "
            "%s.ptable(id))" % testing.config.test_schema,
        )
        try:
            meta = MetaData()
            lcl = Table(
                "localtable",
                meta,
                autoload_with=connection,
                oracle_resolve_synonyms=True,
            )
            parent = meta.tables["%s.ptable" % testing.config.test_schema]
            self.assert_compile(
                parent.join(lcl),
                "%(test_schema)s.ptable JOIN localtable ON "
                "%(test_schema)s.ptable.id = "
                "localtable.parent_id"
                % {"test_schema": testing.config.test_schema},
            )
            connection.execute(
                select(parent, lcl).select_from(parent.join(lcl))
            ).fetchall()
        finally:
            connection.exec_driver_sql("DROP TABLE localtable")
    def test_reflect_remote_synonyms(self):
        meta = MetaData()
        parent = Table(
            "ptable",
            meta,
            autoload_with=testing.db,
            schema=testing.config.test_schema,
            oracle_resolve_synonyms=True,
        )
        child = Table(
            "ctable",
            meta,
            autoload_with=testing.db,
            schema=testing.config.test_schema,
            oracle_resolve_synonyms=True,
        )
        self.assert_compile(
            parent.join(child),
            "%(test_schema)s.ptable JOIN "
            "%(test_schema)s.ctable "
            "ON %(test_schema)s.ptable.id = "
            "%(test_schema)s.ctable.parent_id"
            % {"test_schema": testing.config.test_schema},
        )
class ConstraintTest(fixtures.TablesTest):
    """Oracle-specific constraint behavior: ON UPDATE CASCADE is not
    supported (DDL emission warns), and implicit NOT NULL check
    constraints are only reflected with ``include_all=True``."""

    __only_on__ = "oracle"
    __backend__ = True
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table("foo", metadata, Column("id", Integer, primary_key=True))

    def test_oracle_has_no_on_update_cascade(self, connection):
        # onupdate on the ForeignKey itself
        fk_on_column = Table(
            "bar",
            self.tables_test_metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "foo_id", Integer, ForeignKey("foo.id", onupdate="CASCADE")
            ),
        )
        assert_raises(exc.SAWarning, fk_on_column.create, connection)

        # onupdate on a table-level ForeignKeyConstraint
        fk_as_constraint = Table(
            "bat",
            self.tables_test_metadata,
            Column("id", Integer, primary_key=True),
            Column("foo_id", Integer),
            ForeignKeyConstraint(["foo_id"], ["foo.id"], onupdate="CASCADE"),
        )
        assert_raises(exc.SAWarning, fk_as_constraint.create, connection)

    def test_reflect_check_include_all(self, connection):
        insp = inspect(connection)
        # by default, implicit NOT NULL checks are filtered out
        eq_(insp.get_check_constraints("foo"), [])
        all_checks = insp.get_check_constraints("foo", include_all=True)
        eq_(
            [record["sqltext"] for record in all_checks],
            ['"ID" IS NOT NULL'],
        )
class SystemTableTablenamesTest(fixtures.TestBase):
    """Verify that tables in the SYSTEM tablespace and global temporary
    tables are excluded/included correctly by name reflection."""
    __only_on__ = "oracle"
    __backend__ = True
    def setup_test(self):
        # One ordinary table, one global temp table, one table placed in
        # the SYSTEM tablespace (normally excluded from reflection).
        with testing.db.begin() as conn:
            conn.exec_driver_sql("create table my_table (id integer)")
            conn.exec_driver_sql(
                "create global temporary table my_temp_table (id integer)",
            )
            conn.exec_driver_sql(
                "create table foo_table (id integer) tablespace SYSTEM"
            )
    def teardown_test(self):
        with testing.db.begin() as conn:
            conn.exec_driver_sql("drop table my_temp_table")
            conn.exec_driver_sql("drop table my_table")
            conn.exec_driver_sql("drop table foo_table")
    def test_table_names_no_system(self):
        # Default behavior: SYSTEM-tablespace tables are not reported.
        insp = inspect(testing.db)
        eq_(insp.get_table_names(), ["my_table"])
    def test_temp_table_names_no_system(self):
        insp = inspect(testing.db)
        eq_(insp.get_temp_table_names(), ["my_temp_table"])
    def test_table_names_w_system(self):
        # Overriding exclude_tablespaces (to something other than SYSTEM)
        # makes the SYSTEM-tablespace table visible again.
        engine = testing_engine(options={"exclude_tablespaces": ["FOO"]})
        insp = inspect(engine)
        eq_(
            set(insp.get_table_names()).intersection(
                ["my_table", "foo_table"]
            ),
            set(["my_table", "foo_table"]),
        )
class DontReflectIOTTest(fixtures.TestBase):
    """test that index overflow tables aren't included in
    table_names."""
    __only_on__ = "oracle"
    __backend__ = True
    def setup_test(self):
        # Create an index-organized table (IOT) with an overflow segment;
        # the overflow segment gets a system-generated table name that
        # must not leak into reflection results.
        with testing.db.begin() as conn:
            conn.exec_driver_sql(
                """
CREATE TABLE admin_docindex(
                token char(20),
                doc_id NUMBER,
                token_frequency NUMBER,
                token_offsets VARCHAR2(2000),
                CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id))
    ORGANIZATION INDEX
    TABLESPACE users
    PCTTHRESHOLD 20
    OVERFLOW TABLESPACE users
            """,
            )
    def teardown_test(self):
        with testing.db.begin() as conn:
            conn.exec_driver_sql("drop table admin_docindex")
    def test_reflect_all(self, connection):
        # Only the IOT itself should be reflected, not its overflow table.
        m = MetaData()
        m.reflect(connection)
        eq_(set(t.name for t in m.tables.values()), set(["admin_docindex"]))
def all_tables_compression_missing():
    """Return True when the backend cannot do basic table compression.

    Basic compression needs Enterprise Edition, except that
    # this works in Oracle Database 18c Express Edition Release
    and newer, so Oracle >= 18 is also accepted.
    """
    with testing.db.connect() as conn:
        banner = conn.exec_driver_sql("select * from v$version").scalar()
    is_enterprise = "Enterprise Edition" in banner
    is_pre_18 = testing.db.dialect.server_version_info < (18,)
    return (not is_enterprise) and is_pre_18
def all_tables_compress_for_missing():
    """Return True when COMPRESS FOR (OLTP) compression is unavailable.

    Advanced/OLTP compression is an Enterprise Edition feature only.
    """
    with testing.db.connect() as conn:
        banner = conn.exec_driver_sql("select * from v$version").scalar()
    return "Enterprise Edition" not in banner
class TableReflectionTest(fixtures.TestBase):
    """Round-trip tests for the oracle_compress table option."""
    __only_on__ = "oracle"
    __backend__ = True
    @testing.fails_if(all_tables_compression_missing)
    def test_reflect_basic_compression(self, metadata, connection):
        tbl = Table(
            "test_compress",
            metadata,
            Column("data", Integer, primary_key=True),
            oracle_compress=True,
        )
        metadata.create_all(connection)
        m2 = MetaData()
        tbl = Table("test_compress", m2, autoload_with=connection)
        # Don't hardcode the exact value, but it must be non-empty
        assert tbl.dialect_options["oracle"]["compress"]
    @testing.fails_if(all_tables_compress_for_missing)
    def test_reflect_oltp_compression(self, metadata, connection):
        tbl = Table(
            "test_compress",
            metadata,
            Column("data", Integer, primary_key=True),
            oracle_compress="OLTP",
        )
        metadata.create_all(connection)
        m2 = MetaData()
        tbl = Table("test_compress", m2, autoload_with=connection)
        # OLTP compression should round-trip by name.
        assert tbl.dialect_options["oracle"]["compress"] == "OLTP"
class RoundTripIndexTest(fixtures.TestBase):
    """Round-trip tests for index reflection on Oracle, including unique
    indexes resembling the PK, functional indexes, and oracle_compress."""
    __only_on__ = "oracle"
    __backend__ = True
    def test_no_pk(self, metadata, connection):
        """Unique indexes on a table with no PK reflect as plain unique
        indexes, not as a primary key."""
        Table(
            "sometable",
            metadata,
            Column("id_a", Unicode(255)),
            Column("id_b", Unicode(255)),
            Index("pk_idx_1", "id_a", "id_b", unique=True),
            Index("pk_idx_2", "id_b", "id_a", unique=True),
        )
        metadata.create_all(connection)
        insp = inspect(connection)
        eq_(
            insp.get_indexes("sometable"),
            [
                {
                    "name": "pk_idx_1",
                    "column_names": ["id_a", "id_b"],
                    "dialect_options": {},
                    "unique": True,
                },
                {
                    "name": "pk_idx_2",
                    "column_names": ["id_b", "id_a"],
                    "dialect_options": {},
                    "unique": True,
                },
            ],
        )
    @testing.combinations((True,), (False,), argnames="explicit_pk")
    def test_include_indexes_resembling_pk(
        self, metadata, connection, explicit_pk
    ):
        """Unique indexes over the same columns as the PK (in another
        order) must still appear in get_indexes()."""
        t = Table(
            "sometable",
            metadata,
            Column("id_a", Unicode(255), primary_key=True),
            Column("id_b", Unicode(255), primary_key=True),
            Column("group", Unicode(255), primary_key=True),
            Column("col", Unicode(255)),
            # Oracle won't let you do this unless the indexes have
            # the columns in different order
            Index("pk_idx_1", "id_b", "id_a", "group", unique=True),
            Index("pk_idx_2", "id_b", "group", "id_a", unique=True),
        )
        if explicit_pk:
            t.append_constraint(
                PrimaryKeyConstraint(
                    "id_a", "id_b", "group", name="some_primary_key"
                )
            )
        metadata.create_all(connection)
        insp = inspect(connection)
        eq_(
            insp.get_indexes("sometable"),
            [
                {
                    "name": "pk_idx_1",
                    "column_names": ["id_b", "id_a", "group"],
                    "dialect_options": {},
                    "unique": True,
                },
                {
                    "name": "pk_idx_2",
                    "column_names": ["id_b", "group", "id_a"],
                    "dialect_options": {},
                    "unique": True,
                },
            ],
        )
    def test_reflect_fn_index(self, metadata, connection):
        """test reflection of a functional index.
        it appears this emitted a warning at some point but does not right now.
        the returned data is not exactly correct, but this is what it's
        likely been doing for many years.
        """
        s_table = Table(
            "sometable",
            metadata,
            Column("group", Unicode(255), primary_key=True),
            Column("col", Unicode(255)),
        )
        Index("data_idx", func.upper(s_table.c.col))
        metadata.create_all(connection)
        # Functional index columns come back empty, by long-standing behavior.
        eq_(
            inspect(connection).get_indexes("sometable"),
            [
                {
                    "column_names": [],
                    "dialect_options": {},
                    "name": "data_idx",
                    "unique": False,
                }
            ],
        )
    def test_basic(self, metadata, connection):
        """Full round trip: create, reflect into a mirror, drop, recreate
        from the mirror, then reflect again and verify PK, unique
        constraints, and compressed indexes all survived."""
        s_table = Table(
            "sometable",
            metadata,
            Column("id_a", Unicode(255), primary_key=True),
            Column("id_b", Unicode(255), primary_key=True, unique=True),
            Column("group", Unicode(255), primary_key=True),
            Column("col", Unicode(255)),
            UniqueConstraint("col", "group"),
        )
        # "group" is a keyword, so lower case
        normalind = Index("tableind", s_table.c.id_b, s_table.c.group)
        Index(
            "compress1", s_table.c.id_a, s_table.c.id_b, oracle_compress=True
        )
        Index(
            "compress2",
            s_table.c.id_a,
            s_table.c.id_b,
            s_table.c.col,
            oracle_compress=1,
        )
        metadata.create_all(connection)
        mirror = MetaData()
        mirror.reflect(connection)
        metadata.drop_all(connection)
        mirror.create_all(connection)
        # NOTE: `inspect` here shadows the sqlalchemy inspect() function
        # for the remainder of this test; it is a MetaData instance.
        inspect = MetaData()
        inspect.reflect(connection)
        def obj_definition(obj):
            # Key used to match reflected indexes/constraints irrespective
            # of their (possibly system-generated) names.
            return (
                obj.__class__,
                tuple([c.name for c in obj.columns]),
                getattr(obj, "unique", None),
            )
        # find what the primary k constraint name should be
        primaryconsname = connection.scalar(
            text(
                """SELECT constraint_name
               FROM all_constraints
               WHERE table_name = :table_name
               AND owner = :owner
               AND constraint_type = 'P' """
            ),
            dict(
                table_name=s_table.name.upper(),
                owner=testing.db.dialect.default_schema_name.upper(),
            ),
        )
        reflectedtable = inspect.tables[s_table.name]
        # make a dictionary of the reflected objects:
        reflected = dict(
            [
                (obj_definition(i), i)
                for i in reflectedtable.indexes | reflectedtable.constraints
            ]
        )
        # assert we got primary key constraint and its name, Error
        # if not in dict
        assert (
            reflected[
                (PrimaryKeyConstraint, ("id_a", "id_b", "group"), None)
            ].name.upper()
            == primaryconsname.upper()
        )
        # Error if not in dict
        eq_(reflected[(Index, ("id_b", "group"), False)].name, normalind.name)
        assert (Index, ("id_b",), True) in reflected
        assert (Index, ("col", "group"), True) in reflected
        idx = reflected[(Index, ("id_a", "id_b"), False)]
        assert idx.dialect_options["oracle"]["compress"] == 2
        idx = reflected[(Index, ("id_a", "id_b", "col"), False)]
        assert idx.dialect_options["oracle"]["compress"] == 1
        eq_(len(reflectedtable.constraints), 1)
        eq_(len(reflectedtable.indexes), 5)
class DBLinkReflectionTest(fixtures.TestBase):
    """Reflection through a synonym that targets a table over a DB link."""
    __requires__ = ("oracle_test_dblink",)
    __only_on__ = "oracle"
    __backend__ = True
    @classmethod
    def setup_test_class(cls):
        from sqlalchemy.testing import config
        # DB link name comes from the test config file.
        cls.dblink = config.file_config.get("sqla_testing", "oracle_db_link")
        # note that the synonym here is still not totally functional
        # when accessing via a different username as we do with the
        # multiprocess test suite, so testing here is minimal
        with testing.db.begin() as conn:
            conn.exec_driver_sql(
                "create table test_table "
                "(id integer primary key, data varchar2(50))"
            )
            conn.exec_driver_sql(
                "create synonym test_table_syn "
                "for test_table@%s" % cls.dblink
            )
    @classmethod
    def teardown_test_class(cls):
        with testing.db.begin() as conn:
            conn.exec_driver_sql("drop synonym test_table_syn")
            conn.exec_driver_sql("drop table test_table")
    def test_reflection(self):
        """test the resolution of the synonym/dblink."""
        m = MetaData()
        t = Table(
            "test_table_syn",
            m,
            autoload_with=testing.db,
            oracle_resolve_synonyms=True,
        )
        eq_(list(t.c.keys()), ["id", "data"])
        eq_(list(t.primary_key), [t.c.id])
class TypeReflectionTest(fixtures.TestBase):
    """Verify that Oracle column types round-trip to the expected
    reflected types with matching attributes."""
    __only_on__ = "oracle"
    __backend__ = True
    def _run_test(self, metadata, connection, specs, attributes):
        """Create a table with one column per (declared, expected) pair in
        *specs*, reflect it, and check reflected type class plus each
        attribute name listed in *attributes*."""
        columns = [Column("c%i" % (i + 1), t[0]) for i, t in enumerate(specs)]
        m = metadata
        Table("oracle_types", m, *columns)
        m.create_all(connection)
        m2 = MetaData()
        table = Table("oracle_types", m2, autoload_with=connection)
        for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
            expected_spec = spec[1]
            reflected_type = reflected_col.type
            is_(type(reflected_type), type(expected_spec))
            for attr in attributes:
                eq_(
                    getattr(reflected_type, attr),
                    getattr(expected_spec, attr),
                    "Column %s: Attribute %s value of %s does not "
                    "match %s for type %s"
                    % (
                        "c%i" % (i + 1),
                        attr,
                        getattr(reflected_type, attr),
                        getattr(expected_spec, attr),
                        spec[0],
                    ),
                )
    def test_integer_types(self, metadata, connection):
        specs = [(Integer, INTEGER()), (Numeric, INTEGER())]
        self._run_test(metadata, connection, specs, [])
    def test_number_types(
        self,
        metadata,
        connection,
    ):
        specs = [(Numeric(5, 2), NUMBER(5, 2)), (NUMBER, NUMBER())]
        self._run_test(metadata, connection, specs, ["precision", "scale"])
    def test_float_types(
        self,
        metadata,
        connection,
    ):
        specs = [
            (DOUBLE_PRECISION(), FLOAT()),
            # when binary_precision is supported
            # (DOUBLE_PRECISION(), oracle.FLOAT(binary_precision=126)),
            (BINARY_DOUBLE(), BINARY_DOUBLE()),
            (BINARY_FLOAT(), BINARY_FLOAT()),
            (FLOAT(5), FLOAT()),
            # when binary_precision is supported
            # (FLOAT(5), oracle.FLOAT(binary_precision=5),),
            (FLOAT(), FLOAT()),
            # when binary_precision is supported
            # (FLOAT(5), oracle.FLOAT(binary_precision=126),),
        ]
        self._run_test(metadata, connection, specs, ["precision"])
class IdentityReflectionTest(fixtures.TablesTest):
    """Reflection of Oracle IDENTITY column options (ON NULL, ORDER)."""
    __only_on__ = "oracle"
    __backend__ = True
    __requires__ = ("identity_columns",)
    @classmethod
    def define_tables(cls, metadata):
        Table("t1", metadata, Column("id1", Integer, Identity(on_null=True)))
        Table("t2", metadata, Column("id2", Integer, Identity(order=True)))
    def test_reflect_identity(self):
        insp = inspect(testing.db)
        # Oracle defaults for an IDENTITY column; individual tests below
        # override only the option they declared.
        common = {
            "always": False,
            "start": 1,
            "increment": 1,
            "on_null": False,
            "maxvalue": 10 ** 28 - 1,
            "minvalue": 1,
            "cycle": False,
            "cache": 20,
            "order": False,
        }
        for col in insp.get_columns("t1") + insp.get_columns("t2"):
            if col["name"] == "id1":
                is_true("identity" in col)
                exp = common.copy()
                exp["on_null"] = True
                eq_(col["identity"], exp)
            if col["name"] == "id2":
                is_true("identity" in col)
                exp = common.copy()
                exp["order"] = True
                eq_(col["identity"], exp)
| 32.555556 | 79 | 0.555416 |
acf7d7b679651990f5e29a9eccac34458f82ffa8 | 5,571 | py | Python | ir_featuremap_extractor.py | yas-sim/openvino-ir-utility | 8e026f7c2dd1312216a810f99baf7488336cc5fd | [
"Apache-2.0"
] | 8 | 2020-06-16T02:04:36.000Z | 2021-09-08T16:52:06.000Z | ir_featuremap_extractor.py | yas-sim/openvino-ir-utility | 8e026f7c2dd1312216a810f99baf7488336cc5fd | [
"Apache-2.0"
] | null | null | null | ir_featuremap_extractor.py | yas-sim/openvino-ir-utility | 8e026f7c2dd1312216a810f99baf7488336cc5fd | [
"Apache-2.0"
] | 1 | 2020-09-13T07:14:15.000Z | 2020-09-13T07:14:15.000Z | import os
import sys
import argparse
import copy
import pickle
import cv2
import xml.etree.ElementTree as et
from openvino.inference_engine import IECore
def splitFileName(file):
    """Split a path into (directory, basename-without-extension, extension)."""
    folder, leaf = os.path.split(file)
    stem, ext = os.path.splitext(leaf)
    return folder, stem, ext
def readXML(model):
    """Parse the IR .xml file that sits next to *model* and return the tree."""
    folder, stem, _ = splitFileName(model)
    return et.parse(os.path.join(folder, stem + '.xml'))
def readBIN(model):
    """Read the IR .bin weight file that sits next to *model*; return bytes."""
    folder, stem, _ = splitFileName(model)
    with open(os.path.join(folder, stem + '.bin'), 'rb') as f:
        return f.read()
def findNodeFromXML(xmltree, nodeid):
    """Return the <layer> element under <layers> whose id attribute equals
    *nodeid* (compared as int), or None when no such layer exists."""
    for node in xmltree.getroot().find('layers').findall('layer'):
        if int(node.attrib['id']) == nodeid:
            return node
    return None
def modifyXMLForFeatureVectorProbing(xmltree, nodeid):
    """Return a deep copy of *xmltree* with a dummy Result layer (id 9999)
    wired to the output port of layer *nodeid*, so that node's feature map
    becomes a network output.

    Returns:
        (modified ElementTree, name attribute of the target layer)
    """
    xmlcopy = copy.deepcopy(xmltree)
    layer = findNodeFromXML(xmlcopy, nodeid)
    # obtain output port information of the target node (port # and dims)
    outport = layer.find('output').find('port')
    outport_id = int(outport.attrib['id'])
    outport_dims = outport.findall('dim')
    # Serialize the <dim> elements so the dummy input port mirrors the
    # target output's shape exactly.
    outport_dims_string = ""
    for dim in outport_dims:
        outport_dims_string += et.tostring(dim).decode('utf-8')
    # generate XML strings
    dummyLayer = """
    <layer id="9999" name="featuremap_checker_dummy_node" type="Result" version="opset1">
        <input>
            <port id="0">
            {}
            </port>
        </input>
    </layer>
    """.format(outport_dims_string)
    dummyEdge = '    <edge from-layer="{}" from-port="{}" to-layer="9999" to-port="0"/>'.format(nodeid, outport_id)
    # modify XML to make a dummy branch path for feature map extraction
    xmlcopy.find('layers').append(et.fromstring(dummyLayer))
    xmlcopy.find('edges').append(et.fromstring(dummyEdge))
    # return the modified XML and the name of the target node (specified by 'nodeid')
    return xmlcopy, layer.attrib['name']
# TODO: You need to modify this function to make this fit to your model
# E.g. - If your model uses multiple inputs, you need to prepare input data for those inputs
# - If your model requires non-image data, you need to implement appropiate data preparation code and preprocessing for it
def prepareInputs(net_inputs, args):
    """Build the {input_blob_name: ndarray} dict fed to infer().

    Reads args.input as an image, resizes it to the first input blob's
    (H, W) and reorders it to NCHW. Only the first input blob is filled;
    models with multiple or non-image inputs need this function adapted.

    Raises:
        FileNotFoundError: if the image file cannot be read.
    """
    input_data = {}
    input_blob_names = list(net_inputs.keys())
    input0Name = input_blob_names[0]
    input0Info = net_inputs[input0Name]
    N, C, H, W = input0Info.tensor_desc.dims
    img = cv2.imread(args.input)  # default = image.jpg
    if img is None:
        # cv2.imread returns None instead of raising on a missing/unreadable
        # file; fail loudly here rather than crashing later in resize().
        raise FileNotFoundError(
            'Input image could not be read: {}'.format(args.input))
    img = cv2.resize(img, (W, H))
    img = img.transpose((2, 0, 1))  # HWC -> CHW
    img = img.reshape((1, C, H, W))  # add batch dimension (NCHW)
    input_data[input0Name] = img
    return input_data
def main(args):
    """For every eligible layer in the IR, build a probe network exposing
    that layer's output, run one inference, and pickle all feature maps
    to '<model>_featmap.pickle'."""
    originalXML = readXML(args.model)
    weight = readBIN(args.model)
    print('node# : nodeName')
    # nodeName -> [precision, dims, feature-map ndarray]
    feature_vectors = {}
    ie = IECore()
    root = originalXML.getroot()
    layers = root.find('layers')
    for layer in layers.findall('layer'):
        nodeid = int(layer.attrib['id'])
        nodetype = layer.attrib['type']
        # Skip node types that cannot / should not be probed.
        if nodetype in ['Const']: # , 'ShapeOf', 'Convert', 'StridedSlice', 'PriorBox']:
            continue
        if not layer.find('output') is None:
            nodeName = layer.attrib['name']
            outputport = layer.find('output').find('port')
            proc = outputport.attrib['precision']
            dims = []
            for dim in outputport.findall('dim'): # extract shape information
                dims.append(dim.text)
            # Build a copy of the network with this node exposed as a Result.
            modifiedXML, targetNodeName = modifyXMLForFeatureVectorProbing(originalXML, nodeid)
            XMLstr = et.tostring(modifiedXML.getroot())
            print('{} : {}'.format(nodeid, targetNodeName))
            net = ie.read_network(XMLstr, weight, init_from_buffer=True)
            try:
                exenet = ie.load_network(net, args.device)
            except RuntimeError:
                # Some probe graphs are not loadable on the device; skip them.
                #et.dump(modifiedXML)
                print('*** RuntimeError: load_network() -- Skip node \'{}\' - \'{}\''.format(targetNodeName, nodetype))
                continue
            inputs = prepareInputs(net.input_info, args) # ToDo: Prepare inputs for inference. User may need to modify this function to generate appropriate input for the specific model.
            res = exenet.infer(inputs)[nodeName]
            feature_vectors[nodeName] = [proc, dims, res]
            #print(nodeName, res)
            # Free device/network resources before the next probe.
            del exenet
            del net
    dirname, filename = os.path.split(args.model)
    basename, extname = os.path.splitext(filename)
    fname = basename+'_featmap.pickle'
    with open(fname, 'wb') as f:
        pickle.dump(feature_vectors, f)
    print('\nFeature maps are output to \'{}\''.format(fname))
# CLI entry point: parse model/input/device arguments and run extraction.
if __name__ == "__main__":
    print('*** OpenVINO feature map extractor')
    print('@@@ This program takes \'image.jpg\' and supply to the 1st input blob as default.')
    print('@@@ In case your model requires special data input, you need to modify \'prepareInputs()\' function to meet the requirements.')
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', type=str, help='input IR model path')
    parser.add_argument('-i', '--input', type=str, default='image.jpg', help='input image data path (default=image.jpg)')
    parser.add_argument('-d', '--device', type=str, default='CPU', help='device to use')
    args = parser.parse_args()
    main(args)
| 35.711538 | 189 | 0.641716 |
acf7d85cb9cca8225e603e62eb0b9153cbc93bf4 | 7,338 | py | Python | python/ray/rllib/evaluation/episode.py | vladfi1/ray | 3b141b26cd4af491b3c1fb8ce4dbb00265246b1e | [
"Apache-2.0"
] | 3 | 2018-06-06T22:36:49.000Z | 2018-06-06T22:41:55.000Z | python/ray/rllib/evaluation/episode.py | vladfi1/ray | 3b141b26cd4af491b3c1fb8ce4dbb00265246b1e | [
"Apache-2.0"
] | null | null | null | python/ray/rllib/evaluation/episode.py | vladfi1/ray | 3b141b26cd4af491b3c1fb8ce4dbb00265246b1e | [
"Apache-2.0"
] | 2 | 2019-04-09T12:30:24.000Z | 2020-07-23T13:45:40.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import random
import numpy as np
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.utils.annotations import DeveloperAPI
@DeveloperAPI
class MultiAgentEpisode(object):
"""Tracks the current state of a (possibly multi-agent) episode.
Attributes:
new_batch_builder (func): Create a new MultiAgentSampleBatchBuilder.
add_extra_batch (func): Return a built MultiAgentBatch to the sampler.
batch_builder (obj): Batch builder for the current episode.
total_reward (float): Summed reward across all agents in this episode.
length (int): Length of this episode.
episode_id (int): Unique id identifying this trajectory.
agent_rewards (dict): Summed rewards broken down by agent.
custom_metrics (dict): Dict where the you can add custom metrics.
user_data (dict): Dict that you can use for temporary storage.
Use case 1: Model-based rollouts in multi-agent:
A custom compute_actions() function in a policy graph can inspect the
current episode state and perform a number of rollouts based on the
policies and state of other agents in the environment.
Use case 2: Returning extra rollouts data.
The model rollouts can be returned back to the sampler by calling:
>>> batch = episode.new_batch_builder()
>>> for each transition:
batch.add_values(...) # see sampler for usage
>>> episode.extra_batches.add(batch.build_and_reset())
"""
def __init__(self, policies, policy_mapping_fn, batch_builder_factory,
extra_batch_callback):
self.new_batch_builder = batch_builder_factory
self.add_extra_batch = extra_batch_callback
self.batch_builder = batch_builder_factory()
self.total_reward = 0.0
self.length = 0
self.episode_id = random.randrange(2e9)
self.agent_rewards = defaultdict(float)
self.custom_metrics = {}
self.user_data = {}
self._policies = policies
self._policy_mapping_fn = policy_mapping_fn
self._next_agent_index = 0
self._agent_to_index = {}
self._agent_to_policy = {}
self._agent_to_rnn_state = {}
self._agent_to_last_obs = {}
self._agent_to_last_raw_obs = {}
self._agent_to_last_info = {}
self._agent_to_last_action = {}
self._agent_to_last_pi_info = {}
self._agent_to_prev_action = {}
self._agent_reward_history = defaultdict(list)
@DeveloperAPI
def policy_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the policy graph for the specified agent.
If the agent is new, the policy mapping fn will be called to bind the
agent to a policy for the duration of the episode.
"""
if agent_id not in self._agent_to_policy:
self._agent_to_policy[agent_id] = self._policy_mapping_fn(agent_id)
return self._agent_to_policy[agent_id]
@DeveloperAPI
def last_observation_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the last observation for the specified agent."""
return self._agent_to_last_obs.get(agent_id)
@DeveloperAPI
def last_raw_obs_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the last un-preprocessed obs for the specified agent."""
return self._agent_to_last_raw_obs.get(agent_id)
@DeveloperAPI
def last_info_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the last info for the specified agent."""
return self._agent_to_last_info.get(agent_id)
@DeveloperAPI
def last_action_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the last action for the specified agent, or zeros."""
if agent_id in self._agent_to_last_action:
return _flatten_action(self._agent_to_last_action[agent_id])
else:
policy = self._policies[self.policy_for(agent_id)]
flat = _flatten_action(policy.action_space.sample())
return np.zeros_like(flat)
@DeveloperAPI
def prev_action_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the previous action for the specified agent."""
if agent_id in self._agent_to_prev_action:
return _flatten_action(self._agent_to_prev_action[agent_id])
else:
# We're at t=0, so return all zeros.
return np.zeros_like(self.last_action_for(agent_id))
@DeveloperAPI
def prev_reward_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the previous reward for the specified agent."""
history = self._agent_reward_history[agent_id]
if len(history) >= 2:
return history[-2]
else:
# We're at t=0, so there is no previous reward, just return zero.
return 0.0
@DeveloperAPI
def rnn_state_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the last RNN state for the specified agent."""
if agent_id not in self._agent_to_rnn_state:
policy = self._policies[self.policy_for(agent_id)]
self._agent_to_rnn_state[agent_id] = policy.get_initial_state()
return self._agent_to_rnn_state[agent_id]
@DeveloperAPI
def last_pi_info_for(self, agent_id=_DUMMY_AGENT_ID):
"""Returns the last info object for the specified agent."""
return self._agent_to_last_pi_info[agent_id]
def _add_agent_rewards(self, reward_dict):
for agent_id, reward in reward_dict.items():
if reward is not None:
self.agent_rewards[agent_id,
self.policy_for(agent_id)] += reward
self.total_reward += reward
self._agent_reward_history[agent_id].append(reward)
def _set_rnn_state(self, agent_id, rnn_state):
self._agent_to_rnn_state[agent_id] = rnn_state
def _set_last_observation(self, agent_id, obs):
self._agent_to_last_obs[agent_id] = obs
def _set_last_raw_obs(self, agent_id, obs):
self._agent_to_last_raw_obs[agent_id] = obs
def _set_last_info(self, agent_id, info):
self._agent_to_last_info[agent_id] = info
def _set_last_action(self, agent_id, action):
if agent_id in self._agent_to_last_action:
self._agent_to_prev_action[agent_id] = \
self._agent_to_last_action[agent_id]
self._agent_to_last_action[agent_id] = action
def _set_last_pi_info(self, agent_id, pi_info):
self._agent_to_last_pi_info[agent_id] = pi_info
def _agent_index(self, agent_id):
if agent_id not in self._agent_to_index:
self._agent_to_index[agent_id] = self._next_agent_index
self._next_agent_index += 1
return self._agent_to_index[agent_id]
def _flatten_action(action):
# Concatenate tuple actions
if isinstance(action, list) or isinstance(action, tuple):
expanded = []
for a in action:
if not hasattr(a, "shape") or len(a.shape) == 0:
expanded.append(np.expand_dims(a, 1))
else:
expanded.append(a)
action = np.concatenate(expanded, axis=0).flatten()
return action
| 38.21875 | 79 | 0.676479 |
acf7d9267a7f5c74cfa0d92764b8eb8366b25f3d | 1,740 | py | Python | fairseq/optim/__init__.py | guillemcortes/FBK-Fairseq-ST | 66b4d0ff8b211750c8c955c3d3f277f41713996e | [
"BSD-3-Clause"
] | 26 | 2019-11-01T09:26:17.000Z | 2022-03-31T14:35:19.000Z | fairseq/optim/__init__.py | guillemcortes/FBK-Fairseq-ST | 66b4d0ff8b211750c8c955c3d3f277f41713996e | [
"BSD-3-Clause"
] | 12 | 2019-12-27T02:38:37.000Z | 2021-03-09T14:03:25.000Z | fairseq/optim/__init__.py | guillemcortes/FBK-Fairseq-ST | 66b4d0ff8b211750c8c955c3d3f277f41713996e | [
"BSD-3-Clause"
] | 14 | 2019-10-11T14:17:15.000Z | 2021-05-05T17:49:06.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_optimizer import FairseqOptimizer
from .fp16_optimizer import FP16Optimizer
OPTIMIZER_REGISTRY = {}
OPTIMIZER_CLASS_NAMES = set()
def build_optimizer(args, params):
    """Instantiate the registered optimizer named by ``args.optimizer``
    over the trainable (requires_grad) subset of *params*."""
    trainable = [p for p in params if p.requires_grad]
    return OPTIMIZER_REGISTRY[args.optimizer](args, trainable)
def register_optimizer(name):
    """Decorator to register a new optimizer."""

    def _register(cls):
        if name in OPTIMIZER_REGISTRY:
            raise ValueError('Cannot register duplicate optimizer ({})'.format(name))
        if not issubclass(cls, FairseqOptimizer):
            raise ValueError('Optimizer ({}: {}) must extend FairseqOptimizer'.format(name, cls.__name__))
        if cls.__name__ in OPTIMIZER_CLASS_NAMES:
            # We use the optimizer class name as a unique identifier in
            # checkpoints, so all optimizer must have unique class names.
            raise ValueError('Cannot register optimizer with duplicate class name ({})'.format(cls.__name__))
        OPTIMIZER_REGISTRY[name] = cls
        OPTIMIZER_CLASS_NAMES.add(cls.__name__)
        return cls

    return _register
# automatically import any Python files in the optim/ directory
# (importing a module triggers its @register_optimizer decorators,
# which populate OPTIMIZER_REGISTRY as a side effect)
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('fairseq.optim.' + module)
| 37.021277 | 109 | 0.717241 |
acf7d9fc57fe609a33807d4ee63f2044bf4a1148 | 1,482 | py | Python | scripts/01_data_mapping/012_pairsam2cooler.py | agalitsyna/sc_dros | a7ffa8cac89fbfafa17fc02c245bcde1372dcc4e | [
"MIT"
] | 1 | 2021-01-04T20:52:14.000Z | 2021-01-04T20:52:14.000Z | scripts/01_data_mapping/012_pairsam2cooler.py | agalitsyna/sc_dros | a7ffa8cac89fbfafa17fc02c245bcde1372dcc4e | [
"MIT"
] | null | null | null | scripts/01_data_mapping/012_pairsam2cooler.py | agalitsyna/sc_dros | a7ffa8cac89fbfafa17fc02c245bcde1372dcc4e | [
"MIT"
] | 1 | 2020-04-30T17:14:02.000Z | 2020-04-30T17:14:02.000Z | """
Converts a set of pairsam files for a particular cell into cooler file.
Example run in bash:
pref="A6"; python pairsam2cooler.py "../DATA/PAIR/${pref}*.pairsam" "$pref" "../data/cool/"
Parameters:
argv[1] - mask of input pairsam files
argv[2] - cell name (or prefix)
"""
from utils import * # from ../../lib/ folder
from sys import argv
# CLI: argv[1] = glob mask of input pairsam files, argv[2] = cell name
# (also used as output prefix), argv[3] = output directory.
mask = argv[1]
pref = cell = argv[2]
output_path = argv[3]
output_cool = f"{output_path}/{pref}.{{}}.cool" # A mask for writing cool files
output_pairix = f"{output_path}/{pref}.pairix"
output_stats = f"{output_path}/{pref}.stats"
# NOTE(review): chromnames is never used below — dead variable? confirm.
chromnames = ['chr4', 'chrX', 'chr2L', 'chr2R', 'chr3L', 'chr3R']
# `logging`, `glob` and the helpers below presumably come from the
# wildcard `utils` import — verify against ../../lib/utils.
logging.debug(f"Running pairsam2cool for: {mask} {pref}\n Writing into {output_path}/{pref} files")
filelist = glob.glob(mask)
# Experiment name = file basename up to the first dot.
exp_list = [x.split('/')[-1].split('.')[0] for x in filelist]
print(list(zip(filelist, exp_list)))
logging.debug("Reading dataframe...")
# Reading pairsam files into single dataframe
df = read_pairsams(filelist, exp_list, cell)
# Filtering dataframe by proper ligation junctions
df_filtered, stats = filter_pair_df(df)
stats['cell'] = cell
stats['exp'] = cell
# Writing filtering statistics (one "key<TAB>value" line per stat)
with open(output_stats, 'w') as outf:
    for k in sorted(stats.keys()):
        outf.write('{}\t{}\n'.format(k, stats[k]))
# Writing output to cooler file
create_cooler(df_filtered, output_pairix, output_cool,
              "../../data/GENOME/dm3.reduced.chrom.sizes",
              resolutions_list=[10])
acf7da58dd5897c97c2545f7845029303f37344a | 3,049 | py | Python | test/test_shoppinglist.py | fossabot/pygrocydm | 6da0c1eda19c6059941440465ac815cead2760b3 | [
"MIT"
] | null | null | null | test/test_shoppinglist.py | fossabot/pygrocydm | 6da0c1eda19c6059941440465ac815cead2760b3 | [
"MIT"
] | null | null | null | test/test_shoppinglist.py | fossabot/pygrocydm | 6da0c1eda19c6059941440465ac815cead2760b3 | [
"MIT"
] | null | null | null | import json
from datetime import datetime
from test.test_const import CONST_BASE_URL, CONST_PORT, CONST_SSL
from unittest import TestCase
from pygrocydm.grocy_api_client import GrocyApiClient
from pygrocydm.shopping_list import (SHOPPING_LIST_ENDPOINT,
SHOPPING_LISTS_ENDPOINT, ShoppingList,
ShoppingListItem)
class TestShoppingList(TestCase):
    """Integration tests for ShoppingList against a live grocy demo server."""
    def setUp(self):
        self.api = GrocyApiClient(CONST_BASE_URL, "demo_mode", verify_ssl=CONST_SSL, port=CONST_PORT)
        # NOTE(review): setUp uses the plural SHOPPING_LISTS_ENDPOINT while
        # test_parse_json passes the singular SHOPPING_LIST_ENDPOINT to the
        # ShoppingList constructor — confirm the constants are not swapped.
        self.endpoint = SHOPPING_LISTS_ENDPOINT + '/1'
    def test_shopping_list_data_diff_valid(self):
        """Server response keys must match the known shopping-list schema."""
        shopping_list = self.api.do_request("GET", self.endpoint)
        shopping_list_keys = shopping_list.keys()
        moked_shopping_list_json = """{
            "id": "1",
            "name": "Shopping list",
            "description": null,
            "row_created_timestamp": "2020-03-02 00:50:09"
        }"""
        moked_keys = json.loads(moked_shopping_list_json).keys()
        self.assertCountEqual(list(shopping_list_keys), list(moked_keys))
    def test_parse_json(self):
        """Parsed ShoppingList fields must have the expected Python types."""
        shopping_list = ShoppingList(self.api, SHOPPING_LIST_ENDPOINT, self.api.do_request("GET", self.endpoint))
        assert isinstance(shopping_list.id, int)
        assert isinstance(shopping_list.description, str) or shopping_list.description is None
        assert isinstance(shopping_list.name, str)
        assert isinstance(shopping_list.row_created_timestamp, datetime)
class TestShoppingListItem(TestCase):
    """Integration tests for ShoppingListItem against a live grocy demo server."""
    def setUp(self):
        self.api = GrocyApiClient(CONST_BASE_URL, "demo_mode", verify_ssl=CONST_SSL, port=CONST_PORT)
        # NOTE(review): this class uses the singular SHOPPING_LIST_ENDPOINT
        # here but the plural SHOPPING_LISTS_ENDPOINT in test_parse_json —
        # the mirror image of TestShoppingList. Confirm which is intended.
        self.endpoint = SHOPPING_LIST_ENDPOINT + '/1'
    def test_shopping_list_item_data_diff_valid(self):
        """Server response keys must match the known list-item schema."""
        shopping_list_item = self.api.do_request("GET", self.endpoint)
        shopping_list_item_keys = shopping_list_item.keys()
        moked_shopping_list_item_json = """{
            "id": "1",
            "product_id": null,
            "note": "Some good snacks",
            "amount": "1",
            "row_created_timestamp": "2020-03-02 00:50:10",
            "shopping_list_id": "1",
            "done": "0"
        }"""
        moked_keys = json.loads(moked_shopping_list_item_json).keys()
        self.assertCountEqual(list(shopping_list_item_keys), list(moked_keys))
    def test_parse_json(self):
        """Parsed ShoppingListItem fields must have the expected Python types."""
        shopping_list_item = ShoppingListItem(self.api, SHOPPING_LISTS_ENDPOINT, self.api.do_request("GET", self.endpoint))
        assert isinstance(shopping_list_item.id, int)
        assert isinstance(shopping_list_item.product_id, int) or shopping_list_item.product_id is None
        assert isinstance(shopping_list_item.note, str) or shopping_list_item.note is None
        assert isinstance(shopping_list_item.amount, float)
        assert isinstance(shopping_list_item.shopping_list_id, int)
        assert isinstance(shopping_list_item.done, bool)
        assert isinstance(shopping_list_item.row_created_timestamp, datetime)
acf7db6b2ad2b5bfc157771ffbaaebb0a9660fdd | 9,838 | py | Python | dashboard/modules/job/cli.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | dashboard/modules/job/cli.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 41 | 2021-09-21T01:13:48.000Z | 2022-03-19T07:12:22.000Z | dashboard/modules/job/cli.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 1 | 2019-09-24T16:24:49.000Z | 2019-09-24T16:24:49.000Z | import asyncio
import os
import pprint
from subprocess import list2cmdline
import time
from typing import Optional, Tuple
import click
import ray.ray_constants as ray_constants
from ray.autoscaler._private.cli_logger import add_click_logging_options, cli_logger, cf
from ray.job_submission import JobStatus, JobSubmissionClient
from ray.internal.storage import _load_class
from ray.util.annotations import PublicAPI
from ray.dashboard.modules.dashboard_sdk import parse_runtime_env_args
def _get_sdk_client(
    address: Optional[str], create_cluster_if_needed: bool = False
) -> JobSubmissionClient:
    """Build a :class:`JobSubmissionClient`, falling back to the
    ``RAY_ADDRESS`` environment variable when no address is given."""
    if address is None:
        address = os.environ.get("RAY_ADDRESS")
    cli_logger.labeled_value("Job submission server address", address)
    return JobSubmissionClient(address, create_cluster_if_needed)
def _log_big_success_msg(success_msg):
    """Print *success_msg* framed by separator rules of matching width."""
    rule = "-" * len(success_msg)
    cli_logger.newline()
    for line in (rule, success_msg, rule):
        cli_logger.success(line)
    cli_logger.newline()
def _log_big_error_msg(success_msg):
    """Print the message framed by separator rules, at error level.

    NOTE(review): the parameter name ``success_msg`` looks copy-pasted from
    the success variant; kept unchanged to preserve the interface.
    """
    rule = "-" * len(success_msg)
    cli_logger.newline()
    for line in (rule, success_msg, rule):
        cli_logger.error(line)
    cli_logger.newline()
def _log_job_status(client: JobSubmissionClient, job_id: str):
    """Fetch and pretty-print the current status of *job_id*."""
    info = client.get_job_info(job_id)
    current = info.status
    if current == JobStatus.SUCCEEDED:
        _log_big_success_msg(f"Job '{job_id}' succeeded")
        return
    if current == JobStatus.STOPPED:
        cli_logger.warning(f"Job '{job_id}' was stopped")
        return
    if current == JobStatus.FAILED:
        _log_big_error_msg(f"Job '{job_id}' failed")
    else:
        # Catch-all for non-terminal states (e.g. pending/running).
        cli_logger.print(f"Status for job '{job_id}': {current}")
    # FAILED and the catch-all both surface the server-side message, if any.
    if info.message is not None:
        cli_logger.print(f"Status message: {info.message}", no_format=True)
async def _tail_logs(client: JobSubmissionClient, job_id: str):
    """Stream log chunks for *job_id* to stdout, then report final status."""
    async for chunk in client.tail_job_logs(job_id):
        print(chunk, end="")
    _log_job_status(client, job_id)
@click.group("job")
def job_cli_group():
    # Container group for the `ray job ...` subcommands registered below.
    pass
@job_cli_group.command()
@click.option(
    "--address",
    type=str,
    default=None,
    required=False,
    help=(
        "Address of the Ray cluster to connect to. Can also be specified "
        "using the RAY_ADDRESS environment variable."
    ),
)
@click.option(
    "--job-id",
    type=str,
    default=None,
    required=False,
    help=("Job ID to specify for the job. " "If not provided, one will be generated."),
)
@click.option(
    "--runtime-env",
    type=str,
    default=None,
    required=False,
    help="Path to a local YAML file containing a runtime_env definition.",
)
@click.option(
    "--runtime-env-json",
    type=str,
    default=None,
    required=False,
    help="JSON-serialized runtime_env dictionary.",
)
@click.option(
    "--working-dir",
    type=str,
    default=None,
    required=False,
    help=(
        "Directory containing files that your job will run in. Can be a "
        "local directory or a remote URI to a .zip file (S3, GS, HTTP). "
        "If specified, this overrides the option in --runtime-env."
    ),
)
@click.option(
    "--no-wait",
    is_flag=True,
    type=bool,
    default=False,
    help="If set, will not stream logs and wait for the job to exit.",
)
@add_click_logging_options
@click.argument("entrypoint", nargs=-1, required=True, type=click.UNPROCESSED)
@PublicAPI
def submit(
    address: Optional[str],
    job_id: Optional[str],
    runtime_env: Optional[str],
    runtime_env_json: Optional[str],
    working_dir: Optional[str],
    entrypoint: Tuple[str],
    no_wait: bool,
):
    """Submits a job to be run on the cluster.
    Example:
        ray job submit -- python my_script.py --arg=val
    """
    if ray_constants.RAY_JOB_SUBMIT_HOOK in os.environ:
        # Submit all args as **kwargs per the JOB_SUBMIT_HOOK contract.
        # Return here so the hook fully replaces the default submission path;
        # without the return, execution fell through and submitted the job a
        # second time against the configured cluster.
        return _load_class(os.environ[ray_constants.RAY_JOB_SUBMIT_HOOK])(
            address=address,
            job_id=job_id,
            runtime_env=runtime_env,
            runtime_env_json=runtime_env_json,
            working_dir=working_dir,
            entrypoint=entrypoint,
            no_wait=no_wait,
        )
    client = _get_sdk_client(address, create_cluster_if_needed=True)
    # Merge the YAML file / JSON string / --working-dir override into one
    # runtime_env dict (working_dir wins over the other two sources).
    final_runtime_env = parse_runtime_env_args(
        runtime_env=runtime_env,
        runtime_env_json=runtime_env_json,
        working_dir=working_dir,
    )
    job_id = client.submit_job(
        entrypoint=list2cmdline(entrypoint),
        job_id=job_id,
        runtime_env=final_runtime_env,
    )
    _log_big_success_msg(f"Job '{job_id}' submitted successfully")
    with cli_logger.group("Next steps"):
        cli_logger.print("Query the logs of the job:")
        with cli_logger.indented():
            cli_logger.print(cf.bold(f"ray job logs {job_id}"))
        cli_logger.print("Query the status of the job:")
        with cli_logger.indented():
            cli_logger.print(cf.bold(f"ray job status {job_id}"))
        cli_logger.print("Request the job to be stopped:")
        with cli_logger.indented():
            cli_logger.print(cf.bold(f"ray job stop {job_id}"))
    cli_logger.newline()
    sdk_version = client.get_version()
    # sdk version 0 does not have log streaming
    if not no_wait:
        if int(sdk_version) > 0:
            cli_logger.print(
                "Tailing logs until the job exits " "(disable with --no-wait):"
            )
            asyncio.get_event_loop().run_until_complete(_tail_logs(client, job_id))
        else:
            cli_logger.warning(
                "Tailing logs is not enabled for job sdk client version "
                f"{sdk_version}. Please upgrade your ray to latest version "
                "for this feature."
            )
@job_cli_group.command()
@click.option(
    "--address",
    type=str,
    default=None,
    required=False,
    help=(
        "Address of the Ray cluster to connect to. Can also be specified "
        "using the RAY_ADDRESS environment variable."
    ),
)
@click.argument("job-id", type=str)
@add_click_logging_options
@PublicAPI(stability="beta")
def status(address: Optional[str], job_id: str):
    """Queries for the current status of a job.
    Example:
        ray job status <my_job_id>
    """
    # Resolve the client (honoring RAY_ADDRESS) and print the job's status.
    _log_job_status(_get_sdk_client(address), job_id)
@job_cli_group.command()
@click.option(
    "--address",
    type=str,
    default=None,
    required=False,
    help=(
        "Address of the Ray cluster to connect to. Can also be specified "
        "using the RAY_ADDRESS environment variable."
    ),
)
@click.option(
    "--no-wait",
    is_flag=True,
    type=bool,
    default=False,
    help="If set, will not wait for the job to exit.",
)
@click.argument("job-id", type=str)
@add_click_logging_options
@PublicAPI(stability="beta")
def stop(address: Optional[str], no_wait: bool, job_id: str):
    """Attempts to stop a job.
    Example:
        ray job stop <my_job_id>
    """
    client = _get_sdk_client(address)
    cli_logger.print(f"Attempting to stop job {job_id}")
    client.stop_job(job_id)
    if no_wait:
        return
    cli_logger.print(
        f"Waiting for job '{job_id}' to exit " f"(disable with --no-wait):"
    )
    # Poll once per second until the job reaches a terminal state.
    terminal_states = {JobStatus.STOPPED, JobStatus.SUCCEEDED, JobStatus.FAILED}
    while True:
        current = client.get_job_status(job_id)
        if current in terminal_states:
            _log_job_status(client, job_id)
            return
        cli_logger.print(f"Job has not exited yet. Status: {current}")
        time.sleep(1)
@job_cli_group.command()
@click.option(
    "--address",
    type=str,
    default=None,
    required=False,
    help=(
        "Address of the Ray cluster to connect to. Can also be specified "
        "using the RAY_ADDRESS environment variable."
    ),
)
@click.argument("job-id", type=str)
@click.option(
    "-f",
    "--follow",
    is_flag=True,
    type=bool,
    default=False,
    help="If set, follow the logs (like `tail -f`).",
)
@add_click_logging_options
@PublicAPI(stability="beta")
def logs(address: Optional[str], job_id: str, follow: bool):
    """Gets the logs of a job.
    Example:
        ray job logs <my_job_id>
    """
    client = _get_sdk_client(address)
    # Version 0 of the SDK predates log streaming.
    sdk_version = client.get_version()
    if not follow:
        # Set no_format to True because the logs may have unescaped "{" and "}"
        # and the CLILogger calls str.format().
        cli_logger.print(client.get_job_logs(job_id), end="", no_format=True)
    elif int(sdk_version) > 0:
        asyncio.get_event_loop().run_until_complete(_tail_logs(client, job_id))
    else:
        cli_logger.warning(
            "Tailing logs is not enabled for job sdk client version "
            f"{sdk_version}. Please upgrade your ray to latest version "
            "for this feature."
        )
@job_cli_group.command()
@click.option(
    "--address",
    type=str,
    default=None,
    required=False,
    help=(
        "Address of the Ray cluster to connect to. Can also be specified "
        "using the RAY_ADDRESS environment variable."
    ),
)
@add_click_logging_options
@PublicAPI(stability="beta")
def list(address: Optional[str]):
    """Lists all running jobs and their information.
    Example:
        ray job list
    """
    jobs = _get_sdk_client(address).list_jobs()
    # Set no_format to True because the logs may have unescaped "{" and "}"
    # and the CLILogger calls str.format().
    cli_logger.print(pprint.pformat(jobs), no_format=True)
| 28.85044 | 88 | 0.654808 |
acf7dbac8b3b411e471d74cda7b06915a54c7214 | 5,469 | py | Python | 25-ratios/model/yolov3.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | 25-ratios/model/yolov3.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | 25-ratios/model/yolov3.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | import sys
sys.path.append("..")
# AbsolutePath = os.path.abspath(__file__) #将相对路径转换成绝对路径
# SuperiorCatalogue = os.path.dirname(AbsolutePath) #相对路径的上级路径
# BaseDir = os.path.dirname(SuperiorCatalogue) #在“SuperiorCatalogue”的基础上在脱掉一层路径,得到我们想要的路径。
# sys.path.insert(0,BaseDir) #将我们取出来的路径加入
import torch.nn as nn
import torch
from model.backbones.darknet53 import Darknet53
from model.necks.yolo_fpn import FPN_YOLOV3
from model.head.yolo_head import Yolo_head
from model.layers.conv_module import Convolutional
import config.yolov3_config_voc as cfg
import numpy as np
from utils.tools import *
class Yolov3(nn.Module):
    """
    YOLOv3 detector: Darknet-53 backbone + FPN neck + three detection heads
    (small / medium / large scale).
    Note: in __init__() the modules must be defined in this exact order,
    because load_darknet_weights() walks self.modules() sequentially and the
    weight file stores parameters in that same order.
    """
    def __init__(self, init_weights=True):
        super(Yolov3, self).__init__()
        # Per-scale anchor boxes and strides come from the VOC config.
        self.__anchors = torch.FloatTensor(cfg.MODEL["ANCHORS"])
        self.__strides = torch.FloatTensor(cfg.MODEL["STRIDES"])
        # Number of object classes.
        self.__nC = cfg.DATA["NUM"]
        # Per-anchor outputs: nC class scores + 5 (box + objectness) + 25
        # extra channels — presumably the "25 ratios" head this variant adds;
        # TODO confirm against the head/loss implementation.
        self.__out_channel = cfg.MODEL["ANCHORS_PER_SCLAE"] * (self.__nC + 5 + 25)
        self.__backnone = Darknet53()
        self.__fpn = FPN_YOLOV3(fileters_in=[1024, 512, 256],
                                fileters_out=[self.__out_channel, self.__out_channel, self.__out_channel])
        # small
        self.__head_s = Yolo_head(nC=self.__nC, anchors=self.__anchors[0], stride=self.__strides[0])
        # medium
        self.__head_m = Yolo_head(nC=self.__nC, anchors=self.__anchors[1], stride=self.__strides[1])
        # large
        self.__head_l = Yolo_head(nC=self.__nC, anchors=self.__anchors[2], stride=self.__strides[2])
        if init_weights:
            self.__init_weights()
    def forward(self, x):
        out = []
        # Backbone yields three feature maps; the FPN takes them deepest-first
        # and returns them back in small/medium/large order.
        x_s, x_m, x_l = self.__backnone(x)
        x_s, x_m, x_l = self.__fpn(x_l, x_m, x_s)
        out.append(self.__head_s(x_s))
        out.append(self.__head_m(x_m))
        out.append(self.__head_l(x_l))
        if self.training:
            # Each head returns a (raw, decoded) pair; regroup into two tuples.
            p, p_d = list(zip(*out))
            return p, p_d  # small, medium, large
        else:
            # In eval mode, concatenate the decoded predictions of all scales.
            p, p_d = list(zip(*out))
            return p, torch.cat(p_d, 0)
    def __init_weights(self):
        """Initialize Conv2d weights from N(0, 0.01) and BatchNorm2d to
        weight=1 / bias=0 (the paper's conventional initialization)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.normal_(m.weight.data, 0.0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
                # print("initing {}".format(m))
            elif isinstance(m, nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight.data, 1.0)
                torch.nn.init.constant_(m.bias.data, 0.0)
                # print("initing {}".format(m))
    def load_darknet_weights(self, weight_file, cutoff=52):
        """Load pretrained Darknet weights into the first *cutoff*
        Convolutional modules (the backbone). The file layout is a 5-int32
        header followed by a flat float32 array, consumed in module order;
        adapted from https://github.com/ultralytics/yolov3/blob/master/models.py"""
        print("load darknet weights : ", weight_file)
        with open(weight_file, 'rb') as f:
            # Skip the 5-value header (version info etc.).
            _ = np.fromfile(f, dtype=np.int32, count=5)
            weights = np.fromfile(f, dtype=np.float32)
        count = 0
        ptr = 0  # read cursor into the flat weight array
        for m in self.modules():
            if isinstance(m, Convolutional):
                # only initing backbone conv's weights
                if count == cutoff:
                    break
                count += 1
                conv_layer = m._Convolutional__conv
                if m.norm == "bn":
                    # Load BN bias, weights, running mean and running variance
                    bn_layer = m._Convolutional__norm
                    num_b = bn_layer.bias.numel()  # Number of biases
                    # Bias
                    bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias.data)
                    bn_layer.bias.data.copy_(bn_b)
                    ptr += num_b
                    # Weight
                    bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight.data)
                    bn_layer.weight.data.copy_(bn_w)
                    ptr += num_b
                    # Running Mean
                    bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
                    bn_layer.running_mean.data.copy_(bn_rm)
                    ptr += num_b
                    # Running Var
                    bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
                    bn_layer.running_var.data.copy_(bn_rv)
                    ptr += num_b
                    # print("loading weight {}".format(bn_layer))
                else:
                    # Load conv. bias (only present when the conv has no BN)
                    num_b = conv_layer.bias.numel()
                    conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias.data)
                    conv_layer.bias.data.copy_(conv_b)
                    ptr += num_b
                # Load conv. weights
                num_w = conv_layer.weight.numel()
                conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight.data)
                conv_layer.weight.data.copy_(conv_w)
                ptr += num_w
                # print("loading weight {}".format(conv_layer))
if __name__ == '__main__':
    # Smoke test: build the network and push one random batch through it.
    model = Yolov3()
    print(model)
    dummy_batch = torch.randn(12, 3, 416, 416)
    p, p_d = model(dummy_batch)
    for scale in range(3):
        print(p[scale].shape)
        print(p_d[scale].shape)
acf7dbd2c9e313d1e288f74611e6a3121867f148 | 582 | py | Python | broccoli/funcstions/generic/tile.py | naritotakizawa/broccoli | 7feddc9353313cc2ba0d39228a4109acfdd71d4f | [
"MIT"
] | 5 | 2018-08-08T07:17:49.000Z | 2018-10-09T02:42:29.000Z | broccoli/funcstions/generic/tile.py | naritotakizawa/broccoli | 7feddc9353313cc2ba0d39228a4109acfdd71d4f | [
"MIT"
] | 68 | 2018-07-05T07:12:34.000Z | 2020-12-28T04:51:32.000Z | broccoli/funcstions/generic/tile.py | naritotakizawa/broccoli | 7feddc9353313cc2ba0d39228a4109acfdd71d4f | [
"MIT"
] | null | null | null | """タイルに関する、汎用的な関数を提供する。"""
from broccoli import register
from broccoli import const
@register.function('generic.tile.only_player', attr='is_public', material='tile')
def only_player(self, obj=None):
    """Allow passage for the player only.

    Passage is denied when *obj* is None; otherwise it is granted exactly
    when *obj* is of the player kind.
    """
    return obj is not None and obj.kind == const.PLAYER
@register.function('generic.tile.goal', attr='on_self', material='tile')
def goal(self, obj):
    """Jump to the next map when the player steps onto this tile."""
    if obj.kind != const.PLAYER:
        return
    obj.canvas.manager.jump()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.