Dataset schema (⌀ marks a nullable field):

- hexsha: string, length 40
- size: int64, 1 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 239
- max_stars_repo_name: string, length 5 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 3 to 239
- max_issues_repo_name: string, length 5 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 3 to 239
- max_forks_repo_name: string, length 5 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 1 to 1.03M
- avg_line_length: float64, 1 to 958k
- max_line_length: int64, 1 to 1.03M
- alphanum_fraction: float64, 0 to 1

Each record below lists its metadata, then its file content, then its per-file stats (avg_line_length | max_line_length | alphanum_fraction).
hexsha: 4a1209c5954e1405274be90df26ff3a3a703cb49 | size: 1,771 | ext: py | lang: Python
repo_path: src/solvers/backtracking.py | repo_name: daniel-toncu/N-Queens
head_hexsha: 363f3f43af7ee453c060d296979c041d50327794 | licenses: ["MIT"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
"""
"""
from solvers.base import BaseSolver
class BacktrackingSolver(BaseSolver):
"""
"""
def _is_safe_place(self, board, row, column):
"""
"""
# Check Row on Left Side
for i in range(column):
if board[row][i] == 1:
return False
# Check Upper Diagonal on Left Side
for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
if board[i][j] == 1:
return False
# Check Lower Diagonal on Left Side
for i, j in zip(range(row, self._n, 1), range(column, -1, -1)):
if board[i][j] == 1:
return False
return True
def _print_configuration(self, board):
"""
"""
print()
for line in board:
for cell in line:
character = "Q" if cell == 1 else "_"
print(character, end=" ")
print()
def _backtrack(self, board, column):
"""
"""
if column >= self._n:
# It is a Solution
self._solutions += 1
if self._print_solutions:
self._print_configuration(board)
return
for row in range(self._n):
if self._is_safe_place(board, row, column):
replicated_board = [
line[:] for line in board
]
replicated_board[row][column] = 1
self._backtrack(replicated_board, column + 1)
def _solve(self):
"""
"""
self._solutions = 0
board = [
[0 for _ in range(self._n)]
for _ in range(self._n)
]
self._backtrack(board, 0)
return self._solutions
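A note on running this file: BaseSolver is imported from solvers.base, which is not included in this record. A minimal, hypothetical stub that makes the class above executable on its own might look like this (the attribute names are inferred from how the solver uses them, not taken from the real base class):

class BaseSolver:
    """Hypothetical stand-in for solvers.base.BaseSolver."""
    def __init__(self, n, print_solutions=False):
        self._n = n                              # board size, read by _backtrack
        self._print_solutions = print_solutions  # toggles _print_configuration
        self._solutions = 0
    def solve(self):
        return self._solve()                     # delegate to the subclass

With that stub substituted for the import, BacktrackingSolver(6).solve() returns 4, the known solution count for the 6-queens puzzle.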
avg_line_length: 20.125 | max_line_length: 71 | alphanum_fraction: 0.468097

hexsha: 4a120ae022baf346ae18fe691b11e1650d0df2db | size: 2,475 | ext: py | lang: Python
repo_path: isometric-deformation/ext/libigl/python/tutorial/605_Tetgen.py
repo_name: jiayaozhang/CS-370-Mesh-Processing
head_hexsha: 26646d29af8cbc0d461302afa137f12b508b8b1b | licenses: ["MIT"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: 187 | stars_event: 2019-01-23T04:07:11.000Z → 2022-03-27T03:44:58.000Z
max_issues_count: 8 | issues_event: 2019-03-22T13:27:38.000Z → 2020-06-18T13:23:23.000Z
max_forks_count: 34 | forks_event: 2019-02-13T01:11:12.000Z → 2022-02-28T03:29:40.000Z
content:
# This file is part of libigl, a simple c++ geometry processing library.
#
# Copyright (C) 2017 Sebastian Koch <s.koch@tu-berlin.de> and Daniele Panozzo <daniele.panozzo@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla Public License
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import sys, os
# Add the igl library to the modules search path
sys.path.insert(0, os.getcwd() + "/../")
import pyigl as igl
from shared import TUTORIAL_SHARED_PATH, check_dependencies
dependencies = ["tetgen", "glfw"]
check_dependencies(dependencies)
# Input polygon
V = igl.eigen.MatrixXd()
F = igl.eigen.MatrixXi()
B = igl.eigen.MatrixXd()
# Tetrahedralized interior
TV = igl.eigen.MatrixXd()
TT = igl.eigen.MatrixXi()
TF = igl.eigen.MatrixXi()
viewer = igl.glfw.Viewer()
def key_down(viewer, key, modifier):
if key >= ord('1') and key <= ord('9'):
t = float((key - ord('1')) + 1) / 9.0
v = igl.eigen.MatrixXd()
v = B.col(2) - B.col(2).minCoeff()
v /= v.col(0).maxCoeff()
s = []
for i in range(v.size()):
if v[i, 0] < t:
s.append(i)
V_temp = igl.eigen.MatrixXd(len(s) * 4, 3)
F_temp = igl.eigen.MatrixXd(len(s) * 4, 3).castint()
for i in range(len(s)):
V_temp.setRow(i * 4 + 0, TV.row(TT[s[i], 0]))
V_temp.setRow(i * 4 + 1, TV.row(TT[s[i], 1]))
V_temp.setRow(i * 4 + 2, TV.row(TT[s[i], 2]))
V_temp.setRow(i * 4 + 3, TV.row(TT[s[i], 3]))
F_temp.setRow(i * 4 + 0, igl.eigen.MatrixXd([[(i*4)+0, (i*4)+1, (i*4)+3]]).castint())
F_temp.setRow(i * 4 + 1, igl.eigen.MatrixXd([[(i*4)+0, (i*4)+2, (i*4)+1]]).castint())
F_temp.setRow(i * 4 + 2, igl.eigen.MatrixXd([[(i*4)+3, (i*4)+2, (i*4)+0]]).castint())
F_temp.setRow(i * 4 + 3, igl.eigen.MatrixXd([[(i*4)+1, (i*4)+2, (i*4)+3]]).castint())
viewer.data().clear()
viewer.data().set_mesh(V_temp, F_temp)
viewer.data().set_face_based(True)
else:
return False
return True
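# (Reading of the handler above, for orientation: keys '1'..'9' re-slice the
# tet mesh by the normalized z-coordinate of the tet barycenters in B, so
# higher digits reveal more of the interior; any other key returns False so
# the viewer handles it normally.)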
# Load a surface mesh
igl.readOFF(TUTORIAL_SHARED_PATH + "fertility.off", V, F)
# Tetrahedralize the interior
igl.tetgen.tetrahedralize(V, F, "pq1.414Y", TV, TT, TF)
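# ("pq1.414Y" is a tetgen switch string: 'p' tetrahedralizes the input
# piecewise-linear complex, 'q1.414' imposes a quality bound of 1.414 on the
# tetrahedra's radius-edge ratio, and 'Y' preserves the input surface mesh,
# per tetgen's documented flags.)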
# Compute barycenters
igl.barycenter(TV, TT, B)
# Plot the generated mesh
key_down(viewer, ord('5'), 0)
viewer.callback_key_down = key_down
viewer.launch()
avg_line_length: 30.182927 | max_line_length: 105 | alphanum_fraction: 0.598788

hexsha: 4a120b692ee7f2eb7220fdf1a372094184b7a72b | size: 5,372 | ext: py | lang: Python
repo_path: test/test_java_timestamp.py | repo_name: pombredanne/javaproperties
head_hexsha: 23c23bfae27d0d7c94338810e84d763ebd0d81f8 | licenses: ["MIT"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: 21 | stars_event: 2016-10-03T18:38:45.000Z → 2021-06-29T02:20:48.000Z
max_issues_count: 13 | issues_event: 2017-03-07T17:53:05.000Z → 2020-11-27T22:07:11.000Z
max_forks_count: 11 | forks_event: 2016-12-05T08:48:33.000Z → 2022-03-11T10:15:11.000Z
content:
from datetime import datetime
import sys
from dateutil.tz import tzstr
import pytest
from javaproperties import java_timestamp
# Unix timestamps and datetime objects don't support leap seconds or month 13,
# so there's no need (and no way) to test handling of them here.
old_pacific = tzstr("PST8PDT,M4.1.0,M10.5.0")
@pytest.mark.parametrize(
"ts,s",
[
(None, ""),
(False, ""),
(0, "Wed Dec 31 19:00:00 EST 1969"),
(1234567890.101112, "Fri Feb 13 18:31:30 EST 2009"),
(1234567890.987654, "Fri Feb 13 18:31:30 EST 2009"),
# Months:
(1451624400, "Fri Jan 01 00:00:00 EST 2016"),
(1454396522, "Tue Feb 02 02:02:02 EST 2016"),
(1456992183, "Thu Mar 03 03:03:03 EST 2016"),
(1459757044, "Mon Apr 04 04:04:04 EDT 2016"),
(1462439105, "Thu May 05 05:05:05 EDT 2016"),
(1465207566, "Mon Jun 06 06:06:06 EDT 2016"),
(1467889627, "Thu Jul 07 07:07:07 EDT 2016"),
(1470658088, "Mon Aug 08 08:08:08 EDT 2016"),
(1473426549, "Fri Sep 09 09:09:09 EDT 2016"),
(1476108610, "Mon Oct 10 10:10:10 EDT 2016"),
(1478880671, "Fri Nov 11 11:11:11 EST 2016"),
(1481562732, "Mon Dec 12 12:12:12 EST 2016"),
# Days of the week:
(1451818800, "Sun Jan 03 06:00:00 EST 2016"),
(1451883600, "Mon Jan 04 00:00:00 EST 2016"),
(1451973600, "Tue Jan 05 01:00:00 EST 2016"),
(1452063600, "Wed Jan 06 02:00:00 EST 2016"),
(1452153600, "Thu Jan 07 03:00:00 EST 2016"),
(1452243600, "Fri Jan 08 04:00:00 EST 2016"),
(1452333600, "Sat Jan 09 05:00:00 EST 2016"),
# Leap day:
(1456733655, "Mon Feb 29 03:14:15 EST 2016"),
# PM/24-hour time:
(1463159593, "Fri May 13 13:13:13 EDT 2016"),
# Before spring ahead:
(1457852399, "Sun Mar 13 01:59:59 EST 2016"),
(datetime(2016, 3, 13, 1, 59, 59), "Sun Mar 13 01:59:59 EST 2016"),
(
datetime(2006, 4, 2, 1, 59, 59, 0, old_pacific),
"Sun Apr 02 01:59:59 PST 2006",
),
# Skipped by spring ahead:
(datetime(2016, 3, 13, 2, 30, 0), "Sun Mar 13 03:30:00 EDT 2016"),
(
datetime(2006, 4, 2, 2, 30, 0, 0, old_pacific),
"Sun Apr 02 02:30:00 PDT 2006",
),
# After spring ahead:
(1457852401, "Sun Mar 13 03:00:01 EDT 2016"),
(datetime(2016, 3, 13, 3, 0, 1), "Sun Mar 13 03:00:01 EDT 2016"),
(
datetime(2006, 4, 2, 3, 0, 1, 0, old_pacific),
"Sun Apr 02 03:00:01 PDT 2006",
),
# Before fall back:
(1478411999, "Sun Nov 06 01:59:59 EDT 2016"),
(datetime(2016, 11, 6, 0, 59, 59), "Sun Nov 06 00:59:59 EDT 2016"),
(
datetime(2006, 10, 29, 0, 59, 59, 0, old_pacific),
"Sun Oct 29 00:59:59 PDT 2006",
),
# Duplicated by fall back:
# Times duplicated by DST are interpreted non-deterministically by Python
# pre-3.6 (cf. <https://git.io/vixsE>), so there are two possible return
# values for these calls.
(
datetime(2016, 11, 6, 1, 30, 0),
("Sun Nov 06 01:30:00 EDT 2016", "Sun Nov 06 01:30:00 EST 2016"),
),
(
datetime(2006, 10, 29, 1, 30, 0, 0, old_pacific),
("Sun Oct 29 01:30:00 PDT 2006", "Sun Oct 29 01:30:00 PST 2006"),
),
# After fall back:
(1478412001, "Sun Nov 06 01:00:01 EST 2016"),
(datetime(2016, 11, 6, 2, 0, 1), "Sun Nov 06 02:00:01 EST 2016"),
(
datetime(2006, 10, 29, 2, 0, 1, 0, old_pacific),
"Sun Oct 29 02:00:01 PST 2006",
),
],
)
def test_java_timestamp(ts, s):
r = java_timestamp(ts)
if isinstance(s, tuple):
assert r in s
else:
assert r == s
# Times duplicated by fall back, disambiguated with `fold`:
@pytest.mark.xfail(
hasattr(sys, "pypy_version_info") and sys.pypy_version_info[:3] < (7, 2, 0),
reason="Broken on this version of PyPy",
# Certain versions of pypy3.6 (including the one on Travis as of
# 2020-02-23) have a bug in their datetime libraries that prevents the
# `fold` attribute from working correctly. The latest known version to
# feature this bug is 7.1.1 (Python version 3.6.1), and the earliest known
# version to feature a fix is 7.2.0 (Python version 3.6.9); I don't *think*
# there were any releases in between those two versions, but it isn't
# entirely clear.
)
@pytest.mark.parametrize(
"ts,fold,s",
[
(datetime(2016, 11, 6, 1, 30, 0), 0, "Sun Nov 06 01:30:00 EDT 2016"),
(
datetime(2006, 10, 29, 1, 30, 0, 0, old_pacific),
0,
"Sun Oct 29 01:30:00 PDT 2006",
),
(datetime(2016, 11, 6, 1, 30, 0), 1, "Sun Nov 06 01:30:00 EST 2016"),
(
datetime(2006, 10, 29, 1, 30, 0, 0, old_pacific),
1,
"Sun Oct 29 01:30:00 PST 2006",
),
],
)
def test_java_timestamp_fold(ts, fold, s):
assert java_timestamp(ts.replace(fold=fold)) == s
def test_java_timestamp_now(fixed_timestamp):
assert java_timestamp() == fixed_timestamp
def test_java_timestamp_dogfood_type_error():
with pytest.raises(TypeError):
java_timestamp("Mon Dec 12 12:12:12 EST 2016")
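For orientation, a minimal, hypothetical use of the function under test; the exact string depends on the local timezone (the expected values above assume US/Eastern):

from javaproperties import java_timestamp
print(java_timestamp(0))  # e.g. "Wed Dec 31 19:00:00 EST 1969" in US/Eastern
print(java_timestamp())   # the current time in the same Java-style format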
avg_line_length: 37.830986 | max_line_length: 81 | alphanum_fraction: 0.566828

hexsha: 4a120b8498f65daa1ee84fa0d2ca6b8f861ba69a | size: 27,391 | ext: py | lang: Python
repo_path: corehq/apps/userreports/tasks.py | repo_name: EXTREMOPHILARUM/commcare-hq
head_hexsha: b97aa9095615d0c3c5f259db67ad9438afa3d7a5 | licenses: ["BSD-3-Clause"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: null
max_issues_count: 1 | issues_event: 2021-06-02T04:45:16.000Z → 2021-06-02T04:45:16.000Z
max_forks_count: null
content:
import logging
from collections import defaultdict
from datetime import datetime, timedelta
from django.conf import settings
from django.db import DatabaseError, InternalError, transaction
from django.db.models import Count, Min
from django.utils.translation import ugettext as _
from botocore.vendored.requests.exceptions import ReadTimeout
from botocore.vendored.requests.packages.urllib3.exceptions import (
ProtocolError,
)
from celery.schedules import crontab
from celery.task import periodic_task, task
from couchdbkit import ResourceConflict, ResourceNotFound
from corehq.util.es.elasticsearch import ConnectionTimeout
from corehq.util.metrics import metrics_counter, metrics_gauge, metrics_histogram_timer
from corehq.util.queries import paginated_queryset
from couchexport.models import Format
from dimagi.utils.chunked import chunked
from dimagi.utils.couch import CriticalSection
from dimagi.utils.logging import notify_exception
from pillowtop.dao.couch import ID_CHUNK_SIZE
from soil.util import expose_download, get_download_file_path
from corehq import toggles
from corehq.apps.change_feed.data_sources import (
get_document_store_for_doc_type,
)
from corehq.apps.reports.util import (
DatatablesParams,
send_report_download_email,
)
from corehq.apps.userreports.const import (
ASYNC_INDICATOR_CHUNK_SIZE,
ASYNC_INDICATOR_QUEUE_TIME,
ASYNC_INDICATOR_MAX_RETRIES,
UCR_CELERY_QUEUE,
UCR_INDICATOR_CELERY_QUEUE,
)
from corehq.apps.userreports.exceptions import (
StaticDataSourceConfigurationNotFoundError,
)
from corehq.apps.userreports.models import (
AsyncIndicator,
DataSourceConfiguration,
StaticDataSourceConfiguration,
get_report_config,
id_is_static,
)
from corehq.apps.userreports.rebuild import DataSourceResumeHelper
from corehq.apps.userreports.reports.data_source import (
ConfigurableReportDataSource,
)
from corehq.apps.userreports.specs import EvaluationContext
from corehq.apps.userreports.util import (
get_async_indicator_modify_lock_key,
get_indicator_adapter,
)
from corehq.elastic import ESError
from corehq.util.context_managers import notify_someone
from corehq.util.decorators import serial_task
from corehq.util.timer import TimingContext
from corehq.util.view_utils import reverse
celery_task_logger = logging.getLogger('celery.task')
def _get_config_by_id(indicator_config_id):
if id_is_static(indicator_config_id):
return StaticDataSourceConfiguration.by_id(indicator_config_id)
else:
return DataSourceConfiguration.get(indicator_config_id)
def _build_indicators(config, document_store, relevant_ids):
adapter = get_indicator_adapter(config, raise_errors=True, load_source='build_indicators')
for doc in document_store.iter_documents(relevant_ids):
if config.asynchronous:
AsyncIndicator.update_record(
doc.get('_id'), config.referenced_doc_type, config.domain, [config._id]
)
else:
# save is a noop if the filter doesn't match
adapter.best_effort_save(doc)
@task(serializer='pickle', queue=UCR_CELERY_QUEUE, ignore_result=True)
def rebuild_indicators(indicator_config_id, initiated_by=None, limit=-1, source=None, engine_id=None):
config = _get_config_by_id(indicator_config_id)
success = _('Your UCR table {} has finished rebuilding in {}').format(config.table_id, config.domain)
    failure = _('There was an error rebuilding your UCR table {} in {}.').format(config.table_id, config.domain)
send = False
if limit == -1:
send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
adapter = get_indicator_adapter(config)
if engine_id:
if getattr(adapter, 'all_adapters', None):
adapter = [
adapter_ for adapter_ in adapter.all_adapters
if adapter_.engine_id == engine_id
][0]
elif adapter.engine_id != engine_id:
raise AssertionError("Engine ID does not match adapter")
if not id_is_static(indicator_config_id):
# Save the start time now in case anything goes wrong. This way we'll be
# able to see if the rebuild started a long time ago without finishing.
config.meta.build.initiated = datetime.utcnow()
config.meta.build.finished = False
config.meta.build.rebuilt_asynchronously = False
config.save()
skip_log = bool(limit > 0) # don't store log for temporary report builder UCRs
adapter.rebuild_table(initiated_by=initiated_by, source=source, skip_log=skip_log)
_iteratively_build_table(config, limit=limit)
@task(serializer='pickle', queue=UCR_CELERY_QUEUE, ignore_result=True)
def rebuild_indicators_in_place(indicator_config_id, initiated_by=None, source=None):
config = _get_config_by_id(indicator_config_id)
success = _('Your UCR table {} has finished rebuilding in {}').format(config.table_id, config.domain)
    failure = _('There was an error rebuilding your UCR table {} in {}.').format(config.table_id, config.domain)
send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
adapter = get_indicator_adapter(config)
if not id_is_static(indicator_config_id):
config.meta.build.initiated_in_place = datetime.utcnow()
config.meta.build.finished_in_place = False
config.meta.build.rebuilt_asynchronously = False
config.save()
adapter.build_table(initiated_by=initiated_by, source=source)
_iteratively_build_table(config, in_place=True)
@task(serializer='pickle', queue=UCR_CELERY_QUEUE, ignore_result=True, acks_late=True)
def resume_building_indicators(indicator_config_id, initiated_by=None):
config = _get_config_by_id(indicator_config_id)
success = _('Your UCR table {} has finished rebuilding in {}').format(config.table_id, config.domain)
    failure = _('There was an error rebuilding your UCR table {} in {}.').format(config.table_id, config.domain)
send = toggles.SEND_UCR_REBUILD_INFO.enabled(initiated_by)
with notify_someone(initiated_by, success_message=success, error_message=failure, send=send):
resume_helper = DataSourceResumeHelper(config)
adapter = get_indicator_adapter(config)
adapter.log_table_build(
initiated_by=initiated_by,
source='resume_building_indicators',
)
_iteratively_build_table(config, resume_helper)
def _iteratively_build_table(config, resume_helper=None, in_place=False, limit=-1):
resume_helper = resume_helper or DataSourceResumeHelper(config)
indicator_config_id = config._id
case_type_or_xmlns_list = config.get_case_type_or_xmlns_filter()
completed_ct_xmlns = resume_helper.get_completed_case_type_or_xmlns()
if completed_ct_xmlns:
case_type_or_xmlns_list = [
case_type_or_xmlns
for case_type_or_xmlns in case_type_or_xmlns_list
if case_type_or_xmlns not in completed_ct_xmlns
]
for case_type_or_xmlns in case_type_or_xmlns_list:
relevant_ids = []
document_store = get_document_store_for_doc_type(
config.domain, config.referenced_doc_type,
case_type_or_xmlns=case_type_or_xmlns,
load_source="build_indicators",
)
for i, relevant_id in enumerate(document_store.iter_document_ids()):
if i >= limit > -1:
break
relevant_ids.append(relevant_id)
if len(relevant_ids) >= ID_CHUNK_SIZE:
_build_indicators(config, document_store, relevant_ids)
relevant_ids = []
if relevant_ids:
_build_indicators(config, document_store, relevant_ids)
resume_helper.add_completed_case_type_or_xmlns(case_type_or_xmlns)
resume_helper.clear_resume_info()
if not id_is_static(indicator_config_id):
if in_place:
config.meta.build.finished_in_place = True
else:
config.meta.build.finished = True
try:
config.save()
except ResourceConflict:
current_config = DataSourceConfiguration.get(config._id)
# check that a new build has not yet started
if in_place:
if config.meta.build.initiated_in_place == current_config.meta.build.initiated_in_place:
current_config.meta.build.finished_in_place = True
else:
if config.meta.build.initiated == current_config.meta.build.initiated:
current_config.meta.build.finished = True
current_config.save()
@task(serializer='pickle', queue=UCR_CELERY_QUEUE)
def compare_ucr_dbs(domain, report_config_id, filter_values, sort_column=None, sort_order=None, params=None):
if report_config_id not in settings.UCR_COMPARISONS:
return
control_report, unused = get_report_config(report_config_id, domain)
candidate_report = None
new_report_config_id = settings.UCR_COMPARISONS.get(report_config_id)
if new_report_config_id is not None:
# a report is configured to be compared against
candidate_report, unused = get_report_config(new_report_config_id, domain)
_compare_ucr_reports(
domain, control_report, candidate_report, filter_values, sort_column, sort_order, params)
else:
# no report is configured. Assume we should try mirrored engine_ids
# report_config.config is a DataSourceConfiguration
for engine_id in control_report.config.mirrored_engine_ids:
_compare_ucr_reports(
domain, control_report, control_report, filter_values, sort_column,
sort_order, params, candidate_engine_id=engine_id)
def _compare_ucr_reports(domain, control_report, candidate_report, filter_values, sort_column, sort_order, params,
candidate_engine_id=None):
from corehq.apps.userreports.laboratory.experiment import UCRExperiment
def _run_report(spec, engine_id=None):
data_source = ConfigurableReportDataSource.from_spec(spec, include_prefilters=True)
if engine_id:
data_source.data_source.override_engine_id(engine_id)
data_source.set_filter_values(filter_values)
if sort_column:
data_source.set_order_by(
[(data_source.top_level_columns[int(sort_column)].column_id, sort_order.upper())]
)
if params:
datatables_params = DatatablesParams.from_request_dict(params)
start = datatables_params.start
limit = datatables_params.count
else:
start, limit = None, None
page = list(data_source.get_data(start=start, limit=limit))
total_records = data_source.get_total_records()
json_response = {
'aaData': page,
"iTotalRecords": total_records,
}
total_row = data_source.get_total_row() if data_source.has_total_row else None
if total_row is not None:
json_response["total_row"] = total_row
return json_response
experiment_context = {
"domain": domain,
"report_config_id": control_report._id,
"new_report_config_id": candidate_report._id,
"filter_values": filter_values,
}
experiment = UCRExperiment(name="UCR DB Experiment", context=experiment_context)
with experiment.control() as c:
c.record(_run_report(control_report))
with experiment.candidate() as c:
c.record(_run_report(candidate_report, candidate_engine_id))
objects = experiment.run()
return objects
@task(serializer='pickle', queue=UCR_CELERY_QUEUE, ignore_result=True)
def delete_data_source_task(domain, config_id):
from corehq.apps.userreports.views import delete_data_source_shared
delete_data_source_shared(domain, config_id)
@periodic_task(run_every=crontab(minute='*/5'), queue=settings.CELERY_PERIODIC_QUEUE)
def run_queue_async_indicators_task():
"""
A periodic task that runs every few minutes, if ran within the permitted time slots,
would queue a task to further queue few AsyncIndicators for processing
"""
if time_in_range(datetime.utcnow(), settings.ASYNC_INDICATOR_QUEUE_TIMES):
queue_async_indicators.delay()
def time_in_range(time, time_dictionary):
"""time_dictionary will be of the format:
{
'*': [(begin_hour, end_hour), (begin_hour, end_hour), ...] catch all for days
1: [(begin_hour, end_hour), ...] hours for Monday (Monday 1, Sunday 7)
}
All times UTC
"""
if not time_dictionary:
return True
hours_for_today = time_dictionary.get(time.isoweekday())
if not hours_for_today:
hours_for_today = time_dictionary.get('*')
for valid_hours in hours_for_today:
if valid_hours[0] <= time.hour <= valid_hours[1]:
return True
return False
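# A hypothetical illustration of the time_dictionary shape (not the real
# settings.ASYNC_INDICATOR_QUEUE_TIMES value):
#     {
#         '*': [(0, 5)],   # catch-all: hours 00-05 UTC on any day
#         1: [(0, 23)],    # Mondays override the catch-all: all day
#     }
# With that dictionary, time_in_range(datetime(2020, 1, 6, 3), ...) is True,
# since 2020-01-06 is a Monday and hour 3 falls inside (0, 23).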
@serial_task('queue-async-indicators', timeout=30 * 60, queue=settings.CELERY_PERIODIC_QUEUE, max_retries=0)
def queue_async_indicators():
"""
Fetches AsyncIndicators that
1. were not queued till now or were last queued more than 4 hours ago
2. have failed less than ASYNC_INDICATOR_MAX_RETRIES times
This task quits after it has run for more than
ASYNC_INDICATOR_QUEUE_TIME - 30 seconds i.e 4 minutes 30 seconds.
While it runs, it clubs fetched AsyncIndicators by domain and doc type and queue them for processing.
"""
start = datetime.utcnow()
cutoff = start + ASYNC_INDICATOR_QUEUE_TIME - timedelta(seconds=30)
retry_threshold = start - timedelta(hours=4)
# don't requeue anything that has been retried more than ASYNC_INDICATOR_MAX_RETRIES times
indicators = AsyncIndicator.objects.filter(unsuccessful_attempts__lt=ASYNC_INDICATOR_MAX_RETRIES)[:settings.ASYNC_INDICATORS_TO_QUEUE]
indicators_by_domain_doc_type = defaultdict(list)
    # page so that envs can have arbitrarily large settings.ASYNC_INDICATORS_TO_QUEUE
for indicator in paginated_queryset(indicators, 1000):
# only requeue things that are not in queue or were last queued earlier than the threshold
if not indicator.date_queued or indicator.date_queued < retry_threshold:
indicators_by_domain_doc_type[(indicator.domain, indicator.doc_type)].append(indicator)
for k, indicators in indicators_by_domain_doc_type.items():
_queue_indicators(indicators)
if datetime.utcnow() > cutoff:
break
def _queue_indicators(async_indicators):
"""
Extract doc ids for the passed AsyncIndicators and queue task to process indicators for them.
Mark date_queued on all AsyncIndicator passed to utcnow.
"""
for chunk in chunked(async_indicators, ASYNC_INDICATOR_CHUNK_SIZE):
now = datetime.utcnow()
indicator_doc_ids = [i.doc_id for i in chunk]
# AsyncIndicator have doc_id as a unique column, so this update would only
# update the passed AsyncIndicators
AsyncIndicator.objects.filter(doc_id__in=indicator_doc_ids).update(date_queued=now)
build_async_indicators.delay(indicator_doc_ids)
@task(serializer='pickle', queue=UCR_INDICATOR_CELERY_QUEUE, ignore_result=True, acks_late=True)
def build_async_indicators(indicator_doc_ids):
    # Written to be used with _queue_indicators; indicator_doc_ids must be
    # a chunk of at most ASYNC_INDICATOR_CHUNK_SIZE (100 ids).
memoizers = {'configs': {}, 'adapters': {}}
    assert len(indicator_doc_ids) <= ASYNC_INDICATOR_CHUNK_SIZE
def handle_exception(exception, config_id, doc, adapter):
metric = None
if isinstance(exception, (ProtocolError, ReadTimeout)):
metric = 'commcare.async_indicator.riak_error'
elif isinstance(exception, (ESError, ConnectionTimeout)):
# a database had an issue so log it and go on to the next document
metric = 'commcare.async_indicator.es_error'
elif isinstance(exception, (DatabaseError, InternalError)):
# a database had an issue so log it and go on to the next document
metric = 'commcare.async_indicator.psql_error'
else:
# getting the config could fail before the adapter is set
if adapter:
adapter.handle_exception(doc, exception)
if metric:
metrics_counter(metric, tags={'config_id': config_id, 'doc_id': doc['_id']})
def doc_ids_from_rows(rows):
formatted_rows = [
{column.column.database_column_name.decode('utf-8'): column.value for column in row}
for row in rows
]
return set(row['doc_id'] for row in formatted_rows)
def _get_config(config_id):
config_by_id = memoizers['configs']
if config_id in config_by_id:
return config_by_id[config_id]
else:
config = _get_config_by_id(config_id)
config_by_id[config_id] = config
return config
def _get_adapter(config):
adapter_by_config = memoizers['adapters']
if config._id in adapter_by_config:
return adapter_by_config[config._id]
else:
adapter = get_indicator_adapter(config, load_source='build_async_indicators')
adapter_by_config[config._id] = adapter
return adapter
def _metrics_timer(step, config_id=None):
tags = {
'action': step,
}
if config_id and settings.ENTERPRISE_MODE:
tags['config_id'] = config_id
else:
# Prometheus requires consistent tags even if not available
tags['config_id'] = None
return metrics_histogram_timer(
'commcare.async_indicator.timing',
timing_buckets=(.03, .1, .3, 1, 3, 10), tags=tags
)
# tracks processed/deleted configs to be removed from each indicator
configs_to_remove_by_indicator_id = defaultdict(list)
def _mark_config_to_remove(config_id, indicator_ids):
for _id in indicator_ids:
configs_to_remove_by_indicator_id[_id].append(config_id)
timer = TimingContext()
lock_keys = [
get_async_indicator_modify_lock_key(indicator_doc_id)
for indicator_doc_id in indicator_doc_ids
]
with CriticalSection(lock_keys):
all_indicators = AsyncIndicator.objects.filter(
doc_id__in=indicator_doc_ids
)
if not all_indicators:
return
doc_store = get_document_store_for_doc_type(
all_indicators[0].domain, all_indicators[0].doc_type,
load_source="build_async_indicators",
)
failed_indicators = set()
rows_to_save_by_adapter = defaultdict(list)
docs_to_delete_by_adapter = defaultdict(list)
# there will always be one AsyncIndicator per doc id
indicator_by_doc_id = {i.doc_id: i for i in all_indicators}
config_ids = set()
with timer:
for doc in doc_store.iter_documents(list(indicator_by_doc_id.keys())):
indicator = indicator_by_doc_id[doc['_id']]
eval_context = EvaluationContext(doc)
for config_id in indicator.indicator_config_ids:
with _metrics_timer('transform', config_id):
config_ids.add(config_id)
try:
config = _get_config(config_id)
except (ResourceNotFound, StaticDataSourceConfigurationNotFoundError):
celery_task_logger.info("{} no longer exists, skipping".format(config_id))
# remove because the config no longer exists
_mark_config_to_remove(config_id, [indicator.pk])
continue
except ESError:
celery_task_logger.info("ES errored when trying to retrieve config")
failed_indicators.add(indicator)
continue
adapter = None
try:
adapter = _get_adapter(config)
rows_to_save = adapter.get_all_values(doc, eval_context)
if rows_to_save:
rows_to_save_by_adapter[adapter].extend(rows_to_save)
else:
docs_to_delete_by_adapter[adapter].append(doc)
eval_context.reset_iteration()
except Exception as e:
failed_indicators.add(indicator)
handle_exception(e, config_id, doc, adapter)
with _metrics_timer('single_batch_update'):
for adapter, rows in rows_to_save_by_adapter.items():
doc_ids = doc_ids_from_rows(rows)
indicators = [indicator_by_doc_id[doc_id] for doc_id in doc_ids]
try:
with _metrics_timer('update', adapter.config._id):
adapter.save_rows(rows, use_shard_col=True)
except Exception as e:
                    # set.union returns a new set; update mutates the set in place
                    failed_indicators.update(indicators)
message = str(e)
notify_exception(None, "Exception bulk saving async indicators:{}".format(message))
else:
# remove because it's successfully processed
_mark_config_to_remove(
config_id,
[i.pk for i in indicators]
)
with _metrics_timer('single_batch_delete'):
for adapter, docs in docs_to_delete_by_adapter.items():
with _metrics_timer('delete', adapter.config._id):
adapter.bulk_delete(docs)
# delete fully processed indicators
processed_indicators = set(all_indicators) - failed_indicators
AsyncIndicator.objects.filter(pk__in=[i.pk for i in processed_indicators]).delete()
# update failure for failed indicators
with transaction.atomic():
for indicator in failed_indicators:
indicator.update_failure(
configs_to_remove_by_indicator_id.get(indicator.pk, [])
)
indicator.save()
metrics_counter('commcare.async_indicator.processed_success', len(processed_indicators))
metrics_counter('commcare.async_indicator.processed_fail', len(failed_indicators))
metrics_counter(
'commcare.async_indicator.processing_time', timer.duration,
tags={'config_ids': config_ids}
)
metrics_counter(
'commcare.async_indicator.processed_total', len(indicator_doc_ids),
tags={'config_ids': config_ids}
)
@periodic_task(run_every=crontab(minute="*/5"), queue=settings.CELERY_PERIODIC_QUEUE)
def async_indicators_metrics():
now = datetime.utcnow()
oldest_indicator = AsyncIndicator.objects.order_by('date_queued').first()
if oldest_indicator and oldest_indicator.date_queued:
lag = (now - oldest_indicator.date_queued).total_seconds()
metrics_gauge('commcare.async_indicator.oldest_queued_indicator', lag)
oldest_100_indicators = AsyncIndicator.objects.all()[:100]
if oldest_100_indicators.exists():
oldest_indicator = oldest_100_indicators[0]
lag = (now - oldest_indicator.date_created).total_seconds()
metrics_gauge('commcare.async_indicator.oldest_created_indicator', lag)
lags = [
(now - indicator.date_created).total_seconds()
for indicator in oldest_100_indicators
]
avg_lag = sum(lags) / len(lags)
metrics_gauge('commcare.async_indicator.oldest_created_indicator_avg', avg_lag)
for config_id, metrics in _indicator_metrics().items():
tags = {"config_id": config_id}
metrics_gauge('commcare.async_indicator.indicator_count', metrics['count'], tags=tags)
metrics_gauge('commcare.async_indicator.lag', metrics['lag'], tags=tags,
documentation="Lag of oldest created indicator including failed indicators")
# Don't use ORM summing because it would attempt to get every value in DB
unsuccessful_attempts = sum(AsyncIndicator.objects.values_list('unsuccessful_attempts', flat=True).all()[:100])
metrics_gauge('commcare.async_indicator.unsuccessful_attempts', unsuccessful_attempts)
oldest_unprocessed = AsyncIndicator.objects.filter(unsuccessful_attempts=0).first()
if oldest_unprocessed:
lag = (now - oldest_unprocessed.date_created).total_seconds()
else:
lag = 0
metrics_gauge(
'commcare.async_indicator.true_lag',
lag,
documentation="Lag of oldest created indicator that didn't get ever queued"
)
metrics_gauge(
'commcare.async_indicator.fully_failed_count',
AsyncIndicator.objects.filter(unsuccessful_attempts=ASYNC_INDICATOR_MAX_RETRIES).count(),
documentation="Number of indicators that failed max-retry number of times"
)
def _indicator_metrics(date_created=None):
"""
returns {
"config_id": {
"count": number of indicators with that config,
"lag": number of seconds ago that the row was created
}
}
"""
ret = {}
indicator_metrics = (
AsyncIndicator.objects
.values('indicator_config_ids')
.annotate(Count('indicator_config_ids'), Min('date_created'))
.order_by() # needed to get rid of implicit ordering by date_created
)
now = datetime.utcnow()
if date_created:
indicator_metrics = indicator_metrics.filter(date_created__lt=date_created)
for ind in indicator_metrics:
count = ind['indicator_config_ids__count']
lag = (now - ind['date_created__min']).total_seconds()
for config_id in ind['indicator_config_ids']:
if ret.get(config_id):
ret[config_id]['count'] += ind['indicator_config_ids__count']
ret[config_id]['lag'] = max(lag, ret[config_id]['lag'])
else:
ret[config_id] = {
"count": count,
"lag": lag
}
return ret
@task(serializer='pickle')
def export_ucr_async(report_export, download_id, user):
use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
ascii_title = report_export.title.encode('ascii', 'replace').decode('utf-8')
filename = '{}.xlsx'.format(ascii_title.replace('/', '?'))
file_path = get_download_file_path(use_transfer, filename)
report_export.create_export(file_path, Format.XLS_2007)
expose_download(use_transfer, file_path, filename, download_id, 'xlsx')
link = reverse("retrieve_download", args=[download_id], params={"get_file": '1'}, absolute=True)
send_report_download_email(report_export.title, user.get_email(), link)
avg_line_length: 43.408875 | max_line_length: 138 | alphanum_fraction: 0.681319

hexsha: 4a120c80de0f04200cb93bab524ffe4fe749cb72 | size: 5,193 | ext: py | lang: Python
repo_path: scripts/icsexport.py | repo_name: cigno5/pyscripts
head_hexsha: 10be001ec03d806d3e742b828020b1e07dd8dada | licenses: ["MIT"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
import argparse
import getpass
import json
import os
import re
import sys
from datetime import datetime, timedelta
import requests
import abnconv
from abnconv import QIFOutput, Trsx
def extract_transactions():
base_url = "https://www.icscards.nl"
login_url = "%s/pub/nl/pub/login" % base_url
account_url = "%s/sec/nl/sec/allaccountsv2" % base_url
transactions_url = \
"{baseurl}/sec/nl/sec/transactions/search?fromDate={start_date}&untilDate={end_date}&accountNumber=" \
.format(baseurl=base_url,
start_date=from_date.strftime("%Y-%m-%d"),
end_date=to_date.strftime("%Y-%m-%d"), )
s = requests.Session()
s.get(base_url)
# logging in
s.post(login_url, json={"loginType": "PASSWORD",
"virtualPortal": "ICS-ABNAMRO",
"username": username,
"password": password})
# extract header
more_headers = {
'X-XSRF-TOKEN': s.cookies.get('XSRF-TOKEN')
}
r = s.get(account_url, headers=more_headers)
if not r.ok:
raise ValueError
all_transactions = list()
for account_data in [ad for ad in json.loads(r.text) if ad['valid'] is True]:
account_number = str(account_data['accountNumber'])
r = s.get(transactions_url + account_number, headers=more_headers)
account_transactions = json.loads(r.text)
for account_transaction in account_transactions:
account_transaction['accountNumber'] = account_number
all_transactions.append(account_transaction)
return all_transactions
def read_transactions():
for line in sys.stdin:
account_transactions = json.loads(line)
break
all_transactions = list()
for account_transaction in account_transactions:
account_transaction['accountNumber'] = '65770350018'
all_transactions.append(account_transaction)
return all_transactions
def load_settings():
if args.password:
_password = args.password
else:
_password = getpass.getpass(prompt='Please input your password')
if args.from_date:
try:
_from = datetime.strptime(args.from_date, "%d%m%Y")
except ValueError:
_from = datetime.now() - timedelta(days=abs(int(args.from_date)))
else:
_from = (datetime.now() - timedelta(days=1)).replace(day=1)
if args.to_date:
_to = datetime.strptime(args.to_date, "%d%m%Y")
else:
_to = datetime.now()
if args.file:
_file = args.file
else:
_file = os.path.join(os.getcwd(), "transactions_%s-%s.qif" % (
_from.strftime("%d%m%Y"), _to.strftime("%d%m%Y")
))
return args.username, _password, _from, _to, _file
def export_transactions():
    payee_re = re.compile(r"((www\.)?[\w.]+).+")  # raw string; the dot in "www." is escaped to match literally
with QIFOutput(file) as out:
for transaction in transactions:
if transaction['transactionDate'] is None \
or transaction['description'] == '' \
or transaction['typeOfTransaction'] == 'A':
continue
account = abnconv.find_account(transaction['accountNumber'])
description = transaction['description']
tsx = Trsx(account.iban)
tsx.type = 'Bank'
tsx.memo = description
tsx.date = datetime.strptime(transaction['transactionDate'], '%Y-%m-%d')
tsx.amount = float(transaction['billingAmount']) * -1
tsx.payee = payee_re.search(description).group(1)
if transaction['typeOfTransaction'] == 'P':
tsx.dest_iban = account.ics_debit_iban
tsx.payee = None
tsx.date -= timedelta(days=1)
out += tsx
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("username", help="Username")
parser.add_argument("-p", "--password", help="Password, if not specified it will be requested")
parser.add_argument("-c", "--config", help="Abnconv.ini configuration file (as for abnconv script)")
parser.add_argument("--from-date", help="From date in format ddmmyyyy (default: beginning of the month) "
"or -d (days)")
parser.add_argument("--to-date", help="To date in format ddmmyyyy (default: today)")
parser.add_argument("--file", help="Output file (default will be created using dates)")
parser.add_argument("--read", action='store_true', help="Read transactions from stdin (such a shame!)")
args = parser.parse_args()
abnconv.accounts = abnconv.load_accounts(args.config or 'abnconv.ini')
username, password, from_date, to_date, file = load_settings()
if args.read:
print("Read transactions from stdin")
transactions = read_transactions()
else:
print("Extract transactions from %s to %a" % (from_date.strftime("%d/%m/%Y"), to_date.strftime("%d/%m/%Y")))
transactions = extract_transactions()
if len(transactions) > 0:
export_transactions()
else:
print("No transaction found in the specified period")
avg_line_length: 33.076433 | max_line_length: 116 | alphanum_fraction: 0.620643

hexsha: 4a120cfa5ab9bbc993f275a351a3407ae81a1ea8 | size: 6,744 | ext: py | lang: Python
repo_path: census/customestimator/trainer/task.py (identical across facets)
head_hexsha: 57311b1c8a7640801c6638c642884cd85bbc2c85 | licenses: ["Apache-2.0"] (identical across facets)
max_stars_repo_name: lakshmanok/cloudml-samples | max_stars_count: 1 | stars_event: 2018-08-16T02:16:59.000Z → 2018-08-16T02:16:59.000Z
max_issues_repo_name: Ugenteraan/cloudml-samples | max_issues_count: null
max_forks_repo_name: Ugenteraan/cloudml-samples | max_forks_count: 2 | forks_event: 2019-06-30T18:47:22.000Z → 2020-08-14T16:41:38.000Z
content:
import argparse
import os
import model
import tensorflow as tf
from tensorflow.contrib.learn import learn_runner
from tensorflow.contrib.learn.python.learn.utils import (
saved_model_export_utils)
from tensorflow.contrib.training.python.training import hparam
def generate_experiment_fn(**experiment_args):
"""Create an experiment function.
See command line help text for description of args.
Args:
experiment_args: keyword arguments to be passed through to experiment
See `tf.contrib.learn.Experiment` for full args.
Returns:
A function:
(tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment
This function is used by learn_runner to create an Experiment which
executes model code provided in the form of an Estimator and
input functions.
"""
def _experiment_fn(run_config, hparams):
# num_epochs can control duration if train_steps isn't
# passed to Experiment
train_input = lambda: model.generate_input_fn(
hparams.train_files,
num_epochs=hparams.num_epochs,
batch_size=hparams.train_batch_size,
)
# Don't shuffle evaluation data
eval_input = lambda: model.generate_input_fn(
hparams.eval_files,
batch_size=hparams.eval_batch_size,
shuffle=False
)
return tf.contrib.learn.Experiment(
tf.estimator.Estimator(
model.generate_model_fn(
embedding_size=hparams.embedding_size,
                    # Construct layer sizes with exponential decay
hidden_units=[
max(2, int(hparams.first_layer_size *
hparams.scale_factor**i))
for i in range(hparams.num_layers)
],
learning_rate=hparams.learning_rate
),
config=run_config
),
train_input_fn=train_input,
eval_input_fn=eval_input,
**experiment_args
)
return _experiment_fn
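# A sanity check of the exponential-decay sizing above, under hypothetical
# values (first_layer_size=100, scale_factor=0.7, num_layers=4):
#     [max(2, int(100 * 0.7 ** i)) for i in range(4)]  ->  [100, 70, 48, 34]
# (int() truncates, and float rounding makes 100 * 0.7**2 land just under 49,
# hence 48.)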
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--train-files',
help='GCS or local paths to training data',
nargs='+',
required=True
)
parser.add_argument(
'--num-epochs',
help="""\
Maximum number of training data epochs on which to train.
If both --max-steps and --num-epochs are specified,
the training job will run for --max-steps or --num-epochs,
whichever occurs first. If unspecified will run for --max-steps.\
""",
type=int,
)
parser.add_argument(
'--train-batch-size',
help='Batch size for training steps',
type=int,
default=40
)
parser.add_argument(
'--eval-batch-size',
help='Batch size for evaluation steps',
type=int,
default=40
)
parser.add_argument(
'--eval-files',
help='GCS or local paths to evaluation data',
nargs='+',
required=True
)
# Training arguments
parser.add_argument(
'--embedding-size',
help='Number of embedding dimensions for categorical columns',
default=8,
type=int
)
parser.add_argument(
'--learning-rate',
help='Learning rate for the optimizer',
default=0.1,
type=float
)
parser.add_argument(
'--first-layer-size',
help='Number of nodes in the first layer of the DNN',
default=100,
type=int
)
parser.add_argument(
'--num-layers',
help='Number of layers in the DNN',
default=4,
type=int
)
parser.add_argument(
'--scale-factor',
help='How quickly should the size of the layers in the DNN decay',
default=0.7,
type=float
)
parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
required=True
)
parser.add_argument(
'--reuse-job-dir',
action='store_true',
default=False,
help="""\
Flag to decide if the model checkpoint should
be re-used from the job-dir. If False then the
job-dir will be deleted
"""
)
parser.add_argument(
'--verbosity',
choices=[
'DEBUG',
'ERROR',
'FATAL',
'INFO',
'WARN'
],
default='INFO',
help='Set logging verbosity'
)
# Experiment arguments
parser.add_argument(
'--eval-delay-secs',
help='How long to wait before running first evaluation',
default=10,
type=int
)
parser.add_argument(
'--min-eval-frequency',
help='Minimum number of training steps between evaluations',
default=1,
type=int
)
parser.add_argument(
'--train-steps',
help="""\
Steps to run the training job for. If --num-epochs is not specified,
this must be. Otherwise the training job will run indefinitely.\
""",
type=int
)
parser.add_argument(
'--eval-steps',
help="""\
        Number of steps to run evaluation for at each checkpoint.
If unspecified will run until the input from --eval-files is exhausted
""",
default=None,
type=int
)
parser.add_argument(
'--export-format',
help='The input format of the exported SavedModel binary',
choices=['JSON', 'CSV', 'EXAMPLE'],
default='JSON'
)
args = parser.parse_args()
# Set python level verbosity
tf.logging.set_verbosity(args.verbosity)
# Set C++ Graph Execution level verbosity
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
tf.logging.__dict__[args.verbosity] / 10)
# If job_dir_reuse is False then remove the job_dir if it exists
if not args.reuse_job_dir:
if tf.gfile.Exists(args.job_dir):
tf.gfile.DeleteRecursively(args.job_dir)
tf.logging.info("Deleted job_dir {} to avoid re-use".format(args.job_dir))
else:
tf.logging.info("No job_dir available to delete")
else:
tf.logging.info("Reusing job_dir {} if it exists".format(args.job_dir))
# Run the training job
# learn_runner pulls configuration information from environment
# variables using tf.learn.RunConfig and uses this configuration
# to conditionally execute Experiment, or param server code
learn_runner.run(
generate_experiment_fn(
min_eval_frequency=args.min_eval_frequency,
eval_delay_secs=args.eval_delay_secs,
train_steps=args.train_steps,
eval_steps=args.eval_steps,
export_strategies=[saved_model_export_utils.make_export_strategy(
model.SERVING_FUNCTIONS[args.export_format],
exports_to_keep=1
)]
),
run_config=tf.contrib.learn.RunConfig(model_dir=args.job_dir),
hparams=hparam.HParams(**args.__dict__)
)
avg_line_length: 29.068966 | max_line_length: 80 | alphanum_fraction: 0.640273

hexsha: 4a120d9ddeffdd305183215536c9364f02f1c8da | size: 1,402 | ext: py | lang: Python
repo_path: app_reservas/migrations/0004_clase_recurso.py | repo_name: fedegallar/reservas
head_hexsha: 75fc06b9dedf53eca76b61ea0ccc914d5e084b2d | licenses: ["MIT"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: 1 | stars_event: 2018-11-10T14:57:54.000Z → 2018-11-10T14:57:54.000Z
max_issues_count: 6 | issues_event: 2020-06-05T17:11:56.000Z → 2021-09-07T23:38:00.000Z
max_forks_count: 1 | forks_event: 2019-04-16T20:00:05.000Z → 2019-04-16T20:00:05.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""
    Creation of the Recurso model.
"""
dependencies = [
('app_reservas', '0003_verbose_names'),
]
operations = [
migrations.CreateModel(
name='Recurso',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('calendar_codigo', models.CharField(max_length=100)),
('calendar_color', models.CharField(blank=True, max_length=10)),
],
options={
'verbose_name_plural': 'Recursos',
'verbose_name': 'Recurso',
},
),
migrations.AddField(
model_name='aula',
name='recurso_ptr',
field=models.OneToOneField(to='app_reservas.Recurso',
parent_link=True,
auto_created=True,
default=None,
serialize=False,
null=True),
preserve_default=False,
),
]
avg_line_length: 31.863636 | max_line_length: 80 | alphanum_fraction: 0.445792

hexsha: 4a120e7884f331c97f408d6f62f5bd9296be7ef7 | size: 1,080 | ext: py | lang: Python
repo_path: 82. Remove Duplicates from Sorted List II.py | repo_name: alijon30/Leetcode
head_hexsha: 73e8171945e1fcbc59e76f79667c9ea130db27e9 | licenses: ["Unlicense"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
"""
Given the head of a sorted linked list, delete all nodes that have duplicate
numbers, leaving only distinct numbers from the original list. Return the
linked list sorted as well.

Example 1:
Input: head = [1,2,3,3,4,4,5]
Output: [1,2,5]

Example 2:
Input: head = [1,1,1,2,3]
Output: [2,3]
"""
from typing import Optional

# Definition for singly-linked list (uncommented here so the file runs on its
# own; LeetCode normally injects this class):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
dummy = curr = ListNode()
Dict = {}
        while head is not None:
            if head.val not in Dict:
Dict[head.val] = 1
else:
Dict[head.val] += 1
head = head.next
for val1, val2 in Dict.items():
if val2 == 1:
curr.next = ListNode(val1)
curr = curr.next
return dummy.next
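A minimal, hypothetical harness for the solution above, using the ListNode class from the header:

def build(vals):
    dummy = curr = ListNode()
    for v in vals:
        curr.next = ListNode(v)
        curr = curr.next
    return dummy.next

head = Solution().deleteDuplicates(build([1, 2, 3, 3, 4, 4, 5]))
out = []
while head:
    out.append(head.val)
    head = head.next
print(out)  # [1, 2, 5]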
avg_line_length: 21.176471 | max_line_length: 178 | alphanum_fraction: 0.502778

hexsha: 4a120e9f1ec48f2c7236f24d785314da9083f91a | size: 59,025 | ext: py | lang: Python
repo_path: Lib/logging/handlers.py | repo_name: chexca/cpython
head_hexsha: cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa | licenses: ["CNRI-Python-GPL-Compatible"]
(path, name, head and licenses are identical across the stars/issues/forks facets)
max_stars_count: 4 | stars_event: 2019-04-17T19:09:30.000Z → 2021-08-18T14:51:39.000Z
max_issues_count: 4 | issues_event: 2020-03-13T22:24:05.000Z → 2020-03-19T15:08:18.000Z
max_forks_count: 5 | forks_event: 2018-12-29T15:43:57.000Z → 2020-12-14T15:29:43.000Z
content:
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
import copy
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
namer = None
rotator = None
def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.mode = mode
self.encoding = encoding
self.errors = errors
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
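# Illustrative namer/rotator pair (not part of this module; adapted from the
# logging cookbook pattern): gzip each log file as it is rotated.
#     def namer(name):
#         return name + ".gz"
#     def rotator(source, dest):
#         with open(source, "rb") as sf, gzip.open(dest, "wb") as df:
#             df.writelines(sf)
#         os.remove(source)
#     handler.namer = namer
#     handler.rotator = rotator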
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
encoding=None, delay=False, errors=None):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
delay=delay, errors=errors)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
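# Illustrative use (not part of this module): size-based rotation keeping five
# backups, so "app.log" rolls over to "app.log.1" ... "app.log.5":
#     handler = RotatingFileHandler("app.log", maxBytes=1_000_000, backupCount=5)
#     logging.getLogger(__name__).addHandler(handler)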
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0,
encoding=None, delay=False, utc=False, atTime=None,
errors=None):
BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
delay=delay, errors=errors)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
# The following line added because the filename passed in could be a
# path object (see Issue #27493), but self.baseFilename will be a string
filename = self.baseFilename
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
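    # A minimal usage sketch (comments only; values are illustrative).
    # This rolls the file at midnight and keeps a week of backups, each
    # suffixed with the "%Y-%m-%d" date of the interval it covers:
    #
    #   from logging.handlers import TimedRotatingFileHandler
    #
    #   handler = TimedRotatingFileHandler("app.log", when="midnight",
    #                                      backupCount=7)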
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
# Rotate time is before the current time (for example when
                    # self.rotateAt is 13:45 and it is now 14:15), rotation is
# tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
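                # Worked example of the branches above (illustrative, not
                # part of the module): with when='W6' (roll over on Sunday)
                # and "now" on a Wednesday (day == 2), day < dayOfWeek, so
                # daysToWait == 6 - 2 == 4 and the rollover lands at the
                # midnight that ends Sunday, four days after the coming
                # midnight already folded into `result`.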
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
if len(result) < self.backupCount:
result = []
else:
result.sort()
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False,
errors=None):
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
        If the attribute *closeOnError* is set to True and a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
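    # A minimal receiving-end sketch for the wire format produced by
    # makePickle() above (comments only; the connected socket `conn` is
    # assumed to exist elsewhere, and a robust reader would loop until the
    # full payload has arrived). Each payload is a 4-byte big-endian length
    # followed by a pickled dict, which logging.makeLogRecord() turns back
    # into a LogRecord:
    #
    #   import logging, pickle, struct
    #
    #   def read_record(conn):
    #       slen = struct.unpack(">L", conn.recv(4))[0]
    #       data = conn.recv(slen)
    #       return logging.makeLogRecord(pickle.loads(data))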
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
LOG_NTP = 12 # NTP subsystem
LOG_SECURITY = 13 # Log audit
LOG_CONSOLE = 14 # Log alert
LOG_SOLCRON = 15 # Scheduling daemon (Solaris)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"console": LOG_CONSOLE,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"ntp": LOG_NTP,
"security": LOG_SECURITY,
"solaris-cron": LOG_SOLCRON,
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it's no
            # worse to ignore them here as well.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
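    # A minimal usage sketch (comments only; the remote host name is
    # illustrative). On most Linux systems the local syslog daemon listens
    # on the Unix socket below; the (host, port) form targets a remote
    # server instead:
    #
    #   from logging.handlers import SysLogHandler
    #
    #   local = SysLogHandler(address="/dev/log")
    #   remote = SysLogHandler(address=("logs.example.com", 514))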
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
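    # Worked example for encodePriority() (illustrative): facility "user"
    # is LOG_USER == 1 and priority "info" is LOG_INFO == 6, so the encoded
    # value is (1 << 3) | 6 == 14 and the message is prefixed with "<14>".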
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change in the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds, matching the timeout argument above).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
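    # A minimal usage sketch (comments only; host names and addresses are
    # illustrative). The (host, port) tuple selects a non-standard port and
    # the credentials tuple enables SMTP authentication with STARTTLS:
    #
    #   from logging.handlers import SMTPHandler
    #
    #   handler = SMTPHandler(mailhost=("smtp.example.com", 587),
    #                         fromaddr="app@example.com",
    #                         toaddrs=["ops@example.com"],
    #                         subject="Application error",
    #                         credentials=("user", "secret"),
    #                         secure=())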
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None,
context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
if not secure and context is not None:
raise ValueError("context parameter only makes sense "
"with secure=True")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
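    # A minimal usage sketch (comments only; the host and URL are
    # illustrative). Each record's __dict__ is percent-encoded and sent
    # either in the query string (GET) or in the request body (POST):
    #
    #   from logging.handlers import HTTPHandler
    #
    #   handler = HTTPHandler("logs.example.com:8080", "/log", method="POST")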
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def getConnection(self, host, secure):
"""
get a HTTP[S]Connection.
Override when a custom connection is required, for example if
there is a proxy.
"""
import http.client
if secure:
connection = http.client.HTTPSConnection(host, context=self.context)
else:
connection = http.client.HTTPConnection(host)
return connection
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import urllib.parse
host = self.host
h = self.getConnection(host, self.secure)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
# See issue #30904: putrequest call above already adds this header
# on Python 3.x.
# h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except Exception:
self.handleError(record)
class BufferingHandler(logging.Handler):
"""
    A handler class which buffers logging records in memory. Whenever a
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
# See Issue #26559 for why this has been added
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
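# A minimal usage sketch for MemoryHandler (comments only; the file name
# and capacity are illustrative). Records are buffered in memory and only
# written to the target once an ERROR arrives or the buffer fills:
#
#   import logging
#   from logging.handlers import MemoryHandler
#
#   target = logging.FileHandler("app.log")
#   handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
#                           target=target)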
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
    This code is new in Python 3.2, but this class can be copy-pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also returns the formatted
# message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info and exc_text attributes, as they are no longer
# needed and, if not None, will typically not be pickleable.
msg = self.format(record)
# bpo-35726: make copy of record to avoid affecting other handlers in the chain.
record = copy.copy(record)
record.message = msg
record.msg = msg
record.args = None
record.exc_info = None
record.exc_text = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
if has_task_done:
q.task_done()
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
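# A minimal sketch of the QueueHandler/QueueListener pairing described
# above (comments only; runnable if uncommented). The listener drains the
# queue on a background thread and forwards each record to its handlers:
#
#   import logging, queue
#   from logging.handlers import QueueHandler, QueueListener
#
#   q = queue.Queue(-1)
#   listener = QueueListener(q, logging.StreamHandler())
#   logging.getLogger().addHandler(QueueHandler(q))
#   listener.start()
#   logging.getLogger().warning("handled on the listener thread")
#   listener.stop()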
| avg_line_length: 38.327922 | max_line_length: 120 | alphanum_fraction: 0.57906 |

hexsha: 4a120f228aa7b8769b252e7935b8322659a78bf6 | size: 7057 | ext: py | lang: Python
max_stars_repo_path: frappe/api.py | max_stars_repo_name: kidsyn/frappe | max_stars_repo_head_hexsha: 8de131dab29b69b33729240a66bc3483513dafc4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-04-25T07:07:36.000Z | max_stars_repo_stars_event_max_datetime: 2021-04-25T07:07:36.000Z
max_issues_repo_path: frappe/api.py | max_issues_repo_name: kidsyn/frappe | max_issues_repo_head_hexsha: 8de131dab29b69b33729240a66bc3483513dafc4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null
max_forks_repo_path: frappe/api.py | max_forks_repo_name: kidsyn/frappe | max_forks_repo_head_hexsha: 8de131dab29b69b33729240a66bc3483513dafc4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import base64
import binascii
import json
from urllib.parse import urlencode, urlparse
import frappe
import frappe.client
import frappe.handler
from frappe import _
from frappe.utils.response import build_response
def handle():
"""
Handler for `/api` methods
### Examples:
`/api/method/{methodname}` will call a whitelisted method
`/api/resource/{doctype}` will query a table
examples:
- `?fields=["name", "owner"]`
- `?filters=[["Task", "name", "like", "%005"]]`
- `?limit_start=0`
- `?limit_page_length=20`
`/api/resource/{doctype}/{name}` will point to a resource
`GET` will return doclist
`POST` will insert
`PUT` will update
`DELETE` will delete
`/api/resource/{doctype}/{name}?run_method={method}` will run a whitelisted controller method
"""
parts = frappe.request.path[1:].split("/",3)
call = doctype = name = None
if len(parts) > 1:
call = parts[1]
if len(parts) > 2:
doctype = parts[2]
if len(parts) > 3:
name = parts[3]
if call=="method":
frappe.local.form_dict.cmd = doctype
return frappe.handler.handle()
elif call=="resource":
if "run_method" in frappe.local.form_dict:
method = frappe.local.form_dict.pop("run_method")
doc = frappe.get_doc(doctype, name)
doc.is_whitelisted(method)
if frappe.local.request.method=="GET":
if not doc.has_permission("read"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
if frappe.local.request.method=="POST":
if not doc.has_permission("write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
frappe.db.commit()
else:
if name:
if frappe.local.request.method=="GET":
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
frappe.local.response.update({"data": doc})
if frappe.local.request.method=="PUT":
data = get_request_form_data()
doc = frappe.get_doc(doctype, name)
if "flags" in data:
del data["flags"]
# Not checking permissions here because it's checked in doc.save
doc.update(data)
frappe.local.response.update({
"data": doc.save().as_dict()
})
if doc.parenttype and doc.parent:
frappe.get_doc(doc.parenttype, doc.parent).save()
frappe.db.commit()
if frappe.local.request.method == "DELETE":
# Not checking permissions here because it's checked in delete_doc
frappe.delete_doc(doctype, name, ignore_missing=False)
frappe.local.response.http_status_code = 202
frappe.local.response.message = "ok"
frappe.db.commit()
elif doctype:
if frappe.local.request.method == "GET":
if frappe.local.form_dict.get('fields'):
frappe.local.form_dict['fields'] = json.loads(frappe.local.form_dict['fields'])
frappe.local.form_dict.setdefault('limit_page_length', 20)
frappe.local.response.update({
"data": frappe.call(
frappe.client.get_list,
doctype,
**frappe.local.form_dict
)
})
if frappe.local.request.method == "POST":
data = get_request_form_data()
data.update({
"doctype": doctype
})
frappe.local.response.update({
"data": frappe.get_doc(data).insert().as_dict()
})
frappe.db.commit()
else:
raise frappe.DoesNotExistError
else:
raise frappe.DoesNotExistError
return build_response("json")
def get_request_form_data():
if frappe.local.form_dict.data is None:
data = frappe.safe_decode(frappe.local.request.get_data())
else:
data = frappe.local.form_dict.data
return frappe.parse_json(data)
def validate_auth():
"""
	Authenticate and set the user for the request.
"""
authorization_header = frappe.get_request_header("Authorization", str()).split(" ")
if len(authorization_header) == 2:
validate_oauth(authorization_header)
validate_auth_via_api_keys(authorization_header)
validate_auth_via_hooks()
def validate_oauth(authorization_header):
"""
Authenticate request using OAuth and set session user
Args:
authorization_header (list of str): The 'Authorization' header containing the prefix and token
"""
from frappe.integrations.oauth2 import get_oauth_server
from frappe.oauth import get_url_delimiter
form_dict = frappe.local.form_dict
token = authorization_header[1]
req = frappe.request
parsed_url = urlparse(req.url)
access_token = {"access_token": token}
uri = parsed_url.scheme + "://" + parsed_url.netloc + parsed_url.path + "?" + urlencode(access_token)
http_method = req.method
body = req.get_data()
headers = req.headers
try:
required_scopes = frappe.db.get_value("OAuth Bearer Token", token, "scopes").split(get_url_delimiter())
valid, oauthlib_request = get_oauth_server().verify_request(uri, http_method, body, headers, required_scopes)
if valid:
frappe.set_user(frappe.db.get_value("OAuth Bearer Token", token, "user"))
frappe.local.form_dict = form_dict
except AttributeError:
pass
def validate_auth_via_api_keys(authorization_header):
"""
Authenticate request using API keys and set session user
Args:
authorization_header (list of str): The 'Authorization' header containing the prefix and token
"""
try:
auth_type, auth_token = authorization_header
authorization_source = frappe.get_request_header("Frappe-Authorization-Source")
if auth_type.lower() == 'basic':
api_key, api_secret = frappe.safe_decode(base64.b64decode(auth_token)).split(":")
validate_api_key_secret(api_key, api_secret, authorization_source)
elif auth_type.lower() == 'token':
api_key, api_secret = auth_token.split(":")
validate_api_key_secret(api_key, api_secret, authorization_source)
except binascii.Error:
frappe.throw(_("Failed to decode token, please provide a valid base64-encoded token."), frappe.InvalidAuthorizationToken)
except (AttributeError, TypeError, ValueError):
pass
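# A minimal sketch of the two Authorization header formats accepted above
# (comments only; the key and secret values are illustrative):
#
#   import base64
#
#   # token scheme: "Authorization: token <api_key>:<api_secret>"
#   token_header = "token api_key_value:api_secret_value"
#   # basic scheme: base64 of "<api_key>:<api_secret>"
#   basic_header = "Basic " + base64.b64encode(
#       b"api_key_value:api_secret_value").decode()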
def validate_api_key_secret(api_key, api_secret, frappe_authorization_source=None):
"""frappe_authorization_source to provide api key and secret for a doctype apart from User"""
doctype = frappe_authorization_source or 'User'
doc = frappe.db.get_value(
doctype=doctype,
filters={"api_key": api_key},
fieldname=["name"]
)
form_dict = frappe.local.form_dict
doc_secret = frappe.utils.password.get_decrypted_password(doctype, doc, fieldname='api_secret')
if api_secret == doc_secret:
if doctype == 'User':
user = frappe.db.get_value(
doctype="User",
filters={"api_key": api_key},
fieldname=["name"]
)
else:
user = frappe.db.get_value(doctype, doc, 'user')
if frappe.local.login_manager.user in ('', 'Guest'):
frappe.set_user(user)
frappe.local.form_dict = form_dict
def validate_auth_via_hooks():
for auth_hook in frappe.get_hooks('auth_hooks', []):
frappe.get_attr(auth_hook)()
| avg_line_length: 28.922131 | max_line_length: 123 | alphanum_fraction: 0.715602 |

hexsha: 4a120f4dd5a5f0ecd35b71e45f2b2d9ab271f82c | size: 48458 | ext: py | lang: Python
max_stars_repo_path: python/ccxt/async_support/liquid.py | max_stars_repo_name: 0shimax/ccxt | max_stars_repo_head_hexsha: 2fd7e65dc13800b331274cd4a73345de94ee3c9b | max_stars_repo_licenses: ["MIT"] | max_stars_count: null
max_issues_repo_path: python/ccxt/async_support/liquid.py | max_issues_repo_name: 0shimax/ccxt | max_issues_repo_head_hexsha: 2fd7e65dc13800b331274cd4a73345de94ee3c9b | max_issues_repo_licenses: ["MIT"] | max_issues_count: null
max_forks_repo_path: python/ccxt/async_support/liquid.py | max_forks_repo_name: 0shimax/ccxt | max_forks_repo_head_hexsha: 2fd7e65dc13800b331274cd4a73345de94ee3c9b | max_forks_repo_licenses: ["MIT"] | max_forks_count: null
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class liquid(Exchange):
def describe(self):
return self.deep_extend(super(liquid, self).describe(), {
'id': 'liquid',
'name': 'Liquid',
'countries': ['JP', 'CN', 'TW'],
'version': '2',
'rateLimit': 1000,
'has': {
'cancelOrder': True,
'CORS': None,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/45798859-1a872600-bcb4-11e8-8746-69291ce87b04.jpg',
'api': 'https://api.liquid.com',
'www': 'https://www.liquid.com',
'doc': [
'https://developers.liquid.com',
],
'fees': 'https://help.liquid.com/getting-started-with-liquid/the-platform/fee-structure',
'referral': 'https://www.liquid.com/sign-up/?affiliate=SbzC62lt30976',
},
'api': {
'public': {
'get': [
'currencies',
'products',
'products/{id}',
'products/{id}/price_levels',
'executions',
'ir_ladders/{currency}',
'fees', # add fetchFees, fetchTradingFees, fetchFundingFees
],
},
'private': {
'get': [
'accounts', # undocumented https://github.com/ccxt/ccxt/pull/7493
'accounts/balance',
'accounts/main_asset',
'accounts/{id}',
'accounts/{currency}/reserved_balance_details',
'crypto_accounts', # add fetchAccounts
'crypto_withdrawal',
'crypto_withdrawals',
'crypto_withdrawals/crypto_networks',
'executions/me',
'fiat_accounts', # add fetchAccounts
'fund_infos', # add fetchDeposits
'loan_bids',
'loans',
'orders',
'orders/{id}',
'orders/{id}/trades', # add fetchOrderTrades
'trades',
'trades/{id}/loans',
'trading_accounts',
'trading_accounts/{id}',
'transactions',
'withdrawals', # add fetchWithdrawals
'user/fee_tier',
'user/fees',
'trading_accounts/{id}',
'bank_accounts',
'accounts/{currency}/reserved_balance_details',
],
'post': [
'crypto_withdrawals',
'fund_infos',
'fiat_accounts',
'loan_bids',
'orders',
'withdrawals',
'fees/estimate',
],
'put': [
'crypto_withdrawal/{id}/cancel',
'loan_bids/{id}/close',
'loans/{id}',
'orders/{id}', # add editOrder
'orders/{id}/cancel',
'trades/{id}',
'trades/{id}/adjust_margin',
'trades/{id}/close',
'trades/close_all',
'trading_accounts/{id}',
'withdrawals/{id}/cancel',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.0030,
'maker': 0.0000,
'tiers': {
'perpetual': {
'maker': [
[0, 0.0000],
[25000, 0.0000],
[50000, -0.00025],
[100000, -0.00025],
[1000000, -0.00025],
[10000000, -0.00025],
[25000000, -0.00025],
[50000000, -0.00025],
[75000000, -0.00025],
[100000000, -0.00025],
[200000000, -0.00025],
[300000000, -0.00025],
],
'taker': [
[0, 0.00120],
[25000, 0.00115],
[50000, 0.00110],
[100000, 0.00105],
[1000000, 0.00100],
[10000000, 0.00095],
[25000000, 0.00090],
[50000000, 0.00085],
[75000000, 0.00080],
[100000000, 0.00075],
[200000000, 0.00070],
[300000000, 0.00065],
],
},
'spot': {
'taker': [
[0, 0.003],
[10000, 0.0029],
[20000, 0.0028],
[50000, 0.0026],
[100000, 0.0020],
[1000000, 0.0016],
[5000000, 0.0012],
[10000000, 0.0010],
[25000000, 0.0009],
[50000000, 0.0008],
[100000000, 0.0007],
[200000000, 0.0006],
[500000000, 0.0004],
[1000000000, 0.0003],
],
'maker': [
[0, 0.0000],
[10000, 0.0020],
[20000, 0.0019],
[50000, 0.0018],
[100000, 0.0016],
[1000000, 0.0008],
[5000000, 0.0007],
[10000000, 0.0005],
[25000000, 0.0000],
[50000000, 0.0000],
[100000000, 0.0000],
[200000000, 0.0000],
[500000000, 0.0000],
[1000000000, 0.0000],
],
},
},
},
},
'precisionMode': TICK_SIZE,
'exceptions': {
'API rate limit exceeded. Please retry after 300s': DDoSProtection,
'API Authentication failed': AuthenticationError,
'Nonce is too small': InvalidNonce,
'Order not found': OrderNotFound,
'Can not update partially filled order': InvalidOrder,
'Can not update non-live order': OrderNotFound,
'not_enough_free_balance': InsufficientFunds,
'must_be_positive': InvalidOrder,
'less_than_order_size': InvalidOrder,
'price_too_high': InvalidOrder,
'price_too_small': InvalidOrder, # {"errors":{"order":["price_too_small"]}}
'product_disabled': BadSymbol, # {"errors":{"order":["product_disabled"]}}
},
'commonCurrencies': {
'HOT': 'HOT Token',
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
'TON': 'Tokamak Network',
'BIFI': 'Bifrost Finance',
},
'options': {
'cancelOrderException': True,
'networks': {
'ETH': 'ERC20',
'TRX': 'TRC20',
'XLM': 'Stellar',
'ALGO': 'Algorand',
},
},
})
async def fetch_currencies(self, params={}):
response = await self.publicGetCurrencies(params)
#
# [
# {
# currency_type: 'fiat',
# currency: 'USD',
# symbol: '$',
# assets_precision: 2,
# quoting_precision: 5,
# minimum_withdrawal: '15.0',
# withdrawal_fee: 5,
# minimum_fee: null,
# minimum_order_quantity: null,
# display_precision: 2,
# depositable: True,
# withdrawable: True,
# discount_fee: 0.5,
# credit_card_fundable: False,
# lendable: False,
# position_fundable: True,
# has_memo: False,
# stable_currency: null,
# root_currency: 'USD',
# minimum_loan_bid_quantity: '0.0',
# maximum_order_taker_quantity: null,
# name: 'United States Dollar'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'name')
active = currency['depositable'] and currency['withdrawable']
amountPrecision = self.safe_integer(currency, 'assets_precision')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': self.safe_number(currency, 'withdrawal_fee'),
'precision': amountPrecision,
'limits': {
'amount': {
'min': math.pow(10, -amountPrecision),
'max': math.pow(10, amountPrecision),
},
'withdraw': {
'min': self.safe_number(currency, 'minimum_withdrawal'),
'max': None,
},
},
}
return result
async def fetch_markets(self, params={}):
spot = await self.publicGetProducts(params)
#
# [
# {
# "id":"637",
# "product_type":"CurrencyPair",
# "code":"CASH",
# "name":null,
# "market_ask":"0.00000797",
# "market_bid":"0.00000727",
# "indicator":null,
# "currency":"BTC",
# "currency_pair_code":"TFTBTC",
# "symbol":null,
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_tftbtc_637",
# "taker_fee":"0.0",
# "maker_fee":"0.0",
# "low_market_bid":"0.00000685",
# "high_market_ask":"0.00000885",
# "volume_24h":"3696.0755956",
# "last_price_24h":"0.00000716",
# "last_traded_price":"0.00000766",
# "last_traded_quantity":"1748.0377978",
# "average_price":null,
# "quoted_currency":"BTC",
# "base_currency":"TFT",
# "tick_size":"0.00000001",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":false,
# "last_event_timestamp":"1596962820.000797146",
# "timestamp":"1596962820.000797146",
# "multiplier_up":"9.0",
# "multiplier_down":"0.1",
# "average_time_interval":null
# },
# ]
#
perpetual = await self.publicGetProducts({'perpetual': '1'})
#
# [
# {
# "id":"604",
# "product_type":"Perpetual",
# "code":"CASH",
# "name":null,
# "market_ask":"11721.5",
# "market_bid":"11719.0",
# "indicator":null,
# "currency":"USD",
# "currency_pair_code":"P-BTCUSD",
# "symbol":"$",
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_p-btcusd_604",
# "taker_fee":"0.0012",
# "maker_fee":"0.0",
# "low_market_bid":"11624.5",
# "high_market_ask":"11859.0",
# "volume_24h":"0.271",
# "last_price_24h":"11621.5",
# "last_traded_price":"11771.5",
# "last_traded_quantity":"0.09",
# "average_price":"11771.5",
# "quoted_currency":"USD",
# "base_currency":"P-BTC",
# "tick_size":"0.5",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":true,
# "last_event_timestamp":"1596963309.418853092",
# "timestamp":"1596963309.418853092",
# "multiplier_up":null,
# "multiplier_down":"0.1",
# "average_time_interval":300,
# "index_price":"11682.8124",
# "mark_price":"11719.96781",
# "funding_rate":"0.00273",
# "fair_price":"11720.2745"
# },
# ]
#
currencies = await self.fetch_currencies()
currenciesByCode = self.index_by(currencies, 'code')
result = []
markets = self.array_concat(spot, perpetual)
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quoted_currency')
productType = self.safe_string(market, 'product_type')
type = 'spot'
spot = True
swap = False
if productType == 'Perpetual':
spot = False
swap = True
type = 'swap'
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = None
if swap:
symbol = self.safe_string(market, 'currency_pair_code')
else:
symbol = base + '/' + quote
maker = self.fees['trading']['maker']
taker = self.fees['trading']['taker']
if type == 'swap':
maker = self.safe_number(market, 'maker_fee', self.fees['trading']['maker'])
taker = self.safe_number(market, 'taker_fee', self.fees['trading']['taker'])
disabled = self.safe_value(market, 'disabled', False)
active = not disabled
baseCurrency = self.safe_value(currenciesByCode, base)
precision = {
'amount': 0.00000001,
'price': self.safe_number(market, 'tick_size'),
}
minAmount = None
if baseCurrency is not None:
minAmount = self.safe_number(baseCurrency['info'], 'minimum_order_quantity')
lastPrice = self.safe_number(market, 'last_traded_price')
minPrice = None
maxPrice = None
if lastPrice:
multiplierDown = self.safe_number(market, 'multiplier_down')
multiplierUp = self.safe_number(market, 'multiplier_up')
if multiplierDown is not None:
minPrice = lastPrice * multiplierDown
if multiplierUp is not None:
maxPrice = lastPrice * multiplierUp
limits = {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': minPrice,
'max': maxPrice,
},
'cost': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'type': type,
'spot': spot,
'swap': swap,
'maker': maker,
'taker': taker,
'limits': limits,
'precision': precision,
'active': active,
'info': market,
})
return result
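    # A minimal usage sketch for the async exchange (comments only;
    # runnable if uncommented inside an asyncio program):
    #
    #   import asyncio
    #   import ccxt.async_support as ccxt
    #
    #   async def main():
    #       exchange = ccxt.liquid()
    #       markets = await exchange.fetch_markets()
    #       print(len(markets))
    #       await exchange.close()
    #
    #   asyncio.run(main())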
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
crypto = self.safe_value(response, 'crypto_accounts', [])
fiat = self.safe_value(response, 'fiat_accounts', [])
for i in range(0, len(crypto)):
balance = crypto[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'reserved_balance')
result[code] = account
for i in range(0, len(fiat)):
balance = fiat[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'reserved_balance')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAccounts(params)
#
# {
# crypto_accounts: [
# {
# id: 2221179,
# currency: 'USDT',
# balance: '0.0',
# reserved_balance: '0.0',
# pusher_channel: 'user_xxxxx_account_usdt',
# lowest_offer_interest_rate: null,
# highest_offer_interest_rate: null,
# address: '0',
# currency_symbol: 'USDT',
# minimum_withdraw: null,
# currency_type: 'crypto'
# },
# ],
# fiat_accounts: [
# {
# id: 1112734,
# currency: 'USD',
# balance: '0.0',
# reserved_balance: '0.0',
# pusher_channel: 'user_xxxxx_account_usd',
# lowest_offer_interest_rate: null,
# highest_offer_interest_rate: null,
# currency_symbol: '$',
# send_to_btc_address: null,
# exchange_rate: '1.0',
# currency_type: 'fiat'
# }
# ]
# }
#
return self.parse_balance(response)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
response = await self.publicGetProductsIdPriceLevels(self.extend(request, params))
return self.parse_order_book(response, symbol, None, 'buy_price_levels', 'sell_price_levels')
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
last = None
if 'last_traded_price' in ticker:
if ticker['last_traded_price']:
length = len(ticker['last_traded_price'])
if length > 0:
last = self.safe_number(ticker, 'last_traded_price')
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
            else:
                baseId = self.safe_string(ticker, 'base_currency')
                quoteId = self.safe_string(ticker, 'quoted_currency')
                # build the unified symbol first, then check whether the market is known
                symbol = self.safe_currency_code(baseId) + '/' + self.safe_currency_code(quoteId)
                if symbol in self.markets:
                    market = self.markets[symbol]
if market is not None:
symbol = market['symbol']
open = self.safe_number(ticker, 'last_price_24h')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high_market_ask'),
'low': self.safe_number(ticker, 'low_market_bid'),
'bid': self.safe_number(ticker, 'market_bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'market_ask'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'volume_24h'),
'quoteVolume': None,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetProducts(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = await self.publicGetProductsId(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
# { id: 12345,
# quantity: "6.789",
# price: "98765.4321",
# taker_side: "sell",
# created_at: 1512345678,
# my_side: "buy" }
timestamp = self.safe_timestamp(trade, 'created_at')
orderId = self.safe_string(trade, 'order_id')
# 'taker_side' gets filled for both fetchTrades and fetchMyTrades
takerSide = self.safe_string(trade, 'taker_side')
# 'my_side' gets filled for fetchMyTrades only and may differ from 'taker_side'
mySide = self.safe_string(trade, 'my_side')
side = mySide if (mySide is not None) else takerSide
takerOrMaker = None
if mySide is not None:
takerOrMaker = 'taker' if (takerSide == mySide) else 'maker'
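        # illustrative sketch (assumed values): for a private fill reporting
        # taker_side == 'sell' and my_side == 'buy', our resting buy order was
        # hit, so takerOrMaker resolves to 'maker'; when both sides match
        # (e.g. both 'buy'), our order crossed the book and we were the 'taker'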
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'quantity')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
id = self.safe_string(trade, 'id')
symbol = None
if market is not None:
symbol = market['symbol']
return {
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
if since is not None:
            # the exchange expects the timestamp in seconds, whereas ccxt uses
            # milliseconds everywhere, including the since argument
request['timestamp'] = int(since / 1000)
response = await self.publicGetExecutions(self.extend(request, params))
result = response if (since is not None) else response['models']
return self.parse_trades(result, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
# the `with_details` param is undocumented - it adds the order_id to the results
request = {
'product_id': market['id'],
'with_details': True,
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetExecutionsMe(self.extend(request, params))
return self.parse_trades(response['models'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
params = self.omit(params, ['clientOrderId', 'client_order_id'])
request = {
'order_type': type,
'product_id': self.market_id(symbol),
'side': side,
'quantity': self.amount_to_precision(symbol, amount),
}
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
if (type == 'limit') or (type == 'limit_post_only') or (type == 'market_with_range') or (type == 'stop'):
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostOrders(self.extend(request, params))
#
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "client_order_id": null,
# }
#
return self.parse_order(response)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privatePutOrdersIdCancel(self.extend(request, params))
order = self.parse_order(response)
if order['status'] == 'closed':
if self.options['cancelOrderException']:
raise OrderNotFound(self.id + ' order closed already: ' + self.json(response))
return order
async def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
if price is None:
raise ArgumentsRequired(self.id + ' editOrder() requires the price argument')
request = {
'order': {
'quantity': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
},
'id': id,
}
response = await self.privatePutOrdersId(self.extend(request, params))
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'live': 'open',
'filled': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0"
# "client_order_id": null,
# }
#
# fetchOrder, fetchOrders, fetchOpenOrders, fetchClosedOrders
#
# {
# "id": 2157479,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.01",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "filled",
# "leverage_level": 2,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "executions": [
# {
# "id": 4566133,
# "quantity": "0.01",
# "price": "500.0",
# "taker_side": "buy",
# "my_side": "sell",
# "created_at": 1465396785
# }
# ]
# }
#
orderId = self.safe_string(order, 'id')
timestamp = self.safe_timestamp(order, 'created_at')
marketId = self.safe_string(order, 'product_id')
market = self.safe_value(self.markets_by_id, marketId)
status = self.parse_order_status(self.safe_string(order, 'status'))
amount = self.safe_number(order, 'quantity')
filled = self.safe_number(order, 'filled_quantity')
price = self.safe_number(order, 'price')
symbol = None
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
type = self.safe_string(order, 'order_type')
tradeCost = 0
tradeFilled = 0
average = self.safe_number(order, 'average_price')
trades = self.parse_trades(self.safe_value(order, 'executions', []), market, None, None, {
'order': orderId,
'type': type,
})
numTrades = len(trades)
for i in range(0, numTrades):
            # php copies values upon assignment, but does not reference them
            # todo: rewrite this (shortly)
trade = trades[i]
trade['order'] = orderId
trade['type'] = type
tradeFilled = self.sum(tradeFilled, trade['amount'])
tradeCost = self.sum(tradeCost, trade['cost'])
cost = None
lastTradeTimestamp = None
if numTrades > 0:
lastTradeTimestamp = trades[numTrades - 1]['timestamp']
if not average and (tradeFilled > 0):
average = tradeCost / tradeFilled
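        # illustrative sketch (assumed values): two executions of 0.005 @ 500
        # and 0.005 @ 510 give tradeFilled = 0.01 and tradeCost = 5.05, so a
        # missing average is backfilled as 5.05 / 0.01 = 505.0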
if cost is None:
cost = tradeCost
if filled is None:
filled = tradeFilled
remaining = None
if amount is not None and filled is not None:
remaining = amount - filled
side = self.safe_string(order, 'side')
clientOrderId = self.safe_string(order, 'client_order_id')
return {
'id': orderId,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'type': type,
'timeInForce': None,
'postOnly': None,
'status': status,
'symbol': symbol,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'filled': filled,
'cost': cost,
'remaining': remaining,
'average': average,
'trades': trades,
'fee': {
'currency': feeCurrency,
'cost': self.safe_number(order, 'order_fee'),
},
'info': order,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrdersId(self.extend(request, params))
return self.parse_order(response)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {
            # 'funding_currency': market['quoteId'],  # filter orders based on "funding" currency (quote currency)
# 'product_id': market['id'],
# 'status': 'live', # 'filled', 'cancelled'
# 'trading_type': 'spot', # 'margin', 'cfd'
'with_details': 1, # return full order details including executions
}
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
if limit is not None:
request['limit'] = limit
response = await self.privateGetOrders(self.extend(request, params))
#
# {
# "models": [
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "executions": [], # optional
# }
# ],
# "current_page": 1,
# "total_pages": 1
# }
#
orders = self.safe_value(response, 'models', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'status': 'live'}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'status': 'filled'}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
# 'auth_code': '', # optional 2fa code
'crypto_withdrawal': {
'currency': currency['id'],
'address': address,
'amount': amount,
# 'payment_id': tag, # for XRP only
# 'memo_type': 'text', # 'text', 'id' or 'hash', for XLM only
# 'memo_value': tag, # for XLM only
},
}
if tag is not None:
if code == 'XRP':
request['crypto_withdrawal']['payment_id'] = tag
elif code == 'XLM':
request['crypto_withdrawal']['memo_type'] = 'text' # overrideable via params
request['crypto_withdrawal']['memo_value'] = tag
else:
raise NotSupported(self.id + ' withdraw() only supports a tag along the address for XRP or XLM')
networks = self.safe_value(self.options, 'networks', {})
paramsCwArray = self.safe_value(params, 'crypto_withdrawal', {})
        network = self.safe_string_upper(paramsCwArray, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string(networks, network, network) # handle ERC20>ETH alias
if network is not None:
request['crypto_withdrawal']['network'] = network
params['crypto_withdrawal'] = self.omit(params['crypto_withdrawal'], 'network')
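        # illustrative sketch (assumes a {'ERC20': 'ETH'} entry in
        # self.options['networks']): a call such as
        #
        #     exchange.withdraw('USDT', 10, address, None,
        #                       {'crypto_withdrawal': {'network': 'ERC20'}})
        #
        # is normalized above so the request carries 'network': 'ETH'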
response = await self.privatePostCryptoWithdrawals(self.deep_extend(request, params))
#
# {
# "id": 1353,
# "address": "1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2",
# "amount": 1.0,
# "state": "pending",
# "currency": "BTC",
# "withdrawal_fee": 0.0,
# "created_at": 1568016450,
# "updated_at": 1568016450,
# "payment_id": null
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
            # 'state': 'processed',  # optional: pending, filed, cancelled, processing, processed, reverted, to_be_reviewed, declined, broadcasted
}
currency = None
if code is not None:
currency = self.currency(code)
response = await self.privateGetCryptoWithdrawals(self.extend(request, params))
#
# {
# models: [
# {
# id: '2',
# address: '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2',
# amount: '0.01',
# state: 'processed',
# currency: 'BTC',
# withdrawal_fee: '0.0005',
# created_at: '1614718276',
# updated_at: '1614720926',
# payment_id: null,
# transaction_hash: 'xxxxxxxx...',
# broadcasted_at: '1614720762',
# wallet_label: 'btc',
# chain_name: 'Bitcoin',
# network: null
# },
# ],
# current_page: '1',
# total_pages: '1'
# }
#
transactions = self.safe_value(response, 'models', [])
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'pending': 'pending',
'cancelled': 'canceled',
'approved': 'ok',
'processing': 'pending',
'processed': 'ok',
'reverted': 'failed',
'to_be_reviewed': 'pending',
'declined': 'failed',
'broadcasted': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# id: '1',
# address: '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2',
# amount: '0.01',
# state: 'pending',
# currency: 'BTC',
# withdrawal_fee: '0.0007',
# created_at: '1626000533',
# updated_at: '1626000533',
# payment_id: null,
# transaction_hash: null,
# broadcasted_at: null,
# wallet_label: null,
# chain_name: 'Bitcoin',
# network: null
# },
#
# fetchWithdrawals
#
# {
# id: '2',
# address: '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2',
# amount: '0.01',
# state: 'processed',
# currency: 'BTC',
# withdrawal_fee: '0.0005',
# created_at: '1614718276',
# updated_at: '1614720926',
# payment_id: '',
# transaction_hash: 'xxxxxxxx...',
# broadcasted_at: '1614720762',
# wallet_label: 'btc',
# chain_name: 'Bitcoin',
# network: null
# },
#
# fetchDeposits
#
# ...
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = self.safe_string_2(transaction, 'payment_id', 'memo_value')
txid = self.safe_string(transaction, 'transaction_hash')
currencyId = self.safe_string_2(transaction, 'currency', 'asset')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.safe_timestamp(transaction, 'created_at')
updated = self.safe_timestamp(transaction, 'updated_at')
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
amountString = self.safe_string(transaction, 'amount')
feeCostString = self.safe_string(transaction, 'withdrawal_fee')
amount = self.parse_number(Precise.string_sub(amountString, feeCostString))
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': self.parse_number(feeCostString),
},
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
headers = {
'X-Quoine-API-Version': self.version,
'Content-Type': 'application/json',
}
if api == 'private':
self.check_required_credentials()
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif query:
body = self.json(query)
nonce = self.nonce()
request = {
'path': url,
'token_id': self.apiKey,
'iat': int(math.floor(nonce / 1000)), # issued at
}
            if 'client_order_id' not in query:
request['nonce'] = nonce
headers['X-Quoine-Auth'] = self.jwt(request, self.encode(self.secret))
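            # illustrative sketch (made-up values): for a private GET of
            # /orders?product_id=1 the signed payload resembles
            #
            #     {'path': '/orders?product_id=1', 'token_id': self.apiKey,
            #      'iat': 1600000000, 'nonce': 1600000000123}
            #
            # which self.jwt() HMAC-signs with the account secret to produce
            # the X-Quoine-Auth header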
else:
if query:
url += '?' + self.urlencode(query)
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        if 200 <= code < 300:
return
if code == 401:
# expected non-json response
self.throw_exactly_matched_exception(self.exceptions, body, body)
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if response is None:
return
feedback = self.id + ' ' + body
message = self.safe_string(response, 'message')
errors = self.safe_value(response, 'errors')
if message is not None:
#
# {"message": "Order not found"}
#
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
elif errors is not None:
#
# {"errors": {"user": ["not_enough_free_balance"]}}
# {"errors": {"quantity": ["less_than_order_size"]}}
# {"errors": {"order": ["Can not update partially filled order"]}}
#
types = list(errors.keys())
for i in range(0, len(types)):
type = types[i]
errorMessages = errors[type]
for j in range(0, len(errorMessages)):
message = errorMessages[j]
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
else:
raise ExchangeError(feedback)
| avg_line_length: 41.135823 | max_line_length: 143 | alphanum_fraction: 0.459367 |

| hexsha: 4a120f7da2f1d6ee270a44ffa009bb7e5f2605ba | size: 86976 | ext: py | lang: Python |
| repo_path (stars/issues/forks): sphinxcontrib/confluencebuilder/translator/storage.py |
| repo_name (stars/issues/forks): sphinx-contrib/confluencebuilder |
| repo_head_hexsha (stars/issues/forks): ef93320ead496a4e55458dd46a2a23669f62f17a |
| repo_licenses: ["BSD-2-Clause"] |
| max_stars_count: 158 (2019-03-18T13:42:40.000Z to 2022-03-25T09:46:59.000Z) |
| max_issues_count: 192 (2019-03-15T14:12:25.000Z to 2022-03-27T18:35:48.000Z) |
| max_forks_count: 54 (2019-03-22T14:14:31.000Z to 2022-03-08T06:54:28.000Z) |
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2016-2020 Sphinx Confluence Builder Contributors (AUTHORS)
:copyright: Copyright 2018-2020 by the Sphinx team (sphinx-doc/sphinx#AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from __future__ import unicode_literals
from docutils import nodes
from os import path
from sphinx import addnodes
from sphinx.locale import _
from sphinx.locale import admonitionlabels
from sphinx.util.images import get_image_size
from sphinxcontrib.confluencebuilder.exceptions import ConfluenceError
from sphinxcontrib.confluencebuilder.logger import ConfluenceLogger
from sphinxcontrib.confluencebuilder.state import ConfluenceState
from sphinxcontrib.confluencebuilder.std.confluence import FALLBACK_HIGHLIGHT_STYLE
from sphinxcontrib.confluencebuilder.std.confluence import FCMMO
from sphinxcontrib.confluencebuilder.std.confluence import INDENT
from sphinxcontrib.confluencebuilder.std.confluence import LITERAL2LANG_MAP
from sphinxcontrib.confluencebuilder.std.sphinx import DEFAULT_HIGHLIGHT_STYLE
from sphinxcontrib.confluencebuilder.translator import ConfluenceBaseTranslator
from sphinxcontrib.confluencebuilder.util import first
import math
import posixpath
import sys
try:
unicode
except NameError:
unicode = str
class ConfluenceStorageFormatTranslator(ConfluenceBaseTranslator):
    """
    confluence storage format extension translator

    A storage format-specific translator instance for the Confluence extension
    for Sphinx.

    Args:
        document: the document being translated
        builder: the sphinx builder instance
    """
    _tracked_unknown_code_lang = []
def __init__(self, document, builder):
ConfluenceBaseTranslator.__init__(self, document, builder)
config = builder.config
self.add_secnumbers = config.confluence_add_secnumbers
self.numfig = config.numfig
self.numfig_format = config.numfig_format
self.secnumber_suffix = config.confluence_secnumber_suffix
self.todo_include_todos = getattr(config, 'todo_include_todos', None)
self._building_footnotes = False
self._figure_context = []
self._manpage_url = getattr(config, 'manpages_url', None)
self._reference_context = []
self._thead_context = []
self.colspecs = []
self._tocdepth = ConfluenceState.toctreeDepth(self.docname)
# helpers for dealing with disabled/unsupported features
restricted = config.confluence_adv_restricted
self.can_admonition = 'info' not in restricted
self.can_anchor = 'anchor' not in restricted
self.can_children = 'children' not in restricted
self.can_code = 'code' not in restricted
self.can_expand = 'expand' not in restricted
self.can_jira = 'jira' not in restricted
self.can_viewfile = 'viewfile' not in restricted
if (config.confluence_page_hierarchy
and config.confluence_adv_hierarchy_child_macro
and self.can_children):
self.apply_hierarchy_children_macro = True
else:
self.apply_hierarchy_children_macro = False
def encode(self, text):
text = self._encode_sf(text)
return ConfluenceBaseTranslator.encode(self, text)
# ---------
# structure
# ---------
def get_secnumber(self, node):
if node.get('secnumber'):
return node['secnumber']
if isinstance(node.parent, nodes.section):
if self.builder.name == 'singleconfluence':
docname = self._docnames[-1]
raw_anchor = node.parent['ids'][0]
                anchorname = '%s/#%s' % (docname, raw_anchor)
if anchorname not in self.builder.secnumbers:
anchorname = '%s/' % raw_anchor
else:
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
anchorname = ''
if self.builder.secnumbers.get(anchorname):
return self.builder.secnumbers[anchorname]
return None
def add_secnumber(self, node):
if not self.add_secnumbers:
return
secnumber = self.get_secnumber(node)
if secnumber:
self.body.append('.'.join(map(str, secnumber)) +
self.secnumber_suffix)
def add_fignumber(self, node):
if not self.numfig:
return
def append_fignumber(figtype, figure_id):
if self.builder.name == 'singleconfluence':
key = '%s/%s' % (self._docnames[-1], figtype)
else:
key = figtype
if figure_id in self.builder.fignumbers.get(key, {}):
prefix = self.numfig_format.get(figtype)
if prefix:
numbers = self.builder.fignumbers[key][figure_id]
self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
figtype = self.builder.env.domains['std'].get_enumerable_node_type(node)
if figtype:
if len(node['ids']) > 0:
append_fignumber(figtype, node['ids'][0])
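    # illustrative sketch (assumed config): with numfig enabled and
    # numfig_format = {'figure': 'Fig. %s'}, a figure numbered (1, 2) has
    # add_fignumber() emit the prefix 'Fig. 1.2 ' ahead of its caption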
def visit_start_of_file(self, node):
ConfluenceBaseTranslator.visit_start_of_file(self, node)
# ensure document target exists for singleconfluence
#
# When references to individual documents are built, they will use the
# target mapping which should (in theory) be the section title generated
# for the specific document. In the event that a page does not have a
# title, there will be no target to map to. The fallback for these
# references is to just link to the anchor point on a page matching the
# target document's docname value. If it is detected that there is no
# target registered for a given document (since it's titleless), build
# an anchor point with the name matching the title (which allows the
# fallback link to jump to the desired point in a document).
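        # illustrative sketch (hypothetical docname): a titleless document
        # 'changelog' receives an explicit anchor so '<docname>/' fallback
        # links still land on it; the body receives roughly
        #
        #     <ac:structured-macro ac:name="anchor">
        #       <ac:parameter ac:name="">changelog</ac:parameter>
        #     </ac:structured-macro>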
if self.builder.name == 'singleconfluence' and self.can_anchor:
doc_anchorname = '%s/' % node['docname']
doc_target = ConfluenceState.target(doc_anchorname)
if not doc_target:
doc_id = node['docname']
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', doc_id))
self.body.append(self._end_ac_macro(node))
def visit_title(self, node):
if isinstance(node.parent, (nodes.section, nodes.topic)):
self.body.append(
self._start_tag(node, 'h{}'.format(self._title_level)))
self.add_secnumber(node)
self.add_fignumber(node.parent)
self.context.append(self._end_tag(node))
# if title points to a section and does not already contain a
# reference, create a link to it
if ('refid' in node and not node.next_node(nodes.reference) and
self.can_anchor):
anchor_value = ''.join(node['refid'].split())
self.body.append(self._start_ac_link(node, anchor_value))
self.context.append(self._end_ac_link(node))
self.body.append(self._start_ac_link_body(node))
self.context.append(self._end_ac_link_body(node))
elif (isinstance(node.parent, addnodes.compact_paragraph) and
node.parent.get('toctree')):
self.visit_caption(node)
else:
# Only render section/topic titles in headers. For all other nodes,
# they must explicitly manage their own title entries.
raise nodes.SkipNode
def depart_title(self, node):
if isinstance(node.parent, (nodes.section, nodes.topic)):
if ('refid' in node and not node.next_node(nodes.reference) and
self.can_anchor):
self.body.append(self.context.pop()) # ac_link_body
self.body.append(self.context.pop()) # end_ac_link
self.body.append(self.context.pop()) # h<x>
elif (isinstance(node.parent, addnodes.compact_paragraph) and
node.parent.get('toctree')):
self.depart_caption(node)
def visit_paragraph(self, node):
attribs = {}
# MyST-Parser will inject text-align hints in the node's classes
# attribute; if set, attempt to apply the style
if isinstance(node.parent, nodes.entry):
for class_ in node.parent.get('classes', []):
if class_.startswith('text-align:'):
attribs['style'] = self._encode_sf(class_)
break
self.body.append(self._start_tag(node, 'p', **attribs))
self.context.append(self._end_tag(node))
def depart_paragraph(self, node):
self.body.append(self.context.pop()) # p
def visit_transition(self, node):
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True))
raise nodes.SkipNode
# ----------------------
# body elements -- lists
# ----------------------
    def _apply_leading_list_item_offsets(self, node, attribs):
        # Confluence's provided styles remove the leading margins of
        # first-child elements. This causes some unexpected styling issues
        # when list entries contain other block elements. This extension
        # attempts to keep list item entries compact; however, for a list
        # which contains non-compact entries (e.g. multiple paragraphs),
        # each list item is instead given its own margin offset.
#
# Previously, a pattern such as the following would occur:
#
# - line
# (spacing)
# line
# - line
# - line
# (spacing)
# line
# - line
# (spacing)
# line
#
# To prevent this from happening, a margin applied to non-compact
# entries will render as:
#
# - line
# (spacing)
# line
# (spacing) <-- spacing between complex list item
# - line <-- no spacing for compact list (desired)
# - line
# (spacing)
# line
# (spacing) <-- spacing between complex list item
# - line
# (spacing)
# line
#
# If any item in this list contains two or more children (with the
# exception of a "paragraph" + list pair), consider the entire list a
# complex one and flag each list item to include a margin.
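        # illustrative sketch: a list item holding [paragraph, bullet_list]
        # stays compact, while an item holding [paragraph, paragraph] (or
        # three or more children) marks the entire list as complex below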
has_complex = False
for child in node.children: # list items
if len(child.children) > 2 or (len(child.children) == 2
and not isinstance(child.children[1],
(nodes.bullet_list, nodes.enumerated_list))):
has_complex = True
break
if has_complex:
for child in node.children:
child.__confluence_list_item_margin = True
# If this list is nested inside a complex list, ensure this list starts
        # off with a margin (to offset its position inside the complex list).
if isinstance(node.parent, nodes.list_item):
try:
if node.parent.__confluence_list_item_margin:
attribs['style'] = 'margin-top: {}px;'.format(FCMMO)
except AttributeError:
pass
def visit_bullet_list(self, node):
attribs = {}
        self._apply_leading_list_item_offsets(node, attribs)
self.body.append(self._start_tag(node, 'ul', suffix=self.nl, **attribs))
self.context.append(self._end_tag(node))
def depart_bullet_list(self, node):
self.body.append(self.context.pop()) # ul
def visit_enumerated_list(self, node):
attribs = {}
        self._apply_leading_list_item_offsets(node, attribs)
# note: - Not all Confluence versions (if any) support populating the
# 'type' attribute of an ordered list tag; however, the 'style'
# attribute is accepted.
# - Not all Confluence versions (if any) support populating the
# 'start' attribute of an ordered list tag; limiting to
# auto-enumeration items only.
list_style_type = None
if 'enumtype' in node:
if node['enumtype'] == 'upperalpha':
list_style_type = 'upper-alpha'
elif node['enumtype'] == 'loweralpha':
list_style_type = 'lower-alpha'
elif node['enumtype'] == 'upperroman':
list_style_type = 'upper-roman'
elif node['enumtype'] == 'lowerroman':
list_style_type = 'lower-roman'
elif node['enumtype'] == 'arabic':
list_style_type = 'decimal'
else:
self.warn(
'unknown enumerated list type: {}'.format(node['enumtype']))
if list_style_type:
if 'style' not in attribs:
attribs['style'] = ''
attribs['style'] = '{}list-style-type: {};'.format(
attribs['style'], list_style_type)
self.body.append(self._start_tag(node, 'ol', suffix=self.nl, **attribs))
self.context.append(self._end_tag(node))
def depart_enumerated_list(self, node):
self.body.append(self.context.pop()) # ol
def visit_list_item(self, node):
        # apply margin offset if flagged (see _apply_leading_list_item_offsets)
attribs = {}
try:
if node.__confluence_list_item_margin:
attribs['style'] = 'margin-top: {}px;'.format(FCMMO)
except AttributeError:
pass
self.body.append(self._start_tag(node, 'li', suffix=self.nl, **attribs))
self.context.append(self._end_tag(node))
def depart_list_item(self, node):
self.body.append(self.context.pop()) # li
# ---------------------------------
# body elements -- definition lists
# ---------------------------------
def visit_definition_list(self, node):
self.body.append(self._start_tag(node, 'dl', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_definition_list(self, node):
self.body.append(self.context.pop()) # dl
def visit_definition_list_item(self, node):
# When processing a definition list item (an entry), multiple terms may
# exist for the given entry (e.g. when using a glossary). Before
# displaying an actual definition of one or more terms, there may exist
# classifiers for a given entry. On the last term for an entry, all
# classifier information will be displayed in the definition-type. In
# order to achieve this, a list entry will be tracked to see if a term
# has been processed for an entry. If a new term is detected, the
# previous term's tag will be closed off. On the final term, the tag is
# not closed off until the definition (visit_definition) is processed.
# This allows classifier information to be populated into the last term
# element.
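        # illustrative sketch (hypothetical glossary entry): for the input
        #
        #     term-a
        #     term-b : classifier
        #         shared definition
        #
        # term-a's <dt> is closed when term-b is visited, while term-b's <dt>
        # is left open so ' : <em>classifier</em>' can be appended before the
        # <dd> definition is emitted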
self._has_term = False
def depart_definition_list_item(self, node):
self._has_term = False
def visit_term(self, node):
# close of previous term (see visit_definition_list_item)
if self._has_term:
self.body.append(self.context.pop()) # dt
if 'ids' in node and self.can_anchor:
for id in node['ids']:
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', id))
self.body.append(self._end_ac_macro(node))
self.body.append(self._start_tag(node, 'dt'))
self.context.append(self._end_tag(node))
self._has_term = True
def depart_term(self, node):
# note: Do not pop the context populated from 'visit_term'. The last
# entry may need to hold classifier information inside it. Either
# next term or a term's definition will pop the context.
pass
def visit_classifier(self, node):
self.body.append(' : ')
self.body.append(self._start_tag(node, 'em'))
self.context.append(self._end_tag(node, suffix=''))
def depart_classifier(self, node):
self.body.append(self.context.pop()) # em
def visit_definition(self, node):
if self._has_term:
self.body.append(self.context.pop()) # dt
self._has_term = False
self.body.append(self._start_tag(node, 'dd', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_definition(self, node):
self.body.append(self.context.pop()) # dd
def visit_termsep(self, node):
raise nodes.SkipNode
# ----------------------------
# body elements -- field lists
# ----------------------------
def visit_field_list(self, node):
self.body.append(self._start_tag(node, 'table', suffix=self.nl))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tbody', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_field_list(self, node):
self.body.append(self.context.pop()) # tbody
self.body.append(self.context.pop()) # table
def visit_field(self, node):
self.body.append(self._start_tag(node, 'tr', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_field(self, node):
self.body.append(self.context.pop()) # tr
def visit_field_name(self, node):
self.body.append(self._start_tag(node, 'td',
**{'style': 'border: none'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'strong'))
self.context.append(self._end_tag(node, suffix=''))
def depart_field_name(self, node):
self.body.append(':')
self.body.append(self.context.pop()) # strong
self.body.append(self.context.pop()) # td
def visit_field_body(self, node):
self.body.append(self._start_tag(node, 'td',
**{'style': 'border: none'}))
self.context.append(self._end_tag(node))
def depart_field_body(self, node):
self.body.append(self.context.pop()) # td
# -----------------------------
# body elements -- option lists
# -----------------------------
def visit_option_list(self, node):
self.body.append(self._start_tag(node, 'table', suffix=self.nl))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tbody', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_option_list(self, node):
self.body.append(self.context.pop()) # tbody
self.body.append(self.context.pop()) # table
def visit_option_list_item(self, node):
self.body.append(self._start_tag(node, 'tr', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_option_list_item(self, node):
self.body.append(self.context.pop()) # tr
def visit_option_group(self, node):
self._first_option = True
self.body.append(self._start_tag(node, 'td',
**{'style': 'border: none'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'code'))
self.context.append(self._end_tag(node, suffix=''))
def depart_option_group(self, node):
self.body.append(self.context.pop()) # code
self.body.append(self.context.pop()) # td
def visit_option(self, node):
if self._first_option:
self._first_option = False
else:
self.body.append(', ')
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.body.append(node['delimiter'])
self.body.append(self._start_tag(node, 'em'))
self.context.append(self._end_tag(node, suffix=''))
def depart_option_argument(self, node):
self.body.append(self.context.pop()) # em
def visit_description(self, node):
self.body.append(self._start_tag(node, 'td',
**{'style': 'border: none'}))
self.context.append(self._end_tag(node))
def depart_description(self, node):
self.body.append(self.context.pop()) # td
# -------------------------------
# body elements -- literal blocks
# -------------------------------
def visit_literal_block(self, node):
lang = None
# non-raw literal
if node.rawsource != node.astext():
# include marked with a literal flag
if 'source' in node:
lang = 'none'
# parsed literal
else:
self._literal = True
self.body.append(self._start_tag(node, 'div', suffix=self.nl,
**{'class': 'panel pdl'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'pre',
**{'class': 'panelContent'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'code'))
self.context.append(self._end_tag(node))
return
if not lang:
lang = node.get('language', self._highlight).lower()
if self.builder.lang_transform:
lang = self.builder.lang_transform(lang)
elif lang in LITERAL2LANG_MAP.keys():
lang = LITERAL2LANG_MAP[lang]
else:
if lang not in self._tracked_unknown_code_lang:
self.warn('unknown code language: {}'.format(lang))
self._tracked_unknown_code_lang.append(lang)
lang = LITERAL2LANG_MAP[FALLBACK_HIGHLIGHT_STYLE]
data = self.nl.join(node.astext().splitlines())
title = node.get('scb-caption', None)
if title:
title = self._encode_sf(title)
if node.get('linenos', False):
num = 'true'
elif data.count('\n') >= self._linenothreshold:
num = 'true'
else:
num = 'false'
firstline = None
if num == 'true':
try:
firstline = node.attributes['highlight_args']['linenostart']
except KeyError:
pass
if self.can_code:
self.body.append(self._start_ac_macro(node, 'code'))
self.body.append(self._build_ac_parameter(node, 'language', lang))
self.body.append(self._build_ac_parameter(node, 'linenumbers', num))
if firstline is not None and firstline > 1:
self.body.append(
self._build_ac_parameter(node, 'firstline', str(firstline))
)
if title:
self.body.append(self._build_ac_parameter(node, 'title', title))
self.body.append(self._start_ac_plain_text_body_macro(node))
self.body.append(self._escape_cdata(data))
self.body.append(self._end_ac_plain_text_body_macro(node))
self.body.append(self._end_ac_macro(node))
else:
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True))
self.body.append(self._start_tag(node, 'pre'))
self.body.append(self._encode_sf(data))
self.body.append(self._end_tag(node))
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True))
raise nodes.SkipNode
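    # illustrative sketch (hand-written approximation, not captured output):
    # a highlighted python block appends roughly the following storage-format
    # markup to the body:
    #
    #     <ac:structured-macro ac:name="code">
    #       <ac:parameter ac:name="language">python</ac:parameter>
    #       <ac:parameter ac:name="linenumbers">false</ac:parameter>
    #       <ac:plain-text-body><![CDATA[print('hi')]]></ac:plain-text-body>
    #     </ac:structured-macro>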
def depart_literal_block(self, node):
self._literal = False
# note: depart is only invoked for parsed-literals
self.body.append(self.context.pop()) # code
self.body.append(self.context.pop()) # pre
self.body.append(self.context.pop()) # div
def visit_highlightlang(self, node):
self._highlight = node.get('lang', DEFAULT_HIGHLIGHT_STYLE)
self._linenothreshold = node.get('linenothreshold', sys.maxsize)
raise nodes.SkipNode
def visit_doctest_block(self, node):
data = self.nl.join(node.astext().splitlines())
if self.can_code:
self.body.append(self._start_ac_macro(node, 'code'))
self.body.append(self._build_ac_parameter(
node, 'language', 'python')) # python-specific
self.body.append(self._start_ac_plain_text_body_macro(node))
self.body.append(self._escape_cdata(data))
self.body.append(self._end_ac_plain_text_body_macro(node))
self.body.append(self._end_ac_macro(node))
else:
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True))
self.body.append(self._start_tag(node, 'pre'))
self.body.append(self._encode_sf(data))
self.body.append(self._end_tag(node))
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True))
raise nodes.SkipNode
# -----------------------------
# body elements -- block quotes
# -----------------------------
def visit_block_quote(self, node):
if node.traverse(nodes.attribution):
self.body.append(self._start_tag(node, 'blockquote'))
self.context.append(self._end_tag(node))
else:
style = ''
            # Confluence's WYSIWYG editor, when indenting paragraphs, will
            # produce paragraphs with margin values offset in 30-pixel units.
            # The same indentation is applied here via a style value.
style += 'margin-left: {}px;'.format(INDENT)
# Confluence's provided styles remove first-child elements leading
# margins. This causes some unexpected styling issues when various
# indentation patterns are applied (between div elements and
# multiple paragraphs). To overcome this, the indent container being
# added will be given a top-padding-offset matching Confluence's
# common non-first-child element top-margins (i.e. 10 pixels).
#
# Note that this offset does not style well when multiple
# indentations are observed; sub-level containers can result in
# stacked padding (not desired). For example:
#
# first-line
# (10px of padding)
# (10px of padding)
# first-line
# first-line
#
# To prevent this from happening, if the next child container is
# another block quote, no padding is added:
#
# first-line
# (10px of padding)
# first-line
# first-line
#
            # Ideally, a padding-offset is not desired (as it may require
# tweaking if Confluence's themes change); however, the quirk works
# for now.
firstchild_margin = True
next_child = first(node.traverse(include_self=False))
if isinstance(next_child, nodes.block_quote):
firstchild_margin = False
if firstchild_margin:
style += 'padding-top: {}px;'.format(FCMMO)
self.body.append(self._start_tag(node, 'div', suffix=self.nl,
**{'style': style}))
self.context.append(self._end_tag(node))
def depart_block_quote(self, node):
self.body.append(self.context.pop()) # blockquote/div
def visit_attribution(self, node):
self.body.append('-- ')
def depart_attribution(self, node):
pass
# -----------
# admonitions
# -----------
def _visit_admonition(self, node, atype, title=None, logo=True):
if self.can_admonition:
self.body.append(self._start_ac_macro(node, atype))
if title:
self.body.append(self._build_ac_parameter(node, 'title', title))
if not logo:
self.body.append(
self._build_ac_parameter(node, 'icon', 'false'))
self.body.append(self._start_ac_rich_text_body_macro(node))
self.context.append(self._end_ac_rich_text_body_macro(node) +
self._end_ac_macro(node))
else:
self.body.append(self._start_tag(node, 'blockquote'))
self.context.append(self._end_tag(node))
def _depart_admonition(self, node):
self.body.append(self.context.pop()) # macro (or blockquote)
def _visit_info(self, node):
self._visit_admonition(node, 'info')
def _visit_note(self, node):
self._visit_admonition(node, 'note')
def _visit_tip(self, node):
self._visit_admonition(node, 'tip')
def _visit_todo_node(self, node):
if not self.todo_include_todos:
raise nodes.SkipNode
if 'ids' in node and node['ids'] and self.can_anchor:
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', node['ids'][0]))
self.body.append(self._end_ac_macro(node))
self._visit_admonition(node, 'info', title=_('Todo'))
def _visit_warning(self, node):
self._visit_admonition(node, 'warning')
def visit_admonition(self, node):
title_node = first(node.traverse(nodes.title))
if title_node:
title = title_node.astext()
self._visit_admonition(node, 'info', title, logo=False)
else:
self._visit_admonition(node, 'info', logo=False)
depart_admonition = _depart_admonition
visit_attention = _visit_note
depart_attention = _depart_admonition
visit_caution = _visit_note
depart_caution = _depart_admonition
visit_danger = _visit_warning
depart_danger = _depart_admonition
visit_error = _visit_warning
depart_error = _depart_admonition
visit_hint = _visit_tip
depart_hint = _depart_admonition
visit_important = _visit_warning
depart_important = _depart_admonition
visit_note = _visit_info
depart_note = _depart_admonition
visit_tip = _visit_tip
depart_tip = _depart_admonition
visit_todo_node = _visit_todo_node
depart_todo_node = _depart_admonition
visit_warning = _visit_warning
depart_warning = _depart_admonition
# ------
# tables
# ------
def visit_table(self, node):
title_node = first(node.traverse(nodes.title))
if title_node:
self.body.append(self._start_tag(node, 'p'))
self.body.append(self._start_tag(node, 'strong'))
self.body.append(self._encode_sf(title_node.astext()))
self.body.append(self._end_tag(node))
self.body.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'table', suffix=self.nl))
self.context.append(self._end_tag(node))
# track the thead context
#
# When writing a table cell (visit_entry), it needs to be known if the
# cell is in the header (th) or is a data cell (td). A "thead context"
        # keeps track of whether or not a cell/entry being written is of the
# proper type. A context list is needed to support nested tables.
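        # illustrative sketch: while writing a header cell of a table nested
        # inside another table's body, _thead_context reads [False, False,
        # True], so visit_entry picks 'th' from the innermost (last) flag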
self._thead_context.append(False)
def depart_table(self, node):
self.body.append(self.context.pop()) # table
self._thead_context.pop()
def visit_tgroup(self, node):
node.stubs = []
# if column widths are explicitly given, apply specific column widths
table_classes = node.parent.get('classes', [])
if 'colwidths-given' in table_classes:
has_colspec = False
for colspec in node.traverse(nodes.colspec):
if not has_colspec:
self.body.append(self._start_tag(node, 'colgroup'))
has_colspec = True
self.body.append(self._start_tag(node, 'col', empty=True,
**{'style': 'width: {}%'.format(colspec['colwidth'])}))
if has_colspec:
self.body.append(self._end_tag(node))
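        # illustrative sketch (assumed ':widths: 25 75' on a two-column
        # table): the loop above emits roughly
        #
        #     <colgroup>
        #       <col style="width: 25%" />
        #       <col style="width: 75%" />
        #     </colgroup>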
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self._thead_context.append(True) # thead context (see visit_table)
self.body.append(self._start_tag(node, 'thead', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_thead(self, node):
self.body.append(self.context.pop()) # thead context (see visit_table)
self._thead_context.pop()
def visit_tbody(self, node):
self.body.append(self._start_tag(node, 'tbody', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_tbody(self, node):
self.body.append(self.context.pop()) # tbody
def visit_row(self, node):
node.column = 0
self.body.append(self._start_tag(node, 'tr', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_row(self, node):
self.body.append(self.context.pop()) # tr
def visit_entry(self, node):
if self._thead_context[-1]:
target_tag = 'th'
elif node.parent.parent.parent.stubs[node.parent.column]:
target_tag = 'th'
else:
target_tag = 'td'
node.parent.column += 1
attribs = {}
if 'morecols' in node:
attribs['colspan'] = node['morecols'] + 1
if 'morerows' in node:
attribs['rowspan'] = node['morerows'] + 1
self.body.append(self._start_tag(node, target_tag, **attribs))
self.context.append(self._end_tag(node))
def depart_entry(self, node):
self.body.append(self.context.pop()) # td/th
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_colspec(self, node):
self.colspecs.append(node)
node.parent.stubs.append(node.attributes.get('stub'))
def depart_colspec(self, node):
pass
# -------------------
# references - common
# -------------------
def visit_reference(self, node):
# ignore reference if it is wrapped by another reference; observed
# when a local table of contents contains a section name which is a
# reference to another document
if self._reference_context:
ConfluenceLogger.verbose('skipping nested reference container')
return
if 'iscurrent' in node:
pass
elif 'top-reference' in node:
self._visit_reference_top(node)
elif 'refuri' in node:
# If a document provides an anchor target directly in the reference,
# attempt to extract the anchor value and pass it into the internal
# reference processing instead.
if node['refuri'].startswith('#'):
node['refid'] = node['refuri'][1:]
del node['refuri']
self._visit_reference_intern_id(node)
elif 'refdocname' in node or (
'internal' in node and node['internal']):
self._visit_reference_intern_uri(node)
else:
self._visit_reference_extern(node)
elif 'refid' in node:
self._visit_reference_intern_id(node)
def _visit_reference_extern(self, node):
uri = node['refuri']
uri = self._encode_sf(uri)
attribs = {}
attribs['href'] = uri
if 'reftitle' in node:
title = node['reftitle']
title = self._encode_sf(title)
attribs['title'] = title
self.body.append(self._start_tag(node, 'a', **attribs))
self._reference_context.append(self._end_tag(node, suffix=''))
def _visit_reference_intern_id(self, node):
raw_anchor = ''.join(node['refid'].split())
if self.builder.name == 'singleconfluence':
docname = self._docnames[-1]
anchorname = '%s/#%s' % (docname, raw_anchor)
if anchorname not in self.builder.secnumbers:
anchorname = '%s/' % raw_anchor
else:
anchorname = '{}#{}'.format(self.docname, raw_anchor)
# check if this target is reachable without an anchor; if so, use the
# identifier value instead
target = ConfluenceState.target(anchorname)
if target:
anchor_value = target
anchor_value = self._encode_sf(anchor_value)
elif not self.can_anchor:
anchor_value = None
else:
anchor_value = raw_anchor
is_citation = ('ids' in node and node['ids']
and 'internal' in node and node['internal'])
if (self.can_anchor and anchor_value and (is_citation or self._topic)
and 'ids' in node):
for id in node['ids']:
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', id))
self.body.append(self._end_ac_macro(node))
if is_citation:
self.body.append(self._start_tag(node, 'sup'))
if anchor_value:
# build link to internal anchor (on the same page)
# Note: plain-text-link body cannot have inline markup; content
# will be added into body already and skip-children should be
# invoked for this use case.
self.body.append(self._start_ac_link(node, anchor_value))
self.body.append(self._start_ac_link_body(node))
self._reference_context.append(self._end_ac_link_body(node))
self._reference_context.append(self._end_ac_link(node))
if is_citation:
self._reference_context.append(self._end_tag(node, suffix='')) # sup
def _visit_reference_intern_uri(self, node):
docname = posixpath.normpath(
self.docparent + path.splitext(node['refuri'].split('#')[0])[0])
doctitle = ConfluenceState.title(docname)
if not doctitle:
self.warn('unable to build link to document due to '
'missing title (in {}): {}'.format(self.docname, docname))
# build a broken link
self.body.append(self._start_tag(node, 'a', **{'href': '#'}))
self._reference_context.append(self._end_tag(node, suffix=''))
return
anchor_value = None
if '#' in node['refuri']:
anchor = node['refuri'].split('#')[1]
target_name = '{}#{}'.format(docname, anchor)
# check if this target is reachable without an anchor; if so, use
# the identifier value instead
target = ConfluenceState.target(target_name)
if target:
anchor_value = target
anchor_value = self._encode_sf(anchor_value)
elif self.can_anchor:
anchor_value = anchor
navnode = getattr(node, '_navnode', False)
if navnode:
float = 'right' if node._navnode_next else 'left'
self.body.append(self._start_tag(node, 'div',
**{'style': 'float: ' + float + ';'}))
# build link to internal anchor (on another page)
# Note: plain-text-link body cannot have inline markup; add the node
# contents into body and skip processing the rest of this node.
doctitle = self._encode_sf(doctitle)
self.body.append(self._start_ac_link(node, anchor_value))
self.body.append(self._start_tag(node, 'ri:page',
suffix=self.nl, empty=True, **{'ri:content-title': doctitle}))
self.body.append(self._start_ac_link_body(node))
# style navigation references with an aui-button look
if navnode:
self.body.append(self._start_tag(
node, 'span', **{'class': 'aui-button'}))
self._reference_context.append(self._end_tag(node, suffix=''))
if self.add_secnumbers and node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
self._reference_context.append(self._end_ac_link_body(node))
self._reference_context.append(self._end_ac_link(node))
if navnode:
self._reference_context.append(self._end_tag(node))
def _visit_reference_top(self, node):
self.body.append(self._start_tag(node, 'a', **{'href': '#top'}))
self._reference_context.append(self._end_tag(node, suffix=''))
def depart_reference(self, node):
for element in self._reference_context:
self.body.append(element)
self._reference_context = []
def visit_target(self, node):
if not self.can_anchor:
raise nodes.SkipNode
if 'refid' in node:
anchor = ''.join(node['refid'].split())
# only build an anchor if required (e.g. is a reference label
            # already provided by a built section element)
target_name = '{}#{}'.format(self.docname, anchor)
target = ConfluenceState.target(target_name)
if not target:
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', anchor))
self.body.append(self._end_ac_macro(node))
elif 'ids' in node and 'refuri' not in node:
for id in node['ids']:
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', id))
self.body.append(self._end_ac_macro(node))
raise nodes.SkipNode
# --------------------------------
# references - footnotes/citations
# --------------------------------
def visit_footnote(self, node):
label_node = node.next_node()
if not isinstance(label_node, nodes.label):
raise nodes.SkipNode
        # if this is the first footnote/citation, start building a table
if not self._building_footnotes:
self.body.append(self._start_tag(node, 'table', suffix=self.nl))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tbody', suffix=self.nl))
self.context.append(self._end_tag(node))
self._building_footnotes = True
label_text = label_node.astext()
self.body.append(self._start_tag(node, 'tr', suffix=self.nl))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'td'))
# footnote anchor
if self.can_anchor:
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', node['ids'][0]))
self.body.append(self._end_ac_macro(node))
# footnote label and back reference(s)
if (not self.can_anchor
or 'backrefs' not in node or not node['backrefs']):
label_text = self._encode_sf(label_text)
self.body.append(label_text)
elif len(node['backrefs']) > 1:
label_text = self._encode_sf(label_text)
self.body.append(label_text)
self.body.append(self._start_tag(node, 'div'))
self.body.append(self._start_tag(node, 'em'))
self.body.append('(')
for idx, backref in enumerate(node['backrefs']):
if idx != 0:
self.body.append(', ')
self.body.append(self._start_ac_link(node, backref))
self.body.append(
self._start_ac_plain_text_link_body_macro(node))
self.body.append(self._escape_cdata(str(idx + 1)))
self.body.append(self._end_ac_plain_text_link_body_macro(node))
self.body.append(self._end_ac_link(node))
self.body.append(')')
self.body.append(self._end_tag(node, suffix='')) # em
self.body.append(self._end_tag(node)) # div
else:
self.body.append(self._start_ac_link(node, node['backrefs'][0]))
self.body.append(self._start_ac_plain_text_link_body_macro(node))
self.body.append(self._escape_cdata(label_text))
self.body.append(self._end_ac_plain_text_link_body_macro(node))
self.body.append(self._end_ac_link(node))
self.body.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'td'))
self.context.append(self._end_tag(node))
def depart_footnote(self, node):
self.body.append(self.context.pop()) # td
self.body.append(self.context.pop()) # tr
# if next entry is not another footnote or citation, close off the table
next_sibling = first(node.traverse(
include_self=False, descend=False, siblings=True))
if not isinstance(next_sibling, (nodes.citation, nodes.footnote)):
self.body.append(self.context.pop()) # tbody
self.body.append(self.context.pop()) # table
self._building_footnotes = False
def visit_footnote_reference(self, node):
text = "[{}]".format(node.astext())
if not self.can_anchor:
self.body.append(self._start_tag(node, 'sup'))
self.body.append(self._encode_sf(text))
self.body.append(self._end_tag(node, suffix='')) # sup
raise nodes.SkipNode
# build an anchor for back reference
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', node['ids'][0]))
self.body.append(self._end_ac_macro(node))
# link to anchor
target_anchor = ''.join(node['refid'].split())
self.body.append(self._start_tag(node, 'sup'))
self.body.append(self._start_ac_link(node, target_anchor))
self.body.append(self._start_ac_plain_text_link_body_macro(node))
self.body.append(self._escape_cdata(text))
self.body.append(self._end_ac_plain_text_link_body_macro(node))
self.body.append(self._end_ac_link(node))
self.body.append(self._end_tag(node, suffix='')) # sup
raise nodes.SkipNode
def visit_label(self, node):
        # Label entries are skipped as their context has already been processed
        # from within footnote/citation processing (see visit_footnote).
raise nodes.SkipNode
visit_citation = visit_footnote
depart_citation = depart_footnote
# -------------
# inline markup
# -------------
def visit_emphasis(self, node):
self.body.append(self._start_tag(node, 'em'))
self.context.append(self._end_tag(node, suffix=''))
def depart_emphasis(self, node):
self.body.append(self.context.pop()) # em
def visit_literal(self, node):
self.body.append(self._start_tag(node, 'code'))
self.context.append(self._end_tag(node, suffix=''))
def depart_literal(self, node):
self.body.append(self.context.pop()) # code
def visit_strong(self, node):
self.body.append(self._start_tag(node, 'strong'))
self.context.append(self._end_tag(node, suffix=''))
def depart_strong(self, node):
self.body.append(self.context.pop()) # strong
def visit_subscript(self, node):
self.body.append(self._start_tag(node, 'sub'))
self.context.append(self._end_tag(node, suffix=''))
def depart_subscript(self, node):
self.body.append(self.context.pop()) # sub
def visit_superscript(self, node):
self.body.append(self._start_tag(node, 'sup'))
self.context.append(self._end_tag(node, suffix=''))
def depart_superscript(self, node):
self.body.append(self.context.pop()) # sup
def visit_inline(self, node):
has_added = False
classes = node.get('classes', [])
if classes in [['guilabel']]:
self.body.append(self._start_tag(node, 'em'))
has_added = True
elif classes in [['accelerator']]:
self.body.append(self._start_tag(node, 'u'))
has_added = True
elif isinstance(node.parent, addnodes.desc_parameter):
            # check whether this is an identifier in a signature
if classes in [['n']]:
self.body.append(self._start_tag(node, 'em'))
has_added = True
if has_added:
self.context.append(self._end_tag(node, suffix=''))
else:
# ignoring; no special handling of other inline entries
self.context.append('')
def depart_inline(self, node):
self.body.append(self.context.pop())
visit_literal_emphasis = visit_emphasis
depart_literal_emphasis = depart_emphasis
visit_literal_strong = visit_strong
depart_literal_strong = depart_strong
visit_title_reference = visit_emphasis
depart_title_reference = depart_emphasis
# -------------
# images markup
# -------------
def visit_caption(self, node):
        # if a caption for a literal block, pass the caption data along so it
        # can be rendered in the macro's title field
if self.can_code:
next_sibling = first(node.traverse(
include_self=False, descend=False, siblings=True))
if isinstance(next_sibling, nodes.literal_block):
                # anything that is not a parsed literal
if node.rawsource == node.astext() or 'source' in node:
next_sibling['scb-caption'] = node.astext()
raise nodes.SkipNode
attribs = {}
attribs['style'] = 'clear: both;'
self._figure_context.append('')
alignment = self._fetch_alignment(node)
if alignment and alignment != 'left':
attribs['style'] = '{}text-align: {};'.format(
attribs['style'], alignment)
self.body.append(self._start_tag(node, 'p', **attribs))
self.add_fignumber(node.parent)
self.context.append(self._end_tag(node))
def depart_caption(self, node):
self.body.append(self.context.pop()) # p
def visit_figure(self, node):
if self.can_admonition:
self.body.append(self._start_ac_macro(node, 'info'))
self.body.append(self._build_ac_parameter(node, 'icon', 'false'))
self.body.append(self._start_ac_rich_text_body_macro(node))
self.context.append(self._end_ac_rich_text_body_macro(node) +
self._end_ac_macro(node))
else:
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True))
self.body.append(self._start_tag(node, 'div'))
self.context.append(self._end_tag(node) + self._start_tag(
node, 'hr', suffix=self.nl, empty=True))
def depart_figure(self, node):
# force clear from a floating confluence image if not handled in caption
if self._figure_context:
self._figure_context.pop()
else:
self.body.append('<div style="clear: both"> </div>\n')
self.body.append(self.context.pop()) # <dynamic>
def visit_image(self, node):
if 'uri' not in node or not node['uri']:
ConfluenceLogger.verbose('skipping image with no uri')
raise nodes.SkipNode
uri = node['uri']
uri = self._encode_sf(uri)
if node.get('from_math') and node.get('math_depth'):
math_depth = node['math_depth']
self.body.append(self._start_tag(node, 'span',
**{'style': 'vertical-align: {}px'.format(-1 * math_depth)}))
self.context.append(self._end_tag(node))
if node.get('from_math') and node.get('number'):
if self.builder.config.math_numfig and self.builder.config.numfig:
figtype = 'displaymath'
if self.builder.name == 'singleconfluence':
key = '%s/%s' % (self._docnames[-1], figtype)
else:
key = figtype
id = node['ids'][0]
number = self.builder.fignumbers.get(key, {}).get(id, ())
number = '.'.join(map(str, number))
else:
number = node['number']
self.body.append(self._start_tag(node, 'div',
**{'style': 'float: right'}))
self.body.append('({})'.format(number))
self.body.append(self._end_tag(node))
attribs = {}
alignment = self._fetch_alignment(node)
if alignment:
attribs['ac:align'] = alignment
if alignment == 'right':
attribs['ac:style'] = 'float: right;'
if 'alt' in node:
alt = node['alt']
alt = self._encode_sf(alt)
attribs['ac:alt'] = alt
if 'scale' in node and 'width' not in node:
fulluri = path.join(self.builder.srcdir, uri)
size = get_image_size(fulluri)
if size is None:
self.warn('could not obtain image size; :scale: option is '
'ignored for {}'.format(fulluri))
else:
scale = node['scale'] / 100.0
node['width'] = str(int(math.ceil(size[0] * scale))) + 'px'
if 'height' in node:
self.warn('height value for image is unsupported in confluence')
if 'width' in node:
width = node['width']
attribs['ac:width'] = width
if not width.endswith('px'):
self.warn('unsupported unit type for confluence: ' + width)
if uri.find('://') != -1 or uri.startswith('data:'):
# an external or embedded image
#
# Note: it would be rare that embedded images will be detected at
# this stage as Sphinx's post-transform processor would
# translate these images into embedded images. Nonetheless an
# embedded image is still stacked into Confluence image
# entity (although, currently, some (if not all) Confluence
# versions do not consider embedded images as valid URI values
# so users might see a "broken images" block).
self.body.append(self._start_ac_image(node, **attribs))
self.body.append(self._start_tag(node, 'ri:url',
suffix=self.nl, empty=True, **{'ri:value': uri}))
self.body.append(self._end_ac_image(node))
else:
asset_docname = None
if self.builder.name == 'singleconfluence':
asset_docname = self._docnames[-1]
image_key, hosting_docname = self.assets.fetch(node,
docname=asset_docname)
if not image_key:
self.warn('unable to find image: ' '{}'.format(node['uri']))
raise nodes.SkipNode
hosting_doctitle = ConfluenceState.title(
hosting_docname, hosting_docname)
hosting_doctitle = self._encode_sf(hosting_doctitle)
self.body.append(self._start_ac_image(node, **attribs))
self.body.append(self._start_ri_attachment(node, image_key))
if hosting_docname != self.docname:
self.body.append(self._start_tag(node, 'ri:page', empty=True,
**{'ri:content-title': hosting_doctitle}))
self.body.append(self._end_ri_attachment(node))
self.body.append(self._end_ac_image(node))
def depart_image(self, node):
if node.get('from_math') and node.get('math_depth'):
self.body.append(self.context.pop()) # span
def visit_legend(self, node):
attribs = {}
alignment = self._fetch_alignment(node)
if alignment and alignment != 'left':
attribs['style'] = 'text-align: {};'.format(alignment)
self.body.append(self._start_tag(node, 'div', **attribs))
self.context.append(self._end_tag(node))
def depart_legend(self, node):
self.body.append(self.context.pop()) # div
# ------------------
# sphinx -- download
# ------------------
def visit_download_reference(self, node):
uri = node['reftarget']
uri = self._encode_sf(uri)
if uri.find('://') != -1:
self.body.append(self._start_tag(node, 'strong'))
self.context.append(self._end_tag(node, suffix=''))
self.body.append(self._start_tag(node, 'a', **{'href': uri}))
self.context.append(self._end_tag(node, suffix=''))
else:
asset_docname = None
if self.builder.name == 'singleconfluence':
asset_docname = self._docnames[-1]
file_key, hosting_docname = self.assets.fetch(node,
docname=asset_docname)
if not file_key:
self.warn('unable to find download: ' '{}'.format(
node['reftarget']))
raise nodes.SkipNode
hosting_doctitle = ConfluenceState.title(hosting_docname)
hosting_doctitle = self._encode_sf(hosting_doctitle)
            # use the view-file macro when permitted and the asset is not an
            # explicitly referenced one
if self.can_viewfile and ('refexplicit' not in node or
not node['refexplicit']):
# a 'view-file' macro takes an attachment tag as a body; build
# the tags in an interim list
attachment = []
attachment.append(self._start_ri_attachment(node, file_key))
if hosting_docname != self.docname:
attachment.append(self._start_tag(node, 'ri:page',
empty=True, **{'ri:content-title': hosting_doctitle}))
attachment.append(self._end_ri_attachment(node))
self.body.append(self._start_ac_macro(node, 'view-file'))
self.body.append(self._build_ac_parameter(
node, 'name', ''.join(attachment)))
self.body.append(self._end_ac_macro(node))
else:
self.body.append(self._start_ac_link(node))
self.body.append(self._start_ri_attachment(node, file_key))
if hosting_docname != self.docname:
self.body.append(self._start_tag(node, 'ri:page',
empty=True, **{'ri:content-title': hosting_doctitle}))
self.body.append(self._end_ri_attachment(node))
self.body.append(
self._start_ac_plain_text_link_body_macro(node))
self.body.append(self._escape_cdata(node.astext()))
self.body.append(self._end_ac_plain_text_link_body_macro(node))
self.body.append(self._end_ac_link(node))
raise nodes.SkipNode
def depart_download_reference(self, node):
self.body.append(self.context.pop()) # a
self.body.append(self.context.pop()) # strong
# ---------------
# sphinx -- hlist
# ---------------
def visit_hlist(self, node):
self.body.append(self._start_tag(node, 'table', suffix=self.nl))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tbody', suffix=self.nl,
**{'style': 'border: none'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'tr', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_hlist(self, node):
self.body.append(self.context.pop()) # tr
self.body.append(self.context.pop()) # tbody
self.body.append(self.context.pop()) # table
def visit_hlistcol(self, node):
self.body.append(self._start_tag(node, 'td',
**{'style': 'border: none'}))
self.context.append(self._end_tag(node))
def depart_hlistcol(self, node):
self.body.append(self.context.pop()) # td
# -----------------
# sphinx -- manpage
# -----------------
def visit_manpage(self, node):
self.visit_emphasis(node)
if self._manpage_url:
node['refuri'] = self._manpage_url.format(**node.attributes)
self._visit_reference_extern(node)
def depart_manpage(self, node):
if self._manpage_url:
self.depart_reference(node)
self.depart_emphasis(node)
# -------------------------
# sphinx -- production list
# -------------------------
def visit_productionlist(self, node):
max_len = max(len(production['tokenname']) for production in node)
self.body.append(self._start_tag(node, 'pre'))
for production in node:
if production['tokenname']:
formatted_token = production['tokenname'].ljust(max_len)
formatted_token = self._encode_sf(formatted_token)
self.body.append('{} ::='.format(formatted_token))
lastname = production['tokenname']
else:
self.body.append('{} '.format(' ' * len(lastname)))
text = production.astext()
text = self._encode_sf(text)
self.body.append(text + self.nl)
self.body.append(self._end_tag(node))
raise nodes.SkipNode
# -----------------
# sphinx -- toctree
# -----------------
def visit_compound(self, node):
        # If this has not been a manipulated toctree (refer to hierarchy mode
        # and see builder's process_tree_structure) and the invoker wishes to
        # use the Confluence children macro instead, swap out the toctree for
        # the macro.
if 'toctree-wrapper' in node['classes']:
if self.apply_hierarchy_children_macro:
self.body.append(self._start_ac_macro(node, 'children'))
if self._tocdepth:
self.body.append(self._build_ac_parameter(
node, 'depth', str(self._tocdepth)))
else:
self.body.append(self._build_ac_parameter(
node, 'all', 'true'))
self.body.append(self._end_ac_macro(node))
raise nodes.SkipNode
def depart_compound(self, node):
pass
# -----------------
# sphinx -- domains
# -----------------
def visit_desc(self, node):
self.body.append(self._start_tag(node, 'dl', suffix=self.nl))
self.context.append(self._end_tag(node))
def depart_desc(self, node):
self.body.append(self.context.pop()) # dl
def visit_desc_signature(self, node):
        # capture ids for which anchors can be generated and place them into
        # the first dt tag (since multiple may be generated)
self._desc_sig_ids = node.attributes.get('ids', [])
self.body.append(self._start_tag(node, 'dt'))
self.context.append(self._end_tag(node))
if not node.get('is_multiline'):
self.visit_desc_signature_line(node)
def depart_desc_signature(self, node):
if not node.get('is_multiline'):
self.depart_desc_signature_line(node)
self.body.append(self.context.pop()) # dt
def visit_desc_signature_line(self, node):
if self._desc_sig_ids and self.can_anchor:
for id in self._desc_sig_ids:
self.body.append(self._start_ac_macro(node, 'anchor'))
self.body.append(self._build_ac_parameter(node, '', id))
self.body.append(self._end_ac_macro(node))
if self._desc_sig_ids is None:
self.body.append(self._start_tag(
node, 'br', suffix=self.nl, empty=True))
self._desc_sig_ids = None
def depart_desc_signature_line(self, node):
pass
def visit_desc_annotation(self, node):
self.body.append(self._start_tag(node, 'em'))
self.context.append(self._end_tag(node, suffix=''))
def depart_desc_annotation(self, node):
self.body.append(self.context.pop()) # em
def visit_desc_addname(self, node):
self.body.append(self._start_tag(node, 'code'))
self.context.append(self._end_tag(node, suffix=''))
def depart_desc_addname(self, node):
self.body.append(self.context.pop()) # code
def visit_desc_name(self, node):
self.body.append(self._start_tag(node, 'strong'))
self.context.append(self._end_tag(node, suffix=''))
self.body.append(self._start_tag(node, 'code'))
self.context.append(self._end_tag(node, suffix=''))
def depart_desc_name(self, node):
self.body.append(self.context.pop()) # code
self.body.append(self.context.pop()) # strong
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_optional(self, node):
self.body.append('[')
def depart_desc_optional(self, node):
self.body.append(']')
def visit_desc_parameterlist(self, node):
self._first_desc_parameter = True
self.body.append('(')
def depart_desc_parameterlist(self, node):
self.body.append(')')
def visit_desc_parameter(self, node):
if self._first_desc_parameter:
self._first_desc_parameter = False
else:
self.body.append(', ')
if not node.get('noemph'):
self.body.append(self._start_tag(node, 'em'))
self.context.append(self._end_tag(node, suffix=''))
def depart_desc_parameter(self, node):
if not node.get('noemph'):
self.body.append(self.context.pop()) # em
def visit_desc_content(self, node):
self.body.append(self._start_tag(node, 'dd'))
self.context.append(self._end_tag(node))
def depart_desc_content(self, node):
self.body.append(self.context.pop()) # dd
# -----------------------
# sphinx -- miscellaneous
# -----------------------
def visit_centered(self, node):
self.body.append(self._start_tag(node, 'h2',
**{'style': 'text-align: center'}))
self.context.append(self._end_tag(node))
self.body.append(self._start_tag(node, 'strong'))
self.context.append(self._end_tag(node, suffix=''))
def depart_centered(self, node):
self.body.append(self.context.pop()) # strong
self.body.append(self.context.pop()) # h2
def visit_rubric(self, node):
self.body.append(self._start_tag(node, 'p',
**{'style': 'font-weight: bold; margin-top: 30px'}))
self.context.append(self._end_tag(node))
def depart_rubric(self, node):
self.body.append(self.context.pop()) # p
def visit_seealso(self, node):
self._visit_admonition(node, 'info', admonitionlabels['seealso'])
depart_seealso = _depart_admonition
def visit_versionmodified(self, node):
if node['type'] == 'deprecated' or node['type'] == 'versionchanged':
self._visit_note(node)
elif node['type'] == 'versionadded':
self._visit_info(node)
else:
self.warn('unsupported version modification type: '
'{}'.format(node['type']))
self._visit_info(node)
depart_versionmodified = _depart_admonition
# -----------------------------------------
# sphinx -- extension -- confluence builder
# -----------------------------------------
def visit_ConfluenceNavigationNode(self, node):
if node.bottom:
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True,
**{'style': 'padding-bottom: 10px; margin-top: 30px'}))
def depart_ConfluenceNavigationNode(self, node):
if node.top:
self.body.append(self._start_tag(
node, 'hr', suffix=self.nl, empty=True,
**{'style':
'clear: both; padding-top: 10px; margin-bottom: 30px'}))
else:
self.body.append('<div style="clear: both"> </div>\n')
def visit_confluence_expand(self, node):
if not self.can_expand:
raise nodes.SkipNode
self.body.append(self._start_ac_macro(node, 'expand'))
if 'title' in node:
self.body.append(
self._build_ac_parameter(node, 'title', node['title']))
self.body.append(self._start_ac_rich_text_body_macro(node))
self.context.append(self._end_ac_rich_text_body_macro(node) +
self._end_ac_macro(node))
def depart_confluence_expand(self, node):
self.body.append(self.context.pop()) # macro
# ------------------------------------------
# confluence-builder -- enhancements -- jira
# ------------------------------------------
def _visit_jira_node(self, node):
if not self.can_jira:
raise nodes.SkipNode
self.body.append(self._start_ac_macro(node, 'jira'))
for k, v in sorted(node.params.items()):
self.body.append(self._build_ac_parameter(node, k, str(v)))
self.body.append(self._end_ac_macro(node))
raise nodes.SkipNode
visit_jira = _visit_jira_node
visit_jira_issue = _visit_jira_node
# -------------
# miscellaneous
# -------------
def visit_abbreviation(self, node):
attribs = {}
if 'explanation' in node:
title_value = node['explanation']
title_value = self._encode_sf(title_value)
attribs['title'] = title_value
self.body.append(self._start_tag(node, 'abbr', **attribs))
self.context.append(self._end_tag(node, suffix=''))
def depart_abbreviation(self, node):
self.body.append(self.context.pop()) # abbr
def visit_acronym(self, node):
# Note: docutils indicates this directive is "to be completed"
self.body.append(self._start_tag(node, 'acronym'))
self.context.append(self._end_tag(node, suffix=''))
def depart_acronym(self, node):
self.body.append(self.context.pop()) # acronym
def depart_line(self, node):
next_sibling = first(node.traverse(
include_self=False, descend=False, siblings=True))
if isinstance(next_sibling, nodes.line):
self.body.append('<br />')
def visit_line_block(self, node):
self.body.append(self._start_tag(node, 'p'))
self.context.append(self._end_tag(node))
def depart_line_block(self, node):
self.body.append(self.context.pop()) # p
def visit_raw(self, node):
# providing an advanced option to allow raw html injection in the output
#
# This is not always guaranteed to work; the raw html content may not
# be compatible with Atlassian's storage format. Results may fail to
# publish or contents may be suppressed on the Confluence instance. This
        # is provided to help users wanting to (at least partially) support raw
        # HTML content generated from Markdown sources.
if self.builder.config.confluence_adv_permit_raw_html:
if 'html' in node.get('format', '').split():
self.body.append(self.nl.join(node.astext().splitlines()))
raise nodes.SkipNode
if 'confluence_storage' in node.get('format', '').split():
self.body.append(self.nl.join(node.astext().splitlines()))
else:
            # support the deprecated 'confluence' format for the interim
ConfluenceBaseTranslator.visit_raw(self, node)
raise nodes.SkipNode
# ##########################################################################
# # #
# # helpers #
# # #
# ##########################################################################
def _start_tag(self, node, tag, suffix=None, empty=False, **kwargs):
"""
generates start tag content for a given node
A helper used to return content to be appended to a document which
initializes the start of a storage format element (i.e. generates a
start tag). The element of type `tag` will be initialized. This method
may use provided `node` to tweak the final content.
Args:
node: the node processing the start-tag
tag: the type of tag
suffix (optional): the suffix to add (defaults to nothing)
empty (optional): tag will not hold child nodes (defaults to False)
**kwargs (optional): dictionary of attributes to include in the tag
Returns:
the content
"""
tag = tag.lower()
data = [tag]
attribs = {}
for key, value in kwargs.items():
attribs[key.lower()] = value
for key, value in sorted(attribs.items()):
data.append('{}="{}"'.format(key, value))
if suffix is None:
suffix = ''
suffix = '>' + suffix
if empty:
suffix = ' /' + suffix
else:
try:
node.__confluence_tag.append(tag)
except AttributeError:
node.__confluence_tag = [tag]
return '<{}{}'.format(' '.join(data), suffix)
def _end_tag(self, node, suffix=None):
"""
generates end tag content for a given node
A helper used to return content to be appended to a document which
finalizes a storage format element (i.e. generates an end tag). This
method should* be used to help close a _start_tag call (*with the
exception of when _start_tag is invoked with empty=True).
Args:
node: the node processing the end-tag
suffix (optional): the suffix to add (defaults to newline)
Returns:
the content
"""
try:
tag = node.__confluence_tag.pop()
except IndexError:
            raise ConfluenceError('end tag invoked without matching start tag')
if suffix is None:
suffix = self.nl
return '</{}>{}'.format(tag, suffix)
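    # Minimal usage sketch (illustrative, not from the source): the start/end
    # helpers are meant to be used in matched pairs, with _end_tag popping the
    # tag name that _start_tag recorded on the node:
    #
    #   self.body.append(self._start_tag(node, 'p'))   # '<p>'
    #   self.body.append('text')
    #   self.body.append(self._end_tag(node))          # '</p>\n'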
def _build_ac_parameter(self, node, name, value):
"""
generates a confluence parameter element
A helper used to return content to be appended to a document which
builds a complete storage format parameter element. The 'ac:parameter'
element will be built. This method may use provided `node` to tweak the
final content.
Args:
node: the node processing the parameter
name: the parameter name
value: the value for the parameter
Returns:
the content
"""
return (self._start_tag(node, 'ac:parameter', **{'ac:name': name}) +
value + self._end_tag(node))
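    # Example of the generated markup (hypothetical values):
    #   self._build_ac_parameter(node, 'title', 'note')
    #   -> '<ac:parameter ac:name="title">note</ac:parameter>\n'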
def _start_ac_image(self, node, **kwargs):
"""
generates a confluence image start tag
A helper used to return content to be appended to a document which
initializes the start of a storage format image element. The 'ac:image'
element will be initialized. This method may use provided `node` to
tweak the final content.
Args:
node: the node processing the image
Returns:
the content
"""
return self._start_tag(node, 'ac:image', suffix=self.nl, **kwargs)
def _end_ac_image(self, node):
"""
generates confluence image end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format image element. This method should be used to
help close a _start_ac_image call.
Args:
node: the node processing the image
Returns:
the content
"""
return self._end_tag(node, suffix='')
def _start_ac_link(self, node, anchor=None):
"""
generates a confluence link start tag
A helper used to return content to be appended to a document which
initializes the start of a storage format link element of a specific
`type`. The 'ac:link' element will be initialized. This method may use
provided `node` to tweak the final content.
Args:
node: the node processing the link
anchor (optional): the anchor value to use (defaults to None)
Returns:
the content
"""
attribs = {}
if anchor:
attribs['ac:anchor'] = anchor
return self._start_tag(node, 'ac:link', suffix=self.nl, **attribs)
def _end_ac_link(self, node):
"""
generates confluence link end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format link element. This method should be used to
help close a _start_ac_link call.
Args:
node: the node processing the link
Returns:
the content
"""
return self._end_tag(node, suffix='')
def _start_ac_macro(self, node, type, empty=False):
"""
generates a confluence macro start tag
A helper used to return content to be appended to a document which
initializes the start of a storage format macro element of a specific
`type`. The 'ac:structured-macro' element will be initialized. This
method may use provided `node` to tweak the final content.
Args:
node: the node processing the macro
type: the type of macro
empty (optional): tag will not hold child nodes (defaults to False)
Returns:
the content
"""
return self._start_tag(node, 'ac:structured-macro',
suffix=self.nl, empty=empty, **{'ac:name': type})
def _end_ac_macro(self, node):
"""
generates confluence macro end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format macro element. This method should* be used to
help close a _start_ac_macro call (*with the exception of when
_start_ac_macro is invoked with empty=True).
Args:
node: the node processing the macro
Returns:
the content
"""
return self._end_tag(node)
def _start_ac_link_body(self, node):
"""
generates a confluence link-body start tag
A helper used to return content to be appended to a document which
initializes the start of a storage format link-body element. The
'ac:link-body' element will be initialized. This method may use provided
`node` to tweak the final content.
Args:
node: the node processing the macro
Returns:
the content
"""
return self._start_tag(node, 'ac:link-body')
def _end_ac_link_body(self, node):
"""
generates confluence link-body end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format link-body element. This method should be used
to help close a _start_ac_link_body call.
Args:
node: the node processing the macro
Returns:
the content
"""
return self._end_tag(node)
def _start_ac_rich_text_body_macro(self, node):
"""
generates a confluence rich-text-body start tag
A helper used to return content to be appended to a document which
initializes the start of a storage format rich-text-body element. The
'ac:rich-text-body' element will be initialized. This method may use
provided `node` to tweak the final content.
Args:
node: the node processing the macro
Returns:
the content
"""
return self._start_tag(node, 'ac:rich-text-body', suffix=self.nl)
def _end_ac_rich_text_body_macro(self, node):
"""
generates confluence rich-text-body end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format rich-text-body element. This method should
be used to help close a _start_ac_rich_text_body_macro call.
Args:
node: the node processing the macro
Returns:
the content
"""
return self._end_tag(node)
def _start_ac_plain_text_body_macro(self, node):
"""
generates a confluence plain-text-body start tag
A helper used to return content to be appended to a document which
initializes the start of a storage format plain-text-body element. The
'ac:plain-text-body' element will be initialized. This method may use
provided `node` to tweak the final content.
Args:
node: the node processing the macro
Returns:
the content
"""
return self._start_tag(node, 'ac:plain-text-body', suffix='<![CDATA[')
def _end_ac_plain_text_body_macro(self, node):
"""
generates confluence plain-text-body end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format plain-text-body element. This method should
be used to help close a _start_ac_plain_text_body_macro call.
Args:
node: the node processing the macro
Returns:
the content
"""
return ']]>' + self._end_tag(node)
def _start_ac_plain_text_link_body_macro(self, node):
"""
generates a confluence plain-text-link-body start tag
A helper used to return content to be appended to a document which
        initializes the start of a storage format plain-text-link-body element.
        The 'ac:plain-text-link-body' element will be initialized. This method
        may use provided `node` to tweak the final content.
Args:
node: the node processing the macro
Returns:
the content
"""
return self._start_tag(node, 'ac:plain-text-link-body',
suffix='<![CDATA[')
def _end_ac_plain_text_link_body_macro(self, node):
"""
generates confluence plain-text-link-body end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format plain-text-link-body element. This method
should be used to help close a _start_ac_plain_text_link_body_macro
call.
Args:
node: the node processing the macro
Returns:
the content
"""
return ']]>' + self._end_tag(node)
def _start_ri_attachment(self, node, filename):
"""
generates a confluence attachment start tag
A helper used to return content to be appended to a document which
initializes the start of a storage format attachment element. The
'ri:attachment' element will be initialized. This method may use
provided `node` to tweak the final content.
Args:
node: the node processing the attachment
filename: the filename of the attachment
Returns:
the content
"""
return self._start_tag(node, 'ri:attachment',
**{'ri:filename': filename})
def _end_ri_attachment(self, node):
"""
generates confluence attachment end tag content for a node
A helper used to return content to be appended to a document which
finalizes a storage format attachment element. This method should be
used to help close a _start_ri_attachment call.
Args:
node: the node processing the attachment
Returns:
the content
"""
return self._end_tag(node)
def _escape_cdata(self, data):
"""
escapes text to be inserted into a cdata
A helper used to return content that has been properly escaped and can
be directly placed inside a CDATA container.
Args:
data: the text
Returns:
the escaped text
"""
data = data.replace(']]>', ']]]]><![CDATA[>')
return ConfluenceBaseTranslator.encode(self, data)
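    # Sketch: a literal ']]>' inside the body would terminate the CDATA
    # section early, so it is split across two sections:
    #   _escape_cdata('a]]>b') -> 'a]]]]><![CDATA[>b'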
def _encode_sf(self, data):
"""
encodes text to be inserted directly into a storage format area
A helper used to return content that has been properly encoded and can
be directly placed inside a Confluence storage-format-prepared document.
Args:
data: the text
Returns:
the encoded text
"""
        STORAGE_FORMAT_REPLACEMENTS = {
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&apos;'),
        }
        # first pass needs to handle ampersand
        data = unicode(data).replace('&', '&amp;')
for find, encoded in STORAGE_FORMAT_REPLACEMENTS:
data = data.replace(find, encoded)
return data
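    # Sketch of the resulting encoding (given the replacement table above):
    #   _encode_sf('a < "b" & c') -> 'a &lt; &quot;b&quot; &amp; c'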
def _fetch_alignment(self, node):
"""
fetch the alignment to be used on a node
        A helper used to determine the alignment to apply for a node, based on
        the node's own options, its parent figure or the configured default.
Args:
node: the node
Returns:
the alignment to configure; may be `None`
"""
alignment = None
if 'align' in node:
alignment = node['align']
# if the parent is a figure, either take the assigned alignment from the
# figure node; otherwise, apply the default alignment for the node
elif isinstance(node.parent, nodes.figure):
if 'align' in node.parent:
alignment = node.parent['align']
if not alignment or alignment == 'default':
alignment = self._default_alignment
if alignment:
alignment = self._encode_sf(alignment)
return alignment
| 37.91456
| 83
| 0.596429
|
4a120fd6beccc7b72c2a16e9829834ad1dddbaff
| 2,218
|
py
|
Python
|
fetchy/utils.py
|
ThomasKluiters/fetchy
|
dfe1a73aa72cad4338445bec370be064707bff0c
|
[
"MIT"
] | 114
|
2019-07-20T19:27:34.000Z
|
2022-03-11T16:49:22.000Z
|
fetchy/utils.py
|
ThomasKluiters/fetchy
|
dfe1a73aa72cad4338445bec370be064707bff0c
|
[
"MIT"
] | 49
|
2019-07-20T21:44:20.000Z
|
2019-10-03T16:52:40.000Z
|
fetchy/utils.py
|
ThomasKluiters/fetchy
|
dfe1a73aa72cad4338445bec370be064707bff0c
|
[
"MIT"
] | 10
|
2019-07-26T21:26:32.000Z
|
2022-02-25T08:43:31.000Z
|
import os
import gzip
import urllib
import distro
import shutil
import platform
import logging
import validators
from tqdm import tqdm
from pathlib import Path
logger = logging.getLogger(__name__)
_known_versions = {
"ubuntu": [
"devel",
"precise",
"cosmic",
"trusty",
"xenial",
"disco",
"eoan",
"bionic",
],
"debian": [
"buzz",
"rex",
"bo",
"hamm",
"slink",
"potato",
"woody",
"sarge",
"etch",
"lenny",
"squeeze",
"wheezy",
"jessie",
"stretch",
"buster",
"bullseye",
"sid",
],
}
def is_os_supported(distribution=None):
if distribution is None:
distribution = distro.id()
return distribution in ["debian", "ubuntu"]
def is_version_supported(distribution, version):
if not is_os_supported(distribution=distribution):
return False
return version in _known_versions[distribution]
def get_supported_versions_for(distribution):
return reversed(_known_versions[distribution])
def get_distribution():
"""Function to acquire current Distribution
This function will return the current distribution
if the user is running on a Linux machine.
"""
return distro.id()
def get_distribution_version():
"""
Function to acquire current Distribution Version
This function will return the current distribution version
if the user is running on a Linux machine.
"""
return distro.codename()
def get_architecture():
"""Function to acquire machine architecture
    For now let's make the simple assumption that 64bit -> amd64.
"""
(arch, _) = platform.architecture()
mapping = {"64bit": "amd64", "32bit": "i386"}
if arch not in mapping:
logger.error(
f"{arch} is not recognized. Please specify the architecture you want to use (e.g. --architecture amd64)."
)
return mapping[arch]
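# Illustrative sketch (not from the source): on a typical 64-bit CPython,
# platform.architecture() returns ('64bit', ...), so get_architecture()
# yields 'amd64'; on 32-bit builds it yields 'i386'.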
def get_cache_dir():
if "XDG_CACHE_HOME" in os.environ:
return os.path.join(os.environ["XDG_CACHE_HOME"], "fetchy")
return os.path.join(str(Path.home()), ".cache", "fetchy")
| 21.960396
| 117
| 0.62083
|
4a121116fc9e145fd5a631f8b93b92a8c287a61a
| 534
|
py
|
Python
|
tests/test_openapierr.py
|
shyrwinsia/saxo_openapi
|
8e5c1bf336654d059ea87ba2ff7e7aaef33d1262
|
[
"MIT"
] | 52
|
2019-03-13T13:27:36.000Z
|
2022-03-18T08:27:22.000Z
|
tests/test_openapierr.py
|
shyrwinsia/saxo_openapi
|
8e5c1bf336654d059ea87ba2ff7e7aaef33d1262
|
[
"MIT"
] | 15
|
2019-03-14T19:42:51.000Z
|
2021-12-19T16:14:02.000Z
|
tests/test_openapierr.py
|
shyrwinsia/saxo_openapi
|
8e5c1bf336654d059ea87ba2ff7e7aaef33d1262
|
[
"MIT"
] | 23
|
2019-03-13T13:45:22.000Z
|
2022-02-26T21:20:49.000Z
|
# -*- coding: utf-8 -*-
"""Tests for `saxo_openapi` package."""
from .unittestsetup import ReqMockTest
from saxo_openapi.exceptions import OpenAPIError
class TestSaxo_Exceptions(ReqMockTest):
"""Tests for exceptions."""
def setUp(self):
super(TestSaxo_Exceptions, self).setUp()
def test_openapierr(self):
err = OpenAPIError(401, "Unauthorized", "xxx")
self.assertTrue(err.content == 'xxx' and
err.code == 401 and
err.reason == 'Unauthorized')
| 25.428571
| 54
| 0.621723
|
4a1211654182603e43a3e149ba8c49c5aec61ca7
| 4,213
|
py
|
Python
|
snappy_wrappers/wrappers/salmon/wrapper.py
|
PotatoThrone/snappy-pipeline
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
[
"MIT"
] | 5
|
2021-02-26T10:39:56.000Z
|
2021-12-23T07:53:26.000Z
|
snappy_wrappers/wrappers/salmon/wrapper.py
|
PotatoThrone/snappy-pipeline
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
[
"MIT"
] | 93
|
2021-02-22T11:23:59.000Z
|
2022-03-31T09:58:39.000Z
|
snappy_wrappers/wrappers/salmon/wrapper.py
|
PotatoThrone/snappy-pipeline
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
[
"MIT"
] | 3
|
2021-02-22T11:44:59.000Z
|
2021-06-21T19:33:53.000Z
|
# -*- coding: utf-8 -*-
"""CUBI+Snakemake wrapper code for STAR: Snakemake wrapper.py
"""
from snakemake import shell
__author__ = "Manuel Holtgrewe <manuel.holtgrewe@bihealth.de>"
shell.executable("/bin/bash")
# Input fastqs are passed through snakemake.params.
# snakemake.input is a .done file touched after linking files in.
reads_left = snakemake.params.args["input"]["reads_left"]
reads_right = snakemake.params.args["input"].get("reads_right", "")
# salmon flag for first reads changes for single-end data.
if reads_right:
read_flag = "-1"
else:
read_flag = "-r"
this_file = __file__
shell(
r"""
set -x
# Write out information about conda installation.
conda list >{snakemake.log.conda_list}
conda info >{snakemake.log.conda_info}
md5sum {snakemake.log.conda_list} >{snakemake.log.conda_list_md5}
md5sum {snakemake.log.conda_info} >{snakemake.log.conda_info_md5}
# Also pipe stderr to log file
if [[ -n "{snakemake.log.log}" ]]; then
if [[ "$(set +e; tty; set -e)" != "" ]]; then
rm -f "{snakemake.log.log}" && mkdir -p $(dirname {snakemake.log.log})
exec 2> >(tee -a "{snakemake.log.log}" >&2)
else
rm -f "{snakemake.log.log}" && mkdir -p $(dirname {snakemake.log.log})
echo "No tty, logging disabled" >"{snakemake.log.log}"
fi
fi
# Setup auto-cleaned TMPDIR
export TMPDIR=$(mktemp -d)
trap "rm -rf $TMPDIR" EXIT
mkdir -p $TMPDIR/tmp.d $TMPDIR/pre.d
# Define left and right reads as Bash arrays
declare -a reads_left=({reads_left})
# right reads are empty for single-end data
declare -a reads_right=({reads_right})
left_files=$(IFS=" " ; echo "${{reads_left[*]}}")
left_files_prefixed=" ${{left_files}}"
right_files_prefixed=""
if [[ "{reads_right}" != "" ]]; then
right_files=$(IFS=" " ; echo "${{reads_right[*]}}")
right_files_prefixed=" -2 ${{right_files}}"
fi
t2g="{snakemake.config[step_config][gene_expression_quantification][salmon][path_transcript_to_gene]}"
t2g_cmd=""
if [[ "$t2g" != "" ]] && [[ "$t2g" != "REQUIRED" ]] && [[ -r "$t2g" ]]
then
t2g_cmd=" -g $t2g"
fi
libraryType="A"
if [[ {snakemake.config[step_config][gene_expression_quantification][strand]} -ge 0 ]]
then
libraryType="I"
if [[ {snakemake.config[step_config][gene_expression_quantification][strand]} -gt 0 ]]
then
libraryType="${{libraryType}}S"
if [[ {snakemake.config[step_config][gene_expression_quantification][strand]} -eq 1 ]]
then
libraryType="${{libraryType}}F"
else
libraryType="${{libraryType}}R"
fi
else
libraryType="${{libraryType}}U"
fi
fi
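# Summary of the mapping above (sketch): strand < 0 -> A (auto-detect);
# strand == 0 -> IU (inward, unstranded); strand == 1 -> ISF; otherwise ISR.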
salmon quant \
-i {snakemake.config[step_config][gene_expression_quantification][salmon][path_index]} \
-l $libraryType \
{read_flag} ${{left_files_prefixed}} ${{right_files_prefixed}} \
${{t2g_cmd}} \
-o $TMPDIR \
-p {snakemake.config[step_config][gene_expression_quantification][salmon][num_threads]} \
--auxDir aux \
{snakemake.config[step_config][gene_expression_quantification][salmon][salmon_params]}
# Copy over the output files
cp $TMPDIR/quant.sf {snakemake.output.transcript_sf}
md5sum {snakemake.output.transcript_sf} > {snakemake.output.transcript_sf_md5}
if [[ "${{t2g_cmd}}" != "" ]]
then
cp $TMPDIR/quant.genes.sf {snakemake.output.gene_sf}
md5sum {snakemake.output.gene_sf} > {snakemake.output.gene_sf_md5}
fi
# Copy log files
log=$(dirname {snakemake.log.log})
cp $TMPDIR/cmd_info.json $log/cmd_info.json
cp $TMPDIR/lib_format_counts.json $log/lib_format_counts.json
cp $TMPDIR/logs/salmon_quant.log $log/salmon_quant.log
md5sum $log/cmd_info.json > $log/cmd_info.json.md5
md5sum $log/lib_format_counts.json > $log/lib_format_counts.json.md5
md5sum $log/salmon_quant.log > $log/salmon_quant.log.md5
# Copy extra files
aux=$(dirname {snakemake.output.transcript_sf})/aux
mkdir -p $aux
cp -R $TMPDIR/aux/* $aux/.
# Logging: Save a copy of this wrapper (with the pickle details in the header)
cp {this_file} $(dirname $log)/wrapper_salmon.py
# Logging: Save a permanent copy of the environment file used
# Commented out: this_file is the temp copy, not the original
# cp $(dirname {this_file})/environment.yaml $(dirname $log)/environment_wrapper_star.yaml
"""
)
| 32.658915
| 102
| 0.694517
|
4a121258e732fad4dc75218ada4915b9079b0d56
| 24,148
|
py
|
Python
|
defi/defi_tools.py
|
jko0401/defi
|
812c827ac96c195df746284d2431674dd08f8f66
|
[
"MIT"
] | null | null | null |
defi/defi_tools.py
|
jko0401/defi
|
812c827ac96c195df746284d2431674dd08f8f66
|
[
"MIT"
] | null | null | null |
defi/defi_tools.py
|
jko0401/defi
|
812c827ac96c195df746284d2431674dd08f8f66
|
[
"MIT"
] | null | null | null |
"""Tools for use in DeFi
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime, requests
from scipy import interpolate
import matplotlib.cm as cm
from matplotlib.gridspec import GridSpec
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
def iloss(price_ratio, numerical=False):
"""return the impermanent loss result in compare with buy&hold A&B assets
Args:
price_ratio (float): Variation A Asset / Variation B Asset
price_ratio formula:
price_ratio = (var_A/100 + 1) / (var_B/100 + 1)
var_A: Asset A % variation
var_B: Asset B % variation
        numerical (bool): if True, returns impermanent loss as a decimal expr, ie "5%" => 0.05 (Default: False)
Returns:
        string or float: impermanent loss as a percentage string, or a decimal float when numerical=True
"""
il = 2 * (price_ratio**0.5 / (1 + price_ratio)) - 1
r = f"{il:.2%}" if not numerical else il
return r
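# Quick sanity check (illustrative): if asset A ends up +50% while asset B is
# flat, price_ratio = 1.5 and the position lags buy & hold by about 2%:
#   iloss(1.5)                  # -> '-2.02%'
#   iloss(1.5, numerical=True)  # -> -0.0202... (decimal form)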
def compare(days, var_A=0, var_B=0, rw_pool_A=0, rw_pool_B=0, rw_pool_AB=0, fees_AB=0):
"""Compare for 2 assets, buy&hold strategy with separate staking and farming by liquidity pool providing.
Considering: impermanent loss, fees earned and farming/staking rewards
Args:
days (int): days for strategy
var_A (float, optional): Percentual variation for A token. Ex 10 for 10%
var_B (float, optional): Percentual variation for B token. Ex 10 for 10%
rw_pool_A (float, optional): Percentual rewards per day for one asset pool (Token A)
rw_pool_B (float, optional): Percentual rewards per day for one asset pool (Token B)
rw_pool_AB (float, optional): Percentual rewards per day for two asset farm (LP Token AB)
fees_AB (float, optional): Percentual provider liquidity fees earned per day
Returns:
dict: Percentual returns for each strategy:
buy_hold two assets in your wallet
stake two assets at individual pools
farming by liquidity pool
"""
buy_hold = (0.5 * var_A + 0.5 * var_B)/100
x = (var_A/100 + 1) / (var_B/100 + 1)
perdida_impermanente = 2 * (x**0.5 / (1 + x)) - 1
stake = buy_hold + 0.5 * days * (rw_pool_A/100 + rw_pool_B/100)
farm = buy_hold * (1+perdida_impermanente) + days * (rw_pool_AB/100 + fees_AB/100)
mejor = 'Farm' if farm > stake else 'Stake'
return {'buy_hold':f'{buy_hold:.2%}', 'stake':f'{stake:.2%}', 'farm':f'{farm:.2%}', 'Best': mejor}
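# Hedged usage sketch (figures are illustrative, not market data): 30 days,
# A +10%, B -5%, 0.05%/0.08% daily single-asset rewards, 0.12% daily farm
# rewards plus 0.02% daily LP fees:
#   compare(30, var_A=10, var_B=-5, rw_pool_A=0.05, rw_pool_B=0.08,
#           rw_pool_AB=0.12, fees_AB=0.02)
#   -> {'buy_hold': '2.50%', 'stake': '4.45%', 'farm': '6.69%', 'Best': 'Farm'}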
######################################################################
## ##
## Llama API ##
## Public API https://docs.llama.fi/api ##
## ##
######################################################################
def getProtocols():
"""Get list all DeFi protocols across all blockchains
Returns:
DataFrame: All DeFi dApps
"""
url = "https://api.llama.fi/protocols"
r = requests.get(url)
r_json = r.json()
df = pd.DataFrame(r_json)
df.set_index('name', inplace=True)
return df
def getProtocol(protocol):
"""Get metrics and historic TVL for one DeFi dApp
Args:
protocol (String): Name of protocol ie "Uniswap"
Returns:
tuple (Dictionary, DataFrame): Dictionary with protocol metadata & DataFrame with historical TVL
"""
url = f"https://api.llama.fi/protocol/{protocol}"
r = requests.get(url)
r_json = r.json()
df = pd.DataFrame(r_json['tvl'])
df.date = pd.to_datetime(df.date, unit='s')
df = df.set_index('date')
del r_json['tvl']
metadata = r_json
return metadata, df
def getChart():
"""Get historical TVL across all DeFi dApps, cummulative result
Returns:
DataFrame: DataFrame date-indexed with all days TVL
"""
url = "https://api.llama.fi/charts"
r = requests.get(url)
r_json = r.json()
df = pd.DataFrame(r_json)
df.date = pd.to_datetime(df.date, unit='s')
df = df.set_index('date')
return df
######################################################################
## ##
## CoinGecko API ##
## Public API https://www.coingecko.com/es/api ##
## ##
######################################################################
def geckoPrice(tokens, quote='usd'):
"""get price of combine pairs
Args:
tokens (comma separated strings): ie "bitcoin,ethereum"
quote (comma separated fiat or quote currency): ie: "usd,eur"
Returns:
dictionary: Returns pairs quotes
"""
url = "https://api.coingecko.com/api/v3/simple/price"
params = {"ids":tokens, "vs_currencies":quote}
r = requests.get(url, params).json()
return r
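# Hedged example (live prices vary):
#   geckoPrice("bitcoin,ethereum", "usd,eur")
#   -> {'bitcoin': {'usd': ..., 'eur': ...}, 'ethereum': {'usd': ..., 'eur': ...}}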
def geckoPriceAt(token, date, quote='usd'):
"""get price of token at historical date
Args:
token (string): ie "bitcoin"
quote (fiat or quote currency): ie: "usd"
Returns:
float: Return token price
"""
from json.decoder import JSONDecodeError
result = None
    while result is None:
try:
url = f"https://api.coingecko.com/api/v3/coins/{token}/history?date={date}&localization=false"
r = requests.get(url).json()
result = r['market_data']['current_price'][quote]
except JSONDecodeError as e:
from time import sleep
print('Access Denied...Waiting...')
sleep(65)
return result
def geckoFullList(page=1, per_page=250, names=False):
"""Returns list of full detail conGecko currency list
Args:
page (int, optional): number of pages
per_page (int, optional): number of records per page
Returns:
DataFrame: list of full detail conGecko currency list
"""
url = "https://api.coingecko.com/api/v3/coins/markets"
params = {"vs_currency":"usd", "order":"market_cap_desc", "per_page":per_page, "page":page}
r = requests.get(url, params).json()
df = pd.DataFrame(r)
if names:
df = df[['symbol', 'id', 'name']]
return df
def geckoGetSymbol(name):
"""Returns ticker of coin given name
Args:
        name (string, required): name of coin (e.g. 'bitcoin')
    Returns:
        string: corresponding ticker symbol
"""
from json.decoder import JSONDecodeError
result = None
    while result is None:
try:
url = f"https://api.coingecko.com/api/v3/coins/{name}"
r = requests.get(url).json()
result = r['symbol'].upper()
except JSONDecodeError as e:
from time import sleep
print('Access Denied...Waiting...')
sleep(65)
return result
def geckoMarkets(name):
"""Get top100 markets (pairs, quotes, exchanges, volume, spreads and more)
Args:
name (string): gecko ID, ie "bitcoin"
Returns:
DataFrame: Full detail markets available
"""
url = f"https://api.coingecko.com/api/v3/coins/{name}/tickers"
r = requests.get(url).json()['tickers']
df = pd.DataFrame(r)
df['exchange'] = df['market'].apply(pd.Series)['name']
df['volume_usd'] = df['converted_volume'].apply(pd.Series)['usd']
df['price_usd'] = df['converted_last'].apply(pd.Series)['usd']
df.set_index('exchange', inplace=True)
cols = ['base','target','last', 'volume','bid_ask_spread_percentage','timestamp',
'volume_usd','price_usd','trust_score']
df = df.loc[:,cols]
cols[4] = 'spread'
df.columns = cols
df.timestamp = pd.to_datetime(df.timestamp)
return df.sort_values('volume_usd', ascending=False)
def geckoHistorical(name, vs_currency='usd', days='max'):
"""Historical prices from coinGecko
Args:
name (string): gecko ID, ie "bitcoin"
vs_currency (str, optional): ie "usd" (default)
days (str, optional): ie "20", "max" (default)
Returns:
DataFrame: Full history: date, price, market cap & volume
"""
url = f"https://api.coingecko.com/api/v3/coins/{name}/market_chart"
params = {"vs_currency":{vs_currency}, "days":days}
r = requests.get(url, params).json()
prices = pd.DataFrame(r['prices'])
market_caps = pd.DataFrame(r['market_caps'])
total_volumes = pd.DataFrame(r['total_volumes'])
df = pd.concat([prices, market_caps[1], total_volumes[1]], axis=1)
df[0] = pd.to_datetime(df[0], unit='ms')
df.columns = ['date','price','market_caps','total_volumes']
df.set_index('date', inplace=True)
return df
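# Hedged usage sketch: full daily history, date-indexed
#   df = geckoHistorical('bitcoin')
#   columns: price, market_caps, total_volumes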
def geckoHistoricalRange(name, begin, end, vs_currency='usd'):
"""Historical prices from coinGecko
Args:
name (string): gecko ID, ie "bitcoin"
vs_currency (str, optional): ie "usd" (default)
days (str, optional): ie "20", "max" (default)
Returns:
DataFrame: Full history: date, price, market cap & volume
"""
url = f"https://api.coingecko.com/api/v3/coins/{name}/market_chart"
params = {"vs_currency":{vs_currency}, "from":begin, 'to':end}
r = requests.get(url, params).json()
prices = pd.DataFrame(r['prices'])
market_caps = pd.DataFrame(r['market_caps'])
total_volumes = pd.DataFrame(r['total_volumes'])
df = pd.concat([prices, market_caps[1], total_volumes[1]], axis=1)
df[0] = pd.to_datetime(df[0], unit='ms')
df.columns = ['date','price','market_caps','total_volumes']
df.set_index('date', inplace=True)
return df
def getGeckoIDs(pages):
"""IDs List from coinGecko
    Args:
        pages (int): number of 250-record pages to fetch
    Returns:
        list: first pages*250 coinGecko IDs by marketCap rank
"""
ids_list = []
for i in range(pages):
print(f'searching coins page: {i} ', end='\r')
ids_list = ids_list + geckoFullList(page=i, per_page=250)['id'].tolist()
return ids_list
def farmSimulate(pair, apr, start='2021-01-01'):
"""Simulate farm result with historical prices & APR value
Args:
pair (list): gecko IDs list ["bitcoin",'tether']
apr (float): ie 25 (for 25% Anual rewards)
        start (str, optional): ISO format YYYY-MM-DD, ie "2021-01-01" (default "2021-01-01")
Returns:
Dict & Plot: Full farming strategy results
"""
prices = pd.DataFrame()
for coin in pair:
print(f'Downloading {coin}')
try:
df = geckoHistorical(coin)
prices[coin] = df['price']
except:
            print(f'Error getting {coin} prices')
if len(prices.columns)==2:
prices = prices.dropna().iloc[:]
start = datetime.datetime.strptime(start, '%Y-%m-%d')
farm = prices.loc[prices.index>=start]
farm = farm.divide(farm.iloc[0])
farm['ratio'] = farm.iloc[:,1].divide(farm.iloc[:,0])
farm['iloss'] = 2 * (farm['ratio']**0.5 / (1 + farm['ratio'])) - 1
farm['rewards'] = pd.Series(2*apr/100/365, index=farm.index).cumsum()
farm['buy_hold'] = (farm.iloc[:,0] + farm.iloc[:,1])/2
farm['farm'] = farm.buy_hold - farm.iloss + farm.rewards
cagrs = farm.iloc[-1]**(1/(365/len(farm)))-1
sigmas = farm.pct_change().std() * 365**0.5
sharpes = cagrs.divide(sigmas).round(2)
dd = farm/farm.cummax()-1
fig = plt.figure(figsize=(15,8))
gs = GridSpec(nrows=2,ncols=4, figure=fig, height_ratios=[2,1], hspace=0.45, wspace=0.35, top=.9)
ax_upleft = fig.add_subplot(gs[0,0:2])
ax_upright = fig.add_subplot(gs[0,2:])
cols = 4
ax_down = [fig.add_subplot(gs[1,i]) for i in range(cols)]
ax_upleft.plot(farm.iloss.abs(), label='Impermanent Loss')
ax_upleft.plot(farm.rewards, label='Farming Rewards')
ax_upleft.legend()
ax_upleft.grid()
ax_upleft.set_title('Impermanent Loss vs Farming Rewards')
ax_upleft.tick_params(axis='x', rotation=45)
ax_upright.plot(farm.iloc[:,:2])
ax_upright.plot(farm.buy_hold)
ax_upright.plot(farm.farm)
ax_upright.grid()
ax_upright.legend([pair[0],pair[1],'Buy&Hold','Farming Strategy'])
ax_upright.set_title(f'{pair[0]} vs {pair[1]} vs Buy & Hold vs Farming strategy payoff')
ax_upright.tick_params(axis='x', rotation=45)
cagrs[[pair[0],pair[1],'buy_hold','farm']].plot(kind='bar', ax=ax_down[0])
sigmas[[pair[0],pair[1],'buy_hold','farm']].plot(kind='bar', ax=ax_down[1])
sharpes[[pair[0],pair[1],'buy_hold','farm']].plot(kind='bar', ax=ax_down[2])
dd[[pair[0],pair[1],'buy_hold','farm']].min().plot(kind='bar', ax=ax_down[3])
ax_down[0].set_title('CAGR', fontsize=12)
ax_down[1].set_title('Anualized Volatility', fontsize=12)
ax_down[2].set_title('Sharpe Ratio', fontsize=12)
ax_down[3].set_title('Max DrawDowns', fontsize=12)
[ax_down[i].grid(alpha=0.4) for i in range(cols)]
for i in range(cols):
ax_down[i].spines['top'].set_visible(False)
ax_down[i].spines['right'].set_visible(False)
b_h = (farm.iloc[-1].iloc[0] + farm.iloc[-1].iloc[1])/2 - 1
iloss = farm.iloc[-1].iloss
rewards = farm.iloc[-1].rewards
net_farming = b_h - iloss + rewards
result = {'Token 1': pair[0], 'Token 2': pair[1], 'start':start.isoformat()[:10],
'fixed APR': f'{apr/100:.0%}', 'Buy & Hold': f'{b_h:.2%}',
'Impermanent Loss':f'{iloss:.2%}', 'Farming Rewards': f'{rewards:.2%}',
'Farming + Rewards - IL': f'{net_farming:.2%}' }
else:
        result = 'Error getting historical prices, see the getGeckoIDs() function to get CoinGecko IDs'
return result
######################################################################
## ##
## Pancake Swap API ##
## API https://github.com/pancakeswap/pancake-info-api ##
## ##
######################################################################
def toFloatPartial(df):
for i in df.columns:
try:
df[[i]] = df[[i]].astype(float)
except:
pass
return df
def pcsSummary(as_df = True ):
url = "https://api.pancakeswap.info/api/v2/summary"
r = requests.get(url).json()
data = r.get('data', None)
upd = r.get('updated_at')/1000
upd_dt = datetime.datetime.fromtimestamp(upd)
if as_df:
df = pd.DataFrame.from_dict(data, orient='index')
df = toFloatPartial(df)
df['updated'] = upd_dt
return df
else:
return r
def pcsTokens(as_df = True):
"""get all token listed in pancakeswap
Args:
as_df (bool, optional): if True (default), return is a dataframe, else is a dictionary
Returns:
DataFrame with next columns: name symbol price price_BNB updated
"""
    # latest price and base/quote volume for all pairs
url = "https://api.pancakeswap.info/api/v2/tokens"
r = requests.get(url).json()
data = r.get('data', None)
upd = r.get('updated_at')/1000
upd_dt = datetime.datetime.fromtimestamp(upd)
if as_df:
df = pd.DataFrame.from_dict(data, orient='index')
df = toFloatPartial(df)
df['updated'] = upd_dt
return df
else:
return r
def pcsPairs(as_df = True):
"""get top 1000 pancakeswap pairs LP order by reserves
Args:
as_df (bool, optional): if True (default), return is a dataframe, else is a dictionary
Returns:
DataFrame with next columns: 'pair_address', 'base_name', 'base_symbol', 'base_address',
'quote_name', 'quote_symbol', 'quote_address', 'price', 'base_volume',
'quote_volume', 'liquidity', 'liquidity_BNB', 'updated'
"""
url = "https://api.pancakeswap.info/api/v2/pairs"
r = requests.get(url).json()
data = r.get('data', None)
upd = r.get('updated_at')/1000
upd_dt = datetime.datetime.fromtimestamp(upd)
if as_df:
df = pd.DataFrame.from_dict(data, orient='index')
df = toFloatPartial(df)
df['updated'] = upd_dt
return df
else:
return r
def pcsTokenInfo(search):
"""get info from a token
Args:
search (string): Token symbol or contract address
Returns:
Dict:
{
'name': 'Wrapped BNB',
'symbol': 'WBNB',
'price': '524.5429',
'price_BNB': '1'
}
"""
search = 'WBNB' if search.upper() == 'BNB' else search
url = "https://api.pancakeswap.info/api/v2/tokens"
r = requests.get(url).json()
data = r.get('data', None)
res = f"Not found: {search}"
for contract, values in data.items():
if search.upper() == values['symbol'].upper() or search.upper()==contract.upper():
res = data[contract]
break
return res
def pcsPairInfo(base, quote):
"""get info from a token pair LP
Args:
base (string): Base LP token, ie "CAKE"
quote (string): Quote LP token, ie "BNB"
        it's the same if you call pcsPairInfo('cake', 'bnb') or pcsPairInfo('bnb', 'cake')
Returns:
Dict: {
'pair_address': '0xA527a61703D82139F8a06Bc30097cC9CAA2df5A6',
'base_name': 'PancakeSwap Token',
'base_symbol': 'Cake',
'base_address': '0x0E09FaBB73Bd3Ade0a17ECC321fD13a19e81cE82',
'quote_name': 'Wrapped BNB',
'quote_symbol': 'WBNB',
'quote_address': '0xbb4CdB9CBd36B01bD1cBaEBF2De08d9173bc095c',
'price': '0.04311198194009326668',
'base_volume': '22248744.85',
'quote_volume': '934856.36',
'liquidity': '982769040.63',
'liquidity_BNB': '1878155.84'
}
* price is actually a ratio between base/quote tokens
"""
url = "https://api.pancakeswap.info/api/pairs"
r = requests.get(url).json()
data = r.get('data', None)
res = f"Not found: {base}-{quote}"
base = 'WBNB' if base.upper() == 'BNB' else base
quote = 'WBNB' if quote.upper() == 'BNB' else quote
for contract, values in data.items():
base_ = base.upper() == values['base_symbol'].upper()
quote_ = quote.upper() == values['quote_symbol'].upper()
base_cross = base.upper() == values['quote_symbol'].upper()
quote_cross = quote.upper() == values['base_symbol'].upper()
if (base_ and quote_) or (base_cross and quote_cross):
res = data[contract]
break
return res
def iloss_simulate(base_token, quote_token, value=100, base_pct_chg=0, quote_pct_chg=0):
"""Calculate simulated impermanent loss from an initial value invested, get real time prices from pancakeswap API
This method create a 3D interpolated surface for impermanent loss and initial/final value invested
Args:
base_token (string): Pair first token, ie CAKE
quote_token (string): Pais second token, ie BNB
value (int, optional): Value investen in LP default=100
base_pct_chg (int, optional): value assming will change first token of LP pair, ie 10 (for +10% change)
quote_pct_chg (int, optional): value assming will change first token of LP pair, ie -30 (for -30% change)
Returns:
tuple (value_f, iloss): final value of value invested, and decimal impermanent loss
"""
base_token = 'WBNB' if base_token.upper() == 'BNB' else base_token
quote_token = 'WBNB' if quote_token.upper() == 'BNB' else quote_token
# get real time prices
tokens = pcsTokens()
px_base = float(tokens.loc[tokens.symbol.str.upper()==base_token.upper()].price)
px_quote = float(tokens.loc[tokens.symbol.str.upper()==quote_token.upper()].price)
# Prepare grid
q_base, q_quote = (value/2)/px_base, (value/2)/px_quote
pxs_base = [px_base*i/100 for i in range(1,301)]
pxs_quote = [px_quote*i/100 for i in range(1,301)]
rows = []
for px_b in pxs_base:
for px_q in pxs_quote:
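            # Constant-product AMM impermanent loss: IL = 2*sqrt(r)/(1+r) - 1,
            # where r is the relative price ratio between base and quote.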
            ratio = (px_b / px_base) / (px_q / px_quote)
            iloss = 2 * (ratio**0.5 / (1 + ratio)) - 1
            row = {'px_base': px_b, 'px_quote': px_q,
                   'ratio': ratio,
                   'impermanent_loss': iloss}
            rows.append(row)
df = pd.DataFrame(rows)
    df_ok = df.loc[:, ['px_base', 'px_quote', 'impermanent_loss']]
    df_ok = df_ok.replace('NaN', np.nan).dropna()
if all(isinstance(i, (int, float)) for i in (value, base_pct_chg, quote_pct_chg)):
px_base_f = px_base * (1+base_pct_chg/100)
px_quote_f = px_quote * (1+quote_pct_chg/100)
ratio = (px_base_f / px_base) / ( px_quote_f / px_quote)
iloss = 2 * (ratio**0.5 / (1 + ratio)) - 1
value_f = (px_base_f*q_base + px_quote_f * q_quote) * (iloss+1)
    else:
        px_base_f, px_quote_f = px_base, px_quote
        iloss = 0
        value_f = None
        print('must input a numeric value and pct changes for base and quote to calculate the final value')
    # Plotting surface
fig = plt.figure(figsize=(8,8))
x1 = np.linspace(df_ok['px_base'].min(), df_ok['px_base'].max(), len(df_ok['px_base'].unique()))
y1 = np.linspace(df_ok['px_quote'].min(), df_ok['px_quote'].max(), len(df_ok['px_quote'].unique()))
x2, y2 = np.meshgrid(x1, y1)
    Z = interpolate.griddata((df_ok['px_base'], df_ok['px_quote']), df_ok['impermanent_loss'], (x2, y2))
    Z[np.isnan(Z)] = df_ok.impermanent_loss.mean()
ax = plt.axes(projection='3d', alpha=0.2)
ax.plot_wireframe(x2, y2, Z, color='tab:blue', lw=1, cmap='viridis', alpha=0.6)
    # Start values plotting
xmax = df_ok.px_base.max()
ymax = df_ok.px_quote.max()
ax.plot([px_base, px_base], [0,px_quote], [-1,-1], ls='--', c='k', lw=1)
ax.plot([px_base, px_base], [px_quote,px_quote], [0,-1], ls='--', c='k', lw=1)
ax.plot([px_base, 0], [px_quote, px_quote], [-1,-1], ls='--', c='k', lw=1)
    # End values plotting
ax.plot([px_base_f, px_base_f], [0,px_quote_f], [-1,-1], ls='--', c='gray', lw=1)
ax.plot([px_base_f, px_base_f], [px_quote_f,px_quote_f], [iloss,-1], ls='--', c='gray', lw=1)
ax.plot([px_base_f, 0], [px_quote_f, px_quote_f], [-1,-1], ls='--', c='gray', lw=1)
ax.plot([px_base_f, px_base_f], [px_quote_f,ymax], [iloss,iloss], ls='--', c='gray', lw=1)
ax.plot([px_base_f, 0], [ymax,ymax], [iloss,iloss], ls='--', c='gray', lw=1)
# Plot settings
# Colorbar only for plot_surface() method instead plot_wireframe()
# m = cm.ScalarMappable(cmap=cm.viridis)
    # m.set_array(df_ok['impermanent_loss'])
# plt.colorbar(m, fraction=0.02, pad=0.1)
    x, y, z = (px_base, px_quote, 0.05)
p = ax.scatter(x, y, z, c='k', marker='v', s=300)
ax.set_title('Impermanent Loss 3D Surface', y=0.95)
ax.set_xlabel(f'Price {base_token}')
ax.set_ylabel(f'Price {quote_token}')
    ax.set_zlabel('Impermanent loss')
ax.view_init(elev=25, azim=240) # start view angle
print (f"\nStart value USD {value:.0f}, {base_token} USD {px_base:.2f}, {quote_token} USD {px_quote:.2f}")
print(f"\nResults assuming {base_token.upper()} {base_pct_chg}%, and {quote_token.upper()} {quote_pct_chg}%")
print (f"End value estimate USD {value_f:.0f}, iloss: {iloss:.2%}")
plt.show()
return value_f , iloss
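# Example usage (sketch; symbols and percentages are illustrative):
# value_f, iloss = iloss_simulate('CAKE', 'BNB', value=100, base_pct_chg=10, quote_pct_chg=-30)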
| 35.881129
| 117
| 0.576984
|
4a12137c783bd7e75143bd4c541929054911094e
| 1,329
|
py
|
Python
|
grow/pods/document_cache.py
|
davidwtbuxton/grow
|
3fcc201ec5802381a8273bd767450be755e1251b
|
[
"MIT"
] | 1
|
2019-01-25T17:00:42.000Z
|
2019-01-25T17:00:42.000Z
|
grow/pods/document_cache.py
|
davidwtbuxton/grow
|
3fcc201ec5802381a8273bd767450be755e1251b
|
[
"MIT"
] | null | null | null |
grow/pods/document_cache.py
|
davidwtbuxton/grow
|
3fcc201ec5802381a8273bd767450be755e1251b
|
[
"MIT"
] | 1
|
2019-04-23T19:38:19.000Z
|
2019-04-23T19:38:19.000Z
|
"""
Cache for storing and retrieving data specific to a document.
Supports caching specific to the pod_path of a document.
The contents of the cache should be raw and not internationalized as it will
be shared between locales with the same pod_path.
"""
class DocumentCache(object):
def __init__(self):
self.reset()
def _ensure_exists(self, doc, value=None):
if doc.pod_path not in self._cache:
self._cache[doc.pod_path] = value or {}
return doc.pod_path
def add(self, doc, value):
self._cache[doc.pod_path] = value
def add_all(self, path_to_cached):
        for path, value in path_to_cached.items():
self._cache[path] = value
def add_property(self, doc, prop, value):
path = self._ensure_exists(doc)
self._cache[path][prop] = value
def remove(self, doc):
return self.remove_by_path(doc.pod_path)
def remove_by_path(self, path):
return self._cache.pop(path, None)
def export(self):
return self._cache
def get(self, doc):
return self._cache.get(doc.pod_path, None)
def get_property(self, doc, prop):
if doc.pod_path in self._cache:
return self._cache[doc.pod_path].get(prop, None)
return None
def reset(self):
self._cache = {}
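# Minimal usage sketch (assumes a document object exposing a `pod_path` attribute):
#   cache = DocumentCache()
#   cache.add_property(doc, 'title', 'Home')  # cached per pod_path, shared across locales
#   cache.get_property(doc, 'title')          # -> 'Home'
#   cache.remove(doc)                         # drop everything cached for this path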
| 26.58
| 76
| 0.64936
|
4a12139d1e158d661f84279cc6cdffd4178edd86
| 4,011
|
py
|
Python
|
vkwave/bots/core/dispatching/dp/dp.py
|
deleteduser0206/vkwave
|
8e641bcdd9bec5c30818c82f34662fbd14053e88
|
[
"MIT"
] | 222
|
2020-03-30T18:09:20.000Z
|
2022-03-27T18:25:04.000Z
|
vkwave/bots/core/dispatching/dp/dp.py
|
deleteduser0206/vkwave
|
8e641bcdd9bec5c30818c82f34662fbd14053e88
|
[
"MIT"
] | 62
|
2020-03-30T18:31:25.000Z
|
2021-12-21T17:00:44.000Z
|
vkwave/bots/core/dispatching/dp/dp.py
|
deleteduser0206/vkwave
|
8e641bcdd9bec5c30818c82f34662fbd14053e88
|
[
"MIT"
] | 91
|
2020-03-30T18:34:49.000Z
|
2022-03-23T12:58:49.000Z
|
import logging
from typing import List, NewType, Optional, cast, Union
from vkwave.api.methods import API
from vkwave.api.token.token import AnyABCToken
from vkwave.bots.core.dispatching.events.base import BaseEvent, BotEvent, UserEvent
from vkwave.bots.core.dispatching.events.raw import ExtensionEvent
from vkwave.bots.core.dispatching.router.router import HANDLER_NOT_FOUND, BaseRouter
from vkwave.bots.core.tokens.storage import TokenStorage, UserTokenStorage
from vkwave.bots.core.tokens.types import GroupId
from vkwave.bots.core.types.bot_type import BotType
from vkwave.types.bot_events import get_event_object
from vkwave.types.user_events import get_event_object as user_get_event_object
from .middleware.middleware import MiddlewareManager
from .processing_options import ProcessEventOptions
from .result_caster import ResultCaster
ProcessingResult = NewType("ProcessingResult", bool)
logger = logging.getLogger(__name__)
class Dispatcher:
def __init__(
self,
api: API,
token_storage: Union[TokenStorage, UserTokenStorage],
bot_type: BotType = BotType.BOT,
):
self.bot_type: BotType = bot_type
self.api: API = api
self.middleware_manager = MiddlewareManager()
self.token_storage: Union[TokenStorage, UserTokenStorage] = token_storage
self.routers: List[BaseRouter] = []
self.result_caster: ResultCaster = ResultCaster()
def add_router(self, router: BaseRouter):
self.routers.append(router)
async def process_event(
self, revent: ExtensionEvent, options: ProcessEventOptions
) -> ProcessingResult:
event: BaseEvent
logger.debug(f"ProcessEventOptions:\n{options}")
logger.debug(f"New event! Raw:\n{revent}")
if options.do_not_handle:
logger.debug("ProcessEventOptions.do_not_handle is True")
logger.debug("Event was skipped")
return ProcessingResult(False)
if revent.bot_type is BotType.BOT:
revent.raw_event = cast(dict, revent.raw_event)
group_id = revent.raw_event["group_id"]
token = await self.token_storage.get_token(GroupId(group_id))
event = BotEvent(get_event_object(revent.raw_event), self.api.with_token(token))
else:
revent.raw_event = cast(list, revent.raw_event)
obj = user_get_event_object(revent.raw_event)
event = UserEvent(obj, self.api.with_token(await self.token_storage.get_token()))
logger.debug(f"New event! Formatted:\n{event}")
if not await self.middleware_manager.execute_pre_process_event(event):
return ProcessingResult(False)
for router in self.routers:
if await router.is_suitable(event):
result = await router.process_event(event)
if result is HANDLER_NOT_FOUND:
continue
await self.result_caster.cast(result, event)
logger.debug("Event was successfully handled")
await self.middleware_manager.execute_post_process_event(event)
return ProcessingResult(True)
logger.debug("Event wasn't handled")
await self.middleware_manager.execute_post_process_event(event)
return ProcessingResult(False)
async def cache_potential_tokens(self, tokens: Optional[List[AnyABCToken]] = None):
tokens_to_cache = self.api.default_api_options.tokens.copy()
if tokens is not None:
tokens_to_cache.extend(tokens)
for token in tokens_to_cache:
ctx = self.api.with_token(token)
id_to_cache: int
if self.bot_type is BotType.BOT:
groups = (await ctx.groups.get_by_id()).response
id_to_cache = cast(int, groups[0].id)
else:
users = (await ctx.users.get()).response
id_to_cache = cast(int, users[0].id)
self.token_storage.append(id_to_cache, token)
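# Usage sketch (router, event and options objects are illustrative):
#   dp = Dispatcher(api, token_storage)
#   dp.add_router(my_router)
#   await dp.cache_potential_tokens()
#   handled = await dp.process_event(extension_event, options)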
| 41.350515
| 93
| 0.68711
|
4a1213ce6c39e923dbd6e1c456583adc00c56c39
| 12,988
|
py
|
Python
|
zerver/tornado/socket.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
zerver/tornado/socket.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-06-06T00:51:42.000Z
|
2022-02-10T21:38:40.000Z
|
zerver/tornado/socket.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# See https://yakkl.readthedocs.io/en/latest/subsystems/sending-messages.html#websockets
# for high-level documentation on this subsystem.
from typing import Any, Dict, Mapping, Optional, Union
from django.conf import settings
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from django.contrib.sessions.models import Session as djSession
try:
from django.middleware.csrf import _compare_salted_tokens
except ImportError:
# This function was added in Django 1.10.
def _compare_salted_tokens(token1: str, token2: str) -> bool:
return token1 == token2
import sockjs.tornado
from sockjs.tornado.session import ConnectionInfo
import tornado.ioloop
import ujson
import logging
from zerver.models import UserProfile, get_user_profile_by_id
from zerver.lib.queue import queue_json_publish
from zerver.decorator import JsonableError
from zerver.middleware import record_request_start_data, record_request_stop_data, \
record_request_restart_data, write_log_line, format_timedelta
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.sessions import get_session_user
from zerver.tornado.event_queue import get_client_descriptor
from zerver.tornado.exceptions import BadEventQueueIdError
from zerver.tornado.sharding import tornado_return_queue_name
logger = logging.getLogger('yakkl.socket')
def get_user_profile(session_id: Optional[str]) -> Optional[UserProfile]:
if session_id is None:
return None
try:
djsession = djSession.objects.get(expire_date__gt=timezone_now(),
session_key=session_id)
except djSession.DoesNotExist:
return None
try:
return get_user_profile_by_id(get_session_user(djsession))
except (UserProfile.DoesNotExist, KeyError):
return None
connections = dict() # type: Dict[Union[int, str], 'SocketConnection']
def get_connection(id: Union[int, str]) -> Optional['SocketConnection']:
return connections.get(id)
def register_connection(id: Union[int, str], conn: 'SocketConnection') -> None:
# Kill any old connections if they exist
if id in connections:
connections[id].close()
conn.client_id = id
connections[conn.client_id] = conn
def deregister_connection(conn: 'SocketConnection') -> None:
assert conn.client_id is not None
del connections[conn.client_id]
redis_client = get_redis_client()
def req_redis_key(req_id: str) -> str:
return 'socket_req_status:%s' % (req_id,)
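# Each request's status lives in a Redis hash under this key: on_message marks it
# 'received' (with a 24h expiry), a worker later fills in 'status'/'response', and
# authenticate_client reports it back to clients via 'status_inquiries'.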
class CloseErrorInfo:
def __init__(self, status_code: int, err_msg: str) -> None:
self.status_code = status_code
self.err_msg = err_msg
class SocketConnection(sockjs.tornado.SockJSConnection):
client_id = None # type: Optional[Union[int, str]]
def on_open(self, info: ConnectionInfo) -> None:
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
ioloop = tornado.ioloop.IOLoop.instance()
self.authenticated = False
self.session.user_profile = None
self.close_info = None # type: Optional[CloseErrorInfo]
self.did_close = False
try:
self.browser_session_id = info.get_cookie(settings.SESSION_COOKIE_NAME).value
self.csrf_token = info.get_cookie(settings.CSRF_COOKIE_NAME).value
except AttributeError:
# The request didn't contain the necessary cookie values. We can't
# close immediately because sockjs-tornado doesn't expect a close
# inside on_open(), so do it on the next tick.
self.close_info = CloseErrorInfo(403, "Initial cookie lacked required values")
ioloop.add_callback(self.close)
return
def auth_timeout() -> None:
self.close_info = CloseErrorInfo(408, "Timeout while waiting for authentication")
self.close()
self.timeout_handle = ioloop.call_later(10, auth_timeout)
write_log_line(log_data, path='/socket/open', method='SOCKET',
remote_ip=info.ip, email='unknown', client_name='?')
def authenticate_client(self, msg: Dict[str, Any]) -> None:
if self.authenticated:
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'error',
'msg': 'Already authenticated'}})
return
user_profile = get_user_profile(self.browser_session_id)
if user_profile is None:
raise JsonableError(_('Unknown or missing session'))
self.session.user_profile = user_profile
if 'csrf_token' not in msg['request']:
# Debugging code to help with understanding #6961
logging.error("CSRF token missing from websockets auth request: %s" % (msg['request'],))
raise JsonableError(_('CSRF token entry missing from request'))
if not _compare_salted_tokens(msg['request']['csrf_token'], self.csrf_token):
raise JsonableError(_('CSRF token does not match that in cookie'))
if 'queue_id' not in msg['request']:
raise JsonableError(_("Missing 'queue_id' argument"))
queue_id = msg['request']['queue_id']
client = get_client_descriptor(queue_id)
if client is None:
raise BadEventQueueIdError(queue_id)
if user_profile.id != client.user_profile_id:
raise JsonableError(_("You are not the owner of the queue with id '%s'") % (queue_id,))
self.authenticated = True
register_connection(queue_id, self)
response = {'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'success', 'msg': ''}}
status_inquiries = msg['request'].get('status_inquiries')
if status_inquiries is not None:
results = {} # type: Dict[str, Dict[str, str]]
for inquiry in status_inquiries:
status = redis_client.hgetall(req_redis_key(inquiry)) # type: Dict[bytes, bytes]
if len(status) == 0:
result = {'status': 'not_received'}
elif b'response' not in status:
result = {'status': status[b'status'].decode('utf-8')}
else:
result = {'status': status[b'status'].decode('utf-8'),
'response': ujson.loads(status[b'response'])}
results[str(inquiry)] = result
response['response']['status_inquiries'] = results
self.session.send_message(response)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self.timeout_handle)
def on_message(self, msg_raw: str) -> None:
log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
record_request_start_data(log_data)
msg = ujson.loads(msg_raw)
if self.did_close:
user_email = 'unknown'
if self.session.user_profile is not None:
user_email = self.session.user_profile.email
logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
% (self.session.transport_name,
user_email,
self.client_id))
self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})
if msg['type'] == 'auth':
log_data['extra'] += ']'
try:
self.authenticate_client(msg)
# TODO: Fill in the correct client
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email=self.session.user_profile.email,
client_name='?')
except JsonableError as e:
response = e.to_json()
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
else:
if not self.authenticated:
response = {'result': 'error', 'msg': "Not yet authenticated"}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
redis_key = req_redis_key(msg['req_id'])
with redis_client.pipeline() as pipeline:
pipeline.hmset(redis_key, {'status': 'received'})
pipeline.expire(redis_key, 60 * 60 * 24)
pipeline.execute()
record_request_stop_data(log_data)
request_environ = dict(REMOTE_ADDR=self.session.conn_info.ip)
queue_json_publish("message_sender",
dict(request=msg['request'],
req_id=msg['req_id'],
server_meta=dict(user_id=self.session.user_profile.id,
client_id=self.client_id,
return_queue=tornado_return_queue_name(self.port),
log_data=log_data,
request_environ=request_environ)))
def on_close(self) -> None:
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
if self.close_info is not None:
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email='unknown',
client_name='?', status_code=self.close_info.status_code,
error_content=self.close_info.err_msg)
else:
deregister_connection(self)
email = self.session.user_profile.email \
if self.session.user_profile is not None else 'unknown'
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email=email,
client_name='?')
self.did_close = True
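# Message shapes implied by the handlers above (summary, not a formal spec):
#   client -> server: {'req_id': ..., 'type': 'auth' | <service type>, 'request': {...}}
#     'auth' requests carry 'csrf_token', 'queue_id' and optional 'status_inquiries'
#   server -> client: {'req_id': ..., 'type': 'ack'} immediately, then
#                     {'req_id': ..., 'type': 'response', 'response': {...}}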
def respond_send_message(data: Mapping[str, Any]) -> None:
log_data = data['server_meta']['log_data']
record_request_restart_data(log_data)
worker_log_data = data['server_meta']['worker_log_data']
forward_queue_delay = worker_log_data['time_started'] - log_data['time_stopped']
return_queue_delay = log_data['time_restarted'] - data['server_meta']['time_request_finished']
service_time = data['server_meta']['time_request_finished'] - worker_log_data['time_started']
log_data['extra'] += ', queue_delay: %s/%s, service_time: %s]' % (
format_timedelta(forward_queue_delay), format_timedelta(return_queue_delay),
format_timedelta(service_time))
client_id = data['server_meta']['client_id']
connection = get_connection(client_id)
if connection is None:
logger.info("Could not find connection to send response to! client_id=%s" % (client_id,))
else:
connection.session.send_message({'req_id': data['req_id'], 'type': 'response',
'response': data['response']})
# TODO: Fill in client name
# TODO: Maybe fill in the status code correctly
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=connection.session.conn_info.ip,
email=connection.session.user_profile.email, client_name='?')
# We disable the eventsource and htmlfile transports because they cannot
# securely send us the yakkl.com cookie, which we use as part of our
# authentication scheme.
sockjs_url = '%s/static/third/sockjs/sockjs-0.3.4.js' % (settings.ROOT_DOMAIN_URI,)
sockjs_router = sockjs.tornado.SockJSRouter(SocketConnection, "/sockjs",
{'sockjs_url': sockjs_url,
'disabled_transports': ['eventsource', 'htmlfile']})
def get_sockjs_router(port: int) -> sockjs.tornado.SockJSRouter:
sockjs_router._connection.port = port
return sockjs_router
| 46.056738
| 102
| 0.618032
|
4a1213f82d9716c937a88720a425630db61d1bf1
| 1,036
|
py
|
Python
|
tutorials/tensorflow2/infeed_outfeed/test/test_mnist_with_feeds.py
|
LRVerkin/tutorials
|
365757b0dee90f63a53851e40bfad790aca3cf8d
|
[
"MIT"
] | null | null | null |
tutorials/tensorflow2/infeed_outfeed/test/test_mnist_with_feeds.py
|
LRVerkin/tutorials
|
365757b0dee90f63a53851e40bfad790aca3cf8d
|
[
"MIT"
] | null | null | null |
tutorials/tensorflow2/infeed_outfeed/test/test_mnist_with_feeds.py
|
LRVerkin/tutorials
|
365757b0dee90f63a53851e40bfad790aca3cf8d
|
[
"MIT"
] | 1
|
2022-02-25T12:07:16.000Z
|
2022-02-25T12:07:16.000Z
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import pytest
from tutorials_tests.testing_util import SubProcessChecker
working_path = Path(__file__).parent.parent.joinpath("completed_code")
class TestWithFeeds(SubProcessChecker):
@pytest.mark.category2
@pytest.mark.ipus(1)
def test_with_feeds(self):
self.run_command("python3 mnist_with_feeds.py",
working_path,
"Time taken")
| 33.419355
| 74
| 0.72973
|
4a121495a8d36ac0cf2d0ffd560bebbf467108dc
| 644
|
py
|
Python
|
reverse-string/rev-string.py
|
purveshpatel511/rust-integrated-python
|
ce4ba1dd23d8d7f7077b9e6a4b9a1989808a505b
|
[
"MIT"
] | null | null | null |
reverse-string/rev-string.py
|
purveshpatel511/rust-integrated-python
|
ce4ba1dd23d8d7f7077b9e6a4b9a1989808a505b
|
[
"MIT"
] | null | null | null |
reverse-string/rev-string.py
|
purveshpatel511/rust-integrated-python
|
ce4ba1dd23d8d7f7077b9e6a4b9a1989808a505b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-04-11 21:47:53
# @Author : Your Name (you@example.org)
# @Link : link
# @Version : 1.0.0
import os, string, random
import revstring # rust lib
def reverse_string_python(value):
return value[::-1]
# Benchmark Code
iters = 1000000
value = "".join(random.choice(string.ascii_letters) for i in range(iters))
def test_reverse_string_python(benchmark):
benchmark(reverse_string_python, value)
def test_reverse_string_rust(benchmark):
benchmark(revstring.reverse_string_rust, value)
# def test_count_doubles_zip(benchmark):
# benchmark(count_doubles_zip, value)
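# Run the benchmarks (assumes pytest-benchmark is installed and the `revstring`
# Rust extension has been built and is importable):
#   pytest rev-string.py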
| 25.76
| 74
| 0.726708
|
4a12156f05c7e4a1c705dfe7a6004e572d0fcd78
| 618
|
py
|
Python
|
domino_conf.py
|
hashnfv/hashnfv-domino
|
eb2fbb1315e6489dd159c8227030d035bdeb1864
|
[
"Apache-2.0"
] | null | null | null |
domino_conf.py
|
hashnfv/hashnfv-domino
|
eb2fbb1315e6489dd159c8227030d035bdeb1864
|
[
"Apache-2.0"
] | null | null | null |
domino_conf.py
|
hashnfv/hashnfv-domino
|
eb2fbb1315e6489dd159c8227030d035bdeb1864
|
[
"Apache-2.0"
] | null | null | null |
logfile = None
LOGLEVEL = 'WARNING' #'WARNING' #ERROR, DEBUG, INFO
#Client Parameters
DOMINO_CLIENT_PORT = 9091
INTERACTIVE = 'FALSE'
CLIENT_SEQNO = 0
DOMINO_SERVER_IP = 'localhost'
DOMINO_CLI_PORT = 9100
UDID_DESIRED = '12345678123456781234567812345678'
LIST_SUPPORTED_TEMPLATES = ['tosca-nfv-v1.0']
DEFAULT_TOSCA_PUBFILE = './tosca-templates/tosca_helloworld_nfv.yaml'
TOSCA_RX_DIR = './toscafiles/'
#Server Parameters
DOMINO_SERVER_PORT = 9090
SERVER_UDID = 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
TOSCADIR = './toscafiles/'
TOSCA_DEFAULT_FNAME = 'template1.yaml'
SERVER_DBFILE = 'dominoserver.db'
SERVER_SEQNO = 0
| 24.72
| 69
| 0.791262
|
4a1217e5a8c66f1f56a1b93d8b5df6473fee7bfb
| 3,196
|
py
|
Python
|
ext/Outros/moduloteste.py
|
brunoalmeidamartins/pox
|
2c9f13ef53f7cf15c369d15ba4b1051036d74e00
|
[
"Apache-2.0"
] | null | null | null |
ext/Outros/moduloteste.py
|
brunoalmeidamartins/pox
|
2c9f13ef53f7cf15c369d15ba4b1051036d74e00
|
[
"Apache-2.0"
] | null | null | null |
ext/Outros/moduloteste.py
|
brunoalmeidamartins/pox
|
2c9f13ef53f7cf15c369d15ba4b1051036d74e00
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A super simple OpenFlow learning switch that installs rules for
each pair of L2 addresses.
"""
import json
# These next two imports are common POX convention
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
from pox.lib.addresses import IPAddr, EthAddr
# Even a simple usage of the logger is much nicer than print!
log = core.getLogger()
# This table maps (switch,MAC-addr) pairs to the port on 'switch' at
# which we last saw a packet *from* 'MAC-addr'.
# (In this case, we use a Connection object for the switch.)
table = {}
table2 = {}
# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this, so we make
# it selectable.
all_ports = of.OFPP_FLOOD
# Handle messages the switch has sent us because it has no
# matching rule.
def _handle_PacketIn (event):
packet = event.parsed
# Learn the source
table[(event.connection,packet.src)] = event.port
table2[(dpidToStr(event.dpid),packet.src)] = event.port
try:
    arquivo = open('tabela.json','w')
    arquivo.write(json.dumps({str(k): v for k, v in table2.items()}))
    arquivo.close()
except Exception as erro:
print(format(erro))
dst_port = table.get((event.connection,packet.dst))
if dst_port is None:
# We don't know where the destination is yet. So, we'll just
# send the packet out all ports (except the one it came in on!)
# and hope the destination is out there somewhere. :)
msg = of.ofp_packet_out(data = event.ofp)
msg.actions.append(of.ofp_action_output(port = all_ports))
event.connection.send(msg)
else:
# Since we know the switch ports for both the source and dest
# MACs, we can install rules for both directions.
msg = of.ofp_flow_mod()
msg.match.dl_dst = packet.src
msg.match.dl_src = packet.dst
msg.actions.append(of.ofp_action_output(port = event.port))
event.connection.send(msg)
# This is the packet that just came in -- we want to
# install the rule and also resend the packet.
msg = of.ofp_flow_mod()
msg.data = event.ofp # Forward the incoming packet
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port = dst_port))
event.connection.send(msg)
log.debug("Installing %s <-> %s" % (packet.src, packet.dst))
def launch (disable_flood = False):
global all_ports
if disable_flood:
all_ports = of.OFPP_ALL
core.openflow.addListenerByName("PacketIn", _handle_PacketIn)
log.info("Pair-Learning switch running.")
| 31.96
| 74
| 0.720901
|
4a1218950007dccab74e1f1e5b5ad32f1e76590a
| 470
|
py
|
Python
|
data/scripts/templates/object/draft_schematic/space/capacitor/shared_extended_life_battery_mk3.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/draft_schematic/space/capacitor/shared_extended_life_battery_mk3.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/draft_schematic/space/capacitor/shared_extended_life_battery_mk3.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/capacitor/shared_extended_life_battery_mk3.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.647059
| 96
| 0.738298
|
4a12196b72b7bee98747ed4d128c858fa624613d
| 6,497
|
py
|
Python
|
XFouth course/7th semester/Discrete modeling/lab6/main.py
|
tekcellat/University
|
9a0196a45c9cf33ac58018d636c3e4857eba0330
|
[
"MIT"
] | null | null | null |
XFouth course/7th semester/Discrete modeling/lab6/main.py
|
tekcellat/University
|
9a0196a45c9cf33ac58018d636c3e4857eba0330
|
[
"MIT"
] | null | null | null |
XFouth course/7th semester/Discrete modeling/lab6/main.py
|
tekcellat/University
|
9a0196a45c9cf33ac58018d636c3e4857eba0330
|
[
"MIT"
] | 7
|
2020-12-04T07:26:46.000Z
|
2022-03-08T17:47:47.000Z
|
import sys
from PyQt5 import uic
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QWidget
from generator import ConstGenerator, UniformGenerator, nr
from modeller import RequestGenerator, RequestProcessor, event_based_modelling
class MainWindow(QWidget):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self._ui = uic.loadUi("window.ui", self)
@property
def parameters(self):
u = self._ui
return {
'pg0_m': float(u.le_pg0_m.text()),
'pg1_m': float(u.le_pg1_m.text()),
'pg0_d': float(u.le_pg0_d.text()),
'pg1_d': float(u.le_pg1_d.text()),
'ev0_m': float(u.le_ev0_m.text()),
'ev1_m': float(u.le_ev1_m.text()),
'ev2_m': float(u.le_ev2_m.text()),
'ev0_d': float(u.le_ev0_d.text()),
'ev1_d': float(u.le_ev1_d.text()),
'ev2_d': float(u.le_ev2_d.text()),
'cid0_m': float(u.le_cid0_m.text()),
'cid1_m': float(u.le_cid1_m.text()),
'cid2_m': float(u.le_cid2_m.text()),
'cid3_m': float(u.le_cid3_m.text()),
'cid0_d': float(u.le_cid0_d.text()),
'cid1_d': float(u.le_cid1_d.text()),
'cid2_d': float(u.le_cid2_d.text()),
'cid3_d': float(u.le_cid3_d.text()),
'pcd0_m': float(u.le_pcd0_m.text()),
'pcd1_m': float(u.le_pcd1_m.text()),
'pcd0_d': float(u.le_pcd0_d.text()),
'pcd1_d': float(u.le_pcd1_d.text()),
'cc0_m': float(u.le_cc0_m.text()),
'cc0_d': float(u.le_cc0_d.text()),
'c_count': 10000
}
@pyqtSlot()
def on_pushButton_clicked(self):
print('+-------------------------------+')
procfmt = '|{0:13}|{1:5}|{2:5}|{3:5}|'
        print(procfmt.format('stage', 'reqs', 'drops', 'queue'))
devices = self.start_modelling(**self.parameters)
for dev in devices:
if type(dev) is RequestGenerator:
print(procfmt.format(dev.name, dev.requests, dev.dropped_requests, ''))
else:
print(procfmt.format(dev.name, dev.requests,
dev.dropped_requests, dev.queue_size))
print('+-------------------------------+')
print(procfmt.format('',
sum(dev.requests for dev in devices),
sum(dev.dropped_requests for dev in devices),
''))
u = self._ui
u.le_ev0_wt.setText('{:.2f}'.format(devices[2].max_waiting_time))
u.le_ev1_wt.setText('{:.2f}'.format(devices[3].max_waiting_time))
u.le_ev2_wt.setText('{:.2f}'.format(devices[4].max_waiting_time))
u.le_cid0_wt.setText('{:.2f}'.format(devices[5].max_waiting_time))
u.le_cid1_wt.setText('{:.2f}'.format(devices[6].max_waiting_time))
u.le_cid2_wt.setText('{:.2f}'.format(devices[7].max_waiting_time))
u.le_cid3_wt.setText('{:.2f}'.format(devices[8].max_waiting_time))
u.le_pcd0_wt.setText('{:.2f}'.format(devices[9].max_waiting_time))
u.le_pcd1_wt.setText('{:.2f}'.format(devices[10].max_waiting_time))
u.le_cc0_wt.setText('{:.2f}'.format(devices[11].max_waiting_time))
def start_modelling(self, pg0_m, pg1_m, pg0_d, pg1_d,
ev0_m, ev1_m, ev2_m, ev0_d, ev1_d, ev2_d,
cid0_m, cid1_m, cid2_m, cid3_m, cid0_d, cid1_d, cid2_d, cid3_d,
pcd0_m, pcd1_m, pcd0_d, pcd1_d,
cc0_m, cc0_d,
c_count):
random = nr.RandomState()
        passenger_generator0 = RequestGenerator(UniformGenerator(pg0_m, pg0_d, random),
                                                'sender1')
        passenger_generator1 = RequestGenerator(UniformGenerator(pg1_m, pg1_d, random),
                                                'sender2')
        passengers = (passenger_generator0, passenger_generator1)
        entrance_validator0 = RequestProcessor(UniformGenerator(ev0_m, ev0_d, random),
                                               'ticket desk1')
        entrance_validator1 = RequestProcessor(UniformGenerator(ev1_m, ev1_d, random),
                                               'ticket desk2')
        entrance_validator2 = RequestProcessor(UniformGenerator(ev2_m, ev2_d, random),
                                               'ticket desk3')
        entrance = (entrance_validator0, entrance_validator1, entrance_validator2)
        checkin_desk0 = RequestProcessor(UniformGenerator(cid0_m, cid0_d, random),
                                         'belt1')
        checkin_desk1 = RequestProcessor(UniformGenerator(cid1_m, cid1_d, random),
                                         'belt2')
        checkin_desk2 = RequestProcessor(UniformGenerator(cid2_m, cid2_d, random),
                                         'belt3')
        checkin_desk3 = RequestProcessor(UniformGenerator(cid3_m, cid3_d, random),
                                         'belt4')
        checkin = (checkin_desk0, checkin_desk1, checkin_desk2, checkin_desk3)
        passport_control_desk0 = RequestProcessor(UniformGenerator(pcd0_m, pcd0_d, random),
                                                  'control1', can_drop=True)
        passport_control_desk1 = RequestProcessor(UniformGenerator(pcd1_m, pcd1_d, random),
                                                  'control2', can_drop=True)
        passport_control = (passport_control_desk0, passport_control_desk1)
        customs_control = (RequestProcessor(UniformGenerator(cc0_m, cc0_d, random),
                                            '', can_drop=True),)
        plane = RequestProcessor(ConstGenerator(0), 'customs', is_exit=True)
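        # Wire the pipeline: senders -> ticket desks -> check-in belts ->
        # passport control -> customs -> plane (exit).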
for p in passengers: p.add_receivers(entrance)
for e in entrance: e.add_receivers(checkin)
for c in checkin: c.add_receivers(passport_control)
for p in passport_control: p.add_receivers(customs_control)
for cc in customs_control: cc.add_receiver(plane)
devices = passengers + entrance + checkin + passport_control + customs_control + (plane,)
event_based_modelling(devices, lambda: plane.processed_requests == c_count)
return devices
def main():
app = QApplication(sys.argv)
window = MainWindow()
window.show()
return app.exec()
if __name__ == '__main__':
sys.exit(main())
| 47.07971
| 97
| 0.567647
|
4a1219ee2b5e60e04a47677449f3160d4435b474
| 62,157
|
py
|
Python
|
TimeWrapper_JE/venv/Lib/site-packages/pygments/lexers/_mapping.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
TimeWrapper_JE/venv/Lib/site-packages/pygments/lexers/_mapping.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
TimeWrapper_JE/venv/Lib/site-packages/pygments/lexers/_mapping.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
    Lexer mapping definitions. This file is generated by itself. Every time
    you change something on a builtin lexer definition, run this script from
    the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'AMDGPULexer': ('pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'ArrowLexer': ('pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BareLexer': ('pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CddlLexer': ('pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DevicetreeLexer': ('pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'ExeclineLexer': ('pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FStarLexer': ('pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'FutharkLexer': ('pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GDScriptLexer': ('pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GcodeLexer': ('pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GraphvizLexer': ('pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', (), (), ()),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
'KuinLexer': ('pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LlvmMirBodyLexer': ('pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
'LlvmMirLexer': ('pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'MiniScriptLexer': ('pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MoselLexer': ('pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NestedTextLexer': ('pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NotmuchLexer': ('pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OmgIdlLexer': ('pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'PegLexer': ('pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PointlessLexer': ('pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PromQLLexer': ('pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PsyshConsoleLexer': ('pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'ReasonLexer': ('pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RideLexer': ('pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SieveLexer': ('pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SolidityLexer': ('pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TNTLexer': ('pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TealLexer': ('pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThingsDBLexer': ('pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TiddlyWiki5Lexer': ('pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts', '*.tsx'), ('text/x-typescript',)),
'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'UsdLexer': ('pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WatLexer': ('pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
'WebIDLLexer': ('pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'YangLexer': ('pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
'ZeekLexer': ('pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
'apdlexer': ('pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
}
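# Illustrative sketch (not part of the generated mapping): each LEXERS value
# unpacks as (module, name, aliases, filenames, mimetypes). A consumer could
# locate a lexer class like this (the helper name `_load_lexer` is hypothetical):
#
#     def _load_lexer(key):
#         module_name, name, aliases, filenames, mimetypes = LEXERS[key]
#         module = __import__(module_name, None, None, [key])
#         return getattr(module, key)
#
#     PythonLexer = _load_lexer('PythonLexer')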
if __name__ == '__main__': # pragma: no cover
import sys
import os
# lookup lexers
found_lexers = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
for root, dirs, files in os.walk('.'):
for filename in files:
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.lexers%s.%s' % (
root[1:].replace('/', '.'), filename[:-3])
print(module_name)
module = __import__(module_name, None, None, [''])
for lexer_name in module.__all__:
lexer = getattr(module, lexer_name)
found_lexers.append(
'%r: %r' % (lexer_name,
(module_name,
lexer.name,
tuple(lexer.aliases),
tuple(lexer.filenames),
tuple(lexer.mimetypes))))
# sort them to make the diff minimal
found_lexers.sort()
# extract useful sourcecode from this file
with open(__file__) as fp:
content = fp.read()
        # Replace CRLF with LF for Windows.
        #
        # Note that, originally, contributors should keep the newline style of
        # the master repository, for example by using some kind of automatic
        # EOL management, like `EolExtension
        # <https://www.mercurial-scm.org/wiki/EolExtension>`.
content = content.replace("\r\n", "\n")
header = content[:content.find('LEXERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
with open(__file__, 'w') as fp:
fp.write(header)
fp.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
avg_line_length: 110.403197 | max_line_length: 352 | alphanum_fraction: 0.572325

hexsha: 4a121a4d5b285c13c37d05cc2c85b8f37f9c84c9 | size: 4,351 | ext: py | lang: Python
max_stars_repo_path: python_modules/dagster/dagster_tests/core_tests/host_representation_tests/test_external_data.py
max_stars_repo_name: makotonium/dagster
max_stars_repo_head_hexsha: f5d56514b7e7c5bca28ea14060316d242f51b71b
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 4,606
max_stars_repo_stars_event_min_datetime: 2018-06-21T17:45:20.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T23:39:42.000Z
max_issues_repo_path: python_modules/dagster/dagster_tests/core_tests/host_representation_tests/test_external_data.py
max_issues_repo_name: makotonium/dagster
max_issues_repo_head_hexsha: f5d56514b7e7c5bca28ea14060316d242f51b71b
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 6,221
max_issues_repo_issues_event_min_datetime: 2018-06-12T04:36:01.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T21:43:05.000Z
max_forks_repo_path: python_modules/dagster/dagster_tests/core_tests/host_representation_tests/test_external_data.py
max_forks_repo_name: makotonium/dagster
max_forks_repo_head_hexsha: f5d56514b7e7c5bca28ea14060316d242f51b71b
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 619
max_forks_repo_forks_event_min_datetime: 2018-08-22T22:43:09.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T22:48:06.000Z

content:
from typing import Dict
from dagster import AssetKey, In, Out, pipeline
from dagster.core.decorator_utils import get_function_params
from dagster.core.definitions.decorators.op import _Op
from dagster.core.host_representation.external_data import (
ExternalAssetDependency,
ExternalAssetNode,
ExternalSensorData,
ExternalTargetData,
external_asset_graph_from_defs,
)
from dagster.serdes import deserialize_json_to_dagster_namedtuple
def asset(fn):
asset_name = fn.__name__
ins: Dict[str, In] = {}
for input_param in get_function_params(fn):
input_param_name = input_param.name
asset_key = AssetKey(input_param_name)
ins[input_param_name] = In(asset_key=asset_key)
out = Out(asset_key=AssetKey(asset_name))
return _Op(
name=asset_name,
ins=ins,
out=out,
)(fn)
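# Sketch of what the shim above produces (illustrative): decorating
#
#     @asset
#     def asset2(asset1): ...
#
# yields an op named "asset2" whose input "asset1" is keyed to
# AssetKey("asset1") and whose single output is keyed to AssetKey("asset2").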
def test_single_asset_pipeline():
@asset
def asset1():
return 1
@pipeline
def my_graph():
asset1()
external_asset_nodes = external_asset_graph_from_defs([my_graph])
assert external_asset_nodes == [
ExternalAssetNode(
asset_key=AssetKey("asset1"),
dependencies=[],
op_name="asset1",
op_description=None,
job_names=["my_graph"],
)
]
def test_two_asset_pipeline():
@asset
def asset1():
return 1
@asset
def asset2(asset1):
assert asset1 == 1
@pipeline
def my_graph():
asset2(asset1())
external_asset_nodes = external_asset_graph_from_defs([my_graph])
assert external_asset_nodes == [
ExternalAssetNode(
asset_key=AssetKey("asset1"),
dependencies=[],
op_name="asset1",
op_description=None,
job_names=["my_graph"],
),
ExternalAssetNode(
asset_key=AssetKey("asset2"),
dependencies=[
ExternalAssetDependency(upstream_asset_key=AssetKey("asset1"), input_name="asset1")
],
op_name="asset2",
op_description=None,
job_names=["my_graph"],
),
]
def test_cross_pipeline_asset_dependency():
@asset
def asset1():
return 1
@asset
def asset2(asset1):
assert asset1 == 1
@pipeline
def asset1_graph():
asset1()
@pipeline
def asset2_graph():
asset2() # pylint: disable=no-value-for-parameter
external_asset_nodes = external_asset_graph_from_defs([asset1_graph, asset2_graph])
assert external_asset_nodes == [
ExternalAssetNode(
asset_key=AssetKey("asset1"),
dependencies=[],
op_name="asset1",
op_description=None,
job_names=["asset1_graph"],
),
ExternalAssetNode(
asset_key=AssetKey("asset2"),
dependencies=[
ExternalAssetDependency(upstream_asset_key=AssetKey("asset1"), input_name="asset1")
],
op_name="asset2",
op_description=None,
job_names=["asset2_graph"],
),
]
def test_same_asset_in_multiple_pipelines():
@asset
def asset1():
return 1
@pipeline
def graph1():
asset1()
@pipeline
def graph2():
asset1()
external_asset_nodes = external_asset_graph_from_defs([graph1, graph2])
assert external_asset_nodes == [
ExternalAssetNode(
asset_key=AssetKey("asset1"),
dependencies=[],
op_name="asset1",
op_description=None,
job_names=["graph1", "graph2"],
),
]
def test_back_compat_external_sensor():
SERIALIZED_0_12_10_SENSOR = '{"__class__": "ExternalSensorData", "description": null, "min_interval": null, "mode": "default", "name": "my_sensor", "pipeline_name": "my_pipeline", "solid_selection": null}'
external_sensor_data = deserialize_json_to_dagster_namedtuple(SERIALIZED_0_12_10_SENSOR)
assert isinstance(external_sensor_data, ExternalSensorData)
assert len(external_sensor_data.target_dict) == 1
assert "my_pipeline" in external_sensor_data.target_dict
target = external_sensor_data.target_dict["my_pipeline"]
assert isinstance(target, ExternalTargetData)
assert target.pipeline_name == "my_pipeline"
avg_line_length: 26.530488 | max_line_length: 209 | alphanum_fraction: 0.633188

hexsha: 4a121b361d3a0b6952d9d12cba5d2c22bf74226f | size: 16,190 | ext: py | lang: Python
max_stars_repo_path: egs/heroico/asr/simple_v1/ctc_train.py
max_stars_repo_name: johnjosephmorgan/snowfall
max_stars_repo_head_hexsha: 604d789c0aed035626d6745e6d7a427168063cae
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null
max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: egs/heroico/asr/simple_v1/ctc_train.py
max_issues_repo_name: johnjosephmorgan/snowfall
max_issues_repo_head_hexsha: 604d789c0aed035626d6745e6d7a427168063cae
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null
max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: egs/heroico/asr/simple_v1/ctc_train.py
max_forks_repo_name: johnjosephmorgan/snowfall
max_forks_repo_head_hexsha: 604d789c0aed035626d6745e6d7a427168063cae
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null
max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

content:
#!/usr/bin/env python3
# Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
# Apache 2.0
import k2
import logging
import math
import numpy as np
import os
import sys
import torch
import torch.optim as optim
from datetime import datetime
from pathlib import Path
from torch.nn.utils import clip_grad_value_
from torch.utils.tensorboard import SummaryWriter
from typing import Dict, Optional, Tuple
from lhotse import CutSet
from lhotse.dataset import CutConcatenate, CutMix, K2SpeechRecognitionDataset, SingleCutSampler
from lhotse.utils import fix_random_seed
from snowfall.common import describe
from snowfall.common import get_phone_symbols
from snowfall.common import load_checkpoint, save_checkpoint
from snowfall.common import save_training_info
from snowfall.common import setup_logger
from snowfall.models import AcousticModel
from snowfall.models.tdnn_lstm import TdnnLstm1b
from snowfall.training.ctc_graph import CtcTrainingGraphCompiler
def get_tot_objf_and_num_frames(tot_scores: torch.Tensor,
frames_per_seq: torch.Tensor
) -> Tuple[float, int, int]:
    ''' Figures out the total score (log-prob) over all successful supervision segments
    (i.e. those for which the total score wasn't -infinity), and the corresponding
    number of frames of neural-net output.
    Args:
        tot_scores: a Torch tensor of shape (num_segments,) containing total scores
               from forward-backward
        frames_per_seq: a Torch tensor of shape (num_segments,) containing the number of
               frames for each segment
    Returns:
        A tuple of 3 scalar tensors: (tot_score, ok_frames, all_frames),
        where ok_frames is the number of frames for successful (finite) segments and
        all_frames is the number of frames for all segments (finite or not).
    '''
mask = torch.ne(tot_scores, -math.inf)
# finite_indexes is a tensor containing successful segment indexes, e.g.
# [ 0 1 3 4 5 ]
finite_indexes = torch.nonzero(mask).squeeze(1)
    if False:  # flip to True to debug segments whose total score is -infinity
bad_indexes = torch.nonzero(~mask).squeeze(1)
if bad_indexes.shape[0] > 0:
print("Bad indexes: ", bad_indexes, ", bad lengths: ",
frames_per_seq[bad_indexes], " vs. max length ",
torch.max(frames_per_seq), ", avg ",
(torch.sum(frames_per_seq) / frames_per_seq.numel()))
# print("finite_indexes = ", finite_indexes, ", tot_scores = ", tot_scores)
ok_frames = frames_per_seq[finite_indexes].sum()
all_frames = frames_per_seq.sum()
return (tot_scores[finite_indexes].sum(), ok_frames, all_frames)
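# Illustrative check (hypothetical values): with tot_scores = [-3.2, -inf, -1.5]
# and frames_per_seq = [10, 12, 8], the -inf segment is masked out, and the
# function returns (tot_score = -4.7, ok_frames = 18, all_frames = 30).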
def get_objf(batch: Dict,
model: AcousticModel,
device: torch.device,
graph_compiler: CtcTrainingGraphCompiler,
training: bool,
optimizer: Optional[torch.optim.Optimizer] = None):
feature = batch['inputs']
supervisions = batch['supervisions']
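    # Each row of supervision_segments is (sequence_idx, start_frame, num_frames);
    # start_frame and num_frames are divided by the model's subsampling factor so
    # that they index into the subsampled network output, not the input features.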
supervision_segments = torch.stack(
(supervisions['sequence_idx'],
torch.floor_divide(supervisions['start_frame'],
model.subsampling_factor),
torch.floor_divide(supervisions['num_frames'],
model.subsampling_factor)), 1).to(torch.int32)
indices = torch.argsort(supervision_segments[:, 2], descending=True)
supervision_segments = supervision_segments[indices]
texts = supervisions['text']
texts = [texts[idx] for idx in indices]
assert feature.ndim == 3
# print(supervision_segments[:, 1] + supervision_segments[:, 2])
feature = feature.to(device)
# at entry, feature is [N, T, C]
feature = feature.permute(0, 2, 1) # now feature is [N, C, T]
if training:
nnet_output = model(feature)
else:
with torch.no_grad():
nnet_output = model(feature)
# nnet_output is [N, C, T]
nnet_output = nnet_output.permute(0, 2, 1) # now nnet_output is [N, T, C]
decoding_graph = graph_compiler.compile(texts).to(device)
# nnet_output2 = nnet_output.clone()
# blank_bias = -7.0
# nnet_output2[:,:,0] += blank_bias
dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision_segments)
assert decoding_graph.is_cuda()
assert decoding_graph.device == device
assert nnet_output.device == device
target_graph = k2.intersect_dense(decoding_graph, dense_fsa_vec, 10.0)
tot_scores = target_graph.get_tot_scores(
log_semiring=True,
use_double_scores=True)
(tot_score, tot_frames,
all_frames) = get_tot_objf_and_num_frames(tot_scores,
supervision_segments[:, 2])
if training:
optimizer.zero_grad()
(-tot_score).backward()
clip_grad_value_(model.parameters(), 5.0)
optimizer.step()
    ans = (-tot_score.detach().cpu().item(), tot_frames.cpu().item(),
           all_frames.cpu().item())
return ans
def get_validation_objf(dataloader: torch.utils.data.DataLoader,
model: AcousticModel, device: torch.device,
graph_compiler: CtcTrainingGraphCompiler):
total_objf = 0.
total_frames = 0. # for display only
total_all_frames = 0. # all frames including those seqs that failed.
model.eval()
for batch_idx, batch in enumerate(dataloader):
objf, frames, all_frames = get_objf(batch, model, device,
graph_compiler, False)
total_objf += objf
total_frames += frames
total_all_frames += all_frames
return total_objf, total_frames, total_all_frames
def train_one_epoch(dataloader: torch.utils.data.DataLoader,
valid_dataloader: torch.utils.data.DataLoader,
model: AcousticModel, device: torch.device,
graph_compiler: CtcTrainingGraphCompiler,
optimizer: torch.optim.Optimizer,
current_epoch: int,
tb_writer: SummaryWriter,
num_epochs: int,
global_batch_idx_train: int):
total_objf, total_frames, total_all_frames = 0., 0., 0.
valid_average_objf = float('inf')
time_waiting_for_batch = 0
prev_timestamp = datetime.now()
model.train()
for batch_idx, batch in enumerate(dataloader):
global_batch_idx_train += 1
timestamp = datetime.now()
time_waiting_for_batch += (timestamp - prev_timestamp).total_seconds()
curr_batch_objf, curr_batch_frames, curr_batch_all_frames = \
get_objf(batch, model, device, graph_compiler, True, optimizer)
total_objf += curr_batch_objf
total_frames += curr_batch_frames
total_all_frames += curr_batch_all_frames
if batch_idx % 10 == 0:
logging.info(
'batch {}, epoch {}/{} '
'global average objf: {:.6f} over {} '
'frames ({:.1f}% kept), current batch average objf: {:.6f} over {} frames ({:.1f}% kept) '
'avg time waiting for batch {:.3f}s'.format(
batch_idx, current_epoch, num_epochs,
total_objf / total_frames, total_frames,
100.0 * total_frames / total_all_frames,
curr_batch_objf / (curr_batch_frames + 0.001),
curr_batch_frames,
100.0 * curr_batch_frames / curr_batch_all_frames,
time_waiting_for_batch / max(1, batch_idx)))
tb_writer.add_scalar('train/global_average_objf',
total_objf / total_frames, global_batch_idx_train)
tb_writer.add_scalar('train/current_batch_average_objf',
curr_batch_objf / (curr_batch_frames + 0.001),
global_batch_idx_train)
# if batch_idx >= 10:
# print("Exiting early to get profile info")
# sys.exit(0)
if batch_idx > 0 and batch_idx % 200 == 0:
total_valid_objf, total_valid_frames, total_valid_all_frames = get_validation_objf(
dataloader=valid_dataloader,
model=model,
device=device,
graph_compiler=graph_compiler)
valid_average_objf = total_valid_objf / total_valid_frames
model.train()
logging.info(
'Validation average objf: {:.6f} over {} frames ({:.1f}% kept)'
.format(valid_average_objf,
total_valid_frames,
100.0 * total_valid_frames / total_valid_all_frames))
tb_writer.add_scalar('train/global_valid_average_objf',
valid_average_objf,
global_batch_idx_train)
prev_timestamp = datetime.now()
return total_objf / total_frames, valid_average_objf, global_batch_idx_train
def main():
fix_random_seed(42)
start_epoch = 0
num_epochs = 8
exp_dir = 'exp-lstm-adam-ctc'
setup_logger('{}/log/log-train'.format(exp_dir))
tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard')
# load L, G, symbol_table
lang_dir = Path('data/lang')
phone_symbol_table = k2.SymbolTable.from_file(lang_dir / 'phones.txt')
word_symbol_table = k2.SymbolTable.from_file(lang_dir / 'words.txt')
logging.info("Loading L.fst")
if (lang_dir / 'Linv.pt').exists():
L_inv = k2.Fsa.from_dict(torch.load(lang_dir / 'Linv.pt'))
else:
with open(lang_dir / 'L.fst.txt') as f:
L = k2.Fsa.from_openfst(f.read(), acceptor=False)
L_inv = k2.arc_sort(L.invert_())
torch.save(L_inv.as_dict(), lang_dir / 'Linv.pt')
graph_compiler = CtcTrainingGraphCompiler(
L_inv=L_inv,
phones=phone_symbol_table,
words=word_symbol_table
)
phone_ids = get_phone_symbols(phone_symbol_table)
# load dataset
feature_dir = Path('exp/data')
logging.info("About to get train cuts")
cuts_train = CutSet.from_json(feature_dir /
'cuts_train.json.gz')
logging.info("About to get dev cuts")
cuts_dev = CutSet.from_json(feature_dir / 'cuts_devtest.json.gz')
logging.info("About to create train dataset")
train = K2SpeechRecognitionDataset(
cuts_train
)
train_sampler = SingleCutSampler(
cuts_train,
max_frames=90000,
shuffle=True,
)
logging.info("About to create train dataloader")
train_dl = torch.utils.data.DataLoader(
train,
sampler=train_sampler,
batch_size=None,
num_workers=4
)
logging.info("About to create dev dataset")
validate = K2SpeechRecognitionDataset(cuts_dev)
valid_sampler = SingleCutSampler(cuts_dev, max_frames=90000)
logging.info("About to create dev dataloader")
valid_dl = torch.utils.data.DataLoader(
validate,
sampler=valid_sampler,
batch_size=None,
num_workers=1
)
if not torch.cuda.is_available():
logging.error('No GPU detected!')
sys.exit(-1)
logging.info("About to create model")
device_id = 0
device = torch.device('cuda', device_id)
model = TdnnLstm1b(
num_features=80,
num_classes=len(phone_ids) + 1, # +1 for the blank symbol
subsampling_factor=4)
model.to(device)
describe(model)
learning_rate = 1e-3
optimizer = optim.AdamW(model.parameters(),
lr=learning_rate,
weight_decay=5e-4)
best_objf = np.inf
best_valid_objf = np.inf
best_epoch = start_epoch
best_model_path = os.path.join(exp_dir, 'best_model.pt')
best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')
global_batch_idx_train = 0 # for logging only
if start_epoch > 0:
model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(start_epoch - 1))
ckpt = load_checkpoint(filename=model_path, model=model, optimizer=optimizer)
best_objf = ckpt['objf']
best_valid_objf = ckpt['valid_objf']
global_batch_idx_train = ckpt['global_batch_idx_train']
logging.info(f"epoch = {ckpt['epoch']}, objf = {best_objf}, valid_objf = {best_valid_objf}")
for epoch in range(start_epoch, num_epochs):
train_sampler.set_epoch(epoch)
curr_learning_rate = 1e-3
# curr_learning_rate = learning_rate * pow(0.4, epoch)
# for param_group in optimizer.param_groups:
# param_group['lr'] = curr_learning_rate
tb_writer.add_scalar('learning_rate', curr_learning_rate, epoch)
logging.info('epoch {}, learning rate {}'.format(
epoch, curr_learning_rate))
objf, valid_objf, global_batch_idx_train = train_one_epoch(dataloader=train_dl,
valid_dataloader=valid_dl,
model=model,
device=device,
graph_compiler=graph_compiler,
optimizer=optimizer,
current_epoch=epoch,
tb_writer=tb_writer,
num_epochs=num_epochs,
global_batch_idx_train=global_batch_idx_train)
# the lower, the better
if valid_objf < best_valid_objf:
best_valid_objf = valid_objf
best_objf = objf
best_epoch = epoch
save_checkpoint(filename=best_model_path,
model=model,
epoch=epoch,
optimizer=None,
scheduler=None,
learning_rate=curr_learning_rate,
objf=objf,
valid_objf=valid_objf,
global_batch_idx_train=global_batch_idx_train)
save_training_info(filename=best_epoch_info_filename,
model_path=best_model_path,
current_epoch=epoch,
learning_rate=curr_learning_rate,
objf=best_objf,
best_objf=best_objf,
valid_objf=valid_objf,
best_valid_objf=best_valid_objf,
best_epoch=best_epoch)
# we always save the model for every epoch
model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))
save_checkpoint(filename=model_path,
model=model,
optimizer=optimizer,
scheduler=None,
epoch=epoch,
learning_rate=curr_learning_rate,
objf=objf,
valid_objf=valid_objf,
global_batch_idx_train=global_batch_idx_train)
epoch_info_filename = os.path.join(exp_dir,
'epoch-{}-info'.format(epoch))
save_training_info(filename=epoch_info_filename,
model_path=model_path,
current_epoch=epoch,
learning_rate=curr_learning_rate,
objf=objf,
best_objf=best_objf,
valid_objf=valid_objf,
best_valid_objf=best_valid_objf,
best_epoch=best_epoch)
logging.warning('Done')
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
if __name__ == '__main__':
main()
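# The script is meant to be run directly (no CLI arguments), e.g.:
#
#     python3 ./ctc_train.py
#
# after earlier stages have produced data/lang (L.fst.txt, phones.txt,
# words.txt) and exp/data (cuts_train.json.gz, cuts_devtest.json.gz).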
avg_line_length: 40.576441 | max_line_length: 113 | alphanum_fraction: 0.591044

hexsha: 4a121b4b8cf061919e3db4dbe358c35d25c05437 | size: 43 | ext: py | lang: Python
max_stars_repo_path: orkestra/exceptions.py
max_stars_repo_name: knowsuchagency/composer
max_stars_repo_head_hexsha: b422ed4048b4d421e5100ea1770cbed37c4fb158
max_stars_repo_licenses: ["MIT"] | max_stars_count: 37
max_stars_repo_stars_event_min_datetime: 2021-05-24T22:34:59.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-22T04:47:06.000Z
max_issues_repo_path: orkestra/exceptions.py
max_issues_repo_name: knowsuchagency/composer
max_issues_repo_head_hexsha: b422ed4048b4d421e5100ea1770cbed37c4fb158
max_issues_repo_licenses: ["MIT"] | max_issues_count: 21
max_issues_repo_issues_event_min_datetime: 2021-05-26T09:14:05.000Z | max_issues_repo_issues_event_max_datetime: 2021-06-15T08:08:55.000Z
max_forks_repo_path: orkestra/exceptions.py
max_forks_repo_name: knowsuchagency/composer
max_forks_repo_head_hexsha: b422ed4048b4d421e5100ea1770cbed37c4fb158
max_forks_repo_licenses: ["MIT"] | max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2021-06-22T09:51:39.000Z | max_forks_repo_forks_event_max_datetime: 2022-01-28T20:00:30.000Z

content:
class CompositionError(Exception):
...
avg_line_length: 14.333333 | max_line_length: 34 | alphanum_fraction: 0.697674

hexsha: 4a121b6ea2717513f0dc4d6acf29b62b5f5d0099 | size: 30,405 | ext: py | lang: Python
max_stars_repo_path: tensorlayer/cost.py
max_stars_repo_name: IAmSuyogJadhav/tensorlayer
max_stars_repo_head_hexsha: b9115e027f8a47f5b8c3c92ade30603560c5e987
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2019-09-04T07:24:03.000Z | max_stars_repo_stars_event_max_datetime: 2021-04-24T19:55:32.000Z
max_issues_repo_path: tensorlayer/cost.py
max_issues_repo_name: IAmSuyogJadhav/tensorlayer
max_issues_repo_head_hexsha: b9115e027f8a47f5b8c3c92ade30603560c5e987
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null
max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tensorlayer/cost.py
max_forks_repo_name: IAmSuyogJadhav/tensorlayer
max_forks_repo_head_hexsha: b9115e027f8a47f5b8c3c92ade30603560c5e987
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2018-08-12T20:06:21.000Z | max_forks_repo_forks_event_max_datetime: 2018-08-12T20:06:21.000Z

content:
# -*- coding: utf-8 -*-
import logging
import tensorflow as tf
__all__ = [
'cross_entropy',
'sigmoid_cross_entropy',
'binary_cross_entropy',
'mean_squared_error',
'normalized_mean_square_error',
'absolute_difference_error',
'dice_coe',
'dice_hard_coe',
'iou_coe',
'cross_entropy_seq',
'cross_entropy_seq_with_mask',
'cosine_similarity',
'li_regularizer',
'lo_regularizer',
'maxnorm_regularizer',
'maxnorm_o_regularizer',
'maxnorm_i_regularizer',
]
def cross_entropy(output, target, name=None):
"""Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, it implements
softmax internally. See ``tf.nn.sparse_softmax_cross_entropy_with_logits``.
Parameters
----------
output : Tensor
A batch of distribution with shape: [batch_size, num of classes].
target : Tensor
A batch of index with shape: [batch_size, ].
name : string
Name of this loss.
Examples
--------
>>> ce = tl.cost.cross_entropy(y_logits, y_target_logits, 'my_loss')
References
-----------
- About cross-entropy: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.
- The code is borrowed from: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.
"""
# try: # old
# return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, targets=target))
# except: # TF 1.0
if name is None:
raise Exception("Please give a unique name to tl.cost.cross_entropy for TF1.0+")
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output, name=name))
def sigmoid_cross_entropy(output, target, name=None):
"""Sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``.
Parameters
----------
output : Tensor
A batch of distribution with shape: [batch_size, num of classes].
target : Tensor
A batch of index with shape: [batch_size, ].
name : string
Name of this loss.
"""
# try: # TF 1.0
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output, name=name))
# except:
# return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, targets=target))
def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
"""Binary cross entropy operation.
Parameters
----------
output : Tensor
Tensor with type of `float32` or `float64`.
target : Tensor
The target distribution, format the same with `output`.
epsilon : float
A small value to avoid output to be zero.
name : str
An optional name to attach to this function.
References
-----------
- `ericjang-DRAW <https://github.com/ericjang/draw/blob/master/draw.py#L73>`__
"""
# from tensorflow.python.framework import ops
# with ops.op_scope([output, target], name, "bce_loss") as name:
# output = ops.convert_to_tensor(output, name="preds")
# target = ops.convert_to_tensor(targets, name="target")
with tf.name_scope(name):
return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) + (1. - target) * tf.log(1. - output + epsilon)), axis=1))
    # For brevity, let `x = output`, `z = target`. The binary cross entropy loss is
    #
    #     loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
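# ## Worked sketch (editor's addition): with output [[0.9, 0.1]] and target
# ## [[1.0, 0.0]], each term contributes -log(0.9), so the per-example sum is
# ## about 0.211 (up to the epsilon smoothing):
# o = tf.constant([[0.9, 0.1]])
# t = tf.constant([[1.0, 0.0]])
# with tf.Session() as sess:
#     print(sess.run(binary_cross_entropy(o, t)))  # ~0.211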
def mean_squared_error(output, target, is_mean=False, name="mean_squared_error"):
"""Return the TensorFlow expression of mean-square-error (L2) of two batch of data.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
is_mean : boolean
        Whether to compute the mean or the sum for each example.
- If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.
- If False, use ``tf.reduce_sum`` (default).
References
------------
- `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`__
"""
with tf.name_scope(name):
if output.get_shape().ndims == 2: # [batch_size, n_feature]
if is_mean:
mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
else:
mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
elif output.get_shape().ndims == 3: # [batch_size, w, h]
if is_mean:
mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2]))
else:
mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2]))
elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
if is_mean:
mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
else:
mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
else:
raise Exception("Unknow dimension")
return mse
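# ## Usage sketch (editor's addition): `is_mean` switches between averaging and
# ## summing the per-example squared error over the feature axis.
# o = tf.ones((2, 4))
# t = tf.zeros((2, 4))
# with tf.Session() as sess:
#     print(sess.run(mean_squared_error(o, t, is_mean=True)))   # 1.0
#     print(sess.run(mean_squared_error(o, t, is_mean=False)))  # 4.0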
def normalized_mean_square_error(output, target):
"""Return the TensorFlow expression of normalized mean-square-error of two distributions.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
"""
with tf.name_scope("mean_squared_error_loss"):
if output.get_shape().ndims == 2: # [batch_size, n_feature]
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
elif output.get_shape().ndims == 3: # [batch_size, w, h]
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2]))
nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2]))
        elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3]))
        else:
            raise Exception("Unknown dimension")
        nmse = tf.reduce_mean(nmse_a / nmse_b)
return nmse
def absolute_difference_error(output, target, is_mean=False):
"""Return the TensorFlow expression of absolute difference error (L1) of two batch of data.
Parameters
----------
output : Tensor
2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].
target : Tensor
The target distribution, format the same with `output`.
is_mean : boolean
        Whether to compute the mean or the sum for each example.
- If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.
- If False, use ``tf.reduce_sum`` (default).
"""
with tf.name_scope("mean_squared_error_loss"):
if output.get_shape().ndims == 2: # [batch_size, n_feature]
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), 1))
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), 1))
elif output.get_shape().ndims == 3: # [batch_size, w, h]
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [1, 2]))
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [1, 2]))
elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [1, 2, 3]))
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [1, 2, 3]))
else:
raise Exception("Unknow dimension")
return loss
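# ## Usage sketch (editor's addition): same reduction pattern as
# ## `mean_squared_error`, but with the L1 distance |output - target|.
# o = tf.fill([2, 4], 0.5)
# t = tf.zeros((2, 4))
# with tf.Session() as sess:
#     print(sess.run(absolute_difference_error(o, t, is_mean=False)))  # 2.0
#     print(sess.run(absolute_difference_error(o, t, is_mean=True)))   # 0.5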
def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5):
"""Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation
i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match.
Parameters
-----------
output : Tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : Tensor
The target distribution, format the same with `output`.
loss_type : str
``jaccard`` or ``sorensen``, default is ``jaccard``.
axis : tuple of int
All dimensions are reduced, default ``[1,2,3]``.
smooth : float
This small value will be added to the numerator and denominator.
- If both output and target are empty, it makes sure dice is 1.
        - If either output or target is empty (all pixels are background), dice = ``smooth/(small_value + smooth)``; if smooth is very small, dice is close to 0 (even when the image values are below the threshold), so a larger smooth yields a larger dice in this case.
Examples
---------
>>> outputs = tl.act.pixel_wise_softmax(network.outputs)
>>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
inse = tf.reduce_sum(output * target, axis=axis)
if loss_type == 'jaccard':
l = tf.reduce_sum(output * output, axis=axis)
r = tf.reduce_sum(target * target, axis=axis)
elif loss_type == 'sorensen':
l = tf.reduce_sum(output, axis=axis)
r = tf.reduce_sum(target, axis=axis)
else:
raise Exception("Unknow loss_type")
## old axis=[0,1,2,3]
# dice = 2 * (inse) / (l + r)
# epsilon = 1e-5
# dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1
## new haodong
dice = (2. * inse + smooth) / (l + r + smooth)
##
dice = tf.reduce_mean(dice)
return dice
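# ## Worked sketch (editor's addition): identical binary masks give a soft dice
# ## of ~1, disjoint masks give ~0 (`smooth` keeps the ratio finite).
# y = tf.ones((1, 4, 4, 1))
# p = tf.ones((1, 4, 4, 1))
# with tf.Session() as sess:
#     print(sess.run(dice_coe(p, y)))                 # ~1.0, perfect overlap
#     print(sess.run(dice_coe(tf.zeros_like(p), y)))  # ~0.0, no overlap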
def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Sørensen–Dice coefficient for comparing the similarity
of two batch of data, usually be used for binary image segmentation i.e. labels are binary.
The coefficient between 0 to 1, 1 if totally match.
Parameters
-----------
output : tensor
A distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
References
-----------
- `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__
"""
output = tf.cast(output > threshold, dtype=tf.float32)
target = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(output, target), axis=axis)
l = tf.reduce_sum(output, axis=axis)
r = tf.reduce_sum(target, axis=axis)
## old axis=[0,1,2,3]
# hard_dice = 2 * (inse) / (l + r)
# epsilon = 1e-5
# hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon)
## new haodong
hard_dice = (2. * inse + smooth) / (l + r + smooth)
##
hard_dice = tf.reduce_mean(hard_dice)
return hard_dice
def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
    - IoU cannot be used as a training loss; people usually use the dice coefficient for training, and IoU and hard-dice for evaluation.
"""
pre = tf.cast(output > threshold, dtype=tf.float32)
truth = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) # AND
union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis) # OR
## old axis=[0,1,2,3]
# epsilon = 1e-5
# batch_iou = inse / (union + epsilon)
## new haodong
batch_iou = (inse + smooth) / (union + smooth)
iou = tf.reduce_mean(batch_iou)
return iou #, pre, truth, inse, union
# ## test soft/hard dice and iou
# import numpy as np
# y = np.zeros((1,10,10,1))
# # y[0,0:5,0:5]=1.0
# o = np.zeros((1,10,10,1))
# # o[:,:,:,:] = 0 # what we want: dice=0 iou=0 OK
# # o[0,0:2,0:2]=0.3 # what we want: dice larger iou=0 OK
# # o[0,0:2,0:2]=0.6 # what we want: dice larger iou small OK
# # o[0,0:3,0:3]=0.6 # what we want: dice larger iou larger OK
# # o[0,0:3,0:3]=1 # what we want: dice larger iou same OK
# # o[0,0:5,0:5]=1 # what we want: dice=1 iou=1 OK
# # o[0,0:5,0:5]=0.3 # what we want: dice smaller iou=0 OK
# # o[0,0:5,0:5]=1e-2 # what we want: dice≈0 iou=0 OK
# # o[0,8:10,8:10]=1.0 # what we want: dice=0 iou=0 OK
# # o[0,8:10,8:10]=1e-10 # what we want: dice=0 iou=0 OK
# # y[:,:,:,:] = o[:,:,:,:] = 0 # what we want: dice=1 iou=1 OK
# ## why in u-net, dice=1 hard-dice=1 iou=1 exist?? print bug?
#
# d = dice_coe(o, y, 'jaccard', smooth=1.)
# hd = dice_hard_coe(o, y, smooth=1e-5)
# i = iou_coe(o, y, smooth=1e-5)
# sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
# # sess.run(tf.local_variables_initializer())
# print(sess.run([d,hd,i]))
# # p, t, i, u = sess.run([pre, truth, inse, union])
# # import pprint
# # pprint.pprint(((y>0.5)*(o>0.5)).astype(int).tolist())
# # pprint.pprint(p.tolist())
# # pprint.pprint(t.tolist())
# # pprint.pprint(i)
# # pprint.pprint(u)
# exit()
def cross_entropy_seq(logits, target_seqs, batch_size=None): #, batch_size=1, num_steps=None):
"""Returns the expression of cross-entropy of two sequences, implement
softmax internally. Normally be used for fixed length RNN outputs, see `PTB example <https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.
Parameters
----------
logits : Tensor
2D tensor with shape of `[batch_size * n_steps, n_classes]`.
target_seqs : Tensor
        The target sequence, 2D tensor `[batch_size, n_steps]`; if the number of steps is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead.
batch_size : None or int.
Whether to divide the cost by batch size.
        - If integer, the returned cost will be divided by `batch_size`.
        - If None (default), the returned cost will not be divided by anything.
Examples
--------
    >>> see `PTB example <https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__ for more details
>>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> targets = tf.placeholder(tf.int32, [batch_size, n_steps])
>>> # build the network
>>> print(net.outputs)
... (batch_size * n_steps, n_classes)
>>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)
"""
# try: # TF 1.0
sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example
# except:
# sequence_loss_by_example_fn = tf.nn.seq2seq.sequence_loss_by_example
loss = sequence_loss_by_example_fn([logits], [tf.reshape(target_seqs, [-1])], [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)])
# [tf.ones([batch_size * num_steps])])
cost = tf.reduce_sum(loss) #/ batch_size
if batch_size is not None:
cost = cost / batch_size
return cost
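# ## Usage sketch (editor's addition, assuming TF 1.x with tf.contrib): logits
# ## are the flattened [batch_size * n_steps, n_classes] predictions.
# batch, steps, classes = 2, 3, 5
# logits = tf.zeros((batch * steps, classes))         # uniform predictions
# targets = tf.zeros((batch, steps), dtype=tf.int32)
# with tf.Session() as sess:
#     print(sess.run(cross_entropy_seq(logits, targets, batch_size=batch)))
#     # = steps * log(classes) ≈ 3 * 1.609 ≈ 4.83 per example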
def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
"""Returns the expression of cross-entropy of two sequences, implement
softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output.
Parameters
-----------
logits : Tensor
        2D tensor with shape of [batch_size * ?, n_classes], where `?` is a dynamic number of steps per example.
        - Can be obtained from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.
target_seqs : Tensor
        Tensor of int, e.g. word IDs, with shape [batch_size, ?], where `?` is a dynamic number of steps per example.
input_mask : Tensor
The mask to compute loss, it has the same size with `target_seqs`, normally 0 or 1.
return_details : boolean
Whether to return detailed losses.
- If False (default), only returns the loss.
- If True, returns the loss, losses, weights and targets (see source code).
Examples
--------
>>> batch_size = 64
>>> vocab_size = 10000
>>> embedding_size = 256
>>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input")
>>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target")
>>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="mask")
>>> net = tl.layers.EmbeddingInputlayer(
... inputs = input_seqs,
... vocabulary_size = vocab_size,
... embedding_size = embedding_size,
... name = 'seq_embedding')
>>> net = tl.layers.DynamicRNNLayer(net,
... cell_fn = tf.contrib.rnn.BasicLSTMCell,
... n_hidden = embedding_size,
... dropout = (0.7 if is_train else None),
... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),
... return_seq_2d = True,
... name = 'dynamicrnn')
>>> print(net.outputs)
... (?, 256)
>>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name="output")
>>> print(net.outputs)
... (?, 10000)
>>> loss = tl.cost.cross_entropy_seq_with_mask(net.outputs, target_seqs, input_mask)
"""
targets = tf.reshape(target_seqs, [-1]) # to one vector
weights = tf.to_float(tf.reshape(input_mask, [-1])) # to one vector like targets
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
#losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others
# try: ## TF1.0
loss = tf.divide(
tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !!
tf.reduce_sum(weights),
name="seq_loss_with_mask")
# except: ## TF0.12
# loss = tf.div(tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !!
# tf.reduce_sum(weights),
# name="seq_loss_with_mask")
if return_details:
return loss, losses, weights, targets
else:
return loss
def cosine_similarity(v1, v2):
"""Cosine similarity [-1, 1].
Parameters
----------
v1, v2 : Tensor
Tensor with the same shape [batch_size, n_feature].
Returns
-------
Tensor
a tensor of shape [batch_size].
References
----------
- `<https://en.wikipedia.org/wiki/Cosine_similarity>`__.
"""
# try: ## TF1.0
cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
# except: ## TF0.12
# cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
return cost
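# ## Worked sketch (editor's addition): identical vectors score 1.0,
# ## orthogonal vectors score 0.0.
# v1 = tf.constant([[1.0, 0.0], [1.0, 0.0]])
# v2 = tf.constant([[1.0, 0.0], [0.0, 1.0]])
# with tf.Session() as sess:
#     print(sess.run(cosine_similarity(v1, v2)))  # [1.0, 0.0]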
## Regularization Functions
def li_regularizer(scale, scope=None):
"""Li regularization removes the neurons of previous layer. The `i` represents `inputs`.
Returns a function that can be used to apply group li regularization to weights.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: str
An optional scope name for this function.
Returns
--------
    A function with signature `li(weights)` that applies Li regularization.
Raises
------
ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops
# from tensorflow.python.platform import tf_logging as logging
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
if scale >= 1.:
raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def li(weights):
"""Applies li regularization to weights."""
with tf.name_scope('li_regularizer') as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))), name=scope)
return li
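# ## Usage sketch (editor's addition): the group-Li penalty is the sum of the
# ## row-wise L2 norms of the weight matrix, scaled by `scale`.
# W = tf.constant([[3.0, 4.0], [0.0, 0.0]])  # row norms: 5 and 0
# reg = li_regularizer(0.1)
# with tf.Session() as sess:
#     print(sess.run(reg(W)))  # 0.1 * (5 + 0) = 0.5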
def lo_regularizer(scale):
"""Lo regularization removes the neurons of current layer. The `o` represents `outputs`
Returns a function that can be used to apply group lo regularization to weights.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
-------
    A function with signature `lo(weights, name=None)` that applies Lo regularization.
Raises
------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops
# from tensorflow.python.platform import tf_logging as logging
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
if scale >= 1.:
raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def lo(weights, name='lo_regularizer'):
"""Applies group column regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 0))), name=scope)
return lo
def maxnorm_regularizer(scale=1.0):
"""Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.
More about max-norm, see `wiki-max norm <https://en.wikipedia.org/wiki/Matrix_norm#Max_norm>`_.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
    A function with signature `mn(weights, name=None)` that applies max-norm regularization.
Raises
--------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn(weights, name='max_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
# if tf.__version__ <= '0.12':
# standard_ops_fn = standard_ops.mul
# else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope)
return mn
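# ## Usage sketch (editor's addition): the max-norm penalty is simply
# ## scale * max(|w|) over all entries of the weight matrix.
# W = tf.constant([[-3.0, 2.0], [1.0, 0.5]])
# mn_fn = maxnorm_regularizer(0.1)
# with tf.Session() as sess:
#     print(sess.run(mn_fn(W)))  # 0.1 * 3.0 = 0.3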
def maxnorm_o_regularizer(scale):
"""Max-norm output regularization removes the neurons of current layer.
Returns a function that can be used to apply max-norm regularization to each column of weight matrix.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
    A function with signature `mn_o(weights, name=None)` that applies max-norm regularization to each column of the weight matrix.
Raises
---------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn_o(weights, name='maxnorm_o_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
if tf.__version__ <= '0.12':
standard_ops_fn = standard_ops.mul
else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)), name=scope)
return mn_o
def maxnorm_i_regularizer(scale):
"""Max-norm input regularization removes the neurons of previous layer.
Returns a function that can be used to apply max-norm regularization to each row of weight matrix.
The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.
Parameters
----------
scale : float
A scalar multiplier `Tensor`. 0.0 disables the regularizer.
Returns
---------
    A function with signature `mn_i(weights, name=None)` that applies max-norm regularization to each row of the weight matrix.
Raises
---------
ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.
"""
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
# if scale >= 1.:
# raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %
# scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _, name=None: None
def mn_i(weights, name='maxnorm_i_regularizer'):
"""Applies max-norm regularization to weights."""
with tf.name_scope(name) as scope:
my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
if tf.__version__ <= '0.12':
standard_ops_fn = standard_ops.mul
else:
standard_ops_fn = standard_ops.multiply
return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)), name=scope)
return mn_i
| 41.032389
| 273
| 0.63779
|
4a121b7646482586436886ba5d751ade670f1803
| 36,741
|
py
|
Python
|
tests/client_test.py
|
trygveaa/matrix-nio
|
8208a61674e383279c063225fd5512ecd02acce9
|
[
"Apache-2.0"
] | null | null | null |
tests/client_test.py
|
trygveaa/matrix-nio
|
8208a61674e383279c063225fd5512ecd02acce9
|
[
"Apache-2.0"
] | null | null | null |
tests/client_test.py
|
trygveaa/matrix-nio
|
8208a61674e383279c063225fd5512ecd02acce9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
from uuid import uuid4
import pytest
from helpers import FrameFactory, ephemeral, ephemeral_dir, faker
from nio import (Client, DeviceList, DeviceOneTimeKeyCount, DownloadResponse,
EncryptionError,
HttpClient, JoinedMembersResponse, KeysQueryResponse,
KeysUploadResponse, LocalProtocolError, LoginResponse,
LogoutResponse, MegolmEvent, ProfileGetAvatarResponse,
ProfileGetDisplayNameResponse, ProfileGetResponse,
ProfileSetAvatarResponse, ProfileSetDisplayNameResponse,
RoomCreateResponse,
RoomEncryptionEvent, RoomForgetResponse, RoomInfo,
RoomKeyRequestResponse, RoomMember, RoomMemberEvent, Rooms,
RoomSummary, RoomTypingResponse, RoomRedactResponse,
ShareGroupSessionResponse, SyncResponse,
Timeline, ThumbnailResponse, TransportType, TypingNoticeEvent,
InviteMemberEvent, InviteInfo, ClientConfig)
from nio.event_builders import ToDeviceMessage
HOST = "example.org"
USER = "example"
DEVICE_ID = "DEVICEID"
BOB_ID = "@bob:example.org"
TEST_ROOM_ID = "!testroom:example.org"
TEST_EVENT_ID = "$15163622445EBvZJ:localhost"
ALICE_ID = "@alice:example.org"
ALICE_DEVICE_ID = "JLAFKJWSCS"
CAROL_ID = "@carol:example.org"
@pytest.fixture
def synced_client(tempdir):
http_client = HttpClient("example.org", "ephemeral", "DEVICEID", tempdir)
http_client.connect(TransportType.HTTP2)
http_client.login("1234")
http_client.receive(TestClass().login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
http_client.sync()
http_client.receive(TestClass().sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
return http_client
class TestClass(object):
example_response_headers = [
(':status', '200'),
('server', 'fake-serv/0.1.0')
]
@property
def login_response(self):
return LoginResponse("@ephemeral:example.org", "DEVICEID", "abc123")
@property
def logout_response(self):
return LogoutResponse()
@staticmethod
def _load_response(filename):
with open(filename) as f:
return json.loads(f.read())
@staticmethod
def _load_byte_response(filename):
with open(filename, "rb") as f:
return f.read()
@property
def login_byte_response(self):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=1
)
login_body = json.dumps({
"user_id": "@ephemeral:example.org",
"access_token": "ABCD",
"device_id": "DEVICEID",
}).encode("utf-8")
data = frame_factory.build_data_frame(
data=login_body,
stream_id=1,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
@property
def sync_byte_response(self):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=3
)
body = self._load_byte_response("tests/data/sync.json")
data = frame_factory.build_data_frame(
data=body,
stream_id=3,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
def file_byte_response(self, stream_id=5, header_filename=""):
frame_factory = FrameFactory()
headers = self.example_response_headers + [
("content-type", "image/png")
]
if header_filename:
headers.append(
(
"content-disposition",
'inline; filename="{}"'.format(header_filename),
),
)
f = frame_factory.build_headers_frame(
headers=headers, stream_id=stream_id
)
body = self._load_byte_response("tests/data/file_response")
data = frame_factory.build_data_frame(
data=body,
stream_id=stream_id,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
def empty_response(self, stream_id=5):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=stream_id
)
body = b"{}"
data = frame_factory.build_data_frame(
data=body,
stream_id=stream_id,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
def room_id_response(self, stream_id=5, room_id=TEST_ROOM_ID):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=stream_id
)
body = json.dumps({"room_id": room_id}).encode()
data = frame_factory.build_data_frame(
data=body,
stream_id=stream_id,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
def event_id_response(self, stream_id=5, event_id=TEST_EVENT_ID):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=stream_id
)
body = json.dumps({"event_id": event_id}).encode()
data = frame_factory.build_data_frame(
data=body,
stream_id=stream_id,
flags=['END_STREAM'],
)
return f.serialize() + data.serialize()
def get_displayname_byte_response(self, displayname, stream_id=5):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=stream_id
)
body = json.dumps({"displayname": displayname}).encode("utf-8")
data = frame_factory.build_data_frame(
data=body,
stream_id=stream_id,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
def get_avatar_byte_response(self, avatar_url, stream_id=5):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=stream_id
)
body = json.dumps({"avatar_url": avatar_url}).encode("utf-8")
data = frame_factory.build_data_frame(
data=body,
stream_id=stream_id,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
def get_profile_byte_response(self, displayname, avatar_url, stream_id=5):
frame_factory = FrameFactory()
f = frame_factory.build_headers_frame(
headers=self.example_response_headers, stream_id=stream_id
)
body = json.dumps(
{"displayname": displayname, "avatar_url": avatar_url}
).encode("utf-8")
data = frame_factory.build_data_frame(
data=body,
stream_id=stream_id,
flags=['END_STREAM']
)
return f.serialize() + data.serialize()
@property
def sync_response(self):
timeline = Timeline(
[
RoomMemberEvent(
{"event_id": "event_id_1",
"sender": ALICE_ID,
"origin_server_ts": 1516809890615},
ALICE_ID,
"join",
None,
{"membership": "join"}
),
RoomMemberEvent(
{"event_id": "event_id_2",
"sender": ALICE_ID,
"origin_server_ts": 1516809890615},
CAROL_ID,
"invite",
None,
{"membership": "invite"},
),
RoomEncryptionEvent(
{
"event_id": "event_id_3",
"sender": ALICE_ID,
"origin_server_ts": 1516809890615
}
)
],
False,
"prev_batch_token"
)
test_room_info = RoomInfo(
timeline,
[],
[TypingNoticeEvent([ALICE_ID])],
[],
RoomSummary(invited_member_count=1, joined_member_count=2),
)
rooms = Rooms(
{},
{
TEST_ROOM_ID: test_room_info
},
{}
)
return SyncResponse(
"token123",
rooms,
DeviceOneTimeKeyCount(49, 50),
DeviceList([ALICE_ID], []),
[
RoomEncryptionEvent(
{
"event_id": "event_id_2",
"sender": ALICE_ID,
"origin_server_ts": 1516809890615
}
)
]
)
@property
def sync_invite_response(self):
state = [
InviteMemberEvent(
{},
"@BOB:example.org",
ALICE_ID,
"invite",
None,
{
"membership": "invite",
"display_name": None,
}
)
]
test_room_info = InviteInfo(state)
rooms = Rooms(
{
TEST_ROOM_ID: test_room_info
},
{},
{}
)
return SyncResponse(
"token123",
rooms,
DeviceOneTimeKeyCount(49, 50),
DeviceList([ALICE_ID], []),
[]
)
@property
def downgrade_sync(self):
timeline = Timeline(
[
RoomMemberEvent(
{"event_id": "event_id_1",
"sender": ALICE_ID,
"origin_server_ts": 1516809890615},
ALICE_ID,
"join",
None,
{"membership": "join"}
),
],
False,
"prev_batch_token"
)
test_room_info = RoomInfo(timeline, [], [], [], RoomSummary(1, 2, []))
rooms = Rooms(
{},
{
TEST_ROOM_ID: test_room_info
},
{}
)
return SyncResponse(
"token123",
rooms,
DeviceOneTimeKeyCount(49, 50),
DeviceList([ALICE_ID], []),
[]
)
@property
def second_sync(self):
timeline = Timeline(
[
RoomMemberEvent(
{"event_id": "event_id_1",
"sender": ALICE_ID,
"origin_server_ts": 1516809890615},
ALICE_ID,
"join",
None,
{"membership": "join"}
),
RoomEncryptionEvent(
{
"event_id": "event_id_2",
"sender": ALICE_ID,
"origin_server_ts": 1516809890615
}
)
],
True,
"prev_batch_token"
)
test_room_info = RoomInfo(timeline, [], [], [], RoomSummary(1, 2, []))
rooms = Rooms(
{},
{
TEST_ROOM_ID: test_room_info
},
{}
)
return SyncResponse(
"token123",
rooms,
DeviceOneTimeKeyCount(49, 50),
DeviceList([], []),
[]
)
@property
def keys_query_response(self):
parsed_dict = TestClass._load_response(
"tests/data/keys_query.json")
return KeysQueryResponse.from_dict(parsed_dict)
@property
def joined_members(self):
return JoinedMembersResponse(
[
RoomMember(BOB_ID, None, None), # joined
RoomMember(ALICE_ID, None, None), # joined
RoomMember(CAROL_ID, None, None), # invited
],
TEST_ROOM_ID
)
def test_client_protocol_error(self):
client = Client(USER, DEVICE_ID)
with pytest.raises(LocalProtocolError):
client.olm_account_shared
with pytest.raises(LocalProtocolError):
client.blacklist_device(faker.olm_device())
with pytest.raises(LocalProtocolError):
client.unblacklist_device(faker.olm_device())
with pytest.raises(LocalProtocolError):
client.verify_device(faker.olm_device())
with pytest.raises(LocalProtocolError):
client.unverify_device(faker.olm_device())
with pytest.raises(LocalProtocolError):
client.decrypt_event(None)
with pytest.raises(LocalProtocolError):
client.decrypt_event(None)
with pytest.raises(LocalProtocolError):
client.device_store
client = HttpClient(HOST, USER, DEVICE_ID)
with pytest.raises(LocalProtocolError):
client.share_group_session(None)
with pytest.raises(LocalProtocolError):
client.keys_claim(None)
with pytest.raises(LocalProtocolError):
client.keys_query(None)
def test_client_create(self, client):
assert isinstance(client, Client)
assert not client.store
def test_client_invalid_response(self, client):
with pytest.raises(ValueError):
client.receive_response(None)
def test_client_login(self, client):
assert not client.access_token
assert not client.store
assert not client.olm
client.receive_response(self.login_response)
assert client.access_token
assert client.store
assert client.olm
def test_client_logout(self, client):
client.receive_response(self.login_response)
assert client.access_token
client.receive_response(self.logout_response)
assert client.access_token == ""
def test_client_account_sharing(self, client):
client.receive_response(self.login_response)
with pytest.raises(ValueError):
client.decrypt_event(None)
assert not client.olm_account_shared
assert client.should_upload_keys
assert client.device_store
client.receive_response(KeysUploadResponse(49, 49))
assert client.should_upload_keys
client.receive_response(KeysUploadResponse(50, 50))
assert not client.should_upload_keys
def test_client_room_creation(self, client):
client.receive_response(self.login_response)
client.receive_response(KeysUploadResponse(50, 50))
assert not client.should_query_keys
client.receive_response(self.sync_response)
assert client.rooms[TEST_ROOM_ID]
room = client.rooms[TEST_ROOM_ID]
assert room.encrypted
assert client.should_query_keys
def test_device_store(self, tempdir):
client = Client("ephemeral", "DEVICEID", tempdir)
client.receive_response(self.login_response)
client.receive_response(KeysUploadResponse(50, 50))
assert not client.should_query_keys
client.receive_response(self.sync_response)
client.receive_response(self.keys_query_response)
assert list(client.device_store.users) == [ALICE_ID, CAROL_ID]
alice_device = client.device_store[ALICE_ID][ALICE_DEVICE_ID]
assert alice_device
client = Client("ephemeral", "DEVICEID", tempdir)
client.receive_response(self.login_response)
assert list(client.device_store.users) == [ALICE_ID]
alice_device = client.device_store[ALICE_ID][ALICE_DEVICE_ID]
assert alice_device
def test_client_key_query(self, client):
assert not client.should_query_keys
client.receive_response(self.login_response)
client.receive_response(KeysUploadResponse(50, 50))
assert not client.should_query_keys
client.receive_response(self.sync_response)
assert not client.device_store.users
assert client.rooms[TEST_ROOM_ID]
room = client.rooms[TEST_ROOM_ID]
assert room.encrypted
assert room.summary
assert len(room.users) == 2
assert room.member_count == 3
assert room.summary.invited_member_count == 1
assert room.summary.joined_member_count == 2
assert client.should_query_keys
assert not client.device_store.users
client.receive_response(self.keys_query_response)
assert not client.should_query_keys
assert client.device_store.users
assert not room.members_synced
client.receive_response(self.joined_members)
assert room.members_synced
assert client.should_query_keys
assert client.users_for_key_query == {BOB_ID}
@ephemeral
def test_query_rule(self):
client = Client("ephemeral", "DEVICEID", ephemeral_dir)
client.receive_response(self.login_response)
assert client.store is not None
client.receive_response(KeysUploadResponse(50, 50))
assert not client.should_query_keys
client.receive_response(self.sync_response)
assert client.should_query_keys
client.receive_response(self.keys_query_response)
assert client.olm.tracked_users == {ALICE_ID, CAROL_ID}
assert list(client.device_store.users) == [ALICE_ID, CAROL_ID]
assert not client.should_query_keys
del client
client = Client("ephemeral", "DEVICEID", ephemeral_dir)
client.receive_response(self.login_response)
assert not client.should_upload_keys
assert not client.should_query_keys
assert list(client.device_store.users) == [ALICE_ID]
assert client.device_store.active_user_devices(ALICE_ID)
alice_device = client.device_store[ALICE_ID][ALICE_DEVICE_ID]
assert alice_device
client.receive_response(self.second_sync)
assert client.should_query_keys
        assert client.users_for_key_query == {ALICE_ID}
        client.receive_response(self.joined_members)
        assert client.users_for_key_query == {ALICE_ID, BOB_ID}
client.receive_response(self.keys_query_response)
assert client.olm.tracked_users == {ALICE_ID, CAROL_ID}
assert client.users_for_key_query == {BOB_ID}
assert client.should_query_keys
@ephemeral
def test_early_store_loading(self):
client = Client("ephemeral")
with pytest.raises(LocalProtocolError):
client.load_store()
client = Client("ephemeral", store_path=ephemeral_dir)
client.user_id = "@ephemeral:example.org"
with pytest.raises(LocalProtocolError):
client.load_store()
client.user_id = None
client.device_id = "DEVICEID"
with pytest.raises(LocalProtocolError):
client.load_store()
client.receive_response(self.login_response)
del client
client = Client("ephemeral", "DEVICEID", ephemeral_dir)
client.user_id = "@ephemeral:example.org"
assert not client.store
assert not client.olm
client.load_store()
assert client.store
assert client.olm
def test_marking_sessions_as_shared(self, client):
client.receive_response(self.login_response)
client.receive_response(self.sync_response)
client.receive_response(self.joined_members)
client.receive_response(self.keys_query_response)
room = client.rooms[TEST_ROOM_ID]
assert room.encrypted
assert len(room.users) == 3
assert ALICE_ID in client.device_store.users
assert BOB_ID not in client.device_store.users
with pytest.raises(EncryptionError):
client.olm.share_group_session(TEST_ROOM_ID, room.users)
shared_with, to_device = client.olm.share_group_session(
TEST_ROOM_ID,
room.users,
True
)
session = client.olm.outbound_group_sessions[TEST_ROOM_ID]
assert (ALICE_ID, ALICE_DEVICE_ID) in session.users_ignored
response = ShareGroupSessionResponse.from_dict({}, TEST_ROOM_ID, set())
client.receive_response(response)
assert session.shared
def test_storing_room_encryption_state(self, client):
client.receive_response(self.login_response)
assert not client.encrypted_rooms
client.receive_response(self.sync_response)
assert TEST_ROOM_ID in client.encrypted_rooms
encrypted_rooms = client.store.load_encrypted_rooms()
assert TEST_ROOM_ID in encrypted_rooms
client2 = Client(client.user, client.device_id, client.store_path)
client2.receive_response(self.login_response)
assert TEST_ROOM_ID in client2.encrypted_rooms
client2.receive_response(self.downgrade_sync)
room = client2.rooms[TEST_ROOM_ID]
assert room.encrypted
def test_http_client_login(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
def test_http_client_sync(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.sync()
http_client.receive(self.sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
def test_http_client_keys_query(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.sync()
http_client.receive(self.sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
event = MegolmEvent.from_dict(
self._load_response("tests/data/events/megolm.json")
)
http_client.request_room_key(event)
http_client.receive(self.empty_response(5))
response = http_client.next_response()
assert isinstance(response, RoomKeyRequestResponse)
assert ("X3lUlvLELLYxeTx4yOVu6UDpasGEVO0Jbu+QFnm0cKQ" in
http_client.outgoing_key_requests)
def test_http_client_room_create(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.sync()
http_client.receive(self.sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.room_create()
http_client.receive(self.room_id_response(5))
response = http_client.next_response()
assert isinstance(response, RoomCreateResponse)
assert response.room_id == TEST_ROOM_ID
def test_http_client_room_forget(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.sync()
http_client.receive(self.sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
room_id = next(iter(http_client.rooms))
_, _ = http_client.room_forget(room_id)
http_client.receive(self.empty_response(5))
response = http_client.next_response()
assert isinstance(response, RoomForgetResponse)
def test_http_client_room_redact(self, synced_client):
room_id = next(iter(synced_client.rooms))
event_id = "$15163622445EBvZJ:localhost"
tx_id = uuid4()
reason = "for no reason"
synced_client.room_redact(room_id, event_id, reason, tx_id)
synced_client.receive(self.event_id_response(5))
response = synced_client.next_response()
assert isinstance(response, RoomRedactResponse)
def test_http_client_room_typing(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.sync()
http_client.receive(self.sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
assert http_client.rooms
room_id = list(http_client.rooms.keys())[0]
_, _ = http_client.room_typing(room_id, typing_state=False)
http_client.receive(self.empty_response(5))
response = http_client.next_response()
assert isinstance(response, RoomTypingResponse)
def test_http_client_download(self, http_client):
http_client.connect(TransportType.HTTP2)
server_name = "example.og"
media_id = "ascERGshawAWawugaAcauga",
filename = "example&.png" # has unsafe character to test % encoding
_, _ = http_client.download(server_name, media_id, allow_remote=False)
http_client.receive(self.file_byte_response(1))
response = http_client.next_response()
assert isinstance(response, DownloadResponse)
assert response.body == self._load_byte_response(
"tests/data/file_response"
)
assert response.content_type == "image/png"
assert response.filename is None
_, _ = http_client.download(server_name, media_id, filename)
http_client.receive(self.file_byte_response(3, filename))
response = http_client.next_response()
assert isinstance(response, DownloadResponse)
assert response.body == self._load_byte_response(
"tests/data/file_response"
)
assert response.content_type == "image/png"
assert response.filename == filename
def test_http_client_thumbnail(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.thumbnail(
"example.org",
"ascERGshawAWawugaAcauga",
32,
32,
allow_remote=False
)
http_client.receive(self.file_byte_response(1))
response = http_client.next_response()
assert isinstance(response, ThumbnailResponse)
assert response.body == self._load_byte_response(
"tests/data/file_response"
)
assert response.content_type == "image/png"
def test_http_client_get_profile(self, http_client):
http_client.connect(TransportType.HTTP2)
name = faker.name()
avatar = faker.avatar_url().replace("#auto", "")
_, _ = http_client.get_profile()
http_client.receive(self.get_profile_byte_response(name, avatar, 1))
response = http_client.next_response()
assert isinstance(response, ProfileGetResponse)
assert response.displayname == name
assert response.avatar_url.replace("#auto", "") == avatar
def test_http_client_get_set_displayname(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.sync()
http_client.receive(self.sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.get_displayname()
http_client.receive(self.get_displayname_byte_response(None, 5))
response = http_client.next_response()
assert isinstance(response, ProfileGetDisplayNameResponse)
assert not response.displayname
new_name = faker.name()
_, _ = http_client.set_displayname(new_name)
http_client.receive(self.empty_response(7))
response = http_client.next_response()
assert isinstance(response, ProfileSetDisplayNameResponse)
_, _ = http_client.get_displayname()
http_client.receive(self.get_displayname_byte_response(new_name, 9))
response = http_client.next_response()
assert isinstance(response, ProfileGetDisplayNameResponse)
assert response.displayname == new_name
def test_http_client_get_set_avatar(self, http_client):
http_client.connect(TransportType.HTTP2)
_, _ = http_client.login("1234")
http_client.receive(self.login_byte_response)
response = http_client.next_response()
assert isinstance(response, LoginResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.sync()
http_client.receive(self.sync_byte_response)
response = http_client.next_response()
assert isinstance(response, SyncResponse)
assert http_client.access_token == "ABCD"
_, _ = http_client.get_avatar()
http_client.receive(self.get_avatar_byte_response(None, 5))
response = http_client.next_response()
assert isinstance(response, ProfileGetAvatarResponse)
assert not response.avatar_url
new_avatar = faker.avatar_url().replace("#auto", "")
_, _ = http_client.set_avatar(new_avatar)
http_client.receive(self.empty_response(7))
response = http_client.next_response()
assert isinstance(response, ProfileSetAvatarResponse)
_, _ = http_client.get_avatar()
http_client.receive(self.get_avatar_byte_response(new_avatar, 9))
response = http_client.next_response()
assert isinstance(response, ProfileGetAvatarResponse)
assert response.avatar_url.replace("#auto", "") == new_avatar
def test_event_callback(self, client):
client.receive_response(self.login_response)
class CallbackException(Exception):
pass
def cb(room, event):
if isinstance(event, RoomMemberEvent):
raise CallbackException()
client.add_event_callback(cb, (RoomMemberEvent, RoomEncryptionEvent))
with pytest.raises(CallbackException):
client.receive_response(self.sync_response)
def test_to_device_cb(self, client):
client.receive_response(self.login_response)
class CallbackException(Exception):
pass
def cb(event):
if isinstance(event, RoomEncryptionEvent):
raise CallbackException()
client.add_to_device_callback(cb, RoomEncryptionEvent)
with pytest.raises(CallbackException):
client.receive_response(self.sync_response)
def test_ephemeral_cb(self, client):
client.receive_response(self.login_response)
class CallbackException(Exception):
pass
def cb(_, event):
raise CallbackException()
client.add_ephemeral_callback(cb, TypingNoticeEvent)
with pytest.raises(CallbackException):
client.receive_response(self.sync_response)
def test_no_encryption(self, client_no_e2e):
client_no_e2e.receive_response(self.login_response)
assert client_no_e2e.logged_in
assert not client_no_e2e.olm
client_no_e2e.receive_response(self.sync_response)
assert len(client_no_e2e.rooms) == 1
room = list(client_no_e2e.rooms.values())[0]
assert room.encrypted
client_no_e2e.receive_response(self.second_sync)
with pytest.raises(LocalProtocolError):
client_no_e2e.device_store
with pytest.raises(LocalProtocolError):
client_no_e2e.olm_account_shared
assert not client_no_e2e.should_query_keys
assert not client_no_e2e.users_for_key_query
assert not client_no_e2e.key_verifications
assert not client_no_e2e.outgoing_to_device_messages
assert not client_no_e2e.get_active_sas(ALICE_ID, ALICE_DEVICE_ID)
to_device = ToDeviceMessage("m.test", ALICE_ID, ALICE_DEVICE_ID, {})
client_no_e2e.room_contains_unverified(room.room_id)
with pytest.raises(LocalProtocolError):
client_no_e2e.invalidate_outbound_session(room.room_id)
client_no_e2e.receive_response(self.keys_query_response)
def test_event_cb_for_invited_rooms(self, client):
client.receive_response(self.login_response)
class CallbackException(Exception):
pass
def cb(_, event):
raise CallbackException()
client.add_event_callback(cb, InviteMemberEvent)
with pytest.raises(CallbackException):
client.receive_response(self.sync_invite_response)
def test_homeserver_url_parsing(self):
host, path = HttpClient._parse_homeserver("https://example.org:8080")
assert host == "example.org:8080"
assert path == ""
host, path = HttpClient._parse_homeserver("example.org:8080")
assert host == "example.org:8080"
assert path == ""
host, path = HttpClient._parse_homeserver("example.org/_matrix")
assert host == "example.org:443"
assert path == "_matrix"
host, path = HttpClient._parse_homeserver(
"https://example.org:8008/_matrix"
)
assert host == "example.org:8008"
assert path == "_matrix"
def test_room_devices(self, client):
client.receive_response(self.login_response)
client.receive_response(self.sync_response)
client.receive_response(self.keys_query_response)
room_devices = client.room_devices(TEST_ROOM_ID)
assert ALICE_ID in room_devices
assert ALICE_DEVICE_ID in room_devices[ALICE_ID]
alice_device = room_devices[ALICE_ID][ALICE_DEVICE_ID]
assert alice_device
def test_soft_logout(self, client):
client.receive_response(self.login_response)
assert client.logged_in
error_response = SyncResponse.from_dict(
{
"errcode": "M_UNKNOWN_TOKEN",
"error": "Access token has expired",
"soft_logout": True
}
)
client.receive_response(error_response)
assert not client.logged_in
def test_sync_token_restoring(self, client):
user = client.user_id
device_id = client.device_id
        path = client.store_path
del client
config = ClientConfig(store_sync_tokens=True)
client = Client(user, device_id, path, config=config)
client.receive_response(self.login_response)
assert not client.next_batch
assert not client.loaded_sync_token
client.receive_response(self.sync_response)
assert client.next_batch
client = Client(user, device_id, path, config=config)
client.receive_response(self.login_response)
assert client.loaded_sync_token
| 31.673276
| 79
| 0.628508
|
4a121cc206cbe7548d1dbe678641d7b099413430
| 61,861
|
py
|
Python
|
python/ccxt/async_support/qtrade.py
|
jspenc72/ccxt
|
5eb43754ddb85aa24fb16860ce80d18790c288be
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/qtrade.py
|
jspenc72/ccxt
|
5eb43754ddb85aa24fb16860ce80d18790c288be
|
[
"MIT"
] | 1
|
2022-01-27T19:54:13.000Z
|
2022-01-27T19:54:13.000Z
|
python/ccxt/async_support/qtrade.py
|
jspenc72/ccxt
|
5eb43754ddb85aa24fb16860ce80d18790c288be
|
[
"MIT"
] | 1
|
2022-03-15T22:51:08.000Z
|
2022-03-15T22:51:08.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2
except NameError:
    basestring = str  # Python 3
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import RateLimitExceeded
class qtrade(Exchange):
def describe(self):
return self.deep_extend(super(qtrade, self).describe(), {
'id': 'qtrade',
'name': 'qTrade',
'countries': ['US'],
'rateLimit': 1000,
'version': 'v1',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/80491487-74a99c00-896b-11ea-821e-d307e832f13e.jpg',
'api': 'https://api.qtrade.io',
'www': 'https://qtrade.io',
'doc': 'https://qtrade-exchange.github.io/qtrade-docs',
'referral': 'https://qtrade.io/?ref=BKOQWVFGRH2C',
},
'has': {
'cancelOrder': True,
'CORS': None,
'createMarketOrder': None,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposit': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': None,
'fetchWithdrawal': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'5m': 'fivemin',
'15m': 'fifteenmin',
'30m': 'thirtymin',
'1h': 'onehour',
'2h': 'twohour',
'4h': 'fourhour',
'1d': 'oneday',
},
'api': {
'public': {
'get': [
'ticker/{market_string}',
'tickers',
'currency/{code}',
'currencies',
'common',
'market/{market_string}',
'markets',
'market/{market_string}/trades',
'orderbook/{market_string}',
'market/{market_string}/ohlcv/{interval}',
],
},
'private': {
'get': [
'me',
'balances',
'balances_all', # undocumented
'market/{market_string}',
'orders',
'order/{order_id}',
'trades',
'withdraw/{withdraw_id}',
'withdraws',
'deposit/{deposit_id}',
'deposits',
'transfers',
],
'post': [
'cancel_order',
'withdraw',
'deposit_address/{currency}',
'sell_limit',
'buy_limit',
],
},
},
'fees': {
'trading': {
'feeSide': 'quote',
'tierBased': True,
'percentage': True,
'taker': 0.005,
'maker': 0.0,
},
'funding': {
'withdraw': {},
},
},
'commonCurrencies': {
'BTM': 'Bitmark',
},
'exceptions': {
'exact': {
'invalid_auth': AuthenticationError,
'insuff_funds': InsufficientFunds,
'market_not_found': BadSymbol, # {"errors":[{"code":"market_not_found","title":"Requested market does not exist"}]}
'too_small': InvalidOrder,
'limit_exceeded': RateLimitExceeded, # {"errors":[{"code":"limit_exceeded","title":"You have exceeded the windowed rate limit. Please see docs."}]}
},
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetMarkets(params)
#
# {
# "data":{
# "markets":[
# {
# "id":5,
# "market_currency":"BAC",
# "base_currency":"BTC",
# "maker_fee":"0.0025",
# "taker_fee":"0.0025",
# "metadata":{
# "delisting_date":"7/15/2018",
# "market_notices":[
# {
# "message":"Delisting Notice: This market has been delisted due to low volume. Please cancel your orders and withdraw your funds by 7/15/2018.",
# "type":"warning"
# }
# ]
# },
# "can_trade":false,
# "can_cancel":true,
# "can_view":false,
# "market_string":"BAC_BTC",
# "minimum_sell_amount":"0.0001",
# "minimum_buy_value":"0.0001",
# "market_precision":8,
# "base_precision":8
# },
# ],
# }
# }
#
data = self.safe_value(response, 'data', {})
markets = self.safe_value(data, 'markets', [])
result = []
for i in range(0, len(markets)):
market = markets[i]
marketId = self.safe_string(market, 'market_string')
numericId = self.safe_integer(market, 'id')
baseId = self.safe_string(market, 'market_currency')
quoteId = self.safe_string(market, 'base_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'market_precision'),
'price': self.safe_integer(market, 'base_precision'),
}
canView = self.safe_value(market, 'can_view', False)
canTrade = self.safe_value(market, 'can_trade', False)
active = canTrade and canView
result.append({
'symbol': symbol,
'id': marketId,
'numericId': numericId,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'type': 'spot',
'spot': True,
'active': active,
'precision': precision,
'taker': self.safe_number(market, 'taker_fee'),
'maker': self.safe_number(market, 'maker_fee'),
'limits': {
'amount': {
                        'min': self.safe_number(market, 'minimum_sell_amount'),  # field name per the sample response above
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minimum_buy_value'),
'max': None,
},
},
'info': market,
})
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetCurrencies(params)
#
# {
# "data":{
# "currencies":[
# {
# "code":"DGB",
# "long_name":"Digibyte",
# "type":"bitcoin_like",
# "precision":8,
# "config":{
# "price":0.0035,
# "withdraw_fee":"10",
# "deposit_types":[
# {
# "label":"Address",
# "lookup_mode":"address",
# "render_type":"address",
# "deposit_type":"address",
# "lookup_config":{}
# }
# ],
# "default_signer":103,
# "address_version":30,
# "satoshi_per_byte":300,
# "required_confirmations":200,
# "required_generate_confirmations":300
# },
# "metadata":{},
# "minimum_order":"0.0001",
# "status":"ok",
# "can_withdraw":true,
# "delisted":false,
# "deposit_disabled":false,
# "withdraw_disabled":false,
# "deposit_warn_codes":[],
# "withdraw_warn_codes":[]
# },
# ],
# }
# }
#
data = self.safe_value(response, 'data', {})
currencies = self.safe_value(data, 'currencies', [])
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'code')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'long_name')
type = self.safe_string(currency, 'type')
canWithdraw = self.safe_value(currency, 'can_withdraw', True)
withdrawDisabled = self.safe_value(currency, 'withdraw_disabled', False)
depositDisabled = self.safe_value(currency, 'deposit_disabled', False)
deposit = not depositDisabled
withdraw = canWithdraw and not withdrawDisabled
config = self.safe_value(currency, 'config', {})
status = self.safe_string(currency, 'status')
active = withdraw and deposit and (status == 'ok')
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': type,
'name': name,
'fee': self.safe_number(config, 'withdraw_fee'),
'precision': self.safe_integer(currency, 'precision'),
'active': active,
'deposit': deposit,
'withdraw': withdraw,
'limits': {
'amount': {
'min': self.safe_number(currency, 'minimum_order'),
'max': None,
},
'withdraw': {
'min': None,
'max': None,
},
},
}
return result
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "time":"2019-12-07T22:55:00Z",
# "open":"0.00197",
# "high":"0.00197",
# "low":"0.00197",
# "close":"0.00197",
# "volume":"0.00016676",
# "market_volume":"0.08465047"
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'market_volume'),
]
async def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market_string': market['id'],
'interval': self.timeframes[timeframe],
}
response = await self.publicGetMarketMarketStringOhlcvInterval(self.extend(request, params))
#
# {
# "data":{
# "slices":[
# {"time":"2019-12-07T22:55:00Z","open":"0.00197","high":"0.00197","low":"0.00197","close":"0.00197","volume":"0.00016676","market_volume":"0.08465047"},
# {"time":"2019-12-07T23:00:00Z","open":"0.00197","high":"0.00197","low":"0.00197","close":"0.00197","volume":"0","market_volume":"0"},
# {"time":"2019-12-07T23:05:00Z","open":"0.00197","high":"0.00197","low":"0.00197","close":"0.00197","volume":"0","market_volume":"0"},
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
ohlcvs = self.safe_value(data, 'slices', [])
return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
marketId = self.market_id(symbol)
request = {'market_string': marketId}
response = await self.publicGetOrderbookMarketString(self.extend(request, params))
#
# {
# "data":{
# "buy":{
# "0.00700015":"4.76196367",
# "0.00700017":"1.89755391",
# "0.00700018":"2.13214088",
# },
# "last_change":1588539869958811,
# "sell":{
# "0.02418662":"0.19513696",
# "0.02465627":"0.2439212",
# "0.02530277":"0.663475931274359255",
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
orderbook = {}
sides = {'buy': 'bids', 'sell': 'asks'}
keys = list(sides.keys())
for i in range(0, len(keys)):
key = keys[i]
side = sides[key]
bidasks = self.safe_value(data, key, {})
prices = list(bidasks.keys())
result = []
for j in range(0, len(prices)):
priceAsString = prices[j]
price = self.safe_number(prices, j)
amount = self.safe_number(bidasks, priceAsString)
result.append([price, amount])
orderbook[side] = result
timestamp = self.safe_integer_product(data, 'last_change', 0.001)
return self.parse_order_book(orderbook, symbol, timestamp)
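    # Editor's note (hedged, not in the original source): qTrade returns each
    # book side as a price -> amount mapping, e.g. {"0.00700015": "4.76196367"},
    # so the loop above flattens both sides into ccxt's standard
    # [[price, amount], ...] lists before handing them to parse_order_book().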
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "ask":"0.02423119",
# "bid":"0.0230939",
# "day_avg_price":"0.0247031874349301",
# "day_change":"-0.0237543162270376",
# "day_high":"0.02470552",
# "day_low":"0.02470172",
# "day_open":"0.02530277",
# "day_volume_base":"0.00268074",
# "day_volume_market":"0.10851798",
# "id":41,
# "id_hr":"ETH_BTC",
# "last":"0.02470172",
# "last_change":1588533365354609
# }
#
marketId = self.safe_string(ticker, 'id_hr')
symbol = self.safe_symbol(marketId, market, '_')
timestamp = self.safe_integer_product(ticker, 'last_change', 0.001)
previous = self.safe_number(ticker, 'day_open')
last = self.safe_number(ticker, 'last')
day_change = self.safe_number(ticker, 'day_change')
percentage = None
change = None
average = self.safe_number(ticker, 'day_avg_price')
if day_change is not None:
percentage = day_change * 100
if previous is not None:
change = day_change * previous
baseVolume = self.safe_number(ticker, 'day_volume_market')
quoteVolume = self.safe_number(ticker, 'day_volume_base')
vwap = self.vwap(baseVolume, quoteVolume)
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'day_high'),
'low': self.safe_number(ticker, 'day_low'),
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': previous,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
#
# {
# "data":{
# "markets":[
# {
# "ask":"0.0000003",
# "bid":"0.00000029",
# "day_avg_price":"0.0000002999979728",
# "day_change":"0.0344827586206897",
# "day_high":"0.0000003",
# "day_low":"0.0000003",
# "day_open":"0.00000029",
# "day_volume_base":"0.00591958",
# "day_volume_market":"19732.06666665",
# "id":36,
# "id_hr":"DOGE_BTC",
# "last":"0.0000003",
# "last_change":1588534202130778
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
tickers = self.safe_value(data, 'markets', [])
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market_string': market['id'],
}
response = await self.publicGetTickerMarketString(self.extend(request, params))
#
# {
# "data":{
# "ask":"0.02423119",
# "bid":"0.0230939",
# "day_avg_price":"0.0247031874349301",
# "day_change":"-0.0237543162270376",
# "day_high":"0.02470552",
# "day_low":"0.02470172",
# "day_open":"0.02530277",
# "day_volume_base":"0.00268074",
# "day_volume_market":"0.10851798",
# "id":41,
# "id_hr":"ETH_BTC",
# "last":"0.02470172",
# "last_change":1588533365354609
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_ticker(data, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market_string': market['id'],
# 'older_than': 123, # returns trades with id < older_than
# 'newer_than': 123, # returns trades with id > newer_than
}
response = await self.publicGetMarketMarketStringTrades(self.extend(request, params))
#
# {
# "data":{
# "trades":[
# {
# "id":85507,
# "amount":"0.09390502",
# "price":"0.02556325",
# "base_volume":"0.00240051",
# "seller_taker":true,
# "side":"sell",
# "created_at":"0001-01-01T00:00:00Z",
# "created_at_ts":1581560391338718
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
trades = self.safe_value(data, 'trades', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'desc': True, # Returns newest trades first when True
# 'older_than': 123, # returns trades with id < older_than
# 'newer_than': 123, # returns trades with id > newer_than
}
market = None
numericId = self.safe_value(params, 'market_id')
if numericId is not None:
request['market_id'] = numericId # mutually exclusive with market_string
elif symbol is not None:
market = self.market(symbol)
request['market_string'] = market['id']
response = await self.privateGetTrades(self.extend(request, params))
#
# {
# "data":{
# "trades":[
# {
# "id":107331,
# "market_amount":"0.1082536946986",
# "price":"0.0230939",
# "base_amount":"0.00249999",
# "order_id":13790596,
# "market_id":41,
# "market_string":"ETH_BTC",
# "taker":true,
# "base_fee":"0.00001249",
# "side":"sell",
# "created_at":"2020-05-04T06:08:18.513413Z"
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
trades = self.safe_value(data, 'trades', [])
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":85507,
# "amount":"0.09390502",
# "price":"0.02556325",
# "base_volume":"0.00240051",
# "seller_taker":true,
# "side":"sell",
# "created_at":"0001-01-01T00:00:00Z",
# "created_at_ts":1581560391338718
# }
#
# fetchMyTrades(private)
#
# {
# "id":107331,
# "market_amount":"0.1082536946986",
# "price":"0.0230939",
# "base_amount":"0.00249999",
# "order_id":13790596,
# "market_id":41,
# "market_string":"ETH_BTC",
# "taker":true,
# "base_fee":"0.00001249",
# "side":"sell",
# "created_at":"2020-05-04T06:08:18.513413Z"
# }
#
# createOrder, fetchOrders, fetchOpenOrders, fetchClosedOrders
#
# {
# "base_amount": "9.58970687",
# "base_fee": "0.02397426",
# "created_at": "0001-01-01T00:00:00Z",
# "id": 0,
# "market_amount": "0.97179355",
# "price": "9.86804952",
# "taker": True
# }
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_integer_product(trade, 'created_at_ts', 0.001)
if timestamp is None:
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
side = self.safe_string(trade, 'side')
marketId = self.safe_string(trade, 'market_string')
market = self.safe_market(marketId, market)
cost = self.safe_string_2(trade, 'base_volume', 'base_amount')
price = self.safe_string(trade, 'price')
amount = self.safe_string_2(trade, 'market_amount', 'amount')
fee = None
feeCost = self.safe_string(trade, 'base_fee')
if feeCost is not None:
feeCurrencyCode = None if (market is None) else market['quote']
fee = {
'currency': feeCurrencyCode,
'cost': feeCost,
}
taker = self.safe_value(trade, 'taker', True)
takerOrMaker = 'taker' if taker else 'maker'
orderId = self.safe_string(trade, 'order_id')
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}, market)
def parse_balance(self, response):
data = self.safe_value(response, 'data', {})
balances = self.safe_value(data, 'balances', [])
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = result[code] if (code in result) else self.account()
account['free'] = self.safe_string(balance, 'balance')
account['used'] = '0'
result[code] = account
balances = self.safe_value(data, 'order_balances', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = result[code] if (code in result) else self.account()
account['used'] = self.safe_string(balance, 'balance')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetBalancesAll(params)
#
# {
# "data":{
# "balances": [
# {"balance": "100000000", "currency": "BCH"},
# {"balance": "99992435.78253015", "currency": "LTC"},
# {"balance": "99927153.76074182", "currency": "BTC"},
# ],
# "order_balances":[],
# "limit_used":0,
# "limit_remaining":4000,
# "limit":4000
# }
# }
#
return self.parse_balance(response)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise InvalidOrder(self.id + ' createOrder() allows limit orders only')
await self.load_markets()
market = self.market(symbol)
request = {
'amount': self.amount_to_precision(symbol, amount),
'market_id': market['numericId'],
'price': self.price_to_precision(symbol, price),
}
method = 'privatePostSellLimit' if (side == 'sell') else 'privatePostBuyLimit'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "data": {
# "order": {
# "created_at": "2018-04-06T20:46:52.899248Z",
# "id": 13253,
# "market_amount": "1",
# "market_amount_remaining": "0",
# "market_id": 1,
# "open": False,
# "order_type": "sell_limit",
# "price": "0.01",
# "trades": [
# {
# "base_amount": "0.27834267",
# "base_fee": "0.00069585",
# "created_at": "0001-01-01T00:00:00Z",
# "id": 0,
# "market_amount": "0.02820645",
# "price": "9.86805058",
# "taker": True
# },
# {
# "base_amount": "9.58970687",
# "base_fee": "0.02397426",
# "created_at": "0001-01-01T00:00:00Z",
# "id": 0,
# "market_amount": "0.97179355",
# "price": "9.86804952",
# "taker": True
# }
# ]
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
order = self.safe_value(data, 'order', {})
return self.parse_order(order, market)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "created_at": "2018-04-06T20:46:52.899248Z",
# "id": 13253,
# "market_amount": "1",
# "market_amount_remaining": "0",
# "market_id": 1,
# "open": False,
# "order_type": "sell_limit",
# "price": "0.01",
# "trades": [
# {
# "base_amount": "0.27834267",
# "base_fee": "0.00069585",
# "created_at": "0001-01-01T00:00:00Z",
# "id": 0,
# "market_amount": "0.02820645",
# "price": "9.86805058",
# "taker": True
# },
# {
# "base_amount": "9.58970687",
# "base_fee": "0.02397426",
# "created_at": "0001-01-01T00:00:00Z",
# "id": 0,
# "market_amount": "0.97179355",
# "price": "9.86804952",
# "taker": True
# }
# ]
# }
#
# fetchOrder
#
# {
# id: 13790596,
# market_amount: "0.15",
# market_amount_remaining: "0",
# created_at: "2020-05-04T06:08:18.513413Z",
# price: "0.0230939",
# base_amount: "0",
# order_type: "sell_limit",
# market_id: 41,
# market_string: "ETH_BTC",
# open: False,
# trades: [
# {
# id: 107331,
# market_amount: "0.1082536946986",
# price: "0.0230939",
# base_amount: "0.00249999",
# taker: True,
# base_fee: "0.00001249",
# created_at: "2020-05-04T06:08:18.513413Z",
# }
# ],
# close_reason: "canceled"
# }
#
id = self.safe_string(order, 'id')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
sideType = self.safe_string(order, 'order_type')
orderType = None
side = None
if sideType is not None:
parts = sideType.split('_')
side = self.safe_string(parts, 0)
orderType = self.safe_string(parts, 1)
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'market_amount')
remaining = self.safe_string(order, 'market_amount_remaining')
open = self.safe_value(order, 'open', False)
closeReason = self.safe_string(order, 'close_reason')
status = None
if open:
status = 'open'
elif closeReason == 'canceled':
status = 'canceled'
else:
status = 'closed'
marketId = self.safe_string(order, 'market_string')
market = self.safe_market(marketId, market, '_')
symbol = market['symbol']
rawTrades = self.safe_value(order, 'trades', [])
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': orderType,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': None,
'amount': amount,
'remaining': remaining,
'filled': None,
'status': status,
'fee': None,
'fees': None,
'cost': None,
'trades': rawTrades,
}, market)
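    # Editor's note (hedged, not in the original source): qTrade encodes side
    # and type in a single field, e.g. order_type "sell_limit", which
    # parse_order() above splits into side "sell" and type "limit".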
async def cancel_order(self, id, symbol=None, params={}):
request = {
'id': int(id),
}
# successful cancellation returns 200 with no payload
return await self.privatePostCancelOrder(self.extend(request, params))
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {'order_id': id}
response = await self.privateGetOrderOrderId(self.extend(request, params))
#
# {
# "data":{
# "order":{
# "id":13790596,
# "market_amount":"0.15",
# "market_amount_remaining":"0.0417463053014",
# "created_at":"2020-05-04T06:08:18.513413Z",
# "price":"0.0230939",
# "order_type":"sell_limit",
# "market_id":41,
# "market_string":"ETH_BTC",
# "open":true,
# "trades":[
# {
# "id":107331,
# "market_amount":"0.1082536946986",
# "price":"0.0230939",
# "base_amount":"0.00249999",
# "taker":true,
# "base_fee":"0.00001249",
# "created_at":"2020-05-04T06:08:18.513413Z"
# }
# ]
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
order = self.safe_value(data, 'order', {})
return self.parse_order(order)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'open': True,
# 'older_than': 123, # returns orders with id < older_than
# 'newer_than': 123, # returns orders with id > newer_than
}
market = None
numericId = self.safe_value(params, 'market_id')
if numericId is not None:
request['market_id'] = numericId # mutually exclusive with market_string
elif symbol is not None:
market = self.market(symbol)
request['market_string'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
#
# {
# "data":{
# "orders":[
# {
# "id":13790596,
# "market_amount":"0.15",
# "market_amount_remaining":"0.0417463053014",
# "created_at":"2020-05-04T06:08:18.513413Z",
# "price":"0.0230939",
# "order_type":"sell_limit",
# "market_id":41,
# "market_string":"ETH_BTC",
# "open":true,
# "trades":[
# {
# "id":107331,
# "market_amount":"0.1082536946986",
# "price":"0.0230939",
# "base_amount":"0.00249999",
# "taker":true,
# "base_fee":"0.00001249",
# "created_at":"2020-05-04T06:08:18.513413Z"
# }
# ]
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
orders = self.safe_value(data, 'orders', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'open': True}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'open': False}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "currency_status":"ok",
# "deposit_methods":{
# "address":{
# "deposit_type":"address",
# "render_type":"address",
# "label":"Address",
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# },
# },
# }
#
code = None if (currency is None) else currency['code']
address = self.safe_string(depositAddress, 'address')
tag = None
if address is not None:
parts = address.split(':')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': depositAddress,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privatePostDepositAddressCurrency(self.extend(request, params))
#
# {
# "data":{
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "currency_status":"ok",
# "deposit_methods":{
# "address":{
# "deposit_type":"address",
# "render_type":"address",
# "label":"Address",
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# },
# },
# },
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_deposit_address(data, currency)
async def fetch_deposit(self, id, code=None, params={}):
await self.load_markets()
request = {
'deposit_id': id,
}
response = await self.privateGetDepositDepositId(self.extend(request, params))
#
# {
# "data":{
# "deposit":{
# "id":"0xaa6e65ed274c4786e5dec3671de96f81021cacdbc453b1a133ab84356f3620a0",
# "amount":"0.13",
# "currency":"ETH",
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "status":"credited",
# "relay_status":"",
# "network_data":{
# "confirms":87,
# "sweep_txid":"0xa16e65ed274d4686e5dec3671de96f81021cacdbc453b1a133ab85356f3630a0",
# "sweep_balance":"0.150000000000000000",
# "confirms_required":80,
# "unsigned_sweep_tx":{
# "chainId":1,
# "from":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "gas":"0x5208",
# "gasPrice":"0x19b45a500",
# "nonce":"0x0",
# "to":"0x76Cd80202a2C31e9D8F595a31ed071CE7F75BB93",
# "value":"0x214646b6347d800"
# },
# "txid":"0xaa6e65ed274c4786e5dec3671de96f81021cacdbc453b1a133ab84356f3620a0",
# "tx_index":"0x6f",
# "tx_value":"0.130000000000000000",
# "key_index":311,
# "blockheight":9877869,
# "signed_sweep_tx":{
# "hash":"0xa16e65ed274d4686e5dec3671de96f81021cacdbc453b1a133ab85356f3630a0",
# "rawTransaction":"0xd86c8085019b45a1008252099476cb80202b2c31e9d7f595a31fd071ce7f75bb93880214646b6347d8008046a08c6e3bfe8b25bff2b6851c87ea17c63d7b23591210ab0779a568eaa43dc40435a030e964bb2b667072ea7cbc8ab554403e3f3ead9b554743f2fdc2b1e06e998df9"
# },
# "estimated_sweep_tx_fee":144900000000000
# },
# "created_at":"2020-05-04T05:38:42.145162Z"
# }
# }
# }
data = self.safe_value(response, 'data', {})
deposit = self.safe_value(data, 'deposit', {})
return self.parse_transaction(deposit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
response = await self.privateGetDeposits(params)
#
# {
# "data":{
# "deposits":[
# {
# "id":"0xaa6e65ed274c4786e5dec3671de96f81021cacdbc453b1a133ab84356f3620a0",
# "amount":"0.13",
# "currency":"ETH",
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "status":"credited",
# "relay_status":"",
# "network_data":{
# "confirms":87,
# "sweep_txid":"0xa16e65ed274d4686e5dec3671de96f81021cacdbc453b1a133ab85356f3630a0",
# "sweep_balance":"0.150000000000000000",
# "confirms_required":80,
# "unsigned_sweep_tx":{
# "chainId":1,
# "from":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "gas":"0x5208",
# "gasPrice":"0x19b45a500",
# "nonce":"0x0",
# "to":"0x76Cd80202a2C31e9D8F595a31ed071CE7F75BB93",
# "value":"0x214646b6347d800"
# },
# "txid":"0xaa6e65ed274c4786e5dec3671de96f81021cacdbc453b1a133ab84356f3620a0",
# "tx_index":"0x6f",
# "tx_value":"0.130000000000000000",
# "key_index":311,
# "blockheight":9877869,
# "signed_sweep_tx":{
# "hash":"0xa16e65ed274d4686e5dec3671de96f81021cacdbc453b1a133ab85356f3630a0",
# "rawTransaction":"0xd86c8085019b45a1008252099476cb80202b2c31e9d7f595a31fd071ce7f75bb93880214646b6347d8008046a08c6e3bfe8b25bff2b6851c87ea17c63d7b23591210ab0779a568eaa43dc40435a030e964bb2b667072ea7cbc8ab554403e3f3ead9b554743f2fdc2b1e06e998df9"
# },
# "estimated_sweep_tx_fee":144900000000000
# },
# "created_at":"2020-05-04T05:38:42.145162Z"
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
deposits = self.safe_value(data, 'deposits', [])
return self.parse_transactions(deposits, currency, since, limit)
async def fetch_withdrawal(self, id, code=None, params={}):
await self.load_markets()
request = {
'withdraw_id': id,
}
response = await self.privateGetWithdrawWithdrawId(self.extend(request, params))
#
# {
# data: {
# withdraw: {
# "id":25524,
# "amount":"0.0417463053014",
# "user_id":0,
# "currency":"ETH",
# "network_data":{
# "unsigned_tx":{
# "chainId":1,
# "from":"0x76Cd80202a2C31e9D8F595a31ed071CE7F75BB93",
# "gas":"0x5208",
# "gasPrice":"0x20c8558e9",
# "nonce":"0xf3",
# "to":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "value":"0x71712bcd113308"
# },
# "estimated_tx_fee":184800004893000,
# "confirms_required":80,
# "txid":"0x79439b62473d61d99ce1dc6c3b8a417da36d45323a394bb0d4af870608fef38d",
# "confirms":83,
# "signed_tx":{
# "hash":"0x79439b62473d61d99ce1dc6c3b8a417da36d45323a394bb0d4af870608fef38d",
# "rawTransaction":"0xf86c81f385021c8558e98252089401b0a9b7b4cde774af0f3e87cb4f1c2ccdba08068771712acd1133078025a0088157d119d924d47413c81b91b9f18ff148623a2ef13dab1895ca3ba546b771a046a021b1e1f64d1a60bb66c19231f641b352326188a9ed3b931b698a939f78d0"
# }
# },
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "status":"confirmed",
# "relay_status":"",
# "created_at":"2020-05-05T06:32:19.907061Z",
# "cancel_requested":false
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
withdrawal = self.safe_value(data, 'withdraw', {})
return self.parse_transaction(withdrawal)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
response = await self.privateGetWithdraws(params)
# {
# "data":{
# "withdraws":[
# {
# "id":25524,
# "amount":"0.0417463053014",
# "user_id":0,
# "currency":"ETH",
# "network_data":{
# "unsigned_tx":{
# "chainId":1,
# "from":"0x76Cd80202a2C31e9D8F595a31ed071CE7F75BB93",
# "gas":"0x5208",
# "gasPrice":"0x20c8558e9",
# "nonce":"0xf3",
# "to":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "value":"0x71712bcd113308"
# },
# "estimated_tx_fee":184800004893000,
# "confirms_required":80,
# "txid":"0x79439b62473d61d99ce1dc6c3b8a417da36d45323a394bb0d4af870608fef38d",
# "confirms":83,
# "signed_tx":{
# "hash":"0x79439b62473d61d99ce1dc6c3b8a417da36d45323a394bb0d4af870608fef38d",
# "rawTransaction":"0xf86c81f385021c8558e98252089401b0a9b7b4cde774af0f3e87cb4f1c2ccdba08068771712acd1133078025a0088157d119d924d47413c81b91b9f18ff148623a2ef13dab1895ca3ba546b771a046a021b1e1f64d1a60bb66c19231f641b352326188a9ed3b931b698a939f78d0"
# }
# },
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "status":"confirmed",
# "relay_status":"",
# "created_at":"2020-05-05T06:32:19.907061Z",
# "cancel_requested":false
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
withdrawals = self.safe_value(data, 'withdraws', [])
return self.parse_transactions(withdrawals, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits, fetchDeposit
#
# {
# "id":"0xaa6e65ed274c4786e5dec3671de96f81021cacdbc453b1a133ab84356f3620a0",
# "amount":"0.13",
# "currency":"ETH",
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "status":"credited",
# "relay_status":"",
# "network_data":{
# "confirms":87,
# "sweep_txid":"0xa16e65ed274d4686e5dec3671de96f81021cacdbc453b1a133ab85356f3630a0",
# "sweep_balance":"0.150000000000000000",
# "confirms_required":80,
# "unsigned_sweep_tx":{
# "chainId":1,
# "from":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "gas":"0x5208",
# "gasPrice":"0x19b45a500",
# "nonce":"0x0",
# "to":"0x76Cd80202a2C31e9D8F595a31ed071CE7F75BB93",
# "value":"0x214646b6347d800"
# },
# "txid":"0xaa6e65ed274c4786e5dec3671de96f81021cacdbc453b1a133ab84356f3620a0",
# "tx_index":"0x6f",
# "tx_value":"0.130000000000000000",
# "key_index":311,
# "blockheight":9877869,
# "signed_sweep_tx":{
# "hash":"0xa16e65ed274d4686e5dec3671de96f81021cacdbc453b1a133ab85356f3630a0",
# "rawTransaction":"0xd86c8085019b45a1008252099476cb80202b2c31e9d7f595a31fd071ce7f75bb93880214646b6347d8008046a08c6e3bfe8b25bff2b6851c87ea17c63d7b23591210ab0779a568eaa43dc40435a030e964bb2b667072ea7cbc8ab554403e3f3ead9b554743f2fdc2b1e06e998df9"
# },
# "estimated_sweep_tx_fee":144900000000000
# },
# "created_at":"2020-05-04T05:38:42.145162Z"
# }
#
# fetchWithdrawals, fetchWithdrawal
#
# {
# "id":25524,
# "amount":"0.0417463053014",
# "user_id":0,
# "currency":"ETH",
# "network_data":{
# "unsigned_tx":{
# "chainId":1,
# "from":"0x76Cd80202a2C31e9D8F595a31ed071CE7F75BB93",
# "gas":"0x5208",
# "gasPrice":"0x20c8558e9",
# "nonce":"0xf3",
# "to":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "value":"0x71712bcd113308"
# },
# "estimated_tx_fee":184800004893000,
# "confirms_required":80,
# "txid":"0x79439b62473d61d99ce1dc6c3b8a417da36d45323a394bb0d4af870608fef38d",
# "confirms":83,
# "signed_tx":{
# "hash":"0x79439b62473d61d99ce1dc6c3b8a417da36d45323a394bb0d4af870608fef38d",
# "rawTransaction":"0xf86c81f385021c8558e98252089401b0a9b7b4cde774af0f3e87cb4f1c2ccdba08068771712acd1133078025a0088157d119d924d47413c81b91b9f18ff148623a2ef13dab1895ca3ba546b771a046a021b1e1f64d1a60bb66c19231f641b352326188a9ed3b931b698a939f78d0"
# }
# },
# "address":"0xe0cd26f9A60118555247aE6769A5d241D91f07f2",
# "status":"confirmed",
# "relay_status":"",
# "created_at":"2020-05-05T06:32:19.907061Z",
# "cancel_requested":false
# }
#
# withdraw
#
# {
# "code": "initiated",
# "id": 3,
# "result": "Withdraw initiated. Please allow 3-5 minutes for our system to process."
# }
#
timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
id = self.safe_string(transaction, 'id')
networkData = self.safe_value(transaction, 'network_data', {})
unsignedTx = self.safe_value(networkData, 'unsigned_tx', {})
addressFrom = self.safe_string(unsignedTx, 'from')
txid = self.safe_string(networkData, 'txid')
address = self.safe_string(transaction, 'address')
tag = None
if address is not None:
parts = address.split(':')
numParts = len(parts)
if numParts > 1:
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
addressTo = address
tagFrom = None
tagTo = tag
cancelRequested = self.safe_value(transaction, 'cancel_requested')
type = 'deposit' if (cancelRequested is None) else 'withdrawal'
amount = self.safe_number(transaction, 'amount')
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
statusCode = self.safe_string(transaction, 'code')
if cancelRequested:
status = 'canceled'
elif status is None:
status = self.parse_transaction_status(statusCode)
fee = None
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': tagFrom,
'tagTo': tagTo,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
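    # Editor's note (hedged, not in the original source): parse_transaction()
    # infers the direction purely from the "cancel_requested" field, which is
    # present on withdrawal payloads and absent on deposit payloads.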
def parse_transaction_status(self, status):
statuses = {
'initiated': 'pending',
'needs_create': 'pending',
'credited': 'ok',
'confirmed': 'ok',
}
return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
await self.load_markets()
currency = self.currency(code)
request = {
'address': address,
'amount': amount,
'currency': currency['id'],
}
if tag is not None:
request['address'] += ':' + tag
response = await self.privatePostWithdraw(self.extend(request, params))
#
# {
# "data": {
# "code": "initiated",
# "id": 3,
# "result": "Withdraw initiated. Please allow 3-5 minutes for our system to process."
# }
# }
#
data = self.safe_value(response, 'data', {})
result = self.parse_transaction(data)
return self.extend(result, {
'currency': code,
'address': address,
'addressTo': address,
'tag': tag,
'tagTo': tag,
'amount': amount,
})
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.version + '/'
if api == 'private':
url += 'user/'
url += self.implode_params(path, params)
request = self.omit(params, self.extract_params(path))
if method == 'POST':
body = self.json(request)
else:
if request:
url += '?' + self.urlencode(request)
if api == 'private':
timestamp = str(self.milliseconds())
bodyAsString = body if (method == 'POST') else ''
auth = "\n".join([
method,
url,
timestamp,
bodyAsString,
self.secret,
            ])
hash = self.hash(self.encode(auth), 'sha256', 'base64')
key = self.apiKey
if not isinstance(key, basestring):
key = str(key)
signature = 'HMAC-SHA256 ' + key + ':' + hash
headers = {
'Authorization': signature,
'HMAC-Timestamp': timestamp,
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
#
# {"errors":[{"code":"insuff_funds","title":"Your available balance is too low for that action"}]}
# {"errors":[{"code": "invalid_auth","title": "Invalid HMAC signature"}]}
#
if response is None:
return
errors = self.safe_value(response, 'errors', [])
numErrors = len(errors)
if numErrors < 1:
return
feedback = self.id + ' ' + body
for i in range(0, len(errors)):
error = errors[i]
errorCode = self.safe_string(error, 'code')
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
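# --- Editor's illustration (hedged, not part of ccxt) ------------------------
# A standard-library-only sketch of the signature that sign() above builds:
# newline-join the method, url, timestamp, body and secret, SHA-256 the
# result, base64-encode the digest, and send it in an HMAC-SHA256 header.
# The function name and its arguments are illustrative placeholders.
def _qtrade_auth_header_sketch(method, url, timestamp, body, api_key, secret):
    import base64
    import hashlib
    auth = "\n".join([method, url, timestamp, body, secret])
    digest = base64.b64encode(hashlib.sha256(auth.encode()).digest()).decode()
    return {
        'Authorization': 'HMAC-SHA256 ' + api_key + ':' + digest,
        'HMAC-Timestamp': timestamp,
    }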
| 42.574673
| 279
| 0.441247
|
4a121cdb0570f7facfedecfc7d4d6953bdfdbdb1
| 156
|
py
|
Python
|
pick_three/main.py
|
matthewdeanmartin/pick_three
|
9551cf89c55974ec51eec1daeacad1a5e4c7d8c3
|
[
"MIT"
] | null | null | null |
pick_three/main.py
|
matthewdeanmartin/pick_three
|
9551cf89c55974ec51eec1daeacad1a5e4c7d8c3
|
[
"MIT"
] | 1
|
2018-11-09T10:16:39.000Z
|
2018-11-09T10:16:39.000Z
|
pick_three/main.py
|
matthewdeanmartin/pick_three
|
9551cf89c55974ec51eec1daeacad1a5e4c7d8c3
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
What if you played pick 3 with friends?
"""
from pick_three.game import Game
if __name__ == "__main__":
game = Game()
game.go()
| 15.6
| 39
| 0.647436
|
4a121d4f24090a4b53db813974de78554b37aba5
| 5,937
|
py
|
Python
|
examples/3_mnist_training.py
|
filemaster/aihwkit
|
473eda8c3c89f49acdfc2da9bd03b27e22e13b1a
|
[
"Apache-2.0"
] | null | null | null |
examples/3_mnist_training.py
|
filemaster/aihwkit
|
473eda8c3c89f49acdfc2da9bd03b27e22e13b1a
|
[
"Apache-2.0"
] | null | null | null |
examples/3_mnist_training.py
|
filemaster/aihwkit
|
473eda8c3c89f49acdfc2da9bd03b27e22e13b1a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# (C) Copyright 2020 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""aihwkit example 3: MNIST training.
MNIST training example based on the paper:
https://www.frontiersin.org/articles/10.3389/fnins.2016.00333/full
The referenced paper uses learning rates of η = 0.01, 0.005, and 0.0025
for epochs 0–10, 11–20, and 21–30; this script instead starts at 0.05 and
halves the rate every 10 epochs with a StepLR scheduler.
"""
from time import time
# Imports from PyTorch.
import torch
from torch import nn
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
# Imports from aihwkit.
from aihwkit.nn import AnalogLinear
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice
# Path where the datasets will be stored.
TRAIN_DATASET = 'data/TRAIN_DATASET'
TEST_DATASET = 'data/TEST_DATASET'
# Network definition.
INPUT_SIZE = 784
HIDDEN_SIZES = [256, 128]
OUTPUT_SIZE = 10
# Training parameters.
EPOCHS = 30
BATCH_SIZE = 64
def load_images():
"""Load images for train from the torchvision datasets."""
transform = transforms.Compose([transforms.ToTensor()])
# Load the images.
train_set = datasets.MNIST(TRAIN_DATASET,
download=True, train=True, transform=transform)
val_set = datasets.MNIST(TEST_DATASET,
download=True, train=False, transform=transform)
train_data = torch.utils.data.DataLoader(
train_set, batch_size=BATCH_SIZE, shuffle=True)
validation_data = torch.utils.data.DataLoader(
val_set, batch_size=BATCH_SIZE, shuffle=True)
return train_data, validation_data
def create_analog_network(input_size, hidden_sizes, output_size):
"""Create the neural network using analog and digital layers.
Args:
input_size (int): size of the Tensor at the input.
hidden_sizes (list): list of sizes of the hidden layers (2 layers).
output_size (int): size of the Tensor at the output.
"""
model = nn.Sequential(
AnalogLinear(input_size, hidden_sizes[0], True,
rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
nn.Sigmoid(),
AnalogLinear(hidden_sizes[0], hidden_sizes[1], True,
rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
nn.Sigmoid(),
AnalogLinear(hidden_sizes[1], output_size, True,
rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
nn.LogSoftmax(dim=1)
)
print(model)
return model
def create_sgd_optimizer(model):
"""Create the analog-aware optimizer.
Args:
model (nn.Module): model to be trained.
"""
optimizer = AnalogSGD(model.parameters(), lr=0.05)
optimizer.regroup_param_groups(model)
return optimizer
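def _expected_lr_schedule(base_lr=0.05, gamma=0.5, step_size=10, epochs=EPOCHS):
    """Editor's aside (hedged, not part of the original example).
    Reproduces the decay that train() below obtains from StepLR: 0.05 for
    epochs 0-9, 0.025 for epochs 10-19, and 0.0125 for epochs 20-29.
    """
    return [base_lr * gamma ** (epoch // step_size) for epoch in range(epochs)]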
def train(model, train_set):
"""Train the network.
Args:
model (nn.Module): model to be trained.
train_set (DataLoader): dataset of elements to use as input for training.
"""
classifier = nn.NLLLoss()
optimizer = create_sgd_optimizer(model)
scheduler = StepLR(optimizer, step_size=10, gamma=0.5)
time_init = time()
for epoch_number in range(EPOCHS):
total_loss = 0
for images, labels in train_set:
# Flatten MNIST images into a 784 vector.
images = images.view(images.shape[0], -1)
optimizer.zero_grad()
# Add training Tensor to the model (input).
output = model(images)
loss = classifier(output, labels)
# Run training (backward propagation).
loss.backward()
# Optimize weights.
optimizer.step()
total_loss += loss.item()
print('Epoch {} - Training loss: {:.16f}'.format(
epoch_number, total_loss / len(train_set)))
# Decay learning rate if needed.
scheduler.step()
print('\nTraining Time (s) = {}'.format(time()-time_init))
def test_evaluation(model, val_set):
"""Test trained network
Args:
model (nn.Model): Trained model to be evaluated
val_set (DataLoader): Validation set to perform the evaluation
"""
# Setup counter of images predicted to 0.
predicted_ok = 0
total_images = 0
for images, labels in val_set:
# Predict image.
for i in range(len(labels)):
image = images[i].view(1, INPUT_SIZE)
with torch.no_grad():
pred = model(image)
probabilities_tensor = torch.exp(pred)
probabilities = list(probabilities_tensor.numpy()[0])
# Get labels.
predicted_label = probabilities.index(max(probabilities))
            validation_label = labels.numpy()[i]  # the i-th label, not the batch's last
            # Check whether the predicted label matches the true label.
if validation_label == predicted_label:
predicted_ok += 1
total_images += 1
print('\nNumber Of Images Tested = {}'.format(total_images))
print('Model Accuracy = {}'.format(predicted_ok/total_images))
def main():
"""Train a PyTorch analog model with the MNIST dataset."""
# Load datasets.
train_dataset, validation_dataset = load_images()
# Prepare the model.
model = create_analog_network(INPUT_SIZE, HIDDEN_SIZES, OUTPUT_SIZE)
# Train the model.
train(model, train_dataset)
# Evaluate the trained model.
test_evaluation(model, validation_dataset)
if __name__ == '__main__':
# Execute only if run as the entry point into the program.
main()
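# Editor's note (hedged, not in the original source): running this script
# downloads MNIST into the data/ directory on first use, trains the
# three-layer analog network for all 30 epochs, and prints the per-epoch
# loss followed by the final accuracy.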
| 30.603093
| 81
| 0.667846
|
4a121fa97ebc11d836592ad1a12d2a636cf49291
| 10,358
|
py
|
Python
|
adaptnlp/file_utils.py
|
DinaraN/adaptnlp
|
71a83793965769cef5c7e478f82335a90faa024b
|
[
"Apache-2.0"
] | 5
|
2020-03-30T12:50:56.000Z
|
2022-01-20T22:45:29.000Z
|
adaptnlp/file_utils.py
|
DinaraN/adaptnlp
|
71a83793965769cef5c7e478f82335a90faa024b
|
[
"Apache-2.0"
] | 9
|
2020-11-13T18:41:44.000Z
|
2022-02-10T01:58:28.000Z
|
adaptnlp/file_utils.py
|
DinaraN/adaptnlp
|
71a83793965769cef5c7e478f82335a90faa024b
|
[
"Apache-2.0"
] | 1
|
2020-03-30T17:29:05.000Z
|
2020-03-30T17:29:05.000Z
|
"""
Utilities for working with the local dataset cache. Adapted from AllenNLP.
"""
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm as _tqdm
import boto3
import botocore
from botocore.exceptions import ClientError
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import adaptnlp
logger = logging.getLogger(__name__)
CACHE_ROOT = adaptnlp.cache_root
CACHE_DIRECTORY = str(CACHE_ROOT / "cache")
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
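def _cache_name_roundtrip_example():
    """Editor's sketch (hedged, not part of adaptnlp): url_to_filename() is a
    pure sha256 mapping, so a url/etag pair always yields the same cache name.
    The url and etag below are placeholders."""
    name = url_to_filename("https://example.com/model.bin", etag="abc123")
    # 64 hex characters for the url hash, a period, then 64 for the etag hash.
    assert len(name) == 64 + 1 + 64
    return name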
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
url_or_filename = os.path.expanduser(url_or_filename)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
def is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool:
"""
    Given something that might be a URL (or might be a local path),
    determine whether it's a URL or an existing file path.
"""
if url_or_filename is None:
return False
url_or_filename = os.path.expanduser(str(url_or_filename))
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3") or os.path.exists(url_or_filename)
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
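def _split_s3_path_example():
    """Editor's sketch (hedged, not part of adaptnlp): the bucket/key split
    performed by split_s3_path(). The path below is a placeholder."""
    bucket, key = split_s3_path("s3://my-bucket/path/to/file")
    assert (bucket, key) == ("my-bucket", "path/to/file")
    return bucket, key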
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
def get_s3_resource():
session = boto3.session.Session()
if session.get_credentials() is None:
# Use unsigned requests.
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
return s3_resource
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = get_s3_resource()
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = get_s3_resource()
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def session_with_backoff() -> requests.Session:
"""
We ran into an issue where http requests to s3 were timing out,
possibly because we were making too many requests too quickly.
This helper function returns a requests session that has retry-with-backoff
built in.
see stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library
"""
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
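# Editor's note (hedged, not in the original source): Retry(total=5,
# backoff_factor=1, status_forcelist=[502, 503, 504]) retries only those
# gateway errors, sleeping exponentially longer between attempts, so a
# flaky endpoint is retried for roughly half a minute before giving up.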
def http_get(url: str, temp_file: IO) -> None:
with session_with_backoff() as session:
req = session.get(url, stream=True)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = Tqdm.tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url: str, cache_dir: str = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
with session_with_backoff() as session:
response = session.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError(
"HEAD request failed for url {} with status code {}".format(
url, response.status_code
)
)
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
class Tqdm:
# These defaults are the same as the argument defaults in tqdm.
default_mininterval: float = 0.1
@staticmethod
def set_default_mininterval(value: float) -> None:
Tqdm.default_mininterval = value
@staticmethod
def set_slower_interval(use_slower_interval: bool) -> None:
"""
If ``use_slower_interval`` is ``True``, we will dramatically slow down ``tqdm's`` default
output rate. ``tqdm's`` default output rate is great for interactively watching progress,
but it is not great for log files. You might want to set this if you are primarily going
to be looking at output through log files, not the terminal.
"""
if use_slower_interval:
Tqdm.default_mininterval = 10.0
else:
Tqdm.default_mininterval = 0.1
@staticmethod
def tqdm(*args, **kwargs):
new_kwargs = {"mininterval": Tqdm.default_mininterval, **kwargs}
return _tqdm(*args, **new_kwargs)
| 33.198718
| 106
| 0.65476
|
4a12204dc6e24ec4a4105bca957c91ed0b72033f
| 4,448
|
py
|
Python
|
openstack_dashboard/api/trove.py
|
aristanetworks/horizon
|
6b4ba5194d46360bf1a436b6f9531facfbf5084a
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/api/trove.py
|
aristanetworks/horizon
|
6b4ba5194d46360bf1a436b6f9531facfbf5084a
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/api/trove.py
|
aristanetworks/horizon
|
6b4ba5194d46360bf1a436b6f9531facfbf5084a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from troveclient.v1 import client
from openstack_dashboard.api import base
from horizon.utils import functions as utils
LOG = logging.getLogger(__name__)
def troveclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
trove_url = base.url_for(request, 'database')
LOG.debug('troveclient connection created using token "%s" and url "%s"' %
(request.user.token.id, trove_url))
c = client.Client(request.user.username,
request.user.token.id,
project_id=request.user.project_id,
auth_url=trove_url,
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = trove_url
return c
def instance_list(request, marker=None):
page_size = utils.get_page_size(request)
return troveclient(request).instances.list(limit=page_size, marker=marker)
def instance_get(request, instance_id):
return troveclient(request).instances.get(instance_id)
def instance_delete(request, instance_id):
return troveclient(request).instances.delete(instance_id)
def instance_create(request, name, volume, flavor, databases=None,
users=None, restore_point=None, nics=None,
datastore=None, datastore_version=None):
# TODO(dklyle): adding conditional to support trove without volume
# support for now until API supports checking for volume support
if volume > 0:
volume_params = {'size': volume}
else:
volume_params = None
return troveclient(request).instances.create(
name,
flavor,
volume=volume_params,
databases=databases,
users=users,
restorePoint=restore_point,
nics=nics,
datastore=datastore,
datastore_version=datastore_version)
def instance_resize_volume(request, instance_id, size):
return troveclient(request).instances.resize_volume(instance_id, size)
def instance_backups(request, instance_id):
return troveclient(request).instances.backups(instance_id)
def instance_restart(request, instance_id):
return troveclient(request).instances.restart(instance_id)
def database_list(request, instance_id):
return troveclient(request).databases.list(instance_id)
def database_delete(request, instance_id, db_name):
return troveclient(request).databases.delete(instance_id, db_name)
def backup_list(request):
return troveclient(request).backups.list()
def backup_get(request, backup_id):
return troveclient(request).backups.get(backup_id)
def backup_delete(request, backup_id):
return troveclient(request).backups.delete(backup_id)
def backup_create(request, name, instance_id, description=None,
parent_id=None):
return troveclient(request).backups.create(name, instance_id,
description, parent_id)
def flavor_list(request):
return troveclient(request).flavors.list()
def flavor_get(request, flavor_id):
return troveclient(request).flavors.get(flavor_id)
def users_list(request, instance_id):
return troveclient(request).users.list(instance_id)
def user_delete(request, instance_id, user):
return troveclient(request).users.delete(instance_id, user)
def user_list_access(request, instance_id, user):
return troveclient(request).users.list_access(instance_id, user)
def datastore_list(request):
return troveclient(request).datastores.list()
def datastore_version_list(request, datastore):
return troveclient(request).datastore_versions.list(datastore)
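# --- Editor's illustration (hedged, not part of Horizon) ---------------------
# A minimal sketch of how a dashboard view might use these wrappers; the
# function name is a placeholder and `request` is an authenticated Django
# request object.
def example_instance_names(request):
    return [instance.name for instance in instance_list(request)]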
| 31.104895
| 78
| 0.716277
|
4a122081f0b79eea5dd7091b4381f27e406f9ef6
| 13,200
|
py
|
Python
|
pdil/tool/fossil/rigging/dogFrontLeg.py
|
patcorwin/fossil
|
8e471c5233e4a2d81dc66bd8e2a3d6387e71ef61
|
[
"BSD-3-Clause"
] | 41
|
2017-04-24T09:43:24.000Z
|
2021-10-06T04:11:43.000Z
|
pdil/tool/fossil/rigging/dogFrontLeg.py
|
patcorwin/fossil
|
8e471c5233e4a2d81dc66bd8e2a3d6387e71ef61
|
[
"BSD-3-Clause"
] | 22
|
2018-04-18T21:56:01.000Z
|
2021-08-05T20:57:45.000Z
|
pdil/tool/fossil/rigging/dogFrontLeg.py
|
patcorwin/fossil
|
8e471c5233e4a2d81dc66bd8e2a3d6387e71ef61
|
[
"BSD-3-Clause"
] | 9
|
2017-04-24T09:43:27.000Z
|
2021-05-14T05:38:33.000Z
|
'''
reload(pdil.tool.fossil.rigging.dogFrontLeg)
c = PyNode('asdf_card')
c.removeRig()
c.removeBones()
pdil.tool.fossil.main.RigTool.buildBones([c])
pdil.tool.fossil.main.RigTool.buildRig([c])
c.outputCenter.fk.IkSwitch.set(1)
c.outputCenter.ik.display.set(0)
pointConstraint('asdf02', 'locator5', mo=0)
pointConstraint('asdf04', 'locator4', mo=0)
'''
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import math
from pymel.core import createNode, delete, dt, expression, group, hide, ikHandle, orientConstraint, parentConstraint, poleVectorConstraint, pointConstraint, PyNode, xform
import pdil
from .... import core
from .... import lib
from .... import nodeApi
from .. import controllerShape
from ..cardRigging import MetaControl, ParamInfo
from .. import space
from . import _util as util
from .. import rig
from .. import node
@util.adds('stretch', 'length')
@util.defaultspec( {'shape': 'box', 'size': 10, 'color': 'green 0.22' },
bend={'shape': 'disc', 'size': 10, 'color': 'green 0.22' },
pv={'shape': 'sphere', 'size': 5, 'color': 'green 0.22' },
socket={'shape': 'sphere', 'size': 5, 'color': 'green 0.22', 'visGroup': 'socket' } )
def buildDogFrontLeg(hipJoint, end, aim='x', upVector=dt.Vector(1, 0, 0), pvLen=None, name='Dogleg', endOrientType=util.EndOrient.TRUE_ZERO_FOOT, groupName='', controlSpec={}):
boundChain = util.getChain(hipJoint, end)
container = group(n=name + '_dogFrontleg', em=True, p=node.mainGroup())
chainGrp = group( p=container, n=name + "_ikChain", em=True )
parentConstraint( hipJoint.getParent(), chainGrp, mo=True )
# Make the control to translate/offset the limb's socket.
socketOffset = controllerShape.build( name + '_socket', controlSpec['socket'], type=controllerShape.ControlType.TRANSLATE )
core.dagObj.lock(socketOffset, 'r s')
core.dagObj.moveTo( socketOffset, hipJoint )
socketZero = core.dagObj.zero(socketOffset)
socketZero.setParent( chainGrp )
footCtrl = controllerShape.build( name, controlSpec['main'], type=controllerShape.ControlType.IK)
core.dagObj.lock(footCtrl, 's')
core.dagObj.moveTo( footCtrl, end )
if endOrientType == util.EndOrient.TRUE_ZERO:
util.trueZeroSetup(end, footCtrl)
elif endOrientType == util.EndOrient.TRUE_ZERO_FOOT:
util.trueZeroFloorPlane(end, footCtrl)
elif endOrientType == util.EndOrient.JOINT:
core.dagObj.matchTo(footCtrl, end)
footCtrl.rx.set( util.shortestAxis(footCtrl.rx.get()) )
footCtrl.ry.set( util.shortestAxis(footCtrl.ry.get()) )
footCtrl.rz.set( util.shortestAxis(footCtrl.rz.get()) )
core.dagObj.zero(footCtrl)
elif endOrientType == util.EndOrient.WORLD:
# Do nothing, it's built world oriented
pass
util.createMatcher(footCtrl, end).setParent(container)
# Make the main ik chain which gives overall compression
masterChain = util.dupChain(hipJoint, end, '{0}_compress')
masterChain[-1].setParent( masterChain[-3] )
pdil.anim.orientJoint(masterChain[-3], masterChain[-1], aim=aim, upVector=upVector)
delete( masterChain[-2] )
del masterChain[-2]
refChain = util.dupChain(hipJoint, end, '{0}_ref')
hide(refChain[0])
refChain[0].setParent( socketOffset )
refIk = util.ikRP('refIk', refChain[0], refChain[-1])
refIk.setParent( footCtrl )
pdil.dagObj.lock(refIk)
mainIk = ikHandle( sol='ikRPsolver', sj=masterChain[0], ee=masterChain[-1] )[0]
PyNode('ikSpringSolver').message >> mainIk.ikSolver
mainIk.rename('mainIk')
hide(mainIk)
masterChain[0].setParent( socketOffset )
# Create the polevector. This needs to happen first so things don't flip out later
out = util.calcOutVector(masterChain[0], masterChain[1], masterChain[-1])
if not pvLen or pvLen < 0:
pvLen = util.chainLength(masterChain[1:]) * 0.5
pvPos = out * pvLen + dt.Vector(xform(boundChain[1], q=True, ws=True, t=True))
pvCtrl = controllerShape.build( name + '_pv', controlSpec['pv'], type=controllerShape.ControlType.POLEVECTOR )
core.dagObj.lock(pvCtrl, 'r s')
xform(pvCtrl, ws=True, t=pvPos)
poleVectorConstraint( pvCtrl, mainIk )
# Verify the knees are in the same place
delta = boundChain[1].getTranslation('world') - masterChain[1].getTranslation('world')
if delta.length() > 0.1:
mainIk.twist.set(180)
# Make sub IKs so the chain can be offset
offsetChain = util.dupChain(hipJoint, end)
hide(offsetChain[0])
offsetChain[0].rename( 'OffsetChain' )
offsetChain[0].setParent(container)
controllerShape.connectingLine(pvCtrl, offsetChain[1] )
constraints = util.constrainAtoB( util.getChain(hipJoint, end), offsetChain, mo=False )
pointConstraint( masterChain[0], offsetChain[0] )
ankleIk = util.ikRP('ankle', offsetChain[0], offsetChain[-2])
offsetIk = util.ikRP( 'metatarsusIk', offsetChain[-2], offsetChain[-1])
bend = controllerShape.build( name + '_bend', controlSpec['bend'], type=controllerShape.ControlType.ROTATE )
offsetContainer = group(em=True, n='OffsetSpace')
offsetContainer.setParent( footCtrl )
pdil.dagObj.moveTo(offsetContainer, end)
if end.tx.get() < 0:
lib.anim.orientJoint(offsetContainer, boundChain[-2], upTarget=boundChain[-3], aim='-y', up='-x')
else:
lib.anim.orientJoint(offsetContainer, boundChain[-2], upTarget=boundChain[-3], aim='y', up='x')
bend.setParent(offsetContainer)
bend.t.set(0, 0, 0)
bend.r.set(0, 0, 0)
core.dagObj.zero(bend)
pdil.dagObj.lock( bend, 't s' )
parentConstraint( masterChain[-1], offsetContainer, mo=True )
''' NOTE - This is from dog hind leg. I need to find a repro to test.
This is really dumb.
Sometimes maya will rotate everything by 180 but I'm not sure how to
calculate the proper offset, which normally results in one axis being off
by 360, so account for that too.
'''
temp = orientConstraint( footCtrl, offsetChain[-1], mo=True)
if not core.math.isClose( offsetChain[-1].r.get(), [0, 0, 0] ):
badVals = offsetChain[-1].r.get()
delete(temp)
offsetChain[-1].r.set( -badVals )
temp = orientConstraint( footCtrl, offsetChain[-1], mo=True)
for a in 'xyz':
val = offsetChain[-1].attr('r' + a).get()
if abs(val - 360) < 0.00001:
attr = temp.attr( 'offset' + a.upper() )
attr.set( attr.get() - 360 )
elif abs(val + 360) < 0.00001:
attr = temp.attr( 'offset' + a.upper() )
attr.set( attr.get() + 360 )
# Hopefully the end of dumbness
mainIk.setParent( footCtrl )
offsetIk.setParent( footCtrl )
core.dagObj.zero(footCtrl).setParent( container )
hide(masterChain[0])
poleVectorConstraint( pvCtrl, ankleIk )
poleVectorConstraint( pvCtrl, offsetIk )
# Adding the pv constraint might require a counter rotation of the offsetIk
counterTwist = offsetChain[-2].rx.get() * (1.0 if offsetChain[-2].tx.get() < 0 else -1.0)
offsetIk.twist.set( counterTwist )
core.dagObj.zero(pvCtrl).setParent( container )
# Make stretchy ik, but the secondary chain needs the stretch hooked up too
strechPlug, _, nodes = util.makeStretchyNonSpline(footCtrl, refIk)
for src, dest in zip( refChain[1:], offsetChain[1:] ):
src.tx >> dest.tx
refChain[1].tx >> masterChain[1].tx
# Law of cosines to determine the master chain's 'forearm' bone length
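    # (c**2 = a**2 + b**2 - 2*a*b*cos(C), with C the angle between the last two
    # reference bones, so the merged end bone keeps the correct length as the
    # reference chain stretches.)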
formula = '{jnt} = sqrt( pow({sideA}, 2) + pow({sideB}, 2) - 2 * {sideA} * {sideB} * cos({angle}) );'\
.format(
jnt=masterChain[-1].tx,
sideA=refChain[-1].tx,
sideB=refChain[-2].tx,
angle=math.radians(util.angleBetween(*refChain[-3:])[0])
)
expression( s=formula )
ankleIk.setParent( bend )
# Finish setting up the bend control to be lerpable from user controlled to fully straight
bendAnchor = group(em=True, n='bendAnchor')
core.dagObj.matchTo(bendAnchor, ankleIk)
bendAnchor.setParent( bend )
refChain[3].tx >> bendAnchor.ty
autoStraighten = pointConstraint( [bendAnchor, refChain[-2]], ankleIk )
dynamicW, straightW = autoStraighten.getWeightAliasList()
    # `straighten` is the normalized limb extension at which auto-straightening begins.
    # remapValue ramp: (0,0) -> (straighten,0) -> (1,1); the straight weight stays 0
    # until extension passes `straighten`, then blends toward fully straight.
straighten = .95
remap = createNode('remapValue')
remap.value[2].value_Position.set(straighten)
remap.value[2].value_FloatValue.set(0)
remap.value[2].value_Interp.set(1)
pdil.math.divide( nodes['distToController'], nodes['computedTotalScaled'] ) >> remap.inputValue
dynamicW.set(1)
straightW.set(0)
remap.outValue >> straightW
pdil.math.opposite( remap.outValue ) >> dynamicW
util.drive(footCtrl, 'straighten', remap.value[2].value_Position, 0, 1, dv=straighten)
footCtrl.straighten.set(straighten)
#-
footCtrl = nodeApi.RigController.convert(footCtrl)
footCtrl.container = container
footCtrl.subControl['socket'] = socketOffset
footCtrl.subControl['pv'] = pvCtrl
footCtrl.subControl['bend'] = bend
# Add default spaces
space.addMain( pvCtrl )
space.add( pvCtrl, footCtrl )
space.add( pvCtrl, footCtrl, mode=space.Mode.TRANSLATE)
if hipJoint.getParent():
space.add( pvCtrl, hipJoint.getParent())
space.addMain( footCtrl )
space.add( footCtrl, hipJoint.getParent() )
return footCtrl, constraints
class DogFrontLeg(MetaControl):
''' 4 joint dog front leg.
Acts like a 3 joint ik, with the end two joints
moving as a single bone but bendable via controller.
'''
'''
`Seg Leg #` is -10 to 10
-10 = Zero length
0 = Original length
10 = 2x original length
Length is -10 to 10
-10 = Half length
0 = Original length
10 = 2x length
'''
ik_ = 'pdil.tool.fossil.rigging.dogFrontLeg.buildDogFrontLeg'
fkArgs = {'translatable': True}
ikInput = OrderedDict( [
('name', ParamInfo( 'Name', 'Name', ParamInfo.STR, 'Leg')),
('pvLen', ParamInfo('PV Length', 'How far the pole vector should be from the chain', ParamInfo.FLOAT, default=0) ),
('endOrientType', ParamInfo('Control Orient', 'How to orient the last control', ParamInfo.ENUM, default=util.EndOrient.TRUE_ZERO_FOOT, enum=util.EndOrient.asChoices())),
] )
class activator(object):
@staticmethod
def getIkHandle(ctrl):
for ik in ctrl.listRelatives(type='ikHandle'):
if ik.name().count( 'metatarsusIk' ):
return ik
else:
raise Exception('Unable to determine IK handle on {0} to match'.format(ctrl))
@classmethod
def prep(cls, ctrl):
ik = cls.getIkHandle(ctrl)
chain = util.getChainFromIk(ik)
chain.insert(0, chain[0].getParent() )
chain.insert(0, chain[0].getParent() )
bound = util.getConstraineeChain(chain)
return {
'matcher': util.getMatcher(ctrl),
'hip': bound[0],
'knee': bound[1],
'ankle': bound[2],
'ball': bound[3],
}
@staticmethod
def harvest(objects):
return {
'matcher': util.worldInfo( objects['matcher']),
'hip': util.worldInfo( objects['hip']),
'knee': util.worldInfo( objects['knee']),
'ankle': util.worldInfo( objects['ankle']),
'ball': util.worldInfo( objects['ball']),
'length': abs(sum( [b.tx.get() for b in (objects['knee'], objects['ankle'], objects['ball'])] )),
'ankleMatrix': xform( objects['ankle'], q=True, ws=True, m=True),
}
WORLD_INFO = ['matcher', 'hip', 'knee', 'ankle', 'ball']
@classmethod
def split(cls, values):
''' Turns all the `worldInfo` into separate dictionaries. '''
pos, rot = {}, {}
for key in cls.WORLD_INFO:
pos[key] = dt.Vector( values[key][0] )
rot[key] = values[key][1]
return pos, rot
@classmethod
def apply(cls, objects, values, ctrl):
pos, rot = cls.split(values)
out = rig.calcOutVector(pos['hip'], pos['knee'], pos['ankle'])
out *= values['length']
pvPos = values['knee'][0] + out
util.applyWorldInfo(ctrl, values['matcher'])
xform( ctrl.subControl['pv'], ws=True, t=pvPos )
# Aim Y at ball
matrix = values['ankleMatrix']
bendNormal = dt.Vector(matrix[4:7]) * -1.0
ybasis = (pos['ankle'] - pos['ball']).normal()
xbasis = ybasis.cross( bendNormal )
zbasis = xbasis.cross( ybasis )
if objects['ball'].tx.get() < 0:
ybasis *= -1
xbasis *= -1
r = pdil.math.eulerFromMatrix( [xbasis, ybasis, zbasis], degrees=True )
xform( ctrl.subControl['bend'], ws=True, ro=r )
| 35.869565
| 177
| 0.629848
|
4a12208acbdafa2247fc2a27452b0e54d30cb63a
| 10,241
|
py
|
Python
|
models/pytorch-image-models/timm/models/densenet.py
|
crutcher/stylelens
|
8df3704f56fe6a30395eadcb1aee2e11563dfabb
|
[
"MIT"
] | null | null | null |
models/pytorch-image-models/timm/models/densenet.py
|
crutcher/stylelens
|
8df3704f56fe6a30395eadcb1aee2e11563dfabb
|
[
"MIT"
] | null | null | null |
models/pytorch-image-models/timm/models/densenet.py
|
crutcher/stylelens
|
8df3704f56fe6a30395eadcb1aee2e11563dfabb
|
[
"MIT"
] | null | null | null |
"""Pytorch Densenet implementation w/ tweaks
This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with
fixed kwargs passthrough and addition of dynamic global avg/max pool.
"""
import re
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .adaptive_avgmax_pool import select_adaptive_pool2d
from .helpers import load_pretrained
from .registry import register_model
__all__ = ["DenseNet"]
def _cfg(url=""):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 224, 224),
"pool_size": (7, 7),
"crop_pct": 0.875,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
"first_conv": "features.conv0",
"classifier": "classifier",
}
default_cfgs = {
"densenet121": _cfg(
url="https://download.pytorch.org/models/densenet121-a639ec97.pth"
),
"densenet169": _cfg(
url="https://download.pytorch.org/models/densenet169-b2777c0a.pth"
),
"densenet201": _cfg(
url="https://download.pytorch.org/models/densenet201-c1103571.pth"
),
"densenet161": _cfg(
url="https://download.pytorch.org/models/densenet161-8d451a50.pth"
),
}
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module("norm1", nn.BatchNorm2d(num_input_features)),
self.add_module("relu1", nn.ReLU(inplace=True)),
self.add_module(
"conv1",
nn.Conv2d(
num_input_features,
bn_size * growth_rate,
kernel_size=1,
stride=1,
bias=False,
),
),
self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module("relu2", nn.ReLU(inplace=True)),
self.add_module(
"conv2",
nn.Conv2d(
bn_size * growth_rate,
growth_rate,
kernel_size=3,
stride=1,
padding=1,
bias=False,
),
),
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(
new_features, p=self.drop_rate, training=self.training
)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate
)
self.add_module("denselayer%d" % (i + 1), layer)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module("norm", nn.BatchNorm2d(num_input_features))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module(
"conv",
nn.Conv2d(
num_input_features,
num_output_features,
kernel_size=1,
stride=1,
bias=False,
),
)
self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
"""
def __init__(
self,
growth_rate=32,
block_config=(6, 12, 24, 16),
num_init_features=64,
bn_size=4,
drop_rate=0,
num_classes=1000,
in_chans=3,
global_pool="avg",
):
self.global_pool = global_pool
self.num_classes = num_classes
super(DenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(
OrderedDict(
[
(
"conv0",
nn.Conv2d(
in_chans,
num_init_features,
kernel_size=7,
stride=2,
padding=3,
bias=False,
),
),
("norm0", nn.BatchNorm2d(num_init_features)),
("relu0", nn.ReLU(inplace=True)),
("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
)
self.features.add_module("denseblock%d" % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(
num_input_features=num_features,
num_output_features=num_features // 2,
)
self.features.add_module("transition%d" % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module("norm5", nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
self.num_features = num_features
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool="avg"):
self.global_pool = global_pool
self.num_classes = num_classes
del self.classifier
if num_classes:
self.classifier = nn.Linear(self.num_features, num_classes)
else:
self.classifier = None
def forward_features(self, x, pool=True):
x = self.features(x)
x = F.relu(x, inplace=True)
if pool:
x = select_adaptive_pool2d(x, self.global_pool)
x = x.view(x.size(0), -1)
return x
def forward(self, x):
return self.classifier(self.forward_features(x, pool=True))
def _filter_pretrained(state_dict):
pattern = re.compile(
r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
)
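    # e.g. 'features.denseblock1.denselayer1.norm.1.weight' (old torchvision
    # checkpoints) is collapsed to 'features.denseblock1.denselayer1.norm1.weight'.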
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
return state_dict
@register_model
def densenet121(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
default_cfg = default_cfgs["densenet121"]
model = DenseNet(
num_init_features=64,
growth_rate=32,
block_config=(6, 12, 24, 16),
num_classes=num_classes,
in_chans=in_chans,
**kwargs
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(
model, default_cfg, num_classes, in_chans, filter_fn=_filter_pretrained
)
return model
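# Minimal usage sketch (assumes torch is importable; shapes are illustrative):
#   model = densenet121(pretrained=False, num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> tensor of shape (1, 10)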
@register_model
def densenet169(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
default_cfg = default_cfgs["densenet169"]
model = DenseNet(
num_init_features=64,
growth_rate=32,
block_config=(6, 12, 32, 32),
num_classes=num_classes,
in_chans=in_chans,
**kwargs
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(
model, default_cfg, num_classes, in_chans, filter_fn=_filter_pretrained
)
return model
@register_model
def densenet201(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
default_cfg = default_cfgs["densenet201"]
model = DenseNet(
num_init_features=64,
growth_rate=32,
block_config=(6, 12, 48, 32),
num_classes=num_classes,
in_chans=in_chans,
**kwargs
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(
model, default_cfg, num_classes, in_chans, filter_fn=_filter_pretrained
)
return model
@register_model
def densenet161(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
default_cfg = default_cfgs["densenet161"]
model = DenseNet(
num_init_features=96,
growth_rate=48,
block_config=(6, 12, 36, 24),
num_classes=num_classes,
in_chans=in_chans,
**kwargs
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(
model, default_cfg, num_classes, in_chans, filter_fn=_filter_pretrained
)
return model
| 31.903427
| 104
| 0.590567
|
4a1220a9db59bf1f46ea38fb93d43dec0d73818a
| 1,591
|
py
|
Python
|
test/cli/test_output.py
|
mnalband/schemathesis
|
42b351589fae3f407a1de248750bc82d6b5796d1
|
[
"MIT"
] | null | null | null |
test/cli/test_output.py
|
mnalband/schemathesis
|
42b351589fae3f407a1de248750bc82d6b5796d1
|
[
"MIT"
] | null | null | null |
test/cli/test_output.py
|
mnalband/schemathesis
|
42b351589fae3f407a1de248750bc82d6b5796d1
|
[
"MIT"
] | null | null | null |
import pytest
from schemathesis import runner, utils
from schemathesis.cli import output
@pytest.mark.parametrize(
"title,separator,printed,expected",
[
("TEST", "-", "data in section", "------- TEST -------\ndata in section\n--------------------\n"),
("TEST", "*", "data in section", "******* TEST *******\ndata in section\n********************\n"),
],
)
def test_print_in_section(title, separator, printed, expected):
with utils.stdout_listener() as getvalue:
with output.print_in_section(title, separator=separator, line_length=20):
print(printed)
printed = getvalue()
assert printed == expected
def test_pretty_print_stats(mocker):
mocker.patch("schemathesis.cli.output.print_in_section")
with utils.stdout_listener() as getvalue:
output.pretty_print_stats(
runner.StatsCollector(
{
"not_a_server_error": {"total": 5, "ok": 3, "error": 2},
"different_check": {"total": 1, "ok": 1, "error": 0},
}
)
)
result = getvalue()
assert result == (
"not_a_server_error 3 / 5 passed FAILED \n"
"different_check 1 / 1 passed PASSED \n"
)
def test_pretty_print_stats_empty(mocker):
mocker.patch("schemathesis.cli.output.print_in_section")
with utils.stdout_listener() as getvalue:
output.pretty_print_stats(runner.StatsCollector({}))
result = getvalue()
assert result == "No checks were performed.\n"
| 31.196078
| 106
| 0.577624
|
4a1220b1c14c945c19909758da855bb0085e78bc
| 7,671
|
py
|
Python
|
tests/integration-tests/benchmarks/common/metrics_reporter.py
|
Takuya-Miyazaki/aws-parallelcluster
|
ef3393fe6430f9a6232cf0368b1d3177f1304b82
|
[
"Apache-2.0"
] | 1
|
2020-10-15T16:33:20.000Z
|
2020-10-15T16:33:20.000Z
|
tests/integration-tests/benchmarks/common/metrics_reporter.py
|
QPC-database/aws-parallelcluster
|
8c2e9595ca171340df21695c27d85dc00f19d3e4
|
[
"Apache-2.0"
] | 109
|
2020-05-29T08:20:29.000Z
|
2022-02-21T09:32:47.000Z
|
tests/integration-tests/benchmarks/common/metrics_reporter.py
|
demartinofra/aws-parallelcluster
|
6f8e978b9ef2cea8855a80112b26b9968543a7e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import datetime
import logging
import os
from time import sleep
import boto3
from retrying import RetryError, retry
from time_utils import seconds
from utils import _describe_cluster_instances
METRIC_WIDGET_TEMPLATE = """
{{
"metrics": [
[ "ParallelCluster/benchmarking/{cluster_name}", "ComputeNodesCount", {{ "stat": "Maximum", "label": \
"ComputeNodesCount Max" }} ],
[ "...", {{ "stat": "Minimum", "label": "ComputeNodesCount Min" }} ],
[ "ParallelCluster/benchmarking/{cluster_name}", "EC2NodesCount", {{ "stat": "Maximum", "label": \
"EC2NodesCount Max" }} ],
[ "...", {{ "stat": "Minimum", "label": "EC2NodesCount Min" }} ]
],
"view": "timeSeries",
"stacked": false,
"stat": "Maximum",
"period": 1,
"title": "{title}",
"width": 1400,
"height": 700,
"start": "{graph_start_time}",
"end": "{graph_end_time}",
"annotations": {{
"horizontal": [
{{
"label": "Scaling Target",
"value": {scaling_target}
}}
],
"vertical": [
{{
"label": "Start Time",
"value": "{start_time}"
}},
{{
"label": "End Time",
"value": "{end_time}"
}}
]
}},
"yAxis": {{
"left": {{
"showUnits": false,
"label": "Count"
}},
"right": {{
"showUnits": true
}}
}}
}}"""
def publish_compute_nodes_metric(scheduler_commands, max_monitoring_time, region, cluster_name):
logging.info("Monitoring scheduler status and publishing metrics")
cw_client = boto3.client("cloudwatch", region_name=region)
compute_nodes_time_series = []
ec2_nodes_time_series = []
timestamps = [datetime.datetime.utcnow()]
@retry(
# Retry until EC2 and Scheduler capacities scale down to 0
# Also make sure cluster scaled up before scaling down
retry_on_result=lambda _: ec2_nodes_time_series[-1] != 0
or compute_nodes_time_series[-1] != 0
or max(ec2_nodes_time_series) == 0
or max(compute_nodes_time_series) == 0,
wait_fixed=seconds(20),
stop_max_delay=max_monitoring_time,
)
def _watch_compute_nodes_allocation():
try:
compute_nodes = scheduler_commands.compute_nodes_count()
logging.info("Publishing schedueler compute metric: count={0}".format(compute_nodes))
cw_client.put_metric_data(
Namespace="ParallelCluster/benchmarking/{cluster_name}".format(cluster_name=cluster_name),
MetricData=[{"MetricName": "ComputeNodesCount", "Value": compute_nodes, "Unit": "Count"}],
)
ec2_instances_count = len(_describe_cluster_instances(cluster_name, region, filter_by_node_type="Compute"))
logging.info("Publishing EC2 compute metric: count={0}".format(ec2_instances_count))
cw_client.put_metric_data(
Namespace="ParallelCluster/benchmarking/{cluster_name}".format(cluster_name=cluster_name),
MetricData=[{"MetricName": "EC2NodesCount", "Value": ec2_instances_count, "Unit": "Count"}],
)
# add values only if there is a transition.
if (
len(ec2_nodes_time_series) == 0
or ec2_nodes_time_series[-1] != ec2_instances_count
or compute_nodes_time_series[-1] != compute_nodes
):
ec2_nodes_time_series.append(ec2_instances_count)
compute_nodes_time_series.append(compute_nodes)
timestamps.append(datetime.datetime.utcnow())
except Exception as e:
logging.warning("Failed while watching nodes allocation with exception: %s", e)
raise
try:
_watch_compute_nodes_allocation()
except RetryError:
# ignoring this error in order to perform assertions on the collected data.
pass
end_time = datetime.datetime.utcnow()
logging.info(
"Monitoring completed: compute_nodes_time_series [ %s ], timestamps [ %s ]",
" ".join(map(str, compute_nodes_time_series)),
" ".join(map(str, timestamps)),
)
logging.info("Sleeping for 3 minutes to wait for the metrics to propagate...")
sleep(180)
return compute_nodes_time_series, timestamps, end_time
def enable_asg_metrics(region, cluster):
logging.info("Enabling ASG metrics for %s", cluster.asg)
boto3.client("autoscaling", region_name=region).enable_metrics_collection(
AutoScalingGroupName=cluster.asg,
Metrics=["GroupDesiredCapacity", "GroupInServiceInstances", "GroupTerminatingInstances"],
Granularity="1Minute",
)
def _publish_metric(region, instance, os, scheduler, state, count):
cw_client = boto3.client("cloudwatch", region_name=region)
logging.info("Publishing metric: state={0} count={1}".format(state, count))
cw_client.put_metric_data(
Namespace="parallelcluster/benchmarking/test_scaling_speed/{region}/{instance}/{os}/{scheduler}".format(
region=region, instance=instance, os=os, scheduler=scheduler
),
MetricData=[
{
"MetricName": "ComputeNodesCount",
"Dimensions": [{"Name": "state", "Value": state}],
"Value": count,
"Unit": "Count",
}
],
)
def produce_benchmark_metrics_report(
benchmark_params, region, cluster_name, start_time, end_time, scaling_target, request
):
title = ", ".join("{0}={1}".format(key, val) for (key, val) in benchmark_params.items())
graph_start_time = _to_datetime(start_time) - datetime.timedelta(minutes=2)
graph_end_time = _to_datetime(end_time) + datetime.timedelta(minutes=2)
widget_metric = METRIC_WIDGET_TEMPLATE.format(
cluster_name=cluster_name,
start_time=start_time,
end_time=end_time,
title=title,
graph_start_time=graph_start_time,
graph_end_time=graph_end_time,
scaling_target=scaling_target,
)
logging.info(widget_metric)
cw_client = boto3.client("cloudwatch", region_name=region)
response = cw_client.get_metric_widget_image(MetricWidget=widget_metric)
_write_results_to_outdir(request, response["MetricWidgetImage"])
def _to_datetime(timestamp):
return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z")
def _write_results_to_outdir(request, image_bytes):
out_dir = request.config.getoption("output_dir")
os.makedirs("{out_dir}/benchmarks".format(out_dir=out_dir), exist_ok=True)
graph_dst = "{out_dir}/benchmarks/{test_name}.png".format(
out_dir=out_dir, test_name=request.node.nodeid.replace("::", "-")
)
with open(graph_dst, "wb") as image:
image.write(image_bytes)
| 39.541237
| 119
| 0.626124
|
4a1221d1880b3c7718a059fe2b4c4cc207001c83
| 13,225
|
py
|
Python
|
openquake.hazardlib/openquake/hazardlib/tests/acceptance/peer_test.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
openquake.hazardlib/openquake/hazardlib/tests/acceptance/peer_test.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
openquake.hazardlib/openquake/hazardlib/tests/acceptance/peer_test.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
# The Hazard Library
# Copyright (C) 2012-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tests are based on report "PEER 2010/106 - Verification of Probabilistic
Seismic Hazard Analysis Computer Programs" by Patricia Thomas, Ivan Wong,
Norman Abrahamson, see
`http://peer.berkeley.edu/publications/peer_reports/reports_2010/web_PEER_10106_THOMASetal.pdf`_.
"""
import unittest
from decimal import Decimal
import numpy
from openquake.hazardlib import const
from openquake.hazardlib.site import SiteCollection
from openquake.hazardlib.source import AreaSource, SimpleFaultSource, \
NonParametricSeismicSource, Rupture
from openquake.hazardlib.pmf import PMF
from openquake.hazardlib.geo import NodalPlane, RectangularMesh, \
SimpleFaultSurface, Point
from openquake.hazardlib.scalerel import PeerMSR, PointMSR
from openquake.hazardlib.gsim.sadigh_1997 import SadighEtAl1997
from openquake.hazardlib.calc.hazard_curve import calc_hazard_curves
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.tests.acceptance import _peer_test_data as test_data
def assert_hazard_curve_is(testcase, actual, expected, atol, rtol):
actual, expected = numpy.array(actual), numpy.array(expected)
testcase.assertTrue(numpy.allclose(actual, expected, atol=atol, rtol=rtol),
"%s != %s" % (actual, expected))
class Set1TestCase(unittest.TestCase):
def test_case_10(self):
hypocenter_pmf = PMF([(1, test_data.SET1_CASE10_HYPOCENTER_DEPTH)])
sources = [AreaSource(
source_id='area', name='area',
tectonic_region_type=const.TRT.ACTIVE_SHALLOW_CRUST,
mfd=test_data.SET1_CASE10_MFD,
nodal_plane_distribution=PMF([(1, NodalPlane(0.0, 90.0, 0.0))]),
hypocenter_distribution=hypocenter_pmf,
upper_seismogenic_depth=0.0,
lower_seismogenic_depth=10.0,
magnitude_scaling_relationship=PointMSR(),
rupture_aspect_ratio=test_data.SET1_RUPTURE_ASPECT_RATIO,
temporal_occurrence_model=PoissonTOM(1.),
polygon=test_data.SET1_CASE10_SOURCE_POLYGON,
area_discretization=10.0,
rupture_mesh_spacing=10.0
)]
sites = SiteCollection([
test_data.SET1_CASE10_SITE1, test_data.SET1_CASE10_SITE2,
test_data.SET1_CASE10_SITE3, test_data.SET1_CASE10_SITE4
])
gsims = {const.TRT.ACTIVE_SHALLOW_CRUST: SadighEtAl1997()}
truncation_level = 0
imts = {str(test_data.IMT): test_data.SET1_CASE10_IMLS}
curves = calc_hazard_curves(
sources, sites, imts, gsims, truncation_level)
s1hc, s2hc, s3hc, s4hc = curves[str(test_data.IMT)]
assert_hazard_curve_is(self, s1hc, test_data.SET1_CASE10_SITE1_POES,
atol=1e-4, rtol=1e-1)
assert_hazard_curve_is(self, s2hc, test_data.SET1_CASE10_SITE2_POES,
atol=1e-4, rtol=1e-1)
assert_hazard_curve_is(self, s3hc, test_data.SET1_CASE10_SITE3_POES,
atol=1e-4, rtol=1e-1)
assert_hazard_curve_is(self, s4hc, test_data.SET1_CASE10_SITE4_POES,
atol=1e-4, rtol=1e-1)
def test_case_11(self):
hypocenter_probability = (
Decimal(1) / len(test_data.SET1_CASE11_HYPOCENTERS)
)
hypocenter_pmf = PMF([
(hypocenter_probability, hypocenter)
for hypocenter in test_data.SET1_CASE11_HYPOCENTERS
])
# apart from hypocenter pmf repeats case 10
sources = [AreaSource(
source_id='area', name='area',
tectonic_region_type=const.TRT.ACTIVE_SHALLOW_CRUST,
mfd=test_data.SET1_CASE11_MFD,
nodal_plane_distribution=PMF([(1, NodalPlane(0.0, 90.0, 0.0))]),
hypocenter_distribution=hypocenter_pmf,
upper_seismogenic_depth=0.0,
lower_seismogenic_depth=10.0,
magnitude_scaling_relationship=PointMSR(),
rupture_aspect_ratio=test_data.SET1_RUPTURE_ASPECT_RATIO,
temporal_occurrence_model=PoissonTOM(1.),
polygon=test_data.SET1_CASE11_SOURCE_POLYGON,
area_discretization=10.0,
rupture_mesh_spacing=10.0
)]
sites = SiteCollection([
test_data.SET1_CASE11_SITE1, test_data.SET1_CASE11_SITE2,
test_data.SET1_CASE11_SITE3, test_data.SET1_CASE11_SITE4
])
gsims = {const.TRT.ACTIVE_SHALLOW_CRUST: SadighEtAl1997()}
truncation_level = 0
imts = {str(test_data.IMT): test_data.SET1_CASE11_IMLS}
curves = calc_hazard_curves(
sources, sites, imts, gsims, truncation_level)
s1hc, s2hc, s3hc, s4hc = curves[str(test_data.IMT)]
assert_hazard_curve_is(self, s1hc, test_data.SET1_CASE11_SITE1_POES,
atol=1e-4, rtol=1e-1)
assert_hazard_curve_is(self, s2hc, test_data.SET1_CASE11_SITE2_POES,
atol=1e-4, rtol=1e-1)
assert_hazard_curve_is(self, s3hc, test_data.SET1_CASE11_SITE3_POES,
atol=1e-4, rtol=1e-1)
assert_hazard_curve_is(self, s4hc, test_data.SET1_CASE11_SITE4_POES,
atol=1e-4, rtol=1e-1)
def test_case_2(self):
sources = [SimpleFaultSource(
source_id='fault1', name='fault1',
tectonic_region_type=const.TRT.ACTIVE_SHALLOW_CRUST,
mfd=test_data.SET1_CASE2_MFD,
rupture_mesh_spacing=1.0,
magnitude_scaling_relationship=PeerMSR(),
rupture_aspect_ratio=test_data.SET1_RUPTURE_ASPECT_RATIO,
temporal_occurrence_model=PoissonTOM(1.),
upper_seismogenic_depth=test_data.SET1_CASE1TO9_UPPER_SEISMOGENIC_DEPTH,
lower_seismogenic_depth=test_data.SET1_CASE1TO9_LOWER_SEISMOGENIC_DEPTH,
fault_trace=test_data.SET1_CASE1TO9_FAULT_TRACE,
dip=test_data.SET1_CASE1TO9_DIP,
rake=test_data.SET1_CASE1TO9_RAKE
)]
sites = SiteCollection([
test_data.SET1_CASE1TO9_SITE1, test_data.SET1_CASE1TO9_SITE2,
test_data.SET1_CASE1TO9_SITE3, test_data.SET1_CASE1TO9_SITE4,
test_data.SET1_CASE1TO9_SITE5, test_data.SET1_CASE1TO9_SITE6,
test_data.SET1_CASE1TO9_SITE7
])
gsims = {const.TRT.ACTIVE_SHALLOW_CRUST: SadighEtAl1997()}
truncation_level = 0
imts = {str(test_data.IMT): test_data.SET1_CASE2_IMLS}
curves = calc_hazard_curves(
sources, sites, imts, gsims, truncation_level)
s1hc, s2hc, s3hc, s4hc, s5hc, s6hc, s7hc = curves[str(test_data.IMT)]
assert_hazard_curve_is(self, s1hc, test_data.SET1_CASE2_SITE1_POES,
atol=3e-3, rtol=1e-5)
assert_hazard_curve_is(self, s2hc, test_data.SET1_CASE2_SITE2_POES,
atol=2e-5, rtol=1e-5)
assert_hazard_curve_is(self, s3hc, test_data.SET1_CASE2_SITE3_POES,
atol=2e-5, rtol=1e-5)
assert_hazard_curve_is(self, s4hc, test_data.SET1_CASE2_SITE4_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s5hc, test_data.SET1_CASE2_SITE5_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s6hc, test_data.SET1_CASE2_SITE6_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s7hc, test_data.SET1_CASE2_SITE7_POES,
atol=2e-5, rtol=1e-5)
def test_case_5(self):
# only mfd differs from case 2
sources = [SimpleFaultSource(source_id='fault1', name='fault1',
tectonic_region_type=const.TRT.ACTIVE_SHALLOW_CRUST,
mfd=test_data.SET1_CASE5_MFD,
rupture_mesh_spacing=1.0,
magnitude_scaling_relationship=PeerMSR(),
rupture_aspect_ratio=test_data.SET1_RUPTURE_ASPECT_RATIO,
temporal_occurrence_model=PoissonTOM(1.),
upper_seismogenic_depth=test_data.SET1_CASE1TO9_UPPER_SEISMOGENIC_DEPTH,
lower_seismogenic_depth=test_data.SET1_CASE1TO9_LOWER_SEISMOGENIC_DEPTH,
fault_trace=test_data.SET1_CASE1TO9_FAULT_TRACE,
dip=test_data.SET1_CASE1TO9_DIP,
rake=test_data.SET1_CASE1TO9_RAKE
)]
sites = SiteCollection([
test_data.SET1_CASE1TO9_SITE1, test_data.SET1_CASE1TO9_SITE2,
test_data.SET1_CASE1TO9_SITE3, test_data.SET1_CASE1TO9_SITE4,
test_data.SET1_CASE1TO9_SITE5, test_data.SET1_CASE1TO9_SITE6,
test_data.SET1_CASE1TO9_SITE7
])
gsims = {const.TRT.ACTIVE_SHALLOW_CRUST: SadighEtAl1997()}
truncation_level = 0
imts = {str(test_data.IMT): test_data.SET1_CASE5_IMLS}
curves = calc_hazard_curves(
sources, sites, imts, gsims, truncation_level)
s1hc, s2hc, s3hc, s4hc, s5hc, s6hc, s7hc = curves[str(test_data.IMT)]
assert_hazard_curve_is(self, s1hc, test_data.SET1_CASE5_SITE1_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s2hc, test_data.SET1_CASE5_SITE2_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s3hc, test_data.SET1_CASE5_SITE3_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s4hc, test_data.SET1_CASE5_SITE4_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s5hc, test_data.SET1_CASE5_SITE5_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s6hc, test_data.SET1_CASE5_SITE6_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s7hc, test_data.SET1_CASE5_SITE7_POES,
atol=1e-3, rtol=1e-5)
def test_non_parametric_source(self):
# non-parametric source equivalent to case 2 simple fault source
data = test_data.SET1_CASE2_SOURCE_DATA
ruptures = []
for i in range(data['num_rups_dip']):
for j in range(data['num_rups_strike']):
lons = data['lons']
lats = data['lats'][j]
depths = data['depths'][i]
mesh = RectangularMesh(lons, lats, depths)
surf = SimpleFaultSurface(mesh)
hypo = Point(
data['hypo_lons'][i, j],
data['hypo_lats'][i, j],
data['hypo_depths'][i, j]
)
rup = Rupture(data['mag'], data['rake'],
data['tectonic_region_type'], hypo, surf,
data['source_typology'])
ruptures.append((rup, data['pmf']))
npss = NonParametricSeismicSource(
'id', 'name', data['tectonic_region_type'], ruptures
)
sites = SiteCollection([
test_data.SET1_CASE1TO9_SITE1, test_data.SET1_CASE1TO9_SITE2,
test_data.SET1_CASE1TO9_SITE3, test_data.SET1_CASE1TO9_SITE4,
test_data.SET1_CASE1TO9_SITE5, test_data.SET1_CASE1TO9_SITE6,
test_data.SET1_CASE1TO9_SITE7
])
gsims = {const.TRT.ACTIVE_SHALLOW_CRUST: SadighEtAl1997()}
truncation_level = 0
imts = {str(test_data.IMT): test_data.SET1_CASE2_IMLS}
curves = calc_hazard_curves([npss], sites, imts, gsims,
truncation_level)
s1hc, s2hc, s3hc, s4hc, s5hc, s6hc, s7hc = curves[str(test_data.IMT)]
assert_hazard_curve_is(self, s1hc, test_data.SET1_CASE2_SITE1_POES,
atol=3e-3, rtol=1e-5)
assert_hazard_curve_is(self, s2hc, test_data.SET1_CASE2_SITE2_POES,
atol=2e-5, rtol=1e-5)
assert_hazard_curve_is(self, s3hc, test_data.SET1_CASE2_SITE3_POES,
atol=2e-5, rtol=1e-5)
assert_hazard_curve_is(self, s4hc, test_data.SET1_CASE2_SITE4_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s5hc, test_data.SET1_CASE2_SITE5_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s6hc, test_data.SET1_CASE2_SITE6_POES,
atol=1e-3, rtol=1e-5)
assert_hazard_curve_is(self, s7hc, test_data.SET1_CASE2_SITE7_POES,
atol=2e-5, rtol=1e-5)
| 48.800738
| 97
| 0.644083
|
4a12223b6b6acb85c5fda2392783a963497b7090
| 21,439
|
py
|
Python
|
NYUSH_solution_no_violation_hpc.py
|
AlisonYao/Stnet-Star-Shuttle-Opt
|
9ac6235c7460ba87f0b4b8f07a0d5d06ae8e992a
|
[
"Apache-2.0"
] | null | null | null |
NYUSH_solution_no_violation_hpc.py
|
AlisonYao/Stnet-Star-Shuttle-Opt
|
9ac6235c7460ba87f0b4b8f07a0d5d06ae8e992a
|
[
"Apache-2.0"
] | null | null | null |
NYUSH_solution_no_violation_hpc.py
|
AlisonYao/Stnet-Star-Shuttle-Opt
|
9ac6235c7460ba87f0b4b8f07a0d5d06ae8e992a
|
[
"Apache-2.0"
] | null | null | null |
"""
Author: Yuhan Yao (yy2564@nyu.edu)
Date: Feb 5, 2022
I am having trouble getting a feasible solution. I have done more than 60 experiments, but still have not obtained a solution that doesn't violate any constraint.
Therefore, I made this version so that it only stops when there is a feasible solution.
WARNING: it's possible that it will never end...
So I dumped this onto an HPC and keep it running there. We will see what will happen.
"""
import random
import numpy as np
import time
import matplotlib.pyplot as plt
from datetime import datetime
def generate_random_N_paths(N, path_length):
'''
Randomize N paths where each path is like 00 01 00 01 01 01
'''
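    # Encoding (as interpreted by decode_one_path below): each interval is two
    # digits -- '00' = bus idles, '10' = a trip on the JQJY<->AB route,
    # '01' = a trip on the PS<->AB route.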
one_solution = []
while len(one_solution) < N:
one_path_single_digit = random.choices(population=[0, 1], weights=[1-initial_prob, initial_prob], k=path_length)
one_path_double_digit = ''
for i in one_path_single_digit:
if i == 0:
one_path_double_digit += '00'
elif i == 1:
one_path_double_digit += random.choices(population=['10', '01'], weights=[1-pusan_prob, pusan_prob])[0]
if check_path_integrity(one_path_double_digit):
one_solution.append(one_path_double_digit)
return one_solution
def check_solution_integrity(solution):
for one_path_double_digit in solution:
if not check_path_integrity(one_path_double_digit):
return False
return True
def check_path_integrity(one_path_double_digit):
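    '''
    A path is drivable only if consecutive trips alternate through AB: from
    JQJY only '10' is legal (driving back to AB), from PS only '01', while
    from AB either route may be taken.
    '''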
last_visited = None
for i in range(len(one_path_double_digit)):
if i % 2 == 0:
two_digits = one_path_double_digit[i:i+2]
if two_digits != '00':
# first time going to AB
if last_visited is None:
last_visited = 'AB'
# following times
elif last_visited == 'JQJY':
if two_digits == '01':
return False
else: # '10'
last_visited = 'AB'
elif last_visited == 'PS':
if two_digits == '10':
return False
else: # '01'
last_visited = 'AB'
else:
if two_digits == '10':
last_visited = 'JQJY'
else: # '01'
last_visited = 'PS'
return True
def decode_one_path(one_path_double_digit):
decoded, initial_node, last_visited = [], None, None
for i in range(len(one_path_double_digit)):
if i % 2 == 0:
two_digits = one_path_double_digit[i:i+2]
if two_digits == '00':
if last_visited is None:
decoded.append([0, 0, 0, 0, 0, 0, 0])
elif last_visited == 'JQJY':
decoded.append([1, 0, 0, 0, 0, 0, 0])
elif last_visited == 'AB':
decoded.append([0, 0, 0, 1, 0, 0, 0])
else: # PS
decoded.append([0, 0, 0, 0, 0, 0, 1])
elif two_digits == '10':
if last_visited is None:
initial_node = 0
last_visited = 'AB'
decoded.append([0, 1, 0, 0, 0, 0, 0])
elif last_visited == 'AB':
last_visited = 'JQJY'
decoded.append([0, 0, 1, 0, 0, 0, 0])
elif last_visited == 'JQJY':
last_visited = 'AB'
decoded.append([0, 1, 0, 0, 0, 0, 0])
else:
print('SOMETHING IS WRONG1!!!')
elif two_digits == '01':
if last_visited is None:
initial_node = -1
last_visited = 'AB'
decoded.append([0, 0, 0, 0, 0, 1, 0])
elif last_visited == 'AB':
last_visited = 'PS'
decoded.append([0, 0, 0, 0, 1, 0, 0])
elif last_visited == 'PS':
last_visited = 'AB'
decoded.append([0, 0, 0, 0, 0, 1, 0])
else:
print('SOMETHING IS WRONG2!!!')
decoded = np.array(decoded).T
decoded_sum = decoded.sum(axis=0)
if sum(decoded_sum) == 0:
if random.random() <= pusan_prob:
decoded[0, :] = 0
else:
decoded[0, :] = 1
return decoded
k = 0
while decoded_sum[k] == 0:
decoded[initial_node, k] = 1
k += 1
return decoded
def demand_constraint(binary_N_paths, tolerance):
'''
make sure the demand is met
'''
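    # `link` sums the per-bus 7xT direction matrices; rows 1:3 of each campus
    # slice are the trips serving that line, so (trip count * D seats) is the
    # supply compared against the tolerance-adjusted demand.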
directional_N_paths = [decode_one_path(one_path) for one_path in binary_N_paths]
link = sum(directional_N_paths)
link_JQJY = link[:4, :]
link_PS = link[-1:2:-1, :]
JQJY_supply_demand_difference = np.greater_equal(demand_JQJY - tolerance, link_JQJY[1:3, :] * D)
JQJY_mask = (demand_JQJY - tolerance) - (link_JQJY[1:3, :] * D)
PS_supply_demand_difference = np.greater_equal(demand_PS - tolerance, link_PS[1:3, :] * D)
PS_mask = (demand_PS - tolerance) - (link_PS[1:3, :] * D)
missedDemandNumJQJY = np.sum(JQJY_supply_demand_difference * JQJY_mask)
missedDemandNumPS = np.sum(PS_supply_demand_difference * PS_mask)
return int(missedDemandNumJQJY + missedDemandNumPS) == 0, int(missedDemandNumJQJY + missedDemandNumPS)
def rush_hour_constraint(binary_N_paths):
'''
during rush hours, one interval is not enough time to commute
'''
violationCount = 0
for one_path_double_digit in binary_N_paths:
one_path_single_digit_list = []
one_path_double_digit_list = list(one_path_double_digit)
for i in range(len(one_path_double_digit_list)):
if i % 2 == 0:
one_path_single_digit_list.append(int(one_path_double_digit_list[i]) + int(one_path_double_digit_list[i+1]))
# morning rush hour
if one_path_single_digit_list[1] + one_path_single_digit_list[2] == 2:
violationCount += 1
# evening rush hour
if one_path_single_digit_list[21] + one_path_single_digit_list[22] == 2:
violationCount += 1
return int(violationCount) == 0, int(violationCount)
def max_working_hour_constraint(binary_N_paths):
'''
make sure that no driver works more than a few hours continuously
'''
violationCount = 0
for one_path_double_digit in binary_N_paths:
one_path_single_digit_list = []
one_path_double_digit_list = list(one_path_double_digit)
for i in range(len(one_path_double_digit_list)):
if i % 2 == 0:
one_path_single_digit_list.append(int(one_path_double_digit_list[i]) + int(one_path_double_digit_list[i+1]))
num, num_list = 0, []
one_path_copy = one_path_single_digit_list.copy()
        # rush-hour trips take two intervals, so expand a lone '10' at the
        # rush slots to '11' before measuring continuous working stretches
if checkRushHourFlag:
if one_path_copy[1] == 1 and one_path_copy[2] == 0:
one_path_copy[2] = 1
if one_path_copy[21] == 1 and one_path_copy[22] == 0:
one_path_copy[22] = 1
for i, node in enumerate(one_path_copy):
num += node
if i+1 == len(one_path_copy):
num_list.append(num)
continue
if node == 1 and one_path_copy[i+1] == 0:
num_list.append(num)
num = 0
violationCount += sum(np.array(num_list) > maxWorkingHour / intervalDuration)
return int(violationCount) == 0, int(violationCount)
def check_feasibility(binary_N_paths, checkDemand=True, checkRushHour=False, checkMaxWorkingHour=False):
'''
s.t. constraints (make sure initial paths & crossover paths & mutated paths are feasible)
constraint1: meet demand
constraint2: during rush hours, one interval is not enough time to commute (optional)
constraint3: make sure that no driver works more than a few hours continuously
'''
demandFlag, rushHour, maxWorkingHour = True, True, True
if checkDemand:
demandFlag, demandViolationNum = demand_constraint(binary_N_paths, tolerance)
if checkRushHour:
rushHour, rushHourViolationNum = rush_hour_constraint(binary_N_paths)
if checkMaxWorkingHour:
maxWorkingHour, maxWorkingHourViolationNum = max_working_hour_constraint(binary_N_paths)
if not demandFlag:
f.write('d' + str(demandViolationNum))
if not rushHour:
f.write('r' + str(rushHourViolationNum))
if not maxWorkingHour:
f.write('w' + str(maxWorkingHourViolationNum))
f.write('\n')
return demandFlag and rushHour and maxWorkingHour
def fitness(binary_N_paths, addPenalty=False):
"""
objective function ish -> natural selection to pick the good ones
the lower the better!!
"""
total_cost = 0
# operation costs according to the path cost function
for one_path_double_digit in binary_N_paths:
one_path_single_digit_list = []
one_path_double_digit_list = list(one_path_double_digit)
for i in range(len(one_path_double_digit_list)):
if i % 2 == 0:
one_path_single_digit_list.append(int(one_path_double_digit_list[i]) + int(one_path_double_digit_list[i+1]))
one_path_single_digit_np = np.array(one_path_single_digit_list)
target_indices = np.where(one_path_single_digit_np == 1)[0]
if len(target_indices) == 0:
duration_interval_num = 0 # bus did not operate at all
else:
duration_interval_num = int(target_indices[-1] - target_indices[0] + 1)
duration = duration_interval_num * intervalDuration
if duration_interval_num == 0:
total_cost += 0
elif duration_interval_num == 1:
total_cost += 75
elif duration <= 6:
if target_indices[0] == 0:
assert target_indices[-1] <= 11
total_cost += 110
elif target_indices[0] >= 12:
total_cost += 150
else:
total_cost += 153.2 * np.log10(6) + 7.22 * 6
else:
total_cost += 153.2 * np.log10(duration) + 7.22 * duration
# add penalty
if addPenalty:
demandFlag, demandViolationNum = demand_constraint(binary_N_paths, tolerance)
        rushHour, rushHourViolationNum = rush_hour_constraint(binary_N_paths)
maxWorkingHour, maxWorkingHourViolationNum = max_working_hour_constraint(binary_N_paths)
if checkDemandFlag:
total_cost += alpha * demandViolationNum * demandViolationPenalty
if checkRushHourFlag:
            total_cost += rushHourViolationNum * rushHourViolationPenalty
        if checkMaxWorkingHourFlag:
total_cost += maxWorkingHourViolationNum * maxWorkingHourViolationPenalty
return total_cost
def generate_population(population_size):
population, fitness_scores_add_penalty = [], []
for _ in range(population_size):
binary_N_paths = generate_random_N_paths(N, intervalNum)
population.append(binary_N_paths)
fitness_score_add_penalty = fitness(binary_N_paths, addPenalty=True)
fitness_scores_add_penalty.append(fitness_score_add_penalty)
return np.array(population), np.array(fitness_scores_add_penalty)
def elitism(population, fitness_scores, elitism_cutoff=2):
elite_indices = np.argpartition(np.array(fitness_scores), elitism_cutoff)[:elitism_cutoff]
return population[elite_indices, :]
def create_next_generation(population, population_fitnesses_add_penalty, population_size, elitism_cutoff):
"""
Randomly pick the good ones and cross them over
"""
children = []
while True:
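        # Fitness-proportionate selection on inverted scores: lower penalized
        # cost => larger weight, shifted by +1 so even the worst individual
        # keeps a nonzero chance of being picked.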
parents = random.choices(
population=population,
weights=[(max(population_fitnesses_add_penalty) - score + 1)/(max(population_fitnesses_add_penalty) * len(population_fitnesses_add_penalty) - sum(population_fitnesses_add_penalty) + len(population_fitnesses_add_penalty)) for score in population_fitnesses_add_penalty],
k=2
)
kid1, kid2 = single_point_crossover(parents[0], parents[1])
kid1 = single_mutation(kid1)
children.append(kid1)
if len(children) == population_size - elitism_cutoff:
return np.array(children)
kid2 = single_mutation(kid2)
children.append(kid2)
if len(children) == population_size - elitism_cutoff:
return np.array(children)
def single_point_crossover(parent1, parent2):
"""
    Cut both parents at a random two-digit boundary and swap tails, keeping
    only offspring that pass the path-integrity check
"""
assert parent1.size == parent2.size
length = len(parent1)
if length < 2:
return parent1, parent2
count = 0
while count <= loop_limit:
        cut = random.randint(1, length // 2 - 1) * 2  # cut on a two-digit (interval) boundary inside the string
kid1 = np.array(list(parent1)[:cut] + list(parent2)[cut:])
kid2 = np.array(list(parent2)[:cut] + list(parent1)[cut:])
if check_solution_integrity(kid1) and check_solution_integrity(kid2):
return kid1, kid2
        elif check_solution_integrity(kid1) and not check_solution_integrity(kid2):
            # fall back to the untouched parent so callers never receive None
            return kid1, parent2
        elif not check_solution_integrity(kid1) and check_solution_integrity(kid2):
            return parent1, kid2
count += 1
return parent1, parent2
def single_mutation(binary_N_paths):
"""
Mutate only one node in one path for now
"""
count = 0
binary_N_paths_copy = binary_N_paths.copy()
while count <= loop_limit:
mutate_path = np.random.randint(0, N)
mutate_index = np.random.randint(0, intervalNum) * 2
double_digits_to_mutate = binary_N_paths_copy[mutate_path][mutate_index:mutate_index+2]
pool = ['00', '01', '10']
pool.remove(double_digits_to_mutate)
mutated_double_digits = random.choices(population=pool)[0]
original_string = binary_N_paths_copy[mutate_path]
mutated_string = original_string[:mutate_index] + mutated_double_digits + original_string[mutate_index+2:]
if check_path_integrity(mutated_string):
binary_N_paths_copy[mutate_path] = mutated_string
return binary_N_paths_copy
count += 1
return binary_N_paths
def result_stats(progress_with_penalty, progress):
"""
print important stats & visulize progress_with_penalty
"""
# write to file
f.write('**************************************************************' + '\n')
f.write("Progress_with_penalty of improvement: " + str(progress_with_penalty[0]) + " to " + str(progress_with_penalty[-1]) + '\n')
f.write("Progress of improvement: " + str(progress[0]) + ' to ' + str(progress[-1]) + '\n')
f.write("Improvement Rate of progress: " + str(abs(progress[-1] - progress[0])/progress[0]) + '\n')
f.write('**************************************************************' + '\n')
# show plot
    plt.plot(progress_with_penalty, label='Fitness Score')
    plt.plot(progress, label='Operation Cost')
plt.xlabel("Generation")
plt.ylabel("Cost")
plt.legend()
plt.savefig(str(save_name) + '.png')
plt.clf()
def run_evolution(population_size, evolution_depth, elitism_cutoff):
'''
Main function of Genetic Algorithm
'''
tic = time.time()
# first initialize a population
population, population_fitnesses_add_penalty = generate_population(population_size)
initialization_end = time.time()
population_fitnesses = [fitness(binary_N_paths) for binary_N_paths in population]
# keep track of improvement
progress_with_penalty, progress = [], []
allFeasibilityFlag = False
i = 0
ii = 0
startover = False
# start evolving :)
while (allFeasibilityFlag is False) or (ii <= evolution_depth):
progress_with_penalty.append(min(population_fitnesses_add_penalty))
progress.append(min(population_fitnesses))
elitism_begin = time.time()
elites = elitism(population, population_fitnesses_add_penalty, elitism_cutoff)
children = create_next_generation(population, population_fitnesses_add_penalty, population_size, elitism_cutoff)
population = np.concatenate([elites, children])
population_fitnesses_add_penalty = [fitness(binary_N_paths, addPenalty=True) for binary_N_paths in population]
population_fitnesses = [fitness(binary_N_paths) for binary_N_paths in population]
evol_end = time.time()
# check best solution feasibility
minIndex = population_fitnesses_add_penalty.index(min(population_fitnesses_add_penalty))
best_solution = population[minIndex]
allFeasibilityFlag = check_feasibility(best_solution, checkRushHour=checkRushHourFlag, checkMaxWorkingHour=checkMaxWorkingHourFlag)
# best solution
directional_N_paths = [decode_one_path(one_path) for one_path in population[minIndex]]
link = sum(directional_N_paths)
i += 1
if allFeasibilityFlag:
ii += 1
if i % 20 == 0:
f.write('----------------------------- generation ' + str(i+1) + ' Start! -----------------------------\n')
f.write('Min Cost Penalty: ' + str(min(population_fitnesses_add_penalty)) + ' -> ' + str(min(population_fitnesses)) + '\n')
if i - ii >= max_iter_num:
startover = True
break
if startover:
return False
else:
# plot results
result_stats(progress_with_penalty, progress)
# print best solution
minIndex = population_fitnesses_add_penalty.index(min(population_fitnesses_add_penalty))
best_solution = population[minIndex]
f.write('best solution (path):\n' + str(best_solution) + '\n')
# check if all constraints are met (ideally True)
f.write("All constraints met? " + str(check_feasibility(best_solution, checkDemand=checkDemandFlag, checkRushHour=checkRushHourFlag, checkMaxWorkingHour=checkMaxWorkingHourFlag)) + '\n')
directional_N_paths = [decode_one_path(one_path) for one_path in population[minIndex]]
link = sum(directional_N_paths)
f.write('best solution (link): \n' + str(link) + '\n')
f.write('#iteration: ' + str(i) + '\n')
return True
if __name__ == "__main__":
SUCCESS = False
"""initialization for genetic algo"""
# starting from a lower initial_prob will give you fewer 1s,
# then demand constraint is violated,
# but rush hour constraint and max working hour constraint are likely to be satisfied
# starting from a higher initial_prob will give you more 1s,
# then demand constraint is unlikely to be violated,
# but rush hour constraint and max working hour constraint are probably violated.
# So there's the tradeoff
    initial_prob = 0.3  # here I am going to start small
pusan_prob = 0.2
population_size = 20
elitism_cutoff = 2
loop_limit = 100
evolution_depth = 30000
max_iter_num = 20000
"""initialization for buses"""
# # of buses
N = 19
# #seats on each bus
D = 50
tolerance = 0
intervalDuration = 0.5
# numerical example
demand = np.array([
[114,106,132,132,117,83,57,52,13,8,18,13,26,3,13,10,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,14,2,0,7,12,7,9,5,7,7,12,9,32,39,53,35,30,18,60,44,60,53,90,58,78,71,35,55]
])
demand_JQJY = demand
demand_JQJY = demand_JQJY.astype(int)
demand_PS = np.around(demand / 9)
demand_PS = demand_PS.astype(int)
intervalNum = demand.shape[-1]
maxWorkingHour = 4
checkDemandFlag, checkRushHourFlag, checkMaxWorkingHourFlag = True, True, True
alpha, demandViolationPenalty, rushHourViolationPenalty, maxWorkingHourViolationPenalty = 1, 100, 100, 100 # 20, 17, 15
# run main function & save everything to txt and png
while not SUCCESS:
save_name = 'test_results/'+str(evolution_depth)+'_'+str(initial_prob)+'_N'+str(N)+'_'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'_nv'
f = open(save_name + '.txt', 'w')
f.write('initial_prob: ' + str(initial_prob) + '\n')
f.write('pusan_prob: ' + str(pusan_prob) + '\n')
f.write('population_size: ' + str(population_size) + '\n')
f.write('elitism_cutoff: ' + str(elitism_cutoff) + '\n')
f.write('loop_limit: ' + str(loop_limit) + '\n')
f.write('evolution_depth: ' + str(evolution_depth) + '\n')
f.write('max_iter_num: ' + str(max_iter_num) + '\n')
f.write('N: ' + str(N) + '\n')
f.write('D: ' + str(D) + '\n')
f.write('tolerance: ' + str(tolerance) + '\n')
f.write('intervalDuration:' + str(intervalDuration) + '\n')
f.write('demand:' + str(demand) + '\n')
f.write('maxWorkingHour: ' + str(maxWorkingHour) + '\n')
f.write('alpha, demandViolationPenalty, rushHourViolationPenalty, maxWorkingHourViolationPenalty: '+str(alpha)+', '+str(demandViolationPenalty)+', '+str(rushHourViolationPenalty)+', '+str(maxWorkingHourViolationPenalty)+'\n')
start_time = time.time()
SUCCESS = run_evolution(population_size, evolution_depth, elitism_cutoff)
end_time = time.time()
f.write('total run time: ' + str(end_time - start_time) + 's')
f.close()
| 43.753061
| 280
| 0.629833
|
4a122243112af2d5f17197cf8c56d1844a74b986
| 5,314
|
py
|
Python
|
imcsdk/mometa/storage/StorageSasExpander.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/storage/StorageSasExpander.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/storage/StorageSasExpander.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | 1
|
2019-11-10T18:42:04.000Z
|
2019-11-10T18:42:04.000Z
|
"""This module contains the general information for StorageSasExpander ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class StorageSasExpanderConsts:
MIXED6_G12_GDRIVE_SUPPORT_DISABLED = "Disabled"
MIXED6_G12_GDRIVE_SUPPORT_ENABLED = "Enabled"
MIXED6_G12_GDRIVE_SUPPORT_N_A = "N/A"
MIXED6_G12_GDRIVE_SUPPORT_PENDING = "Pending"
class StorageSasExpander(ManagedObject):
"""This is StorageSasExpander class."""
consts = StorageSasExpanderConsts()
naming_props = set([u'id'])
mo_meta = {
"classic": MoMeta("StorageSasExpander", "storageSasExpander", "sas-expander-[id]", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["read-only"], [u'topSystem'], [u'mgmtController'], ["Get"]),
"modular": MoMeta("StorageSasExpander", "storageSasExpander", "sas-expander-[id]", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["read-only"], [u'equipmentChassis'], [u'faultInst', u'mgmtController', u'storageSasUplink'], ["Get"])
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"current_fw_version": MoPropertyMeta("current_fw_version", "currentFwVersion", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version2013e, MoPropertyMeta.NAMING, None, None, None, None, [], ["0-999"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
"modular": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"current_fw_version": MoPropertyMeta("current_fw_version", "currentFwVersion", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version2013e, MoPropertyMeta.NAMING, None, None, None, None, [], ["0-999"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"mixed6_g12_g_drive_support": MoPropertyMeta("mixed6_g12_g_drive_support", "mixed6G12GDriveSupport", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
"sas_address": MoPropertyMeta("sas_address", "sasAddress", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"currentFwVersion": "current_fw_version",
"description": "description",
"dn": "dn",
"id": "id",
"name": "name",
"rn": "rn",
"status": "status",
},
"modular": {
"childAction": "child_action",
"currentFwVersion": "current_fw_version",
"description": "description",
"dn": "dn",
"id": "id",
"name": "name",
"rn": "rn",
"status": "status",
"mixed6G12GDriveSupport": "mixed6_g12_g_drive_support",
"sasAddress": "sas_address",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.current_fw_version = None
self.description = None
self.name = None
self.status = None
self.mixed6_g12_g_drive_support = None
self.sas_address = None
ManagedObject.__init__(self, "StorageSasExpander", parent_mo_or_dn, **kwargs)
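
# A minimal usage sketch (not part of the generated module; the host, user,
# and password below are placeholders):
#
#   from imcsdk.imchandle import ImcHandle
#   handle = ImcHandle("192.0.2.10", "admin", "password")
#   handle.login()
#   expanders = handle.query_classid("storageSasExpander")
#   handle.logout()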
| 55.354167
| 252
| 0.627776
|
4a1222db0b241ede92b654b3b73c7214eb33bc82
| 324
|
py
|
Python
|
django_vue_chat/asgi.py
|
borisliu/django-vue-chat
|
40ee07579ac55411965c06761000941e7e36f1dd
|
[
"MIT"
] | 1
|
2019-06-11T20:21:10.000Z
|
2019-06-11T20:21:10.000Z
|
django_vue_chat/asgi.py
|
borisliu/django-vue-chat
|
40ee07579ac55411965c06761000941e7e36f1dd
|
[
"MIT"
] | null | null | null |
django_vue_chat/asgi.py
|
borisliu/django-vue-chat
|
40ee07579ac55411965c06761000941e7e36f1dd
|
[
"MIT"
] | 2
|
2019-02-14T04:25:05.000Z
|
2020-03-10T07:21:20.000Z
|
"""
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import django
from channels.routing import get_default_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_vue_chat.settings")
django.setup()
application = get_default_application()
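
# To serve this ASGI application (a sketch; assumes an ASGI server such as
# Daphne is installed and this project is on the Python path):
#   daphne django_vue_chat.asgi:application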
| 24.923077
| 75
| 0.824074
|
4a1222e2b22afb1bfb4d49dc2703d2199de37bfb
| 3,943
|
py
|
Python
|
nlp_architect/data/cdc_resources/data_types/wiki/wikipedia_pages.py
|
ikuyamada/nlp-architect
|
2769bbf948b2509b4ac7dc287fddf907046bf283
|
[
"Apache-2.0"
] | 1
|
2020-07-18T08:35:52.000Z
|
2020-07-18T08:35:52.000Z
|
nlp_architect/data/cdc_resources/data_types/wiki/wikipedia_pages.py
|
SIVASHANKAR-S/nlp-architect
|
b9d7df0afde39b62b2c23e24211e368b82623abc
|
[
"Apache-2.0"
] | null | null | null |
nlp_architect/data/cdc_resources/data_types/wiki/wikipedia_pages.py
|
SIVASHANKAR-S/nlp-architect
|
b9d7df0afde39b62b2c23e24211e368b82623abc
|
[
"Apache-2.0"
] | 1
|
2020-09-30T17:29:26.000Z
|
2020-09-30T17:29:26.000Z
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
class WikipediaPages(object):
def __init__(self):
"""
        Object representing a set of Wikipedia Pages
"""
self.pages = set()
self.is_empty_norm_phrase = True
def get_pages(self):
return self.pages
def add_page(self, page):
self.pages.add(page)
if page.orig_phrase_norm is not None and page.orig_phrase_norm != "":
self.is_empty_norm_phrase = False
def get_and_set_all_disambiguation(self):
all_disambiguations = []
for page in self.pages:
if page.relations.disambiguation_links_norm is not None:
all_disambiguations.extend(page.relations.disambiguation_links_norm)
if page.relations.disambiguation_links is not None:
all_disambiguations.extend(page.relations.disambiguation_links)
return set(all_disambiguations)
def get_and_set_all_categories(self):
all_categories = []
for page in self.pages:
if page.relations.categories_norm is not None:
all_categories.extend(page.relations.categories_norm)
if page.relations.categories is not None:
all_categories.extend(page.relations.categories)
return set(all_categories)
def get_and_set_all_aliases(self):
all_aliases = []
for page in self.pages:
if page.relations.aliases_norm is not None:
all_aliases.extend(page.relations.aliases_norm)
if page.relations.aliases is not None:
all_aliases.extend(page.relations.aliases)
return set(all_aliases)
def get_and_set_parenthesis(self):
all_parenthesis = []
for page in self.pages:
if page.relations.title_parenthesis_norm is not None:
all_parenthesis.extend(page.relations.title_parenthesis_norm)
if page.relations.title_parenthesis is not None:
all_parenthesis.extend(page.relations.title_parenthesis)
return set(all_parenthesis)
def get_and_set_be_comp(self):
all_be_comp = []
for page in self.pages:
if page.relations.be_comp_norm is not None:
all_be_comp.extend(page.relations.be_comp_norm)
if page.relations.be_comp is not None:
all_be_comp.extend(page.relations.be_comp)
return set(all_be_comp)
def get_and_set_titles(self):
all_titles = []
for page in self.pages:
if page.orig_phrase != "":
all_titles.append(page.orig_phrase)
all_titles.append(page.orig_phrase_norm)
if page.wiki_title != "":
all_titles.append(page.wiki_title)
all_titles.append(page.wiki_title_norm)
return set(all_titles)
def toJson(self):
result_dict = {}
page_list = []
for page in self.pages:
page_list.append(page.toJson())
result_dict["pages"] = page_list
return result_dict
def __str__(self) -> str:
result_str = ""
for page in self.pages:
result_str += str(page) + ", "
return result_str.strip()
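
# A minimal usage sketch (assumes `page` is a parsed Wikipedia page object
# exposing orig_phrase/orig_phrase_norm, wiki_title/wiki_title_norm and a
# `relations` attribute, as produced elsewhere in nlp-architect):
#
#   pages = WikipediaPages()
#   pages.add_page(page)
#   titles = pages.get_and_set_titles()   # all raw and normalized titles
#   as_json = pages.toJson()              # {"pages": [...]}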
| 37.552381
| 84
| 0.62034
|
4a1222ff06136c6f4684620519857cb42d6e4c28
| 23,378
|
py
|
Python
|
intersight/models/iam_session.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/models/iam_session.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/models/iam_session.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class IamSession(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'create_time': 'datetime',
'domain_group_moid': 'str',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'shared_scope': 'str',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'ancestors': 'list[MoBaseMoRef]',
'parent': 'MoBaseMoRef',
'permission_resources': 'list[MoBaseMoRef]',
'account_permissions': 'list[IamAccountPermissions]',
'client_ip_address': 'str',
'expiration': 'datetime',
'idle_time_expiration': 'datetime',
'last_login_client': 'str',
'last_login_time': 'datetime',
'permission': 'IamPermissionRef',
'user': 'IamUserRef'
}
attribute_map = {
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'account_permissions': 'AccountPermissions',
'client_ip_address': 'ClientIpAddress',
'expiration': 'Expiration',
'idle_time_expiration': 'IdleTimeExpiration',
'last_login_client': 'LastLoginClient',
'last_login_time': 'LastLoginTime',
'permission': 'Permission',
'user': 'User'
}
def __init__(self, account_moid=None, create_time=None, domain_group_moid=None, mod_time=None, moid=None, object_type=None, owners=None, shared_scope=None, tags=None, version_context=None, ancestors=None, parent=None, permission_resources=None, account_permissions=None, client_ip_address=None, expiration=None, idle_time_expiration=None, last_login_client=None, last_login_time=None, permission=None, user=None):
"""
IamSession - a model defined in Swagger
"""
self._account_moid = None
self._create_time = None
self._domain_group_moid = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._shared_scope = None
self._tags = None
self._version_context = None
self._ancestors = None
self._parent = None
self._permission_resources = None
self._account_permissions = None
self._client_ip_address = None
self._expiration = None
self._idle_time_expiration = None
self._last_login_client = None
self._last_login_time = None
self._permission = None
self._user = None
if account_moid is not None:
self.account_moid = account_moid
if create_time is not None:
self.create_time = create_time
if domain_group_moid is not None:
self.domain_group_moid = domain_group_moid
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if shared_scope is not None:
self.shared_scope = shared_scope
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if ancestors is not None:
self.ancestors = ancestors
if parent is not None:
self.parent = parent
if permission_resources is not None:
self.permission_resources = permission_resources
if account_permissions is not None:
self.account_permissions = account_permissions
if client_ip_address is not None:
self.client_ip_address = client_ip_address
if expiration is not None:
self.expiration = expiration
if idle_time_expiration is not None:
self.idle_time_expiration = idle_time_expiration
if last_login_client is not None:
self.last_login_client = last_login_client
if last_login_time is not None:
self.last_login_time = last_login_time
if permission is not None:
self.permission = permission
if user is not None:
self.user = user
@property
def account_moid(self):
"""
Gets the account_moid of this IamSession.
The Account ID for this managed object.
:return: The account_moid of this IamSession.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this IamSession.
The Account ID for this managed object.
:param account_moid: The account_moid of this IamSession.
:type: str
"""
self._account_moid = account_moid
@property
def create_time(self):
"""
Gets the create_time of this IamSession.
The time when this managed object was created.
:return: The create_time of this IamSession.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this IamSession.
The time when this managed object was created.
:param create_time: The create_time of this IamSession.
:type: datetime
"""
self._create_time = create_time
@property
def domain_group_moid(self):
"""
Gets the domain_group_moid of this IamSession.
The DomainGroup ID for this managed object.
:return: The domain_group_moid of this IamSession.
:rtype: str
"""
return self._domain_group_moid
@domain_group_moid.setter
def domain_group_moid(self, domain_group_moid):
"""
Sets the domain_group_moid of this IamSession.
The DomainGroup ID for this managed object.
:param domain_group_moid: The domain_group_moid of this IamSession.
:type: str
"""
self._domain_group_moid = domain_group_moid
@property
def mod_time(self):
"""
Gets the mod_time of this IamSession.
The time when this managed object was last modified.
:return: The mod_time of this IamSession.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this IamSession.
The time when this managed object was last modified.
:param mod_time: The mod_time of this IamSession.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this IamSession.
The unique identifier of this Managed Object instance.
:return: The moid of this IamSession.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this IamSession.
The unique identifier of this Managed Object instance.
:param moid: The moid of this IamSession.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this IamSession.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:return: The object_type of this IamSession.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this IamSession.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:param object_type: The object_type of this IamSession.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this IamSession.
The array of owners which represent effective ownership of this object.
:return: The owners of this IamSession.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this IamSession.
The array of owners which represent effective ownership of this object.
:param owners: The owners of this IamSession.
:type: list[str]
"""
self._owners = owners
@property
def shared_scope(self):
"""
Gets the shared_scope of this IamSession.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:return: The shared_scope of this IamSession.
:rtype: str
"""
return self._shared_scope
@shared_scope.setter
def shared_scope(self, shared_scope):
"""
Sets the shared_scope of this IamSession.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:param shared_scope: The shared_scope of this IamSession.
:type: str
"""
self._shared_scope = shared_scope
@property
def tags(self):
"""
Gets the tags of this IamSession.
The array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this IamSession.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this IamSession.
The array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this IamSession.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this IamSession.
The versioning info for this managed object.
:return: The version_context of this IamSession.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this IamSession.
The versioning info for this managed object.
:param version_context: The version_context of this IamSession.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def ancestors(self):
"""
Gets the ancestors of this IamSession.
The array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this IamSession.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this IamSession.
The array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this IamSession.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def parent(self):
"""
Gets the parent of this IamSession.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this IamSession.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this IamSession.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this IamSession.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def permission_resources(self):
"""
Gets the permission_resources of this IamSession.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:return: The permission_resources of this IamSession.
:rtype: list[MoBaseMoRef]
"""
return self._permission_resources
@permission_resources.setter
def permission_resources(self, permission_resources):
"""
Sets the permission_resources of this IamSession.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:param permission_resources: The permission_resources of this IamSession.
:type: list[MoBaseMoRef]
"""
self._permission_resources = permission_resources
@property
def account_permissions(self):
"""
Gets the account_permissions of this IamSession.
The accounts and the permissions within each account which a user can select after authentication. After authentication if user has access to multiple permissions, then user and session object are created in onboarding user account and asked to select one of these permissions.
:return: The account_permissions of this IamSession.
:rtype: list[IamAccountPermissions]
"""
return self._account_permissions
@account_permissions.setter
def account_permissions(self, account_permissions):
"""
Sets the account_permissions of this IamSession.
The accounts and the permissions within each account which a user can select after authentication. After authentication if user has access to multiple permissions, then user and session object are created in onboarding user account and asked to select one of these permissions.
:param account_permissions: The account_permissions of this IamSession.
:type: list[IamAccountPermissions]
"""
self._account_permissions = account_permissions
@property
def client_ip_address(self):
"""
Gets the client_ip_address of this IamSession.
The user agent IP address from which the session is launched.
:return: The client_ip_address of this IamSession.
:rtype: str
"""
return self._client_ip_address
@client_ip_address.setter
def client_ip_address(self, client_ip_address):
"""
Sets the client_ip_address of this IamSession.
The user agent IP address from which the session is launched.
:param client_ip_address: The client_ip_address of this IamSession.
:type: str
"""
self._client_ip_address = client_ip_address
@property
def expiration(self):
"""
Gets the expiration of this IamSession.
Expiration time for the session.
:return: The expiration of this IamSession.
:rtype: datetime
"""
return self._expiration
@expiration.setter
def expiration(self, expiration):
"""
Sets the expiration of this IamSession.
Expiration time for the session.
:param expiration: The expiration of this IamSession.
:type: datetime
"""
self._expiration = expiration
@property
def idle_time_expiration(self):
"""
Gets the idle_time_expiration of this IamSession.
Idle time expiration for the session.
:return: The idle_time_expiration of this IamSession.
:rtype: datetime
"""
return self._idle_time_expiration
@idle_time_expiration.setter
def idle_time_expiration(self, idle_time_expiration):
"""
Sets the idle_time_expiration of this IamSession.
Idle time expiration for the session.
:param idle_time_expiration: The idle_time_expiration of this IamSession.
:type: datetime
"""
self._idle_time_expiration = idle_time_expiration
@property
def last_login_client(self):
"""
Gets the last_login_client of this IamSession.
The client address from which last login is initiated.
:return: The last_login_client of this IamSession.
:rtype: str
"""
return self._last_login_client
@last_login_client.setter
def last_login_client(self, last_login_client):
"""
Sets the last_login_client of this IamSession.
The client address from which last login is initiated.
:param last_login_client: The last_login_client of this IamSession.
:type: str
"""
self._last_login_client = last_login_client
@property
def last_login_time(self):
"""
Gets the last_login_time of this IamSession.
The last login time for user.
:return: The last_login_time of this IamSession.
:rtype: datetime
"""
return self._last_login_time
@last_login_time.setter
def last_login_time(self, last_login_time):
"""
Sets the last_login_time of this IamSession.
The last login time for user.
:param last_login_time: The last_login_time of this IamSession.
:type: datetime
"""
self._last_login_time = last_login_time
@property
def permission(self):
"""
Gets the permission of this IamSession.
Permissions associated with the web session. Permission provides a way to assign roles to a user or user group to perform operations on object hierarchy.
:return: The permission of this IamSession.
:rtype: IamPermissionRef
"""
return self._permission
@permission.setter
def permission(self, permission):
"""
Sets the permission of this IamSession.
Permissions associated with the web session. Permission provides a way to assign roles to a user or user group to perform operations on object hierarchy.
:param permission: The permission of this IamSession.
:type: IamPermissionRef
"""
self._permission = permission
@property
def user(self):
"""
Gets the user of this IamSession.
A collection of references to the [iam.User](mo://iam.User) Managed Object. When this managed object is deleted, the referenced [iam.User](mo://iam.User) MO unsets its reference to this deleted MO.
:return: The user of this IamSession.
:rtype: IamUserRef
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this IamSession.
A collection of references to the [iam.User](mo://iam.User) Managed Object. When this managed object is deleted, the referenced [iam.User](mo://iam.User) MO unsets its reference to this deleted MO.
:param user: The user of this IamSession.
:type: IamUserRef
"""
self._user = user
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, IamSession):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
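
# A small self-contained usage sketch (illustrative values only, not part of
# the generated module):
if __name__ == "__main__":
    session = IamSession(moid="example-moid", client_ip_address="192.0.2.1")
    # to_dict() walks swagger_types and serializes the model recursively.
    print(session.to_dict())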
| 34.078717
| 738
| 0.645436
|
4a122330dc354757e3c6319dcf86a7a27b7abdde
| 30,216
|
py
|
Python
|
socialsent/evaluate_methods.py
|
olgasilyutina/socialsent
|
8e7895db769aa4386d28301431de1819bee443bd
|
[
"Apache-2.0"
] | null | null | null |
socialsent/evaluate_methods.py
|
olgasilyutina/socialsent
|
8e7895db769aa4386d28301431de1819bee443bd
|
[
"Apache-2.0"
] | null | null | null |
socialsent/evaluate_methods.py
|
olgasilyutina/socialsent
|
8e7895db769aa4386d28301431de1819bee443bd
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
from socialsent import constants
from socialsent import util
from socialsent import polarity_induction_methods
from socialsent import seeds
from socialsent import lexicons
import sys
import random
import numpy as np
import scipy as sp
from . import embedding_transformer
from operator import itemgetter
from socialsent.historical import vocab
from sklearn.metrics import roc_auc_score, average_precision_score, confusion_matrix, f1_score, accuracy_score
from scipy.stats import kendalltau
from socialsent.representations.representation_factory import create_representation
DEFAULT_ARGUMENTS = dict(
# for iterative graph algorithms
similarity_power=1,
arccos=True,
max_iter=50,
epsilon=1e-6,
sym=True,
# for learning embeddings transformation
n_epochs=50,
force_orthogonal=False,
batch_size=100,
cosine=False,
## bootstrap
num_boots=1,
n_procs=1,
)
def evaluate_methods():
"""
Evaluates different methods on standard English.
"""
print("Getting evalution words..")
np.random.seed(0)
lexicon = lexicons.load_lexicon("inquirer", remove_neutral=False)
kuperman = lexicons.load_lexicon("kuperman", remove_neutral=False)
eval_words = set(lexicon.keys())
# load in WordNet lexicon and pad with zeros for missing words
# (since these are implicitly zero for this method)
qwn = lexicons.load_lexicon("qwn-scores")
for word in lexicon:
if not word in qwn:
qwn[word] = 0
positive_seeds, negative_seeds = seeds.hist_seeds()
common_embed = create_representation("GIGA", constants.GOOGLE_EMBEDDINGS,
eval_words.union(positive_seeds).union(negative_seeds))
embed_words = set(common_embed.iw)
eval_words = eval_words.intersection(embed_words)
eval_words = [word for word in eval_words
if not word in positive_seeds
and not word in negative_seeds]
print("Evaluating with ", len(eval_words), "out of", len(lexicon))
# print
# print "WordNet:"
# evaluate(qwn, lexicon, eval_words, tau_lexicon=kuperman)
#
# print "Densifier:"
# polarities = run_method(positive_seeds, negative_seeds,
# common_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
# method=polarity_induction_methods.bootstrap, score_method=polarity_induction_methods.densify,
# **DEFAULT_ARGUMENTS)
# evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print("SentProp:")
polarities = run_method(positive_seeds, negative_seeds,
common_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.label_propagate_probabilistic,
#method=polarity_induction_methods.bootstrap,
beta=0.99, nn=10,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
util.write_pickle(polarities, "tmp/gi-cc-walk-pols.pkl")
def hyperparam_eval():
print("Getting evaluation words and embeddings")
lexicon = lexicons.load_lexicon("bingliu", remove_neutral=False)
eval_words = set(lexicon.keys())
positive_seeds, negative_seeds = seeds.hist_seeds()
common_embed = create_representation("GIGA", constants.COMMON_EMBEDDINGS,
eval_words.union(positive_seeds).union(negative_seeds))
common_words = set(common_embed.iw)
eval_words = eval_words.intersection(common_words)
hist_embed = create_representation("SVD", constants.SVD_EMBEDDINGS + "1990")
hist_words = set(hist_embed.iw)
eval_words = eval_words.intersection(hist_words)
eval_words = [word for word in eval_words
if not word in positive_seeds
and not word in negative_seeds]
print("SentProp...")
for nn in [5, 10, 25, 50]:
for beta in [0.8, 0.9, 0.95, 0.99]:
print("Common")
polarities = run_method(positive_seeds, negative_seeds,
common_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.random_walk,
nn=nn, beta=beta,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words)
print("Hist")
polarities = run_method(positive_seeds, negative_seeds,
hist_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.random_walk,
nn=nn, beta=beta,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words)
print("Densify...")
for lr in [0.001, 0.01, 0.1, 0.5]:
for reg in [0.001, 0.01, 0.1, 0.5]:
print("LR : ", lr, "Reg: ", reg)
print("Common")
polarities = run_method(positive_seeds, negative_seeds,
common_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.densify,
lr=lr, regularization_strength=reg,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tern=False)
print("Hist")
polarities = run_method(positive_seeds, negative_seeds,
hist_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.densify,
lr=lr, regularization_strength=reg,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tern=False)
def evaluate_overlap_methods():
"""
Evaluate different methods on standard English,
but restrict to words that are present in the 1990s portion of historical data.
"""
print("Getting evalution words and embeddings..")
np.random.seed(0)
lexicon = lexicons.load_lexicon("inquirer", remove_neutral=False)
kuperman = lexicons.load_lexicon("kuperman", remove_neutral=False)
eval_words = set(lexicon.keys())
# load in WordNet lexicon and pad with zeros for missing words
# (since these are implicitly zero for this method)
qwn = lexicons.load_lexicon("qwn-scores")
for word in lexicon:
if not word in qwn:
qwn[word] = 0
positive_seeds, negative_seeds = seeds.hist_seeds()
# common_embed = create_representation("GIGA", constants.COMMON_EMBEDDINGS,
# eval_words.union(positive_seeds).union(negative_seeds))
# common_words = set(common_embed.iw)
# eval_words = eval_words.intersection(common_words)
hist_embed = create_representation("SVD", constants.COHA_EMBEDDINGS + "2000")
hist_counts = create_representation("Explicit", constants.COHA_COUNTS + "2000", normalize=False)
hist_words = set(hist_embed.iw)
eval_words = eval_words.intersection(hist_words)
eval_words = [word for word in eval_words
if not word in positive_seeds
and not word in negative_seeds]
hist_counts = hist_counts.get_subembed(set(eval_words).union(positive_seeds).union(negative_seeds),
restrict_context=False)
print("Evaluating with ", len(eval_words), "out of", len(lexicon))
print("PMI")
polarities = run_method(positive_seeds, negative_seeds,
hist_counts,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.pmi,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
evaluate(qwn, lexicon, eval_words, tau_lexicon=kuperman)
print("SentProp with 1990s Fic embeddings")
polarities = run_method(positive_seeds, negative_seeds,
hist_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.random_walk,
nn=25, beta=0.9,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
print("Densifier with 1990s Fic embeddings")
polarities = run_method(positive_seeds, negative_seeds,
hist_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.densify,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
print("Velikovich with 1990s Fic embeddings")
hist_counts.normalize()
polarities = run_method(positive_seeds, negative_seeds,
hist_counts,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.graph_propagate,
T=3,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
# print "SentProp with CC"
# polarities = run_method( positive_seeds, negative_seeds,
# common_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
# method=polarity_induction_methods.bootstrap,
# score_method=polarity_induction_methods.random_walk,
# beta=0.99, nn=10,
# **DEFAULT_ARGUMENTS)
# evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
#
# print "Densifier with CC"
# polarities = run_method( positive_seeds, negative_seeds,
# common_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
# method=polarity_induction_methods.bootstrap,
# score_method=polarity_induction_methods.densify,
# **DEFAULT_ARGUMENTS)
# evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
def evaluate_adj_methods():
"""
Evaluate different methods on standard English,
but restrict to words that are present in the 1990s portion of historical data.
"""
print("Getting evalution words and embeddings..")
np.random.seed(0)
lexicon = lexicons.load_lexicon("inquirer", remove_neutral=False)
kuperman = lexicons.load_lexicon("kuperman", remove_neutral=False)
eval_words = set(lexicon.keys())
adjs = vocab.pos_words("1990", "ADJ")
# load in WordNet lexicon and pad with zeros for missing words
# (since these are implicitly zero for this method)
qwn = lexicons.load_lexicon("qwn-scores")
for word in lexicon:
if not word in qwn:
qwn[word] = 0
positive_seeds, negative_seeds = seeds.adj_seeds()
common_embed = create_representation("GIGA", constants.COMMON_EMBEDDINGS,
eval_words.union(positive_seeds).union(negative_seeds))
common_words = set(common_embed.iw)
eval_words = eval_words.intersection(common_words)
hist_embed = create_representation("SVD", constants.COHA_EMBEDDINGS + "2000")
hist_counts = create_representation("Explicit", constants.COUNTS + "1990", normalize=False)
hist_words = set(hist_embed.iw)
eval_words = eval_words.intersection(hist_words)
embed_words = [word for word in adjs if word in hist_words and word in common_words]
eval_words = [word for word in eval_words if word in embed_words
and not word in positive_seeds
and not word in negative_seeds]
hist_counts = hist_counts.get_subembed(set(eval_words).union(positive_seeds).union(negative_seeds),
restrict_context=False)
print("Evaluating with ", len(eval_words), "out of", len(lexicon))
print("Embeddings with ", len(embed_words))
print("PMI")
polarities = run_method(positive_seeds, negative_seeds,
hist_counts,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.pmi,
boot_size=6,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
evaluate(qwn, lexicon, eval_words, tau_lexicon=kuperman)
print("Dist with 1990s Fic embeddings")
polarities = run_method(positive_seeds, negative_seeds,
hist_embed.get_subembed(set(embed_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.dist,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
print("Densifier with 1990s Fic embeddings")
polarities = run_method(positive_seeds, negative_seeds,
hist_embed.get_subembed(set(embed_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.densify,
boot_size=6,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
print("SentProp with 1990s Fic embeddings")
polarities = run_method(positive_seeds, negative_seeds,
hist_embed.get_subembed(set(embed_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
nn=25, beta=0.9,
boot_size=6,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
print("Velikovich with 1990s Fic embeddings")
hist_counts.normalize()
polarities = run_method(positive_seeds, negative_seeds,
hist_counts,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.graph_propagate,
T=3,
boot_size=6,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print()
print("SentProp with CC")
polarities = run_method( positive_seeds, negative_seeds,
common_embed.get_subembed(set(embed_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.random_walk,
beta=0.99, nn=10,
boot_size=6,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
print("Densifier with CC")
polarities = run_method( positive_seeds, negative_seeds,
common_embed.get_subembed(set(embed_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.densify,
boot_size=6,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=kuperman)
def evaluate_finance_methods():
np.random.seed(0)
print("Getting evalution words and embeddings..")
gi = lexicons.load_lexicon("inquirer", remove_neutral=False)
lexicon = lexicons.load_lexicon("finance", remove_neutral=True)
### padding in neutrals from GI lexicon
gi_neut = [word for word in gi if gi[word] == 0]
gi_neut = np.random.choice(gi_neut, int( (float(len(gi_neut))/(len(gi)-len(gi_neut)) * len(lexicon))))
for word in gi_neut:
lexicon[word] = 0
positive_seeds, negative_seeds = seeds.finance_seeds()
stock_embed = create_representation("SVD", constants.STOCK_EMBEDDINGS)
stock_counts = create_representation("Explicit", constants.STOCK_COUNTS)
common_embed = create_representation("GIGA", constants.COMMON_EMBEDDINGS, set(lexicon.keys()).union(positive_seeds).union(negative_seeds))
stock_words = set(stock_embed.iw)
    common_words = set(common_embed.iw)
eval_words = [word for word in lexicon if word in stock_words and
word in common_words and
not word in positive_seeds and
not word in negative_seeds]
stock_counts = stock_counts.get_subembed(set(eval_words).union(positive_seeds).union(negative_seeds), restrict_context=False)
print("Evaluating with ", len(eval_words), "out of", len(lexicon))
print("Velikovich with 1990s Fic embeddings")
stock_counts.normalize()
polarities = run_method(positive_seeds, negative_seeds,
stock_counts,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.graph_propagate,
T=3,
boot_size=6,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=None)
print()
print("PMI")
polarities = run_method(positive_seeds, negative_seeds,
stock_counts,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.pmi,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words)
print()
print("SentProp with stock embeddings")
polarities = run_method(positive_seeds, negative_seeds,
stock_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
beta=0.9, nn=25,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words)
print("Densifier with stock embeddings")
polarities = run_method(positive_seeds, negative_seeds,
stock_embed.get_subembed(set(eval_words).union(negative_seeds).union(positive_seeds)),
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.densify,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words)
def evaluate_twitter_methods():
np.random.seed(0)
print("Getting evalution words and embeddings..")
gi = lexicons.load_lexicon("inquirer", remove_neutral=False)
lexicon = lexicons.load_lexicon("twitter", remove_neutral=True)
scores = lexicons.load_lexicon("twitter-scores", remove_neutral=True)
sent140 = lexicons.load_lexicon("140-scores", remove_neutral=False)
# padding lexicon with neutral from GI
gi_neut = [word for word in gi if gi[word] == 0]
gi_neut = np.random.choice(gi_neut, int( (float(len(gi_neut))/(len(gi)-len(gi_neut)) * len(lexicon))))
for word in gi_neut:
lexicon[word] = 0
positive_seeds, negative_seeds = seeds.twitter_seeds()
embed = create_representation("GIGA", constants.TWITTER_EMBEDDINGS, set(lexicon.keys()).union(positive_seeds).union(negative_seeds))
print(len((set(positive_seeds).union(negative_seeds)).intersection(embed.iw)))
embed_words = set(embed.iw)
s140_words = set(sent140.keys())
eval_words = [word for word in lexicon if word in s140_words and
not word in positive_seeds
and not word in negative_seeds
and word in embed_words]
print("Evaluating with ", len(eval_words), "out of", len(lexicon))
print("Sentiment 140")
evaluate(sent140, lexicon, eval_words, tau_lexicon=scores)
print()
print("SentProp")
polarities = run_method(positive_seeds, negative_seeds,
embed,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.densify,
lr=0.01, regularization_strength=0.5,
**DEFAULT_ARGUMENTS)
util.write_pickle(polarities, "twitter-test.pkl")
evaluate(polarities, lexicon, eval_words, tau_lexicon=scores)
print("SentProp")
polarities = run_method(positive_seeds, negative_seeds,
embed,
method=polarity_induction_methods.bootstrap,
score_method=polarity_induction_methods.random_walk,
beta=0.9, nn=25,
**DEFAULT_ARGUMENTS)
evaluate(polarities, lexicon, eval_words, tau_lexicon=scores)
def run_method(positive_seeds, negative_seeds, embeddings, transform_embeddings=False, post_densify=False,
method=polarity_induction_methods.densify, **kwargs):
if transform_embeddings:
print("Transforming embeddings...")
embeddings = embedding_transformer.apply_embedding_transformation(embeddings, positive_seeds, negative_seeds, n_dim=50)
if post_densify:
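        # two-stage scheme: induce polarities once, then re-run densify with
        # the 150 most polar words (plus the original seeds) as new seeds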
polarities = method(embeddings, positive_seeds, negative_seeds, **kwargs)
top_pos = [word for word in
sorted(polarities, key = lambda w : -polarities[w])[:150]]
top_neg = [word for word in
sorted(polarities, key = lambda w : polarities[w])[:150]]
top_pos.extend(positive_seeds)
top_neg.extend(negative_seeds)
return polarity_induction_methods.densify(embeddings, top_pos, top_neg)
positive_seeds = [s for s in positive_seeds if s in embeddings]
negative_seeds = [s for s in negative_seeds if s in embeddings]
return method(embeddings, positive_seeds, negative_seeds, **kwargs)
def print_polarities(polarities, lexicon):
for w, p in sorted(list(polarities.items()), key=itemgetter(1), reverse=True):
print((util.GREEN if lexicon[w] == 1 else util.RED) + \
"{:}: {:0.5f}".format(w, p) + util.ENDC)
def evaluate(polarities, lexicon, eval_words, tau_lexicon=None, tern=True):
acc, auc, avg_prec = binary_metrics(polarities, lexicon, eval_words)
if auc < 0.5:
polarities = {word:-1*polarities[word] for word in polarities}
acc, auc, avg_prec = binary_metrics(polarities, lexicon, eval_words)
print("Binary metrics:")
print("==============")
print("Accuracy with optimal threshold: {:.4f}".format(acc))
print("ROC AUC Score: {:.4f}".format(auc))
print("Average Precision Score: {:.4f}".format(avg_prec))
if not tern:
return
tau, cmn_f1, maj_f1, conf_mat = ternary_metrics(polarities, lexicon, eval_words, tau_lexicon=tau_lexicon)
print("Ternary metrics:")
print("==============")
print("Majority macro F1 baseline {:.4f}".format(maj_f1))
print("Macro F1 with cmn threshold: {:.4f}".format(cmn_f1))
if tau:
print("Kendall Tau {:.4f}".format(tau))
print("Confusion matrix: ")
print(conf_mat)
print("Neg :", float(conf_mat[0,0]) / np.sum(conf_mat[0,:]))
print("Neut :", float(conf_mat[1,1]) / np.sum(conf_mat[1,:]))
print("Pos :", float(conf_mat[2,2]) / np.sum(conf_mat[2,:]))
print()
if tau:
print("Latex table line: {:2.1f} & {:2.1f} & {:.2f}\\\\".format(100*auc, 100*cmn_f1, tau))
else:
print("Latex table line: {:2.1f} & {:2.1f}\\\\".format(100*auc, 100*cmn_f1))
def multiclass_accuracy(polarities, lexicon, eval_words, print_predictions=False, top_perc=None):
print(eval_words[0] + ' is the first word in eval list')
print(type(lexicon))
lexicon_w = [x for x in list(lexicon.keys())]
print(lexicon_w[0] + ' is the first word in lexicon')
lexicon_i = list(lexicon.values())
lexicon = dict(list(zip(lexicon_w, lexicon_i)))
print(type(lexicon))
print(str(len(eval_words)) + ' words to evaluate')
eval_words_new = []
for word in eval_words:
if word in lexicon:
eval_words_new.append(word)
eval_words = eval_words_new
y_prob, y_true = [], []
if top_perc:
polarities = {word:polarities[word] for word in
sorted(eval_words, key = lambda w : abs(polarities[w]-0.5), reverse=True)[:int(top_perc*len(polarities))]}
else:
polarities = {word:polarities[word] for word in eval_words}
for w in polarities:
if polarities[w] > 0:
y_prob.append(1)
elif polarities[w] < 0:
y_prob.append(-1)
else:
y_prob.append(0)
y_true.append(lexicon[w])
print(str(len(y_true)) + ' words from lexicon found')
return accuracy_score(y_true, y_prob)
def binary_metrics(polarities, lexicon, eval_words, print_predictions=False, top_perc=None):
print(eval_words[0] + ' is the first word in eval list')
print(type(lexicon))
lexicon_w = [x for x in list(lexicon.keys())]
print(lexicon_w[0] + ' is the first word in lexicon')
lexicon_i = list(lexicon.values())
lexicon = dict(list(zip(lexicon_w, lexicon_i)))
print(type(lexicon))
print(str(len(eval_words)) + ' words to evaluate')
eval_words_new = []
for word in eval_words:
if word in lexicon:
if lexicon[word] != 0:
eval_words_new.append(word)
eval_words = eval_words_new
y_prob, y_true = [], []
if top_perc:
polarities = {word:polarities[word] for word in
sorted(eval_words, key = lambda w : abs(polarities[w]-0.5), reverse=True)[:int(top_perc*len(polarities))]}
else:
polarities = {word:polarities[word] for word in eval_words}
for w in polarities:
y_prob.append(polarities[w])
if lexicon[w] == 1:
y_true.append(lexicon[w])
else:
y_true.append(1 + lexicon[w])
# y_true.append(1 + lexicon[w] / 2)
n = len(y_true)
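    # Sweep all possible decision thresholds in O(n log n): sort the gold
    # labels by predicted score; for a cut after index i, (1 + i - cumsum[i])
    # counts correct negatives at or below the cut and (positive - cumsum[i])
    # counts correct positives above it.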
ordered_labels = [y_true[i] for i in sorted(list(range(n)), key=lambda i: y_prob[i])]
positive = sum(ordered_labels)
cumsum = np.cumsum(ordered_labels)
best_accuracy = max([(1 + i - cumsum[i] + positive - cumsum[i]) / float(n) for i in range(n)])
print(str(n) + ' words from lexicon found')
    return best_accuracy, roc_auc_score(y_true, y_prob), average_precision_score(y_true, y_prob)
def ternary_metrics(polarities, lexicon, eval_words, tau_lexicon=None):
    if tau_lexicon is not None:
kendall_words = list(set(eval_words).intersection(tau_lexicon))
y_prob, y_true = [], []
polarities = {word:polarities[word] for word in eval_words}
for w in polarities:
y_prob.append(polarities[w])
y_true.append(lexicon[w])
y_prob = np.array(y_prob)
y_true = np.array(y_true)
y_prob = 2*(y_prob - np.min(y_prob)) / (np.max(y_prob) - np.min(y_prob)) - 1
neg_prop = np.sum(np.array(list(lexicon.values())) == -1) / float(len(lexicon))
pos_prop = np.sum(np.array(list(lexicon.values())) == 1) / float(len(lexicon))
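    # Choose thresholds so the predicted class proportions match the gold
    # lexicon's negative/positive proportions ("common proportion" cutoffs).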
sorted_probs = sorted(y_prob)
neg_thresh = sorted_probs[int(np.round(neg_prop*len(sorted_probs)))]
pos_thresh = sorted_probs[-int(np.round(pos_prop*len(sorted_probs)))]
cmn_labels = [1 if val >= pos_thresh else -1 if val <= neg_thresh else 0 for val in y_prob]
    if tau_lexicon is not None:
tau = kendalltau(*list(zip(*[(polarities[word], tau_lexicon[word]) for word in kendall_words])))[0]
else:
tau = None
maj_f1 = f1_score(y_true, np.repeat(sp.stats.mode(y_true)[0][0], len(y_true)), average="macro")
cmn_f1 = f1_score(y_true, cmn_labels, average="macro")
label_func = lambda entry : 1 if entry > pos_thresh else -1 if entry < neg_thresh else 0
conf_mat = confusion_matrix(y_true, [label_func(entry) for entry in y_prob])
return tau, cmn_f1, maj_f1, conf_mat
def optimal_tern_acc(polarities, lexicon, eval_words, threshes=np.arange(0.95, 0.0, -0.01)):
"""
Performs grid search to determine optimal ternary accuracy.
"""
y_prob, y_true = [], []
polarities = {word:polarities[word] for word in eval_words}
for w in polarities:
y_prob.append(polarities[w])
y_true.append(lexicon[w])
y_prob = np.array(y_prob)
y_true = np.array(y_true)
y_prob = 2*(y_prob - np.min(y_prob)) / (np.max(y_prob) - np.min(y_prob)) - 1
f1s = np.zeros((len(threshes)**2,))
for i, pos_thresh in enumerate(threshes):
for k, neg_thresh in enumerate(threshes):
labels = []
for j in range(len(y_prob)):
if y_prob[j] > pos_thresh:
labels.append(1)
elif y_prob[j] < -1*neg_thresh:
labels.append(-1)
else:
labels.append(0)
f1s[i*len(threshes)+k] = f1_score(y_true, labels, average="macro")
print("(Oracle) majority baseline {:.4f}".format(
f1_score(y_true, np.repeat(sp.stats.mode(y_true)[0][0], len(y_true)), average="macro")))
print("Accuracy with optimal threshold: {:.4f}".format(np.max(f1s)))
best_iter = int(np.argmax(f1s))
    pos_thresh = threshes[best_iter // len(threshes)]
neg_thresh = -1*threshes[best_iter % len(threshes)]
print("Optimal positive threshold: {:.4f}".format(pos_thresh))
print("Optimal negative threshold: {:.4f}".format(neg_thresh))
print("Confusion matrix: ")
label_func = lambda entry : 1 if entry > pos_thresh else -1 if entry < neg_thresh else 0
conf_mat = confusion_matrix(y_true, [label_func(entry) for entry in y_prob])
print(conf_mat)
print("Neg :", float(conf_mat[0,0]) / np.sum(conf_mat[0,:]))
print("Neut :", float(conf_mat[1,1]) / np.sum(conf_mat[1,:]))
print("Pos :", float(conf_mat[2,2]) / np.sum(conf_mat[2,:]))
if __name__ == '__main__':
random.seed(0)
if sys.argv[1] == "twitter":
evaluate_twitter_methods()
elif sys.argv[1] == "finance":
evaluate_finance_methods()
elif sys.argv[1] == "overlap":
evaluate_overlap_methods()
elif sys.argv[1] == "adj":
evaluate_adj_methods()
elif sys.argv[1] == "hyper":
hyperparam_eval()
else:
evaluate_methods()
| 43.982533
| 142
| 0.65366
|
4a1223b15bf477a8bce670985366586d8e7839b1
| 6,158
|
py
|
Python
|
mak/libs/ircc/ir_grammar/ir_metadata.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
mak/libs/ircc/ir_grammar/ir_metadata.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
mak/libs/ircc/ir_grammar/ir_metadata.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
from ..ir_ast import IrTypeMetadata, IrMetadataDeclaration, IrMetadataString, IrMetadataInteger, IrMetadataLink, IrMetadataNode, IrSpecializedMetadata, IrMetadataNull, IrMetadataFlagList, IrReference
from motor_typing import TYPE_CHECKING
def p_ir_metadata_list_opt(p):
# type: (YaccProduction) -> None
"""
ir-metadata-list-opt : METADATA_NAME METADATA_REF ir-metadata-list-opt
"""
p[0] = [(IrMetadataLink(p[1]), IrMetadataLink(p[2]))] + p[3]
def p_ir_metadata_list_opt_empty(p):
# type: (YaccProduction) -> None
"""
ir-metadata-list-opt :
"""
p[0] = []
def p_ir_metadata(p):
# type: (YaccProduction) -> None
"""
ir-metadata : METADATA_NAME EQUAL ir-metadata-distinct ir-metadata-value
| METADATA_REF EQUAL ir-metadata-distinct ir-metadata-value
"""
p[0] = (IrReference(p[1]), IrMetadataDeclaration(p[4]))
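
# For reference, the concrete syntax these rules accept mirrors LLVM IR
# metadata declarations, e.g. (illustrative):
#   !name = !{!"a string", !0}
#   !0 = distinct !{null}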
def p_ir_metadata_distinct(p):
# type: (YaccProduction) -> None
"""
ir-metadata-distinct : DISTINCT
| empty
"""
p[0] = None
def p_ir_metadata_value(p):
# type: (YaccProduction) -> None
"""
ir-metadata-value : ir-metadata-string
| ir-metadata-node
| ir-metadata-debug-node
| ir-metadata-ref
"""
p[0] = p[1]
def p_ir_metadata_null(p):
# type: (YaccProduction) -> None
"""
ir-metadata-value : NULL
"""
p[0] = IrMetadataNull()
def p_ir_metadata_ref(p):
# type: (YaccProduction) -> None
"""
ir-metadata-ref : METADATA_REF
"""
p[0] = IrMetadataLink(p[1])
def p_ir_metadata_string(p):
# type: (YaccProduction) -> None
"""
ir-metadata-string : METADATA_MARK LITERAL_STRING
"""
p[0] = IrMetadataString(p[2])
def p_ir_metadata_node(p):
# type: (YaccProduction) -> None
"""
ir-metadata-node : METADATA_MARK LBRACE ir-metadata-param-list RBRACE
"""
p[0] = IrMetadataNode(p[3])
def p_ir_metadata_param_list(p):
# type: (YaccProduction) -> None
"""
ir-metadata-param-list : ir-metadata-param COMMA ir-metadata-param-list
"""
p[0] = [p[1]] + p[3]
def p_ir_metadata_param_list_end(p):
# type: (YaccProduction) -> None
"""
ir-metadata-param-list : ir-metadata-param
| empty
"""
p[0] = [p[1]] if p[1] is not None else []
def p_ir_metadata_param(p):
# type: (YaccProduction) -> None
"""
ir-metadata-param : ir-value
| ir-metadata-value
"""
p[0] = p[1]
def p_lex_disable_keywords(p):
# type: (YaccProduction) -> None
"""
lex-disable-keywords : empty
"""
p.lexer.disable_keywords()
def p_lex_enable_keywords(p):
# type: (YaccProduction) -> None
"""
lex-enable-keywords : empty
"""
p.lexer.enable_keywords()
def p_ir_metadata_debug_node(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-node : METADATA_NAME LPAREN lex-disable-keywords LPAREN_MARK ir-metadata-debug-attribute-list-opt RPAREN lex-enable-keywords
"""
p[0] = IrSpecializedMetadata(p[1][1:], p[5])
def p_ir_metadata_debug_attribute_list(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute-list-opt : ID_LABEL lex-enable-keywords COLON ir-metadata-debug-attribute lex-disable-keywords COMMA ir-metadata-debug-attribute-list-opt
| ID_LABEL lex-enable-keywords COLON ir-metadata-debug-attribute lex-disable-keywords
"""
if p[4]:
p[0] = [(p[1], p[4])]
else:
p[0] = []
if len(p) > 6:
p[0] += p[7]
def p_ir_metadata_debug_attribute_list_unnamed(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute-list-opt : ir-metadata-debug-attribute COMMA ir-metadata-debug-attribute-list-opt
| ir-metadata-debug-attribute
"""
p[0] = [(None, p[1])]
if len(p) > 2:
p[0] += p[3]
def p_ir_metadata_debug_attribute_list_end(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute-list-opt : empty
"""
p[0] = []
def p_ir_metadata_debug_attribute(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute : ir-metadata-value
| ir-value
| ir-metadata-debug-flag-combination
"""
p[0] = p[1]
def p_ir_metadata_debug_attribute_string(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute : LITERAL_STRING
"""
p[0] = IrMetadataString(getattr(p.slice[1], 'parsed_value'))
def p_ir_metadata_debug_attribute_integer(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute : LITERAL_DECIMAL
"""
p[0] = IrMetadataInteger(getattr(p.slice[1], 'parsed_value'))
def p_ir_metadata_debug_attribute_bool(p):
# type: (YaccProduction) -> None
"""
    ir-metadata-debug-attribute : TRUE
                                | FALSE
    """
    p[0] = None  # boolean flags are currently discarded (left as None)
def p_ir_metadata_debug_attribute_none(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute : NONE
"""
p[0] = None #IrMetadataInteger(getattr(p.slice[1], 'parsed_value'))
def p_ir_metadata_debug_attribute_error(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-attribute : error
"""
p[0] = None
def p_ir_metadata_debug_flag_combination(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-flag-combination : ir-metadata-debug-flag PIPE ir-metadata-debug-flag-combination
| ir-metadata-debug-flag
"""
if len(p) > 2:
p[0] = p[3]
p[0].add_flag(p[1])
else:
p[0] = IrMetadataFlagList(p[1])
def p_ir_metadata_debug_flag(p):
# type: (YaccProduction) -> None
"""
ir-metadata-debug-flag : ID_LABEL
"""
p[0] = p[1]
if TYPE_CHECKING:
from ply.yacc import YaccProduction
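# Illustrative examples (not taken from this repository) of the LLVM IR
# metadata syntax these productions target:
#   !name = !{!"a string", i32 42, !0}    ; named node with string/int/link params
#   !0 = distinct !{null}                 ; distinct anonymous node
#   !1 = !DILocation(line: 4, scope: !0)  ; specialized debug node with labeled attributes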
| 25.983122
| 199
| 0.586229
|
4a12249aceeedaf5dbbff7116e066727dbc59b0c
| 1,041
|
py
|
Python
|
notes/30 - dictionaries/dictionaries_ex6.py
|
Drv4MC/ICS3-Python-Notes
|
3614a5adfb4199bfc67dad8bbeb3bfee45401a5d
|
[
"MIT"
] | 3
|
2022-02-10T19:06:28.000Z
|
2022-03-25T17:55:56.000Z
|
notes/30 - dictionaries/dictionaries_ex6.py
|
Drv4MC/ICS3-Python-Notes
|
3614a5adfb4199bfc67dad8bbeb3bfee45401a5d
|
[
"MIT"
] | null | null | null |
notes/30 - dictionaries/dictionaries_ex6.py
|
Drv4MC/ICS3-Python-Notes
|
3614a5adfb4199bfc67dad8bbeb3bfee45401a5d
|
[
"MIT"
] | 17
|
2020-09-15T16:40:23.000Z
|
2022-03-22T17:52:32.000Z
|
#-----------------------------------------------------------------------------
# Name: Dictionaries Example 6 - Ordering a List (kinda!)
# Purpose:    An example file demoing dictionary ordering
#
# Author: Mr. Brooks
# Created: 11-Nov-2020
# Updated: 11-Nov-2020
#-----------------------------------------------------------------------------
x = {'a': 2, 'b': 4, 'c': 3, 'd': 1, 'e': 0}
print(x.keys())
print(x.values())
print(x.items())
#https://www.w3schools.com/python/ref_func_sorted.asp
#Dicts preserve insertion order (Python 3.7+) but aren't kept sorted...you can put the items into a list and then sort them
#Works well if you only want to DISPLAY the sorted values
print(sorted(x.items())) #Returns a sorted list...but how do we tell it what to sort by?
#https://docs.python.org/3/library/operator.html#operator.itemgetter
#Use the item getter command
import operator #Remember that imports should normally be at the beginning of your program
print(sorted(x.items(), key=operator.itemgetter(1))) #Tell it to sort by the second element of each tuple
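#A sort key can also be a lambda; e.g. (illustrative) sort by value, largest first
print(sorted(x.items(), key=lambda item: item[1], reverse=True))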
| 43.375
| 105
| 0.602305
|
4a1225d9eb23f5a4d58962bf35d8fbb9d39b0496
| 28,061
|
py
|
Python
|
python/resnetMultiLabel.py
|
cdaube/sharedFunctionalFeatures
|
3b7e8b17973a7fef195626a34bed54517cfd3915
|
[
"MIT"
] | 1
|
2021-11-18T18:13:39.000Z
|
2021-11-18T18:13:39.000Z
|
python/resnetMultiLabel.py
|
cdaube/sharedFunctionalFeatures
|
3b7e8b17973a7fef195626a34bed54517cfd3915
|
[
"MIT"
] | null | null | null |
python/resnetMultiLabel.py
|
cdaube/sharedFunctionalFeatures
|
3b7e8b17973a7fef195626a34bed54517cfd3915
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
import warnings
from keras.layers import merge, Input, Add
from keras.layers import Dense, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.layers import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
import numpy as np
import keras
add_layer = keras.layers.Add()
#fcOut = 2000
fcOutDefault = 2000
modeArg = 0 #2
def bottleneck(input_tensor, kernel_size, filters, stage, block):
'''The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
border_mode='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2c')(x)
x = merge([x, input_tensor], mode='sum')
x = Activation('relu')(x)
return x
def conv_bottleneck(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
'''conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
'''
nb_filter1, nb_filter2, nb_filter3 = filters
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, subsample=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = Convolution2D(nb_filter3, 1, 1, subsample=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = merge([x, shortcut], mode='sum')
x = Activation('relu')(x)
return x
##########################
def basicblock(input_tensor, kernel_size, filters, stage, block):
'''The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
nb_filter1, nb_filter2 = filters
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, kernel_size, kernel_size, border_mode='same', name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
border_mode='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2b')(x)
x = merge([x, input_tensor], mode='sum')
x = Activation('relu')(x)
return x
def conv_basicblock(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
'''conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
'''
nb_filter1, nb_filter2 = filters
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, kernel_size, kernel_size, border_mode='same', subsample=strides, name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '2b')(x)
shortcut = Convolution2D(nb_filter2, 1, 1, subsample=strides, name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(mode=modeArg, axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = add_layer([x, shortcut])
x = Activation('relu')(x)
return x
###########################
def ResNet50Tian(include_top=True, weights=None,
input_tensor=None, fcOut=fcOutDefault):
'''
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_bottleneck(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = bottleneck(x, 3, [64, 64, 256], stage=2, block='b')
x = bottleneck(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_bottleneck(x, 3, [128, 128, 512], stage=3, block='a')
x = bottleneck(x, 3, [128, 128, 512], stage=3, block='b')
x = bottleneck(x, 3, [128, 128, 512], stage=3, block='c')
x = bottleneck(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_bottleneck(x, 3, [256, 256, 1024], stage=4, block='a')
x = bottleneck(x, 3, [256, 256, 1024], stage=4, block='b')
x = bottleneck(x, 3, [256, 256, 1024], stage=4, block='c')
x = bottleneck(x, 3, [256, 256, 1024], stage=4, block='d')
x = bottleneck(x, 3, [256, 256, 1024], stage=4, block='e')
x = bottleneck(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_bottleneck(x, 3, [512, 512, 2048], stage=5, block='a')
x = bottleneck(x, 3, [512, 512, 2048], stage=5, block='b')
x = bottleneck(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(fcOut, activation='softmax', name='fcOut')(x)
model = Model(img_input, x)
# load weights
return model
########################3
def ResNet34Tian(include_top=True, weights=None,
input_tensor=None, fcOut=fcOutDefault):
'''
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_basicblock(x, 3, [64, 64], stage=2, block='a', strides=(1, 1))
x = basicblock(x, 3, [64, 64], stage=2, block='b')
x = basicblock(x, 3, [64, 64], stage=2, block='c')
x = conv_basicblock(x, 3, [128, 128], stage=3, block='a')
x = basicblock(x, 3, [128, 128], stage=3, block='b')
x = basicblock(x, 3, [128, 128], stage=3, block='c')
x = basicblock(x, 3, [128, 128], stage=3, block='d')
x = conv_basicblock(x, 3, [256, 256], stage=4, block='a')
x = basicblock(x, 3, [256, 256], stage=4, block='b')
x = basicblock(x, 3, [256, 256], stage=4, block='c')
x = basicblock(x, 3, [256, 256], stage=4, block='d')
x = basicblock(x, 3, [256, 256], stage=4, block='e')
x = basicblock(x, 3, [256, 256], stage=4, block='f')
x = conv_basicblock(x, 3, [512, 512], stage=5, block='a')
x = basicblock(x, 3, [512, 512], stage=5, block='b')
x = basicblock(x, 3, [512, 512], stage=5, block='c')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(fcOut, activation='softmax', name='fcOut')(x)
model = Model(img_input, x)
# load weights
return model
##################
def ResNet18Tian(include_top=True, weights=None,
input_tensor=None, fcOut=fcOutDefault):
'''
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_basicblock(x, 3, [64, 64], stage=2, block='a', strides=(1, 1))
x = basicblock(x, 3, [64, 64], stage=2, block='b')
x = conv_basicblock(x, 3, [128, 128], stage=3, block='a')
x = basicblock(x, 3, [128, 128], stage=3, block='b')
x = conv_basicblock(x, 3, [256, 256], stage=4, block='a')
x = basicblock(x, 3, [256, 256], stage=4, block='b')
x = conv_basicblock(x, 3, [512, 512], stage=5, block='a')
x = basicblock(x, 3, [512, 512], stage=5, block='b')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(fcOut, activation='softmax', name='fcOut')(x)
model = Model(img_input, x)
# load weights
return model
##################
def ResNet10Tian(include_top=True, weights=None,
input_tensor=None, fcOut=fcOutDefault):
'''
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_basicblock(x, 3, [64, 64], stage=2, block='a', strides=(1, 1))
x = conv_basicblock(x, 3, [128, 128], stage=3, block='a')
x = conv_basicblock(x, 3, [256, 256], stage=4, block='a')
x = conv_basicblock(x, 3, [512, 512], stage=5, block='a')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(fcOut, activation='softmax', name='fcOut')(x)
model = Model(img_input, x)
# load weights
return model
##################
def ResNet10MultiLabel(include_top=True, weights=None,
input_tensor=None, fcOut=fcOutDefault):
'''
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_basicblock(x, 3, [64, 64], stage=2, block='a', strides=(1, 1))
x = conv_basicblock(x, 3, [128, 128], stage=3, block='a')
x = conv_basicblock(x, 3, [256, 256], stage=4, block='a')
x = conv_basicblock(x, 3, [512, 512], stage=5, block='a')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
#x = Dense(fcOut, activation='sigmoid', name='fcOut')(x)
x = Dense(fcOut, name='fcOut')(x) # regression
model = Model(img_input, x)
# load weights
return model
##################
fcID_Default = 2000
fcVector_Default = 500
fcGender_Default = 2
fcEthn_Default = 2
fcAge_Default = 3
fcEmo_Default = 7
fcAnglex_Default = 5
fcAngley_Default = 5
fcAnglelx_Default = 5
fcAnglely_Default = 5
def ResNet10MultiTask(include_top=True, weights=None, input_tensor=None,
fcID=fcID_Default,fcVector=fcVector_Default,fcGender=fcGender_Default,
fcEthn=fcEthn_Default,fcAge=fcAge_Default,fcEmo=fcEmo_Default,
fcAnglex=fcAnglex_Default,fcAngley=fcAngley_Default,
fcAnglelx=fcAnglelx_Default,fcAnglely=fcAnglely_Default,fcActFun='softmax'):
'''
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_basicblock(x, 3, [64, 64], stage=2, block='a', strides=(1, 1))
x = conv_basicblock(x, 3, [128, 128], stage=3, block='a')
x = conv_basicblock(x, 3, [256, 256], stage=4, block='a')
x = conv_basicblock(x, 3, [512, 512], stage=5, block='a')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
#x = Flatten()(x)
#x = Dense(fcOut, activation='softmax', name='fcOut')(x)
x = Flatten()(x)
#447_1M_2EA_25_6Sadness_anglex5_angley1_anglelx5_anglely4.png 1785
xID = Dense(fcID, activation=fcActFun, name='fcID')(x)
xVector = Dense(fcVector, activation=fcActFun, name='fcVector')(x)
xGender = Dense(fcGender, activation=fcActFun, name='fcGender')(x)
xEthn = Dense(fcEthn, activation=fcActFun, name='fcEthn')(x) #Ethnicity
xAge = Dense(fcAge, activation=fcActFun, name='fcAge')(x)
xEmo = Dense(fcEmo, activation=fcActFun, name='fcEmo')(x)
xAnglex = Dense(fcAnglex, activation=fcActFun, name='fcAnglex')(x)
xAngley = Dense(fcAngley, activation=fcActFun, name='fcAngley')(x)
xAnglelx = Dense(fcAnglelx, activation=fcActFun, name='fcAnglelx')(x)
xAnglely = Dense(fcAnglely, activation=fcActFun, name='fcAnglely')(x)
#output=np.concatenate((xID,xVector,xGender), axis=1)
model = Model(input=img_input, output=[xID,xVector,xGender,xEthn,xAge,xEmo,xAnglex,xAngley,xAnglelx,xAnglely])
#model = Model(img_input, {'fcID': xID, 'fcVector': xVector, 'fcGender': xGender})
# load weights
return model
def ResNet10DualTask(include_top=True, weights=None, input_tensor=None, extraTask=None,
fcID=fcID_Default,fcVector=fcVector_Default,fcGender=fcGender_Default,
fcEthn=fcEthn_Default,fcAge=fcAge_Default,fcEmo=fcEmo_Default,
fcAnglex=fcAnglex_Default,fcAngley=fcAngley_Default,
fcAnglelx=fcAnglelx_Default,fcAnglely=fcAnglely_Default,fcActFun='softmax'):
'''
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_dim_ordering() == 'tf':
bn_axis = 3
else:
bn_axis = 1
x = ZeroPadding2D((3, 3))(img_input)
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
x = BatchNormalization(mode=modeArg, axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_basicblock(x, 3, [64, 64], stage=2, block='a', strides=(1, 1))
x = conv_basicblock(x, 3, [128, 128], stage=3, block='a')
x = conv_basicblock(x, 3, [256, 256], stage=4, block='a')
x = conv_basicblock(x, 3, [512, 512], stage=5, block='a')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
xID = Dense(fcID, activation=fcActFun, name='fcID')(x)
if extraTask is None:
outPut = [xID]
elif extraTask=='vector':
print('model recognised extra task as vector')
xVector = Dense(fcVector, activation=fcActFun, name='fcVector')(x)
outPut = [xID, xVector]
elif extraTask=='gender':
print('model recognised extra task as gender')
xGender = Dense(fcGender, activation=fcActFun, name='fcGender')(x)
outPut = [xID, xGender]
elif extraTask=='ethnicity':
print('model recognised extra task as ethnicity')
xEthn = Dense(fcEthn, activation=fcActFun, name='fcEthn')(x)
outPut = [xID, xEthn]
elif extraTask=='age':
print('model recognised extra task as age')
xAge = Dense(fcAge, activation=fcActFun, name='fcAge')(x)
outPut = [xID, xAge]
elif extraTask=='emotion':
print('model recognised extra task as emotion')
xEmo = Dense(fcEmo, activation=fcActFun, name='fcEmo')(x)
outPut = [xID, xEmo]
elif extraTask=='anglex':
print('model recognised extra task as angle x')
xAnglex = Dense(fcAnglex, activation=fcActFun, name='fcAnglex')(x)
outPut = [xID, xAnglex]
elif extraTask=='angley':
print('model recognised extra task as angle y')
xAngley = Dense(fcAngley, activation=fcActFun, name='fcAngley')(x)
outPut = [xID, xAngley]
elif extraTask=='anglelx':
print('model recognised extra task as angle l x')
xAnglelx = Dense(fcAnglelx, activation=fcActFun, name='fcAnglelx')(x)
outPut = [xID, xAnglelx]
elif extraTask=='anglely':
        print('model recognised extra task as angle l y')
xAnglely = Dense(fcAnglely, activation=fcActFun, name='fcAnglely')(x)
outPut = [xID, xAnglely]
model = Model(input=img_input, output=outPut)
return model
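##################
# Usage sketch (illustrative; assumes the same legacy Keras 1.x-style API as
# the code above -- merge(), subsample=, Model(input=..., output=...)):
# model = ResNet10Tian(include_top=True, fcOut=2000)
# model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
# model.summary()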
| 36.585398
| 140
| 0.606928
|
4a1226f632a9a6a63b4b9963deadb5a7fc554b3a
| 23,426
|
py
|
Python
|
model_raisim/DRMPC/SaTiZ_3D/test.py
|
Stylite-Y/XArm-Simulation
|
654dca390e635b6294a8b5066727d0f4d6736eb1
|
[
"MIT"
] | null | null | null |
model_raisim/DRMPC/SaTiZ_3D/test.py
|
Stylite-Y/XArm-Simulation
|
654dca390e635b6294a8b5066727d0f4d6736eb1
|
[
"MIT"
] | null | null | null |
model_raisim/DRMPC/SaTiZ_3D/test.py
|
Stylite-Y/XArm-Simulation
|
654dca390e635b6294a8b5066727d0f4d6736eb1
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys
import do_mpc
from casadi import * # symbolic library CasADi
import datetime
import raisimpy as raisim
import yaml
import time
from mpl_toolkits.mplot3d import axes3d
def tanh_sig(x):
return 0.5 + 0.5 * np.tanh(1000 * x)
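# tanh_sig is a smooth approximation of the unit step (~0 for x < 0, ~1 for
# x > 0; the factor 1000 controls how sharp the transition is). It gates the
# control-force terms in the model below so they act only while z_b > z_ref.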
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
show_animation = True
mpl.rcParams['font.size'] = 18
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['axes.grid'] = True
def MPCControl():
TraPoint_x = np.array([-0.2, -0.5, 0.1])
TraPoint_y = np.array([0.0, 0.6, 0.6])
flag = 0
m = 0.4
z_ref = 0.5
v_zref = -6.0
v_xref = -6.0
v_yref = -6.0
dx_ref = 0.6
dy_ref = 0.6
sim_t_step = 0.001
xtra = 0.0
ytra = 0.0
# v_xref = 6
index = 0
g = -9.8
flag = 0
pos_init = np.array([0.0, 0.0, 0.45])
v_init = np.array([5, 0.0, -5])
x0 = np.concatenate([pos_init, v_init])
x0 = x0.reshape(-1, 1)
model_type = 'continuous' # either 'discrete' or 'continuous'
model = do_mpc.model.Model(model_type)
x_b = model.set_variable(var_type='_x', var_name='x_b', shape=(1, 1))
y_b = model.set_variable(var_type='_x', var_name='y_b', shape=(1, 1))
z_b = model.set_variable(var_type='_x', var_name='z_b', shape=(1, 1))
dx_b = model.set_variable(var_type='_x', var_name='dx_b', shape=(1, 1))
dy_b = model.set_variable(var_type='_x', var_name='dy_b', shape=(1, 1))
dz_b = model.set_variable(var_type='_x', var_name='dz_b', shape=(1, 1))
u_x = model.set_variable(var_type='_u', var_name='u_x', shape=(1, 1))
u_y = model.set_variable(var_type='_u', var_name='u_y', shape=(1, 1))
u_z = model.set_variable(var_type='_u', var_name='u_z', shape=(1, 1))
model.set_rhs('x_b', dx_b)
model.set_rhs('y_b', dy_b)
model.set_rhs('z_b', dz_b)
dx_b_next = vertcat(
tanh_sig(z_b - z_ref) * u_x / m,
)
dy_b_next = vertcat(
tanh_sig(z_b - z_ref) * u_y / m,
)
dz_b_next = vertcat(
g + tanh_sig(z_b - z_ref) * u_z / m,
)
model.set_rhs('dx_b', dx_b_next)
model.set_rhs('dy_b', dy_b_next)
model.set_rhs('dz_b', dz_b_next)
model.setup()
mpc = do_mpc.controller.MPC(model)
setup_mpc = {
'n_horizon': 150,
't_step': sim_t_step,
'n_robust': 1,
'store_full_solution': True,
}
mpc.set_param(**setup_mpc)
xq1 = 2000.0
yq2 = 1000.0
zq3 = 1000.0
vxq1 = 2000.0
vyq2 = 1000.0
vzq3 = 2000.0
r1 = 0.001
r2 = 0.001
r3 = 0.0001
lterm = xq1 * (model.x['x_b'] - xtra) ** 2 + yq2 * (model.x['y_b'] - ytra) ** 2 + zq3 * (model.x['z_b'] - z_ref) ** 2 + \
vxq1 * (model.x['dx_b'] - v_xref) ** 2 + vyq2 * (model.x['dy_b'] - v_yref) ** 2 + vzq3 * (model.x['dz_b'] - v_zref) ** 2 + \
r1 * (model.u['u_x']) ** 2 + r2 * (model.u['u_y']) ** 2 + r3 * (model.u['u_z']) ** 2
mterm = xq1 * (model.x['x_b'] - xtra) ** 2 + yq2 * (model.x['y_b'] - ytra) ** 2 + zq3 * (model.x['z_b'] - z_ref) ** 2 + \
vxq1 * (model.x['dx_b'] - v_xref) ** 2 + vyq2 * (model.x['dy_b'] - v_yref) ** 2 + vzq3 * (model.x['dz_b'] - v_zref) ** 2
mpc.set_objective(mterm=mterm, lterm=lterm)
mpc.bounds['lower', '_x', 'x_b'] = -1.5
mpc.bounds['upper', '_x', 'x_b'] = 1.0
mpc.bounds['lower', '_x', 'y_b'] = -0.5
mpc.bounds['upper', '_x', 'y_b'] = 1.5
mpc.bounds['lower', '_x', 'z_b'] = 0.0
mpc.bounds['upper', '_x', 'z_b'] = 1.0
mpc.bounds['lower', '_u', 'u_x'] = -500.0
mpc.bounds['upper', '_u', 'u_x'] = 500.0
mpc.bounds['lower', '_u', 'u_y'] = -500.0
mpc.bounds['upper', '_u', 'u_y'] = 500.0
mpc.bounds['lower', '_u', 'u_z'] = -500.0
mpc.bounds['upper', '_u', 'u_z'] = 0.0
mpc.setup()
simulator = do_mpc.simulator.Simulator(model)
simulator.set_param(t_step=sim_t_step)
simulator.setup()
estimator = do_mpc.estimator.StateFeedback(model)
simulator.x0 = x0
mpc.x0 = x0
estimator.x0 = x0
mpc.set_initial_guess()
mpc.reset_history()
fig, ax, graphics = do_mpc.graphics.default_plot(mpc.data)
plt.ion()
for i in range(20000):
if flag == 0:
if index == 0:
v_xref = dx_ref / z_ref * v_zref
v_yref = 0.0
xtra = TraPoint_x[index] + dx_ref
ytra = 0.0
elif index == 1:
v_xref = - (dx_ref / 3) / z_ref * v_zref
v_yref = - (2 * dy_ref / 3)/ z_ref * v_zref
xtra = TraPoint_x[index] - dx_ref / 3
ytra = TraPoint_y[index] + dy_ref / 3
# break
elif index == 2:
v_xref = - (dx_ref / 3) / z_ref * v_zref
v_yref = (2 * dy_ref / 3) / z_ref * v_zref
xtra = TraPoint_x[index] - dx_ref / 3
ytra = TraPoint_y[index] + (2 * dy_ref) / 3
flag = 1
Force = mpc.make_step(x0)
y_next = simulator.make_step(Force)
x0 = estimator.make_step(y_next)
print("**********************************************************************************************")
print("x0: ", x0)
print("Force: ", Force[0, 0], Force[1, 0], Force[2, 0])
print("xtra, ytra, v_xref, v_yref, v_zref: ", xtra, ytra, v_xref, v_yref, v_zref)
if show_animation:
# mpc_graphics.plot_results(t_ind=k)
# mpc_graphics.plot_predictions(t_ind=k)
# mpc_graphics.reset_axes()
graphics.plot_results(t_ind=i)
graphics.plot_predictions(t_ind=i)
graphics.reset_axes()
plt.show()
plt.pause(0.01)
# graphics.plot_predictions(t_ind=0)
graphics.plot_results()
graphics.reset_axes()
plt.show()
def TriCal():
t = 0.2
pos_init = np.array([0.88, 0.0, 0.833])
v_init = np.array([-0.8, 0.0, 0.075])
pos_tar = np.array([0.4, 0.0, 0.5])
v_tar = np.array([-7.2, 0.0, -6])
b_x = np.array([pos_init[0], v_init[0], pos_tar[0], v_tar[0]])
b_y = np.array([pos_init[1], v_init[1], pos_tar[1], v_tar[1]])
b_z = np.array([pos_init[2], v_init[2], pos_tar[2], v_tar[2]])
A = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [1, t, t ** 2, t ** 3], [0, 1, 2 * t, 3 * t ** 2]])
x_coef = np.linalg.solve(A, b_x)
y_coef = np.linalg.solve(A, b_y)
z_coef = np.linalg.solve(A, b_z)
RefCoef = np.array([[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]])
Coef = np.array([[x_coef, y_coef, z_coef]])
RefCoef = np.concatenate([RefCoef, Coef], axis = 0)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# load some test data for demonstration and plot a wireframe
X, Y, Z = axes3d.get_test_data(0.1)
ax.plot_wireframe(X, Y, Z, rstride=5, cstride=5)
# rotate the axes and update
for angle in range(0, 360):
ax.view_init(30, angle)
plt.draw()
plt.pause(.001)
# t = np.linspace(0.0, 0.2, 50)
# t_test = 0.171
# x = x_coef[0] + x_coef[1] * t + x_coef[2] * t ** 2 + x_coef[3] * t ** 3
# x_test = x_coef[0] + x_coef[1] * t_test + x_coef[2] * t_test ** 2 + x_coef[3] * t_test ** 3
# vx = x_coef[1] + 2 * x_coef[2] * t + 3 * x_coef[3] * t ** 2
# vx_test = x_coef[1] + 2 * x_coef[2] * t_test + 3 * x_coef[3] * t_test ** 2
# ax = x_coef[2] * 2 + x_coef[3] * t * 6
# z = z_coef[0] + z_coef[1] * t + z_coef[2] * t ** 2 + z_coef[3] * t ** 3
# vz = z_coef[1] + 2 * z_coef[2] * t + 3 * z_coef[3] * t ** 2
# az = z_coef[2] * 2 + z_coef[3] * t * 6
# plt.figure(1)
# plt.plot(t, x)
# plt.plot(t, vx)
# # plt.plot(t, ax)
# plt.title('x')
# plt.scatter(-0.2, 0.15, c = 'r')
# plt.figure(2)
# # plt.plot(t, vz)
# plt.plot(t, z)
# # plt.plot(t, az)
# plt.title('z')
# plt.show()
print("x_coef, y_coef, z_coef: ", x_coef, y_coef, z_coef)
print("t_test is: ", Coef, RefCoef)
return x_coef, y_coef, z_coef
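def VerifyTri(coef, t, p0, v0, p1, v1, tol=1e-9):
    # Illustrative sanity check (not in the original script): the cubic
    # a0 + a1*t + a2*t^2 + a3*t^3 solved from the 4x4 system in TriCal should
    # reproduce the position/velocity constraints at t = 0 and t = t_end.
    a0, a1, a2, a3 = coef
    assert abs(a0 - p0) < tol and abs(a1 - v0) < tol
    assert abs(a0 + a1 * t + a2 * t ** 2 + a3 * t ** 3 - p1) < tol
    assert abs(a1 + 2 * a2 * t + 3 * a3 * t ** 2 - v1) < tol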
if __name__ == "__main__":
# MPCControl()
TriCal()
| 90.1
| 9,407
| 0.184453
|
4a1226fbd3990bfc48162d9a66056be0be4f8cf1
| 1,043
|
py
|
Python
|
python/company/lex_sort.py
|
hs634/algorithms
|
e411d747eb27a1c784605e44111de2703f6dde4d
|
[
"MIT"
] | null | null | null |
python/company/lex_sort.py
|
hs634/algorithms
|
e411d747eb27a1c784605e44111de2703f6dde4d
|
[
"MIT"
] | null | null | null |
python/company/lex_sort.py
|
hs634/algorithms
|
e411d747eb27a1c784605e44111de2703f6dde4d
|
[
"MIT"
] | null | null | null |
__author__ = 'hs634'

from functools import cmp_to_key
class LexSort():
def __init__(self, arr, order):
self.arr = arr
self.order = order
def _custom_cmp(self, a, b):
p1 = 0
p2 = 0
for i in range(min(len(a), len(b))):
if p1 != p2:
break
p1 = self.order.index(a[i])
p2 = self.order.index(b[i])
if p1 == p2 and len(a) != len(b):
return len(a) - len(b)
return p1 - p2
    def sort_arr(self):
        # cmp_to_key adapts the old-style comparator: Python 3's sorted()
        # no longer accepts a cmp argument.
        return sorted(self.arr, key=cmp_to_key(self._custom_cmp))
class TestLexSort():
def test1(self):
l = LexSort(["acb", "abc", "bca"], "abc")
assert l.sort_arr() == ["abc","acb","bca"]
def test2(self):
l = LexSort(["acb", "abc", "bca"], "cba")
assert l.sort_arr() == ["bca", "acb", "abc"]
def test3(self):
l = LexSort(["aaa","aa",""], "a")
assert l.sort_arr() == ["", "aa", "aaa"]
def run_tests(self):
self.test1()
self.test2()
self.test3()
TestLexSort().run_tests()
| 22.673913
| 53
| 0.478428
|
4a122727675c6be850e2755b637f46f65ee8bff7
| 6,519
|
py
|
Python
|
dso/dso/run.py
|
brendenpetersen/deep-symbolic-optimization
|
8724839dab910022e24d03debdf564236683474b
|
[
"BSD-3-Clause"
] | 134
|
2021-07-06T06:14:02.000Z
|
2022-03-31T18:24:08.000Z
|
dso/dso/run.py
|
brendenpetersen/deep-symbolic-optimization
|
8724839dab910022e24d03debdf564236683474b
|
[
"BSD-3-Clause"
] | 15
|
2021-06-10T17:03:09.000Z
|
2022-01-21T20:15:35.000Z
|
dso/dso/run.py
|
brendenpetersen/deep-symbolic-optimization
|
8724839dab910022e24d03debdf564236683474b
|
[
"BSD-3-Clause"
] | 44
|
2021-06-26T19:11:28.000Z
|
2022-03-25T04:07:41.000Z
|
"""Parallelized, single-point launch script to run DSO on a set of benchmarks."""
import os
import sys
import time
import multiprocessing
from copy import deepcopy
from datetime import datetime
import click
from dso import DeepSymbolicOptimizer
from dso.logeval import LogEval
from dso.config import load_config
from dso.utils import safe_update_summary
def train_dso(config):
"""Trains DSO and returns dict of reward, expression, and traversal"""
print("\n== TRAINING SEED {} START ============".format(config["experiment"]["seed"]))
# For some reason, for the control task, the environment needs to be instantiated
# before creating the pool. Otherwise, gym.make() hangs during the pool initializer
if config["task"]["task_type"] == "control" and config["training"]["n_cores_batch"] > 1:
import gym
import dso.task.control # Registers custom and third-party environments
gym.make(config["task"]["env"])
# Train the model
model = DeepSymbolicOptimizer(deepcopy(config))
start = time.time()
result = model.train()
result["t"] = time.time() - start
result.pop("program")
save_path = model.config_experiment["save_path"]
summary_path = os.path.join(save_path, "summary.csv")
print("== TRAINING SEED {} END ==============".format(config["experiment"]["seed"]))
return result, summary_path
def print_summary(config, runs, messages):
text = '\n== EXPERIMENT SETUP START ===========\n'
text += 'Task type : {}\n'.format(config["task"]["task_type"])
if config["task"]["task_type"] == "regression":
text += 'Dataset : {}\n'.format(config["task"]["dataset"])
elif config["task"]["task_type"] == "control":
text += 'Environment : {}\n'.format(config["task"]["env"])
text += 'Starting seed : {}\n'.format(config["experiment"]["seed"])
text += 'Runs : {}\n'.format(runs)
if len(messages) > 0:
text += 'Additional context :\n'
for message in messages:
text += " {}\n".format(message)
text += '== EXPERIMENT SETUP END ============='
print(text)
@click.command()
@click.argument('config_template', default="")
@click.option('--runs', '--r', default=1, type=int, help="Number of independent runs with different seeds")
@click.option('--n_cores_task', '--n', default=1, help="Number of cores to spread out across tasks")
@click.option('--seed', '--s', default=None, type=int, help="Starting seed (overwrites seed in config), incremented for each independent run")
@click.option('--benchmark', '--b', default=None, type=str, help="Name of benchmark")
def main(config_template, runs, n_cores_task, seed, benchmark):
"""Runs DSO in parallel across multiple seeds using multiprocessing."""
messages = []
# Load the experiment config
config_template = config_template if config_template != "" else None
config = load_config(config_template)
# Overwrite named benchmark (for tasks that support them)
task_type = config["task"]["task_type"]
if benchmark is not None:
# For regression, --b overwrites config["task"]["dataset"]
if task_type == "regression":
config["task"]["dataset"] = benchmark
# For control, --b overwrites config["task"]["env"]
elif task_type == "control":
config["task"]["env"] = benchmark
else:
raise ValueError("--b is not supported for task {}.".format(task_type))
# Overwrite config seed, if specified
if seed is not None:
if config["experiment"]["seed"] is not None:
messages.append(
"INFO: Replacing config seed {} with command-line seed {}.".format(
config["experiment"]["seed"], seed))
config["experiment"]["seed"] = seed
# Save starting seed and run command
config["experiment"]["starting_seed"] = config["experiment"]["seed"]
config["experiment"]["cmd"] = " ".join(sys.argv)
# Set timestamp once to be used by all workers
timestamp = datetime.now().strftime("%Y-%m-%d-%H%M%S")
config["experiment"]["timestamp"] = timestamp
# Fix incompatible configurations
if n_cores_task == -1:
n_cores_task = multiprocessing.cpu_count()
if n_cores_task > runs:
messages.append(
"INFO: Setting 'n_cores_task' to {} because there are only {} runs.".format(
runs, runs))
n_cores_task = runs
if config["training"]["verbose"] and n_cores_task > 1:
messages.append(
"INFO: Setting 'verbose' to False for parallelized run.")
config["training"]["verbose"] = False
if config["training"]["n_cores_batch"] != 1 and n_cores_task > 1:
messages.append(
"INFO: Setting 'n_cores_batch' to 1 to avoid nested child processes.")
config["training"]["n_cores_batch"] = 1
# Start training
print_summary(config, runs, messages)
# Generate configs (with incremented seeds) for each run
configs = [deepcopy(config) for _ in range(runs)]
for i, config in enumerate(configs):
config["experiment"]["seed"] += i
# Farm out the work
if n_cores_task > 1:
pool = multiprocessing.Pool(n_cores_task)
for i, (result, summary_path) in enumerate(pool.imap_unordered(train_dso, configs)):
if not safe_update_summary(summary_path, result):
print("Warning: Could not update summary stats at {}".format(summary_path))
print("INFO: Completed run {} of {} in {:.0f} s".format(i + 1, runs, result["t"]))
else:
for i, config in enumerate(configs):
result, summary_path = train_dso(config)
if not safe_update_summary(summary_path, result):
print("Warning: Could not update summary stats at {}".format(summary_path))
print("INFO: Completed run {} of {} in {:.0f} s".format(i + 1, runs, result["t"]))
# Evaluate the log files
print("\n== POST-PROCESS START =================")
log = LogEval(config_path=os.path.dirname(summary_path))
log.analyze_log(
show_count=config["postprocess"]["show_count"],
show_hof=config["training"]["hof"] is not None and config["training"]["hof"] > 0,
show_pf=config["training"]["save_pareto_front"],
save_plots=config["postprocess"]["save_plots"])
print("== POST-PROCESS END ===================")
if __name__ == "__main__":
main()
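# Example invocation (illustrative; the exact entry point depends on how the
# package is installed):
#   python -m dso.run path/to/config.json --runs 8 --n 4 --s 0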
| 41.522293
| 142
| 0.625863
|
4a1227824ccd4dc72c824ada305a27088714fec8
| 799
|
py
|
Python
|
cron_validator/scheduler.py
|
skyxie/cron-validator
|
a61bb3113aed92b7da960015f81ac78a7187b48f
|
[
"MIT"
] | null | null | null |
cron_validator/scheduler.py
|
skyxie/cron-validator
|
a61bb3113aed92b7da960015f81ac78a7187b48f
|
[
"MIT"
] | null | null | null |
cron_validator/scheduler.py
|
skyxie/cron-validator
|
a61bb3113aed92b7da960015f81ac78a7187b48f
|
[
"MIT"
] | null | null | null |
import datetime
from cron_validator.validator import CronValidator
class CronScheduler(CronValidator):
def __init__(self, expression):
super().__init__()
self.gen = self.get_execution_time(expression, None, None)
self.next_execution_time = next(self.gen)
def time_for_execution(self):
now_rounded = self._round_down_to_nearest_minute(datetime.datetime.now())
is_time_for_execution = False
if now_rounded == self.next_execution_time:
self.next_execution_time = next(self.gen)
is_time_for_execution = True
return is_time_for_execution
@staticmethod
def _round_down_to_nearest_minute(dt):
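        # Note: dt.minute % 1 is always 0, so in effect this only zeroes the
        # seconds and microseconds, flooring dt to the start of its minute.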
return dt - datetime.timedelta(minutes=dt.minute % 1, seconds=dt.second, microseconds=dt.microsecond)
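# Usage sketch (illustrative; `run_job` is a hypothetical callable):
#
#   import time
#   scheduler = CronScheduler('*/5 * * * *')
#   while True:
#       if scheduler.time_for_execution():
#           run_job()
#       time.sleep(1)  # poll roughly once per second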
| 31.96
| 109
| 0.717146
|
4a12280b74f4f6413e75e47d037197855f20335d
| 3,410
|
py
|
Python
|
ITP1/ITP1_11_B.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | 1
|
2020-01-08T16:33:46.000Z
|
2020-01-08T16:33:46.000Z
|
ITP1/ITP1_11_B.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | null | null | null |
ITP1/ITP1_11_B.py
|
yu8ikmnbgt6y/MyAOJ
|
474b21a2a0c25e1c1f3d6d66d2a2ea52aecaa39b
|
[
"Unlicense"
] | null | null | null |
import sys
import io
import time
import pprint
input_txt = """
1 2 3 4 5 6
3
6 5
1 3
3 2
"""
sys.stdin = io.StringIO(input_txt);input()
#sys.stdin = open('ITP2_1_C_in9.test')
#sys.stdout = open('out.test','w')
start = time.time()
# copy the below part and paste to the submission form.
# ---------function------------
import sys
class Dice:
__TOP = 0
__FRONT = 1
__RIGHT = 2
__LEFT = 3
__BACK = 4
__BOTTOM = 5
def __init__(self, face_val=None):
self.f_keys = [self.__TOP, # 0
self.__FRONT, # 1
self.__RIGHT, # 2
self.__LEFT, # 3
self.__BACK, # 4
self.__BOTTOM # 5
]
if face_val is None or len(face_val) != 6:
face_val = ['1', '2', '3', '4', '5', '6']
self.f_key_to_val = {}
self.f_val_to_key = {}
for f_key, val in zip(self.f_keys, face_val):
self.f_key_to_val[f_key] = val
self.f_val_to_key[val] = f_key
MOVE_SWAP_FACES = {
'S': [(__BACK, __TOP), (__TOP, __FRONT), (__FRONT, __BOTTOM), (__BOTTOM, __BACK)],
'W': [(__RIGHT, __TOP), (__TOP, __LEFT), (__LEFT, __BOTTOM), (__BOTTOM, __RIGHT)],
'E': [(__LEFT, __TOP), (__TOP, __RIGHT), (__RIGHT, __BOTTOM), (__BOTTOM, __LEFT)],
'N': [(__FRONT, __TOP), (__TOP, __BACK), (__BACK, __BOTTOM), (__BOTTOM, __FRONT)],
'RCW' : [(__LEFT, __FRONT), (__FRONT, __RIGHT), (__RIGHT, __BACK), (__BACK, __LEFT)], # rotate clockwise
'RCCW': [(__RIGHT, __FRONT), (__FRONT, __LEFT), (__LEFT, __BACK), (__BACK, __RIGHT)], # rotate counter clockwise
}
def dice_move(self, direction_list):
for direction in direction_list:
prev_faces = self.f_keys[:]
for prev_f, next_f in self.MOVE_SWAP_FACES[direction]:
self.f_keys[next_f] = prev_faces[prev_f]
#print(self.f_keys)
MOVE_TO_TOP = {__TOP:[], __FRONT:['N'], __RIGHT:['W'], __LEFT:['E'], __BACK:['S'], __BOTTOM:['N','N']}
def dice_fix(self, top, front):
if type(top) == str:
top = self.f_val_to_key[top]
front = self.f_val_to_key[front]
        # Bring the specified face to the top
if top != self.f_keys[self.__TOP]:
now_top = self.f_keys.index(top)
move = self.MOVE_TO_TOP[now_top]
self.dice_move(move)
if top != self.f_keys[self.__TOP]:
raise AssertionError # not top
        # Bring the specified face to the front
if front != self.f_keys[self.__FRONT]:
for i in range(3):
self.dice_move(['RCW'])
if front == self.f_keys[self.__FRONT]:
break
else:
                raise AssertionError  # the specified face must reach the front within three rotations
def get_value(self, face):
return self.f_key_to_val[self.f_keys[face]]
def __repr__(self):
return f'({",".join(map(str, self.f_keys))})'
faces = input().split()
nq = int(input())
dice = Dice(faces)
for q in range(nq):
top_val, front_val = input().split()
dice.dice_fix(top=top_val, front=front_val)
print(dice.get_value(Dice._Dice__RIGHT))
# -----------------------------
print("elapsed:", time.time()-start)
sys.stdin = sys.__stdin__
| 31
| 123
| 0.531965
|
4a122833371f0fc65abeee644d4c1e87ea187370
| 11,314
|
py
|
Python
|
tests/test_interface.py
|
alliedtelesis/py-networking
|
6c5d4bdafabfb4feef235a02344432e1f0336e48
|
[
"Apache-2.0"
] | 4
|
2015-04-24T20:36:56.000Z
|
2021-05-03T20:21:54.000Z
|
tests/test_interface.py
|
alliedtelesis/py-networking
|
6c5d4bdafabfb4feef235a02344432e1f0336e48
|
[
"Apache-2.0"
] | 1
|
2019-07-14T07:07:21.000Z
|
2019-07-14T07:07:21.000Z
|
tests/test_interface.py
|
alliedtelesis/py-networking
|
6c5d4bdafabfb4feef235a02344432e1f0336e48
|
[
"Apache-2.0"
] | 3
|
2015-04-24T20:37:04.000Z
|
2017-03-02T15:14:46.000Z
|
# -*- coding: utf-8 -*-
import pytest
from pynetworking.Device import Device
from jinja2 import Template
show_interface_template = Template("""
Interface {{ interface }}
Scope: both
Link is {{ link }}, administrative state is {{ state }}
Thrash-limiting
Status Not Detected, Action learn-disable, Timeout 1(s)
Hardware is {{ hardware }}, address is 0015.77ea.17e5
index 5001 metric 1 mru 1500
{% if hardware == 'Ethernet' -%}
{% if link == 'UP' -%}
current duplex full, current speed 1000, current polarity mdix
{% endif -%}
configured duplex auto, configured speed auto, configured polarity auto
{% endif -%}
<UP,BROADCAST,RUNNING,MULTICAST>
SNMP link-status traps: Disabled
input packets 3082, bytes 327520, dropped 0, multicast packets 466
output packets 656, bytes 176318, multicast packets 252 broadcast packets 4
Time since last state change: 0 days 00:08:18
""")
def setup_dut(dut):
dut.reset()
dut.add_cmd({'cmd': 'show version', 'state': -1, 'action': 'PRINT', 'args': ["""
AlliedWare Plus (TM) 5.4.2 09/25/13 12:57:26
Build name : x600-5.4.2-3.14.rel
Build date : Wed Sep 25 12:57:26 NZST 2013
Build type : RELEASE
"""]})
dut.add_cmd({'cmd': 'show running-config', 'state': 0, 'action': 'PRINT', 'args': ["""
!
interface port1.0.1-1.0.10
description test1
switchport mode access
!
interface port1.0.11-1.0.50
description "this is a test description"
switchport mode trunk
this is an unknown command
!
interface vlan1
description testvlan
!
interface vlan10
description testvlan
!
vlan database
vlan 10 name marketing
vlan 10 state enable
vlan 7 name admin state enable
vlan 8-100 mtu 1200
vlan 6,7 mtu 1000
!
end
"""]})
def test_config(dut, log_level, use_mock):
if dut.mode != 'emulated':
pytest.skip("only on emulated")
setup_dut(dut)
show_interface = ''
max_if = 51
for interface in range(1, max_if):
env = {
'interface': 'port1.0.{0}'.format(interface),
'link': 'UP',
'state': 'DOWN',
'hardware': 'Ethernet',
}
if interface == 10:
env['link'] = 'DOWN'
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
env = {
'interface': 'lo',
'link': 'UP',
'state': 'UP',
'hardware': 'Loopback',
}
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
for vlan in [1, 8, 10, 7]:
env = {
'interface': 'vlan{0}'.format(vlan),
'link': 'UP',
'state': 'UP',
'hardware': 'VLAN',
}
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
dut.add_cmd({'cmd': 'show interface', 'state': 0, 'action': 'PRINT', 'args': [show_interface]})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
assert d.facts['os'] == 'awp'
# configuration check
assert d.interface['1.0.15']['configured duplex'] == 'auto'
assert d.interface['1.0.15']['configured speed'] == 'auto'
assert d.interface['1.0.15']['configured polarity'] == 'auto'
# status check
assert d.interface['1.0.15']['link'] is True
assert d.interface['1.0.15']['current polarity'] == 'mdix'
assert d.interface['1.0.15']['enable'] is False
assert d.interface['1.0.15']['current duplex'] == 'full'
assert d.interface['1.0.15']['current speed'] == '1000'
assert d.interface['1.0.10']['link'] is False
# description check
assert d.interface['1.0.1']['description'] == 'test1'
assert d.interface['1.0.5']['description'] == 'test1'
assert d.interface['1.0.8']['description'] == 'test1'
assert d.interface['1.0.31']['description'] == 'this is a test description'
assert d.interface['1.0.50']['description'] == 'this is a test description'
assert d.interface['vlan1']['description'] == 'testvlan'
assert d.interface['vlan10']['description'] == 'testvlan'
d.close()
def test_enable(dut, log_level, use_mock):
setup_dut(dut)
show_interface = ''
for interface in range(1, 51):
env = {
'interface': 'port1.0.{0}'.format(interface),
'link': 'UP',
'state': 'UP',
'hardware': 'Ethernet',
}
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
dut.add_cmd({'cmd': 'show interface', 'state': 0, 'action': 'PRINT', 'args': [show_interface]})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 0, 'action': 'SET_PROMPT', 'args': ['(config-if)#']})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'shutdown', 'state': 1, 'action': 'SET_STATE', 'args': [2]})
show_interface = ''
for interface in range(1, 51):
env = {
'interface': 'port1.0.{0}'.format(interface),
'link': 'UP',
'state': 'DOWN',
'hardware': 'Ethernet',
}
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
dut.add_cmd({'cmd': 'show interface', 'state': 2, 'action': 'PRINT', 'args': [show_interface]})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 2, 'action': 'SET_PROMPT', 'args': ['(config-if)#']})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 2, 'action': 'SET_STATE', 'args': [3]})
dut.add_cmd({'cmd': 'no shutdown', 'state': 3, 'action': 'SET_STATE', 'args': [4]})
show_interface = ''
for interface in range(1, 51):
env = {
'interface': 'port1.0.{0}'.format(interface),
'link': 'UP',
'state': 'UP',
'hardware': 'Ethernet',
}
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
dut.add_cmd({'cmd': 'show interface', 'state': 4, 'action': 'PRINT', 'args': [show_interface]})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
assert d.interface['1.0.10']['enable'] is True
assert ("1.0.10", {"current polarity": "mdix", "description": "test1", "configured duplex": "auto", "current duplex": "full", "configured speed": "auto",
"enable": True, "configured polarity": "auto", "current speed": "1000", "link": True}) in d.interface.items()
d.interface.update('1.0.10', enable=False)
assert d.interface['1.0.10']['enable'] is False
d.interface.update('1.0.10', enable=True)
assert d.interface['1.0.10']['enable'] is True
d.close()
def test_description(dut, log_level, use_mock):
if dut.mode != 'emulated':
pytest.skip("only on emulated")
setup_dut(dut)
show_interface = ''
for interface in range(1, 11):
env = {
'interface': 'port1.0.{0}'.format(interface),
'link': 'UP',
'state': 'UP',
'hardware': 'Ethernet',
}
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
dut.add_cmd({'cmd': 'show running-config', 'state': 0, 'action': 'PRINT', 'args': ["""
!
interface port1.0.1-1.0.10
description test1
!
end
"""]})
dut.add_cmd({'cmd': 'show interface', 'state': 0, 'action': 'PRINT', 'args': [show_interface]})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 0, 'action': 'SET_PROMPT', 'args': ['(config-if)#']})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'description camera_1', 'state': 1, 'action': 'SET_STATE', 'args': [2]})
dut.add_cmd({'cmd': 'show running-config', 'state': 2, 'action': 'PRINT', 'args': ["""
!
interface port1.0.1-1.0.9
description test1
!
interface port1.0.10
description camera_1
!
end
"""]})
dut.add_cmd({'cmd': 'show interface', 'state': 2, 'action': 'PRINT', 'args': [show_interface]})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 2, 'action': 'SET_PROMPT', 'args': ['(config-if)#']})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 2, 'action': 'SET_STATE', 'args': [3]})
dut.add_cmd({'cmd': 'description camera_1', 'state': 3, 'action': 'SET_STATE', 'args': [4]})
dut.add_cmd({'cmd': 'show running-config', 'state': 4, 'action': 'PRINT', 'args': ["""
!
interface port1.0.1-1.0.9
description test1
!
interface port1.0.10
description camera_1
!
end
"""]})
dut.add_cmd({'cmd': 'show interface', 'state': 4, 'action': 'PRINT', 'args': [show_interface]})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 4, 'action': 'SET_PROMPT', 'args': ['(config-if)#']})
dut.add_cmd({'cmd': 'interface port1.0.10', 'state': 4, 'action': 'SET_STATE', 'args': [5]})
dut.add_cmd({'cmd': 'description "cam one"', 'state': 5, 'action': 'SET_STATE', 'args': [6]})
dut.add_cmd({'cmd': 'show running-config', 'state': 6, 'action': 'PRINT', 'args': ["""
!
interface port1.0.1-1.0.9
description test1
!
interface port1.0.10
description "cam one"
!
end
"""]})
dut.add_cmd({'cmd': 'show interface', 'state': 6, 'action': 'PRINT', 'args': [show_interface]})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
assert d.interface['1.0.10']['description'] == 'test1'
d.interface.update('1.0.10', description='camera_1')
assert d.interface['1.0.10']['description'] == 'camera_1'
d.interface.update('1.0.10', description='camera_1')
assert d.interface['1.0.10']['description'] == 'camera_1'
d.interface.update('1.0.10', description='cam one')
d.close()
def test_unexisting_interface(dut, log_level, use_mock):
setup_dut(dut)
show_interface = ''
max_if_id = 24
max_if_cmd = 'interface port1.0.{0}'.format(max_if_id + 1)
max_if_name = '1.0.{0}'.format(max_if_id + 1)
max_if_str = 'interface 1.0.{0} does not exist'.format(max_if_id + 1)
for interface in range(1, max_if_id):
env = {
'interface': 'port1.0.{0}'.format(interface),
'link': 'UP',
'state': 'DOWN',
'hardware': 'Ethernet',
}
show_interface += show_interface_template.render(env).encode('ascii', 'ignore')
dut.add_cmd({'cmd': 'show interface', 'state': 0, 'action': 'PRINT', 'args': [show_interface]})
dut.add_cmd({'cmd': max_if_cmd, 'state': 0, 'action': 'SET_PROMPT', 'args': ['(config-if)#']})
dut.add_cmd({'cmd': max_if_cmd, 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'show interface', 'state': 1, 'action': 'PRINT', 'args': [show_interface]})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
with pytest.raises(ValueError) as excinfo:
d.interface.update(max_if_name, enable=False)
assert max_if_str in excinfo.value
with pytest.raises(KeyError) as excinfo:
d.interface['1.0.{0}'.format(max_if_id + 1)]['description'] == 'test1'
assert 'interface {0} does not exist'.format(max_if_name) in excinfo.value
with pytest.raises(TypeError) as excinfo:
d.interface[True]['description'] == 'test1'
assert 'invalid argument type' in excinfo.value
assert (d.interface.__str__() != '')
d.close()
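# A note on the emulated-device convention exercised above (inferred from the
# calls themselves, not from separate documentation): each dut.add_cmd entry
# registers a CLI command that is only recognised in the given 'state';
# action 'PRINT' emits args as command output, 'SET_STATE' moves the mock to
# the state named in args, and 'SET_PROMPT' changes the prompt, so a test
# drives the device through states 0 -> 1 -> 2 -> ... as it issues commands.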
| 39.421603
| 157
| 0.607654
|
4a1228c4a01f90dd7e37db178d18e7d835ad1dda
| 2,174
|
py
|
Python
|
three-column/load.py
|
hansonrobotics/hr-solr
|
568289aafe70f7f3091aa72ee6d2ddcce7a9dcc5
|
[
"BSD-3-Clause"
] | 3
|
2018-03-17T21:33:57.000Z
|
2021-12-13T07:03:27.000Z
|
three-column/load.py
|
hansonrobotics/hr-solr
|
568289aafe70f7f3091aa72ee6d2ddcce7a9dcc5
|
[
"BSD-3-Clause"
] | 3
|
2018-09-06T18:40:18.000Z
|
2019-01-15T20:37:10.000Z
|
three-column/load.py
|
hansonrobotics/hr-solr
|
568289aafe70f7f3091aa72ee6d2ddcce7a9dcc5
|
[
"BSD-3-Clause"
] | 7
|
2016-03-12T01:33:04.000Z
|
2021-10-15T06:29:25.000Z
|
# Copyright (c) 2016 Hanson Robotics, Ltd.
from sys import argv
import csv
import http.client, urllib.parse
import re, string
import hashlib
import unicodedata
import io
#pattern = re.compile('[\W_]+')
script, conv = argv
conn = http.client.HTTPConnection('localhost', 8983)
# Utility function to update SOLR
def updateSolr(core, docId, title, body):
BODY = """\
[
{
"id" : "DOC%s",
"title" : "%s",
"body" : "%s"
}
]
""" % (docId, title, body)
headers = {'Content-type': 'application/json'}
conn.request('POST', '/solr/%s/update' % (core), BODY, headers)
response = conn.getresponse()
data = response.read()
print(title, body, response.status, response.reason, data)
def cleanString(string):
cleanedString = string.encode('ascii', 'ignore').decode('ascii', 'ignore')
cleanedString = cleanedString.replace('"', '\\"')
return cleanedString
# Read and process CSV
meanings = {}
with io.open(conv, encoding='utf-8') as csvfile:
convread = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in convread:
pattern = row[0]
meaning = row[1]
template = row[2]
if (meaning not in meanings):
meanings[meaning] = {
'patterns': [],
'templates': []
}
if (pattern != '' and pattern not in meanings[meaning]['patterns']):
meanings[meaning]['patterns'].append(pattern)
if (template != '' and template not in meanings[meaning]['templates']):
meanings[meaning]['templates'].append(template)
csvMeanings = sorted(meanings.keys())
for meaning in csvMeanings:
for template in meanings[meaning]['templates']:
docId = hashlib.sha224(template.encode('ascii', 'ignore')).hexdigest()
updateSolr('3coltemplate', docId, cleanString(template), meaning)
for pattern in meanings[meaning]['patterns']:
docId = hashlib.sha224(pattern.encode('ascii', 'ignore')).hexdigest()
updateSolr('3colpattern', docId, cleanString(pattern), meaning)
# Update SOLR cores
conn.request('POST', '/admin/cores?action=RELOAD')
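# Usage sketch (an assumption based on the argv handling and row indexing
# above, not part of the original script): the CSV carries three columns --
# pattern, meaning, template -- and the loader takes the CSV path as its only
# argument, e.g.
#
#   python load.py three-column.csv
#
# where a row might look like:  "HELLO *","greeting","Hi there!"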
| 28.986667
| 79
| 0.619135
|
4a1229256f0751149b70aa31104d0129a68fbaad
| 1,545
|
py
|
Python
|
setup.py
|
Vamporelol/pydoro
|
d6be687ec3e5ef9839ece4bcd5b576aadfc03532
|
[
"MIT"
] | 403
|
2019-06-15T08:33:22.000Z
|
2022-03-30T12:04:17.000Z
|
setup.py
|
Vamporelol/pydoro
|
d6be687ec3e5ef9839ece4bcd5b576aadfc03532
|
[
"MIT"
] | 142
|
2019-06-17T19:49:16.000Z
|
2022-02-11T19:01:12.000Z
|
setup.py
|
Vamporelol/pydoro
|
d6be687ec3e5ef9839ece4bcd5b576aadfc03532
|
[
"MIT"
] | 56
|
2019-06-15T09:20:58.000Z
|
2022-03-13T18:33:36.000Z
|
from codecs import open
from inspect import getsource
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
here = abspath(dirname(getsource(lambda: 0)))
with open(join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="pydoro",
version="0.2.1",
    # Get the description from the third line (index 2) & strip the surrounding `*` characters
description=long_description.splitlines()[2][1:-1],
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/JaDogg/pydoro",
author="Bhathiya Perera",
author_email="jadogg.coder@gmail.com",
python_requires=">=3.6",
license="MIT",
package_data={"": ["*.rst", "*.wav"]},
install_requires=["prompt-toolkit>=3.0.3"],
extras_require={
'audio:platform_system=="Darwin"': [
"pyobjc-core>=5.2",
"pyobjc-framework-Cocoa>=5.2",
],
'audio:platform_system=="Linux"': ["pycairo>=1.18.1", "PyGObject>=3.32.1"],
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="tomato pomodoro pydoro timer work",
packages=find_packages(),
entry_points={"console_scripts": ["pydoro = pydoro.pydoro_tui:main"]},
setup_requires=["wheel"],
)
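# Install sketch (hedged; derived from the extras_require markers above rather
# than from project docs): the conditional keys share the extra name "audio",
# so the platform-specific sound dependencies should be pulled in with
#
#   pip install pydoro[audio]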
| 33.586957
| 83
| 0.636246
|
4a122a2c11c5fe77a5a9b1f33fd1a672866d72d1
| 1,528
|
py
|
Python
|
test/matrix/test_matrix.py
|
bedlaj/BitSwanPump
|
5fb9f31d0aec0ff6d32c97a45c6a53abd41ab2cf
|
[
"BSD-3-Clause"
] | null | null | null |
test/matrix/test_matrix.py
|
bedlaj/BitSwanPump
|
5fb9f31d0aec0ff6d32c97a45c6a53abd41ab2cf
|
[
"BSD-3-Clause"
] | null | null | null |
test/matrix/test_matrix.py
|
bedlaj/BitSwanPump
|
5fb9f31d0aec0ff6d32c97a45c6a53abd41ab2cf
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import time
import bspump
import bspump.matrix
import bspump.unittest
class TestMatrix(bspump.unittest.TestCase):
def test_matrix(self):
matrix = bspump.Matrix(
app = self.App,
dtype = [
('f1', 'i8'),
('f2', 'i8'),
('f3', 'i8'),
]
)
for i in range(100):
n = matrix.add_row()
matrix.Array[n][0] = 1 # Access by a field index
matrix.Array[n]['f2'] = 1 # Access by a field name
matrix.Array[n][2] = 1
self.assertEqual(n, i)
closed = set()
closed |= matrix.ClosedRows
for i in range(20, 40):
matrix.close_row(i)
closed.add(i)
self.assertIn(i, matrix.ClosedRows)
self.assertEqual(closed, matrix.ClosedRows)
for i in range(20):
n = matrix.add_row()
self.assertIn(n, closed)
def test_matrix_zeros(self):
matrix = bspump.Matrix(app=self.App)
for i in range(100):
n = matrix.add_row()
self.assertEqual(n, i)
matrix.zeros()
self.assertEqual(matrix.Array.shape, (0,))
def test_matrix_flush(self):
matrix = bspump.Matrix(app=self.App)
for i in range(100):
n = matrix.add_row()
self.assertEqual(n, i)
for i in range(20, 40):
matrix.close_row(i)
self.assertIn(i, matrix.ClosedRows)
matrix.flush()
self.assertEqual(len(matrix.ClosedRows), 0)
def test_matrix_dtypes(self):
matrix = bspump.Matrix(
app = self.App,
dtype = [
('f1', 'U20'),
('f2', 'i8'),
('f3', 'i8'),
]
)
row_index = matrix.add_row()
row = matrix.Array[row_index]
row['f1'] = "Ahoj"
row['f2'] = 64
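# (Summary of the contract exercised above: close_row() recycles row indices,
# so later add_row() calls hand back indices from ClosedRows; flush() compacts
# the matrix and empties ClosedRows; zeros() resets the array to shape (0,).)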
| 17.976471
| 53
| 0.626963
|
4a122d176ac717b6367bceef485869e7330a49a6
| 315
|
py
|
Python
|
flask/app/views.py
|
joedots1/dots-app
|
035a2239666edcd646d4d08c2ef944b3d52018c6
|
[
"MIT"
] | null | null | null |
flask/app/views.py
|
joedots1/dots-app
|
035a2239666edcd646d4d08c2ef944b3d52018c6
|
[
"MIT"
] | null | null | null |
flask/app/views.py
|
joedots1/dots-app
|
035a2239666edcd646d4d08c2ef944b3d52018c6
|
[
"MIT"
] | null | null | null |
from flask import render_template
from app import app
import os
@app.route("/")
def index():
app_name = os.getenv("APP_NAME")
if app_name:
return f"Hello from {app_name} running in Docker"
return "hello from flask!"
@app.route("/home")
def home():
return render_template('index.html')
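# Quick check, assuming Flask's default development server on port 5000 (an
# assumption -- no port is configured in this module):
#
#   curl http://localhost:5000/       -> "hello from flask!" (or the APP_NAME
#                                        greeting when that env var is set)
#   curl http://localhost:5000/home   -> the rendered index.html template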
| 18.529412
| 57
| 0.673016
|
4a122e08ef79a4e33c0869fd2c7b8d6fd3c63de7
| 1,729
|
py
|
Python
|
src/python/pants/engine/internals/options_parsing.py
|
silverguo/pants
|
141510d03fbf2b7e1a0b54f66b54088697f6fa51
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/engine/internals/options_parsing.py
|
silverguo/pants
|
141510d03fbf2b7e1a0b54f66b54088697f6fa51
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/engine/internals/options_parsing.py
|
silverguo/pants
|
141510d03fbf2b7e1a0b54f66b54088697f6fa51
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.engine.rules import RootRule, rule, subsystem_rule
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.option.global_options import GlobalOptions
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.scope import Scope, ScopedOptions
from pants.util.logging import LogLevel
@dataclass(frozen=True)
class _Options:
"""A wrapper around bootstrapped options values: not for direct consumption."""
options: Options
@rule
def parse_options(options_bootstrapper: OptionsBootstrapper) -> _Options:
    # TODO: Because OptionsBootstrapper is currently provided as a Param, this @rule relies on options
    # remaining relatively stable in order to be efficient. See #6845 for a discussion of how to
    # minimize the size of that value.
build_config = BuildConfigInitializer.get(options_bootstrapper)
return _Options(
OptionsInitializer.create(options_bootstrapper, build_config, init_subsystems=False)
)
@rule
def scope_options(scope: Scope, options: _Options) -> ScopedOptions:
return ScopedOptions(scope, options.options.for_scope(scope.scope))
@rule
def log_level(global_options: GlobalOptions) -> LogLevel:
log_level: LogLevel = global_options.get_options().level
return log_level
def rules():
return [
scope_options,
parse_options,
subsystem_rule(GlobalOptions),
log_level,
RootRule(Scope),
RootRule(OptionsBootstrapper),
]
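# (Sketch of the wiring above, stated with some hedging: RootRule marks Scope
# and OptionsBootstrapper as externally supplied Params; parse_options turns
# the bootstrapper into a shared _Options value, and scope_options then
# derives a ScopedOptions for each requested Scope from it.)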
| 32.622642
| 102
| 0.770388
|
4a122f8a431fb9fb51800895f46f14c18314470a
| 250
|
py
|
Python
|
testapps/PY/doctor.py
|
EyelynSu/pinpoint-c-agent
|
35cab594fe69c8de3ddb13625590d76a75bf8381
|
[
"Apache-2.0"
] | 44
|
2020-10-27T07:15:50.000Z
|
2022-03-26T13:37:23.000Z
|
testapps/PY/doctor.py
|
EyelynSu/pinpoint-c-agent
|
35cab594fe69c8de3ddb13625590d76a75bf8381
|
[
"Apache-2.0"
] | 120
|
2020-10-19T17:33:24.000Z
|
2022-03-23T06:31:14.000Z
|
testapps/PY/doctor.py
|
EyelynSu/pinpoint-c-agent
|
35cab594fe69c8de3ddb13625590d76a75bf8381
|
[
"Apache-2.0"
] | 23
|
2020-10-22T06:47:40.000Z
|
2022-03-17T08:15:09.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from person import Person
from pinpoint.common import PinpointCommonPlugin
class Doctor(Person):
@PinpointCommonPlugin("Doctor"+ __name__)
def other(self):
return "Doctor not eating!"
| 19.230769
| 48
| 0.7
|
4a12310d8c672cdf4c75faa10b25e32aafa72551
| 3,744
|
py
|
Python
|
dpfinder/logging/logger.py
|
barryZZJ/dp-finder
|
ddf8e3589110b4b35920b437d605b45dd56291da
|
[
"MIT"
] | null | null | null |
dpfinder/logging/logger.py
|
barryZZJ/dp-finder
|
ddf8e3589110b4b35920b437d605b45dd56291da
|
[
"MIT"
] | null | null | null |
dpfinder/logging/logger.py
|
barryZZJ/dp-finder
|
ddf8e3589110b4b35920b437d605b45dd56291da
|
[
"MIT"
] | null | null | null |
# ==BEGIN LICENSE==
#
# MIT License
#
# Copyright (c) 2018 SRI Lab, ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==END LICENSE==
import logging.config
import datetime
import os
import json
from logging import critical, error, warning, info, debug # necessary for calls from the outside
from dpfinder.utils.utils import create_dir_if_not_exists
timestamp = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
self_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(self_dir, 'logs', 'others')
log_file = None
# LOG LEVELS
# existing:
# CRITICAL = 50
# ERROR = 40
# WARNING = 30
# INFO = 20
# DEBUG = 10
DATA = 5
logging.addLevelName(DATA, "DATA")
def data(key, value):
d = {key: value}
return logging.log(DATA, json.dumps(d))
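# Example (sketch): once set_logfile() below has installed the handlers,
#
#   data('epsilon', 0.5)
#
# appends the JSON line {"epsilon": 0.5} to the <log_file>_data.log file,
# because the 'filedata' handler filters for records at the custom DATA level.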
def shutdown(handler_list=[]):
logging.shutdown(handler_list)
called = False
def get_log_dir(label=None):
if label is None:
return log_dir
else:
return os.path.join(log_dir, label)
def get_log_file(label=None):
d = get_log_dir(label)
return os.path.join(d, timestamp)
def set_logfile(filename=None, weak=False, console_loglevel='WARNING'):
global called
global log_file
if called and weak:
return
called = True
if filename is None:
filename = os.path.join(log_dir, timestamp)
console_loglevel = 0
print("console_loglevel={}".format(console_loglevel))
log_file = filename
parent = os.path.dirname(log_file)
create_dir_if_not_exists(parent)
print("Saving logs to", log_file)
default_logging = {
'version': 1,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s]: %(message)s',
'datefmt': '%Y-%m-%d_%H-%M-%S'
},
'minimal': {
'format': '%(message)s'
},
},
'filters': {
'onlydata': {
'()': OnlyData
}
},
'handlers': {
'default': {
'level': console_loglevel,
'formatter': 'standard',
'class': 'logging.StreamHandler',
},
'fileinfo': {
'level': 'INFO',
'formatter': 'standard',
'filename': log_file + '_info.log',
'mode': 'w',
'class': 'logging.FileHandler',
},
'filedebug': {
'level': 'DEBUG',
'formatter': 'standard',
'filename': log_file + '_debug.log',
'mode': 'w',
'class': 'logging.FileHandler',
},
'filedata': {
'level': 'DATA',
'formatter': 'minimal',
'filename': log_file + '_data.log',
'mode': 'w',
'class': 'logging.FileHandler',
'filters': ['onlydata']
}
},
'loggers': {
'': {
'handlers': ['default', 'fileinfo', 'filedebug', 'filedata'],
'level': 0
}
}
}
logging.config.dictConfig(default_logging)
class OnlyData(logging.Filter):
def filter(self, record):
# print(record.__dict__)
return record.levelno == DATA
| 24
| 97
| 0.676816
|
4a1233387a672c29427acf6d181679b3c92cc82c
| 583
|
py
|
Python
|
pinakes/main/catalog/exceptions.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 2
|
2022-03-17T18:53:58.000Z
|
2022-03-17T22:04:22.000Z
|
pinakes/main/catalog/exceptions.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 9
|
2022-03-18T08:22:57.000Z
|
2022-03-30T17:14:49.000Z
|
pinakes/main/catalog/exceptions.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 7
|
2022-03-17T22:03:08.000Z
|
2022-03-28T21:28:34.000Z
|
"""Application specific exception classes"""
from rest_framework import exceptions, status
from django.utils.translation import gettext_lazy as _
class BadParamsException(exceptions.APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _("Bad query parameters")
class InvalidSurveyException(exceptions.APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _("Invalid survey")
class UncancelableException(exceptions.APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = _("Uncancelable Order")
| 30.684211
| 54
| 0.802744
|
4a1233b5af633d53b4a53ffaa0c7873cb436baba
| 853
|
py
|
Python
|
examples/optimization_techniques/decision_tree_optimization.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 382
|
2019-07-16T13:30:15.000Z
|
2022-03-30T22:29:07.000Z
|
examples/optimization_techniques/decision_tree_optimization.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 46
|
2019-08-27T18:07:47.000Z
|
2022-03-16T16:28:10.000Z
|
examples/optimization_techniques/decision_tree_optimization.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 35
|
2019-08-03T00:51:09.000Z
|
2021-12-03T19:06:07.000Z
|
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from hyperactive import Hyperactive, DecisionTreeOptimizer
data = load_iris()
X, y = data.data, data.target
def model(opt):
knr = KNeighborsClassifier(n_neighbors=opt["n_neighbors"])
scores = cross_val_score(knr, X, y, cv=5)
score = scores.mean()
return score
search_space = {
"n_neighbors": list(range(1, 100)),
}
hyper = Hyperactive()
hyper.add_search(model, search_space, n_iter=100)
hyper.run()
search_data = hyper.results(model)
optimizer = DecisionTreeOptimizer(
tree_regressor="random_forest",
xi=0.02,
warm_start_smbo=search_data,
rand_rest_p=0.05,
)
hyper = Hyperactive()
hyper.add_search(model, search_space, optimizer=optimizer, n_iter=100)
hyper.run()
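# The two runs above form a warm-start pipeline: the first Hyperactive run
# explores the search space, its results() become search_data, and that frame
# seeds the DecisionTreeOptimizer (warm_start_smbo) so the tree-based
# surrogate model in the second run starts from already-evaluated points.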
| 21.871795
| 70
| 0.751465
|
4a12341fef016142bf30ced94ae41aad9413a72b
| 24,786
|
py
|
Python
|
lib/MotifUtils/MotifUtilsServer.py
|
arwyer/MotifUtils
|
6568303b0894e44fa6fd7cc0de1022a9c4356555
|
[
"MIT"
] | 1
|
2019-07-20T05:53:21.000Z
|
2019-07-20T05:53:21.000Z
|
lib/MotifUtils/MotifUtilsServer.py
|
man4ish/MotifUtils
|
96ab9d99588a46ffe786c735fd74e1c51801762a
|
[
"MIT"
] | null | null | null |
lib/MotifUtils/MotifUtilsServer.py
|
man4ish/MotifUtils
|
96ab9d99588a46ffe786c735fd74e1c51801762a
|
[
"MIT"
] | 1
|
2019-05-22T16:35:41.000Z
|
2019-05-22T16:35:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from MotifUtils.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'MotifUtils'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from MotifUtils.MotifUtilsImpl import MotifUtils # noqa @IgnorePep8
impl_MotifUtils = MotifUtils(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
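# (Behaviour of the lookup above: unless the config sets
# 'dont_trust_x_ip_headers' to 'true', the first entry of X-Forwarded-For
# wins, then X-Real-IP, and only then the socket-level REMOTE_ADDR -- e.g. a
# header of "10.0.0.5, 172.16.0.1" yields "10.0.0.5".)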
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'MotifUtils'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_MotifUtils.uploadMotifSet,
name='MotifUtils.uploadMotifSet',
types=[dict])
self.method_authentication['MotifUtils.uploadMotifSet'] = 'required' # noqa
self.rpc_service.add(impl_MotifUtils.parseMotifSet,
name='MotifUtils.parseMotifSet',
types=[dict])
self.method_authentication['MotifUtils.parseMotifSet'] = 'required' # noqa
self.rpc_service.add(impl_MotifUtils.saveMotifSet,
name='MotifUtils.saveMotifSet',
types=[dict])
self.method_authentication['MotifUtils.saveMotifSet'] = 'required' # noqa
self.rpc_service.add(impl_MotifUtils.downloadMotifSet,
name='MotifUtils.downloadMotifSet',
types=[dict])
self.method_authentication['MotifUtils.downloadMotifSet'] = 'required' # noqa
self.rpc_service.add(impl_MotifUtils.UploadFromGibbs,
name='MotifUtils.UploadFromGibbs',
types=[dict])
self.method_authentication['MotifUtils.UploadFromGibbs'] = 'required' # noqa
self.rpc_service.add(impl_MotifUtils.UploadFromHomer,
name='MotifUtils.UploadFromHomer',
types=[dict])
self.method_authentication['MotifUtils.UploadFromHomer'] = 'required' # noqa
self.rpc_service.add(impl_MotifUtils.UploadFromMEME,
name='MotifUtils.UploadFromMEME',
types=[dict])
self.method_authentication['MotifUtils.UploadFromMEME'] = 'required' # noqa
self.rpc_service.add(impl_MotifUtils.status,
name='MotifUtils.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'MotifUtils ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
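# Invocation sketch for the async-CLI path (inferred from the argv handling in
# __main__ above):
#
#   python MotifUtilsServer.py <input.json> <output.json> [token-or-token-file]
#
# which writes the JSON-RPC response to <output.json> and exits with code 500
# if the response carries an error; without those arguments, --host/--port
# start the HTTP server instead.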
| 37.668693
| 151
| 0.544582
|
4a12347d295e709625414c361c887e3ab7b7f36f
| 31
|
py
|
Python
|
First Course/Week 3/Car fueling/tempCodeRunnerFile.py
|
Adarsh-Saurabh/Coursera--CoC-San-Diago-Data-Structures
|
a2488965199cb105db2e381280de00627e154a23
|
[
"Apache-2.0"
] | 2
|
2021-09-18T08:25:59.000Z
|
2021-09-24T11:41:37.000Z
|
First Course/Week 3/Car fueling/tempCodeRunnerFile.py
|
Adarsh-Saurabh/Coursera--CoC-San-Diago-Data-Structures
|
a2488965199cb105db2e381280de00627e154a23
|
[
"Apache-2.0"
] | null | null | null |
First Course/Week 3/Car fueling/tempCodeRunnerFile.py
|
Adarsh-Saurabh/Coursera--CoC-San-Diago-Data-Structures
|
a2488965199cb105db2e381280de00627e154a23
|
[
"Apache-2.0"
] | null | null | null |
i-1]:
# refill -= 1
| 15.5
| 25
| 0.290323
|
4a12363294fe4e1d349511ca16d467dd3dbcc15d
| 47,746
|
py
|
Python
|
MultiClient.py
|
jmabry0163/MultiWorld-Utilities
|
130d11c11e2ffab07a7e2a8982bde078d92086c4
|
[
"MIT"
] | null | null | null |
MultiClient.py
|
jmabry0163/MultiWorld-Utilities
|
130d11c11e2ffab07a7e2a8982bde078d92086c4
|
[
"MIT"
] | null | null | null |
MultiClient.py
|
jmabry0163/MultiWorld-Utilities
|
130d11c11e2ffab07a7e2a8982bde078d92086c4
|
[
"MIT"
] | null | null | null |
import argparse
import asyncio
import json
import logging
import urllib.parse
import atexit
from Utils import get_item_name_from_id, get_location_name_from_address, ReceivedItem
exit_func = atexit.register(input, "Press enter to close.")
import ModuleUpdate
ModuleUpdate.update()
import colorama
import websockets
import prompt_toolkit
from prompt_toolkit.patch_stdout import patch_stdout
import Regions
import Utils
class Context:
def __init__(self, snes_address, server_address, password, found_items):
self.snes_address = snes_address
self.server_address = server_address
self.exit_event = asyncio.Event()
self.watcher_event = asyncio.Event()
self.input_queue = asyncio.Queue()
self.input_requests = 0
self.snes_socket = None
self.snes_state = SNES_DISCONNECTED
self.snes_attached_device = None
self.snes_reconnect_address = None
self.snes_recv_queue = asyncio.Queue()
self.snes_request_lock = asyncio.Lock()
self.is_sd2snes = False
self.snes_write_buffer = []
self.server_task = None
self.socket = None
self.password = password
self.team = None
self.slot = None
self.player_names = {}
self.locations_checked = set()
self.locations_scouted = set()
self.items_received = []
self.locations_info = {}
self.awaiting_rom = False
self.rom = None
self.auth = None
self.found_items = found_items
color_codes = {'reset': 0, 'bold': 1, 'underline': 4, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'blue': 34,
'magenta': 35, 'cyan': 36, 'white': 37, 'black_bg': 40, 'red_bg': 41, 'green_bg': 42, 'yellow_bg': 43,
'blue_bg': 44, 'purple_bg': 45, 'cyan_bg': 46, 'white_bg': 47}
def color_code(*args):
return '\033[' + ';'.join([str(color_codes[arg]) for arg in args]) + 'm'
def color(text, *args):
return color_code(*args) + text + color_code('reset')
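# Example (sketch): color('error', 'bold', 'red') yields
# '\033[1;31merror\033[0m' -- the numeric codes are joined with ';' and the
# text is always terminated by a reset sequence.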
RECONNECT_DELAY = 5
ROM_START = 0x000000
WRAM_START = 0xF50000
WRAM_SIZE = 0x20000
SRAM_START = 0xE00000
ROMNAME_START = SRAM_START + 0x2000
ROMNAME_SIZE = 0x15
INGAME_MODES = {0x07, 0x09, 0x0b}
SAVEDATA_START = WRAM_START + 0xF000
SAVEDATA_SIZE = 0x500
RECV_PROGRESS_ADDR = SAVEDATA_START + 0x4D0 # 2 bytes
RECV_ITEM_ADDR = SAVEDATA_START + 0x4D2 # 1 byte
RECV_ITEM_PLAYER_ADDR = SAVEDATA_START + 0x4D3 # 1 byte
ROOMID_ADDR = SAVEDATA_START + 0x4D4 # 2 bytes
ROOMDATA_ADDR = SAVEDATA_START + 0x4D6 # 1 byte
SCOUT_LOCATION_ADDR = SAVEDATA_START + 0x4D7 # 1 byte
SCOUTREPLY_LOCATION_ADDR = SAVEDATA_START + 0x4D8 # 1 byte
SCOUTREPLY_ITEM_ADDR = SAVEDATA_START + 0x4D9 # 1 byte
SCOUTREPLY_PLAYER_ADDR = SAVEDATA_START + 0x4DA # 1 byte
location_table_uw = {"Blind's Hideout - Top": (0x11d, 0x10),
"Blind's Hideout - Left": (0x11d, 0x20),
"Blind's Hideout - Right": (0x11d, 0x40),
"Blind's Hideout - Far Left": (0x11d, 0x80),
"Blind's Hideout - Far Right": (0x11d, 0x100),
'Secret Passage': (0x55, 0x10),
'Waterfall Fairy - Left': (0x114, 0x10),
'Waterfall Fairy - Right': (0x114, 0x20),
"King's Tomb": (0x113, 0x10),
'Floodgate Chest': (0x10b, 0x10),
"Link's House": (0x104, 0x10),
'Kakariko Tavern': (0x103, 0x10),
'Chicken House': (0x108, 0x10),
"Aginah's Cave": (0x10a, 0x10),
"Sahasrahla's Hut - Left": (0x105, 0x10),
"Sahasrahla's Hut - Middle": (0x105, 0x20),
"Sahasrahla's Hut - Right": (0x105, 0x40),
'Kakariko Well - Top': (0x2f, 0x10),
'Kakariko Well - Left': (0x2f, 0x20),
'Kakariko Well - Middle': (0x2f, 0x40),
'Kakariko Well - Right': (0x2f, 0x80),
'Kakariko Well - Bottom': (0x2f, 0x100),
'Lost Woods Hideout': (0xe1, 0x200),
'Lumberjack Tree': (0xe2, 0x200),
'Cave 45': (0x11b, 0x400),
'Graveyard Cave': (0x11b, 0x200),
'Checkerboard Cave': (0x126, 0x200),
'Mini Moldorm Cave - Far Left': (0x123, 0x10),
'Mini Moldorm Cave - Left': (0x123, 0x20),
'Mini Moldorm Cave - Right': (0x123, 0x40),
'Mini Moldorm Cave - Far Right': (0x123, 0x80),
'Mini Moldorm Cave - Generous Guy': (0x123, 0x400),
'Ice Rod Cave': (0x120, 0x10),
'Bonk Rock Cave': (0x124, 0x10),
'Desert Palace - Big Chest': (0x73, 0x10),
'Desert Palace - Torch': (0x73, 0x400),
'Desert Palace - Map Chest': (0x74, 0x10),
'Desert Palace - Compass Chest': (0x85, 0x10),
'Desert Palace - Big Key Chest': (0x75, 0x10),
'Desert Palace - Boss': (0x33, 0x800),
'Eastern Palace - Compass Chest': (0xa8, 0x10),
'Eastern Palace - Big Chest': (0xa9, 0x10),
'Eastern Palace - Cannonball Chest': (0xb9, 0x10),
'Eastern Palace - Big Key Chest': (0xb8, 0x10),
'Eastern Palace - Map Chest': (0xaa, 0x10),
'Eastern Palace - Boss': (0xc8, 0x800),
'Hyrule Castle - Boomerang Chest': (0x71, 0x10),
'Hyrule Castle - Map Chest': (0x72, 0x10),
"Hyrule Castle - Zelda's Chest": (0x80, 0x10),
'Sewers - Dark Cross': (0x32, 0x10),
'Sewers - Secret Room - Left': (0x11, 0x10),
'Sewers - Secret Room - Middle': (0x11, 0x20),
'Sewers - Secret Room - Right': (0x11, 0x40),
'Sanctuary': (0x12, 0x10),
'Castle Tower - Room 03': (0xe0, 0x10),
'Castle Tower - Dark Maze': (0xd0, 0x10),
'Spectacle Rock Cave': (0xea, 0x400),
'Paradox Cave Lower - Far Left': (0xef, 0x10),
'Paradox Cave Lower - Left': (0xef, 0x20),
'Paradox Cave Lower - Right': (0xef, 0x40),
'Paradox Cave Lower - Far Right': (0xef, 0x80),
'Paradox Cave Lower - Middle': (0xef, 0x100),
'Paradox Cave Upper - Left': (0xff, 0x10),
'Paradox Cave Upper - Right': (0xff, 0x20),
'Spiral Cave': (0xfe, 0x10),
'Tower of Hera - Basement Cage': (0x87, 0x400),
'Tower of Hera - Map Chest': (0x77, 0x10),
'Tower of Hera - Big Key Chest': (0x87, 0x10),
'Tower of Hera - Compass Chest': (0x27, 0x20),
'Tower of Hera - Big Chest': (0x27, 0x10),
'Tower of Hera - Boss': (0x7, 0x800),
'Hype Cave - Top': (0x11e, 0x10),
'Hype Cave - Middle Right': (0x11e, 0x20),
'Hype Cave - Middle Left': (0x11e, 0x40),
'Hype Cave - Bottom': (0x11e, 0x80),
'Hype Cave - Generous Guy': (0x11e, 0x400),
'Peg Cave': (0x127, 0x400),
'Pyramid Fairy - Left': (0x116, 0x10),
'Pyramid Fairy - Right': (0x116, 0x20),
'Brewery': (0x106, 0x10),
'C-Shaped House': (0x11c, 0x10),
'Chest Game': (0x106, 0x400),
'Mire Shed - Left': (0x10d, 0x10),
'Mire Shed - Right': (0x10d, 0x20),
'Superbunny Cave - Top': (0xf8, 0x10),
'Superbunny Cave - Bottom': (0xf8, 0x20),
'Spike Cave': (0x117, 0x10),
'Hookshot Cave - Top Right': (0x3c, 0x10),
'Hookshot Cave - Top Left': (0x3c, 0x20),
'Hookshot Cave - Bottom Right': (0x3c, 0x80),
'Hookshot Cave - Bottom Left': (0x3c, 0x40),
'Mimic Cave': (0x10c, 0x10),
'Swamp Palace - Entrance': (0x28, 0x10),
'Swamp Palace - Map Chest': (0x37, 0x10),
'Swamp Palace - Big Chest': (0x36, 0x10),
'Swamp Palace - Compass Chest': (0x46, 0x10),
'Swamp Palace - Big Key Chest': (0x35, 0x10),
'Swamp Palace - West Chest': (0x34, 0x10),
'Swamp Palace - Flooded Room - Left': (0x76, 0x10),
'Swamp Palace - Flooded Room - Right': (0x76, 0x20),
'Swamp Palace - Waterfall Room': (0x66, 0x10),
'Swamp Palace - Boss': (0x6, 0x800),
"Thieves' Town - Big Key Chest": (0xdb, 0x20),
"Thieves' Town - Map Chest": (0xdb, 0x10),
"Thieves' Town - Compass Chest": (0xdc, 0x10),
"Thieves' Town - Ambush Chest": (0xcb, 0x10),
"Thieves' Town - Attic": (0x65, 0x10),
"Thieves' Town - Big Chest": (0x44, 0x10),
"Thieves' Town - Blind's Cell": (0x45, 0x10),
"Thieves' Town - Boss": (0xac, 0x800),
'Skull Woods - Compass Chest': (0x67, 0x10),
'Skull Woods - Map Chest': (0x58, 0x20),
'Skull Woods - Big Chest': (0x58, 0x10),
'Skull Woods - Pot Prison': (0x57, 0x20),
'Skull Woods - Pinball Room': (0x68, 0x10),
'Skull Woods - Big Key Chest': (0x57, 0x10),
'Skull Woods - Bridge Room': (0x59, 0x10),
'Skull Woods - Boss': (0x29, 0x800),
'Ice Palace - Compass Chest': (0x2e, 0x10),
'Ice Palace - Freezor Chest': (0x7e, 0x10),
'Ice Palace - Big Chest': (0x9e, 0x10),
'Ice Palace - Iced T Room': (0xae, 0x10),
'Ice Palace - Spike Room': (0x5f, 0x10),
'Ice Palace - Big Key Chest': (0x1f, 0x10),
'Ice Palace - Map Chest': (0x3f, 0x10),
'Ice Palace - Boss': (0xde, 0x800),
'Misery Mire - Big Chest': (0xc3, 0x10),
'Misery Mire - Map Chest': (0xc3, 0x20),
'Misery Mire - Main Lobby': (0xc2, 0x10),
'Misery Mire - Bridge Chest': (0xa2, 0x10),
'Misery Mire - Spike Chest': (0xb3, 0x10),
'Misery Mire - Compass Chest': (0xc1, 0x10),
'Misery Mire - Big Key Chest': (0xd1, 0x10),
'Misery Mire - Boss': (0x90, 0x800),
'Turtle Rock - Compass Chest': (0xd6, 0x10),
'Turtle Rock - Roller Room - Left': (0xb7, 0x10),
'Turtle Rock - Roller Room - Right': (0xb7, 0x20),
'Turtle Rock - Chain Chomps': (0xb6, 0x10),
'Turtle Rock - Big Key Chest': (0x14, 0x10),
'Turtle Rock - Big Chest': (0x24, 0x10),
'Turtle Rock - Crystaroller Room': (0x4, 0x10),
'Turtle Rock - Eye Bridge - Bottom Left': (0xd5, 0x80),
'Turtle Rock - Eye Bridge - Bottom Right': (0xd5, 0x40),
'Turtle Rock - Eye Bridge - Top Left': (0xd5, 0x20),
'Turtle Rock - Eye Bridge - Top Right': (0xd5, 0x10),
'Turtle Rock - Boss': (0xa4, 0x800),
'Palace of Darkness - Shooter Room': (0x9, 0x10),
'Palace of Darkness - The Arena - Bridge': (0x2a, 0x20),
'Palace of Darkness - Stalfos Basement': (0xa, 0x10),
'Palace of Darkness - Big Key Chest': (0x3a, 0x10),
'Palace of Darkness - The Arena - Ledge': (0x2a, 0x10),
'Palace of Darkness - Map Chest': (0x2b, 0x10),
'Palace of Darkness - Compass Chest': (0x1a, 0x20),
'Palace of Darkness - Dark Basement - Left': (0x6a, 0x10),
'Palace of Darkness - Dark Basement - Right': (0x6a, 0x20),
'Palace of Darkness - Dark Maze - Top': (0x19, 0x10),
'Palace of Darkness - Dark Maze - Bottom': (0x19, 0x20),
'Palace of Darkness - Big Chest': (0x1a, 0x10),
'Palace of Darkness - Harmless Hellway': (0x1a, 0x40),
'Palace of Darkness - Boss': (0x5a, 0x800),
"Ganons Tower - Bob's Torch": (0x8c, 0x400),
'Ganons Tower - Hope Room - Left': (0x8c, 0x20),
'Ganons Tower - Hope Room - Right': (0x8c, 0x40),
'Ganons Tower - Tile Room': (0x8d, 0x10),
'Ganons Tower - Compass Room - Top Left': (0x9d, 0x10),
'Ganons Tower - Compass Room - Top Right': (0x9d, 0x20),
'Ganons Tower - Compass Room - Bottom Left': (0x9d, 0x40),
'Ganons Tower - Compass Room - Bottom Right': (0x9d, 0x80),
'Ganons Tower - DMs Room - Top Left': (0x7b, 0x10),
'Ganons Tower - DMs Room - Top Right': (0x7b, 0x20),
'Ganons Tower - DMs Room - Bottom Left': (0x7b, 0x40),
'Ganons Tower - DMs Room - Bottom Right': (0x7b, 0x80),
'Ganons Tower - Map Chest': (0x8b, 0x10),
'Ganons Tower - Firesnake Room': (0x7d, 0x10),
'Ganons Tower - Randomizer Room - Top Left': (0x7c, 0x10),
'Ganons Tower - Randomizer Room - Top Right': (0x7c, 0x20),
'Ganons Tower - Randomizer Room - Bottom Left': (0x7c, 0x40),
'Ganons Tower - Randomizer Room - Bottom Right': (0x7c, 0x80),
"Ganons Tower - Bob's Chest": (0x8c, 0x80),
'Ganons Tower - Big Chest': (0x8c, 0x10),
'Ganons Tower - Big Key Room - Left': (0x1c, 0x20),
'Ganons Tower - Big Key Room - Right': (0x1c, 0x40),
'Ganons Tower - Big Key Chest': (0x1c, 0x10),
'Ganons Tower - Mini Helmasaur Room - Left': (0x3d, 0x10),
'Ganons Tower - Mini Helmasaur Room - Right': (0x3d, 0x20),
'Ganons Tower - Pre-Moldorm Chest': (0x3d, 0x40),
'Ganons Tower - Validation Chest': (0x4d, 0x10)}
location_table_npc = {'Mushroom': 0x1000,
'King Zora': 0x2,
'Sahasrahla': 0x10,
'Blacksmith': 0x400,
'Magic Bat': 0x8000,
'Sick Kid': 0x4,
'Library': 0x80,
'Potion Shop': 0x2000,
'Old Man': 0x1,
'Ether Tablet': 0x100,
'Catfish': 0x20,
'Stumpy': 0x8,
'Bombos Tablet': 0x200}
location_table_ow = {'Flute Spot': 0x2a,
'Sunken Treasure': 0x3b,
"Zora's Ledge": 0x81,
'Lake Hylia Island': 0x35,
'Maze Race': 0x28,
'Desert Ledge': 0x30,
'Master Sword Pedestal': 0x80,
'Spectacle Rock': 0x3,
'Pyramid': 0x5b,
'Digging Game': 0x68,
'Bumper Cave Ledge': 0x4a,
'Floating Island': 0x5}
location_table_misc = {'Bottle Merchant': (0x3c9, 0x2),
'Purple Chest': (0x3c9, 0x10),
"Link's Uncle": (0x3c6, 0x1),
'Hobo': (0x3c9, 0x1)}
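# (Encoding note, inferred from the tables themselves rather than documented
# here: each underworld entry pairs an in-game room index with a bitmask into
# that room's save-data word -- e.g. the five Blind's Hideout chests all live
# in room 0x11d under masks 0x10..0x100 -- while the NPC and overworld tables
# use a single flag value or screen id, and the misc table appears to pair a
# save-file offset with a flag bit.)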
SNES_DISCONNECTED = 0
SNES_CONNECTING = 1
SNES_CONNECTED = 2
SNES_ATTACHED = 3
async def snes_connect(ctx : Context, address):
if ctx.snes_socket is not None:
logging.error('Already connected to snes')
return
ctx.snes_state = SNES_CONNECTING
recv_task = None
address = f"ws://{address}" if "://" not in address else address
logging.info("Connecting to QUsb2snes at %s ..." % address)
seen_problems = set()
while ctx.snes_state == SNES_CONNECTING:
try:
ctx.snes_socket = await websockets.connect(address, ping_timeout=None, ping_interval=None)
except Exception as e:
problem = "%s" % e
            # only tell the user about new problems, otherwise silently lie in wait for a working connection
if problem not in seen_problems:
seen_problems.add(problem)
logging.error(f"Error connecting to QUsb2snes ({problem})")
if len(seen_problems) == 1:
# this is the first problem. Let's try launching QUsb2snes if it isn't already running
qusb2snes_path = Utils.get_options()["general_options"]["qusb2snes"]
import os
if not os.path.isfile(qusb2snes_path):
qusb2snes_path = Utils.local_path(qusb2snes_path)
if os.path.isfile(qusb2snes_path):
logging.info(f"Attempting to start {qusb2snes_path}")
import subprocess
subprocess.Popen(qusb2snes_path, cwd=os.path.dirname(qusb2snes_path))
else:
logging.info(
f"Attempt to start (Q)Usb2Snes was aborted as path {qusb2snes_path} was not found, please start it yourself if it is not running")
await asyncio.sleep(1)
else:
ctx.snes_state = SNES_CONNECTED
try:
DeviceList_Request = {
"Opcode": "DeviceList",
"Space": "SNES"
}
await ctx.snes_socket.send(json.dumps(DeviceList_Request))
reply = json.loads(await ctx.snes_socket.recv())
devices = reply['Results'] if 'Results' in reply and len(reply['Results']) > 0 else None
if not devices:
logging.info('No device found, waiting for device. Run multibridge and connect it to QUSB2SNES.')
while not devices:
await asyncio.sleep(1)
await ctx.snes_socket.send(json.dumps(DeviceList_Request))
reply = json.loads(await ctx.snes_socket.recv())
devices = reply['Results'] if 'Results' in reply and len(reply['Results']) > 0 else None
logging.info("Available devices:")
for id, device in enumerate(devices):
logging.info("[%d] %s" % (id + 1, device))
device = None
if len(devices) == 1:
device = devices[0]
elif ctx.snes_reconnect_address:
if ctx.snes_attached_device[1] in devices:
device = ctx.snes_attached_device[1]
else:
device = devices[ctx.snes_attached_device[0]]
else:
while True:
logging.info("Select a device:")
choice = await console_input(ctx)
if choice is None:
raise Exception('Abort input')
if not choice.isdigit() or int(choice) < 1 or int(choice) > len(devices):
logging.warning("Invalid choice (%s)" % choice)
continue
device = devices[int(choice) - 1]
break
logging.info("Attaching to " + device)
Attach_Request = {
"Opcode" : "Attach",
"Space" : "SNES",
"Operands" : [device]
}
await ctx.snes_socket.send(json.dumps(Attach_Request))
ctx.snes_state = SNES_ATTACHED
ctx.snes_attached_device = (devices.index(device), device)
        if 'sd2snes' in device.lower() or (len(device) == 4 and device[:3] == 'COM'):
logging.info("SD2SNES Detected")
ctx.is_sd2snes = True
await ctx.snes_socket.send(json.dumps({"Opcode" : "Info", "Space" : "SNES"}))
reply = json.loads(await ctx.snes_socket.recv())
if reply and 'Results' in reply:
logging.info(reply['Results'])
else:
ctx.is_sd2snes = False
ctx.snes_reconnect_address = address
recv_task = asyncio.create_task(snes_recv_loop(ctx))
except Exception as e:
if recv_task is not None:
if not ctx.snes_socket.closed:
await ctx.snes_socket.close()
else:
if ctx.snes_socket is not None:
if not ctx.snes_socket.closed:
await ctx.snes_socket.close()
ctx.snes_socket = None
ctx.snes_state = SNES_DISCONNECTED
if not ctx.snes_reconnect_address:
logging.error("Error connecting to snes (%s)" % e)
else:
logging.error(f"Error connecting to snes, attempt again in {RECONNECT_DELAY}s")
asyncio.create_task(snes_autoreconnect(ctx))
async def snes_autoreconnect(ctx: Context):
# unfortunately currently broken. See: https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1033
# with prompt_toolkit.shortcuts.ProgressBar() as pb:
# for _ in pb(range(100)):
# await asyncio.sleep(RECONNECT_DELAY/100)
await asyncio.sleep(RECONNECT_DELAY)
if ctx.snes_reconnect_address and ctx.snes_socket is None:
await snes_connect(ctx, ctx.snes_reconnect_address)
async def snes_recv_loop(ctx : Context):
try:
async for msg in ctx.snes_socket:
ctx.snes_recv_queue.put_nowait(msg)
logging.warning("Snes disconnected")
except Exception as e:
if not isinstance(e, websockets.WebSocketException):
logging.exception(e)
logging.error("Lost connection to the snes, type /snes to reconnect")
finally:
socket, ctx.snes_socket = ctx.snes_socket, None
if socket is not None and not socket.closed:
await socket.close()
ctx.snes_state = SNES_DISCONNECTED
ctx.snes_recv_queue = asyncio.Queue()
ctx.hud_message_queue = []
ctx.rom = None
if ctx.snes_reconnect_address:
logging.info(f"...reconnecting in {RECONNECT_DELAY}s")
asyncio.create_task(snes_autoreconnect(ctx))
async def snes_read(ctx : Context, address, size):
try:
await ctx.snes_request_lock.acquire()
if ctx.snes_state != SNES_ATTACHED or ctx.snes_socket is None or not ctx.snes_socket.open or ctx.snes_socket.closed:
return None
GetAddress_Request = {
"Opcode" : "GetAddress",
"Space" : "SNES",
"Operands" : [hex(address)[2:], hex(size)[2:]]
}
try:
await ctx.snes_socket.send(json.dumps(GetAddress_Request))
except websockets.ConnectionClosed:
return None
data = bytes()
while len(data) < size:
try:
data += await asyncio.wait_for(ctx.snes_recv_queue.get(), 5)
except asyncio.TimeoutError:
break
if len(data) != size:
logging.error('Error reading %s, requested %d bytes, received %d' % (hex(address), size, len(data)))
if len(data):
logging.error(str(data))
if ctx.snes_socket is not None and not ctx.snes_socket.closed:
await ctx.snes_socket.close()
return None
return data
finally:
ctx.snes_request_lock.release()
async def snes_write(ctx : Context, write_list):
try:
await ctx.snes_request_lock.acquire()
if ctx.snes_state != SNES_ATTACHED or ctx.snes_socket is None or not ctx.snes_socket.open or ctx.snes_socket.closed:
return False
PutAddress_Request = {
"Opcode" : "PutAddress",
"Operands" : []
}
if ctx.is_sd2snes:
cmd = b'\x00\xE2\x20\x48\xEB\x48'
for address, data in write_list:
if (address < WRAM_START) or ((address + len(data)) > (WRAM_START + WRAM_SIZE)):
logging.error("SD2SNES: Write out of range %s (%d)" % (hex(address), len(data)))
return False
for ptr, byte in enumerate(data, address + 0x7E0000 - WRAM_START):
cmd += b'\xA9' # LDA
cmd += bytes([byte])
cmd += b'\x8F' # STA.l
cmd += bytes([ptr & 0xFF, (ptr >> 8) & 0xFF, (ptr >> 16) & 0xFF])
cmd += b'\xA9\x00\x8F\x00\x2C\x00\x68\xEB\x68\x28\x6C\xEA\xFF\x08'
PutAddress_Request['Space'] = 'CMD'
PutAddress_Request['Operands'] = ["2C00", hex(len(cmd)-1)[2:], "2C00", "1"]
try:
if ctx.snes_socket is not None:
await ctx.snes_socket.send(json.dumps(PutAddress_Request))
if ctx.snes_socket is not None:
await ctx.snes_socket.send(cmd)
except websockets.ConnectionClosed:
return False
else:
PutAddress_Request['Space'] = 'SNES'
try:
#will pack those requests as soon as qusb2snes actually supports that for real
for address, data in write_list:
PutAddress_Request['Operands'] = [hex(address)[2:], hex(len(data))[2:]]
if ctx.snes_socket is not None:
await ctx.snes_socket.send(json.dumps(PutAddress_Request))
if ctx.snes_socket is not None:
await ctx.snes_socket.send(data)
except websockets.ConnectionClosed:
return False
return True
finally:
ctx.snes_request_lock.release()
def snes_buffered_write(ctx : Context, address, data):
if len(ctx.snes_write_buffer) > 0 and (ctx.snes_write_buffer[-1][0] + len(ctx.snes_write_buffer[-1][1])) == address:
ctx.snes_write_buffer[-1] = (ctx.snes_write_buffer[-1][0], ctx.snes_write_buffer[-1][1] + data)
else:
ctx.snes_write_buffer.append((address, data))
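# Sketch of the coalescing behaviour above (illustrative values, not part of the
# original file): two writes to adjacent addresses merge into one buffer entry.
#   ctx.snes_write_buffer = []
#   snes_buffered_write(ctx, 0xF50000, bytes([1]))
#   snes_buffered_write(ctx, 0xF50001, bytes([2]))   # adjacent -> appended to previous entry
#   # ctx.snes_write_buffer == [(0xF50000, bytes([1, 2]))]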
async def snes_flush_writes(ctx : Context):
if not ctx.snes_write_buffer:
return
await snes_write(ctx, ctx.snes_write_buffer)
ctx.snes_write_buffer = []
async def send_msgs(websocket, msgs):
if not websocket or not websocket.open or websocket.closed:
return
await websocket.send(json.dumps(msgs))
async def server_loop(ctx : Context, address = None):
if ctx.socket is not None:
logging.error('Already connected')
return
if address is None:
address = ctx.server_address
while not address:
logging.info('Enter multiworld server address')
address = await console_input(ctx)
address = f"ws://{address}" if "://" not in address else address
port = urllib.parse.urlparse(address).port or 38281
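    # e.g. a bare "localhost:38281" becomes "ws://localhost:38281"; if the address
    # carries no explicit port, the default 38281 is used.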
logging.info('Connecting to multiworld server at %s' % address)
try:
ctx.socket = await websockets.connect(address, port=port, ping_timeout=None, ping_interval=None)
logging.info('Connected')
ctx.server_address = address
async for data in ctx.socket:
for msg in json.loads(data):
cmd, args = (msg[0], msg[1]) if len(msg) > 1 else (msg, None)
await process_server_cmd(ctx, cmd, args)
logging.warning('Disconnected from multiworld server, type /connect to reconnect')
except ConnectionRefusedError:
logging.error('Connection refused by the multiworld server')
except (OSError, websockets.InvalidURI):
logging.error('Failed to connect to the multiworld server')
except Exception as e:
logging.error('Lost connection to the multiworld server, type /connect to reconnect')
if not isinstance(e, websockets.WebSocketException):
logging.exception(e)
finally:
ctx.awaiting_rom = False
ctx.auth = None
ctx.items_received = []
ctx.locations_info = {}
socket, ctx.socket = ctx.socket, None
if socket is not None and not socket.closed:
await socket.close()
ctx.server_task = None
if ctx.server_address:
logging.info(f"... reconnecting in {RECONNECT_DELAY}s")
asyncio.create_task(server_autoreconnect(ctx))
async def server_autoreconnect(ctx: Context):
# unfortunately currently broken. See: https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1033
# with prompt_toolkit.shortcuts.ProgressBar() as pb:
# for _ in pb(range(100)):
# await asyncio.sleep(RECONNECT_DELAY/100)
await asyncio.sleep(RECONNECT_DELAY)
if ctx.server_address and ctx.server_task is None:
ctx.server_task = asyncio.create_task(server_loop(ctx))
async def process_server_cmd(ctx : Context, cmd, args):
if cmd == 'RoomInfo':
logging.info('--------------------------------')
logging.info('Room Information:')
logging.info('--------------------------------')
version = args.get("version", "unknown Bonta Protocol")
        if not isinstance(version, str):
version = ".".join(str(item) for item in version)
logging.info(f'Server protocol version: {version}')
if "tags" in args:
logging.info("Server protocol tags: " + ", ".join(args["tags"]))
if args['password']:
logging.info('Password required')
if len(args['players']) < 1:
logging.info('No player connected')
else:
args['players'].sort()
current_team = -1
logging.info('Connected players:')
for team, slot, name in args['players']:
if team != current_team:
logging.info(f' Team #{team + 1}')
current_team = team
logging.info(' %s (Player %d)' % (name, slot))
await server_auth(ctx, args['password'])
elif cmd == 'ConnectionRefused':
if 'InvalidPassword' in args:
logging.error('Invalid password')
ctx.password = None
await server_auth(ctx, True)
if 'InvalidRom' in args:
if ctx.snes_socket is not None and not ctx.snes_socket.closed:
asyncio.create_task(ctx.snes_socket.close())
raise Exception(
'Invalid ROM detected, please verify that you have loaded the correct rom and reconnect your snes (/snes)')
if 'SlotAlreadyTaken' in args:
raise Exception('Player slot already in use for that team')
if 'IncompatibleVersion' in args:
raise Exception('Server reported your client version as incompatible')
raise Exception('Connection refused by the multiworld host')
elif cmd == 'Connected':
ctx.team, ctx.slot = args[0]
ctx.player_names = {p: n for p, n in args[1]}
msgs = []
if ctx.locations_checked:
msgs.append(['LocationChecks', [Regions.location_table[loc][0] for loc in ctx.locations_checked]])
if ctx.locations_scouted:
msgs.append(['LocationScouts', list(ctx.locations_scouted)])
if msgs:
await send_msgs(ctx.socket, msgs)
elif cmd == 'ReceivedItems':
start_index, items = args
if start_index == 0:
ctx.items_received = []
elif start_index != len(ctx.items_received):
sync_msg = [['Sync']]
if ctx.locations_checked:
sync_msg.append(['LocationChecks', [Regions.location_table[loc][0] for loc in ctx.locations_checked]])
await send_msgs(ctx.socket, sync_msg)
if start_index == len(ctx.items_received):
for item in items:
ctx.items_received.append(ReceivedItem(*item))
ctx.watcher_event.set()
elif cmd == 'LocationInfo':
for location, item, player in args:
if location not in ctx.locations_info:
replacements = {0xA2: 'Small Key', 0x9D: 'Big Key', 0x8D: 'Compass', 0x7D: 'Map'}
item_name = replacements.get(item, get_item_name_from_id(item))
logging.info(
f"Saw {color(item_name, 'red', 'bold')} at {list(Regions.location_table.keys())[location - 1]}")
ctx.locations_info[location] = (item, player)
ctx.watcher_event.set()
elif cmd == 'ItemSent':
player_sent, location, player_recvd, item = args
item = color(get_item_name_from_id(item), 'cyan' if player_sent != ctx.slot else 'green')
player_sent = color(ctx.player_names[player_sent], 'yellow' if player_sent != ctx.slot else 'magenta')
player_recvd = color(ctx.player_names[player_recvd], 'yellow' if player_recvd != ctx.slot else 'magenta')
logging.info(
'%s sent %s to %s (%s)' % (player_sent, item, player_recvd, get_location_name_from_address(location)))
elif cmd == 'ItemFound':
found = ReceivedItem(*args)
item = color(get_item_name_from_id(found.item), 'cyan' if found.player != ctx.slot else 'green')
player_sent = color(ctx.player_names[found.player], 'yellow' if found.player != ctx.slot else 'magenta')
logging.info('%s found %s (%s)' % (player_sent, item, get_location_name_from_address(found.location)))
elif cmd == 'Hint':
hints = [Utils.Hint(*hint) for hint in args]
for hint in hints:
item = color(get_item_name_from_id(hint.item), 'green' if hint.found else 'cyan')
player_find = color(ctx.player_names[hint.finding_player],
'yellow' if hint.finding_player != ctx.slot else 'magenta')
player_recvd = color(ctx.player_names[hint.receiving_player],
'yellow' if hint.receiving_player != ctx.slot else 'magenta')
logging.info(f"[Hint]: {player_recvd}'s {item} can be found "
f"at {get_location_name_from_address(hint.location)} in {player_find}'s World." +
(" (found)" if hint.found else ""))
elif cmd == 'Print':
logging.info(args)
def get_tags(ctx: Context):
tags = ['Berserker']
if ctx.found_items:
tags.append('FoundItems')
return tags
async def server_auth(ctx: Context, password_requested):
if password_requested and not ctx.password:
logging.info('Enter the password required to join this game:')
ctx.password = await console_input(ctx)
if ctx.rom is None:
ctx.awaiting_rom = True
logging.info('No ROM detected, awaiting snes connection to authenticate to the multiworld server (/snes)')
return
ctx.awaiting_rom = False
ctx.auth = ctx.rom.copy()
await send_msgs(ctx.socket, [['Connect', {
'password': ctx.password, 'rom': ctx.auth, 'version': Utils._version_tuple, 'tags': get_tags(ctx)
}]])
async def console_input(ctx : Context):
ctx.input_requests += 1
return await ctx.input_queue.get()
async def disconnect(ctx: Context):
if ctx.socket is not None and not ctx.socket.closed:
await ctx.socket.close()
if ctx.server_task is not None:
await ctx.server_task
async def connect(ctx: Context, address=None):
await disconnect(ctx)
ctx.server_task = asyncio.create_task(server_loop(ctx, address))
from MultiServer import CommandProcessor
class ClientCommandProcessor(CommandProcessor):
def __init__(self, ctx: Context):
self.ctx = ctx
def _cmd_exit(self) -> bool:
"""Close connections and client"""
self.ctx.exit_event.set()
return True
def _cmd_snes(self, snes_address: str = "") -> bool:
"""Connect to a snes. Optionally include network address of a snes to connect to, otherwise show available devices"""
self.ctx.snes_reconnect_address = None
asyncio.create_task(snes_connect(self.ctx, snes_address if snes_address else self.ctx.snes_address))
return True
def _cmd_snes_close(self) -> bool:
"""Close connection to a currently connected snes"""
self.ctx.snes_reconnect_address = None
if self.ctx.snes_socket is not None and not self.ctx.snes_socket.closed:
asyncio.create_task(self.ctx.snes_socket.close())
return True
else:
return False
def _cmd_connect(self, address: str = "") -> bool:
"""Connect to a MultiWorld Server"""
self.ctx.server_address = None
asyncio.create_task(connect(self.ctx, address if address else None))
return True
def _cmd_disconnect(self) -> bool:
"""Disconnect from a MultiWorld Server"""
self.ctx.server_address = None
asyncio.create_task(disconnect(self.ctx))
return True
def _cmd_received(self) -> bool:
"""List all received items"""
logging.info('Received items:')
for index, item in enumerate(self.ctx.items_received, 1):
logging.info('%s from %s (%s) (%d/%d in list)' % (
color(get_item_name_from_id(item.item), 'red', 'bold'),
color(self.ctx.player_names[item.player], 'yellow'),
get_location_name_from_address(item.location), index, len(self.ctx.items_received)))
return True
def _cmd_missing(self) -> bool:
"""List all missing location checks, from your local game state"""
count = 0
for location in [k for k, v in Regions.location_table.items() if type(v[0]) is int]:
if location not in self.ctx.locations_checked:
self.output('Missing: ' + location)
count += 1
if count:
self.output(f"Found {count} missing location checks")
else:
self.output("No missing location checks found.")
return True
def _cmd_show_items(self, toggle: str = "") -> bool:
"""Toggle showing of items received across the team"""
if toggle:
self.ctx.found_items = toggle.lower() in {"1", "true", "on"}
else:
self.ctx.found_items = not self.ctx.found_items
logging.info(f"Set showing team items to {self.ctx.found_items}")
asyncio.create_task(send_msgs(self.ctx.socket, [['UpdateTags', get_tags(self.ctx)]]))
return True
def default(self, raw: str):
asyncio.create_task(send_msgs(self.ctx.socket, [['Say', raw]]))
async def console_loop(ctx: Context):
session = prompt_toolkit.PromptSession()
commandprocessor = ClientCommandProcessor(ctx)
while not ctx.exit_event.is_set():
try:
with patch_stdout():
input_text = await session.prompt_async()
if ctx.input_requests > 0:
ctx.input_requests -= 1
ctx.input_queue.put_nowait(input_text)
continue
if not input_text:
continue
commandprocessor(input_text)
except Exception as e:
logging.exception(e)
await snes_flush_writes(ctx)
async def track_locations(ctx : Context, roomid, roomdata):
new_locations = []
def new_check(location):
ctx.locations_checked.add(location)
logging.info("New check: %s (%d/216)" % (location, len(ctx.locations_checked)))
new_locations.append(Regions.location_table[location][0])
for location, (loc_roomid, loc_mask) in location_table_uw.items():
if location not in ctx.locations_checked and loc_roomid == roomid and (roomdata << 4) & loc_mask != 0:
new_check(location)
uw_begin = 0x129
uw_end = 0
uw_unchecked = {}
for location, (roomid, mask) in location_table_uw.items():
if location not in ctx.locations_checked:
uw_unchecked[location] = (roomid, mask)
uw_begin = min(uw_begin, roomid)
uw_end = max(uw_end, roomid + 1)
if uw_begin < uw_end:
uw_data = await snes_read(ctx, SAVEDATA_START + (uw_begin * 2), (uw_end - uw_begin) * 2)
if uw_data is not None:
for location, (roomid, mask) in uw_unchecked.items():
offset = (roomid - uw_begin) * 2
roomdata = uw_data[offset] | (uw_data[offset + 1] << 8)
if roomdata & mask != 0:
new_check(location)
ow_begin = 0x82
ow_end = 0
ow_unchecked = {}
for location, screenid in location_table_ow.items():
if location not in ctx.locations_checked:
ow_unchecked[location] = screenid
ow_begin = min(ow_begin, screenid)
ow_end = max(ow_end, screenid + 1)
if ow_begin < ow_end:
ow_data = await snes_read(ctx, SAVEDATA_START + 0x280 + ow_begin, ow_end - ow_begin)
if ow_data is not None:
for location, screenid in ow_unchecked.items():
if ow_data[screenid - ow_begin] & 0x40 != 0:
new_check(location)
if not all([location in ctx.locations_checked for location in location_table_npc.keys()]):
npc_data = await snes_read(ctx, SAVEDATA_START + 0x410, 2)
if npc_data is not None:
npc_value = npc_data[0] | (npc_data[1] << 8)
for location, mask in location_table_npc.items():
if npc_value & mask != 0 and location not in ctx.locations_checked:
new_check(location)
if not all([location in ctx.locations_checked for location in location_table_misc.keys()]):
misc_data = await snes_read(ctx, SAVEDATA_START + 0x3c6, 4)
if misc_data is not None:
for location, (offset, mask) in location_table_misc.items():
assert(0x3c6 <= offset <= 0x3c9)
if misc_data[offset - 0x3c6] & mask != 0 and location not in ctx.locations_checked:
new_check(location)
await send_msgs(ctx.socket, [['LocationChecks', new_locations]])
async def game_watcher(ctx : Context):
while not ctx.exit_event.is_set():
try:
await asyncio.wait_for(ctx.watcher_event.wait(), 2)
except asyncio.TimeoutError:
pass
ctx.watcher_event.clear()
if not ctx.rom:
rom = await snes_read(ctx, ROMNAME_START, ROMNAME_SIZE)
if rom is None or rom == bytes([0] * ROMNAME_SIZE):
continue
ctx.rom = list(rom)
ctx.locations_checked = set()
ctx.locations_scouted = set()
if ctx.awaiting_rom:
await server_auth(ctx, False)
if ctx.auth and ctx.auth != ctx.rom:
logging.warning("ROM change detected, please reconnect to the multiworld server")
await disconnect(ctx)
gamemode = await snes_read(ctx, WRAM_START + 0x10, 1)
if gamemode is None or gamemode[0] not in INGAME_MODES:
continue
data = await snes_read(ctx, RECV_PROGRESS_ADDR, 8)
if data is None:
continue
recv_index = data[0] | (data[1] << 8)
assert RECV_ITEM_ADDR == RECV_PROGRESS_ADDR + 2
recv_item = data[2]
assert ROOMID_ADDR == RECV_PROGRESS_ADDR + 4
roomid = data[4] | (data[5] << 8)
assert ROOMDATA_ADDR == RECV_PROGRESS_ADDR + 6
roomdata = data[6]
assert SCOUT_LOCATION_ADDR == RECV_PROGRESS_ADDR + 7
scout_location = data[7]
if recv_index < len(ctx.items_received) and recv_item == 0:
item = ctx.items_received[recv_index]
logging.info('Received %s from %s (%s) (%d/%d in list)' % (
color(get_item_name_from_id(item.item), 'red', 'bold'), color(ctx.player_names[item.player], 'yellow'),
get_location_name_from_address(item.location), recv_index + 1, len(ctx.items_received)))
recv_index += 1
snes_buffered_write(ctx, RECV_PROGRESS_ADDR, bytes([recv_index & 0xFF, (recv_index >> 8) & 0xFF]))
snes_buffered_write(ctx, RECV_ITEM_ADDR, bytes([item.item]))
snes_buffered_write(ctx, RECV_ITEM_PLAYER_ADDR, bytes([item.player if item.player != ctx.slot else 0]))
if scout_location > 0 and scout_location in ctx.locations_info:
snes_buffered_write(ctx, SCOUTREPLY_LOCATION_ADDR, bytes([scout_location]))
snes_buffered_write(ctx, SCOUTREPLY_ITEM_ADDR, bytes([ctx.locations_info[scout_location][0]]))
snes_buffered_write(ctx, SCOUTREPLY_PLAYER_ADDR, bytes([ctx.locations_info[scout_location][1]]))
await snes_flush_writes(ctx)
if scout_location > 0 and scout_location not in ctx.locations_scouted:
ctx.locations_scouted.add(scout_location)
logging.info(f'Scouting item at {list(Regions.location_table.keys())[scout_location - 1]}')
await send_msgs(ctx.socket, [['LocationScouts', [scout_location]]])
await track_locations(ctx, roomid, roomdata)
async def run_game(romfile):
import webbrowser
webbrowser.open(romfile)
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('diff_file', default="", type=str, nargs="?",
help='Path to a Berserker Multiworld Binary Patch file')
parser.add_argument('--snes', default='localhost:8080', help='Address of the QUsb2snes server.')
parser.add_argument('--connect', default=None, help='Address of the multiworld host.')
parser.add_argument('--password', default=None, help='Password of the multiworld host.')
parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--founditems', default=False, action='store_true', help='Show items found by other players for themselves.')
args = parser.parse_args()
logging.basicConfig(format='%(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))
if args.diff_file:
import Patch
meta, romfile = Patch.create_rom_file(args.diff_file)
args.connect = meta["server"]
logging.info(f"Wrote rom file to {romfile}")
asyncio.create_task(run_game(romfile))
ctx = Context(args.snes, args.connect, args.password, args.founditems)
input_task = asyncio.create_task(console_loop(ctx))
await snes_connect(ctx, ctx.snes_address)
if ctx.server_task is None:
ctx.server_task = asyncio.create_task(server_loop(ctx))
watcher_task = asyncio.create_task(game_watcher(ctx))
await ctx.exit_event.wait()
ctx.server_address = None
ctx.snes_reconnect_address = None
await watcher_task
if ctx.socket is not None and not ctx.socket.closed:
await ctx.socket.close()
if ctx.server_task is not None:
await ctx.server_task
if ctx.snes_socket is not None and not ctx.snes_socket.closed:
await ctx.snes_socket.close()
while ctx.input_requests > 0:
ctx.input_queue.put_nowait(None)
ctx.input_requests -= 1
await input_task
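# Typical invocation (illustrative; the script and patch file names here are placeholders):
#   python this_client.py my_patch_file --snes localhost:8080 --connect example.org:38281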
if __name__ == '__main__':
colorama.init()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
colorama.deinit()
atexit.unregister(exit_func)
| 44.539179
| 158
| 0.567105
|
4a12368c1adcb4426ca2ec30c6e19733fa328fe0
| 1,182
|
py
|
Python
|
jplib/mouse/mouse.py
|
jabbalaci/jabbapylib3
|
ddc8fe88b89c4379254183b9a7c1405574a3a262
|
[
"MIT"
] | 6
|
2017-03-31T16:58:52.000Z
|
2019-05-11T20:12:07.000Z
|
jplib/mouse/mouse.py
|
jabbalaci/jabbapylib3
|
ddc8fe88b89c4379254183b9a7c1405574a3a262
|
[
"MIT"
] | null | null | null |
jplib/mouse/mouse.py
|
jabbalaci/jabbapylib3
|
ddc8fe88b89c4379254183b9a7c1405574a3a262
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Mouse actions.
What we need is:
* move the mouse pointer to a given position
* perform a simple left click
# from jplib.mouse import mouse
"""
import time
import autopy3 as ap
from autopy3.mouse import LEFT_BUTTON
#def left_down():
# ap.mouse.toggle(True, LEFT_BUTTON)
# time.sleep(.1)
# print '# left down'
#
#
#def left_up():
# ap.mouse.toggle(False, LEFT_BUTTON)
# time.sleep(.1)
# print '# left release'
def left_click():
"""Perform a left mouse click."""
ap.mouse.click(LEFT_BUTTON)
time.sleep(.1)
# print "# click"
def move_xy(x, y):
"""Move the mouse pointer to the given position."""
ap.mouse.move(x, y)
def move_to(pos):
"""
Move the mouse pointer to the given position.
pos is a tuple
"""
move_xy(pos[0], pos[1])
def click_to(pos):
"""
Perform a left mouse click at the given position.
pos is a tuple
"""
move_to(pos)
left_click()
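# Example usage (illustrative coordinates):
#   click_to((200, 300))   # move the pointer to (200, 300) and left-click there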
def get_pos():
return ap.mouse.get_pos()
#############################################################################
if __name__ == "__main__":
time.sleep(3)
print('# absolute pos:', get_pos())
| 16.885714
| 77
| 0.586294
|
4a1236979572a179e90b8e1f1bff1532e650c172
| 3,843
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/web/v20190801/list_web_app_azure_storage_accounts_slot.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/web/v20190801/list_web_app_azure_storage_accounts_slot.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/web/v20190801/list_web_app_azure_storage_accounts_slot.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListWebAppAzureStorageAccountsSlotResult',
'AwaitableListWebAppAzureStorageAccountsSlotResult',
'list_web_app_azure_storage_accounts_slot',
]
@pulumi.output_type
class ListWebAppAzureStorageAccountsSlotResult:
"""
AzureStorageInfo dictionary resource.
"""
def __init__(__self__, kind=None, name=None, properties=None, type=None):
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Mapping[str, 'outputs.AzureStorageInfoValueResponseResult']:
"""
Azure storage accounts.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListWebAppAzureStorageAccountsSlotResult(ListWebAppAzureStorageAccountsSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppAzureStorageAccountsSlotResult(
kind=self.kind,
name=self.name,
properties=self.properties,
type=self.type)
def list_web_app_azure_storage_accounts_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppAzureStorageAccountsSlotResult:
"""
Use this data source to access information about an existing resource.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will update the Azure storage account configurations for the production slot.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20190801:listWebAppAzureStorageAccountsSlot', __args__, opts=opts, typ=ListWebAppAzureStorageAccountsSlotResult).value
return AwaitableListWebAppAzureStorageAccountsSlotResult(
kind=__ret__.kind,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
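# Illustrative call (resource names are placeholders, not part of the generated SDK):
#   accounts = list_web_app_azure_storage_accounts_slot(
#       name="my-app", resource_group_name="my-rg", slot="staging")
#   pulumi.export("storageAccounts", accounts.properties)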
| 35.583333
| 174
| 0.652095
|
4a1236fce096948079417273c5401b0395888371
| 2,824
|
py
|
Python
|
src/typing_inspect_lib/core/get_typing.py
|
Peilonrayz/typing_inspect_lib
|
a94cc2be6b677edb1fa40f12c36f531bc4bef87f
|
[
"MIT"
] | 7
|
2019-03-07T01:01:24.000Z
|
2019-07-24T18:26:36.000Z
|
src/typing_inspect_lib/core/get_typing.py
|
Peilonrayz/typing_inspect_lib
|
a94cc2be6b677edb1fa40f12c36f531bc4bef87f
|
[
"MIT"
] | null | null | null |
src/typing_inspect_lib/core/get_typing.py
|
Peilonrayz/typing_inspect_lib
|
a94cc2be6b677edb1fa40f12c36f531bc4bef87f
|
[
"MIT"
] | null | null | null |
import types
import typing
from .get_origins import _get_last_origin
from .helpers import (
LITERAL_TYPES, PY_OLD, TYPING_OBJECTS, get_special_wrapped,
safe_dict_get, safe_dict_get_both, typing_,
)
if PY_OLD:
def _get_typing(type_):
"""
Returns the typing type and class type of a wrapped or unwrapped type.
        This function doesn't work with special types; those require another function
        to extract the information correctly. Builtin {literal types, class types,
        typing types} are all handled before this function runs.
Example:
_get_typing(Mapping[str, int]) == (Mapping, collections.abc.Mapping)
_get_typing(MyClass) == (MyClass, MyClass)
_get_typing(MyClass[str, int]) == (MyClass, MyClass)
"""
origin = _get_last_origin(type_)
if origin is not None:
return origin, safe_dict_get(TYPING_OBJECTS.typing, origin) or origin
if isinstance(type_, typing.GenericMeta):
return type_, type_
return None
else:
def _get_typing(type_):
origin = getattr(type_, '__origin__', None)
if origin is not None:
return safe_dict_get(TYPING_OBJECTS.class_, origin) or origin, origin
if hasattr(type_, '__orig_bases__'):
return type_, type_
return None
def _get_special_typing_universal(type_):
"""Handles the following types for wrapped types: TypeVar, Protocol, NewType"""
if isinstance(type_, typing.TypeVar):
return typing.TypeVar, type_
if isinstance(type_, typing_.ProtocolMeta):
return typing_.Protocol, type_
if isinstance(type_, types.FunctionType) and hasattr(type_, '__supertype__'):
return typing_.NewType, type_
return None
if PY_OLD:
def _get_special_typing(type_):
"""Handles special types that can't be handled through normal means."""
return get_special_wrapped(type_)
else:
def _get_special_typing(type_):
return None
def get_typing(type_):
"""
Gets the typing type and the class type of the type passed to it.
Examples:
get_typing(Mapping) == (Mapping, collections.abc.Mapping)
get_typing(Mapping[str, int]) == (Mapping, collections.abc.Mapping)
get_typing(Union[str, int]) == (Union, Union)
"""
ret = (
_get_special_typing_universal(type_)
or safe_dict_get(LITERAL_TYPES, type_)
or safe_dict_get_both(TYPING_OBJECTS.class_, type_, inv=True)
or safe_dict_get_both(TYPING_OBJECTS.typing, type_)
or _get_typing(type_)
or _get_special_typing(type_)
)
if ret is None:
return None, None
type_type, class_type = ret
if type_type is typing_.NewType:
class_type = type_
return type_type, class_type
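# A further worked case for the lookup chain above (complementing the docstring
# examples): a bare TypeVar is matched first by _get_special_typing_universal.
#   T = typing.TypeVar('T')
#   get_typing(T) == (typing.TypeVar, T)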
| 32.090909
| 83
| 0.671388
|
4a1237db0ec97960089e3c1ab6c6235ce4326d06
| 15,388
|
py
|
Python
|
sdc11073/pysoap/soapclient.py
|
deichmab-draeger/sdc11073-1
|
2cbd4daaa32dc8a52723ecb8209f39a7d19b3c1b
|
[
"MIT"
] | 18
|
2020-07-19T19:38:15.000Z
|
2022-03-21T11:51:04.000Z
|
sdc11073/pysoap/soapclient.py
|
deichmab-draeger/sdc11073-1
|
2cbd4daaa32dc8a52723ecb8209f39a7d19b3c1b
|
[
"MIT"
] | 10
|
2020-09-25T12:00:36.000Z
|
2021-11-26T10:49:29.000Z
|
sdc11073/pysoap/soapclient.py
|
deichmab-draeger/sdc11073-1
|
2cbd4daaa32dc8a52723ecb8209f39a7d19b3c1b
|
[
"MIT"
] | 10
|
2020-08-31T17:44:51.000Z
|
2021-07-13T12:58:18.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Pythonic simple SOAP Client implementation
Using lxml based SoapEnvelope."""
import sys
import traceback
from threading import Lock
import socket
import time
import http.client as httplib
from lxml.etree import XMLSyntaxError
from .. import observableproperties
from .. import commlog
from ..compression import CompressionHandler
from . import soapenvelope
from ..httprequesthandler import HTTPReader, mkchunks
class HTTPConnection_NODELAY(httplib.HTTPConnection):
def connect(self):
httplib.HTTPConnection.connect(self)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
class HTTPSConnection_NODELAY(httplib.HTTPSConnection):
def connect(self):
httplib.HTTPSConnection.connect(self)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
class HTTPReturnCodeError(httplib.HTTPException):
    '''This class is used to map http return codes to Python exceptions.'''
    def __init__(self, status, reason, soapfault):
        '''
        @param status: integer, e.g. 404
        @param reason: the provided human readable text
        @param soapfault: the parsed soap fault, if one was received
        '''
super(HTTPReturnCodeError, self).__init__()
self.status = status
self.reason = reason
self.soapfault = soapfault
def __repr__(self):
if self.soapfault:
return 'HTTPReturnCodeError(status={}, reason={}'.format(self.status, self.soapfault)
else:
return 'HTTPReturnCodeError(status={}, reason={}'.format(self.status, self.reason)
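# Illustrative use (hypothetical values): the SoapClient below raises this for any
# HTTP status >= 300, e.g.
#   raise HTTPReturnCodeError(404, 'Not Found', soapfault=None)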
class SoapClient(CompressionHandler):
_usedSoapClients = 0
SOCKET_TIMEOUT = 10 if sys.gettrace() is None else 1000 # higher timeout for debugging
"""SOAP Client"""
roundtrip_time = observableproperties.ObservableProperty()
def __init__(self, netloc, logger, sslContext, sdc_definitions, supportedEncodings=None,
requestEncodings=None, chunked_requests=False):
        ''' Connects to one url
        @param netloc: the location of the service (domainname:port)
        @param logger: a logger instance
        @param sslContext: an optional ssl.SSLContext instance
        @param sdc_definitions: the sdc definitions instance, used to normalize/denormalize xml text
        @param supportedEncodings: configured set of encodings that can be used. If None, all available encodings are used.
                                This is used for decompression of received responses.
                                If this is an empty list, no compression is supported.
        @param requestEncodings: an optional list of encodings that the other side accepts. It is used to compress requests.
                                If not set, requests will not be compressed.
                                If set, then the http request will be compressed using this method
        '''
self._log = logger
self._sslContext = sslContext
self._sdc_definitions = sdc_definitions
self._netloc = netloc
self._httpConnection = None # connect later on demand
self.__class__._usedSoapClients += 1 #pylint: disable=protected-access
self._clientNo = self.__class__._usedSoapClients #pylint: disable=protected-access
self._log.info('created soapClient No. {} for {}', self._clientNo, netloc)
self.supportedEncodings = supportedEncodings if supportedEncodings is not None else self.available_encodings
self.requestEncodings = requestEncodings if requestEncodings is not None else [] # these compression alg's does the other side accept ( set at runtime)
self._makeGetHeaders()
self._lock = Lock()
self._chunked_requests = chunked_requests
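    # Illustrative construction (logger and sdc_definitions are assumed to come
    # from the surrounding sdc11073 application; not part of the original file):
    #   client = SoapClient('192.168.0.5:10000', logger, sslContext=None,
    #                       sdc_definitions=definitions, supportedEncodings=['gzip'])
    #   client.connect()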
@property
def netloc(self):
return self._netloc
@property
def sock(self):
return None if self._httpConnection is None else self._httpConnection.sock
def _mkHttpConnection(self):
        ''' Soap client never sends very large requests; the largest packages are notifications.
        Therefore we can use TCP_NODELAY for a little faster transmission.
        (Otherwise there would be a chance that the receiver's window size decreases, which would result in smaller
        packages and therefore higher network load.)'''
if self._sslContext is not None:
conn = HTTPSConnection_NODELAY(self._netloc, context=self._sslContext, timeout=self.SOCKET_TIMEOUT)
else:
conn = HTTPConnection_NODELAY(self._netloc, timeout=self.SOCKET_TIMEOUT)
return conn
def connect(self):
self._httpConnection = self._mkHttpConnection()
self._httpConnection.connect() # connect now so that we have own address and port for logging
my_addr = self._httpConnection.sock.getsockname()
self._log.info('soapClient No. {} uses connection={}:{}', self._clientNo, my_addr[0], my_addr[1])
def close(self):
with self._lock:
if self._httpConnection is not None:
self._log.info('closing soapClientNo {} for {}', self._clientNo, self._netloc)
self._httpConnection.close()
self._httpConnection = None
def isClosed(self):
return self._httpConnection is None
def postSoapEnvelopeTo(self, path, soapEnvelopeRequest, responseFactory=None, schema=None, msg='',
request_manipulator=None):
'''
@param path: url path component
@param soapEnvelopeRequest: The soap envelope that shall be sent
@param responseFactory: a callable that creates a response object from received xml. If None, a ReceivedSoap12Envelope will be created
@param schema: If given, the request is validated against this schema
@param msg: used in logs, helps to identify the context in which the method was called
'''
if self.isClosed():
self.connect()
return self.__postSoapEnvelope(soapEnvelopeRequest, responseFactory, schema, path, msg, request_manipulator)
def __postSoapEnvelope(self, soapEnvelopeRequest, responseFactory, schema, path, msg, request_manipulator):
if schema is not None:
soapEnvelopeRequest.validateBody(schema)
if hasattr(request_manipulator, 'manipulate_soapenvelope'):
tmp = request_manipulator.manipulate_soapenvelope(soapEnvelopeRequest)
if tmp:
soapEnvelopeRequest = tmp
normalized_xml_request = soapEnvelopeRequest.as_xml(request_manipulator=request_manipulator)
xml_request = self._sdc_definitions.denormalizeXMLText(normalized_xml_request)
assert (b'utf-8' in xml_request[:100].lower()) # MDPWS:R0007 A text SOAP envelope shall be serialized using utf-8 character encoding
if hasattr(request_manipulator, 'manipulate_string'):
tmp = request_manipulator.manipulate_string(xml_request)
if tmp:
xml_request = tmp
started = time.perf_counter()
try:
xml_response = self._sendSoapRequest(path, xml_request, msg)
finally:
self.roundtrip_time = time.perf_counter() - started # set roundtrip time even if method raises an exception
normalized_xml_response = self._sdc_definitions.normalizeXMLText(xml_response)
my_responseFactory = responseFactory or soapenvelope.ReceivedSoap12Envelope.fromXMLString
try:
return my_responseFactory(normalized_xml_response, schema)
except XMLSyntaxError as ex:
self._log.error('{} XMLSyntaxError in string: "{}"', msg, normalized_xml_response)
raise RuntimeError('{} in "{}"'.format(ex, normalized_xml_response))
def _sendSoapRequest(self, path, xml, msg):
"""Send SOAP request using HTTP"""
if not isinstance(xml, bytes):
xml = xml.encode('utf-8')
headers = {
'Content-type': 'application/soap+xml; charset=utf-8',
            'User-Agent': 'pysoap',
'Connection': 'keep-alive',
}
commlog.defaultLogger.logSoapReqOut(xml, 'POST')
if self.supportedEncodings:
headers['Accept-Encoding'] = ','.join(self.supportedEncodings)
if self.requestEncodings:
for compr in self.requestEncodings:
if compr in self.supportedEncodings:
xml = self.compressPayload(compr, xml)
headers['Content-Encoding'] = compr
break
if self._chunked_requests:
headers['transfer-encoding'] = "chunked"
xml = mkchunks(xml)
else:
headers['Content-Length'] = str(len(xml))
xml = bytearray(xml) # cast to bytes, required to bypass httplib checks for is str
self._log.debug("{}:POST to netloc='{}' path='{}'", msg, self._netloc, path)
response = None
content = None
def send_request():
do_reopen = False
success = False
try:
self._httpConnection.request('POST', path, body=xml, headers=headers)
return True, do_reopen # success = True
except httplib.CannotSendRequest as ex:
# for whatever reason the response of the previous call was not read. read it and try again
self._log.warn("{}: could not send request, got httplib.CannotSendRequest Error. Will read response and retry", msg)
tmp = self._httpConnection.getresponse()
tmp.read()
except OSError as ex:
if ex.errno in (10053, 10054):
self._log.warn("{}: could not send request, OSError={!r}", msg, ex)
else:
self._log.warn("{}: could not send request, OSError={}", msg, traceback.format_exc())
do_reopen = True
except socket.error as ex:
self._log.warn("{}: could not send request, socket error={!r}", msg, ex)
do_reopen = True
except Exception as ex:
self._log.warn("{}: POST to netloc='{}' path='{}': could not send request, error={!r}\n{}", msg,
self._netloc, path, ex, traceback.format_exc())
return success, do_reopen
def get_response():
try:
return self._httpConnection.getresponse()
except httplib.BadStatusLine as ex:
self._log.warn("{}: invalid http response, error= {!r} ", msg, ex)
raise
except OSError as ex:
if ex.errno in (10053, 10054):
self._log.warn("{}: could not receive response, OSError={!r}", msg, ex)
else:
self._log.warn("{}: could not receive response, OSError={} ({!r})\n{}", msg, ex.errno,
ex, traceback.format_exc())
raise httplib.NotConnected()
except socket.error as ex:
self._log.warn("{}: could not receive response, socket error={!r}", msg, ex)
raise httplib.NotConnected()
except Exception as ex:
self._log.warn("{}: POST to netloc='{}' path='{}': could not receive response, error={!r}\n{}",
msg, self._netloc, path, ex, traceback.format_exc())
raise httplib.NotConnected()
def reopen_http_connection():
self._log.info("{}: will close and reopen the connection and then try again", msg)
self._httpConnection.close()
try:
self._httpConnection.connect()
except Exception as ex:
self._log.error("{}: could not reopen the connection, error={!r}\n{}\ncall-stack ={}",
msg, ex, traceback.format_exc(), ''.join(traceback.format_stack()))
self._httpConnection.close()
raise httplib.NotConnected()
with self._lock:
_retry_send = 2 # ugly construct that allows to retry sending the request once
while _retry_send > 0:
_retry_send -= 1
success, _do_reopen = send_request()
if not success:
if _do_reopen:
reopen_http_connection()
else:
raise httplib.NotConnected()
else:
try:
response = get_response()
_retry_send = -1 # -1 == SUCCESS
except httplib.NotConnected:
self._log.info("{}: will reopen after get_response error", msg)
reopen_http_connection()
if _retry_send != -1:
raise httplib.NotConnected()
content = HTTPReader.read_response_body(response)
if response.status >= 300:
self._log.error(
"{}: POST to netloc='{}' path='{}': could not send request, HTTP response={}\ncontent='{}'", msg,
self._netloc, path, response.status, content)
soapfault = soapenvelope.ReceivedSoapFault.fromXMLString(content)
raise HTTPReturnCodeError(response.status, content, soapfault)
responseHeaders = {k.lower(): v for k, v in response.getheaders()}
self._log.debug('{}: response:{}; content has {} Bytes ', msg, responseHeaders, len(content))
commlog.defaultLogger.logSoapRespIn(content, 'POST')
return content
def _makeGetHeaders(self):
self._getHeaders = {
            'User-Agent': 'pysoap',
'Connection': 'keep-alive'
}
if sys.version < '3':
# Ensure http_method, location and all headers are binary to prevent
# UnicodeError inside httplib.HTTPConnection._send_output.
# httplib in python3 do the same inside itself, don't need to convert it here
self._getHeaders = dict((str(k), str(v)) for k, v in self._getHeaders.items())
if self.supportedEncodings:
self._getHeaders['Accept-Encoding'] = ', '.join(self.supportedEncodings)
def getUrl(self, url, msg):
if not url.startswith('/'):
url = '/' + url
self._log.debug("{} Get {}/{}", msg, self._netloc, url)
with self._lock:
self._httpConnection.request('GET', url, headers=self._getHeaders)
response = self._httpConnection.getresponse()
headers = {k.lower(): v for k, v in response.getheaders()}
_content = response.read()
if 'content-encoding' in headers:
enc = headers['content-encoding']
if enc in self.supportedEncodings:
content = self.decompress(_content, enc)
else:
self._log.warn("{}: unsupported compression ", headers['content-encoding'])
raise httplib.UnknownTransferEncoding
else:
content = _content
return content
| 46.489426
| 161
| 0.599883
|
4a12381dfd5aa39359c827f9caa3376ed41479d9
| 1,141
|
py
|
Python
|
app/main/models/payments.py
|
mukul-mehta/Syphus
|
572d47b6b8dcdd13bd3a956b2a116a8fa3641087
|
[
"MIT"
] | 4
|
2019-08-15T02:12:52.000Z
|
2020-01-05T17:48:46.000Z
|
app/main/models/payments.py
|
mukul-mehta/Syphus
|
572d47b6b8dcdd13bd3a956b2a116a8fa3641087
|
[
"MIT"
] | 74
|
2019-08-14T20:34:33.000Z
|
2020-04-29T20:29:38.000Z
|
app/main/models/payments.py
|
mukul-mehta/Syphus
|
572d47b6b8dcdd13bd3a956b2a116a8fa3641087
|
[
"MIT"
] | 10
|
2019-08-14T19:35:14.000Z
|
2020-01-25T19:04:57.000Z
|
"""DB model for payments"""
from flask import current_app
from app.main import db
from app.main.util.sendgrid import async_send_mail
class Payment(db.Model):
"""
Description of Payment Model.
    Columns
    -----------
    :pay_id: int [pk]
:username: varchar [ref: > users.username, not null]
:amount: float [not null]
:api_response: text [not null]
"""
pay_id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(256), db.ForeignKey(
"user.username"), nullable=False)
amount = db.Column(db.Float, nullable=False)
api_response = db.Column(db.Text)
def __init__(self, user, amount, api_response):
self.username = user.username
self.amount = amount
self.api_response = api_response
async_send_mail(current_app._get_current_object(),
user.email, "Thanks from Ambit", """
We are very grateful to you.""")
    def total(self):
        """Return the sum of all payment amounts recorded for this username."""
        all_pays = self.query.filter_by(username=self.username).all()
        return sum(pay.amount for pay in all_pays)
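# Illustrative sketch (assumes an existing `user` row and an active Flask app
# context; values are placeholders):
#   payment = Payment(user, 25.0, api_response='{"status": "ok"}')
#   db.session.add(payment)
#   db.session.commit()
#   payment.total()   # sum of all payments recorded for user.username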
| 27.829268
| 69
| 0.614373
|
4a12387a45c5e7eeeb4f29aebb955c6e28cd6c05
| 11,965
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/config/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/config/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/config/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/use-multiple-paths/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to multipath
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"use-multiple-paths",
"config",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/config/enabled (boolean)
YANG Description: Whether the use of multiple paths for the same NLRI is
enabled for the neighbor. This value is overridden by
any more specific configuration value.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: Whether the use of multiple paths for the same NLRI is
enabled for the neighbor. This value is overridden by
any more specific configuration value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
enabled = __builtin__.property(_get_enabled, _set_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/use-multiple-paths/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to multipath
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"use-multiple-paths",
"config",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/config/enabled (boolean)
YANG Description: Whether the use of multiple paths for the same NLRI is
enabled for the neighbor. This value is overridden by
any more specific configuration value.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: Whether the use of multiple paths for the same NLRI is
enabled for the neighbor. This value is overridden by
any more specific configuration value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
enabled = __builtin__.property(_get_enabled, _set_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
| 37.984127
| 369
| 0.60443
|
4a1238e40bcb0b52dafd53062e8f06e14ca5e577
| 830
|
py
|
Python
|
update_secrets.py
|
fmaj7/tennis-tracker
|
998cfab411f40fb05398759d42fdb73f8f2a2a15
|
[
"MIT"
] | null | null | null |
update_secrets.py
|
fmaj7/tennis-tracker
|
998cfab411f40fb05398759d42fdb73f8f2a2a15
|
[
"MIT"
] | 1
|
2017-04-23T20:26:22.000Z
|
2017-04-23T20:26:22.000Z
|
update_secrets.py
|
fmaj7/tennis-button
|
998cfab411f40fb05398759d42fdb73f8f2a2a15
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import boto3
import json
bucket = boto3.resource('s3').Bucket('fmaj7-tennis-button')
secret_key = 'poppyseed.json'
def get_secrets():
for obj in bucket.objects.all():
if secret_key == obj.key:
return json.loads(obj.get()['Body'].read())
if __name__ == '__main__':
secrets = get_secrets()
keys = ['group-id', 'payer-id', 'debtor-id', 'csrf-token', 'user-credentials', 'splitwise-session']
new_secrets = {}
for key in keys:
default = secrets.get(key, None)
        value = input('{} [{}]: '.format(key, default))
        new_secrets[key] = value or default
    print("\nUpdating Secrets...")
    bucket.put_object(Key=secret_key, Body=json.dumps(new_secrets))
    print("\nNew Secrets: \n{}".format(json.dumps(get_secrets(), sort_keys=True, indent=4)))
| 29.642857
| 103
| 0.640964
|
4a1238ed31f34943ed052be149f01948f2ac714f
| 3,404
|
py
|
Python
|
models/migrations/0005_auto_20200410_2229.py
|
netvigator/auctions
|
f88bcce800b60083a5d1a6f272c51bb540b8342a
|
[
"MIT"
] | null | null | null |
models/migrations/0005_auto_20200410_2229.py
|
netvigator/auctions
|
f88bcce800b60083a5d1a6f272c51bb540b8342a
|
[
"MIT"
] | 13
|
2019-12-12T03:07:55.000Z
|
2022-03-07T12:59:27.000Z
|
models/migrations/0005_auto_20200410_2229.py
|
netvigator/auctions
|
f88bcce800b60083a5d1a6f272c51bb540b8342a
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.10 on 2020-04-10 22:29
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('models', '0004_auto_20191226_2118'),
]
operations = [
migrations.AlterField(
model_name='model',
name='cExcludeIf',
field=models.TextField(blank=True, help_text='Bot will know item is <b>NOT</b> of this model if any one line matches (each line evaluated separately, put different exclude tests on different lines)<br/>Bot expands hyphens, spaces and number/letter boundaries,<br/><table><tr><td>"208A" </td><td>will exclude</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208 A"</td><td>will exclude</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208-A"</td><td>will exclude</td><td>208A, 208 A & 208-A</td></tr></table>', null=True, verbose_name='Not a hit if this text is found (optional)'),
),
migrations.AlterField(
model_name='model',
name='cKeyWords',
field=models.TextField(blank=True, help_text='Putting text here is optional, but if there is text here, robot will consider it REQUIRED -- must be found in the title <b>IN ADDITION TO</b> model number or name.<br>Put alternate key words on separate lines -- Bot will know item is for this model if words on any one line match.<br/>Bot expands hyphens, spaces and number/letter boundaries,<br/><table><tr><td>"208A" </td><td>will find</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208 A"</td><td>will find</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208-A"</td><td>will find</td><td>208A, 208 A & 208-A</td></tr></table>', null=True, verbose_name='model key words'),
),
migrations.AlterField(
model_name='model',
name='cLookFor',
field=models.TextField(blank=True, help_text='Put nick names, common misspellings and alternate model numbers or names here -- leave blank if Bot only needs to look for the model number or name.<br>Each line is evaluated separately, Bot will know item is in this model if any one line matches.<br/>Bot expands hyphens, spaces and number/letter boundaries,<br/><table><tr><td>"208A" </td><td>will find</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208 A"</td><td>will find</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208-A"</td><td>will find</td><td>208A, 208 A & 208-A</td></tr></table>', null=True, verbose_name='Considered a hit if this text is found (optional -- if you include, bot will also search for this)'),
),
migrations.AlterField(
model_name='model',
name='cTitle',
field=core.models.gotSomethingOutsideTitleParensCharField(db_index=True, help_text='Put the model number or name here -- Bot will search for this in the auction title for each item found.<br/>Optionally, you can put additional description in parentheses (). While searching auction titles, bot will ignore anything in parentheses.<br/>Bot expands hyphens, spaces and number/letter boundaries,<br/><table><tr><td>"208A" </td><td>will find</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208 A"</td><td>will find</td><td>208A, 208 A & 208-A</td></tr><tr><td>"208-A"</td><td>will find</td><td>208A, 208 A & 208-A</td></tr></table>', max_length=48, verbose_name='model number or name'),
),
]
| 97.257143
| 741
| 0.665687
|
4a123cfa86cafb5af022aebe9b3cc497d1050c5c
| 269
|
py
|
Python
|
bionic/utils/gcp_auth.py
|
IDl0T/bionic
|
8eaa868a2e7af81bb561492c045feb414f7c6326
|
[
"Apache-2.0"
] | 98
|
2019-08-29T21:38:44.000Z
|
2022-01-26T04:59:57.000Z
|
bionic/utils/gcp_auth.py
|
IDl0T/bionic
|
8eaa868a2e7af81bb561492c045feb414f7c6326
|
[
"Apache-2.0"
] | 143
|
2019-09-11T15:32:17.000Z
|
2021-06-08T21:48:30.000Z
|
bionic/utils/gcp_auth.py
|
IDl0T/bionic
|
8eaa868a2e7af81bb561492c045feb414f7c6326
|
[
"Apache-2.0"
] | 20
|
2019-09-13T18:13:03.000Z
|
2021-12-03T19:51:01.000Z
|
from bionic.deps.optdep import import_optional_dependency
def get_gcp_project_id():
google_auth = import_optional_dependency(
"google.auth", purpose="Get GCP project id from the environment"
)
_, project = google_auth.default()
return project
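# A minimal usage sketch (hedged: assumes the optional google-auth dependency is
# installed and Application Default Credentials are configured, e.g. via
# `gcloud auth application-default login`):
#
#     project_id = get_gcp_project_id()
#     print("Resolved GCP project:", project_id)
#
# Note that google_auth.default() returns a (credentials, project) tuple and the
# project component can be None when the environment does not pin one, so
# callers may want to guard against that.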
| 26.9
| 72
| 0.739777
|
4a123d11c2643e84702f5c07aef29fd95288035b
| 12,571
|
py
|
Python
|
classifier/preprocess_MS_dataset_for_classifier.py
|
googleinterns/smart-content-summary
|
595c8e2cb0e160a87cacb954a2a030953fdce6c5
|
[
"Apache-2.0"
] | 5
|
2020-05-25T23:27:42.000Z
|
2022-01-15T08:57:47.000Z
|
classifier/preprocess_MS_dataset_for_classifier.py
|
googleinterns/smart-content-summary
|
595c8e2cb0e160a87cacb954a2a030953fdce6c5
|
[
"Apache-2.0"
] | 6
|
2020-11-13T18:56:37.000Z
|
2022-02-10T02:13:31.000Z
|
classifier/preprocess_MS_dataset_for_classifier.py
|
googleinterns/smart-content-summary
|
595c8e2cb0e160a87cacb954a2a030953fdce6c5
|
[
"Apache-2.0"
] | 2
|
2020-09-02T08:06:18.000Z
|
2021-10-31T16:56:16.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Preprocess the Microsoft text summarization dataset for classification task."""
import argparse
import csv
import os
import random
import numpy as np
import preprocess_utils
GRADING_COMMENTS = ["Most important meaning Flawless language", "Most important meaning Minor errors", \
"Most important meaning Disfluent or incomprehensible", "Much meaning Flawless language", \
"Much meaning Minor errors", "Much meaning Disfluent or incomprehensible", \
"Little or none meaning Flawless language", "Little or none meaning Minor errors", \
"Little or none meaning Disfluent or incomprehensible"]
GRADING_NUMBER = ["6", "7", "9", "11", "12", "14", "21", "22", "24"]
GRAMMAR_GRADING_BUCKET = [["9", "14", "24"], ["7", "12", "22"],
["6", "11", "21"]]
MEANING_GRADING_BUCKET = [["21", "22", "24"], ["11", "12", "14"],
["6", "7", "9"]]
PREPROCESSED_FILE_PATH = "~/classifier_preprocessed_MS_dataset.tsv"
TRAIN_FILE_PATH_GRAMMAR = "~/classifier_train_MS_dataset_grammar.tsv"
TUNE_FILE_PATH_GRAMMAR = "~/classifier_tune_MS_dataset_grammar.tsv"
VALID_FILE_PATH_GRAMMAR = "~/classifier_valid_MS_dataset_grammar.tsv"
TRAIN_FILE_PATH_MEANING = "~/classifier_train_MS_dataset_meaning.tsv"
TUNE_FILE_PATH_MEANING = "~/classifier_tune_MS_dataset_meaning.tsv"
VALID_FILE_PATH_MEANING = "~/classifier_valid_MS_dataset_meaning.tsv"
def __process_row(row):
"""Split a row into the original sentence, its corresponding summary and its rating.
Args:
row: a row in the MS dataset tsv file.
Returns:
    current_source: the original sentence of the row
    current_summary_list: a list of summaries corresponding to current_source
    grammar_ratings_list: a list of grammar ratings of the summaries in
      current_summary_list (0 being incorrect or unacceptable and 1 being acceptable)
    meaning_ratings_list: a list of meaning ratings of the summaries in
      current_summary_list (0 being incorrect or unacceptable and 1 being acceptable)
"""
row_flattened = []
for i in range(len(row)):
splitted_row = row[i].split(" ||| ")
for j in range(len(splitted_row)):
if splitted_row[j] not in GRADING_COMMENTS:
row_flattened.append(splitted_row[j])
current_source = row_flattened[2]
current_summary_list = []
current_ratings_list = []
this_summary = row_flattened[3]
this_ratings = []
for i in range(4, len(row_flattened)):
if i + 1 == len(row_flattened):
this_ratings.append(row_flattened[i])
this_ratings = this_ratings[2:]
if len(this_ratings) != 0:
current_summary_list.append(this_summary)
current_ratings_list.append(this_ratings)
elif not row_flattened[i].isnumeric() and not row_flattened[i].split(
";")[0].isnumeric():
this_ratings = this_ratings[2:]
if len(this_ratings) != 0:
current_summary_list.append(this_summary)
current_ratings_list.append(this_ratings)
this_summary = row_flattened[i]
this_ratings = []
else:
this_ratings.append(row_flattened[i])
assert (len(current_summary_list) == len(current_ratings_list))
grammar_ratings_list, meaning_ratings_list = __find_grammar_meaning_ratings(
current_ratings_list)
return current_source, current_summary_list, grammar_ratings_list, meaning_ratings_list
def __find_grammar_meaning_ratings(ratings_list):
""" Given a list of ratings lists, find the grammar and meaning rating.
Args:
ratings_list: a list of list of ratings.
Returns:
grammar_ratings: a list of grammar ratings
meaning_ratings: a list of meaning ratings
"""
grammar_ratings = []
meaning_ratings = []
for rating_list in ratings_list:
grammar_ratings.append(
int(
__find_most_common_bucket(rating_list, GRAMMAR_GRADING_BUCKET) >= 2
))
meaning_ratings.append(
int(
__find_most_common_bucket(rating_list, MEANING_GRADING_BUCKET) >= 2
))
return grammar_ratings, meaning_ratings
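# Interpretation note (derived from the pairing of GRADING_NUMBER with
# GRADING_COMMENTS above): in both bucket lists the index-2 bucket holds the
# best grades (flawless language for grammar, most important meaning for
# meaning), so the `>= 2` test assigns a rating of 1 exactly when the majority
# of human judgments fell into the top bucket.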
def __find_most_common_bucket(items_list, buckets_list):
""" Given a list of buckets and a list of items, find the index of the bucket where
most items are in.
Args:
items_list: a list of objects
buckets_list: a list of object list ("bucket").
Returns:
The index of the bucket where most items are in.
"""
bucket_item_count = np.zeros(len(buckets_list))
for item in items_list:
for bucket_index, bucket in enumerate(buckets_list):
if item in bucket:
bucket_item_count[bucket_index] += 1
return np.argmax(bucket_item_count)
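# Worked example (illustrative values): with buckets_list [["a"], ["b", "c"]]
# and items_list ["b", "c", "a"], bucket 1 collects two votes and bucket 0 one,
# so the function returns 1. On ties, np.argmax returns the lowest winning
# bucket index.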
def __process_file(file_path):
"""Process a tsv file in the MS dataset.
Args:
file_path: direct path to the tsv file
Returns:
sentences: a list of original sentences
summaries: a list of summaries corresponding to the original sentences
grammar_ratings: a list of grammar ratings of the summaries
    meaning_ratings: a list of meaning ratings of the summaries
"""
tsv_file = open(os.path.expanduser(file_path))
read_tsv = csv.reader(tsv_file, delimiter="\t")
sentences = []
summaries = []
grammar_ratings = []
meaning_ratings = []
for row in read_tsv:
row_sentence, row_summary, row_grammar, row_meaning = __process_row(row)
for i in range(len(row_summary)):
sentences.append(row_sentence)
summaries.append(row_summary[i])
grammar_ratings.append(row_grammar[i])
meaning_ratings.append(row_meaning[i])
return sentences, summaries, grammar_ratings, meaning_ratings
def main(args):
"""Preprocess the Microsoft text summarization dataset.
Args:
args: command line arguments.
"""
data_dir = args.raw_data_dir
if not os.path.isdir(os.path.expanduser(data_dir)):
raise Exception("Data directory not found.")
num_of_tuning_sam = args.num_of_tuning
num_of_valid_sam = args.num_of_validation
if num_of_valid_sam < 0 or num_of_tuning_sam < 0:
raise Exception("Number of samples must be non-negative integers")
train_data_file = data_dir + "/train.tsv"
train_sentences, train_summaries, train_grammar, train_meaning = __process_file(
train_data_file)
test_data_file = data_dir + "/test.tsv"
test_sentences, test_summaries, test_grammar, test_meaning = __process_file(
test_data_file)
valid_data_file = data_dir + "/valid.tsv"
valid_sentences, valid_summaries, valid_grammar, valid_meaning = __process_file(
valid_data_file)
tot_sentences = train_sentences + test_sentences + valid_sentences
tot_summaries = train_summaries + test_summaries + valid_summaries
tot_grammar = train_grammar + test_grammar + valid_grammar
tot_meaning = train_meaning + test_meaning + valid_meaning
cleaned_sentences = preprocess_utils.text_strip(tot_sentences)
cleaned_summaries = preprocess_utils.text_strip(tot_summaries)
cleaned_sentences, cleaned_summaries = preprocess_utils.delete_empty_entry(
cleaned_sentences, cleaned_summaries)
preprocess_utils.validate_dataset(cleaned_sentences, cleaned_summaries)
print("Number of samples is", len(cleaned_sentences))
spaced_sentences = preprocess_utils.tokenize_with_space(cleaned_sentences)
spaced_summaries = preprocess_utils.tokenize_with_space(cleaned_summaries)
with open(os.path.expanduser(PREPROCESSED_FILE_PATH), 'wt') as out_file:
tsv_writer = csv.writer(out_file, delimiter='\t')
for i in range(len(spaced_sentences)):
tsv_writer.writerow([
spaced_sentences[i], spaced_summaries[i], tot_grammar[i],
tot_meaning[i]
])
print("-------Preprocessed data saved to", PREPROCESSED_FILE_PATH, "-------")
print("-------Now splitting dataset.-------")
if num_of_tuning_sam + num_of_valid_sam > len(spaced_sentences):
raise Exception(
"The number of tuning and validation samples together exceeds the total sample size of "
        + str(len(spaced_sentences)))
sentence_shuffled = []
summary_shuffled = []
grammar_shuffled = []
meaning_shuffled = []
tune_shuffled = list(range(num_of_tuning_sam))
random.shuffle(tune_shuffled)
valid_shuffled = list(
range(num_of_tuning_sam, num_of_tuning_sam + num_of_valid_sam))
random.shuffle(valid_shuffled)
train_shuffled = list(
range(num_of_tuning_sam + num_of_valid_sam, len(spaced_sentences)))
random.shuffle(train_shuffled)
index_shuffled = tune_shuffled + valid_shuffled + train_shuffled
for i in index_shuffled:
sentence_shuffled.append(spaced_sentences[i])
summary_shuffled.append(spaced_summaries[i])
grammar_shuffled.append(tot_grammar[i])
meaning_shuffled.append(tot_meaning[i])
tuning_range = range(num_of_tuning_sam)
valid_range = range(num_of_tuning_sam, num_of_tuning_sam + num_of_valid_sam)
training_range = range(num_of_tuning_sam + num_of_valid_sam,
len(summary_shuffled))
output_for_grammar_files = [summary_shuffled, grammar_shuffled]
__write_to_file(TUNE_FILE_PATH_GRAMMAR, tuning_range,
output_for_grammar_files)
__write_to_file(VALID_FILE_PATH_GRAMMAR, valid_range,
output_for_grammar_files)
__write_to_file(TRAIN_FILE_PATH_GRAMMAR, training_range,
output_for_grammar_files)
output_for_meaning_files = [
sentence_shuffled, summary_shuffled, meaning_shuffled
]
__write_to_file(TUNE_FILE_PATH_MEANING, tuning_range,
output_for_meaning_files)
__write_to_file(VALID_FILE_PATH_MEANING, valid_range,
output_for_meaning_files)
__write_to_file(TRAIN_FILE_PATH_MEANING, training_range,
output_for_meaning_files)
def __write_to_file(output_path, index_range, lists):
""" Write outputs to a tsv file.
Args:
output_path: the path of the output tsv file
index_range: the range of indices in the list that will be written to the tsv file
lists: a list of lists (columns) that will be written out to the tsv file
"""
with open(os.path.expanduser(output_path), 'wt') as out_file:
tsv_writer = csv.writer(out_file, delimiter='\t')
for i in index_range:
this_row = []
for one_list in lists:
this_row.append(one_list[i])
tsv_writer.writerow(this_row)
print("-------", len(index_range), "samples wrote to", output_path,
"-------")
if __name__ == "__main__":
"""Preprocess the Microsoft text summarization dataset.
  Data needs to be downloaded from https://www.microsoft.com/en-us/download/details.aspx?id=54262 and the absolute
path to the dataset directory is provided as a command line argument.
Dataset is split into training, tuning, and validation sets, with the number of samples in the tuning and validation
set being specified in the command line argument. The three sets are saved in three separate tsv files, and all the
preprocessed data are saved in another tsv file.
usage: preprocess_MS_dataset_for_classifier.py [-h] raw_data_dir num_of_tuning num_of_validation
positional arguments:
raw_data_dir Absolute path to the RawData directory in the MS dataset.
num_of_tuning Number of tuning samples
num_of_validation Number of validation samples
optional arguments:
-h, --help show this help message and exit
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"raw_data_dir",
help="Absolute path to the RawData directory in the MS dataset.")
parser.add_argument("num_of_tuning",
help="Number of tuning samples",
type=int)
parser.add_argument("num_of_validation",
help="Number of validation samples",
type=int)
arguments = parser.parse_args()
main(arguments)
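# Example invocation (hypothetical paths; the RawData directory must contain
# train.tsv, test.tsv and valid.tsv as read by main() above):
#
#     python preprocess_MS_dataset_for_classifier.py ~/MS_dataset/RawData 500 500
#
# This would reserve 500 samples each for tuning and validation and write the
# remaining samples to the training tsv files.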
| 39.284375
| 120
| 0.718081
|
4a123d45326de64c3bceeec79e38d3beb6c3b4b7
| 5,651
|
py
|
Python
|
bg_biz/pay/weixinpay.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
bg_biz/pay/weixinpay.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
bg_biz/pay/weixinpay.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import uuid
from bg_biz.service.config_service import ConfigService
from sharper.flaskapp.helper import get_client_ip
import xml.etree.ElementTree as ET
import time
import random
import urllib2
import hashlib
from urllib import quote
from bg_biz.pay.callback.charge import ChargeExecutor
__author__ = [
'"John Chan" <chenfazhun@163.com>'
]
url = 'https://api.mch.weixin.qq.com/pay/unifiedorder'
key = 'bYnfpw4cVUNPDdyIxxBoYtrzp1OQlEGu'
hiwifi_appid = 'wx11eaa73053dd1666'
hiwifi_mch_id = '1254530101'
foxconn_appid = 'wxb866e7bbbcae3a4a'
foxconn_mch_id = '1364524902'
class WXpay(object):
def __init__(self):
pass
@classmethod
def get_pay_prepay_id(self, trans, source=None):
notify_url = ConfigService.get_host(ConfigService.Host.API) + "/pay/notify/wxpay"
# amount = decimal_pretty(float(trans.amount) / 100)
params = dict(body=trans.title, out_trade_no=str(trans.id), total_fee=str(trans.amount),
spbill_create_ip=get_client_ip(), notify_url=notify_url, trade_type="APP")
if source:
appid = foxconn_appid
mch_id = foxconn_mch_id
else:
appid = hiwifi_appid
mch_id = hiwifi_mch_id
params['appid'] = appid
params['mch_id'] = mch_id
params['nonce_str'] = self.createNoncestr()
sign = self.getSign(params)
params['sign'] = sign
print '------------------weixin------------------', sign
print '------------------', params
xml = self.arrayToXml(params)
res = urllib2.urlopen(url, xml).read()
print 'res-----------------------------', res
array = self.xmlToArray(res)
print '---------------', array
data = dict(appid=appid, partnerid=mch_id, prepayid=array['prepay_id'], package="Sign=WXPay",
noncestr=self.createNoncestr(), timestamp=int(time.time()))
data_sign = self.getSign(data)
data['sign'] = data_sign
# data['callbackurl'] = '/pay/'+trans.id+'/callback'
return str(data)
@classmethod
def queryOrderTest(self, trans, source=None):
if source:
appid = foxconn_appid
mch_id = foxconn_mch_id
else:
appid = hiwifi_appid
mch_id = hiwifi_mch_id
info = {}
url = 'https://api.mch.weixin.qq.com/pay/orderquery'
params = dict(out_trade_no=trans.id)
params['appid'] = appid
params['mch_id'] = mch_id
params['nonce_str'] = self.createNoncestr()
sign = self.getSign(params)
data = '''<xml>
<appid>%s</appid>
<mch_id>%s</mch_id>
<nonce_str>%s</nonce_str>
<out_trade_no>%s</out_trade_no>
<sign>%s</sign>
</xml>''' % (
appid, mch_id, params['nonce_str'], params['out_trade_no'], sign)
req = urllib2.Request(url=url, headers={'Content-Type': 'application/xml', 'charset': 'UTF-8'}, data=data)
res = urllib2.urlopen(req)
data_str = str(res.read())
print data_str
arry_data = self.xmlToArray(data_str)
print 'data------------------', arry_data
return_code = arry_data['return_code']
result_code = arry_data['result_code']
print return_code, result_code
if return_code == 'SUCCESS' and result_code == 'SUCCESS':
total_fee = arry_data['total_fee']
print 'success-----------', total_fee, '------------', trans.amount
if str(trans.amount) == str(total_fee):
print 'total_fee-', total_fee
# ChargeExecutor.execute(trans)
info = {"id": trans.id, "amount": total_fee}
return info
@classmethod
def arrayToXml(self, arr):
"""array转xml"""
xml = ["<xml>"]
print '-----------', arr
for k, v in arr.iteritems():
print k, v
if v.isdigit():
xml.append("<{0}>{1}</{0}>".format(k, v))
else:
xml.append("<{0}><![CDATA[{1}]]></{0}>".format(k, v))
xml.append("</xml>")
return "".join(xml)
@classmethod
def xmlToArray(self, xml):
"""将xml转为array"""
array_data = {}
root = ET.fromstring(xml)
for child in root:
value = child.text
array_data[child.tag] = value
return array_data
@classmethod
def trimString(self, value):
if value is not None and len(value) == 0:
value = None
return value
@classmethod
def createNoncestr(self, length=32):
"""产生随机字符串,不长于32位"""
chars = "abcdefghijklmnopqrstuvwxyz0123456789"
strs = []
for x in range(length):
strs.append(chars[random.randrange(0, len(chars))])
return "".join(strs)
@classmethod
def formatBizQueryParaMap(self, paraMap, urlencode):
"""格式化参数,签名过程需要使用"""
slist = sorted(paraMap)
buff = []
for k in slist:
v = quote(paraMap[k]) if urlencode else paraMap[k]
buff.append("{0}={1}".format(k, v))
return "&".join(buff)
@classmethod
def getSign(self, obj):
"""生成签名"""
# 签名步骤一:按字典序排序参数,formatBizQueryParaMap已做
String = self.formatBizQueryParaMap(obj, False)
# 签名步骤二:在string后加入KEY
String = "{0}&key={1}".format(String, key)
# 签名步骤三:MD5加密
String = hashlib.md5(String).hexdigest()
# 签名步骤四:所有字符转为大写
result_ = String.upper()
return result_
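# Illustrative signing walk-through (dummy values, not real credentials): for
# params {"appid": "wx0", "mch_id": "123", "nonce_str": "abc"} the string to
# sign is "appid=wx0&mch_id=123&nonce_str=abc&key=<merchant key>", and the
# resulting sign is the uppercased MD5 hex digest of that string, matching the
# WeChat Pay v2 MD5 signing scheme implemented by getSign above.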
| 33.046784
| 114
| 0.56114
|
4a123e5f8b073efb400b425743d7c7e5d73de8ff
| 2,661
|
py
|
Python
|
serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
|
FANsZL/hive
|
a0f0fc82d538d117b222b0f696f33e40d8cc023f
|
[
"Apache-2.0"
] | 4,140
|
2015-01-07T11:57:35.000Z
|
2022-03-31T06:26:22.000Z
|
serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
|
FANsZL/hive
|
a0f0fc82d538d117b222b0f696f33e40d8cc023f
|
[
"Apache-2.0"
] | 1,779
|
2015-05-27T04:32:42.000Z
|
2022-03-31T18:53:19.000Z
|
serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
|
FANsZL/hive
|
a0f0fc82d538d117b222b0f696f33e40d8cc023f
|
[
"Apache-2.0"
] | 3,958
|
2015-01-01T15:14:49.000Z
|
2022-03-30T21:08:32.000Z
|
#
# Autogenerated by Thrift Compiler (0.14.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *
SERIALIZATION_LIB = "serialization.lib"
SERIALIZATION_CLASS = "serialization.class"
SERIALIZATION_FORMAT = "serialization.format"
SERIALIZATION_DDL = "serialization.ddl"
SERIALIZATION_NULL_FORMAT = "serialization.null.format"
SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf"
SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest"
SERIALIZATION_SORT_ORDER = "serialization.sort.order"
SERIALIZATION_NULL_SORT_ORDER = "serialization.sort.order.null"
SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object"
SERIALIZATION_ENCODING = "serialization.encoding"
FIELD_DELIM = "field.delim"
COLLECTION_DELIM = "collection.delim"
LINE_DELIM = "line.delim"
MAPKEY_DELIM = "mapkey.delim"
QUOTE_CHAR = "quote.delim"
ESCAPE_CHAR = "escape.delim"
HEADER_COUNT = "skip.header.line.count"
FOOTER_COUNT = "skip.footer.line.count"
VOID_TYPE_NAME = "void"
BOOLEAN_TYPE_NAME = "boolean"
TINYINT_TYPE_NAME = "tinyint"
SMALLINT_TYPE_NAME = "smallint"
INT_TYPE_NAME = "int"
BIGINT_TYPE_NAME = "bigint"
FLOAT_TYPE_NAME = "float"
DOUBLE_TYPE_NAME = "double"
STRING_TYPE_NAME = "string"
CHAR_TYPE_NAME = "char"
VARCHAR_TYPE_NAME = "varchar"
DATE_TYPE_NAME = "date"
DATETIME_TYPE_NAME = "datetime"
TIMESTAMP_TYPE_NAME = "timestamp"
DECIMAL_TYPE_NAME = "decimal"
BINARY_TYPE_NAME = "binary"
INTERVAL_YEAR_MONTH_TYPE_NAME = "interval_year_month"
INTERVAL_DAY_TIME_TYPE_NAME = "interval_day_time"
TIMESTAMPLOCALTZ_TYPE_NAME = "timestamp with local time zone"
LIST_TYPE_NAME = "array"
MAP_TYPE_NAME = "map"
STRUCT_TYPE_NAME = "struct"
UNION_TYPE_NAME = "uniontype"
LIST_COLUMNS = "columns"
LIST_COLUMN_TYPES = "columns.types"
LIST_PARTITION_COLUMNS = "partition.columns"
LIST_PARTITION_COLUMN_TYPES = "partition.columns.types"
TIMESTAMP_FORMATS = "timestamp.formats"
COLUMN_NAME_DELIMITER = "column.name.delimiter"
PrimitiveTypes = set((
"void",
"boolean",
"tinyint",
"smallint",
"int",
"bigint",
"float",
"double",
"string",
"varchar",
"char",
"date",
"datetime",
"timestamp",
"interval_year_month",
"interval_day_time",
"decimal",
"binary",
"timestamp with local time zone",
))
CollectionTypes = set((
"array",
"map",
))
IntegralTypes = set((
"tinyint",
"smallint",
"int",
"bigint",
))
| 28.308511
| 93
| 0.759865
|
4a123ef2891ea5919ddc47f7e1abd76b4d92438a
| 791
|
py
|
Python
|
news/migrations/0002_auto_20220216_0751.py
|
AminAliH47/PicoStyle
|
768daccc6f28f08aa848318d633af1a19544e499
|
[
"Apache-2.0"
] | 19
|
2022-02-16T20:00:08.000Z
|
2022-03-08T17:38:59.000Z
|
news/migrations/0002_auto_20220216_0751.py
|
AminAliH47/PicoStyle
|
768daccc6f28f08aa848318d633af1a19544e499
|
[
"Apache-2.0"
] | 3
|
2022-02-16T20:59:11.000Z
|
2022-02-23T20:40:12.000Z
|
news/migrations/0002_auto_20220216_0751.py
|
AminAliH47/PicoStyle
|
768daccc6f28f08aa848318d633af1a19544e499
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.9 on 2022-02-16 07:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='categorynews',
name='title',
            field=models.CharField(help_text='Max length 20 characters', max_length=20),
),
migrations.AlterField(
model_name='news',
name='title',
            field=models.CharField(help_text='Max length 30 characters', max_length=30),
),
migrations.AlterField(
model_name='newstag',
name='title',
            field=models.CharField(help_text='Max length 20 characters', max_length=20),
),
]
| 27.275862
| 87
| 0.584071
|
4a12408d6d20d2b2d9b795fee308f8b46f0a9c06
| 9,266
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_persistent_volume_claim_spec.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_persistent_volume_claim_spec.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_persistent_volume_claim_spec.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1PersistentVolumeClaimSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_modes': 'list[str]',
'data_source': 'V1TypedLocalObjectReference',
'resources': 'V1ResourceRequirements',
'selector': 'V1LabelSelector',
'storage_class_name': 'str',
'volume_mode': 'str',
'volume_name': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'data_source': 'dataSource',
'resources': 'resources',
'selector': 'selector',
'storage_class_name': 'storageClassName',
'volume_mode': 'volumeMode',
'volume_name': 'volumeName'
}
def __init__(self, access_modes=None, data_source=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None): # noqa: E501
"""V1PersistentVolumeClaimSpec - a model defined in OpenAPI""" # noqa: E501
self._access_modes = None
self._data_source = None
self._resources = None
self._selector = None
self._storage_class_name = None
self._volume_mode = None
self._volume_name = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if data_source is not None:
self.data_source = data_source
if resources is not None:
self.resources = resources
if selector is not None:
self.selector = selector
if storage_class_name is not None:
self.storage_class_name = storage_class_name
if volume_mode is not None:
self.volume_mode = volume_mode
if volume_name is not None:
self.volume_name = volume_name
@property
def access_modes(self):
"""Gets the access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:return: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: list[str]
"""
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
"""Sets the access_modes of this V1PersistentVolumeClaimSpec.
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:param access_modes: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: list[str]
"""
self._access_modes = access_modes
@property
def data_source(self):
"""Gets the data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1TypedLocalObjectReference
"""
return self._data_source
@data_source.setter
def data_source(self, data_source):
"""Sets the data_source of this V1PersistentVolumeClaimSpec.
:param data_source: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1TypedLocalObjectReference
"""
self._data_source = data_source
@property
def resources(self):
"""Gets the resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1PersistentVolumeClaimSpec.
:param resources: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def selector(self):
"""Gets the selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1PersistentVolumeClaimSpec.
:param selector: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def storage_class_name(self):
"""Gets the storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:return: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._storage_class_name
@storage_class_name.setter
def storage_class_name(self, storage_class_name):
"""Sets the storage_class_name of this V1PersistentVolumeClaimSpec.
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:param storage_class_name: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._storage_class_name = storage_class_name
@property
def volume_mode(self):
"""Gets the volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature. # noqa: E501
:return: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
"""Sets the volume_mode of this V1PersistentVolumeClaimSpec.
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature. # noqa: E501
:param volume_mode: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_mode = volume_mode
@property
def volume_name(self):
"""Gets the volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:return: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_name
@volume_name.setter
def volume_name(self, volume_name):
"""Sets the volume_name of this V1PersistentVolumeClaimSpec.
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:param volume_name: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_name = volume_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeClaimSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
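# A minimal construction sketch (field values are illustrative, not canonical):
#
#     spec = V1PersistentVolumeClaimSpec(
#         access_modes=["ReadWriteOnce"],
#         storage_class_name="standard",
#         volume_mode="Filesystem",
#     )
#     print(spec.to_dict())
#
# to_dict() recursively serializes nested models, so `resources` may also be
# populated with a V1ResourceRequirements instance and will be converted along
# with the scalar fields.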
| 33.451264
| 180
| 0.644075
|
4a1240b71bbb453d03a9248226cca87534df2206
| 1,681
|
py
|
Python
|
MNIST_Fashion/convolutional.py
|
bkchiu0/Deep-Learning-Sandbox
|
139acbe0b15e016dcbeb2238df6f8ea55ad8111d
|
[
"MIT"
] | null | null | null |
MNIST_Fashion/convolutional.py
|
bkchiu0/Deep-Learning-Sandbox
|
139acbe0b15e016dcbeb2238df6f8ea55ad8111d
|
[
"MIT"
] | null | null | null |
MNIST_Fashion/convolutional.py
|
bkchiu0/Deep-Learning-Sandbox
|
139acbe0b15e016dcbeb2238df6f8ea55ad8111d
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import numpy as np
import matplotlib.pyplot as plt
from prediction_plotter import plot_image, plot_value_array
data = datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = data.load_data()
# Normalize the data
train_images = train_images / 255.0
test_images = test_images / 255.0
# Build the convolutional network
model = models.Sequential([
layers.Reshape((28, 28, 1), input_shape=(28, 28)),
layers.Conv2D(128, (3, 3), activation='relu',
input_shape=(28, 28, 1), use_bias=True),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(128, (3, 3), activation='relu', use_bias=True),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(128, (3, 3), activation='relu', use_bias=True),
layers.Flatten(),
layers.Dense(512, activation='relu'),
layers.Dense(10, activation='softmax')
])
model.summary()  # summary() prints the table itself; wrapping it in print() would also emit "None"
model.compile(optimizer="nadam", loss="sparse_categorical_crossentropy",
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5,
validation_data=(test_images, test_labels))
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# Visualize predictions
predictions = model.predict(test_images)
num_rows = 5
num_cols = 5
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
| 30.017857
| 75
| 0.71743
|
4a124109acad0305ca20f671babb60e36d898cf7
| 655
|
py
|
Python
|
src/simple-random-dan-app.py
|
wahur666/DAN-Demo
|
0dcf553b3e3b8ea300e828dd2d90688fc2774d0a
|
[
"Apache-2.0"
] | 4
|
2019-08-18T18:39:37.000Z
|
2019-11-24T19:22:15.000Z
|
src/simple-random-dan-app.py
|
wahur666/DAN-Demo
|
0dcf553b3e3b8ea300e828dd2d90688fc2774d0a
|
[
"Apache-2.0"
] | null | null | null |
src/simple-random-dan-app.py
|
wahur666/DAN-Demo
|
0dcf553b3e3b8ea300e828dd2d90688fc2774d0a
|
[
"Apache-2.0"
] | null | null | null |
import sys
import matplotlib.pyplot as plt
from common import load_configurations, create_demand_matrix_for_configuration, render_everyting
from network import RandomDanNetwork
def main(show=False):
configurations = load_configurations()
active_config = configurations[5]
# active_config = configurations[2]
demand_matrix = create_demand_matrix_for_configuration(active_config)
network = RandomDanNetwork(demand_matrix)
network.create_dan(active_config['dan'])
if show:
render_everyting(network)
plt.show()
if __name__ == '__main__':
render = len(sys.argv) == 2 and sys.argv[1] == "-r"
main(render)
| 29.772727
| 96
| 0.746565
|
4a12410de3d527bbae6eec778dc0dc13fa89b591
| 5,439
|
py
|
Python
|
docs/source/conf.py
|
bollwyvl/jupyterlab-metadata-service
|
16810a530e4fedf78fc4b348445f360a20984c36
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
bollwyvl/jupyterlab-metadata-service
|
16810a530e4fedf78fc4b348445f360a20984c36
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
bollwyvl/jupyterlab-metadata-service
|
16810a530e4fedf78fc4b348445f360a20984c36
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'JupyterLab Metadata Service'
copyright = '2019, CalPoly-Quansight'
author = 'CalPoly-Quansight'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'JupyterLabMetadataServicedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'JupyterLabMetadataService.tex', 'JupyterLab Metadata Service Documentation',
'CalPoly-Quansight', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jupyterlabmetadataservice', 'JupyterLab Metadata Service Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'JupyterLabMetadataService', 'JupyterLab Metadata Service Documentation',
author, 'JupyterLabMetadataService', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 30.385475
| 94
| 0.656371
|
4a12423fbac25320cf484bc974743a38a93c772e
| 6,098
|
py
|
Python
|
problem_unittests.py
|
keyvantaj/Plagiarism
|
76ce28fddf01744e7d7bd19ef2c8fba722f6a77f
|
[
"MIT"
] | null | null | null |
problem_unittests.py
|
keyvantaj/Plagiarism
|
76ce28fddf01744e7d7bd19ef2c8fba722f6a77f
|
[
"MIT"
] | null | null | null |
problem_unittests.py
|
keyvantaj/Plagiarism
|
76ce28fddf01744e7d7bd19ef2c8fba722f6a77f
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock, patch
import sklearn.naive_bayes
import numpy as np
import pandas as pd
import re
# test csv file
TEST_CSV = 'data/test_info.csv'
class AssertTest(object):
'''Defines general test behavior.'''
def __init__(self, params):
self.assert_param_message = '\n'.join([str(k) + ': ' + str(v) + '' for k, v in params.items()])
def test(self, assert_condition, assert_message):
assert assert_condition, assert_message + '\n\nUnit Test Function Parameters\n' + self.assert_param_message
def _print_success_message():
print('Tests Passed!')
# test clean_dataframe
def test_numerical_df(numerical_dataframe):
# test result
transformed_df = numerical_dataframe(TEST_CSV)
# Check type is a DataFrame
assert isinstance(transformed_df, pd.DataFrame), 'Returned type is {}.'.format(type(transformed_df))
# check columns
column_names = list(transformed_df)
assert 'File' in column_names, 'No File column, found.'
assert 'Task' in column_names, 'No Task column, found.'
assert 'Category' in column_names, 'No Category column, found.'
assert 'Class' in column_names, 'No Class column, found.'
# check conversion values
assert transformed_df.loc[0, 'Category'] == 1, '`heavy` plagiarism mapping test, failed.'
assert transformed_df.loc[2, 'Category'] == 0, '`non` plagiarism mapping test, failed.'
assert transformed_df.loc[30, 'Category'] == 3, '`cut` plagiarism mapping test, failed.'
assert transformed_df.loc[5, 'Category'] == 2, '`light` plagiarism mapping test, failed.'
assert transformed_df.loc[37, 'Category'] == -1, 'original file mapping test, failed; should have a Category = -1.'
assert transformed_df.loc[41, 'Category'] == -1, 'original file mapping test, failed; should have a Category = -1.'
_print_success_message()
def test_containment(complete_df, containment_fn):
# check basic format and value
# for n = 1 and just the fifth file
test_val = containment_fn(complete_df, 1, 'g0pA_taske.txt')
assert isinstance(test_val, float), 'Returned type is {}.'.format(type(test_val))
assert test_val<=1.0, 'It appears that the value is not normalized; expected a value <=1, got: '+str(test_val)
# known vals for first few files
filenames = ['g0pA_taska.txt', 'g0pA_taskb.txt', 'g0pA_taskc.txt', 'g0pA_taskd.txt']
ngram_1 = [0.39814814814814814, 1.0, 0.86936936936936937, 0.5935828877005348]
ngram_3 = [0.0093457943925233638, 0.96410256410256412, 0.61363636363636365, 0.15675675675675677]
# results for comparison
results_1gram = []
results_3gram = []
for i in range(4):
val_1 = containment_fn(complete_df, 1, filenames[i])
val_3 = containment_fn(complete_df, 3, filenames[i])
results_1gram.append(val_1)
results_3gram.append(val_3)
# check correct results
assert all(np.isclose(results_1gram, ngram_1, rtol=1e-04)), \
'n=1 calculations are incorrect. Double check the intersection calculation.'
# check correct results
assert all(np.isclose(results_3gram, ngram_3, rtol=1e-04)), \
'n=3 calculations are incorrect.'
_print_success_message()
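# For orientation (hedged: the exact counting is up to the tested containment_fn),
# containment is a normalized n-gram overlap:
#
#     containment_n = |ngrams(answer, n) ∩ ngrams(source, n)| / |ngrams(answer, n)|
#
# e.g. for n=1, answer "a b c" against source "a b d" shares the unigrams
# {a, b}, giving 2/3 ≈ 0.667, always a value in [0, 1] as asserted above.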
def test_lcs(df, lcs_word):
test_index = 10 # file 10
# get answer file text
answer_text = df.loc[test_index, 'Text']
# get text for orig file
# find the associated task type (one character, a-e)
task = df.loc[test_index, 'Task']
# we know that source texts have Class = -1
orig_rows = df[(df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calculate LCS
test_val = lcs_word(answer_text, source_text)
# check type
assert isinstance(test_val, float), 'Returned type is {}.'.format(type(test_val))
assert test_val<=1.0, 'It appears that the value is not normalized; expected a value <=1, got: '+str(test_val)
# known vals for first few files
lcs_vals = [0.1917808219178082, 0.8207547169811321, 0.8464912280701754, 0.3160621761658031, 0.24257425742574257]
# results for comparison
results = []
for i in range(5):
# get answer and source text
answer_text = df.loc[i, 'Text']
task = df.loc[i, 'Task']
# we know that source texts have Class = -1
orig_rows = df[(df['Class'] == -1)]
orig_row = orig_rows[(orig_rows['Task'] == task)]
source_text = orig_row['Text'].values[0]
# calc lcs
val = lcs_word(answer_text, source_text)
results.append(val)
# check correct results
assert all(np.isclose(results, lcs_vals, rtol=1e-05)), 'LCS calculations are incorrect.'
_print_success_message()
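# Worked example (illustrative; assumes lcs_word normalizes by the number of
# words in the answer text, which keeps the score in [0, 1] as asserted above):
# for answer "the cat sat" against source "the dog sat", the word-level longest
# common subsequence is ["the", "sat"] with length 2, so the score is 2/3.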
def test_data_split(train_x, train_y, test_x, test_y):
# check types
assert isinstance(train_x, np.ndarray),\
'train_x is not an array, instead got type: {}'.format(type(train_x))
assert isinstance(train_y, np.ndarray),\
'train_y is not an array, instead got type: {}'.format(type(train_y))
assert isinstance(test_x, np.ndarray),\
'test_x is not an array, instead got type: {}'.format(type(test_x))
assert isinstance(test_y, np.ndarray),\
'test_y is not an array, instead got type: {}'.format(type(test_y))
# should hold all 95 submission files
assert len(train_x) + len(test_x) == 95, \
'Unexpected amount of train + test data. Expecting 95 answer text files, got ' +str(len(train_x) + len(test_x))
assert len(test_x) > 1, \
'Unexpected amount of test data. There should be multiple test files.'
# check shape
assert train_x.shape[1]==2, \
'train_x should have as many columns as selected features, got: {}'.format(train_x.shape[1])
assert len(train_y.shape)==1, \
'train_y should be a 1D array, got shape: {}'.format(train_y.shape)
_print_success_message()
| 40.653333
| 119
| 0.658085
|
4a12426e93ffa9bbcdf747c5e9e81c4748b4a61e
| 3,314
|
py
|
Python
|
tests/fields/test_logical_types.py
|
slawak/dataclasses-avroschema
|
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
|
[
"MIT"
] | null | null | null |
tests/fields/test_logical_types.py
|
slawak/dataclasses-avroschema
|
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
|
[
"MIT"
] | null | null | null |
tests/fields/test_logical_types.py
|
slawak/dataclasses-avroschema
|
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
|
[
"MIT"
] | null | null | null |
import datetime
import uuid
import pytest
from dataclasses_avroschema import fields
from . import consts
@pytest.mark.parametrize(
"python_type,avro_type,logical_type", consts.LOGICAL_TYPES_AND_DEFAULTS
)
def test_logical_types(python_type, avro_type, logical_type):
name = "a logical type"
python_type = python_type
field = fields.Field(name, python_type)
expected = {"name": name, "type": {"type": avro_type, "logicalType": logical_type}}
assert expected == field.to_dict()
@pytest.mark.parametrize(
"python_type,avro_type,logical_type", consts.LOGICAL_TYPES_AND_DEFAULTS
)
def test_logical_types_with_null_as_default(python_type, avro_type, logical_type):
name = "a logical type"
python_type = python_type
field = fields.Field(name, python_type, None)
expected = {
"name": name,
"type": {"type": avro_type, "logicalType": logical_type},
"default": fields.NULL,
}
assert expected == field.to_dict()
def test_logical_type_date_with_default():
name = "a date"
python_type = datetime.date
field = fields.Field(name, python_type, consts.now.date())
date_time = datetime.datetime.combine(consts.now, datetime.datetime.min.time())
ts = (date_time - datetime.datetime(1970, 1, 1)).total_seconds()
expected = {
"name": name,
"type": {"type": fields.INT, "logicalType": fields.DATE},
"default": ts / (3600 * 24),
}
assert expected == field.to_dict()
def test_logical_type_time_with_default():
name = "a time"
python_type = datetime.time
time = consts.now.time()
field = fields.Field(name, python_type, time)
hour, minutes, seconds, microseconds = (
time.hour,
time.minute,
time.second,
time.microsecond,
)
    milliseconds = int(
(((hour * 60 + minutes) * 60 + seconds) * 1000) + (microseconds / 1000)
)
expected = {
"name": name,
"type": {"type": fields.INT, "logicalType": fields.TIME_MILLIS},
"default": miliseconds,
}
assert expected == field.to_dict()
def test_logical_type_datetime_with_default():
name = "a datetime"
python_type = datetime.datetime
field = fields.Field(name, python_type, consts.now)
ts = (consts.now - datetime.datetime(1970, 1, 1)).total_seconds()
expected = {
"name": name,
"type": {"type": fields.LONG, "logicalType": fields.TIMESTAMP_MILLIS},
"default": ts * 1000,
}
assert expected == field.to_dict()
def test_logical_type_uuid_with_default():
name = "a uuid"
python_type = uuid.uuid4
default = uuid.uuid4()
field = fields.Field(name, python_type, default)
expected = {
"name": name,
"type": {"type": fields.STRING, "logicalType": fields.UUID},
"default": str(default),
}
assert expected == field.to_dict()
@pytest.mark.parametrize(
"logical_type,invalid_default,msg", consts.LOGICAL_TYPES_AND_INVALID_DEFAULTS
)
def test_invalid_default_values(logical_type, invalid_default, msg):
name = "a_field"
field = fields.Field(name, logical_type, invalid_default)
msg = msg or f"Invalid default type. Default should be {logical_type}"
with pytest.raises(AssertionError, match=msg):
field.to_dict()
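# For reference, the concrete mappings exercised by the tests above are:
#   datetime.date     -> fields.INT    + fields.DATE             (default: days since epoch)
#   datetime.time     -> fields.INT    + fields.TIME_MILLIS      (default: milliseconds since midnight)
#   datetime.datetime -> fields.LONG   + fields.TIMESTAMP_MILLIS (default: milliseconds since epoch)
#   uuid.uuid4        -> fields.STRING + fields.UUID             (default: str(uuid))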
| 26.725806
| 87
| 0.661738
|
4a1243c52355a4c845e7235701309e2c58d147e3
| 17,291
|
py
|
Python
|
bzt/modules/locustio.py
|
3dgiordano/taurus
|
77cb31b6f0e5c27545094f600ac2b595fa76d992
|
[
"Apache-2.0"
] | 1
|
2018-02-17T16:00:34.000Z
|
2018-02-17T16:00:34.000Z
|
bzt/modules/locustio.py
|
3dgiordano/taurus
|
77cb31b6f0e5c27545094f600ac2b595fa76d992
|
[
"Apache-2.0"
] | 5
|
2018-03-10T20:50:24.000Z
|
2021-08-20T15:07:32.000Z
|
bzt/modules/locustio.py
|
3dgiordano/taurus
|
77cb31b6f0e5c27545094f600ac2b595fa76d992
|
[
"Apache-2.0"
] | 1
|
2018-05-04T23:06:15.000Z
|
2018-05-04T23:06:15.000Z
|
"""
Module holds all stuff regarding Locust tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import sys
import time
from collections import OrderedDict, Counter
from imp import find_module
from subprocess import STDOUT
import os
from bzt import ToolError, TaurusConfigError
from bzt.six import PY3, iteritems
from bzt.engine import ScenarioExecutor, FileLister, Scenario, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsProvider, DataPoint, KPISet
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.jmeter import JTLReader
from bzt.requests_model import HTTPRequest
from bzt.utils import get_full_path, ensure_is_dict, PythonGenerator, FileReader
from bzt.utils import shutdown_process, RequiredTool, BetterDict, dehumanize_time
class LocustIOExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
def __init__(self):
super(LocustIOExecutor, self).__init__()
self.kpi_jtl = None
self.process = None
self.__out = None
self.is_master = False
self.slaves_ldjson = None
self.expected_slaves = 0
self.scenario = None
self.script = None
self.log_file = None
def prepare(self):
self.install_required_tools()
self.scenario = self.get_scenario()
self.__setup_script()
self.is_master = self.execution.get("master", self.is_master)
if self.is_master:
count_error = TaurusConfigError("Slaves count required when starting in master mode")
slaves = self.execution.get("slaves", count_error)
self.expected_slaves = int(slaves)
self.engine.existing_artifact(self.script)
if self.is_master:
self.slaves_ldjson = self.engine.create_artifact("locust-slaves", ".ldjson")
self.reader = SlavesReader(self.slaves_ldjson, self.expected_slaves, self.log)
else:
self.kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
self.reader = JTLReader(self.kpi_jtl, self.log, None)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
def install_required_tools(self):
tool = LocustIO(self.log)
if not tool.check_if_installed():
tool.install()
def startup(self):
self.start_time = time.time()
load = self.get_load()
concurrency = load.concurrency or 1
if load.ramp_up:
hatch = concurrency / float(load.ramp_up)
else:
hatch = concurrency
wrapper = os.path.join(get_full_path(__file__, step_up=2), "resources", "locustio-taurus-wrapper.py")
self.env.add_path({"PYTHONPATH": self.engine.artifacts_dir})
self.env.add_path({"PYTHONPATH": os.getcwd()})
self.env.set({"LOCUST_DURATION": dehumanize_time(load.duration)})
self.log_file = self.engine.create_artifact("locust", ".log")
args = [sys.executable, wrapper, '-f', self.script]
args += ['--logfile=%s' % self.log_file]
args += ["--no-web", "--only-summary", ]
args += ["--clients=%d" % concurrency, "--hatch-rate=%f" % hatch]
if load.iterations:
args.append("--num-request=%d" % load.iterations)
if self.is_master:
args.extend(["--master", '--expect-slaves=%s' % self.expected_slaves])
self.env.set({"SLAVES_LDJSON": self.slaves_ldjson})
else:
self.env.set({"JTL": self.kpi_jtl})
host = self.get_scenario().get("default-address", None)
if host is not None:
args.append('--host=%s' % host)
self.__out = open(self.engine.create_artifact("locust", ".out"), 'w')
self.process = self.execute(args, stderr=STDOUT, stdout=self.__out)
def get_widget(self):
"""
Add progress widget to console screen sidebar
:rtype: ExecutorWidget
"""
if not self.widget:
label = "%s" % self
self.widget = ExecutorWidget(self, "Locust.io: " + label.split('/')[1])
return self.widget
def check(self):
# TODO: when we're in master mode and get no results and exceeded duration - shut down then
retcode = self.process.poll()
if retcode is not None:
if retcode != 0:
self.log.warning("Locust exited with non-zero code: %s", retcode)
return True
return False
def resource_files(self):
self.scenario = self.get_scenario()
script = self.scenario.get(Scenario.SCRIPT, None)
if script:
return [script]
else:
return []
def __tests_from_requests(self):
filename = self.engine.create_artifact("generated_locust", ".py")
locust_test = LocustIOScriptBuilder(self.scenario, self.log)
locust_test.build_source_code()
locust_test.save(filename)
return filename
def __setup_script(self):
self.script = self.get_script_path()
if not self.script:
if "requests" in self.scenario:
self.script = self.__tests_from_requests()
else:
msg = "There must be a script file or requests for its generation "
msg += "to run Locust (%s)" % self.execution.get('scenario')
raise TaurusConfigError(msg)
def shutdown(self):
try:
shutdown_process(self.process, self.log)
finally:
if self.__out:
self.__out.close()
def has_results(self):
master_results = self.is_master and self.reader.cumulative
local_results = not self.is_master and self.reader and self.reader.buffer
if master_results or local_results:
return True
else:
return False
def get_error_diagnostics(self):
diagnostics = []
if self.__out is not None:
with open(self.__out.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Locust STDOUT:\n" + contents)
if self.log_file is not None and os.path.exists(self.log_file):
with open(self.log_file) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Locust log:\n" + contents)
return diagnostics
class LocustIO(RequiredTool):
def __init__(self, parent_logger):
super(LocustIO, self).__init__("LocustIO", "")
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
try:
find_module("locust")
self.already_installed = True
except ImportError:
self.log.error("LocustIO is not installed, see http://docs.locust.io/en/latest/installation.html")
return False
return True
def install(self):
if PY3:
raise ToolError("LocustIO is not currently compatible with Python 3.x")
msg = "Unable to locate locustio package. Please install it like this: pip install locustio"
raise ToolError(msg)
class SlavesReader(ResultsProvider):
def __init__(self, filename, num_slaves, parent_logger):
"""
:type filename: str
:type num_slaves: int
:type parent_logger: logging.Logger
"""
super(SlavesReader, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.join_buffer = {}
self.num_slaves = num_slaves
self.file = FileReader(filename=filename, parent_logger=self.log)
self.read_buffer = ""
def _calculate_datapoints(self, final_pass=False):
self.read_buffer += self.file.get_bytes(size=1024 * 1024, last_pass=final_pass)
while "\n" in self.read_buffer:
_line = self.read_buffer[:self.read_buffer.index("\n") + 1]
self.read_buffer = self.read_buffer[len(_line):]
self.fill_join_buffer(json.loads(_line))
max_full_ts = self.get_max_full_ts()
if max_full_ts is not None:
for point in self.merge_datapoints(max_full_ts):
yield point
def merge_datapoints(self, max_full_ts):
for key in sorted(self.join_buffer.keys(), key=int):
if int(key) <= max_full_ts:
sec_data = self.join_buffer.pop(key)
self.log.debug("Processing complete second: %s", key)
point = DataPoint(int(key))
for sid, item in iteritems(sec_data):
point.merge_point(self.point_from_locust(key, sid, item))
point.recalculate()
yield point
def get_max_full_ts(self):
max_full_ts = None
for key in sorted(self.join_buffer.keys(), key=int):
            if len(self.join_buffer[key]) >= self.num_slaves:
max_full_ts = int(key)
return max_full_ts
def fill_join_buffer(self, data):
self.log.debug("Got slave data: %s", data)
for stats_item in data['stats']:
for timestamp in stats_item['num_reqs_per_sec'].keys():
if timestamp not in self.join_buffer:
self.join_buffer[timestamp] = {}
self.join_buffer[timestamp][data['client_id']] = data
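    # Added note (illustrative, not in the original module): after a few
    # fill_join_buffer() calls the buffer maps timestamp -> slave id -> data,
    # e.g. roughly {"1500000000": {"slave-0": {...}, "slave-1": {...}}};
    # get_max_full_ts() above only advances past seconds for which every
    # configured slave has reported.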
@staticmethod
def point_from_locust(timestamp, sid, data):
"""
:type timestamp: str
:type sid: str
:type data: dict
:rtype: DataPoint
"""
point = DataPoint(int(timestamp))
point[DataPoint.SOURCE_ID] = sid
overall = KPISet()
for item in data['stats']:
if timestamp not in item['num_reqs_per_sec']:
continue
kpiset = KPISet()
kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
kpiset[KPISet.CONCURRENCY] = data['user_count']
kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
if item['num_requests']:
avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt
for err in data['errors'].values():
if err['name'] == item['name']:
new_err = KPISet.error_item_skel(err['error'], None, err['occurences'], KPISet.ERRTYPE_ERROR,
Counter())
KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
kpiset[KPISet.FAILURES] += err['occurences']
point[DataPoint.CURRENT][item['name']] = kpiset
overall.merge_kpis(kpiset)
point[DataPoint.CURRENT][''] = overall
point.recalculate()
return point
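# Hedged illustration (added; not part of the original module). A minimal
# slave payload of the shape point_from_locust() expects; the field names
# mirror the accesses above, while the concrete values are invented:
def _example_locust_point():
    sample = {
        "client_id": "slave-0",
        "user_count": 10,
        "errors": {},
        "stats": [{
            "name": "/index",
            "num_reqs_per_sec": {"1500000000": 5},
            "num_requests": 5,
            "total_response_time": 500,  # milliseconds, hence the /1000.0 above
            "total_content_length": 2048,
        }],
    }
    return SlavesReader.point_from_locust("1500000000", "slave-0", sample)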
class LocustIOScriptBuilder(PythonGenerator):
IMPORTS = """
from gevent import sleep
from re import findall, compile
from locust import HttpLocust, TaskSet, task
"""
def build_source_code(self):
self.log.debug("Generating Python script for LocustIO")
header_comment = self.gen_comment("This script was generated by Taurus", indent=0)
scenario_class = self.gen_class_definition("UserBehaviour", ["TaskSet"])
swarm_class = self.gen_class_definition("GeneratedSwarm", ["HttpLocust"])
imports = self.add_imports()
self.root.append(header_comment)
self.root.append(imports)
self.root.append(scenario_class)
self.root.append(swarm_class)
swarm_class.append(self.gen_statement('task_set = UserBehaviour', indent=4))
default_address = self.scenario.get("default-address", None)
if default_address is None:
default_address = ''
swarm_class.append(self.gen_statement('host = "%s"' % default_address, indent=4))
swarm_class.append(self.gen_statement('min_wait = %s' % 0, indent=4))
swarm_class.append(self.gen_statement('max_wait = %s' % 0, indent=4))
swarm_class.append(self.gen_new_line(indent=0))
scenario_class.append(self.gen_decorator_statement('task(1)'))
scenario_class.append(self.__gen_task())
scenario_class.append(self.gen_new_line(indent=0))
def __gen_task(self):
task = self.gen_method_definition("generated_task", ['self'])
think_time = dehumanize_time(self.scenario.get('think-time', None))
global_headers = self.scenario.get_headers()
if not self.scenario.get("keepalive", True):
global_headers['Connection'] = 'close'
for req in self.scenario.get_requests():
if not isinstance(req, HTTPRequest):
msg = "Locust script generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, req.NAME)
continue
method = req.method.lower()
            if method not in ('get', 'delete', 'head', 'options', 'patch', 'put', 'post'):
raise TaurusConfigError("Wrong Locust request type: %s" % method)
timeout = req.priority_option('timeout', default='30s')
self.__gen_check(method, req, task, dehumanize_time(timeout), global_headers)
if req.think_time:
task.append(self.gen_statement("sleep(%s)" % dehumanize_time(req.think_time)))
else:
if think_time:
task.append(self.gen_statement("sleep(%s)" % think_time))
task.append(self.gen_new_line())
return task
@staticmethod
def __get_params_line(req, timeout, headers):
param_dict = {'url': '"%s"' % req.url, 'timeout': timeout}
if req.body:
if isinstance(req.body, dict):
param_dict['data'] = json.dumps(req.body)
else:
param_dict['data'] = '"%s"' % req.body
if headers:
param_dict['headers'] = json.dumps(headers)
keys = (list(param_dict.keys()))
keys.sort()
return ', '.join(['%s=%s' % (key, param_dict[key]) for key in keys])
def __gen_check(self, method, req, task, timeout, global_headers):
assertions = req.config.get("assert", [])
first_assert = True
if assertions:
statement = 'with self.client.%s(%s, catch_response=True) as response:'
else:
statement = "self.client.%s(%s)"
headers = OrderedDict()
if global_headers:
sorted_headers = OrderedDict(sorted(global_headers.items(), key=lambda t: t[0]))
headers.update(sorted_headers)
if req.headers:
headers.update(req.headers)
task.append(self.gen_statement(statement % (method, self.__get_params_line(req, timeout, headers))))
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
self.__gen_assertion(task, assertion, first_assert)
first_assert = False
if assertions:
task.append(self.gen_statement('else:', indent=12))
task.append(self.gen_statement('response.success()', indent=16))
def __gen_assertion(self, task, assertion, is_first):
subject = assertion.get("subject", Scenario.FIELD_BODY)
values = [str(_assert) for _assert in assertion['contains']]
if subject == 'body':
content = 'response.content'
elif subject == 'http-code':
content = 'str(response.status_code)'
else:
raise TaurusConfigError('Wrong subject for Locust assertion: %s' % subject)
if assertion.get('not', False):
attr_not = ''
func_name = 'any'
else:
attr_not = ' not'
func_name = 'all'
if assertion.get("regexp", True):
expression = 'findall(compile(str(val)), %(content)s)' % {'content': content}
else:
expression = 'str(val) in %s' % content
statement = 'if%(not)s %(func)s(%(expression)s for val in %(values)s):'
statement = statement % {'not': attr_not, 'func': func_name, 'expression': expression, 'values': values}
if not is_first:
statement = 'el' + statement
task.append(self.gen_statement(statement, indent=12))
statement = 'response.failure("%(values)s%(not)s found in %(subject)s")'
statement = statement % {'values': values, 'not': attr_not, 'subject': subject}
task.append(self.gen_statement(statement, indent=16))
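# For reference (added comment, not in the original source): for a GET request
# with a single "contains" assertion the builder above emits roughly:
#
#   with self.client.get(timeout=30.0, url="http://host/",
#                        catch_response=True) as response:
#       if not all(findall(compile(str(val)), response.content)
#                  for val in ['token']):
#           response.failure("['token'] not found in body")
#       else:
#           response.success()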
| 39.208617
| 113
| 0.618646
|
4a1244ad8144c58cdd10dab426988123a0b9fa9f
| 2,291
|
py
|
Python
|
project-obj/project_obj - Copy.py
|
HakkaTjakka/earth-reverse-engineering_github
|
6c52e69fcb33c5c06f634db874785d2454fa32a6
|
[
"Unlicense"
] | null | null | null |
project-obj/project_obj - Copy.py
|
HakkaTjakka/earth-reverse-engineering_github
|
6c52e69fcb33c5c06f634db874785d2454fa32a6
|
[
"Unlicense"
] | null | null | null |
project-obj/project_obj - Copy.py
|
HakkaTjakka/earth-reverse-engineering_github
|
6c52e69fcb33c5c06f634db874785d2454fa32a6
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import sys
from projection import projection
EARTH_RADIUS = 6371000
if len(sys.argv)==2:
print (str(len(sys.argv)))
print ('arg[1]='+sys.argv[1])
with open(sys.argv[1]) as fd:
lines = fd.read().splitlines()
elif len(sys.argv)==1:
with open("in.obj") as fd:
lines = fd.read().splitlines()
#lines = np.array(lines, dtype=np.str)
lines = np.array(lines, dtype=str)
#lines = lines[np.logical_not(np.char.startswith(lines, "vn "))] # delete vertex normals
offset_x=3899275.0
offset_y=348997.0
offset_z=5026376.0
#v 3268023.6848134077 27.84330720361322 -5319793.639094348
# extract vertices
idx = np.where(np.char.startswith(lines, "v "))
v = lines[idx]
v = np.char.split(v, " ")
v = np.array(list(v))[:, 1:].astype(float)
o = v
v[:, 0]+=offset_x
v[:, 1]+=offset_y
v[:, 2]+=offset_z
# convert to lat/lon/ele
rad = np.linalg.norm(v, axis=1)[None, :]
lat = np.arcsin(v[:, 2]/rad)*180/np.pi
lon = (np.arctan2(v[:, 1], v[:, 0])*180/np.pi)[None, :]
rad -= EARTH_RADIUS # TODO: find the correct way to get elevation (this is bad but ellipsoid was worse)
v = np.array([lat, lon, rad]).transpose()[:, 0]
# pick the first point, and use it as the origin to find the local transformation matrix
old_origin = v[0, :2]
new_origin = np.array(projection.fromGeo(old_origin[1], old_origin[0]))
i = np.array(projection.fromGeo(old_origin[1], old_origin[0] + 0.01)) - new_origin
j = np.array(projection.fromGeo(old_origin[1] + 0.01, old_origin[0])) - new_origin
basis = 100*np.array((i, j))
# apply the transformation to every lat,lon in the array
v[:, :2] -= old_origin
v[:, :2] = np.einsum("ij,ni->nj", basis, v[:, :2])
v[:, :2] += new_origin
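# Added note (illustrative): `basis` above is a finite-difference Jacobian of
# the projection, scaled by 100 because the probe step is 0.01 degrees. A
# hypothetical sanity check for small offsets dlat, dlon would be:
#   p = projection.fromGeo(old_origin[1] + dlon, old_origin[0] + dlat)
#   approx = new_origin + np.dot([dlat, dlon], basis)
# and `approx` should land close to `p`.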
# swap y and z because minecraft is sideways
v[:, 2], v[:, 1] = v[:, 1].copy(), v[:, 2].copy()
o[:, 2]=v[:, 1]
o_out = []
for i in range(len(o)):
o_out.append("v {} {} {}".format(o[i, 0]-offset_x, o[i, 1]-offset_y, o[i, 2]))
o_out = np.array(o_out, dtype=str)
lines[idx] = o_out
with open("out2.obj", "w") as fd:
fd.write("\n".join(lines))
## convert to string
#v_out = []
#for i in range(len(v)):
# v_out.append("v {} {} {}".format(v[i, 0], v[i, 1], v[i, 2]))
#v_out = np.array(v_out, dtype=str)
#
#lines[idx] = v_out
#with open("out.obj", "w") as fd:
# fd.write("\n".join(lines))
| 26.952941
| 104
| 0.633348
|
4a1244bece0414494a040235cd473377a065903d
| 5,246
|
py
|
Python
|
applications/generate/philadelphia/repository.py
|
reaster/philadelphia
|
977460abab8403f5efca7da8b12599633e6fd915
|
[
"Apache-2.0"
] | null | null | null |
applications/generate/philadelphia/repository.py
|
reaster/philadelphia
|
977460abab8403f5efca7da8b12599633e6fd915
|
[
"Apache-2.0"
] | null | null | null |
applications/generate/philadelphia/repository.py
|
reaster/philadelphia
|
977460abab8403f5efca7da8b12599633e6fd915
|
[
"Apache-2.0"
] | null | null | null |
import collections
import itertools
import os
import re
import xml.etree.ElementTree
from . import model
def read_dialect(dirname):
version = _read_version(dirname)
class_name_prefix = _class_name_prefix(version)
package_name = _package_name(class_name_prefix)
name = _name(version)
return model.Dialect(package_name, class_name_prefix, name)
def _class_name_prefix(version):
return ''.join([version.protocol, version.major, version.minor, version.sp])
def _package_name(class_name_prefix):
return 'com.paritytrading.philadelphia.{}'.format(class_name_prefix.lower())
def _name(version):
name = '{} {}.{}'.format(version.protocol, version.major, version.minor)
if version.sp:
return '{} {}'.format(name, version.sp)
else:
return name
_Version = collections.namedtuple('_Version', ['protocol', 'major', 'minor', 'sp'])
def _read_version(dirname):
filename = _messages_path(dirname)
tree = xml.etree.ElementTree.parse(filename)
value = tree.getroot().get('version')
match = re.match(r'(?P<protocol>.+)\.(?P<major>\d+)\.(?P<minor>\d+)((?P<sp>SP\d+))?', value)
return _Version(protocol=match.group('protocol'), major=match.group('major'),
minor=match.group('minor'), sp=match.group('sp') or '')
def read_messages(dirname):
def message(elem):
name = elem.find('Name').text
msg_type = elem.find('MsgType').text
return model.Message(name, msg_type)
filename = _messages_path(dirname)
tree = xml.etree.ElementTree.parse(filename)
return [message(elem) for elem in tree.findall('Message')]
def read_fields(dirname):
tag_fields = _read_tag_fields(dirname)
tag_values = _read_tag_values(dirname)
def field_with_values(tag, field):
name = field.name
values = _values(tag, field, tag_values)
type_ = _type(field.type_, values)
return model.Field(tag, name, type_, values)
fields = [field_with_values(tag, field) for tag, field in tag_fields.items()]
return sorted(fields, key=lambda field: int(field.tag))
_TYPES = {
'Char': 'char',
'char': 'char',
'Int': 'int',
'int': 'int',
'MultipleCharValue': 'char',
'MultipleStringValue': 'String',
'MultipleValueString': 'String',
'NumInGroup': 'int',
'String': 'String',
}
def _type(field_type, values):
type_ = _TYPES.get(field_type)
if type_ == 'char' and values and max(len(value.value) for value in values) > 1:
return 'String'
else:
return type_
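# Hedged example (added for illustration): a MultipleCharValue field whose
# enumerated values are all single characters stays 'char', but any
# multi-character value promotes the whole field to 'String':
#   _type('MultipleCharValue', [model.Value(name='A', value='A')])   -> 'char'
#   _type('MultipleCharValue', [model.Value(name='AB', value='AB')]) -> 'String'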
def _values(tag, field, tag_values):
if field.type_ == 'Boolean' or field.name == 'MsgType':
return []
else:
return tag_values.get(tag, [])
_Field = collections.namedtuple('_Field', ['name', 'type_'])
def _read_tag_fields(dirname):
def tag(elem):
return elem.find('Tag').text
def field(elem):
name = elem.find('Name').text
type_ = elem.find('Type').text
return _Field(name, type_)
filename = _fields_path(dirname)
tree = xml.etree.ElementTree.parse(filename)
return {tag(elem): field(elem) for elem in tree.findall('Field')}
def _read_tag_values(dirname):
def value(enum):
return model.Value(name=enum.symbolic_name, value=enum.value)
def values(enums):
return [value(enum) for enum in _sort_enums(enums)]
enums = _read_enums(dirname)
return {tag: values(list(enums)) for tag, enums in
itertools.groupby(enums, lambda enum: enum.tag)}
_Enum = collections.namedtuple('_Enum', ['tag', 'value', 'symbolic_name', 'sort'])
def _read_enums(dirname):
def enums(elem):
tag = elem.find('Tag').text
value = _value(elem, tag)
symbolic_names = _symbolic_names(elem, tag, value)
sort = _sort(elem)
return [_Enum(tag, value, symbolic_name, sort)
for symbolic_name in symbolic_names]
filename = _enums_path(dirname)
tree = xml.etree.ElementTree.parse(filename)
return sorted([enum for elem in tree.findall('Enum') for enum in enums(elem)],
key=lambda enum: int(enum.tag))
def _sort_enums(enums):
if all(enum.sort is not None for enum in enums):
return sorted(enums, key=lambda enum: enum.sort)
else:
return enums
_VALUES = {
('276', 'f '): 'f',
}
def _value(elem, tag):
value = elem.find('Value').text
return _VALUES.get((tag, value), value)
_SYMBOLIC_NAMES = {
('574', '63'): 'CrossAuction2',
('574', '64'): 'CounterOrderSelection2',
('574', '65'): 'CallAuction2',
}
_SYMBOLIC_NAME_ALIASES = {
('434', '2'): [
'OrderCancelReplaceRequest',
]
}
def _symbolic_names(elem, tag, value):
symbolic_name = elem.find('SymbolicName').text
primary = _SYMBOLIC_NAMES.get((tag, value), symbolic_name)
aliases = _SYMBOLIC_NAME_ALIASES.get((tag, value), [])
return [primary] + aliases
def _sort(elem):
value = elem.find('Sort')
return int(value.text) if value is not None else None
def _enums_path(dirname):
return os.path.join(dirname, 'Enums.xml')
def _fields_path(dirname):
return os.path.join(dirname, 'Fields.xml')
def _messages_path(dirname):
return os.path.join(dirname, 'Messages.xml')
| 27.465969
| 96
| 0.654975
|
4a1245b7431e1166d0fc58a519a2ec23c53c10dc
| 994
|
py
|
Python
|
aiotdlib/api/functions/resend_authentication_code.py
|
jraylan/aiotdlib
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
[
"MIT"
] | 37
|
2021-05-04T10:41:41.000Z
|
2022-03-30T13:48:05.000Z
|
aiotdlib/api/functions/resend_authentication_code.py
|
jraylan/aiotdlib
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
[
"MIT"
] | 13
|
2021-07-17T19:54:51.000Z
|
2022-02-26T06:50:00.000Z
|
aiotdlib/api/functions/resend_authentication_code.py
|
jraylan/aiotdlib
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
[
"MIT"
] | 7
|
2021-09-22T21:27:11.000Z
|
2022-02-20T02:33:19.000Z
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class ResendAuthenticationCode(BaseObject):
"""
Re-sends an authentication code to the user. Works only when the current authorization state is authorizationStateWaitCode, the next_code_type of the result is not null and the server-specified timeout has passed
"""
ID: str = Field("resendAuthenticationCode", alias="@type")
@staticmethod
def read(q: dict) -> ResendAuthenticationCode:
return ResendAuthenticationCode.construct(**q)
| 41.416667
| 216
| 0.496982
|
4a1248c2a1441d8c074b44da75adbd9bc0307756
| 778
|
py
|
Python
|
djstripe/management/commands/djstripe_sync_customers.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | null | null | null |
djstripe/management/commands/djstripe_sync_customers.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | null | null | null |
djstripe/management/commands/djstripe_sync_customers.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | 1
|
2021-08-30T10:51:49.000Z
|
2021-08-30T10:51:49.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from ...settings import get_subscriber_model
from ...sync import sync_subscriber
class Command(BaseCommand):
help = "Sync subscriber data with stripe"
def handle(self, *args, **options):
qs = get_subscriber_model().objects.filter(customer__isnull=True)
count = 0
total = qs.count()
for subscriber in qs:
count += 1
perc = int(round(100 * (float(count) / float(total))))
print(
"[{0}/{1} {2}%] Syncing {3} [{4}]".format(
count, total, perc, subscriber.email, subscriber.pk
)
)
sync_subscriber(subscriber)
| 28.814815
| 73
| 0.587404
|
4a1248d2cefac98b1c47e06e7433a8cf414ec7c0
| 529
|
py
|
Python
|
Domain_Generalization/models/model_factory.py
|
GuardSkill/IntraClassInfoMax
|
ddf71318575f9a2183c2bcad7a0cb48972212fe4
|
[
"BSD-2-Clause",
"MIT"
] | 139
|
2020-07-17T09:51:23.000Z
|
2022-03-29T07:46:31.000Z
|
Domain_Generalization/models/model_factory.py
|
GuardSkill/IntraClassInfoMax
|
ddf71318575f9a2183c2bcad7a0cb48972212fe4
|
[
"BSD-2-Clause",
"MIT"
] | 22
|
2020-08-30T02:50:27.000Z
|
2022-03-05T14:02:13.000Z
|
Domain_Generalization/models/model_factory.py
|
GuardSkill/IntraClassInfoMax
|
ddf71318575f9a2183c2bcad7a0cb48972212fe4
|
[
"BSD-2-Clause",
"MIT"
] | 22
|
2020-07-22T06:39:17.000Z
|
2021-11-24T03:19:04.000Z
|
from models import caffenet
from models import mnist
from models import patch_based
from models import alexnet
from models import resnet
nets_map = {
'caffenet': caffenet.caffenet,
'alexnet': alexnet.alexnet,
'resnet18': resnet.resnet18,
'resnet50': resnet.resnet50,
'lenet': mnist.lenet
}
def get_network(name):
if name not in nets_map:
raise ValueError('Name of network unknown %s' % name)
def get_network_fn(**kwargs):
return nets_map[name](**kwargs)
return get_network_fn
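# Minimal usage sketch (added; the keyword argument below is hypothetical,
# since each network factory defines its own signature and simply receives
# whatever is forwarded via **kwargs):
#   net_fn = get_network('resnet18')
#   model = net_fn(classes=10)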
| 22.041667
| 61
| 0.705104
|
4a12498f997f000c823209884fe728a44e02bb47
| 11,792
|
py
|
Python
|
inicheck/output.py
|
micahjohnson150/inicheck
|
bad39cc54e0ce53cc700d4d3ca1b55bc0c4336e9
|
[
"CC0-1.0"
] | 1
|
2018-08-10T20:41:20.000Z
|
2018-08-10T20:41:20.000Z
|
inicheck/output.py
|
micahjohnson150/inicheck
|
bad39cc54e0ce53cc700d4d3ca1b55bc0c4336e9
|
[
"CC0-1.0"
] | 64
|
2018-05-09T15:20:02.000Z
|
2022-01-03T17:46:05.000Z
|
inicheck/output.py
|
USDA-ARS-NWRC/inicheck
|
283c4cb5e49cbc0301dbcba8e622d1ad7537d7bb
|
[
"CC0-1.0"
] | 5
|
2020-01-03T02:47:16.000Z
|
2020-09-23T03:02:41.000Z
|
import os
import sys
from datetime import date
from .utilities import mk_lst
def generate_config(config_obj, fname, cli=False):
"""
Generates a list of strings using the config data then its written to an
.ini file
Args:
config_obj: config object containing data to be outputted
fname: String path to the output location for the new config file
cli : Boolean value that adds the line "file generated using
inicheck.cli", Default = False
"""
header_len = 80
pg_sep = '#' * header_len
# Header surround each commented titles in the ini file
section_header = pg_sep + '\n' + ('# {0}\n') + pg_sep
# Construct the section strings
config_str = ""
config_str += pg_sep
# File header with specific package option
if config_obj.mcfg.header is not None:
header = config_obj.mcfg.header.split('\n')
for line in header:
config_str += ('\n# ' + line)
else:
config_str += "\n# Configuration File "
# Add in the date generated
config_str += "\n#\n# Date generated: {0}".format(date.today())
# Generated with inicheck
if cli:
config_str += "\n#\n# Generated using: inicheck <filename> -w"
config_str += "\n#\n# For more inicheck help see:" + \
"\n# http://inicheck.readthedocs.io/en/latest/\n"
config = config_obj.cfg
mcfg = config_obj.mcfg.cfg
# Check to see if section titles were provided
has_section_titles = hasattr(config_obj.mcfg, 'titles')
# Generate the string for the file, creating them in order.
for section in mcfg.keys():
if section in config.keys():
config_str += '\n' * 2
# Add a section header
s_hdr = pg_sep
if has_section_titles:
if section in config_obj.mcfg.titles.keys():
# Add the header
s_hdr = section_header.format(
config_obj.mcfg.titles[section])
            config_str += s_hdr
config_str += '\n'
config_str += '\n[{0}]\n'.format(section)
# Add section items and values
for k in config[section].keys():
v = config[section][k]
if type(v) == list:
astr = ", ".join(str(c).strip() for c in v)
else:
astr = str(v)
config_str += "{0:<30} {1:<10}\n".format((k + ':'), astr)
# Write out the string generated
with open(os.path.abspath(fname), 'w') as f:
f.writelines(config_str)
f.close()
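# Hedged usage sketch (added for illustration; `ucfg` stands for any loaded
# inicheck config object exposing .mcfg and .cfg as used above):
#   generate_config(ucfg, '/tmp/recreated.ini', cli=True)
# which writes the commented, section-ordered .ini assembled by the string
# building above.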
def print_config_report(warnings, errors, logger=None):
"""
Pass in the list of string messages generated by check_config file.
print out in a pretty format the issues
Args:
warnings: List of non-critical messages returned from
:func:`~utilities.check_config`.
errors: List of critical messages returned from
:func:`~utilities.check_config`.
logger: pass in the logger function being used. If no logger is
provided, print is used. Default = None
"""
msg = "{: <20} {: <25} {: <60}"
# Check to see if user wants the logger or stdout
if logger is not None:
out = logger.info
else:
out = print
msg_len = 90
out(" ")
out(" ")
out("Configuration File Status Report:")
header = "=" * msg_len
out(header)
any_warnings = False
any_errors = False
# Output warnings
if len(warnings) > 0:
any_warnings = True
out("WARNINGS:")
out(" ")
out(msg.format(" Section", "Item", "Message"))
out("-" * msg_len)
for w in warnings:
out(w)
out(" ")
out(" ")
# Output errors
if len(errors) > 0:
any_errors = True
out("ERRORS:")
out(" ")
out(msg.format("Section", "Item", "Message"))
out("-" * msg_len)
for e in errors:
out(e)
out(" ")
out(" ")
if not any_errors and not any_warnings:
out("No errors or warnings were reported with the config file.\n")
def print_recipe_summary(lst_recipes):
"""
    Prints out the recipes found and how they are interpreted
Args:
lst_recipes: list of the recipe entry objects
"""
# len of recipe separators
msg_len = 80
header = "=" * msg_len
recipe_hdr = "-" * msg_len
r_msg = "\n{0: <20}\n" + recipe_hdr
cfg_msg = "\t\t{0: <20} {1: <20} {2: <20}"
msg = "\t\t{0: <20} {1: <25}"
print('\n\n')
print("Below are the recipes applied to the config file:")
print("Recipes Summary:")
print(header)
for r in lst_recipes:
print(r_msg.format(r.name))
print("\tConditionals:")
for n, t in r.triggers.items():
for i, c in enumerate(t.conditions):
if type(c) == list:
c = ", ".join(c)
if i == 0:
print(msg.format(n, c))
else:
print(msg.format("", c))
print_cfg_for_recipe(r.adj_config, cfg_msg, hdr="\n\tEdits:")
# print('\n')
print('\n')
def print_cfg_for_recipe(cfg, fmt, hdr=None):
if hdr is not None:
print(hdr)
for section in cfg.keys():
for item, value in cfg[section].items():
if type(value) != list:
v = [value]
else:
v = value
for qq in v:
print(fmt.format(section, item, qq))
def print_details(details, mcfg):
"""
    Prints out the details for a list of provided options, designed for use
    with the CLI. Details about a section or an item can be requested by
    passing in a list in [section, item] order. If only a section is passed,
    the details provided are for the entire section.
Args:
        details: a list in [section, item, value] order requesting details.
mcfg: master config dictionary to gather the details from
"""
msg = "{: <15} {: <15} {: <15} {: <25} {: <60}"
hdr = '\n' + msg.format('Section', 'Item', 'Default', 'Options',
'Description')
print(hdr)
print('=' * len(hdr))
nopts = len(details)
# At least a section was provided
if nopts >= 1:
if details[0] in mcfg.keys():
# A section and item was provided
if nopts == 2:
if details[1] in mcfg[details[0]].keys():
print(msg.format(
details[0], details[1],
str(mcfg[details[0]][details[1]].default),
str(mcfg[details[0]][details[1]].options),
str(mcfg[details[0]][details[1]].description)
))
else:
print("Item {0} in not a registered item."
"".format(details[1]))
sys.exit()
# Print the whole section
else:
for k, v in mcfg[details[0]].items():
print(msg.format(details[0],
k,
str(v.default),
str(v.options),
str(v.description)))
# Section does not exist
else:
print("Section {0} in not a valid section.".format(details[0]))
sys.exit()
else:
print("Please provide at least a section for information")
sys.exit()
def print_non_defaults(ucfg):
"""
Prints out the options used that were not default option values.
Args:
ucfg: config object containing options that are not default
"""
mcfg = ucfg.mcfg.cfg
cfg = ucfg.cfg
msg = "{: <20} {: <20} {: <40} {: <40}"
hdr = '\n' + msg.format("Section", "Item", "Value", "Default")
print("\n\nConfiguration File Non-Defaults Report:")
print("Items with non-default values specified:")
print("=" * len(hdr))
print(hdr)
print('-' * len(hdr))
# Cycle through option/items checking defaults, print em if they don't
# match
for s in mcfg.keys():
# if the master section is in the users
if s in cfg.keys():
for i in mcfg[s].keys():
# If the masters item is in the users config
if i in cfg[s].keys():
# Grab the default, make it a string list
default_lst = mk_lst(mcfg[s][i].default)
str_default_lst = [
str(kk).lower() for kk in default_lst
if str(kk).lower() != 'none'
]
# Grab the default, make it a string list
user_lst = mk_lst(cfg[s][i])
str_lst = [str(kk).lower() for kk in user_lst]
for uv in str_lst:
# Single entries
for v in str_default_lst:
if v != 'none':
if uv not in str_default_lst:
print(
msg.format(
s, i, uv,
", ".join(str_default_lst)))
break
print("")
def print_change_report(
potential_changes, required_changes, ucfg, logger=None):
"""
Pass in the list of changes generated by check_config file.
print out in a pretty format the changes required
Args:
potential_changes: List of warnings about config property changes
                           especially about defaults, returned from
                           :func:`~changes.ChangeLog.get_active_changes`.
        required_changes: List of critical changes returned from
:func:`~changes.ChangeLog.get_active_changes`.
"""
msg = "{: <20} {: <25} {: <25} {: <25}"
# Check to see if user wants the logger or stdout
if logger is not None:
out = logger.info
else:
out = print
msg_len = 90
out(" ")
out(" ")
title = "Configuration File Change Log Report:"
out(title)
header = "=" * len(title)
out(header)
    any_warnings = False
    any_errors = False

    # Output warnings
if len(potential_changes) > 0:
out("Default changes - Warnings issued only when old default "
"values are detected in file.\n")
out("Default Changes:")
out("No. of default changes: {:0.0f}".format(len(potential_changes)))
any_warnings = True
out(" ")
out(msg.format(" Section", "Item", "Old Default", "New Default"))
out("-" * msg_len)
for w in potential_changes:
out(msg.format(w[0][0], w[0][1], w[0][3], w[1][3]))
out(" ")
out(" ")
# Output errors
msg = "{: <50} {: <50}"
if len(required_changes) > 0:
any_errors = True
out("Required Changes:")
out("No. of necessary changes: {:0.0f}".format(len(required_changes)))
out(" ")
out(msg.format("From", "To"))
out("-" * msg_len)
for e in required_changes:
orig = "{}/{}".format(e[0][0], e[0][1])
if e[1] != "removed":
to = "{}/{}".format(e[1][0], e[1][1])
else:
to = e[1]
out(msg.format(orig, to))
out(" ")
out(" ")
if not any_errors and not any_warnings:
out("No required changes or old defaults were reported with the config"
" file.\n")
| 30.549223
| 79
| 0.516367
|
4a1249b1f5cb1ef608af51f3c95b3594d89f4f34
| 2,714
|
py
|
Python
|
pytorch_neat/standardise_eval.py
|
archit120/PyTorch-NEAT
|
efd0f20c807e88b73e93eaad6cc8564b0c95676b
|
[
"Apache-2.0"
] | null | null | null |
pytorch_neat/standardise_eval.py
|
archit120/PyTorch-NEAT
|
efd0f20c807e88b73e93eaad6cc8564b0c95676b
|
[
"Apache-2.0"
] | null | null | null |
pytorch_neat/standardise_eval.py
|
archit120/PyTorch-NEAT
|
efd0f20c807e88b73e93eaad6cc8564b0c95676b
|
[
"Apache-2.0"
] | 2
|
2020-06-06T15:24:34.000Z
|
2020-06-07T14:23:54.000Z
|
# Copyright (c) 2020 Archit Rungta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import math
from pytorch_neat.multi_env_eval import MultiEnvEvaluator
class StandardEnvEvaluator(MultiEnvEvaluator):
def __init__(self, make_net, activate_net, max_rewards=100000, batch_size=1, max_env_steps=None, make_env=None, envs=None):
self.max_rewards = max_rewards
self.all_rewards = np.zeros((max_rewards))
self.reward_idx = 0
super().__init__(make_net, activate_net, batch_size=batch_size, max_env_steps=max_env_steps, make_env=make_env, envs=envs)
def eval_genome(self, genome, config, debug=False):
net = self.make_net(genome, config, self.batch_size)
cmean = 0
std = 1
# print(self.reward_idx)
if self.reward_idx >= self.max_rewards:
# print("Standardisation active now")
cmean = np.mean(self.all_rewards)
            std = np.std(self.all_rewards)
fitness = 0
val_fitness = 0
states = [env.reset() for env in self.envs]
dones = [False] * self.batch_size
step_num = 0
while True:
step_num += 1
if self.max_env_steps is not None and step_num == self.max_env_steps:
break
if debug:
actions = self.activate_net(
net, states, debug=True, step_num=step_num)
else:
actions = self.activate_net(net, states)
assert len(actions) == len(self.envs)
for i, (env, action, done) in enumerate(zip(self.envs, actions, dones)):
if not done:
state, reward, done, _ = env.step(action)
self.all_rewards[(self.reward_idx+1)%self.max_rewards] = reward
self.reward_idx += 1
fitness += (reward-cmean)/std
val_fitness += reward
if not done:
states[i] = state
dones[i] = done
if all(dones):
break
genome.val_fitness = val_fitness/self.batch_size
return fitness / self.batch_size
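# Added note (illustrative): once max_rewards samples have been collected the
# evaluator switches to standardised fitness, i.e. every per-step reward
# contributes (reward - mean(all_rewards)) / std(all_rewards), while
# genome.val_fitness keeps the raw, unstandardised return for validation.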
| 37.178082
| 130
| 0.609801
|
4a124b393d25a9fdf07e57b1b34e222c40363008
| 17,778
|
py
|
Python
|
resqpy/olio/dataframe.py
|
berland/resqpy
|
91d6f809cf84100c49459588f756618144f7deeb
|
[
"MIT"
] | null | null | null |
resqpy/olio/dataframe.py
|
berland/resqpy
|
91d6f809cf84100c49459588f756618144f7deeb
|
[
"MIT"
] | null | null | null |
resqpy/olio/dataframe.py
|
berland/resqpy
|
91d6f809cf84100c49459588f756618144f7deeb
|
[
"MIT"
] | null | null | null |
"""dataframe.py: classes for storing and retrieving dataframes as RESQML objects.
note that this module uses the obj_Grid2dRepresentation class in a way that was not envisaged
when the RESQML standard was defined; software that does not use resqpy is unlikely to be
able to do much with data stored in this way
"""
version = '6th May 2021'
import logging
log = logging.getLogger(__name__)
log.debug(f'dataframe.py version {version}')
import numpy as np
import pandas as pd
import resqpy.property as rqp
import resqpy.surface as rqs
import resqpy.time_series as rqts
import resqpy.crs as rqc
import resqpy.olio.xml_et as rqet
# todo: add support for building an ensemble of dataframes using the same mesh support
class DataFrame:
"""Class for storing and retrieving a pandas dataframe of numerical data as a RESQML property.
notes:
actual values are stored either as z values in a Mesh (Grid2d) object, or as a property on
      such a mesh when multiple realizations are in use; a regular Mesh object is created to act
as a supporting representation; columns are mapped onto I and rows onto J; if a property is
used then the indexable elements are 'nodes'; column titles are stored in a related StringLookup
object, indexed by column number; column units are optionally treated in the same way (uom for
the property is generally set to Euc); all values are stored as floats; use the derived TimeTable
class if rows relate to steps in a TimeSeries
"""
def __init__(self, model, support_root = None, df = None, uom_list = None, realization = None,
title = 'dataframe', column_lookup_uuid = None, uom_lookup_uuid = None):
"""Create a new Dataframe object from either a previously stored property or a pandas dataframe.
arguments:
model (model.Model): the model to which the new Dataframe will be attached
support_root (lxml.Element, optional): the xml root node of an existing Grid2dRepresentation
object acting as support for a dataframe property (or holding the dataframe as z values)
df (pandas.DataFrame, optional): a dataframe from which the new Dataframe is to be created;
if both support_root and df are supplied, realization must not be None and a new
realization property will be created
uom_list (list of str, optional): a list holding the units of measure for each
column; if present, length of list must match number of columns in df; ignored if
support_root is not None
realization (int, optional): if present, the realization number of the RESQML property
holding the dataframe
title (str, default 'dataframe'): used as the citation title for the Mesh (and property);
ignored if support_root is not None
column_lookup_uuid (uuid, optional): if present, the uuid of a string lookup table holding
the column names; if present, the contents and order of the table must match the columns
in the dataframe; if absent, a new lookup table will be created; ignored if support_root
is not None
uom_lookup_uuid (uuid, optional): if present, the uuid of a string lookup table holding
the units of measure for each column; if None and uom_list is present, a new table
will be created; if both uom_list and uom_lookup_uuid are present, their contents
must match; ignored if support_root is not None
returns:
a newly created Dataframe object
notes:
when initialising from a support_root, the supporting mesh and its property should have been
originally created using this class; when working with ensembles, each object of this class
will only handle the data for one realization, though they may share a common support_root
"""
assert support_root is not None or df is not None
assert support_root is None or df is None or realization is not None
self.model = model
self.df = None
self.n_rows = self.n_cols = 0
self.uom_list = None
self.realization = realization
self.title = title
self.mesh = None # only generated when needed for write_hdf5(), create_xml()
self.pc = None # property collection; only generated when needed for write_hdf5(), create_xml()
self.column_lookup_uuid = column_lookup_uuid
self.column_lookup = None # string lookup table mapping column index (0 based) to column name
self.uom_lookup_uuid = uom_lookup_uuid
self.uom_lookup = None # string lookup table mapping column index (0 based) to uom
if support_root is not None:
assert rqet.node_type(support_root) == 'obj_Grid2dRepresentation'
extra = rqet.load_metadata_from_xml(support_root)
assert 'dataframe' in extra and extra['dataframe'] == 'true'
self.mesh = rqs.Mesh(self.model, root_node = support_root)
self.title = self.mesh.title
self.n_rows, self.n_cols = self.mesh.nj, self.mesh.ni
cl_root = self.model.root(obj_type = 'StringTableLookup', related_uuid = self.mesh.uuid, title = 'dataframe columns')
assert cl_root is not None, 'column name lookup table not found for dataframe'
self.column_lookup = rqp.StringLookup(self.model, root_node = cl_root)
self.column_lookup_uuid = self.column_lookup.uuid
assert self.column_lookup.length() == self.n_cols
ul_root = self.model.root(obj_type = 'StringTableLookup', related_uuid = self.mesh.uuid, title = 'dataframe units')
if ul_root is not None:
self.uom_lookup = rqp.StringLookup(self.model, root_node = ul_root)
self.uom_lookup_uuid = self.uom_lookup.uuid
self.uom_list = self.uom_lookup.get_list()
da = self.mesh.full_array_ref()[..., 2] # dataframe data as 2D numpy array, defaulting to z values in mesh
existing_pc = rqp.PropertyCollection(support = self.mesh)
existing_count = 0 if existing_pc is None else existing_pc.number_of_parts()
         if df is None: # existing data, either in mesh or property
if existing_count > 0: # use property data instead of z values
if existing_count == 1:
if self.realization is not None:
assert existing_pc.realization_for_part(existing_pc.singleton()) == self.realization
else:
assert self.realization is not None, 'no realization specified when accessing ensemble dataframe'
da = existing_pc.single_array_ref(realization = self.realization)
assert da is not None and da.ndim == 2 and da.shape == (self.n_rows, self.n_cols)
else:
assert realization is None
self.df = pd.DataFrame(da, columns = self.column_lookup.get_list())
else: # both support_root and df supplied: add a new realisation
if existing_count > 0:
assert existing_pc.singleton(realization = self.realization) is None, 'dataframe realization already exists'
self.df = df.copy()
assert len(self.df) == self.n_rows
            assert len(self.df.columns) == self.n_cols
else:
assert df is not None, 'no dataframe (or support root) provided when instantiating DataFrame object'
self.df = df.copy()
# todo: check data type of columns – restrict to numerical data
self.n_rows = len(self.df)
self.n_cols = len(self.df.columns)
if column_lookup_uuid is not None:
self.column_lookup = rqp.StringLookup(self.model, root_node = self.model.root(uuid = column_lookup_uuid))
assert self.column_lookup is not None
assert self.column_lookup.length() == self.n_cols
assert all(self.df.columns == self.column_lookup.get_list()) # exact match of column names required!
if uom_lookup_uuid is not None:
self.uom_lookup = rqp.StringLookup(self.model, root_node = self.model.root(uuid = uom_lookup_uuid))
assert self.uom_lookup is not None
if uom_list is not None:
assert len(uom_list) == self.n_cols
self.uom_list = uom_list.copy()
if self.uom_lookup is not None:
assert self.uom_list == self.uom_lookup.get_list()
elif self.uom_lookup is not None:
self.uom_list = self.uom_lookup.get_list()
def dataframe(self):
"""Returns the Dataframe as a pandas DataFrame."""
return self.df
def column_uom(self, col_index):
"""Returns units of measure for the specified column, or Euc if no units present."""
      if self.uom_lookup is None: return 'Euc'
      assert 0 <= col_index < self.n_cols, 'column index out of range'
      return self.uom_lookup.get_string(col_index)
def write_hdf5_and_create_xml(self):
"""Write dataframe data to hdf5 file and create xml for RESQML objects to represent dataframe."""
self._set_mesh_from_df() # writes hdf5 data and creates xml for mesh (and property)
if self.column_lookup is None:
self.column_lookup = rqp.StringLookup(self.model, int_to_str_dict = dict(enumerate(self.df.columns)),
title = 'dataframe columns')
self.column_lookup_uuid = self.column_lookup.uuid
sl_node = self.column_lookup.create_xml()
else:
sl_node = self.column_lookup.root_node
if sl_node is not None:
self.model.create_reciprocal_relationship(self.mesh.root_node, 'destinationObject', sl_node, 'sourceObject')
if self.uom_list and self.uom_lookup is None:
self.uom_lookup = rqp.StringLookup(self.model, int_to_str_dict = dict(enumerate(self.uom_list)),
title = 'dataframe units')
self.uom_lookup_uuid = self.uom_lookup.uuid
ul_node = self.uom_lookup.create_xml()
elif self.uom_lookup is not None:
ul_node = self.uom_lookup.root_node
else:
ul_node = None
if ul_node is not None:
self.model.create_reciprocal_relationship(self.mesh.root_node, 'destinationObject', ul_node, 'sourceObject')
def _set_mesh_from_df(self):
"""Creates Mesh object; called before writing to hdf5 or creating xml."""
# note: actual data is stored in related Property if realization number is present, directly in Mesh otherwise
assert self.n_rows == len(self.df)
assert self.n_cols == len(self.df.columns)
if self.mesh is None:
origin = (0.0, 0.0, 0.0)
dxyz_dij = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
crs_uuids = self.model.uuids(obj_type = 'LocalDepth3dCrs')
if len(crs_uuids) == 0:
crs = rqc.Crs(self.model)
crs.create_xml()
crs_uuid = crs.uuid
else: # use any available crs
crs_uuid = crs_uuids[0]
if self.realization is None:
self.mesh = rqs.Mesh(self.model, mesh_flavour = 'reg&z', ni = self.n_cols, nj = self.n_rows,
dxyz_dij = dxyz_dij, origin = origin,
z_values = np.array(self.df), crs_uuid = crs_uuid)
else:
self.mesh = rqs.Mesh(self.model, mesh_flavour = 'regular', ni = self.n_cols, nj = self.n_rows,
dxyz_dij = dxyz_dij, origin = origin, crs_uuid = crs_uuid)
self.mesh.write_hdf5()
mesh_root = self.mesh.create_xml(title = self.title)
rqet.create_metadata_xml(mesh_root, {'dataframe': 'true'})
if self.realization is not None:
self.pc = rqp.PropertyCollection()
self.pc.set_support(support = self.mesh)
dataframe_pk_uuid = self.model.uuid(obj_type = 'PropertyKind', title = 'dataframe')
if dataframe_pk_uuid is None:
dataframe_pk = rqp.PropertyKind(self.model, title = 'dataframe', example_uom = 'Euc')
dataframe_pk.create_xml()
dataframe_pk_uuid = dataframe_pk.uuid
self.pc.add_cached_array_to_imported_list(np.array(self.df), 'dataframe', self.title,
uom = 'Euc', property_kind = 'dataframe',
local_property_kind_uuid = dataframe_pk_uuid,
realization = self.realization,
indexable_element = 'nodes')
self.pc.write_hdf5_for_imported_list()
self.pc.create_xml_for_imported_list_and_add_parts_to_model()
class TimeTable(DataFrame):
"""Class for storing and retrieving a pandas dataframe where rows relate to steps in a time series.
note:
inherits from DataFrame class
"""
def __init__(self, model, support_root = None, df = None, uom_list = None,
realization = None, time_series = None, title = 'timetable',
column_lookup_uuid = None, uom_lookup_uuid = None):
"""Create a new TimeTable object from either a previously stored property or a pandas dataframe.
note:
see DataFrame class docstring for details of arguments
"""
# todo: add option to set up time series from a column in the dataframe?
assert support_root is not None or (df is not None and time_series is not None)
super().__init__(model, support_root = support_root, df = df, uom_list = uom_list,
realization = realization, title = title,
column_lookup_uuid = column_lookup_uuid, uom_lookup_uuid = uom_lookup_uuid)
if support_root is not None:
ts_root = self.model.root(obj_type = 'TimeSeries', related_uuid = self.mesh.uuid)
assert ts_root is not None, 'no time series related to mesh holding dataframe'
self.ts = rqts.TimeSeries(self.model, time_series_root = ts_root)
else:
assert time_series is not None
assert time_series.number_of_timestamps() == self.n_rows
self.ts = time_series
def time_series(self):
"""Returns the TimeSeries object in use by the time table."""
return self.ts
def write_hdf5_and_create_xml(self):
"""Write time table data to hdf5 file and create xml for RESQML objects to represent dataframe."""
super().write_hdf5_and_create_xml()
# note: time series xml must be created before calling this method
self.model.create_reciprocal_relationship(self.mesh.root_node, 'destinationObject',
self.ts.time_series_root, 'sourceObject')
def dataframe_parts_in_model(model, timetables = None, title = None, related_uuid = None):
"""Returns list of part names within model that are representing DataFrame support objects.
arguments:
model (model.Model): the model to be inspected for dataframes
timetables (boolean or None): if True, only TimeTable dataframe parts will be included; if False
only DataFrame parts that are not representing TimeTable objects will be included; if None,
both parts for both types of dataframe will be included
title (str, optional): if present, only parts with a citation title exactly matching will be
included
related_uuid (uuid, optional): if present, only parts relating to this uuid are included
returns:
list of str, each element in the list is a part name, within model, which is representing the
support for a DataFrame object
"""
df_parts_list = model.parts(obj_type = 'Grid2dRepresentation', title = title,
extra = {'dataframe': 'true'}, related_uuid = related_uuid)
if timetables is not None:
filtered_list = []
for df_part in df_parts_list:
is_tt = (model.part(obj_type = 'TimeSeries', related_uuid = model.uuid_for_part(df_part)) is not None)
if timetables == is_tt: filtered_list.append(df_part)
df_parts_list = filtered_list
return df_parts_list
def timetable_parts_in_model(model, title = None, related_uuid = None):
"""Returns list of part names within model that are representing TimeTable dataframe support objects.
arguments:
model (model.Model): the model to be inspected for dataframes
title (str, optional): if present, only parts with a citation title exactly matching will be
included
related_uuid (uuid, optional): if present, only parts relating to this uuid are included
returns:
list of str, each element in the list is a part name, within model, which is representing the support
for a TimeTable object
"""
return dataframe_parts_in_model(model, timetables = True, title = title, related_uuid = related_uuid)
def dataframe_for_title(model, title, realization = None):
"""Returns a DataFrame object loaded from model, with given title (optionally for given realization)."""
df_parts = dataframe_parts_in_model(model, title = title)
if df_parts is None or len(df_parts) == 0: return None
assert len(df_parts) == 1
return DataFrame(model, support_root = model.root_for_part(df_parts[0]), realization = realization)
def timetable_for_title(model, title, realization = None):
"""Returns a TimeTable object loaded from model, with given title (optionally for given realization)."""
tt_parts = timetable_parts_in_model(model, title = title)
if tt_parts is None or len(tt_parts) == 0: return None
assert len(tt_parts) == 1
return TimeTable(model, support_root = model.root_for_part(tt_parts[0]), realization = realization)
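# Hedged usage sketch (added; `model` is any resqpy Model and the title is
# hypothetical):
#   df_obj = dataframe_for_title(model, 'well rates', realization = 0)
#   if df_obj is not None:
#      pdf = df_obj.dataframe()  # plain pandas.DataFrame view of the stored data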
| 50.362606
| 126
| 0.668242
|
4a124b8d646ff31ee0b9e136b68c8185181227d8
| 2,300
|
py
|
Python
|
Software/Python/grove_encoder_read.py
|
TheVinhLuong102/GrovePi
|
cc5511129b98b02c13e260eeea1c70ee23368cc9
|
[
"MIT"
] | 482
|
2015-01-09T03:06:14.000Z
|
2022-03-24T10:05:07.000Z
|
Software/Python/grove_encoder_read.py
|
TheVinhLuong102/GrovePi
|
cc5511129b98b02c13e260eeea1c70ee23368cc9
|
[
"MIT"
] | 257
|
2015-01-13T14:08:17.000Z
|
2022-01-20T08:43:50.000Z
|
Software/Python/grove_encoder_read.py
|
TheVinhLuong102/GrovePi
|
cc5511129b98b02c13e260eeea1c70ee23368cc9
|
[
"MIT"
] | 510
|
2015-01-27T17:15:44.000Z
|
2022-03-29T01:27:13.000Z
|
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Encoder(http://www.seeedstudio.com/depot/Grove-Encoder-p-1352.html) with the GrovePi
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# USAGE
#
# Connect the grove encoder to D2 on the GrovePi.
# The encoder values go from 0 up to 32
# (although these can be subsequently changed by utilizing a different parameter for the encoder_en function)
import time
import grovepi
print("Reading from the Grove Encoder")
# default pin is 2 and default number of steps is 32
grovepi.encoder_en()
time_to_run = 10 # 10 seconds
start = time.time() # current time in seconds
old_val = 0
while start + time_to_run > time.time():
# defaults to pin 2
new_val = grovepi.encoderRead()
if old_val != new_val:
print("{:3d}/32 position".format(new_val))
old_val = new_val
# and disable the interrupt on pin 2
grovepi.encoder_dis()
| 36.507937
| 139
| 0.774783
|
4a124c9ec386ae3f4e6e40b1d4285fcf67c07dad
| 3,930
|
py
|
Python
|
garageofcode/highdim/main.py
|
tpi12jwe/garageofcode
|
3cfaf01f6d77130bb354887e6ed9921c791db849
|
[
"MIT"
] | 2
|
2020-02-11T10:32:06.000Z
|
2020-02-11T17:00:47.000Z
|
garageofcode/highdim/main.py
|
jonatanwestholm/garageofcode
|
630e99c6fb4c42875beadb6adf8dc958501c5ab8
|
[
"MIT"
] | null | null | null |
garageofcode/highdim/main.py
|
jonatanwestholm/garageofcode
|
630e99c6fb4c42875beadb6adf8dc958501c5ab8
|
[
"MIT"
] | null | null | null |
import time
from itertools import chain, combinations
from collections import deque
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import networkx as nx
from garageofcode.mip.convex_hull import in_hull
def powerset(s):
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
def ncube_corners(n):
dims = list(range(n))
A = np.zeros([2**n, n])
for i, corner in enumerate(powerset(dims)):
for j in corner:
A[i, j] = 1
return A
def rotation(N, theta, num_pairs=None):
A = np.eye(N)
if num_pairs is not None:
def get_pairs():
for _ in range(num_pairs):
yield np.random.choice(N, 2, replace=False)
else:
def get_pairs():
for i in range(N):
for j in range(i):
yield i, j
for x1, x2 in get_pairs():
A = np.dot(A, subrotation(theta, x1, x2, N))
return A
def subrotation(theta, x1, x2, N):
A = np.eye(N)
A[x1, x1] = np.cos(theta)
A[x1, x2] = -np.sin(theta)
A[x2, x1] = np.sin(theta)
A[x2, x2] = np.cos(theta)
return A
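# Added note (illustrative): subrotation() builds an N-dimensional Givens
# rotation acting only in the (x1, x2) plane, so for N == 2 it reduces to the
# familiar planar rotation, e.g. subrotation(np.pi / 2, 0, 1, 2) is
# approximately [[0, -1], [1, 0]]; rotation() above composes one such matrix
# per chosen axis pair.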
def get_contour(V):
#return [u for i, u in enumerate(V)
# if not in_hull(u, [v for j, v in enumerate(V) if i != j], verbose=False)]
U = []
idxs = []
for i, u in enumerate(V):
W = [v for j, v in enumerate(V) if np.linalg.norm(v - u) > 1e-6]
if not in_hull(u, W, verbose=False):
U.append(u)
idxs.append(i)
return U, idxs
def get_corner_graph(N):
corners = [tuple(elem) for elem in ncube_corners(N)]
G = nx.Graph()
for i, u in enumerate(corners):
u_l = list(u)
for k in range(N):
if u_l[k]:
u_l[k] = 0
v = tuple(u_l)
j = corners.index(v)
G.add_edge(i, j)
u_l[k] = 1
return G
#def get_closest_to_xy(P):
# return np.argmin(np.sum(P[2:, :], axis=0))
def get_visible(G, P, contour):
queue = deque(contour)
visible = list(contour)
while queue:
i = queue.popleft()
for j in G[i]:
if j in visible:
continue
if np.sum(P[2:, i]) >= np.sum(P[2:, j]):
visible.append(j)
queue.append(j)
return visible
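# Added note (illustrative): get_visible() is a breadth-first walk outward
# from the 2D convex-hull corners; a neighbouring cube corner j is accepted
# when sum(P[2:, i]) >= sum(P[2:, j]), i.e. it sits no deeper along the
# projected-away axes than the corner i it was reached from, which is used
# here as a cheap visibility test.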
def main():
np.random.seed(0)
N = 5
num_iter = 100
scale = np.sqrt(N) + 0.5
P = ncube_corners(N).T * 2 - 1
G = get_corner_graph(N)
#print(points)
A0 = rotation(N, 0.063)
A0 /= np.linalg.det(A0)
for k in range(num_iter):
U, contour_idxs = get_contour(P[:2, :].T)
#print(U)
U = list(sorted(U, key=lambda x: np.arctan2(x[1], x[0])))
visible = get_visible(G, P, contour_idxs)
#print(len(visible))
#visible = range(2**N)
#print(visible)
#print(U)
#print()
polygon = Polygon(U, True)
p = PatchCollection([polygon])
fig, ax = plt.subplots()
ax.add_collection(p)
for i in visible:
for j in G[i]:
if j in visible:
x, y = zip(P[:2, i], P[:2, j])
ax.plot(x, y, "r", zorder=99)
x, y = zip(*[P[:2, i] for i in visible])
plt.scatter(x, y, color="r", zorder=99)
ax.set_xlim([-scale, scale])
ax.set_ylim([-scale, scale])
ax.axis("off")
plt.savefig(f"../../../results/highdim/projections/{k:04d}.png")
plt.close()
P = np.dot(A0, P)
if __name__ == '__main__':
main()
#G = get_corner_graph(5)
#print(len(G.edges))
# how to plot only visible points?
# strategy idea:
# find point closest to x1 = x2 = 0
# this must be visible
# so must its neighbours
# keep branching, except from corners on the contour
| 25.855263
| 86
| 0.534606
|
4a124d4d2ab90105e60925bca8909ccc5a039357
| 7,592
|
py
|
Python
|
examples/table_variants.py
|
iwob/evoplotter
|
816f1b19bf6656c1ac35bc9dbe8c090a6325f5a1
|
[
"MIT"
] | 1
|
2020-06-03T07:44:06.000Z
|
2020-06-03T07:44:06.000Z
|
examples/table_variants.py
|
iwob/evoplotter
|
816f1b19bf6656c1ac35bc9dbe8c090a6325f5a1
|
[
"MIT"
] | null | null | null |
examples/table_variants.py
|
iwob/evoplotter
|
816f1b19bf6656c1ac35bc9dbe8c090a6325f5a1
|
[
"MIT"
] | null | null | null |
from evoplotter import printer
from evoplotter import reporting
from evoplotter.dims import *
tableBody = r"""
P0 (3) & 0.70 & 0.54 & 0.34 & 0.42 & 0.88 & 0.94 & 1.00 & 1.00 & 0.58 & 0.66 & 0.40 & 0.58 & 0.72 & 0.82 & 1.00 & 1.00 & 0.00 & 0.00 & 0.72 \\
P1 (3) & 0.18 & 0.16 & 0.26 & 0.24 & 0.24 & 0.20 & 0.54 & 0.58 & 0.16 & 0.08 & 0.20 & 0.12 & 0.60 & 0.44 & 0.96 & 0.96 & 0.00 & 0.00 & 0.37 \\
P2 (2) & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.00 & 0.00 & 1.00 \\
P3 (4) & 0.14 & 0.16 & 0.12 & 0.12 & 0.46 & 0.48 & 1.00 & 0.96 & 0.52 & 0.62 & 0.28 & 0.54 & 0.82 & 0.76 & 1.00 & 1.00 & 0.00 & 0.00 & 0.56 \\
P4 (5) & 0.14 & 0.06 & 0.02 & 0.08 & 0.02 & 0.02 & 0.00 & 0.00 & 0.52 & 0.56 & 0.38 & 0.44 & 0.38 & 0.18 & 0.14 & 0.14 & 0.00 & 0.00 & 0.19 \\
P5 (2) & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.98 & 0.92 & 1.00 & 0.98 & 1.00 & 0.98 & 1.00 & 1.00 & 0.00 & 0.00 & 0.99 \\
P6 (4) & 0.08 & 0.08 & 0.06 & 0.14 & 0.02 & 0.14 & 0.04 & 0.04 & 0.40 & 0.60 & 0.82 & 0.68 & 0.68 & 0.74 & 0.78 & 0.80 & 0.00 & 0.00 & 0.38 \\
P7 (3) & 0.16 & 0.08 & 0.34 & 0.16 & 0.34 & 0.44 & 0.56 & 0.58 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 1.00 & 0.00 & 0.00 & 0.67 \\
P8 (4) & 0.18 & 0.36 & 0.10 & 0.12 & 0.14 & 0.18 & 0.28 & 0.32 & 0.36 & 0.46 & 0.26 & 0.30 & 0.50 & 0.34 & 0.82 & 0.76 & 0.00 & 0.00 & 0.34 \\
mean & 0.40 & 0.38 & 0.36 & 0.36 & 0.46 & 0.49 & 0.60 & 0.61 & 0.61 & 0.66 & 0.59 & 0.63 & 0.74 & 0.70 & 0.86 & 0.85 & 0.00 & 0.00 & - \\
rank & 10.72 & 10.94 & 12.00 & 11.33 & 10.67 & 9.61 & 8.28 & 8.06 & 8.50 & 8.22 & 8.17 & 8.78 & 5.39 & 7.00 & 4.17 & 4.17 & 0.00 & 0.00 & - \\
"""
def generateTableText(verticalBorder, horizontalBorder, useBooktabs):
print("Generating a table for table_variants_text.tex ..")
dimCx = Dim([("0.0", None), ("0.5", None)])
dimSel = Dim([("T", None), ("L", None)])
dimMethod = Dim([("U", None), ("P", None), ("S", None), ("IS", None)])
dimSingleCol = Dim([("C", None), ("D", None)])
main = dimSel * dimMethod * dimCx + dimSingleCol * dim_all * dim_all
dimCols = main + Dim([("mean", None)])
cells, rows_names = printer.latexToArrayRowNames(tableBody) # maybe switch for pandas as a primary representation?
dimRows = Dim.from_names(rows_names)
rBold = printer.LatexTextbf(lambda v, b: v == "1.00")
rShading = printer.CellShading(0.0, 0.5, 1.0, "colorLow", "colorMedium", "colorHigh")
table = printer.Table(cells, dimRows=dimRows, dimCols=dimCols, cellRenderers=[rBold, rShading], verticalBorder=verticalBorder,
horizontalBorder=horizontalBorder, useBooktabs=useBooktabs, headerRowNames=["method", "", "cx"])
return table
def generateTableTextRemovedCols(verticalBorder, horizontalBorder, useBooktabs):
print("Generating a table for table_variants_text_rc.tex ..")
dimCx = Dim([("0.0", None), ("0.5", None)])
dimSel = Dim([("T", None), ("L", None)])
dimMethod = Dim([("U", None), ("P", None), ("S", None), ("IS", None)])
dimSingleCol = Dim([("C", None), ("D", None)])
main = dimSel * dimMethod * dimCx + dimSingleCol
dimCols = main + Dim([("mean", None)])
cells, rows_names = printer.latexToArrayRowNames(tableBody) # maybe switch for pandas as a primary representation?
dimRows = Dim.from_names(rows_names)
rBold = printer.LatexTextbf(lambda v, b: v == "1.00")
rShading = printer.CellShading(0.0, 0.5, 1.0, "colorLow", "colorMedium", "colorHigh")
table = printer.Table(cells, dimRows=dimRows, dimCols=dimCols, cellRenderers=[rBold, rShading], verticalBorder=verticalBorder,
horizontalBorder=horizontalBorder, useBooktabs=useBooktabs, headerRowNames=["method", "", "cx"])
table.leaveColumns([0, 2, 6, 8, 10, 14]) # leaving out S and C
return table
data = [
{"A": 0, "B": 1, "C": 0, "D": 4, "E": 0, "F": 0, "value": 2},
{"A": 1, "B": 1, "C": 0, "D": 4, "E": 0, "F": 0, "value": 4},
{"A": 0, "B": 0, "C": 0, "D": 4, "E": 0, "F": 0, "value": 0},
{"A": 0, "B": 0, "C": 0, "D": 4, "E": 0, "F": 0, "value": 3},
{"A": 0, "B": 0, "C": 1, "D": 4, "E": 0, "F": 0, "value": 1},
{"A": 0, "B": 0, "C": 1, "D": 4, "E": 0, "F": 0, "value": 9},
{"A": 0, "B": 1, "C": 1, "D": 4, "E": 0, "F": 1, "value": 1},
{"A": 0, "B": 1, "C": 1, "D": 4, "E": 0, "F": 1, "value": 3},
{"A": 0, "B": 1, "C": 0, "D": 4, "E": 0, "F": 1, "value": 0},
{"A": 0, "B": 1, "C": 0, "D": 4, "E": 0, "F": 1, "value": 4},
{"A": 1, "B": 1, "C": 0, "D": 4, "E": 0, "F": 2, "value": 5},
{"A": 1, "B": 1, "C": 1, "D": 4, "E": 0, "F": 2, "value": 7},
{"A": 1, "B": 0, "C": 1, "D": 4, "E": 0, "F": 2, "value": 1},
{"A": 1, "B": 0, "C": 1, "D": 4, "E": 0, "F": 2, "value": 2},
{"A": 1, "B": 0, "C": 0, "D": 4, "E": 0, "F": 3, "value": 2},
{"A": 1, "B": 0, "C": 0, "D": 4, "E": 0, "F": 3, "value": 4},
{"A": 1, "B": 0, "C": 0, "D": 4, "E": 0, "F": 3, "value": 2},
{"A": 1, "B": 0, "C": 0, "D": 4, "E": 0, "F": 3, "value": 4},
]
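
# Worked example for the aggregation below: the F=0 row's (A=0, B=0) cell sums
# the four matching records above, 0 + 3 + 1 + 9 = 13; the C and E keys play
# no role because no dimension is built from them.
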
def generateTableData(verticalBorder, horizontalBorder, useBooktabs):
print("Generating a table for table_variants_data.tex ..")
dimCols = Dim.from_dict(data, "A", nameFun=lambda v: "A={0}".format(v)) * Dim.from_dict(data, "B", nameFun=lambda v: "B={0}".format(v))
dimRows = Dim.from_dict(data, "F", nameFun=lambda v: "F={0}".format(v))
cells = printer.generateTableCells(data, dimRows=dimRows, dimCols=dimCols, fun=lambda props: sum([p["value"] for p in props]))
rShading = printer.CellShading(0.0, 5.0, 10.0, "colorLow", "colorMedium", "colorHigh")
table = printer.Table(cells, dimCols=dimCols, dimRows=dimRows, cellRenderers=[rShading], verticalBorder=verticalBorder,
horizontalBorder=horizontalBorder, useBooktabs=useBooktabs, headerRowNames=["A-value"])
return table
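
# The shading range here (0.0 / 5.0 / 10.0) is wider than in the text tables
# because these cells hold summed raw values rather than numbers in [0, 1].
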
def generateReport(tableGenerator):
report = reporting.ReportPDF()
report.add(reporting.color_scheme_gray_light.toBlockLatex())
verticalBorder_list = [0, 1, 2]
horizontalBorder_list = [0, 1, 2]
useBooktabs_list = [False, True]
for ub in useBooktabs_list:
sec1 = reporting.SectionRelative("useBooktabs={0}".format(ub))
report.add(sec1)
# report.add(reporting.BlockLatex(r"\bigskip"))
for hb in horizontalBorder_list:
sec2 = reporting.SectionRelative("horizontalBorder={0}".format(hb))
sec1.add(sec2)
report.add(reporting.BlockLatex(r"\bigskip\bigskip"))
for vb in verticalBorder_list:
                subsec = reporting.SectionRelative("verticalBorder={0}".format(vb))
subsec.add(tableGenerator(verticalBorder=vb, horizontalBorder=hb, useBooktabs=ub))
subsec.add(reporting.BlockLatex(r"\bigskip"))
sec2.add(subsec)
return report
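
# generateReport nests sections useBooktabs -> horizontalBorder ->
# verticalBorder, so each report below renders 2 x 3 x 3 = 18 variants of the
# same table. The three reports cover the full text-sourced table, the same
# table with a pruned column set, and a table aggregated from the dict records.
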
report = generateReport(generateTableText)
report.save_and_compile("table_variants_text.tex")
report = generateReport(generateTableTextRemovedCols)
report.save_and_compile("table_variants_text_rc.tex")
report = generateReport(generateTableData)
report.save_and_compile("table_variants_data.tex")
| 58.852713
| 179
| 0.524236
|
4a124df741b7d80d59f024cc4707d50a94875eb1
| 1,127
|
py
|
Python
|
bgpvpn_dashboard/test/helpers.py
|
mail2nsrajesh/networking-bgpvpn
|
6b57aed6e8c6a9ff7a987a7a03d6a83ecd0f875e
|
[
"Apache-2.0"
] | null | null | null |
bgpvpn_dashboard/test/helpers.py
|
mail2nsrajesh/networking-bgpvpn
|
6b57aed6e8c6a9ff7a987a7a03d6a83ecd0f875e
|
[
"Apache-2.0"
] | null | null | null |
bgpvpn_dashboard/test/helpers.py
|
mail2nsrajesh/networking-bgpvpn
|
6b57aed6e8c6a9ff7a987a7a03d6a83ecd0f875e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test import helpers
from bgpvpn_dashboard.test.test_data import utils

def create_stubs(stubs_to_create=None):
    # Avoid a shared mutable default argument; fall back to an empty dict.
    return helpers.create_stubs(stubs_to_create or {})

class TestCase(helpers.TestCase):
def _setup_test_data(self):
super(TestCase, self)._setup_test_data()
utils.load_test_data(self)
class APITestCase(helpers.APITestCase):
def _setup_test_data(self):
super(APITestCase, self)._setup_test_data()
utils.load_test_data(self)
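
# Example usage (hypothetical; the attribute names depend on what
# bgpvpn_dashboard.test.test_data.utils.load_test_data registers on the
# test case):
#
#   class BgpvpnViewTests(TestCase):
#       def test_test_data_loaded(self):
#           # Data loaded in _setup_test_data() is available on self.
#           bgpvpns = self.bgpvpns.list()
#           self.assertIsNotNone(bgpvpns)
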
| 30.459459
| 78
| 0.736469
|