hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a06632565659f460636962a713d03adfd790204
| 3,680
|
py
|
Python
|
noise2seg/tests/test_n2v_utils.py
|
juglab/VoidSeg_cluster
|
71339f9bdd6df9feb26fa197d5dfc390c371910c
|
[
"BSD-2-Clause"
] | 1
|
2020-03-12T14:00:15.000Z
|
2020-03-12T14:00:15.000Z
|
noise2seg/tests/test_n2v_utils.py
|
juglab/VoidSeg_cluster
|
71339f9bdd6df9feb26fa197d5dfc390c371910c
|
[
"BSD-2-Clause"
] | null | null | null |
noise2seg/tests/test_n2v_utils.py
|
juglab/VoidSeg_cluster
|
71339f9bdd6df9feb26fa197d5dfc390c371910c
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
from csbdeep.utils import n2v_utils
def test_get_subpatch():
    """get_subpatch must cut a symmetric window of the given radius around a coordinate."""
    patch = np.arange(100).reshape(10, 10)
    subpatch_target = np.array([[11, 12, 13, 14, 15],
                                [21, 22, 23, 24, 25],
                                [31, 32, 33, 34, 35],
                                [41, 42, 43, 44, 45],
                                [51, 52, 53, 54, 55]])

    # np.array_equal replaces the original `np.sum(a - b) == 0` check, which
    # could pass for unequal arrays whose elementwise differences cancel out.
    subpatch_test = n2v_utils.get_subpatch(patch, (3, 3), 2)
    assert np.array_equal(subpatch_target, subpatch_test)

    # Radius 1 around the same center is the interior of the radius-2 window.
    subpatch_test = n2v_utils.get_subpatch(patch, (3, 3), 1)
    assert np.array_equal(subpatch_target[1:-1, 1:-1], subpatch_test)

    # Same contract in 3D.
    patch = np.arange(1000).reshape(10, 10, 10)
    subpatch_target = np.array([[[31, 32, 33],
                                 [41, 42, 43],
                                 [51, 52, 53]],
                                [[131, 132, 133],
                                 [141, 142, 143],
                                 [151, 152, 153]],
                                [[231, 232, 233],
                                 [241, 242, 243],
                                 [251, 252, 253]]])
    subpatch_test = n2v_utils.get_subpatch(patch, (1, 4, 2), 1)
    assert np.array_equal(subpatch_target, subpatch_test)
def test_random_neighbor():
    """random_neighbor must never return the query coordinate itself."""
    center = np.array([51, 52, 32])
    for bounds in ([128, 128, 128], [55, 53, 32]):
        for _ in range(1000):
            neighbor = n2v_utils.random_neighbor(bounds, center)
            # NOTE(review): this asserts *every* component differs, not merely
            # that the coordinate as a whole differs — confirm that matches
            # random_neighbor's contract.
            assert np.all(neighbor != center)
def test_pm_normal_neighbor_withoutCP():
    """Sampled replacement values must lie within the patch's value range."""
    patch = np.arange(100).reshape(10, 10)
    coord = np.array([2, 4])
    for _ in range(1000):
        assert 0 <= n2v_utils.pm_normal_withoutCP(patch, coord) < 100

    patch = np.arange(1000).reshape(10, 10, 10)
    coord = np.array([2, 4, 6])
    for _ in range(1000):
        assert 0 <= n2v_utils.pm_normal_withoutCP(patch, coord) < 1000
def test_pm_uniform_withCP():
    """pm_uniform_withCP(3) must draw values from within the patch, in 2D and 3D."""
    sampler = n2v_utils.pm_uniform_withCP(3)

    patch = np.arange(100).reshape(10, 10)
    coord = np.array([2, 4])
    for _ in range(1000):
        assert 0 <= sampler(patch, coord) < 100

    # The same sampler instance is reused for the 3D patch, as in the original.
    patch = np.arange(1000).reshape(10, 10, 10)
    coord = np.array([4, 5, 7])
    for _ in range(1000):
        assert 0 <= sampler(patch, coord) < 1000
def test_pm_normal_additive():
    """With scale 0 the additive sampler must return the pixel value unchanged."""
    sampler = n2v_utils.pm_normal_additive(0)

    patch = np.arange(100).reshape(10, 10)
    coord = np.array([2, 4])
    assert sampler(patch, coord) == patch[tuple(coord)]

    patch = np.arange(1000).reshape(10, 10, 10)
    coord = np.array([4, 5, 7])
    assert sampler(patch, coord) == patch[tuple(coord)]
def test_pm_normal_fitted():
    """The fitted-normal sampler must produce float values for 2D and 3D patches."""
    sampler = n2v_utils.pm_normal_fitted(3)

    patch = np.arange(100).reshape(10, 10)
    assert isinstance(sampler(patch, np.array([2, 4])), float)

    patch = np.arange(1000).reshape(10, 10, 10)
    assert isinstance(sampler(patch, np.array([4, 5, 7])), float)
def test_pm_identity():
    """The identity manipulator must return exactly the pixel at the coordinate."""
    sampler = n2v_utils.pm_identity

    patch = np.arange(100).reshape(10, 10)
    assert sampler(patch, np.array([2, 4])) == 24

    patch = np.arange(1000).reshape(10, 10, 10)
    assert sampler(patch, np.array([2, 4, 7])) == 247
| 24.052288
| 67
| 0.53913
|
4a0663892e022226cd63d78814286ac7a95ad8d9
| 643
|
py
|
Python
|
Seth/Grades/tasks.py
|
Inf1n1te/Seth
|
4ccfcba6226f3d284fd955cd0a81316402e8d043
|
[
"BSD-3-Clause"
] | 1
|
2020-08-09T01:26:31.000Z
|
2020-08-09T01:26:31.000Z
|
Seth/Grades/tasks.py
|
Inf1n1te/Seth
|
4ccfcba6226f3d284fd955cd0a81316402e8d043
|
[
"BSD-3-Clause"
] | 17
|
2017-11-15T10:06:02.000Z
|
2019-02-13T15:32:41.000Z
|
Seth/Grades/tasks.py
|
Inf1n1te/Seth
|
4ccfcba6226f3d284fd955cd0a81316402e8d043
|
[
"BSD-3-Clause"
] | null | null | null |
from celery.utils.log import get_task_logger
from django.core.mail import send_mail
# from mailing.mail import send_email
from Grades.models import Person
from Seth.celery import app
logger = get_task_logger(__name__)
@app.task()
def send_grade_email_task(students, subject, message, domain):
    """Celery task: email a grade notification to each student.

    students -- iterable of Person primary keys
    subject/message -- mail subject and body shared by all recipients
    domain -- domain used for the noreply sender address
    Returns True so the task result records completion.
    """
    student_emails = [person.email for person in Person.objects.filter(pk__in=students)]
    for email in student_emails:
        # One message per recipient so students never see each other's address.
        send_mail(
            from_email='noreply_seth@{}'.format(domain),
            recipient_list=[email],
            subject=subject,
            message=message,
        )
    # Log after the sends have happened and include how many went out; the
    # original logged "Sent grade emails" before any mail was actually sent.
    logger.info("Sent %d grade emails", len(student_emails))
    return True
| 26.791667
| 78
| 0.699844
|
4a06639b8276e1b63a1f324c5dafe28ac8a5bcbd
| 2,545
|
py
|
Python
|
_unittest/test_Setup.py
|
Zwl20085/PyAEDT-Motor
|
e50de4d96210c32f23647138421aa86f0d9ce554
|
[
"MIT"
] | null | null | null |
_unittest/test_Setup.py
|
Zwl20085/PyAEDT-Motor
|
e50de4d96210c32f23647138421aa86f0d9ce554
|
[
"MIT"
] | null | null | null |
_unittest/test_Setup.py
|
Zwl20085/PyAEDT-Motor
|
e50de4d96210c32f23647138421aa86f0d9ce554
|
[
"MIT"
] | null | null | null |
# standard imports
import os
# Setup paths for module imports
from _unittest.conftest import local_path, scratch_path
# Import required modules
from pyaedt import Hfss, Circuit
from pyaedt.generic.filesystem import Scratch
import gc
test_project_name = "coax_setup"
class TestClass:
    """HFSS/Circuit simulation-setup tests run against a scratch copy of coax_setup."""

    def setup_class(self):
        # Copy the example project (.aedt file plus its .aedb folder) into a
        # scratch area so the checked-in example models are never mutated.
        with Scratch(scratch_path) as self.local_scratch:
            try:
                example_project = os.path.join(
                    local_path, 'example_models', test_project_name + '.aedt')
                self.test_project = self.local_scratch.copyfile(example_project)
                self.local_scratch.copyfolder(
                    os.path.join(local_path, 'example_models', test_project_name + '.aedb'),
                    os.path.join(self.local_scratch.path, test_project_name + '.aedb'))
                self.aedtapp = Hfss(os.path.join(
                    self.local_scratch.path, test_project_name + '.aedt'))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; other setup failures are (as before) swallowed
                # here and surface when self.aedtapp is first used.
                pass

    def teardown_class(self):
        assert self.aedtapp.close_project(self.aedtapp.project_name)
        self.local_scratch.remove()
        gc.collect()

    def test_01_create_hfss_setup(self):
        """Create an HFSS driven setup and exercise its property dictionary."""
        setup1 = self.aedtapp.create_setup(
            "My_HFSS_Setup", self.aedtapp.SimulationSetupTypes.HFSSDrivenDefault)
        assert setup1.name == "My_HFSS_Setup"
        assert "SaveRadFieldsOnly" in setup1.props
        setup1.props["SaveRadFieldsOnly"] = True
        setup1.props["AdaptMultipleFreqs"] = True
        setup1.props["MultipleAdaptiveFreqsSetup"]["1GHz"] = [0.01]
        del setup1.props["MultipleAdaptiveFreqsSetup"]["5GHz"]
        setup1.update()
        setup1.disable()
        setup1.enable()

    def test_01b_create_hfss_sweep(self):
        """Add a frequency sweep to the existing setup and update its properties."""
        setup1 = self.aedtapp.get_setup("My_HFSS_Setup")
        assert self.aedtapp.get_setups()
        sweep1 = setup1.add_sweep("MyFrequencySweep")
        sweep1.props["RangeStart"] = "1Hz"
        sweep1.props["RangeEnd"] = "2GHz"
        assert sweep1.update()
        sweep1.props["Type"] = "Fast"
        sweep1.props["SaveFields"] = True
        assert sweep1.update()
        assert self.aedtapp.get_sweeps("My_HFSS_Setup")

    def test_02_create_circuit_setup(self):
        """Create a Nexxim LNA setup in a new Circuit design."""
        circuit = Circuit()
        setup1 = circuit.create_setup("circuit", self.aedtapp.SimulationSetupTypes.NexximLNA)
        assert setup1.name == "circuit"
        setup1.props["SweepDefinition"]['Data'] = 'LINC 0GHz 4GHz 501'
        setup1.update()
        setup1.disable()
        setup1.enable()
| 39.153846
| 118
| 0.647151
|
4a066416a6ab121b7960fedb53935dcdd3b8d068
| 1,138
|
py
|
Python
|
tests/test_validate_response_dataclass.py
|
patrickmckenna/fastapi
|
9c3c9b6e78768374868d690bc05918d58481e880
|
[
"MIT"
] | 2
|
2020-11-01T00:04:05.000Z
|
2021-07-21T06:32:20.000Z
|
tests/test_validate_response_dataclass.py
|
patrickmckenna/fastapi
|
9c3c9b6e78768374868d690bc05918d58481e880
|
[
"MIT"
] | 1
|
2019-11-02T22:03:59.000Z
|
2019-11-02T22:03:59.000Z
|
tests/test_validate_response_dataclass.py
|
patrickmckenna/fastapi
|
9c3c9b6e78768374868d690bc05918d58481e880
|
[
"MIT"
] | 1
|
2020-12-19T18:01:20.000Z
|
2020-12-19T18:01:20.000Z
|
from typing import List
import pytest
from fastapi import FastAPI
from pydantic import ValidationError
from pydantic.dataclasses import dataclass
from starlette.testclient import TestClient
app = FastAPI()
@dataclass
class Item:
    """Pydantic dataclass used as the response model under validation."""
    name: str
    # NOTE(review): a None default on a non-Optional annotation relies on
    # pydantic v1's implicit-Optional behavior; pydantic v2 rejects this —
    # confirm the pinned pydantic version before changing these annotations.
    price: float = None
    owner_ids: List[int] = None
@app.get("/items/invalid", response_model=Item)
def get_invalid():
    """Return a payload whose ``price`` cannot be coerced to ``float``."""
    bad_item = {"name": "invalid", "price": "foo"}
    return bad_item
@app.get("/items/innerinvalid", response_model=Item)
def get_innerinvalid():
    """Return a payload invalid both at the top level and inside a list field."""
    bad_item = {"name": "double invalid", "price": "foo", "owner_ids": ["foo", "bar"]}
    return bad_item
@app.get("/items/invalidlist", response_model=List[Item])
def get_invalidlist():
    """Return a list where every entry but the first has an uncastable price."""
    items = [{"name": "foo"},
             {"name": "bar", "price": "bar"},
             {"name": "baz", "price": "baz"}]
    return items
client = TestClient(app)
def test_invalid():
    """Response-model validation must fail server-side for /items/invalid."""
    with pytest.raises(ValidationError):
        client.get("/items/invalid")
def test_double_invalid():
    """Both the scalar and the nested-list errors must raise ValidationError."""
    with pytest.raises(ValidationError):
        client.get("/items/innerinvalid")
def test_invalid_list():
    """A List[Item] response model must validate every list element."""
    with pytest.raises(ValidationError):
        client.get("/items/invalidlist")
| 21.074074
| 82
| 0.669596
|
4a0664e4c5d08496a626fd08fec61ec53f4895cb
| 1,844
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/ansible/netcommon/plugins/modules/net_vlan.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 5
|
2020-12-16T21:42:09.000Z
|
2022-03-28T16:04:32.000Z
|
.ansible/collections/ansible_collections/ansible/netcommon/plugins/modules/net_vlan.py
|
chronicc/proving-ground
|
3e392122a05fb8383a3700954baebb0df330e9e3
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
.ansible/collections/ansible_collections/ansible/netcommon/plugins/modules/net_vlan.py
|
chronicc/proving-ground
|
3e392122a05fb8383a3700954baebb0df330e9e3
|
[
"MIT"
] | 2
|
2021-03-30T14:26:02.000Z
|
2021-04-01T18:17:29.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: net_vlan
author: Ricardo Carrillo Cruz (@rcarrillocruz)
short_description: (deprecated, removed after 2022-06-01) Manage VLANs on network
devices
description:
- This module provides declarative management of VLANs on network devices.
version_added: 1.0.0
deprecated:
alternative: Use platform-specific "[netos]_vlans" module
why: Updated modules released with more functionality
removed_at_date: '2022-06-01'
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
name:
description:
- Name of the VLAN.
vlan_id:
description:
- ID of the VLAN.
interfaces:
description:
- List of interfaces the VLAN should be configured on.
aggregate:
description: List of VLANs definitions.
purge:
description:
- Purge VLANs not defined in the I(aggregate) parameter.
default: false
state:
description:
- State of the VLAN configuration.
default: present
choices:
- present
- absent
- active
- suspend
"""
EXAMPLES = """
- name: configure VLAN ID and name
ansible.netcommon.net_vlan:
vlan_id: 20
name: test-vlan
- name: remove configuration
ansible.netcommon.net_vlan:
state: absent
- name: configure VLAN state
ansible.netcommon.net_vlan:
vlan_id:
state: suspend
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- vlan 20
- name test-vlan
"""
| 23.05
| 93
| 0.715835
|
4a0664e6f8c1d724d0016de0e443556a788e71f5
| 1,109
|
py
|
Python
|
schoolport/users/migrations/0003_auto_20210408_1548.py
|
yotink522/schoolport
|
c6cfd0230ca05fb44f77c2f27c7e200828547bd5
|
[
"MIT"
] | null | null | null |
schoolport/users/migrations/0003_auto_20210408_1548.py
|
yotink522/schoolport
|
c6cfd0230ca05fb44f77c2f27c7e200828547bd5
|
[
"MIT"
] | null | null | null |
schoolport/users/migrations/0003_auto_20210408_1548.py
|
yotink522/schoolport
|
c6cfd0230ca05fb44f77c2f27c7e200828547bd5
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-08 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the custom ``users.User`` model.

    Do not edit applied migrations by hand; create a new migration instead.
    """

    dependencies = [
        ('users', '0002_auto_20210407_2014'),
    ]

    operations = [
        # Reset the manager list declared on the model to empty.
        migrations.AlterModelManagers(
            name='user',
            managers=[
            ],
        ),
        # Remove the custom `userid` field.
        migrations.RemoveField(
            model_name='user',
            name='userid',
        ),
        migrations.AddField(
            model_name='user',
            name='is_admin',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=255, unique=True, verbose_name='email'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(max_length=255, unique=True),
        ),
    ]
| 25.790698
| 87
| 0.534716
|
4a0665347f94cb86b22aedf285d2af620027fe9f
| 5,811
|
py
|
Python
|
src/robot/libdocpkg/htmlwriter.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 7
|
2015-02-25T10:55:02.000Z
|
2015-11-04T03:20:05.000Z
|
src/robot/libdocpkg/htmlwriter.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 12
|
2015-02-24T17:00:06.000Z
|
2015-07-31T08:32:07.000Z
|
src/robot/libdocpkg/htmlwriter.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 2
|
2015-12-15T11:00:35.000Z
|
2018-02-24T18:11:24.000Z
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from six.moves.urllib_parse import quote as urlquote
from robot.errors import DataError
from robot.htmldata import HtmlFileWriter, ModelWriter, JsonWriter, LIBDOC
from robot.utils import get_timestamp, html_escape, html_format, NormalizedDict
from robot.utils.htmlformatters import HeaderFormatter
class LibdocHtmlWriter(object):
    """Write a libdoc model to an output stream as a standalone HTML page."""

    def write(self, libdoc, output):
        """Render *libdoc* into *output* using the LIBDOC HTML template."""
        HtmlFileWriter(output, LibdocModelWriter(output, libdoc)).write(LIBDOC)
class LibdocModelWriter(ModelWriter):
    """ModelWriter that embeds the converted libdoc model as inline JavaScript."""

    def __init__(self, output, libdoc):
        self._output = output
        formatter = DocFormatter(libdoc.keywords, libdoc.doc, libdoc.doc_format)
        # Pre-convert the whole model to JSON-compatible data at construction.
        self._libdoc = JsonConverter(formatter).convert(libdoc)

    def write(self, line):
        # `line` is accepted to satisfy the ModelWriter interface but unused here.
        self._output.write('<script type="text/javascript">\n')
        self.write_data()
        self._output.write('</script>\n')

    def write_data(self):
        # Serializes the model as `libdoc = {...}` for the template's JS to read.
        JsonWriter(self._output).write_json('libdoc = ', self._libdoc)
class JsonConverter(object):
    """Turn a libdoc model into plain dicts/lists ready for JSON encoding."""

    def __init__(self, doc_formatter):
        self._doc_formatter = doc_formatter

    def convert(self, libdoc):
        """Return the library-level documentation as a JSON-compatible dict."""
        return {
            'name': libdoc.name,
            'doc': self._doc_formatter.html(libdoc.doc, intro=True),
            'version': libdoc.version,
            'named_args': libdoc.named_args,
            'scope': libdoc.scope,
            'generated': get_timestamp(daysep='-', millissep=None),
            'inits': self._get_keywords(libdoc.inits),
            'keywords': self._get_keywords(libdoc.keywords),
            'all_tags': tuple(libdoc.all_tags),
            'contains_tags': bool(libdoc.all_tags),
        }

    def _get_keywords(self, keywords):
        return [self._convert_keyword(keyword) for keyword in keywords]

    def _convert_keyword(self, keyword):
        # `matched` is always True initially; the UI toggles it when filtering.
        return {
            'name': keyword.name,
            'args': keyword.args,
            'doc': self._doc_formatter.html(keyword.doc),
            'shortdoc': keyword.shortdoc,
            'tags': tuple(keyword.tags),
            'matched': True,
        }
class DocFormatter(object):
    """Convert documentation text to HTML and link `name` references to anchors."""

    # Matches h2-h4 headings produced by the doc-to-HTML conversion.
    _header_regexp = re.compile(r'<h([234])>(.+?)</h\1>')
    # Matches `backtick` references in already-converted documentation.
    _name_regexp = re.compile('`(.+?)`')

    def __init__(self, keywords, introduction, doc_format='ROBOT'):
        self._doc_to_html = DocToHtml(doc_format)
        # Header-derived targets are only collected for ROBOT-format docs.
        self._targets = self._get_targets(keywords, introduction,
                                          robot_format=doc_format == 'ROBOT')

    def _get_targets(self, keywords, introduction, robot_format):
        # Well-known section names plus every keyword name become link targets.
        targets = {
            'introduction': 'Introduction',
            'library introduction': 'Introduction',
            'importing': 'Importing',
            'library importing': 'Importing',
            'shortcuts': 'Shortcuts',
            'keywords': 'Keywords'
        }
        for kw in keywords:
            targets[kw.name] = kw.name
        if robot_format:
            for header in self._yield_header_targets(introduction):
                targets[header] = header
        return self._escape_and_encode_targets(targets)

    def _yield_header_targets(self, introduction):
        # Yields the text of each ROBOT-format header line in the introduction.
        headers = HeaderFormatter()
        for line in introduction.splitlines():
            match = headers.match(line)
            if match:
                yield match.group(2)

    def _escape_and_encode_targets(self, targets):
        # Keys are HTML-escaped for matching; values become URI fragments.
        return NormalizedDict((html_escape(key), self._encode_uri_component(value))
                              for key, value in targets.items())

    def _encode_uri_component(self, value):
        # Emulates encodeURIComponent javascript function
        return urlquote(value.encode('UTF-8'), safe="-_.!~*'()")

    def html(self, doc, intro=False):
        """Convert *doc* to HTML; for the intro, also give headers id anchors."""
        doc = self._doc_to_html(doc)
        if intro:
            # NOTE(review): the header text (\2) is substituted into the id
            # attribute without URI-encoding, unlike the targets above — verify
            # headers with special characters still link correctly.
            doc = self._header_regexp.sub(r'<h\1 id="\2">\2</h\1>', doc)
        return self._name_regexp.sub(self._link_keywords, doc)

    def _link_keywords(self, match):
        # Known targets become anchor links; unknown names keep name styling.
        name = match.group(1)
        if name in self._targets:
            return '<a href="#%s" class="name">%s</a>' % (self._targets[name], name)
        return '<span class="name">%s</span>' % name
class DocToHtml(object):
    """Callable that converts documentation text to HTML in a chosen format."""

    def __init__(self, doc_format):
        self._formatter = self._get_formatter(doc_format)

    def _get_formatter(self, doc_format):
        formatters = {'ROBOT': html_format,
                      'TEXT': self._format_text,
                      'HTML': self._format_html,
                      'REST': self._format_rest}
        if doc_format not in formatters:
            raise DataError("Invalid documentation format '%s'." % doc_format)
        return formatters[doc_format]

    def _format_text(self, doc):
        # Escape and preserve whitespace so plain text renders verbatim.
        return '<p style="white-space: pre-wrap">%s</p>' % html_escape(doc)

    def _format_html(self, doc):
        return '<div style="margin: 0">%s</div>' % doc

    def _format_rest(self, doc):
        # docutils is an optional dependency, imported only when reST is used.
        try:
            from docutils.core import publish_parts
        except ImportError:
            raise DataError("reST format requires 'docutils' module to be installed.")
        parts = publish_parts(doc, writer_name='html')
        return self._format_html(parts['html_body'])

    def __call__(self, doc):
        return self._formatter(doc)
| 35.650307
| 86
| 0.6307
|
4a0665365d92f89ec8eb6b8c0a509bb0a6d05d62
| 3,945
|
py
|
Python
|
src/main/python/previewr/server.py
|
raphiz/previewr
|
a649469c46eae87721ed147a9cdd9234edcefc09
|
[
"MIT"
] | null | null | null |
src/main/python/previewr/server.py
|
raphiz/previewr
|
a649469c46eae87721ed147a9cdd9234edcefc09
|
[
"MIT"
] | 2
|
2015-03-11T18:16:55.000Z
|
2015-03-12T07:12:46.000Z
|
src/main/python/previewr/server.py
|
raphiz/previewr
|
a649469c46eae87721ed147a9cdd9234edcefc09
|
[
"MIT"
] | null | null | null |
from previewr.utils import *
from previewr.processors import *
from tornado.web import StaticFileHandler
import logging
import os.path
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from tornado.options import options
class Application(tornado.web.Application):
    """Tornado application that serves a live-reloading preview of one file.

    The constructor parses the command line itself and calls exit(1) when no
    file argument is given, so instantiation has process-level side effects.
    """

    def __init__(self):
        settings = dict(
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
        )
        handlers = [
            (r"/", MainHandler),
            (r"/update", UpdateSocketHandler),
            # Fallback: anything else is served statically relative to the
            # current working directory (e.g. images referenced by the file).
            (r"/(.*)", StaticFileHandler, {"path": os.getcwd()}),
        ]
        # Get the file to preview from the CLI parameters...
        args = tornado.options.parse_command_line()
        # Verify the argument is present!
        if len(args) != 1:
            print("You must provide exactly one file to preview")
            exit(1)
        self.file_to_preview = os.path.abspath(args[0])
        # Initialize the poller and scheduler: poll the file every 0.25s and,
        # on change, re-render it and push the result to connected clients.
        self.processor = self._get_processor()(self.file_to_preview)
        self.poller = FilePoller(self.file_to_preview, self.update_client_html)
        self.scheduler = Scheduler(0.25, self.poller.poll)
        # Call parent constructor
        tornado.web.Application.__init__(self, handlers, **settings)

    def _get_processor(self):
        """
        Selects the processor to use and returns it.

        With --format=auto the processor is chosen from the file itself;
        otherwise the named processor is looked up and a missing name is fatal.
        """
        processor_name = options.format
        if processor_name == "auto":
            return Processors.select_applicable_processor(self.file_to_preview)
        processor = Processors.get_processor_by_name(processor_name)
        if processor is None:
            raise Exception("No Processor called %s" % processor_name)
        return processor

    def serve(self):
        """
        Starts to serve the application (blocks in the IOLoop).
        """
        self.listen(options.port)
        self.scheduler.start()
        logging.info("Running at http://localhost:%s" % options.port)
        tornado.ioloop.IOLoop.instance().start()

    def shutdown(self):
        """
        Shuts down the application by stopping the polling scheduler.
        """
        self.scheduler.stop()

    def update_client_html(self):
        """
        This method does re-process the file to watch and updates all clients.
        """
        res = self.processor.process()
        UpdateSocketHandler.notify_clients(res)
class MainHandler(tornado.web.RequestHandler):
    """Serves the index page with an initially rendered preview of the file."""

    def get(self):
        app = self.application
        self.render("index.html",
                    contents=app.processor.process(),
                    filename=app.file_to_preview)
class MainResourceHandler(tornado.web.RequestHandler):
    """
    Handler that only prints its path argument.

    NOTE(review): not registered in Application's handler table and its body is
    just a print — this looks like leftover debug code; confirm before removal.
    """
    def get(self, a):
        print(a)
class UpdateSocketHandler(tornado.websocket.WebSocketHandler):
    """
    WebSocket Handler to allow server push if the file to observe has changed.
    Attributes:
        clients All clients to notify when the file has changed
    """
    clients = set()

    def allow_draft76(self):
        # for iOS 5.0 Safari
        return True

    def open(self):
        logging.debug("New connection opened")
        UpdateSocketHandler.clients.add(self)

    def on_close(self):
        logging.debug("Connection closed to a waiter")
        UpdateSocketHandler.clients.remove(self)

    @classmethod
    def notify_clients(cls, msg):
        """
        Sends the given HTML message to all registered clients.
        """
        logging.debug("sending update broadcast to %d waiters", len(cls.clients))
        # Iterate over a snapshot: a failed write can close the connection and
        # mutate `clients` mid-loop, which would raise RuntimeError on the set.
        for client in tuple(cls.clients):
            try:
                client.write_message(msg)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt still
                # propagates; per-client send failures are logged, not fatal.
                logging.error("Error sending message", exc_info=True)
| 29.886364
| 81
| 0.632446
|
4a06661c33a2389a29c1b3a2e88aa7f1172f6c62
| 786
|
py
|
Python
|
app/utils.py
|
pybrgr/cvrp-poc
|
ae2a2bd23c3cfc602a4e7b66ede2384d4c454bb9
|
[
"MIT"
] | null | null | null |
app/utils.py
|
pybrgr/cvrp-poc
|
ae2a2bd23c3cfc602a4e7b66ede2384d4c454bb9
|
[
"MIT"
] | null | null | null |
app/utils.py
|
pybrgr/cvrp-poc
|
ae2a2bd23c3cfc602a4e7b66ede2384d4c454bb9
|
[
"MIT"
] | null | null | null |
from flask import url_for as _url_for, current_app, _request_ctx_stack
import time
import os
root_dir = os.path.dirname(os.path.abspath(__file__))
def timestamp():
    """Return the current Unix time as a whole number of seconds."""
    now = time.time()
    return int(now)
def url_for(*args, **kwargs):
    """
    url_for replacement that works even when there is no request context.

    Defaults `_external` to False, and when called outside a request context
    builds the URL inside a temporary test request context instead of failing.
    Raises RuntimeError if an external URL is requested without a context.
    """
    if '_external' not in kwargs:
        kwargs['_external'] = False
    # NOTE(review): `_request_ctx_stack` is a deprecated Flask internal
    # (removed in Flask 2.3) — confirm the pinned Flask version before upgrading.
    reqctx = _request_ctx_stack.top
    if reqctx is None:
        # External URLs need a server name, which only a request provides.
        if kwargs['_external']:
            raise RuntimeError('Cannot generate external URLs without a '
                               'request context.')
        with current_app.test_request_context():
            return _url_for(*args, **kwargs)
    return _url_for(*args, **kwargs)
| 31.44
| 73
| 0.651399
|
4a0666ce711a29347be7d33d362487398d5efb0c
| 2,602
|
py
|
Python
|
master-node-docker/sentinel/eth/erc20.py
|
baymax19/Sentinel
|
69b95171fa7aa911ea918f79954a9d3a66bf00a5
|
[
"MIT"
] | null | null | null |
master-node-docker/sentinel/eth/erc20.py
|
baymax19/Sentinel
|
69b95171fa7aa911ea918f79954a9d3a66bf00a5
|
[
"MIT"
] | null | null | null |
master-node-docker/sentinel/eth/erc20.py
|
baymax19/Sentinel
|
69b95171fa7aa911ea918f79954a9d3a66bf00a5
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import rlp
from ethereum.transactions import Transaction
from .eth import eth_manager
from ..config import MAIN_TOKENS
from ..config import MAX_TX_TRY
from ..config import RINKEBY_TOKENS
class ERC20Manager(object):
    """Thin wrapper around one deployed ERC20 token contract on one network."""

    def __init__(self, net, name, address, abi):
        # `net` is a network wrapper exposing a `web3` instance
        # (see the eth_manager entries used at module level below).
        self.net = net
        self.address = address
        self.contract = net.web3.eth.contract(
            contract_name=name, abi=abi, address=address)

    def get_balance(self, account_addr):
        """Read balanceOf(account_addr) via a raw eth_call.

        Returns an (error, balance) pair where exactly one element is None;
        errors are reported as {'code': 201, 'error': ...} dicts.
        """
        try:
            caller_object = {
                'from': account_addr,
                'to': self.address,
                'data': self.net.web3.toHex(
                    self.net.web3.toBytes(hexstr=self.contract.encodeABI(fn_name='balanceOf', args=[account_addr])))
            }
            balance = self.net.web3.toInt(
                hexstr=self.net.web3.eth.call(caller_object))
        except Exception as err:
            return {'code': 201, 'error': str(err)}, None
        return None, balance

    def transfer_amount(self, to_addr, amount, private_key, nonce):
        """Sign and broadcast a transfer(to_addr, amount) transaction.

        Retries up to MAX_TX_TRY times with an incremented nonce, but only for
        errors containing JSON-RPC code -32000 (presumably nonce conflicts —
        TODO confirm); any other error aborts immediately.
        Returns an (error, tx_hash) pair where exactly one element is None.
        """
        count, tx_hash = 0, None
        while count < MAX_TX_TRY:
            try:
                tx = Transaction(nonce=nonce + count,
                                 gasprice=self.net.web3.eth.gasPrice,
                                 startgas=1000000,
                                 to=self.address,
                                 value=0,
                                 data=self.net.web3.toBytes(
                                     hexstr=self.contract.encodeABI(fn_name='transfer', args=[to_addr, amount])))
                tx.sign(private_key)
                raw_tx = self.net.web3.toHex(rlp.encode(tx))
                tx_hash = self.net.web3.eth.sendRawTransaction(raw_tx)
                # NOTE(review): if sendRawTransaction ever returned an empty
                # hash without raising, `count` would not advance and this loop
                # would spin forever — verify that cannot happen.
                if len(tx_hash) > 0:
                    break
            except Exception as err:
                err = str(err)
                if '-32000' in err:
                    count += 1
                if (count >= MAX_TX_TRY) or ('-32000' not in err):
                    return {'code': 202, 'error': err}, None
        return None, tx_hash
# Registry of ERC20Manager instances keyed by token symbol, one map per network.
# NOTE(review): "erc20_manger" (sic) is the name importers presumably use —
# renaming it would break callers, so the typo is left in place.
erc20_manger = {
    'main': {},
    'rinkeby': {}
}
for symbol in MAIN_TOKENS.keys():
    token = MAIN_TOKENS[symbol]
    erc20_manger['main'][symbol] = ERC20Manager(eth_manager['main'], token['name'], token['address'], token['abi'])
for symbol in RINKEBY_TOKENS.keys():
    token = RINKEBY_TOKENS[symbol]
    erc20_manger['rinkeby'][symbol] = ERC20Manager(eth_manager['rinkeby'], token['name'], token['address'],
                                                   token['abi'])
| 38.264706
| 116
| 0.537663
|
4a0667161f32b0dc4f2dc654b10f76265c556f16
| 3,415
|
py
|
Python
|
dr_twitter/dr_twitter/settings.py
|
squadran2003/dr_twitter
|
8fa8592a2f343853be47aa213c463a6d026cc96c
|
[
"MIT"
] | null | null | null |
dr_twitter/dr_twitter/settings.py
|
squadran2003/dr_twitter
|
8fa8592a2f343853be47aa213c463a6d026cc96c
|
[
"MIT"
] | 4
|
2021-06-08T21:50:45.000Z
|
2022-03-12T00:36:42.000Z
|
dr_twitter/dr_twitter/settings.py
|
squadran2003/dr_twitter
|
8fa8592a2f343853be47aa213c463a6d026cc96c
|
[
"MIT"
] | null | null | null |
"""
Django settings for dr_twitter project.

Generated by 'django-admin startproject' using Django 2.1.7.

For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'y71+blibg($9uj5zt0xl@ok7t632pyecd(=3gv0&z8e#0sp#j$'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty while DEBUG=True (Django then allows localhost); production host
# names must be added here before DEBUG is turned off.
ALLOWED_HOSTS = [
]

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'rest_framework',
    'rest_framework.authtoken',
    'tweets',
    'accounts'
]

# NOTE(review): 'corsheaders' is installed above but
# 'corsheaders.middleware.CorsMiddleware' is absent from this list, so CORS
# headers are presumably not emitted — verify this is intentional.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'dr_twitter.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            'templates',
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'dr_twitter.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "assets"),
]

LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = '/login/'
| 24.392857
| 91
| 0.689605
|
4a06682d58f886c35d00da16aa257233c2ac3f4a
| 2,435
|
py
|
Python
|
python3/koans/about_tuples.py
|
abylgazievaalt/Koans
|
05d7c04915674b776a8b01f1231d29e8e34ec62b
|
[
"MIT"
] | null | null | null |
python3/koans/about_tuples.py
|
abylgazievaalt/Koans
|
05d7c04915674b776a8b01f1231d29e8e34ec62b
|
[
"MIT"
] | null | null | null |
python3/koans/about_tuples.py
|
abylgazievaalt/Koans
|
05d7c04915674b776a8b01f1231d29e8e34ec62b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
    """Koan exercises covering tuple creation, immutability and nesting."""

    def test_creating_a_tuple(self):
        count_of_three = (1, 2, 5)
        self.assertEqual(5, count_of_three[2])

    def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
        count_of_three = (1, 2, 5)
        try:
            count_of_three[2] = "three"
        except TypeError as ex:
            msg = ex.args[0]
            # Note, assertRegex() uses regular expression pattern matching,
            # so you don't have to copy the whole message.
            self.assertRegex(msg, 'object does not support item assignment')

    def test_tuples_are_immutable_so_appending_is_not_possible(self):
        count_of_three = (1, 2, 5)
        with self.assertRaises(AttributeError): count_of_three.append("boom")

    # Tuples are less flexible than lists, but faster.
    def test_tuples_can_only_be_changed_through_replacement(self):
        count_of_three = (1, 2, 5)
        list_count = list(count_of_three)
        list_count.append("boom")
        count_of_three = tuple(list_count)
        self.assertEqual((1, 2, 5, "boom"), count_of_three)

    def test_tuples_of_one_look_peculiar(self):
        # The trailing comma, not the parentheses, makes a one-element tuple.
        self.assertEqual(int, (1).__class__)
        self.assertEqual(tuple, (1,).__class__)
        self.assertEqual(tuple, ("I'm a tuple",).__class__)
        self.assertEqual(str, ("Not a tuple").__class__)

    def test_tuple_constructor_can_be_surprising(self):
        # tuple(iterable) iterates its argument, so a string becomes characters.
        self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))

    def test_creating_empty_tuples(self):
        self.assertEqual((), ())
        self.assertEqual(() , tuple()) #Sometimes less confusing

    def test_tuples_can_be_embedded(self):
        lat = (37, 14, 6, 'N')
        lon = (115, 48, 40, 'W')
        place = ('Area 51', lat, lon)
        self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place)

    def test_tuples_are_good_for_representing_records(self):
        locations = [
            ("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
            ("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
            #("Cthulu", (26, 40, 1, 'N'), (70, 45, 7, 'W'))
        ]
        locations.append( ("Cthulu", (26, 40, 1, 'N'), (70, 45, 7, 'W')) )
        self.assertEqual("Cthulu", locations[2][0])
        self.assertEqual(15.56, locations[0][1][2])
| 35.289855
| 91
| 0.6
|
4a066997e632350253901efe28400028c8604a17
| 1,214
|
py
|
Python
|
eva_storage/src/interface.py
|
jaehobang/Eva
|
e7f649990b8bca3bc29b3832c0ecf32efb402647
|
[
"Apache-2.0"
] | null | null | null |
eva_storage/src/interface.py
|
jaehobang/Eva
|
e7f649990b8bca3bc29b3832c0ecf32efb402647
|
[
"Apache-2.0"
] | null | null | null |
eva_storage/src/interface.py
|
jaehobang/Eva
|
e7f649990b8bca3bc29b3832c0ecf32efb402647
|
[
"Apache-2.0"
] | null | null | null |
"""
This file will serve as an interface of the API for users.
TODO: We need to define the format of the database so that adapters for each dataset can be created
@Jaeho Bang
"""
import abc #abstract class package
class Interface(abc.ABC):
@abc.abstractmethod
def save_video(self, video_name, *options):
"""
:param video_name: name of the video
:param options:
This parameter will need to pack a lot of information, here are examples of what is needed
1. whether the video is compressed or not
2. if not compressed, give me the frames
3. if compressed, give me the location to the file
4. whether there are annotations
5. if annotations, give me the annotations in a pandas table format
:return:
"""
pass
@abc.abstractmethod
def load_video(self, video_name, *options):
"""
:param filename: name of the video
:param options:
1. Whether you want the compressed format
2. Whether you want it in the uncompressed format
3. Whether you want just the frames
4. Whether you want all the annotations (this is going to be a all or none approach)
:return:
"""
pass
| 26.977778
| 100
| 0.667216
|
4a0669a1ab0b57c785c79f200827c8b0eec9617d
| 8,014
|
py
|
Python
|
Tools/scripts/mailerdaemon.py
|
marcosptf/cpython-2.0.1
|
73c739a764e8b1dc84640e73b880bc66e1916bca
|
[
"PSF-2.0"
] | 5
|
2022-03-26T21:53:36.000Z
|
2022-03-30T21:47:20.000Z
|
Tools/scripts/mailerdaemon.py
|
marcosptf/cpython-2.0.1
|
73c739a764e8b1dc84640e73b880bc66e1916bca
|
[
"PSF-2.0"
] | 6
|
2020-11-18T15:48:14.000Z
|
2021-05-03T21:20:50.000Z
|
Tools/scripts/mailerdaemon.py
|
marcosptf/cpython-2.0.1
|
73c739a764e8b1dc84640e73b880bc66e1916bca
|
[
"PSF-2.0"
] | 2
|
2015-07-16T08:14:13.000Z
|
2022-03-27T01:55:17.000Z
|
"""mailerdaemon - classes to parse mailer-daemon messages"""
import string
import rfc822
import calendar
import re
import os
import sys
Unparseable = 'mailerdaemon.Unparseable'
class ErrorMessage(rfc822.Message):
def __init__(self, fp):
rfc822.Message.__init__(self, fp)
self.sub = ''
def is_warning(self):
sub = self.getheader('Subject')
if not sub:
return 0
sub = string.lower(sub)
if sub[:12] == 'waiting mail': return 1
if string.find(sub, 'warning') >= 0: return 1
self.sub = sub
return 0
def get_errors(self):
for p in EMPARSERS:
self.rewindbody()
try:
return p(self.fp, self.sub)
except Unparseable:
pass
raise Unparseable
# List of re's or tuples of re's.
# If a re, it should contain at least a group (?P<email>...) which
# should refer to the email address. The re can also contain a group
# (?P<reason>...) which should refer to the reason (error message).
# If no reason is present, the emparse_list_reason list is used to
# find a reason.
# If a tuple, the tuple should contain 2 re's. The first re finds a
# location, the second re is repeated one or more times to find
# multiple email addresses. The second re is matched (not searched)
# where the previous match ended.
# The re's are compiled using the re module.
emparse_list_list = [
'error: (?P<reason>unresolvable): (?P<email>.+)',
('----- The following addresses had permanent fatal errors -----\n',
'(?P<email>[^ \n].*)\n( .*\n)?'),
'remote execution.*\n.*rmail (?P<email>.+)',
('The following recipients did not receive your message:\n\n',
' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
'------- Failure Reasons --------\n\n(?P<reason>.*)\n(?P<email>.*)',
'^<(?P<email>.*)>:\n(?P<reason>.*)',
'^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
'^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
'^Original-Recipient: rfc822;(?P<email>.*)',
'^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
'^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
'^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
'^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
'^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
]
# compile the re's in the list and store them in-place.
for i in range(len(emparse_list_list)):
x = emparse_list_list[i]
if type(x) is type(''):
x = re.compile(x, re.MULTILINE)
else:
xl = []
for x in x:
xl.append(re.compile(x, re.MULTILINE))
x = tuple(xl)
del xl
emparse_list_list[i] = x
del x
del i
# list of re's used to find reasons (error messages).
# if a string, "<>" is replaced by a copy of the email address.
# The expressions are searched for in order. After the first match,
# no more expressions are searched for. So, order is important.
emparse_list_reason = [
r'^5\d{2} <>\.\.\. (?P<reason>.*)',
'<>\.\.\. (?P<reason>.*)',
re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
]
emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
def emparse_list(fp, sub):
data = fp.read()
res = emparse_list_from.search(data)
if res is None:
from_index = len(data)
else:
from_index = res.start(0)
errors = []
emails = []
reason = None
for regexp in emparse_list_list:
if type(regexp) is type(()):
res = regexp[0].search(data, 0, from_index)
if res is not None:
try:
reason = res.group('reason')
except IndexError:
pass
while 1:
res = regexp[1].match(data, res.end(0), from_index)
if res is None:
break
emails.append(res.group('email'))
break
else:
res = regexp.search(data, 0, from_index)
if res is not None:
emails.append(res.group('email'))
try:
reason = res.group('reason')
except IndexError:
pass
break
if not emails:
raise Unparseable
if not reason:
reason = sub
if reason[:15] == 'returned mail: ':
reason = reason[15:]
for regexp in emparse_list_reason:
if type(regexp) is type(''):
for i in range(len(emails)-1,-1,-1):
email = emails[i]
exp = re.compile(string.join(string.split(regexp, '<>'), re.escape(email)), re.MULTILINE)
res = exp.search(data)
if res is not None:
errors.append(string.join(string.split(string.strip(email)+': '+res.group('reason'))))
del emails[i]
continue
res = regexp.search(data)
if res is not None:
reason = res.group('reason')
break
for email in emails:
errors.append(string.join(string.split(string.strip(email)+': '+reason)))
return errors
EMPARSERS = [emparse_list, ]
def sort_numeric(a, b):
a = string.atoi(a)
b = string.atoi(b)
if a < b: return -1
elif a > b: return 1
else: return 0
def parsedir(dir, modify):
os.chdir(dir)
pat = re.compile('^[0-9]*$')
errordict = {}
errorfirst = {}
errorlast = {}
nok = nwarn = nbad = 0
# find all numeric file names and sort them
files = filter(lambda fn, pat=pat: pat.match(fn) is not None, os.listdir('.'))
files.sort(sort_numeric)
for fn in files:
# Lets try to parse the file.
fp = open(fn)
m = ErrorMessage(fp)
sender = m.getaddr('From')
print '%s\t%-40s\t'%(fn, sender[1]),
if m.is_warning():
fp.close()
print 'warning only'
nwarn = nwarn + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
continue
try:
errors = m.get_errors()
except Unparseable:
print '** Not parseable'
nbad = nbad + 1
fp.close()
continue
print len(errors), 'errors'
# Remember them
for e in errors:
try:
mm, dd = m.getdate('date')[1:1+2]
date = '%s %02d' % (calendar.month_abbr[mm], dd)
except:
date = '??????'
if not errordict.has_key(e):
errordict[e] = 1
errorfirst[e] = '%s (%s)' % (fn, date)
else:
errordict[e] = errordict[e] + 1
errorlast[e] = '%s (%s)' % (fn, date)
fp.close()
nok = nok + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
print '--------------'
print nok, 'files parsed,',nwarn,'files warning-only,',
print nbad,'files unparseable'
print '--------------'
list = []
for e in errordict.keys():
list.append((errordict[e], errorfirst[e], errorlast[e], e))
list.sort()
for num, first, last, e in list:
print '%d %s - %s\t%s' % (num, first, last, e)
def main():
modify = 0
if len(sys.argv) > 1 and sys.argv[1] == '-d':
modify = 1
del sys.argv[1]
if len(sys.argv) > 1:
for folder in sys.argv[1:]:
parsedir(folder, modify)
else:
parsedir('/ufs/jack/Mail/errorsinbox', modify)
if __name__ == '__main__' or sys.argv[0] == __name__:
main()
| 33.531381
| 110
| 0.523584
|
4a0669acd902c76a77d49a47da8f099411b93b70
| 6,538
|
py
|
Python
|
notebooks/main.py
|
lgblkb/nu_abda
|
59174f17037fdfae870e2bf1fea6a8f70c8c78b8
|
[
"MIT"
] | null | null | null |
notebooks/main.py
|
lgblkb/nu_abda
|
59174f17037fdfae870e2bf1fea6a8f70c8c78b8
|
[
"MIT"
] | 2
|
2021-06-08T21:26:27.000Z
|
2021-09-08T01:58:42.000Z
|
notebooks/main.py
|
lgblkb/nu_abda
|
59174f17037fdfae870e2bf1fea6a8f70c8c78b8
|
[
"MIT"
] | null | null | null |
import more_itertools as mit
import os
from functools import partial
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import wandb
from box import Box
from lgblkb_tools import logger
from lgblkb_tools.visualize import Plotter
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import Dataset, random_split, DataLoader
from models.lgblkb_model import TheModel
from src import data_folder
from src.utils import make_train_step
import imgaug.augmenters as iaa
is_cuda_available = torch.cuda.is_available()
logger.info('is_cuda_available: %s', is_cuda_available)
if not is_cuda_available:
raise SystemError
device = 'cuda' if is_cuda_available else 'cpu'
image_size = (32, 32)
class TheDataset(Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __getitem__(self, item):
return self.x[item], self.y[item]
def __len__(self):
return len(self.y)
def create_data():
train_df = pd.read_csv(data_folder['raw']['bda-image-challenge-train.txt'], header=None)
images = train_df.values.reshape((-1, *image_size))
mask_image = np.zeros(image_size)
mask_image[8:24, 8:24] = 1
# data_shape = (-1, np.product(image_size))
x = (images * (1 - mask_image) + mask_image) # .reshape(data_shape)
y = images # .reshape(data_shape)
x = np.expand_dims(x, axis=1)
y = np.expand_dims(y, axis=1)
x_tensor = torch.from_numpy(x).float()
y_tensor = torch.from_numpy(y).float()
data = TheDataset(x_tensor, y_tensor)
return data
def aug_sequencer(images, seed):
return iaa.Sequential(
[iaa.Rot90((0, 3), keep_size=False, seed=seed),
iaa.Fliplr(0.5, seed=seed),
iaa.Flipud(0.5, seed=seed),
# iaa.GaussianBlur(),
],
random_order=True,
seed=seed
)(images=images)
def augment_batch(batch, seed):
batch = aug_sequencer(batch.data.numpy().reshape(-1, *image_size), seed=seed)
batch = np.expand_dims(np.stack(batch), axis=1)
batch = torch.from_numpy(batch)
return batch
@logger.trace()
def train():
torch.manual_seed(369)
dataset = create_data()
train_val_fractions = [0.8, 0.2]
lenghts = [int(np.round(len(dataset) * fraction)) for fraction in train_val_fractions]
train_dataset, val_dataset = random_split(dataset, lenghts)
train_batch_size = int(len(train_dataset) / 5)
logger.info("train_batch_size: %s", train_batch_size)
train_loader = DataLoader(dataset=train_dataset, batch_size=train_batch_size, shuffle=True, pin_memory=True)
val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=True, pin_memory=True)
wandb.init(project="bda_project")
model = TheModel().to(device)
# model.load_state_dict(torch.load(model_state_savepath))
wandb.watch(model)
learning_rate = 1e-3
loss_fn = nn.MSELoss(reduction='sum')
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = ReduceLROnPlateau(optimizer, 'min')
train_step = make_train_step(model, loss_fn, optimizer)
for epoch in range(200):
training_losses = list()
for x_batch_init, y_batch_init in train_loader:
# for pair in zip(x_batch, y_batch):
# Plotter(*pair)
# raise NotImplementedError
for batch_idx in range(8):
seed = np.random.randint(0, 100000000)
x_batch = augment_batch(x_batch_init, seed)
y_batch = augment_batch(y_batch_init, seed)
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
training_loss = train_step(x_batch, y_batch)
training_losses.append(training_loss)
train_loss_average = np.mean(training_losses) / train_batch_size
wandb.log({"Training loss (average)": train_loss_average})
if epoch % 20 == 0:
scheduler.step(train_loss_average)
val_losses = list()
model.eval()
with torch.no_grad():
worst_example = Box()
for x_val, y_val in val_loader:
x_val = x_val.to(device)
y_val = y_val.to(device)
yhat_val = model(x_val)
val_loss = loss_fn(y_val, yhat_val).item()
val_losses.append(val_loss)
if worst_example.get('val_loss', 0) > val_loss: continue
worst_example.x_image = x_val.detach().data.reshape(image_size)
worst_example.y_image = y_val.detach().data.reshape(image_size)
worst_example.yhat_image = yhat_val.detach().data.reshape(image_size)
worst_example.val_loss = val_loss
images = worst_example.x_image, worst_example.yhat_image, worst_example.y_image
wandb.log({f"Epoch {epoch} worst": [wandb.Image(i) for i in images]})
torch.save(model.state_dict(), os.path.join(wandb.run.dir, f'model_epoch_{epoch}.pt'))
model.train()
val_loss_average = np.mean(val_losses)
wandb.log({"Validation Loss": val_loss_average})
# torch.save(model.state_dict(), model_state_savepath)
# plt.plot(losses, label='Training loss')
# plt.plot(val_losses, label='Validation loss')
# plt.legend()
# plt.show()
#
pass
def test():
torch.manual_seed(369)
model = TheModel()
state_dict_path = '/home/lgblkb/PycharmProjects/abda_project/wandb/run-20200426_113911-wxzvb2i8/model.pt'
model.load_state_dict(torch.load(state_dict_path))
dataset = create_data()
train_val_fractions = [0.8, 0.2]
lenghts = [int(np.round(len(dataset) * fraction)) for fraction in train_val_fractions]
train_dataset, val_dataset = random_split(dataset, lenghts)
val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=True)
plotter = Plotter()
for i, (x, y) in enumerate(val_loader):
if i % 5 == 0:
plotter.plot(rows_cols=(5, 3))
plotter = Plotter()
yhat = model(x).data.reshape((32, 32))
x = x.data.reshape((32, 32))
y = y.data.reshape((32, 32))
plotter.add_images(x, y, yhat)
pass
def main():
train()
pass
if __name__ == '__main__':
main()
| 33.187817
| 112
| 0.631233
|
4a0669d1d2b660eb74870dae881f53e1dad326ff
| 791
|
py
|
Python
|
Python/leetcode.098.validate-binary-search-tree.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | 4
|
2015-10-10T00:30:55.000Z
|
2020-07-27T19:45:54.000Z
|
Python/leetcode.098.validate-binary-search-tree.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
Python/leetcode.098.validate-binary-search-tree.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
path = [root]
inorder = []
while path:
cur = path[-1]
if cur:
path.append(cur.left)
else:
path.pop(-1)
if path:
cur = path.pop(-1)
if inorder and inorder[-1] >=cur.val:
return False
else:
inorder.append(cur.val)
path.append(cur.right)
return True
| 27.275862
| 57
| 0.419722
|
4a066a2adb5939368a79752e6ec2f882503e055b
| 5,060
|
py
|
Python
|
tests/unit/anchore_engine/services/policy_engine/engine/policy/gates/test_malware.py
|
ballad86/anchore-engine
|
51f784dbb697586083bce023e2e6a708a25f1797
|
[
"Apache-2.0"
] | 1,484
|
2017-09-11T19:08:42.000Z
|
2022-03-29T07:47:44.000Z
|
tests/unit/anchore_engine/services/policy_engine/engine/policy/gates/test_malware.py
|
ballad86/anchore-engine
|
51f784dbb697586083bce023e2e6a708a25f1797
|
[
"Apache-2.0"
] | 913
|
2017-09-27T20:37:53.000Z
|
2022-03-29T17:21:28.000Z
|
tests/unit/anchore_engine/services/policy_engine/engine/policy/gates/test_malware.py
|
PhoenixRedflash/anchore-engine
|
4192eba02bb91cf0eebebe32e8134b27b06feefe
|
[
"Apache-2.0"
] | 294
|
2017-09-12T16:54:03.000Z
|
2022-03-14T01:28:51.000Z
|
import hashlib
import pytest
from anchore_engine.db.entities.policy_engine import AnalysisArtifact, Image
from anchore_engine.services.policy_engine.engine.policy.gate import ExecutionContext
from anchore_engine.services.policy_engine.engine.policy.gates import malware
image_id = "1"
user = "admin"
digest = "1"
signature = "Unix.Trojan.MSShellcode-40"
signature2 = "Unix.Trojan.SomeMadeupValue-1"
findings = [
{"path": "/elf_payload1", "signature": signature},
{"path": "/home/someuser/file2", "signature": signature},
{"path": "/var/lib/somebadlib/corrupted", "signature": signature2},
]
@pytest.fixture()
def image(monkeypatch):
monkeypatch.setattr(
Image, "analysis_artifacts", MockAnalysisArtifacts(), raising=True
)
img = Image()
img.id = image_id
img.digest = digest
img.user_id = user
return img
class MockAnalysisArtifacts:
def __init__(
self,
):
artifact1 = AnalysisArtifact()
artifact1.analyzer_id = "malware"
artifact1.analyzer_artifact = "malware"
artifact1.artifact_key = "clamav"
artifact1.analyzer_type = "base"
artifact1.image_id = image_id
artifact1.image_user_id = user
artifact1.json_value = {
"name": "clamav",
"findings": [],
"metadata": {"db_version": {"daily": "1", "main": "1", "bytecode": "1"}},
}
self.artifacts = [artifact1]
def __call__(self, *args, **kwargs):
return self.artifacts
def __iter__(self):
return self.artifacts.__iter__()
def filter(self, *args, **kwargs):
a = self.artifacts
class A:
def all(self):
return a
return A()
@pytest.fixture()
def malware_gate():
return malware.MalwareGate()
@pytest.fixture()
def scan_trigger(malware_gate):
trigger = malware.ScanFindingsTrigger(parent_gate_cls=malware_gate.__class__)
return trigger
@pytest.fixture()
def noscan_trigger(malware_gate):
trigger = malware.ScanNotRunTrigger(parent_gate_cls=malware_gate.__class__)
return trigger
@pytest.fixture()
def exec_context():
return ExecutionContext(db_session=None, configuration={})
@pytest.mark.parametrize("finding", findings)
def test_malware_gate_single_finding(
malware_gate, scan_trigger, exec_context, image, finding
):
image.analysis_artifacts()[0].json_value["findings"] = [finding]
malware_gate.prepare_context(image, exec_context)
assert scan_trigger.execute(image, exec_context)
assert scan_trigger.did_fire
assert len(scan_trigger.fired) == 1
assert scan_trigger.fired[0].id == "clamav+" + finding.get("signature") + "+" + str(
hashlib.new(
"md5", bytes(finding.get("path"), "utf-8"), usedforsecurity=False
).hexdigest()
)
def test_malware_gate_multifinding(malware_gate, scan_trigger, exec_context, image):
image.analysis_artifacts()[0].json_value["findings"] = findings
malware_gate.prepare_context(image, exec_context)
assert scan_trigger.execute(image, exec_context)
assert scan_trigger.did_fire
assert len(scan_trigger.fired) == len(findings)
def test_malware_gate_nofinding(malware_gate, scan_trigger, exec_context, image):
image.analysis_artifacts.artifacts = []
malware_gate.prepare_context(image, exec_context)
assert scan_trigger.execute(image, exec_context)
assert scan_trigger.did_fire is False
def test_malware_gate_nofinding_populated_context(
malware_gate, scan_trigger, exec_context, image
):
"""
Tests specific condition (issue-992) where the gate was using context incorrectly and could error out when
no scans present but gate exec context had other keys present
"""
image.analysis_artifacts.artifacts = []
exec_context.data["something"] = ["foobar", "foo"]
malware_gate.prepare_context(image, exec_context)
assert scan_trigger.execute(image, exec_context)
assert scan_trigger.did_fire is False
def test_malware_gate_noscan_trigger(malware_gate, noscan_trigger, exec_context, image):
image.analysis_artifacts.artifacts = []
malware_gate.prepare_context(image, exec_context)
assert noscan_trigger.execute(image, exec_context)
assert noscan_trigger.did_fire is True
def test_malware_gate_noscan_trigger_populated_context(
malware_gate, noscan_trigger, exec_context, image
):
image.analysis_artifacts.artifacts = []
exec_context.data["something"] = ["foobar", "foo"]
malware_gate.prepare_context(image, exec_context)
assert noscan_trigger.execute(image, exec_context)
assert noscan_trigger.did_fire is True
@pytest.mark.parametrize("finding", findings)
def test_malware_gate_noscan_trigger_with_findings(
malware_gate, noscan_trigger, exec_context, image, finding
):
image.analysis_artifacts()[0].json_value["findings"] = [finding]
malware_gate.prepare_context(image, exec_context)
assert noscan_trigger.execute(image, exec_context)
assert noscan_trigger.did_fire is False
| 29.418605
| 110
| 0.718379
|
4a066ad185e79cf8a6db60c82d8f2792b9c78c4e
| 1,788
|
py
|
Python
|
texthero/_helper.py
|
cedricconol/texthero
|
b73ef44911205cdb19b9b60c9d40eba54989c494
|
[
"MIT"
] | null | null | null |
texthero/_helper.py
|
cedricconol/texthero
|
b73ef44911205cdb19b9b60c9d40eba54989c494
|
[
"MIT"
] | null | null | null |
texthero/_helper.py
|
cedricconol/texthero
|
b73ef44911205cdb19b9b60c9d40eba54989c494
|
[
"MIT"
] | null | null | null |
"""
Useful helper functions for the texthero library.
"""
import functools
import warnings
"""
Warnings.
"""
_warning_nans_in_input = (
"There are NaNs (missing values) in the given input series."
" They were replaced with appropriate values before the function"
" was applied. Consider using hero.fillna to replace those NaNs yourself"
" or hero.drop_no_content to remove them."
)
"""
Decorators.
"""
def handle_nans(replace_nans_with):
"""
Decorator to handle NaN values in a function's input.
Using the decorator, if there are NaNs in the input,
they are replaced with replace_nans_with
and a warning is printed.
The function must take as first input a Pandas Series.
Examples
--------
>>> from texthero._helper import handle_nans
>>> import pandas as pd
>>> import numpy as np
>>> @handle_nans(replace_nans_with="I was missing!")
... def replace_b_with_c(s):
... return s.str.replace("b", "c")
>>> s_with_nan = pd.Series(["Test b", np.nan])
>>> replace_b_with_c(s_with_nan)
0 Test c
1 I was missing!
dtype: object
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Get first input argument (the series) and replace the NaNs.
s = args[0]
if s.isna().values.any():
warnings.warn(_warning_nans_in_input, UserWarning)
s = s.fillna(value=replace_nans_with)
# Put the series back into the input.
if args[1:]:
args = (s,) + args[1:]
else:
args = (s,)
# Apply function as usual.
return func(*args, **kwargs)
return wrapper
return decorator
| 24.493151
| 77
| 0.600671
|
4a066b0a1f280851e1d4faed80bcd5579d295b41
| 1,310
|
py
|
Python
|
hard-gists/3023497/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/3023497/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/3023497/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
from Crypto.Cipher import AES
from StringIO import StringIO
from bplist import BPlistReader #https://github.com/farcaller/bplist-python
import M2Crypto
import gzip
import struct
def xor_strings(s, key):
res = ""
for i in xrange(len(s)):
res += chr(ord(s[i]) ^ ord(key[i%len(key)]))
return res
def aes_ctr_decrypt(data, key, iv=None, ctr=1):
res = ""
a = AES.new(key)
x = a.encrypt("\x00"*8 + struct.pack(">Q", ctr))
for i in xrange(0,len(data), 16):
res += xor_strings(data[i:i+16], x)
ctr += 1
if len(data[i:i+16]) == 16:
x = a.encrypt("\x00"*8 + struct.pack(">Q", ctr))
return res
#use https://github.com/meeee/pushproxy to intercept
msg = BPlistReader(open("message.plist","rb").read()).parse()
d = gzip.GzipFile("", fileobj=StringIO(msg["P"].data)).read()
l = struct.unpack(">H", d[1:3])[0]
x = d[3:3+l]
#extract "iMessage encryption key" from recipient keychain
pk = M2Crypto.RSA.load_key("recipient_key.txt")
#decrypt session key
z = pk.private_decrypt(x[:160], M2Crypto.RSA.pkcs1_oaep_padding)
aes_key = z[:16]
data = z[16:] + x[160:]
#decrypt message payload
decrypted = aes_ctr_decrypt(data, aes_key)
#double gzip !!!
dec = gzip.GzipFile("", fileobj=StringIO(decrypted)).read()
p = BPlistReader(dec).parse()
print p
| 29.772727
| 75
| 0.651145
|
4a066bee1adf5c0d1ae9a75818fb93b29beb84e9
| 782
|
py
|
Python
|
network/migrations/0002_auto_20200708_2310.py
|
benccalcyxzfi/cs50w-network
|
d6a80acc0734f2ba73a5ca00efbae5b5d5dbfb45
|
[
"Apache-2.0"
] | 4
|
2022-01-29T22:41:10.000Z
|
2022-02-16T13:48:43.000Z
|
network/migrations/0002_auto_20200708_2310.py
|
benccalcyxzfi/cs50w-network
|
d6a80acc0734f2ba73a5ca00efbae5b5d5dbfb45
|
[
"Apache-2.0"
] | 1
|
2022-02-27T12:35:41.000Z
|
2022-02-27T12:35:41.000Z
|
network/migrations/0002_auto_20200708_2310.py
|
benccalcyxzfi/cs50w-network
|
d6a80acc0734f2ba73a5ca00efbae5b5d5dbfb45
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-07-09 02:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('network', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='follower',
name='follower',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='follower', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='follower',
unique_together={('follower', 'following')},
),
migrations.RemoveField(
model_name='follower',
name='user',
),
]
| 26.965517
| 149
| 0.61509
|
4a066c9cdcdc9e16592afe3fae0058e9c528789a
| 842
|
py
|
Python
|
utility/cog/character/ability/list/_3_defend.py
|
DrLarck/DiscordBallZ_
|
c274e26efce4c5a757d258c54bc285d118618751
|
[
"MIT"
] | 4
|
2020-01-19T13:53:43.000Z
|
2020-01-20T13:34:17.000Z
|
utility/cog/character/ability/list/_3_defend.py
|
DrLarck/DiscordBallZ_
|
c274e26efce4c5a757d258c54bc285d118618751
|
[
"MIT"
] | 18
|
2020-01-19T17:52:17.000Z
|
2020-02-17T15:06:13.000Z
|
utility/cog/character/ability/list/_3_defend.py
|
DrLarck/DiscordBallZ_
|
c274e26efce4c5a757d258c54bc285d118618751
|
[
"MIT"
] | 1
|
2020-01-19T16:33:19.000Z
|
2020-01-19T16:33:19.000Z
|
"""
Defend ability
--
Author : DrLarck
Last update : 04/03/20 (DrLarck)
"""
# dependancies
import asyncio
import random
# util
from utility.cog.character.ability.ability import Ability
class Defend_3(Ability):
"""
Represents the defend ability
"""
def __init__(self, client, ctx, caster, target, team_a, team_b):
Ability.__init__(self, client, ctx, caster, target, team_a, team_b)
self.name = "Defend"
self.description = "The unit's posture changes to **Defending**."
self.icon = ":shield:"
self.id = 3
async def set_tooltip(self):
self.tooltip = "The unit's posture changes to **Defending**."
async def use(self):
await self.caster.posture.change_posture("defending")
display = f"__Move__ : :shield:`{self.name}`"
return(display)
| 21.589744
| 75
| 0.640143
|
4a066d1b07120900c17d99f34da0959f93693453
| 6,363
|
py
|
Python
|
config/sentencepiece_model_loc.py
|
project-anuvaad/OpenNMT-py
|
267d097b9e90d59709fe1c26ea8b8e2c43c755c9
|
[
"MIT"
] | null | null | null |
config/sentencepiece_model_loc.py
|
project-anuvaad/OpenNMT-py
|
267d097b9e90d59709fe1c26ea8b8e2c43c755c9
|
[
"MIT"
] | 29
|
2019-07-18T10:21:57.000Z
|
2019-10-24T11:41:59.000Z
|
config/sentencepiece_model_loc.py
|
project-anuvaad/OpenNMT-py
|
267d097b9e90d59709fe1c26ea8b8e2c43c755c9
|
[
"MIT"
] | null | null | null |
english_hindi = {
"ENG_220519": "model/sentencepiece_models/en-220519.model",
"HIN_220519": "model/sentencepiece_models/hi-220519.model",
"ENG_EXP_1": "model/sentencepiece_models/en_exp-1-2019-10-01-15k.model",
"HIN_EXP_1": "model/sentencepiece_models/hi_exp-1-2019-10-01-15k.model",
"ENG_EXP_10": "model/sentencepiece_models/en_exp-10-2019-10-25-24k.model",
"HIN_EXP_10": "model/sentencepiece_models/hi_exp-10-2019-10-25-24k.model",
"ENG_EXP_12": "model/sentencepiece_models/en_exp-12-2019-10-29-24k.model",
"HIN_EXP_12": "model/sentencepiece_models/hi_exp-12-2019-10-29-24k.model",
"ENG_EXP_5.4": "model/sentencepiece_models/en_exp-5.4-2019-10-29-24k.model",
"HIN_EXP_5.4": "model/sentencepiece_models/hi_exp-5.4-2019-10-29-24k.model",
"ENG_EXP_5.6": "model/sentencepiece_models/en_exp-5.6-2019-12-09-24k.model",
"HIN_EXP_5.6": "model/sentencepiece_models/hi_exp-5.6-2019-12-09-24k.model",
"ENG_EXP_13": "model/sentencepiece_models/en_en-hi-exp-13-2020-03-09-24k.model",
"HIN_EXP_13": "model/sentencepiece_models/hi_en-hi-exp-13-2020-03-09-24k.model",
}
english_tamil = {
"ENG_230919": "model/sentencepiece_models/enTa-2019-09-23-10k.model",
"TAM_230919": "model/sentencepiece_models/tamil-2019-09-23-10k.model",
"ENG_090120": "model/sentencepiece_models/enTa-2020-01-09-24k.model",
"TAM_090120": "model/sentencepiece_models/tamil-2020-01-09-24k.model",
"ENG_080220": "model/sentencepiece_models/enTa-eng-tam-2020-02-08-24k.model",
"TAM_080220": "model/sentencepiece_models/tamil-eng-tam-2020-02-08-24k.model",
"ENG_100220": "model/sentencepiece_models/enTa-ta-to-en-2-2020-02-10-24k.model",
"TAM_100220": "model/sentencepiece_models/tamil-ta-to-en-2-2020-02-10-24k.model",
"ENG_280220": "model/sentencepiece_models/enTa-ta-to-en-3-2020-02-28-24k.model",
"TAM_280220": "model/sentencepiece_models/tamil-ta-to-en-3-2020-02-28-24k.model",
"ENG_060320": "model/sentencepiece_models/enTa-ta-en-1.1-2020-03-06-24k.model",
"TAM_060320": "model/sentencepiece_models/tamil-ta-en-1.1-2020-03-06-24k.model",
}
english_gujarati = {
"ENG_100919": "model/sentencepiece_models/en-2019-09-10-10k.model",
"GUJ_100919": "model/sentencepiece_models/guj-2019-09-10-10k.model",
"ENG_140220": "model/sentencepiece_models/enGuj-en-to-guj-2-2020-02-14-24k.model",
"GUJ_140220": "model/sentencepiece_models/gujarati-en-to-guj-2-2020-02-14-24k.model",
}
english_bengali = {
"ENG_120919": "model/sentencepiece_models/en-2019-09-12-10k.model",
"BENG_120919": "model/sentencepiece_models/beng-2019-09-12-10k.model",
"ENG_180220": "model/sentencepiece_models/enBeng-en-to-beng-2-2020-02-18-24k.model",
"BENG_180220": "model/sentencepiece_models/bengali-en-to-beng-2-2020-02-18-24k.model",
"ENG_281220": "model/sentencepiece_models/enBeng-en-to-bn-3.2-2020-12-28-24k.model",
"BENG_281220": "model/sentencepiece_models/bengali-en-to-bn-3.2-2020-12-28-24k.model",
"ENG_281220_2.2": "model/sentencepiece_models/enBeng-bn-to-en-2.2-2020-12-28-24k.model",
"BENG_281220_2.2": "model/sentencepiece_models/bengali-bn-to-en-2.2-2020-12-28-24k.model",
"ENG_EN_to_BN_4": "model/sentencepiece_models/enBeng-en-to-bn-4-2021-01-19-24k.model",
"BENG_EN_to_BN_4": "model/sentencepiece_models/bengali-en-to-bn-4-2021-01-19-24k.model",
"ENG_BN_to_EN_3": "model/sentencepiece_models/enBeng-bn-to-en-3-2021-01-19-24k.model",
"BENG_BN_to_EN_3": "model/sentencepiece_models/bengali-bn-to-en-3-2021-01-19-24k.model"
}
english_marathi = {
"ENG_140919": "model/sentencepiece_models/enMr-2019-09-14-10k.model",
"MARATHI_140919": "model/sentencepiece_models/marathi-2019-09-14-10k.model",
"ENG_071119": "model/sentencepiece_models/enMr_exp-2-2019-11-07-24k.model",
"MARATHI_071119": "model/sentencepiece_models/marathi_exp-2-2019-11-07-24k.model",
"ENG_270120": "model/sentencepiece_models/enMr-mr-en-1.2-2020-01-27-24k.model",
"MARATHI_270120": "model/sentencepiece_models/marathi-mr-en-1.2-2020-01-27-24k.model",
"ENG_060220": "model/sentencepiece_models/enMr-en-mr-3-2020-02-06-24k.model",
"MARATHI_060220": "model/sentencepiece_models/marathi-en-mr-3-2020-02-06-24k.model",
"ENG_280220": "model/sentencepiece_models/enMr-mr-to-en-2-2020-02-28-24k.model",
"MARATHI_280220": "model/sentencepiece_models/marathi-mr-to-en-2-2020-02-28-24k.model",
}
english_kannada = {
"ENG_200919": "model/sentencepiece_models/enKn-2019-09-20-10k.model",
"KANNADA_200919": "model/sentencepiece_models/kannada-2019-09-20-10k.model",
"ENG_100220": "model/sentencepiece_models/enKann-en-to-kn-2-2020-02-10-24k.model",
"KANNADA_100220": "model/sentencepiece_models/kannada-en-to-kn-2-2020-02-10-24k.model",
}
english_telugu = {
"ENG_200919": "model/sentencepiece_models/enTe-2019-09-20-10k.model",
"TELGU_200919": "model/sentencepiece_models/telgu-2019-09-20-10k.model",
"ENG_120220": "model/sentencepiece_models/enTelg-en-to-tel-2-2020-02-12-24k.model",
"TELUGU_120220": "model/sentencepiece_models/telugu-en-to-tel-2-2020-02-12-24k.model",
}
english_malayalam = {
"ENG_200919": "model/sentencepiece_models/enMl-2019-09-20-10k.model",
"MALAYALAM_200919": "model/sentencepiece_models/malayalam-2019-09-20-10k.model",
"ENG_210220": "model/sentencepiece_models/enMalay-en-to-maly-2-2020-02-21-24k.model",
"MALAYALAM_210220": "model/sentencepiece_models/malayalam-en-to-maly-2-2020-02-21-24k.model"
}
english_punjabi = {
"ENG_200919": "model/sentencepiece_models/enPu-2019-09-20-10k.model",
"PUNJABI_200919": "model/sentencepiece_models/punjabi-2019-09-20-10k.model",
"ENG_160220": "model/sentencepiece_models/enPun-en-to-pun-2-2020-02-16-24k.model",
"PUNJABI_160220": "model/sentencepiece_models/punjabi-en-to-pun-2-2020-02-16-24k.model"
}
hindi_english = {
"HINDI_280619": "model/sentencepiece_models/hi-28062019-10k.model",
"ENGLISH_280619": "model/sentencepiece_models/en-28062019-10k.model",
"HIN_EXP_1_291019":"model/sentencepiece_models/hi_exp_h-1-2019-10-29-24k.model",
"ENG_EXP_1_291019":"model/sentencepiece_models/en_exp_h-1-2019-10-29-24k.model",
"HIN_EXP_2_050520":"model/sentencepiece_models/hi_hi-en-exp-2-2020-05-05-24k.model",
"ENG_EXP_2_050520":"model/sentencepiece_models/en_hi-en-exp-2-2020-05-05-24k.model",
}
| 60.6
| 96
| 0.741788
|
4a066df761c4407e8cb6743a74cfe3e4dc6d8033
| 5,099
|
py
|
Python
|
test/test_statistic.py
|
clebsonpy/HydroComp
|
9d17fa533e8a15c760030df5246ff531ddb4cb22
|
[
"MIT"
] | 4
|
2020-05-14T20:03:49.000Z
|
2020-05-22T19:56:43.000Z
|
test/test_statistic.py
|
clebsonpy/HydroComp
|
9d17fa533e8a15c760030df5246ff531ddb4cb22
|
[
"MIT"
] | 19
|
2019-06-27T18:12:27.000Z
|
2020-04-28T13:28:03.000Z
|
test/test_statistic.py
|
clebsonpy/HydroComp
|
9d17fa533e8a15c760030df5246ff531ddb4cb22
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from hidrocomp.statistic.genextre import Gev
from hidrocomp.statistic.genpareto import Gpa
class TestGev(TestCase):
data = [1347, 857, 1626, 977, 1065, 997, 502, 1663, 992,
1487, 1041, 2251, 1110, 1553, 1090, 1268, 1113, 1358, 402]
dist = Gev(data=data)
def test_dist(self):
name = 'GEV'
self.assertEquals(self.dist.name, name, 'Name: GEV')
def test_mml(self):
mml = (0.14684253029124203, 1023.9891165624797, 380.3053838205217)
self.assertEquals([self.dist.mml(), self.dist.estimador], [mml, 'MML'], 'Fit_MML: %s, %s, %s' % mml)
def test_mvs(self):
mvs = (-5.83785197466355, 403.3270953313672, 7.747500635081945)
self.assertEquals([self.dist.mvs(), self.dist.estimador], [mvs, 'MVS'], 'Fit_MVS: %s, %s, %s' % mvs)
def test_prob(self):
prob_mml = 0.7781690064347855
prob_mvs = 0.7287813740394129
self.dist.mml()
self.assertEquals(self.dist.probs(1500), prob_mml, 'Prob: %s' % prob_mml)
self.dist.mvs()
self.assertEquals(self.dist.probs(1500), prob_mvs, 'Prob: %s' % prob_mvs)
def test_value(self):
value_mml = 1456.9948303470273
value_mvs = 2314.9143444142505
self.dist.mml()
self.assertEquals(self.dist.values(0.75), value_mml, 'Value: %s' % value_mml)
self.dist.mvs()
self.assertEquals(self.dist.values(0.75), value_mvs, 'Value: %s' % value_mvs)
def test_values(self):
value_mvs = [2314.9143444142505, 413.27574336098405]
value_mml = [1456.9948303470273, 1159.6914703217076]
self.dist.mml()
self.assertEquals(self.dist.values([0.75, 0.5]), value_mml, 'Value: %s' % value_mml)
self.dist.mvs()
self.assertEquals(self.dist.values([0.75, 0.5]), value_mvs, 'Value: %s' % value_mvs)
def test_probs(self):
prob_mml = [0.7781690064347855, 0.34479635611222237]
prob_mvs = [0.7287813740394129, 0.7039216570017871]
self.dist.mml()
self.assertEquals(self.dist.probs([1500, 1000]), prob_mml, 'Prob: %s' % prob_mml)
self.dist.mvs()
self.assertEquals(self.dist.probs([1500, 1000]), prob_mvs, 'Prob: %s' % prob_mvs)
def test_interval(self):
ic_mvs = (402.00217396627875, 45018159.2649536)
ic_mml = (571.2282612439494, 1939.4616813678326)
self.dist.mml()
self.assertEquals(self.dist.interval(0.9), ic_mml, 'Value: (%s, %s)' % ic_mml)
self.dist.mvs()
self.assertEquals(self.dist.interval(0.9), ic_mvs, 'Value: (%s, %s)' % ic_mvs)
class TestGpa(TestCase):
data = [1347, 857, 1626, 977, 1065, 997, 502, 1663, 992,
1487, 1041, 2251, 1110, 1553, 1090, 1268, 1113, 1358, 402]
dist = Gpa(data=data)
def test_dist(self):
name = 'GPA'
self.assertEquals(self.dist.name, name, 'Name: GPA')
def test_mml(self):
mml = (-0.7072859839251329, 560.8626486522879, 1082.1146688970641)
self.assertEquals([self.dist.mml(), self.dist.estimador], [mml, 'MML'], 'Fit_MML: %s, %s, %s' % mml)
def test_mvs(self):
mvs = (-1.1982244351093645, -6.282925274294001, 2704.731558018805)
self.assertEquals([self.dist.mvs(), self.dist.estimador], [mvs, 'MVS'], 'Fit_MVS: %s, %s, %s' % mvs)
def test_prob(self):
prob_mml = 0.7395295673854643
prob_mvs = 0.6008635213747953
self.dist.mml()
self.assertEquals(self.dist.probs(1500), prob_mml, 'Prob: %s' % prob_mml)
self.dist.mvs()
self.assertEquals(self.dist.probs(1500), prob_mvs, 'Prob: %s' % prob_mvs)
def test_value(self):
value_mml = 1516.8984252482405
value_mvs = 1822.2708593345496
self.dist.mml()
self.assertEquals(self.dist.values(0.75), value_mml, 'Value: %s' % value_mml)
self.dist.mvs()
self.assertEquals(self.dist.values(0.75), value_mvs, 'Value: %s' % value_mvs)
def test_values(self):
value_mvs = [1822.2708593345496, 1267.2505558875055]
value_mml = [1516.8984252482405, 1153.7636259098695]
self.dist.mml()
self.assertEquals(self.dist.values([0.75, 0.5]), value_mml, 'Value: %s' % value_mml)
self.dist.mvs()
self.assertEquals(self.dist.values([0.75, 0.5]), value_mvs, 'Value: %s' % value_mvs)
def test_probs(self):
prob_mml = [0.7395295673854643, 0.3801780483231015]
prob_mvs = [0.6008635213747953, 0.3889507274477421]
self.dist.mml()
self.assertEquals(self.dist.probs([1500, 1000]), prob_mml, 'Prob: %s' % prob_mml)
self.dist.mvs()
self.assertEquals(self.dist.probs([1500, 1000]), prob_mvs, 'Prob: %s' % prob_mvs)
def test_interval(self):
ic_mvs = (128.27430967073587, 2188.675318921949)
ic_mml = (615.3731031559984, 1906.9600907224512)
self.dist.mml()
self.assertEquals(self.dist.interval(0.9), ic_mml, 'Value: (%s, %s)' % ic_mml)
self.dist.mvs()
self.assertEquals(self.dist.interval(0.9), ic_mvs, 'Value: (%s, %s)' % ic_mvs)
| 42.140496
| 108
| 0.625221
|
4a066f21b29b45baed8164e97a08d684cc33d721
| 3,683
|
py
|
Python
|
src/backend/common/consts/ranking_sort_orders.py
|
ofekashery/the-blue-alliance
|
df0e47d054161fe742ac6198a6684247d0713279
|
[
"MIT"
] | 266
|
2015-01-04T00:10:48.000Z
|
2022-03-28T18:42:05.000Z
|
src/backend/common/consts/ranking_sort_orders.py
|
ofekashery/the-blue-alliance
|
df0e47d054161fe742ac6198a6684247d0713279
|
[
"MIT"
] | 2,673
|
2015-01-01T20:14:33.000Z
|
2022-03-31T18:17:16.000Z
|
src/backend/common/consts/ranking_sort_orders.py
|
ofekashery/the-blue-alliance
|
df0e47d054161fe742ac6198a6684247d0713279
|
[
"MIT"
] | 230
|
2015-01-04T00:10:48.000Z
|
2022-03-26T18:12:04.000Z
|
from typing import Dict, List
from backend.common.models.ranking_sort_order_info import RankingSortOrderInfo
SORT_ORDER_INFO: Dict[int, List[RankingSortOrderInfo]] = {
2021: [
{"name": "Overall Score", "precision": 2},
{"name": "Galactic Search", "precision": 2},
{"name": "Auto-Nav", "precision": 2},
{"name": "Hyperdrive", "precision": 2},
{"name": "Interstellar Accuracy", "precision": 2},
{"name": "Power Port", "precision": 2},
],
2020: [
{"name": "Ranking Score", "precision": 2},
{"name": "Auto", "precision": 0},
{"name": "End Game", "precision": 0},
{"name": "Teleop Cell + CPanel", "precision": 0},
],
2019: [
{"name": "Ranking Score", "precision": 2},
{"name": "Cargo", "precision": 0},
{"name": "Hatch Panel", "precision": 0},
{"name": "HAB Climb", "precision": 0},
{"name": "Sandstorm Bonus", "precision": 0},
],
2018: [
{"name": "Ranking Score", "precision": 2},
{"name": "Park/Climb Points", "precision": 0},
{"name": "Auto", "precision": 0},
{"name": "Ownership", "precision": 0},
{"name": "Vault", "precision": 0},
],
2017: [
{"name": "Ranking Score", "precision": 2},
{"name": "Match Points", "precision": 0},
{"name": "Auto", "precision": 0},
{"name": "Rotor", "precision": 0},
{"name": "Touchpad", "precision": 0},
{"name": "Pressure", "precision": 0},
],
2016: [
{"name": "Ranking Score", "precision": 0},
{"name": "Auto", "precision": 0},
{"name": "Scale/Challenge", "precision": 0},
{"name": "Goals", "precision": 0},
{"name": "Defense", "precision": 0},
],
2015: [
{"name": "Qual Avg.", "precision": 1},
{"name": "Coopertition", "precision": 0},
{"name": "Auto", "precision": 0},
{"name": "Container", "precision": 0},
{"name": "Tote", "precision": 0},
{"name": "Litter", "precision": 0},
],
2014: [
{"name": "Qual Score", "precision": 0},
{"name": "Assist", "precision": 0},
{"name": "Auto", "precision": 0},
{"name": "Truss & Catch", "precision": 0},
{"name": "Teleop", "precision": 0},
],
2013: [
{"name": "Qual Score", "precision": 0},
{"name": "Auto", "precision": 0},
{"name": "Climb", "precision": 0},
{"name": "Teleop", "precision": 0},
],
2012: [
{"name": "Qual Score", "precision": 0},
{"name": "Hybrid", "precision": 0},
{"name": "Bridge", "precision": 0},
{"name": "Teleop", "precision": 0},
],
2011: [
{"name": "Qual Score", "precision": 0},
{"name": "Ranking Score", "precision": 2},
],
2010: [
{"name": "Seeding Score", "precision": 0},
{"name": "Coopertition Bonus", "precision": 0},
{"name": "Hanging Points", "precision": 0},
],
2009: [
{"name": "Qual Score", "precision": 0},
{"name": "Seeding Score", "precision": 2},
{"name": "Match Points", "precision": 0},
],
2008: [
{"name": "Qual Score", "precision": 0},
{"name": "Seeding Score", "precision": 2},
{"name": "Match Points", "precision": 0},
],
2007: [
{"name": "Qual Score", "precision": 0},
{"name": "Seeding Score", "precision": 2},
{"name": "Match Points", "precision": 0},
],
2006: [
{"name": "Qual Score", "precision": 0},
{"name": "Seeding Score", "precision": 2},
{"name": "Match Points", "precision": 0},
],
}
| 34.745283
| 78
| 0.477328
|
4a066f9fdd74b01214faec0a64dfd89adfcf83b2
| 397
|
py
|
Python
|
projectGW2/projectGW2/wsgi.py
|
cs-fullstack-2019-fall/django-models-cw-chrisawill
|
e0672d8b71cd58bc5141612185a51c802306acd5
|
[
"Apache-2.0"
] | null | null | null |
projectGW2/projectGW2/wsgi.py
|
cs-fullstack-2019-fall/django-models-cw-chrisawill
|
e0672d8b71cd58bc5141612185a51c802306acd5
|
[
"Apache-2.0"
] | null | null | null |
projectGW2/projectGW2/wsgi.py
|
cs-fullstack-2019-fall/django-models-cw-chrisawill
|
e0672d8b71cd58bc5141612185a51c802306acd5
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for projectGW2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projectGW2.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.790932
|
4a066fa1a941cfe2554d1acfaafc12693189bad1
| 27,257
|
py
|
Python
|
PyNite/FEModel3D.py
|
JBloss1517/PyNite
|
b0cf8fa503f49a35337ec48699d16da78e7b7e52
|
[
"MIT"
] | null | null | null |
PyNite/FEModel3D.py
|
JBloss1517/PyNite
|
b0cf8fa503f49a35337ec48699d16da78e7b7e52
|
[
"MIT"
] | null | null | null |
PyNite/FEModel3D.py
|
JBloss1517/PyNite
|
b0cf8fa503f49a35337ec48699d16da78e7b7e52
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 9 21:11:20 2017
@author: D. Craig Brinck, SE
"""
# %%
from numpy import zeros, delete, insert, matmul, subtract
from numpy.linalg import inv, matrix_rank
from PyNite.Node3D import Node3D
from PyNite.Member3D import Member3D
# %%
class FEModel3D():
"""
A class representing a 3D finite element model.
"""
#%%
def __init__(self):
"""
Initializes a new 3D finite element model.
"""
self.Nodes = [] # A list of the structure's nodes
self.Members = [] # A list of the structure's members
self.__D = [] # A list of the structure's nodal displacements
#%%
def AddNode(self, Name, X, Y, Z):
"""
Adds a new node to the model.
Parameters
----------
Name : string
A unique user-defined name for the node.
X : number
The global X-coordinate of the node.
Y : number
The global Y-coordinate of the node.
Z : number
The global Z-coordinate of the node.
"""
# Create a new node
newNode = Node3D(Name, X, Y, Z)
# Add the new node to the list
self.Nodes.append(newNode)
#%%
def AddMember(self, Name, iNode, jNode, E, G, Iy, Iz, J, A):
"""
Adds a new member to the model.
Parameters
----------
Name : string
A unique user-defined name for the member.
iNode : string
The name of the i-node (start node).
jNode : string
The name of the j-node (end node).
E : number
The modulus of elasticity of the member.
G : number
The shear modulus of the member.
Iy : number
The moment of inertia of the member about its local y-axis.
Iz : number
The moment of inertia of the member about its local z-axis.
J : number
The polar moment of inertia of the member.
A : number
The cross-sectional area of the member.
"""
# Create a new member
newMember = Member3D(Name, self.GetNode(iNode), self.GetNode(jNode), E, G, Iy, Iz, J, A)
# Add the new member to the list
self.Members.append(newMember)
#%%
def RemoveNode(self, Node):
"""
Removes a node from the model. All nodal loads associated with the
node and members attached to the node will also be removed.
Parameters
----------
Node : string
The name of the node to be removed.
"""
# Remove the node. Nodal loads are stored within the node, so they
# will be deleted automatically when the node is deleted.
self.Nodes.remove(self.GetNode(Node))
# Find any members attached to the node and remove them
self.Members = [member for member in self.Members if member.iNode.Name != Node and member.jNode.Name != Node]
#%%
def RemoveMember(self, Member):
"""
Removes a member from the model. All member loads associated with the
member will also be removed.
Parameters
----------
Member : string
The name of the member to be removed.
"""
# Remove the member. Member loads are stored within the member, so they
# will be deleted automatically when the member is deleted.
self.Members.remove(self.GetMember(Member))
#%%
def DefineSupport(self, Node, SupportDX = False, SupportDY = False, SupportDZ = False, SupportRX = False, SupportRY = False, SupportRZ = False):
"""
Defines the support conditions at a node.
Nodes will default to fully unsupported unless specified otherwise.
Parameters
----------
Node : string
The name of the node where the support is being defined
SupportDX : boolean
Indicates whether the node is supported against translation in the global X-direction.
SupportDY : boolean
Indicates whether the node is supported against translation in the global Y-direction.
SupportDZ : boolean
Indicates whether the node is supported against translation in the global Z-direction.
SupportRX : boolean
Indicates whether the node is supported against rotation about the global X-axis.
SupportRY : boolean
Indicates whether the node is supported against rotation about the global Y-axis.
SupportRZ : boolean
Indicates whether the node is supported against rotation about the global Z-axis.
"""
# Get the node to be supported
node = self.GetNode(Node)
# Set the node's supports
node.SupportDX = SupportDX
node.SupportDY = SupportDY
node.SupportDZ = SupportDZ
node.SupportRX = SupportRX
node.SupportRY = SupportRY
node.SupportRZ = SupportRZ
#%%
def DefineReleases(self, Member, Dxi = False, Dyi = False, Dzi = False, Rxi = False, Ryi = False, Rzi = False, Dxj = False, Dyj = False, Dzj = False, Rxj = False, Ryj = False, Rzj = False):
"""
Defines member end releases.
All member end releases will default to unreleased unless specified otherwise.
Parameters
----------
Member : string
The name of the member to have its releases modified.
Dxi : boolean
Indicates whether the member is released axially at its start.
Dyi : boolean
Indicates whether the member is released for shear in the local y-axis at its start.
Dzi : boolean
Indicates whether the member is released for shear in the local z-axis at its start.
Rxi : boolean
Indicates whether the member is released for torsion at its start.
Ryi : boolean
Indicates whether the member is released for moment about the local y-axis at its start.
Rzi : boolean
Indicates whether the member is released for moment about the local z-axis at its start.
Dxj : boolean
Indicates whether the member is released axially at its end.
Dyj : boolean
Indicates whether the member is released for shear in the local y-axis at its end.
Dzj : boolean
Indicates whether the member is released for shear in the local z-axis.
Rxj : boolean
Indicates whether the member is released for torsion at its end.
Ryj : boolean
Indicates whether the member is released for moment about the local y-axis at its end.
Rzj : boolean
Indicates whether the member is released for moment about the local z-axis at its end.
"""
# Apply the end releases to the member
self.GetMember(Member).Releases = [Dxi, Dyi, Dzi, Rxi, Ryi, Rzi, Dxj, Dyj, Dzj, Rxj, Ryj, Rzj]
#%%
def AddNodeLoad(self, Node, Direction, P):
"""
Adds a nodal load to the model.
Parameters
----------
Node : string
The name of the node where the load is being applied.
Direction : {'FX', 'FY', 'FZ', 'MX', 'MY', 'MZ'}
The global direction the load is being applied in. Forces are 'FX', 'FY', and 'FZ'. Moments are 'MX', 'MY', and 'MZ'.
P : number
The numeric value (magnitude) of the load.
"""
# Add the node load to the model
self.GetNode(Node).NodeLoads.append((Direction, P))
#%%
def AddMemberPtLoad(self, Member, Direction, P, x):
"""
Adds a member point load to the model.
Parameters
----------
Member : string
The name of the member the load is being applied to.
Direction : {'Fx', 'Fy', 'Fz', 'My', 'Mz'}
The direction in which the force is to be applied. Note that
typical beam sign convention is used. Transverse forces acting
toward the beam are positive. Moments are positive if they act
counter-clockwise relative to the beam's local coordinate system.
Torsional point loads are not supported at this time.
P : number
The numeric value (magnitude) of the load.
x : number
The load's location along the member's local x-axis.
"""
# Add the point load to the member
self.GetMember(Member).PtLoads.append((Direction, P, x))
#%%
def AddMemberDistLoad(self, Member, Direction, w1, w2, x1=None, x2=None):
"""
Adds a member distributed load to the model.
Parameters
----------
Member : string
The name of the member the load is being appied to
Direction : {'Fx', 'Fy', 'Fz'}
The direction in which the load is to be applied. Note that
typical beam sign convention is used. Forces acting toward the beam
are positive.
w1 : number
The starting value (magnitude) of the load.
w2 : number
The ending value (magnitude) of the load.
x1 : number
The load's start location along the member's local x-axis. If this argument
is not specified, the start of the member will be used.
x2 : number
The load's end location along the member's local x-axis. If this argument
is not specified, the end of the member will be used.
"""
# Determine if a starting and ending points for the load have been specified.
# If not, use the member start and end as defaults
if x1 == None:
start = 0
else:
start = x1
if x2 == None:
end = self.GetMember(Member).L
else:
end = x2
# Add the distributed load to the member
self.GetMember(Member).DistLoads.append((Direction, w1, w2, start, end))
#%%
def GetNode(self, Name):
"""
Returns the node with the given name.
Parameters
----------
Name : string
The name of the node to be returned.
"""
# Step through each node in the 'Nodes' list
for node in self.Nodes:
# Check the name of the node
if node.Name == Name:
# Return the node of interest
return node
#%%
def GetMember(self, Name):
"""
Returns the member with the given name.
Parameters
----------
Name : string
The name of the member to be returned.
"""
# Step through each member in the 'Members' list
for member in self.Members:
# Check the name of the member
if member.Name == Name:
# Return the member of interest
return member
#%%
def __Renumber(self):
"""
Assigns node and member ID numbers to be used internally by the
program. Numbers are assigned according to the order nodes and members
were added to the model.
"""
# Number each node in the model
i = 0
for node in self.Nodes:
node.ID = i
i += 1
# Number each member in the model
i = 0
for member in self.Members:
member.ID = i
i += 1
#%%
def K(self, Renumber=False):
"""
Assembles and returns the global stiffness matrix.
Parameters
----------
Renumber : boolean
Indicates whether nodes and members should be renumbered prior to
calculating the stiffness matrix. This may be necessary if a model
is being solved for the first time, or if it has been changed since
the last run, potentially creating a gap in the numbering.
"""
# Renumber the nodes and members in the model if requested
if Renumber == True:
self.__Renumber()
# Initialize a zero matrix to hold all the stiffness terms
K = zeros((len(self.Nodes) * 6, len(self.Nodes) * 6))
# Add stiffness terms for each member in the model
for member in self.Members:
# Step through each term in the member's stiffness matrix
# 'a' & 'b' below are row/column indices in the member's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.iNode.ID * 6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.jNode.ID * 6 + (a - 6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.iNode.ID * 6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.jNode.ID * 6 + (b - 6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + member.K().item((a, b)))
# Return the global stiffness matrix
return K
#%%
def FER(self, Renumber=False):
"""
Assembles and returns the global fixed end reaction vector.
Parameters
----------
Renumber : boolean
Indicates whether nodes and members should be renumbered prior to
calculating the fixed end reaction vector. This may be necessary if
a model is being solved for the first time, or if it has been
changed since the last run, potentially creating a gap in the
numbering.
"""
# Renumber the nodes and members in the model if requested
if Renumber == True:
self.__Renumber()
# Initialize a zero vector to hold all the terms
FER = zeros((len(self.Nodes) * 6, 1))
# Add terms for each member in the model
for member in self.Members:
# Step through each term in the member's fixed end reaction vector
# 'a' below is the row index in the member's fixed end reaction vector
# 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = member.iNode.ID * 6 + a
else:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = member.jNode.ID * 6 + (a - 6)
# Now that 'm' is known, place the term in the global fixed end reaction vector
FER.itemset((m, 0), FER[m, 0] + member.FER()[a, 0])
# Return the global fixed end reaction vector
return FER
#%%
def P(self, Renumber=False):
"""
Assembles and returns the global nodal force vector.
Parameters
----------
Renumber : boolean
Indicates whether nodes and members should be renumbered prior to
calculating the fixed end reaction vector. This may be necessary if
a model is being solved for the first time, or if it has been
changed since the last run, potentially creating a gap in the
numbering.
"""
# Renumber the nodes and members in the model if requested
if Renumber == True:
self.__Renumber()
# Initialize a zero vector to hold all the terms
Pvector = zeros((len(self.Nodes)*6, 1))
# Add terms for each node in the model
for node in self.Nodes:
# Get the node's ID
ID = node.ID
# Add the node's loads to the global nodal load vector
for load in node.NodeLoads:
if load[0] == 'FX':
Pvector.itemset((ID*6 + 0, 0), Pvector[ID*6 + 0, 0] + load[1])
elif load[0] == 'FY':
Pvector.itemset((ID*6 + 1, 0), Pvector[ID*6 + 1, 0] + load[1])
elif load[0] == 'FZ':
Pvector.itemset((ID*6 + 2, 0), Pvector[ID*6 + 2, 0] + load[1])
elif load[0] == 'MX':
Pvector.itemset((ID*6 + 3, 0), Pvector[ID*6 + 3, 0] + load[1])
elif load[0] == 'MY':
Pvector.itemset((ID*6 + 4, 0), Pvector[ID*6 + 4, 0] + load[1])
elif load[0] == 'MZ':
Pvector.itemset((ID*6 + 5, 0), Pvector[ID*6 + 5, 0] + load[1])
# Return the global nodal force vector
return Pvector
#%%
def D(self):
"""
Returns the global displacement vector for the model.
"""
# Return the global displacement vector
return self.__D
#%%
def Analyze(self, check_statics=True):
"""
Analyzes the model.
"""
# Get the global stiffness matrix and renumber the nodes & members
# in the process of creating it
K = self.K(True)
# Get the global fixed end reaction vector
FER = self.FER(False)
# Get the global nodal force vector
P = self.P(False)
# Eliminate supported degrees of freedom from each of the matrices/vectors
# Work backwards through the node list so that the relationship between
# the DOF's and node ID's is unnafected by the matrices/vectors
# shrinking
for node in reversed(self.Nodes):
if node.SupportRZ == True:
K = delete(K, node.ID * 6 + 5, axis = 0)
K = delete(K, node.ID * 6 + 5, axis = 1)
FER = delete(FER, node.ID * 6 + 5, axis = 0)
P = delete(P, node.ID * 6 + 5, axis = 0)
if node.SupportRY == True:
K = delete(K, node.ID * 6 + 4, axis = 0)
K = delete(K, node.ID * 6 + 4, axis = 1)
FER = delete(FER, node.ID * 6 + 4, axis = 0)
P = delete(P, node.ID * 6 + 4, axis = 0)
if node.SupportRX == True:
K = delete(K, node.ID * 6 + 3, axis = 0)
K = delete(K, node.ID * 6 + 3, axis = 1)
FER = delete(FER, node.ID * 6 + 3, axis = 0)
P = delete(P, node.ID * 6 + 3, axis = 0)
if node.SupportDZ == True:
K = delete(K, node.ID * 6 + 2, axis = 0)
K = delete(K, node.ID * 6 + 2, axis = 1)
FER = delete(FER, node.ID * 6 + 2, axis = 0)
P = delete(P, node.ID * 6 + 2, axis = 0)
if node.SupportDY == True:
K = delete(K, node.ID * 6 + 1, axis = 0)
K = delete(K, node.ID * 6 + 1, axis = 1)
FER = delete(FER, node.ID * 6 + 1, axis = 0)
P = delete(P, node.ID * 6 + 1, axis = 0)
if node.SupportDX == True:
K = delete(K, node.ID * 6 + 0, axis = 0)
K = delete(K, node.ID * 6 + 0, axis = 1)
FER = delete(FER, node.ID * 6 + 0, axis = 0)
P = delete(P, node.ID * 6 + 0, axis = 0)
# Determine if 'K' is singular
if matrix_rank(K) < min(K.shape):
# Return out of the method if 'K' is singular and provide an error message
print('The stiffness matrix is singular, which implies rigid body motion. The structure is unstable. Aborting analysis.')
return
else:
# Calculate the global displacement vector
self.__D = matmul(inv(K), subtract(P, FER))
# Save the displacements as a local variable for easier reference below
D = self.__D
# Expand the global displacement vector to include supported degrees of freedom
# Work forwards through the node list so that the relationship between
# the DOF's and node ID's is unnafected by the vector expanding
for node in self.Nodes:
if node.SupportDX == True:
D = insert(D, node.ID * 6 + 0, 0, axis = 0)
if node.SupportDY == True:
D = insert(D, node.ID * 6 + 1, 0, axis = 0)
if node.SupportDZ == True:
D = insert(D, node.ID * 6 + 2, 0, axis = 0)
if node.SupportRX == True:
D = insert(D, node.ID * 6 + 3, 0, axis = 0)
if node.SupportRY == True:
D = insert(D, node.ID * 6 + 4, 0, axis = 0)
if node.SupportRZ == True:
D = insert(D, node.ID * 6 + 5, 0, axis = 0)
# Store the calculated global nodal displacements into each node
for node in self.Nodes:
node.DX = D.item((node.ID * 6 + 0, 0))
node.DY = D.item((node.ID * 6 + 1, 0))
node.DZ = D.item((node.ID * 6 + 2, 0))
node.RX = D.item((node.ID * 6 + 3, 0))
node.RY = D.item((node.ID * 6 + 4, 0))
node.RZ = D.item((node.ID * 6 + 5, 0))
# Calculate and store the reactions at each node
for node in self.Nodes:
# Sum the member end forces at the node
for member in self.Members:
if member.iNode == node:
node.RxnFX += member.F()[0, 0]
node.RxnFY += member.F()[1, 0]
node.RxnFZ += member.F()[2, 0]
node.RxnMX += member.F()[3, 0]
node.RxnMY += member.F()[4, 0]
node.RxnMZ += member.F()[5, 0]
elif member.jNode == node:
node.RxnFX += member.F()[6, 0]
node.RxnFY += member.F()[7, 0]
node.RxnFZ += member.F()[8, 0]
node.RxnMX += member.F()[9, 0]
node.RxnMY += member.F()[10, 0]
node.RxnMZ += member.F()[11, 0]
# Sum the joint forces at the node
for load in node.NodeLoads:
if load[0] == "FX":
node.RxnFX -= load[1]
elif load[0] == "FY":
node.RxnFY -= load[1]
elif load[0] == "FZ":
node.RxnFZ -= load[1]
elif load[0] == "MX":
node.RxnMX -= load[1]
elif load[0] == "MY":
node.RxnMY -= load[1]
elif load[0] == "MZ":
node.RxnMZ -= load[1]
# Segment all members in the model to make member results available
for member in self.Members:
member.SegmentMember()
# Check statics if requested
if check_statics == True:
self.__CheckStatics()
#%%
def __CheckStatics(self):
# Initialize force summations to zero
SumFX = 0
SumFY = 0
SumFZ = 0
SumMX = 0
SumMY = 0
SumMZ = 0
SumRFX = 0
SumRFY = 0
SumRFZ = 0
SumRMX = 0
SumRMY = 0
SumRMZ = 0
# Get the global force vector and the global fixed end reaction vector
P = self.P(False)
FER = self.FER()
# Step through each node and sum its forces
for node in self.Nodes:
# Get the node's coordinates
X = node.X
Y = node.Y
Z = node.Z
# Get the nodal forces
FX = P[node.ID*6+0][0] - FER[node.ID*6+0][0]
FY = P[node.ID*6+1][0] - FER[node.ID*6+1][0]
FZ = P[node.ID*6+2][0] - FER[node.ID*6+2][0]
MX = P[node.ID*6+3][0] - FER[node.ID*6+3][0]
MY = P[node.ID*6+4][0] - FER[node.ID*6+4][0]
MZ = P[node.ID*6+5][0] - FER[node.ID*6+5][0]
# Get the nodal reactions
RFX = node.RxnFX
RFY = node.RxnFY
RFZ = node.RxnFZ
RMX = node.RxnMX
RMY = node.RxnMY
RMZ = node.RxnMZ
# Sum the global forces
SumFX += FX
SumFY += FY
SumFZ += FZ
SumMX += MX - FY*Z + FZ*Y
SumMY += MY + FX*Z - FZ*X
SumMZ += MZ - FX*Y + FY*X
# Sum the global reactions
SumRFX += RFX
SumRFY += RFY
SumRFZ += RFZ
SumRMX += RMX - RFY*Z + RFZ*Y
SumRMY += RMY + RFX*Z - RFZ*X
SumRMZ += RMZ - RFX*Y + RFY*X
# Print the load summation
print('**Applied Loads**')
print('Sum Forces X: ', SumFX, ', Sum Forces Y: ', SumFY, ', Sum Forces Z: ', SumFZ)
print('Sum Moments MX: ', SumMX, ', Sum Moments MY: ', SumMY, ', Sum Moments MZ: ', SumMZ)
print('**Reactions**')
print('Sum Forces X: ', SumRFX, ', Sum Forces Y: ', SumRFY, ', Sum Forces Z: ', SumRFZ)
print('Sum Moments MX: ', SumRMX, ', Sum Moments MY: ', SumRMY, ', Sum Moments MZ: ', SumRMZ)
return SumFX, SumFY, SumFZ, SumMX, SumMY, SumMZ
| 38.662411
| 194
| 0.501339
|
4a066fe471211673b684c0bfaee91c126888c9fd
| 771
|
py
|
Python
|
app.py
|
UstymHanyk/TwitterFriendMap
|
880dca8a944884673cc770989723fde52ddb4af1
|
[
"MIT"
] | null | null | null |
app.py
|
UstymHanyk/TwitterFriendMap
|
880dca8a944884673cc770989723fde52ddb4af1
|
[
"MIT"
] | null | null | null |
app.py
|
UstymHanyk/TwitterFriendMap
|
880dca8a944884673cc770989723fde52ddb4af1
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request
from friend_searcher import friends_geolocator, get_user_friends
from map_generator import generate_map, group_duplicates
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/map_generation", methods=["POST"])
def wait_for_map_generation():
# if not request.form.get("domain"):
# return render_template("failure.html")
username = request.form.get("username")
if "@" not in username:
username = "@" + username
friends_loc_list = friends_geolocator(get_user_friends(username))
fl_map = generate_map(group_duplicates(friends_loc_list))
return fl_map._repr_html_()
# return render_template('index.html', map=map._repr_html_())
| 36.714286
| 69
| 0.738003
|
4a06701222513009b80782871bc68b0d7d6a1727
| 2,756
|
py
|
Python
|
app/plugins/Plugin.py
|
superadm1n/FlaskTemplate
|
eeeadfeea3ffecd7b10cd9c23f0b5f64af1a89c8
|
[
"MIT"
] | null | null | null |
app/plugins/Plugin.py
|
superadm1n/FlaskTemplate
|
eeeadfeea3ffecd7b10cd9c23f0b5f64af1a89c8
|
[
"MIT"
] | null | null | null |
app/plugins/Plugin.py
|
superadm1n/FlaskTemplate
|
eeeadfeea3ffecd7b10cd9c23f0b5f64af1a89c8
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, abort
from flask_login import current_user
from functools import wraps
class Plugin(Blueprint):
'''
This class represents a plugin object, It should be used when extending the system via plugins
'''
def __init__(self, access_roles=[], login_required=True, email_client=None, interval_scheduler=None,
cron_scheduler=None, *args, **kwargs):
'''
:param access_roles: The roles that will be used when restricting access to routes contained in the plugin
:param login_required: Specify if accessing the plugin routes will require the user to be logged in (if any
access roles are specified this will be overridden to True) Default is True
:param args: Arguments that are passed to the flask.Blueprint object
:param kwargs: Keyword arguments that are passed to the flask.Blueprint object
'''
super().__init__(*args, **kwargs)
self.access_roles = access_roles
self.interval_scheduler = interval_scheduler
self.cron_scheduler = cron_scheduler
self.email_client = email_client
self.login_required = login_required
self.before_request(self.restrict_access)
def required_roles(*roles):
'''Custom function for checking if a user has the required rolls to access a resource.
:param roles:
:return:
'''
def wrapper(f):
@wraps(f)
def wrapped(*args, **kwargs):
try:
if current_user.has_role(*roles) is False:
abort(401)
except AttributeError:
abort(401)
return f(*args, **kwargs)
return wrapped
return wrapper
def current_user_has_roles(self, *roles):
try:
if current_user.has_role(*roles):
return True
else:
return False
except AttributeError:
return False
def restrict_access(self):
"""
Handles route restrictions for plugins. if login_required is set, will require the user to be logged
in. If any access rolls are specified, it will require to be logged in AND be assigned to the access role
"""
# if there are access roles specified login will automatically be required. if no access
# rolls are specified but login_required is set, check if the user is authenticated, if not, throw 403
if len(self.access_roles) > 0 or self.login_required:
if not current_user.is_authenticated:
abort(403)
if len(self.access_roles) > 0 and not self.current_user_has_roles(*self.access_roles):
abort(403)
| 39.942029
| 115
| 0.63643
|
4a0670c08a23335e58862383974c0541e5628f55
| 840
|
py
|
Python
|
api/collaboration/migrations/0006_convert_null_to_empty.py
|
uktrade/market-access-api
|
850a59880f8f62263784bcd9c6b3362e447dbc7a
|
[
"MIT"
] | null | null | null |
api/collaboration/migrations/0006_convert_null_to_empty.py
|
uktrade/market-access-api
|
850a59880f8f62263784bcd9c6b3362e447dbc7a
|
[
"MIT"
] | 51
|
2018-05-31T12:16:31.000Z
|
2022-03-08T09:36:48.000Z
|
api/collaboration/migrations/0006_convert_null_to_empty.py
|
uktrade/market-access-api
|
850a59880f8f62263784bcd9c6b3362e447dbc7a
|
[
"MIT"
] | 2
|
2019-12-24T09:47:42.000Z
|
2021-02-09T09:36:51.000Z
|
# Generated by Django 3.1.2 on 2020-11-05 17:15
from django.db import migrations
def convert_null_to_empty(apps, schema_editor):
TeamMember = apps.get_model("collaboration", "TeamMember")
HistoricalTeamMember = apps.get_model("collaboration", "HistoricalTeamMember")
fields = (
"archived_reason",
"role",
)
for field in fields:
TeamMember.objects.filter(**{f"{field}__isnull": True}).update(**{field: ""})
HistoricalTeamMember.objects.filter(**{f"{field}__isnull": True}).update(
**{field: ""}
)
class Migration(migrations.Migration):
dependencies = [
("collaboration", "0005_team_members_overhaul"),
]
operations = [
migrations.RunPython(
convert_null_to_empty, reverse_code=migrations.RunPython.noop
),
]
| 25.454545
| 85
| 0.641667
|
4a06710e1c466288343fafc9a6d1a85ee4a70d9b
| 2,227
|
py
|
Python
|
packages/python-stable.py
|
zpcc/mpkg-pkgs
|
6f919c7ef0ce0dbee298bcb8328be0e9e65fc833
|
[
"Apache-2.0"
] | 1
|
2020-12-16T14:15:12.000Z
|
2020-12-16T14:15:12.000Z
|
packages/python-stable.py
|
zpcc/mpkg-pkgs
|
6f919c7ef0ce0dbee298bcb8328be0e9e65fc833
|
[
"Apache-2.0"
] | null | null | null |
packages/python-stable.py
|
zpcc/mpkg-pkgs
|
6f919c7ef0ce0dbee298bcb8328be0e9e65fc833
|
[
"Apache-2.0"
] | null | null | null |
import re
import time
from lxml import etree
from mpkg.common import Soft, soft_data
from mpkg.utils import GetPage
class Package(Soft):
ID = 'python-stable'
def _prepare(self):
data = self.data
url = 'https://www.python.org/ftp/python/'
texts = list(etree.HTML(GetPage(url)).xpath('//pre')[0].itertext())[2:]
rels = [name[:-1] for name in texts[::2]
if re.match('^\\d.[\\d.]+/', name)]
page = etree.HTML(GetPage('https://devguide.python.org/'))
table = page.xpath('//*[@id="status-of-python-branches"]//table')[0]
table = [[text.strip() for text in tr]
for tr in [list(tr.itertext()) for tr in table.xpath('.//tr')]]
active = [tr[0] for tr in table if 'bugfix' in tr]
data.ver = sorted(active, key=lambda x: int(x.split('.')[1]))[-1]
for ver in active:
soft = soft_data()
soft.id = f'python{ver}'
data.depends.append(soft.id)
rel = sorted([rel for rel in rels if rel.startswith(ver)],
key=lambda x: int(x.split('.')[2]))[-1]
soft.ver = rel
date = texts[texts.index(rel+'/')+1].strip().split(' ')[0]
soft.date = time.strftime(
'%Y-%m-%d', time.strptime(date, '%d-%b-%Y'))
soft.arch = {'32bit': f'https://www.python.org/ftp/python/{soft.ver}/python-{soft.ver}.exe',
'64bit': f'https://www.python.org/ftp/python/{soft.ver}/python-{soft.ver}-amd64.exe'}
soft.changelog = f'https://docs.python.org/release/{soft.ver}/whatsnew/changelog.html#changelog'
relpage = etree.HTML(GetPage(
'https://www.python.org/downloads/release/python-{0}/'.format(soft.ver.replace('.', ''))))
files = relpage.xpath('//tbody/tr')
md5 = {}
for tr in files:
td = tr.xpath('./td')
url = td[0].xpath('./a')[0].values()[0]
md5[url] = td[3].text
soft.sha256 = {'32bit': 'md5:' + md5[soft.arch['32bit']],
'64bit': 'md5:' + md5[soft.arch['64bit']]}
self.packages.append(soft.asdict(simplify=True))
| 46.395833
| 110
| 0.524921
|
4a06712b74daba5fe4943154aeccc9951832bc7e
| 2,739
|
py
|
Python
|
project/proc_2d_vars.py
|
boyuan276/numerical_weather
|
5d392ee951efd36a73b1a8019063db507ca4821c
|
[
"MIT"
] | 1
|
2021-05-21T01:06:05.000Z
|
2021-05-21T01:06:05.000Z
|
project/proc_2d_vars.py
|
boyuan276/numerical_weather
|
5d392ee951efd36a73b1a8019063db507ca4821c
|
[
"MIT"
] | null | null | null |
project/proc_2d_vars.py
|
boyuan276/numerical_weather
|
5d392ee951efd36a73b1a8019063db507ca4821c
|
[
"MIT"
] | null | null | null |
'''
Process selected 2d variables
'''
import os
import netCDF4 as nc
import numpy as np
import datetime
import matplotlib.pyplot as plt
import imageio
# from matplotlib import animation
from matplotlib.cm import get_cmap
import cartopy.crs as crs
from cartopy.feature import NaturalEarthFeature
from wrf import (getvar, to_np, ALL_TIMES, smooth2d, get_cartopy, cartopy_xlim,
cartopy_ylim, latlon_coords)
import optwrf.util as util
#%% Import data
# Set up working directory
wrfout_headdir = 'D:/courses/F2020-S2021/EAS 5555/Code/numerical_weather/project/'
# Sub directories of different initial data sources
time_dir = ['20210515.00Z/',
'20210515.12Z/',
'20210516.00Z/',
'20210516.12Z/',
'20210517.00Z/']
# Identify the WRF output file to be processed
wrfout_file = ['wrfout_d03_2021-05-15_00_00_00',
'wrfout_d03_2021-05-15_12_00_00',
'wrfout_d03_2021-05-16_00_00_00',
'wrfout_d03_2021-05-16_12_00_00',
'wrfout_d03_2021-05-17_00_00_00',]
var_names = ['T2', 'slp']
var_fullnames = ["2m temperature", "Sea level pressure"]
#%%
# # Set start and end time stamps
# start =
# end =
# Set variable
n = 0
var_name = var_names[n]
var_fullname = var_fullnames[n]
# Read WRF out file
i = 3
if i == 0:
ncfile = [nc.Dataset(wrfout_headdir + time_dir[i] + 'wrfout_d03_2021-05-15_00_00_00'),
nc.Dataset(wrfout_headdir + time_dir[i] + 'wrfout_d03_2021-05-16_00_00_00')]
elif i== 1:
ncfile = [nc.Dataset(wrfout_headdir + time_dir[i] + 'wrfout_d03_2021-05-15_00_00_00'),
nc.Dataset(wrfout_headdir + time_dir[i] + 'wrfout_d03_2021-05-16_00_00_00')]
else:
ncfile = nc.Dataset(wrfout_headdir + time_dir[i] + wrfout_file[i])
# Create an xarray.Dataset from the wrf qurery_variables.
met_data = util._wrf2xarray(ncfile, var_names)
# Slice the wrfout data if start and end times ares specified
met_data = met_data.sel(Time=slice(start, end))
# metdf = met_data.isel(west_east=loc_ithaca[0],south_north=loc_ithaca[1])
# metdf = metdf.reset_coords(['XTIME'], drop=True)
# time = getvar(wrf_list, 'Times', timeidx=ALL_TIMES)
time = getvar(ncfile, 'Times', timeidx=ALL_TIMES)
num_time = len(time)
# Choose timestamps: original file is 10-min
timestamps = np.arange(0, num_time, 1)
t = 1
var = getvar(ncfile, var_name, timeidx=t)
lats, lons = latlon_coords(var)
#%%
#%%
for t in timestamps:
# Read variable at time i
# var = getvar(ncfile, var_name, timeidx=i)
var = getvar(ncfile, var_name, timeidx=t)
# Smooth data
smooth_var = smooth2d(var, 3, cenweight=4)
# Get the latitude and longitude points
lats, lons = latlon_coords(var)
| 23.410256
| 90
| 0.691128
|
4a0671bb1eed473893a8d4e049bdc4f370a4dc13
| 6,214
|
py
|
Python
|
eda/logger.py
|
e5120/EDAs
|
acf86fa35182b8fe0cd913d6fb46280b2f9e6e46
|
[
"MIT"
] | 3
|
2021-01-15T08:35:32.000Z
|
2021-04-09T08:03:35.000Z
|
eda/logger.py
|
e5120/EDAs
|
acf86fa35182b8fe0cd913d6fb46280b2f9e6e46
|
[
"MIT"
] | null | null | null |
eda/logger.py
|
e5120/EDAs
|
acf86fa35182b8fe0cd913d6fb46280b2f9e6e46
|
[
"MIT"
] | 3
|
2021-04-27T06:36:33.000Z
|
2022-02-14T14:13:08.000Z
|
import os
import csv
import json
import logging
import datetime
from collections import OrderedDict
from types import MappingProxyType
import numpy as np
logging.basicConfig(level=logging.INFO,
format="[%(asctime)s %(levelname)s] %(message)s")
class Logger(object):
"""
A class to log a optimization process.
"""
def __init__(self, dir_path, args, logging_step=10, display_step=10):
"""
Parameters
----------
dir_path : str
Directory path to output logs.
logging_step : int, default 10
Interval of outputting logs to directory.
display_step : int, default 10
Interval of displaying logs to stdout.
"""
if dir_path is not None:
dir_path = "{}_{}".format(dir_path,
datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
os.makedirs(dir_path, exist_ok=False)
self.dir_path = dir_path
self.trial_path = None
self.logging_step = logging_step
self.display_step = display_step
self.args = args
self.logger = logging.getLogger()
self.log = OrderedDict()
self.display = OrderedDict()
# save arguments
if self.dir_path and args:
args.log_dir = self.dir_path
with open("{}/settings.json".format(self.dir_path), "w", encoding="utf-8") as f:
json.dump(args.__dict__, f, cls=JsonEncoder, ensure_ascii=True, indent=4)
def set_log_columns(self, columns):
"""
Set a column name of each log to be output in log file.
Parameters
----------
columns : array-like
List of column names.
"""
self.log = self._set_columns(columns)
if self.trial_path:
self.csv_file.writerow(columns)
def set_display_columns(self, columns):
"""
Set a column name of each log to be displayed in stdout.
Parameters
----------
columns : array-like
List of column names.
"""
self.display = self._set_columns(columns)
def _set_columns(self, columns):
"""
Set columns.
Parameters
----------
columns : array-like
List of column names.
Returns
-------
collections.OrderedDict
The key-value data, where each of key is a column name and each of value is a observed value.
"""
dic = OrderedDict({column: None for column in columns})
return dic
def add(self, key, val, step, force=False):
"""
Add a log.
Parameters
----------
key : str
Column name.
val : any
Observed value such as scalar, vector, and matrix.
step : int
Iteration.
force : bool, default False
If True, force to add logs.
"""
if key in self.log and (step % self.logging_step == 0 or force):
self.log[key] = val
if key in self.display and (step % self.display_step == 0 or force):
self.display[key] = val
def output(self, step, force=False):
"""
Output logs.
Parameters
----------
step : int
Iteration.
force : bool, default False
If True, force to output logs.
"""
if (step % self.logging_step == 0 or force) and self.trial_path:
for key, val in self.log.items():
if isinstance(val, (list, tuple, np.ndarray)):
val = np.array(val)
np_dir = "{}/{}".format(self.trial_path, key)
os.makedirs(np_dir, exist_ok=True)
np_file = "{}/{}_step".format(np_dir, step)
np.save(np_file, val)
self.log[key] = np_file
self.csv_file.writerow(self.log.values())
if step % self.display_step == 0 or force:
msg = ", ".join(["{}: {}".format(key, val) for key, val in self.display.items()
if isinstance(val, (int, float, str, bool, *np.typeDict.values()))])
self.logger.info(msg)
def result(self, info, filename="results.csv"):
"""
Output results.
Parameters
----------
info : dict
Information.
filename : str, default "result.csv"
Filename to which the information will be output.
"""
if self.trial_path:
with open("{}/{}".format(self.trial_path, filename), "w") as f:
result_file = csv.writer(f)
result_file.writerow(info.keys())
result_file.writerow(info.values())
def open(self, trial, filename="logs.csv"):
"""
Start logging of each independent trial.
Parameters
----------
trial : int
The number of trials.
filename : str, default "logs.csv"
Filename which is output logs.
"""
if self.dir_path:
self.trial_path = "{}/{}".format(self.dir_path, trial)
os.makedirs(self.trial_path, exist_ok=False)
self.f = open("{}/{}".format(self.trial_path, filename), "w")
self.csv_file = csv.writer(self.f)
def close(self):
"""
Finish logging of each independent trial.
"""
if self.trial_path:
self.trial_path = None
self.f.close()
def info(self, msg, step=0):
if step % self.display_step == 0:
self.logger.info(msg)
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return "shape of numpy.ndarray: {}".format(obj.shape)
elif isinstance(obj, MappingProxyType):
return obj["__module__"]
elif isinstance(obj, object):
return obj.__dict__
else:
return super(JsonEncoder, self).default(obj)
| 31.704082
| 105
| 0.537657
|
4a0671ee1ddfcaa48a6a94b12999ebd8638b93c9
| 760
|
py
|
Python
|
redis_test.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 5
|
2020-08-17T08:37:16.000Z
|
2021-06-07T05:02:05.000Z
|
redis_test.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | null | null | null |
redis_test.py
|
sambabypapapa/CralwerSet
|
a76e0660c42ce7aac20b8d07ccc454b6636a8a2a
|
[
"Apache-2.0"
] | 1
|
2021-06-07T05:02:10.000Z
|
2021-06-07T05:02:10.000Z
|
import CralwerSet.connect_mysql as connect_mysql
import time
import json
classify = 2
info = '付製这行话¥daxi1oVNWaF¥转移至淘宀┡ē【不会自行车的我,不想买贵的大的电动车,怕学不会,就入手了希洛普的电动滑板车,车子价位从1K多起步】;或https://m.tb.cn/h.VjtdO5K?sm=4d4548 點击链街,再选择瀏..覽..噐dakai'
temp = str(int(time.time() * 1000))
r = connect_mysql.Redis()
key = temp + '|' + str(classify) + '|' + info
r.hset('wt', key, '')
info = {}
while True:
time.sleep(0.1)
result = r.hget("wt", key).decode('utf-8')
if not result:
continue
info = json.loads(result)
r.hdel('wt', key)
break
if type(info['urlList']) == list:
print(info['urlList'])
print(info['text'])
else:
if int(info['urlList']) == 401:
print('链接解析错误')
elif int(info['urlList']) == 402:
print('页面请求失败')
| 27.142857
| 142
| 0.627632
|
4a0672d1f7c5c28fc659e5c3ad6effd8f09b4e70
| 8,513
|
py
|
Python
|
tfx/dsl/component/experimental/decorators_test.py
|
krystollia/tfx
|
a2a3a530368b34de33350953af8ba7894c1e6fe8
|
[
"Apache-2.0"
] | null | null | null |
tfx/dsl/component/experimental/decorators_test.py
|
krystollia/tfx
|
a2a3a530368b34de33350953af8ba7894c1e6fe8
|
[
"Apache-2.0"
] | null | null | null |
tfx/dsl/component/experimental/decorators_test.py
|
krystollia/tfx
|
a2a3a530368b34de33350953af8ba7894c1e6fe8
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.base.decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Optional, Text
import unittest
# Standard Imports
import six
import tensorflow as tf
from tfx import types
from tfx.components.base import base_executor
from tfx.components.base import executor_spec
from tfx.dsl.component.experimental.annotations import InputArtifact
from tfx.dsl.component.experimental.annotations import OutputArtifact
from tfx.dsl.component.experimental.annotations import OutputDict
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import _SimpleComponent
from tfx.dsl.component.experimental.decorators import component
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam import beam_dag_runner
from tfx.types import component_spec
from tfx.types import standard_artifacts
class _InputArtifact(types.Artifact):
TYPE_NAME = '_InputArtifact'
class _OutputArtifact(types.Artifact):
TYPE_NAME = '_OutputArtifact'
class _BasicComponentSpec(component_spec.ComponentSpec):
PARAMETERS = {
'folds': component_spec.ExecutionParameter(type=int),
}
INPUTS = {
'input': component_spec.ChannelParameter(type=_InputArtifact),
}
OUTPUTS = {
'output': component_spec.ChannelParameter(type=_OutputArtifact),
}
if not six.PY2:
# Currently, function components must be defined at the module level (not in
# nested class or function scope). We define the test components here.
@component
def _injector_1(
foo: Parameter[int], bar: Parameter[Text]) -> OutputDict(
a=int, b=int, c=Text, d=bytes):
assert foo == 9
assert bar == 'secret'
return {'a': 10, 'b': 22, 'c': 'unicode', 'd': b'bytes'}
@component
def _simple_component(a: int, b: int, c: Text, d: bytes) -> OutputDict(
e=float, f=float):
del c, d
return {'e': float(a + b), 'f': float(a * b)}
@component
def _verify(e: float, f: float):
assert (e, f) == (32.0, 220.0), (e, f)
@component
def _injector_2(
examples: OutputArtifact[standard_artifacts.Examples]
) -> OutputDict(
a=int, b=float, c=Text, d=bytes, e=Text):
del examples
return {'a': 1, 'b': 2.0, 'c': '3', 'd': b'4', 'e': 'passed'}
@component
def _optionalarg_component(
foo: Parameter[int],
bar: Parameter[Text],
examples: InputArtifact[standard_artifacts.Examples],
a: int,
b: float,
c: Text,
d: bytes,
e1: Text = 'default',
e2: Optional[Text] = 'default',
f: bytes = b'default',
g: Parameter[float] = 1000.0,
h: Parameter[Text] = '2000',
optional_examples_1: InputArtifact[standard_artifacts.Examples] = None,
optional_examples_2: InputArtifact[standard_artifacts.Examples] = None):
# Test non-optional parameters.
assert foo == 9
assert bar == 'secret'
assert isinstance(examples, standard_artifacts.Examples)
# Test non-optional `int`, `float`, `Text` and `bytes` input values.
assert a == 1
assert b == 2.0
assert c == '3'
assert d == b'4'
# Test passed optional arguments (with and without the `Optional` typehint
# specifier).
assert e1 == 'passed'
assert e2 == 'passed'
# Test that non-passed optional argument becomes the argument default.
assert f == b'default'
# Test passed optional parameter.
assert g == 999.0
# Test non-passed optional parameter.
assert h == '2000'
# Test passed optional input artifact.
assert optional_examples_1 and optional_examples_1.uri
# Test non-passed optional input artifact.
assert optional_examples_2 is None
@unittest.skipIf(six.PY2, 'Not compatible with Python 2.')
class ComponentDecoratorTest(tf.test.TestCase):
def setUp(self):
super(ComponentDecoratorTest, self).setUp()
self._test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self._metadata_path = os.path.join(self._test_dir, 'metadata.db')
def testSimpleComponent(self):
class _MySimpleComponent(_SimpleComponent):
SPEC_CLASS = _BasicComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(
base_executor.BaseExecutor)
input_channel = types.Channel(type=_InputArtifact)
instance = _MySimpleComponent(input=input_channel, folds=10)
self.assertIs(instance.inputs['input'], input_channel)
self.assertEqual(instance.outputs['output'].type, _OutputArtifact)
def testDefinitionInClosureFails(self):
with self.assertRaisesRegexp(
ValueError,
'The @component decorator can only be applied to a function defined at '
'the module level'):
@component
def my_component(): # pylint: disable=unused-variable
return None
def testBeamExecutionSuccess(self):
"""Test execution with return values; success case."""
instance_1 = _injector_1(foo=9, bar='secret')
instance_2 = _simple_component(
a=instance_1.outputs['a'],
b=instance_1.outputs['b'],
c=instance_1.outputs['c'],
d=instance_1.outputs['d'])
instance_3 = _verify(e=instance_2.outputs['e'], f=instance_2.outputs['f']) # pylint: disable=assignment-from-no-return
metadata_config = metadata.sqlite_metadata_connection_config(
self._metadata_path)
test_pipeline = pipeline.Pipeline(
pipeline_name='test_pipeline_1',
pipeline_root=self._test_dir,
metadata_connection_config=metadata_config,
components=[instance_1, instance_2, instance_3])
beam_dag_runner.BeamDagRunner().run(test_pipeline)
def testBeamExecutionFailure(self):
"""Test execution with return values; failure case."""
instance_1 = _injector_1(foo=9, bar='secret')
instance_2 = _simple_component(
a=instance_1.outputs['a'],
b=instance_1.outputs['b'],
c=instance_1.outputs['c'],
d=instance_1.outputs['d'])
# Swapped 'e' and 'f'.
instance_3 = _verify(e=instance_2.outputs['f'], f=instance_2.outputs['e']) # pylint: disable=assignment-from-no-return
metadata_config = metadata.sqlite_metadata_connection_config(
self._metadata_path)
test_pipeline = pipeline.Pipeline(
pipeline_name='test_pipeline_1',
pipeline_root=self._test_dir,
metadata_connection_config=metadata_config,
components=[instance_1, instance_2, instance_3])
with self.assertRaisesRegexp(RuntimeError,
r'AssertionError: \(220.0, 32.0\)'):
beam_dag_runner.BeamDagRunner().run(test_pipeline)
def testBeamExecutionOptionalInputsAndParameters(self):
"""Test execution with optional inputs and parameters."""
instance_1 = _injector_2() # pylint: disable=no-value-for-parameter
self.assertEqual(1, len(instance_1.outputs['examples'].get()))
instance_2 = _optionalarg_component( # pylint: disable=assignment-from-no-return
foo=9,
bar='secret',
examples=instance_1.outputs['examples'],
a=instance_1.outputs['a'],
b=instance_1.outputs['b'],
c=instance_1.outputs['c'],
d=instance_1.outputs['d'],
e1=instance_1.outputs['e'],
e2=instance_1.outputs['e'],
g=999.0,
optional_examples_1=instance_1.outputs['examples'])
metadata_config = metadata.sqlite_metadata_connection_config(
self._metadata_path)
test_pipeline = pipeline.Pipeline(
pipeline_name='test_pipeline_1',
pipeline_root=self._test_dir,
metadata_connection_config=metadata_config,
components=[instance_1, instance_2])
beam_dag_runner.BeamDagRunner().run(test_pipeline)
if __name__ == '__main__':
tf.test.main()
| 34.889344
| 123
| 0.703042
|
4a0672d416f2e9f761a47ac742072b428090d313
| 1,631
|
py
|
Python
|
slack_post.py
|
aikiyy/tv_bot
|
696de526a77172a9bf4bd65a1977b9b196d8a15d
|
[
"MIT"
] | null | null | null |
slack_post.py
|
aikiyy/tv_bot
|
696de526a77172a9bf4bd65a1977b9b196d8a15d
|
[
"MIT"
] | 3
|
2021-03-31T19:20:44.000Z
|
2021-12-13T20:05:56.000Z
|
slack_post.py
|
aikiyy/tv_bot
|
696de526a77172a9bf4bd65a1977b9b196d8a15d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from slacker import Slacker
from crawler import TvCrawler
import os
from optparse import OptionParser
from datetime import date, datetime, timedelta
def make_message(word, date, programs):
message = datetime.strptime(date, '%Y%m%d').strftime('%Y/%m/%d') + ' ' + word + '\n'
for title, v in programs.items():
message += v['time'] + ' ' + v['genre1'] + ' - ' + v['genre2'] + '\n'
message += ' <' + v['href'] + '|' + title + '>' + '\n'
message += '- - - - - - - - - - - - - - - - - - - - - - - - -'
return message
def post_slack(options, target_date):
try:
slack_token = os.environ['SLACK_TOKEN']
except KeyError:
raise KeyError('環境変数SLACK_TOKENが設定されていません.')
try:
channel = os.environ['POST_CHANNEL']
except KeyError:
channel = 'random'
try:
icon_emoji = os.environ['ICON_EMOJI']
except KeyError:
icon_emoji = ':tv:'
slack = Slacker(slack_token)
tv_crawler = TvCrawler()
tv_crawler.get_programs(options.word, target_date, options.exword)
if len(tv_crawler.programs) == 0:
exit()
message = make_message(options.word, target_date, tv_crawler.programs)
slack.chat.post_message(channel, message, icon_emoji=icon_emoji)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-w', '--word', dest='word', type='string')
parser.add_option('-ex', '--exword', dest='exword', type='string')
(options, args) = parser.parse_args()
tomorrow = (date.today() + timedelta(days=1)).strftime('%Y%m%d')
post_slack(options, tomorrow)
| 30.773585
| 90
| 0.60699
|
4a0673ec687f181100f706cdfb90af6db0a213a0
| 2,377
|
py
|
Python
|
navrep/scripts/train_gym_e2e1dnavreptrainenv.py
|
Makuh17/navrep
|
bdc1f6102baa3dbb9aacb35387999b720d161aa8
|
[
"MIT"
] | 48
|
2020-11-26T10:16:08.000Z
|
2022-03-24T15:22:08.000Z
|
navrep/scripts/train_gym_e2e1dnavreptrainenv.py
|
Makuh17/navrep
|
bdc1f6102baa3dbb9aacb35387999b720d161aa8
|
[
"MIT"
] | 1
|
2021-12-14T02:08:18.000Z
|
2022-03-14T09:17:25.000Z
|
navrep/scripts/train_gym_e2e1dnavreptrainenv.py
|
Makuh17/navrep
|
bdc1f6102baa3dbb9aacb35387999b720d161aa8
|
[
"MIT"
] | 18
|
2020-12-09T08:37:43.000Z
|
2022-03-30T06:56:38.000Z
|
from datetime import datetime
import os
from stable_baselines import PPO2
from stable_baselines.common.vec_env import SubprocVecEnv, DummyVecEnv
from navrep.tools.custom_policy import Custom1DPolicy, ARCH, _C
from navrep.envs.e2eenv import E2E1DNavRepEnv
from navrep.tools.sb_eval_callback import NavrepEvalCallback
from navrep.tools.commonargs import parse_common_args
if __name__ == "__main__":
args, _ = parse_common_args()
DIR = os.path.expanduser("~/navrep/models/gym")
LOGDIR = os.path.expanduser("~/navrep/logs/gym")
if args.dry_run:
DIR = "/tmp/navrep/models/gym"
LOGDIR = "/tmp/navrep/logs/gym"
START_TIME = datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
CONTROLLER_ARCH = "_{}_C{}".format(ARCH, _C)
LOGNAME = "e2e1dnavreptrainenv_" + START_TIME + "_PPO" + "_E2E1D" + CONTROLLER_ARCH
LOGPATH = os.path.join(LOGDIR, LOGNAME + ".csv")
MODELPATH = os.path.join(DIR, LOGNAME + "_ckpt")
MODELPATH2 = os.path.join(DIR, "e2e1dnavreptrainenv_latest_PPO_ckpt")
if not os.path.exists(DIR):
os.makedirs(DIR)
if not os.path.exists(LOGDIR):
os.makedirs(LOGDIR)
MILLION = 1000000
TRAIN_STEPS = args.n
if TRAIN_STEPS is None:
TRAIN_STEPS = 60 * MILLION
N_ENVS = 6
if args.debug:
env = DummyVecEnv([lambda: E2E1DNavRepEnv(silent=True, scenario='train')]*N_ENVS)
else:
env = SubprocVecEnv([lambda: E2E1DNavRepEnv(silent=True, scenario='train')]*N_ENVS,
start_method='spawn')
eval_env = E2E1DNavRepEnv(silent=True, scenario='train')
def test_env_fn(): # noqa
return E2E1DNavRepEnv(silent=True, scenario='test')
cb = NavrepEvalCallback(eval_env, test_env_fn=test_env_fn,
logpath=LOGPATH, savepath=MODELPATH, verbose=1)
model = PPO2(Custom1DPolicy, env, verbose=0)
model.learn(total_timesteps=TRAIN_STEPS+1, callback=cb)
obs = env.reset()
model.save(MODELPATH)
model.save(MODELPATH2)
print("Model '{}' saved".format(MODELPATH))
del model
model = PPO2.load(MODELPATH)
env = E2E1DNavRepEnv(silent=True, scenario='train')
obs = env.reset()
for i in range(512):
action, _states = model.predict(obs, deterministic=True)
obs, _, done, _ = env.step(action)
if done:
env.reset()
# env.render()
| 35.477612
| 91
| 0.670172
|
4a067510cfb4c4d8b759a1fae38552ee923608b9
| 948
|
py
|
Python
|
ontology/logistic_regression/sherlock/write_listify_length.py
|
ehbeam/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 15
|
2020-07-17T07:10:26.000Z
|
2022-02-18T05:51:45.000Z
|
ontology/logistic_regression/sherlock/write_listify_length.py
|
YifeiCAO/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 2
|
2022-01-14T09:10:12.000Z
|
2022-01-28T17:32:42.000Z
|
ontology/logistic_regression/sherlock/write_listify_length.py
|
YifeiCAO/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 4
|
2021-12-22T13:27:32.000Z
|
2022-02-18T05:51:47.000Z
|
#!/usr/bin/python
import os, shutil
for k in range(2, 51):
comm = "listify_length.optimize_list_len({})".format(k)
pyfile = open("listify_length_k{:02d}.py".format(k), "w+")
pyfile.write("#!/bin/python\n\nimport listify_length\n{}".format(comm))
pyfile.close()
bashfile = open("listify_length_k{:02d}.sbatch".format(k), "w+")
lines = ["#!/bin/bash\n",
"#SBATCH --job-name=k{:02d}_listlen".format(k),
"#SBATCH --output=logs/k{:02d}_listlen.%j.out".format(k),
"#SBATCH --error=logs/k{:02d}_listlen.%j.err".format(k),
"#SBATCH --time=00-12:00:00",
"#SBATCH -p aetkin",
"#SBATCH --mail-type=FAIL",
"#SBATCH --mail-user=ebeam@stanford.edu\n",
"module load python/3.6",
"srun python3 listify_length_k{:02d}.py".format(k)]
for line in lines:
bashfile.write(line + "\n")
bashfile.close()
| 37.92
| 75
| 0.563291
|
4a0675354499d2873d042d82f826e13dcc84870e
| 190
|
py
|
Python
|
cvat/__init__.py
|
ACHultman/cvat
|
01eaf362aa7e03f5623e80cb12ad0b9a429ae588
|
[
"Intel",
"MIT"
] | 3,142
|
2020-09-08T13:24:43.000Z
|
2022-03-31T23:53:50.000Z
|
cvat/__init__.py
|
ACHultman/cvat
|
01eaf362aa7e03f5623e80cb12ad0b9a429ae588
|
[
"Intel",
"MIT"
] | 2,049
|
2020-09-08T10:01:10.000Z
|
2022-03-31T19:08:15.000Z
|
cvat/__init__.py
|
ACHultman/cvat
|
01eaf362aa7e03f5623e80cb12ad0b9a429ae588
|
[
"Intel",
"MIT"
] | 1,055
|
2020-09-08T15:23:58.000Z
|
2022-03-31T10:52:48.000Z
|
# Copyright (C) 2018-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from cvat.utils.version import get_version
VERSION = (2, 0, 0, 'alpha', 0)
__version__ = get_version(VERSION)
| 19
| 43
| 0.736842
|
4a067599e110664a07bf20bf4264729bed9861ee
| 101,907
|
py
|
Python
|
src/transformers/utils/dummy_pt_objects.py
|
studytutorials/transformers
|
27b1516d32b691533fc497e7ee4ceb88c39cdfdf
|
[
"Apache-2.0"
] | 2
|
2022-01-11T19:17:40.000Z
|
2022-01-11T19:49:48.000Z
|
src/transformers/utils/dummy_pt_objects.py
|
feifeivv/transformers
|
08a5f57567d8a975d900b66658bfd3c28c9dbec5
|
[
"Apache-2.0"
] | 1
|
2021-11-08T18:16:52.000Z
|
2021-11-08T18:49:59.000Z
|
src/transformers/utils/dummy_pt_objects.py
|
feifeivv/transformers
|
08a5f57567d8a975d900b66658bfd3c28c9dbec5
|
[
"Apache-2.0"
] | 2
|
2021-02-18T03:12:51.000Z
|
2021-04-16T13:16:58.000Z
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_backends
class PyTorchBenchmark:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PyTorchBenchmarkArguments:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GlueDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GlueDataTrainingArguments:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineTextDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithRefDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithSOPTextDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataTrainingArguments:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDatasetForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
# Generation-utility dummies (beam search, logits processors/warpers, stopping criteria):
# every entry point only calls requires_backends(..., ["torch"]).
class BeamScorer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BeamSearchScorer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class ForcedBOSTokenLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ForcedEOSTokenLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class HammingDiversityLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class InfNanRemoveLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LogitsProcessorList:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LogitsWarper:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MinLengthLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class NoBadWordsLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class NoRepeatNGramLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class PrefixConstrainedLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RepetitionPenaltyLogitsProcessor:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class TemperatureLogitsWarper:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class TopKLogitsWarper:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class TopPLogitsWarper:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MaxLengthCriteria:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MaxTimeCriteria:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class StoppingCriteria:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class StoppingCriteriaList:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
def top_k_top_p_filtering(*args, **kwargs):
    requires_backends(top_k_top_p_filtering, ["torch"])
# Core modeling-utils dummies: every entry point only calls requires_backends(..., ["torch"]).
class Conv1D:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class PreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def apply_chunking_to_forward(*args, **kwargs):
    requires_backends(apply_chunking_to_forward, ["torch"])
def prune_layer(*args, **kwargs):
    requires_backends(prune_layer, ["torch"])
# ALBERT dummies: archive-list constant is None; every entry point only calls
# requires_backends(..., ["torch"]).
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AlbertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AlbertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AlbertForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class AlbertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AlbertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AlbertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AlbertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AlbertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_albert(*args, **kwargs):
    requires_backends(load_tf_weights_in_albert, ["torch"])
# Auto-model dummies: all MODEL_*_MAPPING constants are None; every AutoModel* entry point
# only calls requires_backends(..., ["torch"]).
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None
MODEL_FOR_CAUSAL_LM_MAPPING = None
MODEL_FOR_CTC_MAPPING = None
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None
MODEL_FOR_MASKED_LM_MAPPING = None
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
MODEL_FOR_OBJECT_DETECTION_MAPPING = None
MODEL_FOR_PRETRAINING_MAPPING = None
MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
MODEL_MAPPING = None
MODEL_WITH_LM_HEAD_MAPPING = None
class AutoModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForAudioClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForCTC:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForImageClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForImageSegmentation:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForNextSentencePrediction:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForObjectDetection:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForSeq2SeqLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForSpeechSeq2Seq:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForTableQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class AutoModelWithLMHead:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# BART dummies: every entry point only calls requires_backends(..., ["torch"]).
BART_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BartForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BartForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BartForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BartForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BartModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BartPretrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class PretrainedBartModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# BEiT dummies: every entry point only calls requires_backends(..., ["torch"]).
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BeitForImageClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BeitForMaskedImageModeling:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BeitForSemanticSegmentation:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BeitModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BeitPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# BERT dummies: every entry point only calls requires_backends(..., ["torch"]).
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BertForNextSentencePrediction:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BertForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BertLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BertLMHeadModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_bert(*args, **kwargs):
    requires_backends(load_tf_weights_in_bert, ["torch"])
# BertGeneration dummies: every entry point only calls requires_backends(..., ["torch"]).
class BertGenerationDecoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BertGenerationEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BertGenerationPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_bert_generation(*args, **kwargs):
    requires_backends(load_tf_weights_in_bert_generation, ["torch"])
# BigBird dummies: every entry point only calls requires_backends(..., ["torch"]).
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BigBirdForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BigBirdForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class BigBirdModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_big_bird(*args, **kwargs):
    requires_backends(load_tf_weights_in_big_bird, ["torch"])
# BigBirdPegasus dummies: every entry point only calls requires_backends(..., ["torch"]).
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BigBirdPegasusForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdPegasusForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdPegasusForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdPegasusForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdPegasusModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BigBirdPegasusPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Blenderbot dummies: every entry point only calls requires_backends(..., ["torch"]).
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BlenderbotForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BlenderbotModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BlenderbotPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# BlenderbotSmall dummies: every entry point only calls requires_backends(..., ["torch"]).
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotSmallForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BlenderbotSmallForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BlenderbotSmallModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class BlenderbotSmallPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# CamemBERT dummies: every entry point only calls requires_backends(..., ["torch"]).
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CamembertForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CamembertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CamembertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CamembertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CamembertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CamembertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CamembertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# CANINE dummies: every entry point only calls requires_backends(..., ["torch"]).
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CanineForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CanineForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CanineForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CanineForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CanineLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class CanineModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CaninePreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_canine(*args, **kwargs):
    requires_backends(load_tf_weights_in_canine, ["torch"])
# CLIP dummies: every entry point only calls requires_backends(..., ["torch"]).
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CLIPModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CLIPPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CLIPTextModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CLIPVisionModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# ConvBERT dummies: every entry point only calls requires_backends(..., ["torch"]).
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConvBertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ConvBertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ConvBertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ConvBertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ConvBertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ConvBertLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class ConvBertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ConvBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_convbert(*args, **kwargs):
    requires_backends(load_tf_weights_in_convbert, ["torch"])
# CTRL dummies: every entry point only calls requires_backends(..., ["torch"]).
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CTRLForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CTRLLMHeadModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CTRLModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class CTRLPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# DeBERTa dummies: every entry point only calls requires_backends(..., ["torch"]).
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# DeBERTa-v2 dummies: every entry point only calls requires_backends(..., ["torch"]).
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaV2ForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaV2ForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaV2ForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaV2ForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaV2Model:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DebertaV2PreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# DeiT dummies: every entry point only calls requires_backends(..., ["torch"]).
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DeiTForImageClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class DeiTForImageClassificationWithTeacher:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class DeiTModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DeiTPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# DistilBERT dummies: every entry point only calls requires_backends(..., ["torch"]).
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DistilBertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DistilBertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DistilBertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DistilBertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DistilBertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DistilBertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DistilBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# DPR dummies: every entry point only calls requires_backends(..., ["torch"]).
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DPRContextEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class DPRPretrainedContextEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class DPRPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class DPRPretrainedQuestionEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class DPRPretrainedReader:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class DPRQuestionEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class DPRReader:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
# ELECTRA dummies: every entry point only calls requires_backends(..., ["torch"]).
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ElectraForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ElectraForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ElectraForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class ElectraForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ElectraForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ElectraForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ElectraModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ElectraPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_electra(*args, **kwargs):
    requires_backends(load_tf_weights_in_electra, ["torch"])
# EncoderDecoderModel dummy: both entry points only call requires_backends(..., ["torch"]).
class EncoderDecoderModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# FlauBERT dummies: every entry point only calls requires_backends(..., ["torch"]).
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FlaubertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FlaubertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FlaubertForQuestionAnsweringSimple:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FlaubertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FlaubertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FlaubertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FlaubertWithLMHeadModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FNetForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FNetForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FNetForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FNetForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FNetForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FNetLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FNetPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FSMTForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class FSMTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class PretrainedFSMTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
# --- Funnel Transformer dummy objects ------------------------------------------
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FunnelBaseModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FunnelForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FunnelForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# NOTE(review): no from_pretrained on this dummy (unlike its siblings) —
# from_pretrained calls will fail with AttributeError instead of the backend
# error; verify against the dummy-object generator template.
class FunnelForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class FunnelForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FunnelForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FunnelForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FunnelModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class FunnelPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_funnel(*args, **kwargs):
    requires_backends(load_tf_weights_in_funnel, ["torch"])
# --- GPT-2 / GPT-Neo / GPT-J dummy objects -------------------------------------
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPT2DoubleHeadsModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPT2ForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPT2ForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPT2LMHeadModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPT2Model:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPT2PreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_gpt2(*args, **kwargs):
    requires_backends(load_tf_weights_in_gpt2, ["torch"])
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTNeoForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPTNeoForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPTNeoModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPTNeoPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_gpt_neo(*args, **kwargs):
    requires_backends(load_tf_weights_in_gpt_neo, ["torch"])
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTJForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPTJForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPTJModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class GPTJPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- HuBERT / I-BERT dummy objects ---------------------------------------------
HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
# NOTE(review): HubertForCTC has no from_pretrained stub, unlike the other
# Hubert dummies — from_pretrained calls raise AttributeError instead of the
# backend error; confirm against the generator template.
class HubertForCTC:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class HubertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class HubertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class HubertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class IBertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class IBertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class IBertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class IBertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class IBertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class IBertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class IBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- LayoutLM / LayoutLMv2 dummy objects ---------------------------------------
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMv2ForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMv2ForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMv2ForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMv2Model:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LayoutLMv2PreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- LED / Longformer dummy objects --------------------------------------------
LED_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LEDForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LEDForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LEDForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LEDModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LEDPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LongformerForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LongformerForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LongformerForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LongformerForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LongformerForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LongformerModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LongformerPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Internal attention module (not a loadable checkpoint) — presumably why no
# from_pretrained stub is generated here.
class LongformerSelfAttention:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
# --- LUKE / LXMERT dummy objects -----------------------------------------------
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None
# NOTE(review): the three Luke entity heads below lack from_pretrained stubs —
# from_pretrained calls raise AttributeError rather than the backend error;
# confirm this matches the dummy-object generator template.
class LukeForEntityClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class LukeForEntityPairClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class LukeForEntitySpanClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class LukeModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LukePreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Lxmert internal modules (Encoder/XLayer/VisualFeatureEncoder) and the
# pretraining head get constructor-only dummies; only the QA head and the
# model classes carry from_pretrained stubs.
class LxmertEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class LxmertForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class LxmertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LxmertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LxmertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class LxmertVisualFeatureEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class LxmertXLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
# --- M2M-100 / Marian / MBart dummy objects ------------------------------------
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None
class M2M100ForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class M2M100Model:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class M2M100PreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MarianForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MarianModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MarianMTModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MBartForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MBartForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MBartForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MBartForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MBartModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MBartPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- Megatron-BERT / MMBT dummy objects ----------------------------------------
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MegatronBertForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MegatronBertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MegatronBertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# NOTE(review): the next two dummies omit from_pretrained (AttributeError
# instead of the backend error) — verify against the generator template.
class MegatronBertForNextSentencePrediction:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MegatronBertForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MegatronBertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MegatronBertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MegatronBertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MegatronBertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MegatronBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MMBTForClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MMBTModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Internal embedding module — constructor-only dummy.
class ModalEmbeddings:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
# --- MobileBERT / MPNet / MT5 dummy objects ------------------------------------
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MobileBertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MobileBertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# NOTE(review): the next two dummies omit from_pretrained (AttributeError
# instead of the backend error) — verify against the generator template.
class MobileBertForNextSentencePrediction:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MobileBertForPreTraining:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MobileBertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MobileBertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MobileBertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Internal building block — constructor-only dummy.
class MobileBertLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MobileBertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MobileBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_mobilebert(*args, **kwargs):
    requires_backends(load_tf_weights_in_mobilebert, ["torch"])
MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MPNetForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MPNetForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MPNetForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MPNetForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MPNetForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Internal building block — constructor-only dummy.
class MPNetLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class MPNetModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MPNetPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MT5EncoderModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MT5ForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class MT5Model:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- OpenAI GPT / Pegasus / ProphetNet dummy objects ---------------------------
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class OpenAIGPTDoubleHeadsModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class OpenAIGPTForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class OpenAIGPTLMHeadModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class OpenAIGPTModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class OpenAIGPTPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_openai_gpt(*args, **kwargs):
    requires_backends(load_tf_weights_in_openai_gpt, ["torch"])
class PegasusForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class PegasusForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class PegasusModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class PegasusPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
# NOTE(review): ProphetNetDecoder/Encoder have constructor-only dummies even
# though the real classes are loadable — from_pretrained would raise
# AttributeError here; confirm against the generator template.
class ProphetNetDecoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class ProphetNetEncoder:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class ProphetNetForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ProphetNetForConditionalGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ProphetNetModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ProphetNetPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- RAG / Reformer dummy objects ----------------------------------------------
class RagModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RagPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# NOTE(review): the two RAG generation heads omit from_pretrained
# (AttributeError instead of the backend error) — verify against the
# generator template.
class RagSequenceForGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class RagTokenForGeneration:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
# Internal building block — constructor-only dummy.
class ReformerAttention:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class ReformerForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ReformerForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ReformerForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Internal building block — constructor-only dummy.
class ReformerLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class ReformerModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ReformerModelWithLMHead:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class ReformerPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- RemBERT / RetriBERT dummy objects -----------------------------------------
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RemBertForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RemBertForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RemBertForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RemBertForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RemBertForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RemBertForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Internal building block — constructor-only dummy.
class RemBertLayer:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class RemBertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RemBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def load_tf_weights_in_rembert(*args, **kwargs):
    requires_backends(load_tf_weights_in_rembert, ["torch"])
RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RetriBertModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RetriBertPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# --- RoBERTa / RoFormer dummy objects ------------------------------------------
# (RoFormer continues past this point in the file.)
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RobertaForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RobertaForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RobertaForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RobertaForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RobertaForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RobertaForTokenClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RobertaModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RobertaPreTrainedModel:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RoFormerForCausalLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RoFormerForMaskedLM:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RoFormerForMultipleChoice:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RoFormerForQuestionAnswering:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RoFormerForSequenceClassification:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class RoFormerForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class RoFormerLayer:
    # Backend-guard stub: the module imports cleanly without torch, but
    # instantiation raises via requires_backends (real class needs "torch").
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
class RoFormerModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class RoFormerPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
def load_tf_weights_in_roformer(*args, **kwargs):
requires_backends(load_tf_weights_in_roformer, ["torch"])
SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SegformerDecodeHead:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForImageClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForSemanticSegmentation:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SegformerPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWForCTC:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SEWModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SEWPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWDForCTC:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SEWDModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SEWDPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SpeechEncoderDecoderModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Speech2TextForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class Speech2TextModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class Speech2TextPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class Speech2Text2ForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class Speech2Text2PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SplinterForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SplinterLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SplinterPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SqueezeBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SqueezeBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SqueezeBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SqueezeBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SqueezeBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SqueezeBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class SqueezeBertModule:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
T5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class T5EncoderModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class T5ForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class T5Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class T5PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
def load_tf_weights_in_t5(*args, **kwargs):
requires_backends(load_tf_weights_in_t5, ["torch"])
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AdaptiveEmbedding:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class TransfoXLLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class TransfoXLModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class TransfoXLPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
def load_tf_weights_in_transfo_xl(*args, **kwargs):
requires_backends(load_tf_weights_in_transfo_xl, ["torch"])
TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TrOCRForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class TrOCRPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechForCTC:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class UniSpeechModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class UniSpeechPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechSatForCTC:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class UniSpeechSatModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class UniSpeechSatPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class VisionEncoderDecoderModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class VisualBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class VisualBertForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class VisualBertForRegionToPhraseAlignment:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForVisualReasoning:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class VisualBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTForImageClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class ViTPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Wav2Vec2ForCTC:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class Wav2Vec2ForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class Wav2Vec2Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class Wav2Vec2PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMForQuestionAnsweringSimple:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMWithLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMProphetNetDecoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMProphetNetForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMProphetNetModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMRobertaForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMRobertaForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMRobertaForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMRobertaForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMRobertaForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMRobertaForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLMRobertaModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLNetForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLNetForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLNetForQuestionAnsweringSimple:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLNetForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLNetForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLNetLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLNetModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
class XLNetPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
def load_tf_weights_in_xlnet(*args, **kwargs):
requires_backends(load_tf_weights_in_xlnet, ["torch"])
class Adafactor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AdamW:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def get_constant_schedule(*args, **kwargs):
requires_backends(get_constant_schedule, ["torch"])
def get_constant_schedule_with_warmup(*args, **kwargs):
requires_backends(get_constant_schedule_with_warmup, ["torch"])
def get_cosine_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_schedule_with_warmup, ["torch"])
def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"])
def get_linear_schedule_with_warmup(*args, **kwargs):
requires_backends(get_linear_schedule_with_warmup, ["torch"])
def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"])
def get_scheduler(*args, **kwargs):
    # Backend-guard stub: raises via requires_backends when torch is absent.
    requires_backends(get_scheduler, ["torch"])
class Trainer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def torch_distributed_zero_first(*args, **kwargs):
requires_backends(torch_distributed_zero_first, ["torch"])
class Seq2SeqTrainer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
| 24.182962
| 84
| 0.665312
|
4a06759a09bb8cc22d51a37b650164ab3526f167
| 130
|
py
|
Python
|
thirdparty/__init__.py
|
t0w4r/phcat
|
1b0f4e0fce4279ea7582a83f13eadcd9595ef319
|
[
"Apache-2.0"
] | null | null | null |
thirdparty/__init__.py
|
t0w4r/phcat
|
1b0f4e0fce4279ea7582a83f13eadcd9595ef319
|
[
"Apache-2.0"
] | null | null | null |
thirdparty/__init__.py
|
t0w4r/phcat
|
1b0f4e0fce4279ea7582a83f13eadcd9595ef319
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'wh1t3P1g'
__date__ = '2018/5/24'
if __name__ == '__main__':
pass
| 16.25
| 26
| 0.615385
|
4a0675a6b104b58f9430d7bdea5e2a63dcc424da
| 19,382
|
py
|
Python
|
reactive/containerd.py
|
mastier/charm-containerd
|
a1fa72351f8e14693e179997c1ede16f51410974
|
[
"Apache-2.0"
] | null | null | null |
reactive/containerd.py
|
mastier/charm-containerd
|
a1fa72351f8e14693e179997c1ede16f51410974
|
[
"Apache-2.0"
] | null | null | null |
reactive/containerd.py
|
mastier/charm-containerd
|
a1fa72351f8e14693e179997c1ede16f51410974
|
[
"Apache-2.0"
] | null | null | null |
import os
import base64
import binascii
import json
import requests
import traceback
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charms.reactive import (
hook,
when,
when_not,
set_state,
is_state,
remove_state,
endpoint_from_flag
)
from charms.layer import containerd, status
from charms.layer.container_runtime_common import (
ca_crt_path,
server_crt_path,
server_key_path,
check_for_juju_https_proxy
)
from charmhelpers.core import (
host,
unitdata
)
from charmhelpers.core.templating import render
from charmhelpers.core.hookenv import (
atexit,
config,
log,
application_version_set
)
from charmhelpers.core.kernel import modprobe
from charmhelpers.fetch import (
apt_install,
apt_update,
apt_purge,
apt_hold,
apt_autoremove,
apt_unhold,
import_key
)
DB = unitdata.kv()
CONTAINERD_PACKAGE = 'containerd'
NVIDIA_PACKAGES = [
'cuda-drivers',
'nvidia-container-runtime',
]
def _check_containerd():
    """
    Check that containerd is running.

    `ctr version` calls both client and server side, so is a reasonable
    indication that everything's been set up correctly.

    :return: bytes output of ``ctr version`` on success; None when the
        binary is missing or the call fails (NB: truthy/falsy, not a
        strict Boolean -- publish_version_to_juju parses the bytes).
    """
    try:
        version = check_output(['ctr', 'version'])
    except (FileNotFoundError, CalledProcessError):
        return None
    return version
def _juju_proxy_changed():
    """
    Check to see if the Juju model HTTP(S) proxy settings have changed.

    These aren't propagated to the charm so we'll need to do it here.

    :return: Boolean
    """
    cached = DB.get('config-cache', None)
    if not cached:
        # Nothing recorded yet -- treat the first pass as a change.
        return True
    current = check_for_juju_https_proxy(config)
    return any(cached[key] != current[key]
               for key in ('http_proxy', 'https_proxy', 'no_proxy'))
@atexit
def charm_status():
    """
    Set the charm's status after each hook is run.

    :return: None
    """
    if is_state('upgrade.series.in-progress'):
        status.blocked('Series upgrade in progress')
        return
    if is_state('containerd.nvidia.invalid-option'):
        bad_driver = config().get('gpu_driver')
        status.blocked(
            '{} is an invalid option for gpu_driver'.format(bad_driver)
        )
        return
    if _check_containerd():
        status.active('Container runtime available')
        set_state('containerd.ready')
    else:
        status.blocked('Container runtime not available')
def strip_url(url):
    """Strip the URL of protocol, slashes etc., and keep host:port.

    Examples:
        url: http://10.10.10.10:8000     --> return: 10.10.10.10:8000
        url: https://myregistry.io:8000/ --> return: myregistry.io:8000
        url: myregistry.io:8000          --> return: myregistry.io:8000
    """
    trimmed = url.rstrip('/')
    _scheme, sep, host_port = trimmed.partition('://')
    # No scheme separator present: the trimmed string already is host:port.
    return host_port if sep else trimmed
def update_custom_tls_config(config_directory, registries, old_registries):
    """
    Read registries config and remove old/write new tls files from/to disk.

    NOTE: mutates the registry dicts in place -- for each present
    ``ca_file``/``key_file``/``cert_file`` entry a matching
    ``ca``/``key``/``cert`` key is added holding the on-disk path.
    Callers (merge_custom_registries) rely on this side effect.

    :param str config_directory: containerd config directory
    :param List registries: juju config for custom registries
    :param List old_registries: old juju config for custom registries
    :return: None
    """
    # Remove tls files of old registries, so as not to leave unneeded, stale files.
    for registry in old_registries:
        for opt in ['ca', 'key', 'cert']:
            file_b64 = registry.get('%s_file' % opt)
            if file_b64:
                # Reconstruct the path the file was originally written to.
                registry[opt] = os.path.join(
                    config_directory, "%s.%s" % (strip_url(registry['url']), opt)
                )
                if os.path.isfile(registry[opt]):
                    os.remove(registry[opt])
    # Write tls files of new registries.
    for registry in registries:
        for opt in ['ca', 'key', 'cert']:
            file_b64 = registry.get('%s_file' % opt)
            if file_b64:
                try:
                    file_contents = base64.b64decode(file_b64)
                except (binascii.Error, TypeError):
                    # Bad user-supplied config: log and skip this one file
                    # rather than failing the whole hook.
                    log(traceback.format_exc())
                    log("{}:{} didn't look like base64 data... skipping"
                        .format(registry['url'], opt))
                    continue
                registry[opt] = os.path.join(
                    config_directory, "%s.%s" % (strip_url(registry['url']), opt)
                )
                with open(registry[opt], 'wb') as f:
                    f.write(file_contents)
def populate_host_for_custom_registries(custom_registries):
    """Populate host field from url if missing for custom registries.

    Examples:
        url: http://10.10.10.10:8000     --> host: 10.10.10.10:8000
        url: https://myregistry.io:8000/ --> host: myregistry.io:8000
        url: myregistry.io:8000          --> host: myregistry.io:8000
    """
    # Anything that is not a list (e.g. malformed juju config) passes
    # through untouched; entries are only minimally updated in place.
    if not isinstance(custom_registries, list):
        return custom_registries
    for entry in custom_registries:
        if entry.get('host'):
            continue
        source_url = entry.get('url')
        if source_url:
            entry['host'] = strip_url(source_url)
    return custom_registries
def merge_custom_registries(config_directory, custom_registries,
                            old_custom_registries):
    """
    Merge custom registries and Docker registries from relation.

    :param str config_directory: containerd config directory
    :param str custom_registries: juju config for custom registries
    :param str old_custom_registries: old juju config for custom registries
    :return: List Dictionary merged registries
    """
    # json.loads gives a fresh list; fill in any missing 'host' fields.
    registries = populate_host_for_custom_registries(
        json.loads(custom_registries))
    old_registries = (json.loads(old_custom_registries)
                      if old_custom_registries else [])
    # Sync tls material on disk (also annotates registry dicts with paths).
    update_custom_tls_config(config_directory, registries, old_registries)
    relation_registry = DB.get('registry', None)
    if relation_registry:
        registries.append(relation_registry)
    return registries
@hook('update-status')
def update_status():
    """
    Triggered when update-status is called.

    Flags a Juju proxy change so the containerd config is re-rendered
    by the corresponding handler.

    :return: None
    """
    if _juju_proxy_changed():
        set_state('containerd.juju-proxy.changed')
@hook('upgrade-charm')
def upgrade_charm():
    """
    Triggered when upgrade-charm is called.

    :return: None
    """
    # Prevent containerd apt pkg from being implicitly updated.
    apt_hold(CONTAINERD_PACKAGE)
    # Re-render config in case the template has changed in the new charm.
    config_changed()
@when_not('containerd.br_netfilter.enabled')
def enable_br_netfilter_module():
    """
    Enable br_netfilter to work around https://github.com/kubernetes/kubernetes/issues/21613.

    :return: None
    """
    try:
        modprobe('br_netfilter', persist=True)
    except Exception:
        log(traceback.format_exc())
        if not host.is_container():
            # A real host should be able to load the module; leave the
            # flag unset so this handler runs again on the next hook.
            log('LXD not detected, will retry loading br_netfilter')
            return
        # Containers cannot modprobe; proceed without the module.
        log('LXD detected, ignoring failure to load br_netfilter')
    set_state('containerd.br_netfilter.enabled')
@when_not('containerd.ready',
          'containerd.installed',
          'endpoint.containerd.departed')
def install_containerd():
    """
    Install containerd and then create initial configuration.

    :return: None
    """
    status.maintenance('Installing containerd via apt')
    apt_update()
    apt_install(CONTAINERD_PACKAGE, fatal=True)
    # Pin the package: upgrades are driven by the charm, never apt itself.
    apt_hold(CONTAINERD_PACKAGE)
    set_state('containerd.installed')
    config_changed()
@when('containerd.installed')
@when_not('containerd.version-published')
def publish_version_to_juju():
    """
    Publish the containerd version to Juju.

    Parses `ctr version` output; assumes the 7th whitespace-separated
    token is the client version, e.g. b'1.3.3-0ubuntu1' -> '1.3.3'
    (TODO confirm this field layout holds across packaged releases).

    :return: None
    """
    version_string = _check_containerd()
    if not version_string:
        return
    try:
        version = version_string.split()[6].split(b'-')[0].decode()
    except (IndexError, UnicodeDecodeError):
        # Unexpected `ctr version` output: don't crash the hook; leave the
        # state unset so a later hook run can retry once output is sane.
        log('Unable to parse containerd version from: {}'.format(
            version_string))
        return
    application_version_set(version)
    set_state('containerd.version-published')
@when_not('containerd.nvidia.checked')
@when_not('endpoint.containerd.departed')
def check_for_gpu():
    """
    Check if an Nvidia GPU exists.

    Sets or clears the containerd.nvidia.* states based on the
    ``gpu_driver`` charm option and the lspci output.

    :return: None
    """
    supported_drivers = ('auto', 'none', 'nvidia')
    driver = config().get('gpu_driver')
    if driver not in supported_drivers:
        set_state('containerd.nvidia.invalid-option')
        return
    lspci = check_output(['lspci', '-nnk']).rstrip().decode('utf-8').lower()
    if driver != 'none':
        gpu_detected = 'nvidia' in lspci
        if driver == 'nvidia' or (driver == 'auto' and gpu_detected):
            set_state('containerd.nvidia.available')
        else:
            remove_state('containerd.nvidia.available')
            remove_state('containerd.nvidia.ready')
    remove_state('containerd.nvidia.invalid-option')
    set_state('containerd.nvidia.checked')
@when('containerd.nvidia.available')
@when_not('containerd.nvidia.ready', 'endpoint.containerd.departed')
def configure_nvidia():
    """
    Based on charm config, install and configure Nvidia drivers.

    Adds the nvidia-container-runtime and CUDA apt repositories (GPG keys
    fetched over the model's HTTP(S) proxy, if configured), installs the
    driver packages, then re-renders the containerd config.

    :return: None
    """
    status.maintenance('Installing Nvidia drivers.')
    dist = host.lsb_release()
    # e.g. 'ubuntu20.04' -- used verbatim in the nvidia repo URLs below.
    release = '{}{}'.format(
        dist['DISTRIB_ID'].lower(),
        dist['DISTRIB_RELEASE']
    )
    proxies = {
        "http": config('http_proxy'),
        "https": config('https_proxy')
    }
    ncr_gpg_key = requests.get(
        'https://nvidia.github.io/nvidia-container-runtime/gpgkey', proxies=proxies).text
    import_key(ncr_gpg_key)
    with open(
        '/etc/apt/sources.list.d/nvidia-container-runtime.list', 'w'
    ) as f:
        f.write(
            'deb '
            'https://nvidia.github.io/libnvidia-container/{}/$(ARCH) /\n'
            .format(release)
        )
        f.write(
            'deb '
            'https://nvidia.github.io/nvidia-container-runtime/{}/$(ARCH) /\n'
            .format(release)
        )
    # The CUDA repo path drops the dot from the release ('ubuntu2004').
    cuda_gpg_key = requests.get(
        'https://developer.download.nvidia.com/'
        'compute/cuda/repos/{}/x86_64/7fa2af80.pub'
        .format(release.replace('.', '')), proxies=proxies
    ).text
    import_key(cuda_gpg_key)
    with open('/etc/apt/sources.list.d/cuda.list', 'w') as f:
        f.write(
            'deb '
            'http://developer.download.nvidia.com/'
            'compute/cuda/repos/{}/x86_64 /\n'
            .format(release.replace('.', ''))
        )
    apt_update()
    apt_install(NVIDIA_PACKAGES, fatal=True)
    set_state('containerd.nvidia.ready')
    config_changed()
@when('endpoint.containerd.departed')
def purge_containerd():
    """
    Purge containerd (and Nvidia packages, if installed) from the unit.

    :return: None
    """
    status.maintenance('Removing containerd from principal')

    host.service_stop('containerd.service')
    apt_unhold(CONTAINERD_PACKAGE)
    apt_purge(CONTAINERD_PACKAGE, fatal=True)

    if is_state('containerd.nvidia.ready'):
        # Drop the Nvidia packages and the apt sources we laid down.
        apt_purge(NVIDIA_PACKAGES, fatal=True)
        for source_list in ('/etc/apt/sources.list.d/cuda.list',
                            '/etc/apt/sources.list.d/'
                            'nvidia-container-runtime.list'):
            if os.path.isfile(source_list):
                os.remove(source_list)

    apt_autoremove(purge=True, fatal=True)

    # Clear every state this layer manages so a rejoin starts fresh.
    for state in ('containerd.ready',
                  'containerd.installed',
                  'containerd.nvidia.ready',
                  'containerd.nvidia.checked',
                  'containerd.nvidia.available',
                  'containerd.version-published'):
        remove_state(state)
@when('config.changed.gpu_driver')
def gpu_config_changed():
    """
    Clear the GPU-checked flag so detection reruns with the new setting.

    :return: None
    """
    remove_state('containerd.nvidia.checked')
@when('config.changed')
@when_not('endpoint.containerd.departed')
def config_changed():
    """
    Render the containerd config template and schedule a restart.

    Assembles the template context from charm config, the containerd
    endpoint (sandbox image), the unit DB (untrusted runtime) and GPU
    availability, then renders /etc/containerd/config.toml.

    :return: None
    """
    if _juju_proxy_changed():
        set_state('containerd.juju-proxy.changed')
    # Create "dumb" context based on Config to avoid triggering config.changed
    context = dict(config())
    # Pick the template matching the requested containerd config schema.
    if context['config_version'] == "v2":
        template_config = "config_v2.toml"
    else:
        template_config = "config.toml"
    config_file = 'config.toml'
    config_directory = '/etc/containerd'
    # Prefer a sandbox image published by the principal over our default.
    endpoint = endpoint_from_flag('endpoint.containerd.available')
    if endpoint:
        sandbox_image = endpoint.get_sandbox_image()
        if sandbox_image:
            log('Setting sandbox_image to: {}'.format(sandbox_image))
            context['sandbox_image'] = sandbox_image
        else:
            context['sandbox_image'] = containerd.get_sandbox_image()
    else:
        context['sandbox_image'] = containerd.get_sandbox_image()
    if not os.path.isdir(config_directory):
        os.mkdir(config_directory)
    # If custom_registries changed, make sure to remove old tls files.
    if config().changed('custom_registries'):
        old_custom_registries = config().previous('custom_registries')
    else:
        old_custom_registries = None
    context['custom_registries'] = \
        merge_custom_registries(config_directory, context['custom_registries'],
                                old_custom_registries)
    # Wire in the untrusted runtime recorded by untrusted_available(), if any.
    untrusted = DB.get('untrusted')
    if untrusted:
        context['untrusted'] = True
        context['untrusted_name'] = untrusted['name']
        context['untrusted_path'] = untrusted['binary_path']
        context['untrusted_binary'] = os.path.basename(
            untrusted['binary_path'])
    else:
        context['untrusted'] = False
    # Resolve runtime 'auto' to nvidia-container-runtime or runc depending
    # on whether a GPU was detected.
    if is_state('containerd.nvidia.available') \
            and context.get('runtime') == 'auto':
        context['runtime'] = 'nvidia-container-runtime'
    if not is_state('containerd.nvidia.available') \
            and context.get('runtime') == 'auto':
        context['runtime'] = 'runc'
    render(
        template_config,
        os.path.join(config_directory, config_file),
        context
    )
    set_state('containerd.restart')
@when('containerd.installed')
@when('containerd.juju-proxy.changed')
@when_not('endpoint.containerd.departed')
def proxy_changed():
    """
    Write or remove the systemd proxy drop-in for containerd.

    :return: None
    """
    # Plain dict context so reading config does not retrigger config.changed.
    context = check_for_juju_https_proxy(config)

    drop_in_dir = '/etc/systemd/system/containerd.service.d'
    drop_in = os.path.join(drop_in_dir, 'proxy.conf')

    if context.get('http_proxy') or \
            context.get('https_proxy') or context.get('no_proxy'):
        os.makedirs(drop_in_dir, exist_ok=True)
        log('Proxy changed, writing new file to {}'.format(drop_in))
        render('proxy.conf', drop_in, context)
    else:
        try:
            log('Proxy cleaned, removing file {}'.format(drop_in))
            os.remove(drop_in)
        except FileNotFoundError:
            return  # We don't need to restart the daemon.

    DB.set('config-cache', context)
    remove_state('containerd.juju-proxy.changed')
    check_call(['systemctl', 'daemon-reload'])
    set_state('containerd.restart')
@when('containerd.restart')
@when_not('endpoint.containerd.departed')
def restart_containerd():
    """
    Restart the containerd systemd service.

    On failure the restart flag stays set, so this handler fires again
    on the next hook; a message is logged in the meantime.
    """
    status.maintenance('Restarting containerd')
    restarted = host.service_restart('containerd.service')
    if not restarted:
        log('Failed to restart containerd; will retry')
        return
    remove_state('containerd.restart')
@when('containerd.ready')
@when('endpoint.containerd.joined')
@when_not('endpoint.containerd.departed')
def publish_config():
    """
    Publish runtime configuration to the principal charm.

    :return: None
    """
    containerd_endpoint = endpoint_from_flag('endpoint.containerd.joined')
    containerd_endpoint.set_config(
        socket='unix:///var/run/containerd/containerd.sock',
        runtime='remote',  # TODO handle in k8s worker.
        nvidia_enabled=is_state('containerd.nvidia.available')
    )
@when('endpoint.untrusted.available')
@when_not('untrusted.configured')
@when_not('endpoint.containerd.departed')
def untrusted_available():
    """
    Configure an untrusted container runtime received over the relation.

    Stores the runtime details in the unit DB and re-renders the
    containerd config. Retried on later hooks until the remote side has
    actually published its config.

    :return: None
    """
    untrusted_runtime = endpoint_from_flag('endpoint.untrusted.available')
    received = dict(untrusted_runtime.get_config())
    # Membership test directly on the dict (no need to materialise .keys()).
    if 'name' not in received:
        return  # Try until config is available.
    DB.set('untrusted', received)
    config_changed()
    set_state('untrusted.configured')
@when('endpoint.untrusted.departed')
def untrusted_departed():
    """
    Forget the untrusted container runtime when its relation departs.

    :return: None
    """
    # Drop the stored runtime details and persist immediately.
    DB.unset('untrusted')
    DB.flush()
    # Re-render config.toml without the untrusted runtime.
    config_changed()
    remove_state('untrusted.configured')
@when('endpoint.docker-registry.ready')
@when_not('containerd.registry.configured')
def configure_registry():
    """
    Add docker registry config when present.

    Reads connection, auth and TLS details from the docker-registry
    endpoint, stores them in the unit DB and re-renders the containerd
    config.

    :return: None
    """
    registry = endpoint_from_flag('endpoint.docker-registry.ready')
    docker_registry = {
        'url': registry.registry_netloc
    }
    # Handle auth data.
    if registry.has_auth_basic():
        docker_registry['username'] = registry.basic_user
        docker_registry['password'] = registry.basic_password
    # Handle TLS data.
    if registry.has_tls():
        # Ensure the CA that signed our registry cert is trusted.
        host.install_ca_cert(registry.tls_ca, name='juju-docker-registry')
        # NOTE(review): ca_crt_path / server_key_path / server_crt_path are
        # not defined in this handler -- presumably module-level constants
        # declared earlier in the file; confirm they exist.
        docker_registry['ca'] = str(ca_crt_path)
        docker_registry['key'] = str(server_key_path)
        docker_registry['cert'] = str(server_crt_path)
    DB.set('registry', docker_registry)
    # Re-render config.toml with the new registry, then mark it done.
    config_changed()
    set_state('containerd.registry.configured')
@when('endpoint.docker-registry.changed',
      'containerd.registry.configured')
def reconfigure_registry():
    """
    Re-run registry configuration after the relation data changes.

    :return: None
    """
    # Clearing the flag lets configure_registry() fire again.
    remove_state('containerd.registry.configured')
@when('endpoint.containerd.reconfigure')
@when_not('endpoint.containerd.departed')
def container_runtime_relation_changed():
    """
    Re-render the config using any new data from the endpoint.

    :return: None
    """
    config_changed()
    reconfigure_endpoint = endpoint_from_flag('endpoint.containerd.reconfigure')
    reconfigure_endpoint.handle_remote_config()
@when('containerd.registry.configured')
@when_not('endpoint.docker-registry.joined')
def remove_registry():
    """
    Drop stored registry config once the registry relation is gone.

    :return: None
    """
    stored_registry = DB.get('registry', None)
    if stored_registry:
        # Forget the cached registry settings.
        DB.unset('registry')
        DB.flush()
        # Remove auth-related data.
        log('Disabling auth for docker registry: {}.'.format(
            stored_registry['url']))
        config_changed()
    remove_state('containerd.registry.configured')
| 27.609687
| 112
| 0.648488
|
4a06765620b5d2dc99fc081f9df4a37efe8fe281
| 34,227
|
py
|
Python
|
test/run_tests.py
|
inpolonsky/rrtmgp_topography
|
f3038e2649e2dce20ebb490b975ccfef174f9e99
|
[
"BSD-3-Clause"
] | null | null | null |
test/run_tests.py
|
inpolonsky/rrtmgp_topography
|
f3038e2649e2dce20ebb490b975ccfef174f9e99
|
[
"BSD-3-Clause"
] | null | null | null |
test/run_tests.py
|
inpolonsky/rrtmgp_topography
|
f3038e2649e2dce20ebb490b975ccfef174f9e99
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# make sure print behaves the same in 2.7 and 3.x
from __future__ import print_function
import os, sys, shutil, errno
import subprocess as sub
if sys.version_info[0] < 3:
import ConfigParser
else:
import configparser as ConfigParser
# package netCDF4 (https://github.com/Unidata/netcdf4-python)
import netCDF4 as nc
import numpy as np
# for reversing the vertical direction (global attribute in netCDF)
revTopAtt = 'top_at_1'
def path_check(path):
  """
  Exit the program if *path* does not exist; use before reading a file.
  """
  if os.path.exists(path):
    return
  sys.exit('Could not find %s, returning' % path)
# end path_check()
def spawn(cmd, noSplit=False, errStop=True):
  """
  Run *cmd* through the shell and capture its output.

  Call:
    results = spawn(cmd)

  Input:
    cmd -- a simple string that would be used at the Unix command line

  Keywords:
    noSplit -- boolean, if True no string split is performed on the
      standard output
    errStop -- boolean, if True the function will exit upon
      encountering any standard error that results from spawning cmd

  Returns:
    stOut, stErr -- standard output (list of tokens, or a single string
      when noSplit is set) and standard error
  """
  proc = sub.Popen(cmd, shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
  rawOut, rawErr = proc.communicate()

  # Python 3 yields byte strings; normalize to text.
  stdOut = rawOut.decode('utf-8')
  stdErr = rawErr.decode('utf-8')

  # Negative return codes indicate the process died on a signal.
  # http://stackoverflow.com/questions/3630389/python-error-codes
  # https://linux.die.net/man/7/signal
  if proc.returncode < 0:
    sys.exit('Fatal error in running %s. Err code %d' % \
      (cmd, proc.returncode))

  if errStop and len(stdErr) > 0: sys.exit(stdErr)

  return (stdOut, stdErr) if noSplit else (stdOut.split(), stdErr)
# end spawn()
##################### From Andre's nc_diff.py ######################
def ncVarDiff(filename1, filename2, varname, factor,
  validation=False):
  """
  Compare a single netCDF variable between a reference and a test file.

  Call:
    diff_count = ncVarDiff(filename1, filename2, varname, factor)

  Input:
    filename1 -- string, path to reference netCDF
    filename2 -- string, path to test netCDF
    varname -- string, name of netCDF variable to compare
    factor -- float, relative-error threshold |(test/ref) - 1|; pairs at
      or above it count as differences. If factor is falsy (None or 0),
      no comparison is performed and 0 is returned.

  Keywords:
    validation -- boolean, print every significantly different value
      (detailed regression diagnostics) instead of summary statistics

  Returns:
    diff_count -- int, number of differing elements of varname (0 when
      shapes differ or either file contains inf/NaN values)
  """
  # Reading file 1 (reference data)
  file1 = nc.Dataset(filename1)
  var1 = file1.variables[varname]
  shape1 = var1.shape
  if validation: print(filename1, varname + str(shape1))
  data1 = var1[:]
  file1.close()

  # Reading file 2 (test data)
  file2 = nc.Dataset(filename2)
  var2 = file2.variables[varname]
  shape2 = var2.shape
  if validation: print(filename2, varname + str(shape2))
  data2 = var2[:]
  file2.close()

  # Incompatible shapes cannot be compared element-wise.
  if shape1 != shape2: return 0

  # Bail out (with diagnostics in validation mode) on non-finite values
  # in either dataset.
  for data, fname in ((data1, filename1), (data2, filename2)):
    if np.isinf(data).any():
      if validation:
        print('inf values found in ' + fname + ':' + varname)
        for r in np.argwhere(np.isinf(data)): print(r)
      # endif validation
      return 0
    # endif inf
    if np.isnan(data).any():
      if validation:
        print('NaN values found in ' + fname + ':' + varname)
        for r in np.argwhere(np.isnan(data)): print(r)
      # endif validation
      return 0
    # endif NaN
  # end data loop

  # BUGFIX: diff_count used to be assigned only inside "if factor:", so
  # a falsy factor made the final return raise NameError.
  diff_count = 0

  # compare by relative difference
  if factor:
    def myDiff(a, b):
      """Relative difference |(b/a) - 1|, guarded for zeros."""
      # both values are 0; they are identical
      if a == 0 and b == 0: return 0.0
      # a is 0, but b is non-zero
      if a == 0: return 1
      # compute the absolute relative error
      absDiff = abs(b / a - 1.0)
      # Ensure that any diffs are greater than machine precision
      return(absDiff if abs(b-a) > 3. * np.finfo(float).eps else 0.)
    # end myDiff()

    diff = np.vectorize(myDiff)(data1, data2)
    err = np.argwhere(diff >= factor)
    diff_count = len(err)

    if validation:
      # print every set of values that differ by more than "factor"
      for r in err:
        print(', '.join(\
          ['index:' + str(r), 'ref value: ' + str(data1[tuple(r)]), \
          'test value: ' + str(data2[tuple(r)]), \
          'relative difference: ' + str(diff[tuple(r)])]))
      print('factor:', factor)
      if (diff_count > 0):
        print('total differences: ' + str(diff_count))
      else:
        print('identical')
    else:
      # summary statistics only
      if diff_count > 0:
        flatDiff = diff.flatten()
        diffIdx = np.where(flatDiff >= factor)[0]
        relDiffPer = diff_count/float(diff.size) * 100
        outStr1 = '%s: %.2f%% of values differ by more than %f; ' % \
          (varname, relDiffPer, factor)
        outStr2 = 'Percentage Differences Range: [%e, %e]' % \
          (flatDiff[diffIdx].min()*100, flatDiff[diffIdx].max()*100)
        print(outStr1)
        print(outStr2)
      # end diff_count
    # endif validation
  # endif factor

  return diff_count
# end ncVarDiff()
##################### End Andre's nc_diff.py #######################
################## From Andre's nc_diff_folders.py ###################
def getVariables(filename):
  """
  Return the names of all variables in the netCDF file *filename*.
  """
  ncObj = nc.Dataset(filename, 'r')
  try:
    return [v for v in ncObj.variables]
  finally:
    # Close the dataset even if listing raises.
    ncObj.close()
# end getVariables
def ncDiff(testDir, ref, test, relDiffCut=0.0001, validation=False):
  """
  Compare every common variable between two netCDF files.

  Call:
    status = ncDiff(testDir, ref, test)

  Input:
    testDir -- string, directory in which unit test shell script exists
    ref -- string, path to reference netCDF file
    test -- string, path to test netCDF file

  Keywords:
    relDiffCut -- float, relative-difference threshold above which a
      reference-test pair is considered significantly different
    validation -- boolean, run the more detailed validation differencing

  Returns:
    1 if any variable differs, 0 otherwise
  """
  print('TEST: %s' % os.path.basename(testDir))

  startDir = os.getcwd()
  # test/util contains the nc_diff library
  sys.path.append('../util')
  path_check(testDir)
  os.chdir(testDir)

  print('Comparing %s and %s ' % (ref, test) )

  refVars = set(getVariables(ref))
  testVars = set(getVariables(test))

  # warn for variables present in only one of the files
  for v in testVars - refVars:
    print('WARNING: variable %s does not exist in file %s' % (v, ref))
  for v in refVars - testVars:
    print('WARNING: variable %s does not exist in file %s' % (v, test))

  # count variables (not elements) with significant differences
  nDiffVars = 0
  for varNC in refVars.intersection(testVars):
    if validation: print('Comparing variable %s' % varNC)
    nDiff = ncVarDiff(ref, test, varNC, relDiffCut, \
      validation=validation)
    if nDiff > 0: nDiffVars += 1
  # end varNC loop

  os.chdir(startDir)
  return 1 if nDiffVars > 0 else 0
# end ncDiff()
################## End Andre's nc_diff_folders.py ###################
def reverseVertical(inFile):
  """
  Reverse vertical dimension for all applicable variables in given
  netCDF file

  Input
    inFile -- string, path to netCDF to be modified

  Output
    True on completion. inFile is overwritten in place.
  """
  # open netCDF4 object and loop over variables in it
  ncObj = nc.Dataset(inFile, 'r+')
  ncVars = list(ncObj.variables)
  for ncVar in ncVars:
    inVar = ncObj.variables[ncVar]
    # these are just layer and level indices and should not be
    # reversed
    ll = ['lev', 'lay']
    if ncVar in ll: continue
    dims = inVar.dimensions; nDims = inVar.ndim
    # determine which axis to invert (either lev or lay nc dimension)
    # NOTE(review): if both 'lev' and 'lay' appear in dims, the last hit
    # ('lay') wins -- confirm that is intended
    for l in ll:
      if l in dims: axis = dims.index(l)
    # end l loop
    # is there a vertical dimension (i.e., has axis been assigned)?
    # if not, proceed to next array; 'axis' is del'd at the end of each
    # iteration so a previous variable's axis cannot leak through
    if not 'axis' in locals(): continue
    # not optimized for arrays with more than 3 dimensions
    if axis == 0:
      if nDims == 1:
        outVar = inVar[::-1]
      elif nDims == 2:
        outVar = inVar[::-1, :]
      elif nDims == 3:
        outVar = inVar[::-1, :, :]
      # endif nDims
    elif axis == 1:
      if nDims == 2:
        outVar = inVar[:, ::-1]
      elif nDims == 3:
        # stupid level source arrays...
        """
        if ncVar == 'lev_src_dec':
          # move the "zero vector" to the end after inversion
          outVar = inVar[:, ::-1, :]
          goodOut = outVar[:, 1:, :]
          zeroOut = outVar[:, 0, :]
          outVar = np.zeros_like(outVar)
          outVar[:,:,:] = np.nan
          outVar[:, -1, :] = np.array(zeroOut)
          outVar[:, :-1, :] = np.array(goodOut)
        elif ncVar == 'lev_src_inc':
          # move the "zero vector" to the beginning inversion
          outVar = inVar[:, ::-1, :]
          goodOut = outVar[:, :-1, :]
          zeroOut = outVar[:, -1, :]
          outVar = np.zeros_like(outVar)
          outVar[:,:,:] = np.nan
          outVar[:, 0, :] = np.array(zeroOut)
          outVar[:, 1:, :] = np.array(goodOut)
        else:
          outVar = inVar[:, ::-1, :]
        """
        outVar = inVar[:, ::-1, :]
      # endif nDims
    elif axis == 2:
      outVar = inVar[:, :, ::-1]
    # end axis conditional
    # write the reversed array back in place
    ncObj.variables[ncVar][:] = outVar
    # so we don't carry axis to the next variable
    del(axis)
  # end loop over variables
  # These variables are referenced to the vertical ordering:
  # "_inc" refers to increasing index along the vertical axis. So they
  # need to be swapped.
  if('lev_src_inc' in ncVars and 'lev_src_dec' in ncVars):
    ncObj.renameVariable('lev_src_inc', 'temp')
    ncObj.renameVariable('lev_src_dec', 'lev_src_inc')
    ncObj.renameVariable('temp', 'lev_src_dec')
  ncObj.close()
  return True
# end reverseVertical()
def unitTest(inDict, verbose=False, reverse=False, skipDiff=False, \
  workCopy='inverted.nc'):
  """
  Stage inputs, run one unit-test executable, and diff its output
  against the reference netCDF.

  Call:
    unitTest(inDict)

  Input:
    inDict -- dictionary with the following key/value pairs
      directory: string, directory where unit tests are performed
      top_directory: string, top level directory that contains
        build/ and test/ subdirectories (currently unused here)
      test_name: string, name of this test (printed only)
      refNC: string, netCDF with reference model output
      workNC: string, working-copy netCDF the executable operates on
      testNC: string, netCDF with test model output
      chainNC: string or None, output of the previous test to use as
        input instead of refNC (test chaining)
      coefficients: string, netCDF with LW or SW opt prop coefficients
      cld_coefficients: string, netCDF with LW or SW cloud opt prop
        coefficients
      options: string or None, CLI arguments for the executable
      relative_diff_cut: float, threshold passed to ncDiff()
      validation_switch: boolean, passed to ncDiff()

  Keywords:
    verbose -- boolean; print executable output to standard output
    reverse -- boolean; invert the vertical dimension before the run and
      revert it afterwards (exercises the top_at_1 code path)
    skipDiff -- boolean; skip the reference-test netCDF comparison
    workCopy -- string; filename of inverted netCDF

  Returns:
    diffCount -- 1 if netCDF files are at all different, 0 if not
      (np.nan when skipDiff is set)
  """
  inDir = inDict['directory']
  uTest = inDict['test_name']
  print('%s' % uTest)
  # NOTE(review): topDir is assigned but not used in this function
  topDir = inDict['top_directory']
  # executable is expected under build/ relative to the current dir
  testExe = os.path.join('build',inDict['executable']); path_check(testExe)
  exeOptions = inDict['options']
  if exeOptions:
    exe = '%s/%s %s' % (inDir, testExe, exeOptions)
  else:
    exe = '%s/%s' % (inDir, testExe)
  # just in case exeOptions is empty; we don't want any spaces at the
  # end of exe
  exe = exe.strip()
  # stage (copy, link, mv, etc.) some files and run the unit test
  coeffNC = inDict['coefficients']
  if coeffNC:
    path_check(coeffNC)
    # is the coeff NC already in the working directory?
    tempCoeffNC = 'coefficients.nc'
    if os.path.exists(tempCoeffNC): os.remove(tempCoeffNC)
    os.symlink(coeffNC, tempCoeffNC)
  # endif
  # stage (copy, link, mv, etc.) some files and run the unit test
  cldcoeffNC = inDict['cld_coefficients']
  if cldcoeffNC:
    path_check(cldcoeffNC)
    # is the cldcoeff NC already in the working directory?
    tempCldCoeffNC = 'cld_coefficients.nc'
    if os.path.exists(tempCldCoeffNC): os.remove(tempCldCoeffNC)
    os.symlink(cldcoeffNC, tempCldCoeffNC)
  # endif
  # chained tests consume the previous test's output instead of refNC
  inNC = inDict['refNC'] if inDict['chainNC'] is None else \
    inDict['chainNC']
  outNC = inDict['workNC']
  testNC = inDict['testNC']
  shutil.copyfile(inNC, outNC)
  if reverse:
    # run the test exe on the working NC, copy the working NC to
    # an inverted file, revert working NC back to un-inverted
    # pressure grid, move to test, compare ref and test
    print('REVERSING VERTICAL DIMENSION')
    status = reverseVertical(outNC)
    if status:
      # top_at_1 should be set so as not confuse the RRTMGP
      # executables about where the TOA is
      ncObj = nc.Dataset(outNC, 'r+')
      if revTopAtt in ncObj.ncattrs(): ncObj.delncattr('top_at_1')
      ncObj.setncattr('top_at_1', 1)
      ncObj.close()
      # keep a copy of the inverted vertical file (workNC is
      # inverted back and overwritten before the tests, but we
      # may want to examine the inverted file as well)
      shutil.copyfile(outNC, workCopy)
    # endif status
  # endif reverse
  sOut, sErr = spawn(exe, noSplit=True)
  if verbose: print(sOut)
  # invert the "working" netCDF back to original pressure grid
  # before comparing to reference NC
  if reverse:
    print('REVERTING VERTICAL DIRECTION TO INITIAL STATE')
    status = reverseVertical(outNC)
    # resetting top_at_1
    ncObj = nc.Dataset(outNC, 'r+')
    if revTopAtt in ncObj.ncattrs(): ncObj.delncattr('top_at_1')
    ncObj.close()
  # endif reverse
  # in the original unit tests, we moved these guys to the test
  # directory, but for now let's copy so we can also copy outNC to
  # the next unit test (this is done in configSetup() w/
  # "replace" set)
  shutil.copyfile(outNC, testNC)
  # now do an NC-diff test
  if skipDiff:
    diffCount = np.nan
  else:
    # just in case we chained results -- we still want to compare
    # refNC to testNC, not chainNC to testNC
    inNC = inDict['refNC']
    diffCount = ncDiff(inDir, inNC, testNC, \
      relDiffCut=inDict['relative_diff_cut'], \
      validation=inDict['validation_switch'])
  # endif skipDiff
  return diffCount
# end unitTest()
def configSetup(configFile, chain=False, replace=False, \
  relDiffCut=0.0001, validInfo=False, revLayers=False, \
  build=False, rootDir='../', failQuit=False, **kwargs):
  """
  Run unit tests as specified by the user in a configuration file

  Call:
    configSetup(configFile)

  Input:
    configFile -- string, path to configuration (.ini) file that
      contains assignments for:
        executable -- string, name of test executable
        directory -- string, path in which executable exists
        results_src -- string, netCDF with reference results
        results_dst -- string, netCDF with test results
        results -- string, name of output netCDF file after a test
          model run
        coefficients -- string, path to coefficients netCDF (input).
          this is only required for gas_optics
        cld_coefficients -- string, path to cloud coefficients netCDF
          (input). this is only required for cloud_optics_lut and
          cloud_optics_pade
        options -- string, arguments to be used with executable (just
          as they would be entered in the command line interface,
          without the executable name). this is optional

      The paths that are assigned to the variables are
      expected to be relative to the current working directory

  Keywords:
    chain -- boolean, if multiple sections exist in configFile, setting
      this keyword places the output from unit test n into the working
      directory of unit test n+1 so that the output from n is input
      into n+1
    replace -- boolean, replace the current reference netCDF with the
      test results that are produced by a run of a given test
      executable. this will be done for all tests that are performed
      in the chain defined by the config file.
    relDiffCut -- float, relDiff = |(a/b) - 1| with a being a value
      from the reference file and b being a value from the test file.
      this cutoff is the threshold by which the test and reference
      are considered significantly different
      (i.e., relDiff > relDiffCut)
    validInfo -- boolean; by default, the difference tests print out
      the name of the test and either "all files identical" or
      statistics for whatever variables differ significantly (see
      relDiffCut keyword). by setting this keyword, *every* test value
      that is significantly different from the reference value is
      printed to standard output
    revLayers -- boolean; also rerun each test with the vertical
      dimension and corresponding arrays reversed
    build -- boolean, build the RRTMGP library and any executables
      that are to be used in the regression tests
    rootDir -- string, absolute path to RRTMGP root (which includes
      build, data, extensions, src, and test subdirs). this is
      necessary for the script to know where the executables are
    failQuit -- boolean, quit program as soon as any netCDF
      differences are found

  Overloaded Keywords (**kwargs, passed to unitTest; both REQUIRED)
    verbose -- boolean; print executable output to standard output
    skipNCD -- boolean; bypass the reference-test netCDF comparison
      (forwarded to unitTest as skipDiff)

  Returns:
    Nothing
  """
  cParse = ConfigParser.ConfigParser()
  cParse.read(configFile)
  cpSections = cParse.sections()
  if build:
    # first build the RRTMGP library
    os.chdir('%s/build' % rootDir)
    buildOut, buildErr = spawn('make clean', errStop=False, \
      noSplit=True)
    print(buildOut)
    buildOut, buildErr = spawn('make', errStop=False, \
      noSplit=True)
    print(buildOut)
    os.chdir(rootDir)
  # end RRTMGP library build
  # loop over each section, which represents a unit test
  fileDiff = 0
  for iCPS, cps in enumerate(cpSections):
    # read in each assigned variable
    # these guys can be split with .split(',') if we add more
    exe = cParse.get(cps, 'executable')
    exeDir = cParse.get(cps, 'directory')
    refNC = cParse.get(cps, 'results_src')
    workNC = cParse.get(cps, 'results')
    testNC = cParse.get(cps, 'results_dst')
    exeOpt = cParse.get(cps, 'options') if \
      cParse.has_option(cps, 'options') else None
    coeffs = '%s/%s' % (rootDir, cParse.get(cps, 'coefficients')) if \
      cParse.has_option(cps, 'coefficients') else None
    cldcoeffs = '%s/%s' % (rootDir, cParse.get(cps, 'cld_coefficients')) if \
      cParse.has_option(cps, 'cld_coefficients') else None
    eDirFull = os.path.join(rootDir, exeDir)
    fullRefNC = os.path.join(eDirFull, refNC)
    fullWorkNC = os.path.join(eDirFull, workNC)
    fullTestNC = os.path.join(eDirFull, testNC)
    # testNC is directory followed by a file name; create the directory
    # if it does not already exist
    try:
      os.makedirs(os.path.join(eDirFull, os.path.split(testNC)[0]))
    except OSError as e:
      if e.errno != errno.EEXIST:
        raise
    # chain the previous section's output into this one when requested
    chainNC = str(prevTest) if chain and iCPS > 0 else None
    unitDict = {'executable': exe, \
      'directory': eDirFull, 'top_directory': rootDir, \
      'refNC': fullRefNC, 'testNC': fullTestNC, 'workNC': fullWorkNC, \
      'chainNC': chainNC, 'coefficients': coeffs, \
      'cld_coefficients': cldcoeffs, \
      'options': exeOpt, 'test_name': cps, \
      'relative_diff_cut': relDiffCut, 'validation_switch': validInfo}
    os.chdir(eDirFull)
    if build:
      # now build the executable
      os.chdir('build')
      buildOut, buildErr = spawn('make', errStop=False, noSplit=True)
      print(buildOut)
      os.chdir('..')
    # end build
    diffCount = unitTest(unitDict, verbose=kwargs['verbose'],
      skipDiff=kwargs['skipNCD'])
    # redo the test with inverted vertical dimension
    if revLayers:
      cpWorkNC = '%s/inverted_%s' % (eDirFull, workNC)
      diffCount = unitTest(unitDict, verbose=kwargs['verbose'],
        skipDiff=kwargs['skipNCD'], reverse=True, workCopy=cpWorkNC)
    # endif revLayers
    # NOTE(review): when skipNCD is set, diffCount is np.nan, so both
    # comparisons below are False and fileDiff becomes nan -- the final
    # "all files identical" message is then suppressed; confirm intended
    if diffCount == 0: print('No differences in %s' % cps)
    if diffCount > 0 and failQuit:
      sys.exit('Differences found, returning')
    fileDiff += diffCount
    if replace: shutil.copyfile(os.path.basename(workNC), fullRefNC)
    os.chdir(rootDir)
    prevTest = str(fullTestNC)
  # end loop over sections
  if fileDiff == 0: print('all files identical')
# end configSetup
def configSetupSHDOMPP(configFile, rootDir='../', \
  **kwargs):
  """
  Run executables as specified by an input configuration file
  for SHDOMPP validation

  Call:
    configSetup(configFile)

  Input:
    configFile -- string, path to configuration (.ini) file that
      contains assignments for:
        executable -- string, name of test executable
        directory -- string, path in which executable exists,
          relative to VALIDATION subdirectory in rootDir
        results_src -- string, netCDF with reference results,
          relative to TEST subdirectory in rootDir
        results_dst -- string, netCDF with test results
          relative to TEST subdirectory in rootDir
        results -- string, name of output netCDF file after a test
          model run, relative to "directory" input
        options -- string, arguments to be used with executable (just
          as they would be entered in the command line interface,
          without the executable name). any paths should be
          relative to rootDir. this is optional

  Keywords:
    rootDir -- string, absolute path to RRTMGP root
      This is the directory in which build, data, extensions, src,
      test, and validation reside). this is necessary for the script
      to know where the executables are

  Overloaded Keywords (**kwargs, passed to unitTest; REQUIRED)
    verbose -- boolean; print executable output to standard output

  Returns:
    Nothing
  """
  validDir = '%s/validation' % rootDir; path_check(validDir)
  cParse = ConfigParser.ConfigParser()
  cParse.read(configFile)
  # loop over each section, which represents a validation run
  for cps in cParse.sections():
    # read in each assigned variable
    exe = cParse.get(cps, 'executable')
    exeDir = cParse.get(cps, 'directory')
    refNC = cParse.get(cps, 'results_src')
    workNC = cParse.get(cps, 'results')
    testNC = cParse.get(cps, 'results_dst')
    # if the data/ subdir is part of the options, we need to prepend
    # the rootDir because it is assumed that the paths in options
    # are relative to root
    exeOpt = cParse.get(cps, 'options') if \
      cParse.has_option(cps, 'options') else None
    # BUGFIX: guard against a missing 'options' assignment; previously
    # "'data' in exeOpt" raised TypeError when exeOpt was None.
    if exeOpt and 'data' in exeOpt:
      exeOpt = exeOpt.replace('data', '%s/data' % rootDir)
    eDirFull = '%s/%s' % (validDir, exeDir)
    fullRefNC = '%s/%s' % (rootDir, refNC)
    fullWorkNC = '%s/%s/%s' % (validDir, exeDir, workNC)
    fullTestNC = '%s/%s' % (rootDir, testNC)
    unitDict = {'executable': exe, \
      'directory': eDirFull, 'top_directory': rootDir, \
      'refNC': fullRefNC, 'testNC': fullTestNC, 'workNC': fullWorkNC, \
      'chainNC': None, 'coefficients': None, \
      'cld_coefficients': None, \
      'options': exeOpt, 'test_name': cps, \
      'relative_diff_cut': None, 'validation_switch': None}
    os.chdir(eDirFull)
    # skipDiff=True: no reference comparison is done for SHDOMPP runs
    unitTest(unitDict, verbose=kwargs['verbose'], skipDiff=True)
  # end loop over sections
# end configSetupSHDOMPP()
def runOpticalProps(inDir, replace=False, verbose=False, build=False):
  """
  Run the optical-properties unit test executable.

  Call:
    runOpticalProps(inDir)

  Input:
    inDir -- string, path to optical_props/ test

  Keywords:
    replace -- boolean, move output rrtmgp-inputs-outputs.nc to
      ref/ dir instead of default test/ dir (STILL NEED TO IMPLEMENT)
    verbose -- boolean; print executable output to standard output
    build -- boolean, build the optical properties unit test

  Returns:
    Nothing
  """
  # we're assuming the RRTMGP library was already built
  # BUGFIX: this previously tested the global `args.build` (only defined
  # when the module runs as a script) instead of the `build` keyword.
  if build:
    curDir = os.getcwd()
    # now build the executable
    os.chdir('%s/build' % inDir)
    buildOut, buildErr = spawn('make', errStop=False, noSplit=True)
    print(buildOut)
    os.chdir(curDir)
  # end build

  path_check(inDir)
  curDir = os.getcwd()
  os.chdir(inDir)

  exe = '%s/test_optical_props' % inDir
  print('Optical Properties')
  sOut, sErr = spawn(exe, noSplit=True)
  if verbose: print(sOut)
  # stash the generated netCDF output under test/
  sOut, sErr = spawn('mv *.nc test/')
  print()

  os.chdir(curDir)
  return
# end runOpticalProps()
def cleanUp(inFile, rootDir='../', removeTest=True):
    """
    Remove all intermediate (staging) files from unit test execution

    Call:
        status = cleanUp(inFile)

    Input:
        inFile -- string, path to configuration file used in
            configSetup()

    Keywords:
        rootDir -- string, path to RRTMGP root (which includes build,
            data, extensions, src, and test subdirs). this is necessary
            for the script to know where the executables are
        removeTest -- boolean, removes test netCDF files as well as the
            coefficients netCDF and cld_coefficients netCDF

    Returns:
        True on completion (the previous docstring incorrectly said
        "Nothing"; the function has always returned True)
    """

    print('Cleaning up intermediate files in %s' % inFile)

    cParse = ConfigParser.ConfigParser()
    cParse.read(inFile)
    cpSections = cParse.sections()

    # relative paths to staging/intermediate files, gathered per section
    relPaths = []

    # loop over each section, which represents a unit test
    # (the enumerate index was unused, so iterate directly)
    for cps in cpSections:
        print(' Removing files in %s' % cps)
        cpsDir = cParse.get(cps, 'directory')
        relPaths.append('%s/%s' % (cpsDir, cParse.get(cps, 'results')))
        if removeTest:
            relPaths.append(
                '%s/%s' % (cpsDir, cParse.get(cps, 'results_dst')))
        if cParse.has_option(cps, 'coefficients'):
            relPaths.append('%s/coefficients.nc' % cpsDir)
        if cParse.has_option(cps, 'cld_coefficients'):
            relPaths.append('%s/cld_coefficients.nc' % cpsDir)
    # end loop over sections

    # prepend the root so removal works regardless of the CWD
    absPaths = ['%s/%s' % (rootDir, rPath) for rPath in relPaths]
    for path in absPaths:
        if os.path.exists(path): os.remove(path)

    return True
# end cleanUp()
if __name__ == '__main__':
    import socket

    # developer-machine convenience: extra library path on host "rotor"
    if socket.gethostname() == 'rotor':
        sys.path.append('/home/rpernak/python_lib/')

    import argparse

    parser = argparse.ArgumentParser(\
        description='Run selected unit tests for RRTMGP builds.')
    parser.add_argument('--environment', type=str, \
        help='If set to a string corresponding to one of the ' + \
        'available environments ("conda info --envs" at CLI), ' + \
        'a shell command will be executed to set the environment ' + \
        'to this string.')
    parser.add_argument('--ref_replace', action='store_true', \
        help='Move rrtmgp-inputs-outputs.nc that results from ' + \
        'test executable runs into the ref/ subdirectory instead ' + \
        'of the default test/ subdir.')
    parser.add_argument('--test_config_file', type=str, nargs='+', \
        help='Name of config file(s) that the user can use to setup ' + \
        'the regression test schemes. The default is to process ' + \
        'all of the .ini files in the working directory except ' + \
        'for the user-defined chain (user_define_workflow_config.ini).')
    parser.add_argument('--unit_chain', action='store_true', \
        help='Used in conjuction with --test_config_file. If set, ' + \
        'it is assumed that the config file specifies that many ' + \
        'unit tests are to be performed but independent of each ' + \
        'other (the default action). This forces the output of ' + \
        'each unit test to be used as input into the next test.')
    parser.add_argument('--optical_props', action='store_true', \
        help='Run the optical properties test.')
    parser.add_argument('--cleanup', action='store_true', \
        help='Remove intermediate files specified in the input ' + \
        'configuration files, then exit from program.')
    parser.add_argument('--rel_diff_cut', type=float, default=0.0001,\
        help='Percentage difference over which any reference-' + \
        'test difference is considered significantly different ' + \
        '(i.e., anything above this cutoff)')
    parser.add_argument('--verbose', action='store_true', \
        help='If set, prints the standard output from each executable.')
    parser.add_argument('--very_verbose', action='store_true', \
        help='Instead of only returning statistics on the ' + \
        'differences, return the filename, variable name, and array ' + \
        'indices of every difference that exists in the regression tests.')
    parser.add_argument('--reverse', action='store_true', \
        help='Reverse the vertical dimension and the corresponding ' + \
        'arrays.')
    parser.add_argument('--build', action='store_true', \
        help='Build the RRTMGP library and executables before ' + \
        'running any tests.')
    parser.add_argument('--root_dir', type=str, default='../', \
        help='This script runs with a number of assumptions on ' + \
        'where directories and executables exist. This keyword ' + \
        'specifies what the RRTMGP root directory is, and then ' + \
        'all of the paths in the configuration files (assumed to be ' + \
        'in root_dir) will be relative to the test root.')
    parser.add_argument('--no_diff', action='store_true', \
        help='If set, runs the executables in the configuration file ' + \
        'but does not perform the subsequent reference-test netCDF ' + \
        'comparison.')
    parser.add_argument('--quit_on_fail', action='store_true', \
        help='Quit the script as soon as a significant difference ' + \
        'is found between a reference netCDF and its corresponding ' + \
        'test netCDF.')
    parser.add_argument('--validation', action='store_true', \
        help='Grab .ini files from validation/ subdirectory ' + \
        'maintained by Frank Evans and Robert Pincus rather than the ' + \
        'default test/ subdir.')
    args = parser.parse_args()

    baseDir = args.root_dir

    # default root_dir should be three levels above the scripts
    # directory in which run_tests.py resides
    # NOTE(review): --root_dir defaults to '../', so baseDir is never
    # None here and the '../../..' fallback looks unreachable -- confirm
    rootRel = '../../..' if baseDir is None else str(baseDir)
    cwd = os.getcwd()

    # get the absolute path of the root and replace baseDir with it
    os.chdir(rootRel)
    baseDir = os.getcwd()
    os.chdir(cwd)
    # endif baseDir

    # build, test, and validation directories must exist in the current
    # working directory (CWD)
    path_check(baseDir)
    testDir = '%s/test' % baseDir; path_check(testDir)
    if args.build: path_check('%s/build' % baseDir)
    iniSub = 'validation' if args.validation else 'test'
    iniDir = '%s/%s' % (baseDir, iniSub); path_check(iniDir)

    newRef = args.ref_replace

    # set Python environment
    pEnv = args.environment
    if pEnv: sOut, sErr = spawn('source activate %s' % pEnv)

    # user-defined test procedure
    cFiles = args.test_config_file

    # default is to loop over all config files in test dir
    if cFiles is None: cFiles, sErr = spawn('ls %s/*.ini' % iniDir)

    if args.cleanup:
        # remove staging files for every config, then exit immediately
        for cFile in cFiles:
            base = os.path.basename(cFile)
            cleanUp(cFile, rootDir=baseDir)
        # end loop over config files

        sys.exit('Intermediate files have been removed')
    # end cleanUp

    # main regression loop: one pass per configuration file
    for cFile in cFiles:
        base = os.path.basename(cFile)
        print('Working on %s' % cFile)
        path_check(cFile)
        if args.validation:
            configSetupSHDOMPP(cFile, rootDir=baseDir, verbose=args.verbose)
        else:
            configSetup(\
                cFile, replace=newRef, chain=args.unit_chain, \
                relDiffCut=args.rel_diff_cut, validInfo=args.very_verbose, \
                verbose=args.verbose, revLayers=args.reverse, \
                build=args.build, rootDir=baseDir, skipNCD=args.no_diff, \
                failQuit=args.quit_on_fail)
            print("\n")
        # endif validation
    # end loop over config files

    # are we doing anything with this unit test anymore?
    """
    if args.optical_props:
        # we eventually might wanna change this so that optical props
        # is run in configSetup() like everything else
        runOpticalProps('%s/optical_props' % testDir, replace=newRef, \
            verbose=args.verbose, build=args.build)
    # end optical_props
    """
# end main()
| 32.910577
| 102
| 0.663219
|
4a0676e57c166da8873a1baf8e682616c3e93e25
| 1,793
|
py
|
Python
|
tldry/stopwords/english.py
|
vangaa/tldry
|
0f5075dbed3cd09ac6749e09273a2e054d75445a
|
[
"MIT"
] | 2
|
2019-04-01T09:39:54.000Z
|
2019-05-17T19:24:39.000Z
|
tldry/stopwords/english.py
|
vangaa/tldry
|
0f5075dbed3cd09ac6749e09273a2e054d75445a
|
[
"MIT"
] | null | null | null |
tldry/stopwords/english.py
|
vangaa/tldry
|
0f5075dbed3cd09ac6749e09273a2e054d75445a
|
[
"MIT"
] | 1
|
2021-02-03T14:00:43.000Z
|
2021-02-03T14:00:43.000Z
|
# English stopword set used to filter high-frequency, low-content tokens.
# It includes bare contraction stems ("aren", "ll", "ve", ...) so that
# tokenizers which strip apostrophes still match them.
stopwords = set("""
    a about above after again against all am an and any are aren as at
    be because been before being below between both but by
    can cannot could couldn
    d did didn do does doesn doing don down during
    each
    few for from further
    had hadn has hasn have haven having he her here hers herself him
    himself his how
    i if in into is isn it its itself
    let ll
    m me more most mustn my myself
    no nor not
    of off on once only or other ought our ours ourselves out over own
    re
    s same shan she should shouldn so some such
    t than that the their theirs them themselves then there these they
    this those through to too
    under until up
    ve very
    was wasn we were weren what when where which while who whom why
    with won would wouldn
    you your yours yourself yourselves
""".split())
| 11.796053
| 17
| 0.331288
|
4a067904f4ed4c1bbcb09a96ee95866d3d98ee60
| 2,053
|
py
|
Python
|
services/processes/processes/dependencies/node_parser.py
|
bgoesswein/implementation_backend
|
546018eb5dba79b823e3cfb20472271e02045789
|
[
"Apache-2.0"
] | null | null | null |
services/processes/processes/dependencies/node_parser.py
|
bgoesswein/implementation_backend
|
546018eb5dba79b823e3cfb20472271e02045789
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:29:22.000Z
|
2022-03-11T23:44:17.000Z
|
services/processes/processes/dependencies/node_parser.py
|
bgoesswe/implementation_backend
|
546018eb5dba79b823e3cfb20472271e02045789
|
[
"Apache-2.0"
] | null | null | null |
from nameko.extensions import DependencyProvider
class NodesWrapper:
    """Flattens a nested openEO process graph into a linear node list.

    Filter-type processes are not emitted as nodes; their arguments are
    folded into the ``filters`` dict, which is appended at the end as a
    single ``get_data`` node.
    """

    def __init__(self):
        # collected non-filter process nodes, outermost first
        self.nodes = []
        # arguments accumulated from filter-type processes
        self.filters = {
            "data_id": None,
            "time": None,
            "bands": None,
            "extent": None,
            "derived_from": None,
            "license": None,
            "data_pid": None
        }

    def parse_process_graph(self, process_graph: dict, processes: list) -> list:
        """Walk the graph, then append the aggregated get_data node."""
        self.parse_nodes(process_graph, processes)
        self.nodes.append({"process_id": "get_data", "args": self.filters})
        return self.nodes

    def parse_filter(self, process_id: str, filter_args: dict):
        """Fold one filter process's arguments into self.filters."""
        # TODO: Not a good solution: Has to be adapted as soon as
        # the processes are better specified
        # TODO: Bands can be name, band_id, wavelengths as str or list
        if process_id == "get_collection":
            # copy each argument through individually
            self.filters.update(filter_args)
        if process_id == "filter_bands":
            self.filters["bands"] = filter_args
        if process_id == "filter_bbox":
            self.filters["extent"] = filter_args
        if process_id == "filter_daterange":
            self.filters["time"] = filter_args
        if process_id == "data_pid":
            self.filters["data_pid"] = filter_args

    def parse_nodes(self, node_graph: dict, processes: list):
        """Consume one node (mutating it), then recurse into 'imagery'."""
        pid = node_graph.pop("process_id")
        child = node_graph.pop("imagery", None)
        # NOTE(review): raises IndexError if pid is not in `processes`;
        # presumably validated upstream -- confirm
        spec = [p for p in processes if p['name'] == pid]
        if spec[0]["p_type"] == "filter":
            self.parse_filter(pid, node_graph)
        else:
            self.nodes.append({"process_id": pid, "args": node_graph})
        if child:
            self.parse_nodes(child, processes)
class NodeParser(DependencyProvider):
    """Nameko dependency provider that injects a process-graph parser."""

    def get_dependency(self, worker_ctx):
        # a fresh wrapper per worker, so parser state is never shared
        return NodesWrapper()
| 31.584615
| 80
| 0.570385
|
4a067a501068ac7ed0725b3138fb2df459554bf8
| 317
|
py
|
Python
|
src/ggrc/utils/custom_dict.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
src/ggrc/utils/custom_dict.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/utils/custom_dict.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module contains customized dictionaries """
from UserDict import UserDict
class MissingKeyDict(UserDict):
  """Dictionary that returns a missing key itself as the value."""

  def __missing__(self, key):
    # Looking up an absent key yields the key instead of raising
    # KeyError (standard dict.__missing__ hook).
    return key
| 24.384615
| 78
| 0.741325
|
4a067a60bfc91ce9fbf2c0335e4959490805311a
| 4,232
|
py
|
Python
|
neural_networks/layers/masking.py
|
faustusdotbe/lmtc-eurlex57k
|
98ecf84371b453abacc429c54bf2d0a24de0d61e
|
[
"Apache-2.0"
] | 77
|
2019-06-09T06:24:57.000Z
|
2022-03-25T18:04:43.000Z
|
neural_networks/layers/masking.py
|
faustusdotbe/lmtc-eurlex57k
|
98ecf84371b453abacc429c54bf2d0a24de0d61e
|
[
"Apache-2.0"
] | 12
|
2019-09-27T21:53:53.000Z
|
2021-08-25T15:53:12.000Z
|
neural_networks/layers/masking.py
|
faustusdotbe/lmtc-eurlex57k
|
98ecf84371b453abacc429c54bf2d0a24de0d61e
|
[
"Apache-2.0"
] | 11
|
2020-02-15T09:28:13.000Z
|
2021-12-14T06:32:15.000Z
|
# -*- coding: utf-8 -*-
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
class SymmetricMasking(Layer):
    """Masks a sequence by using a mask value to skip timesteps based on another sequence.

    For each timestep in the 1st input tensor (dimension #1 in the tensor),
    if all values in the 2nd input tensor at that timestep
    are equal to `mask_value`, then the timestep will be masked (skipped)
    in all downstream layers (as long as they support masking).

    If any downstream layer does not support masking yet receives such
    an input mask, an exception will be raised.

    # Example

    Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
    to be fed to an LSTM layer.
    You want to mask timestep #3 and #5 because you lack data for
    these timesteps. You can:

        - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
        - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

    ```python
    model = Sequential()
    model.add(SymmetricMasking(inputs=[1,2] mask_value=0.))
    ```
    """

    def __init__(self, mask_value=0., **kwargs):
        super(SymmetricMasking, self).__init__(**kwargs)
        # let Keras propagate the mask produced by compute_mask()
        self.supports_masking = True
        self.mask_value = mask_value

    def compute_mask(self, inputs, mask=None):
        # inputs[1] drives the mask; inputs[0] is the tensor being masked
        if len(inputs[1].shape) == 3:
            # rank-3 driver: a timestep survives if ANY feature differs
            # from mask_value
            output_mask = K.any(K.not_equal(inputs[1], self.mask_value), axis=-1)
        else:
            # lower-rank driver: elementwise comparison
            output_mask = K.not_equal(inputs[1], self.mask_value)
        return output_mask

    def call(self, inputs):
        # also zero out the masked positions of inputs[0], so masked
        # timesteps carry no signal even through mask-unaware layers
        if len(inputs[1].shape) == 3:
            boolean_mask = K.any(K.not_equal(inputs[1], self.mask_value),
                                 axis=-1, keepdims=True)
        else:
            boolean_mask = K.expand_dims(K.not_equal(inputs[1], self.mask_value))
        return inputs[0] * K.cast(boolean_mask, K.dtype(inputs[0]))

    def get_config(self):
        # serialize mask_value so the layer round-trips through save/load
        config = {'mask_value': self.mask_value}
        base_config = super(SymmetricMasking, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape
class Camouflage(Layer):
    """Zeroes out timesteps of one tensor based on a second "driver" tensor.

    For each timestep in the 1st input tensor (dimension #1 in the tensor),
    if all values in the 2nd input tensor at that timestep
    are equal to `mask_value`, then that timestep of the 1st tensor is
    set to zero in the output.

    Unlike `SymmetricMasking`, `compute_mask` here returns None, so no
    Keras mask is propagated downstream -- only the values themselves
    are zeroed.

    # Example

    Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
    to be fed to an LSTM layer.
    You want to mask timestep #3 and #5 because you lack data for
    these timesteps. You can:

        - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
        - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:

    ```python
    model = Sequential()
    model.add(Camouflage(inputs=[1,2] mask_value=0.))
    ```
    """

    def __init__(self, mask_value=0., **kwargs):
        super(Camouflage, self).__init__(**kwargs)
        self.mask_value = mask_value
        self.supports_masking = True

    def call(self, inputs):
        # inputs[0]: tensor to be masked; inputs[1]: driver tensor
        if len(inputs[1].shape) == 3:
            # rank-3 driver: keep a timestep if ANY feature != mask_value
            boolean_mask = K.any(K.not_equal(inputs[1], self.mask_value),
                                 axis=-1, keepdims=True)
        else:
            boolean_mask = K.expand_dims(K.not_equal(inputs[1], self.mask_value))
        return inputs[0] * K.cast(boolean_mask, K.dtype(inputs[0]))

    def get_config(self):
        # serialize mask_value for model save/load round-trips
        config = {'mask_value': self.mask_value}
        base_config = super(Camouflage, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape

    def compute_mask(self, inputs, mask=None):
        # deliberately swallow the mask: downstream layers see no mask
        return None
| 34.975207
| 90
| 0.645321
|
4a067ae1291f419fcf4391c0aa05b350e1dfd7b2
| 1,240
|
py
|
Python
|
tests/tensorflow/test_tensorflow2_metric_value_conversion_utils.py
|
r3stl355/mlflow
|
816e035786245ff42723b9c53eb9407885e7cd75
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow/test_tensorflow2_metric_value_conversion_utils.py
|
r3stl355/mlflow
|
816e035786245ff42723b9c53eb9407885e7cd75
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow/test_tensorflow2_metric_value_conversion_utils.py
|
r3stl355/mlflow
|
816e035786245ff42723b9c53eb9407885e7cd75
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import mlflow
from mlflow import tracking
from mlflow.tracking.fluent import start_run
from mlflow.exceptions import MlflowException, INVALID_PARAMETER_VALUE, ErrorCode
from mlflow.tracking.metric_value_conversion_utils import convert_metric_value_to_float_if_possible
import tensorflow as tf
def test_reraised_value_errors():
    """A multi-element tensor cannot collapse to one float; the helper
    must surface that as an MlflowException with INVALID_PARAMETER_VALUE."""
    tensor_2x2 = tf.random.uniform([2, 2], dtype=tf.float32)

    with pytest.raises(MlflowException, match=r"Failed to convert metric value to float") as e:
        convert_metric_value_to_float_if_possible(tensor_2x2)

    assert e.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_convert_metric_value_to_float():
    """A scalar (rank-0) tensor converts to its plain Python float."""
    scalar = tf.random.uniform([], dtype=tf.float32)
    expected = float(scalar.numpy())
    assert convert_metric_value_to_float_if_possible(scalar) == expected
def test_log_tf_tensor_as_metric():
    """log_metric accepts a rank-0 TF tensor and stores it as a float."""
    scalar = tf.random.uniform([], dtype=tf.float32)
    as_float = float(scalar.numpy())

    with start_run() as run:
        mlflow.log_metric("name_tf", scalar)

    fetched = tracking.MlflowClient().get_run(run.info.run_id)
    assert fetched.data.metrics == {"name_tf": as_float}
| 35.428571
| 99
| 0.793548
|
4a067cbc36c8e0d26e97a239b2e697fdd933d7b8
| 1,961
|
py
|
Python
|
gameDefine.py
|
moonmagian/PyRummikub
|
aa1f265bb410a6b0150eec8e21d18803c4ef6fd5
|
[
"MIT"
] | 1
|
2018-08-31T07:34:07.000Z
|
2018-08-31T07:34:07.000Z
|
gameDefine.py
|
moonmagian/PyRummikub
|
aa1f265bb410a6b0150eec8e21d18803c4ef6fd5
|
[
"MIT"
] | null | null | null |
gameDefine.py
|
moonmagian/PyRummikub
|
aa1f265bb410a6b0150eec8e21d18803c4ef6fd5
|
[
"MIT"
] | null | null | null |
from enum import Enum
DEBUG = True
class Color(Enum):
    """Tile colors used in the game."""
    BLUE = 1
    RED = 2
    BLACK = 3
    YELLOW = 4
    # joker/wildcard tile
    GHOST = 5
class Error(Enum):
    """Status codes returned by game operations."""
    success = 0
    cardNotFound = 1
    notValidCardSeq = 2
    notValidDataType = 3
class Card:
    """A single game tile with a color, a point value and a unique id.

    The class attribute ``cid`` acts as a global counter: every
    constructed card bumps it and takes the new value as its own id,
    so the first card ever created gets id 0.
    """
    cid = -1  # id of the most recently created card; -1 means none yet

    def __init__(self, color : Color, point : int):
        self.color = color
        self.point = point
        Card.cid += 1
        self.cid = Card.cid

    def __eq__(self, other):
        # cards compare by value only; the unique id is ignored
        same_color = self.color == other.color
        same_point = self.point == other.point
        return same_color and same_point

    def __repr__(self):
        return '{} {} id:{}'.format(self.color.name, self.point, self.cid)
class Player:
    """One participant: their identifier, hand and solvency status."""

    def __init__(self, uuid):
        # external identifier for this player
        self.uuid = uuid
        # the player's hand, empty at the start of the game
        self.cards = []
        # set to True once the player cannot continue
        self.broke = False
class Table:
    """The shared play area holding the cards laid out so far."""

    def __init__(self):
        self.cards = []  # cards/sequences currently on the table
def generateCards():
    """Build and return the full deck as a list.

    Two copies of points 1-13 in each non-ghost color, followed by the
    two ghost (joker) tiles -- same content and order as before.
    """
    deck = []
    for color in Color:
        if color == Color.GHOST:
            continue
        # two identical runs of 1..13 per color
        for _ in range(2):
            for point in range(1, 14):
                deck.append(Card(color, point))
    # finally the two jokers
    deck.append(Card(Color.GHOST, 1))
    deck.append(Card(Color.GHOST, 2))
    return deck
def isValidCardSequence(cardSeq):
    """Check if a card sequence is a valid meld.

    A sequence is valid if it holds at least three cards and is either
      * a group: all cards share ONE point value and no color repeats, or
      * a run: all cards share one color and the points are consecutive.

    NOTE: the input list is sorted in place (by point), matching the
    original behavior.
    NOTE(review): ghost/joker cards get no special treatment here --
    confirm whether they should be allowed to substitute.
    """
    # Ensure at least 3 cards are in a sequence.
    if len(cardSeq) < 3:
        return False
    cardSeq.sort(key=lambda card: card.point)

    # First, check if it's a valid group: distinct colors AND a single
    # shared point value. (BUG FIX: the point-equality check was missing,
    # so a mixed-color, mixed-point hand like R1/B2/K3 passed as a
    # "group" even though it is neither a group nor a run.)
    colors = [card.color for card in cardSeq]
    points = set(card.point for card in cardSeq)
    if len(points) == 1 and len(set(colors)) == len(colors):
        return True

    # Otherwise it can only be a run: the color must be uniform...
    if len(set(colors)) > 1:
        return False
    # ...and the points must be consecutive.
    expected = cardSeq[0].point
    for card in cardSeq:
        if card.point != expected:
            return False
        expected += 1
    return True
| 30.640625
| 111
| 0.609893
|
4a067f5b60b998dc643a40393d90306578563627
| 556
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/qr-code-scanner-app-32359
|
25a52acab55a3075c15ac8d29f518b429279f416
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/qr-code-scanner-app-32359
|
25a52acab55a3075c15ac8d29f518b429279f416
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/qr-code-scanner-app-32359
|
25a52acab55a3075c15ac8d29f518b429279f416
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
    """Seed the django.contrib.sites Site (pk=1) with this app's
    name and domain, using the historical model from the migration
    state as required inside a data migration."""
    site_model = apps.get_model("sites", "Site")
    domain = "qr-code-scanner-app-32359.botics.co"
    defaults = {
        "name": "QR code scanner app",
    }
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration: create/refresh the default Site record."""

    # must run after the sites framework's unique-domain migration
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
| 21.384615
| 61
| 0.658273
|
4a06809c4542f99bcc78dc51d9e34e3dc0d6ac81
| 1,901
|
py
|
Python
|
ch06/weight_init_compare.py
|
KevinCarpricorn/Code
|
8d0164f5b28f937e8891854f86e1a9b584122b48
|
[
"MIT"
] | null | null | null |
ch06/weight_init_compare.py
|
KevinCarpricorn/Code
|
8d0164f5b28f937e8891854f86e1a9b584122b48
|
[
"MIT"
] | null | null | null |
ch06/weight_init_compare.py
|
KevinCarpricorn/Code
|
8d0164f5b28f937e8891854f86e1a9b584122b48
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import os
import sys
sys.path.append(os.pardir) # 为了导入父目录的文件而进行的设定
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.util import smooth_curve
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD
# 0: load the MNIST data ==========
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)

train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000

# 1: experiment setup ==========
# Compare three weight-initialization schemes: a fixed small std,
# Xavier (paired with sigmoid-like activations) and He (for ReLU).
weight_init_types = {'std=0.01': 0.01, 'Xavier': 'sigmoid', 'He': 'relu'}
optimizer = SGD(lr=0.01)

networks = {}
train_loss = {}
for key, weight_type in weight_init_types.items():
    networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100],
                                  output_size=10, weight_init_std=weight_type)
    train_loss[key] = []

# 2: start training ==========
for i in range(max_iterations):
    # one SGD step per network on a shared random mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    for key in weight_init_types.keys():
        grads = networks[key].gradient(x_batch, t_batch)
        optimizer.update(networks[key].params, grads)
        loss = networks[key].loss(x_batch, t_batch)
        train_loss[key].append(loss)
    # progress report every 100 iterations
    if i % 100 == 0:
        print("===========" + "iteration:" + str(i) + "===========")
        for key in weight_init_types.keys():
            loss = networks[key].loss(x_batch, t_batch)
            print(key + ":" + str(loss))

# 3: plot the loss curves ==========
markers = {'std=0.01': 'o', 'Xavier': 's', 'He': 'D'}
x = np.arange(max_iterations)
for key in weight_init_types.keys():
    plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 2.5)
plt.legend()
plt.show()
| 30.174603
| 94
| 0.627564
|
4a0681a98c05a48572207701f3979980ff9e8dea
| 948
|
py
|
Python
|
testScenarios.py
|
Neomania/BeeSimulation
|
2d003085941d9f44d0f07a2a5dbb45f0103cdb98
|
[
"MIT"
] | 2
|
2015-10-08T12:35:19.000Z
|
2019-12-22T00:20:05.000Z
|
testScenarios.py
|
Neomania/BeeSimulation
|
2d003085941d9f44d0f07a2a5dbb45f0103cdb98
|
[
"MIT"
] | null | null | null |
testScenarios.py
|
Neomania/BeeSimulation
|
2d003085941d9f44d0f07a2a5dbb45f0103cdb98
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Timothy
#
# Created: 26/04/2015
# Copyright: (c) Timothy 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from physics import *
# Ad-hoc scenario: verify that a bee's flower memories are ordered by
# pollen rate after sortMemory().
testHive = Hive(0,0)
testBee = Bee(testHive)
# five flowers at the origin, identical color, differing pollen rates
testFlower1 = Flower(0,0,(0,0,0),1.0)
testFlower2 = Flower(0,0,(0,0,0),2.0)
testFlower3 = Flower(0,0,(0,0,0),3.0)
testFlower4 = Flower(0,0,(0,0,0),4.0)
testFlower5 = Flower(0,0,(0,0,0),5.0)
# memorize them deliberately out of order
testBee.createMemoryAbout(testFlower4)
testBee.createMemoryAbout(testFlower2)
testBee.createMemoryAbout(testFlower3)
testBee.createMemoryAbout(testFlower5)
testBee.createMemoryAbout(testFlower1)
# before sorting: prints in insertion order (4, 2, 3, 5, 1)
for memory in testBee.memoryStore:
    print(memory.flower.pollenRate)
print("Sorting!")
testBee.sortMemory()
# after sorting: order defined by physics.sortMemory
# NOTE(review): sort direction is decided in the physics module -- confirm
for memory in testBee.memoryStore:
    print(memory.flower.pollenRate)
| 27.882353
| 80
| 0.601266
|
4a0683b4d85f5f652937a55db062d5fa57b9b8f5
| 18,155
|
py
|
Python
|
conf/trigger_settings.py
|
chepazzo/trigger
|
4e867bd9443fde61f9b702d3ba65227c0ca69afb
|
[
"BSD-3-Clause"
] | null | null | null |
conf/trigger_settings.py
|
chepazzo/trigger
|
4e867bd9443fde61f9b702d3ba65227c0ca69afb
|
[
"BSD-3-Clause"
] | null | null | null |
conf/trigger_settings.py
|
chepazzo/trigger
|
4e867bd9443fde61f9b702d3ba65227c0ca69afb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# This is a sample settings.py that varies slightly from the default. Please see docs/configuration.rst or
# trigger/conf/global_settings.py for the complete list of default settings.
import IPy
import os
import socket
#===============================
# Global Settings
#===============================
# This is where Trigger should look for its files.
PREFIX = '/etc/trigger'
# Set to True to enable GPG Authentication
# Set to False to use the old .tackf encryption method.
# Should be False unless instructions/integration is ready for GPG
USE_GPG_AUTH = False
# This is used for old auth method. It sucks and needs to die.
# TODO (jathan): This is deprecated. Remove all references to this and make GPG
# the default and only method.
USER_HOME = os.getenv('HOME')
TACACSRC = os.getenv('TACACSRC', os.path.join(USER_HOME, '.tacacsrc'))
TACACSRC_KEYFILE = os.getenv('TACACSRC_KEYFILE', os.path.join(PREFIX, '.tackf'))
# If set, use the TACACSRC_PASSPHRASE, otherwise default to TACACSRC_KEYFILE
TACACSRC_USE_PASSPHRASE = False
# Use this passphrase to encrypt credentials.CHANGE THIS IN YOUR FILE BEFORE
# USING THIS IN YOUR ENVIRONMENT.
TACACSRC_PASSPHRASE = 'bacon is awesome, son.' # NYI
# Default login realm to store user credentials (username, password) for
# general use within the .tacacsrc
DEFAULT_REALM = 'aol'
# Location of firewall policies
FIREWALL_DIR = '/data/firewalls'
# Location of tftproot.
TFTPROOT_DIR = '/data/tftproot'
# Add internally owned networks here. All network blocks owned/operated and
# considered part of your network should be included.
INTERNAL_NETWORKS = [
IPy.IP("10.0.0.0/8"),
IPy.IP("172.16.0.0/12"),
IPy.IP("192.168.0.0/16"),
]
# The tuple of supported vendors derived from the values of VENDOR_MAP
SUPPORTED_VENDORS = (
'a10',
'arista',
'aruba',
'avocent',
'brocade',
'cisco',
'citrix',
'dell',
'f5',
'force10',
'foundry',
'juniper',
'mrv',
'netscreen',
'paloalto',
'pica8',
)
VALID_VENDORS = SUPPORTED_VENDORS # For backwards compatibility
# A mapping of manufacturer attribute values to canonical vendor name used by
# Trigger. These single-word, lowercased canonical names are used throughout
# Trigger.
#
# If your internal definition differs from the UPPERCASED ones specified below
# (which they probably do), customize them here.
VENDOR_MAP = {
'A10 NETWORKS': 'a10',
'ARISTA NETWORKS': 'arista',
'ARUBA NETWORKS': 'aruba',
'AVOCENT': 'avocent',
'BROCADE': 'brocade',
'CISCO SYSTEMS': 'cisco',
'CITRIX': 'citrix',
'DELL': 'dell',
'F5 NETWORKS': 'f5',
'FORCE10': 'force10',
'FOUNDRY': 'foundry',
'JUNIPER': 'juniper',
'MRV': 'mrv',
'NETSCREEN TECHNOLOGIES': 'netscreen',
'PALO ALTO NETWORKS': 'paloalto',
'PICA8': 'pica8',
}
# A dictionary keyed by manufacturer name containing a list of the device types
# for each that is officially supported by Trigger.
SUPPORTED_PLATFORMS = {
'a10': ['SWITCH'],
'arista': ['SWITCH'], # Your "Cloud" network vendor
'aruba': ['SWITCH'], # Wireless Controllers
'avocent': ['CONSOLE'],
'brocade': ['ROUTER', 'SWITCH'],
'cisco': ['FIREWALL', 'ROUTER', 'SWITCH'],
'citrix': ['SWITCH'], # Assumed to be NetScalers
'dell': ['SWITCH'],
'f5': ['LOAD_BALANCER', 'SWITCH'],
'force10': ['ROUTER', 'SWITCH'],
'foundry': ['ROUTER', 'SWITCH'],
'juniper': ['FIREWALL', 'ROUTER', 'SWITCH'], # Any devices running Junos
'mrv': ['CONSOLE', 'SWITCH'],
'netscreen': ['FIREWALL'], # Pre-Juniper NetScreens
'paloalto': ['FIREWALL'],
'pica8': ['ROUTER', 'SWITCH'],
}
# The tuple of support device types
SUPPORTED_TYPES = (
'CONSOLE',
'DWDM',
'FIREWALL',
'LOAD_BALANCER',
'ROUTER',
'SWITCH'
)
# A mapping of of vendor names to the default device type for each in the
# event that a device object is created and the deviceType attribute isn't set
# for some reason.
DEFAULT_TYPES = {
'a10': 'SWITCH',
'arista': 'SWITCH',
'aruba': 'SWITCH',
'avocent': 'CONSOLE',
'brocade': 'SWITCH',
'citrix': 'SWITCH',
'cisco': 'ROUTER',
'dell': 'SWITCH',
'f5': 'LOAD_BALANCER',
'force10': 'ROUTER',
'foundry': 'SWITCH',
'juniper': 'ROUTER',
'mrv': 'CONSOLE',
'netscreen': 'FIREWALL',
'paloalto': 'FIREWALL',
'pica8': 'SWITCH',
}
# When a vendor is not explicitly defined within `DEFAULT_TYPES`, fallback to
# this type.
FALLBACK_TYPE = 'ROUTER'
# When a manufacturer/vendor is not explicitly defined, fallback to to this
# value.
FALLBACK_MANUFACTURER = 'UNKNOWN'
#===============================
# Twister
#===============================
# Default timeout in seconds for commands executed during a session. If a
# response is not received within this window, the connection is terminated.
DEFAULT_TIMEOUT = 5 * 60
# Default timeout in seconds for initial telnet connections.
TELNET_TIMEOUT = 60
# Whether or not to allow telnet fallback
TELNET_ENABLED = True
# Default ports for SSH
SSH_PORT = 22
# The preferred order in which SSH authentication methods are tried.
SSH_AUTHENTICATION_ORDER = ['password', 'keyboard-interactive', 'publickey']
# Default port for Telnet
TELNET_PORT = 23
# A mapping of vendors to the types of devices for that vendor for which you
# would like to disable interactive (pty) SSH sessions, such as when using
# bin/gong.
SSH_PTY_DISABLED = {
'dell': ['SWITCH'], # Dell SSH is just straight up broken
}
# A mapping of vendors to the types of devices for that vendor for which you
# would like to disable asynchronous (NON-interactive) SSH sessions, such as
# when using twister or Commando to remotely control a device.
SSH_ASYNC_DISABLED = {
'dell': ['SWITCH'], # Dell SSH is just straight up broken
'foundry': ['SWITCH'], # Old Foundry switches only do SSHv1
}
# Vendors that basically just emulate Cisco's IOS and can be treated
# accordingly for the sake of interaction.
IOSLIKE_VENDORS = (
'a10',
'arista',
'aruba',
'brocade',
'cisco',
'dell',
'force10',
'foundry',
)
# Prompts sent by devices that indicate the device is awaiting user
# confirmation when interacting with the device. If a continue prompt is
# detected, Trigger will temporarily set this value to the prompt and send
# along the next command (for example if you're expecting such a prompt and you
# want to send along "yes"). These should be as specific as possible because we
# want to make sure bad things don't happen.
CONTINUE_PROMPTS = [
'continue?',
'proceed?',
'(y/n):',
'[y/n]:',
'[confirm]',
'[yes/no]: ',
'overwrite file [startup-config] ?[yes/press any key for no]....'
]
# The file path where .gorc is expected to be found.
GORC_FILE = '~/.gorc'
# The only root commands that are allowed to be executed when defined within
# ``~.gorc``. They will be filtered # out by `~trigger.gorc.filter_commands()`.
GORC_ALLOWED_COMMANDS = (
'cli',
'enable',
'exit',
'get',
'monitor',
'ping',
'quit',
'set',
'show',
'start',
'term',
'terminal',
'traceroute',
'who',
'whoami'
)
#===============================
# NetDevices
#===============================
# Globally toggle whether to load ACL associations from the Redis database. If
# you don’t have Redis or aren’t using Trigger to manage ACLs set this to
# False.
WITH_ACLS = False
# Path to the explicit module file for autoacl.py so that we can still perform
# 'from trigger.acl.autoacl import autoacl' without modifying sys.path.
AUTOACL_FILE = os.environ.get('AUTOACL_FILE', os.path.join(PREFIX, 'autoacl.py'))
# A tuple of data loader classes, specified as strings. Optionally, a tuple can
# be used instead of a string. The first item in the tuple should be the
# Loader's module, subsequent items are passed to the Loader during
# initialization.
NETDEVICES_LOADERS = (
'trigger.netdevices.loaders.filesystem.JSONLoader',
'trigger.netdevices.loaders.filesystem.XMLLoader',
'trigger.netdevices.loaders.filesystem.SQLiteLoader',
'trigger.netdevices.loaders.filesystem.CSVLoader',
'trigger.netdevices.loaders.filesystem.RancidLoader',
# Example of a database loader where the db information is sent along as an
# argument. The args can be anything you want.
#['trigger.netdevices.loaders.mysql.Loader', {'dbuser': 'root', 'dbpass': 'abc123', 'dbhost': 'localhost', 'dbport': 3306}, 'bacon'],
)
# A path or URL to netdevices device metadata source data, which is used to
# populate trigger.netdevices.NetDevices. For more information on this, see
# NETDEVICES_LOADERS.
NETDEVICES_SOURCE = os.environ.get(
'NETDEVICES_SOURCE', os.path.join(PREFIX, 'netdevices.json')
)
# Assign NETDEVICES_SOURCE to NETDEVICES_FILE for backwards compatibility
NETDEVICES_FILE = NETDEVICES_SOURCE
# TextFSM Vendor Mappings. Override this if you have defined your own TextFSM templates.
TEXTFSM_VENDOR_MAPPINGS = {
"cisco": [ "ios", "nxos" ],
"arista": [ "eos" ]
}
# TextFSM Template Path. Commando will attempt to match a given show command with a template within this folder.
TEXTFSM_TEMPLATE_DIR = os.getenv('TEXTFSM_TEMPLATE_DIR', os.path.join(PREFIX, 'vendor/ntc_templates'))
# Whether to treat the RANCID root as a normal instance, or as the root to
# multiple instances. This is only checked when using RANCID as a data source.
RANCID_RECURSE_SUBDIRS = os.environ.get('RANCID_RECURSE_SUBDIRS', False)
# Valid owning teams (e.g. device.owningTeam) go here. These are examples and should be
# changed to match your environment.
VALID_OWNERS = (
'Data Center',
'Backbone Engineering',
'Enterprise Networking',
)
# Fields and values defined here will dictate which Juniper devices receive a
# ``commit-configuration full`` when populating ``NetDevice.commit_commands`.
# The fields and values must match the objects exactly or it will fallback to
# ``commit-configuration``.
JUNIPER_FULL_COMMIT_FIELDS = {
'deviceType': 'SWITCH',
'make': 'EX4200',
}
#===============================
# Prompt Patterns
#===============================
# Specially-defined, per-vendor prompt patterns. If a vendor isn't defined here,
# try to use IOSLIKE_PROMPT_PAT or fallback to DEFAULT_PROMPT_PAT.
PROMPT_PATTERNS = {
'aruba': r'\(\S+\)(?: \(\S+\))?\s?#$', # ArubaOS 6.1
#'aruba': r'\S+(?: \(\S+\))?\s?#\s$', # ArubaOS 6.2
'avocent': r'\S+[#\$]|->\s?$',
'citrix': r'\sDone\n$',
'f5': r'.*\(tmos\).*?#\s{1,2}\r?$',
'juniper': r'\S+\@\S+(?:\>|#)\s$',
'mrv': r'\r\n?.*(?:\:\d{1})?\s\>\>?$',
'netscreen': r'(\w+?:|)[\w().-]*\(?([\w.-])?\)?\s*->\s*$',
'paloalto': r'\r\n\S+(?:\>|#)\s?$',
'pica8': r'\S+(?:\>|#)\s?$',
}
# When a pattern is not explicitly defined for a vendor, this is what we'll try
# next (since most vendors are in fact IOS-like)
IOSLIKE_PROMPT_PAT = r'\S+(\(config(-[a-z:1-9]+)?\))?[\r\s]*#[\s\b]*$'
IOSLIKE_ENABLE_PAT = r'\S+(\(config(-[a-z:1-9]+)?\))?[\r\s]*>[\s\b]*$'
# Generic prompt to match most vendors. It assumes that you'll be greeted with
# a "#" prompt.
DEFAULT_PROMPT_PAT = r'\S+#\s?$'
#===============================
# Bounce Windows/Change Mgmt
#===============================
# Path of the explicit module file for bounce.py containing custom bounce
# window mappings.
BOUNCE_FILE = os.environ.get('BOUNCE_FILE', os.path.join(PREFIX, 'bounce.py'))
# Default bounce timezone. All BounceWindow objects are configured using
# US/Eastern for now.
BOUNCE_DEFAULT_TZ = 'US/Eastern'
# The default fallback window color for bounce windows. Must be one of
# ('green', 'yellow', or 'red').
#
# green: Low risk
# yellow: Medium risk
# red: High risk
BOUNCE_DEFAULT_COLOR = 'red'
#===============================
# Redis Settings
#===============================
# Redis master server. This will be used unless it is unreachable.
REDIS_HOST = '127.0.0.1'
# The Redis port. Default is 6379.
REDIS_PORT = 6379
# The Redis DB. Default is 0.
REDIS_DB = 0
#===============================
# Database Settings
#===============================
# These are self-explanatory, I hope. Use the ``init_task_db`` to initialize
# your database after you've created it! :)
DATABASE_ENGINE = 'mysql' # Choose 'postgresql', 'mysql', 'sqlite3'
DATABASE_NAME = '' # Or path to database file if using sqlite3
DATABASE_USER = '' # Not used with sqlite3
DATABASE_PASSWORD = '' # Not used with sqlite3
DATABASE_HOST = '' # Set to '' for localhost. Not used with sqlite3
DATABASE_PORT = '' # Set to '' for default. Not used with sqlite3.
#===============================
# ACL Management
#===============================
# Whether to allow multi-line comments to be used in Juniper firewall filters.
# Defaults to False.
ALLOW_JUNIPER_MULTILINE_COMMENTS = False
# FILTER names of ACLs that should be skipped or ignored by tools
# NOTE: These should be the names of the filters as they appear on devices. We
# want this to be mutable so it can be modified at runtime.
# TODO (jathan): Move this into Redis and maintain with 'acl' command?
IGNORED_ACLS = [
'netflow',
'massive-edge-filter',
'antispoofing',
]
# FILE names ACLs that shall not be modified by tools
# NOTE: These should be the names of the files as they exist in FIREWALL_DIR.
# Trigger expects ACLs to be prefixed with 'acl.'. These are examples and
# should be replaced.
NONMOD_ACLS = [
'acl.netflow',
'acl.antispoofing',
'acl.border-protect',
'acl.route-engine-protect',
]
# Mapping of real IP to external NAT. This is used by load_acl in the event
# that a TFTP or connection from a real IP fails or explicitly when passing the
# --no-vip flag.
# format: {local_ip: external_ip}
VIPS = {
'10.20.21.151': '5.60.17.81',
'10.10.18.157': '5.60.71.81',
}
#===============================
# ACL Loading/Rate-Limiting
#===============================
# All of the following settings are currently only used in ``load_acl``. If
# and when the load_acl functionality gets moved into the API, this might
# change.
# Any FILTER name (not filename) in this list will be skipped during automatic loads.
AUTOLOAD_BLACKLIST = [
'route-engine-protect',
'netflow',
'antispoofing',
'static-policy',
'border-protect',
]
# Assign blacklist to filter for backwards compatibility
AUTOLOAD_FILTER = AUTOLOAD_BLACKLIST
# Modify this if you want to create a list that if over the specified number of
# routers will be treated as bulk loads.
# TODO (jathan): Provide examples so that this has more context/meaning. The
# current implementation is kind of broken and doesn't scale for data centers
# with a large of number of devices.
AUTOLOAD_FILTER_THRESH = {
'route-engine-protect':3,
'antispoofing':5,
'12345':10,
}
# Any ACL applied on a number of devices >= to this number will be treated as
# bulk loads.
AUTOLOAD_BULK_THRESH = 10
# Add an acl:max_hits here if you want to override BULK_MAX_HITS_DEFAULT
# Keep in mind this number is PER EXECUTION of load_acl --auto (typically once
# per hour or 3 per bounce window).
#
# 1 per load_acl execution; ~3 per day, per bounce window
# 2 per load_acl execution; ~6 per day, per bounce window
# etc.
BULK_MAX_HITS = {
'abc123': 3,
'xyz246': 5,
'border-protect': 5,
}
# If an ACL is bulk but not in BULK_MAX_HITS, use this number as max_hits
BULK_MAX_HITS_DEFAULT = 1
#===============================
# OnCall Engineer Display
#===============================
# This should be a callable that returns data for your on-call engineer, or
# failing that None. The function should return a dictionary that looks like
# this:
#
# {'username': 'joegineer',
# 'name': 'Joe Engineer',
# 'email': 'joe.engineer@example.notreal'}
#
# If you want to disable it, just have it return a non-False value.
# If you want to use it and have it block, have it return a False value (such
# as None)
#
# This example is just providing a string that indicates that on-call lookup is
# disabled.
#
# Default: returns 'disabled'
def _get_current_oncall_stub(*args, **kwargs):
return 'disabled'
GET_CURRENT_ONCALL = _get_current_oncall_stub
#===============================
# CM Ticket Creation
#===============================
# This should be a callable that creates a CM ticket and returns the ticket
# number.
#
# If you want to disable it, just have it return a non-False value.
# If you want to use it and have it block, have it return a False value (such
# as None)
#
# This example is just providing a string that indicates that CM ticket
# creation is disabled.
#
# Default: returns ' N/A (CM ticket creation is disabled)'
def _create_cm_ticket_stub(*args, **kwargs):
return ' N/A (CM ticket creation is disabled)'
CREATE_CM_TICKET = _create_cm_ticket_stub
#===============================
# Notifications
#===============================
# Email sender for integrated tools. Usually a good idea to make this a
# no-reply address.
EMAIL_SENDER = 'nobody@not.real'
# Who to email when things go well (e.g. load_acl --auto)
SUCCESS_EMAILS = [
#'neteng@example.com',
]
# Who to email when things go not well (e.g. load_acl --auto)
FAILURE_EMAILS = [
#'primarypager@example.com',
#'secondarypager@example.com',
]
# The default sender for integrated notifications. This defaults to the fqdn
# for the localhost.
NOTIFICATION_SENDER = socket.gethostname()
# Destinations (hostnames, addresses) to notify when things go well.
SUCCESS_RECIPIENTS = [
# 'foo.example.com',
]
# Destinations (hostnames, addresses) to notify when things go not well.
FAILURE_RECIPIENTS = [
# socket.gethostname(), # The fqdn for the localhost
]
# This is a list of fully-qualified paths. Each path should end with a callable
# that handles a notification event and returns ``True`` in the event of a
# successful notification, or ``None``.
NOTIFICATION_HANDLERS = [
'trigger.utils.notifications.handlers.email_handler',
]
| 32.419643
| 137
| 0.662187
|
4a06843f12e52464db3de635424cb52cc14ee46c
| 2,288
|
py
|
Python
|
eoxserver/backends/config.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 25
|
2015-08-10T19:34:34.000Z
|
2021-02-05T08:28:01.000Z
|
eoxserver/backends/config.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 153
|
2015-01-20T08:35:49.000Z
|
2022-03-16T11:00:56.000Z
|
eoxserver/backends/config.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 10
|
2015-01-23T15:48:30.000Z
|
2021-01-21T15:41:18.000Z
|
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from eoxserver.core.decoders import config
# Default value for the EOXS_STORAGE_HANDLERS setting: dotted import paths of
# the storage handler classes enabled when the setting is not configured.
DEFAULT_EOXS_STORAGE_HANDLERS = [
    'eoxserver.backends.storages.ZIPStorageHandler',
    'eoxserver.backends.storages.TARStorageHandler',
    'eoxserver.backends.storages.DirectoryStorageHandler',
    'eoxserver.backends.storages.HTTPStorageHandler',
    'eoxserver.backends.storages.FTPStorageHandler',
    'eoxserver.backends.storages.S3StorageHandler',
    'eoxserver.backends.storages.SwiftStorageHandler',
]
# Default value for EOXS_STORAGE_AUTH_HANDLERS: dotted paths of the storage
# authentication handler classes (names suggest S3 credentials and OpenStack
# Keystone).
DEFAULT_EOXS_STORAGE_AUTH_HANDLERS = [
    'eoxserver.backends.storage_auths.S3StorageAuthHandler',
    'eoxserver.backends.keystone.storage_auth.KeystoneStorageAuthHandler',
]
class CacheConfigReader(config.Reader):
    """Decoder for cache-related options in the "backends" section of the
    EOxServer configuration (see :mod:`eoxserver.core.decoders.config`).
    """
    config.section("backends")
    # Retention time for cached items; units/format not established here
    # (original author left a TODO) — confirm before relying on it.
    retention_time = config.Option()  # TODO
    # Directory used for cache storage — presumably a local filesystem path;
    # confirm against the code that consumes this reader.
    directory = config.Option()
| 43.169811
| 80
| 0.698427
|
4a0684c27499199b41e43043a1b220001105e346
| 7,180
|
py
|
Python
|
tests/test_apiv2_district_controller.py
|
tervay/the-blue-alliance
|
e14c15cb04b455f90a2fcfdf4c1cdbf8454e17f8
|
[
"MIT"
] | 266
|
2015-01-04T00:10:48.000Z
|
2022-03-28T18:42:05.000Z
|
tests/test_apiv2_district_controller.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 2,673
|
2015-01-01T20:14:33.000Z
|
2022-03-31T18:17:16.000Z
|
tests/test_apiv2_district_controller.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 230
|
2015-01-04T00:10:48.000Z
|
2022-03-26T18:12:04.000Z
|
import unittest2
import webtest
import json
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.event_type import EventType
from controllers.api.api_district_controller import ApiDistrictListController, ApiDistrictEventsController
from models.district import District
from models.event import Event
from models.event_details import EventDetails
class TestListDistrictsController(unittest2.TestCase):
    """End-to-end test of ApiDistrictListController: list districts for a year.

    Uses the App Engine testbed stubs plus a webtest wrapper around a minimal
    WSGI app routing '/<year>' to the controller under test.
    """
    def setUp(self):
        # Route a bare '/<year>' URL straight to the controller under test.
        app = webapp2.WSGIApplication([webapp2.Route(r'/<year:>', ApiDistrictListController, methods=['GET'])], debug=True)
        self.testapp = webtest.TestApp(app)
        # Stand up in-memory App Engine service stubs (datastore, urlfetch,
        # memcache, taskqueue) for the duration of each test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_memcache_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests
        self.testbed.init_taskqueue_stub(root_path=".")
        # Fixture: one district, one district-championship event keyed into it,
        # and the event's alliance-selection details.
        self.district = District(
            id='2010ne',
            year=2010,
            abbreviation='ne',
            display_name='New England',
        )
        self.district.put()
        self.event = Event(
            id="2010sc",
            name="Palmetto Regional",
            event_type_enum=EventType.DISTRICT_CMP,
            district_key=ndb.Key(District, '2010ne'),
            short_name="Palmetto",
            event_short="sc",
            year=2010,
            end_date=datetime(2010, 03, 27),
            official=True,
            city="Clemson",
            state_prov="SC",
            country="USA",
            venue="Long Beach Arena",
            venue_address="Long Beach Arena\r\n300 East Ocean Blvd\r\nLong Beach, CA 90802\r\nUSA",
            start_date=datetime(2010, 03, 24),
            webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]",
            website="http://www.firstsv.org"
        )
        self.event.put()
        self.event_details = EventDetails(
            id=self.event.key.id(),
            alliance_selections=[
                {"declines": [], "picks": ["frc971", "frc254", "frc1662"]},
                {"declines": [], "picks": ["frc1678", "frc368", "frc4171"]},
                {"declines": [], "picks": ["frc2035", "frc192", "frc4990"]},
                {"declines": [], "picks": ["frc1323", "frc846", "frc2135"]},
                {"declines": [], "picks": ["frc2144", "frc1388", "frc668"]},
                {"declines": [], "picks": ["frc1280", "frc604", "frc100"]},
                {"declines": [], "picks": ["frc114", "frc852", "frc841"]},
                {"declines": [], "picks": ["frc2473", "frc3256", "frc1868"]}
            ]
        )
        self.event_details.put()
    def tearDown(self):
        # Tear down the testbed so stub state cannot leak into other tests.
        self.testbed.deactivate()
    def assertDistrictKeys(self, district):
        # The serialized district's 'key'/'name' should mirror the stored model.
        self.assertEqual(district["key"], self.district.abbreviation)
        self.assertEqual(district["name"], self.district.display_name)
    def test_district_api(self):
        # NOTE(review): the App-Id header value spells "disstrict" (typo) — it
        # is a runtime string, so it is left unchanged here.
        response = self.testapp.get('/{}'.format(self.event.year), headers={"X-TBA-App-Id": "tba-tests:disstrict-controller-test:v01"})
        districts = json.loads(response.body)
        self.assertDistrictKeys(districts[0])
class TestListDistrictEventsController(unittest2.TestCase):
    """End-to-end test of ApiDistrictEventsController: list a district's events.

    Same testbed/webtest setup and fixture data as TestListDistrictsController,
    but routes '/<district_abbrev>/<year>' and checks the event serialization.
    """
    def setUp(self):
        # Route '/<district_abbrev>/<year>' to the controller under test.
        app = webapp2.WSGIApplication([webapp2.Route(r'/<district_abbrev:>/<year:>', ApiDistrictEventsController, methods=['GET'])], debug=True)
        self.testapp = webtest.TestApp(app)
        # Stand up in-memory App Engine service stubs for each test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_memcache_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests
        self.testbed.init_taskqueue_stub(root_path=".")
        # Fixture: one district, one district-championship event keyed into it,
        # and the event's alliance-selection details.
        self.district = District(
            id='2010ne',
            year=2010,
            abbreviation='ne',
            display_name='New England',
        )
        self.district.put()
        self.event = Event(
            id="2010sc",
            name="Palmetto Regional",
            event_type_enum=EventType.DISTRICT_CMP,
            district_key=ndb.Key(District, '2010ne'),
            short_name="Palmetto",
            event_short="sc",
            year=2010,
            end_date=datetime(2010, 03, 27),
            official=True,
            city="Clemson",
            state_prov="SC",
            country="USA",
            venue="Long Beach Arena",
            venue_address="Long Beach Arena\r\n300 East Ocean Blvd\r\nLong Beach, CA 90802\r\nUSA",
            start_date=datetime(2010, 03, 24),
            webcast_json="[{\"type\": \"twitch\", \"channel\": \"frcgamesense\"}]",
            website="http://www.firstsv.org"
        )
        self.event.put()
        self.event_details = EventDetails(
            id=self.event.key.id(),
            alliance_selections=[
                {"declines": [], "picks": ["frc971", "frc254", "frc1662"]},
                {"declines": [], "picks": ["frc1678", "frc368", "frc4171"]},
                {"declines": [], "picks": ["frc2035", "frc192", "frc4990"]},
                {"declines": [], "picks": ["frc1323", "frc846", "frc2135"]},
                {"declines": [], "picks": ["frc2144", "frc1388", "frc668"]},
                {"declines": [], "picks": ["frc1280", "frc604", "frc100"]},
                {"declines": [], "picks": ["frc114", "frc852", "frc841"]},
                {"declines": [], "picks": ["frc2473", "frc3256", "frc1868"]}
            ]
        )
        self.event_details.put()
    def tearDown(self):
        # Tear down the testbed so stub state cannot leak into other tests.
        self.testbed.deactivate()
    def assertDistrictEvent(self, event):
        # Compare every serialized field against the stored Event fixture.
        self.assertEqual(event["key"], self.event.key_name)
        self.assertEqual(event["name"], self.event.name)
        self.assertEqual(event["short_name"], self.event.short_name)
        self.assertEqual(event["official"], self.event.official)
        self.assertEqual(event["event_type_string"], self.event.event_type_str)
        self.assertEqual(event["event_type"], self.event.event_type_enum)
        self.assertEqual(event["event_district_string"], self.event.event_district_str)
        self.assertEqual(event["event_district"], self.event.event_district_enum)
        # Dates are expected as ISO-formatted date strings.
        self.assertEqual(event["start_date"], self.event.start_date.date().isoformat())
        self.assertEqual(event["end_date"], self.event.end_date.date().isoformat())
        self.assertEqual(event["location"], self.event.location)
        # The expected value normalizes CRLF to LF, implying the API serializes
        # venue_address with plain newlines.
        self.assertEqual(event["venue_address"], self.event.venue_address.replace('\r\n', '\n'))
        self.assertEqual(event["webcast"], json.loads(self.event.webcast_json))
        self.assertEqual(event["alliances"], self.event.alliance_selections)
        self.assertEqual(event["website"], self.event.website)
    def test_event_api(self):
        # NOTE(review): the App-Id header value spells "disstrict" (typo) — it
        # is a runtime string, so it is left unchanged here.
        response = self.testapp.get("/{}/2010".format(self.district.abbreviation), headers={"X-TBA-App-Id": "tba-tests:disstrict-controller-test:v01"})
        events = json.loads(response.body)
        self.assertDistrictEvent(events[0])
| 41.264368
| 151
| 0.59805
|
4a06850fce9b4e71f1c275eabb49e078af69b543
| 1,995
|
py
|
Python
|
autotest/gdrivers/elas.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 3,100
|
2015-01-02T10:33:40.000Z
|
2022-03-31T02:06:51.000Z
|
autotest/gdrivers/elas.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 3,496
|
2015-01-06T16:53:30.000Z
|
2022-03-31T20:18:51.000Z
|
autotest/gdrivers/elas.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 2,036
|
2015-01-08T20:22:12.000Z
|
2022-03-31T10:24:08.000Z
|
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test ELAS driver
# Author: Even Rouault, <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2009, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import gdaltest
###############################################################################
# Test a dataset generated by Create()
def test_elas_1():
    """Open the sample ELAS dataset and verify band 1's checksum (4672)."""
    return gdaltest.GDALTest('ELAS', 'elas/byte_elas.bin', 1, 4672).testOpen()
###############################################################################
# Test Create()
def test_elas_2():
    """Round-trip the sample ELAS dataset through Create() and verify the checksum."""
    return gdaltest.GDALTest('ELAS', 'elas/byte_elas.bin', 1, 4672).testCreate()
| 36.272727
| 79
| 0.596491
|
4a068625d95c88f5377de3e68ee973a6bdceed14
| 25,820
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/resource/commands.py
|
digimaun/azure-cli
|
298994660f0fde6863cb45a7c3142141ed10f923
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/resource/commands.py
|
digimaun/azure-cli
|
298994660f0fde6863cb45a7c3142141ed10f923
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/resource/commands.py
|
digimaun/azure-cli
|
298994660f0fde6863cb45a7c3142141ed10f923
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from collections import OrderedDict
from azure.cli.core.util import empty_on_404
from azure.cli.core.profiles import ResourceType, PROFILE_TYPE
from azure.cli.core.commands import CliCommandType, DeploymentOutputLongRunningOperation
from azure.cli.core.commands.arm import handle_template_based_exception
from azure.cli.command_modules.resource._client_factory import (
cf_resource_groups, cf_providers, cf_features, cf_tags, cf_deployments,
cf_deployment_operations, cf_policy_definitions, cf_policy_set_definitions, cf_resource_links,
cf_resource_deploymentscripts, cf_resource_managedapplications, cf_resource_managedappdefinitions, cf_management_groups, cf_management_group_subscriptions)
from azure.cli.command_modules.resource._validators import process_deployment_create_namespace
from ._exception_handler import managementgroups_exception_handler
from knack.log import get_logger
logger = get_logger(__name__)
# Resource group commands
def transform_resource_group_list(result):
    """Project a list of resource-group dicts into ordered table rows.

    Each row exposes Name, Location and Status (the group's
    ``properties.provisioningState``) in display order.
    """
    rows = []
    for group in result:
        rows.append(OrderedDict([
            ('Name', group['name']),
            ('Location', group['location']),
            ('Status', group['properties']['provisioningState']),
        ]))
    return rows
def transform_resource_list(result):
    """Project a list of resource dicts into ordered table rows.

    Each row exposes Name, ResourceGroup, Location, Type and Status. Status
    comes from ``properties.provisioningStatus`` when available; resources
    whose ``properties`` is None (TypeError) or lacks the status key
    (KeyError) get a blank status instead of aborting the whole transform.
    """
    transformed = []
    for r in result:
        res = OrderedDict([('Name', r['name']), ('ResourceGroup', r['resourceGroup']), ('Location', r['location']), ('Type', r['type'])])
        try:
            res['Status'] = r['properties']['provisioningStatus']
        except (KeyError, TypeError):
            # Previously only TypeError was caught, so a properties dict
            # missing 'provisioningStatus' crashed the listing with KeyError.
            res['Status'] = ' '
        transformed.append(res)
    return transformed
# Resource group deployment commands
def transform_deployment(result):
    """Flatten a single deployment dict into an ordered display row.

    Exposes Name, ResourceGroup, State, Timestamp and Mode, pulling the last
    three from the deployment's ``properties`` sub-dict.
    """
    props = result['properties']
    return OrderedDict([
        ('Name', result['name']),
        ('ResourceGroup', result['resourceGroup']),
        ('State', props['provisioningState']),
        ('Timestamp', props['timestamp']),
        ('Mode', props['mode']),
    ])
def transform_deployments_list(result):
    """Sort deployments chronologically by timestamp and flatten each into a row."""
    ordered = sorted(result, key=lambda deployment: deployment['properties']['timestamp'])
    return [transform_deployment(deployment) for deployment in ordered]
# pylint: disable=too-many-statements
def load_command_table(self, _):
    """Register all resource-related command groups with the CLI.

    First declares one ``CliCommandType`` per backing SDK operations class
    (or per custom module), then wires each command group (group, resource,
    provider, feature, tag, deployments at every scope, policy, lock, link,
    managedapp, management-group, plus top-level ``rest``/``version``)
    onto those types.
    """
    from azure.cli.core.commands.arm import deployment_validate_table_format
    # Custom implementations live in this module's custom.py.
    resource_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.resource.custom#{}')
    # --- SDK-backed command types, one per operations class ---
    resource_group_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.resources.operations#ResourceGroupsOperations.{}',
        client_factory=cf_resource_groups,
        resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
    )
    resource_provider_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.resources.operations#ProvidersOperations.{}',
        client_factory=cf_providers,
        resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
    )
    resource_feature_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.features.operations#FeaturesOperations.{}',
        client_factory=cf_features,
        resource_type=ResourceType.MGMT_RESOURCE_FEATURES
    )
    resource_tag_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.resources.operations#TagsOperations.{}',
        client_factory=cf_tags,
        resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
    )
    resource_deployment_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.resources.operations#DeploymentsOperations.{}',
        client_factory=cf_deployments,
        resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
    )
    resource_deployment_operation_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.resources.operations#DeploymentOperations.{}',
        client_factory=cf_deployment_operations,
        resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
    )
    resource_policy_definitions_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.policy.operations#PolicyDefinitionsOperations.{}',
        client_factory=cf_policy_definitions,
        resource_type=ResourceType.MGMT_RESOURCE_POLICY
    )
    resource_policy_set_definitions_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.policy.operations#PolicySetDefinitionsOperations.{}',
        client_factory=cf_policy_set_definitions,
        resource_type=ResourceType.MGMT_RESOURCE_POLICY
    )
    # No client_factory here: lock commands below are custom implementations.
    resource_lock_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.locks.operations#ManagementLocksOperations.{}',
        resource_type=ResourceType.MGMT_RESOURCE_LOCKS
    )
    resource_link_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.links.operations#ResourceLinksOperations.{}',
        client_factory=cf_resource_links,
        resource_type=ResourceType.MGMT_RESOURCE_LINKS
    )
    # NOTE(review): operations_tmpl points at ResourceLinksOperations although
    # this is the deployment-scripts type — looks like a copy/paste leftover;
    # harmless while the group below only uses custom commands, but confirm.
    resource_deploymentscripts_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.deploymentscripts.operations#ResourceLinksOperations.{}',
        client_factory=cf_resource_deploymentscripts,
        resource_type=ResourceType.MGMT_RESOURCE_DEPLOYMENTSCRIPTS
    )
    resource_managedapp_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.managedapplications.operations#ApplicationsOperations.{}',
        client_factory=cf_resource_managedapplications,
        resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
    )
    resource_managedapp_def_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.resource.managedapplications.operations#ApplicationDefinitionsOperations.{}',
        client_factory=cf_resource_managedappdefinitions,
        resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
    )
    resource_managementgroups_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.managementgroups.operations#ManagementGroupsOperations.{}',
        client_factory=cf_management_groups,
        exception_handler=managementgroups_exception_handler
    )
    resource_managementgroups_subscriptions_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.managementgroups.operations#ManagementGroupSubscriptionsOperations.{}',
        client_factory=cf_management_group_subscriptions,
        exception_handler=managementgroups_exception_handler
    )
    # Custom getter/setter pair used by the management-group generic update below.
    resource_managementgroups_update_type = CliCommandType(
        operations_tmpl='azure.cli.command_modules.resource.custom#{}',
        client_factory=cf_management_groups,
        exception_handler=managementgroups_exception_handler
    )
    # Subscription-level locks
    with self.command_group('account lock', resource_lock_sdk, resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
        g.custom_command('create', 'create_lock')
        g.custom_command('delete', 'delete_lock')
        g.custom_command('list', 'list_locks')
        g.custom_show_command('show', 'get_lock')
        g.custom_command('update', 'update_lock')
    # Resource groups
    with self.command_group('group', resource_group_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.command('delete', 'delete', supports_no_wait=True, confirmation=True)
        g.show_command('show', 'get')
        g.command('exists', 'check_existence')
        g.custom_command('list', 'list_resource_groups', table_transformer=transform_resource_group_list)
        g.custom_command('create', 'create_resource_group')
        g.custom_command('export', 'export_group_as_template')
        g.generic_update_command('update', custom_func_name='update_resource_group', custom_func_type=resource_custom)
        g.wait_command('wait')
    # Resource-group-level locks
    with self.command_group('group lock', resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
        g.custom_command('create', 'create_lock')
        g.custom_command('delete', 'delete_lock')
        g.custom_command('list', 'list_locks')
        g.custom_show_command('show', 'get_lock')
        g.custom_command('update', 'update_lock')
    # Generic resource commands (all custom implementations)
    with self.command_group('resource', resource_custom, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('create', 'create_resource')
        g.custom_command('delete', 'delete_resource')
        g.custom_show_command('show', 'show_resource')
        g.custom_command('list', 'list_resources', table_transformer=transform_resource_list)
        g.custom_command('tag', 'tag_resource')
        g.custom_command('move', 'move_resource')
        g.custom_command('invoke-action', 'invoke_resource_action', transform=DeploymentOutputLongRunningOperation(self.cli_ctx))
        g.generic_update_command('update', getter_name='show_resource', setter_name='update_resource',
                                 client_factory=None)
        g.wait_command('wait', getter_name='show_resource')
    # Resource-level locks
    with self.command_group('resource lock', resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
        g.custom_command('create', 'create_lock')
        g.custom_command('delete', 'delete_lock')
        g.custom_command('list', 'list_locks')
        g.custom_show_command('show', 'get_lock')
        g.custom_command('update', 'update_lock')
    # Resource provider commands
    with self.command_group('provider', resource_provider_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.command('list', 'list')
        g.show_command('show', 'get')
        g.custom_command('register', 'register_provider')
        g.custom_command('unregister', 'unregister_provider')
        g.custom_command('operation list', 'list_provider_operations')
        g.custom_show_command('operation show', 'show_provider_operations')
    # Resource feature commands
    with self.command_group('feature', resource_feature_sdk, client_factory=cf_features, resource_type=PROFILE_TYPE,
                            min_api='2019-03-02-hybrid') as g:
        feature_table_transform = '{Name:name, RegistrationState:properties.state}'
        g.custom_command('list', 'list_features', table_transformer='[].' + feature_table_transform)
        g.show_command('show', 'get', table_transformer=feature_table_transform)
        g.custom_command('register', 'register_feature')
    # Tag commands
    with self.command_group('tag', resource_tag_sdk) as g:
        g.command('list', 'list')
        g.command('create', 'create_or_update')
        g.command('delete', 'delete')
        g.command('add-value', 'create_or_update_value')
        g.command('remove-value', 'delete_value')
    # az group deployment (deprecated in favor of `az deployment group`)
    with self.command_group('group deployment', resource_deployment_sdk, deprecate_info=self.deprecate(redirect='deployment group', hide=True)) as g:
        g.custom_command('create', 'deploy_arm_template', supports_no_wait=True, validator=process_deployment_create_namespace,
                         table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
        # Two `list` registrations: which one binds is decided by the API profile.
        g.command('list', 'list_by_resource_group', table_transformer=transform_deployments_list, min_api='2017-05-10')
        g.command('list', 'list', table_transformer=transform_deployments_list, max_api='2016-09-01')
        g.show_command('show', 'get', table_transformer=transform_deployment)
        g.command('delete', 'delete', supports_no_wait=True)
        g.custom_command('validate', 'validate_arm_template', table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
        g.custom_command('export', 'export_deployment_as_template')
        g.wait_command('wait')
        g.command('cancel', 'cancel')
    with self.command_group('group deployment operation', resource_deployment_operation_sdk, deprecate_info=self.deprecate(redirect='deployment operation group', hide=True)) as g:
        g.command('list', 'list')
        g.custom_show_command('show', 'get_deployment_operations', client_factory=cf_deployment_operations)
    # az deployment (subscription scope; deprecated in favor of `az deployment sub`)
    with self.command_group('deployment', resource_deployment_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployments_at_subscription_scope', table_transformer=transform_deployments_list, deprecate_info=g.deprecate(redirect='deployment sub list', hide=True))
        g.custom_show_command('show', 'get_deployment_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub show', hide=True))
        g.custom_command('delete', 'delete_deployment_at_subscription_scope', supports_no_wait=True, deprecate_info=g.deprecate(redirect='deployment sub delete', hide=True))
        g.custom_command('validate', 'validate_arm_template_at_subscription_scope', validator=process_deployment_create_namespace,
                         table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception,
                         deprecate_info=g.deprecate(redirect='deployment sub validate', hide=True))
        g.custom_command('create', 'deploy_arm_template_at_subscription_scope', supports_no_wait=True, validator=process_deployment_create_namespace,
                         exception_handler=handle_template_based_exception, deprecate_info=g.deprecate(redirect='deployment sub create', hide=True))
        g.custom_command('export', 'export_template_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub export', hide=True))
        g.custom_wait_command('wait', 'get_deployment_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub wait', hide=True))
        g.custom_command('cancel', 'cancel_deployment_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub cancel', hide=True))
    with self.command_group('deployment operation', resource_deployment_operation_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES, deprecate_info=self.deprecate(redirect='deployment operation sub', hide=True)) as g:
        g.custom_command('list', 'list_deployment_operations_at_subscription_scope')
        g.custom_show_command('show', 'get_deployment_operations_at_subscription_scope', client_factory=cf_deployment_operations)
    # az deployment sub
    with self.command_group('deployment sub', resource_deployment_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployments_at_subscription_scope', table_transformer=transform_deployments_list)
        g.custom_show_command('show', 'get_deployment_at_subscription_scope', table_transformer=transform_deployment)
        g.custom_command('delete', 'delete_deployment_at_subscription_scope', supports_no_wait=True)
        g.custom_command('validate', 'validate_arm_template_at_subscription_scope', validator=process_deployment_create_namespace,
                         table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
        g.custom_command('create', 'deploy_arm_template_at_subscription_scope', supports_no_wait=True, validator=process_deployment_create_namespace,
                         table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
        g.custom_command('export', 'export_template_at_subscription_scope')
        g.custom_wait_command('wait', 'get_deployment_at_subscription_scope')
        g.custom_command('cancel', 'cancel_deployment_at_subscription_scope')
    with self.command_group('deployment operation sub', resource_deployment_operation_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployment_operations_at_subscription_scope')
        g.custom_show_command('show', 'get_deployment_operations_at_subscription_scope', client_factory=cf_deployment_operations)
    # Deployment scripts (preview feature)
    with self.command_group('deployment-scripts', resource_deploymentscripts_sdk, resource_type=ResourceType.MGMT_RESOURCE_DEPLOYMENTSCRIPTS, is_preview=True) as g:
        g.custom_command('list', 'list_deployment_scripts')
        g.custom_show_command('show', 'get_deployment_script')
        g.custom_command('show-log', 'get_deployment_script_logs')
        g.custom_command('delete', 'delete_deployment_script', confirmation=True)
    # az deployment group
    with self.command_group('deployment group', resource_deployment_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployments_at_resource_group', table_transformer=transform_deployments_list)
        g.custom_show_command('show', 'get_deployment_at_resource_group', table_transformer=transform_deployment)
        g.custom_command('delete', 'delete_deployment_at_resource_group', supports_no_wait=True)
        g.custom_command('validate', 'validate_arm_template_at_resource_group', validator=process_deployment_create_namespace,
                         table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
        g.custom_command('create', 'deploy_arm_template_at_resource_group', supports_no_wait=True, validator=process_deployment_create_namespace,
                         table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
        g.custom_command('export', 'export_template_at_resource_group')
        g.custom_wait_command('wait', 'get_deployment_at_resource_group')
        g.custom_command('cancel', 'cancel_deployment_at_resource_group')
    with self.command_group('deployment operation group', resource_deployment_operation_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployment_operations_at_resource_group')
        g.custom_show_command('show', 'get_deployment_operations_at_resource_group', client_factory=cf_deployment_operations)
    # az deployment mg
    with self.command_group('deployment mg', resource_deployment_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployments_at_management_group', table_transformer=transform_deployments_list)
        g.custom_show_command('show', 'get_deployment_at_management_group', table_transformer=transform_deployment)
        g.custom_command('delete', 'delete_deployment_at_management_group', supports_no_wait=True)
        g.custom_command('validate', 'validate_arm_template_at_management_group', validator=process_deployment_create_namespace,
                         table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
        g.custom_command('create', 'deploy_arm_template_at_management_group', supports_no_wait=True, validator=process_deployment_create_namespace,
                         table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
        g.custom_command('export', 'export_template_at_management_group')
        g.custom_wait_command('wait', 'get_deployment_at_management_group')
        g.custom_command('cancel', 'cancel_deployment_at_management_group')
    with self.command_group('deployment operation mg', resource_deployment_operation_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployment_operations_at_management_group')
        g.custom_show_command('show', 'get_deployment_operations_at_management_group', client_factory=cf_deployment_operations)
    # az deployment tenant
    with self.command_group('deployment tenant', resource_deployment_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployments_at_tenant_scope', table_transformer=transform_deployments_list)
        g.custom_show_command('show', 'get_deployment_at_tenant_scope', table_transformer=transform_deployment)
        g.custom_command('delete', 'delete_deployment_at_tenant_scope', supports_no_wait=True)
        g.custom_command('validate', 'validate_arm_template_at_tenant_scope', validator=process_deployment_create_namespace,
                         table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
        g.custom_command('create', 'deploy_arm_template_at_tenant_scope', supports_no_wait=True, validator=process_deployment_create_namespace,
                         table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
        g.custom_command('export', 'export_template_at_tenant_scope')
        g.custom_wait_command('wait', 'get_deployment_at_tenant_scope')
        g.custom_command('cancel', 'cancel_deployment_at_tenant_scope')
    with self.command_group('deployment operation tenant', resource_deployment_operation_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('list', 'list_deployment_operations_at_tenant_scope')
        g.custom_show_command('show', 'get_deployment_operations_at_tenant_scope', client_factory=cf_deployment_operations)
    # Policy commands
    with self.command_group('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as g:
        g.custom_command('create', 'create_policy_assignment')
        g.custom_command('delete', 'delete_policy_assignment')
        g.custom_command('list', 'list_policy_assignment')
        g.custom_show_command('show', 'show_policy_assignment')
    with self.command_group('policy assignment identity', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as g:
        g.custom_command('assign', 'set_identity')
        g.custom_show_command('show', 'show_identity')
        g.custom_command('remove', 'remove_identity')
    with self.command_group('policy definition', resource_policy_definitions_sdk, resource_type=ResourceType.MGMT_RESOURCE_POLICY) as g:
        g.custom_command('create', 'create_policy_definition')
        g.custom_command('delete', 'delete_policy_definition')
        g.custom_command('list', 'list_policy_definition')
        g.custom_show_command('show', 'get_policy_definition')
        g.custom_command('update', 'update_policy_definition')
    with self.command_group('policy set-definition', resource_policy_set_definitions_sdk, resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2017-06-01-preview') as g:
        g.custom_command('create', 'create_policy_setdefinition')
        g.custom_command('delete', 'delete_policy_setdefinition')
        g.custom_command('list', 'list_policy_setdefinition')
        g.custom_show_command('show', 'get_policy_setdefinition')
        g.custom_command('update', 'update_policy_setdefinition')
    # Generic (scope-agnostic) lock commands
    with self.command_group('lock', resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
        g.custom_command('create', 'create_lock')
        g.custom_command('delete', 'delete_lock')
        g.custom_command('list', 'list_locks')
        g.custom_show_command('show', 'get_lock')
        g.custom_command('update', 'update_lock')
    # Resource links
    with self.command_group('resource link', resource_link_sdk, resource_type=ResourceType.MGMT_RESOURCE_LINKS) as g:
        g.custom_command('create', 'create_resource_link')
        g.command('delete', 'delete')
        g.show_command('show', 'get')
        g.custom_command('list', 'list_resource_links')
        g.custom_command('update', 'update_resource_link')
    # Managed applications
    with self.command_group('managedapp', resource_managedapp_sdk, min_api='2017-05-10', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('create', 'create_application')
        g.command('delete', 'delete')
        g.custom_show_command('show', 'show_application')
        g.custom_command('list', 'list_applications')
    with self.command_group('managedapp definition', resource_managedapp_def_sdk, min_api='2017-05-10', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
        g.custom_command('create', 'create_applicationdefinition')
        g.command('delete', 'delete')
        g.custom_show_command('show', 'show_applicationdefinition')
        g.command('list', 'list_by_resource_group', exception_handler=empty_on_404)
    # Management groups
    with self.command_group('account management-group', resource_managementgroups_sdk, client_factory=cf_management_groups) as g:
        g.custom_command('list', 'cli_managementgroups_group_list')
        g.custom_show_command('show', 'cli_managementgroups_group_show')
        g.custom_command('create', 'cli_managementgroups_group_create')
        g.custom_command('delete', 'cli_managementgroups_group_delete')
        g.generic_update_command(
            'update',
            getter_name='cli_managementgroups_group_update_get',
            getter_type=resource_managementgroups_update_type,
            setter_name='cli_managementgroups_group_update_set',
            setter_type=resource_managementgroups_update_type,
            custom_func_name='cli_managementgroups_group_update_custom_func',
            custom_func_type=resource_managementgroups_update_type,
            exception_handler=managementgroups_exception_handler)
    with self.command_group('account management-group subscription', resource_managementgroups_subscriptions_sdk, client_factory=cf_management_group_subscriptions) as g:
        g.custom_command('add', 'cli_managementgroups_subscription_add')
        g.custom_command('remove', 'cli_managementgroups_subscription_remove')
    # Top-level (no-group) commands
    with self.command_group('') as g:
        g.custom_command('rest', 'rest_call')
    with self.command_group('') as g:
        g.custom_command('version', 'show_version')
| 62.669903
| 245
| 0.752711
|
4a0686544dabb9ff5ff5ab54de072d80e28d6235
| 11,200
|
py
|
Python
|
nilearn/decomposition/dict_learning.py
|
kbraunlich/nilearn
|
a152f8e2fe1e62ebbd9d0fe03321d1affe70542c
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/decomposition/dict_learning.py
|
kbraunlich/nilearn
|
a152f8e2fe1e62ebbd9d0fe03321d1affe70542c
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/decomposition/dict_learning.py
|
kbraunlich/nilearn
|
a152f8e2fe1e62ebbd9d0fe03321d1affe70542c
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Dictionary learning estimator: Perform a map learning algorithm by learning
a temporal dense dictionary along with sparse spatial loadings, that
constitutes output maps
"""
# Author: Arthur Mensch
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from sklearn.decomposition import dict_learning_online
from joblib import Memory
from sklearn.linear_model import Ridge
from .base import BaseDecomposition
from .canica import CanICA
from nilearn._utils import fill_doc
# check_input=False is an optimization available in sklearn.
sparse_encode_args = {'check_input': False}
def _compute_loadings(components, data):
    """Regress `data` onto `components` and return normalized loadings.

    Parameters
    ----------
    components : ndarray
        Component maps used as regressors (transposed before fitting).
    data : ndarray
        Unmasked data to project onto the components.

    Returns
    -------
    loadings : ndarray
        Ridge-regression coefficients, each column rescaled to unit norm.
    """
    # fit_intercept must be a bool: recent scikit-learn releases validate
    # estimator parameters and reject the former ``fit_intercept=None``
    # (which was only ever treated as falsy, i.e. equivalent to False).
    ridge = Ridge(fit_intercept=False, alpha=1e-8)
    ridge.fit(components.T, np.asarray(data.T))
    loadings = ridge.coef_.T
    # Normalize each loading vector; guard all-zero columns against
    # division by zero.
    S = np.sqrt(np.sum(loadings ** 2, axis=0))
    S[S == 0] = 1
    loadings /= S[np.newaxis, :]
    return loadings
@fill_doc
class DictLearning(BaseDecomposition):
    """Perform a map learning algorithm based on spatial component sparsity,
    over a CanICA initialization [1]_. This yields more stable maps than CanICA.
    .. versionadded:: 0.2
    Parameters
    ----------
    mask : Niimg-like object or MultiNiftiMasker instance, optional
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given,
        it will be computed automatically by a MultiNiftiMasker with default
        parameters.
    n_components : int, optional
        Number of components to extract. Default=20.
    batch_size : int, optional
        The number of samples to take in each batch. Default=20.
    n_epochs : float, optional
        Number of epochs the algorithm should run on the data. Default=1.
    alpha : float, optional
        Sparsity controlling parameter. Default=10.
    dict_init : Niimg-like object, optional
        Initial estimation of dictionary maps. Would be computed from CanICA if
        not provided.
    reduction_ratio : 'auto' or float between 0. and 1., optional
        - Between 0. or 1. : controls data reduction in the temporal domain.
        1. means no reduction, < 1. calls for an SVD based reduction.
        - if set to 'auto', estimator will set the number of components per
        reduced session to be n_components. Default='auto'.
    method : {'cd', 'lars'}, optional
        Coding method used by sklearn backend. Below are the possible values.
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
        Default='cd'.
    random_state : int or RandomState, optional
        Pseudo number generator state used for random sampling.
    smoothing_fwhm : float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters of the
        spatial smoothing to apply to the signal. Default=4mm.
    standardize : boolean, optional
        If standardize is True, the time-series are centered and normed:
        their variance is put to 1 in the time dimension. Default=True.
    detrend : boolean, optional
        If detrend is True, the time-series will be detrended before
        components extraction. Default=True.
    target_affine : 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    target_shape : 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    low_pass : None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    high_pass : None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    t_r : float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    %(mask_strategy)s
    .. note::
         Depending on this value, the mask will be computed from
         :func:`nilearn.masking.compute_background_mask`,
         :func:`nilearn.masking.compute_epi_mask`, or
         :func:`nilearn.masking.compute_brain_mask`.
        Default='epi'.
    mask_args : dict, optional
        If mask is None, these are additional parameters passed to
        masking.compute_background_mask or masking.compute_epi_mask
        to fine-tune mask computation. Please see the related documentation
        for details.
    memory : instance of joblib.Memory or string, optional
        Used to cache the masking process.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.
    memory_level : integer, optional
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching. Default=0.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs', -2 'all CPUs but one', and so on. Default=1.
    verbose : integer, optional
        Indicate the level of verbosity. By default, nothing is printed.
        Default=0.
    Attributes
    ----------
    `components_` : 2D numpy array (n_components x n-voxels)
        Masked dictionary components extracted from the input images.
        .. note::
            Use attribute `components_img_` rather than manually unmasking
            `components_` with `masker_` attribute.
    `components_img_` : 4D Nifti image
        4D image giving the extracted components. Each 3D image is a component.
        .. versionadded:: 0.4.1
    `masker_` : instance of MultiNiftiMasker
        Masker used to filter and mask data as first step. If an instance of
        MultiNiftiMasker is given in `mask` parameter,
        this is a copy of it. Otherwise, a masker is created using the value
        of `mask` and other NiftiMasker related parameters as initialization.
    `mask_img_` : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The mask of the data. If no mask was given at masker creation, contains
        the automatically computed mask.
    References
    ----------
    .. [1] Arthur Mensch, Gael Varoquaux, Bertrand Thirion,
       Compressed online dictionary learning for fast resting-state fMRI
       decomposition. IEEE 13th International Symposium on Biomedical
       Imaging (ISBI), 2016. pp. 1282-1285
    """
    def __init__(self, n_components=20,
                 n_epochs=1, alpha=10, reduction_ratio='auto', dict_init=None,
                 random_state=None, batch_size=20, method="cd", mask=None,
                 smoothing_fwhm=4, standardize=True, detrend=True,
                 low_pass=None, high_pass=None, t_r=None, target_affine=None,
                 target_shape=None, mask_strategy='epi', mask_args=None,
                 n_jobs=1, verbose=0, memory=Memory(location=None),
                 memory_level=0):
        # Masking/preprocessing parameters are handled by the base class.
        BaseDecomposition.__init__(self, n_components=n_components,
                                   random_state=random_state, mask=mask,
                                   smoothing_fwhm=smoothing_fwhm,
                                   standardize=standardize, detrend=detrend,
                                   low_pass=low_pass, high_pass=high_pass,
                                   t_r=t_r, target_affine=target_affine,
                                   target_shape=target_shape,
                                   mask_strategy=mask_strategy,
                                   mask_args=mask_args, memory=memory,
                                   memory_level=memory_level, n_jobs=n_jobs,
                                   verbose=verbose)
        # Dictionary-learning-specific hyper-parameters.
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.method = method
        self.alpha = alpha
        self.reduction_ratio = reduction_ratio
        self.dict_init = dict_init
    def _init_dict(self, data):
        """Initialize dictionary maps from `dict_init`, or from a CanICA run."""
        if self.dict_init is not None:
            components = self.masker_.transform(self.dict_init)
        else:
            canica = CanICA(n_components=self.n_components,
                            # CanICA specific parameters
                            do_cca=True, threshold=float(self.n_components),
                            n_init=1,
                            # mask parameter is not useful as we bypass masking
                            mask=self.masker_, random_state=self.random_state,
                            memory=self.memory, memory_level=self.memory_level,
                            n_jobs=self.n_jobs, verbose=self.verbose)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", UserWarning)
                # We use protected function _raw_fit as data
                # has already been unmasked
                canica._raw_fit(data)
            components = canica.components_
        # NOTE(review): this rescales by the energy (sum of squares), not the
        # l2 norm (no sqrt) — confirm this asymmetry with _raw_fit is intended.
        S = (components ** 2).sum(axis=1)
        S[S == 0] = 1
        components /= S[:, np.newaxis]
        self.components_init_ = components
    def _init_loadings(self, data):
        # Loadings are cached via the estimator's joblib Memory.
        self.loadings_init_ = self._cache(_compute_loadings)(
            self.components_init_, data)
    def _raw_fit(self, data):
        """Helper function that directly process unmasked data
        Parameters
        ----------
        data : ndarray,
            Shape (n_samples, n_features)
        Returns
        -------
        self : object
            The fitted estimator, with `components_` (and `components_img_`
            when a masker is available) populated.
        """
        if self.verbose:
            print('[DictLearning] Learning initial components')
        self._init_dict(data)
        _, n_features = data.shape
        if self.verbose:
            print('[DictLearning] Computing initial loadings')
        self._init_loadings(data)
        dict_init = self.loadings_init_
        # data is transposed below, so the sample axis seen by
        # dict_learning_online has length n_features.
        n_iter = ((n_features - 1) // self.batch_size + 1) * self.n_epochs
        if self.verbose:
            print('[DictLearning] Learning dictionary')
        self.components_, _ = self._cache(dict_learning_online)(
            data.T, self.n_components, alpha=self.alpha, n_iter=n_iter,
            batch_size=self.batch_size, method=self.method,
            dict_init=dict_init, verbose=max(0, self.verbose - 1),
            random_state=self.random_state, return_code=True, shuffle=True,
            n_jobs=1)
        self.components_ = self.components_.T
        # Unit-variance scaling
        S = np.sqrt(np.sum(self.components_ ** 2, axis=1))
        S[S == 0] = 1
        self.components_ /= S[:, np.newaxis]
        # Flip signs in each composant so that positive part is l1 larger
        # than negative part. Empirically this yield more positive looking maps
        # than with setting the max to be positive.
        for component in self.components_:
            if np.sum(component > 0) < np.sum(component < 0):
                component *= -1
        if hasattr(self, "masker_"):
            self.components_img_ = self.masker_.inverse_transform(self.components_)
        return self
| 39.02439
| 83
| 0.640536
|
4a0689c8a906fd26b59c80eae2ef7ab8fe8b23d7
| 2,294
|
py
|
Python
|
django_any/functions.py
|
lincolnloop/django-whatever
|
9009ff46308f9ddf28cd5e9656f47e0067dc5ad0
|
[
"MIT"
] | null | null | null |
django_any/functions.py
|
lincolnloop/django-whatever
|
9009ff46308f9ddf28cd5e9656f47e0067dc5ad0
|
[
"MIT"
] | null | null | null |
django_any/functions.py
|
lincolnloop/django-whatever
|
9009ff46308f9ddf28cd5e9656f47e0067dc5ad0
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
"""
Additional functions for django-any
"""
def valid_choices(choices):
    """Yield every storable key from a Django-style ``choices`` sequence.

    Handles both flat ``(key, label)`` pairs and grouped choices of the
    form ``(group_label, [(key, label), ...])``.
    """
    for key, value in choices:
        if isinstance(value, (list, tuple)):
            # Grouped choices: yield the keys of the nested pairs.
            # (A distinct loop variable avoids shadowing the outer `key`.)
            for sub_key, _ in value:
                yield sub_key
        else:
            yield key
def split_model_kwargs(kw):
    """Split kwargs into plain model fields and per-field sub-options.

    Keys containing ``'__'`` are interpreted as ``field__subfield`` and
    grouped into a per-field dict; all other keys are direct model fields.

    Returns
    -------
    (model_fields, field_args) : tuple of (dict, defaultdict)
        ``field_args`` is a ``defaultdict(dict)`` so missing fields yield
        an empty dict, matching the original behavior.
    """
    from collections import defaultdict
    model_fields = {}
    # defaultdict(dict) replaces the equivalent defaultdict(lambda: {}).
    field_args = defaultdict(dict)
    # Iterate items directly instead of kw.keys() + repeated lookups.
    for key, value in kw.items():
        if '__' in key:
            field, _, subfield = key.partition('__')
            field_args[field][subfield] = value
        else:
            model_fields[key] = value
    return model_fields, field_args
class ExtensionMethod(object):
    """
    Single-dispatch style registry: the implementation is chosen from the
    first argument — by its class, or by the value itself when
    ``by_instance`` is set.
    """
    def __init__(self, by_instance=False):
        self.registry = {}
        self.by_instance = by_instance
        self.default = None
    def register(self, field_type, impl=None):
        """
        Register an implementation for ``field_type``.
        Usable both as a plain call and as a decorator.
        """
        def _wrapper(func):
            self.registry[field_type] = func
            return func
        return _wrapper(impl) if impl else _wrapper
    def register_default(self, func):
        """Install the fallback used when no registered type matches."""
        self.default = func
        return func
    def decorator(self, impl):
        """
        Wrap the dispatching function itself (decorator for register
        decorators).
        """
        self._create_value = impl(self._create_value)
        return impl
    def _create_value(self, *args, **kwargs):
        """
        Lowest-level dispatcher, kept separate from ``__call__`` because
        Python may cache the ``__call__`` reference on module import.
        """
        if not args:
            raise TypeError('Object instance is not provided')
        target = args[0]
        lookup_key = target if self.by_instance else target.__class__
        handler = self.registry.get(lookup_key, self.default)
        if handler is None:
            raise TypeError("no match %s" % lookup_key)
        return handler(*args, **kwargs)
    def __call__(self, *args, **kwargs):
        return self._create_value(*args, **kwargs)
| 23.895833
| 62
| 0.570183
|
4a068ce36925bb0c3a83a69d5411542d5ded5b04
| 1,345
|
py
|
Python
|
configs/gma/gma_8x2_120k_mixed_368x768.py
|
hologerry/mmflow
|
40caf064851bd95317424e31cc137c0007a2bece
|
[
"Apache-2.0"
] | 481
|
2021-11-16T07:04:23.000Z
|
2022-03-31T22:21:21.000Z
|
configs/gma/gma_8x2_120k_mixed_368x768.py
|
hologerry/mmflow
|
40caf064851bd95317424e31cc137c0007a2bece
|
[
"Apache-2.0"
] | 72
|
2021-11-16T12:25:55.000Z
|
2022-03-28T13:10:45.000Z
|
configs/gma/gma_8x2_120k_mixed_368x768.py
|
hologerry/mmflow
|
40caf064851bd95317424e31cc137c0007a2bece
|
[
"Apache-2.0"
] | 48
|
2021-11-16T06:48:46.000Z
|
2022-03-30T12:46:40.000Z
|
_base_ = [
'../_base_/models/gma/gma.py',
'../_base_/datasets/sintel_cleanx100_sintel_fianlx100_kitti2015x200_hd1kx5_flyingthings3d_raft_384x768.py', # noqa
'../_base_/default_runtime.py'
]
model = dict(
decoder=dict(
type='GMADecoder',
net_type='Basic',
num_levels=4,
radius=4,
iters=12,
corr_op_cfg=dict(type='CorrLookup', align_corners=True),
gru_type='SeqConv',
heads=1,
motion_channels=128,
position_only=False,
flow_loss=dict(type='SequenceLoss', gamma=0.85),
act_cfg=dict(type='ReLU')),
freeze_bn=False,
test_cfg=dict(iters=32))
optimizer = dict(
type='AdamW',
lr=0.000125,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=0.00001,
amsgrad=False)
optimizer_config = dict(grad_clip=dict(max_norm=1.))
lr_config = dict(
policy='OneCycle',
max_lr=0.000125,
total_steps=120100,
pct_start=0.05,
anneal_strategy='linear')
runner = dict(type='IterBasedRunner', max_iters=120000)
checkpoint_config = dict(by_epoch=False, interval=10000)
evaluation = dict(interval=10000, metric='EPE')
# Train on FlyingChairs and FlyingThings3D, and finetune on
# and Sintel, KITTI2015 and HD1K
load_from = 'https://download.openmmlab.com/mmflow/gma/gma_8x2_120k_flyingthings3d_400x720.pth' # noqa
| 29.23913
| 119
| 0.678067
|
4a068e057ca188e7d486c29a687182d0e9492acf
| 20,383
|
py
|
Python
|
Lib/test/test_collections.py
|
lrq3000/wpython2.wpython11
|
4905d2b2be3add3b8cc702e09422fb2e869005b5
|
[
"PSF-2.0"
] | 8
|
2020-08-24T14:21:35.000Z
|
2022-01-26T04:49:11.000Z
|
Lib/test/test_collections.py
|
lrq3000/wpython2.wpython11
|
4905d2b2be3add3b8cc702e09422fb2e869005b5
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_collections.py
|
lrq3000/wpython2.wpython11
|
4905d2b2be3add3b8cc702e09422fb2e869005b5
|
[
"PSF-2.0"
] | 3
|
2020-08-23T23:20:38.000Z
|
2021-10-18T03:35:00.000Z
|
import unittest, doctest
from test import test_support
from collections import namedtuple
import pickle, cPickle, copy
import keyword
import re
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
from collections import Set, MutableSet
from collections import Mapping, MutableMapping
from collections import Sequence, MutableSequence
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
def test_factory(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__name__, 'Point')
self.assertEqual(Point.__doc__, 'Point(x, y)')
self.assertEqual(Point.__slots__, ())
self.assertEqual(Point.__module__, __name__)
self.assertEqual(Point.__getitem__, tuple.__getitem__)
self.assertEqual(Point._fields, ('x', 'y'))
self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi') # type has non-alpha char
self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi') # type has keyword
self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi') # type starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi') # field with non-alpha char
self.assertRaises(ValueError, namedtuple, 'abc', 'abc class') # field has keyword
self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi') # field starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi') # field with leading underscore
self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi') # duplicate field
namedtuple('Point0', 'x1 y2') # Verify that numbers are allowed in names
namedtuple('_', 'a b c') # Test leading underscores in a typename
nt = namedtuple('nt', u'the quick brown fox') # check unicode input
self.assert_("u'" not in repr(nt._fields))
nt = namedtuple('nt', (u'the', u'quick')) # check unicode input
self.assert_("u'" not in repr(nt._fields))
self.assertRaises(TypeError, Point._make, [11]) # catch too few args
self.assertRaises(TypeError, Point._make, [11, 22, 33]) # catch too many args
def test_instance(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertEqual(p, Point(x=11, y=22))
self.assertEqual(p, Point(11, y=22))
self.assertEqual(p, Point(y=22, x=11))
self.assertEqual(p, Point(*(11, 22)))
self.assertEqual(p, Point(**dict(x=11, y=22)))
self.assertRaises(TypeError, Point, 1) # too few args
self.assertRaises(TypeError, Point, 1, 2, 3) # too many args
self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals()) # wrong keyword argument
self.assertRaises(TypeError, eval, 'Point(x=1)', locals()) # missing keyword argument
self.assertEqual(repr(p), 'Point(x=11, y=22)')
self.assert_('__dict__' not in dir(p)) # verify instance has no dict
self.assert_('__weakref__' not in dir(p))
self.assertEqual(p, Point._make([11, 22])) # test _make classmethod
self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute
self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method
self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
self._fail('Did not detect an incorrect fieldname')
# verify that field string can have commas
Point = namedtuple('Point', 'x, y')
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
# verify that fieldspec can be a non-string sequence
Point = namedtuple('Point', ('x', 'y'))
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
def test_tupleness(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assert_(isinstance(p, tuple))
self.assertEqual(p, (11, 22)) # matches a real tuple
self.assertEqual(tuple(p), (11, 22)) # coercable to a real tuple
self.assertEqual(list(p), [11, 22]) # coercable to a list
self.assertEqual(max(p), 22) # iterable
self.assertEqual(max(*p), 22) # star-able
x, y = p
self.assertEqual(p, (x, y)) # unpacks like a tuple
self.assertEqual((p[0], p[1]), (11, 22)) # indexable like a tuple
self.assertRaises(IndexError, p.__getitem__, 3)
self.assertEqual(p.x, x)
self.assertEqual(p.y, y)
self.assertRaises(AttributeError, eval, 'p.z', locals())
def test_odd_sizes(self):
Zero = namedtuple('Zero', '')
self.assertEqual(Zero(), ())
self.assertEqual(Zero._make([]), ())
self.assertEqual(repr(Zero()), 'Zero()')
self.assertEqual(Zero()._asdict(), {})
self.assertEqual(Zero()._fields, ())
Dot = namedtuple('Dot', 'd')
self.assertEqual(Dot(1), (1,))
self.assertEqual(Dot._make([1]), (1,))
self.assertEqual(Dot(1).d, 1)
self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
self.assertEqual(Dot(1)._asdict(), {'d':1})
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
n = 5000
import string, random
names = list(set(''.join([random.choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
n = len(names)
Big = namedtuple('Big', names)
b = Big(*range(n))
self.assertEqual(b, tuple(range(n)))
self.assertEqual(Big._make(range(n)), tuple(range(n)))
for pos, name in enumerate(names):
self.assertEqual(getattr(b, name), pos)
repr(b) # make sure repr() doesn't blow-up
d = b._asdict()
d_expected = dict(zip(names, range(n)))
self.assertEqual(d, d_expected)
b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
b2_expected = range(n)
b2_expected[1] = 999
b2_expected[-5] = 42
self.assertEqual(b2, tuple(b2_expected))
self.assertEqual(b._fields, tuple(names))
def test_pickle(self):
p = TestNT(x=10, y=20, z=30)
for module in pickle, cPickle:
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in -1, 0, 1, 2:
q = loads(dumps(p, protocol))
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_copy(self):
p = TestNT(x=10, y=20, z=30)
for copier in copy.copy, copy.deepcopy:
q = copier(p)
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_name_conflicts(self):
# Some names like "self", "cls", "tuple", "itemgetter", and "property"
# failed when used as field names. Test to make sure these now work.
T = namedtuple('T', 'itemgetter property self cls tuple')
t = T(1, 2, 3, 4, 5)
self.assertEqual(t, (1,2,3,4,5))
newt = t._replace(itemgetter=10, property=20, self=30, cls=40, tuple=50)
self.assertEqual(newt, (10,20,30,40,50))
# Broader test of all interesting names in a template
with test_support.captured_stdout() as template:
T = namedtuple('T', 'x', verbose=True)
words = set(re.findall('[A-Za-z]+', template.getvalue()))
words -= set(keyword.kwlist)
T = namedtuple('T', words)
# test __new__
values = tuple(range(len(words)))
t = T(*values)
self.assertEqual(t, values)
t = T(**dict(zip(T._fields, values)))
self.assertEqual(t, values)
# test _make
t = T._make(values)
self.assertEqual(t, values)
# exercise __repr__
repr(t)
# test _asdict
self.assertEqual(t._asdict(), dict(zip(T._fields, values)))
# test _replace
t = T._make(values)
newvalues = tuple(v*10 for v in values)
newt = t._replace(**dict(zip(T._fields, newvalues)))
self.assertEqual(newt, newvalues)
# test _fields
self.assertEqual(T._fields, tuple(words))
# test __getnewargs__
self.assertEqual(t.__getnewargs__(), values)
class ABCTestCase(unittest.TestCase):
def validate_abstract_methods(self, abc, *names):
methodstubs = dict.fromkeys(names, lambda s, *args: 0)
# everything should work will all required methods are present
C = type('C', (abc,), methodstubs)
C()
# instantiation should fail if a required method is missing
for name in names:
stubs = methodstubs.copy()
del stubs[name]
C = type('C', (abc,), stubs)
self.assertRaises(TypeError, C, name)
class TestOneTrickPonyABCs(ABCTestCase):
def test_Hashable(self):
# Check some non-hashables
non_samples = [list(), set(), dict()]
for x in non_samples:
self.failIf(isinstance(x, Hashable), repr(x))
self.failIf(issubclass(type(x), Hashable), repr(type(x)))
# Check some hashables
samples = [None,
int(), float(), complex(),
str(),
tuple(), frozenset(),
int, list, object, type,
]
for x in samples:
self.failUnless(isinstance(x, Hashable), repr(x))
self.failUnless(issubclass(type(x), Hashable), repr(type(x)))
self.assertRaises(TypeError, Hashable)
# Check direct subclassing
class H(Hashable):
def __hash__(self):
return super(H, self).__hash__()
__eq__ = Hashable.__eq__ # Silence Py3k warning
self.assertEqual(hash(H()), 0)
self.failIf(issubclass(int, H))
self.validate_abstract_methods(Hashable, '__hash__')
def test_Iterable(self):
# Check some non-iterables
non_samples = [None, 42, 3.14, 1j]
for x in non_samples:
self.failIf(isinstance(x, Iterable), repr(x))
self.failIf(issubclass(type(x), Iterable), repr(type(x)))
# Check some iterables
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.failUnless(isinstance(x, Iterable), repr(x))
self.failUnless(issubclass(type(x), Iterable), repr(type(x)))
# Check direct subclassing
class I(Iterable):
def __iter__(self):
return super(I, self).__iter__()
self.assertEqual(list(I()), [])
self.failIf(issubclass(str, I))
self.validate_abstract_methods(Iterable, '__iter__')
def test_Iterator(self):
non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "", (), [],
{}, set()]
for x in non_samples:
self.failIf(isinstance(x, Iterator), repr(x))
self.failIf(issubclass(type(x), Iterator), repr(type(x)))
samples = [iter(str()),
iter(tuple()), iter(list()), iter(dict()),
iter(set()), iter(frozenset()),
iter(dict().keys()), iter(dict().items()),
iter(dict().values()),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.failUnless(isinstance(x, Iterator), repr(x))
self.failUnless(issubclass(type(x), Iterator), repr(type(x)))
self.validate_abstract_methods(Iterator, 'next')
def test_Sized(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.failIf(isinstance(x, Sized), repr(x))
self.failIf(issubclass(type(x), Sized), repr(type(x)))
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
]
for x in samples:
self.failUnless(isinstance(x, Sized), repr(x))
self.failUnless(issubclass(type(x), Sized), repr(type(x)))
self.validate_abstract_methods(Sized, '__len__')
def test_Container(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.failIf(isinstance(x, Container), repr(x))
self.failIf(issubclass(type(x), Container), repr(type(x)))
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(),
]
for x in samples:
self.failUnless(isinstance(x, Container), repr(x))
self.failUnless(issubclass(type(x), Container), repr(type(x)))
self.validate_abstract_methods(Container, '__contains__')
def test_Callable(self):
non_samples = [None, 42, 3.14, 1j,
"", "".encode('ascii'), (), [], {}, set(),
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.failIf(isinstance(x, Callable), repr(x))
self.failIf(issubclass(type(x), Callable), repr(type(x)))
samples = [lambda: None,
type, int, object,
len,
list.append, [].append,
]
for x in samples:
self.failUnless(isinstance(x, Callable), repr(x))
self.failUnless(issubclass(type(x), Callable), repr(type(x)))
self.validate_abstract_methods(Callable, '__call__')
def test_direct_subclassing(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C(B):
pass
self.failUnless(issubclass(C, B))
self.failIf(issubclass(int, C))
def test_registration(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C:
__metaclass__ = type
__hash__ = None # Make sure it isn't hashable by default
self.failIf(issubclass(C, B), B.__name__)
B.register(C)
self.failUnless(issubclass(C, B))
class WithSet(MutableSet):
def __init__(self, it=()):
self.data = set(it)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __contains__(self, item):
return item in self.data
def add(self, item):
self.data.add(item)
def discard(self, item):
self.data.discard(item)
class TestCollectionABCs(ABCTestCase):
# XXX For now, we only test some virtual inheritance properties.
# We should also test the proper behavior of the collection ABCs
# as real base classes or mix-in classes.
def test_Set(self):
for sample in [set, frozenset]:
self.failUnless(isinstance(sample(), Set))
self.failUnless(issubclass(sample, Set))
self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')
def test_hash_Set(self):
class OneTwoThreeSet(Set):
def __init__(self):
self.contents = [1, 2, 3]
def __contains__(self, x):
return x in self.contents
def __len__(self):
return len(self.contents)
def __iter__(self):
return iter(self.contents)
def __hash__(self):
return self._hash()
a, b = OneTwoThreeSet(), OneTwoThreeSet()
self.failUnless(hash(a) == hash(b))
def test_MutableSet(self):
self.failUnless(isinstance(set(), MutableSet))
self.failUnless(issubclass(set, MutableSet))
self.failIf(isinstance(frozenset(), MutableSet))
self.failIf(issubclass(frozenset, MutableSet))
self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
'add', 'discard')
def test_issue_5647(self):
# MutableSet.__iand__ mutated the set during iteration
s = WithSet('abcd')
s &= WithSet('cdef') # This used to fail
self.assertEqual(set(s), set('cd'))
def test_issue_4920(self):
# MutableSet.pop() method did not work
class MySet(collections.MutableSet):
__slots__=['__s']
def __init__(self,items=None):
if items is None:
items=[]
self.__s=set(items)
def __contains__(self,v):
return v in self.__s
def __iter__(self):
return iter(self.__s)
def __len__(self):
return len(self.__s)
def add(self,v):
result=v not in self.__s
self.__s.add(v)
return result
def discard(self,v):
result=v in self.__s
self.__s.discard(v)
return result
def __repr__(self):
return "MySet(%s)" % repr(list(self))
s = MySet([5,43,2,1])
self.assertEqual(s.pop(), 1)
def test_Mapping(self):
for sample in [dict]:
self.failUnless(isinstance(sample(), Mapping))
self.failUnless(issubclass(sample, Mapping))
self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
'__getitem__')
def test_MutableMapping(self):
for sample in [dict]:
self.failUnless(isinstance(sample(), MutableMapping))
self.failUnless(issubclass(sample, MutableMapping))
self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',
'__getitem__', '__setitem__', '__delitem__')
def test_Sequence(self):
for sample in [tuple, list, str]:
self.failUnless(isinstance(sample(), Sequence))
self.failUnless(issubclass(sample, Sequence))
self.failUnless(issubclass(basestring, Sequence))
self.failUnless(isinstance(range(10), Sequence))
self.failUnless(issubclass(xrange, Sequence))
self.failUnless(issubclass(str, Sequence))
self.validate_abstract_methods(Sequence, '__contains__', '__iter__', '__len__',
'__getitem__')
def test_MutableSequence(self):
for sample in [tuple, str]:
self.failIf(isinstance(sample(), MutableSequence))
self.failIf(issubclass(sample, MutableSequence))
for sample in [list]:
self.failUnless(isinstance(sample(), MutableSequence))
self.failUnless(issubclass(sample, MutableSequence))
self.failIf(issubclass(basestring, MutableSequence))
self.validate_abstract_methods(MutableSequence, '__contains__', '__iter__',
'__len__', '__getitem__', '__setitem__', '__delitem__', 'insert')
import doctest, collections
def test_main(verbose=None):
NamedTupleDocs = doctest.DocTestSuite(module=collections)
test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs, TestCollectionABCs]
test_support.run_unittest(*test_classes)
test_support.run_doctest(collections, verbose)
if __name__ == "__main__":
test_main(verbose=True)
| 41.768443
| 106
| 0.558897
|
4a0690841b6c76b93c190c4587b3d3bb661c9fe0
| 8,065
|
py
|
Python
|
src/00template/convex_adversarial/dual_inputs.py
|
5loaves-2fish-12basckets/ADF_studies
|
ea2a8eaebf994350e32501ddfc76258aa89bd880
|
[
"MIT"
] | 1
|
2019-02-06T07:53:15.000Z
|
2019-02-06T07:53:15.000Z
|
src/00template/convex_adversarial/dual_inputs.py
|
5loaves-2fish-12basckets/ADF_studies
|
ea2a8eaebf994350e32501ddfc76258aa89bd880
|
[
"MIT"
] | null | null | null |
src/00template/convex_adversarial/dual_inputs.py
|
5loaves-2fish-12basckets/ADF_studies
|
ea2a8eaebf994350e32501ddfc76258aa89bd880
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from .dual import DualObject
def select_input(X, epsilon, proj, norm, bounded_input):
if proj is not None and norm=='l1_median' and X[0].numel() > proj:
if bounded_input:
return InfBallProjBounded(X,epsilon,proj)
else:
return InfBallProj(X,epsilon,proj)
elif norm == 'l1':
if bounded_input:
return InfBallBounded(X, epsilon)
else:
return InfBall(X, epsilon)
elif proj is not None and norm=='l2_normal' and X[0].numel() > proj:
return L2BallProj(X,epsilon,proj)
elif norm == 'l2':
return L2Ball(X,epsilon)
else:
raise ValueError("Unknown estimation type: {}".format(norm))
class InfBall(DualObject):
def __init__(self, X, epsilon):
super(InfBall, self).__init__()
self.epsilon = epsilon
n = X[0].numel()
self.nu_x = [X]
self.nu_1 = [X.new(n,n)]
torch.eye(n, out=self.nu_1[0])
self.nu_1[0] = self.nu_1[0].view(-1,*X.size()[1:]).unsqueeze(0)
def apply(self, dual_layer):
self.nu_x.append(dual_layer(*self.nu_x))
self.nu_1.append(dual_layer(*self.nu_1))
def bounds(self, network=None):
if network is None:
nu_1 = self.nu_1[-1]
nu_x = self.nu_x[-1]
else:
nu_1 = network(self.nu_1[0])
nu_x = network(self.nu_x[0])
epsilon = self.epsilon
l1 = nu_1.abs().sum(1)
if isinstance(epsilon, torch.Tensor):
while epsilon.dim() < nu_x.dim():
epsilon = epsilon.unsqueeze(1)
return (nu_x - epsilon*l1,
nu_x + epsilon*l1)
def objective(self, *nus):
epsilon = self.epsilon
nu = nus[-1]
nu = nu.view(nu.size(0), nu.size(1), -1)
nu_x = nu.matmul(self.nu_x[0].view(self.nu_x[0].size(0),-1).unsqueeze(2)).squeeze(2)
if isinstance(self.epsilon, torch.Tensor):
while epsilon.dim() < nu.dim()-1:
epsilon = epsilon.unsqueeze(1)
l1 = epsilon*nu.abs().sum(2)
return -nu_x - l1
class InfBallBounded(DualObject):
def __init__(self, X, epsilon, l=0, u=1):
super(InfBallBounded, self).__init__()
self.epsilon = epsilon
self.l = (X-epsilon).clamp(min=l).view(X.size(0), 1, -1)
self.u = (X+epsilon).clamp(max=u).view(X.size(0), 1, -1)
n = X[0].numel()
self.nu_x = [X]
self.nu_1 = [X.new(n,n)]
torch.eye(n, out=self.nu_1[0])
self.nu_1[0] = self.nu_1[0].view(-1,*X.size()[1:]).unsqueeze(0)
def apply(self, dual_layer):
self.nu_x.append(dual_layer(*self.nu_x))
self.nu_1.append(dual_layer(*self.nu_1))
def bounds(self, network=None):
if network is None:
nu = self.nu_1[-1]
else:
nu = network(self.nu_1[0])
nu_pos = nu.clamp(min=0).view(nu.size(0), nu.size(1), -1)
nu_neg = nu.clamp(max=0).view(nu.size(0), nu.size(1), -1)
zu = (self.u.matmul(nu_pos) + self.l.matmul(nu_neg)).squeeze(1)
zl = (self.u.matmul(nu_neg) + self.l.matmul(nu_pos)).squeeze(1)
return (zl.view(zl.size(0), *nu.size()[2:]),
zu.view(zu.size(0), *nu.size()[2:]))
def objective(self, *nus):
nu = nus[-1]
nu_pos = nu.clamp(min=0).view(nu.size(0), nu.size(1), -1)
nu_neg = nu.clamp(max=0).view(nu.size(0), nu.size(1), -1)
u, l = self.u.unsqueeze(3).squeeze(1), self.l.unsqueeze(3).squeeze(1)
return (-nu_neg.matmul(l) - nu_pos.matmul(u)).squeeze(2)
class InfBallProj(InfBall):
def __init__(self, X, epsilon, k):
DualObject.__init__(self)
self.epsilon = epsilon
n = X[0].numel()
self.nu_x = [X]
self.nu = [X.new(1,k,*X.size()[1:]).cauchy_()]
def apply(self, dual_layer):
self.nu_x.append(dual_layer(*self.nu_x))
self.nu.append(dual_layer(*self.nu))
def bounds(self, network=None):
if network is None:
nu = self.nu[-1]
nu_x = self.nu_x[-1]
else:
nu = network(self.nu[0])
nu_x = network(self.nu_x[0])
l1 = torch.median(self.nu[-1].abs(), 1)[0]
return (nu_x - self.epsilon*l1,
nu_x + self.epsilon*l1)
class InfBallProjBounded(InfBallProj):
def __init__(self, X, epsilon, k, l=0, u=1):
self.epsilon = epsilon
self.nu_one_l = [(X-epsilon).clamp(min=l)]
self.nu_one_u = [(X+epsilon).clamp(max=u)]
self.nu_x = [X]
self.l = self.nu_one_l[-1].view(X.size(0), 1, -1)
self.u = self.nu_one_u[-1].view(X.size(0), 1, -1)
n = X[0].numel()
R = X.new(1,k,*X.size()[1:]).cauchy_()
self.nu_l = [R * self.nu_one_l[-1].unsqueeze(1)]
self.nu_u = [R * self.nu_one_u[-1].unsqueeze(1)]
def apply(self, dual_layer):
self.nu_l.append(dual_layer(*self.nu_l))
self.nu_one_l.append(dual_layer(*self.nu_one_l))
self.nu_u.append(dual_layer(*self.nu_u))
self.nu_one_u.append(dual_layer(*self.nu_one_u))
def bounds(self, network=None):
if network is None:
nu_u = self.nu_u[-1]
nu_one_u = self.nu_one_u[-1]
nu_l = self.nu_l[-1]
nu_one_l = self.nu_one_l[-1]
else:
nu_u = network(self.nu_u[0])
nu_one_u = network(self.nu_one_u[0])
nu_l = network(self.nu_l[0])
nu_one_l = network(self.nu_one_l[0])
nu_l1_u = torch.median(nu_u.abs(),1)[0]
nu_pos_u = (nu_l1_u + nu_one_u)/2
nu_neg_u = (-nu_l1_u + nu_one_u)/2
nu_l1_l = torch.median(nu_l.abs(),1)[0]
nu_pos_l = (nu_l1_l + nu_one_l)/2
nu_neg_l = (-nu_l1_l + nu_one_l)/2
zu = nu_pos_u + nu_neg_l
zl = nu_neg_u + nu_pos_l
return zl,zu
# L2 balls
class L2Ball(DualObject):
def __init__(self, X, epsilon):
super(L2Ball, self).__init__()
self.epsilon = epsilon
n = X[0].numel()
self.nu_x = [X]
self.nu_1 = [X.new(n,n)]
torch.eye(n, out=self.nu_1[0])
self.nu_1[0] = self.nu_1[0].view(-1,*X.size()[1:]).unsqueeze(0)
def apply(self, dual_layer):
self.nu_x.append(dual_layer(*self.nu_x))
self.nu_1.append(dual_layer(*self.nu_1))
def bounds(self, network=None):
if network is None:
nu_1 = self.nu_1[-1]
nu_x = self.nu_x[-1]
else:
nu_1 = network(self.nu_1[0])
nu_x = network(self.nu_x[0])
epsilon = self.epsilon
l2 = nu_1.norm(2, 1)
if isinstance(epsilon, torch.Tensor):
while epsilon.dim() < nu_x.dim():
epsilon = epsilon.unsqueeze(1)
return (nu_x - epsilon*l2,
nu_x + epsilon*l2)
def objective(self, *nus):
epsilon = self.epsilon
nu = nus[-1]
nu = nu.view(nu.size(0), nu.size(1), -1)
nu_x = nu.matmul(self.nu_x[0].view(self.nu_x[0].size(0),-1).unsqueeze(2)).squeeze(2)
if isinstance(self.epsilon, torch.Tensor):
while epsilon.dim() < nu.dim()-1:
epsilon = epsilon.unsqueeze(1)
l2 = nu.norm(2,2)
return -nu_x - epsilon*l2
class L2BallProj(L2Ball):
def __init__(self, X, epsilon, k):
DualObject.__init__(self)
self.epsilon = epsilon
n = X[0].numel()
self.nu_x = [X]
self.nu = [X.new(1,k,*X.size()[1:]).normal_()]
def apply(self, dual_layer):
self.nu_x.append(dual_layer(*self.nu_x))
self.nu.append(dual_layer(*self.nu))
def bounds(self, network=None):
if network is None:
nu = self.nu[-1]
nu_x = self.nu_x[-1]
else:
nu = network(self.nu[0])
nu_x = network(self.nu_x[0])
k = nu.size(1)
l2 = nu.norm(2, 1)/(k**0.5)
return (nu_x - self.epsilon*l2,
nu_x + self.epsilon*l2)
| 32.520161
| 92
| 0.543087
|
4a06916edb0c7aef15d06151dfb6a56ed97813e1
| 7,056
|
py
|
Python
|
frappe/tests/test_twofactor.py
|
AKedar21/frappe
|
4c9ce1701caea07e595f81414af3a9f219cccb65
|
[
"MIT"
] | 2
|
2017-08-24T20:25:13.000Z
|
2017-10-15T13:14:31.000Z
|
frappe/tests/test_twofactor.py
|
AKedar21/frappe
|
4c9ce1701caea07e595f81414af3a9f219cccb65
|
[
"MIT"
] | 19
|
2018-04-17T09:09:02.000Z
|
2020-11-17T08:06:25.000Z
|
frappe/tests/test_twofactor.py
|
AKedar21/frappe
|
4c9ce1701caea07e595f81414af3a9f219cccb65
|
[
"MIT"
] | 3
|
2019-08-09T17:52:18.000Z
|
2020-07-29T08:23:46.000Z
|
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest, frappe, pyotp
from werkzeug.wrappers import Request
from werkzeug.test import EnvironBuilder
from frappe.auth import HTTPRequest
from frappe.utils import cint
from frappe.twofactor import (should_run_2fa, authenticate_for_2factor, get_cached_user_pass,
two_factor_is_enabled_for_, confirm_otp_token, get_otpsecret_for_, get_verification_obj,
render_string_template, two_factor_is_enabled)
import time
class TestTwoFactor(unittest.TestCase):
def setUp(self):
self.http_requests = create_http_request()
self.login_manager = frappe.local.login_manager
self.user = self.login_manager.user
def tearDown(self):
frappe.local.response['verification'] = None
frappe.local.response['tmp_id'] = None
disable_2fa()
frappe.clear_cache(user=self.user)
def test_should_run_2fa(self):
'''Should return true if enabled.'''
toggle_2fa_all_role(state=True)
self.assertTrue(should_run_2fa(self.user))
toggle_2fa_all_role(state=False)
self.assertFalse(should_run_2fa(self.user))
def test_get_cached_user_pass(self):
'''Cached data should not contain user and pass before 2fa.'''
user,pwd = get_cached_user_pass()
self.assertTrue(all([not user, not pwd]))
def test_authenticate_for_2factor(self):
'''Verification obj and tmp_id should be set in frappe.local.'''
authenticate_for_2factor(self.user)
verification_obj = frappe.local.response['verification']
tmp_id = frappe.local.response['tmp_id']
self.assertTrue(verification_obj)
self.assertTrue(tmp_id)
for k in ['_usr','_pwd','_otp_secret']:
self.assertTrue(frappe.cache().get('{0}{1}'.format(tmp_id,k)),
'{} not available'.format(k))
def test_two_factor_is_enabled(self):
'''
1. Should return true, if enabled and not bypass_2fa_for_retricted_ip_users
2. Should return false, if not enabled
3. Should return true, if enabled and not bypass_2fa_for_retricted_ip_users and ip in restrict_ip
4. Should return true, if enabled and bypass_2fa_for_retricted_ip_users and not restrict_ip
5. Should return false, if enabled and bypass_2fa_for_retricted_ip_users and ip in restrict_ip
'''
#Scenario 1
enable_2fa()
self.assertTrue(should_run_2fa(self.user))
#Scenario 2
disable_2fa()
self.assertFalse(should_run_2fa(self.user))
#Scenario 3
enable_2fa()
user = frappe.get_doc('User', self.user)
user.restrict_ip = frappe.local.request_ip
user.save()
self.assertTrue(should_run_2fa(self.user))
#Scenario 4
user = frappe.get_doc('User', self.user)
user.restrict_ip = ""
user.save()
enable_2fa(1)
self.assertTrue(should_run_2fa(self.user))
#Scenario 5
user = frappe.get_doc('User', self.user)
user.restrict_ip = frappe.local.request_ip
user.save()
enable_2fa(1)
self.assertFalse(should_run_2fa(self.user))
def test_two_factor_is_enabled_for_user(self):
'''Should return true if enabled for user.'''
toggle_2fa_all_role(state=True)
self.assertTrue(two_factor_is_enabled_for_(self.user))
self.assertFalse(two_factor_is_enabled_for_("Administrator"))
toggle_2fa_all_role(state=False)
self.assertFalse(two_factor_is_enabled_for_(self.user))
def test_get_otpsecret_for_user(self):
'''OTP secret should be set for user.'''
self.assertTrue(get_otpsecret_for_(self.user))
self.assertTrue(frappe.db.get_default(self.user + '_otpsecret'))
def test_confirm_otp_token(self):
'''Ensure otp is confirmed'''
authenticate_for_2factor(self.user)
tmp_id = frappe.local.response['tmp_id']
otp = 'wrongotp'
with self.assertRaises(frappe.AuthenticationError):
confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
otp = get_otp(self.user)
self.assertTrue(confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id))
if frappe.flags.tests_verbose:
print('Sleeping for 30secs to confirm token expires..')
time.sleep(30)
with self.assertRaises(frappe.AuthenticationError):
confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
def test_get_verification_obj(self):
'''Confirm verification object is returned.'''
otp_secret = get_otpsecret_for_(self.user)
token = int(pyotp.TOTP(otp_secret).now())
self.assertTrue(get_verification_obj(self.user,token,otp_secret))
def test_render_string_template(self):
'''String template renders as expected with variables.'''
args = {'issuer_name':'Frappe Technologies'}
_str = 'Verification Code from {{issuer_name}}'
_str = render_string_template(_str,args)
self.assertEqual(_str,'Verification Code from Frappe Technologies')
def test_bypass_restict_ip(self):
'''
1. Raise error if user not login from one of the restrict_ip, Bypass restrict ip check disabled by default
2. Bypass restrict ip check enabled in System Settings
3. Bypass restrict ip check enabled for User
'''
#1
user = frappe.get_doc('User', self.user)
user.restrict_ip = "192.168.255.254" #Dummy IP
user.bypass_restrict_ip_check_if_2fa_enabled = 0
user.save()
enable_2fa(bypass_restrict_ip_check=0)
with self.assertRaises(frappe.AuthenticationError):
self.login_manager.validate_ip_address()
#2
enable_2fa(bypass_restrict_ip_check=1)
self.assertIsNone(self.login_manager.validate_ip_address())
#3
user = frappe.get_doc('User', self.user)
user.bypass_restrict_ip_check_if_2fa_enabled = 1
user.save()
enable_2fa()
self.assertIsNone(self.login_manager.validate_ip_address())
def set_request(**kwargs):
builder = EnvironBuilder(**kwargs)
frappe.local.request = Request(builder.get_environ())
def create_http_request():
'''Get http request object.'''
set_request(method='POST', path='login')
enable_2fa()
frappe.form_dict['usr'] = 'test@erpnext.com'
frappe.form_dict['pwd'] = 'test'
frappe.local.form_dict['cmd'] = 'login'
http_requests = HTTPRequest()
return http_requests
def enable_2fa(bypass_two_factor_auth=0, bypass_restrict_ip_check=0):
    '''Enable Two factor in system settings.'''
    settings = frappe.get_doc('System Settings')
    settings.enable_two_factor_auth = 1
    settings.bypass_2fa_for_retricted_ip_users = cint(bypass_two_factor_auth)
    settings.bypass_restrict_ip_check_if_2fa_enabled = cint(bypass_restrict_ip_check)
    settings.two_factor_method = 'OTP App'
    settings.save(ignore_permissions=True)
    frappe.db.commit()
def disable_2fa():
    """Switch off two factor auth in System Settings and commit."""
    settings = frappe.get_doc('System Settings')
    settings.enable_two_factor_auth = 0
    settings.save(ignore_permissions=True)
    frappe.db.commit()
def toggle_2fa_all_role(state=None):
    '''Enable or disable 2fa for 'all' role on the system.

    state -- True/False to force a value; None toggles the current value.
    '''
    all_role = frappe.get_doc('Role', 'All')
    if state is None:
        # BUG FIX: the original expression evaluated to False in both
        # branches ("False if ... else False"), so toggling could never
        # enable 2FA. Invert the current value instead.
        state = not all_role.two_factor_auth
    if state not in [True, False]:
        return
    all_role.two_factor_auth = cint(state)
    all_role.save(ignore_permissions=True)
    frappe.db.commit()
def get_otp(user):
    """Return the current TOTP token for *user*."""
    secret = get_otpsecret_for_(user)
    return pyotp.TOTP(secret).now()
| 35.104478
| 108
| 0.773668
|
4a069196f94e33007be8a96303aeca442e6bfe3c
| 9,459
|
py
|
Python
|
assets/InstagramPy/InstagramPyCLI.py
|
BHUTUU/instagram-bruteforce
|
bc418ef5b1e8987fd5f70ae2202a6ac82e863271
|
[
"MIT"
] | null | null | null |
assets/InstagramPy/InstagramPyCLI.py
|
BHUTUU/instagram-bruteforce
|
bc418ef5b1e8987fd5f70ae2202a6ac82e863271
|
[
"MIT"
] | null | null | null |
assets/InstagramPy/InstagramPyCLI.py
|
BHUTUU/instagram-bruteforce
|
bc418ef5b1e8987fd5f70ae2202a6ac82e863271
|
[
"MIT"
] | null | null | null |
import datetime
import sys
from InstagramPy import AppInfo
from .colors import *
class InstagramPyCLI():
    """Console front-end for InstagramPy.

    Prints colored banners, per-attempt progress, raw request/response
    dumps, and the final result to stdout. Output mixes colorama names
    (Fore/Back/Style) with raw ANSI escape sequences.
    """

    # Class-level defaults; real values are assigned per instance in __init__.
    username = None   # target account name
    started = None    # datetime the run started (used for elapsed time)
    verbose = 0       # 0..3; controls how much PrintProgress() shows
    pService = None   # optional portable-service handle used by ReportError

    def __init__(self, appinfo, started, verbose_level, username, PortableService=None):
        """Capture run parameters and pre-render the colored header banner.

        appinfo         -- dict with name/version/description/year/company/author keys
        started         -- start timestamp
        verbose_level   -- verbosity, coerced to int (resets to 0 on failure)
        username        -- target username; empty/None aborts via ReportError
        PortableService -- optional service terminated on fatal errors
        """
        self.pService = PortableService
        try:
            self.verbose = int(verbose_level)
            self.started = started
            self.username = username
            if not appinfo == None:
                # NOTE(review): no-op self-assignment; presumably meant to
                # fall back to AppInfo.appInfo when appinfo is None — confirm.
                appinfo = appinfo
        except:
            # Any failure above (e.g. non-numeric verbose_level) resets to
            # defaults and the bundled application info.
            self.verbose = 0
            self.started = started
            appinfo = AppInfo.appInfo
        if username == None or username == '':
            self.ReportError("username not provided!")
        else:
            self.username = username
        # Banner rendered once here and reused by PrintHeader().
        self.HEADER = "{} {} , {}.\n\033[1;32mLet's hit and try the password! {} {} , {}.\n".format(appinfo['name'],
                                                                                                    appinfo['version'],
                                                                                                    appinfo['description'],
                                                                                                    appinfo['year'],
                                                                                                    appinfo['company'],
                                                                                                    appinfo['author'])
        self.HEADER = Fore.MAGENTA + self.HEADER + Style.RESET_ALL

    def ReportError(self, error):
        """Print a fatal error, stop the portable service if active, exit(-1)."""
        if self.pService is not None:
            if self.pService.isSetInstagramPyPortable():
                self.pService.terminate()
        print('{}{}\033[1;32m[\033[1;31m!\033[1;32m]\033[1;34mError::{} {}'.format(
            Style.BRIGHT, Fore.RED, Style.RESET_ALL, error))
        sys.exit(-1)

    def PrintHeader(self):
        """Print the pre-rendered application banner."""
        print(self.HEADER)
        return True

    def PrintDatetime(self):
        """Print the timestamp the run started at."""
        print('{}[{}+{}{}]{} {}\033[1;33mStarted{} @ {}\033[0m'.format(Style.BRIGHT,
                                                                       Fore.YELLOW,
                                                                       Style.RESET_ALL,
                                                                       Style.BRIGHT,
                                                                       Style.RESET_ALL,
                                                                       Fore.MAGENTA,
                                                                       Style.RESET_ALL + Fore.YELLOW,
                                                                       str(self.started) +
                                                                       Style.RESET_ALL
                                                                       ))
        return True

    def PrintChangingIP(self):
        """Announce that the outgoing IP address is being rotated."""
        print('\033[1;31m[{}*{}\033[1;31m] {}\033[1;35mChanging IP Address... {}\033[0m'.format(Fore.YELLOW,
                                                                                                Style.RESET_ALL, Fore.GREEN, Style.RESET_ALL))
        return True

    def PrintIPAddress(self, ip):
        """Print the current outgoing IP address."""
        print('\033[1;34m[{}\033[1;32m+{}\033[1;34m] {}\033[1;37mCurrent IP{} :: {}{}{}\033[0m'.format(Fore.RED,
                                                                                                       Style.RESET_ALL,
                                                                                                       Fore.YELLOW,
                                                                                                       Style.RESET_ALL,
                                                                                                       Style.BRIGHT,
                                                                                                       str(ip),
                                                                                                       Style.RESET_ALL
                                                                                                       ))
        return True

    def PrintPassword(self, password):
        """Print the password currently being attempted."""
        print('\033[1;32m[{}\033[1;35m+{}\033[1;32m] {}\033[1;32mTrying [FOR] @{} {} :: {}{}{}\033[0m'.format(Fore.GREEN,
                                                                                                              Style.RESET_ALL,
                                                                                                              Fore.CYAN,
                                                                                                              self.username,
                                                                                                              Style.RESET_ALL,
                                                                                                              Style.BRIGHT,
                                                                                                              password,
                                                                                                              Style.RESET_ALL
                                                                                                              ))
        return True

    def PrintRequest(self, req):
        """Dump a prepared HTTP request (method, URL, headers, body)."""
        print('\n\033[1;32m[{}\033[1;31m-{}\033[1;32m] --:: {}REQUEST START -> @{} {} ::--\033[0m'.format(Fore.MAGENTA,
                                                                                                          Style.RESET_ALL, Back.CYAN + Style.BRIGHT, self.username, Style.RESET_ALL))
        print('{}{}{} {}{}{}'.format(Fore.GREEN, req.method,
                                     Style.RESET_ALL, Style.BRIGHT, req.url, Style.RESET_ALL))
        print('{}{}{}'.format(Fore.YELLOW, '\n'.join('{}: {}'.format(k, v)
                                                     for k, v in req.headers.items()), Style.RESET_ALL))
        print('{}{}{}'.format(Style.BRIGHT, req.body, Style.RESET_ALL))
        print('\033[1;32m[{}\033[1;31m+{}\033[1;32m] --:: {}REQUEST END{} ::--\033[0m'.format(Fore.GREEN,
                                                                                              Style.RESET_ALL, Back.GREEN + Style.BRIGHT, Style.RESET_ALL))
        return True

    def PrintResponse(self, resp):
        """Dump a received HTTP response."""
        print('\033[1;36m\n[{}\033[1;33m!-!{}\033[1;36m] --:: {}\033[1;36mRESPONSE START -> @{} {} \033[1;31m::--\033[0m'.format(Fore.MAGENTA,
                                                                                                                                 Style.RESET_ALL, Back.CYAN + Style.BRIGHT, self.username, Style.RESET_ALL))
        print('{}{}{}'.format(Style.BRIGHT, str(resp), Style.RESET_ALL))
        print('\033[1;34m[{}\033[1;32m+{}\033[1;34m]\033[1;35m --:: {}\033[1;31mRESPONSE END{} \033[1;34m::--\033[0m'.format(Fore.GREEN,
                                                                                                                             Style.RESET_ALL, Back.GREEN + Style.BRIGHT, Style.RESET_ALL))
        return True

    def PrintProgress(self, password, ip, request, response):
        """Print per-attempt progress; detail grows with self.verbose (0..3).

        0: password only; 1: + response; 2: + response and IP;
        3+: + full request, response and IP.
        """
        if self.verbose == 0:
            self.PrintPassword(password)
        elif self.verbose == 1:
            self.PrintPassword(password)
            self.PrintResponse(response)
        elif self.verbose == 2:
            self.PrintPassword(password)
            self.PrintResponse(response)
            self.PrintIPAddress(ip)
        else:
            self.PrintPassword(password)
            self.PrintRequest(request)
            self.PrintResponse(response)
            self.PrintIPAddress(ip)
        return True

    def ReportAttack(self, password):
        """Print the final outcome (found password or failure) and elapsed time.

        password -- the recovered password, or None when nothing matched.
        """
        print('\n\033[1;32m[\033[1;35m{}+{}\033[1;32m] --:: {}\033[1;32mCompleted -> @{} {} \033[1;33m::--\033[0m'.format(Fore.YELLOW,
                                                                                                                          Style.RESET_ALL, Back.YELLOW + Style.BRIGHT, self.username, Style.RESET_ALL),
              end='')
        if not password == None:
            print('{}\033[1;33m[{}\033[1;31m✓{}{}\033[1;33m]{} {}\033[1;33mPassword Found!{} :: {}\033[0m'.format(Style.BRIGHT,
                                                                                                                  Fore.RED,
                                                                                                                  Style.RESET_ALL,
                                                                                                                  Style.BRIGHT,
                                                                                                                  Style.RESET_ALL,
                                                                                                                  Fore.CYAN,
                                                                                                                  Style.RESET_ALL + Style.BRIGHT + Fore.GREEN,
                                                                                                                  password + Style.RESET_ALL
                                                                                                                  ))
        else:
            print('{}\033[1;32m[\033[1;31m!!{}\033[1;32m]\033[1;31mPassword not found , Try using another wordlist.{}\033[0m'.format(
                Style.BRIGHT, Fore.RED, Style.RESET_ALL))
        print('{}\033[1;31m[{}\033[1;35m+{}{}\033[31m]{} {}\033[1;32mFinnished in {}{}\033[0m'.format(Style.BRIGHT,
                                                                                                      Fore.YELLOW,
                                                                                                      Style.RESET_ALL,
                                                                                                      Style.BRIGHT,
                                                                                                      Style.RESET_ALL,
                                                                                                      Fore.MAGENTA,
                                                                                                      Style.RESET_ALL + Fore.YELLOW,
                                                                                                      str(datetime.datetime.now(
                                                                                                      ) - self.started) + Style.RESET_ALL
                                                                                                      ))
        return True

    def PrintFooter(self):
        """Print the project link footer."""
        print('\n{}\033[1;35mGithub:->>{}{}\033[1;31mhttps://github.com/BHUTUU/IG-BHUTUU{}\033[0mm'.format(Fore.GREEN,
                                                                                                           Style.RESET_ALL,
                                                                                                           Style.BRIGHT,
                                                                                                           Style.RESET_ALL
                                                                                                           ))
        return True
| 57.327273
| 145
| 0.367904
|
4a06928bbec8eed2ddd2c5150109c96e887800d0
| 6,851
|
py
|
Python
|
calcloud/plan.py
|
bhayden53/calcloud
|
7478737d0da1218d1ced87787bede201993053aa
|
[
"BSD-3-Clause"
] | 1
|
2021-03-11T22:31:59.000Z
|
2021-03-11T22:31:59.000Z
|
calcloud/plan.py
|
bhayden53/calcloud
|
7478737d0da1218d1ced87787bede201993053aa
|
[
"BSD-3-Clause"
] | null | null | null |
calcloud/plan.py
|
bhayden53/calcloud
|
7478737d0da1218d1ced87787bede201993053aa
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module is used to define job plans using the high level function
get_plan().
get_plan() returns a named tuple specifying all the information needed to
submit a job.
Based on a memory_retries counter, get_plan() iterates through a sequence
of job definitions with increasing memory requirements until the job later
succeeds with sufficient memory or exhausts all retries.
"""
import sys
import os
from collections import namedtuple
from . import hst
from . import log
from . import s3
from . import common
import json
import boto3
# Shared Lambda client, reused across invocations; retry behavior comes from
# the common module's config.
client = boto3.client("lambda", config=common.retry_config)

# ----------------------------------------------------------------------
# Abstract per-job requirements, independent of the Batch infrastructure
# that will satisfy them.
JobResources = namedtuple(
    "JobResources",
    [
        "ipppssoot",
        "instrument",
        "job_name",
        "s3_output_uri",
        "input_path",
        "crds_config",
        "initial_modeled_bin",
        "max_seconds",
    ],
)
# Concrete AWS Batch placement for a job.
JobEnv = namedtuple("JobEnv", ("job_queue", "job_definition", "command"))
# Full submission plan: resources followed by environment fields.
Plan = namedtuple("Plan", JobResources._fields + JobEnv._fields)
class AllBinsTriedQuit(Exception):
    """Raised when a memory retry is requested but every memory bin has already been tried."""
# ----------------------------------------------------------------------
# This is the top level entrypoint called from calcloud.lambda_submit.main
# It returns a Plan() tuple which is passed to the submit function.
#
# It's the expectation that most/all of this file will be re-written during
# the integration of new memory requirements modelling and new AWS Batch
# infrastructure allocation strategies. The signature of the get_plan()
# function is the main thing to worry about changing externally.
def get_plan(ipppssoot, output_bucket, input_path, memory_retries=0):
    """Map a job's requirements onto the Batch infrastructure needed to run it.

    ipppssoot       -- dataset ID to plan
    output_bucket   -- S3 output bucket, top level
    input_path      -- where the job's inputs come from
    memory_retries  -- 0-based retry counter; each retry selects a larger
                       memory bin (max retries are configured in Terraform)

    Returns a Plan named tuple combining resources and environment.
    """
    resources = _get_resources(ipppssoot, output_bucket, input_path)
    environment = _get_environment(resources, memory_retries)
    return Plan(*(resources + environment))
def invoke_lambda_predict(ipppssoot, output_bucket):
    """Synchronously call the memory-model Lambda and return its predictions.

    The Lambda reads the features file under control/ in *output_bucket* and
    returns a JSON payload (includes at least "memBin" and "clockTime").
    """
    bucket_name = output_bucket.replace("s3://", "")
    payload = {
        "Bucket": bucket_name,
        "Key": f"control/{ipppssoot}/{ipppssoot}_MemModelFeatures.txt",
        "Ipppssoot": ipppssoot,
    }
    response = client.invoke(
        FunctionName=os.environ["JOBPREDICTLAMBDA"],
        InvocationType="RequestResponse",
        Payload=json.dumps(payload),
    )
    predictions = json.load(response["Payload"])
    print(f"Predictions for {ipppssoot}: \n {predictions}")
    return predictions
def _get_resources(ipppssoot, output_bucket, input_path):
    """Return abstract scheduling requirements for one HST IPPPSSOOT.

    Resource needs (memory bin, kill time) come from the memory-model
    Lambda's predictions; they are defined independently of the AWS Batch
    resources that will satisfy them.

    Returns: JobResources named tuple
    """
    ipppssoot = ipppssoot.lower()
    predictions = invoke_lambda_predict(ipppssoot, output_bucket)
    # Clamp the wall-clock kill time to [20 minutes, 48 hours].
    kill_time = min(max(predictions["clockTime"] * 5, 20 * 60), 48 * 60 * 60)
    return JobResources(
        ipppssoot,
        hst.get_instrument(ipppssoot),
        ipppssoot,  # job name is just the dataset ID
        f"{output_bucket}/outputs/{ipppssoot}",
        input_path,
        "caldp-config-aws",
        predictions["memBin"],
        kill_time,
    )
def _get_environment(job_resources, memory_retries):
    """Pick the Batch queue and job definition for a job's memory bin.

    The final bin index is the modeled bin plus the retry counter; when it
    exceeds the configured definitions, AllBinsTriedQuit is raised.
    """
    job_defs = os.environ["JOBDEFINITIONS"].split(",")
    job_queues = os.environ["JOBQUEUES"].split(",")
    resources = JobResources(*job_resources)
    final_bin = resources.initial_modeled_bin + memory_retries
    # Guard clause: out of bins means the job cannot be retried any higher.
    if final_bin >= len(job_defs):
        log.info("No higher memory job definition for", resources.ipppssoot, "after", memory_retries)
        raise AllBinsTriedQuit("No higher memory job definition for", resources.ipppssoot, "after", memory_retries)
    log.info(
        "Selecting resources for",
        resources.ipppssoot,
        "Initial modeled bin",
        resources.initial_modeled_bin,
        "Memory retries",
        memory_retries,
        "Final bin index",
        final_bin,
    )
    return JobEnv(job_queues[final_bin], job_defs[final_bin], "caldp-process")
# ----------------------------------------------------------------------
def test():
    """Run this module's doctests and return the doctest results."""
    import doctest
    from calcloud import plan
    return doctest.testmod(plan, optionflags=doctest.ELLIPSIS)
# ----------------------------------------------------------------------
def _planner(ipppssoots_file, output_bucket=s3.DEFAULT_BUCKET, input_path=s3.DEFAULT_BUCKET, retries=0):
    """Given a set of ipppssoots in `ipppssoots_file` separated by spaces or newlines,
    as well as an `output_bucket` to define how the jobs are named and
    where outputs should be stored, print out the associated batch resources tuples which
    can be submitted.

    Lines starting with '#' are treated as comments and skipped.
    """
    # FIX: open the file via a context manager so the handle is always
    # closed (it was previously leaked).
    with open(ipppssoots_file) as handle:
        for line in handle:
            if line.strip().startswith("#"):
                continue
            for ipst in line.split():
                print(
                    tuple(get_plan(ipst, "s3://" + output_bucket, "s3://" + input_path, retries))
                )  # Drop type to support literal_eval() vs. eval()
if __name__ == "__main__":
    # CLI entry point: "test" runs the doctests; anything else is treated as
    # planner arguments (1 to 4 positional values).
    if len(sys.argv) in [2, 3, 4, 5]:
        if sys.argv[1] == "test":
            print(test())
        else:
            # ipppssoots_file = sys.argv[1]  # filepath listing ipppssoots to plan
            # output_bucket = sys.argv[2]    # 's3://calcloud-processing'
            # inputs = sys.argv[3]           # astroquery: or S3 inputs
            # retries = sys.argv[4]          # 0..N
            _planner(*sys.argv[1:])
    else:
        print(
            "usage: python -m calcloud.plan <ipppssoots_file> [<output_bucket>] [input_path] [retry]",
            file=sys.stderr,
        )
| 35.682292
| 119
| 0.653627
|
4a06941c1fc98c19e86b7b106006add68be78f79
| 97
|
py
|
Python
|
code/pyFoamInitVCSCase.py
|
sosohungry/pyfoam
|
b19e40a0ef1f41268930122226660414722178e6
|
[
"MIT"
] | null | null | null |
code/pyFoamInitVCSCase.py
|
sosohungry/pyfoam
|
b19e40a0ef1f41268930122226660414722178e6
|
[
"MIT"
] | null | null | null |
code/pyFoamInitVCSCase.py
|
sosohungry/pyfoam
|
b19e40a0ef1f41268930122226660414722178e6
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# Thin launcher script: instantiating InitVCSCase runs the PyFoam
# "initialize case under version control" application.
from PyFoam.Applications.InitVCSCase import InitVCSCase

InitVCSCase()
| 13.857143
| 55
| 0.783505
|
4a06967d58fd2e2bf5f8f6a46974bf24ccf462d9
| 100
|
py
|
Python
|
pysinewave/__init__.py
|
daviddavini/continuous-sine-wave
|
a0be89d28dae357f480a116aefa9bb9974f48f7e
|
[
"MIT"
] | 10
|
2020-06-08T10:55:40.000Z
|
2022-02-08T19:44:25.000Z
|
pysinewave/__init__.py
|
daviddavini/continuous-sine-wave
|
a0be89d28dae357f480a116aefa9bb9974f48f7e
|
[
"MIT"
] | 6
|
2021-04-26T10:20:22.000Z
|
2022-03-12T21:16:41.000Z
|
pysinewave/__init__.py
|
daviddavini/continuous-sine-wave
|
a0be89d28dae357f480a116aefa9bb9974f48f7e
|
[
"MIT"
] | 7
|
2020-01-07T03:30:54.000Z
|
2022-03-12T21:10:52.000Z
|
from pysinewave.sinewave import SineWave
from pysinewave.sinewave_generator import SineWaveGenerator
| 50
| 59
| 0.91
|
4a06967f396dcb6a4ad2ae35a66c52e52814011e
| 30,895
|
py
|
Python
|
Bio/SearchIO/BlatIO.py
|
emedgene/biopython
|
4e359e2aa9255aa8b420ad512d3c4cbe15c07a35
|
[
"BSD-3-Clause"
] | 2
|
2019-11-21T02:34:52.000Z
|
2021-02-14T07:47:43.000Z
|
Bio/SearchIO/BlatIO.py
|
EngineerKhan/biopython
|
4e359e2aa9255aa8b420ad512d3c4cbe15c07a35
|
[
"BSD-3-Clause"
] | null | null | null |
Bio/SearchIO/BlatIO.py
|
EngineerKhan/biopython
|
4e359e2aa9255aa8b420ad512d3c4cbe15c07a35
|
[
"BSD-3-Clause"
] | 1
|
2021-02-14T07:47:46.000Z
|
2021-02-14T07:47:46.000Z
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for BLAT output formats.
This module adds support for parsing BLAT outputs. BLAT (BLAST-Like Alignment
Tool) is a sequence similarity search program initially built for annotating
the human genome.
Bio.SearchIO.BlastIO was tested using standalone BLAT version 34, psLayout
version 3. It should be able to parse psLayout version 4 without problems.
More information on BLAT is available from these sites:
- Publication: http://genome.cshlp.org/content/12/4/656
- User guide: http://genome.ucsc.edu/goldenPath/help/blatSpec.html
- Source download: http://www.soe.ucsc.edu/~kent/src
- Executable download: http://hgdownload.cse.ucsc.edu/admin/exe/
- Blat score calculation: http://genome.ucsc.edu/FAQ/FAQblat.html#blat4
Supported Formats
=================
BlatIO supports parsing, indexing, and writing for both PSL and PSLX output
formats, with or without header. To parse, index, or write PSLX files, use the
'pslx' keyword argument and set it to True.
# blat-psl defaults to PSL files
>>> from Bio import SearchIO
>>> psl = 'Blat/psl_34_004.psl'
>>> qresult = SearchIO.read(psl, 'blat-psl')
>>> qresult
QueryResult(id='hg19_dna', 10 hits)
# set the pslx flag to parse PSLX files
>>> pslx = 'Blat/pslx_34_004.pslx'
>>> qresult = SearchIO.read(pslx, 'blat-psl', pslx=True)
>>> qresult
QueryResult(id='hg19_dna', 10 hits)
For parsing and indexing, you do not need to specify whether the file has a
header or not. For writing, if you want to write a header, you can set the
'header' keyword argument to True. This will write a 'psLayout version 3' header
to your output file.
from Bio import SearchIO
qresult = SearchIO.read(psl, 'blat-psl')
SearchIO.write(qresult, 'header.psl', header=True)
<stdout> (1, 10, 19, 23)
Note that the number of HSPFragments written may exceed the number of HSP
objects. This is because in PSL files, it is possible to have single matches
consisting of noncontiguous sequence fragments. This is where the HSPFragment
object comes into play. These fragments are grouped into a single HSP because
they share the same statistics (e.g. match numbers, BLAT score, etc.). However,
they do not share the same sequence attributes, such as the start and end
coordinates, making them distinct objects.
In addition to parsing PSL(X) files, BlatIO also computes the percent identities
and scores of your search results. This is done using the calculation formula
posted here: http://genome.ucsc.edu/FAQ/FAQblat.html#blat4. It mimics the score
and percent identity calculation done by UCSC's web BLAT service.
Since BlatIO parses the file in a single pass, it expects all results from
the same query to be in consecutive rows. If the results from one query are
spread in nonconsecutive rows, BlatIO will consider them to be separate
QueryResult objects.
In most cases, the PSL(X) format uses the same coordinate system as Python
(zero-based, half open). These coordinates are anchored on the plus strand.
However, if the query aligns on the minus strand, BLAT will anchor the qStarts
coordinates on the minus strand instead. BlatIO is aware of this, and will
re-anchor the qStarts coordinates to the plus strand whenever it sees a minus
strand query match. Conversely, when you write out to a PSL(X) file, BlatIO will
reanchor qStarts to the minus strand again.
BlatIO provides the following attribute-column mapping:
+----------------+-------------------------+-----------------------------------+
| Object | Attribute | Column Name, Value |
+================+=========================+===================================+
| QueryResutl | id | Q name, query sequence ID |
| +-------------------------+-----------------------------------+
| | seq_len | Q size, query sequence full |
| | | length |
+----------------+-------------------------+-----------------------------------+
| Hit | id | T name, hit sequence ID |
| +-------------------------+-----------------------------------+
| | seq_len | T size, hit sequence full length |
+----------------+-------------------------+-----------------------------------+
| HSP | hit_end | T end, end coordinate of the last |
| | | hit fragment |
| +-------------------------+-----------------------------------+
| | hit_gap_num | T gap bases, number of bases |
| | | inserted in hit |
| +-------------------------+-----------------------------------+
| | hit_gapopen_num | T gap count, number of hit gap |
| | | inserts |
| +-------------------------+-----------------------------------+
| | hit_span_all | blockSizes, sizes of each |
| | | fragment |
| +-------------------------+-----------------------------------+
| | hit_start | T start, start coordinate of the |
| | | first hit fragment |
| +-------------------------+-----------------------------------+
| | hit_start_all | tStarts, start coordinate of each |
| | | hit fragment |
| +-------------------------+-----------------------------------+
| | match_num | match, number of non-repeat |
| | | matches |
| +-------------------------+-----------------------------------+
| | mismatch_num | mismatch, number of mismatches |
| +-------------------------+-----------------------------------+
| | match_rep_num | rep. match, number of matches |
| | | that are part of repeats |
| +-------------------------+-----------------------------------+
| | n_num | N's, number of N bases |
| +-------------------------+-----------------------------------+
| | query_end | Q end, end coordinate of the last |
| | | query fragment |
| +-------------------------+-----------------------------------+
| | query_gap_num | Q gap bases, number of bases |
| | | inserted in query |
| +-------------------------+-----------------------------------+
| | query_gapopen_num | Q gap count, number of query gap |
| | | inserts |
| +-------------------------+-----------------------------------+
| | query_span_all | blockSizes, sizes of each |
| | | fragment |
| +-------------------------+-----------------------------------+
| | query_start | Q start, start coordinate of the |
| | | first query block |
| +-------------------------+-----------------------------------+
| | query_start_all | qStarts, start coordinate of each |
| | | query fragment |
| +-------------------------+-----------------------------------+
| | len [*]_ | block count, the number of blocks |
| | | in the alignment |
+----------------+-------------------------+-----------------------------------+
| HSPFragment | hit | hit sequence, if present |
| +-------------------------+-----------------------------------+
| | hit_strand | strand, hit sequence strand |
| +-------------------------+-----------------------------------+
| | query | query sequence, if present |
| +-------------------------+-----------------------------------+
| | query_strand | strand, query sequence strand |
+----------------+-------------------------+-----------------------------------+
In addition to the column mappings above, BlatIO also provides the following
object attributes:
+----------------+-------------------------+-----------------------------------+
| Object | Attribute | Value |
+================+=========================+===================================+
| HSP | gapopen_num | Q gap count + T gap count, total |
| | | number of gap openings |
| +-------------------------+-----------------------------------+
| | ident_num | matches + repmatches, total |
| | | number of identical residues |
| +-------------------------+-----------------------------------+
| | ident_pct | percent identity, calculated |
| | | using UCSC's formula |
| +-------------------------+-----------------------------------+
| | query_is_protein | boolean, whether the query |
| | | sequence is a protein |
| +-------------------------+-----------------------------------+
| | score | HSP score, calculated using |
| | | UCSC's formula |
+----------------+-------------------------+-----------------------------------+
Finally, the default HSP and HSPFragment properties are also provided. See the
HSP and HSPFragment documentation for more details on these properties.
.. [*] You can obtain the number of blocks / fragments in the HSP by invoking
``len`` on the HSP
"""
import re
from math import log
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio._py3k import zip
from Bio.Alphabet import generic_dna
from Bio.SearchIO._index import SearchIndexer
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
__all__ = ('BlatPslParser', 'BlatPslIndexer', 'BlatPslWriter')

# precompile regex patterns
# A PSL result row starts with four whitespace-separated integers
# (matches/misMatches/repMatches/nCount); used to skip the optional header.
_PTR_ROW_CHECK = r'^\d+\s+\d+\s+\d+\s+\d+'
_RE_ROW_CHECK = re.compile(_PTR_ROW_CHECK)
# Bytes version of the same pattern, for the indexer's raw-bytes scanning.
_RE_ROW_CHECK_IDX = re.compile(_as_bytes(_PTR_ROW_CHECK))
def _list_from_csv(csv_string, caster=None):
"""Transforms the given comma-separated string into a list.
:param csv_string: comma-separated input string
:type csv_string: string
:param caster: function used to cast each item in the input string
to its intended type
:type caster: callable, accepts string, returns object
"""
if caster is None:
return [x for x in csv_string.split(',') if x]
else:
return [caster(x) for x in csv_string.split(',') if x]
def _reorient_starts(starts, blksizes, seqlen, strand):
"""Reorients block starts into the opposite strand's coordinates.
:param starts: start coordinates
:type starts: list [int]
:param blksizes: block sizes
:type blksizes: list [int]
:param seqlen: sequence length
:type seqlen: int
:param strand: sequence strand
:type strand: int, choice of -1, 0, or 1
"""
assert len(starts) == len(blksizes), \
"Unequal start coordinates and block sizes list (%r vs %r)" \
% (len(starts), len(blksizes))
# see: http://genome.ucsc.edu/goldenPath/help/blatSpec.html
# no need to reorient if it's already the positive strand
if strand >= 0:
return starts
else:
# the plus-oriented coordinate is calculated by this:
# plus_coord = length - minus_coord - block_size
return [seqlen - start - blksize for
start, blksize in zip(starts, blksizes)]
def _is_protein(psl):
# check if query is protein or not
# adapted from http://genome.ucsc.edu/FAQ/FAQblat.html#blat4
if len(psl['strand']) == 2:
if psl['strand'][1] == '+':
return psl['tend'] == psl['tstarts'][-1] + \
3 * psl['blocksizes'][-1]
elif psl['strand'][1] == '-':
return psl['tstart'] == psl['tsize'] - \
(psl['tstarts'][-1] + 3 * psl['blocksizes'][-1])
return False
def _calc_millibad(psl, is_protein):
# calculates millibad
# adapted from http://genome.ucsc.edu/FAQ/FAQblat.html#blat4
size_mul = 3 if is_protein else 1
millibad = 0
qali_size = size_mul * (psl['qend'] - psl['qstart'])
tali_size = psl['tend'] - psl['tstart']
ali_size = min(qali_size, tali_size)
if ali_size <= 0:
return 0
size_dif = qali_size - tali_size
size_dif = 0 if size_dif < 0 else size_dif
total = size_mul * (psl['matches'] + psl['repmatches'] + psl['mismatches'])
if total != 0:
millibad = (1000 * (psl['mismatches'] * size_mul + psl['qnuminsert'] +
round(3 * log(1 + size_dif)))) / total
return millibad
def _calc_score(psl, is_protein):
# calculates score
# adapted from http://genome.ucsc.edu/FAQ/FAQblat.html#blat4
size_mul = 3 if is_protein else 1
return size_mul * (psl['matches'] + (psl['repmatches'] >> 1)) - \
size_mul * psl['mismatches'] - psl['qnuminsert'] - psl['tnuminsert']
def _create_hsp(hid, qid, psl):
    """Build an HSP (with one HSPFragment per PSL block) from a parsed row.

    hid -- hit sequence ID (tName)
    qid -- query sequence ID (qName)
    psl -- dict of parsed PSL column values (see _parse_row)
    """
    # protein flag
    is_protein = _is_protein(psl)
    # strand
    # if query is protein, strand is 0
    if is_protein:
        qstrand = 0
    else:
        qstrand = 1 if psl['strand'][0] == '+' else -1
    # try to get hit strand, if it exists
    try:
        hstrand = 1 if psl['strand'][1] == '+' else -1
    except IndexError:
        hstrand = 1  # hit strand defaults to plus
    # Protein queries align 1 residue to 3 bases on the hit.
    blocksize_multiplier = 3 if is_protein else 1
    # query block starts (re-anchored to the plus strand if needed)
    qstarts = _reorient_starts(psl['qstarts'],
                               psl['blocksizes'], psl['qsize'], qstrand)
    # hit block starts
    if len(psl['strand']) == 2:
        hstarts = _reorient_starts(psl['tstarts'],
                                   [blocksize_multiplier * i for i in psl['blocksizes']],
                                   psl['tsize'], hstrand)
    else:
        hstarts = psl['tstarts']
    # set query and hit coords
    # this assumes each block has no gaps (which seems to be the case)
    assert len(qstarts) == len(hstarts) == len(psl['blocksizes'])
    query_range_all = list(zip(qstarts, [x + y for x, y in
                                         zip(qstarts, psl['blocksizes'])]))
    hit_range_all = list(zip(hstarts, [x + y * blocksize_multiplier for x, y in
                                       zip(hstarts, psl['blocksizes'])]))
    # check length of sequences and coordinates, all must match
    if 'tseqs' in psl and 'qseqs' in psl:
        assert len(psl['tseqs']) == len(psl['qseqs']) == \
            len(query_range_all) == len(hit_range_all)
    else:
        assert len(query_range_all) == len(hit_range_all)
    frags = []
    # iterating over query_range_all, but hit_range_all works just as well
    for idx, qcoords in enumerate(query_range_all):
        # PSLX rows carry per-block sequences; plain PSL rows leave them ''.
        hseqlist = psl.get('tseqs')
        hseq = '' if not hseqlist else hseqlist[idx]
        qseqlist = psl.get('qseqs')
        qseq = '' if not qseqlist else qseqlist[idx]
        frag = HSPFragment(hid, qid, hit=hseq, query=qseq)
        # set alphabet
        frag.alphabet = generic_dna
        # set coordinates
        frag.query_start = qcoords[0]
        frag.query_end = qcoords[1]
        frag.hit_start = hit_range_all[idx][0]
        frag.hit_end = hit_range_all[idx][1]
        # and strands
        frag.query_strand = qstrand
        frag.hit_strand = hstrand
        frags.append(frag)
    # create hsp object
    hsp = HSP(frags)
    # check if start and end are set correctly
    assert hsp.query_start == psl['qstart']
    assert hsp.query_end == psl['qend']
    assert hsp.hit_start == psl['tstart']
    assert hsp.hit_end == psl['tend']
    # and check block spans as well
    hit_spans = [span / blocksize_multiplier for span in hsp.hit_span_all]
    assert hit_spans == hsp.query_span_all == psl['blocksizes']
    # set its attributes (see the module docstring's attribute-column table)
    hsp.match_num = psl['matches']
    hsp.mismatch_num = psl['mismatches']
    hsp.match_rep_num = psl['repmatches']
    hsp.n_num = psl['ncount']
    hsp.query_gapopen_num = psl['qnuminsert']
    hsp.query_gap_num = psl['qbaseinsert']
    hsp.hit_gapopen_num = psl['tnuminsert']
    hsp.hit_gap_num = psl['tbaseinsert']
    hsp.ident_num = psl['matches'] + psl['repmatches']
    hsp.gapopen_num = psl['qnuminsert'] + psl['tnuminsert']
    hsp.gap_num = psl['qbaseinsert'] + psl['tbaseinsert']
    hsp.query_is_protein = is_protein
    # Percent identity / score use UCSC's published formulas.
    hsp.ident_pct = 100.0 - _calc_millibad(psl, is_protein) * 0.1
    hsp.score = _calc_score(psl, is_protein)
    # helper flag, for writing
    hsp._has_hit_strand = len(psl['strand']) == 2
    return hsp
class BlatPslParser(object):
"""Parser for the BLAT PSL format."""
def __init__(self, handle, pslx=False):
"""Initialize the class."""
self.handle = handle
self.line = self.handle.readline()
self.pslx = pslx
def __iter__(self):
# break out if it's an empty file
if not self.line:
return
# read through header
# this assumes that the result row match the regex
while not re.search(_RE_ROW_CHECK, self.line.strip()):
self.line = self.handle.readline()
if not self.line:
return
# parse into query results
for qresult in self._parse_qresult():
qresult.program = 'blat'
yield qresult
def _parse_row(self):
"""Returns a dictionary of parsed column values."""
assert self.line
cols = [x for x in self.line.strip().split('\t') if x]
self._validate_cols(cols)
psl = {}
psl['qname'] = cols[9] # qName
psl['qsize'] = int(cols[10]) # qSize
psl['tname'] = cols[13] # tName
psl['tsize'] = int(cols[14]) # tSize
psl['matches'] = int(cols[0]) # matches
psl['mismatches'] = int(cols[1]) # misMatches
psl['repmatches'] = int(cols[2]) # repMatches
psl['ncount'] = int(cols[3]) # nCount
psl['qnuminsert'] = int(cols[4]) # qNumInsert
psl['qbaseinsert'] = int(cols[5]) # qBaseInsert
psl['tnuminsert'] = int(cols[6]) # tNumInsert
psl['tbaseinsert'] = int(cols[7]) # tBaseInsert
psl['strand'] = cols[8] # strand
psl['qstart'] = int(cols[11]) # qStart
psl['qend'] = int(cols[12]) # qEnd
psl['tstart'] = int(cols[15]) # tStart
psl['tend'] = int(cols[16]) # tEnd
psl['blockcount'] = int(cols[17]) # blockCount
psl['blocksizes'] = _list_from_csv(cols[18], int) # blockSizes
psl['qstarts'] = _list_from_csv(cols[19], int) # qStarts
psl['tstarts'] = _list_from_csv(cols[20], int) # tStarts
if self.pslx:
psl['qseqs'] = _list_from_csv(cols[21]) # query sequence
psl['tseqs'] = _list_from_csv(cols[22]) # hit sequence
return psl
def _validate_cols(self, cols):
if not self.pslx:
assert len(cols) == 21, "Invalid PSL line: %r. " \
"Expected 21 tab-separated columns, found %i" % (self.line, len(cols))
else:
assert len(cols) == 23, "Invalid PSLX line: %r. " \
"Expected 23 tab-separated columns, found %i" % (self.line, len(cols))
    def _parse_qresult(self):
        """Generator function that returns QueryResult objects.

        Implements a one-row-lookbehind state machine: each iteration
        parses the current row, then decides — by comparing the previous
        row's query/hit IDs with the current ones — whether the previous
        row closed a Hit and/or a QueryResult.
        """
        # state values, determines what to do for each line
        state_EOF = 0
        state_QRES_NEW = 1
        state_QRES_SAME = 3
        state_HIT_NEW = 2
        state_HIT_SAME = 4
        # initial dummy values
        qres_state = None
        file_state = None
        cur_qid, cur_hid = None, None
        prev_qid, prev_hid = None, None
        cur, prev = None, None
        hit_list, hsp_list = [], []
        while True:
            # store previous line's parsed values for all lines after the first
            if cur is not None:
                prev = cur
                prev_qid = cur_qid
                prev_hid = cur_hid
            # only parse the result row if it's not EOF
            if self.line:
                cur = self._parse_row()
                cur_qid = cur['qname']
                cur_hid = cur['tname']
            else:
                file_state = state_EOF
                # mock values, since we have nothing to parse
                cur_qid, cur_hid = None, None
            # get the state of hit and qresult
            if prev_qid != cur_qid:
                qres_state = state_QRES_NEW
            else:
                qres_state = state_QRES_SAME
            # new hits are hits with different ids or hits in a new qresult
            if prev_hid != cur_hid or qres_state == state_QRES_NEW:
                hit_state = state_HIT_NEW
            else:
                hit_state = state_HIT_SAME
            # prev is None only on the very first iteration; from then on
            # every loop flushes the *previous* row into an HSP.
            if prev is not None:
                # create fragment and HSP and set their attributes
                hsp = _create_hsp(prev_hid, prev_qid, prev)
                hsp_list.append(hsp)
                if hit_state == state_HIT_NEW:
                    # create Hit and set its attributes
                    hit = Hit(hsp_list)
                    hit.seq_len = prev['tsize']
                    hit_list.append(hit)
                    hsp_list = []
                # create qresult and yield if we're at a new qresult or at EOF
                if qres_state == state_QRES_NEW or file_state == state_EOF:
                    qresult = QueryResult(id=prev_qid)
                    for hit in hit_list:
                        qresult.absorb(hit)
                    qresult.seq_len = prev['qsize']
                    yield qresult
                    # if we're at EOF, break
                    if file_state == state_EOF:
                        break
                    hit_list = []
            self.line = self.handle.readline()
class BlatPslIndexer(SearchIndexer):
    """Indexer class for BLAT PSL output.

    Records, for each query ID, the byte offset and length of its run of
    consecutive rows so individual QueryResults can be fetched lazily.
    """

    _parser = BlatPslParser

    def __init__(self, filename, pslx=False):
        """Initialize the indexer; pslx toggles PSLX column handling."""
        SearchIndexer.__init__(self, filename, pslx=pslx)

    def __iter__(self):
        """Iterates over the file handle; yields key, start offset, and length."""
        handle = self._handle
        handle.seek(0)
        # denotes column location for query identifier
        query_id_idx = 9
        qresult_key = None
        # handle is opened in binary mode, so split on byte tabs
        tab_char = b"\t"
        start_offset = handle.tell()
        line = handle.readline()
        # read through header
        # this assumes that the result row match the regex
        while not re.search(_RE_ROW_CHECK_IDX, line.strip()):
            start_offset = handle.tell()
            line = handle.readline()
            if not line:
                return
        # and index the qresults
        while True:
            # end_offset marks the end of the row read on the *previous*
            # iteration; the readline below advances past the current row
            end_offset = handle.tell()
            cols = [x for x in line.strip().split(tab_char) if x]
            if qresult_key is None:
                qresult_key = cols[query_id_idx]
            else:
                curr_key = cols[query_id_idx]
                if curr_key != qresult_key:
                    # query ID changed: emit the finished record
                    yield _bytes_to_string(qresult_key), start_offset, \
                        end_offset - start_offset
                    qresult_key = curr_key
                    start_offset = end_offset - len(line)
            line = handle.readline()
            if not line:
                # EOF: flush the last record
                yield _bytes_to_string(qresult_key), start_offset, \
                    end_offset - start_offset
                break

    def get_raw(self, offset):
        """Returns raw bytes string of a QueryResult object from the given offset."""
        handle = self._handle
        handle.seek(offset)
        # column index of the query identifier
        query_id_idx = 9
        qresult_key = None
        qresult_raw = b""
        tab_char = b"\t"
        while True:
            line = handle.readline()
            if not line:
                break
            cols = [x for x in line.strip().split(tab_char) if x]
            if qresult_key is None:
                qresult_key = cols[query_id_idx]
            else:
                curr_key = cols[query_id_idx]
                if curr_key != qresult_key:
                    # reached the next query's rows: stop accumulating
                    break
            qresult_raw += line
        return qresult_raw
class BlatPslWriter(object):
    """Writer for the blat-psl format."""

    def __init__(self, handle, header=False, pslx=False):
        """Initialize the writer.

        handle -- writable file handle.
        header -- if True, prepend the psLayout header block.
        pslx -- if True, also write the two PSLX sequence columns.
        """
        self.handle = handle
        # flag for writing header or not
        self.header = header
        self.pslx = pslx

    def write_file(self, qresults):
        """Write qresults; return (#qresults, #hits, #hsps, #fragments) written."""
        handle = self.handle
        qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
        if self.header:
            handle.write(self._build_header())
        for qresult in qresults:
            if qresult:
                handle.write(self._build_row(qresult))
                qresult_counter += 1
                hit_counter += len(qresult)
                hsp_counter += sum(len(hit) for hit in qresult)
                frag_counter += sum(len(hit.fragments) for hit in qresult)
        return qresult_counter, hit_counter, hsp_counter, frag_counter

    def _build_header(self):
        """Return the two-row psLayout column-title header as a string."""
        # for now, always use the psLayout version 3
        header = 'psLayout version 3\n'
        # adapted from BLAT's source: lib/psl.c#L496
        # BUGFIX: these column-title literals were previously dangling
        # expression statements (lost line continuations), so everything
        # after the first fragment was silently discarded.  They are now one
        # parenthesized expression: implicit string concatenation joins the
        # fragments before the % formatting is applied.
        # NOTE(review): intra-title spacing kept exactly as in the source;
        # verify column alignment against BLAT's native output if it matters.
        header += ("\nmatch\tmis- \trep. \tN's\tQ gap\tQ gap\tT gap\tT "
                   "gap\tstrand\tQ \tQ \tQ \tQ \tT \tT \tT "
                   "\tT \tblock\tblockSizes \tqStarts\t tStarts\n "
                   "\tmatch\tmatch\t \tcount\tbases\tcount\tbases\t \tname "
                   "\tsize\tstart\tend\tname \tsize\tstart\tend\tcount"
                   "\n%s\n" % ('-' * 159))
        return header

    def _build_row(self, qresult):
        """Returns a string of one row or more of the QueryResult object."""
        # For now, our writer writes the row according to the order in
        # the QueryResult and Hit objects.
        # This is different from BLAT's native output, where the rows are
        # grouped by strand.
        # Should we tweak the behavior to better mimic the native output?
        qresult_lines = []
        for hit in qresult:
            for hsp in hit.hsps:
                # protein queries count in residues while the hit counts in
                # bases, hence the x3 block-size multiplier
                query_is_protein = getattr(hsp, "query_is_protein", False)
                blocksize_multiplier = 3 if query_is_protein else 1
                line = []
                line.append(hsp.match_num)
                line.append(hsp.mismatch_num)
                line.append(hsp.match_rep_num)
                line.append(hsp.n_num)
                line.append(hsp.query_gapopen_num)
                line.append(hsp.query_gap_num)
                line.append(hsp.hit_gapopen_num)
                line.append(hsp.hit_gap_num)
                # check spans
                eff_query_spans = [blocksize_multiplier * s for s in hsp.query_span_all]
                if hsp.hit_span_all != eff_query_spans:
                    raise ValueError("HSP hit span and query span values do not match.")
                block_sizes = hsp.query_span_all
                # set strand and starts
                if hsp[0].query_strand >= 0:  # since it may be a protein seq
                    strand = '+'
                else:
                    strand = '-'
                qstarts = _reorient_starts([x[0] for x in hsp.query_range_all],
                        hsp.query_span_all, qresult.seq_len, hsp[0].query_strand)
                if hsp[0].hit_strand == 1:
                    hstrand = 1
                    # only write hit strand if it was present in the source file
                    if hsp._has_hit_strand:
                        strand += '+'
                else:
                    hstrand = -1
                    strand += '-'
                hstarts = _reorient_starts([x[0] for x in hsp.hit_range_all],
                        hsp.hit_span_all, hit.seq_len, hstrand)
                line.append(strand)
                line.append(qresult.id)
                line.append(qresult.seq_len)
                line.append(hsp.query_start)
                line.append(hsp.query_end)
                line.append(hit.id)
                line.append(hit.seq_len)
                line.append(hsp.hit_start)
                line.append(hsp.hit_end)
                line.append(len(hsp))
                line.append(','.join((str(x) for x in block_sizes)) + ',')
                line.append(','.join((str(x) for x in qstarts)) + ',')
                line.append(','.join((str(x) for x in hstarts)) + ',')
                if self.pslx:
                    line.append(','.join((str(x.seq) for x in hsp.query_all)) + ',')
                    line.append(','.join((str(x.seq) for x in hsp.hit_all)) + ',')
                qresult_lines.append('\t'.join((str(x) for x in line)))
        return '\n'.join(qresult_lines) + '\n'
# When executed as a script (rather than imported as a module), run the
# doctests embedded in this file via Biopython's doctest helper.
if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest()
| 43.330996
| 88
| 0.49856
|
4a06982f43e5ad8c73d21059dfb07758221cbe82
| 8,436
|
py
|
Python
|
module/util.py
|
baristahell/OBB-YOLOv3
|
7c60cf3c8ebcf55d3c1f405fbb135591ebd20802
|
[
"MIT"
] | 19
|
2020-05-28T03:38:49.000Z
|
2021-06-18T08:24:44.000Z
|
module/util.py
|
baristahell/OBB-YOLOv3
|
7c60cf3c8ebcf55d3c1f405fbb135591ebd20802
|
[
"MIT"
] | null | null | null |
module/util.py
|
baristahell/OBB-YOLOv3
|
7c60cf3c8ebcf55d3c1f405fbb135591ebd20802
|
[
"MIT"
] | 7
|
2020-06-02T00:50:47.000Z
|
2021-06-02T07:41:50.000Z
|
# -*- coding: utf-8 -*-
import torch
import numpy as np
def predict_transform(fms, inp_dim, anchors, num_classes, cuda=False):
    """Decode raw YOLO feature maps into oriented-box predictions.

    fms -- feature maps of shape (batch, num_anchors*(11+num_classes), g, g)
        where g is the grid size (e.g. 13/26/52 for inp_dim=416).
    inp_dim -- network input resolution (e.g. 416).
    anchors -- list of (w, h) anchor sizes in input-image pixels.
    num_classes -- number of object classes (e.g. 80 for COCO).
    cuda -- move the offset/anchor helper tensors to GPU when True.

    Per-anchor attribute layout (11 + num_classes values):
    indices 0-7 are the 4 corner (x, y) offsets of the oriented box,
    8-9 the box center, 10 objectness, 11.. the class scores.
    Returns predictions of shape (batch, g*g*num_anchors, 11+num_classes)
    with coordinates scaled back to input-image pixels.
    """
    stride = inp_dim // fms.size(2)  # 416 // {13,26,52} = {32,16,8}
    batch_size = fms.size(0)
    bbox_attrs = 11 + num_classes  # e.g. 11+80 = 91 per anchor
    grid_size = inp_dim // stride  # 13, 26 or 52
    # rescale anchors from input-image pixels to grid cells
    anchors = [(a[0]/stride, a[1]/stride) for a in anchors]
    num_anchors = len(anchors)
    # (batch, attrs*anchors, g*g)
    prediction = fms.view(batch_size, bbox_attrs*num_anchors,-1)
    # (batch, g*g, attrs*anchors)
    prediction = prediction.transpose(1,2).contiguous()
    # (batch, g*g*anchors, attrs)
    prediction = prediction.view(batch_size, -1, bbox_attrs)
    # Sigmoid the centre x/y (8, 9) and the objectness score (10)
    prediction[:,:,8] = torch.sigmoid(prediction[:,:,8])
    prediction[:,:,9] = torch.sigmoid(prediction[:,:,9])
    prediction[:,:,10] = torch.sigmoid(prediction[:,:,10])
    # Build the per-cell (x, y) grid offsets
    grid_len = np.arange(grid_size)
    a, b = np.meshgrid(grid_len,grid_len)
    x_offset = torch.FloatTensor(a).view(-1,1)  # 0,1,..,g-1 repeating
    y_offset = torch.FloatTensor(b).view(-1,1)  # 0 x g, 1 x g, ...
    # (1, g*g*anchors, 2)
    x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)
    # anchors tiled to (1, g*g*anchors, 8) — one (w,h) pair per corner
    anchors = torch.FloatTensor(anchors).repeat(1,4)
    anchors = anchors.repeat(grid_size*grid_size,1).unsqueeze(0)
    if cuda:
        x_y_offset = x_y_offset.cuda()
        anchors = anchors.cuda()
    # NOTE: order matters — centers (8:10) must get their grid offset
    # before the corner transform below reuses x_y_offset.
    prediction[...,8:10] += x_y_offset
    prediction[...,0:8] = prediction[:,:,0:8] * anchors + x_y_offset.repeat(1,1,4)
    # Per-class sigmoid (multi-label style) on the class scores
    prediction[...,11: 11 + num_classes] = torch.sigmoid((prediction[:,:, 11 : 11 + num_classes]))
    # scale every coordinate (corners + center) back to input-image pixels
    prediction[...,:10] *= stride
    return prediction
def bbox_iou(box1, box2, x1y1x2y2=True):
    """Return the IoU between two sets of axis-aligned bounding boxes.

    Boxes are (N, 4) tensors given either as corner coordinates
    (x1, y1, x2, y2) or, when ``x1y1x2y2`` is False, as
    (center_x, center_y, width, height).  Areas use the inclusive-pixel
    convention (+1 on each span).
    """
    if x1y1x2y2:
        # Already corner coordinates: just split the columns.
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    else:
        # Convert center/size form to corners via half-extents.
        half_w1, half_h1 = box1[:, 2] / 2, box1[:, 3] / 2
        b1_x1, b1_x2 = box1[:, 0] - half_w1, box1[:, 0] + half_w1
        b1_y1, b1_y2 = box1[:, 1] - half_h1, box1[:, 1] + half_h1
        half_w2, half_h2 = box2[:, 2] / 2, box2[:, 3] / 2
        b2_x1, b2_x2 = box2[:, 0] - half_w2, box2[:, 0] + half_w2
        b2_y1, b2_y2 = box2[:, 1] - half_h2, box2[:, 1] + half_h2
    # Intersection rectangle, clamped to zero width/height when disjoint.
    inter_w = torch.clamp(torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1) + 1, min=0)
    inter_h = torch.clamp(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1) + 1, min=0)
    inter_area = inter_w * inter_h
    # Union = sum of areas minus overlap; epsilon guards divide-by-zero.
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    return inter_area / (b1_area + b2_area - inter_area + 1e-16)
def get_target( target, anchors, g_dim, ignore_threshold, num_classes):
    '''
    Build YOLO training targets for one feature-map scale.

    target: (batch, 50, 11) ground-truth tensor; per row the 11 values are
        4 corner (x, y) pairs, the box center (x, y), then the class id,
        all coordinates normalized to [0, 1].  All-zero rows are padding.
    anchors: anchors already scaled to grid cells (i.e. divided by stride).
    g_dim: feature map size (13, 26 or 52).
    ignore_threshold: anchors whose IoU with the GT box exceeds this are
        excluded from the no-object loss.
    return: (mask, conf_mask, tx, ty, tx1..ty4, tconf, tcls) — all of
        shape (batch, num_anchors, g_dim, g_dim).
    '''
    bs = target.size(0)
    nA = len(anchors)
    num_classes = num_classes
    # mask marks the single responsible anchor per GT box;
    # conf_mask marks cells that participate in the objectness loss.
    mask = torch.zeros(bs, nA, g_dim, g_dim)
    conf_mask = torch.ones(bs, nA, g_dim, g_dim)
    tx = torch.zeros(bs, nA, g_dim, g_dim)
    ty = torch.zeros(bs, nA, g_dim, g_dim)
    tx1 = torch.zeros(bs, nA, g_dim, g_dim)
    ty1 = torch.zeros(bs, nA, g_dim, g_dim)
    tx2 = torch.zeros(bs, nA, g_dim, g_dim)
    ty2 = torch.zeros(bs, nA, g_dim, g_dim)
    tx3 = torch.zeros(bs, nA, g_dim, g_dim)
    ty3 = torch.zeros(bs, nA, g_dim, g_dim)
    tx4 = torch.zeros(bs, nA, g_dim, g_dim)
    ty4 = torch.zeros(bs, nA, g_dim, g_dim)
    tconf = torch.zeros(bs, nA, g_dim, g_dim)
    # class id stored as a plain index (not one-hot)
    tcls = torch.zeros(bs, nA, g_dim, g_dim)
    for b in range(bs):
        for t in range(target.shape[1]):
            # padding rows are all zero; they terminate this image's boxes
            if target[b, t].sum() == 0:
                break
            # Convert normalized coordinates to grid units
            gx1 = target[b, t, 0] * g_dim
            gy1 = target[b, t, 1] * g_dim
            gx2 = target[b, t, 2] * g_dim
            gy2 = target[b, t, 3] * g_dim
            gx3 = target[b, t, 4] * g_dim
            gy3 = target[b, t, 5] * g_dim
            gx4 = target[b, t, 6] * g_dim
            gy4 = target[b, t, 7] * g_dim
            gx = target[b, t, 8] * g_dim
            gy = target[b, t, 9] * g_dim
            # Grid cell containing the box center
            gi = int(gx)
            gj = int(gy)
            # Axis-aligned extent of the oriented box (for anchor matching)
            gw = max(target[b, t, [0, 2, 4, 6]] * g_dim) - min(target[b, t, [0, 2, 4, 6]] * g_dim)
            gh = max(target[b, t, [1, 3, 5, 7]] * g_dim) - min(target[b, t, [1, 3, 5, 7]] * g_dim)
            gw = gw.cpu().numpy()
            gh = gh.cpu().numpy()
            # Compare the GT extent against anchors as size-only boxes at the origin
            gt_box = torch.FloatTensor(np.array([0, 0, gw, gh])).unsqueeze(0)
            anchor_shapes = torch.FloatTensor(np.concatenate((np.zeros((nA, 2)), np.array(anchors)), 1))
            # Calculate iou between gt and anchor shapes
            anch_ious = bbox_iou(gt_box, anchor_shapes)
            # Where the overlap is larger than threshold set mask to zero (ignore)
            conf_mask[b, anch_ious > ignore_threshold, gj, gi] = 0
            # Find the best matching anchor box
            best_n = np.argmax(anch_ious)
            # Masks
            mask[b, best_n, gj, gi] = 1
            conf_mask[b, best_n, gj, gi] = 1
            # Center offsets within the responsible cell
            tx[b, best_n, gj, gi] = gx - gi
            ty[b, best_n, gj, gi] = gy - gj
            # Corner targets.  NOTE(review): corners are encoded as
            # exp((corner - cell) / anchor + eps); a commented-out variant in
            # the original used the plain ratio without exp — confirm which
            # encoding the corresponding loss expects.
            tx1[b, best_n, gj, gi] = torch.exp((gx1 - gi)/anchors[best_n][0] + 1e-16)
            ty1[b, best_n, gj, gi] = torch.exp((gy1 - gj)/anchors[best_n][1] + 1e-16)
            tx2[b, best_n, gj, gi] = torch.exp((gx2 - gi)/anchors[best_n][0] + 1e-16)
            ty2[b, best_n, gj, gi] = torch.exp((gy2 - gj)/anchors[best_n][1] + 1e-16)
            tx3[b, best_n, gj, gi] = torch.exp((gx3 - gi)/anchors[best_n][0] + 1e-16)
            ty3[b, best_n, gj, gi] = torch.exp((gy3 - gj)/anchors[best_n][1] + 1e-16)
            tx4[b, best_n, gj, gi] = torch.exp((gx4 - gi)/anchors[best_n][0] + 1e-16)
            ty4[b, best_n, gj, gi] = torch.exp((gy4 - gj)/anchors[best_n][1] + 1e-16)
            # objectness target
            tconf[b, best_n, gj, gi] = 1
            # class label as an integer index
            tcls[b, best_n, gj, gi] = int(target[b, t, 10])
    return mask, conf_mask, tx, ty, tx1, ty1, tx2, ty2,tx3, ty3,tx4, ty4, tconf, tcls
| 41.55665
| 126
| 0.531532
|
4a06991cf99dcc2e1769e144cc7a2d59751ff725
| 579
|
py
|
Python
|
openprocurement/tender/cfaua/adapters/tender/serializable/value.py
|
openprocurement/openprocurement.tender.cfaua
|
1f84b15838c3b5980409734f57361540e6e6f676
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/tender/cfaua/adapters/tender/serializable/value.py
|
openprocurement/openprocurement.tender.cfaua
|
1f84b15838c3b5980409734f57361540e6e6f676
|
[
"Apache-2.0"
] | 3
|
2018-09-28T12:57:52.000Z
|
2018-10-29T13:54:38.000Z
|
openprocurement/tender/cfaua/adapters/tender/serializable/value.py
|
ProzorroUKR/openprocurement.tender.cfaua
|
7b2d0f514be6dca090ea96b83df8ce01bdc7dc0d
|
[
"Apache-2.0"
] | 1
|
2018-09-10T07:40:41.000Z
|
2018-09-10T07:40:41.000Z
|
# src/openprocurement.tender.belowthreshold/openprocurement/tender/belowthreshold/models.py:246
from openprocurement.api.adapters import Serializable
class SerializableTenderMultilotValue(Serializable):
    """Serialize a tender's "value": for multilot tenders the amount is the
    sum of the lots' amounts; otherwise the tender's own value is used."""

    serialized_name = "value"

    def __call__(self, obj, *args, **kwargs):
        if not obj.lots:
            return obj.value
        value_class = obj._fields['value']
        total_amount = sum(lot.value.amount for lot in obj.lots)
        return value_class(dict(amount=total_amount,
                                currency=obj.value.currency,
                                valueAddedTaxIncluded=obj.value.valueAddedTaxIncluded))
| 52.636364
| 114
| 0.697755
|
4a069a4b26f0b5db370da027b658a8d6bf6c3c52
| 3,483
|
py
|
Python
|
fcos_core/utils/comm.py
|
realtimshady1/FCOS
|
50b10c55c54bd519956d3ef2f96e042f9be0363a
|
[
"BSD-2-Clause"
] | null | null | null |
fcos_core/utils/comm.py
|
realtimshady1/FCOS
|
50b10c55c54bd519956d3ef2f96e042f9be0363a
|
[
"BSD-2-Clause"
] | null | null | null |
fcos_core/utils/comm.py
|
realtimshady1/FCOS
|
50b10c55c54bd519956d3ef2f96e042f9be0363a
|
[
"BSD-2-Clause"
] | null | null | null |
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
    """Return the distributed world size, or 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    # torch.distributed missing or uninitialized: behave as single-process
    return 1
def get_rank():
    """Return this process's distributed rank, or 0 when not distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    # single-process fallback: treat as rank 0
    return 0
def is_main_process():
    # Rank 0 is conventionally the only process that logs / checkpoints.
    return get_rank() == 0
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training.  A no-op when torch.distributed is
    unavailable, uninitialized, or running single-process.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() > 1:
        dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a byte Tensor on the GPU (NCCL backends require CUDA)
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank, since payloads differ per process
    local_size = torch.IntTensor([tensor.numel()]).to("cuda")
    size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # trim each gathered buffer back to its true length before unpickling
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with
    rank 0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # nothing to reduce in single-process mode
        return input_dict
    with torch.no_grad():
        # sort keys so every rank stacks the tensors in the same order
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[name] for name in names], dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only the destination rank holds the accumulated sum,
            # so only it divides by world_size
            values /= world_size
        reduced_dict = dict(zip(names, values))
    return reduced_dict
def is_pytorch_1_1_0_or_later():
    """Return True when the running PyTorch is version 1.1.0 or newer.

    BUGFIX: the previous implementation used ``int(i[0])`` — only the first
    character of each version component — so e.g. "10" parsed as 1 and any
    component starting with a non-digit crashed.  This version strips local
    build suffixes ("+cu118") and non-numeric tails ("0a0") before comparing.
    """
    parts = []
    # drop the local-version segment, then inspect at most major.minor.micro
    for component in torch.__version__.split("+")[0].split(".")[:3]:
        digits = ""
        for ch in component:
            if not ch.isdigit():
                break  # stop at pre-release suffixes such as "0a0"
            digits += ch
        parts.append(int(digits) if digits else 0)
    return parts >= [1, 1, 0]
| 28.54918
| 84
| 0.647717
|
4a069b608aa6ec79b583234be5fa5d9d284a3d5f
| 12,025
|
py
|
Python
|
data/coco.py
|
frezaeix/AttFDNet
|
e4021b259e187e9180a83fcb67c029144bdd5789
|
[
"MIT"
] | 1
|
2021-03-07T01:09:33.000Z
|
2021-03-07T01:09:33.000Z
|
data/coco.py
|
frezaeix/AttFDNet
|
e4021b259e187e9180a83fcb67c029144bdd5789
|
[
"MIT"
] | null | null | null |
data/coco.py
|
frezaeix/AttFDNet
|
e4021b259e187e9180a83fcb67c029144bdd5789
|
[
"MIT"
] | null | null | null |
"""VOC Dataset Classes
Original author: Francisco Massa
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
Updated by: Ellis Brown, Max deGroot
"""
import os
import pickle
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np
import json
import uuid
from utils.pycocotools.coco import COCO
from utils.pycocotools.cocoeval import COCOeval
from utils.pycocotools import mask as COCOmask
class COCODetection(data.Dataset):
    """COCO Detection Dataset Object

    input is image, target is annotation

    Arguments:
        root (string): filepath to the COCO dataset root folder.
        image_sets (list of (year, set) tuples): imagesets to use
            (eg. [('2014', 'train')])
        preproc (callable, optional): joint transformation applied to the
            image and its target
        target_transform (callable, optional): transformation to perform on the
            target `annotation`
        dataset_name (string, optional): which dataset to load
            (default: 'COCO')
    """

    def __init__(self, root, image_sets, preproc=None, target_transform=None,
                 dataset_name='COCO'):
        self.root = root
        self.cache_path = os.path.join(self.root, 'cache')
        self.image_set = image_sets
        self.preproc = preproc
        self.target_transform = target_transform
        self.name = dataset_name
        self.ids = list()
        self.annotations = list()
        # maps "virtual" set names onto the physical image folder they use
        self._view_map = {
            'minival2014': 'val2014',  # 5k val2014 subset
            'valminusminival2014': 'val2014',  # val2014 \setminus minival2014
            'test-dev2015': 'test2015',
        }
        for (year, image_set) in image_sets:
            coco_name = image_set + year
            data_name = (self._view_map[coco_name]
                         if coco_name in self._view_map
                         else coco_name)
            annofile = self._get_ann_file(coco_name)
            _COCO = COCO(annofile)
            # NOTE(review): these attributes are overwritten on each loop
            # iteration, so evaluation state reflects the *last* image set.
            self._COCO = _COCO
            self.coco_name = coco_name
            cats = _COCO.loadCats(_COCO.getCatIds())
            self._classes = tuple(['__background__'] + [c['name'] for c in cats])
            self.num_classes = len(self._classes)
            self._class_to_ind = dict(zip(self._classes, range(self.num_classes)))
            self._class_to_coco_cat_id = dict(zip([c['name'] for c in cats],
                                                  _COCO.getCatIds()))
            indexes = _COCO.getImgIds()
            self.image_indexes = indexes
            self.ids.extend([self.image_path_from_index(data_name, index) for index in indexes])
            if image_set.find('test') != -1:
                print('test set will not load annotations!')
            else:
                self.annotations.extend(self._load_coco_annotations(coco_name, indexes, _COCO))

    def image_path_from_index(self, name, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        # Example image path for index=119993:
        #   images/train2014/COCO_train2014_000000119993.jpg
        # 2017 sets dropped the COCO_<set> prefix from filenames.
        if name == 'val2017' or name == 'train2017':
            file_name = (str(index).zfill(12) + '.jpg')
        else:
            file_name = ('COCO_' + name + '_' +
                         str(index).zfill(12) + '.jpg')
        image_path = os.path.join(self.root, 'images',
                                  name, file_name)
        assert os.path.exists(image_path), \
            'Path does not exist: {}'.format(image_path)
        return image_path

    def _get_ann_file(self, name):
        """Return the annotation JSON path for the given set name."""
        # test sets ship only image metadata, not instance annotations
        prefix = 'instances' if name.find('test') == -1 \
            else 'image_info'
        return os.path.join(self.root, 'annotations',
                            prefix + '_' + name + '.json')

    def _load_coco_annotations(self, coco_name, indexes, _COCO):
        """Load (or read from cache) the ground-truth boxes for all images."""
        cache_file = os.path.join(self.cache_path, coco_name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} gt roidb loaded from {}'.format(coco_name, cache_file))
            return roidb
        gt_roidb = [self._annotation_from_index(index, _COCO)
                    for index in indexes]
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb

    def _annotation_from_index(self, index, _COCO):
        """
        Loads COCO bounding-box instance annotations.  Returns an
        (num_objs, 5) array of [x1, y1, x2, y2, class_index] rows.
        """
        im_ann = _COCO.loadImgs(index)[0]
        width = im_ann['width']
        height = im_ann['height']
        annIds = _COCO.getAnnIds(imgIds=index, iscrowd=None)
        objs = _COCO.loadAnns(annIds)
        # Sanitize bboxes -- some are invalid (zero area or out of bounds)
        valid_objs = []
        for obj in objs:
            x1 = np.max((0, obj['bbox'][0]))
            y1 = np.max((0, obj['bbox'][1]))
            x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
            y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
            if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
                obj['clean_bbox'] = [x1, y1, x2, y2]
                valid_objs.append(obj)
        objs = valid_objs
        num_objs = len(objs)
        res = np.zeros((num_objs, 5))
        # Lookup table to map from COCO category ids to our internal class
        # indices
        coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
                                          self._class_to_ind[cls])
                                         for cls in self._classes[1:]])
        for ix, obj in enumerate(objs):
            cls = coco_cat_id_to_class_ind[obj['category_id']]
            res[ix, 0:4] = obj['clean_bbox']
            res[ix, 4] = cls
        return res

    def __getitem__(self, index):
        """Return the (possibly transformed) image and target at index."""
        img_id = self.ids[index]
        target = self.annotations[index]
        img = cv2.imread(img_id, cv2.IMREAD_COLOR)
        height, width, _ = img.shape
        if self.target_transform is not None:
            target = self.target_transform(target)
        if self.preproc is not None:
            img, target = self.preproc(img, target)
        return img, target

    def __len__(self):
        return len(self.ids)

    def pull_image(self, index):
        '''Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            PIL img
        '''
        img_id = self.ids[index]
        return cv2.imread(img_id, cv2.IMREAD_COLOR)

    def pull_tensor(self, index):
        '''Returns the original image at an index in tensor form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            tensorized version of img, squeezed
        '''
        # (removed an unused ToTensor local; the raw HWC image is wrapped
        # directly, matching the original behavior)
        return torch.Tensor(self.pull_image(index)).unsqueeze_(0)

    def _print_detection_eval_metrics(self, coco_eval):
        """Print mAP over IoU 0.5:0.95 plus per-category AP."""
        IoU_lo_thresh = 0.5
        IoU_hi_thresh = 0.95

        def _get_thr_ind(coco_eval, thr):
            # locate the index of an IoU threshold in the eval params
            ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                           (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
            iou_thr = coco_eval.params.iouThrs[ind]
            assert np.isclose(iou_thr, thr)
            return ind

        ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
        ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
        # precision has dims (iou, recall, cls, area range, max dets)
        # area range index 0: all area ranges
        # max dets index 2: 100 per image
        precision = \
            coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
        ap_default = np.mean(precision[precision > -1])
        print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
              '~~~~'.format(IoU_lo_thresh, IoU_hi_thresh))
        print('{:.1f}'.format(100 * ap_default))
        for cls_ind, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            # minus 1 because of __background__
            precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
            ap = np.mean(precision[precision > -1])
            print('{:.1f}'.format(100 * ap))
        print('~~~~ Summary metrics ~~~~')
        coco_eval.summarize()

    def _do_detection_eval(self, res_file, output_dir):
        """Run COCO bbox evaluation on a results file and pickle the result."""
        ann_type = 'bbox'
        coco_dt = self._COCO.loadRes(res_file)
        coco_eval = COCOeval(self._COCO, coco_dt)
        coco_eval.params.useSegm = (ann_type == 'segm')
        coco_eval.evaluate()
        coco_eval.accumulate()
        self._print_detection_eval_metrics(coco_eval)
        eval_file = os.path.join(output_dir, 'detection_results.pkl')
        with open(eval_file, 'wb') as fid:
            pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
        print('Wrote COCO eval results to: {}'.format(eval_file))

    def _coco_results_one_category(self, boxes, cat_id):
        """Convert one class's per-image boxes to COCO result dicts."""
        results = []
        for im_ind, index in enumerate(self.image_indexes):
            # BUGFIX: np.float was removed in NumPy 1.24; use np.float64.
            dets = boxes[im_ind].astype(np.float64)
            # BUGFIX: `dets == []` is an array/list comparison and was never
            # True for an ndarray; test emptiness explicitly instead.
            if dets.size == 0:
                continue
            scores = dets[:, -1]
            xs = dets[:, 0]
            ys = dets[:, 1]
            # COCO expects [x, y, w, h]; +1 for inclusive pixel spans
            ws = dets[:, 2] - xs + 1
            hs = dets[:, 3] - ys + 1
            results.extend(
                [{'image_id': index,
                  'category_id': cat_id,
                  'bbox': [xs[k], ys[k], ws[k], hs[k]],
                  'score': scores[k]} for k in range(dets.shape[0])])
        return results

    def _write_coco_results_file(self, all_boxes, res_file):
        """Dump all detections to a COCO-format results JSON file."""
        # [{"image_id": 42,
        #   "category_id": 18,
        #   "bbox": [258.15,41.29,348.26,243.78],
        #   "score": 0.236}, ...]
        results = []
        for cls_ind, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            print('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
                                                             self.num_classes))
            coco_cat_id = self._class_to_coco_cat_id[cls]
            results.extend(self._coco_results_one_category(all_boxes[cls_ind],
                                                           coco_cat_id))
        print('Writing results json to {}'.format(res_file))
        with open(res_file, 'w') as fid:
            json.dump(results, fid)

    def evaluate_detections(self, all_boxes, output_dir):
        """Write detections to JSON and, for non-test sets, run COCO eval."""
        res_file = os.path.join(output_dir, ('detections_' +
                                             self.coco_name +
                                             '_results'))
        res_file += '.json'
        self._write_coco_results_file(all_boxes, res_file)
        # Only do evaluation on non-test sets
        if self.coco_name.find('test') == -1:
            self._do_detection_eval(res_file, output_dir)
        # Optionally cleanup results json file
| 37.933754
| 97
| 0.564906
|
4a069bd0bf2658d577a41ff59018261fc3333194
| 2,067
|
py
|
Python
|
azure-batch/azure/batch/models/pool_disable_auto_scale_options_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | null | null | null |
azure-batch/azure/batch/models/pool_disable_auto_scale_options_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-batch/azure/batch/models/pool_disable_auto_scale_options_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE(review): AutoRest-generated model — manual edits will be lost on
# regeneration.  The empty '' serialization keys are how generated
# *-Options parameter models look; presumably the values are sent as
# request headers/query parameters rather than via Model serialization —
# confirm against the generated client before changing them.
class PoolDisableAutoScaleOptions(Model):
    """Additional parameters for disable_auto_scale operation.

    :param timeout: The maximum time that the server can spend processing the
     request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly if
     you are calling the REST API directly.
    :type ocp_date: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
        super(PoolDisableAutoScaleOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
| 43.978723
| 143
| 0.655539
|
4a069c3bae5f3d35e61bb0f77bcce0e7f62c48dd
| 139
|
py
|
Python
|
experiments/simclrv2_florian/__init__.py
|
lxuechen/swissknife
|
43dbd36f1e998ebe29c0b85fafd0de765dfb5de8
|
[
"MIT"
] | 1
|
2022-02-25T00:00:30.000Z
|
2022-02-25T00:00:30.000Z
|
experiments/simclrv2_florian/__init__.py
|
lxuechen/swissknife
|
43dbd36f1e998ebe29c0b85fafd0de765dfb5de8
|
[
"MIT"
] | null | null | null |
experiments/simclrv2_florian/__init__.py
|
lxuechen/swissknife
|
43dbd36f1e998ebe29c0b85fafd0de765dfb5de8
|
[
"MIT"
] | null | null | null |
"""
Convert TF checkpoint to PyTorch.
First run download.py, then convert.py.
Note:
`sk` correspond to selective kernel network.
"""
| 15.444444
| 48
| 0.71223
|
4a069c9508bcc7a43cd7892b241f3cf7495dc991
| 10,779
|
py
|
Python
|
spotify_bot.py
|
cartertemm/teamtalk-spotify-bot
|
58dd169592153635203f8a319419dd49702a75cf
|
[
"MIT"
] | 2
|
2021-08-29T15:24:40.000Z
|
2022-01-17T15:49:04.000Z
|
spotify_bot.py
|
cartertemm/teamtalk-spotify-bot
|
58dd169592153635203f8a319419dd49702a75cf
|
[
"MIT"
] | 2
|
2020-08-26T23:32:45.000Z
|
2021-07-19T20:37:57.000Z
|
spotify_bot.py
|
cartertemm/teamtalk-spotify-bot
|
58dd169592153635203f8a319419dd49702a75cf
|
[
"MIT"
] | 1
|
2021-08-11T03:23:00.000Z
|
2021-08-11T03:23:00.000Z
|
"""spotify_bot.py
A TeamTalk controller for Spotify.
Only works with premium accounts.
Also requires another TeamTalk instance capable of routing system audio. The
process for doing so is out of the scope of these notes. It is my hope that
this will someday be unnecessary, however.
Consult the readme for comprehensive setup instructions.
Basically just run this script, edit the generated configuration file, then run again.
"""
help = """Every parameter enclosed in brackets ([]) is optional
play [uri]: Starts playback. If uri is provided, starts playing from the specified spotify link, can start with http:// or spotify:.
pause: Pauses playback.
previous/next: Cycles between tracks.
volume percentage: Sets the output volume (between 0 and 100).
track query: Searches for and plays a track.
artist query: Searches for and plays tracks by an artist.
playlist query: Searches for and plays tracks from a playlist.
queue query: Searches for and adds the next track to the playback queue.
shuffle yes/on/1|no/off/0: Enables or disables shuffling.
playing: Displays info about the currently playing track.
If on mac OS, send the word mac to the channel to receive a PM"""
## authentication
# NOTE(review): OAuth client credentials are hard-coded in source, so anyone
# with the repository can impersonate this Spotify application.  Consider
# moving them into the [advanced] config section (already supported below)
# or environment variables.
client_id = "52569438780b4497bdd72a09954d1030"
client_secret = "f090e040c95842e3a31f26d86bf627a8"
redirect_uri = "http://localhost:9999"  # local OAuth redirect target
scopes = "user-modify-playback-state user-read-currently-playing user-read-playback-state user-read-private"
cache_path = "spotify.cache"  # spotipy token cache file
client_name = "TeamTalkBotClient"  # client name reported to the TeamTalk server
import sys
import os.path
import datetime
import time
import json
import configparser
import spotipy
import teamtalk
import utils
from utils import *
from spotipy.oauth2 import SpotifyOAuth
spec = """# TeamTalk Spotify Bot Configuration
# Sections starting with # are comments and not processed directly
# Uncomment (remove the # from) every line that is not an explanation
[general]
# The server's address
# host = example.com
# The server's TCP port
# port = 10333
# Login Info
# nickname = Spotify Bot
# username = me
# password = password
# a list of users disallowed from sending messages for abuse prevention
# example: ["bob", "Alice"]
# banned_users = []
# The case sensative name, or ID, of a channel to join on login
# /Stereo/ or 1 are valid
# autojoin =
# The password for the channel that will be automatically joined
# autojoin_pass =
[advanced]
# Only edit if you know what you're doing, as things can break easily
# client_id =
# client_secret =
# redirect_uri =
# cache_path =
"""
# Globals
config = None
## Config sections for convenience
general = None
advanced = None
banned_users = None
t = teamtalk.TeamTalkServer()
def load_config(file):
    """Load and validate the bot configuration from *file*.

    Populates the module-level ``config``, ``general``, ``advanced`` and
    ``banned_users`` globals.  If *file* does not exist, a commented template
    (``spec``) is written to it and the process exits so the user can edit it.
    Exits with status 1 on any validation failure.
    """
    global config, general, advanced, banned_users
    config = configparser.ConfigParser()
    # Bug fix: the original caught ``configobj.Error`` (a module never
    # imported) around ``ConfigParser()``, which cannot raise; parse errors
    # actually surface from ``config.read``.
    try:
        loaded = config.read(file)
    except configparser.Error as exc:
        print("There was an error validating the config")
        print(exc)
        sys.exit(1)
    if not loaded:
        print(file + " does not exist")
        # messy but gets the job done for now
        with open(file, "w") as f:
            f.write(spec)
        print("Created a configuration file")
        print("Edit it and try running again")
        sys.exit(1)
    if not "general" in config.sections() or not "advanced" in config.sections():
        print("Malformed configuration file. Fix or delete it and try again.")
        sys.exit(1)
    general = config["general"]
    advanced = config["advanced"]
    # check for only the bare minimum required to run
    if (
        not general.get("host")
        or not general.get("port")
        or not general.get("nickname")
    ):
        print("Some required values were not found in the configuration. Fix or delete it and try again.")
        sys.exit(1)
    # Expand to a list
    # hack: Since configparser doesn't support lists automatically, try feeding to json
    banned_users = json.loads(general.get("banned_users", "[]"))
class SpotifyBot:
    """Implements the chat-facing Spotify commands on top of spotipy.

    Each ``command_<name>`` method receives the remainder of the chat message
    as a single string and returns a reply string (or None for silence); the
    TeamTalk message handler dispatches to them by name.  Command methods are
    wrapped in ``preserve_tracebacks`` (from the project's ``utils`` module)
    so API errors are reported rather than crashing the bot.
    """

    def __init__(self):
        self.auth = None       # SpotifyOAuth manager (set by init_spotify)
        self.spotify = None    # spotipy.Spotify client (set by init_spotify)
        self.device = None     # chosen playback device record
        self.device_id = None  # id of the chosen playback device

    def init_spotify(self):
        """Build the OAuth manager and spotipy client from configuration.

        Values from the [advanced] config section take precedence over the
        module-level defaults.
        """
        self.auth = SpotifyOAuth(
            client_id=advanced.get("client_id", client_id),
            client_secret=advanced.get("client_secret", client_secret),
            redirect_uri=advanced.get("redirect_uri", redirect_uri),
            scope=scopes,
            cache_path=advanced.get("cache_path", cache_path),
        )
        self.spotify = spotipy.Spotify(auth_manager=self.auth)

    def find_device(self):
        """Blocks until a device becomes available for playback."""
        devices = None
        while not devices:
            devices = self.spotify.devices()["devices"]
            time.sleep(1)
        return devices

    def select_device(self):
        """Selects a device to be used for playback"""
        devices = self.spotify.devices()["devices"]
        if not devices:
            print("No playback devices found")
            print("Waiting for one to become available")
            devices = self.find_device()
        items = []
        for device in devices:
            items.append(device["name"] + ": " + str(device["volume_percent"]) + "%")
        # menu() is a project helper that prompts on the console
        i = menu("Select a device: ", items)
        self.device = devices[i]
        self.device_id = self.device["id"]
        print(self.device["name"] + " selected")

    def get_info(self, track):
        """Return "artists - name (elapsed - duration)" for a track record."""
        # Playback responses nest the track under "item"; bare track objects
        # (e.g. from search results) are used directly.
        if "item" in track:
            item = track["item"]
        else:  # not current_user_playing_track
            item = track
        name = item["name"]
        # present if the passed track was obtained from a playback method
        if "progress_ms" in track:
            elapsed = datetime.timedelta(seconds=int(track["progress_ms"] / 1000))
        else:
            elapsed = "0:00:00"
        duration = datetime.timedelta(seconds=int(item["duration_ms"] / 1000))
        artists = [i["name"] for i in item["artists"]]
        artists = ", ".join(artists)
        return f"{artists} - {name} ({elapsed} - {duration})"

    @preserve_tracebacks
    def command_play(self, val=None):
        """Resume playback, or start playing the given Spotify URI/URL."""
        if val:
            # start_playback doesn't support passing tracks by context_uri for some dumb reason
            if is_track(val):
                self.spotify.start_playback(uris=[val], device_id=self.device_id)
            else:
                self.spotify.start_playback(context_uri=val, device_id=self.device_id)
        else:
            self.spotify.start_playback(device_id=self.device_id)
        return "playing"

    @preserve_tracebacks
    def command_pause(self, val=None):
        """Pause playback on the selected device."""
        self.spotify.pause_playback(device_id=self.device_id)
        return "paused"

    @preserve_tracebacks
    def command_previous(self, val=None):
        """Skip back to the previous track (no chat reply)."""
        self.spotify.previous_track(device_id=self.device_id)

    @preserve_tracebacks
    def command_next(self, val=None):
        """Skip to the next track (no chat reply)."""
        self.spotify.next_track(device_id=self.device_id)

    @preserve_tracebacks
    def command_volume(self, val):
        """Report the current volume, or set it to *val* percent."""
        if not val:
            return str(self.spotify.current_playback()["device"]["volume_percent"]) + "%"
        val = val.replace("%", "")
        if not val.isdigit():
            return "percentage argument must be a digit"
        val = int(val)
        if val < 0 or val > 100:
            return "percentage must be between 0 and 100, inclusive"
        self.spotify.volume(val, device_id=self.device_id)
        return "volume set"

    @preserve_tracebacks
    def command_artist(self, val):
        """Search for an artist and start playing their tracks."""
        results = self.spotify.search(q=val, type="artist")
        items = results["artists"]["items"]
        if len(items) > 0:
            item = items[0]
            self.spotify.start_playback(device_id=self.device_id, context_uri=item["uri"])
            return "playing " + item["name"]
        else:
            return "unable to find an artist by that name"

    @preserve_tracebacks
    def command_track(self, val):
        """Search for a track by name and play the first match."""
        results = self.spotify.search(q=val, type="track")
        items = results["tracks"]["items"]
        if len(items) > 0:
            # context_uri doesn't accept tracks for some reason
            item = items[0]
            self.spotify.start_playback(device_id=self.device_id, uris=[item["uri"]])
            return "playing " + self.get_info(item)
        else:
            return "unable to find a track by that name"

    @preserve_tracebacks
    def command_playlist(self, val):
        """Search for a playlist and play it; silent when nothing matches."""
        results = self.spotify.search(q=val, type="playlist")
        playlists = results["playlists"]["items"]
        if len(playlists) > 0:
            item = playlists[0]
            self.spotify.start_playback(context_uri=item["uri"], device_id=self.device_id)
            return f"playing {item['name']} by {item['owner']['display_name']}\n{item['description']}"

    @preserve_tracebacks
    def command_queue(self, val):
        """Add a track (URI or search term) to the playback queue."""
        if not val:
            return "no track provided"
        item = None
        if not is_track(val):
            results = self.spotify.search(q=val, type="track")
            items = results["tracks"]["items"]
            if len(items) > 0:
                item = items[0]
                val = item["uri"]
            else:
                return "unable to find a track by that name"
        self.spotify.add_to_queue(val, device_id=self.device_id)
        if not item:
            # val was already a URI; fetch the track details for the reply
            item = self.spotify.track(val)
        return "queued " + self.get_info(item)

    @preserve_tracebacks
    def command_playing(self, val=None):
        """Describe the currently playing track."""
        track = self.spotify.current_user_playing_track()
        return self.get_info(track)

    @preserve_tracebacks
    def command_shuffle(self, val):
        """Enable or disable shuffle; *val* is a yes/no style string."""
        if val == "":
            return "value must be yes/no, on/off, etc"
        state = to_bool(val)
        self.spotify.shuffle(state, device_id=self.device_id)
        if state:
            return "now shuffling"
        else:
            return "shuffling disabled"
@t.subscribe("messagedeliver")
def message(server, params):
    """Dispatch an incoming TeamTalk message to the matching bot command."""
    msg_text = params["content"]
    sender = server.get_user(params["srcuserid"])
    sender_nick = sender["nickname"]
    sender_name = sender["username"]
    msg_type = params["type"]
    # Channel messages only get the "mac" hint; everything else is PM-driven.
    if msg_type == teamtalk.CHANNEL_MSG and msg_text.lower().strip() == "mac":
        server.user_message(sender, "Ok. Type help for a list of commands.")
    if msg_type != teamtalk.USER_MSG:
        return  # nothing to do
    if sender_name in banned_users:
        server.user_message(sender, "You do not currently have permission to use this bot")
        return
    words = str(msg_text).split(" ")
    command = words[0].lower()
    # our command parsing assumes a single message needs to be sent
    # due to TeamTalk message size constraints, we need to split these up
    if command == "help":
        for line in help.splitlines():
            # spam
            server.user_message(sender, line)
        return
    handler = getattr(sp, "command_" + command, None)
    if not callable(handler):
        server.user_message(sender, "unrecognized command, type help for options")
        return
    reply = handler(" ".join(words[1:]))
    if reply:
        server.user_message(sender, reply)
def main():
    """Entry point: load config, set up Spotify, connect, and serve forever."""
    global sp
    path = "config.ini"
    # An alternate config path may be given as the first CLI argument.
    if len(sys.argv) > 1:
        path = sys.argv[1]
    if not os.path.isfile(path):
        print("The provided configuration file does not exist")
        print("Dry run for config.ini")
        sys.exit(1)
    load_config(path)
    sp = SpotifyBot()
    sp.init_spotify()
    sp.select_device()
    print("Connecting to server...")
    t.set_connection_info(general.get("host"), general.get("port"))
    t.connect()
    t.login(
        general.get("nickname"),
        general.get("username", ""),
        general.get("password", ""),
        client_name,
    )
    print("login success")
    autojoin = general.get("autojoin")
    autojoin_pass = general.get("autojoin_pass", "")
    if autojoin != None:
        # ID
        # A purely numeric value is treated as a channel ID, otherwise a path.
        if autojoin.isdigit():
            autojoin = int(autojoin)
        t.join(autojoin, password=autojoin_pass)
    # Block forever, polling the server for events once per second.
    t.handle_messages(1)
# the Spotify bot object
# (populated by main() once configuration and device selection succeed;
# the message handler above reads it at dispatch time)
sp = None

if __name__ == "__main__":
    main()
| 30.709402
| 132
| 0.71964
|
4a069cb542867ea91f69fe9ad3111949a9cce754
| 1,733
|
py
|
Python
|
web/addons/mail/controllers/main.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/mail/controllers/main.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/mail/controllers/main.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
import base64
import psycopg2
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import content_disposition
import mimetypes
class MailController(http.Controller):
    """HTTP endpoints for downloading mail attachments and receiving inbound
    mail from an external SMTP gateway (OpenERP 8 / Python 2 era code)."""
    _cp_path = '/mail'

    @http.route('/mail/download_attachment', type='http', auth='user')
    def download_attachment(self, model, id, method, attachment_id, **kw):
        """Stream one attachment fetched via ``getattr(Model, method)``.

        :param model: name of the model exposing the attachment accessor.
        :param id: record id (string, converted to int).
        :param method: model method expected to return a dict with
            ``base64`` (file content) and ``filename`` keys.
        :param attachment_id: attachment record id passed to *method*.
        """
        # FIXME use /web/binary/saveas directly
        Model = request.registry.get(model)
        res = getattr(Model, method)(request.cr, request.uid, int(id), int(attachment_id))
        if res:
            filecontent = base64.b64decode(res.get('base64'))
            filename = res.get('filename')
            # guess_type returns (type, encoding); only the type is used below,
            # falling back to octet-stream when it cannot be guessed.
            content_type = mimetypes.guess_type(filename)
            if filecontent and filename:
                return request.make_response(
                    filecontent,
                    headers=[('Content-Type', content_type[0] or 'application/octet-stream'),
                             ('Content-Disposition', content_disposition(filename))])
        return request.not_found()

    @http.route('/mail/receive', type='json', auth='none')
    def receive(self, req):
        """ End-point to receive mail from an external SMTP server. """
        # Payload maps database name -> base64-encoded raw message.
        # NOTE(review): str.decode('base64') is Python 2 only.
        dbs = req.jsonrequest.get('databases')
        for db in dbs:
            message = dbs[db].decode('base64')
            try:
                registry = openerp.registry(db)
                with registry.cursor() as cr:
                    mail_thread = registry['mail.thread']
                    mail_thread.message_process(cr, SUPERUSER_ID, None, message)
            except psycopg2.Error:
                # A DB-level failure for one database shouldn't block the rest.
                pass
        return True
| 38.511111
| 93
| 0.617426
|
4a069cc4a7bb291fce7b5f758167d1d9d0e5cacb
| 2,251
|
py
|
Python
|
tools/TweeboParser/token_selection/data_extract.py
|
unititled99/Bella
|
6ec5ec84ef1cf89a5e99c6a5a3ccc7972d77e023
|
[
"MIT"
] | null | null | null |
tools/TweeboParser/token_selection/data_extract.py
|
unititled99/Bella
|
6ec5ec84ef1cf89a5e99c6a5a3ccc7972d77e023
|
[
"MIT"
] | 10
|
2020-01-28T22:16:20.000Z
|
2022-02-09T23:32:01.000Z
|
tools/TweeboParser/token_selection/data_extract.py
|
unititled99/Bella
|
6ec5ec84ef1cf89a5e99c6a5a3ccc7972d77e023
|
[
"MIT"
] | 1
|
2018-05-28T13:21:53.000Z
|
2018-05-28T13:21:53.000Z
|
# Copyright (c) 2013-2014 Lingpeng Kong
# All Rights Reserved.
#
# This file is part of TweeboParser 1.0.
#
# TweeboParser 1.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TweeboParser 1.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TweeboParser 1.0. If not, see <http://www.gnu.org/licenses/>.
# Author: Swabha Swayamdipta, Lingpeng Kong
# /usr/bin/python
import sys, ast, re
def filter_train_data(filename):
sents = []
tags = []
postagseq = []
f = open(filename, "r")
while 1:
line = f.readline()
if not line:
break
line = line.strip()
m = ast.literal_eval(line)
#print m["sent"]
sentence = m["sent"].split(' ')
sents.append(sentence)
posseq = m["pos"]
postags = []
postokens = posseq.split(" ")[:-1]
for token in postokens:
pos = token[-1]
postags.append(pos)
anno = m["anno"]
l = re.sub('\*\*', '', anno)
m = re.sub('\\n', ' ', l)
n = re.sub('[<>(){}]', '', m)
o = re.sub('[\[\]]', '', n)
p = re.sub('\$a', '', o)
q = re.sub('::', '', p)
s = re.sub('\s+', ' ', q)
llist = s.split(' ')
sset = set(llist)
annotation = list(sset)
#print ' '.join(annotation)
yes_no_tags = []
k = 0
for item in sentence:
item = item.strip()
if item in annotation:
tag = '1'
else:
tag = '0'
yes_no_tags.append(tag)
print item+'\t'+tag+'\t'+postags[k]
k += 1
tags.append(yes_no_tags)
print
f.close
return sents, tags
if __name__ == "__main__":
filter_train_data(sys.argv[1])
| 29.618421
| 77
| 0.549534
|
4a069d730eb9a4f748c4ab182ee082c37d475d6c
| 5,479
|
py
|
Python
|
ckine/figures/figure4.py
|
meyer-lab/bi-cytok
|
34bac90b88d53c02e742dec3a5f663734e860f1b
|
[
"MIT"
] | null | null | null |
ckine/figures/figure4.py
|
meyer-lab/bi-cytok
|
34bac90b88d53c02e742dec3a5f663734e860f1b
|
[
"MIT"
] | null | null | null |
ckine/figures/figure4.py
|
meyer-lab/bi-cytok
|
34bac90b88d53c02e742dec3a5f663734e860f1b
|
[
"MIT"
] | null | null | null |
"""
This creates Figure 1, response of bispecific IL-2 cytokines at varing valencies and abundances using binding model.
"""
from .figureCommon import getSetup
from ..imports import importCITE, importReceptors
import pandas as pd
import seaborn as sns
import numpy as np
from copy import copy
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
def makeFigure():
    """Get a list of the axis objects and create a figure"""
    # 2x2 grid on an 8x8-inch canvas: ax[0:2] get the SVM marker-accuracy and
    # marker-abundance plots, ax[2] the receptor conversion-factor bars.
    ax, f = getSetup((8, 8), (2, 2))
    convFactCalc(ax[2])
    CITE_SVM(ax[0:2], "Treg", sampleFrac=0.2)
    return f
def CITE_SVM(ax, targCell, numFactors=10, sampleFrac=0.5):
    """Find surface markers that, paired with CD25, best classify *targCell*.

    Fits an SVM on CD25 alone as a baseline, then on CD25 plus each other
    CITE-seq marker; plots the top *numFactors* marker accuracies (ax[0]) and
    those markers' mean abundance per cell type (ax[1]).

    :param ax: sequence of two matplotlib axes.
    :param targCell: CellType2 label used as the positive class.
    :param numFactors: how many top-scoring markers to display.
    :param sampleFrac: fraction of cells sampled (fixed seed) for speed.
    """
    SVMmod = SVC()
    SVC_DF = importCITE()
    cellToI = SVC_DF.CellType2.unique()
    SVC_DF = SVC_DF.loc[(SVC_DF["CellType2"].isin(cellToI)), :]
    SVC_DF = SVC_DF.sample(frac=sampleFrac, random_state=1)
    cellTypeCol = SVC_DF.CellType2.values
    # Drop the label/identifier columns, keeping only marker measurements.
    SVC_DF = SVC_DF.loc[:, ((SVC_DF.columns != 'CellType1') & (SVC_DF.columns != 'CellType2') & (SVC_DF.columns != 'CellType3') & (SVC_DF.columns != 'Cell'))]
    factors = SVC_DF.columns
    X = StandardScaler().fit_transform(SVC_DF.values)
    CD25col = X[:, np.where(factors == "CD25")].reshape(-1, 1)
    enc = LabelBinarizer()
    y = enc.fit_transform(cellTypeCol)
    TregY = y[:, np.where(enc.classes_ == targCell)].ravel()
    # Baseline accuracy with CD25 alone.
    baselineAcc = SVMmod.fit(CD25col, TregY).score(CD25col, TregY)
    # Accuracy of CD25 combined with each individual marker.
    # (DataFrame.append was removed in pandas 2.0 -- collect rows, then concat;
    #  stray debug prints from the original were dropped.)
    accRows = []
    for marker in factors:
        SVMmod = SVC()
        markerCol = X[:, np.where(factors == marker)]
        CD25MarkX = np.hstack((CD25col, markerCol.reshape(-1, 1)))
        markAcc = SVMmod.fit(CD25MarkX, TregY).score(CD25MarkX, TregY)
        accRows.append(pd.DataFrame({"Markers": [marker], "Accuracy": [markAcc]}))
    AccDF = pd.concat(accRows, ignore_index=True).sort_values(by="Accuracy")
    markers = copy(AccDF.tail(numFactors).Markers.values)
    AccDF.Markers = "CD25 + " + AccDF.Markers
    plot_DF = pd.concat(
        [AccDF.tail(numFactors),
         pd.DataFrame({"Markers": ["CD25 only"], "Accuracy": [baselineAcc]})],
        ignore_index=True)
    sns.barplot(data=plot_DF, x="Markers", y="Accuracy", ax=ax[0])
    ax[0].set(ylim=(0.9, 1))
    ax[0].set_xticklabels(ax[0].get_xticklabels(), rotation=45)
    # Mean abundance of the selected markers in each cell type.
    SVC_DF = importCITE()
    markerRows = []
    for marker in markers:
        for cell in cellToI:
            cellTDF = SVC_DF.loc[SVC_DF["CellType2"] == cell][marker]
            markerRows.append(pd.DataFrame({"Marker": [marker], "Cell Type": cell, "Amount": cellTDF.mean(), "Number": cellTDF.size}))
    markerDF = pd.concat(markerRows, ignore_index=True)
    sns.pointplot(data=markerDF, x="Marker", y="Amount", hue="Cell Type", ax=ax[1], join=False, dodge=True)
    ax[1].set(yscale="log")
    ax[1].set_xticklabels(ax[1].get_xticklabels(), rotation=45)
# Map fine-grained CITE-seq CellType2 labels onto the coarser cell classes
# used by the receptor quantitation data (see convFactCalc).
cellDict = {"CD4 Naive": "Thelper",
            "CD4 CTL": "Thelper",
            "CD4 TCM": "Thelper",
            "CD4 TEM": "Thelper",
            "NK": "NK",
            "CD8 Naive": "CD8",
            "CD8 TCM": "CD8",
            "CD8 TEM": "CD8",
            "Treg": "Treg"}

# Map CITE-seq antibody (CD) names onto receptor names.
markDict = {"CD25": "IL2Ra",
            "CD122": "IL2Rb",
            "CD127": "IL7Ra",
            "CD132": "gc"}
def convFactCalc(ax):
    """Estimate CITE-count -> receptor-count conversion factors per receptor.

    For each marker/cell-type pair, averages CITE-seq counts (weighted by cell
    numbers), then least-squares fits those averages against quantitative
    receptor data, and plots the resulting per-receptor weights on *ax*.
    """
    CITE_DF = importCITE()
    cellToI = ["CD4 TCM", "CD8 Naive", "NK", "CD8 TEM", "CD4 Naive", "CD4 CTL", "CD8 TCM", "Treg", "CD4 TEM"]
    markers = ["CD122", "CD127", "CD25"]
    # Mean marker amount and cell count for every (marker, cell type) pair.
    # (DataFrame.append was removed in pandas 2.0 -- collect rows and concat.)
    markerRows = []
    for marker in markers:
        for cell in cellToI:
            cellTDF = CITE_DF.loc[CITE_DF["CellType2"] == cell][marker]
            markerRows.append(pd.DataFrame({"Marker": [marker], "Cell Type": cell, "Amount": cellTDF.mean(), "Number": cellTDF.size}))
    markerDF = pd.concat(markerRows, ignore_index=True)
    markerDF = markerDF.replace({"Marker": markDict, "Cell Type": cellDict})
    # Collapse to a cell-number-weighted average per coarse cell class.
    weightedRows = []
    for marker in markerDF.Marker.unique():
        for cell in markerDF["Cell Type"].unique():
            subDF = markerDF.loc[(markerDF["Cell Type"] == cell) & (markerDF["Marker"] == marker)]
            wAvg = np.sum(subDF.Amount.values * subDF.Number.values) / np.sum(subDF.Number.values)
            weightedRows.append(pd.DataFrame({"Marker": [marker], "Cell Type": cell, "Average": wAvg}))
    markerDFw = pd.concat(weightedRows, ignore_index=True)
    recDF = importReceptors()
    # Per-receptor least-squares fit of receptor counts vs CITE averages.
    weightRows = []
    for rec in markerDFw.Marker.unique():
        CITEval = np.array([])
        Quantval = np.array([])
        for cell in markerDF["Cell Type"].unique():
            CITEval = np.concatenate((CITEval, markerDFw.loc[(markerDFw["Cell Type"] == cell) & (markerDFw["Marker"] == rec)].Average.values))
            Quantval = np.concatenate((Quantval, recDF.loc[(recDF["Cell Type"] == cell) & (recDF["Receptor"] == rec)].Mean.values))
        weightRows.append(pd.DataFrame({"Receptor": [rec], "Weight": np.linalg.lstsq(np.reshape(CITEval, (-1, 1)), Quantval, rcond=None)[0]}))
    weightDF = pd.concat(weightRows, ignore_index=True)
    sns.barplot(data=weightDF, x="Receptor", y="Weight", ax=ax)
    ax.set(ylim=(0, 1000))
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
| 43.141732
| 158
| 0.630407
|
4a069ef91405ad82fbae1984d9f790e433923a97
| 5,275
|
py
|
Python
|
net.py
|
BinahHu/pytorch-AdaIN
|
7bbc3d11407dccbb3f4aa687177514a9c4d82ace
|
[
"MIT"
] | null | null | null |
net.py
|
BinahHu/pytorch-AdaIN
|
7bbc3d11407dccbb3f4aa687177514a9c4d82ace
|
[
"MIT"
] | null | null | null |
net.py
|
BinahHu/pytorch-AdaIN
|
7bbc3d11407dccbb3f4aa687177514a9c4d82ace
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
from function import adaptive_instance_normalization as adain
from function import calc_mean_std
# AdaIN decoder: maps relu4_1-level features (512 channels) back to RGB.
# Nearest-neighbour upsampling stands in for the encoder's pooling steps, and
# every 3x3 convolution is preceded by reflection padding to avoid border
# artifacts.
decoder = nn.Sequential(
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 256, (3, 3)),
    nn.ReLU(),
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 128, (3, 3)),
    nn.ReLU(),
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 128, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 64, (3, 3)),
    nn.ReLU(),
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 64, (3, 3)),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 3, (3, 3)),
)
# VGG-19 feature extractor (through relu5_4) with reflection padding; Net only
# uses the slice up to relu4_1.  Weights are presumably loaded from a
# pretrained checkpoint elsewhere -- not shown in this file.
vgg = nn.Sequential(
    nn.Conv2d(3, 3, (1, 1)),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(3, 64, (3, 3)),
    nn.ReLU(),  # relu1-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 64, (3, 3)),
    nn.ReLU(),  # relu1-2
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 128, (3, 3)),
    nn.ReLU(),  # relu2-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 128, (3, 3)),
    nn.ReLU(),  # relu2-2
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 256, (3, 3)),
    nn.ReLU(),  # relu3-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-4
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 512, (3, 3)),
    nn.ReLU(),  # relu4-1, this is the last layer used
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-4
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU()  # relu5-4
)
class Net(nn.Module):
    """AdaIN style-transfer network: frozen VGG encoder slices plus a decoder.

    forward() returns the (content loss, style loss) pair for one
    stylization pass, following Huang & Belongie's AdaIN formulation.
    """

    def __init__(self, encoder, decoder):
        super(Net, self).__init__()
        layers = list(encoder.children())
        # Split the encoder at relu1_1 / relu2_1 / relu3_1 / relu4_1.
        bounds = [(0, 4), (4, 11), (11, 18), (18, 31)]
        for idx, (lo, hi) in enumerate(bounds, start=1):
            setattr(self, 'enc_{:d}'.format(idx), nn.Sequential(*layers[lo:hi]))
        self.decoder = decoder
        self.mse_loss = nn.MSELoss()
        # The encoder is a fixed feature extractor; freeze its weights.
        for idx in range(1, 5):
            for param in getattr(self, 'enc_{:d}'.format(idx)).parameters():
                param.requires_grad = False

    def encode_with_intermediate(self, input):
        """Return the relu1_1..relu4_1 feature maps of *input* as a list."""
        feats = []
        current = input
        for idx in range(1, 5):
            current = getattr(self, 'enc_{:d}'.format(idx))(current)
            feats.append(current)
        return feats

    def encode(self, input):
        """Return only the relu4_1 feature map of *input*."""
        out = input
        for idx in range(1, 5):
            out = getattr(self, 'enc_{:d}'.format(idx))(out)
        return out

    def calc_content_loss(self, input, target):
        """MSE between a generated feature map and its (detached) target."""
        assert (input.size() == target.size())
        assert (target.requires_grad is False)
        return self.mse_loss(input, target)

    def calc_style_loss(self, input, target):
        """MSE between channel-wise mean/std statistics of two feature maps."""
        assert (input.size() == target.size())
        assert (target.requires_grad is False)
        in_mean, in_std = calc_mean_std(input)
        tgt_mean, tgt_std = calc_mean_std(target)
        return self.mse_loss(in_mean, tgt_mean) + self.mse_loss(in_std, tgt_std)

    def forward(self, content, style, alpha=1.0):
        """Stylize *content* with *style* and return (content, style) losses.

        :param alpha: content/style interpolation factor in [0, 1].
        """
        assert 0 <= alpha <= 1
        style_feats = self.encode_with_intermediate(style)
        content_feat = self.encode(content)
        stylized = adain(content_feat, style_feats[-1])
        stylized = alpha * stylized + (1 - alpha) * content_feat
        generated = self.decoder(stylized)
        gen_feats = self.encode_with_intermediate(generated)
        loss_c = self.calc_content_loss(gen_feats[-1], stylized)
        loss_s = self.calc_style_loss(gen_feats[0], style_feats[0])
        for level in range(1, 4):
            loss_s = loss_s + self.calc_style_loss(gen_feats[level], style_feats[level])
        return loss_c, loss_s
| 34.253247
| 76
| 0.568531
|
4a06a04028e13b0b683d4333986462c9da674d63
| 2,694
|
py
|
Python
|
video-generator/src/image/image_processor.py
|
charlie6/product_video_ads
|
d155a86d4786fb5f0d0e57d2f696bb2d1e12dc36
|
[
"Apache-2.0"
] | null | null | null |
video-generator/src/image/image_processor.py
|
charlie6/product_video_ads
|
d155a86d4786fb5f0d0e57d2f696bb2d1e12dc36
|
[
"Apache-2.0"
] | null | null | null |
video-generator/src/image/image_processor.py
|
charlie6/product_video_ads
|
d155a86d4786fb5f0d0e57d2f696bb2d1e12dc36
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages images processing tasks sequentially."""
import traceback
from datetime import datetime
import log
from ffmpeg import util
logger = log.getLogger()
class ImageProcessor():
    """Generates preview images one at a time and uploads them to storage."""

    def __init__(self, storage, generator, cloud_storage, cloud_preview=False):
        self.storage = storage
        self.generator = generator
        self.cloud_storage = cloud_storage
        # When True, preview uploads go to cloud storage instead of local.
        self.cloud_preview = cloud_preview

    def process_task(self, row, config, preview_only=False):
        """Generate the image for *row*, upload it, and return its storage ID.

        Logs and returns None if anything goes wrong (best-effort behaviour).
        """
        logger.info('[Image Processor] Starting to process row %s...', row)
        try:
            # Generate the image locally first.
            local_image = self.generate_single_image(row, config)
            # Upload to the configured preview target and retrieve the ID.
            target = self.cloud_storage if self.cloud_preview else self.storage
            image_id = target.upload_to_preview(local_image)
            # The local copy is no longer needed once uploaded.
            self.storage.delete_file(local_image)
            logger.info('Row %s processed successfully', row)
            return image_id
        except Exception as e:
            logger.error([e, traceback.format_exc()])
            logger.error('Failed processing row: %s', {
                'row': row,
                'error_type': type(e).__name__,
                'error_string': str(e)
            })

    def generate_single_image(self, row, config):
        """Render the base image for *row* with its configured overlays."""
        image_overlays, text_overlays = util.convert_configs_to_format(
            config['configs'],
            config['products_data'],
            self.storage,
            self.cloud_storage
        )
        source_path = self.storage.get_absolute_path(config['base_file'])
        destination_path = self.storage.get_absolute_output_video_path(
            row,
            self._generate_image_name(config['base_file']))
        return self.generator.process_image(image_overlays,
                                            text_overlays,
                                            source_path,
                                            destination_path)

    def _generate_image_name(self, input_image_file):
        """Timestamped file name that keeps the original file's extension."""
        extension = input_image_file.split('.')[-1]
        return datetime.now().strftime('%Y%m%d%H%M%S') + '.' + extension
| 31.325581
| 90
| 0.677803
|
4a06a14466e0d639c1bd68dbd45e4595ee0b0111
| 7,971
|
py
|
Python
|
resources/lib/sync_by_frame_rate.py
|
gade01/script.sublissimo
|
8a51a7617a89ad7d35bc9882b958a511e25926a5
|
[
"MIT"
] | 1
|
2022-03-20T15:54:43.000Z
|
2022-03-20T15:54:43.000Z
|
resources/lib/sync_by_frame_rate.py
|
weblate/script.sublissimo
|
c2f89e43fa365523dd28e5c9779b5ba6cd461b24
|
[
"MIT"
] | 3
|
2021-04-24T21:16:28.000Z
|
2021-06-10T11:51:32.000Z
|
resources/lib/sync_by_frame_rate.py
|
weblate/script.sublissimo
|
c2f89e43fa365523dd28e5c9779b5ba6cd461b24
|
[
"MIT"
] | 3
|
2021-05-02T13:45:22.000Z
|
2021-06-15T01:16:57.000Z
|
from __future__ import division
import xbmc
import xbmcgui
import sys
import xbmcaddon
import logging
import xbmcvfs
from contextlib import closing
from . import script
from .subtitle import Subtitle
ADDON = xbmcaddon.Addon()
__addon__ = xbmcaddon.Addon()
_ = __addon__.getLocalizedString
logger = logging.getLogger(ADDON.getAddonInfo('id'))
class SyncWizardFrameRate(xbmc.Player):
    def __init__ (self):
        # Subclass xbmc.Player so the wizard can control playback and swap
        # the active subtitle file on the fly.
        xbmc.Player.__init__(self)
        self.proper_exit = False  # True once we stopped playback on purpose
        self.flag = False  # cleared when rearrange() is entered from pause
    def add(self, subtitlefile, filename):
        """Attach the subtitle lines and their on-disk filename to the wizard."""
        self.proper_exit = False
        self.subtitlefile = subtitlefile  # raw lines of the loaded .srt file
        self.filename = filename  # path of the original .srt file
        self.new_subtitlefile = []
    def get_frame_rate(self):
        """Show the video's FPS as reported by Kodi, then reopen the menu."""
        self.frame_rate = xbmc.getInfoLabel('Player.Process(VideoFPS)')
        xbmcgui.Dialog().ok(_(32106), _(32120) + str(self.frame_rate))
        self.give_frame_rate(True)
    def delete_temp_file(self):
        """Remove the temporary preview subtitle file and stop playback."""
        temp_file = self.filename[:-4] + "_temp.srt"
        if xbmcvfs.exists(temp_file):
            xbmcvfs.delete(temp_file)
        # Mark the stop as intentional so exit handling can tell it apart.
        self.proper_exit = True
        self.stop()
    def write_and_display_temp_file(self, new_subtitlefile, temp):
        """Write *new_subtitlefile* to disk and activate it as the subtitle.

        When *temp* is True a "_temp.srt" preview file is written and a
        confirmation dialog shown; otherwise the preview file is removed and
        the final "_edited.srt" file is written.
        """
        if temp:
            new_file_name = self.filename[:-4] + "_temp.srt"
        else:
            self.delete_temp_file()
            new_file_name = self.filename[:-4] + "_edited.srt"
        with closing(xbmcvfs.File(new_file_name, 'w')) as fo:
            fo.write("".join(new_subtitlefile))
        self.new_subtitlefile = new_subtitlefile
        self.setSubtitles(new_file_name)
        if temp:
            # Ask the user to verify sync before committing the change.
            frame_rate_input = xbmcgui.Dialog().ok(_(32050),_(32102))
def rearrange(self, new_factor, from_pause):
if from_pause:
self.flag = False
cur_sub = Subtitle(self.subtitlefile)
old_starting_time, old_ending_time = cur_sub.make_timelines_decimal()
old_start_timestamp = script.make_timelines_classical(old_starting_time)
old_ending_timestamp = script.make_timelines_classical(old_ending_time)
new_start_timestamp = script.make_timelines_classical(new_factor * old_starting_time)
new_ending_timestamp = script.make_timelines_classical(new_factor * old_ending_time)
res = xbmcgui.Dialog().yesno(_(32107), _(32108) + str(old_start_timestamp)
+ "\n" + _(32109) + str(old_ending_timestamp)
+ "\n" + _(34110) + str(new_start_timestamp)
+ "\n" + _(32110) + str(new_ending_timestamp)
+ "\n", yeslabel=_(32012), nolabel= _(32008))
if not res:
self.give_frame_rate(False)
else:
new_subtitlefile = cur_sub.create_new_times(False, new_factor, 0)
self.write_and_display_temp_file(new_subtitlefile, True)
def give_frame_rate(self, from_pause):
# get frame_rate from video, calculate manually, Exit to main menu,
options = ["23.976 --> 25.000", "25.000 --> 23.976", "24.000 --> 25.000", "25.000 --> 24.000",
"23.976 --> 24.000", "24.000 --> 23.976", _(32104), _(32112), _(32078)]
# Video frame rate
menuchoice = xbmcgui.Dialog().select(_(32105), options)
if menuchoice == 0:
chosen_factor = (25/23.976)
self.rearrange(chosen_factor, from_pause)
if menuchoice == 1:
chosen_factor = (23.976/25)
self.rearrange(chosen_factor, from_pause)
if menuchoice == 2:
chosen_factor = (25/24)
self.rearrange(chosen_factor, from_pause)
if menuchoice == 3:
chosen_factor = (24/25)
self.rearrange(chosen_factor, from_pause)
if menuchoice == 4:
chosen_factor = (24/23.976)
self.rearrange(chosen_factor, from_pause)
if menuchoice == 5:
chosen_factor = (23.976/24)
self.rearrange(chosen_factor, from_pause)
if menuchoice == 6:
self.get_frame_rate()
if menuchoice == 7:
xbmcgui.Dialog().ok(_(32114), _(32115))
response = xbmcgui.Dialog().input(_(32113))
calculated_factor = eval(str(response))
self.rearrange(calculated_factor, from_pause)
if menuchoice == 8 or menuchoice == -1:
self.stop()
script.show_dialog(self.subtitlefile, self.filename)
def onPlayBackPaused(self):
if not self.proper_exit:
choice = xbmcgui.Dialog().contextmenu([_(32074), _(32100), _(31000), _(32101), _(32096), _(32098)])
if choice == 0 or choice == -1:
self.flag = False
if choice == 1:
self.give_frame_rate(True)
#self.flag = False
if choice == 2:
xbmcgui.Dialog().multiselect(_(32010), self.new_subtitlefile)
if choice == 3:
self.proper_exit = True
self.flag = True
script.save_the_file(self.new_subtitlefile, self.filename, True)
if choice == 4:
self.proper_exit = True
self.stop()
if self.new_subtitlefile:
self.delete_temp_file()
script.show_dialog(self.new_subtitlefile, self.filename)
else:
self.delete_temp_file()
script.show_dialog(self.subtitlefile, self.filename)
if choice == 5:
self.proper_exit = True
self.delete_temp_file()
self.stop()
script.show_dialog(self.subtitlefile, self.filename)
if not self.flag:
self.pause()
self.flag = True
def onPlayBackStopped(self):
if not self.proper_exit:
choice = xbmcgui.Dialog().contextmenu([_(32096), _(32097), _(32098), _(32099)])
if choice == 0:
if self.new_subtitlefile:
self.delete_temp_file()
script.show_dialog(self.new_subtitlefile, self.filename)
else:
self.delete_temp_file()
script.show_dialog(self.subtitlefile, self.filename)
if choice == 1:
self.delete_temp_file()
script.save_the_file(self.new_subtitlefile, self.filename)
#self.write_and_display_temp_file(self.new_subtitlefile, False)
if choice == 2 or choice == -1:
self.delete_temp_file()
# self.proper_exit = True
script.show_dialog(self.subtitlefile, self.filename)
if choice == 3:
self.delete_temp_file()
# self.proper_exit = True
script.exiting(self.new_subtitlefile, self.filename)
def onPlayBackEnded(self):
if not self.proper_exit:
choice = xbmcgui.Dialog().contextmenu([_(32096), _(32097), _(32098), _(32099)])
if choice == 0:
if self.new_subtitlefile:
self.delete_temp_file()
script.show_dialog(self.new_subtitlefile, self.filename)
else:
self.delete_temp_file()
script.show_dialog(self.subtitlefile, self.filename)
if choice == 1:
self.delete_temp_file()
script.save_the_file(self.new_subtitlefile, self.filename)
#self.write_and_display_temp_file(self.new_subtitlefile, False)
if choice == 2 or choice == -1:
self.delete_temp_file()
# self.proper_exit = True
script.show_dialog(self.subtitlefile, self.filename)
if choice == 3:
self.delete_temp_file()
# self.proper_exit = True
script.exiting(self.new_subtitlefile, self.filename)
| 42.854839
| 111
| 0.577594
|
4a06a3057c94492a6999701f3fe3e2d199d1f3f1
| 2,766
|
py
|
Python
|
app/models.py
|
Soniakoi/Blog-Post.
|
a1e918849bdf7c961f6817bf47ccd70a5c3d65ef
|
[
"MIT"
] | null | null | null |
app/models.py
|
Soniakoi/Blog-Post.
|
a1e918849bdf7c961f6817bf47ccd70a5c3d65ef
|
[
"MIT"
] | null | null | null |
app/models.py
|
Soniakoi/Blog-Post.
|
a1e918849bdf7c961f6817bf47ccd70a5c3d65ef
|
[
"MIT"
] | null | null | null |
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email = db.Column(db.String(255),unique = True,index = True)
blog = db.relationship('Blog',backref = 'user',lazy = "dynamic")
bio = db.Column(db.String(255))
password_hash = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
# pass_secure = db.Column(db.String(255))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
# def set_password(self,password):
# self.password_hash = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.password_hash,password)
def __repr__(self):
return f'User {self.username}'
class Blog(db.Model):
__tablename__ = 'blogs'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String())
blog_content = db.Column(db.String())
posted = db.Column(db.DateTime, nullable=False, default = datetime.utcnow)
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
def save_blog(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_all_blogs(cls):
blogs = Blog.query.order_by('id').all()
return blogs
@classmethod
def get_single_blog(cls,id):
blog = Blog.query.filter_by(id=id).first()
return blog
class Comment(db.Model):
__tablename__='comments'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
email = db.Column(db.String())
comment_content = db.Column(db.String())
date_comment = db.Column(db.DateTime, nullable=False, default = datetime.utcnow)
blog_id = db.Column(db.Integer, db.ForeignKey('blogs.id'))
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_blog_comments(cls,id):
comments = Comment.query.filter_by(blog_id=id).order_by('id').all()
return comments
@classmethod
def get_single_comment(cls,id_blog,id):
comment = Comment.query.filter_by(blog_id=id_blog,id=id).first()
return comment
| 28.8125
| 84
| 0.67209
|
4a06a3a9548abd3991b0033992dce2e17991048a
| 233
|
py
|
Python
|
neurosynth/version.py
|
chrisfilo/Neurosynth
|
80a9438834c685d381ad45b078dc4f5ac2112cec
|
[
"MIT"
] | null | null | null |
neurosynth/version.py
|
chrisfilo/Neurosynth
|
80a9438834c685d381ad45b078dc4f5ac2112cec
|
[
"MIT"
] | null | null | null |
neurosynth/version.py
|
chrisfilo/Neurosynth
|
80a9438834c685d381ad45b078dc4f5ac2112cec
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
"""Specifies current version of NeuroSynth to be used by setup.py and __init__.py
"""
__version__ = '0.3.3'
| 29.125
| 92
| 0.669528
|
4a06a45d982bd3190ed0be1543771e36e91caec6
| 7,924
|
py
|
Python
|
David and Pooja/++Validating Linked Mods/Python-3.0/Lib/multiprocessing/process.py
|
LinkedModernismProject/web_code
|
4cf6bf53d5c3249e52a75f0a3f57d106e31daf9e
|
[
"Apache-2.0"
] | 1
|
2015-05-21T23:47:54.000Z
|
2015-05-21T23:47:54.000Z
|
front-end/testsuite-python-lib/Python-3.1/Lib/multiprocessing/process.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2015-10-29T20:51:31.000Z
|
2015-10-29T20:51:31.000Z
|
front-end/testsuite-python-lib/Python-3.1/Lib/multiprocessing/process.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2019-04-11T11:27:01.000Z
|
2019-04-11T11:27:01.000Z
|
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
The class is analagous to `threading.Thread`
'''
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = next(_current_process._counter)
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, str), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
@property
def authkey(self):
return self._authkey
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
Return indentifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from . import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
if sys.stdin is not None:
try:
os.close(sys.stdin.fileno())
except (OSError, ValueError):
pass
_current_process = self
util._finalizer_registry.clear()
util._run_after_forkers()
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit as e:
if not e.args:
exitcode = 1
elif type(e.args[0]) is int:
exitcode = e.args[0]
else:
sys.stderr.write(e.args[0] + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
| 26.590604
| 79
| 0.575467
|
4a06a496531b67d0d0e58a59a5d49139a08c2793
| 27,248
|
py
|
Python
|
sympy/physics/mechanics/functions.py
|
lidavidm/sympy
|
971aa94ee6d0774eacfb4aed6965195c4a59e104
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/mechanics/functions.py
|
lidavidm/sympy
|
971aa94ee6d0774eacfb4aed6965195c4a59e104
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/mechanics/functions.py
|
lidavidm/sympy
|
971aa94ee6d0774eacfb4aed6965195c4a59e104
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division
__all__ = ['cross',
'dot',
'express',
'outer',
'inertia',
'mechanics_printing',
'mprint',
'msprint',
'mpprint',
'mlatex',
'kinematic_equations',
'inertia_of_point_mass',
'partial_velocity',
'linear_momentum',
'angular_momentum',
'kinetic_energy',
'potential_energy',
'Lagrangian']
from sympy.physics.mechanics.essential import (Vector, Dyadic, ReferenceFrame,
MechanicsStrPrinter,
MechanicsPrettyPrinter,
MechanicsLatexPrinter,
dynamicsymbols)
from sympy.physics.mechanics.particle import Particle
from sympy.physics.mechanics.rigidbody import RigidBody
from sympy.physics.mechanics.point import Point
from sympy import sympify, diff, sin, cos, Matrix
from sympy.core.basic import S
def cross(vec1, vec2):
"""Cross product convenience wrapper for Vector.cross(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Cross product is between two vectors')
return vec1 ^ vec2
cross.__doc__ += Vector.cross.__doc__
def dot(vec1, vec2):
"""Dot product convenience wrapper for Vector.dot(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Dot product is between two vectors')
return vec1 & vec2
dot.__doc__ += Vector.dot.__doc__
def express(vec, frame, frame2=None):
"""Express convenience wrapper"""
if isinstance(vec, Dyadic):
return vec.express(frame, frame2)
else:
return frame.express(vec)
express.__doc__ += Vector.express.__doc__
def outer(vec1, vec2):
"""Outer product convenience wrapper for Vector.outer():\n"""
if not isinstance(vec1, Vector):
raise TypeError('Outer product is between two Vectors')
return vec1 | vec2
outer.__doc__ += Vector.outer.__doc__
def inertia(frame, ixx, iyy, izz, ixy=0, iyz=0, izx=0):
"""Simple way to create inertia Dyadic object.
If you don't know what a Dyadic is, just treat this like the inertia
tensor. Then, do the easy thing and define it in a body-fixed frame.
Parameters
==========
frame : ReferenceFrame
The frame the inertia is defined in
ixx : Sympifyable
the xx element in the inertia dyadic
iyy : Sympifyable
the yy element in the inertia dyadic
izz : Sympifyable
the zz element in the inertia dyadic
ixy : Sympifyable
the xy element in the inertia dyadic
iyz : Sympifyable
the yz element in the inertia dyadic
izx : Sympifyable
the zx element in the inertia dyadic
Examples
========
>>> from sympy.physics.mechanics import ReferenceFrame, inertia
>>> N = ReferenceFrame('N')
>>> inertia(N, 1, 2, 3)
(N.x|N.x) + 2*(N.y|N.y) + 3*(N.z|N.z)
"""
if not isinstance(frame, ReferenceFrame):
raise TypeError('Need to define the inertia in a frame')
ol = sympify(ixx) * (frame.x | frame.x)
ol += sympify(ixy) * (frame.x | frame.y)
ol += sympify(izx) * (frame.x | frame.z)
ol += sympify(ixy) * (frame.y | frame.x)
ol += sympify(iyy) * (frame.y | frame.y)
ol += sympify(iyz) * (frame.y | frame.z)
ol += sympify(izx) * (frame.z | frame.x)
ol += sympify(iyz) * (frame.z | frame.y)
ol += sympify(izz) * (frame.z | frame.z)
return ol
def inertia_of_point_mass(mass, pos_vec, frame):
"""Inertia dyadic of a point mass realtive to point O.
Parameters
==========
mass : Sympifyable
Mass of the point mass
pos_vec : Vector
Position from point O to point mass
frame : ReferenceFrame
Reference frame to express the dyadic in
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.mechanics import ReferenceFrame, inertia_of_point_mass
>>> N = ReferenceFrame('N')
>>> r, m = symbols('r m')
>>> px = r * N.x
>>> inertia_of_point_mass(m, px, N)
m*r**2*(N.y|N.y) + m*r**2*(N.z|N.z)
"""
return mass * (((frame.x | frame.x) + (frame.y | frame.y) +
(frame.z | frame.z)) * (pos_vec & pos_vec) -
(pos_vec | pos_vec))
def mechanics_printing():
"""Sets up interactive printing for mechanics' derivatives.
The main benefit of this is for printing of time derivatives;
instead of displaying as Derivative(f(t),t), it will display f'
This is only actually needed for when derivatives are present and are not
in a physics.mechanics object.
Examples
========
>>> # 2 lines below are for tests to function properly
>>> import sys
>>> sys.displayhook = sys.__displayhook__
>>> from sympy import Function, Symbol, diff
>>> from sympy.physics.mechanics import mechanics_printing
>>> f = Function('f')
>>> t = Symbol('t')
>>> x = Symbol('x')
>>> diff(f(t), t)
Derivative(f(t), t)
>>> mechanics_printing()
>>> diff(f(t), t)
f'
>>> diff(f(x), x)
Derivative(f(x), x)
>>> # 2 lines below are for tests to function properly
>>> import sys
>>> sys.displayhook = sys.__displayhook__
"""
import sys
sys.displayhook = mprint
def mprint(expr, **settings):
r"""Function for printing of expressions generated in mechanics.
Extends SymPy's StrPrinter; mprint is equivalent to:
print sstr()
mprint takes the same options as sstr.
Parameters
==========
expr : valid sympy object
SymPy expression to print
settings : args
Same as print for SymPy
Examples
========
>>> from sympy.physics.mechanics import mprint, dynamicsymbols
>>> u1 = dynamicsymbols('u1')
>>> print(u1)
u1(t)
>>> mprint(u1)
u1
"""
outstr = msprint(expr, **settings)
from sympy.core.compatibility import builtins
if (outstr != 'None'):
builtins._ = outstr
print(outstr)
def msprint(expr, **settings):
r"""Function for displaying expressions generated in mechanics.
Returns the output of mprint() as a string.
Parameters
==========
expr : valid sympy object
SymPy expression to print
settings : args
Same as print for SymPy
Examples
========
>>> from sympy.physics.mechanics import msprint, dynamicsymbols
>>> u1, u2 = dynamicsymbols('u1 u2')
>>> u2d = dynamicsymbols('u2', level=1)
>>> print("%s = %s" % (u1, u2 + u2d))
u1(t) = u2(t) + Derivative(u2(t), t)
>>> print("%s = %s" % (msprint(u1), msprint(u2 + u2d)))
u1 = u2 + u2'
"""
pr = MechanicsStrPrinter(settings)
return pr.doprint(expr)
def mpprint(expr, **settings):
r"""Function for pretty printing of expressions generated in mechanics.
Mainly used for expressions not inside a vector; the output of running
scripts and generating equations of motion. Takes the same options as
SymPy's pretty_print(); see that function for more information.
Parameters
==========
expr : valid sympy object
SymPy expression to pretty print
settings : args
Same as pretty print
Examples
========
Use in the same way as pprint
"""
mp = MechanicsPrettyPrinter(settings)
print(mp.doprint(expr))
def mlatex(expr, **settings):
r"""Function for printing latex representation of mechanics objects.
For latex representation of Vectors, Dyadics, and dynamicsymbols. Takes the
same options as SymPy's latex(); see that function for more information;
Parameters
==========
expr : valid sympy object
SymPy expression to represent in LaTeX form
settings : args
Same as latex()
Examples
========
>>> from sympy.physics.mechanics import mlatex, ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q1, q2 = dynamicsymbols('q1 q2')
>>> q1d, q2d = dynamicsymbols('q1 q2', 1)
>>> q1dd, q2dd = dynamicsymbols('q1 q2', 2)
>>> mlatex(N.x + N.y)
'\\mathbf{\\hat{n}_x} + \\mathbf{\\hat{n}_y}'
>>> mlatex(q1 + q2)
'q_{1} + q_{2}'
>>> mlatex(q1d)
'\\dot{q}_{1}'
>>> mlatex(q1 * q2d)
'q_{1} \\dot{q}_{2}'
>>> mlatex(q1dd * q1 / q1d)
'\\frac{q_{1} \\ddot{q}_{1}}{\\dot{q}_{1}}'
"""
return MechanicsLatexPrinter(settings).doprint(expr)
def kinematic_equations(speeds, coords, rot_type, rot_order=''):
"""Gives equations relating the qdot's to u's for a rotation type.
Supply rotation type and order as in orient. Speeds are assumed to be
body-fixed; if we are defining the orientation of B in A using by rot_type,
the angular velocity of B in A is assumed to be in the form: speed[0]*B.x +
speed[1]*B.y + speed[2]*B.z
Parameters
==========
speeds : list of length 3
The body fixed angular velocity measure numbers.
coords : list of length 3 or 4
The coordinates used to define the orientation of the two frames.
rot_type : str
The type of rotation used to create the equations. Body, Space, or
Quaternion only
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.mechanics import dynamicsymbols
>>> from sympy.physics.mechanics import kinematic_equations, mprint
>>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
>>> q1, q2, q3 = dynamicsymbols('q1 q2 q3')
>>> mprint(kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', '313'),
... order=None)
[-(u1*sin(q3) + u2*cos(q3))/sin(q2) + q1', -u1*cos(q3) + u2*sin(q3) + q2', (u1*sin(q3) + u2*cos(q3))*cos(q2)/sin(q2) - u3 + q3']
"""
# Code below is checking and sanitizing input
approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131',
'212', '232', '313', '323', '1', '2', '3', '')
rot_order = str(rot_order).upper() # Now we need to make sure XYZ = 123
rot_type = rot_type.upper()
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
if not isinstance(speeds, (list, tuple)):
raise TypeError('Need to supply speeds in a list')
if len(speeds) != 3:
raise TypeError('Need to supply 3 body-fixed speeds')
if not isinstance(coords, (list, tuple)):
raise TypeError('Need to supply coordinates in a list')
if rot_type.lower() in ['body', 'space']:
if rot_order not in approved_orders:
raise ValueError('Not an acceptable rotation order')
if len(coords) != 3:
raise ValueError('Need 3 coordinates for body or space')
# Actual hard-coded kinematic differential equations
q1, q2, q3 = coords
q1d, q2d, q3d = [diff(i, dynamicsymbols._t) for i in coords]
w1, w2, w3 = speeds
s1, s2, s3 = [sin(q1), sin(q2), sin(q3)]
c1, c2, c3 = [cos(q1), cos(q2), cos(q3)]
if rot_type.lower() == 'body':
if rot_order == '123':
return [q1d - (w1 * c3 - w2 * s3) / c2, q2d - w1 * s3 - w2 *
c3, q3d - (-w1 * c3 + w2 * s3) * s2 / c2 - w3]
if rot_order == '231':
return [q1d - (w2 * c3 - w3 * s3) / c2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (- w2 * c3 + w3 * s3) * s2 / c2]
if rot_order == '312':
return [q1d - (-w1 * s3 + w3 * c3) / c2, q2d - w1 * c3 - w3 *
s3, q3d - (w1 * s3 - w3 * c3) * s2 / c2 - w2]
if rot_order == '132':
return [q1d - (w1 * c3 + w3 * s3) / c2, q2d + w1 * s3 - w3 *
c3, q3d - (w1 * c3 + w3 * s3) * s2 / c2 - w2]
if rot_order == '213':
return [q1d - (w1 * s3 + w2 * c3) / c2, q2d - w1 * c3 + w2 *
s3, q3d - (w1 * s3 + w2 * c3) * s2 / c2 - w3]
if rot_order == '321':
return [q1d - (w2 * s3 + w3 * c3) / c2, q2d - w2 * c3 + w3 *
s3, q3d - w1 - (w2 * s3 + w3 * c3) * s2 / c2]
if rot_order == '121':
return [q1d - (w2 * s3 + w3 * c3) / s2, q2d - w2 * c3 + w3 *
s3, q3d - w1 + (w2 * s3 + w3 * c3) * c2 / s2]
if rot_order == '131':
return [q1d - (-w2 * c3 + w3 * s3) / s2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (w2 * c3 - w3 * s3) * c2 / s2]
if rot_order == '212':
return [q1d - (w1 * s3 - w3 * c3) / s2, q2d - w1 * c3 - w3 *
s3, q3d - (-w1 * s3 + w3 * c3) * c2 / s2 - w2]
if rot_order == '232':
return [q1d - (w1 * c3 + w3 * s3) / s2, q2d + w1 * s3 - w3 *
c3, q3d + (w1 * c3 + w3 * s3) * c2 / s2 - w2]
if rot_order == '313':
return [q1d - (w1 * s3 + w2 * c3) / s2, q2d - w1 * c3 + w2 *
s3, q3d + (w1 * s3 + w2 * c3) * c2 / s2 - w3]
if rot_order == '323':
return [q1d - (-w1 * c3 + w2 * s3) / s2, q2d - w1 * s3 - w2 *
c3, q3d - (w1 * c3 - w2 * s3) * c2 / s2 - w3]
if rot_type.lower() == 'space':
if rot_order == '123':
return [q1d - w1 - (w2 * s1 + w3 * c1) * s2 / c2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / c2]
if rot_order == '231':
return [q1d - (w1 * c1 + w3 * s1) * s2 / c2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / c2]
if rot_order == '312':
return [q1d - (w1 * s1 + w2 * c1) * s2 / c2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / c2]
if rot_order == '132':
return [q1d - w1 - (-w2 * c1 + w3 * s1) * s2 / c2, q2d - w2 *
s1 - w3 * c1, q3d - (w2 * c1 - w3 * s1) / c2]
if rot_order == '213':
return [q1d - (w1 * s1 - w3 * c1) * s2 / c2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (-w1 * s1 + w3 * c1) / c2]
if rot_order == '321':
return [q1d - (-w1 * c1 + w2 * s1) * s2 / c2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (w1 * c1 - w2 * s1) / c2]
if rot_order == '121':
return [q1d - w1 + (w2 * s1 + w3 * c1) * c2 / s2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / s2]
if rot_order == '131':
return [q1d - w1 - (w2 * c1 - w3 * s1) * c2 / s2, q2d - w2 *
s1 - w3 * c1, q3d - (-w2 * c1 + w3 * s1) / s2]
if rot_order == '212':
return [q1d - (-w1 * s1 + w3 * c1) * c2 / s2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (w1 * s1 - w3 * c1) / s2]
if rot_order == '232':
return [q1d + (w1 * c1 + w3 * s1) * c2 / s2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / s2]
if rot_order == '313':
return [q1d + (w1 * s1 + w2 * c1) * c2 / s2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / s2]
if rot_order == '323':
return [q1d - (w1 * c1 - w2 * s1) * c2 / s2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (-w1 * c1 + w2 * s1) / s2]
elif rot_type.lower() == 'quaternion':
if rot_order != '':
raise ValueError('Cannot have rotation order for quaternion')
if len(coords) != 4:
raise ValueError('Need 4 coordinates for quaternion')
# Actual hard-coded kinematic differential equations
e0, e1, e2, e3 = coords
w = Matrix(speeds + [0])
E = Matrix([[e0, -e3, e2, e1], [e3, e0, -e1, e2], [-e2, e1, e0, e3],
[-e1, -e2, -e3, e0]])
edots = Matrix([diff(i, dynamicsymbols._t) for i in [e1, e2, e3, e0]])
return list(edots.T - 0.5 * w.T * E.T)
else:
raise ValueError('Not an approved rotation type for this function')
def partial_velocity(vel_list, u_list, frame):
"""Returns a list of partial velocities.
For a list of velocity or angular velocity vectors the partial derivatives
with respect to the supplied generalized speeds are computed, in the
specified ReferenceFrame.
The output is a list of lists. The outer list has a number of elements
equal to the number of supplied velocity vectors. The inner lists are, for
each velocity vector, the partial derivatives of that velocity vector with
respect to the generalized speeds supplied.
Parameters
==========
vel_list : list
List of velocities of Point's and angular velocities of ReferenceFrame's
u_list : list
List of independent generalized speeds.
frame : ReferenceFrame
The ReferenceFrame the partial derivatives are going to be taken in.
Examples
========
>>> from sympy.physics.mechanics import Point, ReferenceFrame
>>> from sympy.physics.mechanics import dynamicsymbols
>>> from sympy.physics.mechanics import partial_velocity
>>> u = dynamicsymbols('u')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
>>> vel_list = [P.vel(N)]
>>> u_list = [u]
>>> partial_velocity(vel_list, u_list, N)
[[N.x]]
"""
if not hasattr(vel_list, '__iter__'):
raise TypeError('Provide velocities in an iterable')
if not hasattr(u_list, '__iter__'):
raise TypeError('Provide speeds in an iterable')
list_of_pvlists = []
for i in vel_list:
pvlist = []
for j in u_list:
vel = i.diff(j, frame)
pvlist += [vel]
list_of_pvlists += [pvlist]
return list_of_pvlists
def linear_momentum(frame, *body):
"""Linear momentum of the system.
This function returns the linear momentum of a system of Particle's and/or
RigidBody's. The linear momentum of a system is equal to the vector sum of
the linear momentum of its constituents. Consider a system, S, comprised of
a rigid body, A, and a particle, P. The linear momentum of the system, L,
is equal to the vector sum of the linear momentum of the particle, L1, and
the linear momentum of the rigid body, L2, i.e-
L = L1 + L2
Parameters
==========
frame : ReferenceFrame
The frame in which linear momentum is desired.
body1, body2, body3... : Particle and/or RigidBody
The body (or bodies) whose kinetic energy is required.
Examples
========
>>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
>>> from sympy.physics.mechanics import RigidBody, outer, linear_momentum
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, 10 * N.x)
>>> Pa = Particle('Pa', P, 1)
>>> Ac = Point('Ac')
>>> Ac.set_vel(N, 25 * N.y)
>>> I = outer(N.x, N.x)
>>> A = RigidBody('A', Ac, N, 20, (I, Ac))
>>> linear_momentum(N, A, Pa)
10*N.x + 500*N.y
"""
if not isinstance(frame, ReferenceFrame):
raise TypeError('Please specify a valid ReferenceFrame')
else:
linear_momentum_sys = Vector(0)
for e in body:
if isinstance(e, (RigidBody, Particle)):
linear_momentum_sys += e.linear_momentum(frame)
else:
raise TypeError('*body must have only Particle or RigidBody')
return linear_momentum_sys
def angular_momentum(point, frame, *body):
    """Angular momentum of a system.

    This function returns the angular momentum of a system of Particle's and/or
    RigidBody's. The angular momentum of such a system is equal to the vector
    sum of the angular momentum of its constituents. Consider a system, S,
    comprised of a rigid body, A, and a particle, P. The angular momentum of
    the system, H, is equal to the vector sum of the angular momentum of the
    particle, H1, and the angular momentum of the rigid body, H2, i.e.

    H = H1 + H2

    Parameters
    ==========
    point : Point
        The point about which angular momentum of the system is desired.
    frame : ReferenceFrame
        The frame in which angular momentum is desired.
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose angular momentum is required.

    Raises
    ======
    TypeError
        If ``frame`` is not a ReferenceFrame, ``point`` is not a Point, or any
        element of ``body`` is neither a Particle nor a RigidBody.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, angular_momentum
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> P.set_vel(N, 10 * N.x)
    >>> Pa = Particle('Pa', P, 1)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> Ac.set_vel(N, 5 * N.y)
    >>> a = ReferenceFrame('a')
    >>> a.set_ang_vel(N, 10 * N.z)
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, 20, (I, Ac))
    >>> angular_momentum(O, N, Pa, A)
    10*N.z
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Please enter a valid ReferenceFrame')
    if not isinstance(point, Point):
        raise TypeError('Please specify a valid Point')
    # Accumulate each constituent's contribution about `point`, starting
    # from the zero vector so an empty *body yields Vector(0).
    angular_momentum_sys = Vector(0)
    for e in body:
        if not isinstance(e, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
        angular_momentum_sys += e.angular_momentum(point, frame)
    return angular_momentum_sys
def kinetic_energy(frame, *body):
    """Kinetic energy of a multibody system.

    This function returns the kinetic energy of a system of Particle's and/or
    RigidBody's. The kinetic energy of such a system is equal to the sum of
    the kinetic energies of its constituents. Consider a system, S, comprising
    a rigid body, A, and a particle, P. The kinetic energy of the system, T,
    is equal to the sum of the kinetic energy of the particle, T1, and
    the kinetic energy of the rigid body, T2, i.e.

    T = T1 + T2

    Kinetic energy is a scalar.

    Parameters
    ==========
    frame : ReferenceFrame
        The frame in which the velocity or angular velocity of the body is
        defined.
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose kinetic energy is required.

    Raises
    ======
    TypeError
        If ``frame`` is not a ReferenceFrame, or any element of ``body`` is
        neither a Particle nor a RigidBody.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, kinetic_energy
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> P.set_vel(N, 10 * N.x)
    >>> Pa = Particle('Pa', P, 1)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> Ac.set_vel(N, 5 * N.y)
    >>> a = ReferenceFrame('a')
    >>> a.set_ang_vel(N, 10 * N.z)
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, 20, (I, Ac))
    >>> kinetic_energy(N, Pa, A)
    350
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Please enter a valid ReferenceFrame')
    # Scalar accumulation; S(0) keeps the result a SymPy expression.
    ke_sys = S(0)
    for e in body:
        if not isinstance(e, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
        ke_sys += e.kinetic_energy(frame)
    return ke_sys
def potential_energy(*body):
    """Potential energy of a multibody system.

    This function returns the potential energy of a system of Particle's and/or
    RigidBody's. The potential energy of such a system is equal to the sum of
    the potential energy of its constituents. Consider a system, S, comprising
    a rigid body, A, and a particle, P. The potential energy of the system, V,
    is equal to the sum of the potential energy of the particle, V1, and
    the potential energy of the rigid body, V2, i.e.

    V = V1 + V2

    Potential energy is a scalar.

    Parameters
    ==========
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose potential energy is required.

    Raises
    ======
    TypeError
        If any element of ``body`` is neither a Particle nor a RigidBody.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, potential_energy
    >>> from sympy import symbols
    >>> M, m, g, h = symbols('M m g h')
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> Pa = Particle('Pa', P, m)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> a = ReferenceFrame('a')
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, M, (I, Ac))
    >>> Pa.set_potential_energy(m * g * h)
    >>> A.set_potential_energy(M * g * h)
    >>> potential_energy(Pa, A)
    M*g*h + g*h*m
    """
    # Scalar accumulation; S(0) keeps the result a SymPy expression.
    pe_sys = S(0)
    for e in body:
        if not isinstance(e, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
        pe_sys += e.potential_energy
    return pe_sys
def Lagrangian(frame, *body):
    """Lagrangian of a multibody system.

    This function returns the Lagrangian of a system of Particle's and/or
    RigidBody's. The Lagrangian of such a system is equal to the difference
    between the kinetic energies and potential energies of its constituents. If
    T and V are the kinetic and potential energies of a system then its
    Lagrangian, L, is defined as

    L = T - V

    The Lagrangian is a scalar.

    Parameters
    ==========
    frame : ReferenceFrame
        The frame in which the velocity or angular velocity of the body is
        defined to determine the kinetic energy.
    body1, body2, body3... : Particle and/or RigidBody
        The body (or bodies) whose Lagrangian is required.

    Raises
    ======
    TypeError
        If ``frame`` is not a ReferenceFrame, or any element of ``body`` is
        neither a Particle nor a RigidBody.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, Particle, ReferenceFrame
    >>> from sympy.physics.mechanics import RigidBody, outer, Lagrangian
    >>> from sympy import symbols
    >>> M, m, g, h = symbols('M m g h')
    >>> N = ReferenceFrame('N')
    >>> O = Point('O')
    >>> O.set_vel(N, 0 * N.x)
    >>> P = O.locatenew('P', 1 * N.x)
    >>> P.set_vel(N, 10 * N.x)
    >>> Pa = Particle('Pa', P, 1)
    >>> Ac = O.locatenew('Ac', 2 * N.y)
    >>> Ac.set_vel(N, 5 * N.y)
    >>> a = ReferenceFrame('a')
    >>> a.set_ang_vel(N, 10 * N.z)
    >>> I = outer(N.z, N.z)
    >>> A = RigidBody('A', Ac, a, 20, (I, Ac))
    >>> Pa.set_potential_energy(m * g * h)
    >>> A.set_potential_energy(M * g * h)
    >>> Lagrangian(N, Pa, A)
    -M*g*h - g*h*m + 350
    """
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Please supply a valid ReferenceFrame')
    # Validate up front so neither helper raises halfway through a partial sum.
    for e in body:
        if not isinstance(e, (RigidBody, Particle)):
            raise TypeError('*body must have only Particle or RigidBody')
    return kinetic_energy(frame, *body) - potential_energy(*body)
| 34.666667
| 132
| 0.564665
|
4a06a53d3936de2f4d475312b58b81c181ec39a8
| 4,030
|
py
|
Python
|
chevah/compat/unix_service.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 5
|
2016-12-03T22:54:50.000Z
|
2021-11-17T11:17:39.000Z
|
chevah/compat/unix_service.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 76
|
2015-01-22T16:00:31.000Z
|
2022-02-09T22:13:34.000Z
|
chevah/compat/unix_service.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 1
|
2016-12-10T15:57:31.000Z
|
2016-12-10T15:57:31.000Z
|
# Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
'''Unix specific functionality for launching an Unix daemon.'''
from __future__ import with_statement
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import daemon
import os
import signal
import sys
from zope.interface import implements
from chevah.compat import local_filesystem
from chevah.compat.exceptions import CompatError
from chevah.compat.helpers import _
from chevah.compat.interfaces import IDaemon
class Daemon(object):
    """
    Handles running the process as a Unix daemon.

    Subclasses must implement onInitialize, getOpenFiles, onStart and
    onStop; launch() drives the daemonization via python-daemon's
    DaemonContext and manages the PID file.
    """
    implements(IDaemon)
    # Kept as a class attribute so tests/subclasses can substitute a fake
    # context implementation.
    DaemonContext = daemon.DaemonContext
    def __init__(self, options):
        """
        See `IDaemon`.
        """
        # Parsed command-line options; must expose a `pid` attribute with
        # the PID file path (used by _writePID/_deletePID).
        self.options = options
        self._daemon_context = None
        # When True, the daemon keeps the current stdin/stdout/stderr
        # instead of detaching them (useful for debugging).
        self.preserve_standard_streams = False
        # When False, DaemonContext will not fork/detach from the
        # controlling terminal.
        self.detach_process = True
    def launch(self):
        """
        See `IDaemon`.
        """
        # None tells DaemonContext to redirect the stream to /dev/null.
        stdin = None
        stdout = None
        stderr = None
        if self.preserve_standard_streams:
            stdin = sys.stdin
            stdout = sys.stdout
            stderr = sys.stderr
        self._daemon_context = self.DaemonContext(
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            )
        self._daemon_context.detach_process = self.detach_process
        # Both termination signals funnel into the same graceful-stop path.
        self._daemon_context.signal_map = {
            signal.SIGINT: self._onStopSignal,
            signal.SIGTERM: self._onStopSignal,
            }
        self._daemon_context.working_directory = os.getcwd()
        # Initialize before entering the context so that any files opened
        # during initialization can be preserved across the detach.
        self.onInitialize()
        self._daemon_context.files_preserve = self.getOpenFiles()
        with self._daemon_context:
            self._writePID()
            self.onStart()
            # Under normal operation, we will not reach this point as the
            # execution is interrupted by the signal handling.
            self._onStopSignal(None, None)
    def _onStopSignal(self, signum, frame):
        """
        Called when SIGINT or SIGTERM are received.
        """
        # Stop with exit code 0, then remove the PID file.
        self.onStop(0)
        self._deletePID()
    def _writePID(self):
        """
        Write process ID in pid file.
        """
        pid_path = local_filesystem.getAbsoluteRealPath(self.options.pid)
        pid_segments = local_filesystem.getSegmentsFromRealPath(pid_path)
        try:
            # 0o640: owner read/write, group read; setAttributes enforces
            # the mode even if the file already existed.
            pid_file = local_filesystem.openFileForWriting(
                pid_segments, mode=0o640)
            local_filesystem.setAttributes(pid_segments, {'mode': 0o640})
            pid_file.write('%d' % os.getpid())
            pid_file.close()
        except (OSError, IOError):
            raise CompatError(
                1008,
                _(u'Could not write PID file at %s.' % (pid_path)),
                )
    def _deletePID(self):
        # Remove the PID file written by _writePID; best-effort cleanup.
        pid_path = local_filesystem.getAbsoluteRealPath(self.options.pid)
        pid_segments = local_filesystem.getSegmentsFromRealPath(pid_path)
        try:
            local_filesystem.deleteFile(pid_segments)
        except Exception:
            # We don't care if remove operation fail or success.
            # We are going to close the server anyway.
            # Just change the exit value to signal that something went
            # wrong.
            self.onStop(1)
    def onInitialize(self):
        """
        See: `IDaemon`.
        """
        raise NotImplementedError(
            'Use this method for initializing your daemon.')
    def getOpenFiles(self):
        """
        See: `IDaemon`.
        """
        raise NotImplementedError(
            'Use this method for get the list of file for your daemon.')
    def onStart(self):
        """
        See: `IDaemon`.
        """
        raise NotImplementedError(
            'Use this method for starting your daemon.')
    def onStop(self, exit_code):
        """
        See: `IDaemon`.
        """
        raise NotImplementedError(
            'Use this method for stopping your daemon.')
| 29.202899
| 73
| 0.604963
|
4a06a5500094aad75a257b53394f75dd78e487f2
| 45,252
|
py
|
Python
|
pandas/tseries/tests/test_plotting.py
|
betoesquivel/PyData29-DataAnalyticsWithAWSLambda
|
318d1f595e4079544159a0f4802277dc5b25cb47
|
[
"MIT"
] | 4
|
2016-12-06T20:22:28.000Z
|
2018-05-04T09:51:45.000Z
|
pandas/tseries/tests/test_plotting.py
|
betoesquivel/PyData29-DataAnalyticsWithAWSLambda
|
318d1f595e4079544159a0f4802277dc5b25cb47
|
[
"MIT"
] | 11
|
2020-06-05T17:24:17.000Z
|
2022-03-11T23:15:26.000Z
|
pandas/tseries/tests/test_plotting.py
|
betoesquivel/PyData29-DataAnalyticsWithAWSLambda
|
318d1f595e4079544159a0f4802277dc5b25cb47
|
[
"MIT"
] | 3
|
2017-02-25T15:26:47.000Z
|
2017-12-20T06:27:07.000Z
|
from datetime import datetime, timedelta, date, time
import nose
from pandas.compat import lrange, zip
import numpy as np
from numpy.testing.decorators import slow
from pandas import Index, Series, DataFrame
from pandas.tseries.index import date_range, bdate_range
from pandas.tseries.offsets import DateOffset
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
from pandas.tests.test_graphics import _skip_if_no_scipy_gaussian_kde
@tm.mplskip
class TestTSPlot(tm.TestCase):
def setUp(self):
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def tearDown(self):
tm.close()
@slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
import matplotlib.pyplot as plt # noqa
df = DataFrame(np.random.randn(10, 9), index=range(10))
ax = df.plot(fontsize=2)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
self.assertEqual(label.get_fontsize(), 2)
@slow
def test_frame_inferred(self):
# inferred freq
import matplotlib.pyplot as plt # noqa
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}, idx)
ax = df.plot() # it works
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
plt.close(plt.gcf())
self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
ax = plt.gca()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
ax = ts.plot(style='k')
self.assertEqual((0., 0., 0.), ax.get_lines()[0].get_color())
def test_both_style_and_color(self):
import matplotlib.pyplot as plt # noqa
ts = tm.makeTimeSeries()
self.assertRaises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
self.assertRaises(ValueError, s.plot, style='b-', color='#000099')
@slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot)
def test_get_datevalue(self):
from pandas.tseries.converter import get_datevalue
self.assertIsNone(get_datevalue(None, 'D'))
self.assertEqual(get_datevalue(1987, 'A'), 1987)
self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'),
Period('1987-12', 'M').ordinal)
self.assertEqual(get_datevalue('1/1/1987', 'D'),
Period('1987-1-1', 'D').ordinal)
@slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
self.assertEqual(expected_string,
ax.format_coord(first_x, first_y))
except (ValueError):
raise nose.SkipTest("skipping test because issue forming "
"test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3,
freq='A-DEC'))
check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
check_format_of_first_point(daily.plot(),
't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
import matplotlib.pyplot as plt
from pandas.tseries.plotting import tsplot
tsplot(annual, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014 y = 1.000000')
tsplot(daily, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ax = ts.plot()
self.assertFalse(hasattr(ax, 'freq'))
@slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(
2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_uhf(self):
import pandas.tseries.converter as conv
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
ax = df.plot()
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
self.assertEqual(xp, rs)
@slow
def test_irreg_hf(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.ix[[0, 1, 3, 4]]
ax = irreg.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all(
))
plt.clf()
fig.add_subplot(111)
df2 = df.copy()
df2.index = df.index.asobject
ax = df2.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def test_irregular_datetime64_repr_bug(self):
import matplotlib.pyplot as plt
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
ret = ser.plot()
self.assertIsNotNone(ret)
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
self.assertEqual(rs, xp)
def test_business_freq(self):
import matplotlib.pyplot as plt # noqa
bts = tm.makePeriodSeries()
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
bts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'B')
@slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
ts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'M')
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(
minutes=30))
df = DataFrame(np.arange(24), index=idx)
ax = df.plot()
rs = ax.get_lines()[0].get_xdata()
self.assertFalse(Index(rs).is_normalized)
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
ax = bts.plot()
idx = ax.get_lines()[0].get_xdata()
tm.assert_numpy_array_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
import matplotlib.pyplot as plt
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(result[0], xlim[0] - 5)
self.assertEqual(result[1], xlim[1] + 10)
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
# datetim
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
fig = ax.get_figure()
plt.close(fig)
ser = tm.makeTimeSeries()
ax = ser.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
ax = df.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.tseries.converter as conv
self.assertEqual(conv.get_finder('B'), conv._daily_finder)
self.assertEqual(conv.get_finder('D'), conv._daily_finder)
self.assertEqual(conv.get_finder('M'), conv._monthly_finder)
self.assertEqual(conv.get_finder('Q'), conv._quarterly_finder)
self.assertEqual(conv.get_finder('A'), conv._annual_finder)
self.assertEqual(conv.get_finder('W'), conv._daily_finder)
@slow
def test_finder_daily(self):
import matplotlib.pyplot as plt
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
import matplotlib.pyplot as plt
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
import matplotlib.pyplot as plt
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
self.assertEqual(rs, xp)
@slow
def test_finder_annual(self):
import matplotlib.pyplot as plt
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, Period(xp[i], freq='A').ordinal)
plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
self.assertEqual(rs, xp)
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
self.assertEqual(rs, xp)
@slow
def test_gaps(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
ax = ts.plot()
lines = ax.get_lines()
tm._skip_if_mpl_1_5()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
ax = ser.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
ax = low.plot()
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
tm._skip_if_mpl_1_5()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
@slow
def test_secondary_y(self):
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
self.assertFalse(hasattr(ax, 'left_ax'))
self.assertTrue(hasattr(ax, 'right_ax'))
self.assertTrue(hasattr(ax2, 'left_ax'))
self.assertFalse(hasattr(ax2, 'right_ax'))
@slow
def test_secondary_y_ts(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(), 'default')
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
@slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
import matplotlib.pyplot as plt # noqa
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='density')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='bar')
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'default')
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
def test_mixed_freq_regular_first(self):
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first(self):
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
s2.plot(style='g')
ax = s1.plot()
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s1.plot()
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s2.plot(style='g')
ax = s1.plot(ax=ax)
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
@slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
ax = ts.plot()
ts2.plot(style='r')
self.assertEqual(ax.lines[0].get_xdata()[0],
ax.lines[1].get_xdata()[0])
@slow
def test_mixed_freq_lf_first(self):
import matplotlib.pyplot as plt
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot(legend=True)
ax = high.plot(legend=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
leg = ax.get_legend()
self.assertEqual(len(leg.texts), 2)
plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'T')
    def test_mixed_freq_irreg_period(self):
        """Irregular datetime series and PeriodIndex series share one plot
        without raising."""
        ts = tm.makeTimeSeries()
        irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
        rng = period_range('1/3/2000', periods=30, freq='B')
        ps = Series(np.random.randn(len(rng)), rng)
        irreg.plot()
        ps.plot()
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(high, plt.Axes.plot)
lines = tsplot(low, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
@slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
expected_h = idxh.to_period().asi8
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562])
for l in ax.get_lines():
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(low, plt.Axes.plot)
lines = tsplot(high, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
    @slow
    def test_from_resampling_area_line_mixed(self):
        """Mixed area/line stacked plots at mixed frequencies: x data is
        upsampled to weekly and stacked y values accumulate correctly,
        in both low-then-high and high-then-low plotting order."""
        idxh = date_range('1/1/1999', periods=52, freq='W')
        idxl = date_range('1/1/1999', periods=12, freq='M')
        high = DataFrame(np.random.rand(len(idxh), 3),
                         index=idxh, columns=[0, 1, 2])
        low = DataFrame(np.random.rand(len(idxl), 3),
                        index=idxl, columns=[0, 1, 2])
        # low to high
        for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
            ax = low.plot(kind=kind1, stacked=True)
            ax = high.plot(kind=kind2, stacked=True, ax=ax)
            # check low dataframe result
            expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
                                   1544, 1549, 1553, 1558, 1562])
            expected_y = np.zeros(len(expected_x))
            for i in range(3):
                l = ax.lines[i]
                self.assertEqual(PeriodIndex(l.get_xdata()).freq, idxh.freq)
                self.assert_numpy_array_equal(
                    l.get_xdata(orig=False), expected_x)
                # check stacked values are correct
                expected_y += low[i].values
                self.assert_numpy_array_equal(
                    l.get_ydata(orig=False), expected_y)
            # check high dataframe result
            expected_x = idxh.to_period().asi8
            expected_y = np.zeros(len(expected_x))
            for i in range(3):
                l = ax.lines[3 + i]
                self.assertEqual(PeriodIndex(
                    data=l.get_xdata()).freq, idxh.freq)
                self.assert_numpy_array_equal(
                    l.get_xdata(orig=False), expected_x)
                expected_y += high[i].values
                self.assert_numpy_array_equal(
                    l.get_ydata(orig=False), expected_y)
        # high to low
        for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
            ax = high.plot(kind=kind1, stacked=True)
            ax = low.plot(kind=kind2, stacked=True, ax=ax)
            # check high dataframe result
            expected_x = idxh.to_period().asi8
            expected_y = np.zeros(len(expected_x))
            for i in range(3):
                l = ax.lines[i]
                self.assertEqual(PeriodIndex(
                    data=l.get_xdata()).freq, idxh.freq)
                self.assert_numpy_array_equal(
                    l.get_xdata(orig=False), expected_x)
                expected_y += high[i].values
                self.assert_numpy_array_equal(
                    l.get_ydata(orig=False), expected_y)
            # check low dataframe result
            expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
                                   1544, 1549, 1553, 1558, 1562])
            expected_y = np.zeros(len(expected_x))
            for i in range(3):
                l = ax.lines[3 + i]
                self.assertEqual(PeriodIndex(
                    data=l.get_xdata()).freq, idxh.freq)
                self.assert_numpy_array_equal(
                    l.get_xdata(orig=False), expected_x)
                expected_y += low[i].values
                self.assert_numpy_array_equal(
                    l.get_ydata(orig=False), expected_y)
    @slow
    def test_mixed_freq_second_millisecond(self):
        """GH 7772 / GH 7760: mixing second and 100ms series resolves the
        shared axis to millisecond ('L') frequency in either order."""
        # GH 7772, GH 7760
        idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
        idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
        high = Series(np.random.randn(len(idxh)), idxh)
        low = Series(np.random.randn(len(idxl)), idxl)
        # high to low
        high.plot()
        ax = low.plot()
        self.assertEqual(len(ax.get_lines()), 2)
        for l in ax.get_lines():
            self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
        tm.close()
        # low to high
        low.plot()
        ax = high.plot()
        self.assertEqual(len(ax.get_lines()), 2)
        for l in ax.get_lines():
            self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
    @slow
    def test_irreg_dtypes(self):
        """Irregular object-dtype date and datetime64 indexes still plot."""
        # date
        idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
        df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
        _check_plot_works(df.plot)
        # np.datetime64
        idx = date_range('1/1/2000', periods=10)
        idx = idx[[0, 2, 5, 9]].asobject
        df = DataFrame(np.random.randn(len(idx), 3), idx)
        _check_plot_works(df.plot)
    @slow
    def test_time(self):
        """Time-of-day index: tick labels render as HH:MM:SS from the tick
        value (seconds since midnight), before and after changing xlim."""
        t = datetime(1, 1, 1, 3, 30, 0)
        deltas = np.random.randint(1, 20, 3).cumsum()
        ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
        df = DataFrame({'a': np.random.randn(len(ts)),
                        'b': np.random.randn(len(ts))},
                       index=ts)
        ax = df.plot()
        # verify tick labels
        ticks = ax.get_xticks()
        labels = ax.get_xticklabels()
        for t, l in zip(ticks, labels):
            # tick value is seconds since midnight; split into h/m/s
            m, s = divmod(int(t), 60)
            h, m = divmod(m, 60)
            xp = l.get_text()
            if len(xp) > 0:
                rs = time(h, m, s).strftime('%H:%M:%S')
                self.assertEqual(xp, rs)
        # change xlim
        ax.set_xlim('1:30', '5:00')
        # check tick labels again
        ticks = ax.get_xticks()
        labels = ax.get_xticklabels()
        for t, l in zip(ticks, labels):
            m, s = divmod(int(t), 60)
            h, m = divmod(m, 60)
            xp = l.get_text()
            if len(xp) > 0:
                rs = time(h, m, s).strftime('%H:%M:%S')
                self.assertEqual(xp, rs)
    @slow
    def test_time_musec(self):
        """Sub-second time-of-day index: tick labels use the %f format."""
        t = datetime(1, 1, 1, 3, 30, 0)
        deltas = np.random.randint(1, 20, 3).cumsum()
        ts = np.array([(t + timedelta(microseconds=int(x))).time()
                       for x in deltas])
        df = DataFrame({'a': np.random.randn(len(ts)),
                        'b': np.random.randn(len(ts))},
                       index=ts)
        ax = df.plot()
        # verify tick labels
        ticks = ax.get_xticks()
        labels = ax.get_xticklabels()
        for t, l in zip(ticks, labels):
            m, s = divmod(int(t), 60)
            # TODO: unused?
            # us = int((t - int(t)) * 1e6)
            h, m = divmod(m, 60)
            xp = l.get_text()
            if len(xp) > 0:
                rs = time(h, m, s).strftime('%H:%M:%S.%f')
                self.assertEqual(xp, rs)
    @slow
    def test_secondary_upsample(self):
        """Secondary-y plotting upsamples to daily; the primary axis is
        reachable as ax.left_ax and no right_ax exists on the result."""
        idxh = date_range('1/1/1999', periods=365, freq='D')
        idxl = date_range('1/1/1999', periods=12, freq='M')
        high = Series(np.random.randn(len(idxh)), idxh)
        low = Series(np.random.randn(len(idxl)), idxl)
        low.plot()
        ax = high.plot(secondary_y=True)
        for l in ax.get_lines():
            self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
        self.assertTrue(hasattr(ax, 'left_ax'))
        self.assertFalse(hasattr(ax, 'right_ax'))
        for l in ax.left_ax.get_lines():
            self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
    @slow
    def test_secondary_legend(self):
        """Legend handling with secondary_y: '(right)' suffixes, mark_right,
        bar plots, and distinct line colors, for ts and non-ts frames."""
        import matplotlib.pyplot as plt
        fig = plt.gcf()
        plt.clf()
        ax = fig.add_subplot(211)
        # ts
        df = tm.makeTimeDataFrame()
        ax = df.plot(secondary_y=['A', 'B'])
        leg = ax.get_legend()
        self.assertEqual(len(leg.get_lines()), 4)
        self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
        self.assertEqual(leg.get_texts()[1].get_text(), 'B (right)')
        self.assertEqual(leg.get_texts()[2].get_text(), 'C')
        self.assertEqual(leg.get_texts()[3].get_text(), 'D')
        # single combined legend lives on the primary axis only
        self.assertIsNone(ax.right_ax.get_legend())
        colors = set()
        for line in leg.get_lines():
            colors.add(line.get_color())
        # TODO: color cycle problems
        self.assertEqual(len(colors), 4)
        plt.clf()
        ax = fig.add_subplot(211)
        ax = df.plot(secondary_y=['A', 'C'], mark_right=False)
        leg = ax.get_legend()
        self.assertEqual(len(leg.get_lines()), 4)
        # mark_right=False suppresses the '(right)' suffix
        self.assertEqual(leg.get_texts()[0].get_text(), 'A')
        self.assertEqual(leg.get_texts()[1].get_text(), 'B')
        self.assertEqual(leg.get_texts()[2].get_text(), 'C')
        self.assertEqual(leg.get_texts()[3].get_text(), 'D')
        plt.clf()
        ax = df.plot(kind='bar', secondary_y=['A'])
        leg = ax.get_legend()
        self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
        self.assertEqual(leg.get_texts()[1].get_text(), 'B')
        plt.clf()
        ax = df.plot(kind='bar', secondary_y=['A'], mark_right=False)
        leg = ax.get_legend()
        self.assertEqual(leg.get_texts()[0].get_text(), 'A')
        self.assertEqual(leg.get_texts()[1].get_text(), 'B')
        plt.clf()
        ax = fig.add_subplot(211)
        df = tm.makeTimeDataFrame()
        ax = df.plot(secondary_y=['C', 'D'])
        leg = ax.get_legend()
        self.assertEqual(len(leg.get_lines()), 4)
        self.assertIsNone(ax.right_ax.get_legend())
        colors = set()
        for line in leg.get_lines():
            colors.add(line.get_color())
        # TODO: color cycle problems
        self.assertEqual(len(colors), 4)
        # non-ts
        df = tm.makeDataFrame()
        plt.clf()
        ax = fig.add_subplot(211)
        ax = df.plot(secondary_y=['A', 'B'])
        leg = ax.get_legend()
        self.assertEqual(len(leg.get_lines()), 4)
        self.assertIsNone(ax.right_ax.get_legend())
        colors = set()
        for line in leg.get_lines():
            colors.add(line.get_color())
        # TODO: color cycle problems
        self.assertEqual(len(colors), 4)
        plt.clf()
        ax = fig.add_subplot(211)
        ax = df.plot(secondary_y=['C', 'D'])
        leg = ax.get_legend()
        self.assertEqual(len(leg.get_lines()), 4)
        self.assertIsNone(ax.right_ax.get_legend())
        colors = set()
        for line in leg.get_lines():
            colors.add(line.get_color())
        # TODO: color cycle problems
        self.assertEqual(len(colors), 4)
    def test_format_date_axis(self):
        """Non-empty date tick labels are rotated 30 degrees by default."""
        rng = date_range('1/1/2012', periods=12, freq='M')
        df = DataFrame(np.random.randn(len(rng), 3), rng)
        ax = df.plot()
        xaxis = ax.get_xaxis()
        for l in xaxis.get_ticklabels():
            if len(l.get_text()) > 0:
                self.assertEqual(l.get_rotation(), 30)
    @slow
    def test_ax_plot(self):
        """A DatetimeIndex plotted on a raw matplotlib Axes round-trips
        through the line's x data."""
        import matplotlib.pyplot as plt
        x = DatetimeIndex(start='2012-01-02', periods=10, freq='D')
        y = lrange(len(x))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        lines = ax.plot(x, y, label='Y')
        tm.assert_numpy_array_equal(DatetimeIndex(lines[0].get_xdata()), x)
    @slow
    def test_mpl_nopandas(self):
        """plot_date with plain python date ordinals: the ordinals must
        survive matplotlib's unit conversion unchanged."""
        import matplotlib.pyplot as plt
        dates = [date(2008, 12, 31), date(2009, 1, 31)]
        values1 = np.arange(10.0, 11.0, 0.5)
        values2 = np.arange(11.0, 12.0, 0.5)
        kw = dict(fmt='-', lw=4)
        plt.close('all')
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot_date([x.toordinal() for x in dates], values1, **kw)
        ax.plot_date([x.toordinal() for x in dates], values2, **kw)
        line1, line2 = ax.get_lines()
        tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
                                    line1.get_xydata()[:, 0])
        tm.assert_numpy_array_equal(np.array([x.toordinal() for x in dates]),
                                    line2.get_xydata()[:, 0])
    @slow
    def test_irregular_ts_shared_ax_xlim(self):
        """GH 2960: two slices of an irregular series on one axis produce
        xlim spanning the full index range."""
        # GH 2960
        ts = tm.makeTimeSeries()[:20]
        ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
        # plot the left section of the irregular series, then the right section
        ax = ts_irregular[:5].plot()
        ts_irregular[5:].plot(ax=ax)
        # check that axis limits are correct
        left, right = ax.get_xlim()
        self.assertEqual(left, ts_irregular.index.min().toordinal())
        self.assertEqual(right, ts_irregular.index.max().toordinal())
    @slow
    def test_secondary_y_non_ts_xlim(self):
        """GH 3490: secondary-y plot with larger x values extends the right
        limit without moving the left one (non-timeseries case)."""
        # GH 3490 - non-timeseries with secondary y
        index_1 = [1, 2, 3, 4]
        index_2 = [5, 6, 7, 8]
        s1 = Series(1, index=index_1)
        s2 = Series(2, index=index_2)
        ax = s1.plot()
        left_before, right_before = ax.get_xlim()
        s2.plot(secondary_y=True, ax=ax)
        left_after, right_after = ax.get_xlim()
        self.assertEqual(left_before, left_after)
        self.assertTrue(right_before < right_after)
    @slow
    def test_secondary_y_regular_ts_xlim(self):
        """GH 3490: same right-limit extension for regular timeseries."""
        # GH 3490 - regular-timeseries with secondary y
        index_1 = date_range(start='2000-01-01', periods=4, freq='D')
        index_2 = date_range(start='2000-01-05', periods=4, freq='D')
        s1 = Series(1, index=index_1)
        s2 = Series(2, index=index_2)
        ax = s1.plot()
        left_before, right_before = ax.get_xlim()
        s2.plot(secondary_y=True, ax=ax)
        left_after, right_after = ax.get_xlim()
        self.assertEqual(left_before, left_after)
        self.assertTrue(right_before < right_after)
    @slow
    def test_secondary_y_mixed_freq_ts_xlim(self):
        """GH 3490: a downsampled secondary-y plot leaves xlim untouched."""
        # GH 3490 - mixed frequency timeseries with secondary y
        rng = date_range('2000-01-01', periods=10000, freq='min')
        ts = Series(1, index=rng)
        ax = ts.plot()
        left_before, right_before = ax.get_xlim()
        ts.resample('D').plot(secondary_y=True, ax=ax)
        left_after, right_after = ax.get_xlim()
        # a downsample should not have changed either limit
        self.assertEqual(left_before, left_after)
        self.assertEqual(right_before, right_after)
    @slow
    def test_secondary_y_irregular_ts_xlim(self):
        """GH 3490: secondary-axis limits survive a later primary plot and
        the final xlim spans the full irregular index."""
        # GH 3490 - irregular-timeseries with secondary y
        ts = tm.makeTimeSeries()[:20]
        ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
        ax = ts_irregular[:5].plot()
        # plot higher-x values on secondary axis
        ts_irregular[5:].plot(secondary_y=True, ax=ax)
        # ensure secondary limits aren't overwritten by plot on primary
        ts_irregular[:5].plot(ax=ax)
        left, right = ax.get_xlim()
        self.assertEqual(left, ts_irregular.index.min().toordinal())
        self.assertEqual(right, ts_irregular.index.max().toordinal())
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
    """Invoke plotting callable `f` twice (implicit axis, then explicit
    axis), optionally verify the resulting axis frequency against `freq`
    or `series.index.freq`, and confirm the figure can be saved.

    The figure is always closed, even when the callable raises.
    """
    import matplotlib.pyplot as plt
    fig = plt.gcf()
    try:
        plt.clf()
        ax = fig.add_subplot(211)
        orig_ax = kwargs.pop('ax', plt.gca())
        # remember any freq already attached to the target axis: an existing
        # freq takes precedence over the one the new plot would set
        orig_axfreq = getattr(orig_ax, 'freq', None)
        ret = f(*args, **kwargs)
        assert ret is not None  # do something more intelligent
        ax = kwargs.pop('ax', plt.gca())
        if series is not None:
            dfreq = series.index.freq
            if isinstance(dfreq, DateOffset):
                dfreq = dfreq.rule_code
            if orig_axfreq is None:
                assert ax.freq == dfreq
        if freq is not None and orig_axfreq is None:
            assert ax.freq == freq
        ax = fig.add_subplot(212)
        try:
            # second call with an explicit axis is best-effort only
            kwargs['ax'] = ax
            ret = f(*args, **kwargs)
            assert ret is not None  # do something more intelligent
        except Exception:
            pass
        with ensure_clean(return_filelike=True) as path:
            plt.savefig(path)
    finally:
        plt.close(fig)
if __name__ == '__main__':
    # Run this test module directly under nose: verbose, stop on first
    # failure, drop into pdb on errors/failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| 36.086124
| 79
| 0.563997
|
4a06a619dc6b0a3492b294be5eba310a52281c13
| 5,124
|
py
|
Python
|
feeder/ntu_feeder.py
|
zjl863761131/CrosSCLR
|
792b70c76902a5e7ca5696f5a032f14bb04a255a
|
[
"BSD-2-Clause"
] | 35
|
2021-04-20T03:30:20.000Z
|
2022-03-30T02:45:04.000Z
|
feeder/ntu_feeder.py
|
zjl863761131/CrosSCLR
|
792b70c76902a5e7ca5696f5a032f14bb04a255a
|
[
"BSD-2-Clause"
] | 1
|
2022-03-25T12:32:47.000Z
|
2022-03-25T12:32:47.000Z
|
feeder/ntu_feeder.py
|
zjl863761131/CrosSCLR
|
792b70c76902a5e7ca5696f5a032f14bb04a255a
|
[
"BSD-2-Clause"
] | 16
|
2021-04-22T14:38:05.000Z
|
2022-02-22T09:18:52.000Z
|
import numpy as np
import pickle, torch
from . import tools
class Feeder_single(torch.utils.data.Dataset):
    """Dataset feeder yielding one augmented sample and its label per item."""

    def __init__(self, data_path, label_path, shear_amplitude=0.5, temperal_padding_ratio=6, mmap=True):
        self.data_path = data_path
        self.label_path = label_path
        self.shear_amplitude = shear_amplitude
        self.temperal_padding_ratio = temperal_padding_ratio
        self.load_data(mmap)

    def load_data(self, mmap):
        # Labels are stored as a pickled (sample_name, label) pair.
        with open(self.label_path, 'rb') as label_file:
            self.sample_name, self.label = pickle.load(label_file)
        # Optionally memory-map the data array instead of loading it fully.
        self.data = np.load(self.data_path, mmap_mode='r' if mmap else None)

    def __len__(self):
        return len(self.label)

    def __getitem__(self, index):
        sample = np.array(self.data[index])
        # Augment on the fly; the label passes through untouched.
        return self._aug(sample), self.label[index]

    def _aug(self, data_numpy):
        # Temporal crop first, then shear; each step is skipped when its
        # parameter is non-positive.
        if self.temperal_padding_ratio > 0:
            data_numpy = tools.temperal_crop(data_numpy, self.temperal_padding_ratio)
        if self.shear_amplitude > 0:
            data_numpy = tools.shear(data_numpy, self.shear_amplitude)
        return data_numpy
class Feeder_dual(torch.utils.data.Dataset):
    """Dataset feeder yielding two independently augmented views per sample."""

    def __init__(self, data_path, label_path, shear_amplitude=0.5, temperal_padding_ratio=6, mmap=True):
        self.data_path = data_path
        self.label_path = label_path
        self.shear_amplitude = shear_amplitude
        self.temperal_padding_ratio = temperal_padding_ratio
        self.load_data(mmap)

    def load_data(self, mmap):
        # Labels are stored as a pickled (sample_name, label) pair.
        with open(self.label_path, 'rb') as label_file:
            self.sample_name, self.label = pickle.load(label_file)
        # Optionally memory-map the data array instead of loading it fully.
        self.data = np.load(self.data_path, mmap_mode='r' if mmap else None)

    def __len__(self):
        return len(self.label)

    def __getitem__(self, index):
        sample = np.array(self.data[index])
        # Two augmented views of the same underlying sample.
        return [self._aug(sample), self._aug(sample)], self.label[index]

    def _aug(self, data_numpy):
        # Temporal crop first, then shear; each step is skipped when its
        # parameter is non-positive.
        if self.temperal_padding_ratio > 0:
            data_numpy = tools.temperal_crop(data_numpy, self.temperal_padding_ratio)
        if self.shear_amplitude > 0:
            data_numpy = tools.shear(data_numpy, self.shear_amplitude)
        return data_numpy
# class Feeder_semi(torch.utils.data.Dataset):
# """ Feeder for semi-supervised learning """
# def __init__(self, data_path, label_path, shear_amplitude=0.5, temperal_padding_ratio=6, mmap=True, label_list=None):
# self.data_path = data_path
# self.label_path = label_path
# self.shear_amplitude = shear_amplitude
# self.temperal_padding_ratio = temperal_padding_ratio
# self.label_list = label_list
# self.load_data(mmap)
# self.load_semi_data()
# def load_data(self, mmap):
# # load label
# with open(self.label_path, 'rb') as f:
# self.sample_name, self.label = pickle.load(f)
# # load data
# if mmap:
# self.data = np.load(self.data_path, mmap_mode='r')
# else:
# self.data = np.load(self.data_path)
# def load_semi_data(self):
# data_length = len(self.label)
# if not self.label_list:
# self.label_list = list(range(data_length))
# else:
# self.label_list = np.load(self.label_list).tolist()
# self.label_list.sort()
# self.unlabel_list = list(range(data_length))
# def __len__(self):
# return len(self.unlabel_list)
# def __getitem__(self, index):
# # get data
# data_numpy = np.array(self.data[index])
# label = self.label[index]
# # processing
# data = self._aug(data_numpy)
# return data, label
# def __getitem__(self, index):
# label_index = self.label_list[index % len(self.label_list)]
# unlabel_index = self.unlabel_list[index]
# # get data
# label_data_numpy = np.array(self.data[label_index])
# unlabel_data_numpy = np.array(self.data[unlabel_index])
# label = self.label[label_index]
# # processing
# data1 = self._aug(unlabel_data_numpy)
# data2 = self._aug(unlabel_data_numpy)
# return [data1, data2], label_data_numpy, label
# def _aug(self, data_numpy):
# if self.temperal_padding_ratio > 0:
# data_numpy = tools.temperal_crop(data_numpy, self.temperal_padding_ratio)
# if self.shear_amplitude > 0:
# data_numpy = tools.shear(data_numpy, self.shear_amplitude)
# return data_numpy
| 30.86747
| 123
| 0.610656
|
4a06a669d244b78c600d7c7ad63005e2bc8fc598
| 1,810
|
py
|
Python
|
server/opendp_apps/dataverses/testing/test_serializers.py
|
opendifferentialprivacy/opendp-ux
|
2669602d0a65f6a83d9e9916cbf753c38fd64c94
|
[
"MIT"
] | null | null | null |
server/opendp_apps/dataverses/testing/test_serializers.py
|
opendifferentialprivacy/opendp-ux
|
2669602d0a65f6a83d9e9916cbf753c38fd64c94
|
[
"MIT"
] | 82
|
2020-08-06T17:11:12.000Z
|
2021-02-07T21:01:05.000Z
|
server/opendp_apps/dataverses/testing/test_serializers.py
|
opendifferentialprivacy/opendp-ux
|
2669602d0a65f6a83d9e9916cbf753c38fd64c94
|
[
"MIT"
] | 2
|
2020-10-16T22:03:24.000Z
|
2020-11-15T22:45:19.000Z
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from opendp_apps.dataverses.models import DataverseHandoff
from opendp_apps.dataverses.serializers import DataverseUserSerializer
from opendp_apps.user.models import DataverseUser
class TestDataverseUserSerializer(TestCase):
    """Exercises DataverseUserSerializer create/update against fixtures."""
    fixtures = ['test_dataverses_01.json',
                'test_manifest_params_04.json',
                'test_opendp_users_01.json']

    def setUp(self):
        # Depositor OpenDP user every test serializes against.
        self.user_obj, _created = get_user_model().objects.get_or_create(username='dv_depositor')

    def test_create(self):
        """A valid payload creates a user carrying the handoff's token."""
        serializer = DataverseUserSerializer(data={
            'object_id': '8d24e213-0da3-46cf-ba5c-9f1df5cec53d',
            'dv_installation': '58fd79dc-8541-4aa1-a7c2-85a5b443efa1',
            'user': self.user_obj.object_id,
            'dv_handoff': "9e7e5506-dd1a-4979-a2c1-ec6e59e4769c",
            'persistent_id': 1,
            'email': 'test@test.com',
            'first_name': 'test',
            'last_name': 'test',
            'dv_general_token': 1234,
            'dv_sensitive_token': 1234,
            'dv_token_update': None
        })
        self.assertEqual(serializer.is_valid(), True)
        dataverse_user = serializer.save()
        # Ensure token from DataverseHandoff makes it onto the new DataverseUser
        # (assertEquals is a deprecated alias; use assertEqual consistently)
        self.assertEqual(dataverse_user.dv_general_token, DataverseHandoff.objects.first().apiGeneralToken)

    def test_update(self):
        """Updating an existing user bumps its `updated` timestamp."""
        dataverse_user = DataverseUser.objects.first()
        original_updated = dataverse_user.updated
        serializer = DataverseUserSerializer()
        updated_instance = serializer.update(dataverse_user, validated_data={'user': dataverse_user.user.object_id})
        self.assertNotEqual(original_updated, updated_instance.updated)
| 42.093023
| 116
| 0.693923
|
4a06a6abd08cf181c674e590601b342f514a7f0e
| 1,168
|
py
|
Python
|
dd_crawler/commands/login.py
|
TeamHG-Memex/domain-discovery-crawler
|
171f16a0b18d30e23ae6793b011dcfbad8299240
|
[
"MIT"
] | 16
|
2017-11-14T10:11:32.000Z
|
2021-08-07T16:05:14.000Z
|
dd_crawler/commands/login.py
|
TeamHG-Memex/domain-discovery-crawler
|
171f16a0b18d30e23ae6793b011dcfbad8299240
|
[
"MIT"
] | null | null | null |
dd_crawler/commands/login.py
|
TeamHG-Memex/domain-discovery-crawler
|
171f16a0b18d30e23ae6793b011dcfbad8299240
|
[
"MIT"
] | 9
|
2018-06-14T18:37:22.000Z
|
2021-06-02T02:46:26.000Z
|
from scrapy import Request
from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy_redis.scheduler import Scheduler
def add_login(spider, url, login, password, queue=None):
    """Store login credentials for `url` and re-queue that url for crawling.

    `queue` defaults to the spider's own queue; callers may pass a different
    queue (e.g. one taken from a scheduler).
    """
    print('Adding login url: {}'.format(url))
    if queue is None:
        queue = spider.queue
    queue.add_login_credentials(url, login, password)
    # push some known url from this domain to make sure we re-crawl it
    # while logged-in
    queue.push(Request(url=url, priority=spider.initial_priority))
class Command(ScrapyCommand):
    """``scrapy login <spider> <url> <login> <password>``: store credentials
    on the spider's scheduler queue via add_login()."""
    requires_project = True
    def syntax(self):
        return '<spider> <url> <login> <password>'
    def short_desc(self):
        return 'Specify login credentials at given url'
    def run(self, args, opts):
        # Exactly four positional arguments are required (see syntax()).
        if len(args) != 4:
            raise UsageError()
        spider_name, url, login, password = args
        crawler = self.crawler_process.create_crawler(spider_name)
        scheduler = Scheduler.from_settings(self.settings)
        spider = crawler.spidercls.from_crawler(crawler)
        scheduler.open(spider)
        add_login(spider, url, login, password, queue=scheduler.queue)
| 31.567568
| 70
| 0.696918
|
4a06a74360ac66da1d23948864c6c56c86a35f5f
| 6,234
|
py
|
Python
|
scripts/deploy.py
|
rkassa/viz
|
1005877f510bf3fcd571846f0f7cdc69cda7f982
|
[
"MIT"
] | null | null | null |
scripts/deploy.py
|
rkassa/viz
|
1005877f510bf3fcd571846f0f7cdc69cda7f982
|
[
"MIT"
] | null | null | null |
scripts/deploy.py
|
rkassa/viz
|
1005877f510bf3fcd571846f0f7cdc69cda7f982
|
[
"MIT"
] | null | null | null |
"""
Makes it easy and painless to deploy the site and make all necessary changes
so that it's immediately ready to serve in production.
"""
import glob
import json
import os
import shlex
import subprocess
import sys
from colorama import Fore, Style
import data_util
import js_compilation
# Files and directories that should be deployed. Everything else will be ignored.
INCLUDE_LIST = [
"index.html",
"c",
"js/bundle.js",
"css/styles.css",
"img/*",
"fonts/*",
]
HTML_FILES = [
"country.html",
"index.html",
]
with open("config.json") as f:
CONFIG = json.loads(f.read())
f.close()
MAPBOX_PROD_API_TOKEN = "pk.eyJ1IjoiaGVhbHRobWFwIiwiYSI6ImNrOGl1NGNldTAyYXYzZnBqcnBmN3RjanAifQ.H377pe4LPPcymeZkUBiBtg"
# Returns True if everything we need is here, False otherwise.
def check_dependencies():
    """Verify 'sass' is installed and fetch the Closure compiler if absent."""
    try:
        subprocess.check_call(shlex.split("sass --version"),
                              stdout=subprocess.DEVNULL)
    except (subprocess.CalledProcessError, OSError):
        print("Please install 'sass' first.")
        return False
    # If the Closure compiler isn't available, let's get that setup.
    if not os.path.exists("tools/closure-compiler.jar"):
        print("The Closure compiler isn't available, fetching it. "
              "This will only happen once.")
        if not os.path.exists("tools"):
            os.mkdir("tools")
        os.system("curl \"https://repo1.maven.org/maven2/com/google/javascript/"
                  "closure-compiler/v20200830/closure-compiler-v20200830.jar"
                  "\" > tools/closure-compiler.jar")
    return True
def insert_analytics_code(quiet=False):
    """Inject the contents of analytics.js into index.html.

    The snippet is inserted once, immediately before the first line that
    contains a "<script" tag; index.html is rewritten in place. `quiet` is
    accepted for interface consistency with the other deploy steps.
    """
    with open("analytics.js") as f:
        code = f.read()
    main_page = ""
    inserted = False
    with open("index.html") as f:
        for line in f:
            if not inserted and "<script" in line:
                main_page += code
                inserted = True
            main_page += line
    # Rewrite in place: open(..., "w") truncates, so shelling out to `rm`
    # first (as before) was unnecessary; inner f.close() calls inside the
    # 'with' blocks were redundant too.
    with open("index.html", "w") as f:
        f.write(main_page)
def link_to_compiled_js_in_html(html_file):
    """Replace the <!-- js --> ... <!-- /js --> region of `html_file` with a
    single <script> tag loading the compiled bundle.

    Both marker lines and everything between them are dropped; the script
    tag is emitted where the closing marker was. The file is rewritten in
    place.
    """
    html = ""
    scripting_time = False
    with open(html_file) as f:
        for line in f:
            stripped = line.strip()
            if stripped == "<!-- /js -->":
                scripting_time = False
                html += '<script src="/js/bundle.js"></script>\n'
            elif scripting_time:
                continue
            elif stripped == "<!-- js -->":
                scripting_time = True
            else:
                html += line
    # Rewrite in place instead of shelling out to `rm` with an unquoted
    # path (which broke on paths containing spaces or shell metacharacters).
    with open(html_file, "w") as f:
        f.write(html)
def use_compiled_js(quiet=False):
    """Compile the JS bundle and point every deployable HTML file at it."""
    js_compilation.compile_js(quiet)
    for h in HTML_FILES:
        link_to_compiled_js_in_html(h)
# Returns whether the operation was a success.
def backup_pristine_files():
    """Copy each deploy-modified HTML file to '<name>.orig'."""
    success = True
    for h in HTML_FILES:
        success &= os.system("cp " + h + " " + h + ".orig") == 0
    return success
# Returns whether the operation was a success.
def restore_pristine_files():
    """Move each '<name>.orig' backup back over the modified HTML file."""
    success = True
    for h in HTML_FILES:
        success &= os.system("mv " + h + ".orig " + h) == 0
    return success
def copy_contents(target_path, quiet=False):
    """Wipe `target_path` and copy everything in INCLUDE_LIST into it.

    Glob entries are expanded; parent directories named in an entry are
    created under `target_path` first. Returns True only if every shell
    command succeeded.
    """
    success = True
    if not quiet:
        print("Copying new version into '" + target_path + "'...")
    # TODO: Use 'rsync' if it's available.
    success &= (os.system("rm -rf " + target_path + "/*") == 0)
    to_copy = []
    for f in INCLUDE_LIST:
        if "/" in f:
            # create intermediate directories for nested entries
            parents = f.split("/")[:-1]
            for p in parents:
                if not os.path.exists(os.path.join(target_path, p)):
                    os.mkdir(os.path.join(target_path, p))
        if "*" not in f:
            to_copy.append([f, os.path.join(target_path, f)])
        else:
            to_copy += [[p, os.path.join(target_path, p)] for p in glob.glob(f)]
    for pair in to_copy:
        cmd = "cp -a " + pair[0] + " " + pair[1]
        success &= (os.system(cmd) == 0)
    return success
def replace_string_in_dest_file(to_replace, replacement,
                                target_path, relative_path):
    """Replace every occurrence of `to_replace` with `replacement` in the
    file at `target_path`/`relative_path`. Always returns True.
    """
    full_path = os.path.join(target_path, relative_path)
    with open(full_path) as f:
        contents = f.read()
    # str.replace already substitutes every occurrence in a single pass;
    # the previous `while to_replace in contents:` loop was redundant and
    # spun forever whenever `replacement` itself contained `to_replace`.
    contents = contents.replace(to_replace, replacement)
    with open(full_path, "w") as f:
        f.write(contents)
    return True
def deploy(disease_id, target_path, quiet=False):
    """Full deployment pipeline for one disease site into `target_path`.

    Compiles CSS/JS, injects analytics, generates country pages, copies the
    result, then substitutes the {{...}} placeholders in the JS bundle with
    values from CONFIG[disease_id]. Prints a summary at the end.
    """
    if not check_dependencies():
        sys.exit(1)
    success = True
    success &= backup_pristine_files()
    success &= (os.system("sass css/styles.scss css/styles.css") == 0)
    use_compiled_js(quiet=quiet)
    insert_analytics_code(quiet=quiet)
    success &= data_util.make_country_pages()
    success &= copy_contents(target_path, quiet=quiet)
    # originals are restored locally; the copied tree keeps the rewrites
    success &= restore_pristine_files()
    success &= replace_string_in_dest_file(
        "{{DATA_SRC_URL}}",
        CONFIG[disease_id]["data_src_url"],
        target_path, "js/bundle.js")
    success &= replace_string_in_dest_file(
        "{{TITLE}}", CONFIG[disease_id]["name"],
        target_path, "js/bundle.js")
    success &= replace_string_in_dest_file(
        "{{MAPBOX_API_TOKEN}}",
        MAPBOX_PROD_API_TOKEN, target_path, "js/bundle.js")
    # cross-links to the other disease sites, encoded as id|name|url triples
    other_diseases = []
    for did in CONFIG[disease_id]["linkto"]:
        other_diseases.append("|".join([
            did, CONFIG[did]["name"], CONFIG[did]["url"]]))
    success &= replace_string_in_dest_file(
        "{{OTHER_DISEASES}}",
        ",".join(other_diseases), target_path, "js/bundle.js")
    if success:
        if not quiet:
            print(Fore.GREEN + "All done. " + Style.RESET_ALL + ""
                  "You can test it out with: "
                  "cd " + target_path + " && python3 -m http.server")
    else:
        print(Fore.RED + "Something went wrong." + Style.RESET_ALL)
| 30.262136
| 118
| 0.601059
|
4a06a74bfa1782dd989a3ba4c4bdcaf813c89926
| 4,078
|
py
|
Python
|
api/serializers/couriers.py
|
Shubarin/candy_delivery_api
|
9bbf15621f9d5837a96cc1868260e47048c5b268
|
[
"BSD-3-Clause"
] | null | null | null |
api/serializers/couriers.py
|
Shubarin/candy_delivery_api
|
9bbf15621f9d5837a96cc1868260e47048c5b268
|
[
"BSD-3-Clause"
] | null | null | null |
api/serializers/couriers.py
|
Shubarin/candy_delivery_api
|
9bbf15621f9d5837a96cc1868260e47048c5b268
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from collections import defaultdict
from api.models.couriers import Courier
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
class CourierSerializer(serializers.ModelSerializer):
    """Read/write serializer for a single Courier.

    Field-level ``validate_<field>`` hooks enforce value constraints;
    ``update`` additionally verifies that profile changes do not break
    active (incomplete) delivery assignments.
    """
    courier_id = serializers.IntegerField()

    class Meta:
        fields = ('courier_id', 'courier_type', 'regions', 'working_hours')
        model = Courier

    def to_internal_value(self, data):
        """Reject payloads that are empty or carry unknown fields."""
        extra_field_in_request = any(
            [field not in self.fields for field in data])
        if extra_field_in_request:
            raise ValidationError(
                {"validation_error": 'extra fields in request'})
        if len(data) == 0:
            raise ValidationError('empty request')
        return super(CourierSerializer, self).to_internal_value(data)

    def update(self, instance, validated_data):
        """Apply changes after checking open assignments stay deliverable."""
        # Changing regions must not orphan an accepted order.
        instance.check_change_regions(validated_data.get('regions'))
        # Changing working hours must not orphan an accepted order.
        instance.check_change_working_hours(
            validated_data.get('working_hours'))
        # Changing the courier type must not orphan an accepted order.
        instance.check_change_courier_type(validated_data.get('courier_type'))
        assign = instance.assign.filter(is_complete=False).first()
        if assign and assign.can_close():
            assign.is_complete = True
            assign.save()
        return super(CourierSerializer, self).update(instance, validated_data)

    def validate_courier_id(self, courier_id):
        """Reject ids that already exist (creation must be unique)."""
        # NOTE: previously declared @classmethod with a first parameter
        # named `self`; DRF invokes field validators as instance methods,
        # so that only worked by accident of descriptor binding.
        courier = Courier.objects.filter(pk=courier_id).first()
        if courier:
            raise ValidationError('invalid value courier_id: '
                                  f'({courier_id}) id already exists')
        return courier_id

    @staticmethod
    def validate_courier_type(courier_type):
        """Only the three known transport types are accepted."""
        if courier_type not in ['foot', 'bike', 'car']:
            raise ValidationError('invalid value courier_type')
        return courier_type

    @staticmethod
    def validate_regions(regions):
        """Regions must all be positive integers."""
        try:
            for num in regions:
                if int(num) < 1:
                    raise ValueError('invalid values in regions list')
            return regions
        except ValueError as e:
            raise ValidationError(e)

    @staticmethod
    def validate_working_hours(working_hours):
        """Each 'HH:MM-HH:MM' period must end strictly after it starts.

        Working hours cover at most a single day, so a non-positive or
        multi-day interval is rejected.
        """
        try:
            for period in working_hours:
                start, end = period.split('-')
                start = datetime.datetime.strptime(start, "%H:%M")
                end = datetime.datetime.strptime(end, "%H:%M")
                interval = end - start
                if interval.days >= 1 or interval.days < 0:
                    raise ValueError
            return working_hours
        except ValueError:
            raise ValidationError('invalid values in working_hours list')
class CourierListSerializer(serializers.Serializer):
    """Bulk-create serializer accepting ``{'data': [courier, ...]}``."""
    data = CourierSerializer(required=False, many=True, write_only=True)

    def create(self, validated_data):
        """Bulk-insert couriers; reject empty payloads and duplicate ids."""
        data = validated_data.get('data')
        if not data:
            raise ValidationError({'validation_error': 'empty request'})
        # Courier ids within a single request must be unique.
        couriers_ids = [item.get('courier_id') for item in data]
        if len(couriers_ids) != len(set(couriers_ids)):
            # count occurrences; report every id seen more than once
            # (loop variable renamed: `id` shadowed the builtin)
            counts = defaultdict(int)
            for courier_id in couriers_ids:
                counts[courier_id] += 1
            failed_ids = [{'id': item} for item in counts if counts[item] != 1]
            raise ValidationError(
                {'validation_error': {'couriers': failed_ids}})
        couriers = [Courier(**item) for item in data]
        return Courier.objects.bulk_create(couriers)

    def to_representation(self, instance):
        """Expose only the primary keys of the created couriers."""
        data = {'couriers': [{'id': courier.pk} for courier in instance]}
        return data
| 39.980392
| 78
| 0.633889
|
4a06a793bbbfb18169557a597d4d7cf8602ae578
| 2,909
|
py
|
Python
|
flask/appWebServer.py
|
mobalk/raspi-weathercam
|
c97fb6b979a6362211fc13def283e4a3bbc13213
|
[
"MIT"
] | null | null | null |
flask/appWebServer.py
|
mobalk/raspi-weathercam
|
c97fb6b979a6362211fc13def283e4a3bbc13213
|
[
"MIT"
] | null | null | null |
flask/appWebServer.py
|
mobalk/raspi-weathercam
|
c97fb6b979a6362211fc13def283e4a3bbc13213
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, send_from_directory
import pandas as pd
import sqlite3
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import timedelta
import os
import sys
sys.path.insert(0,'..')
import config
conf = config.init('../config.ini')
config.read(conf)
dbPath = conf.get('app', 'PathToDatabase')
def storeTodayChart():
    """Render today's temperature/humidity readings to static/chart.png.

    Returns:
        The chart file name ('chart.png'), or '' when there are no
        readings for today yet.
    """
    title = ''
    conn = sqlite3.connect(dbPath)
    try:
        with conn:  # transaction scope only; commits/rolls back
            table = pd.read_sql_query("""select datetime(timestamp, 'localtime') as ts, temp, hum
                                  from DHT_data where ts >= date('now', 'localtime')""",
                                  conn, parse_dates=['ts'])
    finally:
        # Fix: `with conn` does NOT close the connection -- close it
        # explicitly so repeated web requests do not leak connections.
        conn.close()
    if not table.empty:
        print(table)
        table.plot(x='ts', subplots=True, grid=True, xlabel='')
        plt.gca().xaxis.set_major_locator(mdates.HourLocator(interval = 4))
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
        plt.gca().xaxis.set_minor_locator(mdates.HourLocator(interval = 1))
        # Pin the x axis to the full calendar day.
        startDate = table['ts'].iloc[0].date()
        endDate = startDate + timedelta(days=1)
        plt.xlim([startDate, endDate])
        title = "chart.png"
        plt.savefig('static/' + title)
        # Fix: without this every request leaked an open figure (matplotlib
        # keeps figures alive until plt.close is called).
        plt.close('all')
    return title
def secToDelta(sec):
    """Format an age in seconds as a Hungarian ' (N unit ago)' suffix."""
    minute, hour, day = 60, 60 * 60, 24 * 60 * 60
    if sec < minute:
        return " (1 perce)"
    if sec < hour:
        return " (" + str(int(sec / minute)) + " perce)"
    if sec < day:
        return " (" + str(int(sec / hour)) + " órája)"
    return " (" + str(int(sec / day)) + " napja)"
def getLast():
    """Return (display time with age suffix, temperature, humidity) for the
    newest DHT reading.

    NOTE(review): if the table is empty the loop body never runs and the
    final return raises NameError -- this appears to assume at least one
    reading exists; confirm against callers.
    """
    conn = sqlite3.connect(dbPath)
    with conn:
        curs=conn.cursor()
        # Newest row only; the second selected column is the row's age in
        # seconds (now - timestamp).
        for row in curs.execute("""SELECT datetime(timestamp, 'localtime'),
                          strftime('%s', 'now') - strftime('%s', timestamp),
                          temp, hum
                          FROM DHT_data ORDER BY timestamp DESC LIMIT 1"""):
            # format time: cut seconds and format date separator
            time = str(row[0])[:-3].replace('-', '.').replace(' ', ', ')
            delta = secToDelta(row[1])
            temp = row[2]
            hum = row[3]
        return time + delta, temp, hum
app = Flask(__name__)
@app.route('/')
def index():
    """Main page: regenerate today's chart and show the latest reading."""
    chart = storeTodayChart()
    time, temp, hum = getLast()
    templateData = {
        'time' : time,
        'temp' : temp,
        'hum' : hum,
        'chart' : chart,
        # Optional embedded iframe URL from config; empty string if unset.
        'iframe' : conf.get('flask', 'usercontent', fallback="")
    }
    return render_template('index.html', **templateData)
@app.route('/favicon.ico')
def favicon():
    """Serve the favicon from the static directory (PNG despite the .ico URL)."""
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.png', mimetype='image/png')
if __name__ == '__main__':
    # Listens on all interfaces; port 80 requires elevated privileges.
    app.run(debug=False, port=80, host='0.0.0.0')
| 34.630952
| 93
| 0.551048
|
4a06a928f3545afb7be64442b5c15885818f54a2
| 2,200
|
py
|
Python
|
stac_fastapi/pgstac/stac_fastapi/pgstac/transactions.py
|
borism/stac-fastapi
|
81015a153c1d9f36d8e12f17a1bf67370396f472
|
[
"MIT"
] | 64
|
2021-03-27T19:34:29.000Z
|
2022-03-31T07:58:58.000Z
|
stac_fastapi/pgstac/stac_fastapi/pgstac/transactions.py
|
borism/stac-fastapi
|
81015a153c1d9f36d8e12f17a1bf67370396f472
|
[
"MIT"
] | 218
|
2021-03-27T19:51:54.000Z
|
2022-03-28T12:41:56.000Z
|
stac_fastapi/pgstac/stac_fastapi/pgstac/transactions.py
|
borism/stac-fastapi
|
81015a153c1d9f36d8e12f17a1bf67370396f472
|
[
"MIT"
] | 44
|
2021-04-05T12:06:25.000Z
|
2022-03-01T12:06:29.000Z
|
"""transactions extension client."""
import logging
from typing import Dict
import attr
from stac_fastapi.pgstac.db import dbfunc
from stac_fastapi.types import stac as stac_types
from stac_fastapi.types.core import AsyncBaseTransactionsClient
logger = logging.getLogger("uvicorn")
logger.setLevel(logging.INFO)
@attr.s
class TransactionsClient(AsyncBaseTransactionsClient):
    """Transactions extension specific CRUD operations.

    Each method is a thin async wrapper that forwards to the corresponding
    pgSTAC database function via ``dbfunc`` on the app's write pool.
    """
    async def create_item(self, item: stac_types.Item, **kwargs) -> stac_types.Item:
        """Create item."""
        request = kwargs["request"]
        pool = request.app.state.writepool
        await dbfunc(pool, "create_item", item)
        return item
    async def update_item(self, item: stac_types.Item, **kwargs) -> stac_types.Item:
        """Update item."""
        request = kwargs["request"]
        pool = request.app.state.writepool
        await dbfunc(pool, "update_item", item)
        return item
    async def create_collection(
        self, collection: stac_types.Collection, **kwargs
    ) -> stac_types.Collection:
        """Create collection."""
        request = kwargs["request"]
        pool = request.app.state.writepool
        await dbfunc(pool, "create_collection", collection)
        return collection
    async def update_collection(
        self, collection: stac_types.Collection, **kwargs
    ) -> stac_types.Collection:
        """Update collection."""
        request = kwargs["request"]
        pool = request.app.state.writepool
        await dbfunc(pool, "update_collection", collection)
        return collection
    async def delete_item(self, item_id: str, collection_id: str, **kwargs) -> Dict:
        """Delete item.

        NOTE(review): ``collection_id`` is accepted for interface
        compatibility but not passed to the database function -- confirm
        item ids are globally unique in this deployment.
        """
        request = kwargs["request"]
        pool = request.app.state.writepool
        await dbfunc(pool, "delete_item", item_id)
        return {"deleted item": item_id}
    async def delete_collection(self, collection_id: str, **kwargs) -> Dict:
        """Delete collection."""
        request = kwargs["request"]
        pool = request.app.state.writepool
        await dbfunc(pool, "delete_collection", collection_id)
        return {"deleted collection": collection_id}
| 33.846154
| 84
| 0.668636
|
4a06aab229d4b4c835e7c1662c45f7f73717e042
| 5,611
|
py
|
Python
|
data/templates/authentication/reset_password.mako.py
|
sumukh210991/Cyberweb
|
297bd54c9e223d38818b802087055e397c403f1c
|
[
"Apache-2.0"
] | null | null | null |
data/templates/authentication/reset_password.mako.py
|
sumukh210991/Cyberweb
|
297bd54c9e223d38818b802087055e397c403f1c
|
[
"Apache-2.0"
] | null | null | null |
data/templates/authentication/reset_password.mako.py
|
sumukh210991/Cyberweb
|
297bd54c9e223d38818b802087055e397c403f1c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1465687803.470334
_enable_loop = True
_template_filename = '/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/authentication/reset_password.mako'
_template_uri = '/authentication/reset_password.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
_exports = ['headtags', 'col2main']
def _mako_get_namespace(context, name):
    # Auto-generated by Mako: return the cached namespace for (module, name),
    # lazily creating all namespaces on the first cache miss.
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
    return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # Auto-generated by Mako: this template declares no namespaces.
    pass
def _mako_inherit(template, context):
    # Auto-generated by Mako: wire up template inheritance from the
    # authentication layout template.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, u'/authentication/authentication.layout.mako', _template_uri)
def render_body(context,**pageargs):
    # Auto-generated by Mako: top-level body emits only the whitespace
    # between the <%block> definitions (rendered separately below).
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        __M_writer(u'\n\n')
        __M_writer(u'\n\n')
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_headtags(context):
    # Auto-generated by Mako: the template's <%block name="headtags"> is
    # empty apart from a trailing newline.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_col2main(context):
__M_caller = context.caller_stack._push_frame()
try:
c = context.get('c', UNDEFINED)
__M_writer = context.writer()
__M_writer(u'\n\n\t<script type="text/javascript">\n\t\tfunction changePassword() {\n\t\t\tvar messageCenter = document.getElementById("messageCenter");\n\t\t\tvar newpassword = document.getElementById("newpassword");\n\t\t\tvar newconfirmpassword = document.getElementById("newconfirmpassword");\n\t\t\t\n\t\t\tif(newpassword.value == newconfirmpassword.value) {\n\t\t\t\t$.post(\'/authentication/changePassword\',$(\'#prefbar\').serialize(),getResult);\n\t\t\t} else {\n\t\t\t\tmessageCenter.innerHTML = "New Password and Confirmation Password do not match.";\n\t\t\t\tmessageCenter.className = \'errorConsole\';\n\t\t\t\tsetTimeout("$(\'#messageCenter\').hide(\'slow\');",10000);\n\t\t\t}\n\t\t}\n\t\t\n\t\tfunction getResult(data) {\n\t\t\t$(\'#messageCenter\').show(\'slow\');\n\t\t\tvar messageCenter = document.getElementById("messageCenter");\n\t\t\tmyData = eval("(" + data + ")");\n\t\t\tvar isError = myData[\'Error\'];\n\t\t\tvar message = myData[\'Message\'];\n\t\t\tmessageCenter.innerHTML = message;\n\t\t\tif(isError.toUpperCase() == \'TRUE\') {\n\t\t\t\tmessageCenter.className = \'errorConsole\';\n\t\t\t} else {\n\t\t\t\tmessageCenter.className = \'messageConsole\';\n\t\t\t}\n\t\t\tsetTimeout("$(\'#messageCenter\').hide(\'slow\');",10000);\n\t\t}\n\t</script>\n\t\n <style type="text/css">\n \t.errorConsole {\n \t\tmargin: 0.5em;\n \t\tcolor: red;\n \t\tfont-weight: bold;\n \t}\n \t.messageConsole {\n \t\tmargin: 0.5em;\n \t\tcolor: green;\n \t\tfont-weight: bold;\n \t}\n .prefbutton {\n margin:0 10px 0 10px;\n display:inline;\n }\n .prefbuttons {\n width: 190px;\n margin: 0 auto;\n text-align: center;\n }\n .prefheader {\n float:left;\n width: 130px;\n text-align: right;\n color: grey;\n font-weight: bold;\n margin: 5px 0 5px 0;\n }\n .prefvalue {\n float:left;\n padding-left:15px;\n width: 323px;\n margin: 5px 0 5px 0;\n }\n .prefbar {\n background:#cccccc;\n padding-left:15px;\n margin-bottom:7px;\n }\n </style>\n\n <div style="width:500px">\n\n <div 
class="prefbar">Change Password for CyberWeb User: ')
__M_writer(escape(c.account['username']))
__M_writer(u'</div>\n \t<div id="messageCenter"></div>\n \t<form id="prefbar" name="prefbar" mathod="POST" action="">\n \t\t<div id="oldpasswordDiv" class="prefrow">\n\t\t\t<div class="prefheader">Old Password:</div><div class="prefvalue"><input type="password" \n id="oldpassword" name="oldpassword" value=""/></div>\n\t\t</div>\n\t\t<div id="newpasswordDiv" class="prefrow">\n\t\t\t<div class="prefheader">New Password:</div><div class="prefvalue"><input type="password" \n id="newpassword" name="newpassword" value=""/></div>\n\t\t</div>\n\t\t<div id="newconfirmpasswordDiv" class="prefrow">\n\t\t\t<div class="prefheader">Confirm Password:</div><div class="prefvalue"><input type="password" \n id="newconfirmpassword" name="newconfirmpassword" value=""/></div>\n\t\t</div>\n\t\t\n\t\t<br>\n\t\t<div class="prefbuttons">\n \t\t<div id="savebutton" class="prefbutton"><a href="#" onClick="changePassword();">Save Password</a></div>\n \t\t<div id="cancelbutton" class="prefbutton"><a href="#" onClick="document.prefbar.clear();">Cancel</a></div>\n \t\t</div>\n\t</form>\n </div>\n <br><br>\n\n </div>\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"64": 58, "33": 1, "34": 4, "35": 108, "41": 3, "45": 3, "51": 6, "56": 6, "57": 82, "58": 82, "28": 0}, "uri": "/authentication/reset_password.mako", "filename": "/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/authentication/reset_password.mako"}
__M_END_METADATA
"""
| 81.318841
| 2,185
| 0.661736
|
4a06ab770aaa072c8858e0f527f21dcbc10bbbdd
| 8,151
|
py
|
Python
|
tensorflow/python/kernel_tests/edit_distance_op_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/python/kernel_tests/edit_distance_op_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/kernel_tests/edit_distance_op_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def ConstantOf(x):
  """Wrap `x` in a TF constant, coercing non-string arrays to int64."""
  arr = np.asarray(x)
  if arr.dtype.char in "SU":
    # Bytes/unicode arrays are passed through with their original dtype.
    return constant_op.constant(arr)
  return constant_op.constant(np.asarray(arr, dtype=np.int64))
class EditDistanceTest(test.TestCase):
  """Tests for `array_ops.edit_distance` over SparseTensor inputs."""
  def _testEditDistanceST(self,
                          hypothesis_st,
                          truth_st,
                          normalize,
                          expected_output,
                          expected_err_re=None,
                          expected_shape=None):
    """Run edit_distance once and check output/shape or the expected error."""
    edit_distance = array_ops.edit_distance(
        hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
    if expected_err_re is None:
      self.assertEqual(edit_distance.get_shape(), expected_shape)
      output = self.evaluate(edit_distance)
      self.assertAllClose(output, expected_output)
    else:
      with self.assertRaisesOpError(expected_err_re):
        self.evaluate(edit_distance)
  def _testEditDistance(self,
                        hypothesis,
                        truth,
                        normalize,
                        expected_output,
                        expected_err_re=None):
    """Check the op twice: SparseTensorValue inputs, then SparseTensor inputs."""
    # Shape inference figures out the shape from the shape variables
    # Explicit tuple() needed since zip returns an iterator in Python 3.
    expected_shape = [
        max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]
    ]
    # SparseTensorValue inputs.
    with ops.Graph().as_default() as g, self.session(g):
      # hypothesis and truth are (index, value, shape) tuples
      self._testEditDistanceST(
          hypothesis_st=sparse_tensor.SparseTensorValue(
              *[ConstantOf(x) for x in hypothesis]),
          truth_st=sparse_tensor.SparseTensorValue(
              *[ConstantOf(x) for x in truth]),
          normalize=normalize,
          expected_output=expected_output,
          expected_shape=expected_shape,
          expected_err_re=expected_err_re)
    # SparseTensor inputs.
    with ops.Graph().as_default() as g, self.session(g):
      # hypothesis and truth are (index, value, shape) tuples
      self._testEditDistanceST(
          hypothesis_st=sparse_tensor.SparseTensor(
              *[ConstantOf(x) for x in hypothesis]),
          truth_st=sparse_tensor.SparseTensor(*[ConstantOf(x) for x in truth]),
          normalize=normalize,
          expected_output=expected_output,
          expected_shape=expected_shape,
          expected_err_re=expected_err_re)
  def testEditDistanceNormalized(self):
    # Normalized distance divides by the truth length of each entry.
    hypothesis_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
    hypothesis_values = [0, 1, 1, -1]
    hypothesis_shape = [2, 2]
    truth_indices = [[0, 0], [1, 0], [1, 1]]
    truth_values = [0, 1, 1]
    truth_shape = [2, 2]
    expected_output = [1.0, 0.5]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceUnnormalized(self):
    hypothesis_indices = [[0, 0], [1, 0], [1, 1]]
    hypothesis_values = [10, 10, 11]
    hypothesis_shape = [2, 2]
    truth_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
    truth_values = [1, 2, 1, -1]
    truth_shape = [2, 3]
    expected_output = [2.0, 2.0]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=False,
        expected_output=expected_output)
  def testEditDistanceProperDistance(self):
    # In this case, the values are individual characters stored in the
    # SparseTensor (type DT_STRING)
    hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
                          [[1, i] for i, _ in enumerate("altruistic")])
    hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
    hypothesis_shape = [2, 11]
    truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
                     [[1, i] for i, _ in enumerate("algorithm")])
    truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
    truth_shape = [2, 11]
    expected_unnormalized = [6.0, 6.0]
    expected_normalized = [6.0 / len("altruistic"), 6.0 / len("algorithm")]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=False,
        expected_output=expected_unnormalized)
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_normalized)
  def testEditDistance3D(self):
    hypothesis_indices = [[0, 0, 0], [1, 0, 0]]
    hypothesis_values = [0, 1]
    hypothesis_shape = [2, 1, 1]
    truth_indices = [[0, 1, 0], [1, 0, 0], [1, 1, 0]]
    truth_values = [0, 1, 1]
    truth_shape = [2, 2, 1]
    expected_output = [
        [np.inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
        [0.0, 1.0]
    ]  # (1,0): match, (1,1): no hypothesis
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceZeroLengthHypothesis(self):
    # Empty hypothesis vs one truth token: normalized distance is 1.
    hypothesis_indices = np.empty((0, 2), dtype=np.int64)
    hypothesis_values = []
    hypothesis_shape = [1, 0]
    truth_indices = [[0, 0]]
    truth_values = [0]
    truth_shape = [1, 1]
    expected_output = [1.0]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceZeroLengthTruth(self):
    hypothesis_indices = [[0, 0]]
    hypothesis_values = [0]
    hypothesis_shape = [1, 1]
    truth_indices = np.empty((0, 2), dtype=np.int64)
    truth_values = []
    truth_shape = [1, 0]
    expected_output = [np.inf]  # Normalized, loss is 1/0 = inf
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceZeroLengthHypothesisAndTruth(self):
    hypothesis_indices = np.empty((0, 2), dtype=np.int64)
    hypothesis_values = []
    hypothesis_shape = [1, 0]
    truth_indices = np.empty((0, 2), dtype=np.int64)
    truth_values = []
    truth_shape = [1, 0]
    expected_output = [0]  # Normalized is 0 because of exact match
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
if __name__ == "__main__":
test.main()
| 37.562212
| 80
| 0.651576
|
4a06abafbeb20a3ea075a0194ea128392a0a10a9
| 2,814
|
py
|
Python
|
tests/test_bot.py
|
BurhanH/automaton-v17
|
5f57db6103dd02c3714f85ec184be94e44f611d7
|
[
"MIT"
] | null | null | null |
tests/test_bot.py
|
BurhanH/automaton-v17
|
5f57db6103dd02c3714f85ec184be94e44f611d7
|
[
"MIT"
] | 2
|
2021-07-14T01:15:26.000Z
|
2022-01-23T18:20:05.000Z
|
tests/test_bot.py
|
BurhanH/automaton-v17
|
5f57db6103dd02c3714f85ec184be94e44f611d7
|
[
"MIT"
] | null | null | null |
import unittest
from ddt import ddt, data, unpack
from source import bot
@ddt
class TestBotBow(unittest.TestCase):
    """Simple test suite to test bot bow responses."""
    # Each @data tuple is (user input, expected reply); @unpack spreads the
    # tuple into the test method's positional arguments.
    @data(
        ('Hi!', 'Hey!'),
        ('Hello!', 'Howdy.'),
    )
    @unpack
    def test_greeting(self, sentence: str, response: str) -> None:
        self.assertEqual(bot.chat_bow(sentence), response)
    @data(
        ('How are you?', 'Lovely, thanks.'),
        ('Could You Help Me?', 'I\'m glad to help. What can I do for you?'),
    )
    @unpack
    def test_question(self, question: str, response: str) -> None:
        self.assertEqual(bot.chat_bow(question), response)
    @data(('Bye!', 'Bye.'))
    @unpack
    def test_bye(self, sentence: str, response: str) -> None:
        self.assertEqual(bot.chat_bow(sentence), response)
    # Unrecognized input (empty, non-string, symbols, non-English) should all
    # fall back to the same default reply.
    @data(
        ('', 'Just think of me as the ace up your sleeve.'),
        (4, 'Just think of me as the ace up your sleeve.'),
        ('-4', 'Just think of me as the ace up your sleeve.'),
        ('#$%^', 'Just think of me as the ace up your sleeve.'),
        ('Привет', 'Just think of me as the ace up your sleeve.'),
        ('Hola', 'Just think of me as the ace up your sleeve.'),
        ('你好', 'Just think of me as the ace up your sleeve.'),
    )
    @unpack
    def test_negative(self, sentence, response) -> None:
        self.assertEqual(bot.chat_bow(sentence), response)
@ddt
class TestBotTfidf(unittest.TestCase):
    """Simple test suite to test bot tfidf responses."""
    # Mirrors TestBotBow but exercises the tf-idf matching backend.
    @data(
        ('Hi!', 'Hey!'),
        ('Hello!', 'Howdy.'),
    )
    @unpack
    def test_greeting(self, sentence: str, response: str) -> None:
        self.assertEqual(bot.chat_tfidf(sentence), response)
    @data(
        ('How are you?', 'Lovely, thanks.'),
        ('Could You Help Me?', 'I\'m glad to help. What can I do for you?'),
    )
    @unpack
    def test_question(self, question: str, response: str) -> None:
        self.assertEqual(bot.chat_tfidf(question), response)
    @data(('Bye!', 'Bye.'))
    @unpack
    def test_bye(self, sentence: str, response: str) -> None:
        self.assertEqual(bot.chat_tfidf(sentence), response)
    # Unrecognized input should fall back to the default reply.
    @data(
        ('', 'Just think of me as the ace up your sleeve.'),
        (4, 'Just think of me as the ace up your sleeve.'),
        ('-4', 'Just think of me as the ace up your sleeve.'),
        ('#$%^', 'Just think of me as the ace up your sleeve.'),
        ('Привет', 'Just think of me as the ace up your sleeve.'),
        ('Hola', 'Just think of me as the ace up your sleeve.'),
        ('你好', 'Just think of me as the ace up your sleeve.'),
    )
    @unpack
    def test_negative(self, sentence, response) -> None:
        self.assertEqual(bot.chat_tfidf(sentence), response)
if __name__ == "__main__":
unittest.main()
| 32.72093
| 76
| 0.590263
|
4a06ad3bd5a81a0b06885f98064dd31e0bb3a34e
| 4,941
|
py
|
Python
|
pypureclient/flasharray/FA_2_9/models/directory_export_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_9/models/directory_export_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_9/models/directory_export_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class DirectoryExportGetResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE: generated by swagger-codegen; manual edits are normally lost on
    # regeneration.
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[DirectoryExport]'
    }
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items'
    }
    required_args = {
    }
    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.DirectoryExport]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[DirectoryExport]): Displays a list of all items after filtering. The values are displayed for each name if meaningful.
        """
        # Only set attributes that were explicitly provided; unset ones stay
        # absent so __getattribute__/to_dict can treat them as missing.
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Restrict assignment to the attributes declared in attribute_map.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `DirectoryExportGetResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Property placeholders read as "attribute not set".
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(DirectoryExportGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DirectoryExportGetResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 38.007692
| 524
| 0.614046
|
4a06af92cbe24b25c719a222f2ad13778b2440a5
| 2,884
|
py
|
Python
|
modules/property/PropertyModule.py
|
Korbier/PyWsServer
|
95510a935c3019ead04d2de71e4344d94b3ac560
|
[
"MIT"
] | null | null | null |
modules/property/PropertyModule.py
|
Korbier/PyWsServer
|
95510a935c3019ead04d2de71e4344d94b3ac560
|
[
"MIT"
] | null | null | null |
modules/property/PropertyModule.py
|
Korbier/PyWsServer
|
95510a935c3019ead04d2de71e4344d94b3ac560
|
[
"MIT"
] | null | null | null |
import sys
from datetime import datetime
from core.module.module import Module
from core.module.message import *
from modules.property.service.PropertyDaoService import PropertyDaoService
class PropertyModule(Module):
    """Module exposing key/value properties at application scope."""
    KEYWORD_LIST = 'list' # Usage: list
    KEYWORD_GET = 'get' # Usage: get propertyName
    KEYWORD_SET = 'set' # Usage: set propertyName propertyValue
    KEYWORD_UNSET = 'unset' # Usage: unset propertyName
    def getName( self ):
        """Module identifier used for message routing."""
        return 'property'
    def setApplication( self, application ):
        """Attach the application and build the DAO service on its database."""
        super( PropertyModule, self ).setApplication( application )
        self._service = PropertyDaoService( self.application().database() )
    def initializeDatabase( self, database, root ):
        """Seed creation/startup timestamps (createdAt only once)."""
        super( PropertyModule, self ).initializeDatabase(database, root)
        # createdAt is written only if it is still writable (first run);
        # startedAt is refreshed on every startup.
        if self._service.isWritable( 'createdAt' ):
            self._service.uncheckedSet( 'createdAt', datetime.now(), False )
        self._service.uncheckedSet( 'startedAt', datetime.now(), False )
    def start( self ):
        """No startup work required for this module."""
        pass
    def onMessage( self, request ):
        """Dispatch list/get/set/unset requests; returns None for unknown topics."""
        topic = request.topic
        args = request.args
        response = None
        if topic == self.KEYWORD_LIST:
            result = self._service.findAll()
            response = Response( request, result.success(), result.content )
        if topic == self.KEYWORD_GET:
            result = self._service.get( args[0] )
            response = Response( request, result.success(), result.content )
        # set/unset broadcast so every connected client sees the change.
        if topic == self.KEYWORD_SET:
            result = self._service.set( args[0], args[1] )
            response = BroadcastResponse( request, result.success(), result.content )
        if topic == self.KEYWORD_UNSET:
            result = self._service.unset( args[0] )
            response = BroadcastResponse( request, result.success(), result.content )
        return response
    def onConsoleMessage( self, request ):
        """Pretty-print the response of a console-issued property command."""
        response = super( PropertyModule, self ).onConsoleMessage( request )
        if not response:
            return response
        topic = response.topic
        args = response.args
        content = response.content
        # content layout per topic: list -> {name: (value, ...)}, get -> value.
        if response.topic == self.KEYWORD_LIST:
            for property in content:
                self.application().console().print( f'{property} = {content[property][0]}' )
        if response.topic == self.KEYWORD_GET:
            self.application().console().print( f'{args[0]} = {content}' )
        if response.topic == self.KEYWORD_SET:
            self.application().console().print( f'property {args[0]} set to value "{args[1]}"' )
        if response.topic == self.KEYWORD_UNSET:
            self.application().console().print( f'property {args[0]} removed' )
        return response
| 34.746988
| 96
| 0.628641
|
4a06b00ed31f678475bd36c3065f724e5ea8b7cf
| 4,016
|
py
|
Python
|
native_client_sdk/src/build_tools/buildbot_run.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-01-16T03:57:28.000Z
|
2021-01-23T15:29:45.000Z
|
native_client_sdk/src/build_tools/buildbot_run.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
native_client_sdk/src/build_tools/buildbot_run.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2017-03-15T13:21:38.000Z
|
2017-03-15T13:21:38.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Main entry point for the NaCl SDK buildbot.
The entry point used to be build_sdk.py itself, but we want
to be able to simplify build_sdk (for example separating out
the test code into test_sdk) and change its default behaviour
while being able to separately control excactly what the bots
run.
"""
import buildbot_common
import os
import optparse
import subprocess
import sys
from buildbot_common import Run
from build_paths import SRC_DIR, SDK_SRC_DIR, SCRIPT_DIR
import getos
def StepArmRunHooks():
  """Re-run gclient hooks for ARM (Linux only)."""
  if getos.GetPlatform() != 'linux':
    return
  # Some ARM-specific tools are only installed when hooks run with
  # GYP_DEFINES set to target_arch=arm.
  buildbot_common.BuildStep('gclient runhooks for arm')
  arm_env = dict(os.environ, GYP_DEFINES='target_arch=arm')
  Run(['gclient', 'runhooks'], env=arm_env, cwd=SDK_SRC_DIR)
def StepRunUnittests():
  """Run the SDK unit tests with any http proxy setting removed."""
  buildbot_common.BuildStep('Run unittests')
  # The tests only talk to localhost, and some slaves cannot route localhost
  # HTTP traffic through the proxy (504 gateway errors) -- so drop it.
  env = dict(os.environ)
  env.pop('http_proxy', None)
  Run([sys.executable, 'test_all.py'], env=env, cwd=SDK_SRC_DIR)
def StepBuildSDK():
  """Run build_sdk.py, using a `subst` drive on Windows to shorten paths."""
  is_win = getos.GetPlatform() == 'win'
  # Windows has a path length limit of 255 characters, after joining cwd with a
  # relative path. Use subst before building to keep the path lengths short.
  if is_win:
    subst_drive = 'S:'
    root_dir = os.path.dirname(SRC_DIR)
    new_root_dir = subst_drive + '\\'
    subprocess.check_call(['subst', subst_drive, root_dir])
    new_script_dir = os.path.join(new_root_dir,
                                  os.path.relpath(SCRIPT_DIR, root_dir))
  else:
    new_script_dir = SCRIPT_DIR
  try:
    Run([sys.executable, 'build_sdk.py'], cwd=new_script_dir)
  finally:
    # Always remove the drive mapping, even if the build fails.
    if is_win:
      subprocess.check_call(['subst', '/D', subst_drive])
def StepTestSDK():
  """Run test_sdk.py, wrapped in a single xvfb-run instance on Linux."""
  cmd = []
  if getos.GetPlatform() == 'linux':
    # xvfb's startup time leaves something to be desired, so start one
    # server for the whole of test_sdk.py rather than one per test.
    # The virtual screen must provide at least 24 bits per pixel:
    # https://code.google.com/p/chromium/issues/detail?id=316687
    cmd += [
        'xvfb-run',
        '--auto-servernum',
        '--server-args', '-screen 0 1024x768x24',
    ]
  cmd += [sys.executable, 'test_sdk.py']
  Run(cmd, cwd=SCRIPT_DIR)
def main(args):
  """Buildbot entry point: build the SDK and, unless build-only, test it.

  Returns 0 on success; build/test failures raise or exit internally.
  """
  # Don't write .pyc files into the source tree: stale byte-code left
  # behind by moved/deleted .py files can break incremental builds.
  os.environ['PYTHONDONTWRITEBYTECODE'] = '1'

  parser = optparse.OptionParser(description=__doc__)
  parser.add_option('--build-only', action='store_true',
                    help='Only build the SDK, don\'t build or run tests.')
  parser.add_option('--build-properties',
                    help='JSON properties passed by buildbot. Currently ignored.')
  parser.add_option('--factory-properties',
                    help='JSON properties passed by buildbot. Currently ignored.')
  options, args = parser.parse_args(args)

  if not options.build_only:
    # Until the buildbot scripts pass --build-only themselves, infer it
    # from the bot name.
    # TODO(sbc): Remove this once buildbot scripts have been updated
    # to pass the --build-only argument.
    if os.getenv('BUILDBOT_BUILDERNAME', '').endswith('build'):
      options.build_only = True

  StepArmRunHooks()
  StepRunUnittests()
  StepBuildSDK()
  if not options.build_only:
    StepTestSDK()
  return 0
if __name__ == '__main__':
  # Translate Ctrl-C into a clean buildbot error instead of a traceback.
  try:
    exit_code = main(sys.argv[1:])
  except KeyboardInterrupt:
    buildbot_common.ErrorExit('buildbot_run: interrupted')
  else:
    sys.exit(exit_code)
| 31.873016
| 79
| 0.706922
|
4a06b05e1735804aaf1b15d97f492300a1eb1d1a
| 2,560
|
py
|
Python
|
Problem 001-150 Python/pb145.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | 2
|
2015-02-11T05:47:42.000Z
|
2015-02-11T05:47:51.000Z
|
Problem 001-150 Python/pb145.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | 1
|
2015-04-13T06:36:21.000Z
|
2015-04-13T06:36:21.000Z
|
Problem 001-150 Python/pb145.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | null | null | null |
import math
import time
t1 = time.time()  # wall-clock start; elapsed time is printed at the end
N = 100000000  # upper bound (exclusive): numbers below 10**8 are considered
def count(n):
    """Count (by brute force) the reversible numbers with exactly n digits.

    Digits are held most-significant first; each reversible value found
    accounts for itself and its distinct reversal, hence the step of 2.
    """
    total = 0
    digits = [0] * n
    digits[0] = 1  # leading digit starts at 1: no leading zeros
    while keepgoing(digits):
        if reversible(digits):
            total += 2
        digits = increase(digits)
    return total
def keepgoing(lst):
    """Return True while the leading digit has not overflowed to 10."""
    return lst[0] != 10
def increase(lst):
    """Add one to the least-significant digit and normalise carries in place."""
    lst[-1] = lst[-1] + 1
    return clean(lst)
def clean(lst):
    """Propagate single carries right-to-left, in place.

    The leading digit is allowed to reach 10; keepgoing() uses that as
    the termination signal.
    """
    idx = len(lst) - 1
    while idx > 0:
        if lst[idx] > 9:
            lst[idx] -= 10
            lst[idx - 1] += 1
        idx -= 1
    return lst
def add(lst1, lst2):
    """Digit-wise add lst2 into lst1 in place, then normalise carries."""
    for idx in range(len(lst1)):
        lst1[idx] = lst1[idx] + lst2[idx]
    return clean(lst1)
def addreverse(lst):
    """Return the carry-normalised digit-wise sum of lst and its reversal."""
    total = lst[:]
    size = len(lst)
    for idx in range(size):
        total[idx] += lst[size - 1 - idx]
    return clean(total)
def reversible(lst):
    """Return True if the number plus its reversal has only odd digits.

    Only the canonical member of each {n, reverse(n)} pair passes:
    the last digit must not be smaller than the first (this also rejects
    trailing zeros, whose reversal would have a leading zero).  If the
    outer digit pair sums to an even value, the result cannot be all-odd,
    so we prune before doing the full addition.
    """
    if lst[-1] < lst[0]:
        return False
    if (lst[-1] + lst[0]) % 2 == 0:
        return False
    return isodd(addreverse(lst))
def isodd(lst):
    """Return True when every digit in lst is odd."""
    return all(digit % 2 == 1 for digit in lst)
def counttotal(num):
    """Sum the reversible counts over every digit length below num."""
    max_digits = int(math.log10(num))
    return sum(count(width) for width in range(1, max_digits + 1))
# Brute-force check (slow): print(counttotal(N))

# Closed-form answer via case analysis on mirrored digit pairs.
#
# For an even digit count 2k, n + reverse(n) is all-odd exactly when
# every mirrored pair sums odd and no pair carries except as required:
#   outer pair (no leading zero, odd sum > 9 not needed): 20 choices
#     ([9,8],[7,6],[5,4],[3,2] in either order, etc.)
#   inner pairs alternate between "odd sum, no constraint on zero"
#     (30 choices: [9,10],[7,8],[5,6],[3,4],[1,2] combinations)
#     and "odd sum <= 9" (30 choices), matching the original derivation.
# Per-length counts:
#   2 digits: 20
#   3 digits: 5 * 20 = 100        (middle digit must be even: 0..4 -> 5)
#   4 digits: 20 * 30 = 600
#   5 digits: 0                   (c+c in the centre is always even)
#   6 digits: 20 * 30 * 30 = 18000
#   7 digits: 20 * 25 * 20 * 5 = 50000
#     (outer pair sums > 10: 20; even pair <= 9: 25; odd pair > 10: 20;
#      middle even digit: 5)
#   8 digits: 20 * 30 * 30 * 30 = 540000
#   9 digits: 0                   (e+e in the centre is always even)
per_digit = [0, 0, 20, 100, 600, 0, 18000, 50000, 540000, 0]

# R[d] accumulates the totals, so R[d] is the count of reversible
# numbers with at most d digits (matching the original running table).
R = [0] * 10
running = 0
for d in range(10):
    running += per_digit[d]
    R[d] = running

print(R[9])
print("time:",time.time()-t1)
| 17.655172
| 49
| 0.558203
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.